blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3b6bd4dc82c8cf84dbce6044aa01c87f88278dc4 | d89a7734d4b31a357fb52965f0b377ac0d46dd44 | /apps/organization/apps.py | a08d9aa5be9cd556716258fa9f1c8ca78cc88cf9 | [] | no_license | qiao-7/qqOnline | 96b973c8eea10abc40944618b182dfcf4237bb59 | 8bdf4abe61bca1eba52bafda56f40fa764621ee1 | refs/heads/master | 2023-01-20T20:13:38.688672 | 2020-11-30T07:39:47 | 2020-11-30T07:39:47 | 312,284,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from django.apps import AppConfig
class OrganizationConfig(AppConfig):
name = 'organization'
verbose_name = '授课机构' | [
"say_it7@163.com"
] | say_it7@163.com |
645b5682e9763727540ac5d791536bf21623922f | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5020/309005020.py | 83361578777dc5a5345e3f1329482955522de273 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,711 | py | from bots.botsconfig import *
from records005020 import recorddefs
# EDI grammar description for an X12 309 (Customs Manifest) message, used by
# the Bots EDI translator. ID/MIN/MAX/LEVEL come from bots.botsconfig.
syntax = {
    'version' : '00403',    # version of ISA to send
    'functionalgroup' : 'AQ',
    }
# Segment tree: each entry is a segment ID with its min/max occurrence count;
# LEVEL nests the segments that may repeat underneath it.
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'M10', MIN: 1, MAX: 1},
    {ID: 'VEH', MIN: 0, MAX: 10},
    {ID: 'CII', MIN: 0, MAX: 3},
    {ID: 'NM1', MIN: 0, MAX: 999, LEVEL: [
        {ID: 'DMG', MIN: 0, MAX: 1},
        {ID: 'DMA', MIN: 0, MAX: 1},
        {ID: 'REF', MIN: 0, MAX: 10},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        ]},
    {ID: 'P4', MIN: 1, MAX: 20, LEVEL: [
        {ID: 'LX', MIN: 1, MAX: 9999, LEVEL: [
            {ID: 'M13', MIN: 0, MAX: 1},
            {ID: 'M11', MIN: 0, MAX: 1},
            {ID: 'N9', MIN: 0, MAX: 999},
            {ID: 'N1', MIN: 0, MAX: 20, LEVEL: [
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'DTM', MIN: 0, MAX: 1},
                {ID: 'PER', MIN: 0, MAX: 1},
                {ID: 'X1', MIN: 0, MAX: 1},
                ]},
            {ID: 'M12', MIN: 0, MAX: 1, LEVEL: [
                {ID: 'R4', MIN: 0, MAX: 10},
                ]},
            {ID: 'VID', MIN: 0, MAX: 999, LEVEL: [
                {ID: 'M7', MIN: 0, MAX: 5},
                {ID: 'N10', MIN: 0, MAX: 999, LEVEL: [
                    {ID: 'VC', MIN: 0, MAX: 999},
                    {ID: 'MAN', MIN: 0, MAX: 999},
                    {ID: 'H1', MIN: 0, MAX: 99, LEVEL: [
                        {ID: 'H2', MIN: 0, MAX: 99},
                        ]},
                    ]},
                ]},
            ]},
        ]},
    {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
d55a5e3bf308de3d82c7417c796be27aad6eb046 | 476f76044c7d6aa36c6f414be483dfaf939695c9 | /apps/user/api.py | 8d952e3e6dbe4018c355370722bcdf64c5299e43 | [] | no_license | JayGitH/autocronjob | 210fb88bfc79aca65da224740ffd0e63d8804940 | f7b62360e3291b02d619a09ba900f57c73ad0a5b | refs/heads/master | 2023-04-08T17:34:25.711073 | 2020-04-03T12:30:53 | 2020-04-03T12:30:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,858 | py | # -*- coding:utf-8 -*-
__author__ = 'qing.cai@horizon.ai'
from django.utils.decorators import method_decorator
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from rest_framework import viewsets
from rest_framework import mixins, status
from user.models import UserProfile
from user.serializers import UserProfileSerializer
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from django.db.models import Q
User = get_user_model()
def jwt_response_payload_handler(token, user=None, request=None):
    """
    Build the custom JSON payload returned after a successful JWT login.

    :param token: the freshly issued JWT token string
    :param user: the authenticated user instance (serialized into the payload)
    :param request: the current request, passed to the serializer context
    :return: dict with ``status``, the serialized ``user`` and the ``token``
    """
    return {
        "status": "ok",
        'user': UserProfileSerializer(user, context={'request': request}).data,
        "token": token
    }
class CustomBackend(ModelBackend):
    """
    Custom authentication backend: looks the user up by username and
    validates the supplied password.
    """
    def authenticate(self, request, username=None, password=None, **kwargs):
        """Return the user when the password checks out, otherwise None.

        Only the lookup is guarded: the original broad ``except Exception``
        also swallowed programming errors raised after the lookup (e.g. from
        ``check_password``); those now propagate as they should.
        """
        try:
            user = User.objects.get(Q(username=username))
        except (User.DoesNotExist, User.MultipleObjectsReturned):
            # Unknown (or ambiguous) username — authentication fails quietly.
            return None
        if user.check_password(password):
            return user
        # Wrong password: fall through to None, same as the original behavior.
        return None
class UserProfileViewSet(mixins.ListModelMixin, mixins.UpdateModelMixin,
                         mixins.RetrieveModelMixin, mixins.DestroyModelMixin,
                         mixins.CreateModelMixin, viewsets.GenericViewSet):
    """
    User CRUD API.

    Wraps the standard DRF mixin actions so every mutating action returns a
    uniform ``{"status": ..., "message": ...}`` JSON envelope instead of the
    default DRF response bodies.
    """
    queryset = UserProfile.objects.all()
    serializer_class = UserProfileSerializer

    def _wrapped_action(self, action, request, fail_fmt, ok_msg, ok_status, *args, **kwargs):
        # Shared guard for create/update/destroy: run the mixin action and
        # translate the outcome into the project's uniform JSON envelope.
        try:
            action(request, *args, **kwargs)
        except Exception as e:  # NOTE(review): broad catch kept — clients rely on a 403 JSON error
            return JsonResponse({"status": False, "message": fail_fmt % e},
                                status=status.HTTP_403_FORBIDDEN)
        return JsonResponse({"status": True, "message": ok_msg}, status=ok_status)

    def create(self, request, *args, **kwargs):
        """Create a user profile; 201 on success, 403 with the error otherwise."""
        return self._wrapped_action(super().create, request,
                                    "create failed, error:%s", "create success!",
                                    status.HTTP_201_CREATED, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        """Update a user profile; 200 on success, 403 with the error otherwise."""
        return self._wrapped_action(super().update, request,
                                    "update failed, error:%s", "update success!",
                                    status.HTTP_200_OK, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
        """Delete a user profile; 200 on success, 403 with the error otherwise."""
        return self._wrapped_action(super().destroy, request,
                                    "delete failed, error:%s", "deleted success!",
                                    status.HTTP_200_OK, *args, **kwargs)
| [
"caiqing@caiqingdeMacBook-Pro.local"
] | caiqing@caiqingdeMacBook-Pro.local |
1d9afdb0ea4509b7913abd4f6fb44d8844d80bd7 | 79c8e8ac5715134fb071a88cffbde83d819458ba | /texar/modules/memory/__init__.py | 02ff95b7c6398bccf2f2ecf496d24d0b9059308b | [
"Apache-2.0"
] | permissive | Holmeswww/Text_Infilling | 3075f1f86413a5adbf5542303116881708bfe275 | f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3 | refs/heads/master | 2020-09-06T20:23:34.370572 | 2019-11-09T03:24:48 | 2019-11-09T03:24:48 | 220,540,281 | 0 | 0 | Apache-2.0 | 2019-11-08T20:11:45 | 2019-11-08T20:11:44 | null | UTF-8 | Python | false | false | 222 | py | #
"""
Memory modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from texar.modules.memory.memory_network import *
| [
"wwt.cpp@gmail.com"
] | wwt.cpp@gmail.com |
910cf67597245bf1ba8892bf92bb0db602c3321b | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.main/branches/sterch-issue734/bungeni/core/workflows/exctractwfi18n.py | 06c97d9cf945aa6212f36e04912984315d94e0ae | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | """
This is used to make the workflow actions and states translatable
it simply builds a pagetemplate which i18nextract will look into
to build the pot file.
To run (from within ~/cinst/bungeni):
$ bin/python src/bungeni.main/bungeni/core/workflows/exctractwfi18n.py
"""
import os
from bungeni.core.workflows.adapters import get_workflow
# Directory containing this script; the generated page template is written
# next to it so i18nextract can pick it up.
path = os.path.split(os.path.abspath(__file__))[0]
f = open("%s/wfi18n.pt" % path, "w")
# Page-template skeleton declaring the "bungeni" i18n domain; the <b>
# elements written below become extractable translation messages.
f.write("""
<html xmlns:tal="http://xml.zope.org/namespaces/tal"
xmlns:metal="http://xml.zope.org/namespaces/metal"
xmlns:i18n="http://xml.zope.org/namespaces/i18n"
i18n:domain="bungeni"> <body>
""")
# For every workflow type, emit one translatable element per state title and
# one per transition title.
for name in [
    "bill",
    "question",
    "motion",
    "groupsitting",
    "user",
    "group",
    "address",
    "tableddocument",
    "agendaitem",
    "committee",
    "parliament",
    "attachedfile",
    "event",
    "report"
    ]:
    workflow = get_workflow(name)
    for status, state in workflow.states.items():
        f.write("""<b i18n:translate="">%s</b>""" % (state.title))
        f.write("\n")
    # NOTE(review): reaches into the private _transitions_by_id mapping —
    # confirm it remains available across bungeni versions.
    for transition in workflow._transitions_by_id.values():
        f.write("""<b i18n:translate="">%s</b>""" % (transition.title))
        f.write("\n")
f.write("</body></html>")
f.write("\n")
f.close()
| [
"jura.stakhiv@gmail.com@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | jura.stakhiv@gmail.com@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
cfbb99712cff4a0b7d417962cff66eefd0f9013b | dc68848e09b3fcd91539c8e2aef6d293be3681db | /bin27/drought_index_SPI_cal.py | 0346b646c96b453ebe10a1408ce402ce65f710f6 | [] | no_license | NiceSugar/bnu_project04_py27 | ae4e2b7b0558318e62ef933840ed4b459429050b | 09e8aeb88f97131ee07e846650141b9e652dc147 | refs/heads/master | 2020-09-29T22:09:54.870782 | 2019-07-18T09:30:08 | 2019-07-18T09:30:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,878 | py | # coding=utf-8
import os
this_root = os.getcwd()+'\\..\\'
import numpy as np
import time
from scipy import interpolate
from matplotlib import pyplot as plt
from multiprocessing import Process
import multiprocessing as mp
# import psutil
import numba as nb
from numba import float64
from climate_indices import indices
from climate_indices import compute
def mk_dir(dir):
    """Create directory *dir* (and any missing parents) if it does not exist.

    ``os.makedirs(..., exist_ok=True)`` removes the check-then-create race of
    the old ``isdir``/``mkdir`` pair and generalizes to nested paths; like the
    original, it still raises if *dir* exists as a regular file.
    """
    os.makedirs(dir, exist_ok=True)
class PCI:
    """Precipitation Condition Index builder.

    Reads per-pixel precipitation series from an .npz archive, min-max
    normalizes each series to [0, 1], and saves the results as a new .npz.
    """
    def __init__(self):
        pass

    def cal_kernel(self,val):
        """Min-max normalize *val* into the [0, 1] range, element by element.

        NOTE(review): divides by (max - min); a constant series divides by
        zero — currently masked by the bare ``except`` in ``loop()``. Confirm
        whether such pixels should instead be flagged explicitly.
        """
        val_normal = np.array([0.] * len(val))
        for i in range(len(val)):
            v_i = (val[i] - np.min(val)) / (np.max(val) - np.min(val))
            val_normal[i] = v_i
        return val_normal

    def loop(self, pre, save_folder, save_name, func):
        """Apply *func* to every series in the .npz at *pre* and save results.

        :param pre: path to the input .npz (one array per pixel key)
        :param save_folder: output directory (created if missing)
        :param save_name: output file name, without extension
        :param func: transform applied to each series longer than one element
        """
        pre_npz = np.load(pre)
        mk_dir(save_folder)
        save_path = save_folder + '\\' + save_name
        start = time.time()
        normalized_vals_dic = {}
        npy_flag = 0
        for npy in pre_npz:
            npy_flag += 1.
            # Progress report every 1000 keys.
            if npy_flag % 1000 == 0:
                print(save_name, npy_flag / len(pre_npz) * 100, '%', '%02f' % (time.time() - start))
            try:
                vals_pre = pre_npz[npy]
            except:
                vals_pre = []
            try:
                if len(vals_pre) > 1:
                    normalized_vals_dic[npy] = func(vals_pre)
                else:
                    # Series too short to normalize: keep an empty placeholder.
                    normalized_vals_dic[npy] = []
            except:
                # NOTE(review): bare except maps any failure (incl. div-by-zero
                # in cal_kernel) silently to [].
                normalized_vals_dic[npy] = []
        print('saving ' + save_path)
        np.savez(save_path, **normalized_vals_dic)
        print('save ' + save_path + ' success\ntime:', '%02f' % (time.time() - start))

    def run(self):
        """Normalize the CRU precipitation archive and save it as 'pci'."""
        save_folder = this_root + 'CRU_precip\\'
        pre_npz = this_root+'CRU_precip\\cru_transfomed.npz'
        pre = pre_npz
        save_name = 'pci'
        func = self.cal_kernel
        self.loop(pre, save_folder, save_name, func)
class SPI:
    """Standardized Precipitation Index builder.

    Computes SPI for each precipitation series in an .npz archive using the
    ``climate_indices`` package and saves the results as a new .npz.
    """
    def __init__(self):
        pass

    def cal_kernel(self,val):
        """Compute the 1-month gamma-distribution SPI of the monthly series *val*.

        Calibration window is 2003-2016, matching the archive written by
        ``run()``; *val* is expected to start at January 2003.
        """
        scale = 1
        distribution = indices.Distribution.gamma
        data_start_year = 2003
        calibration_year_initial = 2003
        calibration_year_final = 2016
        periodicity = compute.Periodicity.monthly
        spi = indices.spi(val,
                          scale,
                          distribution,
                          data_start_year,
                          calibration_year_initial,
                          calibration_year_final, periodicity
                          )
        return spi

    def loop(self, pre, save_folder, save_name, func):
        """Apply *func* to every series in the .npz at *pre* and save results.

        :param pre: path to the input .npz (one array per pixel key)
        :param save_folder: output directory (created if missing)
        :param save_name: output file name, without extension
        :param func: transform applied to each series longer than one element
        """
        pre_npz = np.load(pre)
        mk_dir(save_folder)
        save_path = save_folder + '\\' + save_name
        start = time.time()
        normalized_vals_dic = {}
        npy_flag = 0
        for npy in pre_npz:
            npy_flag += 1.
            # Progress report every 1000 keys.
            if npy_flag % 1000 == 0:
                print(save_name, npy_flag / len(pre_npz) * 100, '%', '%02f' % (time.time() - start))
            try:
                vals_pre = pre_npz[npy]
            except:
                vals_pre = []
            try:
                if len(vals_pre) > 1:
                    normalized_vals_dic[npy] = func(vals_pre)
                else:
                    # Series too short: keep an empty placeholder.
                    normalized_vals_dic[npy] = []
            except:
                # NOTE(review): bare except silently maps any failure to [].
                normalized_vals_dic[npy] = []
        print('saving ' + save_path)
        np.savez(save_path, **normalized_vals_dic)
        print('save ' + save_path + ' success\ntime:', '%02f' % (time.time() - start))

    def run(self):
        """Compute SPI for the CRU precipitation archive and save it as 'spi'."""
        save_folder = this_root + 'CRU_precip\\'
        pre_npz = this_root+'CRU_precip\\cru_transfomed.npz'
        pre = pre_npz
        save_name = 'spi'
        func = self.cal_kernel
        self.loop(pre, save_folder, save_name, func)

    def interp_1d(self,val):
        """Clean *val* by nearest-neighbour interpolation, then 3-sigma outlier
        removal, then re-interpolation of the flagged points.

        NOTE(review): only samples >= 100 are treated as valid here, unlike
        the module-level ``interp_1d`` which keys on NaN/positivity — confirm
        the threshold (presumably a scaled-precipitation validity cutoff).
        """
        # 1. Fill missing samples from the valid (>= 100) ones.
        x = []
        val_new = []
        for i in range(len(val)):
            if val[i] >= 100:
                index = i
                x = np.append(x, index)
                val_new = np.append(val_new, val[i])
        interp = interpolate.interp1d(x, val_new, kind='nearest', fill_value="extrapolate")
        xi = range(len(val))
        yi = interp(xi)
        # 2. Flag outliers beyond three standard deviations with sentinels.
        val_mean = np.mean(yi)
        sigma = np.std(yi)
        n = 3
        yi[(val_mean - n * sigma) > yi] = -999999
        yi[(val_mean + n * sigma) < yi] = 999999
        bottom = val_mean - n * sigma
        top = val_mean + n * sigma
        # 3. Re-interpolate the flagged outliers from the surviving samples.
        xii = []
        val_new_ii = []
        for i in range(len(yi)):
            if -999999 < yi[i] < 999999:
                index = i
                xii = np.append(xii, index)
                val_new_ii = np.append(val_new_ii, yi[i])
        interp_1 = interpolate.interp1d(xii, val_new_ii, kind='nearest', fill_value="extrapolate")
        xiii = range(len(val))
        yiii = interp_1(xiii)
        return yiii
def interp_1d(val):
    """Clean a 1-D series in three passes.

    1. Fill invalid samples (NaN or non-positive) by nearest-neighbour
       interpolation over the valid ones (extrapolating at the edges).
    2. Mark values beyond three standard deviations with +/-999999 sentinels.
    3. Re-interpolate those flagged points from the surviving samples.

    :param val: sequence of floats, possibly containing NaN / non-positives
    :return: numpy array of the same length with every sample filled in
    """
    # Pass 1: nearest-neighbour fill from the valid samples.
    good_idx = [pos for pos, v in enumerate(val) if not np.isnan(v) and v > 0]
    good_val = [val[pos] for pos in good_idx]
    fill = interpolate.interp1d(good_idx, good_val, kind='nearest',
                                fill_value="extrapolate")
    filled = fill(range(len(val)))

    # Pass 2: flag 3-sigma outliers with sentinel values.
    center = np.mean(filled)
    spread = np.std(filled)
    filled[filled < center - 3 * spread] = -999999
    filled[filled > center + 3 * spread] = 999999

    # Pass 3: re-interpolate the flagged outliers from what survived.
    keep_idx = [pos for pos, v in enumerate(filled) if -999999 < v < 999999]
    keep_val = [filled[pos] for pos in keep_idx]
    refill = interpolate.interp1d(keep_idx, keep_val, kind='nearest',
                                  fill_value="extrapolate")
    return refill(range(len(val)))
def interp_nan():
    """Run interp_1d over every series in the SPI archive and save the result.

    NOTE(review): input/output paths are hard-coded to absolute Windows
    locations ('D:\\MODIS\\...') — consider deriving them from this_root.
    """
    npz = np.load('D:\\MODIS\\bin\\..\\CRU_precip\\spi.npz')
    print(len(npz))
    vals_dic = {}
    for npy in npz:
        vals = npz[npy]
        # Fill gaps / outliers pixel by pixel with the module-level interp_1d.
        interp = interp_1d(vals)
        vals_dic[npy] = interp
    print('saving dic')
    np.savez('D:\\MODIS\\bin\\..\\CRU_precip\\pci_no_nan.npz',**vals_dic)
def main():
    """Build a station-level SPI dictionary from the in-situ precipitation archive.

    For every station key in pre_dic.npz, assembles the 2003-2016 monthly
    series; stations with any missing month are skipped entirely. Complete
    series are gap-filled with interp_1d, converted to SPI, and stored under
    '<station>_<YYYYMM>' keys in spi_dic.npy.
    """
    npz = np.load('D:\\MODIS\\bin\\..\\in_situ_data\\pre_dic.npz')
    count = 0
    spi_dic = {}
    for k,i in enumerate(npz):
        vals = []
        flag = 0
        # Assemble the monthly series for 2003-01 .. 2016-12.
        for y in range(2003,2017):
            for m in range(1,13):
                y = str(y)
                m = '%02d'%m
                date = y+m
                # Each npz entry stores a {YYYYMM: value} dict as a 0-d array.
                val_dic = dict(npz[i].item())
                try:
                    vals.append(val_dic[date])
                except:
                    # Missing month: pad with a sentinel and mark the station.
                    vals.append(-999999)
                    flag = 1
        if flag == 1:
            # Report how many stations have incomplete records so far.
            count += 1
            print(k,'/',len(npz),count)
        # NOTE(review): this check duplicates the one above — incomplete
        # stations are simply skipped.
        if flag:
            pass
        else:
            vals = np.array(vals)
            vals = interp_1d(vals)
            spi = SPI().cal_kernel(vals)
            # Fan the SPI series back out to one '<station>_<YYYYMM>' key
            # per month, in the same order the series was assembled.
            date_spi_index = 0
            for y in range(2003, 2017):
                for m in range(1, 13):
                    y = str(y)
                    m = '%02d' % m
                    date = y + m
                    key = i+'_'+date
                    spi_dic[key] = spi[date_spi_index]
                    date_spi_index += 1
    print('saving spi_dic')
    np.save('spi_dic',spi_dic)
# Script entry point: build the station SPI dictionary.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | NiceSugar.noreply@github.com |
79a1b319f5b818e6524b164b4e9f7bd9ab4bf1a2 | 9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af | /services/web__indianexpress_com.py | bdbfa8cdd938e92431fd175e26823fc61ed0fc61 | [] | no_license | rudolphos/NewsGrabber | f9bddc9a9b3a9e02f716133fd746f48cee635b36 | 86354fb769b2710ac7cdd5bd8795e43158b70ad2 | refs/heads/master | 2021-01-12T12:07:55.335079 | 2016-10-09T22:39:17 | 2016-10-09T22:39:17 | 72,316,773 | 0 | 0 | null | 2016-10-30T00:35:08 | 2016-10-30T00:35:08 | null | UTF-8 | Python | false | false | 537 | py | refresh = 5
# NewsGrabber service definition for indianexpress.com.
version = 20160403.01
# Seed section pages re-crawled every refresh cycle.
urls = ['http://indianexpress.com/',
'http://indianexpress.com/india/',
'http://indianexpress.com/world/',
'http://indianexpress.com/opinion/',
'http://indianexpress.com/sports/',
'http://indianexpress.com/entertainment/',
'http://indianexpress.com/lifestyle/',
'http://indianexpress.com/technology/',
'http://indianexpress.com/trending/',
'http://indianexpress.com/photos/',
'http://indianexpress.com/videos/']
# Any URL on an indianexpress.com host belongs to this service.
regex = [r'^https?:\/\/[^\/]*indianexpress\.com']
videoregex = []
liveregex = [] | [
"yorick@0xf.nl"
] | yorick@0xf.nl |
e071373aecc4977e1de9a65d7e3fd50a12987e55 | 82b1b6baf781ffcf6ff1ea9ee33adff94fdfd596 | /p1.py | 1ab37c58cb37d8da4757407f486d2b75c1793d82 | [] | no_license | WangDachuiBeijing/TestForGit | 0acf7d57e864744905c6fc7d5b0d70f3f1b6bb5b | 6784c50f0e7b51faa99f454e1e2e5ab68e062116 | refs/heads/master | 2020-03-31T07:47:12.650766 | 2018-10-08T07:19:42 | 2018-10-08T07:19:42 | 152,034,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | # do something
# add new line
# modify in master | [
"zjx-63@163.com"
] | zjx-63@163.com |
3fa94711deee1501fffaea2ebd96a02444740ebb | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_03_03_preview/aio/operations/_private_endpoint_connections_operations.py | 9539dfaa9da4998ba5e5dbec5e4b63fc87b7dedd | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 27,952 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_delete_request,
build_get_request,
build_list_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.iothub.v2021_03_03_preview.aio.IotHubClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Generated operation-group wiring: the client passes its pipeline
        # client, configuration, serializer and deserializer either
        # positionally (in that order) or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def list(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> List[_models.PrivateEndpointConnection]:
        """List private endpoint connections.

        List private endpoint connection properties.

        :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub. Required.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of PrivateEndpointConnection or the result of cls(response)
        :rtype: list[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exceptions; callers
        # may extend/override the mapping via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-03-03-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2021-03-03-preview")
        )
        cls: ClsType[List[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
        # Build and send the GET request through the client pipeline.
        request = build_list_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("[PrivateEndpointConnection]", pipeline_response)
        if cls:
            # Custom response hook takes precedence over the deserialized body.
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # URL template consumed by build_list_request above.
    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections"
    }
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
    ) -> _models.PrivateEndpointConnection:
        """Get private endpoint connection.

        Get private endpoint connection properties.

        :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub. Required.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exceptions; callers
        # may extend/override the mapping via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-03-03-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2021-03-03-preview")
        )
        cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
        # Build and send the GET request through the client pipeline.
        request = build_get_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if cls:
            # Custom response hook takes precedence over the deserialized body.
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # URL template consumed by build_get_request above.
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
    }
    async def _update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO],
        **kwargs: Any
    ) -> _models.PrivateEndpointConnection:
        # Initial (non-polling) call of the update long-running operation:
        # issues the request and deserializes the immediate 200/201 response.
        # begin_update wraps this with the ARM polling machinery.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-03-03-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2021-03-03-preview")
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(private_endpoint_connection, (IO, bytes)):
            _content = private_endpoint_connection
        else:
            _json = self._serialize.body(private_endpoint_connection, "PrivateEndpointConnection")
        request = build_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = update applied, 201 = connection created; body shape is identical.
        if response.status_code == 200:
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore

    # URL template consumed by build_update_request above.
    _update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
    }
    # Typing-only overload (model-object body); the runtime implementation is
    # the undecorated begin_update defined after the overloads.
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: _models.PrivateEndpointConnection,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
        """Update private endpoint connection.

        Update the status of a private endpoint connection with the specified name.

        :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub. Required.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :param private_endpoint_connection: The private endpoint connection with updated properties.
         Required.
        :type private_endpoint_connection:
         ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing-only overload (raw IO/bytes body); the runtime implementation is
    # the undecorated begin_update defined after the overloads.
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
        """Update private endpoint connection.

        Update the status of a private endpoint connection with the specified name.

        :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub. Required.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :param private_endpoint_connection: The private endpoint connection with updated properties.
         Required.
        :type private_endpoint_connection: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
@distributed_trace_async
async def begin_update(
    self,
    resource_group_name: str,
    resource_name: str,
    private_endpoint_connection_name: str,
    private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO],
    **kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
    """Update private endpoint connection.

    Update the status of a private endpoint connection with the specified name.

    :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub. Required.
    :type resource_name: str
    :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
    :type private_endpoint_connection_name: str
    :param private_endpoint_connection: The private endpoint connection with updated properties. Is
     either a PrivateEndpointConnection type or a IO type. Required.
    :type private_endpoint_connection:
     ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-03-03-preview"] = kwargs.pop(
        "api_version", _params.pop("api-version", "2021-03-03-preview")
    )
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only issue the initial PUT when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            private_endpoint_connection=private_endpoint_connection,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the public model type.
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling=True -> default ARM polling; polling=False -> no polling;
    # otherwise the caller supplied their own polling method instance.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_update.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _delete_initial(
    self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> Optional[_models.PrivateEndpointConnection]:
    # Issue the initial DELETE of the long-running operation; begin_delete
    # wraps this call with an LRO poller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-03-03-preview"] = kwargs.pop(
        "api_version", _params.pop("api-version", "2021-03-03-preview")
    )
    cls: ClsType[Optional[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)

    request = build_delete_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._delete_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    _stream = False
    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=_stream, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 200: deletion finished synchronously; 202: accepted, body describes the
    # in-flight connection; 204: no body to deserialize.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_delete_initial.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace_async
async def begin_delete(
    self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
    """Delete private endpoint connection.

    Delete private endpoint connection with the specified name.

    :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub. Required.
    :type resource_name: str
    :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
    :type private_endpoint_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-03-03-preview"] = kwargs.pop(
        "api_version", _params.pop("api-version", "2021-03-03-preview")
    )
    cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only issue the initial DELETE when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the public model type.
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling=True -> default ARM polling; polling=False -> no polling;
    # otherwise the caller supplied their own polling method instance.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_delete.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
3703bf9ff94d49f6d5280afb4115dc2e33306e49 | 5e82904fac967b15b44972d4e80460e1f218a69c | /Tensorflow201708复习/ptb/ptb_word__lm2.py | fdba515bdd44a8e7e543c4c611db61ccd44e48f6 | [] | no_license | jiangweisuc/Tensorflow_Learn | 641851c0dc89a9fd874392589a44d88a9cdc2700 | 9d1c25776b3a2afd2779bffd2d85acc400ecb12b | refs/heads/master | 2021-01-02T09:24:37.930565 | 2017-11-16T07:23:35 | 2017-11-16T07:23:35 | 99,210,965 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,645 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import numpy as np
import tensorflow as tf
import ptb.reader as reader
# Command-line flags configuring the run (model size, I/O paths, precision).
flags = tf.flags
logging = tf.logging

flags.DEFINE_string(
    "model", "small",
    "A type of model. Possible options are: small, medium, large.")
# NOTE(review): save_path and data_path default to absolute, machine-specific
# paths; override them on the command line when running elsewhere.
flags.DEFINE_string("save_path", '/home/jiang/PycharmProjects/TensorflowLearn/20170719复习tensorflow/ptblog',
                    "Model output directory.")
flags.DEFINE_string("data_path", '/home/jiang/PycharmProjects/TensorflowLearn/20170719复习tensorflow/ptb/data/', "data_path")
flags.DEFINE_bool("use_fp16", False,
                  "Train using 16-bit floats instead of 32bit floats")

FLAGS = flags.FLAGS
def data_type():
    """Return the floating-point dtype selected by the --use_fp16 flag."""
    if FLAGS.use_fp16:
        return tf.float16
    return tf.float32
class PTBInput(object):
    """The input data."""

    def __init__(self, config, data, name=None):
        # Mini-batch geometry taken from the configuration.
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        # Number of num_steps-long batches that fit in one pass over `data`.
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        # Queue-backed tensors yielding word-id inputs and the same sequence
        # shifted by one position as targets.
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)
class PTBModel(object):
    """The PTB model."""

    def __init__(self, is_training, config, input_):
        """Build the unrolled-LSTM language model graph.

        Args:
            is_training: whether to add dropout and the training ops.
            config: hyperparameter object (one of the *Config classes).
            input_: a PTBInput providing input_data/targets tensors.
        """
        self._input = input_

        batch_size = input_.batch_size
        num_steps = input_.num_steps
        size = config.hidden_size  # LSTM hidden size == embedding size
        vocab_size = config.vocab_size

        # Slightly better results can be obtained with forget gate biases
        # initialized to 1 but the hyperparameters of the model would need to be
        # different than reported in the paper.
        def lstm_cell():
            # With the latest TensorFlow source code (as of Mar 27, 2017),
            # the BasicLSTMCell will need a reuse parameter which is unfortunately not
            # defined in TensorFlow 1.0. To maintain backwards compatibility, we add
            # an argument check here:
            if 'reuse' in inspect.getargspec(
                tf.contrib.rnn.BasicLSTMCell.__init__).args:
                return tf.contrib.rnn.BasicLSTMCell(
                    size, forget_bias=0.0, state_is_tuple=True,
                    reuse=tf.get_variable_scope().reuse)
            else:
                return tf.contrib.rnn.BasicLSTMCell(
                    size, forget_bias=0.0, state_is_tuple=True)
        attn_cell = lstm_cell
        if is_training and config.keep_prob < 1:
            # During training wrap each cell with dropout on its outputs.
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(
                    lstm_cell(), output_keep_prob=config.keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell(
            [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)

        self._initial_state = cell.zero_state(batch_size, data_type())

        # Embedding lookup is pinned to the CPU.
        with tf.device("/cpu:0"):
            embedding = tf.get_variable(
                "embedding", [vocab_size, size], dtype=data_type())
            inputs = tf.nn.embedding_lookup(embedding, input_.input_data)

        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        # Simplified version of models/tutorials/rnn/rnn.py's rnn().
        # This builds an unrolled LSTM for tutorial purposes only.
        # In general, use the rnn() or state_saving_rnn() from rnn.py.
        #
        # The alternative version of the code below is:
        #
        # inputs = tf.unstack(inputs, num=num_steps, axis=1)
        # outputs, state = tf.contrib.rnn.static_rnn(
        #     cell, inputs, initial_state=self._initial_state)
        outputs = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                # Reuse the same LSTM weights at every unrolled time step.
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)

        # Collapse the unrolled outputs to a [batch*steps, size] matrix for
        # the softmax projection.
        output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
        softmax_w = tf.get_variable(
            "softmax_w", [size, vocab_size], dtype=data_type())
        softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
        logits = tf.matmul(output, softmax_w) + softmax_b

        # Reshape logits to be 3-D tensor for sequence loss
        logits = tf.reshape(logits, [batch_size, num_steps, vocab_size])

        # use the contrib sequence loss and average over the batches
        loss = tf.contrib.seq2seq.sequence_loss(
            logits,
            input_.targets,
            tf.ones([batch_size, num_steps], dtype=data_type()),
            average_across_timesteps=False,
            average_across_batch=True
        )

        # update the cost variables
        self._cost = cost = tf.reduce_sum(loss)
        self._final_state = state

        if not is_training:
            return

        # Training ops: clipped-gradient SGD with an externally assignable
        # learning rate (see assign_lr).
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())

        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name="new_learning_rate")
        self._lr_update = tf.assign(self._lr, self._new_lr)

    def assign_lr(self, session, lr_value):
        """Push a new learning-rate value into the graph variable."""
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

    # Read-only accessors for the tensors/ops built in __init__.
    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
class SmallConfig(object):
    """Small config."""
    init_scale = 0.1      # uniform weight-init range [-init_scale, init_scale]
    learning_rate = 1.0   # initial learning rate
    max_grad_norm = 5     # global-norm gradient clipping threshold
    num_layers = 2        # stacked LSTM layers
    num_steps = 20        # unrolled time steps
    hidden_size = 200     # LSTM units per layer
    max_epoch = 4         # epochs trained at the initial learning rate
    max_max_epoch = 13    # total training epochs
    keep_prob = 1.0       # dropout keep probability (1.0 = no dropout)
    lr_decay = 0.5        # LR decay per epoch after max_epoch
    batch_size = 20
    vocab_size = 10000
class MediumConfig(object):
    """Medium config."""
    init_scale = 0.05     # uniform weight-init range [-init_scale, init_scale]
    learning_rate = 1.0   # initial learning rate
    max_grad_norm = 5     # global-norm gradient clipping threshold
    num_layers = 2        # stacked LSTM layers
    num_steps = 35        # unrolled time steps
    hidden_size = 650     # LSTM units per layer
    max_epoch = 6         # epochs trained at the initial learning rate
    max_max_epoch = 39    # total training epochs
    keep_prob = 0.5       # dropout keep probability
    lr_decay = 0.8        # LR decay per epoch after max_epoch
    batch_size = 20
    vocab_size = 10000
class LargeConfig(object):
    """Large config."""
    init_scale = 0.04     # uniform weight-init range [-init_scale, init_scale]
    learning_rate = 1.0   # initial learning rate
    max_grad_norm = 10    # global-norm gradient clipping threshold
    num_layers = 2        # stacked LSTM layers
    num_steps = 35        # unrolled time steps
    hidden_size = 1500    # LSTM units per layer
    max_epoch = 14        # epochs trained at the initial learning rate
    max_max_epoch = 55    # total training epochs
    keep_prob = 0.35      # dropout keep probability
    lr_decay = 1 / 1.15   # LR decay per epoch after max_epoch
    batch_size = 20
    vocab_size = 10000
class TestConfig(object):
    """Tiny config, for testing."""
    init_scale = 0.1      # uniform weight-init range [-init_scale, init_scale]
    learning_rate = 1.0   # initial learning rate
    max_grad_norm = 1     # global-norm gradient clipping threshold
    num_layers = 1        # single LSTM layer keeps the graph minimal
    num_steps = 2         # unrolled time steps
    hidden_size = 2       # LSTM units per layer
    max_epoch = 1         # epochs trained at the initial learning rate
    max_max_epoch = 1     # total training epochs
    keep_prob = 1.0       # dropout keep probability (no dropout)
    lr_decay = 0.5        # LR decay per epoch after max_epoch
    batch_size = 20
    vocab_size = 10000
def run_epoch(session, model, eval_op=None, verbose=False):
    """Runs the model on the given data.

    Args:
        session: an active tf.Session.
        model: a PTBModel instance.
        eval_op: optional op (e.g. model.train_op) run alongside the fetches.
        verbose: if True, print progress roughly ten times per epoch.

    Returns:
        The epoch perplexity, exp(total cost / total steps).
    """
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)

    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
    }
    if eval_op is not None:
        fetches["eval_op"] = eval_op

    for step in range(model.input.epoch_size):
        # Feed the final LSTM state of the previous batch back in so state is
        # carried across the whole epoch.
        feed_dict = {}
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h

        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]

        costs += cost
        iters += model.input.num_steps

        if verbose and step % (model.input.epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
                   iters * model.input.batch_size / (time.time() - start_time)))

    return np.exp(costs / iters)
def get_config():
    """Return the hyperparameter configuration selected by --model.

    Returns:
        A new instance of SmallConfig, MediumConfig, LargeConfig or TestConfig.

    Raises:
        ValueError: if FLAGS.model is not one of small/medium/large/test.
    """
    if FLAGS.model == "small":
        return SmallConfig()
    elif FLAGS.model == "medium":
        return MediumConfig()
    elif FLAGS.model == "large":
        return LargeConfig()
    elif FLAGS.model == "test":
        return TestConfig()
    else:
        # Bug fix: ValueError does not interpolate a trailing argument the way
        # logging does; format the flag value into the message explicitly.
        raise ValueError("Invalid model: %s" % FLAGS.model)
def main(_):
    """Build train/valid/test models sharing one set of weights and train."""
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    # Test evaluation uses batch/step size 1 so LSTM state is carried one word
    # at a time across the whole test set.
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                m = PTBModel(is_training=True, config=config, input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
            # reuse=True: validation shares the variables created for training.
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)

        # Supervisor manages checkpointing, summaries and input queue runners.
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Exponential LR decay starting after the first max_epoch epochs.
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)

            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
# Standard TensorFlow entry point: parses flags, then dispatches to main().
if __name__ == "__main__":
    tf.app.run()
"jiang_suc@foxmail.com"
] | jiang_suc@foxmail.com |
2ce3ecec1a6412c51a2f7a4f067baa58e00e7263 | 8b57a24a8ce48795f0d7d8b36a2753060e852a62 | /ros/build/styx_msgs/cmake/styx_msgs-genmsg-context.py | c149bf5541d620cd89934ead000ad7613c27c53c | [
"MIT"
] | permissive | MikeBMW/destiny_awaits_Mike | 46884e0605edf5c4418a30ad5292b67b6e1dd9e7 | dd0034855e5ac8fbb2f01fb3f380e4e0ae2de487 | refs/heads/master | 2020-03-10T11:45:07.084317 | 2018-04-14T14:24:11 | 2018-04-14T14:24:11 | 129,362,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/student/destiny_awaits_Mike/ros/src/styx_msgs/msg/TrafficLight.msg;/home/student/destiny_awaits_Mike/ros/src/styx_msgs/msg/TrafficLightArray.msg;/home/student/destiny_awaits_Mike/ros/src/styx_msgs/msg/Waypoint.msg;/home/student/destiny_awaits_Mike/ros/src/styx_msgs/msg/Lane.msg"
services_str = ""
pkg_name = "styx_msgs"
dependencies_str = "geometry_msgs;sensor_msgs;std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "styx_msgs;/home/student/destiny_awaits_Mike/ros/src/styx_msgs/msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"niyingxiang@126.com"
] | niyingxiang@126.com |
b529f3d8e9455ae4d9565939c53ef8b1d5221fd4 | 6716177942e6012f55d9d1bcc1aeef9ed00916df | /src/numformat/__init__.py | 9ba6217fbc36f296507eba9228644a7c821f5675 | [
"MIT"
] | permissive | sgowris2/numformat | 96e81af10ae151d36c2234c11f6ca7a23953ed74 | 299806b548be1ae282077a7b2d8faf2c6ca57f52 | refs/heads/master | 2023-04-02T20:31:50.665748 | 2021-04-03T13:11:32 | 2021-04-03T13:11:32 | 352,017,523 | 0 | 0 | MIT | 2021-04-03T13:11:33 | 2021-03-27T08:10:53 | Python | UTF-8 | Python | false | false | 91 | py | from numformat.numformat import autoformat, sciformat, enggformat, get_sigfigs, set_sigfigs | [
"sgowris2@gmail.com"
] | sgowris2@gmail.com |
a0109b36d0dd15a3b4b88cbc73389d83e8bb43ff | dd6746d069c5be54158cd6b107556ede84083001 | /0x04-python-more_data_structures/3-common_elements.py | d6ade97ba0e3a776ac1301ef614b9dede61d82cf | [] | no_license | cmlesquivel/holbertonschool-higher_level_programming | 77db30753d16e04a29fe8aeed6764c1bed3fe340 | 0846675c615cb7bf64753c9465f7492ceea41f73 | refs/heads/master | 2020-09-29T06:37:06.077765 | 2020-05-20T20:01:16 | 2020-05-20T20:01:16 | 226,977,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/python3
def common_elements(set_1, set_2):
list_common = []
for i in set_1:
for x in set_2:
if i in x:
list_common.append(x)
return list_common
| [
"anayacamilo@yahoo.es"
] | anayacamilo@yahoo.es |
831b968759e37f9985b6802ddf028b1cc17ca0c6 | ebda08c82e16de435c1adfcdfeccae4f381cf005 | /2020/10/1.py | ce0104b369d1921cc0aafbbe02359b72d9babc7c | [] | no_license | patcoet/Advent-of-Code | 2c0d05f25afa98120a5e8d69c4be082876797050 | 9a36017197825c2106196fd23844de3cc83863f3 | refs/heads/main | 2022-12-21T13:32:36.705704 | 2022-12-21T04:52:52 | 2022-12-21T04:52:52 | 225,160,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | ratings = []
with open("input.txt") as file:
for line in file:
ratings.append(int(line.strip()))
ratings.sort()
ratings.append(max(ratings) + 3)
diffs = []
for i in range(len(ratings)):
prev = 0
if i > 0:
prev = ratings[i - 1]
diffs.append(ratings[i] - prev)
print(len([x for x in diffs if x == 1]) * len([x for x in diffs if x == 3])) | [
"patcoet@users.noreply.github.com"
] | patcoet@users.noreply.github.com |
10bcb6a6cca24a31397972415ea766cbddfa555c | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /147_best.py | 3d1e8b37f5da10cd271490da0e35045823c72455 | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def insertionSortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head==None:
return None
l=ListNode(0)
p=head
while p!=None:
q=l
while q.next!=None and q.next.val<p.val :
q=q.next
np=p.next
p.next=q.next
q.next=p
p=np
return l.next
| [
"noelsun@mowennaierdeMacBook-Pro.local"
] | noelsun@mowennaierdeMacBook-Pro.local |
db519c53722500869472aec0223014097c6fa8d5 | b8bd2c861f413a59a9460c63b2828e6ae4b8098d | /server/tv/migrations/0003_videofile-to-tvvideofile-data.py | 21748b69e8288ffb4c57fb23f1f2f82232da75ba | [] | no_license | Din-Houn/medianav | 7509503baf8e50518091bd01d426483e522860e9 | ea5fe2cc689d05f106fc168ca6eb35de7cd2ccb2 | refs/heads/master | 2021-06-01T23:07:39.088664 | 2010-09-20T19:20:41 | 2010-09-20T19:20:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,316 | py |
from south.db import db
from django.db import models
from tv.models import *
class Migration:
# During a dry the ORM cannot be accessed
# since this whole migration is a data migration
# disable dry runs
no_dry_run = True
def forwards(self, orm):
    # Data migration: copy every legacy VideoFile row into the new
    # TVVideoFile, which uses multi-table inheritance from mnav.BaseVideoFile.
    for o in orm['tv.VideoFile'].objects.all():
        new = orm.TVVideoFile(show = o.show)
        # Create the parent BaseVideoFile row explicitly and carry the
        # name across, since only the child row is constructed above.
        new.basevideofile_ptr = orm['mnav.BaseVideoFile'].objects.create()
        new.basevideofile_ptr.name = o.name
        new.basevideofile_ptr.save()
        new.save()
        # M2M links can only be assigned once the row has a primary key.
        new.episodes = o.episodes.all()
def backwards(self, orm):
for o in orm['tv.TVVideoFile'].objects.all():
new = orm.VideoFile(show = o.show)
new.name = o.basevideofile_ptr.name
new.save()
new.episodes = o.episodes.all()
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mnav.basevideofile': {
'audio_bitrate': ('mnav.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'audio_channels': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'audio_codec': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'audio_codec_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'audio_format': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'audio_language': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'audio_resolution': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'audio_samplerate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ctime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'file_size': ('mnav.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'general_bitrate': ('mnav.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'general_codec': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'general_duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'general_format': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'general_size': ('mnav.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'video_bitrate': ('mnav.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_codec': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'video_codec_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'video_displayaspect': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'video_format': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'video_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_pixelaspect': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'video_scantype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'video_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'tv.alternateshowname': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'show': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tv.Show']", 'blank': 'True'})
},
'tv.episode': {
'director': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'episode_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'first_aired': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'guest_stars': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'overview': ('django.db.models.fields.CharField', [], {'max_length': '4024', 'blank': 'True'}),
'production_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'season_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'seen_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']"}),
'show': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tv.Show']", 'blank': 'True'}),
'tvdb_episodeid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tvdb_image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'tvdb_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'tvdb_last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tvdb_rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'writer': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'tv.show': {
'airs_day': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'airs_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'content_rating': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'fav_of': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']"}),
'first_aired': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'genre': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'overview': ('django.db.models.fields.CharField', [], {'max_length': '4024', 'blank': 'True'}),
'runtime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'tvdb_banner_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'tvdb_fanart_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'tvdb_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'tvdb_last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tvdb_poster_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'tvdb_rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tvdb_showid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'tv.tvvideofile': {
'basevideofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['mnav.BaseVideoFile']", 'unique': 'True', 'primary_key': 'True'}),
'episodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['tv.Episode']"}),
'show': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tv.Show']", 'null': 'True', 'blank': 'True'})
},
'tv.videofile': {
'episodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['tv.Episode']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'show': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tv.Show']", 'null': 'True', 'blank': 'True'})
},
'tv.videofilepattern': {
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
're': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['tv']
| [
"AndreFMiller@3ae9964c-60fe-11de-bc6d-35d2ebd5fd4e"
] | AndreFMiller@3ae9964c-60fe-11de-bc6d-35d2ebd5fd4e |
69557076e06f8b3d0204e2e1aac37313d99ccd32 | a4be1b028aaa245dc48cb5bbfaff5fe110206488 | /zhaopintest/urls.py | 977a756ed066372e3a5f9c61a90f7cb7c47433ac | [] | no_license | sunwei17/zhaopin | aff6b2cf53bf1a4b923dcfbda583b72b2392a3fb | 36d44ad41c2d9cd4b343c751e8436c99783115d2 | refs/heads/master | 2020-04-16T19:04:19.699063 | 2019-01-15T14:01:34 | 2019-01-15T14:01:34 | 165,845,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | """zhaopintest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from webtest import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^index/',views.get),
url(r'^getList2/$',views.getType),
url(r'^getList/$', views.getList),
]
| [
"1952436511@qq.com"
] | 1952436511@qq.com |
8b9a535cdf7c40b63b81d76e243fd29d96568a87 | a723757da384622943186069ecd3ee63ab9edb7e | /crawler.py | 81a699cec23293adc779ea02be0309493ac0c801 | [] | no_license | sainadhreddy92/webcrawler | 110a4af60768284566107250a0a72384856560f3 | 41b844cf4df419a2ab6b374fc0de79470148e6f5 | refs/heads/master | 2020-06-12T07:25:03.889036 | 2020-05-02T20:05:15 | 2020-05-02T20:05:15 | 75,597,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,373 | py | import re
import sys
import time
import math
import urllib2
import urlparse
import optparse
from cgi import escape
from traceback import format_exc
from Queue import Queue, Empty as QueueEmpty
from bs4 import BeautifulSoup
USAGE = "%prog"
VERSION = "0.2"
__version__ = "0.2"
AGENT = "%s/%s" % (__name__, __version__)
class Fetcher(object):
def __init__(self,url):
self.url = url
self.urls = []
def __getitem__(self, x):
return self.urls[x]
def _addHeaders(self, request):
request.add_header("User-Agent", AGENT)
def open(self):
url = self.url
try:
request = urllib2.Request(url)
handle = urllib2.build_opener()
except IOError:
return None
return (request, handle)
def fetch(self):
request, handle = self.open()
self._addHeaders(request)
if handle:
try:
#content = unicode(handle.open(request).read(), "utf-8",
#errors="replace")
content = handle.open(request).read()
soup = BeautifulSoup(content,"html.parser")
tags = soup('a')
except urllib2.HTTPError, error:
if error.code == 404:
print >> sys.stderr, "ERROR: %s -> %s" % (error, error.url)
else:
print >> sys.stderr, "ERROR: %s" % error
tags = []
except urllib2.URLError, error:
print >> sys.stderr, "ERROR: %s" % error
tags = []
for tag in tags:
href = tag.get("href")
if href is not None:
url = urlparse.urljoin(self.url, escape(href))
if url not in self.urls:
self.urls.append(url)
class Crawler(object):
def __init__(self, root, depth):
self.root = root
self.depth = depth
self.host = urlparse.urlparse(root)[1]
self.urls = []
self.links = 0
self.followed = 0
def crawl(self):
page = Fetcher(self.root)
page.fetch()
q = Queue()
for url in page.urls:
q.put(url)
followed = [self.root]
n = 0
while True:
try:
url = q.get()
except QueueEmpty:
break
n += 1
if url not in followed:
try:
host = urlparse.urlparse(url)[1]
if re.match(".*%s" % self.host, host):
followed.append(url)
self.followed += 1
page = Fetcher(url)
page.fetch()
for i, url in enumerate(page):
if url not in self.urls:
self.links += 1
q.put(url)
self.urls.append(url)
if n > self.depth and self.depth > 0:
break
except Exception, e:
print "ERROR: Can't process url '%s' (%s)" % (url, e)
print format_exc()
def parse_options():
parser = optparse.OptionParser(usage=USAGE, version=VERSION)
parser.add_option("-q", "--quiet",
action="store_true", default=False, dest="quiet",
help="Enable quiet mode")
parser.add_option("-l", "--links",
action="store_true", default=False, dest="links",
help="Get links for specified url only")
parser.add_option("-d", "--depth",
action="store", type="int", default=30, dest="depth",
help="Maximum depth to traverse")
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_help()
raise SystemExit, 1
return opts, args
def get_links(url):
pge = Fetcher(url)
page.fetch()
for i, url in enumerate(page):
print "%d. %s" % (i, url)
def main():
opts, args = parse_options()
opts, args = parse_options()
url = args[0]
if opts.links:
getLinks(url)
raise SystemExit, 0
depth = opts.depth
sTime = time.time()
print "Crawling %s (Max Depth: %d)" % (url, depth)
crawler = Crawler(url, depth)
crawler.crawl()
print "\n".join(crawler.urls)
eTime = time.time()
tTime = eTime - sTime
print "Found: %d" % crawler.links
print "Followed: %d" % crawler.followed
print "Stats: (%d/s after %0.2fs)" % (
int(math.ceil(float(crawler.links) / tTime)), tTime)
if __name__=='__main__':
main()
| [
"sainadhreddy92@gmail.com"
] | sainadhreddy92@gmail.com |
86e2f5900c36d9b63245f8f3616dc0ba042bae03 | 935caaf659ecb2214800ec58fdd9f8b9fd4589a0 | /locating.py | f825ef89a6fccc485363f36a69708f9dbac4d0fb | [] | no_license | xiaosongshaokao/EI331 | 0df343fd28e1604b5938ee609956fbdd10e25653 | 8c95be572f108320c0cb96f22c457876845635e1 | refs/heads/master | 2023-01-13T13:34:08.374659 | 2020-11-30T09:21:42 | 2020-11-30T09:21:42 | 269,298,752 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,971 | py | import cv2
import matplotlib.pyplot as plt
watch_cascade = cv2.CascadeClassifier('cascade.xml')
def detectPlateRough(image_gray,resize_h = 720,en_scale =1.08 ,top_bottom_padding_rate = 0.05):
if top_bottom_padding_rate>0.2:
print("error:top_bottom_padding_rate > 0.2:",top_bottom_padding_rate)
exit(1)
height = image_gray.shape[0]
padding = int(height*top_bottom_padding_rate)
scale = image_gray.shape[1]/float(image_gray.shape[0])
image = cv2.resize(image_gray, (int(scale*resize_h), resize_h))
image_color_cropped = image[padding:resize_h-padding,0:image_gray.shape[1]]
image_gray = cv2.cvtColor(image_color_cropped,cv2.COLOR_RGB2GRAY)
watches = watch_cascade.detectMultiScale(image_gray, en_scale, 2, minSize=(36, 9),maxSize=(36*40, 9*40))
cropped_images = []
for (x, y, w, h) in watches:
#cv2.rectangle(image_color_cropped, (x, y), (x + w, y + h), (0, 0, 255), 1)
x -= w * 0.14
w += w * 0.28
y -= h * 0.15
h += h * 0.3
#cv2.rectangle(image_color_cropped, (int(x), int(y)), (int(x + w), int(y + h)), (0, 0, 255), 1)
cropped = cropImage(image_color_cropped, (int(x), int(y), int(w), int(h)))
cropped_images.append(cropped)
#cropped_images.append([cropped,[x, y+padding, w, h]])
cv2.imshow("imageShow", cropped)
cv2.waitKey(0)
return cropped_images
def cropImage(image,rect):
#cv2.imshow("imageShow", image)
#cv2.waitKey(0)
x, y, w, h = computeSafeRegion(image.shape,rect)
#cv2.imshow("imageShow", image[y:y+h,x:x+w])
cvt_img=cv2.cvtColor(image[y-30:y+h+30,x-30:x+w+30], cv2.COLOR_BGR2RGB)
plt.title('locating')
plt.imshow(cvt_img)
#cv2.waitKey(0)
#img=cv2.Canny(image[y:y+h,x:x+w],100,250)
#cv2.imshow('canny',img)
#cv2.waitKey(0)
return image[y-30:y+h+30,x-30:x+w+30] #做了修改
def computeSafeRegion(shape,bounding_rect):
top = bounding_rect[1] # y
bottom = bounding_rect[1] + bounding_rect[3] # y + h
left = bounding_rect[0] # x
right = bounding_rect[0] + bounding_rect[2] # x + w
min_top = 0
max_bottom = shape[0]
min_left = 0
max_right = shape[1]
#print(left,top,right,bottom)
#print(max_bottom,max_right)
if top < min_top:
top = min_top
if left < min_left:
left = min_left
if bottom > max_bottom:
bottom = max_bottom
if right > max_right:
right = max_right
return [left,top,right-left,bottom-top]
if __name__=='__main__':
image = cv2.imread('./carIdentityData/images/33.jpg' )
images = detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) | [
"noreply@github.com"
] | xiaosongshaokao.noreply@github.com |
fdd4557a894da9541447bb5b18bb62cde4332a64 | 0a67b90b959ed57681f9af49eb46183c05dee798 | /security/sec1.py | f1531ce02a6940b9fe47c5da4a6a9ae5310a3c94 | [] | no_license | dheerajchakkz27/AOEP | 29621316229e7a4e5c249f1adbf1539096034414 | c9a965eeb6473cf87062b28224cd44b4005551eb | refs/heads/main | 2023-06-25T08:56:26.691280 | 2021-07-19T06:06:12 | 2021-07-19T06:06:12 | 356,787,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,426 | py | import cv2
import numpy as np
import math
import pyaudio
import wave
import pickle
import os
import aubio
import keyboard
from scipy.io.wavfile import read
from IPython.display import Audio, display, clear_output
import time
import tensorflow as tf
from sklearn.externals import joblib
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
UpSampling2D,
ZeroPadding2D,
BatchNormalization
)
from tensorflow.keras.regularizers import l2
import wget
from main_functions import *
from face_detector import get_face_detector, find_faces
from face_landmarks import get_landmark_model, detect_marks, draw_marks
warning_count=0
def recognize():
# Voice Authentication
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 1024
RECORD_SECONDS = 2
FILENAME = "./test.wav"
audio = pyaudio.PyAudio()
# start Recording
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
time.sleep(2.0)
#print("recording...")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
#print("finished recording")
# stop Recording
stream.stop_stream()
stream.close()
audio.terminate()
# saving wav file
waveFile = wave.open(FILENAME, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
modelpath = "./gmm_models/"
gmm_files = [os.path.join(modelpath,fname) for fname in
os.listdir(modelpath) if fname.endswith('.gmm')]
models = [pickle.load(open(fname,'rb')) for fname in gmm_files]
speakers = [fname.split("/")[-1].split(".gmm")[0] for fname
in gmm_files]
if len(models) == 0:
print("No Users in the Database!")
return
#read test file
sr,audio = read(FILENAME)
# extract mfcc features
vector = extract_features(audio,sr)
log_likelihood = np.zeros(len(models))
#checking with each model one by one
for i in range(len(models)):
gmm = models[i]
scores = np.array(gmm.score(vector))
log_likelihood[i] = scores.sum()
pred = np.argmax(log_likelihood)
identity = speakers[pred]
# if voice not recognized than terminate the process
if identity == 'unknown':
print("another persons voice detected")
warning_count+=1
return
print( " voice Recognized as - ", identity)
def calc_hist(img):
"""
To calculate histogram of an RGB image
Parameters
----------
img : Array of uint8
Image whose histogram is to be calculated
Returns
-------
histogram : np.array
The required histogram
"""
histogram = [0] * 3
for j in range(3):
histr = cv2.calcHist([img], [j], None, [256], [0, 256])
histr *= 255.0 / histr.max()
histogram[j] = histr
return np.array(histogram)
def load_darknet_weights(model, weights_file):
'''
Helper function used to load darknet weights.
:param model: Object of the Yolo v3 model
:param weights_file: Path to the file with Yolo V3 weights
'''
#Open the weights file
wf = open(weights_file, 'rb')
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
#Define names of the Yolo layers (just for a reference)
layers = ['yolo_darknet',
'yolo_conv_0',
'yolo_output_0',
'yolo_conv_1',
'yolo_output_1',
'yolo_conv_2',
'yolo_output_2']
for layer_name in layers:
sub_model = model.get_layer(layer_name)
for i, layer in enumerate(sub_model.layers):
if not layer.name.startswith('conv2d'):
continue
#Handles the special, custom Batch normalization layer
batch_norm = None
if i + 1 < len(sub_model.layers) and \
sub_model.layers[i + 1].name.startswith('batch_norm'):
batch_norm = sub_model.layers[i + 1]
filters = layer.filters
size = layer.kernel_size[0]
in_dim = layer.input_shape[-1]
if batch_norm is None:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
else:
# darknet [beta, gamma, mean, variance]
bn_weights = np.fromfile(
wf, dtype=np.float32, count=4 * filters)
# tf [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, size, size)
conv_weights = np.fromfile(
wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(
conv_shape).transpose([2, 3, 1, 0])
if batch_norm is None:
layer.set_weights([conv_weights, conv_bias])
else:
layer.set_weights([conv_weights])
batch_norm.set_weights(bn_weights)
assert len(wf.read()) == 0, 'failed to read all data'
wf.close()
def draw_outputs(img, outputs, class_names):
'''
Helper, util, function that draws predictons on the image.
:param img: Loaded image
:param outputs: YoloV3 predictions
:param class_names: list of all class names found in the dataset
'''
boxes, objectness, classes, nums = outputs
boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
wh = np.flip(img.shape[0:2])
for i in range(nums):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(img, '{} {:.4f}'.format(
class_names[int(classes[i])], objectness[i]),
x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
return img
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
(59, 119), (116, 90), (156, 198), (373, 326)],
np.float32) / 416
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, kernel_size, strides=1, batch_norm=True):
'''
Call this function to define a single Darknet convolutional layer
:param x: inputs
:param filters: number of filters in the convolutional layer
:param kernel_size: Size of kernel in the Conv layer
:param strides: Conv layer strides
:param batch_norm: Whether or not to use the custom batch norm layer.
'''
#Image padding
if strides == 1:
padding = 'same'
else:
x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding
padding = 'valid'
#Defining the Conv layer
x = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
if batch_norm:
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.1)(x)
return x
def DarknetResidual(x, filters):
'''
Call this function to define a single DarkNet Residual layer
:param x: inputs
:param filters: number of filters in each Conv layer.
'''
prev = x
x = DarknetConv(x, filters // 2, 1)
x = DarknetConv(x, filters, 3)
x = Add()([prev, x])
return x
def DarknetBlock(x, filters, blocks):
'''
Call this function to define a single DarkNet Block (made of multiple Residual layers)
:param x: inputs
:param filters: number of filters in each Residual layer
:param blocks: number of Residual layers in the block
'''
x = DarknetConv(x, filters, 3, strides=2)
for _ in range(blocks):
x = DarknetResidual(x, filters)
return x
def Darknet(name=None):
'''
The main function that creates the whole DarkNet.
'''
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 32, 3)
x = DarknetBlock(x, 64, 1)
x = DarknetBlock(x, 128, 2) # skip connection
x = x_36 = DarknetBlock(x, 256, 8) # skip connection
x = x_61 = DarknetBlock(x, 512, 8)
x = DarknetBlock(x, 1024, 4)
return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def YoloConv(filters, name=None):
'''
Call this function to define the Yolo Conv layer.
:param flters: number of filters for the conv layer
:param name: name of the layer
'''
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
# concat with skip connection
x = DarknetConv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
'''
This function defines outputs for the Yolo V3. (Creates output projections)
:param filters: number of filters for the conv layer
:param anchors: anchors
:param classes: list of classes in a dataset
:param name: name of the layer
'''
def yolo_output(x_in):
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
anchors, classes + 5)))(x)
return tf.keras.Model(inputs, x, name=name)(x_in)
return yolo_output
def yolo_boxes(pred, anchors, classes):
'''
Call this function to get bounding boxes from network predictions
:param pred: Yolo predictions
:param anchors: anchors
:param classes: List of classes from the dataset
'''
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
grid_size = tf.shape(pred)[1]
#Extract box coortinates from prediction vectors
box_xy, box_wh, objectness, class_probs = tf.split(
pred, (2, 2, 1, classes), axis=-1)
#Normalize coortinates
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
# boxes, conf, type
b, c, t = [], [], []
for o in outputs:
b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(b, axis=1)
confidence = tf.concat(c, axis=1)
class_probs = tf.concat(t, axis=1)
scores = confidence * class_probs
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(
scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=100,
max_total_size=100,
iou_threshold=0.5,
score_threshold=0.6
)
return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
masks=yolo_anchor_masks, classes=80):
x = inputs = Input([size, size, channels], name='input')
x_36, x_61, x = Darknet(name='yolo_darknet')(x)
x = YoloConv(512, name='yolo_conv_0')(x)
output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConv(256, name='yolo_conv_1')((x, x_61))
output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
x = YoloConv(128, name='yolo_conv_2')((x, x_36))
output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
name='yolo_boxes_2')(output_2)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
return Model(inputs, outputs, name='yolov3')
def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
"""Return the 3D points present as 2D for making annotation box"""
point_3d = []
dist_coeffs = np.zeros((4,1))
rear_size = val[0]
rear_depth = val[1]
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = val[2]
front_depth = val[3]
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)
# Map to 2d img points
(point_2d, _) = cv2.projectPoints(point_3d,
rotation_vector,
translation_vector,
camera_matrix,
dist_coeffs)
point_2d = np.int32(point_2d.reshape(-1, 2))
return point_2d
def draw_annotation_box(img, rotation_vector, translation_vector, camera_matrix,
rear_size=300, rear_depth=0, front_size=500, front_depth=400,
color=(255, 255, 0), line_width=2):
"""
Draw a 3D anotation box on the face for head pose estimation
Parameters
----------
img : np.unit8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
rear_size : int, optional
Size of rear box. The default is 300.
rear_depth : int, optional
The default is 0.
front_size : int, optional
Size of front box. The default is 500.
front_depth : int, optional
Front depth. The default is 400.
color : tuple, optional
The color with which to draw annotation box. The default is (255, 255, 0).
line_width : int, optional
line width of lines drawn. The default is 2.
Returns
-------
None.
"""
rear_size = 1
rear_depth = 0
front_size = img.shape[1]
front_depth = front_size*2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)
# # Draw all the lines
cv2.polylines(img, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
def head_pose_points(img, rotation_vector, translation_vector, camera_matrix):
"""
Get the points to estimate head pose sideways
Parameters
----------
img : np.unit8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
Returns
-------
(x, y) : tuple
Coordinates of line to estimate head pose
"""
rear_size = 1
rear_depth = 0
front_size = img.shape[1]
front_depth = front_size*2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)
y = (point_2d[5] + point_2d[8])//2
x = point_2d[2]
return (x, y)
def weights_download(out='models/yolov3.weights'):
_ = wget.download('https://pjreddie.com/media/files/yolov3.weights', out='models/yolov3.weights')
# weights_download() # to download weights
yolo = YoloV3()
load_darknet_weights(yolo, 'models/yolov3.weights')
clf = joblib.load('models/face_spoofing.pkl')
face_model = get_face_detector()
landmark_model = get_landmark_model()
cap = cv2.VideoCapture(0)
ret, imagereal = cap.read()
size = imagereal.shape
font = cv2.FONT_HERSHEY_SIMPLEX
outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
d_outer = [0]*5
inner_points = [[61, 67], [62, 66], [63, 65]]
d_inner = [0]*3
sample_number = 1
count = 0
measures = np.zeros(sample_number, dtype=np.float)
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corne
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
focal_length = size[1]
center = (size[1]/2, size[0]/2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype = "double"
)
faces = find_faces(imagereal, face_model)
for face in faces:
marks = detect_marks(imagereal, landmark_model, face)
for i in range(100):
for i, (p1, p2) in enumerate(outer_points):
d_outer[i] += marks[p2][1] - marks[p1][1]
for i, (p1, p2) in enumerate(inner_points):
d_inner[i] += marks[p2][1] - marks[p1][1]
d_outer[:] = [x / 100 for x in d_outer]
d_inner[:] = [x / 100 for x in d_inner]
tc=0 #facespoofcount
mouth_open=0
speeking_count=0
speech_checking=0
BUFFER_SIZE = 2048
CHANNELS = 1
FORMAT = pyaudio.paFloat32
METHOD = "default"
SAMPLE_RATE = 44100
HOP_SIZE = BUFFER_SIZE//2
PERIOD_SIZE_IN_FRAME = HOP_SIZE
pA = pyaudio.PyAudio()
# Open the microphone stream.
mic = pA.open(format=FORMAT, channels=CHANNELS ,rate=SAMPLE_RATE, input=True,frames_per_buffer=PERIOD_SIZE_IN_FRAME)
# Initiating o's pitch detection object.
pDetection = aubio.pitch(METHOD, BUFFER_SIZE,HOP_SIZE, SAMPLE_RATE)
# Set unit.
pDetection.set_unit("Hz")
# Frequency under -40 dB will considered
# as a silence.
pDetection.set_silence(-40)
while True:
ret, imagereal = cap.read()
if keyboard.is_pressed('1'):
print('eye is moving up')
if ret == True:
data = mic.read(PERIOD_SIZE_IN_FRAME)
# Convert into number that Aubio understand.
samples = np.fromstring(data,dtype=aubio.float_type)
# Finally get the pitch.
pitch = pDetection(samples)[0]
# Compute the energy (volume)
# of the current frame.
volume = np.sum(samples**2)/len(samples)
#print(int(volume*1000))
if int(volume*1000)>2:
recognize()
faces = find_faces(imagereal, face_model)
measures[count%sample_number]=0
height, width = imagereal.shape[:2]
for x, y, x1, y1 in faces:
roi = imagereal[y:y1, x:x1]
point = (0,0)
img_ycrcb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)
img_luv = cv2.cvtColor(roi, cv2.COLOR_BGR2LUV)
ycrcb_hist = calc_hist(img_ycrcb)
luv_hist = calc_hist(img_luv)
# feature_vector = np.append(ycrcb_hist.ravel(), luv_hist.ravel())
# feature_vector = feature_vector.reshape(1, len(feature_vector))
# prediction = clf.predict_proba(feature_vector)
# prob = prediction[0][1]
# measures[count % sample_number] = prob
# #cv2.rectangle(imagereal, (x, y), (x1, y1), (255, 0, 0), 2)
# point = (x, y-5)
# # print (measures, np.mean(measures))
# if 0 not in measures:
# text = "True"
# if np.mean(measures) >= 0.7:
# text = "False"
# if tc==0:
# start=time.time()
# tc=1
# font = cv2.FONT_HERSHEY_SIMPLEX
# #cv2.putText(img=imagereal, text=text, org=point, fontFace=font, fontScale=0.9, color=(0, 0, 255),thickness=2, lineType=cv2.LINE_AA)
# else:
# if tc==1:
# stop=time.time()
# if stop-start >13:
# warning_count+=1
# print('face spoofing detected')
# tc=0
# font = cv2.FONT_HERSHEY_SIMPLEX
# #cv2.putText(img=imagereal, text=text, org=point, fontFace=font, fontScale=0.9,color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
count+=1
#cv2.imshow('img_rgb', imagereal)
for face in faces:
marks = detect_marks(imagereal, landmark_model, face)
cnt_outer = 0
cnt_inner = 0
#draw_marks(imagereal, marks[48:])
# for i, (p1, p2) in enumerate(outer_points):
# if d_outer[i] + 3 < marks[p2][1] - marks[p1][1]:
# cnt_outer += 1
# for i, (p1, p2) in enumerate(inner_points):
# if d_inner[i] + 2 < marks[p2][1] - marks[p1][1]:
# cnt_inner += 1
# if cnt_outer > 3 and cnt_inner > 2:
# print('Mouth open')
#cv2.putText(img, 'Mouth open', (30, 30), font,1, (0, 255, 255), 2)
# mark_detector.draw_marks(img, marks, color=(0, 255, 0))
image_points = np.array([
marks[30], # Nose tip
marks[8], # Chin
marks[36], # Left eye left corner
marks[45], # Right eye right corne
marks[48], # Left Mouth corner
marks[54] # Right mouth corner
], dtype="double")
dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_UPNP)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
#for p in image_points:
#cv2.circle(imagereal, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
p1 = ( int(image_points[0][0]), int(image_points[0][1]))
p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
x1, x2 = head_pose_points(imagereal, rotation_vector, translation_vector, camera_matrix)
#cv2.line(imagereal, p1, p2, (0, 255, 255), 2)
#cv2.line(imagereal, tuple(x1), tuple(x2), (255, 255, 0), 2)
# for (x, y) in marks:
# cv2.circle(img, (x, y), 4, (255, 255, 0), -1)
# cv2.putText(img, str(p1), p1, font, 1, (0, 255, 255), 1)
try:
m = (p2[1] - p1[1])/(p2[0] - p1[0])
ang1 = int(math.degrees(math.atan(m)))
except:
ang1 = 90
try:
m = (x2[1] - x1[1])/(x2[0] - x1[0])
ang2 = int(math.degrees(math.atan(-1/m)))
except:
ang2 = 90
# print('div by zero error')
if ang1 >= 50:
print('Head down')
warning_count+=1
break
#cv2.putText(imagereal, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)
elif ang1 <= -48:
print('Head up')
warning_count+=1
break
#cv2.putText(imagereal, 'Head up', (30, 30), font, 2, (255, 255, 128), 3)
if ang2 >= 40:
print('Head right')
warning_count+=1
break
#cv2.putText(imagereal, 'Head right', (90, 30), font, 2, (255, 255, 128), 3)
elif ang2 <= -54:
print('Head left')
warning_count+=1
break
#cv2.putText(imagereal, 'Head left', (90, 30), font, 2, (255, 255, 128), 3)
#cv2.putText(imagereal, str(ang1), tuple(p1), font, 2, (128, 255, 255), 3)
#cv2.putText(imagereal, str(ang2), tuple(x1), font, 2, (255, 255, 128), 3)
for i, (p1, p2) in enumerate(outer_points):
if d_outer[i] + 3 < marks[p2][1] - marks[p1][1]:
cnt_outer += 1
for i, (p1, p2) in enumerate(inner_points):
if d_inner[i] + 2 < marks[p2][1] - marks[p1][1]:
cnt_inner += 1
#if int(volume*1000)>1:
if cnt_outer > 3 and cnt_inner > 2 and speech_checking == 0:
mstart=time.time()
speeking_count=1
mouth_open=1
speech_checking=1
if mouth_open == 1 and speech_checking ==1 and (cnt_outer <=3 or cnt_inner <=2) :
mouth_open=0
speeking_count+=1
if mouth_open == 0 and cnt_outer >3 and cnt_inner >2 and speech_checking==1:
mouth_open=1
speeking_count+=1
if speech_checking==1 and time.time()-mstart > 5 :
mstop=time.time()
#print("count is",speeking_count)
if speeking_count > 1:
print('speeching detected')
recognize()
warning_count+=1
speech_checking=0
speeking_count=0
cv2.imshow('img', imagereal)
img = cv2.cvtColor(imagereal, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (320, 320))
img = img.astype(np.float32)
img = np.expand_dims(img, 0)
img = img / 255
class_names = [c.strip() for c in open("models/classes.TXT").readlines()]
boxes, scores, classes, nums = yolo(img)
count=0
for i in range(nums[0]):
if int(classes[0][i] == 0):
count +=1
if int(classes[0][i] == 67):
print('Mobile Phone detected')
warning_count+=1
if count == 0:
print('No person detected')
warning_count+=1
elif count > 1:
print('More than one person detected')
warning_count+=1
#imagereal = draw_outputs(imagereal, (boxes, scores, classes, nums), class_names)
if cv2.waitKey(1) & 0xFF == ord('q'):
print("Total Warning Count is",warning_count)
break
else:
break
cv2.destroyAllWindows()
cap.release()
| [
"dheerajchakkz27@gmail.com"
] | dheerajchakkz27@gmail.com |
ffbba23a3c4c45c2d06645337aa75f9d54d24f4c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_243/ch161_2020_06_15_19_33_27_198209.py | f921b1b82956792ae479cd3fccf38b2e9021b5f4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def PiWallis(num):
numerador=1
denominador=2
i=0
multi = 1
while i < num:
multi *= numerador/denominador
if i%2 == 0:
denominador += 2
else:
numerador += 2
i+=1
return multi | [
"you@example.com"
] | you@example.com |
17443d48e14b9c51e3399739df9833c81a42bef8 | 886436fe7993aa2913e339ebe70b0eddfacac44c | /build/lib/armin/api/share/utils.py | e68eddb20a572579f23515d616640d6bb6bc3c91 | [] | no_license | singajeet/armin | 581793cac1ac3b1ab638d274b356965ee5d76750 | 99f61a0ce0f2d5c587002ddf8d2843e83d9538d3 | refs/heads/master | 2021-04-28T07:15:42.509397 | 2018-03-19T17:30:09 | 2018-03-19T17:30:09 | 122,219,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | """
.. module:: source_driver
:platform: Unix, Windows
:synopsis: A default implementation of source system driver
"""
from typing import Type, Dict, Any
import pathlib
from armin.api.share.constants import N, F, V
from tinydb import TinyDB, Query
def get_meta_table(meta_repo_details:Type[Dict]):
"""Returns the table from meta repo based on details passed as args
"""
__db_path = meta_repo_details[N.DB_URI]
if __db_path.find('~') >= 0:
__db_path = pathlib.Path(__db_path).expanduser()
else:
__db_path = pathlib.Path(__db_path).absolute()
__meta_db = TinyDB(__db_path)
if __meta_db is None:
return (F.FAILED, 'Unable to create instance of TinyDB')
__source_sys_meta_table = __meta_db\
.table(meta_repo_details[N.META_TABLE])
if __source_sys_meta_table is None:
return (F.FAILED, 'Inconsistent meta repo. Can not find source\
system details table - %s' % meta_repo_details[N.META_TABLE])
else:
return (F.SUCCESS, __source_sys_meta_table)
def connect_to_meta(meta_repo_details:Type[Dict], name:str) -> (Type[F], Any):
"""Connect to metadata database using the details provided asparameters in the constructor
Args:
meta_repo_details (Dict): Repository details for making connection and query
name (str): Name of the item that needs to be queried
Returns:
status (Tuple): Returns flag Success or Failed and details in case of failure and table record in case of success
"""
__record = None
(status, result_obj) = get_meta_table(meta_repo_details)
if status == F.SUCCESS:
__source_sys_meta_table = result_obj
__record = __source_sys_meta_table\
.get(Query()[N.NAME] == name)
else:
return (status, result_obj)
if __record is not None:
return (F.SUCCESS, __record)
return (F.FAILED, 'Record not found in meta repo')
| [
"singajeet@gmail.com"
] | singajeet@gmail.com |
80116b3f1732361ea1e9baa559ea412dc3968bd1 | 7f26860ac9c383508a66a2b936b67579b462a6f5 | /14_natural_language_processing/imdb_data_preparation.py | ba509803852b8f8dc61e5a14dfd6ace4c51e9648 | [] | no_license | AndriiLatysh/ml_4 | 5de8f4fcfc35e4577c96bc9a2cb2671895b57e0a | 643be0ed479808b38a1e4cd781f4978d9aaaeb57 | refs/heads/master | 2023-02-15T01:32:52.666101 | 2021-01-06T20:33:44 | 2021-01-06T20:33:44 | 292,947,269 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | import pandas as pd
import numpy as np
import re
import string
import nltk
imdb_reviews = pd.read_csv("data/IMDB Dataset.csv")
# imdb_reviews_count = imdb_reviews.groupby(by="sentiment").count()
# print(imdb_reviews_count)
N = len(imdb_reviews)
X = imdb_reviews["review"].iloc[:N]
y = imdb_reviews["sentiment"].iloc[:N]
y.replace({"positive": 1, "negative": 0}, inplace=True)
X = np.array(X)
# stemmer = nltk.PorterStemmer()
# stemmer = nltk.LancasterStemmer()
lemmatizer = nltk.WordNetLemmatizer()
for x_row in range(len(X)):
X[x_row] = re.sub("<.*?>", " ", X[x_row])
X[x_row] = X[x_row].lower()
X[x_row] = X[x_row].translate(str.maketrans("", "", string.punctuation))
X[x_row] = nltk.word_tokenize(X[x_row])
# X[x_row] = [stemmer.stem(word) for word in X[x_row]]
X[x_row] = [lemmatizer.lemmatize(word) for word in X[x_row]]
X[x_row] = " ".join(X[x_row])
if (x_row + 1) % 100 == 0:
print("{}/{} reviews prepared.".format(x_row+1, len(X)))
else:
print("Preparation finished.")
# print(X[3])
imdb_reviews["review"] = X
imdb_reviews["sentiment"] = y
imdb_reviews.to_csv("data/imdb_dataset_prepared.csv", index=False)
| [
"krulkevych@gmail.com"
] | krulkevych@gmail.com |
44ad04a59f6f8b2df27bfda02eaab12a2aa8d256 | 06a045819cf99c7059afde40dca12cf9d3eb5f81 | /pandas/tests/indexing/test_at.py | 01315647c464b7573433bf36515371ffed05e411 | [
"BSD-3-Clause"
] | permissive | MarcoGorelli/pandas | b9882c6ac1e4bc753819b7bc7c8b567964efd275 | 86a4ee01c7899ef454d35b95cde11e9593921c9d | refs/heads/main | 2023-08-22T12:35:45.122152 | 2023-05-04T22:11:07 | 2023-05-04T22:11:07 | 164,618,359 | 4 | 1 | BSD-3-Clause | 2023-05-05T09:02:23 | 2019-01-08T09:55:54 | Python | UTF-8 | Python | false | false | 7,983 | py | from datetime import (
datetime,
timezone,
)
import numpy as np
import pytest
from pandas.errors import InvalidIndexError
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
DatetimeIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
def test_at_timezone():
# https://github.com/pandas-dev/pandas/issues/33544
result = DataFrame({"foo": [datetime(2000, 1, 1)]})
result.at[0, "foo"] = datetime(2000, 1, 2, tzinfo=timezone.utc)
expected = DataFrame(
{"foo": [datetime(2000, 1, 2, tzinfo=timezone.utc)]}, dtype=object
)
tm.assert_frame_equal(result, expected)
def test_selection_methods_of_assigned_col():
# GH 29282
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
df2 = DataFrame(data={"c": [7, 8, 9]}, index=[2, 1, 0])
df["c"] = df2["c"]
df.at[1, "c"] = 11
result = df
expected = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [9, 11, 7]})
tm.assert_frame_equal(result, expected)
result = df.at[1, "c"]
assert result == 11
result = df["c"]
expected = Series([9, 11, 7], name="c")
tm.assert_series_equal(result, expected)
result = df[["c"]]
expected = DataFrame({"c": [9, 11, 7]})
tm.assert_frame_equal(result, expected)
class TestAtSetItem:
def test_at_setitem_item_cache_cleared(self):
# GH#22372 Note the multi-step construction is necessary to trigger
# the original bug. pandas/issues/22372#issuecomment-413345309
df = DataFrame(index=[0])
df["x"] = 1
df["cost"] = 2
# accessing df["cost"] adds "cost" to the _item_cache
df["cost"]
# This loc[[0]] lookup used to call _consolidate_inplace at the
# BlockManager level, which failed to clear the _item_cache
df.loc[[0]]
df.at[0, "x"] = 4
df.at[0, "cost"] = 789
expected = DataFrame({"x": [4], "cost": 789}, index=[0])
tm.assert_frame_equal(df, expected)
# And in particular, check that the _item_cache has updated correctly.
tm.assert_series_equal(df["cost"], expected["cost"])
def test_at_setitem_mixed_index_assignment(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
ser.at["a"] = 11
assert ser.iat[0] == 11
ser.at[1] = 22
assert ser.iat[3] == 22
def test_at_setitem_categorical_missing(self):
df = DataFrame(
index=range(3), columns=range(3), dtype=CategoricalDtype(["foo", "bar"])
)
df.at[1, 1] = "foo"
expected = DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, "foo", np.nan],
[np.nan, np.nan, np.nan],
],
dtype=CategoricalDtype(["foo", "bar"]),
)
tm.assert_frame_equal(df, expected)
def test_at_setitem_multiindex(self):
df = DataFrame(
np.zeros((3, 2), dtype="int64"),
columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]),
)
df.at[0, "a"] = 10
expected = DataFrame(
[[10, 10], [0, 0], [0, 0]],
columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]),
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("row", (Timestamp("2019-01-01"), "2019-01-01"))
def test_at_datetime_index(self, row):
# Set float64 dtype to avoid upcast when setting .5
df = DataFrame(
data=[[1] * 2], index=DatetimeIndex(data=["2019-01-01", "2019-01-02"])
).astype({0: "float64"})
expected = DataFrame(
data=[[0.5, 1], [1.0, 1]],
index=DatetimeIndex(data=["2019-01-01", "2019-01-02"]),
)
df.at[row, 0] = 0.5
tm.assert_frame_equal(df, expected)
class TestAtSetItemWithExpansion:
def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture):
# GH#25506
ts = Timestamp("2017-08-05 00:00:00+0100", tz=tz_naive_fixture)
result = Series(ts)
result.at[1] = ts
expected = Series([ts, ts])
tm.assert_series_equal(result, expected)
class TestAtWithDuplicates:
def test_at_with_duplicate_axes_requires_scalar_lookup(self):
# GH#33041 check that falling back to loc doesn't allow non-scalar
# args to slip in
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
msg = "Invalid call for scalar access"
with pytest.raises(ValueError, match=msg):
df.at[[1, 2]]
with pytest.raises(ValueError, match=msg):
df.at[1, ["A"]]
with pytest.raises(ValueError, match=msg):
df.at[:, "A"]
with pytest.raises(ValueError, match=msg):
df.at[[1, 2]] = 1
with pytest.raises(ValueError, match=msg):
df.at[1, ["A"]] = 1
with pytest.raises(ValueError, match=msg):
df.at[:, "A"] = 1
class TestAtErrors:
# TODO: De-duplicate/parametrize
# test_at_series_raises_key_error2, test_at_frame_raises_key_error2
def test_at_series_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=[3, 2, 1])
result = indexer_al(ser)[1]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(ser)["a"]
def test_at_frame_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = indexer_al(df)[1, 0]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(df)["a", 0]
with pytest.raises(KeyError, match="a"):
indexer_al(df)[1, "a"]
def test_at_series_raises_key_error2(self, indexer_al):
# at should not fallback
# GH#7814
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=list("abc"))
result = indexer_al(ser)["a"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(ser)[0]
def test_at_frame_raises_key_error2(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
result = indexer_al(df)["a", "A"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(df)["a", 0]
def test_at_frame_multiple_columns(self):
# GH#48296 - at shouldn't modify multiple columns
df = DataFrame({"a": [1, 2], "b": [3, 4]})
new_row = [6, 7]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at[5] = new_row
def test_at_getitem_mixed_index_no_fallback(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
with pytest.raises(KeyError, match="^0$"):
ser.at[0]
with pytest.raises(KeyError, match="^4$"):
ser.at[4]
def test_at_categorical_integers(self):
# CategoricalIndex with integer categories that don't happen to match
# the Categorical's codes
ci = CategoricalIndex([3, 4])
arr = np.arange(4).reshape(2, 2)
frame = DataFrame(arr, index=ci)
for df in [frame, frame.T]:
for key in [0, 1]:
with pytest.raises(KeyError, match=str(key)):
df.at[key, key]
def test_at_applied_for_rows(self):
# GH#48729 .at should raise InvalidIndexError when assigning rows
df = DataFrame(index=["a"], columns=["col1", "col2"])
new_row = [123, 15]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at["a"] = new_row
| [
"noreply@github.com"
] | MarcoGorelli.noreply@github.com |
ff9246c76b9cd0425b56d9757496e4256b8b3b1e | 0f3b6f1788df3feadd79df39e0bd930874572e01 | /Computer Graphics OpenGL/Lab1.py | 465a01f5443074bc54acbc4509a118484501a8dd | [] | no_license | ROOOOO/Study | 978c0c81cdc75573ac1480da2427bcf0e23e12ae | caf5636f8c12979ba6aa9941e6293f3ad8cfc3a7 | refs/heads/master | 2021-01-10T16:10:50.001231 | 2016-03-18T18:27:23 | 2016-03-18T18:27:23 | 54,218,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | import glfw
import OpenGL
from OpenGL.GL import *
delta = 0.1
angle = 0.0
def main():
global delta
global angle
# Initialize the library
if not glfw.init():
return
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(640, 640, "Lab1", None, None)
if not window:
glfw.terminate()
return
# Make the window's context current
glfw.make_context_current(window)
glfw.set_key_callback(window, key_callback)
# Loop until the user closes the window
while not glfw.window_should_close(window):
# Render here, e.g. using pyOpenGL
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
glClearColor(1.0, 1.0, 1.0, 1.0)
glPushMatrix()
glRotatef(angle, 0, 0, 1)
glBegin(GL_QUADS)
glColor3f(0.0,0.0,0.0)
glVertex3f( 0.5, 0.5, 0.0)
glColor3f(1.0,0.0,0.0)
glVertex3f(-0.5, 0.5, 0.0)
glColor3f(0.0,0.0,1.0)
glVertex3f(-0.5, -0.5, 0.0)
glColor3f(1.0,0.0,1.0)
glVertex3f( 0.5, -0.5, 0.0)
glEnd()
glPopMatrix()
angle += delta
# Swap front and back buffers
glfw.swap_buffers(window)
# Poll for and process events
glfw.poll_events()
glfw.destroy_window(window)
glfw.terminate()
def key_callback(window, key, scancode, action, mods):
global delta
global angle
if action == glfw.PRESS:
if key == 262:
delta = -0.1
angle = angle%360
if key == 263:
delta = 0.1
angle = angle%360
main() | [
"Roman@MacBook-Air-Roman.local"
] | Roman@MacBook-Air-Roman.local |
fecdbdc10272d81600d3f4dcf617b867c2191dd9 | 4033d6ac5ce12e635db8f0b8faf78099573f8fa3 | /add_audio_video.py | 2e057c85384accf6a8d8f10be0cdfba79a78241e | [
"MIT"
] | permissive | rohitmadrileno15/Geronimo | d78fd0d9f88e597c1cf52f0c47e4df5c3375dd4c | bf90125277e56896c50c96f4e1e92641a6dcdaec | refs/heads/master | 2022-11-19T06:45:59.616987 | 2020-07-24T07:20:06 | 2020-07-24T07:20:06 | 281,310,163 | 0 | 0 | null | 2020-07-21T06:11:52 | 2020-07-21T06:05:26 | null | UTF-8 | Python | false | false | 797 | py | import moviepy.editor as mpe
import os
from mutagen.mp3 import MP3
def add_audio_and_video(video_ip , audio_ip , video_op):
clip = mpe.VideoFileClip(video_ip)
audio_bg = mpe.AudioFileClip(audio_ip)
audio = MP3(audio_ip)
audio_info = audio.info
length_in_secs = int(audio_info.length)
final_clip = clip.set_audio(audio_bg)
print("set Audio")
final_clip.write_videofile(video_op)
clip1 = mpe.VideoFileClip(video_op)
print("duration ", clip1.duration)
final_clip1 = clip1.cutout(length_in_secs , int(clip1.duration))
final_clip1.write_videofile(video_op)
#removing the original input file
os.remove(video_ip)
os.remove(audio_ip)
return f"Video Available on {video_op}"
| [
"noreply@github.com"
] | rohitmadrileno15.noreply@github.com |
6f343a820ccf355ca8b580949b578755f591cfcf | 289d2fc51d9e0504e07145bb0294326fdb15b82f | /ResearchProject/evaluation/computeF1.py | 8aa8cb7ab503d9ea781240fc8b329c8a8b4b8842 | [] | no_license | nishant4498/CrossLanguageClassifier | 7d0de75f9ca3f9b6bf4955bf16011a34166168a2 | fef5483cff569ebf053e3b2bada0f9f40e05ff73 | refs/heads/master | 2020-12-24T08:00:02.296448 | 2016-09-02T03:26:37 | 2016-09-02T03:26:37 | 56,204,112 | 1 | 3 | null | 2016-04-30T05:11:32 | 2016-04-14T03:22:40 | Python | UTF-8 | Python | false | false | 1,263 | py | '''
Created on Apr 13, 2016
@author: dell
'''
from sklearn.metrics import f1_score
def calculateScore(path):
y_true = []
y_pred = []
with open(path,'r') as f:
for line in f:
#print line
line = line.replace("\n", "")
classes = line.split(",") #comma separated line with filePath, annotated class and classified class
#print classes
y_true.append(classes[1])
y_pred.append(classes[2])
#what does micro, macro, weighted signify?
print "Calculate metrics globally by counting the total true positives, false negatives and false positives\nF1 score="
print f1_score(y_true, y_pred, average='micro')
'''
print "average='macro'"
print f1_score(y_true, y_pred, average='macro')
print "average='weighted'"
print f1_score(y_true, y_pred, average='weighted')
print "average=None"
print f1_score(y_true, y_pred, average=None)
'''
filePath = "../output/Classified_Marathi_News_NB.txt"
print "F1 score for Naive Bayes classification"
calculateScore(filePath)
filePath = "../output/Classified_Marathi_News_SVM.txt"
print "F1 score for SVM classification"
calculateScore(filePath) | [
"degaonka@usc.edu"
] | degaonka@usc.edu |
8ecd30a91ded3bea1b6a3c9bac2c881216e78dd3 | 1228450b54dcafbc8fcdb6c99c2ab007871032a3 | /ecnet/data.py | fdf2b57e3afeeafcb86eeca9654c326300088a85 | [
"BSD-3-Clause"
] | permissive | nnguyen19/ECNet | 41752847b72120587a074fe6632618720ff390c2 | 27fcdd04c5bf2e36361608c53bc859821045ad09 | refs/heads/main | 2023-05-03T18:58:03.050296 | 2021-05-24T21:35:26 | 2021-05-24T21:35:26 | 369,907,994 | 0 | 0 | BSD-3-Clause | 2021-05-22T21:22:01 | 2021-05-22T21:22:00 | null | UTF-8 | Python | false | false | 10,563 | py | import numpy as np
import pandas as pd
from Bio import SeqIO
import torch.utils.data
from sklearn.model_selection import KFold, ShuffleSplit
from ecnet import vocab
from ecnet.local_feature import CCMPredEncoder
from ecnet.global_feature import TAPEEncoder
class SequenceData(torch.utils.data.Dataset):
    """Minimal map-style dataset pairing each sequence with its label.

    ``__getitem__`` yields ``(sequence, label)`` tuples, which is the shape
    expected by a default PyTorch ``DataLoader`` collate function.
    """

    def __init__(self, sequences, labels):
        # Parallel containers: sequences[i] is scored by labels[i].
        self.sequences = sequences
        self.labels = labels

    def __len__(self):
        # The number of labels defines the dataset size.
        return len(self.labels)

    def __getitem__(self, index):
        # Return the (input, target) pair at the given position.
        return self.sequences[index], self.labels[index]
class MetagenesisData(torch.utils.data.Dataset):
    """Map-style dataset over a list of pre-built sample records.

    Each element of ``data`` is expected to be a fully-assembled sample
    (e.g. a dict of tensors); this class only provides indexed access.
    """

    def __init__(self, data):
        # Keep a reference to the already-prepared sample list.
        self.data = data

    def __len__(self):
        # Dataset size is the number of stored samples.
        return len(self.data)

    def __getitem__(self, index):
        # Samples are returned as-is, with no transformation.
        return self.data[index]
def index_encoding(sequences):
    '''
    Encode equal-length sequences as a matrix of amino-acid indices.

    Modified from https://github.com/openvax/mhcflurry/blob/master/mhcflurry/amino_acid.py#L110-L130

    Parameters
    ----------
    sequences: list of equal-length sequences

    Returns
    -------
    np.array with shape (#sequences, length of sequences)
    '''
    # One row per sequence, one column per residue character.
    df = pd.DataFrame(iter(s) for s in sequences)
    # Map each residue character to its integer index from the vocabulary.
    encoding = df.replace(vocab.AMINO_ACID_INDEX)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # it was only an alias for the builtin int, which astype accepts.
    encoding = encoding.values.astype(int)
    return encoding
class Dataset(object):
def __init__(self,
train_tsv=None, test_tsv=None,
fasta=None, ccmpred_output=None,
use_loc_feat=True, use_glob_feat=True,
split_ratio=[0.9, 0.1],
random_seed=42):
"""
split_ratio: [train, valid] or [train, valid, test]
"""
self.train_tsv = train_tsv
self.test_tsv = test_tsv
self.fasta = fasta
self.use_loc_feat = use_loc_feat
self.use_glob_feat = use_glob_feat
self.split_ratio = split_ratio
self.rng = np.random.RandomState(random_seed)
self.native_sequence = self._read_native_sequence()
self.full_df = self._read_mutation_df(train_tsv)
if test_tsv is None:
assert len(split_ratio) == 3, \
'split_ratio should have 3 elements if test_tsv is None'
self.train_df, self.valid_df, self.test_df = \
self._split_dataset_df(self.full_df, split_ratio)
else:
assert len(split_ratio) == 2
'split_ratio should have 2 elements if test_tsv is provided'
self.test_df = self._read_mutation_df(test_tsv)
self.train_df, self.valid_df, _ = \
self._split_dataset_df(self.full_df, split_ratio)
self.train_valid_df = pd.concat(
[self.train_df, self.valid_df]).reset_index(drop=True)
self.ccmpred_encoder = CCMPredEncoder(
ccmpred_output=ccmpred_output, seq_len=len(self.native_sequence))
self.tape_encoder = TAPEEncoder()
def _read_native_sequence(self):
fasta = SeqIO.read(self.fasta, 'fasta')
native_sequence = str(fasta.seq)
return native_sequence
def _check_split_ratio(self, split_ratio):
"""
Modified from: https://github.com/pytorch/text/blob/3d28b1b7c1fb2ddac4adc771207318b0a0f4e4f9/torchtext/data/dataset.py#L284-L311
"""
test_ratio = 0.
if isinstance(split_ratio, float):
assert 0. < split_ratio < 1., (
"Split ratio {} not between 0 and 1".format(split_ratio))
valid_ratio = 1. - split_ratio
return (split_ratio, valid_ratio, test_ratio)
elif isinstance(split_ratio, list):
length = len(split_ratio)
assert length == 2 or length == 3, (
"Length of split ratio list should be 2 or 3, got {}".format(split_ratio))
ratio_sum = sum(split_ratio)
if not ratio_sum == 1.:
split_ratio = [float(ratio) / ratio_sum for ratio in split_ratio]
if length == 2:
return tuple(split_ratio + [test_ratio])
return tuple(split_ratio)
else:
raise ValueError('Split ratio must be float or a list, got {}'
.format(type(split_ratio)))
def _split_dataset_df(self, input_df, split_ratio, resample_split=False):
"""
Modified from:
https://github.com/pytorch/text/blob/3d28b1b7c1fb2ddac4adc771207318b0a0f4e4f9/torchtext/data/dataset.py#L86-L136
"""
_rng = self.rng.randint(512) if resample_split else self.rng
df = input_df.copy()
df = df.sample(frac=1, random_state=_rng).reset_index(drop=True)
N = len(df)
train_ratio, valid_ratio, test_ratio = self._check_split_ratio(split_ratio)
train_len = int(round(train_ratio * N))
valid_len = N - train_len if not test_ratio else int(round(valid_ratio * N))
train_df = df.iloc[:train_len].reset_index(drop=True)
valid_df = df.iloc[train_len:train_len + valid_len].reset_index(drop=True)
test_df = df.iloc[train_len + valid_len:].reset_index(drop=True)
return train_df, valid_df, test_df
def _mutation_to_sequence(self, mutation):
'''
Parameters
----------
mutation: ';'.join(WiM) (wide-type W at position i mutated to M)
'''
sequence = self.native_sequence
for mut in mutation.split(';'):
wt_aa = mut[0]
mt_aa = mut[-1]
pos = int(mut[1:-1])
assert wt_aa == sequence[pos - 1],\
"%s: %s->%s (fasta WT: %s)"%(pos, wt_aa, mt_aa, sequence[pos - 1])
sequence = sequence[:(pos - 1)] + mt_aa + sequence[pos:]
return sequence
def _mutations_to_sequences(self, mutations):
return [self._mutation_to_sequence(m) for m in mutations]
def _drop_invalid_mutation(self, df):
'''
Drop mutations WiM where
- W is incosistent with the i-th AA in native_sequence
- M is ambiguous, e.g., 'X'
'''
flags = []
for mutation in df['mutation'].values:
for mut in mutation.split(';'):
wt_aa = mut[0]
mt_aa = mut[-1]
pos = int(mut[1:-1])
valid = True if wt_aa == self.native_sequence[pos - 1] else False
valid = valid and (mt_aa not in ['X'])
flags.append(valid)
df = df[flags].reset_index(drop=True)
return df
def _read_mutation_df(self, tsv):
df = pd.read_table(tsv)
df = self._drop_invalid_mutation(df)
df['sequence'] = self._mutations_to_sequences(df['mutation'].values)
return df
def encode_seq_enc(self, sequences):
seq_enc = index_encoding(sequences)
seq_enc = torch.from_numpy(seq_enc.astype(np.int))
return seq_enc
def encode_loc_feat(self, sequences):
feat = self.ccmpred_encoder.encode(sequences)
feat = torch.from_numpy(feat).float()
return feat
def encode_glob_feat(self, sequences):
feat = self.tape_encoder.encode(sequences)
feat = torch.from_numpy(feat).float()
return feat
    def build_data(self, mode, return_df=False):
        """Materialize the samples for one split.

        mode: 'train' | 'valid' | 'test' — selects the corresponding frame.
        return_df: when True, also return the underlying DataFrame.
        Returns a MetagenesisData wrapping one dict per row with keys
        'sequence', 'label', 'seq_enc' and, depending on the feature flags,
        'loc_feat' / 'glob_feat'.
        """
        if mode == 'train':
            df = self.train_df.copy()
        elif mode == 'valid':
            df = self.valid_df.copy()
        elif mode == 'test':
            df = self.test_df.copy()
        else:
            raise NotImplementedError
        sequences = df['sequence'].values
        seq_enc = self.encode_seq_enc(sequences)
        # Optional feature tensors are only computed when their flag is set.
        if self.use_loc_feat:
            loc_feat = self.encode_loc_feat(sequences)
        if self.use_glob_feat:
            glob_feat = self.encode_glob_feat(sequences)
        labels = df['score'].values
        labels = torch.from_numpy(labels.astype(np.float32))
        samples = []
        for i in range(len(df)):
            sample = {
                'sequence':sequences[i],
                'label':labels[i],
                'seq_enc': seq_enc[i],
            }
            if self.use_loc_feat:
                sample['loc_feat'] = loc_feat[i]
            if self.use_glob_feat:
                sample['glob_feat'] = glob_feat[i]
            samples.append(sample)
        data = MetagenesisData(samples)
        if return_df:
            return data, df
        else:
            return data
    def get_dataloader(self, mode, batch_size=128,
            return_df=False, resample_train_valid=False):
        """Build DataLoader(s) for the requested split.

        mode: 'train_valid' returns (train_loader, valid_loader);
              'test' returns the test loader.
        return_df: when True, pair each loader with its DataFrame.
        resample_train_valid: reshuffle the train/valid assignment before
            building the loaders (test rows are untouched).
        """
        if resample_train_valid:
            # Re-draw the train/valid partition from the combined frame.
            self.train_df, self.valid_df, _ = \
                self._split_dataset_df(
                    self.train_valid_df, self.split_ratio[:2], resample_split=True)
        if mode == 'train_valid':
            train_data, train_df = self.build_data('train', return_df=True)
            valid_data, valid_df = self.build_data('valid', return_df=True)
            train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
            valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size)
            if return_df:
                return (train_loader, train_df), (valid_loader, valid_df)
            else:
                return train_loader, valid_loader
        elif mode == 'test':
            test_data, test_df = self.build_data('test', return_df=True)
            test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
            if return_df:
                return test_loader, test_df
            else:
                return test_loader
        else:
            raise NotImplementedError
if __name__ == '__main__':
    # Smoke test: build a Dataset for the GB1 Envision data and exercise the
    # train/valid/test dataloaders, including train/valid resampling.
    protein_name = 'gb1'
    dataset_name = 'Envision_Gray2018'
    dataset = Dataset(
        train_tsv=f'../../output/mutagenesis/{dataset_name}/{protein_name}/data.tsv',
        fasta=f'../../output/mutagenesis/{dataset_name}/{protein_name}/native_sequence.fasta',
        ccmpred_output=f'../../output/homologous/{dataset_name}/{protein_name}/hhblits/ccmpred/{protein_name}.braw',
        split_ratio=[0.7, 0.1, 0.2],
        use_loc_feat=False, use_glob_feat=False,
    )
    # dataset.build_data('train')
    (loader, df), (_, _) = dataset.get_dataloader('train_valid',
        batch_size=32, return_df=True)
    print(df.head())
    print(len(loader.__iter__()))
    # Resampling should reshuffle the train/valid assignment.
    (loader, df), (_, _) = dataset.get_dataloader('train_valid',
        batch_size=32, return_df=True, resample_train_valid=True)
    print(df.head())
    print(len(loader.__iter__()))
    loader, df = dataset.get_dataloader('test',
        batch_size=32, return_df=True, resample_train_valid=True)
    print(next(loader.__iter__()))
"luoyunan@gmail.com"
] | luoyunan@gmail.com |
def move(a, b):
    """Print one disk move from peg *a* to peg *b*."""
    # Fix: user-facing message said "Move form"; corrected to "Move from".
    print("Move from {} to {}".format(a, b))
def hanoi(n, start, target, helper1, helper2):
    """Print the moves for an n-disk, four-peg Tower of Hanoi variant.

    Disks travel from *start* to *target*; *helper1* and *helper2* are the
    spare pegs. The strategy parks n-2 disks, shifts the two largest
    directly, then brings the parked disks home.
    """
    if n == 1:
        move(start, target)
    elif n != 0:
        # Park all but the two largest disks on helper2.
        hanoi(n - 2, start, helper2, helper1, target)
        # Move the two largest disks via helper1.
        move(start, helper1)
        move(start, target)
        move(helper1, target)
        # Retrieve the parked disks onto the target peg.
        hanoi(n - 2, helper2, target, helper1, start)
# Demo: solve a 3-disk puzzle from peg A to peg D with spares B and C.
hanoi(3, "A", "D", "B", "C")
| [
"sherifatitebi@gmail.com"
] | sherifatitebi@gmail.com |
3730426a331bcc75745f9af0cdfc8efaf059a9b9 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/numpy/array_api/tests/test_elementwise_functions.py | b2fb44e766f8adfc368d988bd7d17c2ac418b386 | [
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-or-later",
"GCC-exception-3.1",
"BSD-3-Clause",
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 3,619 | py | from inspect import getfullargspec
from numpy.testing import assert_raises
from .. import asarray, _elementwise_functions
from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
from .._dtypes import (
_dtype_categories,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
)
def nargs(func):
    """Number of declared positional arguments in *func*'s signature."""
    spec = getfullargspec(func)
    return len(spec.args)
def test_function_types():
    """Negative tests: each element-wise function must raise TypeError for
    array arguments whose dtype falls outside its declared input category."""
    # Test that every function accepts only the required input types. We only
    # test the negative cases here (error). The positive cases are tested in
    # the array API test suite.
    # Maps function name -> dtype category it accepts (see _dtype_categories).
    elementwise_function_input_types = {
        "abs": "numeric",
        "acos": "floating-point",
        "acosh": "floating-point",
        "add": "numeric",
        "asin": "floating-point",
        "asinh": "floating-point",
        "atan": "floating-point",
        "atan2": "floating-point",
        "atanh": "floating-point",
        "bitwise_and": "integer or boolean",
        "bitwise_invert": "integer or boolean",
        "bitwise_left_shift": "integer",
        "bitwise_or": "integer or boolean",
        "bitwise_right_shift": "integer",
        "bitwise_xor": "integer or boolean",
        "ceil": "numeric",
        "cos": "floating-point",
        "cosh": "floating-point",
        "divide": "floating-point",
        "equal": "all",
        "exp": "floating-point",
        "expm1": "floating-point",
        "floor": "numeric",
        "floor_divide": "numeric",
        "greater": "numeric",
        "greater_equal": "numeric",
        "isfinite": "numeric",
        "isinf": "numeric",
        "isnan": "numeric",
        "less": "numeric",
        "less_equal": "numeric",
        "log": "floating-point",
        "logaddexp": "floating-point",
        "log10": "floating-point",
        "log1p": "floating-point",
        "log2": "floating-point",
        "logical_and": "boolean",
        "logical_not": "boolean",
        "logical_or": "boolean",
        "logical_xor": "boolean",
        "multiply": "numeric",
        "negative": "numeric",
        "not_equal": "all",
        "positive": "numeric",
        "pow": "numeric",
        "remainder": "numeric",
        "round": "numeric",
        "sign": "numeric",
        "sin": "floating-point",
        "sinh": "floating-point",
        "sqrt": "floating-point",
        "square": "numeric",
        "subtract": "numeric",
        "tan": "floating-point",
        "tanh": "floating-point",
        "trunc": "numeric",
    }

    def _array_vals():
        # One scalar array per dtype: every integer, boolean, and float kind.
        for d in _integer_dtypes:
            yield asarray(1, dtype=d)
        for d in _boolean_dtypes:
            yield asarray(False, dtype=d)
        for d in _floating_dtypes:
            yield asarray(1.0, dtype=d)

    for x in _array_vals():
        for func_name, types in elementwise_function_input_types.items():
            dtypes = _dtype_categories[types]
            func = getattr(_elementwise_functions, func_name)
            if nargs(func) == 2:
                # Binary functions must reject either operand being out of
                # category. The lambda is invoked immediately by
                # assert_raises, so capturing the loop variables is safe.
                for y in _array_vals():
                    if x.dtype not in dtypes or y.dtype not in dtypes:
                        assert_raises(TypeError, lambda: func(x, y))
            else:
                if x.dtype not in dtypes:
                    assert_raises(TypeError, lambda: func(x))
def test_bitwise_shift_error():
    """Shift functions must raise ValueError for negative shift amounts."""
    lhs = asarray([1, 1])
    negative_shift = asarray([1, -1])
    for shift_func in (bitwise_left_shift, bitwise_right_shift):
        # assert_raises invokes the lambda immediately, so binding the loop
        # variable here is safe.
        assert_raises(ValueError, lambda: shift_func(lhs, negative_shift))
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
bff330c40b52db644df787ad662accfa1e26780e | d2f3e688b68dcbf69ed856e7696e8bdbc8bfde26 | /download.py | ffec6cea9683530954f80520bc739769cd5e0d8b | [] | no_license | crickers/stocktrader | 65cdd53fefeb50ec818df4e5a256aba40df4a6be | d21344cb1f5d3a0809e9bf1b7751e66bebf8ed0b | refs/heads/master | 2023-03-18T17:18:18.989452 | 2021-03-09T01:04:53 | 2021-03-09T01:04:53 | 345,841,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import yfinance
df = yfinance.download('AAPL', start='2020-01-01', end='2020-10-02')
df.to_csv('AAPL.csv')

# Bug fix: the CREATE TABLE statement below was pasted directly into the
# module, which is a Python SyntaxError. Keep the schema as a string constant
# so the script is importable; execute it against SQLite where needed.
CREATE_STOCK_TABLE = """
CREATE TABLE IF NOT EXISTS stock(
    id INTEGER PRIMARY KEY,
    symbol TEXT NOT NULL UNIQUE,
    company TEXT NOT NULL
)
"""
| [
"crickmonster@gmail.com"
] | crickmonster@gmail.com |
73f94213f62cf8ebac644699c8b006a09a55921c | 56ab30ba2507e60af5e49e09bde699ad99f97961 | /concepts_practice/list_tuples_sets.py | dbc95b5ee55bc19f3d611a1218f16f301986fe9c | [] | no_license | aditiabhang/my-python-stuff | ebbdeb33509a19ce3ad59ee5e2dfc8c3f54be8e6 | 7f960571df50d04f18ffb4196b905fd1dd415980 | refs/heads/master | 2021-07-17T12:56:56.827626 | 2020-08-12T22:27:37 | 2020-08-12T22:27:37 | 202,188,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,749 | py | ##------ LISTS ------
courses = ['Algorithms and Data Analysis', 'Software Quality', 'Formal Languages', 'Python Self Study']
print("Printing the list: ",courses)
print("Printing the length of the list: ",len(courses))
print("Printing the last element of the list: ",courses[3])
print("Printing the second last element of the list using negative indexing: ",courses[-2])
print("")
# If we try to access the index that doesnt exist, an error is occured saying;
# -> IndexError: list out of range
# accesing only the part of the list
print("Accessing part of list: ", courses[0:2])
# First part of index in inclusive but the second is NOT.
courses.append('PHP Project')
print("Appending to the list: ", courses)
print("")
# inserting an element to a specifix index
courses.insert(2, 'Survey of SW Engg')
print("Inserting new course at the index 2: ", courses)
print("")
# we can even add list within a list, like below
print("List within a list:")
courses_1 = ['Masters', 'CompSci']
courses.insert(0, courses_1)
print(courses)
print("")
# adding values of second list to original list
print("Extending the list with other list:")
courses_2 = ['Library', 'TSU']
courses_1.extend(courses_2)
print(courses_1)
print("")
# NOTE:
# append() adds up the entire list (inclusing the brackets) to the list.
# extend() adds the each individual elements of a list to the original list.
# Simple methods: -
# remove(value) - to remove the value
# pop() - to remove the last value of the list
# - we can check which value is popped by simply applying the method to a variable
popped = courses.pop()
print("Popped value: ",popped)
print("")
# reverse() - reverse the order of the list
# sort() - sorts the list alphabetically or numerically ascending order
# sort(reverse = True) - to sort the list in reverse/descending order
nums = [3, 6, 33, 9, 66, 13]
courses_1.reverse()
courses_1.sort()
courses_1.sort(reverse = True)
nums.sort(reverse = True)
print("Reverse alphabets: ", courses_1)
print("Sorting list of courses: ", courses_1)
print("Sorting the list in reverse order: ", courses_1)
print("Sorting the number list in reverse order: ", nums)
print("")
# NOTE: sort() and sorted()
# sort() method changes the list
# sorted method doesnt change the list instead shows the version of sorted list.
# A variable is created to keep the sorted version of list.
# More simple methonds: -
# min(), max(), and sum().
# The name implies the meaning.
# index() - returns the index of the value
print("At what index is the value - Masters? It is at: ", courses_1.index('Masters'))
print("")
# in - to check if the value is in the list
print("Is Library keyword value in the list?: ", 'Library' in courses_1)
# loop to print each item in the list
for item in courses_1:
print(item)
print("")
# enumerate function: -
# - Access the index as well as values of the list
# - returns index and the value
for index, item in enumerate(courses):
print(index,item)
print("")
# If we dont want to start at 0, and start at 1, we simple do as follows:
for index, item in enumerate(courses, start = 1):
print(index,item)
print("")
# join() -
# - convert the list into string separated by a certain value
courses_str = ', '.join(courses_1) # used '.' as a separater
courses_str1 = ' - '.join(courses_2) # used '-' as a separater
print(courses_str)
print(courses_str1)
# split() -
# - splits the list separated by a certain value
courses_str2 = courses_str.split('-') #this has to be done on join
print(courses_str2)
print("")
## ------ TUPLES ------
# - Lists are mutable but tuples are NOT mutable
# Proof: lists are mutable
print("Lists are mutable.")
list1 = ['Amazon', 'IBM', 'Facebook', 'Google']
list2 = list1
print (list2)
list1[0] = 'Apple'
print("List 1: ", list1)
print("List 2: ",list2)
print("")
# If we want to update/change the object, we use lists. Hence, mutable.
# Proof: tuples are not mutable
print("Tuples are immutable.")
tuple1 = ('Amazon', 'IBM', 'Facebook', 'Google')
tuple2 = tuple1
print (tuple2)
# tuple1[0] = 'Apple'
print("Tuple 1: ", tuple1)
print("Tuple 2: ", tuple2)
print("When we try to change the tuple like we did in the lists above, we get the above error.")
print("")
# Difference between lists and tuple:
# - we cant append, insert, or remove any elements in a tuple, like we can do it list.
# Other than this difference, list and tuple behaves the same.
# We can loops through lists and tuples, access the elements, etc.
## ------ SETS ------
# Sets are sets of ordered or unordered values, has no duplicates.
# Has curly braces.
# Every time we print a set, it is displayed in different order.
# Bcz, sets dont care about the order, it is mainly used to see if the value is a part of
# a set, also used to remove duplicate values.
set1 = {'Amazon', 'IBM', 'Google', 'Facebook', 'Facebook'}
# When we print a set with duplicate values, the duplicate value will be thrown away, and
# display only one value.
print(set1)
# Membership test - checks if a value is part of the set.
# Sets performs this better than lists or tuples
print(" Is Amazon in set1?", 'Amazon' in set1)
# Sets determine what values they share or dont share with other sets
# intersection() - determines the common.
set2 = {'Amazon', 'Facebook', 'Intel', 'Dell'}
print("Intersection of sets: ", set1.intersection(set2))
# difference() - What is in set1 but not in set2?
print("Difference: ", set1.difference(set2))
# union() - combine all the values in both sets
print("Union: ", set1.union(set2))
# Tip: Creatig empty set of lists, tuples and sets.
empty_list = []
empty_list = list[]
empty_tuple = ()
empty_tuple = tuple()
empty_set = () # this is not correct. It creates an empty dictionary
empty_set = set() | [
"aditi.abhang91@gmail.com"
] | aditi.abhang91@gmail.com |
da6fc1e46386d87e9b389b5ee8beb11f81049273 | ccecb159f1ad83585e277bf30e22e997d206b755 | /users/models.py | a3509bde9e533100c9522ab7a65476a46e4328c7 | [] | no_license | jeevandhakal/todo-app | f2b1d1e87e5ff967d7a889bd046dfd8b6d090d6e | 905bc1c01f727db8636794f022aa934f94963e16 | refs/heads/main | 2023-06-01T18:08:33.768368 | 2021-06-18T03:31:09 | 2021-06-18T03:31:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .managers import CustomUserManager
class CustomUser(AbstractUser):
    """User model that signs in with an email address instead of a username."""
    # Remove the inherited username field entirely; email is the identifier.
    username = None
    email = models.EmailField(_('email address'), unique=True)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []  # email and password are always required by default
    objects = CustomUserManager()
    def __str__(self):
        return self.first_name +' '+ self.last_name
| [
"dhakaljeevan165@gmail.com"
] | dhakaljeevan165@gmail.com |
18b3cba686509ec133d4f5a264ff3372b3ccf240 | 422c1f42c9998f1f0fb90a11af91996701b5ba56 | /P21Feb.py | 70ad0e4890ce74d65244e2ef688a06160fa3647a | [] | no_license | laraib-sidd/python-projects | b429e0108ed61751d47881e60b5dfbe5586afe62 | c351aab5d77a5a935547577ba2cac43c3360b57b | refs/heads/master | 2023-08-26T22:39:40.890788 | 2021-10-01T00:46:22 | 2021-10-01T00:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | """theBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',
'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',
'low-L': ' ', 'low-M': ' ', 'low-R': ' '}
def printBoard(board):
print(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])
print('-+-+-')
print(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])
print('-+-+-')
print(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])
printBoard(theBoard)
print('Hi! There, How are You?')
print('''Hi! There,
How are You?''')"""
# `in` performs a substring membership test and yields a bool.
check = 'Hello' in 'Hello! Wrold'
if check:  # `check` is already a bool; comparing with `== True` was redundant
    print('You Were Right :)')
| [
"priyanshusingh0610@gmail.com"
] | priyanshusingh0610@gmail.com |
f71b9de5becd2661e819cbbe597b1db6fce5ada9 | f9c757dfb73e1571f076846e67ad09966f8309eb | /run_test.py | b4a4c43a0fab291d78a83fcc36e31d7576d84bfe | [] | no_license | johnsonliu33/KeyWordAndDataDrivenFrameWork | 106f5fa9b0bfef8c23c7416dba9646983e528bed | dce1b377214fd6fe71e61f82c54e94d2b00dbc45 | refs/heads/master | 2020-08-09T22:35:47.336445 | 2019-08-25T22:39:57 | 2019-08-25T22:42:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from testscript.test_send_mail_and_create_contacts import test_send_mail_and_create_contacts
if __name__ == '__main__':
    # Entry point: run the send-mail + create-contacts end-to-end scenario.
    test_send_mail_and_create_contacts()
"18367157420@163.com"
] | 18367157420@163.com |
7017f876ad8f266045bc1a17ba0ea45c073b4566 | fbb6774428f4ca3fc981622c7b90522b02a9c1c4 | /WS.py | 349bd75502a05c7155fa5d27fb552b59cbb058ad | [] | no_license | haiphong129/Words-Segmention | 80e33f4b8148c305ed0d5d81d490b347fcbf2754 | 95b26ed3a63b20170351196caa32021446c5051e | refs/heads/master | 2021-01-24T08:29:55.378275 | 2017-06-05T09:08:32 | 2017-06-05T09:08:32 | 93,384,131 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | import os
import io
import math
import random
pathWS='dataws_test'
file_name='dictionary.txt'
file_input='input.txt'
#=============
def dictionary_write_out(wso):
    """Persist the segmentation dictionary to `file_name` as one
    space-separated UTF-8 line of words."""
    # `with` guarantees the handle is closed even if write() raises, and
    # str.join replaces the old quadratic `+=` string accumulation.
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(' '.join(wso.keys()))
#==============
def dictionary_read():
    """Load the segmentation dictionary from `file_name`.

    Returns a dict mapping each distinct word to 1; the dict is used purely
    for fast membership tests in words_seg().
    """
    # `with` fixes the original's never-closed file handle.
    with open(file_name, 'r', encoding='utf-8') as f:
        words = f.read().split()
    # dict.fromkeys de-duplicates while keeping the mapping shape the rest
    # of the module expects.
    return dict.fromkeys(words, 1)
#==============
def tokenize(st):
    """Pad punctuation/separator tokens with spaces, then collapse runs of
    spaces so the result splits cleanly on single spaces."""
    # Renamed from `list`, which shadowed the builtin.
    separators = ['. ', ', ', '\'', '"', ':', '!', '?', '\\', ' .', ' ,',
                  '-', '/', ')', '(', '}', '{']
    # Order matters: earlier replacements introduce spaces that the later
    # patterns (' .', ' ,') match again, exactly as before.
    for sep in separators:
        st = st.replace(sep, ' ' + sep + ' ')
    while '  ' in st:  # `in` replaces the old find(...) > -1 idiom
        st = st.replace('  ', ' ')
    return st
#===============
def words_seg_dictionary():
    """Scan every corpus file under `pathWS` and collect the distinct
    tokens (markup tokens starting with '<' are skipped)."""
    wso = {}
    for entry in os.listdir(pathWS):
        file_path = pathWS + '/' + entry
        print(file_path)
        # `with` fixes the original's leaked file handle
        # (open(...).read() was never closed).
        with open(file_path, 'r', encoding='utf-8') as f:
            text = f.read()
        for word in tokenize(text.lower()).split():
            if word[0] != '<' and word not in wso:
                wso[word] = '1'
    return wso
#==============
def words_seg(st):
    """Greedy longest-match word segmentation.

    Repeatedly finds the longest prefix of the remaining tokens whose
    '_'-joined form appears in the global dictionary `wso`, emits it, and
    falls back to a single token when nothing matches.
    """
    st_out=''
    save=''
    st=tokenize(st)
    print(st)  # debug: tokenized text
    st=st.split()
    print(st)  # debug: token list
    while len(st)>0:
        for i in range(len(st)):
            if i==0:
                s=st[i]
            else:
                s=s+'_'+st[i]
            if s in wso:
                # Remember the longest match seen so far and where it ends.
                vt=i
                save=s
        if save!='':
            st_out=st_out+' '+save
            st=st[vt+1:len(st)]
        else:
            # No dictionary match: emit the first token as-is.
            st_out=st_out+' '+st[0]
            st=st[1:len(st)]
        save=''
    return st_out[1:len(st_out)]
#===========
#ws=words_seg_dictionary()
#dictionary_write_out(ws)
# Load the persisted dictionary, then segment the UTF-16 input file.
wso=dictionary_read()
st=open(file_input,'r',encoding='utf-16').read()
st=st.lower()
print(words_seg(st))
| [
"noreply@github.com"
] | haiphong129.noreply@github.com |
7d013c0418c1a25f023038ef709831ff541cd73a | cce03f793139520a6ab8d67c31ba079546edbe60 | /venv/bin/pip3 | 80997c5047d10afd5747b16bef23cc32e96c511b | [] | no_license | AskhatB/qmart-scanner | a2d171e5fffa552b4bb2bd0e0548bc9db6b59fa3 | 2258809180623123e44ce5a922b578990086cc1b | refs/heads/master | 2020-05-17T19:12:07.253470 | 2019-05-07T10:56:22 | 2019-05-07T10:56:22 | 183,907,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/Users/askhatbaltabayev/Documents/university/dp/scanner/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.py"/".exe" wrapper suffix so pip
    # reports a clean program name in usage and error messages.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"baltabaev.a2509@gmail.com"
] | baltabaev.a2509@gmail.com | |
0dcf4b6b5bcf74c86dfbcba79e56758e85c90377 | 08c7844a2bd2d94d16e851ce78109a7f33ffc53f | /config.py | 58407e73518f4329eb385d50488e096f33660915 | [] | no_license | jreiher2003/menu-app | dd5bd4a44688f43086f6a284684ebafff74daf2a | cc93f6a41539ab00b2d85bae21ee308987c93afe | refs/heads/master | 2021-01-10T09:26:51.673657 | 2015-11-17T19:11:25 | 2015-11-17T19:11:25 | 46,355,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | WTF_CSRF_ENABLED = True
# NOTE(review): hard-coded secret key — load it from an environment variable
# in production instead of committing it to source control.
SECRET_KEY = 'you-will-never-guess'
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_TRACK_MODIFICATIONS = True
# SQLite database file stored next to this config module.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'menu.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
"jeffreiher@gmail.com"
] | jeffreiher@gmail.com |
ce98eefb259bcfd6b08c0a00991650e1df803ecc | e7b88888fab19e701f325fd41ce6da96522ba2d3 | /journal/models.py | 3240dc2043cddfb7ec8e391a9a1bcd656a0c42af | [] | no_license | jdmansour/modulejournal | 71d95debdbe20583adfa901a0190046e6f5be433 | b70ab99c7aea66f32635ef420c98c026124a5798 | refs/heads/master | 2021-01-01T15:52:45.701807 | 2017-07-19T13:40:10 | 2017-07-19T13:40:10 | 97,721,162 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,387 | py | from datetime import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
import eventfile
class Module(models.Model):
    """ Represents one module """
    # Human-readable module name and the date it was produced.
    name = models.CharField(max_length=200)
    produced = models.DateField()
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Imported lazily; django.core.urlresolvers is the pre-Django-2.0 path.
        from django.core.urlresolvers import reverse
        return reverse('module_details', args=[str(self.pk)])
class JournalEntry(models.Model):
    """ Different things can go into a module's journal: Notes,
        Images, and Runs (recorded data). This is the common
        base class. Every entry has a posted timestamp and the
        posting user in common.
    """
    module = models.ForeignKey(Module)
    posted = models.DateTimeField(default=timezone.now)  # when the entry was created
    posted_by = models.ForeignKey(User)
    class Meta:
        ordering = ['-posted']  # newest entries first
        verbose_name_plural = "journal entries"
class NoteEntry(JournalEntry):
    """A free-form text note in a module's journal."""
    text = models.TextField()
    class Meta:
        verbose_name_plural = "note entries"
# https://docs.djangoproject.com/en/1.11/ref/models/fields/#django.db.models.FileField.upload_to
def module_upload_path(journalentry: JournalEntry, filename: str):
    """Storage path for an entry's uploaded file, bucketed by module id."""
    return 'module_{}/{}'.format(journalentry.module.id, filename)
class ImageEntry(JournalEntry):
    """An uploaded image attached to a module's journal."""
    image = models.ImageField(upload_to=module_upload_path)
    class Meta:
        verbose_name_plural = "image entries"
    def image_tag(self):
        # Thumbnail preview for the admin; allow_tags marks the HTML as safe.
        return u'<img src="%s" style="max-width: 300px; max-height: 300px;" />' % self.image.url
    image_tag.short_description = "Image"
    image_tag.allow_tags = True
class RunEntry(JournalEntry):
    """A recorded data run attached to a module's journal."""
    runnumber = models.IntegerField()
    eventcount = models.BigIntegerField()
    recorded = models.DateTimeField()
    data = models.FileField(upload_to=module_upload_path)
    class Meta:
        verbose_name_plural = "run entries"
    def clean(self):
        """ Used to validate the uploaded file, and to read data from
        the header (recorded date and event count).

        TODO: It is probably stupid to have the event count in the
        header, since we do not know how many events we are going
        to have when we start recording!
        We should probably write an "event size" to the header, and
        then divide the file size by the event size.
        """
        try:
            header, data = eventfile.read_event_file(self.data)
        except ValueError:
            raise ValidationError("File is not a valid run file")
        self.runnumber = header['runnumber']
        self.eventcount = header['eventcount']
        recorded_str = header['recorded']
        # Header timestamps are ISO-8601 with a trailing 'Z' (UTC).
        recorded = datetime.strptime(recorded_str, '%Y-%m-%dT%H:%M:%S.%fZ')
        self.recorded = recorded
    def __str__(self):
        return "Run {0} ({1} events)".format(self.runnumber, self.eventcount)
class AnalysisTool(models.Model):
    """ This is something we can run over one or multiple runs. """
    name = models.CharField(max_length=200)  # display name of the tool
class ToolRun(models.Model):
    """ This is the result of using an AnalysisTool on runs.
        It has the `tool`, the `inputRuns`, and multiple
        `OutputImage`s (below) can point to it.
    """
    inputRuns = models.ManyToManyField(RunEntry)
    tool = models.ForeignKey(AnalysisTool)
    def __str__(self):
        return "Run of %s" % self.tool.name
def toolrun_upload_path(obj, filename):
    """Storage path for a tool-run artifact, bucketed by the owning ToolRun."""
    return 'toolrun_{}/{}'.format(obj.toolrun.id, filename)
class OutputImage(models.Model):
    """ The result of an AnalysisTool, as a graphic.
        TODO: We could add another type to save other kinds
        of data, e.g. text, or a calculated number.
    """
    toolrun = models.ForeignKey(ToolRun)
    image = models.ImageField(upload_to=toolrun_upload_path)
    def image_tag(self):
        # Thumbnail preview used in the Django admin.
        return u'<img src="%s" style="max-width: 300px; max-height: 300px;" />' % self.image.url
    image_tag.short_description = "Image"
    image_tag.allow_tags = True
| [
"jd.mansour@gmail.com"
] | jd.mansour@gmail.com |
e8de88bcf323a38964672d61eeadedf2845023cc | 92c61d8e53033dd08b93d4f746e66fb5b2c8a0b6 | /sanitize_input.py | f72586fb5900398ce41d1a363976089d92190832 | [] | no_license | maradude/aps-assignment | a5006cc523f07375d906efcd38347c49f4a027f6 | 279f3051e9af3512154bb5ab1c6dcfbd139e1727 | refs/heads/master | 2022-09-27T20:53:14.931506 | 2019-04-04T13:18:13 | 2019-04-04T13:18:13 | 172,369,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,132 | py | #!/usr/bin/env python3
import sys
from json import loads
from re import sub
def get_input(type="1d"):
    """
    Dispatch to the reader for the requested input format.

    :param type: type of input request (Default value = "1d")
        '1d'  = 1-dimensional
        'kd'  = k-dimensional
        'pkd' = polygonal k-dimensional
    :raises ValueError: for an unrecognized type (previously fell through
        and returned None silently)
    """
    if type == "1d":
        return _get_1d_input()
    if type == "kd":
        return _get_kd_input()
    if type == "pkd":
        return _get_polygonal_kd_input()
    raise ValueError("unknown input type: {!r}".format(type))
def _get_polygonal_kd_input():
    """Read polygonal k-dimensional input.

    Placeholder — not implemented yet; currently returns None.
    """
    pass
def _get_kd_tree_parts():
    """Read the k-d header and points from standard input.

    :return: (tests, points, k_dimensions, len_ranges) where `tests` is the
        still-unconsumed line generator positioned at the first range query.
    """
    try:
        # NOTE(review): building a generator expression cannot raise
        # TypeError here; this guard only matters if sys.stdin has been
        # replaced by a non-iterable — confirm whether it is still needed.
        tests = (line for line in sys.stdin)
    except TypeError as e:
        print(e)
        sys.exit()
    # Header line: "<num_points> <num_dimensions> <num_ranges>".
    len_points, k_dimensions, len_ranges = map(int, next(tests).split(' '))
    points = []
    for _ in range(len_points):
        points.append(tuple(int(n) for n in next(tests).split(' ')))
    return tests, points, k_dimensions, len_ranges
def _get_kd_input():
    """Read k numbers per line from stdin,
    first line needs to have amount of points, number of dimensions,
    and number of test ranges, subsequent r lines are points,
    lines after r are the test cases where each test case needs
    be made of 2 elements per line, either a pair of brackets for each query
    denoting min and max values for each dimension or for spherical
    cases brackets with kd point followed by an integer denoting radius
    e.g. "[9 20 5] [99 1000 9]" for a 3-dimensional rectangle range query
    "[10 -20 30 40] 5" for a 4-dimensional sphere range query

    :return: Tests data object with points, ranges and dimensionality
    """
    tests, points, k_dimensions, len_ranges = _get_kd_tree_parts()
    # Turn "[9 20 5] [99 1000 9]" into "[[9,20,5],[99,1000,9]]": spaces become
    # commas and the whole line is wrapped in brackets, yielding valid JSON
    # (the trailing newline is harmless whitespace to json.loads).
    arrays = [f"[{sub(' ', ',', next(tests))}]" for _ in range(len_ranges)]
    r = [loads(arr) for arr in arrays]
    return Tests(elements=points, ranges=r, dimensions=k_dimensions)
def _get_1d_input():
    """
    Read from standard input values and ranges
    first line is an integer value of how many values x and ranges y
    next x lines are integer values to be used as points
    next y lines have integer range pairs

    :return: Tests data object with points and ranges
    """
    try:
        # NOTE(review): creating the generator cannot actually raise
        # TypeError; kept for symmetry with _get_kd_tree_parts — confirm
        # and simplify.
        tests = (line for line in sys.stdin)
    except TypeError as e:
        print(e)
        sys.exit()
    len_elements, len_ranges = map(int, next(tests).split(' '))
    elements = [int(next(tests)) for _ in range(len_elements)]
    ranges = [parse_range(next(tests)) for _ in range(len_ranges)]
    return Tests(elements=elements, ranges=ranges)
def parse_range(range_string):
    """Turn a space-separated string of integers into a tuple.

    :param range_string: String
    :return: Tuple[int]
    """
    return tuple(map(int, range_string.split(' ')))
class Tests:
    """just a record to hold elements list and range tuples"""
    def __init__(self, elements, ranges, dimensions=1):
        self.elements = elements  # data points
        self.ranges = ranges  # query ranges
        self.dimensions = dimensions  # dimensionality k of the data
if __name__ == '__main__':
    # Smoke test: parse 1-d input from stdin and echo what was read.
    a = get_input()
    print(a.elements)
    print(a.ranges)
| [
"martti@aukia.com"
] | martti@aukia.com |
87425bd40514474ab3c3ac4cc8db5f99b6024c94 | 3142aa6565bd112822ed26807b52796468cb4e37 | /flower/urls.py | 7daf4a382885380f9f76087b9644cceea8432992 | [
"MIT"
] | permissive | fevenMwoldu/FlowerShop | 37fef7ab0b4a28e4a80ecb29703b55fbabf0582e | c6f4116be931a4b73b57c632a14aa4e6023578e3 | refs/heads/master | 2022-12-10T19:15:38.376660 | 2019-08-16T06:18:34 | 2019-08-16T06:18:34 | 202,285,184 | 1 | 0 | MIT | 2022-11-22T04:11:42 | 2019-08-14T06:04:22 | Python | UTF-8 | Python | false | false | 1,207 | py | """flower URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
from flowers import views as my_views
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'',include('flowers.urls')),  # app routes mounted at the site root
    url(r'^accounts/', include('registration.backends.simple.urls')),  # signup/login
    url(r'^logout/$', views.logout, {"next_page": '/'}),  # redirect home after logout
    url(r'^profile/$', my_views.add_profile, name='add-profile'),
    url(r'^vendor/$', my_views.add_vendor, name='add-vendor'),
    url(r'^flower/$', my_views.add_flower, name = 'add-flower'),
]
| [
"feven.m.woldu@gmail.com"
] | feven.m.woldu@gmail.com |
4e45df07d83b1d94159d686e4c96c5b73358f5df | e9670ebcd4b554d6ffe2f7b23c89f2982df39ddb | /Django/third_project/third_project/settings.py | a021a94f8eb94ce851f6b2659ffc03de8650b15f | [] | no_license | Rushi-Bhavsar/BFDL-Full-Stack-Training | 3ab4f58be23522a632a4c346a9738d35c2cb4cc2 | 0648d37568be2406b0027bacb0509e30987e8b38 | refs/heads/main | 2023-06-20T07:05:32.307145 | 2021-07-14T17:00:08 | 2021-07-14T17:00:08 | 374,981,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | """
Django settings for third_project project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Project-level template and static directories referenced by
# TEMPLATES['DIRS'] and STATICFILES_DIRS below.
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ps#c7dx$^zx(ijpkxvvta1n2s9^!@#4@dr4&t46rskmsra!xs5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local project app.
    'third_app',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'third_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'third_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Development default: file-based SQLite next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    STATIC_DIR,
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"rushi.bhavsar.57@gmail.com"
] | rushi.bhavsar.57@gmail.com |
b517f0bb5ca6346a38ef4745c26d781ed5b2d2cd | e83f2198cb765f048398e6485f138cf4e172199f | /src/pywaz/sprite/__init__.py | 2b4fa577eabb5f9d7b1f852d71ca2119cee7f2c3 | [] | no_license | giginet/MachiMatch | 6d1c2cb2a77323043e8e04e90df5d5e1d8e010d5 | 69b0e788f75966bf6e2fbfaba19e66da5ce22415 | refs/heads/master | 2021-01-13T01:36:19.399768 | 2011-12-25T02:40:10 | 2011-12-25T02:40:10 | 1,630,776 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | import pygame
from pygame.sprite import Sprite
class _Mixin(object):
    """Group mixin whose draw() lets custom sprites render themselves.

    Members that are instances of the imported Sprite class are expected
    to implement ``draw(surface)`` and are asked to draw; any other
    member is blitted from its ``image``/``rect`` pair as pygame does.
    """

    def draw(self, surface):
        for member in self.sprites():
            if isinstance(member, Sprite):
                member.draw(surface)
                continue
            surface.blit(member.image, member.rect)
class _Mixin2(object):
    # Dirty-rect variant of the draw mixin: mirrors pygame's
    # RenderUpdates.draw(), but lets Sprite subclasses draw themselves.
    def draw(self, surface):
        """Draw all sprites and return the list of dirty rectangles."""
        # Local bindings for speed inside the loop (upstream pygame idiom).
        spritedict = self.spritedict
        surface_blit = surface.blit
        # Rects of sprites removed since the last draw start out dirty.
        dirty = self.lostsprites
        self.lostsprites = []
        dirty_append = dirty.append
        for s in self.sprites():
            # Previous on-screen rect; 0 means "never drawn before".
            r = spritedict[s]
            if isinstance(s, Sprite):
                # Custom sprites render themselves and report their rect.
                newrect = s.draw(surface)
            else:
                newrect = surface_blit(s.image, s.rect)
            # NOTE: identity check against the 0 sentinel matches the
            # upstream pygame implementation this code is derived from.
            if r is 0:
                dirty_append(newrect)
            else:
                if newrect and newrect.colliderect(r):
                    # Overlapping old/new areas merge into one dirty rect.
                    dirty_append(newrect.union(r))
                elif newrect:
                    dirty_append(newrect)
                    dirty_append(r)
            spritedict[s] = newrect
        return dirty
# group -----------------------------------------------------------------------------------
#
# Notice:
#   The order of inheritance is IMPORTANT: the mixin must come first so
#   its draw() overrides the pygame base class's draw() in the MRO.
#
class Group(_Mixin, pygame.sprite.Group):
    pass
class RenderUpdates(_Mixin2, pygame.sprite.RenderUpdates):
    pass
class OrderedUpdates(_Mixin2, pygame.sprite.OrderedUpdates):
    pass
class LayeredUpdates(_Mixin2, pygame.sprite.LayeredUpdates):
    pass
# collide ---------------------------------------------------------------------------------
#
# Notice:
#   Only `collide_rect` and `spritecollide` are modified here; the rest
#   are re-exported unchanged from pygame.sprite.
#
from pygame.sprite import collide_rect_ratio
from pygame.sprite import collide_circle, collide_circle_ratio
from pygame.sprite import collide_mask
from pygame.sprite import groupcollide, spritecollideany
def collide_rect(left, right):
    u"""Collision test between two sprites using each sprite's `coltest_rect`."""
    return left.coltest_rect.colliderect(right.coltest_rect)
def spritecollide(sprite, group, dokill, collided = None):
    u"""Like pygame.sprite.spritecollide, but defaults to our collide_rect."""
    if collided is None:
        collided = collide_rect
    return pygame.sprite.spritecollide(sprite, group, dokill, collided)
| [
"giginet.net@gmail.com"
] | giginet.net@gmail.com |
d63333e72ccdd3e1db745dcd92c76b8eb6523227 | 77bae4adbdea1cc5d8f22e0df0dbe3e20e445d17 | /system/views_daily.py | f4c2d6faff4d18a5e933b9320825823ed3bd00d0 | [] | no_license | Gavin188/WGT1 | d8d015c22edf4613e91db353bea9d7394c1ffaa4 | ecb28f0172ccbe5f99e71f6b8fb5b96fe256e587 | refs/heads/master | 2020-08-29T05:19:03.188790 | 2019-11-20T09:11:36 | 2019-11-20T09:11:36 | 217,935,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,902 | py | import datetime
import json
import pandas as pd
from django.core.paginator import Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import render
from django.views import View
from overtime.time_transformation import time_tran
from system.mixin import LoginRequiredMixin
from system.models import EngineerRank, VersionView, DutyView, UserProfile
class ExcelUpload(LoginRequiredMixin, View):
    """Upload page for the three daily Excel reports.

    POST accepts up to three optional files in one request and answers
    with JSON ``{"result": bool, "msg": str}``:

    * ``f1`` version overview  (columns 日期/版本/平台) -> replaces all
      ``VersionView`` rows;
    * ``f2`` duty roster       (columns 日期/名字) -> replaces all
      ``DutyView`` rows;
    * ``f3`` engineer ranking  (columns Originator/Count) -> appended to
      ``EngineerRank``; rows whose Originator has no matching
      ``UserProfile.radar`` are skipped.
    """

    def get(self, request):
        # Render the plain upload form; no template context required.
        return render(request, 'system/ExcelUpload.html')

    def post(self, request):
        res = dict(result=False)
        f1 = request.FILES.get('f1')
        f2 = request.FILES.get('f2')
        f3 = request.FILES.get('f3')
        # --- version overview (f1) -------------------------------------
        if f1:
            if f1.name.endswith('.xlsx') or f1.name.endswith('.xls'):
                df = pd.read_excel(f1)
                df.fillna('', inplace=True)
                if list(df.columns) == ['日期', '版本', '平台']:
                    # Full refresh: clear the previous overview first.
                    VersionView.objects.all().delete()
                    for i in range(len(df)):
                        # Persist one spreadsheet row.
                        version = VersionView()
                        version.date = df.loc[i, '日期']
                        version.version = df.loc[i, '版本']
                        version.platform = df.loc[i, '平台']
                        version.save()
                    res['msg'] = '上传成功!'
                    res['result'] = True
                else:
                    res['msg'] = "版本概览格式有誤"
                    res['result'] = False
                    return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
            else:
                res['msg'] = "请上传有效的文件"
                res['result'] = False
                return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
        # --- duty roster (f2) ------------------------------------------
        if f2:
            if f2.name.endswith('.xlsx') or f2.name.endswith('.xls'):
                df = pd.read_excel(f2)
                df.fillna('', inplace=True)
                if list(df.columns) == ['日期', '名字']:
                    # Full refresh, like f1.
                    DutyView.objects.all().delete()
                    for i in range(len(df)):
                        # Persist one spreadsheet row; stamp the import date.
                        duty = DutyView()
                        duty.weekend = df.loc[i, '日期']
                        duty.name = df.loc[i, '名字']
                        duty.date = datetime.date.today().strftime('%Y-%m-%d')
                        duty.save()
                    res['msg'] = '上传成功!'
                    res['result'] = True
                else:
                    res['msg'] = "值日概览格式有誤"
                    res['result'] = False
                    return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
            else:
                res['msg'] = "请上传有效的文件"
                res['result'] = False
                return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
        # --- engineer ranking (f3) -------------------------------------
        if f3:
            if f3.name.endswith('.xlsx') or f3.name.endswith('.xls'):
                df = pd.read_excel(f3)
                df.fillna('', inplace=True)
                if list(df.columns) == ['Originator', 'Count']:
                    # Unlike f1/f2 the ranking table is NOT cleared:
                    # history accumulates one batch per day.
                    for i in range(len(df)):
                        name = list(UserProfile.objects.filter(radar=df.loc[i, 'Originator']))
                        rank = EngineerRank()
                        if name:
                            # Only store rows for engineers known to the system.
                            rank.name = df.loc[i, 'Originator']
                            rank.count = df.loc[i, 'Count']
                            rank.date = datetime.date.today().strftime('%Y-%m-%d')
                            rank.save()
                    res['msg'] = '上传成功!'
                    res['result'] = True
                else:
                    res['msg'] = "工程师排名格式有誤"
                    res['result'] = False
                    return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
            else:
                res['msg'] = "请上传有效的文件"
                res['result'] = False
                return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
        if res['result']:
            return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
        # Bug fix: the original view fell through and implicitly returned
        # None when no file field was submitted; report the problem instead.
        res['msg'] = "请上传有效的文件"
        return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
class HistoryManView(LoginRequiredMixin, View):
    '''Monthly history browser for engineer ranking records (EngineerRank).'''
    def get(self, request):
        # Default the year/month filter widgets to the current date.
        today = datetime.date.today() # - relativedelta(months=-1)
        next_date = today.strftime('%Y')
        next_moth = today.strftime('%m')
        context = {
            'next_date': next_date,
            'next_moth': next_moth,
        }
        return render(request, 'system/historyManager.html', context)
    def post(self, request):
        """Return one paginated JSON page of per-engineer daily counts."""
        filters = {}
        res = {"success": "",
               "totalRows": "",
               "curPage": 1,
               "data": " "}
        # Optional name filter from the search form.
        person = request.POST.get('person')
        if request.POST.get('person'):
            filters['name__contains'] = person
        fields = ['id', 'name', 'count', 'date']
        # Month being browsed, built from the year/month selectors.
        date = request.POST.get('date')
        moth = request.POST.get('moth')
        next_today = date + '-' + moth + '-01'
        month = time_tran(next_today)[0:7]
        # Restrict to records of that month (date stored as 'YYYY-MM-DD').
        filters['date__startswith'] = month
        datas = list(
            EngineerRank.objects.filter(**filters).values(*fields))
        # Collect the distinct engineer names first.
        data_list = []
        arr_per = []
        for i in datas:
            if i['name'] not in arr_per:
                arr_per.append(i['name'])
        # Pivot per-day rows into one dict per engineer: day-of-month -> count,
        # plus the monthly total under 'Count'.
        for per in arr_per:
            data_dict = {}
            count = 0
            for i in datas:
                if i['name'] == per:
                    data_dict['name'] = i['name']
                    # NOTE(review): [0] raises IndexError if no UserProfile has
                    # this radar name — upload skips unknown names, but older
                    # rows may predate that; confirm.
                    data_dict['username'] = list(UserProfile.objects.filter(radar=i['name']).values('name'))[0][
                        'name']
                    data_dict[i['date'][8:10]] = i['count']
                    count = count + int(i['count'])
            data_dict['Count'] = count
            data_list.append(data_dict)
        res["totalRows"] = len(datas)
        # Server-side pagination of the pivoted rows.
        pageIndex = request.POST.get('curPage')
        pageSize = request.POST.get('pageSize')
        pageInator = Paginator(data_list, pageSize)
        contacts = pageInator.page(pageIndex)
        data_list = [] # final page of rows returned to the client
        for contact in contacts:
            data_list.append(contact)
        res["data"] = data_list
        res["curPage"] = pageIndex
        res['success'] = True
        return HttpResponse(json.dumps(res, cls=DjangoJSONEncoder), content_type='application/json')
class ExcelDeleteUpload(LoginRequiredMixin, View):
    """Deletes today's EngineerRank rows so the daily file can be re-uploaded."""

    def post(self, request):
        today = datetime.date.today().strftime('%Y-%m-%d')
        todays_rows = EngineerRank.objects.filter(date=today)
        res = {'result': False}
        if len(todays_rows) == 0:
            # Nothing has been imported today yet.
            res['message'] = '今日的文件还没有上传!'
        else:
            todays_rows.delete()
            res['result'] = True
        payload = json.dumps(res, cls=DjangoJSONEncoder)
        return HttpResponse(payload, content_type='application/json')
| [
"632929331@qq.com"
] | 632929331@qq.com |
78358680c607b628ec233b77c129be64d8f1150d | f8f31666adfa17cf94bda1ff8d818778eeaf4f81 | /Time series .py | 5889285bef48c5d9a912848e9db19149a87f6983 | [] | no_license | JophyLiu/Time-Series-in-manage-hard-drive | 38cdd03bd24ee63a1897a8d3b92ac85d1edf80b1 | 273e9e1f84febf3138a631801c88f59b98203457 | refs/heads/master | 2021-04-27T00:08:33.417552 | 2018-03-04T04:17:29 | 2018-03-04T04:17:29 | 123,756,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,847 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 03 21:45:41 2018
@author: LIU
"""
#smooth
import pandas as pd
import numpy as np
# Load the disk-usage measurements and keep only target machine 184.
datafile='d:/data/discdata.xls'
data=pd.read_excel(datafile,encoding='utf-8')
data=data[data['TARGET_ID']==184].copy()
# Reshape: each collection timestamp yields one row with the C: and D:
# drive values side by side.
group_data=data.groupby('COLLECTTIME')
def reform(x):
    # Per-group reshaper: assumes rows come in (C-drive, D-drive) order
    # within each COLLECTTIME group — TODO confirm against the raw file.
    result=pd.Series(index=['sys_name','cwxt_bd:184:c:\\','cwxt_bd:184:d:\\','collecttime'])
    result['sys_name']=x['SYS_NAME'].iloc[0]
    result['cwxt_bd:184:c:\\']=x['VALUE'].iloc[0]
    result['cwxt_bd:184:d:\\']=x['VALUE'].iloc[1]
    result['collecttime']=x['COLLECTTIME'].iloc[0]
    return result
data_processed=group_data.apply(reform)
print(data_processed.head())
# Quick visual check of the two drive series.
column=['cwxt_bd:184:d:\\']
data_d=data_processed[column]
column2=['cwxt_bd:184:c:\\']
data_c=data_processed[column2]
import matplotlib.pyplot as plt
plt.plot(data_d)
plt.plot(data_c)
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
    """Plot rolling mean/std of *timeseries* and print the ADF test result.

    The series is usually judged stationary when the Dickey-Fuller
    p-value falls below 0.05.
    """
    # Rolling statistics over a 12-observation window.
    # Fix: pd.rolling_mean/pd.rolling_std were removed from pandas;
    # the Series.rolling(...) accessor is the supported replacement.
    rolmean = timeseries.rolling(window=12).mean()
    rolstd = timeseries.rolling(window=12).std()
    # Plot rolling statistics next to the original series.
    plt.plot(timeseries, color='blue', label='Original')
    plt.plot(rolmean, color='red', label='Rolling Mean')
    plt.plot(rolstd, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)
    # Perform the augmented Dickey-Fuller test and pretty-print it.
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
    for key, value in dftest[4].items():
        dfoutput['Critical Value (%s)' % key] = value
    print(dfoutput)
test_stationarity(data_d)
# Find the smallest differencing order d that makes each series stationary
# (repeat ADF until p < 0.05).
data_time=data_processed.iloc[:len(data_processed)-5]
from statsmodels.tsa.stattools import adfuller as ADF
diff=0
adf=ADF(data_time['cwxt_bd:184:d:\\'])
while adf[1]>=0.05:
    diff=diff+1
    adf=ADF(data_time['cwxt_bd:184:d:\\'].diff(diff).dropna())
print('the original is smooth after %s diff, p_value is %s'%(diff,adf[1]))
plt.plot(data_time['cwxt_bd:184:d:\\'].diff(1))
diff=0
adf=ADF(data_time['cwxt_bd:184:c:\\'])
while adf[1]>=0.05:
    diff=diff+1
    adf=ADF(data_time['cwxt_bd:184:c:\\'].diff(diff).dropna())
print('the original is smooth after %s diff, p_value is %s'%(diff,adf[1]))
plt.plot(data_time['cwxt_bd:184:c:\\'].diff(1))
data_station=pd.DataFrame() # NOTE(review): immediately overwritten below
data_station=data_time
# White-noise (Ljung-Box) check on the original and once-differenced D: series.
from statsmodels.stats.diagnostic import acorr_ljungbox
[[lb], [p]] = acorr_ljungbox(data_time['cwxt_bd:184:d:\\'], lags = 1)
if p < 0.05:
    print('original data is not white noise series,p-value is %s' %p)
else:
    print('original data is white noise series,p-value is %s' %p)
a=data_time['cwxt_bd:184:d:\\'].diff(1).dropna()
[[lb1], [p1]] = acorr_ljungbox(a.dropna(),lags=1)
# NOTE(review): both branches below print the same message and use p
# instead of p1 — almost certainly meant to mirror the block above.
if p1 < 0.05:
    print('one diff data is not white noise data:%s' %p)
else:
    print('one diff data is not white noise data:%s' %p)
# Grid-search (p, q) for ARIMA(p, 1, q) by minimum BIC.
# NOTE(review): uses len(data), not len(data_processed) as above — confirm.
data_fit = data_processed.iloc[: len(data)-5]
xdata = data_fit['cwxt_bd:184:d:\\']
from statsmodels.tsa.arima_model import ARIMA
pmax = int(len(xdata)/10)# normally no more than 10
qmax = int(len(xdata)/10)# normally no more than 10
bic_matrix = [] # BIC for every (p, q) pair
for p in range(pmax+1):
    tmp = []
    for q in range(qmax+1):
        try: # some (p, q) combinations fail to converge; record None
            tmp.append(ARIMA(xdata, (p,1,q)).fit().bic)
        except:
            tmp.append(None)
    bic_matrix.append(tmp)
bic_matrix = pd.DataFrame(bic_matrix) # matrix indexed by (p, q)
p,q = bic_matrix.stack().idxmin() # stack then idxmin -> (p, q) of minimum BIC
print('BIC MIN P AND Q VALUE IS:%s、%s' %(p,q))
# Residual diagnostics for the chosen ARIMA(0,1,1) model.
lagnum = 12 # number of lags for the Ljung-Box test
from statsmodels.tsa.arima_model import ARIMA #ARIMA(0,1,1)
arima = ARIMA(xdata, (0, 1, 1)).fit() # build and train
xdata_pred = arima.predict(typ = 'levels') # in-sample prediction
pred_error = (xdata_pred - xdata).dropna() # residuals
from statsmodels.stats.diagnostic import acorr_ljungbox # white-noise test
lb, p= acorr_ljungbox(pred_error, lags = lagnum)
h = (p < 0.05).sum() # count of lags rejecting the white-noise hypothesis
if h > 0:
    print('ARIMA(0,1,1) is not white noise')
else:
    print('ARIMA(0,1,1) is white noise')
# Error metrics. NOTE(review): the printed labels are shifted — the
# values are actually (mae_, rmse_, mape_) but are labelled mape/mse/rmse.
abs_ = (xdata_pred - xdata).abs()
mae_ = abs_.mean() # mean absolute error
rmse_ = ((abs_**2).mean())**0.5 # root mean squared error
mape_ = (abs_/xdata).mean() # mean absolute percentage error
print('mape: %0.4f,\n mse:%0.4f,\n rmse:%0.6f。' %(mae_, rmse_, mape_))
| [
"noreply@github.com"
] | JophyLiu.noreply@github.com |
321a116a35d05ef1decae8de61fd36d7bc8c7549 | 11003f40816b5e18d4f09bce5035a52f2168aca7 | /NPVCalc_Graph.py | ae9dad969acb78cfd084d36b6dbe87db7f4d8a9b | [] | no_license | robertawatson/py_beginner | 8d288411d50e72b33c260b1904ad1806200ac51d | 59ae4c0ecb3c57f19bd103c14d72137cca8d69b6 | refs/heads/master | 2023-06-02T08:36:01.098580 | 2021-06-20T03:00:22 | 2021-06-20T03:00:22 | 265,307,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | # A very simple net present value (NPV) calculator that graphs the inputs and the calculated discounted values.
# NPV is found by summing the discounted values of all future cashflows.
# User inputs the number of periods, a cash flow for each period, and the discount rate.
# Zero and negativbe values are acceptable.
# The discount rate must be entered as a decimal, e.g. 9% == 0.09
# As Python lists start w/ index 0:
# "Period 0" is t=0, e.g. the initial cashflow immeadiately at the start of the series.
# A user that enters "5" for periods will need to enter 6 cashflows.
# To correct for the indexing issue, periods += 1 has been included.
# See the bottom for an example.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Module-level state shared by main()/calc()/data()/print_out():
cash_flow = []  # user-entered cashflow per period
discounted_value = []  # present value of each cashflow
period = []  # period indices 0..n (consumed destructively by calc())
df = pd.DataFrame(columns=['Period','Cashflow', 'Present Value'])
def main():
    """Drive the calculator: collect inputs, compute NPV, then plot.

    Prompts for the number of periods (period 0 is the immediate
    cashflow, so n periods require n+1 entries), one cashflow per
    period, and the discount rate as a decimal (e.g. 0.09 for 9%).
    """
    welcome()
    input_periods = int(input('Number of Periods: '))
    # Period 0 is the initial cashflow, so n periods need n+1 entries.
    input_periods +=1
    for i in range(input_periods):
        cf = float(input('Cashflow for Period ' + str(i) + ': '))
        cash_flow.append(cf)
        period.append(i)
    # Bug fix: the rate was previously bound to a misspelled name
    # ('compount_rate') while calc() was called with 'compound_rate',
    # which raised NameError at runtime. One consistent name is used now.
    discount_rate = float(input('Discount Rate (as a decimal): '))
    calc(cash_flow, discount_rate)
    plot(input_periods)
def welcome():
    """Print the calculator's three-line banner."""
    banner = (
        'Welcome.',
        'This is a simple Net Present Value Calculator.',
        '______________________________________________________________________',
    )
    for line in banner:
        print(line)
def calc(p,r):
    # Core NPV routine: discounts each cashflow in p at rate r, stores
    # the per-period present values in discounted_value, then persists
    # (data()) and prints (print_out()) the results.
    # NOTE: period is consumed destructively via pop(0) AFTER its values
    # are copied into the DataFrame, so calc() can only run once per
    # program invocation.
    df['Period'] = period
    for cf in p:
        n = period.pop(0)
        # Present value of a single cashflow n periods out.
        pv = cf/((1+r)**n)
        discounted_value.append(pv)
    npv = sum(discounted_value)
    data()
    print_out(npv)
def data():
    # Copies the module-level lists into the shared DataFrame and writes
    # it to Cashflow.csv in the current working directory.
    df['Cashflow'] = cash_flow
    df['Present Value'] = discounted_value
    df.to_csv('Cashflow.csv', index=False)
def print_out(n):
    # Prints the populated DataFrame and the net present value n
    # (the sum computed in calc()), framed by separator lines.
    print('______________________________________________________________________')
    print(df)
    print('______________________________________________________________________')
    print('The Net Present Value of your cashflow inputs is: ', n)
    print('______________________________________________________________________')
def plot(per):
    # Line chart of raw cashflows vs. their discounted values, drawn
    # with matplotlib and styled with seaborn. `per` is the number of
    # periods (used to place one x-axis tick per period).
    df2 = df[['Cashflow', 'Present Value']]
    sns.set(style="whitegrid")
    sns.set_style("ticks")
    graph = sns.lineplot(data=df2, palette="bright", linewidth=2.5)
    graph.set(xlabel='Period', ylabel='Dollar Values')
    plt.xticks(np.arange(0, per, step=1))
    plt.legend(loc='lower right')
    plt.grid(False)
    plt.show()
# Run the interactive calculator only when executed as a script.
if __name__ == "__main__":
    main()
# An example:
# Cashflows over five full years (5 periods) would normally have 6 cashflows;
# An initial outflow (representing an investment),
# followed by a series of inflows (representing the investment returns).
# -100, 100, 110, 121, 133.10, 146.41
# This file was actually the basis for NPVCalc_Basic.py, but grew to the above code. NPVCalc_Basic.py was reuploaded.
| [
"noreply@github.com"
] | robertawatson.noreply@github.com |
36c897c6c10db94bf4ae5086d8b95a3b90272cf1 | 623b6460414b1c94778f46f22d3168855c2f81d2 | /level_1/urls.py | 90a00134ee459efa6e57260ea99b594d37da8894 | [] | no_license | Akanksha18/geekbattle | db6f36cf77a8b7900011c0e8dec33e2df6dfa88b | 3a15cdbbd62e41fc25a4a30f2257adad315ac153 | refs/heads/master | 2021-01-19T19:33:23.585954 | 2015-01-15T14:23:07 | 2015-01-15T14:23:07 | 29,292,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'level_1.views.home', name='home'),
url(r'^start', 'level_1.views.start', name='start'),
url(r'^question/(?P<qid>\d+)', 'level_1.views.question', name='question'),
url(r'^submit', 'level_1.views.submit', name='submit'),
url(r'^mark', 'level_1.views.mark', name='mark'),
url(r'^question_list', 'level_1.views.question_list', name='question_list'),
url(r'^question_json/(?P<qid>\d+)','level_1.views.question_json', name="question_json"),
url(r'^answer_json','level_1.views.answer_json', name="answer_json"),
)
| [
"akanksha.shrivastava9@gmail.com"
] | akanksha.shrivastava9@gmail.com |
a38d498ea8d2bba1f9997d93bf043f46369a8934 | 3be3b30d66411ee1df0427959ee4c999f99b154f | /scripts/img_resizer.py | b5d8430457e9a63ceeb097750d8c5c5f86b65d47 | [
"MIT"
] | permissive | Suckzoo/suckzoo.github.io | 737f3e313317bcc63b9e12a14cf656e5cacad59a | 75125530ad6faaa7f7e30a76b52df442d670aa22 | refs/heads/master | 2023-06-22T13:22:08.452024 | 2023-06-08T05:54:46 | 2023-06-08T05:54:46 | 78,521,273 | 3 | 0 | null | 2023-04-11T22:44:52 | 2017-01-10T10:07:03 | JavaScript | UTF-8 | Python | false | false | 297 | py | from PIL import Image
BASE_DIR = '../assets/images/raft/'

# Paths of the five screenshots to shrink (log_rep_1.png .. log_rep_5.png).
imgs = list(map(lambda index: BASE_DIR + 'log_rep_{}.png'.format(index), range(1, 6)))

for img_route in imgs:
    img = Image.open(img_route)
    w, h = img.size
    # Halve each dimension and overwrite the file in place.
    # Fix: Image.ANTIALIAS (an alias of LANCZOS) was removed in Pillow 10;
    # Image.LANCZOS is the supported equivalent filter.
    img = img.resize((w // 2, h // 2), Image.LANCZOS)
    img.save(img_route)
| [
"tjkj555@gmail.com"
] | tjkj555@gmail.com |
6a5d45e0d2c78c047748b951c1c2eba6ab80505d | 7ed5a71eec86d8330ffbd15ff111a687650f9426 | /data/code/convert_color.py | ff66d89ed2e1bdadfd86d58896e64358a1ebe849 | [
"MIT"
] | permissive | NewYinbao/keras_yolov3 | 27496db155f06eee8edb9a3c144cf1941a199a41 | e84449faa467b9e540d4e53af6b9ac8f0c031ebb | refs/heads/master | 2020-09-11T22:22:43.393105 | 2020-05-17T09:34:30 | 2020-05-17T09:34:30 | 222,209,184 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # @Author: newyinbao
# @Date: 2019-09-21 22:09:03
# @Function: 图片通道转换
# @TODO:
# @Last Modified by: newyinbao
# @Last Modified time: 2019-09-21 22:09:03
import os
import numpy as np
from utils import convert_br
if __name__ == "__main__":
    # In-place conversion: source and destination directories are the
    # same, so every image in the folder is overwritten.
    oldpath = '/media/ship/documents/8.231/3/'
    newpath = '/media/ship/documents/8.231/3/'
    images_name = os.listdir(oldpath)
    for img in images_name:
        # convert_br swaps the image channels (see the file header:
        # "图片通道转换" = image channel conversion).
        convert_br(oldpath+img, newpath+img)
        # Logs one line per converted file.
        print('success!' + ' from ' + oldpath+img+' save to '+ newpath)
"842162949@qq.com"
] | 842162949@qq.com |
d67adb2ab0d8a0b286acb3f6cd4a86506687adfd | fc98d388b77233270bfe6f0a96195400e3b70bf2 | /database/auxScripts/usearch_validate.py | 1ecbf7c41e619419637968af14e641064e1b22b0 | [] | no_license | Bobay-Ochman/ConSpeciFix | 2581b3a52f89f7c8bfde07aa712ed9da30dec1ba | d9ee41d4a47aabe4c01262a4faa6ae5fd172f6b9 | refs/heads/master | 2021-01-19T04:11:03.799756 | 2019-03-12T20:57:28 | 2019-03-12T20:57:28 | 87,354,256 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | from multiprocessing import *
import multiprocessing
import subprocess
import math
import time
from config import *
# Sentinel job: workers compare incoming jobs against it with == and exit.
sentinel = ['no work to be done, go home']
# Number of validation worker processes to spawn.
maxThreads = 36
# NOTE(review): the two totals below are never updated in this script.
totalWorkPut = 0
totalSequencesRemoved = 0
def work(jobQ, remQ):
    """Worker loop: validate usearch output files pulled from jobQ.

    Each job is ``[path, species, genome1, genome2]``; the corresponding
    pairwise output file must contain 12 tab-separated fields per line.
    Jobs whose file contains a malformed line are reported on remQ
    (once per bad line, matching the original behaviour). The loop
    exits when the sentinel job is received.
    """
    while True:
        # Renamed from `work` to avoid shadowing this function's name.
        job = jobQ.get()
        if job == sentinel:
            print('done')
            return
        sp = job[1]
        v1 = job[2]
        v2 = job[3]
        name = v1 + '-' + v2
        fname = PATH_TO_OUTPUT + sp + '/' + USEARCH_FOLDER + '/' + name
        try:
            h = open(fname, 'r')
        except EnvironmentError:
            # Missing pair file: nothing to validate for this job.
            # (Was a bare except; narrowed to file-system errors.)
            continue
        try:
            lines = h.readlines()
        finally:
            # Ensure the handle is released even if readlines() fails.
            h.close()
        for l in lines:
            if len(l.split('\t')) != 12:
                print(sp + '/' + USEARCH_FOLDER + '/' + name)
                remQ.put([PATH_TO_OUTPUT, sp, v1, v2])
def printInvalid(remQ):
    """Consume invalid-file reports from remQ and append them to the
    re-do list at todo/usearch_re_do.txt (tab-separated, one per line).

    Runs until the sentinel is received. Each line is flushed
    immediately so partial results survive an abrupt shutdown.
    """
    # Fix: the file was previously opened and never closed; the context
    # manager guarantees it is closed when the sentinel arrives.
    with open('todo/usearch_re_do.txt', 'w') as incomFile:
        while True:
            work = remQ.get()
            if work == sentinel:
                print('done')
                return
            print('working!!!!! ' + str(work))
            sp = work[1]
            v1 = work[2]
            v2 = work[3]
            incomFile.write(PATH_TO_OUTPUT + '\t' + sp + '\t' + v1 + '\t' + v2 + '\n')
            incomFile.flush()
if __name__ == '__main__':
    # Producer side: enumerate all genome pairs per species and feed
    # them to the worker pool. (File uses Python 2 print statements.)
    jobQ = Queue(maxsize=maxThreads)#bounded so at most maxThreads jobs wait -> limits how many similar jobs are in flight at once
    remQ = Queue()
    killList = []
    processes = []
    # Species listed in todo/LargeSpec.txt are skipped below.
    largeSpecList = []
    f = open('todo/LargeSpec.txt','r')
    for line in f:
        largeSpecList.append(line.strip('\n'))
    f.close()
    # Single consumer of invalid-file reports.
    # NOTE(review): this process is never joined before exit.
    p = Process(target=printInvalid, args=([remQ]))
    p.start()
    for i in range(maxThreads):
        p = Process(target=work, args=(jobQ,remQ))
        p.start()
        processes.append(p)
    genomes = getGenomes(getSpecies())
    for sp in genomes:
        if sp not in largeSpecList:
            print 'doing '+sp +' ' +str(len(genomes[sp]))
            # All ordered genome pairs (v1, v2) with v1 != v2.
            for v1 in genomes[sp]:
                for v2 in genomes[sp]:
                    if v1!=v2:
                        jobQ.put([PATH_TO_OUTPUT,sp,v1,v2])
    # One sentinel per worker so every worker terminates.
    for i in range(maxThreads):
        jobQ.put(sentinel)
    print 'done producing'
    for p in processes:
        p.join()
    # Finally stop the report writer.
    remQ.put(sentinel)
    print 'all work is done!'
| [
"brian.e2014@gmail.com"
] | brian.e2014@gmail.com |
b2a81028133c50cdfce5739f88545a2d3c8f5b30 | 41c6f5d0514ff051ed5de4dbae719818711ccb3b | /Aws-stepfn-working-final/process_fail.py | e844e218bf906ca3ffa7e5ac578a031712d19f12 | [] | no_license | sgouda0412/Aws-Python | e2ba5426a21d6c1314a7ab16230f89781ebf7a63 | e613b85040e1c5f2ff39ba6c88fe067454f65736 | refs/heads/main | 2023-08-19T06:07:01.307062 | 2021-10-27T05:41:10 | 2021-10-27T05:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | from __future__ import print_function
import json
import urllib
import boto3
import datetime
# Module-level AWS S3 handles, created once per Lambda container.
s3 = boto3.resource('s3')
clientname=boto3.client('s3')
def lambda_handler(message,context):
    """Step Functions task: copy objects between S3 buckets, then reply.

    Copies objects from 'initialbucket369' to 'finalbucket369' and
    returns a dict with TransactionType (echoed from *message*), a
    timestamp, and a fixed message.
    """
    bucket = 'initialbucket369'
    try:
        # NOTE: MaxKeys=5 means only the first 5 objects are listed/copied.
        response = clientname.list_objects(
            Bucket=bucket,
            MaxKeys=5
        )
        # NOTE(review): 'Contents' is absent when the bucket is empty,
        # which would raise KeyError here — confirm intended.
        for record in response['Contents']:
            key = record['Key']
            copy_source = {
                'Bucket': bucket,
                'Key': key
            }
            try:
                destbucket = s3.Bucket('finalbucket369')
                destbucket.copy(copy_source, key)
                print('{} transferred to destination bucket'.format(key))
            except Exception as e:
                # Log and re-raise so Step Functions sees the failure.
                print(e)
                print('Error getting object {} from bucket {}. '.format(key, bucket))
                raise e
    except Exception as e:
        print(e)
        raise e
    #1. Log input message
    print('Received message from Step Function:')
    print(message)
    #2. Construct response
    response = {}
    response['TransactionType'] = message['TransactionType']
    response['Timestamp'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    response['Message'] = 'Hello from Process Fail!'
    #3. Return response
    print(response)
    return (response)
return (response) | [
"54799446+vishnuvs369@users.noreply.github.com"
] | 54799446+vishnuvs369@users.noreply.github.com |
886b2bb7e0fa8f73b199442febe2b4dbd85d3ac0 | d835fc3156f515f9c68b01a5c0685d3deeac3b0f | /src/oca_github_bot/utils.py | bd76d6d8d613992f3627febcfe9645d2aa0932f2 | [
"MIT"
] | permissive | OCA/oca-github-bot | b9c35ef85a07753f7c40015eafe3c4d6693bc4b1 | 5eeb2f8503f572eb224de9c8964a4e7d805f4131 | refs/heads/master | 2023-08-22T03:03:35.355098 | 2023-08-02T06:32:09 | 2023-08-02T06:32:09 | 149,272,713 | 36 | 63 | MIT | 2023-09-11T21:49:50 | 2018-09-18T10:49:51 | Python | UTF-8 | Python | false | false | 895 | py | # Copyright (c) ACSONE SA/NV 2021
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
import re
import shlex
import time
from typing import Sequence
from . import config
def hide_secrets(s: str) -> str:
    """Return *s* with the configured GitHub token replaced by ``***``."""
    # TODO do we want to hide other secrets ?
    return s.replace(config.GITHUB_TOKEN, "***")
def retry_on_exception(
    func, exception_regex: str, max_retries: int = 3, sleep_time: float = 5.0
):
    """Call *func*, retrying when it raises a matching exception.

    An exception is retryable when ``str(exc)`` matches
    *exception_regex*; non-matching exceptions propagate immediately.
    At most ``max_retries`` retries are made (so ``max_retries + 1``
    calls in total), sleeping *sleep_time* seconds between attempts;
    the final matching exception is re-raised.
    """
    attempts = max(0, max_retries) + 1
    for attempt in range(attempts):
        try:
            return func()
        except Exception as exc:
            retryable = re.search(exception_regex, str(exc)) is not None
            if not retryable or attempt == attempts - 1:
                raise
            time.sleep(sleep_time)
def cmd_to_str(cmd: Sequence[str]) -> str:
    """Render a command argument vector as one shell-quoted string."""
    parts = [str(part) for part in cmd]
    return shlex.join(parts)
| [
"stephane.bidoul@acsone.eu"
] | stephane.bidoul@acsone.eu |
b3772a9cc08ef1ddccb42a9c86928cc0f4b11fa9 | 6c487ff1aae1d23b1de897829535d0061ce53b5e | /payments_usd_mxn/models/models.py | ab0ccc2eafb1575f57dd42b6129073dd78c1e7b3 | [] | no_license | xmarts/payments_usd_mxn | b7c71335240997f1dbb817539367f2e1757104e3 | 6627f3b1eccbe4a527d6272b03e9abdfdfb3f7a7 | refs/heads/master | 2020-06-16T03:24:15.964248 | 2019-07-17T18:41:09 | 2019-07-17T18:41:09 | 195,465,975 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class wizar_paGOS_USD_MXN(models.Model):
    # Wizard model: opens a tree view of invoices filtered by invoice type.
    _name = 'wizar.usd.mxn'
    name = fields.Char(string="name")
    # Invoice type selector: customer or supplier invoices.
    type = fields.Selection([('out_invoice','Factura de Cliente'), ('in_invoice','Factura de Proveedor')],string="Tipo de Factura")
    @api.multi
    def pagos(self):
        """Return an act_window action listing invoices of the chosen type.

        NOTE(review): when ``type`` is unset the method implicitly
        returns None (no action); the ``else`` branch is unreachable
        given the Selection values above — confirm intended.
        """
        self.ensure_one()
        if self.type :
            dom =[]
            title_sel = ""
            if self.type == 'out_invoice':
                dom = [('type','=','out_invoice')]
                title_sel = ' por Cliente'
            elif self.type == 'in_invoice':
                dom = [('type', '=' , 'in_invoice' )]
                title_sel = " por Proveedor"
            else:
                dom = []
                title_sel = ' General '
            # Tree view declared by this module.
            tree_view_id = self.env.ref('payments_usd_mxn.id_view_historial_pagos_tree').id
            action = {
                'type': 'ir.actions.act_window',
                'views': [(tree_view_id, 'tree')],
                'view_mode': 'tree',
                'name': ('Informe ' + str(title_sel)),
                'res_model': 'account.invoice',
                'domain': dom
            }
            return action
class camposNuevos(models.Model):
    # Extends account.invoice with computed MXN/USD conversions of the
    # invoice total and of the outstanding (residual) amount.
    _inherit = 'account.invoice'
    importe_mxn = fields.Monetary(string="Total MXN", compute="_funcionusd")
    importe_usd = fields.Monetary(string ="Total USD", compute="_funcionusd")
    mxn = fields.Monetary(string="Pagar MXN", compute="_funcion")
    usd = fields.Monetary(string="Pagar USD", compute="_funcion")
    @api.one
    def _funcion(self):
        # Convert the residual (amount still to pay) into USD and MXN
        # using res.currency's conversion helper.
        if self.residual:
            self.usd = self.env['res.currency']._compute(self.currency_id, self.env['res.currency'].search([('name','=','USD')], limit=1), self.residual)
            self.mxn = self.env['res.currency']._compute(self.currency_id, self.env['res.currency'].search([('name','=','MXN')], limit=1), self.residual)
    @api.one
    def _funcionusd(self):
        # Convert the invoice grand total into USD and MXN.
        if self.amount_total:
            self.importe_usd = self.env['res.currency']._compute(self.currency_id, self.env['res.currency'].search([('name','=','USD')], limit=1), self.amount_total)
            self.importe_mxn = self.env['res.currency']._compute(self.currency_id, self.env['res.currency'].search([('name','=','MXN')], limit=1), self.amount_total)
| [
"angelchicles97@gmail.com"
] | angelchicles97@gmail.com |
5e7eae6b648b87e1195f66e8de1baf28ed5cc3b4 | 176088b355fd48f89aa377d1358bc54fd5d9d35d | /backend/task_category/migrations/0001_initial.py | 9093194c12138f4db006dc787f9880e94c74f40c | [] | no_license | crowdbotics-apps/fashion-by-genesis-18024 | bbf2c78adaefcaf5297b208a23d291ec8c7b0f0f | a725add80913c3ecb4f9e049baa3c78c8de3ffbd | refs/heads/master | 2022-10-26T19:09:33.359374 | 2020-06-11T18:21:20 | 2020-06-11T18:21:20 | 271,617,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | # Generated by Django 2.2.13 on 2020-06-11 18:20
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial migration: creates the Category and Subcategory
# tables (Subcategory has a cascading FK to Category).
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('icon', models.URLField()),
                ('description', models.TextField(blank=True, null=True)),
                ('is_recurring', models.BooleanField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Subcategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField(blank=True, null=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subcategory_category', to='task_category.Category')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
5b93c2a71b7fe9860423932a99487ea380b7ad1b | e7307703a08ccdc0615bfa3b7a963a2ba2e9e732 | /bots/courses_bot/data_models/student_profile.py | 8f2b9d06af726a5cb4e8919a976c563d36878473 | [] | no_license | liyocee/cs_course_bot | 7817c43975c56aeb6edf31d28d9a7f553d107c26 | 93354ade3713293bf31a494a75bd11c3229814a8 | refs/heads/master | 2023-05-24T23:29:34.309303 | 2020-03-15T14:37:15 | 2020-03-15T14:37:15 | 246,835,877 | 0 | 0 | null | 2023-05-22T22:42:22 | 2020-03-12T13:03:32 | Python | UTF-8 | Python | false | false | 707 | py | from enum import Enum
from typing import Optional
from botbuilder.schema import Attachment
from .course_unit import CourseUnit
class StudentProfile:
    """Mutable container for the data gathered while building a student's
    profile (name, admission number, selected course unit and photo).

    Every field defaults to None and is filled in as the bot dialog
    progresses.  The previous annotations used the implicit-Optional form
    (``name: str = None``), which PEP 484 deprecates; they are now spelled
    out as ``Optional[...]``.
    """

    def __init__(
        self,
        name: Optional[str] = None,
        admission_number: Optional[str] = None,
        course_unit: Optional[CourseUnit] = None,
        picture: Optional[Attachment] = None
    ):
        self.name: Optional[str] = name
        self.admission_number: Optional[str] = admission_number
        self.course_unit: Optional[CourseUnit] = course_unit
        self.picture: Optional[Attachment] = picture
class StudentProfileAttributes(Enum):
    """Canonical keys for each piece of the student profile."""

    NAME = "name"                          # student's display name
    ADMISSION_NUMBER = "admission_number"  # registration/admission number
    COURSE_UNIT = "course_unit"            # chosen course unit
    PICTURE = "picture"                    # uploaded profile photo
| [
"collinskivale@gmail.com"
] | collinskivale@gmail.com |
e6bd33f51f7f97a316e684b04836d4e3d5974f4b | 9148efb07cb949e686b8c1017460526b74c16319 | /actions/utils.py | fba66c004d23b9f68027aa3f3bfc12c60a15a875 | [] | no_license | kangsgo/pineapple | af5277144395135bc018552bcef2237a8c1cd011 | d6f95eb1cf3cc30d97157a9b6fe35ad1889f6a82 | refs/heads/master | 2021-01-12T15:52:03.496417 | 2016-10-06T11:47:59 | 2016-10-06T11:47:59 | 70,171,425 | 1 | 0 | null | 2016-10-06T16:20:18 | 2016-10-06T16:20:18 | null | UTF-8 | Python | false | false | 805 | py | import datetime
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from .models import Action
def create_action(user, verb, target=None):
    """Record *verb* for *user* unless an identical action already exists
    within the last two minutes (simple de-duplication window).

    Returns True when a new Action row was saved, False when a recent
    duplicate suppressed it.
    """
    window_start = timezone.now() - datetime.timedelta(seconds=120)
    recent = Action.objects.filter(
        user_id=user.id, verb=verb, created__gte=window_start
    )
    if target:
        # Narrow the duplicate check to the exact same target object.
        recent = recent.filter(
            target_ct=ContentType.objects.get_for_model(target),
            target_id=target.id,
        )
    if recent:
        return False
    Action(user=user, verb=verb, target=target).save()
    return True
| [
"tonnie.lwt@gmail.com"
] | tonnie.lwt@gmail.com |
c66564b00ca526b4d15eb6f39a4705b4258e03d0 | 54827505fac36a02c2f97f140d79136be8a34faa | /Beakjun/1012/1012.py | 79b09b34b6ed3f9380c1d7d56f0b615ad852d207 | [] | no_license | Malgus1995/Algorithm_strudy | 508907104803d9e18c35bf6d331eccda8e13b5b2 | 1fd16179adea91ad947359d4fabb49a9b730cb1b | refs/heads/master | 2020-06-13T03:29:57.749048 | 2020-01-20T13:04:04 | 2020-01-20T13:04:04 | 194,518,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | import sys
# Baekjoon 1012 (organic cabbage): read all test cases from stdin and build,
# for each farm, a zero-padded grid with 1 at every cabbage position.
sys.setrecursionlimit(10**6)
# Number of test cases (farms).
farm_count = int(input())
farm_list =[]
# NOTE(review): tuple_index_lst is never used below.
tuple_index_lst = []
for ch in range(farm_count):
    # One line "width height cabbage_count" per farm.
    wid_hei_cabb = input()
    wid_hei_cabb= wid_hei_cabb.split(' ')
    wid_hei_cabb = [ int(ele) for ele in wid_hei_cabb ]
    cabb_count = wid_hei_cabb[2]
    farm = []
    # Pad the grid with a one-cell border of zeros so the flood fill never
    # needs to bounds-check its four neighbours.
    for a in range(wid_hei_cabb[0]+2):
        farm.append([0]*(wid_hei_cabb[1]+2))
    for i in range(cabb_count):
        # Cabbage coordinate "x y"; shifted by +1 because of the border.
        tmp_index = input()
        tmp_index = tmp_index.split(' ')
        tmp_index =[int(ele) for ele in tmp_index]
        farm[tmp_index[0]+1][tmp_index[1]+1] = 1
    farm_list.append(farm)
# Running count of connected cabbage clusters ("white bugs") per farm.
white_bug_count =0;
if(farm[i+1][j]==1):
farm[i+1][j]=-1
find_bfs_cabbage(i+1,j,farm)
if(farm[i-1][j]==1):
farm[i-1][j]=-1
find_bfs_cabbage(i-1,j,farm)
if(farm[i][j+1]==1):
farm[i][j+1]=-1
find_bfs_cabbage(i,j+1,farm)
if(farm[i][j-1]==1):
farm[i][j-1]=-1
find_bfs_cabbage(i,j-1,farm)
return ;
for farm in farm_list:
white_bug_count =0;
for i,ilst in enumerate(farm):
for j,ele in enumerate(ilst):
if(ele==1):
farm[i][j]=-1
white_bug_count=white_bug_count+1
find_bfs_cabbage(i,j,farm)
print(white_bug_count)
"""
"""
| [
"xrjseka615@gmail.com"
] | xrjseka615@gmail.com |
46369b83cc40c5f1da5508b337758b74c8e74918 | 35edc8e37d4beec178cd6af6287fcd72502f74b4 | /Air_Hockey/airhockey.py | 52d897b277d6fc6edf4ba1cf05e4499fd7916253 | [] | no_license | sivaramprasanth/air-hockey | 167d48583b68f26c225addd55498929acbd88f8f | 3e4f7066b451d68a63eb12c8810dcc7b5259472e | refs/heads/master | 2020-03-12T08:53:13.470891 | 2019-10-25T18:11:40 | 2019-10-25T18:11:40 | 130,538,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,675 | py | import pygame,sys
import module1 as P2
from pygame.locals import*
import math
import time
def run():
    """Launch the two-player air-hockey game and block in its main loop.

    Sets up the 800x700 pygame window, defines the sprite classes
    (Background, Paddle, Puck) and small helpers, then runs a 60 FPS
    event loop until one player scores 10 goals or the window is closed.
    """
    pygame.init()
    clock = pygame.time.Clock()
    WHITE = (255,255,255)
    BLUE = (56,142,142)
    black = (0,0,0)
    screen = pygame.display.set_mode((800,700))
    pygame.display.set_caption('Air Hockey')
    # NOTE(review): ``score`` is never read again; scoring lives on the Puck.
    score = 0
    #pygame.mixer.music.load('collision.wav')
    # Euclidean distance between two points (note the x1,x2,y1,y2 order).
    def distance(x1,x2,y1,y2):
        return math.hypot(x2 - x1, y2 - y1)
    #A class to create the background.
    class Background(pygame.sprite.Sprite):
        def __init__(self, image_file, location):
            pygame.sprite.Sprite.__init__(self) #call Sprite initializer
            self.image = pygame.image.load(image_file)
            self.rect = self.image.get_rect()
            self.rect.left, self.rect.top = location
    # A class to create the paddles.
    class Paddle(pygame.sprite.Sprite):
        def __init__(self,image_file,location):
            pygame.sprite.Sprite.__init__(self)
            self.image = pygame.image.load(image_file)
            self.rect = self.image.get_rect()
            self.rect.centerx,self.rect.centery = location
            self.dx = 1 #self.score = 0
            self.dy= 1
        #Functions to assign keys to handle the paddle.
        # Player 1: arrow keys move the bottom paddle, 4 px per frame.
        def Handle_keys_1(self):
            key = pygame.key.get_pressed()
            distance = 4
            if key[pygame.K_DOWN]:
                self.rect.centery += distance
            elif key[pygame.K_UP]:
                self.rect.centery -= distance
            elif key[pygame.K_RIGHT]:
                self.rect.centerx += distance
            elif key[pygame.K_LEFT]:
                self.rect.centerx -= distance
        # Player 2: WASD moves the top paddle.
        def Handle_keys_2(self):
            key = pygame.key.get_pressed()
            distance = 4
            if key[pygame.K_s]:
                self.rect.centery += distance
            elif key[pygame.K_w]:
                self.rect.centery -= distance
            elif key[pygame.K_d]:
                self.rect.centerx += distance
            elif key[pygame.K_a]:
                self.rect.centerx -= distance
        # Function to make sure the paddle doesn't move out of the screen.
        # Player 1 is clamped to the bottom half of the rink.
        def Restrict_bound_1(self):
            if self.rect.centerx<0 + 20 + 25:
                self.rect.centerx = 0 + 20 + 25
            if self.rect.centerx>500 - 60 - 25 :
                self.rect.centerx = 500 - 60 - 25
            if self.rect.centery<350 + 25 :
                self.rect.centery = 350 + 25
            if self.rect.centery>700 - 20 -25 :
                self.rect.centery = 700 - 20 -25
        # Player 2 is clamped to the top half of the rink.
        def Restrict_bound_2(self):
            if self.rect.centerx<0 + 20 + 25:
                self.rect.centerx = 0 + 20 + 25
            if self.rect.centerx>500 - 60 - 25 :
                self.rect.centerx = 500 - 60 - 25
            if self.rect.centery>350 - 25 :
                self.rect.centery = 350 - 25
            if self.rect.centery<0 + 20 + 25 :
                self.rect.centery = 0 + 20 + 25
    # A class to create the puck.
    class Puck(pygame.sprite.Sprite):
        def __init__(self,image_file,location,change_y = 10 , change_x = 10):
            pygame.sprite.Sprite.__init__(self)
            self.image = pygame.image.load(image_file)
            self.rect = self.image.get_rect()
            self.rect.centerx,self.rect.centery = location
            self.dx = 0
            self.dy = 4
            self.score_1 = 0
            self.score_2 = 0
            self.score_comp_1 = 0
            self.score_comp_2 = 0
        #Function to make sure the puck doesn't move out of the screen.
        # Bounce off the rink walls by flipping the velocity component.
        def update_puck(self):
            if self.rect.centerx < 0 + 20 +15 :
                #pygame.mixer.music.play()
                self.rect.centerx = 35
                self.dx *= -1
            elif self.rect.centerx > 500 - 15 - 60 :
                #pygame.mixer.music.play()
                self.rect.centerx = 500 - 15 - 60
                self.dx *= -1
            if self.rect.centery < 0 + 20 + 15 :
                #pygame.mixer.music.play()
                self.rect.centery = 35
                self.dy *= -1
            elif self.rect.centery > 700 - 15 - 18 :
                #pygame.mixer.music.play()
                self.rect.centery = 700 - 15 -20
                self.dy *= -1
        #Limiting the speed of the puck.
        # Clamp both velocity components to [-9, 9].
        def limit_speed(self):
            if self.dx > 9 :
                self.dx = 9
            if self.dx <-9 :
                # NOTE(review): asymmetric clamp (-8 instead of -9) — looks
                # like a typo, but preserved as written.
                self.dx = -8
            if self.dy > 9 :
                self.dy = 9
            if self.dy <-9 :
                self.dy = -9
        # NOTE(review): reset() is never called from the main loop below.
        def reset(self):
            if(145 < self.rect.centerx < 320):
                if(self.rect.centery <= 12 or self.rect.centery >= 685 ):
                    self.rect.centerx = 250
                    self.rect.centery = 350
                    self.dy = 4
                    self.dx = 0
        # Keeping track of the players' score.
        # Goal detection: puck inside the goal mouth near either end of
        # the rink scores for the opposite player and re-centres the puck.
        def player_score(self):
            if(158 < self.rect.centerx < 325):
                if(self.rect.centery <= 34 ):
                    self.rect.centerx = 250
                    self.rect.centery = 350
                    self.score_1 += 1
                    self.dy = 3
                    self.dx = 0
                    time.sleep(3)
                if(self.rect.centery >= 670):
                    # NOTE(review): centerx/centery look swapped here
                    # relative to the branch above — confirm intent.
                    self.rect.centery = 250
                    self.rect.centerx = 348
                    self.score_2 += 1
                    self.dy = 3
                    self.dx = 0
                    time.sleep(3)
    #Displaying the scores.
    def score_message(score_1,score_2):
        P2.text_button("PLAYER_1",black,500,150,300,40,size = "small")
        P2.text_button(score_1,black,500,180,300,40,size = "small")
        P2.text_button("PLAYER_2",black,500,400,300,40,size = "small")
        P2.text_button(score_2,black,500,440,300,40,size = "small")
        P2.message_to_screen(score_1,black,[500,150])
        P2.message_to_screen(score_2,black,[500,400])
    # Function to check collision between two sprites.
    # Circle-style collision: centres closer than 50 px count as a hit.
    def Check_collision(sprite1,sprite2):
        if(distance(sprite1.rect.centerx,sprite2.rect.centerx,sprite1.rect.centery,sprite2.rect.centery) <= 50):
            return True
        return False
    BackGround = Background('./images/board_1.png', [0,0]) #here is also changed
    paddle1 = Paddle('./images/paddle1.png',[250,525])
    paddle2 = Paddle('./images/paddle2.png',[250,175])
    puck = Puck('./images/puck.png',[250,350])
    game_run = True
    # Main loop: handle input, move sprites, resolve collisions and goals,
    # then redraw at 60 FPS.
    while game_run is True :
        for event in pygame.event.get():
            if event.type == pygame.QUIT :
                pygame.quit()
                sys.exit()
        screen.fill(BLUE)
        screen.blit(BackGround.image, BackGround.rect)
        paddle1.Handle_keys_1()
        screen.blit(paddle1.image, paddle1.rect)
        paddle1.Restrict_bound_1()
        paddle2.Handle_keys_2()
        screen.blit(paddle2.image,paddle2.rect)
        paddle2.Restrict_bound_2()
        screen.blit(puck.image,puck.rect)
        puck.score_comp_1 = puck.score_1
        puck.score_comp_2 = puck.score_2
        # Paddle 1 hit: reflect the puck and step it forward several times
        # so it clears the paddle before the next collision test.
        if Check_collision(puck,paddle1) :
            #pygame.mixer.music.play()
            puck.dx = -1 * puck.dx
            puck.dy = -1 * puck.dy
            puck.rect.centerx += puck.dx
            puck.rect.centery += puck.dy
            puck.rect.centery += puck.dy
            puck.rect.centerx += puck.dx
            puck.rect.centery += puck.dy
            puck.rect.centerx += puck.dx
            time.sleep(0.03)
            paddle1.rect.centery += 5
        # Paddle 2 hit: reflect and add the paddle's own velocity.
        if Check_collision(puck,paddle2) :
            #pygame.mixer.music.play()
            puck.dx = -1*puck.dx + paddle2.dx
            puck.dy = -1*puck.dy + paddle2.dy
            puck.rect.centerx += puck.dx
            puck.rect.centery += puck.dy
            puck.rect.centery += puck.dy
            puck.rect.centerx += puck.dx
            puck.rect.centery += puck.dy
            puck.rect.centerx += puck.dx
            time.sleep(0.03)
            paddle2.rect.centery -= 5
        puck.limit_speed()
        puck.player_score()
        # First player to reach 10 goals ends the game.
        if(puck.score_2 == 10):
            P2.text_button("PLAYER_2 WON",(176,23,31),500,290,300,40,size = "small")
            P2.message_to_screen("game ",(255,0,0),[70,300])
            P2.message_to_screen("over ",(255,255,0),[250,300])
            game_run = False
        if(puck.score_1 == 10):
            P2.text_button("PLAYER_1 WON",(0,0,128),475,290,300,40,size = "small")
            P2.message_to_screen("game ",(0,255,255),[70,300])
            P2.message_to_screen("over ",(0,255,0),[250,300])
            game_run = False
        puck.update_puck()
        puck.rect.centery += puck.dy
        puck.rect.centerx += puck.dx
        score_message(str(puck.score_1),str(puck.score_2))
        pygame.display.update()
        clock.tick(60)
    # Show the final frame briefly, then exit.
    pygame.display.update()
    time.sleep(2)
    quit()
if __name__ == "__main__":
    # Launch the game loop only when executed directly, not on import.
    run()
| [
"sivaramprasanth81@gmail.com"
] | sivaramprasanth81@gmail.com |
3b149f6e85546f839ae7d4b5a10d6be1ce92703c | b525e6b11658fc0273f1606d51748596ad8d5803 | /project/sharemarkdown/views/DocumentView.py | 87046697fe2acaa2ec8d2abe2467f793afe33282 | [] | no_license | sharemarkdown/sharemarkdown | 5740d78ee585be4124ff9199627031e316553ba4 | ed44380d598506768203d468c3cea3f5bdd76fa4 | refs/heads/master | 2020-03-08T07:22:51.073447 | 2018-05-01T13:56:34 | 2018-05-01T13:56:34 | 127,993,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,356 | py | from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework.authentication import TokenAuthentication
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from sharemarkdown.models import Document
from sharemarkdown.serializers import DocumentSerializer
class ListCreateDocument(generics.ListCreateAPIView):
    """List the authenticated user's documents and create new ones.

    On create, the ``owner`` field is forced to the requesting user so a
    client cannot create documents on someone else's behalf.
    """

    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = DocumentSerializer
    model = Document

    def create(self, request, *args, **kwargs):
        payload = request.data.copy()
        payload['owner'] = request.user.id
        serializer = DocumentSerializer(data=payload)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
        return Response(serializer.data, status=201)

    def get_queryset(self):
        # Restrict the list endpoint to documents owned by the requester.
        return Document.objects.filter(owner=self.request.user)
class GetUpdateDeleteDocument(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Document.

    Editors (``Document.editors``) may read and update; the default
    ``get_object`` path used by DELETE requires the owner instead.
    """

    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = DocumentSerializer
    model = Document

    def get_object(self, edit_right=False):
        """Resolve the document from the URL kwargs and enforce access.

        With ``edit_right`` the requester must be one of the document's
        editors; otherwise they must be its owner.
        """
        document = get_object_or_404(Document, **self.kwargs)
        if edit_right:
            allowed = self.request.user in document.editors.all()
        else:
            allowed = document.owner.id == self.request.user.id
        if not allowed:
            raise PermissionDenied(
                detail='You do not have permission')
        return document

    def retrieve(self, request, *args, **kwargs):
        document = self.get_object(edit_right=True)
        return Response(DocumentSerializer(document).data, status=200)

    def update(self, request, *args, **kwargs):
        document = self.get_object(edit_right=True)
        payload = request.data.copy()
        payload['owner'] = request.user.id
        serializer = DocumentSerializer(document, data=payload)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
        return Response(serializer.data, status=200)
| [
"paul841029@gmail.com"
] | paul841029@gmail.com |
d2b884f6075045816f2e691f37472ed561d74f07 | 1a53bb875c909436fabcf1b59e4a947a2a82f8af | /IBK/fileCompare_bl.py | e869ecbc5fc4f530c2ec4b2af3b38fbfc54856a1 | [] | no_license | bullseye73/python | e1175d908bdd946ea66b1d4cb29c003a6f66a153 | db55a34f6a21bfdb4d50b75a68b420a1e5663978 | refs/heads/master | 2022-10-06T14:47:31.525172 | 2022-09-26T12:29:11 | 2022-09-26T12:29:11 | 188,925,580 | 2 | 0 | null | 2019-10-08T08:01:15 | 2019-05-28T00:29:16 | Python | UTF-8 | Python | false | false | 6,321 | py | #-*- coding: utf-8 -*-
import os, codecs
import openpyxl
import sys, re, time
from libs import diff_match_patch as dmp_module
import numpy as np
'''
def makeFileName (str):
fn = re.sub('[-=.#/?:$}]', '', str)
now = time.localtime()
s = "%04d%02d%02d_%02d%02d%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
return fn + "_" + s + ".xlsx"
'''
def makeFileName_ex(str):
    """Append the .xlsx extension to *str* (a base file name)."""
    return "{0}.xlsx".format(str)
'''
def search(dir):
files = os.listdir(dir)
workbook = openpyxl.Workbook()
workbook.remove(workbook['Sheet'])
for file in files:
fullFilename = os.path.join(dir, file)
print("is file : {0} ".format(fullFilename))
fname, ext = os.path.splitext(file)
worksheet = workbook.create_sheet(fname)
readTxtFile(worksheet, fullFilename)
workbook.save(makeFileName_ex(fname))
workbook.close()
'''
def readXlsxFile(oriPath):
    """Load the active sheet of an .xlsx file as a list of row lists.

    Each inner list holds the raw cell values (None for empty cells).
    The workbook is now closed even if reading raises, which the previous
    version leaked.
    """
    wb = openpyxl.load_workbook(oriPath)
    try:
        ws = wb.active
        values = [[cell.value for cell in row] for row in ws.rows]
    finally:
        # Release the file handle regardless of read errors.
        wb.close()
    return values
def compareData(org, ret):
    """Per-cell recognition rates for one row pair.

    ``org`` holds the ground-truth cells and ``ret`` the OCR cells.  A
    position where both are None scores 0; anything else is scored by
    recognitionRate(OCR value, ground truth).  Indexing follows ``org``,
    so a shorter ``ret`` still raises IndexError as before.
    """
    result = []
    for i, truth_cell in enumerate(org):
        ocr_cell = ret[i]
        if truth_cell is None and ocr_cell is None:
            result.append(0)
        else:
            result.append(recognitionRate(ocr_cell, truth_cell))
    return result
def recognitionRate(str1, str2):
    """Recognition score between an OCR value and its ground truth.

    Returns 0 when either value is missing (None); otherwise the
    percentage similarity computed by countDiff over the stringified
    values.
    """
    if str1 is None or str2 is None:
        return 0
    return countDiff(str(str1), str(str2))
def countDiff(text1, text2):
    """Similarity of two strings in percent (0-100) via diff-match-patch.

    The score is the number of characters the strings have in common
    (length of the op == 0 diff chunks) divided by the longer string's
    length.  Two empty strings are trivially identical; previously this
    case raised ZeroDivisionError.
    """
    if not text1 and not text2:
        # Identical empty inputs; also avoids dividing by zero below.
        return 100.0
    dmp = dmp_module.diff_match_patch()
    dmp.Diff_Timeout = 0.0  # no time limit, so the diff is deterministic
    diff = dmp.diff_main(text1, text2, False)
    # similarity: total length of the chunks shared by both strings
    common_text = sum(len(txt) for op, txt in diff if op == 0)
    text_length = max(len(text1), len(text2))
    sim = common_text / text_length
    return sim * 100
def averageRecRate(ls):
    """Column-wise averages of a rectangular 2-D sequence of numbers.

    Replaces the previous hand-rolled double loop with a vectorized numpy
    column mean; returns [mean of column 0, mean of column 1, ...] as
    plain Python floats, exactly as before.
    """
    arrData = np.array(ls, dtype=float)
    # axis=0 averages down each column; tolist() converts back to floats.
    return arrData.mean(axis=0).tolist()
def writeCompareResult(xlsxData, txtData, retFileName):
    """Write the comparison workbook 'ret_' + retFileName.

    Row 0 of ``xlsxData`` is copied through as the header.  For every
    later row the sheet gets three lines: the OCR row, the ground-truth
    row, and the per-cell recognition rates with a trailing row average.
    A final line holds the column averages over all rate rows.  The
    intermediate workbook ``retFileName`` is deleted afterwards.
    """
    if len(xlsxData) != len(txtData):
        print("Can not compare.")
        return
    workbook = openpyxl.Workbook()
    workbook.remove(workbook['Sheet'])
    worksheet = workbook.create_sheet(retFileName)
    colAvr = []
    for i, truth_row in enumerate(xlsxData):
        if i == 0:
            # Header row: copy unchanged.
            worksheet.append(truth_row)
            continue
        comData = compareData(truth_row, txtData[i])
        comData.append(sum(comData) / len(comData))  # trailing row average
        worksheet.append(txtData[i])
        worksheet.append(truth_row)
        worksheet.append(comData)
        colAvr.append(comData)
    worksheet.append(averageRecRate(colAvr))
    os.remove(retFileName)  # drop the intermediate single-sheet workbook
    workbook.save('ret_' + retFileName)
    workbook.close()
'''
def readTxtFile(ws, fn):
#strType = fn.split('_', 1)
with codecs.open(fn, 'r', encoding="utf-8-sig") as f:
row = 1 #cell의 row, col의 값이 1부터임. 0이면 오류
col = 1
excepts = ['category', 'category number'] # 사용하지 않는 column
for line in f:
r = line.replace('\r\n', '').strip()
rData = r.replace('"', '').split(',', 1)
rlen = len(rData)
if rlen <= 1:
continue
if 'set name' in rData[0].lower():
row += 1
col = 1
elif rData[0].lower() in excepts:
continue
else:
val = rData[rlen - 1].replace(",", "")
if row == 2:
ws.cell(row=row-1, column=col).value = rData[0]
ws.cell(row, col).value = val
col += 1
'''
def readTxtFile_ex(fn):
    """Convert an OCR result .txt (one "key,value" pair per line) into a
    single-sheet <basename>.xlsx workbook saved in the working directory.

    A line whose key contains 'set name' starts a new spreadsheet row;
    while filling the first data row (row == 2) the keys are also written
    to row 1 as a header.  Lines without a comma or whose key is listed in
    ``excepts`` are skipped.
    """
    workbook = openpyxl.Workbook()
    workbook.remove(workbook['Sheet'])
    fi = os.path.split(fn)
    fname, ext = os.path.splitext(fi[1])
    worksheet = workbook.create_sheet(fname)
    #strType = fn.split('_', 1)
    with codecs.open(fn, 'r', encoding="utf-8-sig") as f:
        row = 1 # openpyxl cell rows/columns are 1-based; 0 raises an error
        col = 1
        excepts = ['category', 'category number'] # columns that are not used
        for line in f:
            r = line.replace('\r\n', '').strip()
            rData = r.replace('"', '').split(',', 1)
            rlen = len(rData)
            if rlen <= 1:
                continue
            if 'set name' in rData[0].lower():
                # New record: advance to the next row, restart at column 1.
                row += 1
                col = 1
            elif rData[0].lower() in excepts:
                continue
            else:
                #val = rData[rlen - 1].replace(",", "")
                val = rData[rlen - 1]
                if row == 2:
                    # The first data record also produces the header row.
                    worksheet.cell(row=row-1, column=col).value = rData[0]
                worksheet.cell(row, col).value = val
                col += 1
    workbook.save(makeFileName_ex(fname))
    workbook.close()
def Usage():
    """Print the command-line usage for this comparison script."""
    print("Usage: input 2 file fullpath \n python fileCompare_bl.py [result txt fullpath name] [xlsx file fullpath name]")
def main():
    """Entry point: convert the OCR txt result to .xlsx, then compare it
    cell-by-cell against the ground-truth workbook.

    argv[1] is the OCR txt file, argv[2] the ground-truth .xlsx file.
    """
    if len(sys.argv) != 3:
        Usage()
        sys.exit()
    txt_path, xlsx_path = sys.argv[1], sys.argv[2]
    base_name = os.path.splitext(os.path.split(txt_path)[1])[0]
    readTxtFile_ex(txt_path)
    txtRet = readXlsxFile(makeFileName_ex(base_name))
    xlsxRet = readXlsxFile(xlsx_path)
    writeCompareResult(xlsxRet, txtRet, makeFileName_ex(base_name))
if __name__ == "__main__":
    # Run only when executed as a script, not on import.
    main()
"bullseye73@gmail.com"
] | bullseye73@gmail.com |
e9ecb75365952dcfdbd0a1230288c1560299d85e | 02b45eafdcac782cb487195860b7df8635d2dbe4 | /Python/src/Aggregated poblem/two_sided_two_period.py | 9a9621bb77f00e7f5a73ccf29b8c5ea8175245c1 | [] | no_license | iriosu/Assortment | a3146118410a8a8684734a3da602c12ffdfea082 | 32b234b3ac8a2f75bd8c2719e14b3f1a1a269db6 | refs/heads/master | 2020-04-16T06:09:20.280085 | 2019-04-13T16:48:22 | 2019-04-13T16:48:22 | 165,334,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,571 | py | from gurobipy import *
import sys, os
import numpy, math
import itertools
import copy
numpy.random.seed(1)
import subprocess, multiprocessing, time, gc, json
import matplotlib.pyplot as plt
# ==============================
# AUXILIAR FUNCTIONS
# ==============================
def CreatePairsMapping(n):
    """Enumerate all ordered (i, j) pairs of an n x n two-sided market.

    Returns:
        DI: n x n**2 0/1 matrix; row i flags pairs whose first member is i.
        DJ: n x n**2 0/1 matrix; row j flags pairs whose second member is j.
        mp: dict mapping the tuple (i, j) to its flat pair index.
        all_pairings: list of [i, j] pairs in row-major order.
    """
    all_pairings = [[a, b] for a, b in itertools.product(range(n), repeat=2)]
    mp = {(pair[0], pair[1]): idx for idx, pair in enumerate(all_pairings)}
    DI = numpy.array([[int(pair[0] == a) for pair in all_pairings] for a in range(n)])
    DJ = numpy.array([[int(pair[1] == a) for pair in all_pairings] for a in range(n)])
    return DI, DJ, mp, all_pairings
def CreateInputs(n, m, delta, boo_m=True):
    """Build the instance data for an n x n two-sided assortment problem.

    Draws public scores U (side I) and V (side J), enumerates the candidate
    assortments S (exactly size m when boo_m, sizes 1..m otherwise) and
    precomputes, per user/assortment, the membership indicator d and the
    like-probabilities pI / pJ with externality weight ``delta``.
    """
    def phi(i, A, values, beta=1):
        # Like-probability of profile i inside assortment A under a uniform
        # outside option, clipped to [0, 1].
        if i not in A:
            return 0
        return max(min(values[i] + beta * sum(values[i] - values[j] for j in A), 1), 0)
    # U is drawn before V on purpose: this keeps the RNG stream identical
    # to the original implementation.
    U = [round(u, 3) for u in sorted(numpy.random.uniform(0, 1, n), reverse=True)]
    V = [round(v, 3) for v in sorted(numpy.random.uniform(0, 1, n), reverse=True)]
    if boo_m:
        S = list(itertools.combinations(range(n), m))
    else:
        S = [comb for mm in range(1, m + 1) for comb in itertools.combinations(range(n), mm)]
    d = [[int(i in comb) for comb in S] for i in range(n)]      # membership indicators
    pI = [[phi(i, comb, V, delta) for comb in S] for i in range(n)]  # side-I like probs
    pJ = [[phi(i, comb, U, delta) for comb in S] for i in range(n)]  # side-J like probs
    DI, DJ, mp, all_pairings = CreatePairsMapping(n)
    return DI, DJ, mp, S, d, pI, pJ, all_pairings, U, V
def ConstructBacklogs(n,m,ls, out,boo_m):
    """Recursively extend the 0/1 backlog vector ``ls`` (one entry per user)
    and append every feasible completion of length n to ``out``.

    Feasibility caps the number of backlogged users: at most n - m*(t-1)
    when boo_m (assortments of exactly size m), else n - (t-1).

    NOTE(review): this reads the module-level global ``t`` (defined only in
    the __main__ block) instead of taking it as a parameter — importing the
    module and calling this without ``t`` defined raises NameError.
    """
    if len(ls) == n and sum(ls) <= n-m*(t-1) and boo_m: # assumes we must show at least one profile per user
        out.append(copy.copy(ls))
    elif len(ls) == n and sum(ls) <= n-(t-1) and not boo_m: # assumes we must show at least one profile per user
        out.append(copy.copy(ls))
    elif len(ls) >= n:
        pass
    else:
        for j in range(2):
            ls.append(j)
            ConstructBacklogs(n,m,ls, out, boo_m)
            ls.pop()
def FeasibleBacklogsInd(n, m, t, boo_m=True):
    """Enumerate feasible single-user backlog vectors of length n.

    Seeds the recursion with each possible first bit and collects the
    completions produced by ConstructBacklogs.  Note: ``t`` is not
    forwarded; ConstructBacklogs reads the module-level ``t`` instead.
    """
    out = []
    for first_bit in range(2):
        ConstructBacklogs(n, m, [first_bit], out, boo_m)
    return out
def FeasibleBacklogs(n,m,t,pairs,boo_m=True):
    """Enumerate joint backlog scenarios for both market sides.

    Stacks single-user backlog rows (from FeasibleBacklogsInd) into n x n
    matrices whose column sums stay <= m, then pairs a side-I matrix with
    a side-J matrix whenever no (i, j) couple appears in both backlogs at
    once.  Returns a list of [side_I_matrix, side_J_matrix] scenarios.
    """
    def MergeBacklogs(n,m,ls, out):
        # Build an n-row matrix from rows of the closure variable ``inp``;
        # keep it when every column sum is at most m.
        if len(ls) == n:
            if sum([sum([ls[i][j] for i in range(n)])<=m for j in range(n)]) == n:
                out.append(copy.copy(ls))
        else:
            for l1 in inp:
                ls.append(l1)
                MergeBacklogs(n,m,ls, out)
                ls.pop()
    inp = FeasibleBacklogsInd(n,m,t,boo_m)
    jaux = []
    lst = []
    for l in inp:
        lst.append(l)
        MergeBacklogs(n,m,lst, jaux)
        lst.pop()
    jout = []
    for b in jaux:
        # first level, scenario; second level, side of market;
        # third level, specific i in I (or j in J; forth level, specific j for i
        for b1 in jaux:
            if sum([b[pairs[p][0]][pairs[p][1]] + b1[pairs[p][1]][pairs[p][0]] <= 1 for p in range(len(pairs))]) == len(pairs):
                jout.append([b,b1])
    return jout
def ConstructPairings(n, m, ls, out, t, DI, DJ, boo_m):
    """Depth-first enumeration of 0/1 pairing vectors of length n**2.

    A complete vector is kept when each user on either side is matched at
    most n - m*(t-1) times (boo_m) or n - (t-1) times (not boo_m); DI and
    DJ map the flat vector to per-user counts.  ``ls`` is the mutable
    partial vector and qualifying completions are appended to ``out``.
    """
    size = n ** 2
    if len(ls) == size:
        cap = n - m * (t - 1) if boo_m else n - (t - 1)
        side_i_ok = sum(DI.dot(ls) <= cap) == n
        side_j_ok = sum(DJ.dot(ls) <= cap) == n
        if side_i_ok and side_j_ok:
            out.append(copy.copy(ls))
        return
    if len(ls) > size:
        return
    for bit in (0, 1):
        ls.append(bit)
        ConstructPairings(n, m, ls, out, t, DI, DJ, boo_m)
        ls.pop()
def FeasiblePairings(n, m, t, DI, DJ, boo_m=True):
    """All feasible pairing vectors of length n**2 for the second stage.

    Seeds ConstructPairings with each possible first bit and returns the
    collected qualifying completions.
    """
    out = []
    for first_bit in range(2):
        ConstructPairings(n, m, [first_bit], out, t, DI, DJ, boo_m)
    return out
def BuildModelParallel(data):
    """Solve one second-stage scenario as a Gurobi LP (pool worker).

    ``data`` packs (n, backlogs Bs, remaining potentials Ps, assortments S,
    membership d, like-probabilities prI/prJ, pair index map mp, scenario
    counter ct, output directory).  The model maximizes the expected number
    of matches (backlog resolutions plus simultaneous shows) and dumps the
    objective and optimal assortment indicators to scenario_s=<ct>.txt.
    """
    # init model
    n, Bs, Ps, S, d, prI, prJ, mp, ct, outdir = data[0],data[1],data[2],data[3],data[4],\
                                                data[5],data[6],data[7], data[8],data[9]
    nS = len(S)
    model = Model('mip1')
    # xI[i,s] / xJ[j,s]: user i (j) is offered assortment s this period.
    # Z[i,j,r,s]: i sees assortment r while j sees s (simultaneous show).
    xI,xJ,Z = {},{},{}
    for i in range(n):
        for s in range(nS):
            xI[i,s] = model.addVar(vtype=GRB.CONTINUOUS, name='xI'+str(i)+str(s))
    for j in range(n):
        for s in range(nS):
            xJ[j,s] = model.addVar(vtype=GRB.CONTINUOUS, name='xJ'+str(j)+str(s))
    for i in range(n):
        for j in range(n):
            for r in range(nS):
                for s in range(nS):
                    Z[i,j,r,s] = model.addVar(vtype=GRB.CONTINUOUS, name='Z'+str(i)+str(j)+str(r)+str(s))
    # =====================
    # add constraints
    # =====================
    # if users are not in potentials nor in backlog they cannot be shown
    for i in range(n):
        for j in range(n):
            model.addConstr(sum([xI[i,s] * d[j][s] for s in range(nS)]) <= Bs[0][i][j] + Ps[mp[i,j]], "PI"+str(i)+str(j))
    for j in range(n):
        for i in range(n):
            model.addConstr(sum([xJ[j,s] * d[i][s] for s in range(nS)]) <= Bs[1][j][i] + Ps[mp[i,j]], "PJ"+str(i)+str(j))
    # only one assortment per period
    for i in range(n):
        model.addConstr(sum([xI[i,s] for s in range(nS)]) <= 1, "OAI"+str(i))
    for j in range(n):
        model.addConstr(sum([xJ[j,s] for s in range(nS)]) <= 1, "OAJ"+str(j))
    # simultaneous shows
    # Z is the linearization of xI[i,r] * xJ[j,s].
    for i in range(n):
        for j in range(n):
            for r in range(nS):
                for s in range(nS):
                    model.addConstr(Z[i,j,r,s] <= xI[i,r], "MSI"+str(i)+str(j)+str(r)+str(s))
                    model.addConstr(Z[i,j,r,s] <= xJ[j,s], "MSJ"+str(i)+str(j)+str(r)+str(s))
    # Set objective
    model.setObjective(sum([xI[i,r] * d[j][r] * Bs[0][i][j] * prI[j][r] for i in range(n) for j in range(n) for r in range(nS)]) \
                       + sum([xJ[j,s] * d[i][s] * Bs[1][j][i] * prJ[i][s] for i in range(n) for j in range(n) for s in range(nS)]) \
                       + sum([Z[i,j,r,s]* d[j][r] * d[i][s] * prI[j][r] * prJ[i][s] for i in range(n) for j in range(n) for r in range(nS) for s in range(nS)]) \
                       , GRB.MAXIMIZE)
    model.optimize()
    obj = model.objVal
    # Collect the optimal xI / xJ values keyed by their "is" suffix.
    xI_opt = {}
    xJ_opt = {}
    for v in model.getVars():
        if v.varName[0] == 'x':
            key = v.varName[2:]
            if 'xI' in v.varName:
                xI_opt[key] = v.x
            else:
                xJ_opt[key] = v.x
        else:
            pass
    scn_data = {'P':Ps, 'B':Bs, 'ct':ct, 'obj':obj, 'xI': xI_opt, 'xJ': xJ_opt}
    with open(os.path.join(outdir, 'scenario_s='+str(ct)+'.txt'), 'w') as outfile:
        json.dump(scn_data, outfile)
def FeasibleSolutions(n, m, t, D, boo_m):
    """All 0/1 show vectors (length n**2) satisfying the per-user quota.

    ``D`` maps a flat vector to per-user show counts; ConstructSolution
    does the depth-first enumeration, seeded with each first bit.
    """
    out = []
    for first_bit in range(2):
        ConstructSolution(n, m, [first_bit], out, D, boo_m)
    return out
def ConstructSolution(n, m, ls, out, D, boo_m):
    """DFS over 0/1 show vectors of length n**2 for one market side.

    When boo_m every user must be offered exactly m profiles; otherwise
    between 1 and m.  ``D`` maps the flat vector to per-user counts;
    qualifying completions of ``ls`` are appended to ``out``.
    """
    size = n ** 2
    if len(ls) == size:
        counts = D.dot(ls)
        if boo_m:
            # every user is offered exactly m profiles
            feasible = sum(counts == m) == n
        else:
            # at most m profiles per user, and at least 1
            feasible = sum(counts <= m) == n and sum(counts >= 1) == n
        if feasible:
            out.append(copy.copy(ls))
        return
    if len(ls) > size:
        return
    for bit in (0, 1):
        ls.append(bit)
        ConstructSolution(n, m, ls, out, D, boo_m)
        ls.pop()
def GetVars(xi, xj, n, S):
    """Flag, for each user, which assortments in S exactly match the flat
    0/1 show vectors (``xi`` for side I, ``xj`` for side J).

    Returns two dicts of boolean lists with one entry per assortment.
    """
    def row_matches(flat, i, s):
        # Assortment s must contain exactly the j's flagged in row i.
        return all((flat[n * i + j] == 0 and j not in s) or
                   (flat[n * i + j] == 1 and j in s) for j in range(n))
    def col_matches(flat, j, s):
        return all((flat[n * i + j] == 0 and i not in s) or
                   (flat[n * i + j] == 1 and i in s) for i in range(n))
    SI = {i: [row_matches(xi, i, s) for s in S] for i in range(n)}
    SJ = {j: [col_matches(xj, j, s) for s in S] for j in range(n)}
    return SI, SJ
def ComputeTransitionProb(xIS,xJS,prI,prJ,Bsn,n,nS,d):
    """Probability of reaching second-stage backlog scenario ``Bsn`` given
    the first-stage assortment indicators xIS / xJS.

    Returns 0 when the scenario is inconsistent with what was shown: a user
    cannot end up backlogged without having been seen, nor be backlogged
    while the other side sees them in the same period.  Otherwise the
    like/dislike Bernoulli terms are multiplied over every shown pair;
    pairs shown simultaneously contribute probability 1 here because they
    resolve immediately rather than entering a backlog.
    """
    probI=1
    probJ=1
    # Side I: every assortment r that user i actually sees (xIS[i][r] == 1).
    for i in range(n):
        for r in range(nS):
            if xIS[i][r] == 0:
                continue
            for j in range(n):
                if Bsn[1][j][i] == 1 and xIS[i][r]*d[j][r] == 0:
                    return 0 # cannot be in backlog if does not see
                elif Bsn[1][j][i] == 1 and sum([d[i][s]*xJS[j][s] for s in range(nS)]) == 1:
                    return 0 # cannot be in backlog and being seen in same period
                elif Bsn[1][j][i] == 0 and xIS[i][r]*d[j][r] == 0:
                    probI*=1 # no update if not shown and backlog compatible
                elif sum([d[i][s]*xJS[j][s] for s in range(nS)]) == 1:
                    probI*=1 # simultaneous shows
                else: # no simultaneous shows
                    probI*=math.pow(prI[j][r], Bsn[1][j][i]) * math.pow(1-prI[j][r], 1-Bsn[1][j][i])
    # Side J: symmetric pass over assortments seen by each user j.
    for j in range(n):
        for s in range(nS):
            if xJS[j][s] == 0:
                continue
            for i in range(n):
                if Bsn[0][i][j] == 1 and xJS[j][s]*d[i][s] == 0:
                    return 0
                elif Bsn[0][i][j] == 1 and sum([d[j][r]*xIS[i][r] for r in range(nS)]) == 1:
                    return 0
                elif Bsn[0][i][j] == 0 and xJS[j][s]*d[i][s] == 0:
                    probJ*=1
                elif sum([d[j][r]*xIS[i][r] for r in range(nS)]) == 1:
                    probJ*=1
                else:
                    probJ*=math.pow(prJ[i][s], Bsn[0][i][j]) * math.pow(1-prJ[i][s], 1-Bsn[0][i][j])
    prob = probI*probJ
    return prob
# ==============================
# MAIN FUNCTIONS PER STAGE
# ==============================
def SolveSecondStageParallel(n,m,t,delta,outdir='aux', boo_m=True, num_processors=3):
    """Enumerate every feasible second-stage state and solve each one.

    Writes the instance setup to ``outdir``/setup.txt, then builds all
    consistent (backlog pair, remaining-potentials) scenarios and solves
    them with BuildModelParallel on a multiprocessing pool; each worker
    dumps its own scenario_s=<ct>.txt file into ``outdir``.
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    DI, DJ, mp, S, d, pI, pJ, pairs, U, V = CreateInputs(n,m,delta,False) # these are inputs for second stage model
    setup_data = {'n':n, 'm':m, 't':t, 'delta':delta,'S':S,'d':d,'pI':pI,'pJ':pJ,'U':U,'V':V}
    with open(os.path.join(outdir, 'setup.txt'), 'w') as outfile:
        json.dump(setup_data, outfile)
    # feasible backlogs and potentials in second stage,
    # assuming that in first stage everyones is offered m profiles
    feasible_backlogs = FeasibleBacklogs(n,m,t,pairs,boo_m)
    potential_pairs = FeasiblePairings(n,m,t,DI,DJ, boo_m)
    Bs, Ps = {}, {}
    ct = 0
    indata = []
    # A scenario is consistent when no pair is simultaneously in a backlog
    # (either side) and still an unresolved potential.
    for fb in feasible_backlogs:
        for fp in potential_pairs:
            if sum([(fb[0][pairs[p][0]][pairs[p][1]] + fb[1][pairs[p][1]][pairs[p][0]] + fp[p] <= 1) for p in range(len(pairs))]) == len(pairs):
                indata.append((n, fb, fp, S, d, pI, pJ, mp, ct, outdir))
                ct+=1
    # NOTE: local ``np`` here is just a worker count, not numpy.
    np = min(num_processors,multiprocessing.cpu_count())
    # We execute our process for each replication with a pool
    pool = multiprocessing.Pool(processes=min(np, len(indata)))
    pool.map(BuildModelParallel, indata)
    pool.close()
    pool.join()
def SolveFirstStage(n,m,t,outdir, boo_m=True):
    """Brute-force the optimal first-period assortments.

    Reads every solved second-stage scenario from ``outdir`` (written by
    SolveSecondStageParallel), then for each feasible first-stage pair of
    show vectors (xI, xJ) computes its expected value: the immediate
    simultaneous-match term plus the probability-weighted second-stage
    objectives over all consistent scenarios.  Exits with an error if a
    candidate's transition probabilities do not sum to 1.  The best
    candidate is dumped to ``outdir``/first_stage.txt.
    """
    # Create inputs
    DI, DJ, mp, pairs = CreatePairsMapping(n)
    # Read second stage
    dataSS = {}
    for filename in os.listdir(outdir):
        if filename.endswith(".txt"):
            if 'setup' in filename or 'first_stage' in filename:
                continue
            # Scenario index is encoded in the file name: scenario_s=<ct>.txt
            ct = int(filename.split('=')[1].split('.')[0])
            with open(os.path.join(outdir, filename)) as json_file:
                dataSS[ct] = json.load(json_file)
        else:
            continue
    # Initialize
    # B1: empty first-period backlogs; P1: all pairs still potential.
    B1 = [[[0 for i in range(n)] for j in range(n)] for k in range(2)]
    P1 = [1 for i in range(n**2)]
    with open(os.path.join(outdir, 'setup.txt')) as json_file:
        setup = json.load(json_file)
    prI, prJ, S, d = setup['pI'], setup['pJ'], setup['S'], setup['d']
    nS = len(S)
    V_opt,xI_opt,xJ_opt = 0,0,0
    # feasible solutions for first stage
    fxI = FeasibleSolutions(n,m,t,DI, boo_m) # True to force to offer assortment of size m
    fxJ = FeasibleSolutions(n,m,t,DJ, boo_m) # True to force to offer assortment of size m
    for xI in fxI:
        for xJ in fxJ:
            xIS, xJS = GetVars(xI, xJ, n, S)
            # P2: pairs still potential after this period's shows.
            mx = [max(xI[k], xJ[k]) for k in range(len(xI))]
            P2 = [P1[k] - mx[k] for k in range(len(mx))]
            # Immediate expected matches from simultaneous shows.
            exp_val = sum([xIS[i][r] * xJS[j][s] * d[j][r] * d[i][s] * prI[j][r] * prJ[i][s]\
                           for i in range(n) for j in range(n) \
                           for r in range(nS) for s in range(nS)])
            cum_prob = 0
            for scn in dataSS:
                Bsn = dataSS[scn]['B']
                Psn = dataSS[scn]['P']
                if P2 != Psn: # the probability of this scenario is 0 is P2 != Psn
                    continue
                if sum([B1[0][i][j] == 1 and Bsn[0][i][j] != 1-xI[n*i+j] \
                        for i in range(n) for j in range(n)])\
                   != sum([B1[0][i][j] == 1 for i in range(n) for j in range(n)]):
                    continue
                prob = ComputeTransitionProb(xIS,xJS,prI,prJ,Bsn,n,nS,d)
                if prob == 0:
                    continue
                cum_prob += prob
                exp_val += dataSS[scn]['obj']*prob
            # Sanity check: reachable scenarios must cover probability 1.
            if cum_prob < 1-1e-4:
                print('Error: transition probabilities do not add up to 1')
                print(xI, xJ)
                print(cum_prob)
                sys.exit(1)
            if exp_val > V_opt:
                V_opt = exp_val
                xI_opt = xI
                xJ_opt = xJ
    fs_data = {'obj':V_opt, 'xI': xI_opt, 'xJ': xJ_opt}
    with open(os.path.join(outdir, 'first_stage.txt'), 'w') as outfile:
        json.dump(fs_data, outfile)
def SolveStages(n, m, t, delta, outdir, boo_m=True, num_processors=3):
    """Run the full two-period pipeline.

    First solves every second-stage scenario in parallel, then picks the
    best first-stage decision by brute-force enumeration.
    """
    SolveSecondStageParallel(n, m, t, delta, outdir, boo_m, num_processors)
    SolveFirstStage(n, m, t, outdir, boo_m)
if __name__ == '__main__':
    # Instance size: n profiles per side, assortments of size m, t periods,
    # externality weight delta; bind_m forces assortments of exactly m.
    # NOTE: ConstructBacklogs reads the module-level global ``t`` set here.
    n,m,t,delta,bind_m = 3,2,2,0.1,True
    num_sim, num_processors = 1, 3
    for sim in range(num_sim):
        # Output directory name encodes whether the size constraint binds.
        if bind_m:
            outdir = 'sim_bind_'+str(sim)
        else:
            outdir = 'sim_nonbind_'+str(sim)
        SolveStages(n,m,t,delta,outdir,bind_m,num_processors)
| [
"ignacio.rios.u@gmail.com"
] | ignacio.rios.u@gmail.com |
c072c513deba039b750e3b8889e19fe875c1bdf6 | 53fca3fa4662a5d04e99445c3baeaaa4169083a1 | /network/gnn08551_final_iter3.py | 0d34db379927cd20545608c0f7c8d3cc6de81609 | [] | no_license | yougoforward/Iter_ParseNet | 223b99aa5eef70b83f2fa5493c26549266a11fe9 | 39367c3332b076678105489ce4a5cf8f250491e5 | refs/heads/master | 2021-07-12T20:49:19.866085 | 2020-09-06T12:32:35 | 2020-09-06T12:32:35 | 200,991,268 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,649 | py | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
from inplace_abn.bn import InPlaceABNSync
from modules.com_mod import Bottleneck, ResGridNet, SEModule
from modules.parse_mod import MagicModule
from modules.senet import se_resnext50_32x4d, se_resnet101, senet154
# File-wide BatchNorm2d: synchronized in-place ABN with no fused activation.
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
from modules.convGRU import ConvGRU
from modules.dcn import DFConv2d
class Composition(nn.Module):
    """Aggregates part features into their parent node's feature.

    ``com_att`` predicts a single-channel composition attention map from
    the concatenated part features; each part, gated by the sigmoid of
    that map, is concatenated with the parent feature ``xh`` and fused by
    the shared 1x1 ``conv_ch`` bottleneck; the per-part messages are
    summed.  Module/Sequential layout must stay unchanged for checkpoint
    compatibility.
    """
    def __init__(self, hidden_dim, parts=2):
        super(Composition, self).__init__()
        # 2*hidden -> hidden fusion applied to cat([parent, gated part]).
        self.conv_ch = nn.Sequential(
            nn.Conv2d(2 * hidden_dim, 2 * hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
            nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
        )
        # Single-channel attention logits predicted from all parts jointly.
        self.com_att = nn.Sequential(
            nn.Conv2d(parts * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim), nn.ReLU(inplace=False),
            nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0, stride=1, bias=True),
        )
    def forward(self, xh, xp_list):
        # xh: parent feature; xp_list: per-part features (assumed to share
        # xh's spatial size — TODO confirm upstream).
        com_att = self.com_att(torch.cat(xp_list, dim=1))
        xph_message = sum([self.conv_ch(torch.cat([xh, xp * torch.sigmoid(com_att)], dim=1)) for xp in xp_list])
        return xph_message, com_att
class Decomposition(nn.Module):
    """Top-down decomposition message: split a parent node's feature among its
    children using soft assignment maps predicted by :class:`Decomp_att`.
    """

    def __init__(self, hidden_dim=10, parts=2):
        super(Decomposition, self).__init__()
        # Fuses (attended parent, child) pairs down to hidden_dim channels.
        self.conv_fh = nn.Sequential(
            nn.Conv2d(2 * hidden_dim, 2 * hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
            nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
        )
        self.decomp_att = Decomp_att(hidden_dim=hidden_dim, parts=parts)

    def forward(self, xf, xh_list):
        """Return (per-child messages, per-channel attention maps, raw logits)."""
        decomp_att_list, maps = self.decomp_att(xf, xh_list)
        # decomp_att_list[0] is the background channel, so child i uses map i+1.
        decomp_fh_list = [self.conv_fh(torch.cat([xf * decomp_att_list[i+1], xh_list[i]], dim=1)) for i in
                          range(len(xh_list))]
        return decomp_fh_list, decomp_att_list, maps
class Decomp_att(nn.Module):
def __init__(self, hidden_dim=10, parts=2):
super(Decomp_att, self).__init__()
self.conv_fh = nn.Conv2d(hidden_dim, parts+1, kernel_size=1, padding=0, stride=1, bias=True)
self.softmax= nn.Softmax(dim=1)
def forward(self, xf, xh_list):
decomp_map = self.conv_fh(xf)
decomp_att = self.softmax(decomp_map)
decomp_att_list = list(torch.split(decomp_att, 1, dim=1))
return decomp_att_list, decomp_map
def generate_spatial_batch(featmap_H, featmap_W):
    """Build a (1, H, W, 8) float32 map of normalized cell coordinates.

    For each feature-map cell (h, w) the 8 channels are
    [xmin, ymin, xmax, ymax, xctr, yctr, 1/W, 1/H], with x and y mapped to
    the range [-1, 1].
    """
    import numpy as np
    # Compute the per-column / per-row extents in float64 so the arithmetic
    # matches the original scalar Python computation exactly before the
    # final cast to float32.
    ws = np.arange(featmap_W, dtype=np.float64)
    hs = np.arange(featmap_H, dtype=np.float64)
    xmin = ws / featmap_W * 2 - 1
    xmax = (ws + 1) / featmap_W * 2 - 1
    xctr = (xmin + xmax) / 2
    ymin = hs / featmap_H * 2 - 1
    ymax = (hs + 1) / featmap_H * 2 - 1
    yctr = (ymin + ymax) / 2
    spatial_batch_val = np.zeros((1, featmap_H, featmap_W, 8), dtype=np.float32)
    # Broadcast the row/column vectors across the grid instead of the
    # original O(H*W) interpreted double loop.
    spatial_batch_val[0, :, :, 0] = xmin[None, :]
    spatial_batch_val[0, :, :, 1] = ymin[:, None]
    spatial_batch_val[0, :, :, 2] = xmax[None, :]
    spatial_batch_val[0, :, :, 3] = ymax[:, None]
    spatial_batch_val[0, :, :, 4] = xctr[None, :]
    spatial_batch_val[0, :, :, 5] = yctr[:, None]
    spatial_batch_val[0, :, :, 6] = 1 / featmap_W
    spatial_batch_val[0, :, :, 7] = 1 / featmap_H
    return spatial_batch_val
class Dep_Context(nn.Module):
    """Gather dependency context for one part node via cross-attention between
    the image feature map and the node feature map.

    Both inputs are 2x2 max-pooled before the attention to cut the quadratic
    cost of the (hw x hw) energy matrix, then the context is upsampled back.
    """

    def __init__(self, in_dim=256, hidden_dim=10):
        super(Dep_Context, self).__init__()
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        # Bilinear projection between image features and node features.
        self.W = nn.Parameter(torch.ones(in_dim, hidden_dim))
        self.sigmoid = nn.Sigmoid()
        # Precomputed normalized-coordinate channels; assumes the pooled
        # feature maps are 30x30 -- TODO confirm against input resolution.
        self.coord_fea = torch.from_numpy(generate_spatial_batch(30, 30))
        self.maxpool = nn.AdaptiveMaxPool2d(1)
        self.project = nn.Sequential(nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
                                     BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
                                     )
        # 1x1 convs that mix in the 8 coordinate channels.
        self.img_conv = nn.Sequential(nn.Conv2d(in_dim + 8, in_dim, kernel_size=1, stride=1, padding=0, bias=True))
        self.node_conv = nn.Sequential(nn.Conv2d(hidden_dim + 8, hidden_dim, kernel_size=1, stride=1, padding=0, bias=True))
        self.pool = nn.MaxPool2d(kernel_size=(2,2))

    def forward(self, p_fea, hu):
        """Return the attended context for node ``hu`` given image feature ``p_fea``."""
        n, c, h, w = p_fea.size()
        query = self.pool(p_fea)
        key = self.pool(hu)
        n, c, hp, wp = query.size()
        # Broadcast the fixed coordinate map to the batch, channels-first.
        coord_fea = self.coord_fea.to(p_fea.device).repeat((n, 1, 1, 1)).permute(0, 3, 1, 2)
        # (n, hw, hidden): image features projected into the node space.
        project1 = torch.matmul(self.img_conv(torch.cat([query, coord_fea], dim=1)).view(n, -1, hp*wp).permute(0,2,1), self.W)
        # (n, hw, hw) affinity between image positions and node positions.
        energy = torch.matmul(project1, self.node_conv(torch.cat([key, coord_fea], dim=1)).view(n, -1, hp*wp))
        attention = torch.softmax(energy, dim=-1)
        co_context = torch.bmm(key.view(n, -1, hp*wp), attention.permute(0, 2, 1)).view(n, -1, hp, wp)
        co_context = self.project(co_context)
        # Restore the pre-pooling spatial resolution.
        co_context = F.interpolate(co_context, (h, w), mode="bilinear", align_corners=True)
        return co_context
class Contexture(nn.Module):
    """Per-part dependency context plus per-part attention over neighbours.

    For each part node i, ``F_cont[i]`` computes an attended context from the
    image feature, and ``att_list[i]`` predicts a softmax over that node's
    graph neighbours (plus one background channel).
    """

    def __init__(self, in_dim=256, hidden_dim=10, parts=6, part_list_list=None):
        super(Contexture, self).__init__()
        self.hidden_dim = hidden_dim
        self.F_cont = nn.ModuleList(
            [Dep_Context(in_dim, hidden_dim) for i in range(len(part_list_list))])
        self.dp_cont = nn.ModuleList(
            [nn.Conv2d(in_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=True)
             for i in range(len(part_list_list))])
        self.parts = parts
        # One attention head per node; channel count = #neighbours + background.
        self.att_list = nn.ModuleList([nn.Conv2d(hidden_dim, len(part_list_list[i]) + 1, kernel_size=1, padding=0, stride=1, bias=True)
                                       for i in range(len(part_list_list))])
        # NOTE(review): context_att_list is registered (adds parameters) but
        # never used in forward -- presumably leftover from an earlier design.
        self.context_att_list = nn.ModuleList([nn.Sequential(
            nn.Conv2d(hidden_dim, 2, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
        ) for i in range(len(part_list_list))])
        self.softmax = nn.Softmax(dim=1)

    def forward(self, xp_list, p_fea, part_list_list):
        """Return (contexts, per-node lists of softmaxed attention maps, raw logits)."""
        F_dep_list = [self.F_cont[i](p_fea, xp_list[i]) for i in range(len(xp_list))]
        att_list = [self.att_list[i](self.dp_cont[i](p_fea)) for i in range(len(xp_list))]
        att_list_list = [list(torch.split(self.softmax(att_list[i]), 1, dim=1)) for i in range(len(xp_list))]
        return F_dep_list, att_list_list, att_list
class Part_Dependency(nn.Module):
    """Edge message u -> v: fuse node u's (attended) dependency context with
    node v's feature into a hidden_dim message."""

    def __init__(self, in_dim=256, hidden_dim=10):
        super(Part_Dependency, self).__init__()
        self.R_dep = nn.Sequential(
            nn.Conv2d(2*hidden_dim, 2 * hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
            nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
        )

    def forward(self, F_dep_hu, hv):
        """Return the dependency message for target node ``hv``."""
        huv = self.R_dep(torch.cat([F_dep_hu, hv], dim=1))
        return huv
class conv_Update(nn.Module):
    """Node-state update: one step of a 1x1 ConvGRU that takes the incoming
    message as input and the current node feature as hidden state."""

    def __init__(self, hidden_dim=10):
        super(conv_Update, self).__init__()
        self.hidden_dim = hidden_dim
        # NOTE(review): hard-codes CUDA tensors, so this module cannot run on
        # CPU as written -- confirm intended deployment.
        dtype = torch.cuda.FloatTensor
        self.update = ConvGRU(input_dim=hidden_dim,
                              hidden_dim=hidden_dim,
                              kernel_size=(1, 1),
                              num_layers=1,
                              dtype=dtype,
                              batch_first=True,
                              bias=True,
                              return_all_layers=False)

    def forward(self, x, message):
        """Return the updated node state given previous state ``x`` and ``message``."""
        # unsqueeze(1) adds the (length-1) time dimension expected by ConvGRU.
        _, out = self.update(message.unsqueeze(1), [x])
        return out[0][0]
class DecoderModule(nn.Module):
    """Part-level decoder: fuses the context feature with a mid-level skip
    feature and returns a 256-channel part feature map.

    ``num_classes`` and the ``xl`` argument are kept for interface
    compatibility; the final low-level fusion / classification stages were
    disabled upstream (the previously commented-out conv2/conv3/conv4 path
    has been removed here).
    """

    def __init__(self, num_classes):
        super(DecoderModule, self).__init__()
        self.conv0 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1, bias=False),
                                   BatchNorm2d(512), nn.ReLU(inplace=False))
        self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1, dilation=1, bias=False),
                                   BatchNorm2d(256), nn.ReLU(inplace=False))
        # Learnable weight for the mid-level skip connection.
        self.alpha = nn.Parameter(torch.ones(1))

    def forward(self, xt, xm, xl):
        """Upsample ``xt`` to ``xm``'s size, fuse, and return the part feature.

        ``xl`` (low-level feature) is currently unused.
        """
        _, _, h, w = xm.size()
        xt = self.conv0(F.interpolate(xt, size=(h, w), mode='bilinear', align_corners=True) + self.alpha * xm)
        xt_fea = self.conv1(xt)
        return xt_fea
class AlphaHBDecoder(nn.Module):
    """Half-body decoder: weighted skip fusion followed by conv + SE block.

    ``hbody_cls`` is accepted for interface compatibility but unused (no
    classifier is created here).
    """

    def __init__(self, hbody_cls):
        super(AlphaHBDecoder, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
                                   BatchNorm2d(256), nn.ReLU(inplace=False),
                                   nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
                                   BatchNorm2d(256), nn.ReLU(inplace=False), SEModule(256, reduction=16))
        # Learnable weight for the skip connection.
        self.alpha_hb = nn.Parameter(torch.ones(1))

    def forward(self, x, skip):
        """Upsample ``x`` to ``skip``'s size, fuse with weighted skip, refine."""
        _, _, h, w = skip.size()
        xup = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
        xfuse = xup + self.alpha_hb * skip
        xfuse = self.conv1(xfuse)
        return xfuse
class AlphaFBDecoder(nn.Module):
    """Full-body decoder: identical structure to AlphaHBDecoder but with its
    own parameters; ``fbody_cls`` is accepted for interface compatibility
    but unused."""

    def __init__(self, fbody_cls):
        super(AlphaFBDecoder, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
                                   BatchNorm2d(256), nn.ReLU(inplace=False),
                                   nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
                                   BatchNorm2d(256), nn.ReLU(inplace=False), SEModule(256, reduction=16))
        # Learnable weight for the skip connection.
        self.alpha_fb = nn.Parameter(torch.ones(1))

    def forward(self, x, skip):
        """Upsample ``x`` to ``skip``'s size, fuse with weighted skip, refine."""
        _, _, h, w = skip.size()
        xup = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
        xfuse = xup + self.alpha_fb * skip
        xfuse = self.conv1(xfuse)
        return xfuse
class Full_Graph(nn.Module):
    """Full-body node inference: compose the two half-body nodes into the
    full-body node and update its state with a ConvGRU step."""

    def __init__(self, in_dim=256, hidden_dim=10, cls_p=7, cls_h=3, cls_f=2):
        super(Full_Graph, self).__init__()
        self.hidden = hidden_dim
        self.comp_h = Composition(hidden_dim, parts=2)
        self.conv_Update = conv_Update(hidden_dim)

    def forward(self, xf, xh_list, xp_list):
        """Return (updated full-body node, composition attention logits).

        ``xp_list`` is accepted for interface symmetry but not used here.
        """
        comp_h, com_map = self.comp_h(xf, xh_list)
        xf = self.conv_Update(xf, comp_h)
        return xf, com_map
class Half_Graph(nn.Module):
    """Half-body node inference: each half receives a decomposition message
    from the full-body node plus a composition message from its parts, then
    updates via ConvGRU.

    The default part lists are never mutated, so the shared mutable defaults
    are safe here.
    """

    def __init__(self, upper_part_list=[1, 2, 3, 4], lower_part_list=[5, 6], in_dim=256, hidden_dim=10, cls_p=7,
                 cls_h=3, cls_f=2):
        super(Half_Graph, self).__init__()
        self.cls_h = cls_h
        self.upper_part_list = upper_part_list
        self.lower_part_list = lower_part_list
        self.upper_parts_len = len(upper_part_list)
        self.lower_parts_len = len(lower_part_list)
        self.hidden = hidden_dim
        self.decomp_fh_list = Decomposition(hidden_dim, parts=2)
        self.comp_u = Composition(hidden_dim, parts=len(upper_part_list))
        self.comp_l = Composition(hidden_dim, parts=len(lower_part_list))
        self.update_u = conv_Update(hidden_dim)
        self.update_l = conv_Update(hidden_dim)

    def forward(self, xf, xh_list, xp_list, h_fea):
        """Return (new [upper, lower] nodes, decomposition logits, two
        composition logit maps). ``h_fea`` is currently unused."""
        decomp_list, decomp_att_list, decomp_att_map = self.decomp_fh_list(xf, xh_list)
        # upper half: compose its parts (part ids are 1-based indices).
        upper_parts = []
        for part in self.upper_part_list:
            upper_parts.append(xp_list[part - 1])
        comp_u, com_u_map = self.comp_u(xh_list[0], upper_parts)
        message_u = decomp_list[0] + comp_u
        xh_u = self.update_u(xh_list[0], message_u)
        # lower half
        lower_parts = []
        for part in self.lower_part_list:
            lower_parts.append(xp_list[part - 1])
        comp_l, com_l_map = self.comp_l(xh_list[1], lower_parts)
        message_l = decomp_list[1] + comp_l
        xh_l = self.update_l(xh_list[1], message_l)
        xh_list_new = [xh_u, xh_l]
        return xh_list_new, decomp_att_map, com_u_map, com_l_map
class Part_Graph(nn.Module):
    """Part node inference: each part receives a decomposition message from
    its half-body parent plus dependency messages from its graph neighbours,
    then updates via ConvGRU.

    ``adj_matrix`` is the (cls_p-1 x cls_p-1) part adjacency; one
    Part_Dependency module is created per directed edge.
    """

    def __init__(self, adj_matrix, upper_part_list=[1, 2, 3, 4], lower_part_list=[5, 6], in_dim=256, hidden_dim=10,
                 cls_p=7, cls_h=3, cls_f=2):
        super(Part_Graph, self).__init__()
        self.cls_p = cls_p
        self.upper_part_list = upper_part_list
        self.lower_part_list = lower_part_list
        # Each row of edge_index is a (source, target) pair of 0-based part ids.
        self.edge_index = torch.nonzero(adj_matrix)
        self.edge_index_num = self.edge_index.shape[0]
        # part_list_list[v] = list of source nodes u with an edge u -> v.
        self.part_list_list = [[] for i in range(self.cls_p - 1)]
        for i in range(self.edge_index_num):
            self.part_list_list[self.edge_index[i, 1]].append(self.edge_index[i, 0])
        self.F_dep_list = Contexture(in_dim=in_dim, hidden_dim=hidden_dim, parts=self.cls_p - 1, part_list_list=self.part_list_list)
        self.decomp_hpu_list = Decomposition(hidden_dim, parts=len(upper_part_list))
        self.decomp_hpl_list = Decomposition(hidden_dim, parts=len(lower_part_list))
        self.part_dp = nn.ModuleList([Part_Dependency(in_dim, hidden_dim) for i in range(self.edge_index_num)])
        self.node_update_list = nn.ModuleList([conv_Update(hidden_dim) for i in range(self.cls_p - 1)])

    def forward(self, xf, xh_list, xp_list, xp):
        """Return (new part nodes, upper/lower decomposition logits, raw
        dependency-attention logits). ``xf`` is unused here."""
        # upper half parts (1-based ids into xp_list).
        upper_parts = []
        for part in self.upper_part_list:
            upper_parts.append(xp_list[part - 1])
        # lower half parts.
        lower_parts = []
        for part in self.lower_part_list:
            lower_parts.append(xp_list[part - 1])
        decomp_pu_list, decomp_pu_att_list, decomp_pu_att_map = self.decomp_hpu_list(xh_list[0], upper_parts)
        decomp_pl_list, decomp_pl_att_list, decomp_pl_att_map = self.decomp_hpl_list(xh_list[1], lower_parts)
        # Dependency contexts and neighbour attentions for every part node.
        F_dep_list, att_list_list, Fdep_att_list = self.F_dep_list(xp_list, xp, self.part_list_list)
        # For each edge u -> v, gate u's context by the attention channel that
        # corresponds to v (offset by 1 for background) and send it to v.
        xpp_list_list = [[] for i in range(self.cls_p - 1)]
        for i in range(self.edge_index_num):
            xpp_list_list[self.edge_index[i, 1]].append(
                self.part_dp[i](att_list_list[self.edge_index[i, 0]][
                                    1 + self.part_list_list[self.edge_index[i, 0]].index(self.edge_index[i, 1])] *
                                F_dep_list[self.edge_index[i, 0]], xp_list[self.edge_index[i, 1]]))
        xp_list_new = []
        for i in range(self.cls_p - 1):
            if i + 1 in self.upper_part_list:
                message = decomp_pu_list[self.upper_part_list.index(i + 1)] + sum(xpp_list_list[i])
            elif i + 1 in self.lower_part_list:
                message = decomp_pl_list[self.lower_part_list.index(i + 1)] + sum(xpp_list_list[i])
            xp_list_new.append(self.node_update_list[i](xp_list[i], message))
        return xp_list_new, decomp_pu_att_map, decomp_pl_att_map, Fdep_att_list
class GNN(nn.Module):
    """One message-passing round over the three-level human-parsing graph:
    full body -> half bodies -> parts."""

    def __init__(self, adj_matrix, upper_half_node=[1, 2, 3, 4], lower_half_node=[5, 6], in_dim=256, hidden_dim=10,
                 cls_p=7, cls_h=3, cls_f=2):
        super(GNN, self).__init__()
        self.cp = cls_p
        self.ch = cls_h
        self.cf = cls_f
        self.ch_in = in_dim
        self.hidden = hidden_dim
        self.upper_half_node = upper_half_node
        self.upper_node_len = len(self.upper_half_node)
        self.lower_half_node = lower_half_node
        self.lower_node_len = len(self.lower_half_node)
        self.full_infer = Full_Graph(in_dim, hidden_dim, cls_p, cls_h, cls_f)
        self.half_infer = Half_Graph(self.upper_half_node, self.lower_half_node, in_dim, hidden_dim, cls_p, cls_h,
                                     cls_f)
        self.part_infer = Part_Graph(adj_matrix, self.upper_half_node, self.lower_half_node, in_dim, hidden_dim, cls_p,
                                     cls_h, cls_f)

    def forward(self, xp_list, xh_list, xf, p_fea, h_fea):
        """Update all node states once; all updates read the *previous* states.

        Returns the new states plus the attention maps produced at each level.
        """
        # for full body node
        xf_new, com_map = self.full_infer(xf, xh_list, xp_list)
        # for half body nodes
        xh_list_new, decomp_fh_att_map, com_u_map, com_l_map = self.half_infer(xf, xh_list, xp_list, h_fea)
        # for part nodes
        xp_list_new, decomp_up_att_map, decomp_lp_att_map, Fdep_att_list = self.part_infer(xf, xh_list, xp_list, p_fea)
        return xp_list_new, xh_list_new, xf_new, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, com_u_map, com_l_map, Fdep_att_list
class GNN_infer(nn.Module):
    """Project decoder features into per-node hidden states, run three GNN
    iterations, and emit segmentation logits for every iteration plus a
    final refined part segmentation."""

    def __init__(self, adj_matrix, upper_half_node=[1, 2, 3, 4], lower_half_node=[5, 6], in_dim=256, hidden_dim=10,
                 cls_p=7, cls_h=3, cls_f=2):
        super(GNN_infer, self).__init__()
        self.cls_p = cls_p
        self.cls_h = cls_h
        self.cls_f = cls_f
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim

        # Feature transform: one hidden_dim slice per foreground node.
        self.p_conv = nn.Sequential(
            nn.Conv2d(in_dim, hidden_dim * (cls_p - 1), kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim * (cls_p - 1)), nn.ReLU(inplace=False))
        self.h_conv = nn.Sequential(
            nn.Conv2d(in_dim, hidden_dim * (cls_h - 1), kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim * (cls_h - 1)), nn.ReLU(inplace=False))
        self.f_conv = nn.Sequential(
            nn.Conv2d(in_dim, hidden_dim * (cls_f - 1), kernel_size=1, padding=0, stride=1, bias=False),
            BatchNorm2d(hidden_dim * (cls_f - 1)), nn.ReLU(inplace=False))
        # Background node pools all three feature levels.
        self.bg_conv = nn.Sequential(
            nn.Conv2d(3 * in_dim, hidden_dim, kernel_size=1, padding=0, stride=1,
                      bias=False),
            BatchNorm2d(hidden_dim), nn.ReLU(inplace=False))

        # gnn infer
        self.gnn = GNN(adj_matrix, upper_half_node, lower_half_node, self.in_dim, self.hidden_dim, self.cls_p,
                       self.cls_h, self.cls_f)

        # Node supervision: grouped 1x1 convs classify each node slice
        # independently (groups = number of nodes).
        self.p_cls = nn.Conv2d(hidden_dim * (cls_p-1), (cls_p -1),
                               kernel_size=1, padding=0, stride=1, bias=True,
                               groups=(cls_p-1))
        self.h_cls = nn.Conv2d(hidden_dim * (cls_h-1), (cls_h -1),
                               kernel_size=1, padding=0, stride=1, bias=True,
                               groups=(cls_h-1))
        self.f_cls = nn.Conv2d(hidden_dim * (cls_f-1), (cls_f -1),
                               kernel_size=1, padding=0, stride=1, bias=True,
                               groups=(cls_f-1))
        self.bg_cls = nn.Conv2d(hidden_dim, 1,
                                kernel_size=1, padding=0, stride=1, bias=True,
                                groups=1)
        self.softmax = nn.Softmax(dim=1)
        self.final_cls = Final_classifer(in_dim, hidden_dim, cls_p, cls_h, cls_f)

    def forward(self, xp, xh, xf, xl):
        """Run the iterative GNN.

        Returns lists of per-iteration part/half/full segmentation logits
        (p_seg additionally ends with the final refined prediction) and the
        per-iteration attention maps.
        """
        # Feature transform into node hidden states.
        f_node = self.f_conv(xf)
        p_conv = self.p_conv(xp)
        p_node_list = list(torch.split(p_conv, self.hidden_dim, dim=1))
        h_conv = self.h_conv(xh)
        h_node_list = list(torch.split(h_conv, self.hidden_dim, dim=1))
        bg_node = self.bg_conv(torch.cat([xp, xh, xf], dim=1))

        # Node supervision for the initial (iteration-0) states. The same
        # background logit is reused at every level and iteration.
        bg_cls = self.bg_cls(bg_node)
        p_cls = self.p_cls(p_conv)
        h_cls = self.h_cls(h_conv)
        f_cls = self.f_cls(f_node)
        f_seg = torch.cat([bg_cls, f_cls], dim=1)
        h_seg = torch.cat([bg_cls, h_cls], dim=1)
        p_seg = torch.cat([bg_cls, p_cls], dim=1)

        # Accumulators: element [i] corresponds to GNN iteration i.
        p_seg = [p_seg]
        h_seg = [h_seg]
        f_seg = [f_seg]
        decomp_fh_att_map = []
        decomp_up_att_map = []
        decomp_lp_att_map = []
        com_map = []
        com_u_map = []
        com_l_map = []
        Fdep_att_list = []
        # Histories of node states; index 0 holds the initial states.
        p_node_list = [p_node_list]
        h_node_list = [h_node_list]
        f_node = [f_node]

        # NOTE: the loop variable shadows the builtin ``iter``.
        for iter in range(3):
            p_fea_list_new, h_fea_list_new, f_fea_new, decomp_fh_att_map_new, decomp_up_att_map_new, \
            decomp_lp_att_map_new, com_map_new, com_u_map_new, com_l_map_new, Fdep_att_list_new = self.gnn(
                p_node_list[iter], h_node_list[iter], f_node[iter], xp, xh)
            p_node_list.append(p_fea_list_new)
            h_node_list.append(h_fea_list_new)
            f_node.append(f_fea_new)
            decomp_fh_att_map.append(decomp_fh_att_map_new)
            decomp_up_att_map.append(decomp_up_att_map_new)
            decomp_lp_att_map.append(decomp_lp_att_map_new)
            com_map.append(com_map_new)
            com_u_map.append(com_u_map_new)
            com_l_map.append(com_l_map_new)
            Fdep_att_list.append(Fdep_att_list_new)
            # Node supervision on the freshly updated states.
            p_cls_new = self.p_cls(torch.cat(p_fea_list_new, dim=1))
            h_cls_new = self.h_cls(torch.cat(h_fea_list_new, dim=1))
            f_cls_new = self.f_cls(f_fea_new)
            f_seg_new = torch.cat([bg_cls, f_cls_new], dim=1)
            h_seg_new = torch.cat([bg_cls, h_cls_new], dim=1)
            p_seg_new = torch.cat([bg_cls, p_cls_new], dim=1)
            p_seg.append(p_seg_new)
            h_seg.append(h_seg_new)
            f_seg.append(f_seg_new)

        # Final refinement from background node + last part states.
        xphf_infer = torch.cat([bg_node] + p_fea_list_new, dim=1)
        p_seg_final = self.final_cls(xphf_infer, xp, xh, xf, xl)
        p_seg.append(p_seg_final)

        return p_seg, h_seg, f_seg, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, com_u_map, com_l_map, Fdep_att_list
class Final_classifer(nn.Module):
    """Final part classifier: fuses GNN node features with the decoder part
    feature and a low-level skip, then predicts cls_p part logits at the
    low-level resolution."""

    def __init__(self, in_dim=256, hidden_dim=20, cls_p=7, cls_h=3, cls_f=2):
        super(Final_classifer, self).__init__()
        self.cp = cls_p
        self.ch = cls_h
        self.cf = cls_f
        self.ch_in = in_dim
        # classifier trunk over concatenated (node states, part feature).
        self.conv0 = nn.Sequential(nn.Conv2d(in_dim + cls_p*hidden_dim, in_dim, kernel_size=3, padding=1, dilation=1, bias=False),
                                   BatchNorm2d(in_dim), nn.ReLU(inplace=False),
                                   nn.Conv2d(in_dim, in_dim, kernel_size=3, padding=1, dilation=1, bias=False),
                                   BatchNorm2d(in_dim), nn.ReLU(inplace=False)
                                   )
        # Low-level skip projection (48 channels, DeepLab-style).
        self.conv2 = nn.Sequential(nn.Conv2d(in_dim, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(48), nn.ReLU(inplace=False))
        self.conv3 = nn.Sequential(nn.Conv2d(in_dim + 48, in_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(in_dim), nn.ReLU(inplace=False),
                                   nn.Conv2d(in_dim, in_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                   BatchNorm2d(in_dim)
                                   )
        self.relu = nn.ReLU(inplace=False)
        self.p_cls = nn.Conv2d(in_dim, cls_p, kernel_size=1, padding=0, dilation=1, bias=True)

    def forward(self, xphf, xp, xh, xf, xl):
        """Return part logits at ``xl``'s resolution.

        ``xh`` and ``xf`` are accepted for interface compatibility but unused.
        """
        _, _, th, tw = xl.size()
        xt = F.interpolate(self.conv0(torch.cat([xphf, xp], dim=1)), size=(th, tw), mode='bilinear', align_corners=True)
        xl = self.conv2(xl)
        x = torch.cat([xt, xl], dim=1)
        # Residual connection around the low-level fusion.
        x_fea = self.relu(self.conv3(x)+xt)
        xp_seg = self.p_cls(x_fea)
        return xp_seg
class Decoder(nn.Module):
    """Top-level decoder: context module, three per-level decoders, the GNN
    reasoning head, and an auxiliary DSN branch on the stage-4 feature."""

    def __init__(self, num_classes=7, hbody_cls=3, fbody_cls=2):
        super(Decoder, self).__init__()
        self.layer5 = MagicModule(2048, 512, 1)
        self.layer6 = DecoderModule(num_classes)
        self.layerh = AlphaHBDecoder(hbody_cls)
        self.layerf = AlphaFBDecoder(fbody_cls)
        # Fixed symmetric part adjacency for the 6 foreground parts
        # (chain head-torso-arms / legs structure).
        self.adj_matrix = torch.tensor(
            [[0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 1],
             [0, 0, 0, 0, 1, 0]], requires_grad=False)
        self.gnn_infer = GNN_infer(adj_matrix=self.adj_matrix, upper_half_node=[1, 2, 3, 4], lower_half_node=[5, 6],
                                   in_dim=256, hidden_dim=10, cls_p=7, cls_h=3, cls_f=2)
        # Auxiliary deep-supervision head on the penultimate backbone stage.
        self.layer_dsn = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
                                       BatchNorm2d(512), nn.ReLU(inplace=False),
                                       nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))

    def forward(self, x):
        """``x`` is the backbone's list of stage features (low -> high)."""
        x_dsn = self.layer_dsn(x[-2])
        seg = self.layer5(x[-1])
        # Per-level features: part, half-body, full-body.
        x_fea = self.layer6(seg, x[1], x[0])
        alpha_hb_fea = self.layerh(seg, x[1])
        alpha_fb_fea = self.layerf(seg, x[1])
        # GNN reasoning over the three levels.
        p_seg, h_seg, f_seg, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, \
        com_u_map, com_l_map, Fdep_att_list = self.gnn_infer(x_fea, alpha_hb_fea, alpha_fb_fea, x[0])
        return p_seg, h_seg, f_seg, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, com_u_map, com_l_map, Fdep_att_list, x_dsn
class OCNet(nn.Module):
    """Full model: grid ResNet encoder + graph-reasoning decoder."""

    def __init__(self, block, layers, num_classes):
        super(OCNet, self).__init__()
        self.encoder = ResGridNet(block, layers)
        self.decoder = Decoder(num_classes=num_classes)

        # Kaiming init for convs; InPlaceABN norm layers reset to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, InPlaceABNSync):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Return the decoder's outputs for input image batch ``x``."""
        x = self.encoder(x)
        x = self.decoder(x)
        return x
def get_model(num_classes=20):
    """Build the OCNet parsing model on a ResNet-101 backbone."""
    # model = OCNet(Bottleneck, [3, 4, 6, 3], num_classes)  # ResNet-50 variant
    model = OCNet(Bottleneck, [3, 4, 23, 3], num_classes)  # ResNet-101
    # model = OCNet(Bottleneck, [3, 8, 36, 3], num_classes)  # ResNet-152 variant
    return model
| [
"908865817@qq.com"
] | 908865817@qq.com |
3d9e7266e67cdabdfc23cd0f21161aea3d94ebdc | a15ebaad0cd50c5f6eea59cad3fe127011f9ba00 | /python/chapter3/page109.py | fb82979dd032b03132758af38f91799eb957b93f | [] | no_license | cocvu99/head-first | 28553f0c0c72007634e3acb49138d58f98748cfc | 639ca7b37c030e7b999464adcb43c31b5f420949 | refs/heads/master | 2020-09-12T17:08:58.205187 | 2019-11-24T15:38:19 | 2019-11-24T15:38:19 | 222,489,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | man = []
other = []

# Parse the sketch transcript: lines look like "Role: spoken text".
# Using a context manager guarantees the file is closed even if an
# unexpected exception escapes the loop (the original closed it manually
# only on the success path).
try:
    with open('sketch.txt') as data:
        for each_line in data:
            try:
                (role, line_spoken) = each_line.split(':', 1)
                line_spoken = line_spoken.strip()
                if role == 'Man':
                    man.append(line_spoken)
                elif role == 'Other Man':
                    other.append(line_spoken)
            except ValueError:
                # Line has no ':' separator (stage direction etc.) -- skip it.
                pass
except IOError:
    print('The datafile is missing!')

print(man)
print(other)
| [
"cocvu99@users.noreply.github.com"
] | cocvu99@users.noreply.github.com |
783dd1d568ddd1d977bf4a922d2fa5e961d99b73 | 12977c4d2eae4bfd3b7112814a7e63c506e7bbbe | /ad_purchase_compare/__init__.py | be87bd2c85658bae154f92cdd8ca4bdaa35cc039 | [] | no_license | aryaadiputra/addons60_ptgbu_2013 | 211f3ab9fc74cc3e3f4df770b6ada65d24b83977 | b5cf28bdbb347df4c39ffe3ca32355bd2206077b | refs/heads/master | 2020-04-06T04:11:37.667486 | 2016-11-25T03:27:54 | 2016-11-25T03:27:54 | 58,649,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | import purchase
import purchase_requisition_partner
import stock | [
"aryalemon.mail@gmail.com"
] | aryalemon.mail@gmail.com |
4f8e6e8d113f27968d0974785e6b5f79ea3de04e | 7dc90ef953604c5c595f1423709a07eb43a30db5 | /models/models.py | e9673bc60bb9662bcae3e8ed3e7c828ecf36639e | [
"LicenseRef-scancode-public-domain"
] | permissive | fndiaz/padrao | d59b15e54ac68c3c9f68703204b441c1aa7d09e1 | 18c4c5a0c3aa280ecbc491441cdb354b4165a0ea | refs/heads/master | 2021-01-01T19:47:18.818811 | 2013-09-25T15:27:24 | 2013-09-25T15:27:24 | 11,139,800 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #gravacao_lg = db.define_table("gravacao_lg",
# Field("datetime"),
# Field("acao"),
# Field("pastas"),
# #auth.signature
# format="%(nome)s")
| [
"fndiaz02@gmail.com"
] | fndiaz02@gmail.com |
c7889803a0cc67246207f4bf3ef0c10c87f657ae | 8c5145862894377a3dabe4ea3e68c910f2f8be9e | /inscriptions/migrations/0005_auto_20161114_2340.py | 5ecc69a1841ab2db4a9d2a6b79e13b2f44b9e185 | [] | no_license | farfanoide/dssd | 5fbcdd398dcd46fdc035e794792706b7e8bb27e4 | 96dff9422f351f7800c77e6d8c9ecf32bfef1787 | refs/heads/master | 2021-01-17T17:51:16.723727 | 2016-11-18T18:55:56 | 2016-11-18T18:55:56 | 70,639,839 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-14 23:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Google Drive file id/link columns to the ``paper`` model."""

    # Must be applied after migration 0004 of this app.
    dependencies = [
        ('inscriptions', '0004_auto_20161108_2022'),
    ]

    operations = [
        # Drive file identifier of the uploaded paper (nullable for old rows).
        migrations.AddField(
            model_name='paper',
            name='gdrive_id',
            field=models.CharField(max_length=255, null=True),
        ),
        # Shareable Drive URL for the paper (nullable for old rows).
        migrations.AddField(
            model_name='paper',
            name='gdrive_link',
            field=models.CharField(max_length=255, null=True),
        ),
    ]
| [
"ivan6258@gmail.com"
] | ivan6258@gmail.com |
917bd9f6527d49bec8fcec3e72daeaa51069ba29 | ce5b4fb6da608c6b287fdca4ce243d52e327adb0 | /manage.py | 24aa8f1e57adaf09852296b344a3baa1b98e577c | [
"MIT"
] | permissive | ilearnToday/django_series | cc671bd0ef171d59f75c85b28510adabc0a81685 | aaff52cade1ac45e459d9a5e0bade8c16b53e248 | refs/heads/master | 2022-12-11T00:48:29.634660 | 2019-12-01T13:24:58 | 2019-12-01T13:24:58 | 217,735,830 | 0 | 0 | MIT | 2022-11-22T04:49:46 | 2019-10-26T16:19:04 | Python | UTF-8 | Python | false | false | 691 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Entry point: configure settings and dispatch to the Django CLI."""
    # django-configurations needs both the settings module and the name of
    # the configuration class to activate.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog.settings')
    os.environ.setdefault('DJANGO_CONFIGURATION', 'Local')
    try:
        # Imported only to fail fast with a helpful message when Django is
        # missing; the django-configurations wrapper below is what actually
        # runs the command.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # django-configurations' dispatcher (supports class-based settings).
    from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"goodeitime@gmail.com"
] | goodeitime@gmail.com |
172643e855cd188afe22ff26608f8ab8e34b2fcb | 05c575ae5f9a036b257acce7c12d33baa5185121 | /main.py | 4f1b9f9a495163c9cb0019cac15a795bc3e17875 | [] | no_license | LaVlad/compliler-basics-1 | 2ee247362d1b60bb7e7178889d46d0f27fca4bc9 | 6d084dea75640e77797a51a59d086839e7f17eac | refs/heads/master | 2021-10-27T02:08:45.177194 | 2019-04-15T09:57:38 | 2019-04-15T09:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | stack = ['#']
# Operator-precedence parser for a toy grammar over {a, b, c}.
# ``stack`` (initialized above to ['#']) holds the parse stack; '#' is the
# end/bottom marker.

# Grammar symbol -> row/column index into precedence_matrix.
symbol_dict = {
    'S': 0,
    'X': 1,
    'Y': 2,
    'Z': 3,
    'A': 4,
    'B': 5,
    'C': 6,
    'a': 7,
    'b': 8,
    'c': 9,
    '#': 10
}

# Reduction rules: right-hand side string -> left-hand side non-terminal.
rules_dict = {
    'XYZ': 'S',
    'XZ': 'S',
    'aA': 'A',
    'a': 'A',
    'bB': 'B',
    'b': 'B',
    'cC': 'C',
    'c': 'C',
    'A': 'X',
    'B': 'Y',
    'C': 'Z'
}

# precedence_matrix[top][next]: '<' shift, '=' shift (equal precedence),
# '>' reduce, ' ' no relation (syntax error).
precedence_matrix = [
    [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '>'],
    [' ', ' ', '=', '=', ' ', '<', '<', ' ', '<', '<', '>'],
    [' ', ' ', ' ', '=', ' ', ' ', '<', ' ', ' ', '<', '>'],
    [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '>'],
    [' ', ' ', '>', '>', ' ', '>', '>', ' ', '>', '>', '>'],
    [' ', ' ', ' ', '>', ' ', ' ', '>', ' ', ' ', '>', '>'],
    [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '>'],
    [' ', ' ', '>', '>', '=', '>', '>', '<', '>', '>', '>'],
    [' ', ' ', ' ', '>', ' ', '=', '>', ' ', '<', '>', '>'],
    [' ', ' ', ' ', ' ', ' ', ' ', '=', ' ', ' ', '<', '>'],
    ['<', '<', '<', '<', '<', '<', '<', '<', '<', '<', ' ']
]

lang_line = input('Enter line for validation: ')
lang_line += '#'  # append the end marker so the last reduce can fire
expr_stack = []   # holds the handle (right-hand side) popped during a reduce
i = 0
while i < len(lang_line):
    cur_symb = lang_line[i]
    # Accept once the stack has been reduced to the start symbol.
    if stack == ['#', 'S']:
        print('Line is valid')
        break
    cur_relation = precedence_matrix[symbol_dict[stack[-1]]][symbol_dict[cur_symb]]
    if cur_relation == '<' or cur_relation == '=':
        # Shift: push the input symbol and advance.
        stack.append(cur_symb)
        i += 1
        continue
    elif cur_relation == '>':
        # Reduce: pop the handle back to the nearest '<' relation.
        while cur_relation != '<':
            expr_stack.append(stack.pop())
            cur_relation = precedence_matrix[symbol_dict[stack[-1]]][symbol_dict[expr_stack[-1]]]
        expr_stack.reverse()
        expr_str = ''.join(expr_stack)
        if expr_str not in rules_dict:
            print('Line is invalid')
            break
        else:
            stack.append(rules_dict[expr_str])
            expr_stack = []
    else:
        # No precedence relation defined: syntax error.
        print('Line is invalid')
        break
| [
"32141567+Axis256@users.noreply.github.com"
] | 32141567+Axis256@users.noreply.github.com |
0afce4a486662c21f3f20f8138585aedac36d9b0 | 4aca68e32f6dbced44b85abfdecca20c079f5c94 | /music/urls.py | 790bd265950414806eec255c687e575aeee8b4c5 | [] | no_license | NimishMhatre/Acoustic | 22b21200bceb076fc5eb97c5f4524786133de899 | 831495faa5d9e46f8462d688403de0c0599013cc | refs/heads/master | 2023-02-04T08:07:46.353231 | 2020-12-19T09:28:37 | 2020-12-19T09:28:37 | 322,809,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.urls import path
from . import views
app_name = 'music'  # URL namespace, e.g. reverse('music:index')
urlpatterns = [
    # /music/ -- album index page
    path('', views.IndexView.as_view(), name='index'),
    # /music/<album_id>/ -- detail page for one album
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
] | [
"nimishm27@gmail.com"
] | nimishm27@gmail.com |
ea7631b35d3423abb7a8d4269f81069b44c3a390 | 5e69fa930d8ec69d915d4d7792acf2cb75044e28 | /main.py | 10e6785316aa3c40a9a5103392af717b1f5094d4 | [] | no_license | elizabethrsotomayor/py-set-mutations | 2742e5289b3a1bf3b81372a9d5e26dc1cde810f4 | 7f4e748143eeea3cf8ff0464b1a73a067bd33a39 | refs/heads/main | 2023-07-15T06:48:09.717925 | 2021-09-03T23:26:55 | 2021-09-03T23:26:55 | 402,919,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Read set A: its (unused) element count, then its elements.
size_a = int(input())
tokens = input().split()
A = set([int(tok) for tok in tokens])

# Apply N mutation commands; each command is a line "op [len]" followed by
# a line of element values for the operand set.
command_count = int(input())
for _ in range(0, command_count):
    command = input().split()
    value_line = input().split()
    B = set([int(v) for v in value_line])
    op = command[0]
    # Dispatch to the matching in-place set method; unknown ops are ignored,
    # exactly like the original if/elif chain with no else branch.
    if op in ("intersection_update", "update",
              "symmetric_difference_update", "difference_update"):
        getattr(A, op)(B)

print(sum(A))
| [
"elizabethrsotomayor@gmail.com"
] | elizabethrsotomayor@gmail.com |
32c61b8bb00de7040d5763d5b699413917f4be73 | fddb03d03f22932fcf0be6e1b71b315ac5bba7c4 | /algo.py | fdd149196d7f24898236021b79eb545000dae344 | [] | no_license | tbgracy/youtubeBot | ef90ccce0fa85c4872d7d5e61b209101a06b29e8 | ccc8850510713f7b6d87b6f9451daee4e699faa3 | refs/heads/main | 2023-05-02T02:24:56.303055 | 2021-05-21T14:54:41 | 2021-05-21T14:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | # l = [i for i in range(20)]
def show(list_, page, n):
    """Return the items of *list_* on 1-indexed page *page*, *n* items per page."""
    start = (page - 1) * n
    return list_[start:start + n]
# print(show(l, 6))
| [
"tina@localhost.localdomain"
] | tina@localhost.localdomain |
179a046688ec86cdc0a1838723c43484ef4af058 | 6f57761c60582c546423a2a08c769f18236fd153 | /benchmarks/data/codes/run_pk_param_space.py | da414557736a952df54584c6f3e3878402cbf9b5 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"BSD-2-Clause"
] | permissive | LSSTDESC/CCL | 30644922fead0b017c1056e628bec23cf6bc4dfb | 29d46978445678d86a4bee485cb29d30246ff64a | refs/heads/master | 2023-09-03T17:03:17.012019 | 2023-08-08T11:01:33 | 2023-08-08T11:01:33 | 57,389,367 | 118 | 68 | BSD-3-Clause | 2023-08-30T13:25:25 | 2016-04-29T14:08:38 | C | UTF-8 | Python | false | false | 3,398 | py | #!/usr/bin/env python
"""
Generate a set of CLASS power spectra across a set of sample points in
cosmological parameter space, and compare with CCL.
"""
from param_space import *
import os, sys

# Directory containing the 'class' executable; taken from argv[1] when given.
CLASS_ROOT = None
if len(sys.argv) > 1:
    CLASS_ROOT = sys.argv[1]
assert CLASS_ROOT is not None, \
    "Must specify CLASS_ROOT (as argument or in source file)."

PREFIX = "std"                  # Prefix to use for this run
NSAMP = 100                     # Number of sample points in parameter space
SEED = 10                       # Random seed used when sampling
ZVALS = np.arange(0., 3., 0.5)  # Redshifts at which P(k) is evaluated

# Parameter ranges sampled by the Latin hypercube.
param_dict = {
    'h': (0.55, 0.8),
    'Omega_cdm': (0.15, 0.35),
    'Omega_b': (0.018, 0.052),
    'A_s': (1.5e-9, 2.5e-9),
    'n_s': (0.94, 0.98)
}

# Make sure the output directories for CLASS and CCL results exist.
class_datadir = "%s/data/class" % os.path.abspath(".")
ccl_datadir = "%s/data/ccl" % os.path.abspath(".")
for _outdir in (class_datadir, ccl_datadir):
    if not os.path.exists(_outdir):
        os.makedirs(_outdir)

# Root filenames for the CLASS and CCL output files.
root = "%s/%s" % (class_datadir, PREFIX)
ccl_root = "%s/%s" % (ccl_datadir, PREFIX)

# Draw sample points on a Latin hypercube and record them.
sample_points = generate_latin_hypercube(samples=NSAMP, param_dict=param_dict,
                                         class_root=CLASS_ROOT, seed=SEED)
save_hypercube("%s_params.dat" % root, sample_points)

# Write CLASS .ini files: linear and nonlinear, each at standard and
# enhanced precision.
print("Writing CLASS linear .ini files")
generate_class_ini(sample_points, root="%s_lin_std" % root,
                   nonlinear=False, redshifts=ZVALS)
generate_class_ini(sample_points, root="%s_lin_pre" % root,
                   nonlinear=False, redshifts=ZVALS)
print("Writing CLASS nonlinear .ini files")
generate_class_ini(sample_points, root="%s_nl_std" % root,
                   nonlinear=True, redshifts=ZVALS)
generate_class_ini(sample_points, root="%s_nl_pre" % root,
                   nonlinear=True, redshifts=ZVALS)

# Run CLASS over every generated .ini file (precision run where tagged 'pre').
print("Running CLASS on .ini files")
for variant, precise in (("lin_std", False), ("lin_pre", True),
                         ("nl_std", False), ("nl_pre", True)):
    run_class(fname_pattern="%s_%s_?????.ini" % (root, variant),
              class_root=CLASS_ROOT, precision=precise)

# Run CCL for the same parameter sets (linear first, then nonlinear).
generate_ccl_pspec(sample_points, ccl_root,
                   class_data_root="%s_lin_std" % root,
                   zvals=ZVALS, default_params={'mnu': 0.}, mode='std')
generate_ccl_pspec(sample_points, ccl_root,
                   class_data_root="%s_lin_pre" % root,
                   zvals=ZVALS, default_params={'mnu': 0.}, mode='pre')
generate_ccl_pspec(sample_points, ccl_root,
                   class_data_root="%s_nl_std" % root,
                   zvals=ZVALS, default_params={'mnu': 0.},
                   nonlin=True, mode='std')
generate_ccl_pspec(sample_points, ccl_root,
                   class_data_root="%s_nl_pre" % root,
                   zvals=ZVALS, default_params={'mnu': 0.},
                   nonlin=True, mode='pre')
| [
"philbull@gmail.com"
] | philbull@gmail.com |
cd4365ff9a3b734a11bc2058bfbc4ac4a997349e | 937820b0baba21ebdd95d6abb447476cf3da8c12 | /venv/bin/wheel | 6d37daef56d7d29dcdb5deeabb0eb861c0ac3b04 | [] | no_license | JhonSidney/Genomika2018 | 2398ec674eab7d442f36e4677653ec93804934c7 | db3b1ca65359a748f067f7c64a97fb4352bdf660 | refs/heads/master | 2021-04-30T08:53:40.085390 | 2018-02-18T01:55:46 | 2018-02-18T01:55:46 | 121,386,241 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/home/jhon/PycharmProjects/Genomika2018/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jhon.sidney@gmail.com"
] | jhon.sidney@gmail.com | |
92ef4b4e9614a0d33b0fef08a4e11237e8eb0a19 | e565d656a2f17c82c5948748411862062150fd56 | /Data/Make_data_to_bin.py | 7862061a6c4063afc6133fd0e22da6fb8def6016 | [] | no_license | joytsay/Face_Recognition | 1361712316b309cb2d2f55232763d48acf838852 | aad02a489e9e1f1e253eef5040978e5f23f0a80b | refs/heads/master | 2021-07-11T10:04:58.700574 | 2020-08-06T08:42:05 | 2020-08-06T08:42:05 | 187,615,622 | 1 | 0 | null | 2019-05-20T10:03:20 | 2019-05-20T10:03:20 | null | UTF-8 | Python | false | false | 12,473 | py | import numpy as np
import tensorflow as tf
from PIL import Image
from Net import triplet_loss as triplet
import time
import threading
from queue import Queue
import os
import dlib
from sys import getsizeof
import io
"""
def write_to_bin(f_bin,f_inx,img_path):
with open(img_path,"rb") as f_img:
start_index = f_bin.tell()
f_bin.write(f_img.read())
end_index = f_bin.tell()
f_inx.write("{} {:d} {:d}\n".format(img_path,start_index,end_index))
"""
def write_to_bin(f_bin, f_inx, img_path, img_label, img_bytes):
    """Append *img_bytes* to the open binary file and record its location.

    One CSV row ``path,label,start,end`` is written to the index file, where
    start/end are the byte offsets of this image inside the .bin file.
    """
    offset_before = f_bin.tell()
    f_bin.write(img_bytes)
    offset_after = f_bin.tell()
    record = "{},{:d},{:d},{:d}\n".format(img_path, img_label, offset_before, offset_after)
    f_inx.write(record)
def load_path_lists(data_dir):
    """Collect image paths grouped by identity sub-directory.

    Each immediate sub-directory of *data_dir* is treated as one class; every
    file inside it receives that class index as its label.

    Returns:
        (paths, labels): numpy array of path strings (forward slashes) and a
        matching np.int32 label array.
    """
    entries = [os.path.join(data_dir, name) for name in os.listdir(data_dir)]
    print(entries)  # debug output kept from the original behaviour
    class_dirs = [entry for entry in entries if os.path.isdir(entry)]
    all_paths = []
    all_labels = []
    for class_idx, class_dir in enumerate(class_dirs):
        file_paths = [os.path.join(class_dir, fname).replace("\\", "/")
                      for fname in os.listdir(class_dir)]
        all_paths.extend(file_paths)
        all_labels.extend([class_idx] * len(file_paths))
    return np.array(all_paths), np.array(all_labels, dtype=np.int32)
def load_path_lists_FR(data_dir,flie_name):
    """Parse a face-recognition list file of "path n lm1 lm2 lm3 lm4" lines.

    The identity key of each sample is the second component of its relative
    path (``path.split("/")[1]``).  Returns both per-identity groupings and
    flat arrays:
        (paths_by_id, landmarks_by_id, paths, landmarks, labels)
    NOTE(review): assumes every line carries at least one numeric field
    before the landmark values -- confirm against the list-file format.
    """
    with open(os.path.join(data_dir,flie_name),"r") as f:
        lines = f.readlines()
    lines = [f.strip() for f in lines]
    # Identity key = second component of the relative path.
    label_list = [f.strip().split(" ",1)[0].split("/")[1] for f in lines]
    path_list = np.array([data_dir+"/"+f.strip().split(" ",1)[0].replace("\\","/") for f in lines])
    # Landmarks: drop the first numeric field, keep the rest as float32.
    lm_list = np.array([f.strip().split(" ",1)[1].split(" ")[1:] for f in lines],dtype = np.float32)
    # Group line indices by identity, preserving first-seen order.
    hush_table = {}
    path_list_by_id = []
    lm_list_by_id =[]
    keys_list_temp=[]
    for i,l in enumerate(label_list):
        if l not in hush_table.keys():
            hush_table[l] = [i]
            keys_list_temp.append(l)
        else:
            hush_table[l] += [i]
    for k in keys_list_temp:
        path_list_by_id.append(path_list[hush_table[k]])
        lm_list_by_id.append(lm_list[hush_table[k]])
    # Numeric label i for every sample belonging to the i-th identity.
    labels = []
    for i, ls in enumerate(path_list_by_id):
        labels += [i for num in range(ls.shape[0])]
    labels = np.array(labels, dtype = np.int32)
    del label_list
    del hush_table
    return path_list_by_id, lm_list_by_id, path_list, lm_list, labels
class Data_Thread(threading.Thread):
    """Loader worker: claims batches of image paths from the shared module
    globals, face-aligns each image with dlib, JPEG-encodes the crop and
    pushes the batch dict onto the shared queue for the writer loop."""
    def __init__(self, threadID,batch_size, img_height,img_width,q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.queue = q
        self._batch_size = batch_size
        self._img_height = img_height
        self._img_width = img_width
        self._channels = 3
        self._thread_stop = False
        self._detector = dlib.get_frontal_face_detector()
        self._sp = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")
        # Crop padding ratio handed to dlib.get_face_chip.
        self._padding = 0.15
        self.start_index = 0
        #self._jitter_count = jitter_count
    def get_data(self):
        """Reserve the next batch (under g_Lock), load and align its images,
        and return them JPEG-encoded in a dict; None once the global path
        list is exhausted."""
        global index
        global train_paths
        global train_lms
        global train_labels
        global g_Lock
        global detect_c
        # Claim the slice [m_index, end) of the shared lists for this thread.
        with g_Lock:
            m_index = index
            global_list_len = len(train_paths)
            end = min(m_index + self._batch_size, global_list_len)
            if (m_index == end):
                return None
            m_path_list = train_paths[m_index:end]
            if train_lms.shape[0] == 0:
                m_train_lm = np.array([None])
            else:
                m_train_lm = train_lms[m_index:end]
            m_labels = train_labels[m_index:end]
            index = end
        res = {
            "img": [],
            "path_list": [],
            "label" :[],
            "last_batch": False
        }
        if end == global_list_len:
            res["last_batch"] = True
        count = 0
        for i,path in enumerate(m_path_list):
            #try :
            img = Image.open(path)
            img = img.convert('RGB')
            # Align from provided landmarks when available, otherwise treat
            # the full frame as the face box.
            if m_train_lm.any() == None:
                crop_img, _ = self.Crop_1_face_no_FD (img, self._img_height , self._padding)
            else:
                crop_img, _ = self.Crop_1_face_wiht_lm(img, m_train_lm[i] , self._img_height , self._padding)
            crop_img = Image.fromarray(np.uint8(crop_img))
            #h,w = img.size
            #if (h != 150 or w != 150):
            #    crop_img = img.resize((self._img_height, self._img_width), Image.ANTIALIAS)
            #else:
            #    crop_img = img
            # Store the aligned crop as encoded JPEG bytes.
            byteIO = io.BytesIO()
            crop_img.save(byteIO, format='JPEG')
            res["img"].append(byteIO.getvalue())
            res["path_list"].append(path)
            res["label"].append(m_labels[i])
            count += 1
            #except:
            #    print("index= ",i+ m_index)
            #    print("load_img_error: ",path)
        if len(res["path_list"]) == 0:
            return res
        elif (len(res["path_list"]) < self._batch_size):
            res["img"] = res["img"][0:len(res["path_list"])]
        detect_c += len(res["img"])
        #if self._jitter_count:
        #    list_imgs =[]
        #    for i in range(res["img"].shape[0]):
        #        list_imgs += dlib.jitter_image(np.uint8(res["img"][i]), num_jitters=self._jitter_count, disturb_colors=True)
        #    res["img"] = np.array(list_imgs,dtype = np.float32)
        #res["img"] = pre_process(res["img"])
        return res
    def Crop_1_face_wiht_lm (self,img, lm , size = 224 , padding = 0.25):
        """Align one face from eye landmarks.
        NOTE(review): assumes lm = (left_x, left_y, right_x, right_y) eye
        coordinates -- confirm against the list-file format."""
        h,w = img.size
        # Rough face box: extend outward from the eyes by 1.5 eye-distances.
        eye_dist = lm[2] - lm[0]
        extend = 1.5
        left = int(max(lm[0] - eye_dist*extend+0.5 , 0))
        top = int(max(lm[1] - eye_dist*extend+0.5 , 0))
        rihgt = int(min(lm[2] + eye_dist*extend+0.5,w))
        bottom = int(min(lm[3]+ eye_dist + eye_dist*extend +0.5,h))
        dlib_rect = dlib.rectangle(left,top,rihgt,bottom)
        #img = img.crop((left, top, rihgt, bottom))
        img = np.array(img)
        faces = dlib.full_object_detections()
        faces.append(self._sp(img, dlib_rect))
        image = dlib.get_face_chip(img, faces[0], size, padding)
        return image,1
    def Crop_1_face_no_FD (self,img, size = 224 , padding = 0.25):
        """Align a face assuming the whole image is the face box (the
        detector is not run)."""
        h,w = img.size
        #eye_dist = lm[2] - lm[0]
        #extend = 1.5
        left = 0
        top = 0
        rihgt = w
        bottom = h
        dlib_rect = dlib.rectangle(left,top,rihgt,bottom)
        #img = img.crop((left, top, rihgt, bottom))
        img = np.array(img)
        faces = dlib.full_object_detections()
        faces.append(self._sp(img, dlib_rect))
        image = dlib.get_face_chip(img, faces[0], size, padding)
        return image,1
    def FD_Crop_1_face (self,img , size = 224 , padding = 0.25):
        """Run the dlib face detector; when several faces are found keep the
        one closest to the image centre.  Returns
        (chip, num_faces, rect, shape) or (None, 0, None, None)."""
        img = np.array(img)
        dets = self._detector(img)
        num_face = len(dets)
        index = 0
        if num_face == 0:
            #print ("no_face")
            return None , num_face, None, None
        elif num_face > 1:
            # Pick the detection whose centre is nearest the image centre.
            distance = 100000000;
            img_center_x = img.shape[0] * 0.5;
            img_center_y = img.shape[1] * 0.5;
            for i,det in enumerate(dets):
                center_x = ( det.left() + det.right() ) * 0.5;
                center_y = ( det.bottom() + det.top() ) * 0.5;
                temp_dis = (img_center_x - center_x)**2 + (img_center_y - center_y)**2
                if (temp_dis < distance):
                    distance = temp_dis
                    index = i
        faces = dlib.full_object_detections()
        faces.append(self._sp(img, dets[index]))
        image = dlib.get_face_chip(img, faces[0], size, padding)
        return image, num_face, dets[index], faces[0]
    def run(self):
        """Thread body: keep producing batches onto the queue until the
        shared path list is exhausted or a stop is requested."""
        global index
        global train_paths
        while not self._thread_stop:
            if index > len(train_paths):
                self._thread_stop = True
                break
            datas = self.get_data()
            if datas != None:
                if len(datas["path_list"]) == 0:
                    continue
                self.queue.put(datas)
            else:
                self._thread_stop = True
                break
            #try:
            #self.queue.put(datas,True,100)
            #except:
            #    print ("get time_out Thread_ID = %d" % self.threadID)
        print ("Load_Thread_ID = %d run end" % self.threadID)
# Driver: load the face-recognition list, start the loader threads and
# stream every aligned face into one .bin file plus a CSV .idx offset file.
tf.reset_default_graph()
#data_dir = "training/age_data"
#floder = "__age_valid_2"
#train_paths, train_labels = load_path_lists(data_dir+"/"+floder)
#train_lms = np.array([])
data_dir = "training/FR_original_data"
FR_file_name = "West_training"
_,_,train_paths,train_lms,train_labels = load_path_lists_FR(data_dir,FR_file_name)
# Shared state consumed by the Data_Thread workers.
index = 0
FD_Lost_c = 0
detect_c = 0
g_Lock = threading.Lock()
print(train_paths[0])
print(train_paths.shape)
print(train_lms.shape)
print(train_labels.shape)
my_queue = Queue(maxsize=100)
batch_size = 20
thread_num = 1
img_H = 224
img_W = 224
jitter_count = 0
data_loader = []
for i in range(thread_num):
    data_loader.append(Data_Thread(i+1,batch_size, img_H,img_W, my_queue))
    data_loader[i].start()
jitter_count = 0
run_count = 0
last_batch = False
target_floder = data_dir
bin_path = os.path.join(target_floder,"FR_west_training_pad_15.bin")
idx_path = os.path.join(target_floder,"FR_west_training_pad_15.idx")
# Consume batches until the last batch was seen and every worker stopped.
while(1):
    if run_count%50==0:
        print ("batch_run= ",run_count," Index= ", index)
    run_count += 1
    if last_batch and my_queue.empty():
        test_bool = True
        for i in range(thread_num):
            test_bool = (test_bool and data_loader[i]._thread_stop)
        if test_bool:
            break
    test_batch = my_queue.get()
    my_queue.task_done()
    if not last_batch:
        last_batch = test_batch["last_batch"]
    count = 0
    """
    if jitter_count:
        list_imgs =[]
        for i in range(test_batch["img"].shape[0]):
            print(count)
            count += 1
            list_imgs += dlib.jitter_image(np.uint8(test_batch["img"][i]), num_jitters=jitter_count, disturb_colors=True)
        imgs = np.array(list_imgs,dtype = np.float32)
    else:
        imgs = test_batch["img"]
    """
    imgs = test_batch["img"]
    """
    if jitter_count:
        for i,path in enumerate(test_batch["path_list"]):
            file_name, ext = path.rsplit("\\",1)[-1].rsplit(".",1)
            for j in range(jitter_count):
                img_s= Image.fromarray(np.uint8(imgs[i*jitter_count + j]))
                new_path = os.path.join(floder,file_name+"_"+str(j)+"."+ext)
                print(new_path)
                img_s.save(new_path)
    else:
        for i,path in enumerate(test_batch["path_list"]):
            img_s= Image.fromarray(np.uint8(imgs[i]))
            img_s.save(os.path.join(floder,path.rsplit("\\",1)[-1]))
    """
    # Append every JPEG of this batch to the .bin and log its offsets to .idx.
    f_bin = open(bin_path,"ab")
    f_inx = open(idx_path,"a")
    for i,path in enumerate(test_batch["path_list"]):
        #save_path = os.path.join(target,path.split("/",2)[-1])
        save_path = path.split(data_dir)[-1].split("/",1)[-1]
        label = test_batch["label"][i]
        #print(save_path)
        write_to_bin(f_bin,f_inx,save_path,label,imgs[i])
        #if not os.path.exists(save_path.rsplit("/",1)[0]):
        #    os.makedirs(save_path.rsplit("/",1)[0])
        #try:
        #    img_s= Image.fromarray(np.uint8(imgs[i]))
        #    img_s.save(save_path)
        #except:
        #    print("Error: ",save_path)
    f_bin.close()
    f_inx.close()
# Signal the workers to stop and wait for them to finish.
for i in range(thread_num):
    data_loader[i]._thread_stop=True
    data_loader[i].join()
"""
if __name__ == "__main__":
    main()
"""
"noreply@github.com"
] | joytsay.noreply@github.com |
12cec63da6e971b5cad8de43fcf79f03f00ad0f0 | 5ef8ca2f94f39cd826e617f1c9106a3630021cef | /realestate/realestateProject/realestateProject/asgi.py | dd9dfc9ef8f69f763d0fd5457804157553a5bd54 | [] | no_license | kyliecamila/django_project | 236ffa7b04600d70b478dfb482dc108a5802a992 | 8d7fd922bf020a9eb49691aece9e29b49c0d94ba | refs/heads/master | 2023-04-27T03:41:32.807111 | 2021-05-16T12:55:59 | 2021-05-16T12:55:59 | 366,652,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for realestateProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'realestateProject.settings')
application = get_asgi_application()
| [
"kyliecamila@korea.ac.kr"
] | kyliecamila@korea.ac.kr |
7c8adfc6ac9b7c4d2cfc7646e4a81d2e1b1bf46f | 92261e39149278c18580a2f9961c9209eee1899e | /MODELS/sam.py | 879205206dd0b000e9469fdd7ff3919f5db6023e | [
"MIT"
] | permissive | tugui/attention-module | 4db5c8a49fd090d4342df4d9fd3dfd22a8756ef6 | 67b19ed2b5029d0234617e597aad0364c646295e | refs/heads/master | 2022-11-06T23:42:37.653052 | 2020-06-23T02:46:14 | 2020-06-23T02:46:14 | 273,424,518 | 0 | 0 | null | 2020-06-19T06:47:54 | 2020-06-19T06:47:54 | null | UTF-8 | Python | false | false | 3,009 | py | import torch
import math
import torch.nn as nn
class BasicBlock(nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional activation building block.

    Args:
        in_channel, out_channel: channel counts for the convolution.
        kernel_size, stride, padding, dilation, groups, bias: forwarded to
            ``nn.Conv2d``.
        activation: 'relu', 'leakyrelu' or None (identity).
        bn: attach a BatchNorm2d layer when True.
    """
    def __init__(self, in_channel, out_channel, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, activation=None, bn=True, bias=False):
        super(BasicBlock, self).__init__()
        self.conv = nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_channel, eps=1e-5, momentum=0.01, affine=True) if bn else None
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'leakyrelu':
            # BUG FIX: the module class is nn.LeakyReLU; nn.leakyReLU raised
            # AttributeError whenever this branch was taken.
            self.activation = nn.LeakyReLU()
        else:
            self.activation = None

    def forward(self, x):
        """Apply the convolution, then batch-norm and activation when configured."""
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
class SpatialGate(nn.Module):
    """Spatial attention gate: a 1-channel conv map squashed by sigmoid
    rescales the input feature map element-wise.

    Args:
        in_channel: channels of the incoming feature map.
        kernel_size: conv kernel size for the gate (and optional pre-layers).
        block_num: number of BasicBlock pre-layers before the 1-channel conv.
    """
    def __init__(self, in_channel, kernel_size=3, block_num=0):
        super(SpatialGate, self).__init__()
        if block_num > 0:
            self.layer = self._make_layer(in_channel, block_num, kernel_size)
        else:
            self.layer = None
        self.spatial = BasicBlock(in_channel, 1, kernel_size, padding=(kernel_size-1) // 2)

    def forward(self, x):
        """Return x scaled by sigmoid(spatial attention map)."""
        x_out = self.spatial(self.layer(x) if self.layer is not None else x)
        scale = torch.sigmoid(x_out)
        return x * scale

    def _make_layer(self, in_channel, block_num, kernel_size=3):
        # BUG FIX: kernel_size was previously read from an undefined global
        # name, so any block_num > 0 raised NameError; it is now a parameter
        # (with the original default so old call sites keep working).
        layers = []
        for i in range(block_num):
            layers.append(BasicBlock(in_channel, in_channel, kernel_size, padding=(kernel_size-1) // 2))
        return nn.Sequential(*layers)
class SpatialGate2(nn.Module):
    """Spatial attention gate variant: the 1-channel conv map is squashed by
    tanh (signed scale in [-1, 1]) instead of sigmoid.

    Args:
        in_channel: channels of the incoming feature map.
        kernel_size: conv kernel size for the gate (and optional pre-layers).
        block_num: number of BasicBlock pre-layers before the 1-channel conv.
    """
    def __init__(self, in_channel, kernel_size=3, block_num=0):
        super(SpatialGate2, self).__init__()
        if block_num > 0:
            self.layer = self._make_layer(in_channel, block_num, kernel_size)
        else:
            self.layer = None
        self.spatial = BasicBlock(in_channel, 1, kernel_size, padding=(kernel_size-1) // 2)

    def forward(self, x):
        """Return x scaled by tanh(spatial attention map)."""
        x_out = self.spatial(self.layer(x) if self.layer is not None else x)
        scale = torch.tanh(x_out)
        return x * scale

    def _make_layer(self, in_channel, block_num, kernel_size=3):
        # BUG FIX: kernel_size was previously read from an undefined global
        # name, so any block_num > 0 raised NameError; it is now a parameter
        # (with the original default so old call sites keep working).
        layers = []
        for i in range(block_num):
            layers.append(BasicBlock(in_channel, in_channel, kernel_size, padding=(kernel_size-1) // 2))
        return nn.Sequential(*layers)
class SAM(nn.Module):
    """Spatial Attention Module: a thin wrapper around one SpatialGate."""
    def __init__(self, in_channel, kernel_size, block_num):
        super(SAM, self).__init__()
        self.SpatialGate = SpatialGate(in_channel, kernel_size, block_num)

    def forward(self, x):
        gated = self.SpatialGate(x)
        return gated
class SAM2(nn.Module):
    """Variant of SAM built on the tanh-scaled SpatialGate2."""
    def __init__(self, in_channel, kernel_size, block_num):
        super(SAM2, self).__init__()
        self.SpatialGate = SpatialGate2(in_channel, kernel_size, block_num)

    def forward(self, x):
        # BUG FIX: forward used to call self.SpatialGate2, an attribute that
        # was never assigned (the gate is stored as self.SpatialGate), so
        # every call raised AttributeError.
        return self.SpatialGate(x)
| [
"350247720@qq.com"
] | 350247720@qq.com |
7581a3a62e3915d8b3671f6ab9dc3e0ace45f6b7 | 4a6f878b8b74178092bb999f6b114467e2953f30 | /src/env/bin/pip3.7 | 46318dcc82f03b7be613311fc2db334b4402e29c | [] | no_license | jgarcia1599/SmallWorld_WebApp | 5f0973cebd61cd3981eb65cb1f4b3bdb3659d41a | dcbc77c8ff6a5b7a9e64267e77abbea42cf13c8b | refs/heads/master | 2023-05-26T01:38:15.203252 | 2020-05-14T16:53:33 | 2020-05-14T16:53:33 | 254,818,105 | 0 | 0 | null | 2021-06-10T22:54:23 | 2020-04-11T07:45:15 | HTML | UTF-8 | Python | false | false | 304 | 7 | #!/Users/juniorgarcia/Documents/Classes/Spring_2020/software_engineering/SmallWorld/env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"junior.f.garcia99@gmail.com"
] | junior.f.garcia99@gmail.com |
d6a9f2dd57d0dcad5f00719ba180996a44fa52ee | 5064e15590cb6ac4e0fa8158a68570f7cef067dc | /pages/create_recipe.py | c820d1f0e346c65d187a3c20b2f3b526a72aa8d3 | [
"MIT"
] | permissive | joniahola/RuokalistaCommandLine | e190d74c67b0dd26427519f55cb2418d5433ceb2 | c565b720f3560507db16b1003979fe3641a29965 | refs/heads/master | 2022-03-09T05:48:12.225547 | 2014-10-09T08:56:10 | 2014-10-09T08:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | # -*- coding: utf-8 -*-
from modules import *
from classes import *
def main():
    """Recipe-creation menu (Python 2, Finnish prompts): dispatch to the
    single-recipe, shared-category or per-recipe-category flow; typing
    'exit' returns to the caller."""
    clear()
    print "RESEPTIN LUONTI (pääset takaisin kirjoittamalla 'exit')"
    print "Kirjoita 1 jos haluat luoda yhden reseptin"
    print "Kirjoita 2 jos haluat luoda useamman reseptin samalla kategorialla"
    print "Kirjoita 3 jos haluat luoda useamman reseptin eri kategorioilla (samoja voi myös käyttää)"
    select = read_int("Pääset reseptin luonnista pois kirjoittamalla 'exit'")
    if select == 'exit': return
    elif select == 1:
        single_recipe()
    elif select == 2:
        multiple_recipe_with_same_category()
    elif select == 3:
        multiple_recipe_with_different_category()
def single_recipe():
    """Prompt for one recipe name and category, save it, then offer to
    create another.  Typing 'exit' at any prompt aborts without saving."""
    name = read_input("Kirjoita reseptin nimi:")
    if name == 'exit':
        return
    category = read_input("Hyvä. Kirjoitappa sitten mihin kategoriaan se kuuluu?")
    if category == 'exit':
        return
    recept = Recipe(name, category)
    save_recept_to_file(recept)
    print "Talennus onnistui!"
    print "Haluatko lisätä uuden reseptin?"
    print "Kirjoita 'K', jos haluat tehdä uuden"
    select = read_input("Muutoin palataan pääsivulle", True)
    # 'K' loops recursively into another single-recipe round.
    if select == 'K': single_recipe()
def multiple_recipe_with_same_category():
    """Ask for one category, then keep saving recipes under it until the
    user types 'exit' or submits an empty name."""
    category = read_input("Mihin kategoriaan reseptit kuuluu?")
    if category == 'exit':
        return
    while True:
        name = read_input("Kirjoita reseptin nimi:", True)
        if name == 'exit':
            return
        elif name == '':
            print "tyhjä"
            return
        recept = Recipe(name, category)
        save_recept_to_file(recept)
        press_to_continue("Talennus onnistui! (pääset päävalikkoon kirjoittamalla 'exit')")
def multiple_recipe_with_different_category():
    """Repeatedly ask for a recipe name and its own category and save each
    one; typing 'exit' at either prompt returns to the caller."""
    while True:
        name = read_input("Kirjoita reseptin nimi:")
        if name == 'exit':
            return
        category = read_input("Hyvä. Kirjoitappa sitten mihin kategoriaan se kuuluu?")
        if category == 'exit':
            return
        recept = Recipe(name, category)
        save_recept_to_file(recept)
        press_to_continue("Talennus onnistui! (pääset päävalikkoon kirjoittamalla 'exit')")
| [
"joni@juotos.fi"
] | joni@juotos.fi |
d6f5fa3386159b6965ed7333dd16ed1db4d16433 | 048a189f16b7c3d76eec85d86bad6fcebca2afef | /cvat_backup.py | e19011a149f2f23e80fddea2976e708d8e2bd5a4 | [] | no_license | kmori20201226/showcase | 22f16dd1336a9a3604ec17c695aa0a2f8df51d63 | a2af69e6b90ef395cd241df5e6c0b9a82a804fe6 | refs/heads/main | 2023-03-24T18:40:42.008095 | 2021-03-22T00:41:17 | 2021-03-22T00:41:17 | 349,869,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,653 | py | """cvat のタスクを調べ、アノテーションを所定の場所に保存する。
保存されるファイル名は <タスク名>_<日付>.xml となる。同名のファイルが存在する
場合は <タスク名>_<日付>_<番号>.xml となる。この場合番号は 00 からファイル名が
かぶらない番号が付けられる。
このプログラムは前回アノテーションを取得された時のデータの更新日時が覚えられ、
その日時から更新が入っていないタスクについてはなにもしない。
前回アノテーションを更新日時は、 cvat_task_timestamp.csv というファイルに記載される。
必須モジュール
requests
"""
import os
import sys
import pprint
import requests
import json
from io import BytesIO
from zipfile import ZipFile
import time
import urllib3
# ルートURL の後に /api/v1 を必ずつける
CVAT_API_URL = "https://cvat.org/api/v1"
CVAT_LOGIN_USER = "<your login user>"
CVAT_LOGIN_EMAIL = "<your email address>"
CVAT_LOGIN_PASSWORD = "<your password>"
SAVE_PATH = r"<backup-path eg. c:\temp>"
def saving_name(task_name):
    """Build a non-clashing save path "<task>_<YYYYMMDD>[_NN].xml" under SAVE_PATH.

    When the dated name already exists, a two-digit counter starting at 00
    is appended until an unused name is found.
    """
    date_tag = time.strftime("%Y%m%d")
    candidate = os.path.join(SAVE_PATH, "%s_%s.xml" % (task_name, date_tag))
    counter = 0
    while os.path.exists(candidate):
        candidate = os.path.join(SAVE_PATH, "%s_%s_%02d.xml" % (task_name, date_tag, counter))
        counter += 1
    return candidate
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {
"accept": "application/json",
"Content-Type": "application/json",
}
MAX_RETRY = 3
TIMESTAMP_FILE = "cvat_task_timestamp.csv"
class TaskTimestamp:
    """Remembers, per task id, the task name and its last-seen update time.

    Records persist as tab-separated lines (id, name, timestamp) in
    TIMESTAMP_FILE next to this script; only tasks whose update time moved
    forward since the recorded value need re-downloading.
    """
    def __init__(self):
        self._tspfile = os.path.join(os.path.dirname(__file__), TIMESTAMP_FILE)
        self._task_tsp = {}

    def read(self):
        """Load previously recorded timestamps, if the file exists."""
        if not os.path.exists(self._tspfile):
            return
        with open(self._tspfile) as fh:
            for raw in fh:
                fields = raw.rstrip().split("\t")
                self._task_tsp[int(fields[0])] = (fields[1], fields[2])

    def write(self):
        """Persist all known timestamps back to the file."""
        with open(self._tspfile, "w") as fh:
            for task_id, (name, stamp) in self._task_tsp.items():
                print("%s\t%s\t%s" % (task_id, name, stamp), file=fh)

    def is_newer(self, task_id, upd_time):
        """True when *upd_time* is later than the recorded one, or when the
        task has never been recorded."""
        known = self._task_tsp.get(task_id)
        return True if known is None else upd_time > known[1]

    def set_update_time(self, task_id, name, upd_time):
        """Record (or overwrite) the name and update time for *task_id*."""
        self._task_tsp[task_id] = (name, upd_time)
def main():
    """Log in to cvat, then download and save annotations for every task
    that changed since the last recorded backup.

    Raises:
        Exception: on login failure, or when a task download keeps failing
        after MAX_RETRY attempts.
    """
    # Authenticate: credentials are posted as JSON; the response carries the
    # API token under 'key'.
    payload = {
        'username': CVAT_LOGIN_USER,
        'email': CVAT_LOGIN_EMAIL,
        'password': CVAT_LOGIN_PASSWORD
    }
    json_data = json.dumps(payload).encode("utf-8")
    response = requests.post(
        f'{CVAT_API_URL}/auth/login',
        headers=headers,
        data=json_data,
        verify=False
    )
    if response.status_code != 200:
        raise Exception("cvat にログインできません Status=%d" % (response.status_code,))
    login_response = response.json()
    try:
        key = login_response['key']
    except KeyError:
        # Narrowed from a bare `except:` so unrelated errors are not masked.
        raise Exception("cvat にログインできません (key が見つからない)")
    headers.update(
        {'Authorization': 'Token ' + key}
    )
    # Fetch the task list.
    response = requests.get(
        f'{CVAT_API_URL}/tasks',
        headers = headers,
        verify=False
    )
    tasks = response.json()
    tsp = TaskTimestamp()
    tsp.read()
    for task in tasks['results']:
        task_id = task['id']
        task_name = task['name']
        upd_time = task['updated_date']
        # Skip tasks that have not changed since the last recorded backup.
        if not tsp.is_newer(task_id, upd_time):
            print("%s[#%s] 変更なし" % (task_name, task_id))
            continue
        again = True
        retry_cnt = 0
        while again and retry_cnt < MAX_RETRY:
            retry_cnt += 1
            response = requests.get(
                f'{CVAT_API_URL}/tasks/{task_id}/annotations',
                params = {
                    "format": "CVAT for images 1.1",
                    #"format": "COCO 1.0",
                    "filename": "output.zip",
                    "action": "download"
                },
                headers = headers,
                verify = False
            )
            if response.status_code == 200:
                # The export arrives as a zip; save each member under
                # SAVE_PATH and record the task's new timestamp.
                # (An unused utf-8 decode of the payload was removed: it
                # served no purpose and could fail on non-UTF-8 content.)
                zip_contents = ZipFile(BytesIO(response.content))
                for name in zip_contents.namelist():
                    b = zip_contents.read(name)
                    sav_name = saving_name(task_name)
                    with open(sav_name, "wb") as outf:
                        outf.write(b)
                    print("%s[#%s] 保存 %s" % (task_name, task_id, sav_name))
                    tsp.set_update_time(task_id, task_name, upd_time)
                again = False
            else:
                time.sleep(1)
        if again:
            # BUG FIX: the old check `retry_cnt == MAX_RETRY` also raised
            # when the download succeeded on the final retry; `again` is
            # True only when every attempt failed.
            raise Exception("cvat からタスクデータの読み込み中にエラーが発生しました %s" % (response.status_code,))
    tsp.write()
# Script entry point: run the backup; exit status 1 on any error, 0 on success.
if __name__== '__main__':
    try:
        main()
    except Exception as ex:
        print(ex)
        sys.exit(1)
    sys.exit(0)
| [
"mori@yscc.co.jp"
] | mori@yscc.co.jp |
b2273cc6001e9679fb38668b27765077c84a0295 | eb6f4797185b4e4f579c9d530b4b0254a4cf0d81 | /recursion/foundation_algos/print_combinations.py | 9ffd71004bd1f1c1dadc32380569dddd908ad7db | [] | no_license | DeviAyyagari/interviewPrep | c67ebf18034ac90dd6d59a88a163b16bb55c195e | 6b65acede92afe1e959a4b5de2733dbe12665d42 | refs/heads/main | 2023-05-01T11:40:03.596005 | 2021-05-17T22:36:12 | 2021-05-17T22:36:12 | 364,025,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | """
Problem Statement: Given a set S of n distinct numbers, print all its subsets.
Intuition: In the recursive tree, at root node, you choose to either include the
element or exclude the element from the subset. There are 2 choices to make for every
node at every level. Every node at every level is a valid solution.
NOTE: In the case of permuations, the root node will have n choices to make and the number of
choices that can be made by nodes at subsequent levels decreases as height increases.
Time Complexity: There are 2 nodes at level 1, 2^2 nodes at level 2, ...., 2^n nodes
at level n. Total number of nodes = 2 + 2^2 + 2^3 + .... + 2^n ~ O(2^n).
Every node at every level performs O(n) work(printing). Total complexity
~ O(n * 2^n)
Space Complexity: At any point in time only one leaf nodes' tree structure
resides on the call stack. The height of the tree is n.
Space complexity: O(n)
"""
def print_combinations(numbers):
    """Print every subset of *numbers*, one subset per line."""
    empty_slate = ""
    print_combinations_helper(empty_slate, numbers)
def print_combinations_helper(slate, domain):
    """Recursive worker: include or exclude the last element of *domain*.

    Each leaf of the include/exclude tree prints the accumulated *slate*,
    so all 2^n subsets are emitted (the empty subset prints a blank line).
    """
    if not domain:
        print(slate)
        return
    last = domain[-1]
    remaining = domain[:-1]
    # Branch 1: include the element in the subset.
    print_combinations_helper(slate + str(last), remaining)
    # Branch 2: leave the element out.
    print_combinations_helper(slate, remaining)
print_combinations_helper(slate, new_domain)
if __name__ == "__main__":
test_set = [[1], [1, 2], [1,2,3]]
for each in test_set:
print_combinations(each) | [
"akdevi.92@gmail.com"
] | akdevi.92@gmail.com |
bed7788bca74ac533df2a8c80710fad2f2fac7eb | be0dc98761ccd534b77f9cd030b2b92886f8cd19 | /predavanje11/tkinter9.py | 158bf0c1cb669cba4fd1aa68365f847e2f616a45 | [
"MIT"
] | permissive | vmilkovic/uvod_u_programiranje | 7e0c662b1dffc8097edb5bb5a4a532f2c3a9fab0 | 209611e38c8fe84c727649df4b868a4278eb77c3 | refs/heads/master | 2022-04-07T00:33:17.059137 | 2020-02-16T19:28:41 | 2020-02-16T19:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from tkinter import Tk, Canvas
glavni = Tk()
c1 = Canvas(glavni, bg="blue", height=200, width=400)
#height i width unutar Canvas naredbe nema utjecaj, te vrijednosti se postavljaju unutar place metode
c1.place(height=200, width=400)
c2 = Canvas(glavni, bg="red")
c2.place(height=200, width=400)
#plavi canvas se ne vidi jer je crveni preko njega
c3 = Canvas(glavni, bg="green", height=200, width=200)
c3.place()
#zeleni canvas se ne vidi jer mu nisu definirane širina i visina u metodi place
glavni.mainloop() | [
"vedran.milkovic25@gmail.com"
] | vedran.milkovic25@gmail.com |
0dfb4e900ca15f51a299211a28da3fbd5aa42d53 | 12e85a370874821f7eb5bfa69aced21535082f39 | /gravity_game_core.py | 359f9775ea0251ef121bd38c90913aaa023436f9 | [] | no_license | mattgilmartin/Gravity_Game | d7be68338530a0a9dbc363ba7f61d089490b69cb | 904f29edc93219b626b7a4f0dfe57272ab7916a5 | refs/heads/master | 2020-12-20T01:09:38.032117 | 2020-01-24T00:04:04 | 2020-01-24T00:04:04 | 235,909,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 13 20:01:57 2019
@author: Matt
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 11:01:52 2019
http://usingpython.com/pygame-tilemaps/
@author: usingpython.com
"""
import pygame
from pygame.locals import *
import render_engine, physics_engine, events_engine, init_game, resources
import random, time
class App:
    """Top-level game object: owns the pygame window, the main loop and the
    end-of-game screen."""
    def __init__(self):
        self._running = True
        self._display_surf = None
        self.tilesize = 64
        self.mapwidth = 25
        self.mapheight = 12
        self.size = (1920,1080)
## Clean Up Game (Exit) -------------------------------------------------------
    def cleanup_(self):
        """Shut pygame down."""
        pygame.quit()
## Execute Game Functions -----------------------------------------------------
    def execute_(self):
        """Initialise the game, run the event/physics/render loop until a
        win or loss, then hold the end screen until any key is pressed."""
        init_game.init_game(self)
        # if self.init_game.init_game() == False:
        #     self._running = False
        while self._running:
            for event in pygame.event.get():
                events_engine.process_events(self,event)
            events_engine.process_pressed(self)
            physics_engine.run_physics(self)
            render_engine.render(self)
            # print(len(self.jet_objs)) # Number of Active Jet particles
            # Endgame Conditions
            if self.victory:
                # render_engine.message_display(self,'Mission Sucess!')
                # Play Victory Sound
                pygame.mixer.Sound(r'..\sounds\airhorn_mult.wav').play()
                # NOTE(review): '=+' assigns +fuel rather than accumulating;
                # if a running total was intended this should be '+=' -- confirm.
                self.score =+self.player.fuel
                self._running = False
            if self.failure:
                # render_engine.message_display(self,'Failure!')
                pygame.mixer.Sound(r'..\sounds\priceIsRightTrombone.wav').play()
                self._running = False
            # print(self._running,self.victory, self.failure)
        # while True:
        #     if pygame.event.get():
        # End screen: show the prompt and wait for any key press.
        text_obj = self.fuel_font.render(str('Press Any Key to Continue'),True,[255,255,255])
        self._display_surf.blit(text_obj,(1000,1750))
        pygame.display.update()
        hold_screen = True
        while hold_screen:
            for event in pygame.event.get():
                if event.type == KEYDOWN:
                    hold_screen = False
        # time.sleep(10)
        # self.cleanup_()
if __name__ == "__main__" :
theApp = App()
theApp.execute_() | [
"mgilmartin3@gatech.edu"
] | mgilmartin3@gatech.edu |
5ab38604ea5882b183fe3ba6d9388fdd549432d4 | e45aeb09ffe598010888599d0e38142b57ca602f | /video/videoproject/users/views.py | 1eec15cfc611b1173d68ebd73cf4044b59ccb354 | [] | no_license | Chen2358/Python-example | c89ede19bfea6e93f9bb7e2a556d7dfdc47a1bc8 | fcec266f1b8916394832cfd03a75a8616ba22f7a | refs/heads/master | 2021-06-22T07:13:45.305679 | 2020-12-23T03:49:59 | 2020-12-23T03:49:59 | 145,352,383 | 0 | 0 | null | 2019-11-13T09:53:04 | 2018-08-20T01:41:37 | Python | UTF-8 | Python | false | false | 151 | py | from django.shortcuts import render
from django.contrib.auth import authenticate, login as auth_login, louout as auth_logout
# Create your views here.
| [
"chen@2358"
] | chen@2358 |
cf2f6f420b3a8781e72d4fa7fe5407a64dfa640f | 9343d475b1fc10d2accffc60ad5bcbc99048b0bf | /Problem023.py | 8c1828051dd1592c9bd7426d286bf0aebb735015 | [] | no_license | amcameron/Project-Euler-Python | 4260940ad07f8c7483d6235eb7266cb0a7e42dba | 5f88a4abfe19583c39ac2c904d286a8769e860d3 | refs/heads/master | 2021-01-01T19:16:20.799115 | 2011-07-03T16:27:49 | 2011-07-03T16:27:49 | 278,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,816 | py | from utils import IsPrime
from itertools import combinations
_isPrime = IsPrime()
def primeFactors(n):
"""Find the list of prime factors of an integer n.
For example, the prime factors of 12 are
2, 2, and 3.
Factors are returned in a monotonic increasing list.
For example, primeFactors(12) will never return
[2, 3, 2] or [3, 2, 2].
"""
if int(n) != n:
raise TypeError("cannot find the factors of a non-integer.")
if n < 1:
raise ValueError("cannot find the factors of a non-positive integer.")
# initialize list of prime factors and IsPrime object
facts = []
# ensure list of known primes is long enough to state whether
# n is prime or compound
_isPrime.extend(n)
# divide by factors until none remain
for prime in _isPrime.primes:
while n % prime == 0:
facts.append(prime)
n /= prime
if n == 1:
break
return facts
def properFactors(n):
"""Find the set of proper factors of an integer n.
For example, the proper factors of 12 are
1, 2, 3, 4, and 6.
"""
# The proper factors of a number can be found by taking
# the product of all unique combinations of its prime factors,
# except for all of the prime factors (this just yields the number).
# For example, the prime factorization of 12 yiels [2, 2, 3].
# The possible combinations are:
# [2, 2, 3] nCr 0: []
# [2, 2, 3] nCr 1: [2, 3]
# [2, 2, 3] nCr 2: [[2, 2], [2, 3]]
# [2, 2, 3] nCr 3: not included, because prod([2, 2, 3]) = 12.
# The products of these are [1], [2, 3], and [4, 6], respectively.
# Therefore, the proper factors of 12 are [1, 2, 3, 4, 6].
primeFacts = primeFactors(n)
properFacts = set()
# Iterate over the number of combinations to take (that is, the
# r in "nCr"), but don't take the combination of ALL prime
# factors. (Do take the combination of NONE of them: 1 is
# always a proper factor.)
for i in xrange(len(primeFacts)):
# Find all combinations of i elements, and take the products
# of each of these combinations. We might end up with
# duplicates, so use set() to remove them. Then add them
# to the list of proper factors!
properFacts.update(map(_product, combinations(primeFacts, i)))
return properFacts
def _product(seq):
"""Find the product of all elements in a sequence.
The product of an empty sequence is the multiplicative
identity: 1.
"""
# Make sure the sequence can behave like a list
# (so that we can pop it)
seq = list(seq)
if len(seq) == 0:
return 1
else:
return seq.pop() * _product(seq)
def isAbundant(n):
"""A number n is abundant if the sum of its proper factors
is greater than the number itself.
For example, 12 is abundant because 1 + 2 + 3 + 4 + 6 = 16,
and 16 > 12.
"""
return sum(properFactors(n)) > n
def isPerfect(n):
"""A number n is perfect if the sum of its proper factors
is exactly equal to the number itself.
For example, 6 is perfect because 1 + 2 + 3 = 6.
"""
return sum(properFactors(n)) == n
def isDeficient(n):
"""A number n is deficient if the sum of its proper factors
is less than the number itself.
For example, 15 is deficient because 1 + 3 + 5 = 8,
and 8 < 15.
"""
return sum(properFactors(n)) < n
class abundantSum:
"""Return True if a number n can be expressed as the sum of two
abundant numbers, False otherwise.
"""
def __init__(self):
self.abundants = [12]
self.sums = set([24])
def __call__(self, n):
if self.abundants[-1] < n:
self.extend(n)
return n in self.sums
def extend(self, n):
self.abundants.extend(num
for num in xrange(self.abundants[-1] + 1, n + 1)
if isAbundant(num))
self.sums = set(map(sum, combinations(self.abundants, 2)))
self.sums.update(2*num for num in self.abundants)
if __name__ == "__main__":
a = abundantSum()
a.extend(28124)
print sum(filter(lambda num: not a(num), xrange(1, 28124)))
| [
"amcameron@gmail.com"
] | amcameron@gmail.com |
4db596e6c7138de5c3da2c08096ca5ff74647e04 | 0bc777a57e39c466a9482af9a6eda698ab3c1437 | /HeavyIonsAnalysis/JetAnalysis/python/jets/akPu3PFJetSequence_PbPb_data_cff.py | 51ef615578166024b0ea28233b748df5a4cde187 | [] | no_license | stahlleiton/cmssw | 3c78d80b9372fdf2a37f424372504b23c9dc4f78 | fcfda663dc8c315b505eb6bcc7e936401c01c4d1 | refs/heads/EWQAnalysis2017_8030 | 2023-08-23T13:50:40.837198 | 2017-11-09T17:45:31 | 2017-11-09T17:45:31 | 45,795,305 | 0 | 3 | null | 2021-04-30T07:36:28 | 2015-11-08T19:28:54 | C++ | UTF-8 | Python | false | false | 14,698 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akPu3PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu3PFJets"),
matched = cms.InputTag("ak3HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.3
)
akPu3PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("ak3HiSignalGenJets"),
matched = cms.InputTag("ak3HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.3
)
akPu3PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPu3PFJets")
)
akPu3PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu3PFJets"),
payload = "AKPu3PF_offline"
)
akPu3PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPu3CaloJets'))
#akPu3PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak3HiSignalGenJets'))
akPu3PFbTagger = bTaggers("akPu3PF",0.3)
#create objects locally since they dont load properly otherwise
#akPu3PFmatch = akPu3PFbTagger.match
akPu3PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPu3PFJets"), matched = cms.InputTag("hiSignalGenParticles"))
akPu3PFPatJetFlavourAssociationLegacy = akPu3PFbTagger.PatJetFlavourAssociationLegacy
akPu3PFPatJetPartons = akPu3PFbTagger.PatJetPartons
akPu3PFJetTracksAssociatorAtVertex = akPu3PFbTagger.JetTracksAssociatorAtVertex
akPu3PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPu3PFSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPu3PFSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPu3PFCombinedSecondaryVertexBJetTags = akPu3PFbTagger.CombinedSecondaryVertexBJetTags
akPu3PFCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.CombinedSecondaryVertexV2BJetTags
akPu3PFJetBProbabilityBJetTags = akPu3PFbTagger.JetBProbabilityBJetTags
akPu3PFSoftPFMuonByPtBJetTags = akPu3PFbTagger.SoftPFMuonByPtBJetTags
akPu3PFSoftPFMuonByIP3dBJetTags = akPu3PFbTagger.SoftPFMuonByIP3dBJetTags
akPu3PFTrackCountingHighEffBJetTags = akPu3PFbTagger.TrackCountingHighEffBJetTags
akPu3PFTrackCountingHighPurBJetTags = akPu3PFbTagger.TrackCountingHighPurBJetTags
akPu3PFPatJetPartonAssociationLegacy = akPu3PFbTagger.PatJetPartonAssociationLegacy
akPu3PFImpactParameterTagInfos = akPu3PFbTagger.ImpactParameterTagInfos
akPu3PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu3PFJetProbabilityBJetTags = akPu3PFbTagger.JetProbabilityBJetTags
akPu3PFSecondaryVertexTagInfos = akPu3PFbTagger.SecondaryVertexTagInfos
akPu3PFSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPu3PFSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPu3PFCombinedSecondaryVertexBJetTags = akPu3PFbTagger.CombinedSecondaryVertexBJetTags
akPu3PFCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.CombinedSecondaryVertexV2BJetTags
akPu3PFSecondaryVertexNegativeTagInfos = akPu3PFbTagger.SecondaryVertexNegativeTagInfos
akPu3PFNegativeSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPu3PFNegativeSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPu3PFNegativeCombinedSecondaryVertexBJetTags = akPu3PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akPu3PFPositiveCombinedSecondaryVertexBJetTags = akPu3PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akPu3PFNegativeCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPu3PFPositiveCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPu3PFSoftPFMuonsTagInfos = akPu3PFbTagger.SoftPFMuonsTagInfos
akPu3PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu3PFSoftPFMuonBJetTags = akPu3PFbTagger.SoftPFMuonBJetTags
akPu3PFSoftPFMuonByIP3dBJetTags = akPu3PFbTagger.SoftPFMuonByIP3dBJetTags
akPu3PFSoftPFMuonByPtBJetTags = akPu3PFbTagger.SoftPFMuonByPtBJetTags
akPu3PFNegativeSoftPFMuonByPtBJetTags = akPu3PFbTagger.NegativeSoftPFMuonByPtBJetTags
akPu3PFPositiveSoftPFMuonByPtBJetTags = akPu3PFbTagger.PositiveSoftPFMuonByPtBJetTags
akPu3PFPatJetFlavourIdLegacy = cms.Sequence(akPu3PFPatJetPartonAssociationLegacy*akPu3PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPu3PFPatJetFlavourAssociation = akPu3PFbTagger.PatJetFlavourAssociation
#akPu3PFPatJetFlavourId = cms.Sequence(akPu3PFPatJetPartons*akPu3PFPatJetFlavourAssociation)
akPu3PFJetBtaggingIP = cms.Sequence(akPu3PFImpactParameterTagInfos *
(akPu3PFTrackCountingHighEffBJetTags +
akPu3PFTrackCountingHighPurBJetTags +
akPu3PFJetProbabilityBJetTags +
akPu3PFJetBProbabilityBJetTags
)
)
akPu3PFJetBtaggingSV = cms.Sequence(akPu3PFImpactParameterTagInfos
*
akPu3PFSecondaryVertexTagInfos
* (akPu3PFSimpleSecondaryVertexHighEffBJetTags+
akPu3PFSimpleSecondaryVertexHighPurBJetTags+
akPu3PFCombinedSecondaryVertexBJetTags+
akPu3PFCombinedSecondaryVertexV2BJetTags
)
)
akPu3PFJetBtaggingNegSV = cms.Sequence(akPu3PFImpactParameterTagInfos
*
akPu3PFSecondaryVertexNegativeTagInfos
* (akPu3PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akPu3PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akPu3PFNegativeCombinedSecondaryVertexBJetTags+
akPu3PFPositiveCombinedSecondaryVertexBJetTags+
akPu3PFNegativeCombinedSecondaryVertexV2BJetTags+
akPu3PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPu3PFJetBtaggingMu = cms.Sequence(akPu3PFSoftPFMuonsTagInfos * (akPu3PFSoftPFMuonBJetTags
+
akPu3PFSoftPFMuonByIP3dBJetTags
+
akPu3PFSoftPFMuonByPtBJetTags
+
akPu3PFNegativeSoftPFMuonByPtBJetTags
+
akPu3PFPositiveSoftPFMuonByPtBJetTags
)
)
akPu3PFJetBtagging = cms.Sequence(akPu3PFJetBtaggingIP
*akPu3PFJetBtaggingSV
*akPu3PFJetBtaggingNegSV
# *akPu3PFJetBtaggingMu
)
akPu3PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPu3PFJets"),
genJetMatch = cms.InputTag("akPu3PFmatch"),
genPartonMatch = cms.InputTag("akPu3PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu3PFcorr")),
JetPartonMapSource = cms.InputTag("akPu3PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPu3PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPu3PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPu3PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPu3PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPu3PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPu3PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPu3PFJetBProbabilityBJetTags"),
cms.InputTag("akPu3PFJetProbabilityBJetTags"),
#cms.InputTag("akPu3PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPu3PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPu3PFTrackCountingHighEffBJetTags"),
cms.InputTag("akPu3PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPu3PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = False,
addGenJetMatch = False,
embedGenJetMatch = False,
embedGenPartonMatch = False,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akPu3PFNjettiness = Njettiness.clone(
src = cms.InputTag("akPu3PFJets"),
R0 = cms.double( 0.3)
)
akPu3PFpatJetsWithBtagging.userData.userFloats.src += ['akPu3PFNjettiness:tau1','akPu3PFNjettiness:tau2','akPu3PFNjettiness:tau3']
akPu3PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu3PFpatJetsWithBtagging"),
genjetTag = 'ak3HiSignalGenJets',
rParam = 0.3,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = False,
isMC = False,
doSubEvent = False,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPu3PF"),
jetName = cms.untracked.string("akPu3PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(False),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("ak3GenJets"),
doGenTaus = cms.untracked.bool(False),
genTau1 = cms.InputTag("ak3GenNjettiness","tau1"),
genTau2 = cms.InputTag("ak3GenNjettiness","tau2"),
genTau3 = cms.InputTag("ak3GenNjettiness","tau3"),
doGenSym = cms.untracked.bool(False),
genSym = cms.InputTag("ak3GenJets","sym"),
genDroppedBranches = cms.InputTag("ak3GenJets","droppedBranches")
)
akPu3PFJetSequence_mc = cms.Sequence(
#akPu3PFclean
#*
akPu3PFmatch
#*
#akPu3PFmatchGroomed
*
akPu3PFparton
*
akPu3PFcorr
*
#akPu3PFJetID
#*
akPu3PFPatJetFlavourIdLegacy
#*
#akPu3PFPatJetFlavourId # Use legacy algo till PU implemented
*
akPu3PFJetTracksAssociatorAtVertex
*
akPu3PFJetBtagging
*
akPu3PFNjettiness #No constituents for calo jets in pp. Must be removed for pp calo jets but I'm not sure how to do this transparently (Marta)
*
akPu3PFpatJetsWithBtagging
*
akPu3PFJetAnalyzer
)
akPu3PFJetSequence_data = cms.Sequence(akPu3PFcorr
*
#akPu3PFJetID
#*
akPu3PFJetTracksAssociatorAtVertex
*
akPu3PFJetBtagging
*
akPu3PFNjettiness
*
akPu3PFpatJetsWithBtagging
*
akPu3PFJetAnalyzer
)
akPu3PFJetSequence_jec = cms.Sequence(akPu3PFJetSequence_mc)
akPu3PFJetSequence_mb = cms.Sequence(akPu3PFJetSequence_mc)
akPu3PFJetSequence = cms.Sequence(akPu3PFJetSequence_data)
| [
"marta.verweij@cern.ch"
] | marta.verweij@cern.ch |
1416874157729825714165b2eecc1af24e692c63 | d3196fb38078fdbe966bd5af8a8a4f2924a47c20 | /wandb/sdk/wandb_manager.py | 69e8c503a571a70e1c710938889cb33c97a665cf | [
"MIT"
] | permissive | morganmcg1/client | a1ae01ea302f13a6c9850972411ecabcb900dbc6 | 099f7aa938fb62c5a5d3e12f7d2067196498b67c | refs/heads/master | 2023-09-06T01:14:40.282234 | 2021-11-13T03:01:01 | 2021-11-13T03:01:01 | 427,620,124 | 0 | 0 | MIT | 2021-11-13T09:22:12 | 2021-11-13T09:22:11 | null | UTF-8 | Python | false | false | 3,831 | py | """Manage wandb processes.
Create a grpc manager channel.
"""
import atexit
import multiprocessing
import os
from typing import Callable, Optional, Tuple, TYPE_CHECKING
from wandb import env
from wandb.sdk.lib.exit_hooks import ExitHooks
if TYPE_CHECKING:
from wandb.sdk.service import service
from wandb.sdk.wandb_settings import Settings
class _ManagerToken:
_token_str: Optional[str]
def __init__(self) -> None:
self._token_str = None
def probe(self) -> None:
token = os.environ.get(env.SERVICE)
if not token:
return
self._token_str = token
def configure(self, port: int) -> None:
version = "1"
pid = os.getpid()
token = "-".join([version, str(pid), str(port)])
os.environ[env.SERVICE] = token
self._token_str = token
def parse(self) -> Tuple[str, int, int]:
assert self._token_str
parts = self._token_str.split("-")
assert len(parts) == 3, f"token must have 3 parts: {parts}"
# TODO: make more robust?
version, pid_str, port_str = parts
pid_int = int(pid_str)
port_int = int(port_str)
return version, pid_int, port_int
@property
def token(self) -> Optional[str]:
return self._token_str
@property
def port(self) -> int:
_, _, port = self.parse()
return port
class _Manager:
_token: _ManagerToken
_atexit_lambda: Optional[Callable[[], None]]
_hooks: Optional[ExitHooks]
def __init__(self) -> None:
# TODO: warn if user doesnt have grpc installed
from wandb.sdk.service import service
self._atexit_lambda = None
self._hooks = None
self._token = _ManagerToken()
self._service = service._Service()
self._setup_mp()
self._setup()
def _setup_mp(self) -> None:
# NOTE: manager does not support fork yet, support coming later
start_method = multiprocessing.get_start_method(allow_none=True)
assert start_method != "fork", "start method 'fork' is not supported yet"
if start_method is None:
multiprocessing.set_start_method("spawn")
def _setup(self) -> None:
self._token.probe()
if not self._token.token:
self._setup_service()
port = self._token.port
self._service.connect(port=port)
def _setup_service(self) -> None:
port = self._service.start()
assert port
self._token.configure(port=port)
self._atexit_setup()
def _atexit_setup(self) -> None:
self._atexit_lambda = lambda: self._atexit_teardown()
self._hooks = ExitHooks()
self._hooks.hook()
atexit.register(self._atexit_lambda)
def _atexit_teardown(self) -> None:
exit_code = self._hooks.exit_code if self._hooks else 0
self._teardown(exit_code)
def _teardown(self, exit_code: int) -> None:
if self._atexit_lambda:
atexit.unregister(self._atexit_lambda)
self._atexit_lambda = None
self._inform_teardown(exit_code)
def _get_service(self) -> "service._Service":
return self._service
def _inform_init(self, settings: "Settings", run_id: str) -> None:
svc = self._service
assert svc
svc._svc_inform_init(settings=settings, run_id=run_id)
def _inform_attach(self, attach_id: str) -> None:
svc = self._service
assert svc
svc._svc_inform_attach(attach_id=attach_id)
def _inform_finish(self, run_id: str = None) -> None:
svc = self._service
assert svc
svc._svc_inform_finish(run_id=run_id)
def _inform_teardown(self, exit_code: int) -> None:
svc = self._service
assert svc
svc._svc_inform_teardown(exit_code)
| [
"noreply@github.com"
] | morganmcg1.noreply@github.com |
a778f90e545e61c423df01e02861dbf5ed9a4647 | 02467e9975b50c14b4dc8cdc6dc03748f9aa8245 | /openshift/test/test_v1_scale_status.py | 2fa26692bfeeef412de792161c348de67a929f4f | [
"Apache-2.0"
] | permissive | ashcrow/python-openshift | 3995e3c4b72bf52a62bc6b07dabf3d0f709444ae | 74c9ade612def941938016385842631342e926de | refs/heads/master | 2021-01-11T19:29:04.419005 | 2017-01-18T19:31:58 | 2017-01-18T19:31:58 | 79,377,387 | 0 | 0 | null | 2017-01-18T19:46:04 | 2017-01-18T19:46:04 | null | UTF-8 | Python | false | false | 4,142 | py | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. 
Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v1.5.0-alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from openshift.client.rest import ApiException
from openshift.client.models.v1_scale_status import V1ScaleStatus
class TestV1ScaleStatus(unittest.TestCase):
""" V1ScaleStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleStatus(self):
"""
Test V1ScaleStatus
"""
model = openshift.client.models.v1_scale_status.V1ScaleStatus()
if __name__ == '__main__':
unittest.main()
| [
"jdetiber@redhat.com"
] | jdetiber@redhat.com |
8c90438efa58517e72baabd14cc293e405b9c8e1 | 20ffcf659d71c70dd61ea043ea79f172eb451801 | /Macros/Common/check_durability.py | ba80ff71dbb7ab07c69663de3623eafdc0dc8b77 | [] | no_license | marcoeqms/classicassistmacrocopy | 5cfc3421270623d850c46f4ffd035e91fd56178a | e00cc23d34b722c5b2a0c2f6668a52a2eee6ae08 | refs/heads/master | 2023-06-24T12:32:46.640400 | 2021-07-22T14:06:47 | 2021-07-22T14:06:47 | 388,484,002 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | # Name: Durability check
# Description: Checks all items for durability and show alerts
# Author: Mordor
# Era: AOS
from Assistant import Engine
layers = [
'OneHanded',
'TwoHanded',
'Shoes',
'Pants',
'Shirt',
'Helm',
'Gloves',
'Ring',
'Talisman',
'Neck',
'Waist',
'InnerTorso',
'Bracelet',
'MiddleTorso',
'Earrings',
'Arms',
'Cloak',
'OuterTorso',
'OuterLegs',
'InnerLegs',
]
# Amount of durability to alert
minDurability = 20
# Checks every 5 secs
checkDelay = 1000 * 60 # every 1 min
def property_exists(serial, cliloc):
item = Engine.Items.GetItem(serial)
if (item == None or item.Properties == None):
return False
for x in item.Properties:
if x.Cliloc == cliloc:
return True
return False
def check_durability():
while not Dead('self'):
for layer in layers:
if FindLayer(layer) and property_exists(GetAlias('found'), 1060639):
durability = PropertyValue[int]('found', 'durability')
Pause(500)
if durability < minDurability:
HeadMsg("ATTENTION!! \"" + layer + "\": " +
str(durability), 'self')
MoveItem('found', 'self')
Pause(checkDelay)
check_durability()
| [
"noreply@github.com"
] | marcoeqms.noreply@github.com |
2ec8a1396d4dcd18d4a725aa77355c4afd5cf0ef | 9ee42f8fd6eba471fc45a0aab25827c6a5caa9fa | /report/scrapper/utils.py | e685ccd53babc5766dc422a914756c562a79c6fd | [] | no_license | CITGuru/websitereport | bfff6d19e492ca783dfcee4622be2c3ab6053482 | 0eb0d2fc4a20a52469f4ed691b81c1f74d57c9c4 | refs/heads/master | 2021-10-22T09:58:09.393358 | 2019-03-09T22:19:22 | 2019-03-09T22:19:22 | 169,621,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | import requests
cookies = {
'_omappvp': 'oRlqyTdkxzYopr6UWBDOKHJYmtKEsZnX0GvinZub0m16Dw9pMPCXSA6pWixKShASeHHwGxa13pRHz2MWkpVmAgLPllIynoBt',
'cookiebanner-accepted': '1',
'_ga': 'GA1.2.1541156287.1548626961',
'__utmc': '183319076',
'__utmz': '183319076.1548813690.2.2.utmcsr=l.facebook.com|utmccn=(referral)|utmcmd=referral|utmcct=/',
'__utma': '183319076.1541156287.1548626961.1548923427.1548926978.6',
'__unam': 'b14a8e2-168915c1f53-ea03126-90',
'__utmb': '183319076.3.10.1548926979',
}
headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9,ha;q=0.8',
}
def get_html(url):
print(url)
request = requests.get(url, headers=headers, cookies=cookies)
print(request)
if request.status_code == 200:
return request.content
else:
raise Exception("Error occured")
def strip_url(url):
_url = url.replace("http://", "").replace("https://", "").replace("www.", "")
return _url
def get_name(url):
_url = strip_url(url)
_url = _url.split(".")
name = _url[-2]
return name
# print(get_html("http://www.siteworthtraffic.com/report/siitgo.com")) | [
"oyetoketoby80@gmail.com"
] | oyetoketoby80@gmail.com |
24533554737d02ec529fd90c4d73b299a0fce83d | 9d5b1ecb56d2cb6862f1c8f31dcf79a4a5c388b9 | /tools/plot_JPLUS_results.py | 339f0a4a63f433be9840b0545c905999b0001daa | [] | no_license | danielespinoso/lya_emitters-code | c6d91a8767766df0a0a9409e5ee39f7d936041ff | d21f5e8a7cfdea8d291af834a84568b0fa9a3e4a | refs/heads/master | 2021-01-21T18:39:04.997713 | 2017-08-31T11:43:54 | 2017-08-31T11:43:54 | 92,069,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,932 | py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as col
import sys
import os
import tools.jplus_filter_system
from tools.converters import magtoflux as MtF
from tools.converters import fluxtomag as FtM
import tools.settings
setup = tools.settings.set_up()
sys.path.append(setup['jplus_code'])
import jplus
def plot_spec(lamb, x, errx, title, unt='mags', limits=[3400., 9000., 25.5, 16.5], idd=0):
    """Plot a 12-band J-PLUS photo-spectrum with per-band error bars.

    Parameters
    ----------
    lamb : sequence of 12 floats
        Pivot wavelength of each J-PLUS filter, in Angstrom, in the
        canonical filter order (uJAVA ... zJAVA).
    x : sequence of 12 floats
        Magnitudes (unt='mags') or fluxes (any other value of `unt`),
        one per band, same order as `lamb`.
    errx : sequence of 12 floats
        1-sigma uncertainty on each entry of `x`.
    title : str
        Figure title.
    unt : str
        'mags' labels the y axis in magnitudes, anything else in
        erg/s*cm^2 flux density. Only the label changes; no conversion
        is performed here.
    limits : sequence of 4 floats
        Axis limits [xmin, xmax, ymin, ymax] passed to ``ax.axis``
        (read-only; the default is never mutated).
    idd : int
        Object identifier, kept for call-site compatibility (only used
        by previously-existing savefig paths).
    """
    # (filter label, RGB color, marker) in plotting order;
    # 's' squares mark broad bands, 'o' circles mark narrow bands.
    bands = [
        ('uJAVA', (1.0, 0.0, 1.0), 's'),
        ('J0378', (0.7, 0.0, 0.7), 'o'),
        ('J0395', (0.5, 0.0, 1.0), 'o'),
        ('J0410', (0.0, 0.0, 1.0), 'o'),
        ('J0430', (0.0, 0.8, 0.8), 'o'),
        ('gJAVA', (0.0, 0.6, 0.6), 's'),
        ('J0515', (0.4, 0.8, 0.0), 'o'),
        ('rJAVA', (1.0, 0.5, 0.0), 's'),
        ('J0660', (1.0, 0.0, 0.0), 'o'),
        ('iJAVA', (0.8, 0.0, 0.0), 's'),
        ('J0861', (0.6, 0.0, 0.0), 'o'),
        ('zJAVA', (0.3, 0.0, 0.0), 's'),
    ]

    fig = plt.figure(figsize=(12, 10))
    matplotlib.rcParams.update({'font.size': 16})
    matplotlib.rcParams.update({'lines.linewidth': 3})
    matplotlib.rcParams.update({'lines.markersize': 6})
    ax = plt.subplot(111)

    # One errorbar call per band, colored and labelled from the table above.
    for i, (label, kol, marker) in enumerate(bands):
        ax.errorbar(lamb[i], x[i], yerr=errx[i], c=kol, fmt=marker,
                    ecolor=kol, alpha=0.7, label=label)

    ax.plot(lamb, x, 'k-', alpha=0.3, linewidth=0.8)  # thin solid line joining the points

    # Shrink the axes to make room for the legend on the right.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])

    ax.axis(limits)
    ax.set_title(title, fontsize=15)
    ax.set_xlabel(r'$\rm{wavelenght}\quad[\AA]$')
    if unt == 'mags':
        ax.set_ylabel(setup['mag_type']+' [mags]')
    else:
        ax.set_ylabel(r'$\rmF_\lambda\quad[erg/s*cm^2]$')
    ax.legend(loc='center left', fontsize = 'small', bbox_to_anchor=(1, 0.5))
    plt.show()
    plt.close()
def plot_MEDspectra(data, mask=[], titol=''):
    """Plot the median composite photo-spectrum of a set of objects.

    Each object's per-band fluxes are normalized so that its rJAVA flux
    equals ``fref`` (1e-16 erg/s*cm^2, arbitrary reference), then the
    per-band median of the normalized fluxes is plotted via plot_spec.

    Parameters
    ----------
    data : dict
        J-PLUS catalogue: per-filter (N, 2) arrays of (mag, mag_err).
    mask : boolean array, optional
        Object selection; empty (default) selects all objects.
        Read-only — the default list is never mutated.
    titol : str
        Title forwarded to plot_spec.
    """
    if len(mask) == 0:
        mask = np.ones(len(data['rJAVA'][:,0]), dtype=bool)
    dataset = jplus.tools.select_object(data, mask)

    # Convert magnitudes to flux densities, band by band, and store them
    # under '<filter>_flux' keys as (N, 2) arrays of (flux, flux_err).
    for i in np.arange(0, 12):
        flt = tools.jpflt(i)
        fl, dfl = MtF(dataset[flt][:,0], band=flt, dmags=dataset[flt][:,1], unit='l')
        key = flt + '_flux'
        dataset[key] = np.array([fl, dfl]).T

    newspec = {}
    lamb = np.empty(12)
    median = np.empty(12)
    median_err = np.empty(12)
    fref = 1.0e-16  # rJAVA flux of every normalized spectrum is pinned to this value
    for i in np.arange(0, 12):
        flt = tools.jpflt(i)
        key = flt + '_flux'
        # Normalize each object's spectrum to its own rJAVA flux.
        newspec[flt] = fref*(dataset[key][:,0]/dataset['rJAVA_flux'][:,0])
        lamb[i] = tools.jplus_filter_system.jplus_pivot(flt)
        median[i] = np.median(newspec[flt])
        # NOTE(review): errors are medians of the *un-normalized* flux
        # errors, so they are not rescaled with the spectra — confirm intended.
        median_err[i] = np.median(dataset[key][:,1])

    plot_spec(lamb, median, median_err, titol, unt='flux', limits=[3400., 9000., 0., 7.0e-16])
# plots the difference between two composite spectra (mask1 - mask2, not the opposite)
def plot_MEDspectra_diff(data, mask1=[], mask2=[], titol=''):
    """Plot the difference between two median composite photo-spectra.

    Builds the rJAVA-normalized median spectrum of the objects selected
    by ``mask1`` and of those selected by ``mask2`` (same normalization
    as plot_MEDspectra), then plots (mask1 - mask2) — note the order —
    with errors added in quadrature.

    Parameters
    ----------
    data : dict
        J-PLUS catalogue: per-filter (N, 2) arrays of (mag, mag_err).
    mask1, mask2 : boolean arrays
        The two object selections; the plotted curve is median(mask1)
        minus median(mask2).
    titol : str
        Title forwarded to plot_spec.
    """
    mask = np.array([mask1, mask2]).T
    lamb = np.empty(12)
    median = np.empty((12,2))
    median_err = np.empty((12,2))

    # Column qq of `median` holds the composite spectrum of mask[:, qq].
    for qq in range(0,mask.shape[1]) :
        dataset = jplus.tools.select_object(data, mask[:,qq])

        # Convert magnitudes to flux densities for every band.
        for i in np.arange(0, 12):
            flt = tools.jpflt(i)
            fl, dfl = MtF(dataset[flt][:,0], band=flt, dmags=dataset[flt][:,1], unit='l')
            key = flt + '_flux'
            dataset[key] = np.array([fl, dfl]).T

        newspec = {}
        fref = 1.0e-16  # rJAVA flux of every normalized spectrum is pinned to this (arbitrary) value
        for i in np.arange(0, 12):
            flt = tools.jpflt(i)
            key = flt + '_flux'
            newspec[flt] = fref*(dataset[key][:,0]/dataset['rJAVA_flux'][:,0])
            lamb[i] = tools.jplus_filter_system.jplus_pivot(flt)
            median[i,qq] = np.median(newspec[flt])
            median_err[i,qq] = np.median(dataset[key][:,1])

    diff = median[:,0] - median[:,1]  # ATTENTION!! It's (mask1 - mask2), not the opposite!!
    diff_err = np.sqrt(median_err[:,0]**2. + median_err[:,1]**2.)  # quadrature sum

    plot_spec(lamb, diff, diff_err, titol, unt='flux', limits=[3400., 9000., -3.0e-16, 3.0e-16])
def plot_JPLUSphotoSpectra(first_data, first_objid, mask=[], units='mags', zsdss=0, zfromSDSS=False, number=0):
    """Plot the 12-band JPLUS photo-spectrum of a single object.

    first_data  : JPLUS catalogue dict ('rJAVA', 'coords', 'redshift', ...).
    first_objid : index of the object in the (possibly masked) catalogue.
    mask        : optional boolean selection; empty means "all objects".
    units       : 'mags' -> plot magnitudes, 'flux' -> plot f_lambda.
    zsdss       : redshift displayed in the title when zfromSDSS is True.
    number      : identifier forwarded to plot_spec (idd argument).

    NOTE(review): `mask=[]` is a mutable default argument; harmless here
    because the list is only read, but worth replacing with None.
    """
    if len(mask) == 0:
        mask = np.ones(len(first_data['rJAVA'][:,0]), dtype=bool)
        objid = first_objid
    else:
        # NOTE(review): this expression reduces to first_objid itself;
        # presumably a leftover of an index remapping — confirm intent.
        objid = int(len(first_data['rJAVA'][:,0])*first_objid/len(first_data['rJAVA'][:,0]))
    data = jplus.tools.select_object(first_data, mask)
    # Per-filter pivot wavelength, magnitude and flux (with errors).
    lamb = np.empty(12)
    mag = np.empty(12); flux = np.empty(12)
    errmag = np.empty(12); errflux = np.empty(12)
    for i, ifilter in zip(np.arange(0,12), jplus.datasets.jplus_filter_names(only_bb=False)):
        lamb[i] = tools.jplus_filter_system.jplus_pivot(ifilter)
        mag[i] = data[ifilter][objid,0]
        errmag[i] = data[ifilter][objid,1]
        flux[i], errflux[i] = tools.singleMtoF(mag[i], band=ifilter, dmags=errmag[i], unit='l')
    # Truncate coordinates (4 decimals) and redshift (3) for the title.
    ra = (int(data['coords'][objid,0]*10000.))/10000.
    dec = (int(data['coords'][objid,1]*10000.))/10000.
    if zfromSDSS == True:
        rsh = (int(zsdss*1000.))/1000.
    else:
        rsh = (int(data['redshift'][objid]*1000.))/1000.
    if units == 'mags' :
        values = mag
        errors = errmag
        lims=[3400., 9000., 25.5, 16.5]
    elif units == 'flux' :
        values = flux
        errors = errflux
        lims = [3400., 9000., -0.4e-17, 3.5e-16]
    # LIMITS FOR SDSS QSOs
    # if number == 1:
    # lims = [3400., 9000., 0.0, 4.0e-16]
    # elif number == 5 or number == 6:
    # lims = [3400., 9000., 0.0, 4.0e-17]
    # elif number == 11:
    # lims = [3400., 9000., 0.0, 2.3e-16]
    # elif number == 12:
    # lims = [3400., 9000., 0.0, 3.0e-16]
    #titul = 'ra: '+str(ra)+' dec: '+str(dec)+' z: '+str(rsh)
    titul = 'OBJECT: LAE_extd_1 ra: '+str(ra)+' dec: '+str(dec)
    plot_spec(lamb, values, errors, titul, unt=units, limits=lims, idd=number)
#-------------- PLOT ALL BPZ TEMPLATES --------------#
# fig = plt.figure()
# ax = plt.subplot(111)
# for i,j in zip(tmpl_list, np.arange(len(tmpl_list))):
# rf_lamb, tmplt = np.genfromtxt(home+'BPZ/PSFsimulations/pros/bpz/SED/'+i, unpack=True)
# obs_lamb = rf_lamb*(1.+ 2.24) all templates at z=2.24
# shift_tmplt = tmplt*(6.**j)
# mask = ((obs_lamb > 2800.) & (obs_lamb < 9000.))
# tem_kol = ((11-j)/11., 0., j/11.)
# ax.plot(obs_lamb[mask], shift_tmplt[mask], c=tem_kol, linewidth=3, alpha=0.8, label=i)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
# ax.set_title('All BPZ templates at z=2.24')
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# ax.set_xlabel('wavelenght [A]')
# ax.set_ylabel(r'$f_{\lambda}\quad\rm{[erg/s cm}^2\rm{A}]$')
# ax.set_yscale('log')
# plt.show()
def plot_lumiFunc(fsky, comovDist_interpolation, properVol=False):
    """Plot the Lya-candidate luminosity function against Konno et al.

    fsky                    : surveyed sky fraction (scales the shell volume).
    comovDist_interpolation : callable z -> comoving distance [Mpc].
    properVol               : if True use the proper volume of the
                              [zmin, zmax] shell, else the comoving one.

    Reads the candidate list from setup['final_list'] (luminosity in
    column 5) and the literature points from konno_points.txt.
    """
    lum = np.genfromtxt(setup['final_list'], skip_header=1, usecols=(5), unpack=True)
    # Work in log-luminosity and drop non-positive entries.
    Loglum = np.log10(lum) ; notinf = (Loglum > 0) ; Loglum = Loglum[notinf]
    if properVol == True:
        # Proper-distance shell volume (comoving distance / (1+z)).
        dp_min = comovDist_interpolation(setup['zmin'])/(1+setup['zmin']) #now in Mpc
        dp_max = comovDist_interpolation(setup['zmax'])/(1+setup['zmax']) #now in Mpc
        dVp = abs((4.*np.pi*(dp_max**3. - dp_min**3.)/3.))*fsky
        dV = dVp
    else:
        # Comoving shell volume between zmin and zmax.
        dc_min = comovDist_interpolation(setup['zmin']) #now in Mpc
        dc_max = comovDist_interpolation(setup['zmax']) #now in Mpc
        dVc = (4.*np.pi*(dc_max**3. - dc_min**3.)/3.)*fsky
        dV = dVc
    # Log-L bin width depends on the sample selection.
    if setup['galexmask'] == True:
        dLogL = 0.12
    else:
        dLogL = 0.15
    from tools.histo import histogram
    Lmin = min(Loglum) ; Lmax = max(Loglum)
    centers, histo = histogram(Loglum, Lmin, Lmax, dLogL)
    # Counts converted to number densities per Mpc^3 per dLogL.
    phi = histo*(1./(dV*dLogL))
    # NOTE(review): errs scales as phi/N rather than the usual Poisson
    # sqrt(N)/(dV*dLogL) — confirm this is intended.
    errs = (1./np.sqrt(histo))*(1./(dV*dLogL))
    kon_list = setup['home'] + 'works/lya_emitters/datasets/konno_points.txt'
    kon_Loglum, kon_phi, kon_errs = np.genfromtxt(kon_list,skip_header=1,unpack=True,usecols=(0,1,2))
    plt.errorbar(centers, phi, yerr=errs, fmt='or', ecolor='b', markersize=3, label='JPLUS data')
    plt.errorbar(kon_Loglum, kon_phi, yerr=kon_errs, fmt='og', ecolor='m', markersize=3, label='Konno et al.')
    plt.xlabel('Log L [erg/s]', fontsize=12)
    plt.ylabel(r'$\Phi\quad 1\,/\,[\rm{Mpc}^3\ \rm{dLog L}]$', fontsize=12)
    plt.yscale('log')
    plt.title('Luminosity function - JPLUS lya candidates')
    #plt.savefig(setup['plots'] + 'LumiFunc_z2.24_'+str(dLogL)+'bin_better.png')
    plt.legend()
    plt.show()
    plt.close()
def plot_footprint_and_data(jpdata, other=None, plot_jplus=False):
    """Draw the JPLUS footprint (one rectangle per tile) and, optionally,
    the object positions.

    jpdata     : JPLUS catalogue dict with 'tile_id' and 'coords' columns.
    other      : optional second catalogue whose objects are over-plotted
                 in red inside each tile (default: nothing).
    plot_jplus : if True, also scatter the JPLUS objects themselves (blue).
    """
    # Mutable-default fix: `other=[]` was the classic shared-list default;
    # a None sentinel preserves the original behaviour.
    if other is None:
        other = []
    matplotlib.rcParams.update({'font.size': 14})
    matplotlib.rcParams.update({'lines.markersize': 4})
    fig = plt.figure(figsize=(12,10))
    for i in set(jpdata['tile_id']):
        tilemask = (jpdata['tile_id'] == i)
        # Bounding box of this tile from its own objects.
        minra = min(jpdata['coords'][tilemask,0]); maxra = max(jpdata['coords'][tilemask,0])
        mindec = min(jpdata['coords'][tilemask,1]); maxdec = max(jpdata['coords'][tilemask,1])
        #plot jplus
        if plot_jplus == True:
            coordmask = ((jpdata['coords'][:,0] > minra) & (jpdata['coords'][:,0] < maxra) & \
                         (jpdata['coords'][:,1] > mindec) & (jpdata['coords'][:,1] < maxdec))
            plt.plot(jpdata['coords'][coordmask,0], jpdata['coords'][coordmask,1], 'ob', alpha=0.2)
        #plot other data
        if len(other) != 0:
            coordmask = ((other['coords'][:,0] > minra) & (other['coords'][:,0] < maxra) & \
                         (other['coords'][:,1] > mindec) & (other['coords'][:,1] < maxdec))
            plt.plot(other['coords'][coordmask,0], other['coords'][coordmask,1], 'or', alpha=0.2)
        #plot jplus footprint (one square per tile)
        plt.plot( (minra, maxra), (mindec, mindec), 'k-', alpha=0.8 )
        plt.plot( (maxra, maxra), (mindec, maxdec), 'k-', alpha=0.8 )
        plt.plot( (maxra, minra), (maxdec, maxdec), 'k-', alpha=0.8 )
        plt.plot( (minra, minra), (maxdec, mindec), 'k-', alpha=0.8 )
    plt.title(setup['data_rels']+' footprint')
    plt.xlabel('ra [deg]')
    plt.ylabel('dec [deg]')
    plt.legend()
    #plt.show()
    #plt.close()
def composite_spec(data, mask=[], titol=''):
    """Plot an error-weighted composite (stacked) spectrum of a selection.

    Each object's 12-band flux vector is normalised to the mean of its own
    gJAVA/rJAVA fluxes, rescaled to the (g+r)/2 flux of the FIRST object in
    `data`, then averaged per filter with weights 1/flux_error.

    NOTE(review): `mask=[]` is a mutable default argument; it is only read
    here, but the idiom is fragile.
    """
    if len(mask) == 0:
        mask = np.ones(len(data['rJAVA'][:,0]), dtype=bool)
    # Reference flux: mean of the g and r fluxes of the first catalogue entry.
    g = tools.singleMtoF(data['gJAVA'][0,0], band='gJAVA')
    r = tools.singleMtoF(data['rJAVA'][0,0], band='rJAVA')
    fref = (g+r)/2. # I choose to normalize all the spectra to the mean of rJAVA-gJAVA bands of the first spectra
    # Keep only J0395 > 18.8 objects on top of the input selection.
    mask = ((mask) & (data['J0395'][:,0] > 18.8))
    dataset = jplus.tools.select_object(data, mask)
    # construct a matrix with fluxes and flux-errors
    for i in np.arange(0, 12):
        flt = tools.jpflt(i)
        fl, dfl = MtF(dataset[flt][:,0], band=flt, dmags=dataset[flt][:,1], unit='l')
        key = flt + '_flux'
        dataset[key] = np.array([fl, dfl]).T
    ll = len(dataset['rJAVA'][:,0])
    newspec = {}
    ww = {}
    lamb = np.empty(12)
    compo = np.empty(12)
    compo_err = np.empty(12)
    # fref = 1.0e-16 # I choose to put the rJAVA band of all the spectra at 1.e-16
    for i in np.arange(0, 12):
        flt = tools.jpflt(i)
        key = flt + '_flux'
        # newspec[flt] = fref*(dataset[key][:,0]/dataset['rJAVA_flux'][:,0])
        # Per-object normalisation: each spectrum divided by its own
        # (g+r)/2 flux, scaled to fref.
        DD = (dataset['rJAVA_flux'][:,0]+dataset['gJAVA_flux'][:,0])/2.
        newspec[flt] = fref*(dataset[key][:,0]/DD)
        # Inverse-error weights; the -inf guard presumably catches a
        # division by a negative-zero error — confirm.
        ww[flt] = (1./dataset[key][:,1])
        mask = (ww[flt] == -1.*np.inf)
        ww[flt][mask] = 0.
        lamb[i] = tools.jplus_filter_system.jplus_pivot(flt)
        # Weighted mean and standard error of the mean per filter.
        compo[i] = np.average(newspec[flt], weights=ww[flt])
        compo_err[i] = np.std(newspec[flt])/np.sqrt(len(newspec[flt]))#np.median(dataset[key][:,1])
        # if compo_err[i] > 4.e-18:
        # compo_err[i] = 1.5e-18
        #compo_err[i] = np.sqrt(sum(dataset[key][:,1]**2.))/np.sqrt(len(dataset[key][:,1]))#np.median(dataset[key][:,1])
    tools.plot_spec(lamb, compo, compo_err, titol, unt='flux', limits=[3400., 9000., 0., 1.e-16])
def plot_SDSSandJPLUS_spectra(sdss_file, first_data, first_objid, mask=[], units='mags', zsdss=0, zfromSDSS=False, number=0):
    """Overlay a full SDSS spectrum with the 12-band JPLUS photo-spectrum
    of one object, then save the figure."""
    # --- SDSS SPECTRUM (FITS table: col 0 = flux, col 1 = log10(lambda)) ---
    # wave, flu = np.genfromtxt(sdss_file, unpack=True, usecols=(0,1), delimiter=',', skip_header=1)
    import pyfits as pf
    data, header = pf.getdata(sdss_file, 0, header=True)
    npoints = len(data)
    flu = np.empty(npoints)
    wave = np.empty(npoints)
    for idx, row in enumerate(data):
        flu[idx] = row[0]
        wave[idx] = 10**row[1]
    flu = flu*1.3e-17  # normalization to match jplus data. SDSS spectra are in units of 10^-17 erg/s/cm2/A
    fig = plt.figure(figsize=(12,10))
    plt.plot(wave, flu, 'k-', linewidth=0.5, alpha=0.3, label='SDSS')
    # --- JPLUS PHOTO-SPECTRUM ---
    if len(mask) == 0:
        mask = np.ones(len(first_data['rJAVA'][:,0]), dtype=bool)
        objid = first_objid
    else:
        objid = int(len(first_data['rJAVA'][:,0])*first_objid/len(first_data['rJAVA'][:,0]))
    data = jplus.tools.select_object(first_data, mask)
    lamb = np.empty(12)
    mag = np.empty(12); errmag = np.empty(12)
    flux = np.empty(12); errflux = np.empty(12)
    for i, ifilter in zip(np.arange(0,12), jplus.datasets.jplus_filter_names(only_bb=False)):
        lamb[i] = tools.jplus_filter_system.jplus_pivot(ifilter)
        mag[i] = data[ifilter][objid,0]
        errmag[i] = data[ifilter][objid,1]
        flux[i], errflux[i] = tools.singleMtoF(mag[i], band=ifilter, dmags=errmag[i], unit='l')
    # Truncated coordinates / redshift for the plot title.
    ra = (int(data['coords'][objid,0]*10000.))/10000.
    dec = (int(data['coords'][objid,1]*10000.))/10000.
    if zfromSDSS == True:
        rsh = (int(zsdss*1000.))/1000.
    else:
        rsh = (int(data['redshift'][objid]*1000.))/1000.
    if units == 'mags':
        values, errors = mag, errmag
        lims = [3400., 9000., 25.5, 16.5]
    elif units == 'flux':
        values, errors = flux, errflux
        lims = [3400., 9000., -5.0e-17, 3.3e-16]
    titul = 'object: SDSS QSO ra: '+str(ra)+' dec: '+str(dec)+' z: '+str(rsh)
    matplotlib.rcParams.update({'font.size': 18})
    matplotlib.rcParams.update({'lines.linewidth': 5})
    matplotlib.rcParams.update({'lines.markersize': 10})
    # One entry per band: (colour, label, open_marker).  Narrow bands are
    # drawn with an open (white-faced) square, broad bands with a filled one.
    band_style = [
        ((1.,0,1.), 'uJAVA', False),
        ((0.7,0,0.7), 'J0378', True),
        ((0.5,0,1.), 'J0395', True),
        ((0.,0.,1.), 'J0410', True),
        ((0.,0.8,0.8), 'J0430', True),
        ((0.,0.6,0.6), 'gJAVA', False),
        ((0.4,0.8,0.), 'J0515', True),
        ((1.,0.5,0.), 'rJAVA', False),
        ((1.,0.,0.), 'J0660', True),
        ((0.8,0.,0.), 'iJAVA', False),
        ((0.6,0.,0.), 'J0861', True),
        ((0.3,0.,0.), 'zJAVA', False),
    ]
    for i, (kol, band, open_marker) in enumerate(band_style):
        extra = {'mfc': 'white'} if open_marker else {}
        plt.errorbar(lamb[i], values[i], yerr=errors[i], c=kol, fmt='s',
                     ecolor=kol, alpha=0.95, label=band, **extra)
    plt.axis(lims)
    plt.title(titul, fontsize=18)
    plt.xlabel(r'$\rm{wavelenght}\quad[\AA]$', fontsize=18)
    if units == 'mags':
        plt.ylabel(setup['mag_type']+' [mags]', fontsize=18)
    else:
        plt.ylabel(r'$\rmF_\lambda\quad[\,erg\,/\,(\AA\,s\,cm^2)\,]$', fontsize=18)
    plt.tick_params(axis='both', which='major', labelsize=14)
    plt.tick_params(axis='both', which='minor', labelsize=14)
    plt.legend(loc='upper right', fontsize = 'small')
    plt.savefig(setup['plots']+'JPLUS_EDRobj-8461-19884_and_SDSS_spec-4447-55542-0346.pdf')
    #plt.show()
    plt.close()
| [
"dspinoso@cefca-ws-010"
] | dspinoso@cefca-ws-010 |
49092143647b73c2aae1c19f938a57e4fb7ce942 | 08ad25c35d7b671b202a03d9be529d79353498d5 | /src/old/spin.py | 17850fa0bb40029f435f0f59500e94cada1144b0 | [] | no_license | Rathal/UoL-Robot-Maze-Solver | 7a729e75b3551076898c717e042650f6f5dd16bf | fc45c597477e7a00d3a3b783599ea047acce6e9f | refs/heads/master | 2022-09-05T11:58:01.382199 | 2020-06-01T12:18:43 | 2020-06-01T12:18:43 | 245,222,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Int16
class Spinner:
def __init__(self):
rospy.init_node('Spinner')
self.rotator_sub = rospy.Subscriber('/odom', Odometry, callback=self.cb)
self.spinner_pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=1)
self.state_sub = rospy.Subscriber('/state', Int16, self.stateCB)
self.state_pub = rospy.Publisher('/state', Int16, queue_size=1)
self.state = 0
self.t = Twist()
self.origin = 0
self.rotations = 1
def stateCB(self, state):
self.state = state
def cb(self, e):
import math
if self.state == 0:
ori = e.pose.pose.orientation
orientation_list = [ori.x, ori.y, ori.z, ori.w]
(r, p, y) = euler_from_quaternion(orientation_list)
if self.origin == 0:
self.origin = round(y,2)
#print y
#print self.origin
if self.rotations > 0:
if (abs(y-self.origin)) > 0.1:
print 'Has Moved'
self.rotations -= 1
self.t.angular.z = -1
elif round(y, 2) == self.origin:
print 'stopped'
self.t.angular.z = 0
self.state_pub.publish(1)
# print "Theta: ", round(theta,2)
# print "Origin: ", round(self.origin, 2)
self.spinner_pub.publish(self.t)
else:
self.origin = 0
self.rotations = 1
# Instantiate the node and block until ROS shuts down.
spinner = Spinner()
rospy.spin()
"noreply@github.com"
] | Rathal.noreply@github.com |
bfd99c37854c269ae7937012f17c63e5e0f061cd | 223590e81400eb8192aeb0a56b36b5a80408d4b4 | /House Robber III.py | 584a90ed9a8be182d31caf96c6718832136be16d | [] | no_license | TianyaoHua/LeetCodeSolutions | c47fd3b6ae0bf60c0656ce12fb88290672c129ed | 418172cee1bf48bb2aed3b84fe8b4defd9ef4fdf | refs/heads/master | 2020-03-06T19:48:13.338630 | 2018-08-10T18:27:52 | 2018-08-10T18:27:52 | 127,037,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Definition for a binary tree node.
class TreeNode(object):
    """Binary-tree node: a payload value plus left/right child links."""
    def __init__(self, x):
        # Children start out absent; callers attach them explicitly.
        self.left = None
        self.right = None
        self.val = x
class Solution(object):
    """House Robber III: maximise the sum of chosen node values with the
    constraint that no two chosen nodes are parent and child."""

    def value(self, node, memo):
        """Return the best loot obtainable from the subtree rooted at `node`.

        `memo` maps already-solved nodes to their best value so each
        subtree is evaluated only once (O(n) overall).
        """
        if not node:
            return 0
        if node in memo:
            return memo[node]
        # Option 1: skip this node and rob both children freely.
        # (Bug fix: the original read `node.lert`, which raised
        # AttributeError on any node with a left child.)
        skip_here = self.value(node.left, memo) + self.value(node.right, memo)
        # Option 2: rob this node; children are off-limits, so recurse on
        # the four grandchildren instead.
        take_here = node.val
        if node.left:
            take_here += (self.value(node.left.left, memo) + self.value(node.left.right, memo))
        if node.right:
            take_here += (self.value(node.right.left, memo) + self.value(node.right.right, memo))
        best = max(skip_here, take_here)
        memo[node] = best
        return best

    def rob(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        return self.value(root, {})
"hua.tianyao@columbia.edu"
] | hua.tianyao@columbia.edu |
a0e0dab8c2a325f89741de884acc365a9f985508 | 299d790be6c9a1c026d1ce8f43764dcf12f5480c | /events/migrations/0005_auto_20190225_2056.py | 068206ba10ac485b550b830715974daa20df6441 | [] | no_license | MISHAL90/django_event_planner | 6e836d197c62b0e6c208138b6cfae40589aa1a4d | fb22209b2f367094e0762f2c550b8fe6522d2c5c | refs/heads/master | 2020-04-25T02:46:28.195575 | 2019-02-28T13:11:55 | 2019-02-28T13:11:55 | 172,451,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | # Generated by Django 2.1.7 on 2019-02-25 20:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (Django 2.1.7); migrations are
    # normally applied as-is rather than edited by hand.
    dependencies = [
        ('events', '0004_auto_20190225_1451'),
    ]
    operations = [
        # Create the Dashboard model (title/description/date/image/location).
        migrations.CreateModel(
            name='Dashboard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dashboard_title', models.CharField(max_length=120)),
                ('dashboard_description', models.TextField()),
                ('dashboard_date', models.DateField()),
                ('dashboard_img', models.ImageField(upload_to='')),
                ('dashboard_location', models.TextField()),
            ],
        ),
        # Re-point Booking.event at events.Event with CASCADE delete and a
        # reverse accessor named "booking".
        migrations.AlterField(
            model_name='booking',
            name='event',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='booking', to='events.Event'),
        ),
    ]
| [
"sh3lool@hotmail.com"
] | sh3lool@hotmail.com |
7b37fe51dee07355e068552a18f4ed82f22a1856 | e4f983e2dce6264105e47c340d44f11beac1d427 | /code/helper/data_processing.py | 79a214cd8f35b0f61050b7ed5976423b11a089d4 | [
"MIT"
] | permissive | ayushbaid/AttendToSleep | 42f2d780875e595b4fdb8a1644ec0171830e24f5 | 5563a29b028bdfa240a490670e53b10ad94efe28 | refs/heads/master | 2021-04-06T21:12:42.799187 | 2020-04-26T20:50:30 | 2020-04-26T20:50:30 | 248,613,932 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,799 | py | '''
This code is for data processing
Author: Yujia Xie
'''
import findspark
import argparse
import pyspark
from pyspark.sql import SQLContext, SparkSession
from os import listdir
from os.path import isfile, join
import numpy as np
import os
def load(data_dir):
    """Build 30 s labelled epochs for every patient and save them as .npz.

    Reads the per-channel signal CSVs (*0R = Fpz-Cz, *1R = Pz-Oz,
    *2R = horizontal EOG) and the label CSV (*L) from `data_dir`, joins
    labels onto the raw samples, drops unknown sleep stages, segments the
    signals into 30 s epochs (100 Hz -> 3000 samples) and writes one
    archive per patient via saveFile().
    """
    sc = pyspark.SparkContext("local", "Sleep")
    sql = SparkSession.builder.master("local").appName("SleepSQL").getOrCreate()
    # Load data from CSV files
    fpzcz = sql.read.csv("%s/*0R.csv" % data_dir, header="True").rdd
    pzoz = sql.read.csv("%s/*1R.csv" % data_dir, header="True").rdd
    horizontal = sql.read.csv("%s/*2R.csv" % data_dir, header="True").rdd
    label = sql.read.csv("%s/*L.csv" % data_dir, header="True").rdd
    # Attach label to raw channel signal data: time, patientID, signal, startTime, label
    fpzcz_label = fpzcz.map(lambda x:((x[0], x[2]), x[1])).join(label.map(lambda x:((x[2], x[0]), (x[1], x[3])))).map(lambda x:(int(x[0][0]), x[0][1], float(x[1][0]), int(x[1][1][0]), x[1][1][1]))
    pzoz_label = pzoz.map(lambda x:((x[0], x[2]), x[1])).join(label.map(lambda x:((x[2], x[0]), (x[1], x[3])))).map(lambda x:(int(x[0][0]), x[0][1], float(x[1][0]), int(x[1][1][0]), x[1][1][1]))
    horizontal_label = horizontal.map(lambda x:((x[0], x[2]), x[1])).join(label.map(lambda x:((x[2], x[0]), (x[1], x[3])))).map(lambda x:(int(x[0][0]), x[0][1], float(x[1][0]), int(x[1][1][0]), x[1][1][1]))
    # Filter out unknown stages
    fpzcz_filtered = fpzcz_label.filter(lambda x: (x[4] == "1" or x[4] == "2" or x[4] == "3" or x[4] == "4" or x[4] == "W" or x[4] == "R"))
    pzoz_filtered = pzoz_label.filter(lambda x: (x[4] == "1" or x[4] == "2" or x[4] == "3" or x[4] == "4" or x[4] == "W" or x[4] == "R"))
    horizontal_filtered = horizontal_label.filter(lambda x: (x[4] == "1" or x[4] == "2" or x[4] == "3" or x[4] == "4" or x[4] == "W" or x[4] == "R"))
    # Segment into 30s epochs, frame rate = 100.0, for label data
    label_segmented = fpzcz_filtered.filter(lambda x: ((x[0] - x[3]) % 3000 == 0))
    # Output as dataframe, switch labels, and split the signals into 30s epochs
    patients = [patient for patient in label_segmented.map(lambda x:x[1]).distinct().collect()]
    for patient in patients:
        master = label_segmented.filter(lambda x: x[1] == patient).sortBy(lambda x: x[0])
        output_label = master.map(lambda x: x[4]).collect()
        output_label = vectorize(np.array([label for label in output_label])).astype(int)
        maxTime = master.map(lambda x:x[0]).collect()[-1]
        fpzcz_segment = fpzcz_filtered.filter(lambda x:(x[0] < maxTime + 2 and x[1] == patient)).sortBy(lambda x: x[0]).map(lambda x: x[2]).collect()
        output_fpzcz = processSignal([signal for signal in fpzcz_segment])
        pzoz_segment = pzoz_filtered.filter(lambda x:(x[0] < maxTime + 2 and x[1] == patient)).sortBy(lambda x: x[0]).map(lambda x: x[2]).collect()
        output_pzoz = processSignal([signal for signal in pzoz_segment])
        horizontal_segment = horizontal_filtered.filter(lambda x:(x[0] < maxTime + 2 and x[1] == patient)).sortBy(lambda x: x[0]).map(lambda x: x[2]).collect()
        output_horizontal = processSignal([signal for signal in horizontal_segment])
        # Bug fix: output_pzoz / output_horizontal were computed and then
        # discarded (None was passed to saveFile); persist all three channels.
        saveFile(data_dir, patient, output_fpzcz, output_pzoz, output_horizontal, output_label)
# save the files as a numpy archive for model training
def saveFile(data_dir, patient, fpzcz, pzoz, horizontal, label):
    """Write one .npz per channel for `patient` under `data_dir`.

    Each archive holds the epoch matrix under key "x" and the per-epoch
    stage labels under key "y".
    """
    # Bug fix: the original called os.makedirs("%s/eeg_fpz_cz") without the
    # `% data_dir` substitution, creating a literal "%s/..." directory and
    # letting np.savez fail on the real (still-missing) path.
    fpzcz_dir = os.path.join(data_dir, "eeg_fpz_cz")
    pzoz_dir = os.path.join(data_dir, "eeg_pz_oz")
    horizontal_dir = os.path.join(data_dir, "eog_horizontal")
    for out_dir in (fpzcz_dir, pzoz_dir, horizontal_dir):
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
    output_name = patient + ".npz"
    np.savez(os.path.join(fpzcz_dir, output_name), x=fpzcz, y=label)
    np.savez(os.path.join(pzoz_dir, output_name), x=pzoz, y=label)
    np.savez(os.path.join(horizontal_dir, output_name), x=horizontal, y=label)
# represent the stage with numerical values
def vectorize(array):
    """Map sleep-stage labels to numeric codes: W->0, 1->1, 2->2, 3/4->3, R->4."""
    stage_codes = (('W', 0), ('1', 1), ('2', 2), ('3', 3), ('4', 3), ('R', 4))
    for stage, code in stage_codes:
        array = np.where(array == stage, code, array)
    return array
# split the signal into 30s epochs
def processSignal(array):
    """Reshape a flat signal into 30 s epochs (100 Hz -> 3000 samples each).

    Returns an (n_epochs, 3000) float32 ndarray.
    """
    # Robustness fix: use floor division — under Python 3 the original `/`
    # produced a float section count and relied on numpy truncating it.
    n_epochs = len(array) // 3000
    array = np.array(array).astype(np.float32)
    return np.asarray(np.split(array, n_epochs))
def script():
    """Command-line entry point: locate Spark, parse arguments, run load()."""
    findspark.init()
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--data_dir", type=str, default="/data/physionet_sleep",
        help="File path to the CSV file that contains RAW and LABEL data.")
    load(arg_parser.parse_args().data_dir)
if __name__ == "__main__":
script()
| [
"noreply@github.com"
] | ayushbaid.noreply@github.com |
1226dd2c5a9a51b542246bedd7bd3c1873fdbad6 | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/auth/models/registration_requests.py | 2b82b271bd15c697c17e87bacc2dcbf1d924edf3 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py |
from __future__ import unicode_literals
from sqlalchemy.dialects.postgresql import ARRAY, JSON
from werkzeug.datastructures import MultiDict
from fossir.core.db import db
from fossir.util.locators import locator_property
from fossir.util.string import format_repr, return_ascii
class RegistrationRequest(db.Model):
__tablename__ = 'registration_requests'
__table_args__ = (
db.CheckConstraint('email = lower(email)', 'lowercase_email'),
{'schema': 'users'}
)
id = db.Column(
db.Integer,
primary_key=True
)
comment = db.Column(
db.Text,
nullable=False,
default=''
)
email = db.Column(
db.String,
unique=True,
nullable=False,
index=True
)
extra_emails = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
user_data = db.Column(
JSON,
nullable=False
)
_identity_data = db.Column(
'identity_data',
JSON,
nullable=False
)
settings = db.Column(
JSON,
nullable=False
)
@locator_property
def locator(self):
return {'request_id': self.id}
@property
def identity_data(self):
identity_data = self._identity_data.copy()
# if we have data in identity_data, it was converted from a
# MultiDict so we need to convert it back.
if 'data' in identity_data:
tmp = MultiDict()
tmp.update(self._identity_data['data'])
identity_data['data'] = tmp
return identity_data
@identity_data.setter
def identity_data(self, identity_data):
identity_data = identity_data.copy()
# `identity_data['data']` for multipass-based identities is a
# MultiDict, but json-encoding it would lose all extra values
# for a key, so we convert it to a dict of lists first
if 'data' in identity_data:
identity_data['data'] = dict(identity_data['data'].lists())
self._identity_data = identity_data
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'email')
| [
"hodardhazwinayo@gmail.com"
] | hodardhazwinayo@gmail.com |
200965dee0eb656eb093016a71b2039d85c62f0a | 56c1aa96d2b5d8195d2ae2f5b92061603d2efbf8 | /Programmers/Level2/영어끝말잇기.py | c51fece8da9b91a42a99f6e7f35f447ea0fba163 | [] | no_license | SeunghyoKu/Algorithms | b1c41fdb4a0b01bc7e90a25b698db8d2da2ac7a6 | 800b49a3ce1cb63479165c0e95852c70c037792f | refs/heads/master | 2022-04-28T05:14:51.014395 | 2022-04-21T15:44:19 | 2022-04-21T15:44:19 | 189,375,638 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | import math
def solution(n, words):
mentioned_words = []
count = 0
for i in range(len(words)):
if words[i] in mentioned_words:
return [i % n + 1, math.ceil((i + 1) / n)]
if i >= 1 and words[i][0] != words[i-1][-1]:
return [i % n + 1, math.ceil((i + 1) / n)]
else:
mentioned_words.append(words[i])
return [0, 0]
| [
"noreply@github.com"
] | SeunghyoKu.noreply@github.com |
7b571d83f84608ebeeaddbfae06938549a457d9b | 54d17336ca03801bd9c9ef37be8642b332ab71c4 | /osm/SO/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py | 2023db5a8ce00b6b1b6982b49c0b0047939c92fb | [] | no_license | dennis-me/Pishahang | 2428379c4f7d3ee85df4b85727ce92e8fe69957a | cdd0abe80a76d533d08a51c7970d8ded06624b7d | refs/heads/master | 2020-09-07T12:35:54.734782 | 2020-01-24T20:11:33 | 2020-01-24T20:11:33 | 220,782,212 | 2 | 0 | null | 2019-11-10T11:46:44 | 2019-11-10T11:46:43 | null | UTF-8 | Python | false | false | 8,136 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2016 RIFT.io Inc
import importlib
import os
from rift.mano.yang_translator.common.exception import YangClassAttributeError
from rift.mano.yang_translator.common.exception import YangClassImportError
from rift.mano.yang_translator.common.exception import YangModImportError
from rift.mano.yang_translator.common.utils import _
from rift.mano.yang_translator.conf.config import ConfigProvider \
as translatorConfig
from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
import ToscaResource
class TranslateDescriptors(object):
'''Translate YANG NodeTemplates to RIFT.io MANO Resources.'''
YANG_DESC = (NSD, VNFD) = ('nsd', 'vnfd')
###########################
# Module utility Functions
# for dynamic class loading
###########################
YANG_TO_TOSCA_TYPE = None
def _load_classes(log, locations, classes):
'''Dynamically load all the classes from the given locations.'''
for cls_path in locations:
# Use the absolute path of the class path
abs_path = os.path.dirname(os.path.abspath(__file__))
abs_path = abs_path.replace('rift/mano/yang_translator/rwmano',
cls_path)
log.debug(_("Loading classes from %s") % abs_path)
# Grab all the yang type module files in the given path
mod_files = [f for f in os.listdir(abs_path) if (
f.endswith('.py') and
not f.startswith('__init__') and
f.startswith('yang_'))]
# For each module, pick out the target translation class
for f in mod_files:
f_name, ext = f.rsplit('.', 1)
mod_name = cls_path + '/' + f_name
mod_name = mod_name.replace('/', '.')
try:
mod = importlib.import_module(mod_name)
target_name = getattr(mod, 'TARGET_CLASS_NAME')
clazz = getattr(mod, target_name)
classes.append(clazz)
except ImportError:
raise YangModImportError(mod_name=mod_name)
except AttributeError:
if target_name:
raise YangClassImportError(name=target_name,
mod_name=mod_name)
else:
# TARGET_CLASS_NAME is not defined in module.
# Re-raise the exception
raise
def _generate_type_map(log):
'''Generate YANG translation types map.
Load user defined classes from location path specified in conf file.
Base classes are located within the yang directory.
'''
# Base types directory
BASE_PATH = 'rift/mano/yang_translator/rwmano/yang'
# Custom types directory defined in conf file
custom_path = translatorConfig.get_value('DEFAULT',
'custom_types_location')
# First need to load the parent module, for example 'contrib.mano',
# for all of the dynamically loaded classes.
classes = []
TranslateDescriptors._load_classes(log,
(BASE_PATH, custom_path),
classes)
try:
types_map = {clazz.yangtype: clazz for clazz in classes}
log.debug(_("Type maps loaded: {}").format(types_map.keys()))
except AttributeError as e:
raise YangClassAttributeError(message=e.message)
return types_map
def __init__(self, log, yangs, tosca_template, vnfd_files=None):
self.log = log
self.yangs = yangs
self.tosca_template = tosca_template
self.vnfd_files = vnfd_files
# list of all TOSCA resources generated
self.tosca_resources = []
self.metadata = {}
log.debug(_('Mapping between YANG nodetemplate and TOSCA resource.'))
def translate(self):
if TranslateDescriptors.YANG_TO_TOSCA_TYPE is None:
TranslateDescriptors.YANG_TO_TOSCA_TYPE = \
TranslateDescriptors._generate_type_map(self.log)
return self._translate_yang()
def translate_metadata(self):
"""Translate and store the metadata in instance"""
FIELDS_MAP = {
'ID': 'name',
'vendor': 'vendor',
'version': 'version',
}
metadata = {}
# Initialize to default values
metadata['name'] = 'yang_to_tosca'
metadata['vendor'] = 'RIFT.io'
metadata['version'] = '1.0'
if 'nsd' in self.yangs:
yang_meta = self.yang['nsd'][0]
elif 'vnfd' in self.yangs:
yang_meta = self.yang['vnfd'][0]
for key in FIELDS_MAP:
if key in yang_meta.keys():
metadata[key] = str(yang_meta[FIELDS_MAP[key]])
self.log.debug(_("Metadata {0}").format(metadata))
self.metadata = metadata
def _translate_yang(self):
self.log.debug(_('Translating the descriptors.'))
if self.NSD in self.yangs:
for nsd in self.yangs[self.NSD]:
self.log.debug(_("Translate descriptor of type nsd: {}").
format(nsd))
node_name = nsd.pop(ToscaResource.NAME).replace(' ','_')
node_name = node_name if node_name.endswith('nsd') else ''.join([node_name, '_nsd'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.NSD](
self.log,
node_name,
self.NSD,
nsd,
self.vnfd_files)
self.tosca_resources.append(tosca_node)
vnfd_name_list = []
if self.VNFD in self.yangs:
for vnfd in self.yangs[self.VNFD]:
if vnfd['name'] not in vnfd_name_list:
self.log.debug(_("Translate descriptor of type vnfd: {}").
format(vnfd))
vnfd_name_list.append(vnfd['name'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.VNFD](
self.log,
vnfd.pop(ToscaResource.NAME),
self.VNFD,
vnfd)
self.tosca_resources.append(tosca_node)
# First translate VNFDs
for node in self.tosca_resources:
if node.type == self.VNFD:
self.log.debug(_("Handle yang for {0} of type {1}").
format(node.name, node.type_))
node.handle_yang()
# Now translate NSDs
for node in self.tosca_resources:
if node.type == self.NSD:
self.log.debug(_("Handle yang for {0} of type {1}").
format(node.name, node.type_))
node.handle_yang(self.tosca_resources)
return self.tosca_resources
def find_tosca_resource(self, name):
for resource in self.tosca_resources:
if resource.name == name:
return resource
def _find_yang_node(self, yang_name):
for node in self.nodetemplates:
if node.name == yang_name:
return node
| [
"github@OrangeOnBlack.de"
] | github@OrangeOnBlack.de |
7c56516f441b0a72ae06e9f44126a1862c11d9ef | 8bd63bc56b39d26458ad54b7f18c4b149c1e3ce2 | /sphinx-files/rst-files/Data/code/2017/07/000715/jgpwjtuaoawbmpf.py | 871c670e4634997240d6eaa9ce8ab46848bc20d0 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | isabella232/scipy-central-rescue | 43270c0e1850b989fbe9a5b1a06c3be11d16464a | 2b331610d52c189ae96bea4f4ce2ec343146b608 | refs/heads/master | 2021-09-06T09:17:30.627497 | 2018-02-04T19:41:11 | 2018-02-04T19:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | gotOZN http://www.LnAJ7K8QSpkiStk3sLL0hQP6MO2wQ8gO.com | [
"jiayue.li@berkeley.edu"
] | jiayue.li@berkeley.edu |
02bef1d7f5fb0e007ba6d281c279b2f0012eb8ec | b9aa903204eb4a01152ff8c22dda75c4738810a9 | /cookies/cookies.py | da14ccf2eee606cddb3fdeeaec9c1a281c9911fb | [
"MIT"
] | permissive | GitAcrown/RedAppsv2 | 6b99c36487c1e7292efb0d7b096f0889a6e110f8 | a3a1fb5a5c659ce6e54e62503012a79a71763d47 | refs/heads/main | 2023-09-06T09:20:19.398307 | 2021-09-27T14:43:20 | 2021-09-27T14:43:20 | 338,360,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,020 | py | import asyncio
import json
import logging
import operator
import re
import random
import aiohttp
import os
import string
import time
from datetime import datetime, timedelta
from fuzzywuzzy import process
import discord
from redbot.core.data_manager import cog_data_path, bundled_data_path
from redbot.core.utils.menus import start_adding_reactions, menu, DEFAULT_CONTROLS
from typing import Union, Tuple, List
from copy import copy
from redbot.core import Config, commands, checks, errors
from redbot.core.utils.chat_formatting import box, humanize_number, humanize_timedelta
from tabulate import tabulate
logger = logging.getLogger("red.RedAppsv2.Cookies")
class Cookies(commands.Cog):
"""Simulateur de Fortune cookies"""
def __init__(self, bot):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, identifier=736144321857978388, force_registration=True)
default_guild = {'Cookies': {},
'price': 30,
'reward': 10,
'cookie_life': 3,
'cooldown': 3600,
'free_cooldown': 1800,
'cookie_delay': 86400,
'report_channel': None,
'reports': []}
default_member = {'last_cookie': {},
'last_free_cookie': 0}
self.config.register_guild(**default_guild)
self.config.register_member(**default_member)
self.session = aiohttp.ClientSession()
async def import_old_data(self):
    """Import legacy 'Fortune' cog data into this cog's storage.

    Warning: this overwrites any cookies already stored for each guild.

    Returns:
        True on success, False when the import failed for any reason.
    """
    try:
        fortune_config = Config.get_conf(None, identifier=736144321857978388, cog_name="Fortune")
        for n, guild in enumerate(self.bot.guilds, start=1):
            imported = {}
            logger.info(msg=f"{n}. Importation des données Fortune de : {guild.id}")
            cookies = await fortune_config.guild(guild).COOKIES()
            for c in cookies:
                # Map the old schema onto the new one: 'logs' becomes
                # 'posts', and every imported cookie starts at score 1.
                imported[c] = {
                    'text': cookies[c]['text'],
                    'author': cookies[c]['author'],
                    'posts': cookies[c]['logs'],
                    'score': 1,
                }
            await self.config.guild(guild).Cookies.set(imported)
    except Exception:
        # Was a bare `except:` that silently swallowed everything,
        # including KeyboardInterrupt/SystemExit. Keep the best-effort
        # contract (return False) but narrow the catch and log the cause.
        logger.exception("Fortune data import failed")
        return False
    return True
async def get_random_cookie(self, guild: discord.Guild, filter_users: List[discord.Member]):
    """Pick a random cookie id for *guild*, weighted by each cookie's score.

    Cookies authored by any member of *filter_users* are excluded, as are
    cookies whose most recent appearance is newer than the configured
    ``cookie_delay``. Returns the chosen cookie id, or None when no cookie
    is eligible.
    """
    cookies = await self.config.guild(guild).Cookies()
    weighted = {}

    def last_posted(k):
        # Timestamp of the cookie's most recent appearance; 0 if it has
        # never been shown (empty 'posts' list).
        try:
            return k[-1]
        except IndexError:
            return 0

    for c in cookies:
        if cookies[c]['author'] in [m.id for m in filter_users] or last_posted(cookies[c]['posts']) + await self.config.guild(guild).cookie_delay() > time.time():
            continue
        weighted[c] = cookies[c]['score']
    if weighted:
        # Higher-scored cookies are proportionally more likely to be drawn.
        return random.choices(list(weighted.keys()), weights=list(weighted.values()), k=1)[0]
    return None
async def fetch_inspirobot_quote(self):
    """Fetch a random quote-image URL from Inspirobot.me.

    Returns:
        The image URL as a string, or None when the HTTP request fails
        for any reason (network error, bad response, ...).
    """
    try:
        async with self.session.request("GET", "http://inspirobot.me/api?generate=true") as page:
            return await page.text(encoding="utf-8")
    except Exception:
        # Deliberately best-effort: callers treat None as "no quote
        # available". Log instead of failing (or swallowing) silently;
        # the original bound the exception to an unused variable.
        logger.warning("Could not fetch an Inspirobot quote", exc_info=True)
        return None
@commands.command(name='cookie', aliases=['f'])
@commands.guild_only()
@commands.cooldown(1, 30, commands.BucketType.user)
async def get_fortune_cookie(self, ctx):
"""Obtenir un fortune cookie aléatoire"""
guild, author = ctx.guild, ctx.author
config = await self.config.guild(guild).all()
cookie_id = await self.get_random_cookie(guild, [author])
like, dislike = '👍', '👎'
if not cookie_id:
quote = await self.fetch_inspirobot_quote()
fcd = await self.config.member(author).last_free_cookie()
if not quote:
return await ctx.reply("**Réserve vide** › Il n'y a actuellement aucun cookie disponible.\nVous pouvez contribuer à en ajouter avec `;addf` !",
mention_author=False)
elif fcd + config['free_cooldown'] <= time.time():
em = discord.Embed(color=author.color)
em.set_image(url=quote)
em.set_footer(text="Gratuit · Cookie offert en raison du manque de stock", icon_url=self.bot.user.avatar_url)
await self.config.member(author).last_free_cookie.set(time.time())
return await ctx.reply(embed=em, mention_author=False)
else:
td = humanize_timedelta(seconds=int(
(fcd + config['free_cooldown']) - time.time()))
return await ctx.reply(f"**Cooldown** › Vous devez attendre encore "
f"*{td}* avant de pouvoir obtenir un autre cookie (même gratuit).", mention_author=False)
eco = self.bot.get_cog('XPay')
currency = await eco.get_currency(guild)
def special_formatter(string: str):
scan = re.compile(r"<([\w\s:'\-|]*)>", re.DOTALL | re.IGNORECASE).findall(string)
for b in scan:
chunk = f'<{b}>'
if len(chunk) > 200:
continue
b, *p = re.split(':|\|', b)
b = b.lower()
if b == 'number':
seuils = [int(i) for i in p[0].split('_')] if p else (0, 10)
try:
string = string.replace(chunk, str(random.randint(*seuils)))
except:
pass
if b == 'member':
mem = random.choice(guild.members)
string = string.replace(chunk, mem.mention)
if b == 'bool':
string = string.replace(chunk, random.choice(('✅', '❎')))
if b == 'random' and p:
c = random.choice(list(p[0].split('_')))
string = string.replace(chunk, c)
return string
lc = await self.config.member(author).last_cookie()
if lc:
cooldown = lc['timestamp']
if cooldown + config['cooldown'] > time.time():
td = humanize_timedelta(seconds=int(
(cooldown + config['cooldown']) - time.time()))
return await ctx.reply(f"**Cooldown** › Vous devez attendre encore "
f"*{td}* avant de pouvoir acheter un autre cookie.", mention_author=False)
if not await eco.check_balance(author, config['price']):
return await ctx.reply(f"**Solde insuffisant** › Il vous faut {config['price']}{currency} pour acheter un cookie.", mention_author=False)
cookie = config['Cookies'][cookie_id]
async with ctx.typing():
await eco.withdraw_credits(author, config['price'], reason="Achat d'un fortune cookie")
cookie_author = guild.get_member(cookie['author'])
random_member = random.choice(guild.members)
date, hour = datetime.now().strftime('%d/%m/%Y'), datetime.now().strftime('%H:%M')
rdm_ten = random.randint(0, 10)
rdm_hundred = random.randint(0, 100)
rdm_bool = random.choice(("Vrai", "Faux"))
text = special_formatter(cookie['text']).format(buyer=author,
guild=guild,
server=guild,
cookie_author=cookie_author,
random_member=random_member,
date=date,
hour=hour,
random_ten=rdm_ten,
random_hundred=rdm_hundred,
random_bool=rdm_bool)
em = discord.Embed(description=text, color=author.color)
em.set_footer(
text=f"Vous avez payé {config['price']}{currency}", icon_url='https://i.imgur.com/Lv9E1uL.png')
if 'http' in cookie['text']:
scan = re.compile(r'(https?://\S*\.\S*)', re.DOTALL | re.IGNORECASE).findall(cookie['text'])
if scan:
em.set_image(url=scan[0])
name = scan[0].split('/')[-1]
if "?" in name:
name = name.split('?')[0]
if not name:
name = "URL"
txt = text.replace(scan[0], f"[[{name}]]({scan[0]})")
em.description = txt
msg = await ctx.reply(embed=em, mention_author=False)
await self.config.member(author).last_cookie.set({'author': cookie_author.id if cookie_author else None, 'text': em.description,
'tipped': False, 'timestamp': time.time(), 'cookie_id': cookie_id})
rfooter = ""
if cookie_author:
rfooter += f"{cookie_author.name}"
else:
rfooter += f'Auteur inconnu'
start_adding_reactions(msg, [like, dislike])
try:
react, _ = await self.bot.wait_for("reaction_add", check=lambda m, u: u == ctx.author and m.message.id == msg.id,
timeout=60)
except asyncio.TimeoutError:
await msg.clear_reactions()
await self.config.guild(guild).Cookies.clear_raw(cookie_id)
em.set_footer(text=rfooter)
if cookie_author:
em.set_footer(text=rfooter, icon_url=cookie_author.avatar_url)
else:
await msg.clear_reactions()
if cookie_author:
if react.emoji == like:
cookie['score'] *= 2
rfooter += f" {config['reward']:+}{currency}"
await eco.deposit_credits(cookie_author, config['reward'], reason="Like d'un de vos fortune cookie")
elif react.emoji == dislike:
cookie['score'] /= 2
if cookie['posts']:
rfooter += ' ♻️'
cookie['posts'].append(time.time())
await self.config.guild(guild).Cookies.set_raw(cookie_id, value=cookie)
if cookie['score'] <= 0.25:
rfooter += ' 🗑️'
await self.config.guild(guild).Cookies.clear_raw(cookie_id)
if len(cookie['posts']) >= config['cookie_life']:
rfooter += ' ⌛'
await self.config.guild(guild).Cookies.clear_raw(cookie_id)
if cookie_author:
em.set_footer(text=rfooter, icon_url=cookie_author.avatar_url)
else:
em.set_footer(text=rfooter)
return await msg.edit(embed=em, mention_author=False)
@commands.command(name="testcookie")
async def test_cookie_formating(self, ctx, *, text: str):
"""Permet de tester le formattage d'un cookie (balises et fonctions)
__Balises__
Vous pouvez mettre les balises suivantes dans vos cookies pour exploiter les objets renvoyés directement dans le texte
/!\\ Mettre plusieurs fois la même balise renvoie le même objet !
*{buyer}* = Acheteur du cookie
*{guild}* / *{server}* = Serveur où vous êtes
*{cookie_author}* = Créateur du cookie (vous-même)
*{random_member}* = Membre aléatoire du serveur
*{date}* = Date au moment de l'ouverture du cookie au format dd/mm/aaaa
*{hour}* = Heure au moment de l'ouverture du cookie au format hh:mm
*{random_ten}* / *{random_hundred}* = Nombre aléatoire entre 0 et 10 / entre 0 et 100
*{random_bool}* = Booléen au hasard (Vrai ou Faux)
__Fonctions__
n = ID de la fonction si vous comptez en mettre plusieures identiques
<number|X_Y|n> = Génère un nombre aléatoire entre X et Y
<member|n> = Génère une mention de membre aléatoire
<bool|n> = Génère un emoji booléen au hasard
<random|A_B_C...> = Choisir une réponse aléatoire parmi les options (délim. par _)
Il est possible d'utiliser `:` à la place de `|`"""
guild, author = ctx.guild, ctx.author
original = copy(text)
def special_formatter(string: str):
scan = re.compile(r"<([\w\s:'\-|]*)>", re.DOTALL | re.IGNORECASE).findall(string)
for b in scan:
chunk = f'<{b}>'
if len(chunk) > 200:
continue
b, *p = re.split(':|\|', b)
b = b.lower()
if b == 'number':
seuils = [int(i) for i in p[0].split('_')] if p else (0, 10)
try:
string = string.replace(chunk, str(random.randint(*seuils)))
except:
pass
if b == 'member':
mem = random.choice(guild.members)
string = string.replace(chunk, mem.mention)
if b == 'bool':
string = string.replace(chunk, random.choice(('✅', '❎')))
if b == 'random' and p:
c = random.choice(list(p[0].split('_')))
string = string.replace(chunk, c)
return string
cookie_author = '`Auteur du cookie`'
random_member = random.choice(guild.members)
date, hour = datetime.now().strftime('%d/%m/%Y'), datetime.now().strftime('%H:%M')
rdm_ten = random.randint(0, 10)
rdm_hundred = random.randint(0, 100)
rdm_bool = random.choice(("Vrai", "Faux"))
text = special_formatter(text).format(buyer=author,
guild=guild,
server=guild,
cookie_author=cookie_author,
random_member=random_member,
date=date,
hour=hour,
random_ten=rdm_ten,
random_hundred=rdm_hundred,
random_bool=rdm_bool)
em = discord.Embed(description=box(original), color=author.color)
if 'http' in text:
scan = re.compile(r'(https?://\S*\.\S*)', re.DOTALL | re.IGNORECASE).findall(text)
if scan:
em.set_image(url=scan[0])
name = scan[0].split('/')[-1]
if "?" in name:
name = name.split('?')[0]
if not name:
name = "URL"
txt = text.replace(scan[0], f"[[{name}]]({scan[0]})")
em.description = txt
em.add_field(name="Résultat", value=text, inline=False)
em.set_footer(text="Ceci est une démonstration de ce que donnerait votre texte s'il était obtenu par quelqu'un")
await ctx.reply(embed=em, mention_author=False)
@commands.command(name='cookieadd', aliases=['addf', 'fadd'])
@commands.guild_only()
@commands.cooldown(1, 10, commands.BucketType.user)
async def add_new_cookie(self, ctx, *, texte: str):
"""Ajouter un nouveau fortune cookie sur ce serveur
- Vous êtes récompensé lorsqu'un membre like votre cookie
- Les cookies expirent automatiquement au bout d'un certain nombre d'apparitions
- Un cookie peut être supprimé si son score est trop bas (<= 0.25)
- Vous ne pouvez pas tomber sur vos propres cookies
- Les URL sont automatiquement réduites et les images peuvent s'afficher directement dans le cookie
Voir `;help testcookie` pour voir comment utiliser les balises et les fonctions"""
guild, author = ctx.guild, ctx.author
config = await self.config.guild(guild).all()
eco = self.bot.get_cog('XPay')
curr = await eco.get_currency(guild)
if len(texte) < 10 or len(texte) > 1000:
return await ctx.reply("**Longueur invalide** › Le message du cookie doit faire entre 10 et 1000 caractères, liens compris.", mention_author=False)
if len(ctx.message.mentions) > 3:
return await ctx.reply("**Spam de mentions** › Votre message comporte trop de mentions de membres.", mention_author=False)
is_flood = lambda t: bool([m for m in t.split() if t.split().count(m) > len(t.split()) / 2] if len(t.split()) >= 4 else False)
if is_flood(texte):
return await ctx.reply("**Flood** › Votre message comporte trop de répétitions et s'apparente à du flood.", mention_author=False)
all_cookies = [config['Cookies'][c]['text'].lower() for c in config['Cookies']]
dist = process.extractOne(texte.lower(), all_cookies, score_cutoff=91)
if dist:
return await ctx.reply("**Doublon** › Votre message est trop proche (> 90% de similiarité) avec un cookie déjà présent.", mention_author=False)
cookie = {'text': texte, 'author': author.id, 'posts': [], 'score': 1}
cookie_id = f"{int(time.time())}-{author.id}"
await self.config.guild(guild).Cookies.set_raw(cookie_id, value=cookie)
await ctx.reply(f"**Cookie ajouté** › Votre cookie a été ajouté, vous recevrez une récompense de {config['reward']}{curr} si un membre like votre message.",
mention_author=False, delete_after=20)
try:
await ctx.message.delete(delay=10)
except Exception:
pass
@commands.command(name='tip', aliases=['tips'])
@commands.guild_only()
async def tip_cookie_author(self, ctx, somme: int = None):
"""Permet de donner un tip à l'auteur du dernier cookie acheté
Par défaut le tip prendra la valeur définie comme récompense lors des like de cookie"""
author, guild = ctx.author, ctx.guild
if not somme:
somme = await self.config.guild(guild).reward() if await self.config.guild(guild).reward() > 0 else 1
last_cookie = await self.config.member(author).last_cookie()
eco = self.bot.get_cog('XPay')
currency = await eco.get_currency(guild)
confirm, cancel = self.bot.get_emoji(
812451214037221439), self.bot.get_emoji(812451214179434551)
if not last_cookie:
return await ctx.send("**Aucun cookie acheté** › Vous n'avez aucun cookie dans votre historique d'achats.")
cookie_price = await self.config.guild(guild).price()
if somme <= 0 or somme > cookie_price:
return await ctx.send(f"**Valeur invalide** › Le tip doit être compris entre 1 et la valeur d'achat ({cookie_price}{currency}).")
if not await eco.check_balance(author, somme):
return await ctx.send("**Solde insuffisant** › Vous n'avez pas les moyens de tipper cette somme.")
if last_cookie['tipped']:
return await ctx.send("**Déjà tippé** › Vous ne pouvez pas donner plus d'un seul tip par cookie.")
if last_cookie['author']:
lc_author = guild.get_member(last_cookie['author'])
em = discord.Embed(title=f"Envoyer un tip à **{lc_author.name}**", color=author.color)
em.add_field(name="Texte du cookie", value=box(last_cookie['text']))
em.set_footer(text=f"Voulez-vous envoyer {somme}{currency} à l'auteur de ce cookie pour le récompenser ?")
msg = await ctx.reply(embed=em, mention_author=False)
start_adding_reactions(msg, [confirm, cancel])
try:
react, _ = await self.bot.wait_for("reaction_add",
check=lambda m,
u: u == ctx.author and m.message.id == msg.id,
timeout=30)
except asyncio.TimeoutError:
return await msg.delete()
if react.emoji == confirm:
await msg.clear_reactions()
await self.config.member(author).last_cookie.set_raw('tipped', value=True)
try:
await eco.withdraw_credits(author, somme, reason="Tip de fortune cookie")
except:
em.set_footer(text=f"Erreur dans l'envoi du tips")
return await msg.edit(embed=em)
else:
await eco.deposit_credits(lc_author, somme, reason="Tip reçu pour un fortune cookie")
em.set_footer(text=f"Vous avez envoyé {somme}{currency} à {lc_author.name}")
return await msg.edit(embed=em, mention_author=False)
else:
return await msg.delete()
else:
await ctx.send("**Auteur inconnu** › L'auteur de votre dernier cookie acheté ne semble plus être sur ce serveur et ne peut donc recevoir de tips.")
@commands.command(name='report')
@commands.guild_only()
async def report_cookie(self, ctx):
"""Signaler le contenu du dernier cookie obtenu"""
author, guild = ctx.author, ctx.guild
last_cookie = await self.config.member(author).last_cookie()
config = await self.config.guild(guild).all()
confirm, cancel = self.bot.get_emoji(
812451214037221439), self.bot.get_emoji(812451214179434551)
if not last_cookie:
return await ctx.send("**Aucun cookie acheté** › Vous n'avez aucun cookie dans votre historique d'achats.")
cookies = await self.config.guild(guild).Cookies()
try:
cookie_id = last_cookie['cookie_id']
_c = cookies[cookie_id]
except:
return await ctx.send("**Cookie inexistant** › Le cookie en question n'existe déjà plus dans la base de données, il a peut-être expiré ou a déjà été supprimé.")
em = discord.Embed(title=f"Signaler le cookie *{cookie_id}*", color=author.color)
em.add_field(name="Texte du cookie", value=box(last_cookie['text']))
em.set_footer(text=f"Voulez-vous signaler ce cookie aux modérateurs ?")
msg = await ctx.reply(embed=em, mention_author=False)
start_adding_reactions(msg, [confirm, cancel])
try:
react, _ = await self.bot.wait_for("reaction_add",
check=lambda m,
u: u == ctx.author and m.message.id == msg.id,
timeout=30)
except asyncio.TimeoutError:
return await msg.delete()
if react.emoji == confirm:
await msg.clear_reactions()
if config['report_channel']:
chan = self.bot.get_channel(config['report_channel'])
if chan:
r = discord.Embed(title="Signalement d'un cookie", description=f"**ID :** `{cookie_id}`", color=discord.Color.red())
r.add_field(name="Texte du cookie signalé", value=box(last_cookie['text']))
r.set_footer(text=f"Supprimer : \";cookieset delete {cookie_id}\"\nVoir tous les signalements : \";reports\"")
await chan.send(embed=r)
async with self.config.guild(guild).reports() as reports:
if cookie_id not in reports:
reports.append(cookie_id)
em.set_footer(text=f"Votre signalement a bien été enregistré")
await msg.edit(embed=em, mention_author=False)
await msg.delete(delay=20)
else:
return await msg.delete()
@commands.command(name='reports')
@commands.guild_only()
@checks.admin_or_permissions(manage_messages=True)
async def list_reports(self, ctx):
"""Liste tous les cookies signalés
Pour en supprimer, utilisez ';cookieset delete'"""
guild = ctx.guild
reports = await self.config.guild(guild).reports()
cookies = await self.config.guild(guild).Cookies()
tabl = []
to_del = []
if not reports:
return await ctx.send("**Aucun signalement** › Aucun cookie n'a été signalé.")
for r in reports:
try:
cookie = cookies[r]
tabl.append((r, cookie['text'] if len(cookie['text']) < 40 else cookie['text'][:37] + '...'))
except:
to_del.append(r)
new_reports = reports[::]
for d in to_del:
new_reports.remove(d)
await self.config.guild(guild).reports.set(new_reports)
if tabl:
rem = discord.Embed(title="Cookies signalés", description="```py\n" + tabulate(tabl, headers=('ID', 'Texte')) + "```", color=discord.Color.red())
rem.set_footer(text="Supprimez-en avec ';cookieset delete <ID>'")
await ctx.send(embed=rem)
else:
await ctx.send("**Liste vide** › Aucun cookie présentement en circulation n'est signalé.")
@commands.group(name="cookieset")
@checks.admin_or_permissions(manage_messages=True)
async def _cookie_settings(self, ctx):
"""Groupe des commandes de gestion des fortune cookies"""
@_cookie_settings.command()
async def price(self, ctx, val: int = 30):
"""Modifie le prix des cookies
C'est aussi la valeur maximale d'un tip
Par défaut 30 crédits"""
guild = ctx.guild
if val >= 0:
await self.config.guild(guild).price.set(val)
await ctx.send(f"**Valeur modifiée** • Les fortunes cookies coûteront désormais {val} crédits.")
else:
await ctx.send(f"**Valeur invalide** • Le prix du fortune cookie doit être supérieur ou égal à 0 crédits.")
@_cookie_settings.command()
async def reward(self, ctx, val: int = 10):
    """Set the reward credited to a cookie's author when it receives a like

    Default: 10 credits"""
    guild = ctx.guild
    if val >= 0:
        await self.config.guild(guild).reward.set(val)
        # Bugfix: both messages were copy-pasted from the `price` command
        # and wrongly told the user the cookie *price* had changed.
        await ctx.send(f"**Valeur modifiée** • La récompense pour un like d'un fortune cookie vaudra désormais {val} crédits.")
    else:
        await ctx.send(f"**Valeur invalide** • La récompense doit être supérieure ou égale à 0 crédits.")
@_cookie_settings.command()
async def cooldown(self, ctx, val: int = 3600):
"""Modifie le temps en secondes entre l'achat de deux cookies
Par défaut 1h (3600s)"""
guild=ctx.guild
if val >= 0:
await self.config.guild(guild).cooldown.set(val)
await ctx.send(f"**Valeur modifiée** • Les fortunes cookies pourront désormais être achetés toutes les {val}s.")
else:
await ctx.send(f"**Valeur invalide** • La valeur doit être supérieure ou égale à 0s.")
@_cookie_settings.command()
async def freecooldown(self, ctx, val: int = 1800):
"""Modifie le temps en secondes entre l'obtention de deux cookies gratuit (manque de stock)
Par défaut 30m (1800s)"""
guild=ctx.guild
if val >= 0:
await self.config.guild(guild).free_cooldown.set(val)
await ctx.send(f"**Valeur modifiée** • Les cookies gratuits (fillers) pourront désormais être obtenus toutes les {val}s.")
else:
await ctx.send(f"**Valeur invalide** • La valeur doit être supérieure ou égale à 0s.")
@_cookie_settings.command()
async def delay(self, ctx, val: int = 86400):
"""Modifie le temps minimal en secondes qu'il faut à un cookie pour réapparaitre une nouvelle fois
Par défaut 1j (86400s)"""
guild=ctx.guild
if val >= 0:
await self.config.guild(guild).cookie_delay.set(val)
await ctx.send(f"**Valeur modifiée** • Un même fortune cookie pourra désormais réapparaitre toutes les {val}s.")
else:
await ctx.send(f"**Valeur invalide** • La valeur doit être supérieure ou égale à 0s.")
@_cookie_settings.command()
async def life(self, ctx, val: int = 3):
"""Modifie le nombre d'apparitions maximales d'un cookie (durée de vie)
Par défaut 3x"""
guild=ctx.guild
if val >= 1:
await self.config.guild(guild).cookie_life.set(val)
await ctx.send(f"**Valeur modifiée** • Les cookies pourront désormais apparaître {val}x.")
else:
await ctx.send(f"**Valeur invalide** • La valeur doit être supérieure ou égale à 1.")
@_cookie_settings.command()
async def reportchannel(self, ctx, channel: discord.TextChannel = None):
"""Configure un channel écrit pour recevoir les signalements, ne rien mettre désactive cette fonctionnalité
Si aucun salon n'est configuré, vous pouvez toujours voir les signalements avec ';reports'"""
guild=ctx.guild
if channel:
await self.config.guild(guild).report_channel.set(channel.id)
await ctx.send(f"**Salon modifiée** • Les signalements seront envoyés sur {channel.mention}.")
else:
await ctx.send(f"**Salon retiré** • Les signalements ne seront pas envoyés sur un salon. Utilisez `;reports` pour voir les signalements.")
@_cookie_settings.command()
async def deletetext(self, ctx, *, texte: str):
"""Supprimer un cookie - par une recherche de texte"""
config = await self.config.guild(ctx.guild).all()
all_cookies = [config['Cookies'][c]['text'].lower() for c in config['Cookies']]
dist = process.extractOne(texte.lower(), all_cookies, score_cutoff=70)
emcolor = discord.Color.red()
confirm, cancel = self.bot.get_emoji(812451214037221439), self.bot.get_emoji(812451214179434551)
if dist:
txt = dist[0]
for cook in config['Cookies']:
if config['Cookies'][cook]['text'].lower() == txt:
cookie = config['Cookies'][cook]
em = discord.Embed(title="Supprimer un fortune cookie", description=box(cookie['text']),
color=emcolor)
seller = ctx.guild.get_member(cookie['author'])
seller = str(seller) if seller else str(cookie['author'])
em.set_footer(text=f"Confirmez-vous la suppression de ce cookie de {seller} ?")
msg = await ctx.send(embed=em)
start_adding_reactions(msg, [confirm, cancel])
try:
react, _ = await self.bot.wait_for("reaction_add",
check=lambda m,
u: u == ctx.author and m.message.id == msg.id,
timeout=30)
except asyncio.TimeoutError:
return await msg.clear_reactions()
if react.emoji == confirm:
await msg.clear_reactions()
await self.config.guild(ctx.guild).Cookies.clear_raw(cook)
em.set_footer(text="Le cookie a été supprimé avec succès")
async with self.config.guild(ctx.guild).reports() as reports:
if cook in reports:
reports.remove(cook)
return await msg.edit(embed=em, mention_author=False)
else:
return await msg.delete()
await ctx.send("**Introuvable** • Donnez une partie plus importante du texte du cookie pour que je puisse le trouver")
@_cookie_settings.command()
async def delete(self, ctx, cookie_id: str):
"""Supprimer un cookie - par l'identifiant"""
guild = ctx.guild
confirm, cancel = self.bot.get_emoji(
812451214037221439), self.bot.get_emoji(812451214179434551)
cookies = await self.config.guild(guild).Cookies()
try:
cookie = cookies[cookie_id]
except:
return await ctx.send("**Cookie inexistant** › Le cookie en question n'existe déjà plus dans la base de données, il a peut-être expiré ou a déjà été supprimé.")
em = discord.Embed(title="Supprimer un fortune cookie", description=box(cookie['text']), color=discord.Color.red())
seller = guild.get_member(cookie['author'])
seller = str(seller) if seller else str(cookie['author'])
em.set_footer(text=f"Confirmez-vous la suppression de ce cookie de {seller} ?")
msg = await ctx.send(embed=em)
start_adding_reactions(msg, [confirm, cancel])
try:
react, _ = await self.bot.wait_for("reaction_add",
check=lambda m,
u: u == ctx.author and m.message.id == msg.id,
timeout=30)
except asyncio.TimeoutError:
return await msg.clear_reactions()
if react.emoji == confirm:
await msg.clear_reactions()
await self.config.guild(ctx.guild).Cookies.clear_raw(cookie_id)
em.set_footer(text="Le cookie a été supprimé avec succès")
async with self.config.guild(guild).reports() as reports:
if cookie_id in reports:
reports.remove(cookie_id)
return await msg.edit(embed=em, mention_author=False)
else:
await msg.delete()
@_cookie_settings.command()
async def deleteuser(self, ctx, users: commands.Greedy[discord.Member]):
"""Supprime tous les cookies créées par les utilisateurs visés"""
guild = ctx.guild
cookies = await self.config.guild(guild).Cookies()
nb = 0
for c in cookies:
if cookies[c]['author'] in [u.id for u in users]:
await self.config.guild(guild).Cookies.clear_raw(c)
nb += 1
await ctx.send(f"**Cookies supprimés** • {nb} cookies des membres visés ont été supprimés avec succès.")
@_cookie_settings.command()
async def resetlast(self, ctx, users: commands.Greedy[discord.Member]):
    """Reset the 'last cookie' information of the targeted users"""
    # Bugfix: the registered member key is 'last_cookie' (see
    # default_member in __init__); 'cookie_last' does not exist and
    # raised AttributeError at runtime.
    for user in users:
        await self.config.member(user).last_cookie.clear()
    await ctx.send("**Données reset** • Les membres sélectionnés n'ont plus aucun 'dernier cookie' enregistré.")
@_cookie_settings.command()
async def clearall(self, ctx):
"""Supprime tous les fortune cookies du serveur"""
await self.config.guild(ctx.guild).clear_raw('Cookies')
await ctx.send(
f"**Fortune cookies supprimés** • La liste est désormais vide pour ce serveur.")
def cog_unload(self):
    """Close the shared aiohttp session when the cog is unloaded."""
    closer = self.session.close()
    self.bot.loop.create_task(closer)
| [
"Acrown@outlook.fr"
] | Acrown@outlook.fr |
bb14c06ec99b4ea2b5d05a238a9bad7bf2310e7f | 75ab2f47f0a31e712f2168451414c03aa8b9e230 | /env/Lib/site-packages/pip/_vendor/requests/status_codes.py | 28bf2506b6a7ff06a6509fc8ff961a14b36ca9ac | [] | no_license | imaxaliev/TechnoHub | b369c9decf7a740132b3878e7e675a311ad8521e | c49e1ee2c62e7389eeb1053123cceb7dab6d83ca | refs/heads/master | 2023-01-27T13:52:53.356008 | 2020-12-13T13:30:06 | 2020-12-13T13:30:06 | 320,065,849 | 0 | 0 | null | 2020-12-13T13:30:08 | 2020-12-09T20:01:16 | JavaScript | UTF-8 | Python | false | false | 4,129 | py | # -*- coding: utf-8 -*-
r"""
The ``codes`` object defines a mapping from styles names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
items.
>>> requests.codes['temporary_redirect']
307
>>> requests.codes.teapot
418
>>> requests.codes['\o/']
200
Some codes have multiple names, and both upper- and lower-case versions of
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
``codes.okay`` all correspond to the HTTP status code 200.
"""
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
421: ('misdirected_request',),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}
codes = LookupDict(name='status_codes')
def _init():
    """Populate ``codes`` with one attribute per status name in ``_codes``,
    then append the full code listing to the module docstring."""
    for code, titles in _codes.items():
        for title in titles:
            setattr(codes, title, code)
            if not title.startswith(('\\', '/')):
                # Symbol-only aliases (e.g. '\\o/') have no meaningful
                # upper-case variant, so only word-like names get one.
                setattr(codes, title.upper(), code)

    def doc(code):
        # One reST bullet per status code, e.g. "* 200: ``ok``, ``okay``".
        names = ', '.join('``%s``' % n for n in _codes[code])
        return '* %d: %s' % (code, names)

    global __doc__
    # Skipped when __doc__ is None (interpreter running with -OO).
    __doc__ = (__doc__ + '\n' +
               '\n'.join(doc(code) for code in sorted(_codes))
               if __doc__ is not None else None)

_init()
| [
"anxious-mind"
] | anxious-mind |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.