blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39e336c9307f7c8274b6a2cec8f3b48bf9af55e5 | 3b2e5a456e4198a0f743b4a2c0fb2a65d0671d5c | /db.py | bdfa69b5eb6321bef8d2d1af586faf4a8209e30e | [] | no_license | solistz/ptn | bd7a299580f3647b26f1c22b7f02cea0151adb0e | f8e4324a14ab05bc825672cc735e292416db0ec0 | refs/heads/main | 2023-06-19T11:42:04.105202 | 2021-05-26T11:29:42 | 2021-05-26T11:29:42 | 352,918,112 | 0 | 0 | null | 2021-05-26T11:31:06 | 2021-03-30T07:58:59 | Python | UTF-8 | Python | false | false | 838 | py | # Написать базу данных, где записи это массив словарей
#while True:
def controller():
a = ('\n'
'| 1 - Create '
'| 2 - Modify '
'| 3 - Remove '
'| 4 - View Dictionary |'
'\n'
)
print(a)
def input_dictionary_element():
print('ВВедіть число 1 - 4 : ')
number = int(input())
return number
def create_dictionary_element():
print('CREATE')
def modify_dictionary_element():
print('MODIFY')
def remove_dictionary_element():
pass
def view_dictionary_element():
print('hellow')
def all_function_controller():
pass
def main():
#dictionary_master = {}
controller()
number_function = input_dictionary_element()
print(number_function, type(number_function))
if __name__ == "__main__":
main()
| [
"zhydan@gmail.com"
] | zhydan@gmail.com |
604072554a7f0eb42cfef7693e16063de53c0997 | 1d112fedae5e194fe6920e58789b1af39533e7ac | /mjrl/__init__.py | 008b1339d26925e4bf892f54a20ff82019bb5b08 | [
"Apache-2.0"
] | permissive | BonsaiAI/mjrl | 7ff130e120be1e3ddcf764df03540c281dc013f4 | 9e60d50518706cddf5decdcbec67e452ce698b82 | refs/heads/master | 2023-02-26T16:27:48.587950 | 2021-02-06T02:05:34 | 2021-02-06T02:05:34 | 336,432,596 | 0 | 0 | Apache-2.0 | 2021-02-06T02:05:35 | 2021-02-06T01:49:20 | null | UTF-8 | Python | false | false | 16 | py | import mjrl.envs | [
"vikashplus@gmail.com"
] | vikashplus@gmail.com |
141cdeaafce86af549785cb128257d8d76e48872 | cd5cd3b98129442c688036f8ad292b42e84f66da | /A1/Python/extra/assignment1_2_new.py | 18e01a8cb4f27724ab6672d1a03629e10be00618 | [] | no_license | manishtanwar/Machine-Learning | bee672d93f4e6e16f3a9242d6f92a7d96c252b1b | e6cb29207b5cf93ace93aa563887f3de829b0137 | refs/heads/master | 2021-11-03T20:26:40.071813 | 2019-04-27T00:02:27 | 2019-04-27T00:02:27 | 167,661,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py |
# coding: utf-8
# In[400]:
import numpy as np
# In[401]:
def train(X_in, Y, tau):
    """Locally weighted linear regression (LOWESS-style) predictions.

    For every sample i a weighted least-squares line is fitted with
    Gaussian weights centred on X_in[i] (bandwidth tau); the query point
    itself is excluded (leave-one-out), and Y is predicted at X_in[i].

    Input : X_in -- 1-D array of inputs, Y -- 1-D array of targets,
            tau  -- Gaussian kernel bandwidth.
    Ouput : 1-D array of leave-one-out predictions, one per sample.
    """
    num_points = X_in.size
    # Design matrix [x, 1] for an affine fit.
    design = np.ones((num_points, 2))
    design[:, 0] = X_in
    design_t = design.T
    predictions = np.zeros(num_points)
    for idx in range(num_points):
        weights = np.exp(-np.square(X_in - X_in[idx]) / (2. * tau * tau))
        weights[idx] = 0.  # leave-one-out: drop the query point itself
        W = np.diag(weights)
        normal_matrix = design_t @ (W @ design)
        moment_vector = design_t @ (W @ Y)
        theta = np.linalg.inv(normal_matrix) @ moment_vector
        predictions[idx] = design[idx] @ theta
    return predictions
# In[402]:
# Load the 1-D regression dataset (inputs and targets) from CSV.
x_in = np.genfromtxt('ass1_data/weightedX.csv',delimiter=',')
y_in = np.genfromtxt('ass1_data/weightedY.csv',delimiter=',')
# Kernel bandwidth for the locally weighted fit.
tau = 0.1
y_pred = train(x_in, y_in, tau)
# train(x_in, y_in, tau)
# print(y_pred,y_in)
# for i in range(x_in.size):
# print(x_in[i],y_pred[i],y_in[i])
import matplotlib.pyplot as plt
# Blue: ground truth; red: locally weighted predictions.
plt.plot(x_in,y_in,'ro',color='blue')
plt.plot(x_in,y_pred,'ro',color='red')
# plt.scatter(x,y,color='red')
# plt.scatter(x,y1,color='blue')
plt.show()
# In[403]:
# Unrelated scratch cell: sanity-check matrix inversion (A1 @ inv(A1) ~ I).
A1 = np.array([[1.,4.35,1.], [4.375, 1.,1.], [4., 1.,1.]])
print(A1)
B1 = np.linalg.inv(A1)
print(B1)
print(A1.dot(B1))
| [
"manish.tanwar19@gmail.com"
] | manish.tanwar19@gmail.com |
6f227c5ce3a2d188622f589658f80e019ffc2ec6 | 7e4460c85790fae2d470182732289bcd1b8777b2 | /Process/process_scripts.py | c6028329d884b48f3b57d3958e624f02c6d43b3a | [] | no_license | khamukkamu/swconquest-msys | 5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e | 71337a4ae9c507b9440e84cf49d31fc67a781978 | refs/heads/master | 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null | UTF-8 | Python | false | false | 1,812 | py | import string
from module_info import *
from module_scripts import *
from process_common import *
from process_operations import *
from module_info import wb_compile_switch as is_wb
def save_scripts(variable_list,variable_uses,scripts,tag_uses,quick_strings):
  # Serialize the module-system `scripts` list into scripts.txt
  # (Mount&Blade game-engine format). Python 2 code (uses xrange).
  # Each entry is either (name, ops) or (name, flags, ops); the second
  # tuple element being a list distinguishes the two layouts.
  file = open(export_dir + "scripts.txt","w")
  file.write("scriptsfile version 1\n")
  file.write("%d\n"%len(scripts))
  temp_list = []
  list_type = type(temp_list)
  for i_script in xrange(len(scripts)):
    func = scripts[i_script]
    if (type(func[1]) == list_type):
      # No flags: write -1 as the flags field, ops are in func[1].
      file.write("%s -1\n"%(convert_to_identifier(func[0])))
      save_statement_block(file,convert_to_identifier(func[0]), 0,func[1], variable_list,variable_uses,tag_uses,quick_strings, convert_to_identifier(func[0]) )
    else:
      # Flags present in func[1]; ops are in func[2].
      file.write("%s %s\n"%(convert_to_identifier(func[0]), swytrailzro(func[1])))
      save_statement_block(file,convert_to_identifier(func[0]), 0,func[2], variable_list,variable_uses,tag_uses,quick_strings, convert_to_identifier(func[0]) )
    file.write("\n")
  file.close()
def save_python_header():
  # Emit a Python module mapping script names to their numeric ids
  # (script_<name> = <index>), one file per engine flavour
  # (Warband vs original Mount&Blade, selected by the is_wb switch).
  if (is_wb):
    file = open("./IDs/ID_scripts_wb.py","w")
  else:
    file = open("./IDs/ID_scripts_mb.py","w")
  for i_script in xrange(len(scripts)):
    file.write("script_%s = %d\n"%(convert_to_identifier(scripts[i_script][0]),i_script))
  file.write("\n\n")
  file.close()
print "Exporting scripts..."
save_python_header()
variable_uses = []
variables = load_variables(export_dir, variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
save_scripts(variables,variable_uses,scripts,tag_uses,quick_strings)
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir, tag_uses)
save_quick_strings(export_dir,quick_strings)
| [
"swyterzone@gmail.com"
] | swyterzone@gmail.com |
9c06c0a29a8845ed289678b35982f9e2dbc2a720 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03957/s736073526.py | 4d07ff9077817797927326fee3fdb9b2cb662fdf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | s=input()
ans="No"
for i in range(len(s)):
if s[i]=="C":
for j in range(i+1,len(s)):
if s[j]=="F":
print("Yes")
exit()
else:
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5caf39f7f04ef450e3262dadb721eb0ac0d15d05 | 5cccdb3ae388d9234904b8bcf58ce115a1945e0b | /08_apples_and_bananas/test.py | 7d70b88fa05d0215145e74adc3c88ecf6e225cad | [
"MIT"
] | permissive | Akawi85/Tiny-Python-Project | 3e5962dfa0116eed8eb3bdbd0d9c83ded3a0da31 | 63d8956e0d114ac4e68b3cf26da9b24ece1ff249 | refs/heads/main | 2023-02-03T14:20:56.787277 | 2020-12-18T15:39:42 | 2020-12-18T15:39:42 | 308,365,557 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | #!/usr/bin/env python3
"""tests for apples.py"""
import re
import os
from subprocess import getstatusoutput, getoutput
prg = './apples.py'
fox = '../inputs/fox.txt'
# --------------------------------------------------
def test_exists():
    """The program under test must be present on disk."""
    program_found = os.path.isfile(prg)
    assert program_found
# --------------------------------------------------
def test_usage():
    """Both help flags should exit 0 and print a usage message."""
    for help_flag in ('-h', '--help'):
        status, output = getstatusoutput(f'{prg} {help_flag}')
        assert status == 0
        assert re.match("usage", output, re.IGNORECASE)
# --------------------------------------------------
def test_bad_vowel():
    """An invalid --vowel value is rejected with a usage error."""
    status, output = getstatusoutput(f'{prg} -v x foo')
    assert status != 0
    assert re.match("usage", output, re.IGNORECASE)
# --------------------------------------------------
def test_command_line():
    """Default vowel 'a': foo -> faa."""
    assert getoutput(f'{prg} foo').strip() == 'faa'
# --------------------------------------------------
def test_command_line_with_vowel():
    """Explicit vowel 'i': foo -> fii."""
    assert getoutput(f'{prg} -v i foo').strip() == 'fii'
# --------------------------------------------------
def test_command_line_with_vowel_preserve_case():
    """Upper-case letters keep their case after substitution."""
    output = getoutput(f'{prg} "APPLES AND BANANAS" --vowel i')
    assert output.strip() == 'IPPLIS IND BININIS'
# --------------------------------------------------
def test_file():
    """A file argument is read and transformed with the default vowel."""
    result = getoutput(f'{prg} {fox}')
    assert result.strip() == 'Tha qaack brawn fax jamps avar tha lazy dag.'
# --------------------------------------------------
def test_file_with_vowel():
    """A file argument combined with an explicit vowel."""
    result = getoutput(f'{prg} --vowel o {fox}')
    assert result.strip() == 'Tho qoock brown fox jomps ovor tho lozy dog.'
| [
"ifeanyi.akawi85@gmail.com"
] | ifeanyi.akawi85@gmail.com |
a546526b405a6825d7312a49a2cd25bcb0d101ae | 0abd812a50ba3330734fcbb0088a74c5ad6735a2 | /python/asKeyword.py | 77dc239390af38863bc903a53b3a7baf0e65c86c | [] | no_license | scMarth/Learning | a914af6f6327454234e5f98dfc8cf95d6d4f8077 | ae696461c2c8edc9944879503cce01d525cf4ce0 | refs/heads/master | 2023-08-03T05:13:03.162533 | 2023-07-28T22:58:51 | 2023-07-28T22:58:51 | 120,689,926 | 2 | 0 | null | 2022-12-11T13:14:07 | 2018-02-08T00:33:42 | JavaScript | UTF-8 | Python | false | false | 56 | py | import re
import re as regex
print(id(re) == id(regex)) | [
"vlantaca@gmail.com"
] | vlantaca@gmail.com |
2427dae02e66a14871a1c061b91cc2507d560ca0 | 6d5c15c193218fd352bbdcf188b019f35bf6d762 | /try/try/urls.py | 32bef94fc7d0b80833e9f0d4c093e4e1b63ab705 | [] | no_license | Yeshey-Gyatso/UI-of-prosperity-college- | 4dd32e7afb3982feced658d86dd75780c616fda2 | 1d9cc3d5ec7ed47c7786c24d7247c2f7f02f1903 | refs/heads/master | 2022-12-04T04:52:30.019586 | 2020-08-26T13:02:45 | 2020-08-26T13:02:45 | 290,494,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | """try URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from f import views
# URL routes for the site; all page views live in app ``f`` (imported above).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.base,name='base'),
    path('student/',views.studentform,name='student_forms'),
    path('employee/',views.employeeform,name='employee_forms'),
    path('aboutus/',views.aboutus,name='aboutus'),
    path('index/',views.index,name='index'),
    path('contactus/',views.contactus,name='contactus'),
    path('courses/',views.courses,name="courses"),
    path('slider/',views.slider,name="slider"),
    # path('base/',views.base,name='base'),
]
| [
"yesheygyatsoo.yg@gmail.com"
] | yesheygyatsoo.yg@gmail.com |
7c02d2ad4e1e378078eab256ef590b3dbb318934 | 43d8b1639320fbafb9635f47c17e56fceed9faba | /edashi_1428/urls.py | f5d1d9d26e849b64a4b3c7e0ed6e30da099c7ba3 | [] | no_license | crowdbotics-apps/edashi-1428 | fd731df62fb45e554031f56fd85700149a2acb18 | 30615d9118d998e82c463df6bddcc1f858d9bb86 | refs/heads/master | 2022-12-10T04:09:32.671280 | 2019-03-17T09:38:05 | 2019-03-17T09:38:05 | 176,081,279 | 0 | 0 | null | 2022-12-08T04:55:54 | 2019-03-17T09:37:04 | Python | UTF-8 | Python | false | false | 1,042 | py | """edashi_1428 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Root routes: home app, allauth account flows, REST API v1, and admin.
urlpatterns = [
    url('', include('home.urls')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api/v1/', include('home.api.v1.urls')),
    url(r'^admin/', admin.site.urls),
]
# Branding for the Django admin site.
admin.site.site_header = 'Edashi'
admin.site.site_title = 'Edashi Admin Portal'
admin.site.index_title = 'Edashi Admin'
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1029bac9469ac0dd2993476c4e8baf8890222e70 | 4568ce0f4b7cfbbf0d6f7da34d029837ef0e71c4 | /douban/douban/items.py | 6a71ccda81ce9e525ee3a67b9ec619deda6c297c | [] | no_license | jiayunyan/python_scrapy | 731b87f15aa2da68c179611fe972b1cdc15efa4a | 230eb912cddab3400671cfbe66de8e9beb569955 | refs/heads/master | 2020-03-25T11:09:43.575976 | 2018-08-06T12:36:38 | 2018-08-06T12:36:38 | 143,721,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DoubanItem(scrapy.Item):
    """Item container for scraped Douban records.

    No fields are defined yet; add them as ``name = scrapy.Field()``.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    # A class body consisting only of comments is a SyntaxError; the
    # docstring/pass make the class definition valid.
    pass
| [
"noreply@github.com"
] | jiayunyan.noreply@github.com |
f95d3da2b751d3a48dcc63bacb2925b0fde282b2 | 42f9db714a14cf18238cf743cf0353e26aff7ad8 | /leadmanager/leadmanager/settings.py | b1a1f59902ffa066965c03653a5f1324cafcd005 | [] | no_license | taochenlei/leadmanager | 29e2250e82734aa5bbb81bf1803a7be6eebb1db6 | a3b078569c7a40a0f7e03a78a3dbeb7a6197cda4 | refs/heads/main | 2023-08-20T19:25:18.878735 | 2021-10-20T04:11:24 | 2021-10-20T04:11:24 | 416,196,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | """
Django settings for leadmanager project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and
# load from the environment before any production deployment.
SECRET_KEY = 'django-insecure-nfmog(^q$5*(kc@f&2-x^^hy1#=4kx8a55kgda%746par5)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list hostnames in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'leads',
    'rest_framework'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'leadmanager.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'leadmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Development default: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"taochenlei@gmail.com"
] | taochenlei@gmail.com |
c0fd4082245f364263b9f86f142b2a86aa5f9785 | 4760101a6c297c4b4b0e96f5ae7fb8c94c2abda6 | /cs_591/project/pymir/AudioFile.py | 23b0ca35865b12fd6d0f202e5f9e5f7903931323 | [] | no_license | ssikdar1/grad_school | 6fc739c7638e64a0a9974c920ac808c3989c9109 | cfcdcd70fab5b4083515abb2afe10c9dd5c27923 | refs/heads/master | 2016-09-06T11:46:42.754021 | 2014-10-27T16:17:10 | 2014-10-27T16:17:10 | 13,286,075 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,991 | py | """
AudioFile class
Load audio files (wav or mp3) into ndarray subclass
Last updated: 15 December 2012
"""
import os
from subprocess import Popen, PIPE
import numpy
from numpy import *
import scipy.io.wavfile
from pymir import Frame
import pyaudio
class AudioFile(Frame.Frame):
    def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
          strides=None, order=None):
        """Allocate an AudioFile (ndarray subclass) with audio metadata
        defaults: sampleRate=0, channels=1, format=pyaudio float32."""
        # Create the ndarray instance of our type, given the usual
        # ndarray input arguments. This will call the standard
        # ndarray constructor, but return an object of our type.
        # It also triggers a call to InfoArray.__array_finalize__
        obj = numpy.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides,
                         order)
        obj.sampleRate = 0
        obj.channels = 1
        obj.format = pyaudio.paFloat32
        # Finally, we must return the newly created object:
        return obj
    def __array_finalize__(self, obj):
        """Propagate audio metadata (sampleRate/channels/format) to arrays
        created by view casting or slicing, per the numpy subclass protocol."""
        # ``self`` is a new object resulting from
        # ndarray.__new__(InfoArray, ...), therefore it only has
        # attributes that the ndarray.__new__ constructor gave it -
        # i.e. those of a standard ndarray.
        #
        # We could have got to the ndarray.__new__ call in 3 ways:
        # From an explicit constructor - e.g. InfoArray():
        # obj is None
        # (we're in the middle of the InfoArray.__new__
        # constructor, and self.info will be set when we return to
        # InfoArray.__new__)
        if obj is None: return
        # From view casting - e.g arr.view(InfoArray):
        # obj is arr
        # (type(obj) can be InfoArray)
        # From new-from-template - e.g infoarr[:3]
        # type(obj) is InfoArray
        #
        # Note that it is here, rather than in the __new__ method,
        # that we set the default value for 'info', because this
        # method sees all creation of default objects - with the
        # InfoArray.__new__ constructor, but also with
        # arr.view(InfoArray).
        self.sampleRate = getattr(obj, 'sampleRate', None)
        self.channels = getattr(obj, 'channels', None)
        self.format = getattr(obj, 'format', None)
        # We do not need to return anything
    @staticmethod
    def open(filename, sampleRate=44100):
        """
        Open a file (WAV or MP3), return instance of this class with data loaded in
        Note that this is a static method. This is the preferred method of constructing this object

        MP3/M4A: decoded to mono 16-bit PCM via an external ffmpeg process.
        WAV: read with scipy; only the left channel is kept.
        Samples are normalised to float32 in [-1, 1] (divide by 32767).
        """
        filename = filename.lower()
        if filename.endswith('mp3') or filename.endswith('m4a'):
            ffmpeg = Popen([
                "ffmpeg",
                "-i", filename,
                "-vn", "-acodec", "pcm_s16le", # Little Endian 16 bit PCM
                "-ac", "1", "-ar", str(sampleRate), # -ac = audio channels (1)
                "-f", "s16le", "-"], # -f wav for WAV file
                stdin = PIPE, stdout = PIPE, stderr = open(os.devnull, "w"))
            rawData = ffmpeg.stdout
            mp3Array = numpy.fromstring(rawData.read(), numpy.int16)
            mp3Array = mp3Array.astype('float32') / 32767.0
            audioFile = mp3Array.view(AudioFile)
            audioFile.sampleRate = sampleRate
            audioFile.channels = 1
            audioFile.format = pyaudio.paFloat32
            return audioFile
        elif filename.endswith('wav'):
            sampleRate, samples = scipy.io.wavfile.read(filename)
            # Convert to float
            samples = samples.astype('float32') / 32767.0
            # Get left channel
            if len(samples.shape) > 1:
                samples = samples[:, 0]
            audioFile = samples.view(AudioFile)
            audioFile.sampleRate = sampleRate
            audioFile.channels = 1
            audioFile.format = pyaudio.paFloat32
            return audioFile | [
"shan.sikdar@gmail.com"
] | shan.sikdar@gmail.com |
150ce4eec30ddd6d30b5c7b40d66ea2a1fdb3802 | 9a9ba1dc8eacc8680bbdc28ac2fd842f098cac42 | /gym_psketch/env/craft.py | 83da46fd30681cd789dd02ee754af3a590ac1c7a | [] | no_license | Ordered-Memory-RL/ompn_craft | 6236ff65ce820d01d4932ff8ddc09ca02d0ca8c2 | 4bc3de80bfd7984f5e690e157afcdf4723ece064 | refs/heads/master | 2023-03-23T04:47:14.158351 | 2021-03-10T04:23:59 | 2021-03-10T04:23:59 | 346,228,258 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 17,343 | py | from . import array
from .cookbook import COOKBOOK as CB
from .rendering import *
from .window import Window
import enum
import numpy as np
from skimage.measure import block_reduce
from gym import Env
import attr
import matplotlib.pyplot as plt
__all__ = ['Actions', 'ID2ACTIONS', 'CraftWorld', 'CraftState', 'ACTION_VOCAB']
class Actions(enum.Enum):
    """Discrete action space: four moves, USE (interact with the cell
    the agent faces), and DONE."""
    DOWN = 0
    UP = 1
    LEFT = 2
    RIGHT = 3
    USE = 4
    DONE = 5
# Direction names, indexed consistently with the first four Actions values.
ID2DIR = ['down', 'up', 'left', 'right']
# Action lookup by integer id (enum definition order).
ID2ACTIONS = [a for a in Actions]
# One-character glyphs per action id, used when pretty-printing traces.
ACTION_VOCAB = ['↓', '↑', '←', '→', 'u', 'D']
def random_free(grid, random, width, height):
    """Sample a uniformly random empty cell (value 0) from *grid*.

    Rejection-samples (x, y) with the given RandomState until an empty
    cell is hit; does not terminate if the grid has no free cell.
    """
    while True:
        x = random.randint(width)
        y = random.randint(height)
        if grid[x, y] == 0:
            return (x, y)
def neighbors(pos, width, height, dir=None):
    """Return the in-bounds neighbour coordinates of *pos*.

    Order is fixed: left, down, right, up. If *dir* is given, only the
    neighbour in that direction is returned (still as a list).
    """
    x, y = pos
    candidates = [
        ('left',  x > 0,          (x - 1, y)),
        ('down',  y > 0,          (x, y - 1)),
        ('right', x < width - 1,  (x + 1, y)),
        ('up',    y < height - 1, (x, y + 1)),
    ]
    return [cell for name, in_bounds, cell in candidates
            if in_bounds and (dir is None or dir == name)]
@attr.s
class CraftState:
    """ Internal state for each episode """
    # Item counts, indexed by cookbook kind id.
    inventory = attr.ib(None)
    # Agent (x, y) position on the grid.
    pos = attr.ib(None)
    # Facing direction: one of 'down', 'up', 'left', 'right'.
    dir = attr.ib(None)
    # 2-D int array of cell contents (0 = empty).
    grid = attr.ib(None)
    # Lazily built observation dict for this state (see CraftWorld._obs).
    cache = attr.ib(None)
    # Index of the next sub-task in the env's sketch sequence.
    sketch_id = attr.ib(None)
class CraftWorld(Env):
    """2-D crafting gridworld gym environment."""
    metadata = {'render.modes': ['ansi']}
    # Some meta data for each env, expected to be set per concrete task.
    instruction = None
    sketchs = None
    env_id = None
    # Class-level cache of rendered tiles, shared across all instances.
    cached_tiles = {}
    def __init__(self, goal, width=10, height=10, window_width=5,
                 window_height=5, num_ing=1, dense_reward=True, fullobs=False):
        """Create a crafting env whose episode goal is to obtain *goal*.

        width/height: grid size; window_*: agent's egocentric view size;
        num_ing: copies of each primitive placed on the grid;
        dense_reward: reward each completed sketch step, not just the goal;
        fullobs: include the coarse full-grid features in observations.
        """
        assert goal in CB.possible_goals, "Invalid Goals"
        self.fullobs = fullobs
        self.num_ing = num_ing
        self.dense_reward = dense_reward
        self.width = width
        self.height = height
        self.window_width = window_width
        self.window_height = window_height
        # Feature size: local one-hot window + coarse window + inventory + direction.
        self.n_features = 2 * window_width * window_height * CB.n_kinds + \
                          CB.n_kinds + \
                          4
        self.n_actions = Actions.__len__()
        self.non_grabbable_indices = CB.non_grabbable_indices
        self.grabbable_indices = CB.grabbable_indices
        self.workshop_indices = CB.workshops
        self.water_index = CB.index["water"]
        self.stone_index = CB.index["stone"]
        self.random = np.random.RandomState(None)
        # Set goal ids; gold/gem goals need special map features (island/cave).
        self.goal_id = CB.index[goal]
        self.make_island = self.goal_id == CB.index['gold']
        self.make_cave = self.goal_id == CB.index['gem']
        # State variable for each episode (populated by reset()).
        self.state = None
def seed(self, seed=None):
np.random.seed(seed)
self.random = np.random.RandomState(seed)
    def reset(self):
        """Start a new episode: regenerate the map, place the agent on a
        free cell with a random facing, and return the first observation."""
        grid = self._gen_grid(make_island=self.make_island,
                              make_cave=self.make_cave)
        self.state = CraftState(inventory=np.zeros(CB.n_kinds),
                                pos=random_free(grid=grid,
                                                random=self.random,
                                                width=self.width,
                                                height=self.height),
                                dir=self.random.choice(ID2DIR),
                                grid=grid,
                                cache=None,
                                sketch_id=0)
        return self._obs()
def step(self, action):
if isinstance(action, int):
action = ID2ACTIONS[action]
prev_inventory = self.state.inventory.copy()
self.state = self._update_grid_and_inventory(self.state, action)
reward = self._reward(action, prev_inventory) if self.dense_reward else 0
done = self.satisfy()
return self._obs(), reward, done, {}
    def _reward(self, action, prev_inventory):
        """Dense sub-task reward: 1 when the USE action completes the
        current sketch step (then advance sketch_id), else 0.

        A sketch step's target is its last word: either a grabbable item
        (satisfied when its inventory count increased) or a workshop
        (satisfied when facing that workshop and the USE changed the
        inventory at all).
        """
        if self.state.sketch_id >= len(self.sketchs):
            # All sketch steps already completed.
            return 0
        sketch = self.sketchs[self.state.sketch_id]
        target = sketch.split()[-1]
        target_id = CB.object2id(target)
        if action == Actions.USE:
            inventory_diff = self.state.inventory - prev_inventory
            if target_id in self.grabbable_indices:
                satisfy = inventory_diff[target_id] > 0
            elif target_id in self.workshop_indices:
                front_coord = neighbors(self.state.pos, self.width, self.height, self.state.dir)[0]
                front_thing = self.state.grid[front_coord]
                correct_workshop = front_thing == target_id
                use_success = (inventory_diff != 0).sum() > 0
                satisfy = correct_workshop and use_success
            else:
                raise ValueError('Invalid target id', target_id)
        else:
            satisfy = False
        reward = 1 if satisfy else 0
        if satisfy:
            self.state.sketch_id += 1
        return reward
def satisfy(self):
return (self.state.inventory[self.goal_id] > 0).item()
    def _update_grid_and_inventory(self, state: CraftState, action: Actions) -> CraftState:
        """Apply *action* to *state* and return the successor CraftState.

        Moves update position/direction (blocked by occupied cells); USE
        interacts with the faced cell: grab an item, craft at a workshop,
        or clear water/stone when holding a bridge/axe. Inventory and
        grid are copied only when USE actually changes them.
        """
        x, y = state.pos
        n_dir = state.dir
        n_inventory = state.inventory
        n_grid = self.state.grid
        # move actions
        if action == Actions.DOWN:
            dx, dy = (0, -1)
            n_dir = 'down'
        elif action == Actions.UP:
            dx, dy = (0, 1)
            n_dir = 'up'
        elif action == Actions.LEFT:
            dx, dy = (-1, 0)
            n_dir = 'left'
        elif action == Actions.RIGHT:
            dx, dy = (1, 0)
            n_dir = 'right'
        elif action == Actions.DONE:
            dx, dy = (0, 0)
        # use actions
        elif action == Actions.USE:
            dx, dy = (0, 0)
            nx, ny = neighbors(state.pos, dir=state.dir,
                               width=self.width,
                               height=self.height)[0]
            thing = self.state.grid[nx, ny]
            if thing != 0:
                # Copy so the previous state's arrays stay untouched.
                n_inventory = self.state.inventory.copy()
                n_grid = self.state.grid.copy()
                if thing in self.grabbable_indices:
                    n_inventory[thing] += 1
                    n_grid[nx, ny] = 0
                elif thing in self.workshop_indices:
                    workshop = CB.index.get(thing)
                    # Apply every recipe of this workshop whose ingredients
                    # are available (integer keys are ingredient ids).
                    for output, inputs in CB.recipes.items():
                        if inputs["_at"] != workshop:
                            continue
                        yld = inputs["_yield"] if "_yield" in inputs else 1
                        ing = [i for i in inputs if isinstance(i, int)]
                        if any(n_inventory[i] < inputs[i] for i in ing):
                            continue
                        n_inventory[output] += yld
                        for i in ing:
                            n_inventory[i] -= inputs[i]
                elif thing == self.water_index:
                    # A bridge lets the agent clear water (not consumed here).
                    if n_inventory[CB.index["bridge"]] > 0:
                        n_grid[nx, ny] = 0
                elif thing == self.stone_index:
                    # An axe lets the agent clear stone (not consumed here).
                    if n_inventory[CB.index["axe"]] > 0:
                        n_grid[nx, ny] = 0
        # other
        else:
            raise Exception("Unexpected action: %s" % action)
        # Attempt the move; stay in place if the target cell is occupied.
        n_x = x + dx
        n_y = y + dy
        if self.state.grid[n_x, n_y] != 0:
            n_x, n_y = x, y
        new_state = CraftState(pos=(n_x, n_y),
                               dir=n_dir,
                               inventory=n_inventory,
                               grid=n_grid,
                               cache=None,
                               sketch_id=state.sketch_id)
        return new_state
    def _gen_grid(self, make_island=False, make_cave=False):
        """Generate a fresh episode map: boundary walls, optional treasure
        surrounded by water (island) or stone (cave), scattered primitive
        ingredients, and one cell per workshop. Returns the int grid."""
        # generate grid with a solid boundary ring
        grid = np.zeros((self.width, self.height), dtype=int)
        i_bd = CB.index["boundary"]
        grid[0, :] = i_bd
        grid[self.width - 1:, :] = i_bd
        grid[:, 0] = i_bd
        grid[:, self.height - 1:] = i_bd
        # treasure: gold on an island (water ring) or gem in a cave (stone ring)
        if make_island or make_cave:
            (gx, gy) = (1 + self.random.randint(self.width - 2), 1)
            treasure_index = CB.index["gold"] if make_island else CB.index["gem"]
            wall_index = self.water_index if make_island else self.stone_index
            grid[gx, gy] = treasure_index
            for i in range(-1, 2):
                for j in range(-1, 2):
                    if grid[gx + i, gy + j] == 0:
                        grid[gx + i, gy + j] = wall_index
        # ingredients: num_ing copies of each primitive except the treasures
        for primitive in CB.primitives:
            if primitive == CB.index["gold"] or \
                    primitive == CB.index["gem"]:
                continue
            for i in range(self.num_ing):
                (x, y) = random_free(grid, self.random, self.width, self.height)
                grid[x, y] = primitive
        # generate crafting stations
        for ws_id in self.workshop_indices:
            ws_x, ws_y = random_free(grid, self.random, self.width, self.height)
            grid[ws_x, ws_y] = ws_id
        # the agent's initial position is chosen by reset(), not here
        return grid
    def _obs(self):
        """Build (and cache on the state) the observation dict with keys
        'features', 'inventory', 'pos', 'dir_id', 'img'.

        features = [local one-hot window, coarse full-grid window (zeros
        unless fullobs), inventory counts, one-hot direction].
        """
        if self.state is None:
            raise ValueError
        if self.state.cache is None:
            x, y = self.state.pos
            # Half-extents of the egocentric window and of the big window.
            hw = self.window_width // 2
            hh = self.window_height // 2
            bhw = (self.window_width * self.window_width) // 2
            bhh = (self.window_height * self.window_height) // 2
            # One-hot encode every cell of the grid.
            oh_grid = np.eye(CB.n_kinds)[self.state.grid.reshape(-1)].reshape([self.width, self.height, -1])
            grid_feats = array.pad_slice(oh_grid, (x - hw, x + hw + 1),
                                         (y-hh, y+hh+1), pad_value=0)
            if self.fullobs:
                # Coarse view: big window max-pooled down to the local size.
                grid_feats_big = array.pad_slice(oh_grid, (x - bhw, x + bhw + 1),
                                                 (y-bhh, y+bhh+1), pad_value=0)
                grid_feats_big_red = block_reduce(grid_feats_big,
                                                  (self.window_width, self.window_height, 1), func=np.max)
            else:
                grid_feats_big_red = np.zeros_like(grid_feats)
            #pos_feats = np.asarray(self.state.pos)
            #pos_feats[0] /= self.width
            #pos_feats[1] /= self.height
            dir_features = np.zeros(4)
            dir_features[ID2DIR.index(self.state.dir)] = 1
            features = np.concatenate((grid_feats.ravel(),
                                       grid_feats_big_red.ravel(), self.state.inventory,
                                       dir_features))
            self.state.cache = {'features': features, 'inventory': self.state.inventory,
                                'pos': self.state.pos, 'dir_id': ID2DIR.index(self.state.dir),
                                'img': self.state.grid if self.fullobs
                                else array.pad_slice(self.state.grid, (x - hw, x + hw + 1),
                                                     (y-hh, y+hh+1), pad_value=0)}
        return self.state.cache
def render(self, mode='ansi'):
if mode == 'ansi':
return self.pretty()
elif mode == 'rgb':
return self.get_rgb(tile_size=TILE_PIXELS)
else:
return super(CraftWorld, self).render(mode=mode)
def get_rgb(self, tile_size):
window = Window('test')
assert self.state is not None
grid = self.state.grid
width_px = self.width * tile_size
height_px = self.height * tile_size
img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)
if not self.fullobs:
pos_x, pos_y = self.state.pos
hw = self.window_width // 2
hh = self.window_height // 2
x_range = [i for i in range(pos_x - hw, pos_x + hw + 1)]
y_range = [i for i in range(pos_y - hh, pos_y + hh + 1)]
else:
x_range = [i for i in range(self.width)]
y_range = [i for i in range(self.height)]
for y in reversed(range(self.height)):
for x in range(self.width):
if x in x_range and y in y_range:
cell = grid[x, y]
tile_img = self.render_cell(cell,
tile_size=tile_size,
has_agent=(x,y) == self.state.pos,
direction=self.state.dir)
ymin = y * tile_size
ymax = (y+1) * tile_size
xmin = x * tile_size
xmax = (x+1) * tile_size
img[ymin:ymax, xmin:xmax, :] = tile_img
window.show_img(img)
# Get Inventory String
inventory_str = ["Inventory:"]
for inventory_id, val in enumerate(self.state.inventory):
if val > 0:
inventory_str.append("{}: {}".format(CB.index.get(inventory_id),
int(val)))
window.set_caption(' '.join(inventory_str))
fig = window.fig
w, h = fig.canvas.get_width_height()
fig.tight_layout()
fig.canvas.draw()
p_img = np.fromstring(fig.canvas.tostring_rgb(),
dtype=np.uint8).reshape(h, w, 3)
window.close()
return p_img
    @classmethod
    def render_cell(cls, cell, tile_size, subdivs=1, has_agent=False, direction=0):
        """Render one grid cell (plus agent marker) to an RGB tile.

        Tiles are memoised in the class-level cached_tiles dict, keyed by
        cell kind and (when the agent is present) its direction.
        """
        key = cls._tile_key(cell, has_agent, dir=direction)
        if key in cls.cached_tiles:
            return cls.cached_tiles[key]
        # draw wall (black) or the object's icon; white when no icon exists
        try:
            if CB.id2object(cell) == 'boundary':
                img = np.ones(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8) * 0
            else:
                img = load_icons(CB.id2object(cell), tile_size * subdivs, tile_size * subdivs)
        except FileNotFoundError:
            img = np.ones(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8) * 255
        # Draw the grid lines (top and left edges)
        fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))
        fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))
        # Plot agent as a red triangle pointing in its facing direction
        if has_agent:
            tri_fn = point_in_triangle(
                (0.12, 0.19),
                (0.87, 0.50),
                (0.12, 0.81),
            )
            # Rotate the agent based on its direction
            dir_id = RENDER_DIRID[direction]
            tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5 * math.pi * dir_id)
            fill_coords(img, tri_fn, COLORS['red'])
        img = downsample(img, subdivs)
        cls.cached_tiles[key] = img
        return img
@staticmethod
def _tile_key(cell, has_agent, dir):
    """Build the tile-cache key for a cell.

    Agent-occupied cells render differently per direction, so the
    direction is folded into the key only in that case.
    """
    base = CB.id2object(cell)
    return '{}_{}'.format(base, dir) if has_agent else base
def pretty(self):
    """ Pretty print to strings """
    # Returns an ASCII rendering of the visible grid plus the inventory.
    if self.state is None:
        return ""
    # Grid to string
    lines = []
    if not self.fullobs:
        # Partial observability: only cells inside the window centred on
        # the agent are printed; everything else renders as blanks.
        pos_x, pos_y = self.state.pos
        hw = self.window_width // 2
        hh = self.window_height // 2
        x_range = [i for i in range(pos_x - hw, pos_x + hw + 1)]
        y_range = [i for i in range(pos_y - hh, pos_y + hh + 1)]
    else:
        x_range = [i for i in range(self.width)]
        y_range = [i for i in range(self.height)]
    # Rows are emitted top-to-bottom, hence the reversed y iteration.
    for y in reversed(range(self.height)):
        line = []
        for x in range(self.width):
            # Empty if out of boundary
            if x in x_range and y in y_range:
                # Plot agent: a two-character glyph encoding its direction.
                if (x, y) == self.state.pos:
                    if self.state.dir == 'left':
                        line.append("<@")
                    elif self.state.dir == 'right':
                        line.append("@>")
                    elif self.state.dir == 'up':
                        line.append("^@")
                    elif self.state.dir == 'down':
                        line.append("@v")
                    else:
                        raise ValueError
                # Plot that thing (non-agent cell contents).
                else:
                    cell = self.state.grid[x, y]
                    if cell == 0:
                        line.append("  ")
                    else:
                        # Two-char abbreviation: first and last letter of
                        # the object name.
                        obj_str = CB.index.get(cell)
                        line.append(obj_str[0] + obj_str[-1])
            else:
                line.append("  ")
        lines.append(' '.join(line))
    # Plot Inventory: only items with a positive count are listed.
    lines.append("")
    lines.append('Inventory:')
    for inventory_id, val in enumerate(self.state.inventory):
        if val > 0:
            lines.append("{}: {}".format(CB.index.get(inventory_id),
                                         val))
    return '\n'.join(lines)
| [
"fdsafdsa@dsafdsa.com"
] | fdsafdsa@dsafdsa.com |
bafd53e16b68d5c5315f2de4dc3a24be45844475 | ae9bb7babce2a0349ae932985cf418a03057c670 | /test_ProjectPractice/test_requests/wework/__init__.py | 3c6ed16fe565082753b7192859a99a55e588806c | [] | no_license | Veraun/HogwartsSDET17-1 | d2592fcb4c9c63724c19bcf9edde349ebcd2c8af | 6648dbfb640b065ff2c76cb6889a8f9e4f124b91 | refs/heads/main | 2023-07-02T05:20:32.161248 | 2021-08-06T03:55:13 | 2021-08-06T03:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | '''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: HogwartsSDET17
@file: __init__.py.py
@time: 2021/3/28 19:30
@Email: Warron.Wang
''' | [
"wei1.wang@ximalaya.com"
] | wei1.wang@ximalaya.com |
37e1f185250c2fa42c1ca814fbe24ef8d4a33173 | b4ab09af253b2060d2a8ba7a4ef2a6add43dd89d | /Object-Counter-using-Opencv-Instance-Segmentation-master/Object-Counter-using-Opencv-Instance-Segmentation-master/instance_segmentation_final.py | 952652564b775f677c5e6875bf4b0a19021ee6aa | [] | no_license | jonatan1040/final-project | 3b39ad4b8ef4074c0ad3ed7e2b6dde917342b71d | ef632a4450c1bedca7079a71d054e23c55a4753b | refs/heads/master | 2021-01-09T16:03:47.657454 | 2020-02-22T15:24:53 | 2020-02-22T15:24:53 | 242,365,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | import numpy as np
import cv2
import os
import imutils
# load the COCO class labels our Mask R-CNN was trained on
#All 90 classes are listed in this text file, one per line.
labelsPath = os.path.sep.join(["mask-rcnn-coco", "object_detection_classes_coco.txt"])
LABELS = open(labelsPath).read().strip().split("\n")
#Mask R-CNN model weights. The weights are pre-trained on the COCO dataset
weightsPath = os.path.sep.join(["mask-rcnn-coco", "frozen_inference_graph.pb"])
#Mask R-CNN model configuration
configPath = os.path.sep.join(["mask-rcnn-coco", "mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"])
print("Loading Mask R-CNN from disk...")
net = cv2.dnn.readNetFromTensorflow(weightsPath, configPath)
for i in range(1, 4):
path="images/person/"+str(i)+".jpg"
print(path)
#grab a frame
src=cv2.imread(path)
show = imutils.resize(src, width=500)
#resize it to a known width, maintaining aspect ratio
frame = imutils.resize(src, width=1000)
(H, W) = frame.shape[:2]
#construct a blob and complete a forward pass through the network
blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
#print(blob)
net.setInput(blob)
#result is both boxes and masks
(boxes, masks) = net.forward(["detection_out_final", "detection_masks"])
#print(boxes)
print(boxes)
#sorts the indexes of the bounding boxes by their corresponding prediction probability
idxs = np.argsort(boxes[0, 0, :, 2])[::-1]
#print(len(boxes))
print("Detection Length",len(idxs))
count=1
for i in idxs:
#extract the classID and confidence using boxes
classID = int(boxes[0, 0, i, 1])
confidence = boxes[0, 0, i, 2]
print(LABELS[classID])
print(confidence)
#for banana change the class label to banana
if LABELS[classID] == "person":
#ensures the confidence of the prediction exceeds the threshold
if confidence > 0.8:
count=count+1
print("Number of persons:",count-1)
value="count="+str(count-1)
#print the count on frames
cv2.putText(show, value, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 4)
cv2.imshow("Output", show)
cv2.waitKey(0)
if cv2.waitKey(0) & 0xFF == ord('q'):
break
cv2.destroyAllWindows() | [
"jonatan1040@gmail.com"
] | jonatan1040@gmail.com |
13fe7acffc167ef651043847166ade55dfbe7fad | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/thienthientanvn.py | 80cce4c40f006fcc7e32a7cdf2085a62934bb55f | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to extract product fields from thienthientan.vn pages.
# Empty values mean the field is not extracted for this site.
XPATH = {
    'name' : "//div[@class='mytextarea']/span",
    'price' : "//span/div[@class='price']",
    'category' : "//div[@id='accordion']/ul/li",
    'description' : "//div[@class='table_center']/div[2]/table/tbody/tr/td|//div[@class='table_center']/div[3]/table/tbody/tr/td",
    'images' : "//img[@id='ctl00_MainPlaceHolder_ctl00_imgLaptop']/@src|//ul/li/a[@class='highslide']/img/@src",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
# Scrapy spider identity and crawl entry points.
name = 'thienthientan.vn'
allowed_domains = ['thienthientan.vn']
start_urls = ['http://www.thienthientan.vn/']
tracking_url = ''
# Sitemap crawling is effectively disabled (empty URL list).
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Follow every link, trying item extraction first, then generic parsing.
rules = [
    Rule(LinkExtractor(), 'parse_item'),
    Rule(LinkExtractor(), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
cdfd5d92a7f2b7c1f5784d6c1b9d01e19c0254b1 | 8e200c8e41b4ecb46053a61f74476889be560c98 | /nerdtalk/nerdtalk/wsgi.py | dfffe9738220ec67451b87cdc6329d792938b40f | [] | no_license | Buffer0x7cd/nerdtalk | d39869076b310a22182b9d2b5f70e36659884deb | 931e3bfb2e8a464fa486b8d1f1c54563979402ae | refs/heads/master | 2021-05-09T18:02:24.226556 | 2018-01-30T18:26:45 | 2018-01-30T18:26:45 | 119,149,191 | 0 | 0 | null | 2018-01-27T09:04:17 | 2018-01-27T09:04:17 | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for nerdtalk project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nerdtalk.settings")
application = get_wsgi_application()
| [
"manvendra021997@gmail.com"
] | manvendra021997@gmail.com |
6ac161c929a268cf47e452298e375d4446129656 | 3783ca6629411e32911b646bbfdea333a839fb6c | /World.py | bdde9ba1eb032b4e7e04f04371c2902a4ebecc48 | [
"MIT"
] | permissive | davidnarciso/PyGorillas | 963e628fbea44d4311b7d8c64c8a30c720ea566a | 30a4d3daf678db29749368123bef2cd0baeefe38 | refs/heads/master | 2020-12-31T00:54:44.215991 | 2017-02-19T01:05:59 | 2017-02-19T01:05:59 | 80,593,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,485 | py | from direct.showbase.DirectObject import DirectObject
from BuildingEnvironment import BuildingEnvironment
from direct.interval.IntervalGlobal import *
import direct.directbase.DirectStart
from pandac.PandaModules import *
from UserInterface import UserInterface
import sys,random
from Menu import Menu
class World(DirectObject):
    """Top-level Panda3D scene manager: camera, music, buildings and roads.

    Python 2 / Panda3D code. Owns a ``worldNode`` under ``render`` that all
    scenery is parented to, so the whole scene can be torn down and rebuilt
    by detaching a single node.
    """
    def __init__(self, AudioL):
        """Bind keys, load the four music tracks and start one playing."""
        self.worldNotDone=1
        self.number = -1
        base.disableMouse()
        self.AL = AudioL
        self.worldNode = render.attachNewNode("world")
        base.camLens.setFar(1700)
        # Music tracks, fetched by index from the audio loader.
        self.superMan = self.AL.getAudio(9)
        self.wildNFree = self.AL.getAudio(12)
        self.funky = self.AL.getAudio(11)
        self.bohemian = self.AL.getAudio(6)
        self.time = 0
        # Global hotkeys: scenery toggles and music selection.
        self.accept("space",self.incrementSideWindowNumber)
        self.accept("p",self.togglePositiveBGBuildings)
        self.accept("n",self.toggleNegitiveBGBuildings)
        self.accept("0",self.stopSounds)
        self.accept("1",self.bohemianRhapsody)
        self.accept("2",self.funkyItUp)
        self.accept("3",self.supMan)
        self.accept("4",self.wildItUP)
        self.supMan()
    def menuBackGround(self):
        """Build the animated cityscape shown behind the main menu."""
        myBuildingEnvironment = BuildingEnvironment(self.worldNode)
        self.buildingToggle = 1
        self.positiveBackgroundBuildingBool = 1
        self.negitiveBackgroundBuildingBool = 1
        self.backgroundBuildingNum = 14
        self.loadmodels(1)
        self.togglePositiveBGBuildings()
        self.toggleNegitiveBGBuildings()
        self.funWithBuildings(1)
        self.toggleBuildings()
        self.spin()
    def spin(self):
        """Orbit the camera around the scene for the menu screen."""
        self.cam = render.attachNewNode("cam")
        self.cam.reparentTo(self.worldNode)
        self.cam.setPos(0,0,150)
        self.camSpin1 = LerpFunc(self.spinCamera,
                                 duration = 10)
        self.camNode = render.attachNewNode("camNode")
        self.camNode.setPos(250,250,150)
        self.camNode.reparentTo(self.cam)
        self.camNode.lookAt(self.cam)
        base.camera.reparentTo(self.camNode)
        self.camSpin1.loop()
    def setCameraControl(self, task):
        """Task: fly the camera into position over the first 3 seconds."""
        if self.worldNotDone:
            if task.time < 3.0:
                base.camera.setPos(0, (97 * task.time) - 800, 105 + (task.time*80))
                base.camera.setHpr(0, (task.time * -7), 0)
                return task.cont
##            elif task.time < 5.0:
##                base.camera.setPos((self.gorillas[0].getX()/2) * (task.time - 3), self.gorillas[0].getY() - 50, self.gorillas[0].getZ())
##                return Task.cont
##            elif task.time <8.0:
##                base.camera.setHpr(-29 *(task.time - 5), (task.time - 5)*3, 0)
##                return Task.cont
##            pos = self.gorillas[0].getX()
##            return task.cont
            print "removed"
            taskMgr.remove("camControl")
            return task.done
        else:
            base.camera.setPos(0,0,0)
            taskMgr.remove("camControl")
    def incrementSideWindowNumber(self):
        """Advance the side-window counter (bound to the space key)."""
        self.number = self.number + 1
    def spinCamera(self,rad):
        """LerpFunc callback: rad in [0,1] maps to a full 360 degree turn."""
        self.cam.setH(rad*360)
    def createWorld(self,scores):
        """Tear down the menu scene and build the in-game world."""
        taskMgr.add(self.setCameraControl,"camControl")
        base.camera.reparentTo(render)
        self.camNode.detachNode()
        self.cam.detachNode()
        self.camSpin1.finish()
        self.worldNode.detachNode()
        self.worldNode = render.attachNewNode("world")
        self.scores = scores
        taskMgr.add(self.checkScore,"checkScore")
        myBuildingEnvironment = BuildingEnvironment(self.worldNode)
        self.myBuildingList = myBuildingEnvironment.getBuildingList()
        self.gorillas = myBuildingEnvironment.getGorillas()
        self.backgroundBuildingNum = 14
        self.loadmodels(2)
        self.funWithBuildings(2)
        self.toggleBuildings()
    def supMan(self):
        """Play the 'Superman' track, stopping the others."""
        self.wildNFree.stop()
        self.bohemian.stop()
        self.funky.stop()
        self.superMan.play()
    def bohemianRhapsody(self):
        """Play the 'Bohemian Rhapsody' track, stopping the others."""
        self.wildNFree.stop()
        self.funky.stop()
        self.superMan.stop()
        self.bohemian.play()
    def funkyItUp(self):
        """Play the 'funky' track, stopping the others."""
        self.wildNFree.stop()
        self.superMan.stop()
        self.bohemian.stop()
        self.funky.play()
    def wildItUP(self):
        """Play the 'wild and free' track, stopping the others."""
        self.bohemian.stop()
        self.funky.stop()
        self.superMan.stop()
        self.wildNFree.play()
    def stopSounds(self):
        """Stop all music tracks."""
        self.bohemian.stop()
        self.funky.stop()
        self.superMan.stop()
        self.wildNFree.stop()
    def createUI(self,p1Name, p2Name):
        """Create the in-game HUD for the two named players."""
        self.UI = UserInterface(1,self.gorillas,self.myBuildingList,self.scores,self.sun, self.worldNode,self.AL,p1Name,p2Name)
    def resetUI(self):
        """Re-bind the HUD to the freshly rebuilt world objects."""
        self.UI.reset(self.gorillas,self.myBuildingList,self.scores,self.sun,self.worldNode)
    def checkScore(self,task):
        """Task: schedule a world reset whenever a round has been won."""
        if self.UI.done:
            if self.scores[2]>self.scores[1] or self.scores[2]>self.scores[0]:
                taskMgr.add(self.resetEverything,"resetEverything")
        # NOTE(review): placed at method level so the task keeps running;
        # original indentation was ambiguous in the source dump.
        return task.cont
    def resetEverything(self,task):
        """Task: after an 11 second delay, rebuild the world and HUD."""
        if task.time>11:
            self.worldNode.detachNode()
            self.worldNode = render.attachNewNode("world")
            self.createWorld(self.scores)
            self.UI.done = 0
            self.resetUI()
            self.time = 0
            taskMgr.remove("resetEverything")
        # NOTE(review): see checkScore -- return placement reconstructed.
        return task.cont
    def removeAll(self):
        """Remove the whole scene, the HUD and silence the music."""
        self.worldNode.detachNode()
        self.UI.removeAll()
        self.stopSounds()
    def loadmodels(self,menuOrNot):
        """Load sky, sun, background buildings and the two roads."""
        self.clouds = loader.loadModel('models/env')
        self.clouds.reparentTo(self.worldNode)
        self.clouds.setScale(Vec3(400,400,500))
        self.sun = loader.loadModel('models/sun')
        # Eight rows of backgroundBuildingNum buildings (menu uses all 8,
        # the in-game world only the first 4 -- see loadBuildings).
        self.backgroundBuilding = [loader.loadModelCopy('models/glassbuilding')
                                   for i in range(self.backgroundBuildingNum*8)]
        self.buildingNode = render.attachNewNode("buildings")
        self.buildingNode.reparentTo(self.worldNode)
        self.loadBuildings(menuOrNot)
        self.time = 0
        self.makeRoad(50)
        self.makeRoad(-25)
    def loadBuildings(self, menuOrNot):
        """Position the building rows; menu mode (1) adds 4 mirror rows."""
        self.loadBackGroundBuildings(0,80,0)
        self.loadBackGroundBuildings(35,140,self.backgroundBuildingNum)
        self.loadBackGroundBuildings(-20,200,self.backgroundBuildingNum*2)
        self.loadBackGroundBuildings(15,260,self.backgroundBuildingNum*3)
        if menuOrNot==1:
            self.loadBackGroundBuildings(0,-150,self.backgroundBuildingNum*4)
            self.loadBackGroundBuildings(35,-200,self.backgroundBuildingNum*5)
            self.loadBackGroundBuildings(-20,-260,self.backgroundBuildingNum*6)
            self.loadBackGroundBuildings(15,-330,self.backgroundBuildingNum*7)
    def toggleBuildings(self):
        """Start (loop) the building wobble animation."""
        self.myParallel.loop()
    def funWithBuildings(self, menuOrNot):
        """Build the Parallel of random scale/rotation wobble intervals.

        Menu mode (1) animates all 8 rows, game mode (2) only the first 4.
        """
        if menuOrNot==1:
            # NOTE(review): buildingFun collects bound-method references and
            # appears unused afterwards.
            self.buildingFun = [self.backgroundBuilding[0].scaleInterval
                                for i in range(self.backgroundBuildingNum*8)]
            self.myParallel = Parallel(name = "buildingFun")
            for i in range(self.backgroundBuildingNum*8):
                movementSpeed = random.randint(2,6)/10.
                h = random.randint(20,40)/2./10.
                hpr1 = random.randint(-20,20)
                hpr2 = random.randint(-20,20)
                hprMove1 = self.backgroundBuilding[i].hprInterval(movementSpeed,Vec3(hpr1,-hpr1/2,hpr1/2),startHpr = Vec3(hpr2,hpr2/2,-hpr2/2))
                hprMove2 = self.backgroundBuilding[i].hprInterval(movementSpeed,Vec3(hpr2,hpr2/2,-hpr2/2),startHpr = Vec3(hpr1,-hpr1/2,hpr1/2))
                firstMovement = self.backgroundBuilding[i].scaleInterval(movementSpeed,Vec3(1.15,1.15,h),startScale = self.backgroundBuilding[i].getScale())
                secondMovement = self.backgroundBuilding[i].scaleInterval(movementSpeed,self.backgroundBuilding[i].getScale(),startScale = Vec3(1.15,1.15,h))
                sequence1 = Sequence(firstMovement, secondMovement)
                sequence2 = Sequence(hprMove1,hprMove2)
                self.myParallel.append(sequence1)
                self.myParallel.append(sequence2)
        elif menuOrNot == 2:
            self.buildingFun = [self.backgroundBuilding[0].scaleInterval
                                for i in range(self.backgroundBuildingNum*4)]
            self.myParallel = Parallel(name = "buildingFun")
            for i in range(self.backgroundBuildingNum*4):
                movementSpeed = random.randint(2,6)/10.
                h = random.randint(20,40)/2./10.
                hpr1 = random.randint(-20,20)
                hpr2 = random.randint(-20,20)
                hprMove1 = self.backgroundBuilding[i].hprInterval(movementSpeed,Vec3(hpr1,-hpr1/2,hpr1/2),startHpr = Vec3(hpr2,hpr2/2,-hpr2/2))
                hprMove2 = self.backgroundBuilding[i].hprInterval(movementSpeed,Vec3(hpr2,hpr2/2,-hpr2/2),startHpr = Vec3(hpr1,-hpr1/2,hpr1/2))
                firstMovement = self.backgroundBuilding[i].scaleInterval(movementSpeed,Vec3(1.15,1.15,h),startScale = self.backgroundBuilding[i].getScale())
                secondMovement = self.backgroundBuilding[i].scaleInterval(movementSpeed,self.backgroundBuilding[i].getScale(),startScale = Vec3(1.15,1.15,h))
                sequence1 = Sequence(firstMovement, secondMovement)
                sequence2 = Sequence(hprMove1,hprMove2)
                self.myParallel.append(sequence1)
                self.myParallel.append(sequence2)
    def makeRoad(self,y):
        """Lay a straight road at the given y with two looping cars."""
        numberOfPieces = 20
        road = [ loader.loadModelCopy('models/road')
                 for i in range(numberOfPieces)]
        for i in range(numberOfPieces):
            # Road pieces are centred around x = 0.
            x = -numberOfPieces*48.25/2+(i*48.25)
            z = 0
            road[i].reparentTo(self.worldNode)
            road[i].setH(90)
            road[i].setPos(Vec3(x,y,z))
            road[i].setScale(Vec3(1.5,10,.05))
        car = [loader.loadModelCopy("models/carnsx")
               for i in range(2)]
        for i in range(2):
            # Random colour; the two cars run in opposite lanes (i offsets y).
            car[i].setColor(random.random(), random.random(), random.random())
            car[i].setPos(Vec3(-numberOfPieces*48.25*1/2,y+i*7-3,0))
            car[i].setH(90)
            car[i].setScale(2.5)
            car[i].reparentTo(self.worldNode)
            # Drive across, U-turn, drive back, U-turn -- looped forever.
            carPosInterval1= car[i].posInterval(random.randint(6,15),Point3(numberOfPieces*48.25*1/2, y+i*7-3, 0), startPos=Point3(-numberOfPieces*48.25*1/2, y+i*7-3, 0))
            carPosInterval2= car[i].posInterval(random.randint(6,15),Point3(-numberOfPieces*48.25*1/2, y+i*7-3, 0), startPos=Point3(numberOfPieces*48.25*1/2, y+i*7-3, 0))
            carHprInterval1= car[i].hprInterval(random.uniform(.20, .50) * 5,Point3(270,0,0), startHpr=Point3(90,0,0))
            carHprInterval2= car[i].hprInterval(random.uniform(.20, .50) * 5,Point3(90,0,0), startHpr=Point3(270,0,0))
            carMove = Sequence(carPosInterval1, carHprInterval1, carPosInterval2,
                               carHprInterval2, name = "carMove"+str(i)+str(y))
            carMove.loop()
    def togglePositiveBGBuildings(self):
        """Show/hide the first 4 building rows (positive-y side)."""
        if(self.positiveBackgroundBuildingBool):
            for i in range(self.backgroundBuildingNum*4):
                self.backgroundBuilding[i].show()
            self.positiveBackgroundBuildingBool = 0
        else:
            for i in range(self.backgroundBuildingNum*4):
                self.backgroundBuilding[i].hide()
            self.positiveBackgroundBuildingBool = 1
    def toggleNegitiveBGBuildings(self):
        """Show/hide the last 4 building rows (negative-y side)."""
        if(self.negitiveBackgroundBuildingBool):
            for i in range(self.backgroundBuildingNum*4):
                self.backgroundBuilding[i+self.backgroundBuildingNum*4].show()
            self.negitiveBackgroundBuildingBool = 0
        else:
            for i in range(self.backgroundBuildingNum*4):
                self.backgroundBuilding[i+self.backgroundBuildingNum*4].hide()
            self.negitiveBackgroundBuildingBool = 1
    def loadBackGroundBuildings(self, offset, y, buildingNumber):
        """Place one row of buildings starting at index buildingNumber.

        offset shifts the row horizontally, y sets its depth, and each
        building gets a random height via its z scale.
        """
        for i in range(self.backgroundBuildingNum):
            self.backgroundBuilding[i+buildingNumber].reparentTo(self.buildingNode)
            x = -self.backgroundBuildingNum*50/2+(50*i)+offset
            z = 0
            h = random.randint(20,40)/2./10.
            self.backgroundBuilding[i+buildingNumber].setScale(Vec3(1.15,1.15,h))
            self.backgroundBuilding[i+buildingNumber].setPos(Vec3(x,y,z))
    def makeSunMad(self):
        """Tint the sun to its 'angry' colour."""
        self.sun.setColor(Vec4(.8,.2,.5,.5))
| [
"david.lee.narciso@gmail.com"
] | david.lee.narciso@gmail.com |
7207dd2b3b0752a700fa46d98bceecee95897454 | 7e13522ec24b429c831ec09d4e0592d0c42ba5ef | /Self-Study/NX_API_CLI/showInterfaces.py | 86757128c349f2e81db33a8970c96f7988734320 | [] | no_license | ds3010/Projects | 85b64656e101781587956797154eed60431c296f | 604d132470b958a32435cc423cc0f6721cce837e | refs/heads/master | 2023-01-06T18:20:26.802366 | 2020-11-07T17:23:53 | 2020-11-07T17:23:53 | 294,784,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import requests
import json
url='https://10.10.20.58/ins'
switchuser='admin'
switchpassword='Cisco123'
myheaders={'content-type':'application/json'}
payload={
"ins_api":{
"version": "1.0",
"type": "cli_show",
"chunk": "0",
"sid": "1",
"input": "show ip int brief",
"output_format": "json"
}
}
response = requests.post(url, data=json.dumps(payload), headers=myheaders, auth=(switchuser, switchpassword), verify=False).json()
print(json.dumps(response, indent=1, sort_keys=True)) | [
"daniel_seijas3010@hotmail.com"
] | daniel_seijas3010@hotmail.com |
c76dc5ce812d0c4b138ab5c2e331dbe1d7bda634 | d5312598eb9cff609eedee5a9d51db5e46bb5a38 | /assignment_3/tester-a3p1c.py | 82ee6579c6b73d9b3fdad256430bf71a1cb12b02 | [] | no_license | paolo-torres/ECE_406 | a7b5b53398f1c7a2e6f2acf0dfe1411d734ce69c | f4625b86d931fe4181b877b85c96317b25353bcd | refs/heads/main | 2023-04-03T21:30:19.806787 | 2021-04-22T20:41:14 | 2021-04-22T20:41:14 | 328,792,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,423 | py | #! /usr/bin/python3
import sys
import random
from a3p1c import robotpath
"""
Some tests.
"""
def main():
    """Run two fixed 2x2 tests, one fixed 4x4 test and a randomized trial.

    Edge costs are stored in a dict keyed by str([[from],[to]]) pairs;
    robotpath(n, src, c) is expected to return the cheapest top-to-bottom
    path as a list of [row, col] squares.
    """
    # First, a couple of simple 2x2 grid tests
    n = 2
    c = dict()
    c[str([[1,1],[2,1]])] = 1
    c[str([[1,1],[2,2]])] = 1000
    c[str([[1,2],[2,1]])] = 1000
    c[str([[1,2],[2,2]])] = 1
    print('Test 1:', end = ' ')
    src = [1,1]
    p = robotpath(n, src, c)
    # From [1,1] the only cheap edge leads straight down to [2,1].
    if p and len(p) == 2 and p[0] == [1,1] and p[1] == [2,1]:
        print('Passed')
    else:
        print('Failed')
    print('Test 2:', end = ' ')
    src = [1,2]
    p = robotpath(n, src, c)
    if p and len(p) == 2 and p[0] == [1,2] and p[1] == [2,2]:
        print('Passed')
    else:
        print('Failed')
    print('Test 3:', end = ' ')
    # A 4x4 grid
    n = 4
    src = [1,2]
    c = dict()
    # 1st row: everything cheap except leaving [1,2] straight down.
    for i in range(1,5):
        c[str([[1,i],[2,i]])] = 1
        if i - 1 > 0:
            c[str([[1,i],[2,i-1]])] = 1
        if i + 1 <= n:
            c[str([[1,i],[2,i+1]])] = 1
    c[str([[1,2],[2,2]])] = 1000
    # 2nd row: everything expensive except [2,2] -> [3,2].
    for i in range(1,5):
        c[str([[2,i],[3,i]])] = 1000
        if i - 1 > 0:
            c[str([[2,i],[3,i-1]])] = 1000
        if i + 1 <= n:
            c[str([[2,i],[3,i+1]])] = 1000
    c[str([[2,2],[3,2]])] = 1
    # 3rd row: everything expensive except [3,2] -> [4,2].
    for i in range(1,5):
        c[str([[3,i],[4,i]])] = 1000
        if i - 1 > 0:
            c[str([[3,i],[4,i-1]])] = 1000
        if i + 1 <= n:
            c[str([[3,i],[4,i+1]])] = 1000
    c[str([[3,2],[4,2]])] = 1
    p = robotpath(n, src, c)
    if p and len(p) == 4 and p[0] == [1,2] and p[2] == [3,2] and p[3] == [4,2]:
        print('Passed')
    else:
        print('Failed')
    print('Test 4: correctness will be checked at marking-time.')
    # A random grid between 3x3 and 10x10
    n = random.randint(3,10)
    # a random source square
    src = [1,random.randint(1,n)]
    # random costs between 1 and 10
    c = dict()
    for i in range(1,n):
        for j in range(1,n+1):
            # [i,j] is our "from" square
            c[str([[i,j],[i+1,j]])] = random.randint(1, 10)
            if j - 1 > 0:
                c[str([[i,j],[i+1,j-1]])] = random.randint(1, 10)
            if j + 1 <= n:
                c[str([[i,j],[i+1,j+1]])] = random.randint(1, 10)
    p = robotpath(n, src, c)
    print('Grid is', n, 'x', n, '. Source square is', src)
    print('Path found:', p)
# Allow running this tester directly from the command line.
if __name__ == '__main__':
    main()
| [
"paolo-torres@hotmail.com"
] | paolo-torres@hotmail.com |
873e8a25015fde4f500581381f3660e3d4848ad0 | 9ed44f44698df9a3cff4c8ae2972d5edcc9638c6 | /userbot/plugins/logic.py | d4d666f5f75553309847cad9282aea3c852c595a | [
"MIT"
] | permissive | piggy03/catuserbot | 3493315c28de6f32f30fe1da57a867979e2e4e81 | 3d2676c23341d391fc2cbd705b8875716d9ad0e6 | refs/heads/master | 2022-09-21T06:19:49.183745 | 2020-06-05T20:51:06 | 2020-06-05T20:51:06 | 269,922,217 | 1 | 0 | MIT | 2020-06-06T08:46:14 | 2020-06-06T08:46:14 | null | UTF-8 | Python | false | false | 1,324 | py | """No Logic Pligon for @PepeBot
\nCoding by Legend @NeoMatrix90
\nType .logic to see many logical fact
"""
from telethon import events
import asyncio
import random
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern=f"logic", allow_sudo=True))
@borg.on(events.NewMessage(pattern=r"\.logic", outgoing=True))
async def _(event):
    """Reply to .logic with one of seven random 'logical facts'.

    Edits the triggering message twice: first a processing notice, then
    (after a 2 second pause) the chosen fact.
    """
    if event.fwd_from:
        return
    await event.edit("`processing man wait 2sec can't you wait......`")
    await asyncio.sleep(2)
    # BUG FIX: random.randrange(1, 7) never returns 7, so the seventh fact
    # below was unreachable; randint(1, 7) is inclusive on both ends.
    x = random.randint(1, 7)
    if x == 1:
        await event.edit("`Do You Know, Some Mosquitos Became Ghosts, When you *Killed* Them...`")
    if x == 2:
        await event.edit("`Do You Know, Mosquitoes has Teleportation Power...`")
    if x == 3:
        await event.edit("`Do You Know, When you see a bearded Goat, that means you juat saw a *Smarter Goat* than YOU....`")
    if x == 4:
        await event.edit("`Do You Know, when You give some ruppess to a Bus Conductor, He will give You a Piece of Paper, *Called Ticket*...`")
    if x == 5:
        await event.edit("`Do You Know, Bus are called Bus, Because they are Bus....`")
    if x == 6:
        await event.edit("`Do You Know, There's a Huge Difference between *Cartoon amd Anime*...`")
    if x == 7:
        await event.edit("`Do You Know, We can't see Ghosts But Ghosts Can see Us...`")
| [
"58665444+sandy1709@users.noreply.github.com"
] | 58665444+sandy1709@users.noreply.github.com |
88f4853707e89c9212e5da10da342596d16845e6 | 4ca680ae47b6762e425c2f1e04c350a310c6428c | /Model.py | 2327278bc2460590b4f4929095b5ad91c78eb8ff | [] | no_license | ID132457890/CSS458_critters | b5e803720fb7732be9c2edebec60c4c47812f515 | e2a218fbfeafb337ae16feab75603621faa1bfc4 | refs/heads/master | 2016-09-14T18:25:38.045627 | 2016-05-17T18:38:14 | 2016-05-17T18:38:14 | 58,654,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,423 | py | from Environment import *
from Creature import *
import AnalyticsReporting as AR
import Logger as L
import unittest
# basic stubs for how the model could be ran
class Model(object):
    """Driver for the critter simulation: builds the environment, steps
    every agent each tick, and feeds analytics/logging hooks."""
    def __init__(self, conf = {}):
        """Read configuration with defaults and create helper objects.

        NOTE: the mutable default dict is kept for interface
        compatibility; it is only read, never mutated, so sharing it
        across calls is harmless. The 'vegitation_density' key spelling
        is part of the existing config interface.
        """
        # dict.get replaces the repeated "x if k in conf else default" idiom.
        self.grid_size = conf.get('grid_size', 100)
        self.veg_den = conf.get('vegitation_density', .3)
        self.creature_den = conf.get('creature_density', .08)
        self.carnivore_chance = conf.get('carnivore_chance', .01)
        self.omnivore_chance = conf.get('omnivore_chance', .01)
        self.sim_length = conf.get('sim_length', 500)
        self.steps_day = conf.get('steps_day', 1)
        # Fraction of a day that elapses per simulation step.
        self.delta_t = 1 / float(self.steps_day)
        self.analytics = AR.AnalyticsReporting(self)
        self.logger = L.Logger(self)
    def run_simulation(self):
        """Run sim_length days of steps_day ticks each, then report."""
        self.env = Environment(self)
        for x in range (self.sim_length * self.steps_day):
            for agent in self.env.agents:
                # delta_t, x, and steps per day can be used if an agent
                # should behave differently at different times of day
                # (e.g. with delta_t == 1/24, x % steps_day between 20 and
                # 06 could be the hours that day-dwellers sleep). They can
                # be ignored if agents are time-of-day agnostic.
                for actions in range(agent.movement_speed):
                    action = agent.take_turn(self.delta_t, x, self.steps_day).do_action()
                    if action == False:
                        self.logger.log(10, 'Action error occurred! agent: %r action: %r' % (agent, action))
            self.analytics.turn_analyze()
            # Once per full day (skipping tick 0): daily upkeep + analytics.
            if x != 0 and x % self.steps_day == 0:
                for agent in self.env.agents:
                    agent.daily_agent_maintenance()
                self.analytics.round_analyze()
        # Report any interesting statistics, etc
        self.analytics.finish_analyze()
self.analytics.finish_analyze()
class ModelTests(unittest.TestCase):
def tests(self):
m = Model({'grid_size': 30})
m.run_simulation()
if __name__ == "__main__":
tests = ModelTests()
tests.tests() | [
"nightjars@gmail.com"
] | nightjars@gmail.com |
97943c431e6ed92d297e167d8cd89df19b6be7b2 | ea3aedb776de06a649fba2c18182f37d00ee89b2 | /flasggerui.py | df0e75415d4721f7ba90766b8b4ee53b83da72dc | [] | no_license | kevalpipalia/Fake-currency-detector | 5491abad631c78303d59d524b080f0346216d08e | 4c29597f537f2864af103ab59763d587d55018b4 | refs/heads/main | 2022-12-28T14:30:56.507156 | 2020-10-18T08:36:56 | 2020-10-18T08:36:56 | 299,906,024 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 21:35:01 2020
@author: keval
"""
import pandas as pd
import numpy as np
from flask import Flask,request
import pickle
import flasgger
from flasgger import Swagger
app = Flask(__name__)
# Swagger/flasgger serves interactive API docs (default at /apidocs).
Swagger(app)
# Load the pre-trained banknote classifier once at import time.
# NOTE(review): the file handle is never closed; acceptable for the
# lifetime of a small demo app.
pkl_in = open('classifier.pkl','rb')
classifier = pickle.load(pkl_in)
@app.route('/')
def welcome():
    # Simple landing/health-check endpoint.
    return "Welcome to UWash"
@app.route('/predict')
def predict_note():
    """Fake Currency note detection System
    ---
    parameters:
      - name: variance
        in: query
        type: number
        required: true
      - name: skewness
        in: query
        type: number
        required: true
      - name: curtosis
        in: query
        type: number
        required: true
      - name: entropy
        in: query
        type: number
        required: true
    responses:
      200:
        description: The prediction is
    """
    # FIX: the swagger section was misspelled "responces:", so flasgger
    # could not pick up the 200 response (the sibling route spells it
    # correctly).
    variance = request.args.get('variance')
    skewness = request.args.get('skewness')
    curtosis = request.args.get('curtosis')
    entropy = request.args.get('entropy')
    # NOTE(review): query args arrive as strings; the classifier is assumed
    # to coerce them -- confirm against the trained model's expectations.
    pred = classifier.predict([[variance,skewness,curtosis,entropy]])
    return "Your note measurements predicts it is" + str(pred)
@app.route('/predict_file', methods=["POST"])
def predict_note_file():
    """Fake currency note detection System
    ---
    parameters:
      - name: file
        in: formData
        type: file
        required: true
    responses:
      200:
        description: The predictions are
    """
    # FIX: the 200 response previously lacked the "description:" key, making
    # the swagger YAML malformed for flasgger.
    # The uploaded CSV must contain one row of measurements per note.
    df_test = pd.read_csv(request.files.get("file"))
    pred = classifier.predict(df_test)
    return "Your note measurements predicts it is" + str(list(pred))
if __name__ == '__main__':
    # Flask development server; use a proper WSGI server in production.
    app.run()
| [
"keval.pipalia009@yahoo.com"
] | keval.pipalia009@yahoo.com |
ed65b61b19d5534c134fa007de61bf2d20a5c32f | eb7b9086b0eb2a04775ea1f04c531137be76c8ca | /coordinator/EdgeOracles/dom_reverse_rare_seeding_oracle.py | c7e47d26b2db150813d308a599151c183a1d06ba | [
"Apache-2.0"
] | permissive | TotalKnob/muse | b8195d36416e6d680dd31bfecc2554f0ec4a7e73 | 3c4a05d2a3ffc3bceb6827207a2a4a1a63aba503 | refs/heads/master | 2023-06-09T14:40:48.017143 | 2021-06-29T16:39:34 | 2021-06-29T16:39:34 | 334,089,666 | 0 | 0 | Apache-2.0 | 2021-06-29T16:10:28 | 2021-01-29T08:59:05 | Python | UTF-8 | Python | false | false | 6,302 | py | #!/usr/bin/env python
import math
import utils
import sys
import os
import csv
import ConfigParser
from utils import bcolors
from operator import itemgetter
import itertools
def oracle_info(s):
    # Print a colourised "[Edge-Oracle-Info]" log line (Python 2 print).
    print bcolors.HEADER+"[Edge-Oracle-Info]"+bcolors.ENDC, "{0}".format(s)
class DomRevRareSeedingOracle:
def __init__(self, config, target_bin):
#map[edge]={'bbl','dom_num'}, singleton, constantly updating
self.edge_dom_map = dict()
#map['BBID']={'DOMNUM'}, singleton, load once
self.bb_dom_map = None
self.config = config
self.target_prog = target_bin
self.target_dir = os.path.dirname(os.path.abspath(self.target_prog).split()[0])
self.edge_dom_not_exist = set()
self.get_oracle_config()
self.load_bb2dom_map()
self.global_dom = True
# self.global_dom = True
def __repr__(self):
return "dom-reverse-tfidf"
def get_oracle_config(self):
config = ConfigParser.ConfigParser()
config.read(self.config)
try:
self.bb_to_dom_file = config.get("auxiliary info", "bbl_dom_map").replace("@target", self.target_dir)
except Exception:
utils.error_msg("bbl_dom_map file not found in %s"%self.target_dir)
sys.exit(-1)
def load_bb2dom_map(self):
try:
self.bb_dom_map = dict()
with open(self.bb_to_dom_file) as b2d_file:
reader = csv.DictReader(b2d_file, delimiter=',')
for row in reader:
self.bb_dom_map[row['BBID']] = row['DOMNUM']
oracle_info('Loading BBL to Domination Map %s'%self.bb_to_dom_file)
except Exception:
utils.error_msg("can't load bb_dom_map: %s"%self.bb_to_dom_file)
sys.exit(-1)
    def get_result(self, raw_data, max_results, edge_threshold=1.0):
        """Rank edges by a tf-idf-style score and group them by seed input.

        raw_data maps edge-id -> {'inputs', 'seeds', 'first_seen', ...};
        the special entry '0' carries the total number of executed inputs.
        Returns {first_seen_input: {'score', 'interesting_edges', 'input'}}
        with at most max_results entries (max_results == -1 means unlimited).
        """
        # Entry '0' is the aggregate record holding the global execution count.
        total_execs = float(raw_data['0']['inputs'])
        stats = []
        for e, raw in raw_data.iteritems():
            if e == '0':
                continue
            stat = raw.copy()
            stat['edge_id'] = e
            try:
                #favor more freq, more seed and more dom
                stat['rev-tfidf'] = math.log(2+float(raw['seeds'])) * (float(raw['inputs'])/total_execs)
                stat['tfidf'] = math.log(1+float(raw['seeds'])) * math.log(total_execs/float(raw['inputs']))
            except Exception:
                # Malformed counters (e.g. zero inputs) fall back to a small
                # constant score instead of aborting the whole ranking.
                utils.error_msg("[rev-tfidf computation wrong, fallback]")
                print stat
                stat['rev-tfidf'] = 0.1
            stats.append(stat)
            if self.global_dom:
                #if we want to get dom factor for all edges and calc scores based on this
                self.prep_edge_dom(stat['edge_id'], stat)
        stats = sorted(stats, key=itemgetter('rev-tfidf'), reverse=True)
        if self.global_dom:
            #this is for completeness
            top_candidate = stats
        else:
            #we only do the traceloc generation for the top 10*max tfidf results
            # top_candidate = itertools.islice(stats, max_results * 10)
            #this is for scalability
            top_candidate = stats[:max_results*10]
        # Re-weight the shortlist by the dominator count of the edge's basic
        # block; edges with no dominator info fall back to plain tfidf.
        for stat in top_candidate:
            if stat['edge_id'] not in self.edge_dom_not_exist:
                self.prep_edge_dom(stat['edge_id'], stat)
            try:
                stat['dom-rev-tfidf'] = stat['rev-tfidf'] * math.log(1 + float(self.edge_dom_map[stat['edge_id']]['dom_num']))
                # oracle_info("dom for %s edge exist"%e)
            except KeyError:
                #fallback to tfidf
                # Remember the miss so we don't regenerate the trace next time.
                self.edge_dom_not_exist.add(stat['edge_id'])
                # oracle_info("dom for %s edge not exist, fallback to tfidf"%e)
                stat['dom-rev-tfidf'] = stat['tfidf']
            #oracle_info("seed: %s, tfidf: %d, dom-tfidf: %d"%(stat['edge_id'],stat['tfidf'],stat['dom-tfidf'] ))
        top_dom_rev_tfidf = sorted(top_candidate, key=itemgetter('dom-rev-tfidf'), reverse=True)
        # Group edges by the input that first exercised them; an input keeps
        # additional edges only if they score within edge_threshold of its best.
        result = {}
        for stat in top_dom_rev_tfidf:
            edge_id = stat['edge_id']
            score = stat['dom-rev-tfidf']
            input_file = stat['first_seen']
            if input_file not in result:
                # Don't add more results than requested
                if max_results != -1 and len(result) >= max_results:
                    break
                result[input_file] = {
                    'score': score,
                    'interesting_edges': [edge_id],
                    'input': input_file
                }
            elif score >= edge_threshold * result[input_file]['score']:
                result[input_file]['interesting_edges'].append(edge_id)
        return result
    def prep_edge_dom(self, edge, row_log):
        """row_log is {'inputs','seeds', 'first_seen', 'edge_id','tfidf'} """
        # Populate self.edge_dom_map for every edge observed while replaying
        # row_log['first_seen']; no-op if this edge is already cached.
        if edge == '0' or row_log['first_seen'] is None:
            return
        if self.edge_dom_map.has_key(edge):
            return
        if utils.gen_loctrace_file(self.target_prog, row_log['first_seen']):
            #be careful with the loctrace file, to avoid interferece with other sefuzz instances
            with open(self.target_dir+'/loctrace.csv') as trace_file:
                reader = csv.reader(trace_file, delimiter=',')
                for row in reader:
                    #cache all the encountered edges
                    # Each row is (basic-block id, edge id); rows missing a
                    # column are skipped via the IndexError handler below.
                    try:
                        bid = row[0]
                        eid = row[1]
                        if not self.bb_dom_map.has_key(bid):
                            # print "fuck bbdom map does not have dom for this bb"
                            continue
                        if self.edge_dom_map.has_key(eid):
                            continue
                        self.edge_dom_map[eid] = {
                            'bbl': bid,
                            'dom_num': self.bb_dom_map[bid]
                        }
                    except IndexError:
                        continue
            # utils.rmfile_force(self.target_dir+'./loctrace.csv', silent=True)
        else:
            # Trace generation failed; caller will fall back to plain tfidf.
            utils.error_msg('[Fallback]loctrace not found Can not get bbid for edge %s, using input %s'%(edge, row_log['first_seen']))
            print row_log
| [
"mirzazadefarkhani.r@husky.neu.edu"
] | mirzazadefarkhani.r@husky.neu.edu |
5ae2c762306e5a94e26a671d98093c5b02e5db3d | f167dffa2f767a0419aa82bf434852069a8baeb8 | /lib/youtube_dl/extractor/arcpublishing.py | ca6a6c4d87f9f13a259f8402f5e7ef51ad097088 | [
"MIT"
] | permissive | firsttris/plugin.video.sendtokodi | d634490b55149adfdcb62c1af1eb77568b8da3f5 | 1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3 | refs/heads/master | 2023-08-18T10:10:39.544848 | 2023-08-15T17:06:44 | 2023-08-15T17:06:44 | 84,665,460 | 111 | 31 | MIT | 2022-11-11T08:05:21 | 2017-03-11T16:53:06 | Python | UTF-8 | Python | false | false | 7,970 | py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
parse_iso8601,
try_get,
)
class ArcPublishingIE(InfoExtractor):
    """Extractor for videos served through the Arc Publishing platform.

    URLs use the internal scheme arcpublishing:<org>:<uuid>; `_extract_urls`
    finds embedded POWA players in third-party pages and rewrites them into
    that scheme.
    """
    _UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
    _VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX
    _TESTS = [{
        # https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
        'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
        'only_matching': True,
    }, {
        # https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/
        'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1',
        'only_matching': True,
    }, {
        # https://www.actionnewsjax.com/video/live-stream/
        'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a',
        'only_matching': True,
    }, {
        # https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/
        'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3',
        'only_matching': True,
    }, {
        # https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/
        'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe',
        'only_matching': True,
    }, {
        # https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/
        'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e',
        'only_matching': True,
    }, {
        # https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/
        'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143',
        'only_matching': True,
    }, {
        # https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/
        'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055',
        'only_matching': True,
    }, {
        # https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/
        'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d',
        'only_matching': True,
    }, {
        # https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/
        'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7',
        'only_matching': True,
    }, {
        # https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/
        'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b',
        'only_matching': True,
    }, {
        # https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html
        'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685',
        'only_matching': True,
    }]
    # (org list, API host template) pairs; orgs not listed here use the
    # generic fallback template in _real_extract.
    _POWA_DEFAULTS = [
        (['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'),
        ([
            'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo',
            'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom',
            'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek',
        ], 'video-api-cdn.%s.arcpublishing.com/api'),
    ]

    @staticmethod
    def _extract_urls(webpage):
        """Return arcpublishing: URLs for all POWA players embedded in webpage."""
        entries = []
        # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
        for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
            powa = extract_attributes(powa_el) or {}
            org = powa.get('data-org')
            uuid = powa.get('data-uuid')
            if org and uuid:
                entries.append('arcpublishing:%s:%s' % (org, uuid))
        return entries

    def _real_extract(self, url):
        org, uuid = re.match(self._VALID_URL, url).groups()
        # Pick the API host template for this org; for-else falls back to the
        # generic per-org CDN host when the org is in neither default group.
        for orgs, tmpl in self._POWA_DEFAULTS:
            if org in orgs:
                base_api_tmpl = tmpl
                break
        else:
            base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api'
        if org == 'wapo':
            org = 'washpost'
        video = self._download_json(
            'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org),
            uuid, query={'uuid': uuid})[0]
        title = video['headlines']['basic']
        is_live = video.get('status') == 'live'
        # Build formats from each unique stream URL; handling depends on the
        # declared stream_type (smil/rtmp, HLS, or progressive download).
        urls = []
        formats = []
        for s in video.get('streams', []):
            s_url = s.get('url')
            if not s_url or s_url in urls:
                continue
            urls.append(s_url)
            stream_type = s.get('stream_type')
            if stream_type == 'smil':
                smil_formats = self._extract_smil_formats(
                    s_url, uuid, fatal=False)
                for f in smil_formats:
                    if f['url'].endswith('/cfx/st'):
                        # RTMP endpoint: fix up app/play_path and convert the
                        # fractional tbr into an integer vbr for the format id.
                        f['app'] = 'cfx/st'
                        if not f['play_path'].startswith('mp4:'):
                            f['play_path'] = 'mp4:' + f['play_path']
                        if isinstance(f['tbr'], float):
                            f['vbr'] = f['tbr'] * 1000
                            del f['tbr']
                        f['format_id'] = 'rtmp-%d' % f['vbr']
                formats.extend(smil_formats)
            elif stream_type in ('ts', 'hls'):
                m3u8_formats = self._extract_m3u8_formats(
                    s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
                    m3u8_id='hls', fatal=False)
                # Skip playlists that contain no audio at all.
                if all([f.get('acodec') == 'none' for f in m3u8_formats]):
                    continue
                for f in m3u8_formats:
                    # Demote audio-only/video-only variants below muxed ones.
                    if f.get('acodec') == 'none':
                        f['preference'] = -40
                    elif f.get('vcodec') == 'none':
                        f['preference'] = -50
                    height = f.get('height')
                    if not height:
                        continue
                    # The variant URL often encodes the bitrate after the
                    # height, e.g. "..._720-2500k..."; recover it if present.
                    vbr = self._search_regex(
                        r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
                    if vbr:
                        f['vbr'] = int(vbr)
                formats.extend(m3u8_formats)
            else:
                # Progressive download (or unknown type): single format entry.
                vbr = int_or_none(s.get('bitrate'))
                formats.append({
                    'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type,
                    'vbr': vbr,
                    'width': int_or_none(s.get('width')),
                    'height': int_or_none(s.get('height')),
                    'filesize': int_or_none(s.get('filesize')),
                    'url': s_url,
                    'preference': -1,
                })
        self._sort_formats(
            formats, ('preference', 'width', 'height', 'vbr', 'filesize', 'tbr', 'ext', 'format_id'))
        # All subtitle tracks are filed under 'en'; the API does not appear to
        # expose a language code here.
        subtitles = {}
        for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []):
            subtitle_url = subtitle.get('url')
            if subtitle_url:
                subtitles.setdefault('en', []).append({'url': subtitle_url})
        return {
            'id': uuid,
            'title': self._live_title(title) if is_live else title,
            'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
            'description': try_get(video, lambda x: x['subheadlines']['basic']),
            'formats': formats,
            # duration is reported in centiseconds, hence the scale of 100.
            'duration': int_or_none(video.get('duration'), 100),
            'timestamp': parse_iso8601(video.get('created_date')),
            'subtitles': subtitles,
            'is_live': is_live,
        }
| [
"noreply@github.com"
] | firsttris.noreply@github.com |
c1d2bdf1cfba820e44c9ad97648ee80b603e7f51 | d43bd00c2540ae7520bc11d4c13be0f4160cc762 | /src/chap/2/deliciousrec.py | 55d766ad258157a095d3641dd331c9430bcb991d | [] | no_license | maple-cat/CollectiveIntelligence | c96b8775e53ac25655e0ba3870ab7940190b6d26 | 3c864b7bf196cdfc3d2cfceffdcc43c4293fa6a2 | refs/heads/master | 2020-03-18T23:14:51.611932 | 2013-04-02T16:12:55 | 2013-04-02T16:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | '''
Created on 2013-2-1
@author: duanhong
'''
from pydelicious import get_popular,get_userposts,get_urlposts
#import pydelicious
import time
def initializeUserDict(tag, count=5):
    """Return a dict keyed by every user who bookmarked one of the top
    `count` popular del.icio.us posts for `tag`; values start empty."""
    users = {}
    for popular_post in get_popular(tag=tag)[0:count]:
        for bookmark in get_urlposts(popular_post['href']):
            users[bookmark['user']] = {}
    return users
def fillItems(user_dict):
    """Fill each user's dict with url -> rating (1.0 if bookmarked).

    For every user key in `user_dict`, fetch their posts (retrying up to 3
    times), mark each bookmarked url with 1.0, then backfill every other
    url seen across all users with 0.0 so all rating dicts share one key set.

    Mutates `user_dict` in place; returns None.
    """
    all_items = {}
    for user in user_dict:
        # Bug fix: previously `posts` was left undefined (NameError) when all
        # three attempts for the *first* user failed, and silently reused the
        # previous user's posts for later failing users. Reset it per user.
        posts = None
        for _ in range(3):
            try:
                posts = get_userposts(user)
                break
            except Exception:
                print("Failed user " + user + ", retrying")
                time.sleep(4)
        if posts is None:
            # All retries failed; skip this user instead of crashing.
            continue
        for post in posts:
            url = post['href']
            user_dict[user][url] = 1.0
            all_items[url] = 1
    # Ensure every user's dict covers the full set of urls (0.0 = not saved).
    for ratings in user_dict.values():
        for item in all_items:
            if item not in ratings:
                ratings[item] = 0.0
"duanhong169@gmail.com"
] | duanhong169@gmail.com |
a9644f9f2a5b3bcdfe9af426286195f43770ac4b | 1882da61b4526e35369e03f643492070086624dd | /tests/unit/test_batches.py | 1a8e3d80c3c86be83f356afadf4b912c7bd7d5de | [
"CC0-1.0"
] | permissive | notWorthy/python-architecture-patterns | 1c235c94f978c9da9c194f00413047c5ecd1d1ee | 94e43c0882522231f4d4853e7aec83c058d7eb5a | refs/heads/master | 2022-12-09T18:39:53.808028 | 2020-09-09T00:56:42 | 2020-09-09T00:56:42 | 291,617,050 | 0 | 0 | CC0-1.0 | 2020-09-09T00:56:44 | 2020-08-31T04:48:13 | Python | UTF-8 | Python | false | false | 1,635 | py | from datetime import date
from domain.model import Batch, OrderLine
# Reference date shared by every batch built in these tests (used as the ETA).
today = date.today()
def make_batch_and_line(sku, batch_qty, line_qty):
    """Build a Batch (eta=today) and an OrderLine that share the given SKU."""
    batch = Batch("batch-001", sku, batch_qty, eta=today)
    line = OrderLine("order-123", sku, line_qty)
    return batch, line
def test_allocating_to_a_batch_reduces_the_available_quantity():
    """Allocating a line subtracts its quantity from the batch's stock."""
    stock, order = make_batch_and_line("SMALL-TABLE", 20, 2)
    stock.allocate(order)
    assert stock.available_quantity == 20 - 2
def test_allocation_is_idempotent():
    """Allocating the same line twice must only deduct its quantity once."""
    stock, order = make_batch_and_line("SMALL-TABLE", 20, 2)
    for _ in range(2):
        stock.allocate(order)
    assert stock.available_quantity == 18
def test_can_only_deallocate_allocated_lines():
    """Deallocating a line that was never allocated is a no-op."""
    stock, stray_order = make_batch_and_line("DECORATIVE-TRINKET", 20, 2)
    stock.deallocate(stray_order)
    assert stock.available_quantity == 20
def test_can_allocate_if_available_greater_than_required():
    """A large batch can absorb a smaller order line."""
    stock, order = make_batch_and_line("ELEGEANT-LAMP", 20, 2)
    assert stock.can_allocate(order)
def test_cannot_allocate_if_available_smaller_than_required():
    """An order line larger than the batch must be rejected."""
    stock, order = make_batch_and_line("ELEGANT-LAMP", 2, 20)
    assert stock.can_allocate(order) is False
def test_can_allocate_if_available_equal_to_required():
    """Exactly matching quantities are allocatable (boundary case)."""
    stock, order = make_batch_and_line("ELEGANT-LAMP", 2, 2)
    assert stock.can_allocate(order)
def test_cannot_allocate_if_skus_do_not_match():
    """Lines may only be allocated to batches carrying the same SKU."""
    chair_batch = Batch("batch-001", "UNCOMFORTABLE-CHAIR", 100, eta=None)
    toaster_line = OrderLine("order-123", "EXPENSIVE-TOASTER", 10)
    assert chair_batch.can_allocate(toaster_line) is False
"developer@mailible.com"
] | developer@mailible.com |
483cc434f7750ca41c1475f6670f1c174d708d87 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /nlp/dialogue_generation/cpm/pytorch/iluvatar/cpm/config/layers/fast_self_multihead_attn_func.py | 01c7eedcf77eaaaf34487a80bdb34ae4fd3a42be | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 13,572 | py | import torch
# import fast_self_multihead_attn
try:
import ext_ops as fast_self_multihead_attn_bias
except:
pass
# import fast_self_multihead_attn_bias_additive_mask
class FastSelfAttnFunc(torch.autograd.Function) :
    """Autograd wrapper around fused CUDA self-attention kernels.

    forward/backward dispatch to one of three fused implementations based on
    (a) whether input/output projection biases are present and (b) whether the
    padding mask is additive (added to logits) or boolean.

    NOTE(review): only `fast_self_multihead_attn_bias` (via `ext_ops`) is
    imported at the top of this module; the `fast_self_multihead_attn` and
    `fast_self_multihead_attn_bias_additive_mask` branches would raise
    NameError if taken — confirm which branches are actually exercised.
    """
    @staticmethod
    def forward(ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases, output_biases, pad_mask, mask_additive, dropout_prob):
        # Box python scalars into tensors so they can go through
        # save_for_backward alongside the activation tensors.
        use_biases_t = torch.tensor([input_biases is not None])
        heads_t = torch.tensor([heads])
        dropout_prob_t = torch.tensor([dropout_prob])
        null_tensor = torch.tensor([])
        use_mask = (pad_mask is not None)
        mask_additive_t= torch.tensor([mask_additive])
        if use_biases_t[0]:
            if not mask_additive:
                # Biased projections + boolean padding mask.
                input_lin_results, \
                softmax_results, \
                dropout_results, \
                dropout_mask, \
                matmul2_results, \
                outputs = \
                fast_self_multihead_attn_bias.forward( \
                use_mask, \
                use_time_mask, \
                is_training, \
                heads, \
                inputs, \
                input_weights, \
                output_weights, \
                input_biases, \
                output_biases, \
                pad_mask if use_mask else null_tensor, \
                dropout_prob)
                ctx.save_for_backward(use_biases_t, \
                heads_t, \
                matmul2_results, \
                dropout_results, \
                softmax_results, \
                null_tensor, \
                null_tensor, \
                mask_additive_t, \
                input_lin_results, \
                inputs, \
                input_weights, \
                output_weights, \
                dropout_mask, \
                dropout_prob_t)
            else:
                # Biased projections + additive mask: the kernel returns the
                # raw bmm1 logits instead of softmax results.
                input_lin_results, \
                bmm1_results, \
                dropout_results, \
                dropout_mask, \
                matmul2_results, \
                outputs = \
                fast_self_multihead_attn_bias_additive_mask.forward( \
                use_mask, \
                use_time_mask, \
                is_training, \
                heads, \
                inputs, \
                input_weights, \
                output_weights, \
                input_biases, \
                output_biases, \
                pad_mask if use_mask else null_tensor, \
                dropout_prob)
                ctx.save_for_backward(use_biases_t, \
                heads_t, \
                matmul2_results, \
                dropout_results, \
                null_tensor, \
                bmm1_results, \
                pad_mask, \
                mask_additive_t, \
                input_lin_results, \
                inputs, \
                input_weights, \
                output_weights, \
                dropout_mask, \
                dropout_prob_t)
        else:
            # Unbiased projections (boolean mask only).
            input_lin_results, \
            softmax_results, \
            dropout_results, \
            dropout_mask, \
            matmul2_results, \
            outputs = \
            fast_self_multihead_attn.forward( \
            use_mask, \
            use_time_mask, \
            is_training, \
            heads, \
            inputs, \
            input_weights, \
            output_weights, \
            pad_mask if use_mask else null_tensor, \
            dropout_prob)
            ctx.save_for_backward(use_biases_t, \
            heads_t, \
            matmul2_results, \
            dropout_results, \
            softmax_results, \
            null_tensor, \
            null_tensor, \
            mask_additive_t, \
            input_lin_results, \
            inputs, \
            input_weights, \
            output_weights, \
            dropout_mask, \
            dropout_prob_t)
        return outputs.detach()

    @staticmethod
    def backward(ctx, output_grads):
        # Unpack in the same order forward() saved them; unused slots hold
        # the empty null_tensor placeholder.
        use_biases_t, \
        heads_t, \
        matmul2_results, \
        dropout_results, \
        softmax_results, \
        bmm1_results, \
        pad_mask, \
        mask_additive_t, \
        input_lin_results, \
        inputs, \
        input_weights, \
        output_weights, \
        dropout_mask, \
        dropout_prob_t = ctx.saved_tensors
        output_grads = output_grads.contiguous()
        if use_biases_t[0]:
            if not mask_additive_t[0]:
                input_grads, \
                input_weight_grads, \
                output_weight_grads, \
                input_bias_grads, \
                output_bias_grads = \
                fast_self_multihead_attn_bias.backward( \
                heads_t[0], \
                output_grads, \
                matmul2_results, \
                dropout_results, \
                softmax_results, \
                input_lin_results, \
                inputs, \
                input_weights, \
                output_weights, \
                dropout_mask, \
                dropout_prob_t[0])
            else:
                input_grads, \
                input_weight_grads, \
                output_weight_grads, \
                input_bias_grads, \
                output_bias_grads = \
                fast_self_multihead_attn_bias_additive_mask.backward( \
                heads_t[0], \
                output_grads, \
                matmul2_results, \
                dropout_results, \
                bmm1_results, \
                pad_mask, \
                input_lin_results, \
                inputs, \
                input_weights, \
                output_weights, \
                dropout_mask, \
                dropout_prob_t[0])
        else:
            # No projection biases, hence no bias gradients to return.
            input_bias_grads = None
            output_bias_grads = None
            input_grads, \
            input_weight_grads, \
            output_weight_grads = \
            fast_self_multihead_attn.backward( \
            heads_t[0], \
            output_grads, \
            matmul2_results, \
            dropout_results, \
            softmax_results, \
            input_lin_results, \
            inputs, \
            input_weights, \
            output_weights, \
            dropout_mask, \
            dropout_prob_t[0])
        # One gradient slot per forward() argument (None for non-tensor args).
        return None, None, None, input_grads, input_weight_grads, output_weight_grads,input_bias_grads, output_bias_grads, None, None, None
# Convenience callable mirroring torch convention: invoke the autograd
# function as fast_self_attn_func(use_time_mask, is_training, heads, ...).
fast_self_attn_func = FastSelfAttnFunc.apply
| [
"jia.guo@iluvatar.ai"
] | jia.guo@iluvatar.ai |
880c0e3f07b8dc18b0b9a657319a7a80bacb079d | e0683e20c9b831e2d6411f41a1907c876135b051 | /cauchyCode/results/size.py | 85e7689783a14242373e9c16d364d222bdee1ea8 | [] | no_license | bharathGitUser/fds | 1df93f73b36f3427599f77bfce7df58c8980f321 | 3b59fcb6d5526d3e9514508b58ae87b037c5a7f8 | refs/heads/main | 2023-01-14T16:46:32.851493 | 2020-11-15T15:47:52 | 2020-11-15T15:47:52 | 313,061,723 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,604 | py | #!/usr/bin/python
import os
import sys
import stats
import sys
import numpy as np
import matplotlib.pyplot as plt
from operator import add
def foo(section, dict):
    """Register `section` in `dict` with fresh, empty 'xval'/'yval' lists.

    Note: the parameter is (unfortunately) named `dict`, shadowing the
    builtin; the name is kept for interface compatibility.
    """
    dict[section] = {'xval': [], 'yval': []}
def boo(d, dict):
    """For every (section, rows) pair in `d`, append a lazy map over the
    second column of `rows` to that section's 'yval' list in `dict`."""
    for section, rows in d.items():
        dict[section]['yval'].append(map(lambda row: row[1], rows))
if __name__ == "__main__":
    # Usage: size.py <results-file>. The file is a sequence of trials, each
    # introduced by a "Trial" line, containing dashed section headers followed
    # by comma-separated numeric rows.
    assert len(sys.argv)>1
    file2Parse = str(sys.argv[1])
    handleFile2Parse = open(file2Parse,'r')
    data = {}
    trialNumber = -1
    opNumber = 10
    for line in handleFile2Parse:
        if line.startswith('Trial'):
            trialNumber = trialNumber+1
            data[trialNumber] = {}
        elif line.startswith('-'):
            # Section header, e.g. "----LL_NEW_FUSION_SIZE_REC----".
            section = line[:-1].strip('-')
            data[trialNumber][section] = []
        else:
            # Data row: parse all comma-separated fields as floats.
            tmp = line.split(',')
            print tmp
            list = map(float, tmp)
            data[trialNumber][section].append(list)
            #data[trialNumber][section].append(map(float,line.split(',')[1:3]))
    handleFile2Parse.close()
    print trialNumber
    #Linked List Size
    numPrimaries = map(lambda x : float(x[0]), data[0]['LL_NEW_FUSION_SIZE_REC'])
    llOldFusionNodes = map(lambda x : float(x[2]), data[0]['LL_OLD_FUSION_SIZE_REC'])
    llNewFusionNodes = map(lambda x : float(x[2]), data[0]['LL_NEW_FUSION_SIZE_REC'])
    plt.xlabel('Number of Primaries')
    plt.ylabel('Number of Backup Nodes')
    plt.plot(numPrimaries,llOldFusionNodes,'k:',numPrimaries,llNewFusionNodes,'k-')
    plt.legend(('Old Fusion','New Fusion'),loc='upper right');
    plt.savefig('.'.join(["LL_SIZE","ps"]))
    plt.close();
    #Linked List Recovery
    numPrimaries = map(lambda x : float(x[0]), data[0]['LL_NEW_FUSION_SIZE_REC'])
    llOldFusionRecTime = map(lambda x : float(x[4]), data[0]['LL_OLD_FUSION_SIZE_REC'])
    llNewFusionRecTime = map(lambda x : float(x[4]), data[0]['LL_NEW_FUSION_SIZE_REC'])
    plt.xlabel('Number of Primaries')
    plt.ylabel('Recovery Time in Microseconds')
    plt.plot(numPrimaries, llOldFusionRecTime,'k:',numPrimaries,llNewFusionRecTime,'k-')
    plt.legend(('Old Fusion','New Fusion'),loc='upper right');
    plt.savefig('.'.join(["LL_REC","ps"]))
    plt.close();
    #Linked List Update Time
    numOperations = map(lambda x : float(x[1]), data[0]['LL_NEW_FUSION_UPDATE'])
    llOldUpdateTime = map(lambda x : float(x[3]), data[0]['LL_OLD_FUSION_UPDATE'])
    llNewUpdateTime = map(lambda x : float(x[3]), data[0]['LL_NEW_FUSION_UPDATE'])
    plt.xlabel('Number of Operations')
    plt.ylabel('Update Time in Microseconds')
    # NOTE(review): numOperations is computed above but numPrimaries is used
    # as the x-axis here — this looks like a copy-paste bug; confirm intent.
    plt.plot(numPrimaries,llOldUpdateTime,'k:',numPrimaries,llNewUpdateTime,'k-')
    plt.legend(('Old Fusion','New Fusion'),loc='upper right');
    plt.savefig('.'.join(["LL_UPDATE","ps"]))
    plt.close();
| [
"bharathb@snrs-MacBook-Pro.local"
] | bharathb@snrs-MacBook-Pro.local |
b1effa43176d77ba4cd5d71fe491629591f33413 | 978a0ff297cfe68baa8b62a30aaacefa3efdd48d | /flaskfiles/flaskpractice.py | 4fe3f0d5817107d5f5eb70af42986722a0a65e5e | [] | no_license | pavi535/pythonpratice | d55f263cf4170ace3fa8ba7f4a26d67f950af7ce | 9f66be3e609f2b4fbc1a035e67d6fcf08992818a | refs/heads/main | 2023-08-27T06:38:30.446752 | 2021-11-10T03:03:24 | 2021-11-10T03:03:24 | 426,094,134 | 0 | 0 | null | 2021-11-10T03:03:25 | 2021-11-09T04:46:11 | Python | UTF-8 | Python | false | false | 2,593 | py | from datetime import datetime
from flask import Flask, render_template, url_for, flash, redirect
from flask_sqlalchemy import SQLAlchemy
from forms import RegistrationForm, LoginForm
app = Flask(__name__)
# NOTE(review): the secret key is committed to source control; load it from
# the environment (e.g. os.environ) before deploying anywhere real.
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
# Local SQLite database file, created next to the application.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db=SQLAlchemy(app)
class User(db.Model):
    """Application user; one-to-many with Post via the `posts` relationship."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Filename of the avatar image; a shared default is used until changed.
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # Stored password hash (60 chars matches bcrypt output length).
    password = db.Column(db.String(60), nullable=False)
    # Posts authored by this user; Post gains a virtual `author` attribute.
    posts = db.relationship('Post', backref='author', lazy=True)

    def __repr__(self):
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
    """Blog post authored by a User (see User.posts / the `author` backref)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # Stored in UTC; default is evaluated at insert time (no parentheses).
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        return f"Post('{self.title}', '{self.date_posted}')"
# In-memory demo data rendered on the home page (stand-in for DB-backed posts).
posts=[{'Product_name': 'car',
    'car_make':'Toyota',
    'car_year': 2019,
    'car_description':'It is a black car with 52000 miles on it' },

    {'Product_name': 'jeep',
    'car_make':'Wrangler',
    'car_year': 2020,
    'car_description':'It is a black car with 12000 miles on it' }
    ]
@app.route('/')
def home():
    """Render the landing page with the in-memory demo `posts`."""
    return render_template('home.html', posts=posts)
@app.route('/help')
def help():
    """Render the static help page."""
    return render_template('help.html', title='help')
@app.route("/register", methods=['GET', 'POST'])
def register():
    """Show the registration form; on a valid POST, flash success and
    redirect to the home page."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        # Initial GET, or POST with validation errors: (re)render the form.
        return render_template('register.html', title='Register', form=form)
    flash(f'Account created for {form.username.data}!', 'success')
    return redirect(url_for('home'))
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Show the login form; on a valid POST, check credentials and redirect
    home on success, otherwise flash an error and re-render the form."""
    form = LoginForm()
    if form.validate_on_submit():
        # NOTE(review): credentials are hard-coded placeholders — replace
        # with a real user lookup + password hash check before shipping.
        credentials_ok = (form.email.data == 'admin@blog.com'
                          and form.password.data == 'password')
        if credentials_ok:
            flash('You have been logged in!', 'success')
            return redirect(url_for('home'))
        flash('Login Unsuccessful. Please check username and password', 'danger')
    return render_template('login.html', title='Login', form=form)
if __name__=='__main__':
    # Development server only; debug=True enables the interactive debugger
    # and auto-reload — never use in production.
    app.run(debug=True)
"you@example.com"
] | you@example.com |
8a995bd441b1bf0410c40c17856e88cacb7fdc00 | 840ca6face6cb369104eec228fe7b51630bd10f1 | /剑指offer/52-两个链表的第一个公共节点.py | ca8ed7ed73921386ff21cac0c21a687a57434913 | [] | no_license | Leofighting/Practice-on-LeetCode | 56e6245eb03f76ca254e54dc0a0cdd2c71ec3dd0 | 6d7dad991922abe862f19009b261b5146e059955 | refs/heads/master | 2021-08-16T04:21:04.699124 | 2020-06-29T22:48:38 | 2020-06-29T22:48:38 | 197,718,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding:utf-8 -*-
__author__ = "leo"
# 输入两个链表,找出它们的第一个公共节点。
class ListNode:
    """Singly linked list node holding a value and a `next` pointer."""

    def __init__(self, x):
        self.val, self.next = x, None
class Solution:
    """Find the first node shared by two singly linked lists."""

    def get_intersection_node(self, headA, headB):
        """Two-pointer walk: each pointer traverses its own list then the
        other's, so both cover len(A)+len(B) steps and meet at the first
        common node (or both reach None when the lists do not intersect)."""
        walker_a, walker_b = headA, headB
        while walker_a != walker_b:
            walker_a = walker_a.next if walker_a else headB
            walker_b = walker_b.next if walker_b else headA
        return walker_a
| [
"leoxiaojw@gmail.com"
] | leoxiaojw@gmail.com |
783f8f6e5173dae2d5474b7632052fd9b4c4e950 | 330e77e53d580a73e883e705b6bc8caca3456194 | /CyTin/wsgi.py | 40f683ebf7732c411b6390db6bd00f72b43a095d | [] | no_license | Chandan-97/CyTin | d2995970eade13ec46c7874ecb5c2922328e5367 | e8612124a52a307a44c6833ddefe02b06a50c919 | refs/heads/master | 2020-03-10T11:39:18.188372 | 2018-04-13T04:26:01 | 2018-04-13T04:26:01 | 129,360,932 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | """
WSGI config for CyTin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyTin.settings")
application = get_wsgi_application()
| [
"cp564738@gmail.com"
] | cp564738@gmail.com |
e304f960f0f1b7f61d60fe21801e60380b87bf4f | ef20a522c892f2f38bd232048c60336acb88bf65 | /stockscraper/stockscraper/pipelines.py | 25ec6a50cb3a67cd0357aa81987646eae910daee | [] | no_license | KR-CodingMonkey/Crawl_Stock_Info | 5faee5724d8ccd018e136063e14218cff35abbd8 | ef2eaf14713b5377bf417675c0b232cd7a4e7d40 | refs/heads/main | 2023-03-26T02:34:53.905765 | 2021-03-21T23:59:03 | 2021-03-21T23:59:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | from kafka import KafkaProducer
from json import dumps
import time
class StockscraperPipeline(object):
    """Scrapy item pipeline that normalizes scraped stock rows and publishes
    them to Kafka in the Connect envelope format (schema + payload)."""
    def __init__(self):
        # gzip-compressed, fire-and-forget (acks=0) producer; items are
        # JSON-serialized before being sent.
        self.producer = KafkaProducer(acks=0,
                    compression_type='gzip',
                    bootstrap_servers=['localhost:9092'],
                    value_serializer=lambda x: dumps(x).encode('utf-8'))

    def process_item(self, item, spider):
        item = dict(item)
        # Strip thousands separators from numeric fields.
        item["current_price"] = item["current_price"].replace(",", "")
        item["foreigner_investor"] = item["foreigner_investor"].replace(",", "")
        item["trading_volume"] = item["trading_volume"].replace(",", "")
        # Left-pad trading_volume to a fixed width of 10 characters.
        if len(item["trading_volume"]) <= 10:
            add_zero_count = 10 - len(item["trading_volume"])
            add_zero_str = '0' * add_zero_count
            item["trading_volume"] = add_zero_str + item["trading_volume"]
        # NOTE(review): payload "id" is hard-coded to 2, and the schema name
        # ("my_stock_table") differs from the topic below ("my6_stock_table")
        # — confirm both are intentional.
        data = {"schema":{"type":"struct","fields":[{"type":"int32","optional":False,"field":"id"},{"type":"string","optional":True,"field":"stock_code"},{"type":"string","optional":True,"field":"stock_name"},{"type":"string","optional":True,"field":"current_price"},{"type":"string","optional":True,"field":"fluctuation_rate"},{"type":"string","optional":True,"field":"created_at"},{"type":"string","optional":True,"field":"foreigner_investor"},{"type":"string","optional":True,"field":"trading_volume"}],"optional":False,"name":"my_stock_table"}, "payload":{"id":2,"stock_code":item['stock_code'],"stock_name":item['stock_name'],"current_price":item['current_price'],"fluctuation_rate":item['fluctuation_rate'],"created_at":item['created_at'],"foreigner_investor":item['foreigner_investor'],"trading_volume":item['trading_volume']}}
        self.producer.send("my6_stock_table", value=data)
        # time.sleep(0.3)
        self.producer.flush()
self.producer.flush() | [
"ahippo@naver.com"
] | ahippo@naver.com |
451038cce2a5fc5ba8d5d2f5557c9b07b6b6b2ab | df6b2801a70c3a7ec36424f331edab812f1c9ce3 | /urllib_demo/request.py | 08836b51657fa819fa676b4f386286810b929037 | [] | no_license | ilovepeppa/crawler_demo | 28cc5fc06559382cb81ed915e4b92bb20dff7665 | 720122066342c31dfdd21ee2c8cd12bff70e0e94 | refs/heads/master | 2020-04-25T22:21:19.793628 | 2019-02-28T12:36:15 | 2019-02-28T12:36:15 | 173,108,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import urllib.request
import http.cookiejar
def save_cookie():
    """Fetch baidu.com, print every cookie received, and persist the cookie
    jar to 'cookies.txt' in LWP format (including session/expired cookies)."""
    jar = http.cookiejar.LWPCookieJar('cookies.txt')
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(jar))
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    opener.open('http://www.baidu.com')
    for c in jar:
        print(c.name + '=' + c.value)
    jar.save(ignore_discard=True, ignore_expires=True)
def load_cookie():
    """Reload the cookies saved by save_cookie(), replay them against
    baidu.com, and print the response body."""
    jar = http.cookiejar.LWPCookieJar()
    jar.load('cookies.txt', ignore_discard=True, ignore_expires=True)
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(jar))
    body = opener.open('http://www.baidu.com').read()
    print(body.decode('utf8'))
# Running this module saves fresh cookies; swap the comments to replay them.
save_cookie()
# load_cookie()
| [
"ilovecode@yeah.net"
] | ilovecode@yeah.net |
20270c779390d99731d6a40814246f08c030f539 | 94d149c437b82d37cdc0f4a3af08452097776456 | /templates/plantillas/temp_route.py | 91e2fbc31ab6b65613a7673a5f2debc843a75782 | [] | no_license | edyadan/qaqc | a9d8e15ab7c69a65135b2c1416bbc41ada5543e8 | 38311d9fbf2ec7ee596261633d53af10bf897512 | refs/heads/master | 2022-07-08T04:31:43.186144 | 2019-08-04T21:12:43 | 2019-08-04T21:12:43 | 200,541,612 | 0 | 0 | null | 2022-06-21T22:27:49 | 2019-08-04T21:06:29 | Python | UTF-8 | Python | false | false | 1,759 | py | from {{name}}_model import {{name}},{{name}}_form,{{name}}_convert
from db_setup import init_db, db_session
from flask import Blueprint, Flask, jsonify, request, render_template, redirect, url_for, json
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy import create_engine
import pandas as pd
{{name}}_app = Blueprint('{{name}}_app', __name__)
@{{name}}_app.route('/{{name}}_list')
def {{name}}_list():
    # Render every {{name}} row in the list template.
    qry = db_session.query({{name}}).all()
    return render_template('{{name}}_list.html', table=qry )
@{{name}}_app.route('/{{name}}_new', methods=['GET', 'POST'])
def {{name}}_new():
    # GET: show the empty creation form.
    if request.method == 'GET':
        return render_template('{{name}}_new.html')
    # POST: build a new row from the submitted form and persist it.
    if request.method == 'POST':
        form = {{name}}_form(request.form)
        db_session.add({{name}}_convert({{name}}(), form))
        db_session.commit()
        return redirect('/{{name}}_list')
@{{name}}_app.route('/{{name}}_edit', methods=['GET', 'POST'])
def {{name}}_edit():
    # GET: load the row identified by ?id=... and pre-fill the edit form.
    # NOTE(review): no handling for a missing/unknown id — qry may be None.
    if request.method == 'GET':
        id=request.args.get('id', None)
        qry = db_session.query({{name}}).filter({{name}}.id==id).first()
        return render_template('{{name}}_edit.html', val=qry)
    # POST: merge the submitted form back into the row and commit.
    if request.method == 'POST':
        form = {{name}}_form(request.form)
        fo={{name}}_convert({{name}}(), form)
        # Force SQLAlchemy to treat the JSON column as dirty so it is saved.
        flag_modified(fo,'{{name}}')
        db_session.merge(fo)
        db_session.flush()
        db_session.commit()
        return redirect('/{{name}}_list')
@{{name}}_app.route('/{{name}}_delete', methods=['GET', 'POST'])
def {{name}}_delete():
    # Delete the row identified by ?id=... and return to the list view.
    # NOTE(review): deleting via GET is unsafe (crawlers/prefetch can trigger
    # it), and a missing/unknown id makes first() return None and crash —
    # consider restricting to POST and 404-ing on absent rows.
    id=request.args.get('id', None)
    qry = db_session.query({{name}}).filter({{name}}.id==int(id))
    db_session.delete(qry.first())
    db_session.commit()
    return redirect('/{{name}}_list')
"edyadan@outlook.com"
] | edyadan@outlook.com |
b06e3ace791dfcd120050816b47cf3cea36e3caf | 056adbbdfb968486ecc330f913f0de6f51deee33 | /609-find-duplicate-file-in-system/find-duplicate-file-in-system.py | 369212a29b568e52d671b267faa76bb344d532b9 | [] | no_license | privateHmmmm/leetcode | b84453a1a951cdece2dd629c127da59a4715e078 | cb303e610949e953b689fbed499f5bb0b79c4aea | refs/heads/master | 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | # -*- coding:utf-8 -*-
# Given a list of directory info including directory path, and all the files with contents in this directory, you need to find out all the groups of duplicate files in the file system in terms of their paths.
#
# A group of duplicate files consists of at least two files that have exactly the same content.
#
# A single directory info string in the input list has the following format:
# "root/d1/d2/.../dm f1.txt(f1_content) f2.txt(f2_content) ... fn.txt(fn_content)"
# It means there are n files (f1.txt, f2.txt ... fn.txt with content f1_content, f2_content ... fn_content, respectively) in directory root/d1/d2/.../dm. Note that n >= 1 and m >= 0. If m = 0, it means the directory is just the root directory.
#
# The output is a list of group of duplicate file paths. For each group, it contains all the file paths of the files that have the same content. A file path is a string that has the following format:
# "directory_path/file_name.txt"
#
#
# Example 1:
#
# Input:
# ["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
# Output:
# [["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
#
#
#
#
# Note:
#
# No order is required for the final output.
# You may assume the directory name, file name and file content only has letters and digits, and the length of file content is in the range of [1,50].
# The number of files given is in the range of [1,20000].
# You may assume no files or directories share the same name in the same directory.
# You may assume each given directory info represents a unique directory. Directory path and file info are separated by a single blank space.
#
#
#
#
# Follow-up beyond contest:
#
# Imagine you are given a real file system, how will you search files? DFS or BFS?
# If the file content is very large (GB level), how will you modify your solution?
# If you can only read the file by 1kb each time, how will you modify your solution?
# What is the time complexity of your modified solution? What is the most time-consuming part and memory consuming part of it? How to optimize?
# How to make sure the duplicated files you find are not false positive?
#
class Solution(object):
def findDuplicate(self, paths):
"""
:type paths: List[str]
:rtype: List[List[str]]
"""
dicts = collections.defaultdict(list)
for path in paths:
files = path.split(" ")
dir = files[0]
for f in files[1:]:
filename,_,content = f.partition('(')
dicts[content[:-1]].append(dir+'/'+filename)
return [g for g in dicts.values() if len(g)>1]
| [
"hyan90@ucsc.edu"
] | hyan90@ucsc.edu |
400c7f2f344fefd2314e135379867552bfc87fba | 6d14be90b76feb9d6f16ef93ea2ac7aa8deb2f89 | /bagic1/th.py | 71ac0a73ec2ffaefee8e8b6a663a742348cbf689 | [] | no_license | homg93/PS | 7fe9a543f396c1190186a3b28edb5dafe835c51f | d06a3039ff3fac06459853ab12033662e1a0d7d6 | refs/heads/master | 2020-07-18T13:32:24.406492 | 2019-09-04T09:55:29 | 2019-09-04T09:55:29 | 206,254,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | a = [5,1,1,1,2]
s = sorted(a)
print(s)
print(s[s.count(1)])
# d = 0
# l = len(a)
# for i in range(len(s)):
# if i == 0:
# print(s[i]*(l- i))
# d += s[i]*(l- i)
# else:
# d += (s[i]-s[i-1])*(l- i)
# print((s[i]-s[i-1]),(l- i),end=' ')
# print((s[i]-s[i-1])*(l- i))
| [
"homg93@gmail.com"
] | homg93@gmail.com |
b0aa51ba1503361c7fb86dc2c4d27a52bc4a2d8f | 9b6a3902e8a33e83dd6b24dc83b8a989c6666461 | /migrations/versions/b29f8f5e455b_create_controller_tables.py | 943eeb4c785717398c9135631082d60090eb7962 | [
"MIT"
] | permissive | MTES-MCT/mobilic-api | 91dd171c1fb22e23f4cd672d380ce27feca24366 | 1e189f1e4d175feb275585d8eba8ec08b5aa8465 | refs/heads/master | 2023-09-04T10:56:54.731547 | 2023-08-29T13:51:37 | 2023-08-29T13:51:37 | 238,493,241 | 1 | 0 | MIT | 2023-09-14T19:37:52 | 2020-02-05T16:14:13 | Python | UTF-8 | Python | false | false | 2,210 | py | """create controller tables
Revision ID: b29f8f5e455b
Revises: 420dbfec0b28
Create Date: 2022-07-12 11:55:45.368316
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "b29f8f5e455b"
down_revision = "420dbfec0b28"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"controller_user",
sa.Column("creation_time", sa.DateTime(), nullable=False),
sa.Column("agent_connect_id", sa.String(length=255), nullable=False),
sa.Column(
"agent_connect_info",
postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
nullable=True,
),
sa.Column(
"organizational_unit", sa.String(length=255), nullable=False
),
sa.Column("email", sa.String(length=255), nullable=True),
sa.Column("first_name", sa.String(length=255), nullable=False),
sa.Column("last_name", sa.String(length=255), nullable=False),
sa.Column("id", sa.Integer(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("agent_connect_id"),
)
op.create_table(
"controller_refresh_token",
sa.Column("creation_time", sa.DateTime(), nullable=False),
sa.Column("token", sa.String(length=128), nullable=False),
sa.Column("controller_user_id", sa.Integer(), nullable=False),
sa.Column("id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["controller_user_id"],
["controller_user.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("token"),
)
op.create_index(
op.f("ix_controller_refresh_token_controller_user_id"),
"controller_refresh_token",
["controller_user_id"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
op.drop_index(
op.f("ix_controller_refresh_token_controller_user_id"),
table_name="controller_refresh_token",
)
op.drop_table("controller_refresh_token")
op.drop_table("controller_user")
# ### end Alembic commands ###
| [
"raphael.taieb.fr@gmail.com"
] | raphael.taieb.fr@gmail.com |
7863e3a9c1b084f2424bfe6b8e926d7afd714b98 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04030/s580737724.py | e5017d598b9bc2f7e817eadffadd9076c0229da3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | S = input()
Z = []
X = len(S)
i = 0
while i < X :
if S[i] == "0":
Z.append(0)
elif S[i] == "1":
Z.append(1)
elif S[i] == "B":
if len(Z)== 0:
pass
else:
Z.pop()
i += 1
i = 0
X = len(Z)
while i < X:
print(Z[i] , end ="")
i +=1 | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
98e8cfe4279ac7e1dfe3f4566e407a589595201e | 82f993631da2871933edf83f7648deb6c59fd7e4 | /w2/L6/11.py | 2cf14d2b9a804b5ec5eaec7caa0a54bb13eddce8 | [] | no_license | bobur554396/PPII2021Summer | 298f26ea0e74c199af7b57a5d40f65e20049ecdd | 7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2 | refs/heads/master | 2023-06-26T05:42:08.523345 | 2021-07-24T12:40:05 | 2021-07-24T12:40:05 | 380,511,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | a = ['a', 'b', 'c']
b = ['hello', 'hi', 'hola']
c = [12, 30, 20]
for i in zip(a, b, c):
print(i) | [
"bobur.muhsimbaev@gmail.com"
] | bobur.muhsimbaev@gmail.com |
3267827c6172fd22712c30402e7fc68868d81061 | 42b84b02e64d21234372501a20bf820e0bcbf281 | /site/threath/apps/user_profiles/views.py | 2054832130d5f203d6bf0ea498dde605276bad9c | [] | no_license | gage/proto | 861d1e1190770b0cc74f51a6fe140157cc0ac12e | e13ac7d0ee5c6acce2557dcf71a00a941543c006 | refs/heads/master | 2020-04-06T06:44:01.712532 | 2013-06-28T06:30:59 | 2013-06-28T06:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | import time
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.auth.models import User
from globals.utils import bigpipe_pagelet
def verify_email(request):
'''
When user click the activation link, the email will be verified.
'''
activation_code = request.GET.get('activation_code')
email = request.GET.get('email')
uid = request.GET.get('id')
# print "activation_code: %s" % activation_code
# print "email: %s" % email
# print "id: %s" % uid
user = User.objects.get(id=uid)
# print user
profile = user.get_profile()
if profile.verify_email(email, activation_code):
return HttpResponse("Email has been verified successfully.")
else:
return HttpResponse("This activation code is expired.")
@login_required
def user_main(request, user_id=None):
def stream_response_generator():
context = {
'BIG_PIPE': True
}
base_view = render_to_string("main.html", context, context_instance=RequestContext(request))
yield base_view.ljust(4096)
yield bp_testpagelet(request).ljust(4096)
yield render_to_string("bp_page_end.html", {}, context_instance=RequestContext(request))
return HttpResponse(stream_response_generator(), mimetype='text/html', stream_content=True)
@bigpipe_pagelet
def bp_testpagelet(request):
innerHTML = render_to_string("bp_testpagelet.html", {'BIG_PIPE': True}, context_instance=RequestContext(request))
return ['testpagelet',
innerHTML,
'chatRoom/chatRoom',
['base.css','test.css']
]
| [
"sean.cheng@geniecapital.com"
] | sean.cheng@geniecapital.com |
8633166a9a9d8761557dd75a22580ac2bb386aee | e6429af0061bc64a6a38256b9865bf4355ee1e32 | /Python-DatavisulizationWebpage/testing/main.py | 3e70d2c4264fb32cfdcc05fb5ffe1954fe4dad39 | [] | no_license | rala8730/software-development-methods-and-tools | 4002823e33922daef3030bf568633eb6a0a8f87b | 1ab996a987cbd47baa3ad7f025d8d9ca0ae846f5 | refs/heads/master | 2021-01-12T04:03:11.094326 | 2016-12-29T04:49:28 | 2016-12-29T04:49:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | #!/usr/bin/env python
import MySQLdb
# open databases connection
#here, please enter the the username, passwaord, and mysql database you created
#you will have to do this for all four webpages
db = MySQLdb.connect("localhost","root","root","pollutiondata")
##beginning of the HTML header/webpage
print "content-type: text/html"
print
con = '''
<html>
<head>
<title> Carbon Emissions (1990) </title>
<link rel="stylesheet" type="text/css" href="MyStyle.css">
</head>
<body style="margin-left: 200px; margin-right: 200px; margin-top: 50px; margin-bottom: 50px;">
<h2>Carbon Emissions In Each State</h2>
<p>
The map below visualizes data taken from the U.S. Energy Information Administration (EIA) Website for
annual carbon emissions per State in Million Metric tons of CO<sub>2</sub>, and includes a breakdown of the
individual contributions in each state due to Petroleum, Gas, and Coal.</p></br>
<!--These three link to the source code for the map visualization -->
<script src="http://d3js.org/d3.v3.min.js"></script>
<script src="http://d3js.org/topojson.v1.min.js"></script>
<script src="http://datamaps.github.io/scripts/0.5.4/datamaps.all.min.js"></script>
<!--Allows user to switch between years. We had to link to multiple webpages -->
<form> <span class="Button_explanation">Choose year to visualise: </span>
<button formaction="/cgi-bin/main.py">1990</button>
<button formaction="/cgi-bin/main2.py"> 2000</button>
<button formaction="/cgi-bin/main3.py">2010</button>
<button formaction="/cgi-bin/main4.py">2013</button>
</form>
<!-- creates a 'container' to hold the map visualization -->
<div id="container" style="position: relative; width: 900px; height: 600px; margin: auto;"></div>
<script>
//Creates an instance of the map
var map = new Datamap({
scope: 'usa',
element: document.getElementById('container'),
//this fetches the map JSON
geographyConfig: {
highlightBorderColor: '#bada55',
//creates the information displayed in the Popup hover
popupTemplate: function(geography, data) {
return '<div class="hoverinfo" style="background-color:rgba(38, 82, 91, 0.9); color:white;">' + '<b>'+ geography.properties.name +'</b> </br>' +
'Total:' + data.Total + '</br>' + 'Coal:' + data.Coal + '</br>' + 'Petroleum:' + data.Pet + '</br>' + 'Gas:' + data.Gas +' '
},
highlightBorderWidth: 3
},
//map chloropeth fills
fills: {
'0-50': '#ccffff',
'51-100': '#66ccff',
'101-150': '#3399ff',
'151-200': '#0066ff',
'201-300': '#0000ff',
'301-400': '#0000cc',
'400+': '#000099',
defaultFill: '#EDDC4E'
},
data:{
'''
print con
cursor=db.cursor()
#in each .py file a different table in the database in accessed
cursor.execute("SELECT * FROM `1990`")
#defines the colors to be returned by the chloropeth
def filler(value):
if value > 400:
return "400+"
elif value >300:
return "301-400"
elif value > 200:
return "201-300"
elif value > 150:
return "151-200"
elif value > 100:
return "101-150"
elif value > 50:
return "51-100"
else:
return "0-50"
#this loops through the database, and fills in values according to each state.
for row in cursor.fetchall():
#print(row[0])
state = row[0]
coal = row[1]
pet = row[2]
gas = row[3]
total = row[4]
color = filler(total)
#test for tes
if state == "CO":
tes = total
print '''
"%s": {
"fillKey": "%s",
"Coal": %d,
"Pet": %d,
"Gas": %d,
"Total": %d
},
'''%(state, color, coal, pet, gas, total)
footer = '''
}
});
map.legend();
map.labels();
</script>
</br></br></br>
<p>
Carbon occurs naturally in the atmosphere, however, human activities alter the carbon cycle by adding more CO<sub>2</sub> to it. The main human activity that emits CO<sub>2</sub> is the combustion of fossil fuels (oil, natural gas, and coal).</p>
<p>
Changes in Carbon emissions are influenced by many factors, some being changes in population, seasonal temperatures, and new technologies. Visualizing this data is useful in analyzing trends present in changing CO<sub>2</sub> levels; this data reveal a slight increase in emissions (about 9%) since 1990, which reflects increased energy usage due to a growing population and changing economy.</p>
'''
print footer
# cgi-bin
print '''
</br><font color = "#FFFFFF">%d</font>
</body>
'''%tes
# disconnect from server
db.close()
| [
"rala8730@colorado.edu"
] | rala8730@colorado.edu |
82189181de0942cad8396d024a5d049ec946f19f | 7fa001f59be0f5d92ad3a9820e619a6971e44431 | /web_app/urls.py | 2892e738f80860f9a820a055bd76fb4e13903863 | [] | no_license | arrazy100/sigas-sukosari-2021 | 8c8ef7fc876ea65d1e55e2eedbab0cc5512bd0dc | b3c3f13e03c41560822f627add0dd72f38beb012 | refs/heads/main | 2023-06-08T06:20:04.099776 | 2021-06-26T00:53:56 | 2021-06-26T00:53:56 | 378,805,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """web_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("task_management.urls")),
]
| [
"afdhalarrazy111@gmail.com"
] | afdhalarrazy111@gmail.com |
0039e5a8aec878cb771be2ecdc89116f71a7cd5f | 48ff6b01dabc631c8924f3c51996010d9e0d2086 | /psypl/experiments/variable_count.py | 8f9bdaeef8f1030d231ba50b2ed17beb7e2c70bb | [] | no_license | willcrichton/psypl-experiments | b4522908f17ba9fbc023fa627a260e645a511bc4 | 7b0a134cc17919e62707d005fc03f2e22938eb13 | refs/heads/master | 2022-12-18T01:41:20.964024 | 2021-02-13T00:46:55 | 2021-02-13T00:46:55 | 219,410,440 | 3 | 0 | null | 2022-12-07T14:42:13 | 2019-11-04T03:33:04 | Jupyter Notebook | UTF-8 | Python | false | false | 3,326 | py | from enum import Enum
from ..utils import random_tree, ConstNode, OpNode, all_names, shuffle, try_int
from random import sample
from itertools import combinations, product
from ..base import Experiment
from pprint import pprint
import pandas as pd
import numpy as np
import experiment_widgets
class VariableCountExperiment(Experiment):
Widget = experiment_widgets.FunctionBasicExperiment
all_n_op = [6, 9]
all_n_var = [0, 2, 4]
class Condition(Enum):
#Random = 1
Even = 1
Frontloaded = 2
def generate_experiment(self, N_trials=24):
conditions = list(product(self.all_n_var, self.all_n_op, list(self.Condition)))
return {
"trials": shuffle(
[
self.generate_trial(*conds)
for conds in conditions
for _ in range(N_trials // len(conditions))
]
),
"between_trials_time": 4000,
}
def node_size(self, t, idxs):
if isinstance(t, OpNode):
lmap, lsize = self.node_size(t.left, idxs)
rmap, rsize = self.node_size(t.right, idxs)
size = lsize + rsize + 1
if t.index in idxs:
return {t.index: size, **lmap, **rmap}, 0
else:
return {**lmap, **rmap}, size
else:
return {}, 0
def generate_trial(self, N_var, N_op, cond):
tree = random_tree(N_op)
if N_var > 0:
coverings = pd.DataFrame([{
'sizes': self.node_size(tree, idxs)[0],
'remaining': self.node_size(tree, idxs)[1],
'idxs': idxs
} for idxs in combinations(list(range(N_op-1)), N_var)])
coverings['size_seq'] = coverings.apply(
lambda row: [t[1] for t in sorted(row.sizes.items(), key=lambda t: t[0])] + [row.remaining],
axis=1)
if cond == self.Condition.Even:
coverings.score = coverings.size_seq.map(lambda seq: np.std(seq))
elif cond == self.Condition.Frontloaded:
def compute_score(seq):
return np.sum([(i+1) * seq[i] for i in range(len(seq))])
coverings['score'] = coverings.size_seq.map(compute_score)
best_rows = coverings[coverings.score == coverings.score.min()]
row = best_rows.sample().iloc[0]
indices = row.idxs
size_seq = row.size_seq
names = sample(all_names, k=N_var)
defs, call = tree.to_mixed_str({i: n for i, n in zip(indices, names)})
else:
defs = []
call = tree.to_paren_str()
size_seq = [N_op]
program = '\n'.join(defs + [call])
globls = {}
exec(program, globls, globls)
answer = eval(call, globls, globls)
return {
'program': program,
'call': call if N_var > 0 else None,
'cond': str(cond),
'N_var': N_var,
'N_op': N_op,
'size_seq': size_seq,
'answer': str(answer)
}
def eval_trial(self, trial, result):
return {
"correct": 1 if int(trial["answer"]) == try_int(result["response"]) else 0,
"cond": trial["cond"]
}
| [
"wcrichto@cs.stanford.edu"
] | wcrichto@cs.stanford.edu |
79596651586815bcc674706587990ca8182e59eb | 3b63518ccee10b0b85758fe09b69b2c03a7b7da4 | /text_summarization_amazon_reviews/model_py_files/s2s_model_emb4_b50.py | 7e3ca02d159689d0e4905a0917a81f68ec370efa | [] | no_license | srodas7/danalytics | e7ecd989ab48a28fcfa7d5f5d6dc8f208eb2cc75 | aa20f2daa077412848954be43a88fc4482456901 | refs/heads/master | 2020-11-26T03:09:06.053468 | 2020-09-07T16:22:34 | 2020-09-07T16:22:34 | 228,947,966 | 0 | 0 | null | 2020-09-07T16:22:36 | 2019-12-19T01:04:10 | null | UTF-8 | Python | false | false | 6,243 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 22:18:04 2019
@author: sherryrodas
"""
########################################## LOAD DATA ##########################################
# HDF5 encoder and decoder data
import h5py
import numpy as np
filename = 'capstone/processed_amazon_txt.h5'
f = h5py.File(filename, 'r')
encoder_input_data = np.array(f['encoder_input_data'])
decoder_input_data = np.array(f['decoder_input_data'])
decoder_target_data = np.array(f['decoder_target_data'])
# CSV text and summary data
import pandas as pd
reviews = pd.read_csv('capstone/amazon_reviews_red.csv')
texts_red = reviews['text']
summaries_red = reviews['summary']
# CSV text and summary data
import pickle
pickle_in = open("capstone/int_to_vocab.pkl","rb")
int_to_vocab = pickle.load(pickle_in)
pickle_in = open("capstone/vocab_to_int.pkl","rb")
vocab_to_int = pickle.load(pickle_in)
pickle_in = open("capstone/dec_int_to_index.pkl","rb")
dec_int_to_index = pickle.load(pickle_in)
pickle_in = open("capstone/enc_int_to_index.pkl","rb")
enc_int_to_index = pickle.load(pickle_in)
num_encoder_tokens = len(enc_int_to_index)
num_decoder_tokens = len(dec_int_to_index)
print("num encoder tokens ",num_encoder_tokens)
print("num decoder tokens ",num_decoder_tokens)
########################################## BUILD MODEL ##########################################
from keras.layers import Input, LSTM, Embedding, Dense
from keras.models import Model
from keras.utils import plot_model
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
embedding_size = 100
# Encoder Model
encoder_inputs = Input(shape=(None,))
text_x= Embedding(num_encoder_tokens, embedding_size)(encoder_inputs)
encoder = LSTM(100, return_state=True)
encoder_outputs, state_h, state_c = encoder(text_x)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Decoder Model
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,))
sum_x= Embedding(num_decoder_tokens, embedding_size)
final_x= sum_x(decoder_inputs)
decoder_lstm = LSTM(100, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(final_x,
initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Compile Model
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
filepath='capstone/s2s_emb4_b50.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping_monitor = EarlyStopping(monitor='val_loss',patience = 10)
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=50,
epochs=100,
validation_split=0.2,
callbacks=[checkpoint,early_stopping_monitor],
verbose = 1)
encoder_model = Model(encoder_inputs, encoder_states)
encoder_model.summary()
# Create Sampling Model
decoder_state_input_h = Input(shape=(100,))
decoder_state_input_c = Input(shape=(100,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
final_x2= sum_x(decoder_inputs)
decoder_outputs2, state_h2, state_c2 = decoder_lstm(final_x2, initial_state=decoder_states_inputs)
decoder_states2 = [state_h2, state_c2]
decoder_outputs2 = decoder_dense(decoder_outputs2)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs2] + decoder_states2)
# Reverse-lookup token index to decode sequences back to something readable.
#reverse_input_char_index = dict((i, char) for char, i in enc_int_to_index.items())
reverse_target_char_index = dict((i, char) for char, i in dec_int_to_index.items())
encoder_model.save('capstone/s2s_emb_encoder4_b50.hdf5')
decoder_model.save('capstone/s2s_emb_decoder4_b50.hdf5')
#from keras.models import load_model
#filepath='capstone/s2s_emb_mac.hdf5'
#finalmodel = load_model(filepath)
#
#filepath='capstone/s2s_emb_encoder_mac.hdf5'
#finalenc = load_model(filepath)
#
#filepath='capstone/s2s_emb_decoder_mac.hdf5'
#finaldec = load_model(filepath)
# Function to generate sequences
def decode_sequence(input_seq):
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
# Populate the first character of target sequence with the start character.
target_seq[0, 0] = dec_int_to_index[63160]
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = []
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence.append(sampled_char)
# Exit condition: either hit max length
# or find stop character.
if (sampled_char == 7893 or
len(decoded_sentence) > 8):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return decoded_sentence
predicted_reviews = []
for seq_index in range(0,len(summaries_red)):
input_seq = encoder_input_data[seq_index: seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
decoded_sentence = " ".join([int_to_vocab[word_int] for word_int in decoded_sentence])
predicted_reviews.append(decoded_sentence)
#print('-')
#print('Input sentence:', texts_red[seq_index: seq_index + 1])
#print('Decoded sentence:', decoded_sentence)
pred_reviews_emb_1 = pd.DataFrame(list(zip(texts_red,summaries_red,predicted_reviews)),columns=['text','summary','predicted'])
pred_reviews_emb_1.to_csv("capstone/pred_reviews_emb_4_b50.csv",sep=',',index=False)
| [
"sherryrodas@Sherrys-MacBook-Air.local"
] | sherryrodas@Sherrys-MacBook-Air.local |
ab5a8efe6ee474ebb3d0874bd150540fd5990e8f | b05ae08859d3b593b6c815a10e0705e13c1ae1eb | /RinoNakasone/RinoNakasone/spiders/piaohua.py | 46b6392f62d63419047047495d160ab00d756622 | [] | no_license | jacksonyoudi/Rino_nakasone_backend | 32425bcd9087384fa25db1fe51e854b7a4f1fa12 | e838668a6f67a6a4eca52d7658ad84b61b4123db | refs/heads/master | 2021-04-15T18:21:17.678794 | 2019-03-02T15:16:30 | 2019-03-02T15:16:30 | 126,698,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | # -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from RinoNakasone.settings import PIAOHUA
class PiaohuaSpider(scrapy.Spider):
name = 'piaohua'
allowed_domains = ['www.piaohua.com']
start_urls = ['http://www.piaohua.com/']
def parse(self, response):
html_doc = response.body
soup = BeautifulSoup(html_doc, 'html.parser')
for i in soup.find_all('a', class_="img"):
if i.attrs.get('href'):
url = i.attrs.get('href')
full_url = urljoin(PIAOHUA, url)
yield scrapy.Request(full_url, callback=self.parse_detail)
next_url = urljoin(response.url.split('list_')[0],
soup.find('div', class_='page tk').find_all('a')[-2].attrs.get('href'))
yield scrapy.Request(next_url, callback=self.parse)
def parse_detail(self, response):
item = IreadweekItem()
html_doc = response.body
soup = BeautifulSoup(html_doc, 'html.parser')
img_url = urljoin(CDN, soup.find('img').attrs.get('src').replace('//', '/'))
download_url = soup.find('a', class_='downloads').attrs.get('href')
title = soup.find_all('div', class_='hanghang-za-title')
name = title[0].text
content = soup.find_all('div', class_='hanghang-za-content')
author_info = content[0].text
directory = '\n'.join([i.text.replace("\u3000", '') for i in content[1].find_all('p')])
info = soup.find('div', class_='hanghang-shu-content-font').find_all('p')
author = info[0].text.split('作者:')[1]
category = info[1].text.split('分类:')[1]
score = info[2].text.split('豆瓣评分:')[1]
introduction = info[4].text
item['name'] = name
item['img_url'] = img_url
item['download_url'] = download_url
item['author'] = author
item['author_info'] = author_info
item['category'] = category
item['score'] = score
item['introduction'] = introduction
item['directory'] = directory
return item
| [
"liangchangyoujackson@gmail.com"
] | liangchangyoujackson@gmail.com |
5e1b29564b9c98be9378b1854de1cbd89cb8286f | 317cd9ae774e3932ea17e33b1455ee63c141e0aa | /celery_compliance/subcon_total_log_hours.py | 56fd4bebf975856ca0286247c1a99c202de70f90 | [] | no_license | restrictedaccess/mq | 7608e379e3f67661f8d5a0cd36fef50471f469e2 | e0276af8fe7f66c7148f06bb10af7163454fb593 | refs/heads/master | 2021-08-17T08:21:47.922524 | 2017-11-21T00:43:51 | 2017-11-21T00:43:51 | 111,478,815 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,435 | py | # 2014-07-08 Normaneil E. Macutay <normanm@remotestaff.com.au>
# - task to get the total log hours of staff based on start and end date
import settings
import couchdb
from celery.task import task, Task
from celery.execute import send_task
from celery.task.sets import TaskSet
from datetime import date, datetime, timedelta
import pytz
from pytz import timezone
from decimal import Decimal, ROUND_HALF_UP
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
TWOPLACES = Decimal(10) ** -2
import attendance_report
def get_ph_time(as_array=False):
"""returns a philippines datetime
"""
utc = timezone('UTC')
phtz = timezone('Asia/Manila')
now = utc.localize(datetime.utcnow())
now = now.astimezone(phtz)
if as_array:
return [now.year, now.month, now.day, now.hour, now.minute, now.second]
else:
return datetime(now.year, now.month, now.day, now.hour, now.minute, now.second)
@task
def get_total_log_hrs(sid, start_date, end_date, userid):
now = get_ph_time()
total_work_hours = Decimal('0.00')
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
s = couchdb.Server(settings.COUCH_DSN)
db = s['rssc_time_records']
userid = int(int(userid))
r = db.view('rssc_reports/userid_timein',
startkey=[userid, [ int(start_date.strftime('%Y')), int(start_date.strftime('%m')), int(start_date.strftime('%d')),0,0,0,0]],
endkey=[userid, [ int(end_date.strftime('%Y')), int(end_date.strftime('%m')), int(end_date.strftime('%d')),23,59,59,0]],
ascending=True
)
phtz = timezone('Asia/Manila')
timezone_ref = phtz
for row in r.rows:
record_type, b, leads_id, subcon_id = row['value']
userid, a = row['key']
if subcon_id != None:
if record_type == 'quick break':
continue
if int(sid) != int(subcon_id):
continue
start = datetime(a[0], a[1], a[2], a[3], a[4], a[5], tzinfo=phtz)
if b == None or b == False:
end = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, tzinfo=phtz)
else:
end = datetime(b[0], b[1], b[2], b[3], b[4], b[5], tzinfo=phtz)
start = start.astimezone(timezone_ref)
if end != None:
end = end.astimezone(timezone_ref)
#update totals
time_diff = end - start
time_diff_decimal = Decimal('%s' % (time_diff.seconds / 3600.0)).quantize(TWOPLACES, rounding=ROUND_HALF_UP)
if record_type == 'time record':
total_work_hours += time_diff_decimal
elif record_type == 'lunch record':
total_work_hours -= time_diff_decimal
return "%0.2f" % total_work_hours
@task(ignore_result=True)
def process_doc_id(doc_id):
logging.info('checking %s' % doc_id)
s = couchdb.Server(settings.COUCH_DSN)
db = s['subconlist_reporting']
doc = db.get(doc_id)
if doc == None:
raise Exception('subconlist_reporting document not found : %s' % doc_id)
subcontractor_ids = doc['subcontractor_ids']
start_date = doc['start_date']
end_date = doc['end_date']
page_usage=""
if 'page_usage' in doc:
page_usage = doc['page_usage']
#start_date = datetime.strptime(start_date, '%Y-%m-%d')
#end_date = datetime.strptime(end_date, '%Y-%m-%d')
str =""
total_log_hrs_result={}
for sid in subcontractor_ids:
userid = doc['subcon_userid'][sid]
total_hrs = get_total_log_hrs(sid, start_date, end_date, userid)
total_log_hrs_result[int(sid)] = total_hrs
doc['total_log_hrs_result'] = total_log_hrs_result
db.save(doc)
if page_usage == "attendance report":
attendance_report.process_doc_id(doc_id)
if __name__ == '__main__':
logging.info('tests')
logging.info(process_doc_id('bc7205ad9e81255ebe9b4b7496003c7b')) | [
"fryxxter@gmail.com"
] | fryxxter@gmail.com |
2cd0bb00f525bb6ae79e78058f812031c18b3248 | d58e221b8a08b8d536d7249eea3d3ef31a03c8e5 | /tryTkinter.py | 9d3a4899226f5834eccc7ecaf1002a784f90e947 | [] | no_license | first-odd/myaccess | 9ef9ae5384ff15db2e195fb929ea69d1158b93df | 2e35f208a23efb40da971da6153b167571ae6e59 | refs/heads/master | 2021-12-20T22:54:12.098958 | 2021-12-18T15:34:36 | 2021-12-18T15:34:36 | 250,950,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | from tkinter import *
from tkinter.messagebox import *
a = Tk()
a.title('Try')
a.geometry('800x600+400+400')
#root.iconbitmap('/Users/jacksonandrew/Downloads/favicon.ico') 插入一个icon
a.resizable(0,0) #固定
#设置结果窗口
root = StringVar()
root.set('0') #初始值
#Label
result = Label(a,text='结果', width=20,height=1,bd=10,anchor=NW,font=('',30,'bold'),bg='lightblue',fg='red',cursor='plus',textvariable='root') #cursor 光标变化 arrow、circle、cross、plus
result.grid(row=0,column=0,sticky=W,padx=2) #紧贴左边,外边距2
#功能函数
def getnum(num):
temp = root.get()
root.set(temp)
print(root)
# Disabled Entry-widget experiment kept for reference; this is a bare
# triple-quoted string expression, so it is evaluated but has no effect.
'''#entry
word = Entry(a,bg='yellow')
word.grid(row=1,column=0,sticky=W)'''
#Button
# Each button feeds its own symbol to getnum via a lambda bound at creation
# time (the literal argument avoids the late-binding closure pitfall).
n1 = Button(a,text='1',width=4,bd=10,height=2,anchor=NW,command=lambda:getnum('1'))
n1.grid(row=2,column=0,sticky=W)
n2 = Button(a,text='+',width=4,bd=10,height=2,anchor=NW,command=lambda:getnum('+'))
n2.grid(row=3,column=0,sticky=W)
n3 = Button(a,text='=',width=4,bd=10,height=2,command=lambda:getnum('='))
n3.grid(row=4,column=0,sticky=W)
a.mainloop()  # blocks until the window is closed
"2921106404@qq.com"
] | 2921106404@qq.com |
bb854340247f638ffd766c3a2e12f7b3be2a4e92 | 02dc9d80deaa9f29afb7f30ecd7aed1b78bd128e | /app/node_modules/grpc/build/config.gypi | 0165e6af8744884c2817f8600150b4ea2f82cfd1 | [] | no_license | liu-sf/voteApp | 7c707c0add90e8cfcc511078abfee7d33590bad3 | 24d14c02edce5cf0aefb5abdc21af5e53d19eb23 | refs/heads/master | 2022-11-02T00:58:15.234213 | 2019-04-29T08:07:49 | 2019-04-29T08:07:49 | 166,231,390 | 0 | 2 | null | 2022-10-26T12:41:14 | 2019-01-17T13:35:44 | HTML | UTF-8 | Python | false | false | 4,660 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt57l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt57l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "57",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.48",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/liu/.node-gyp/6.9.5",
"standalone_static_library": 1,
"fallback_to_build": "true",
"library": "static_library",
"module": "/home/liu/go/src/github.com/hyperledger/voteApp/app/node_modules/grpc/src/node/extension_binary/node-v48-linux-x64-glibc/grpc_node.node",
"module_name": "grpc_node",
"module_path": "/home/liu/go/src/github.com/hyperledger/voteApp/app/node_modules/grpc/src/node/extension_binary/node-v48-linux-x64-glibc",
"cache_lock_stale": "60000",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/3.10.10 node/v6.9.5 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/home/liu/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"progress": "true",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/home/liu/.nvm/versions/node/v6.9.5",
"registry": "https://registry.npm.taobao.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/liu/.npm",
"global_style": "",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "6.9.5",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/liu/.nvm/versions/node/v6.9.5/etc/npmrc",
"init_module": "/home/liu/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/liu/.nvm/versions/node/v6.9.5/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"1350985242@qq.com"
] | 1350985242@qq.com |
429521c688445ef43a8fc4dc15cacbffb8501777 | f9d9c91bc1d643c051f3fbea7605bdc2df890b12 | /institution/migrations/0002_auto_20161111_1528.py | a3a6d2c73e09494d320c57e40bdc9a400e78bd47 | [] | no_license | crodriguezanton/BEMSeducation | fbc0ac60b4387a120d2e350e10a4ad6eb5cf9004 | 5fc0ed9297734398044bfab17450576cb68704a8 | refs/heads/master | 2021-01-12T18:05:06.628332 | 2017-05-15T11:45:40 | 2017-05-15T11:45:40 | 69,903,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-11 15:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('institution', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='classroom',
options={'verbose_name': 'Classroom', 'verbose_name_plural': 'Classrooms'},
),
migrations.AlterModelOptions(
name='grade',
options={'verbose_name': 'Grade', 'verbose_name_plural': 'Grades'},
),
migrations.AlterModelOptions(
name='group',
options={'verbose_name': 'Group', 'verbose_name_plural': 'Groups'},
),
migrations.AlterModelOptions(
name='stage',
options={'verbose_name': 'Stage', 'verbose_name_plural': 'Stages'},
),
]
| [
"carlos@beatbcn.com"
] | carlos@beatbcn.com |
c86638a84b3e4b58b7ca7e6d789b29da4bc9e467 | 3d5712aaaa5972efe418ae82bd289e4e7ddfd01b | /problem_031.py | bdf6a766ffe15493b223ffed23d1e4864095922b | [
"MIT"
] | permissive | smrmkt/project_euler | 5252c56d77aeb43247a80f1c94e76182b03bda4c | 7d5901731dd1c06809a378c12829a8b8b506eb21 | refs/heads/master | 2020-04-10T15:12:07.720911 | 2014-12-09T14:28:31 | 2014-12-09T14:28:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
In England the currency is made up of pound, £, and pence,
p, and there are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
'''
import timeit
# slow
def loop(total):
    """Count the ways to make *total* pence from UK coins by brute force.

    Enumerates every combination of the 200p..2p coins whose value does not
    exceed *total*; the remainder is always fillable with 1p coins, so each
    such combination corresponds to exactly one way of making *total*.
    Deliberately naive (seven nested loops) so it can be timed against the
    recursive solution below.

    Fixes: the original condition read ``l*20++m*10`` -- the doubled ``+``
    was a harmless but confusing double unary plus, not an increment; and
    ``int(total/…)`` (float division on Python 3) was replaced with exact
    floor division.
    """
    cnt = 0
    for i in range(total // 200 + 1):
        for j in range(total // 100 + 1):
            for k in range(total // 50 + 1):
                for l in range(total // 20 + 1):
                    for m in range(total // 10 + 1):
                        for n in range(total // 5 + 1):
                            for o in range(total // 2 + 1):
                                if i*200 + j*100 + k*50 + l*20 + m*10 + n*5 + o*2 <= total:
                                    cnt += 1
    return cnt
# fast
def recursive(total, coins):
    """Count the ways to make *total* using the denominations in *coins*.

    Generalized from the original, which required the coin list to end with
    a 1p coin: the base case is now the empty list (one way to make 0, zero
    ways to make anything else), so any denomination list works. For lists
    ending in 1 the result is identical to the original implementation.
    """
    if not coins:
        return 1 if total == 0 else 0
    cnt = 0
    # Try every feasible count of the first denomination, then recurse on
    # the remaining (smaller) denominations.
    for i in range(total // coins[0] + 1):
        cnt += recursive(total - coins[0] * i, coins[1:])
    return cnt
# Python 2 print statements: this module targets Python 2.
if __name__ == '__main__':
    print loop(200)
    print recursive(200, [200, 100, 50, 20, 10, 5, 2, 1])
    # Time a single run of each solution. loop(200) executes roughly 2.9e7
    # loop iterations, so expect the brute-force version to be dramatically
    # slower. The setup statement imports this module by its file name.
    print timeit.Timer('problem_031.loop(200)', 'import problem_031').timeit(1)
    print timeit.Timer('problem_031.recursive(200, [200, 100, 50, 20, 10, 5, 2, 1])',
                       'import problem_031').timeit(1)
| [
"smrmkt@gmail.com"
] | smrmkt@gmail.com |
1814a51cda515b0e4f7442cea9030b0fdb09db8f | 6463900393667324d938d7f54e215cb1ea89d63e | /data_process/data_process.py | 27dec511f00a7b3a3f258ee99b8ad66274fe9ee1 | [
"MIT"
] | permissive | 1146976048qq/GAST | b19512cb231b85a6d3891900b82a4edf9bed4175 | 4654fc4e711a1eb192e720f505a9fe514294f7aa | refs/heads/master | 2023-02-19T17:14:21.436548 | 2021-01-22T11:40:24 | 2021-01-22T11:40:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,630 | py | '''
Biaffine Dependency parser from AllenNLP
'''
import argparse
import json
import os
import re
import sys
import pandas as np
from allennlp.predictors.predictor import Predictor
from lxml import etree
from nltk.tokenize import TreebankWordTokenizer
from tqdm import tqdm
# Location of the pretrained AllenNLP biaffine dependency-parser archive.
# NOTE(review): machine-specific absolute path -- override via --model_path.
pt_models_dir = '/data/kkzhang/Cross_GAT/AllenNLP'
model_path = os.path.join(pt_models_dir, "biaffine-dependency-parser-ptb-2020.04.06.tar.gz")
def parse_args():
    """Build and evaluate the command-line parser for this script."""
    arg_parser = argparse.ArgumentParser()
    # (flag, default, help) specs for every supported option.
    option_specs = [
        ('--model_path', model_path,
         'Path of biaffine dependency parser.'),
        ('--data_path', '/data/kkzhang/Cross_GAT/dataset/raw_data',
         'Directory of where original data held.'),
    ]
    for flag, default_value, help_text in option_specs:
        arg_parser.add_argument(flag, type=str, default=default_value,
                                help=help_text)
    return arg_parser.parse_args()
def text2docs(file_path, predictor):
    """Run the AllenNLP predictor over every line of *file_path*.

    Returns one parsed-document dict per input line, in file order.
    """
    with open(file_path, 'r') as handle:
        lines = handle.readlines()
    print('Predicting dependency information...')
    # tqdm renders a progress bar while each line is parsed in turn.
    parsed = [predictor.predict(sentence=lines[i])
              for i in tqdm(range(len(lines)))]
    return parsed
def dependencies2format(doc):
    """Convert one AllenNLP prediction dict into the project sentence format.

    The returned dict exposes:
      - ``tokens`` / ``tags``: copied from the prediction's 'words' / 'pos'
      - ``predicted_dependencies`` / ``predicted_heads``: copied verbatim
      - ``dependencies``: ``[dep_tag, head_index, token_index]`` triples,
        where token_index is 1-based.
    """
    dep_tags = doc['predicted_dependencies']
    head_indices = doc['predicted_heads']
    sentence = {
        'tokens': doc['words'],
        'tags': doc['pos'],
        'predicted_dependencies': dep_tags,
        'predicted_heads': head_indices,
        'dependencies': [
            [tag, head, position + 1]
            for position, (tag, head) in enumerate(zip(dep_tags, head_indices))
        ],
    }
    return sentence
def get_dependencies(file_path, predictor):
    """Parse *file_path* and return each line as a formatted sentence dict."""
    return [dependencies2format(parsed_doc)
            for parsed_doc in text2docs(file_path, predictor)]
def syntax2json(sentences, origin_file):
    """Merge raw review lines with their parses and dump them as JSON.

    Each line of *origin_file* is paired positionally with the entry of
    *sentences* at the same index; the combined records are written next to
    the input as ``<origin>_depparse.json``.

    Fixes over the original: the unused ``TreebankWordTokenizer`` instance
    and the unused ``mismatch_counter`` local were removed (the tokenizer
    was constructed but never called).
    """
    json_data = []
    with open(origin_file, 'r') as fopen:
        raw = fopen.readlines()
    for idx, sentence in enumerate(raw):
        example = dict()
        example["sentence"] = sentence
        # An IndexError here means origin_file has more lines than parses --
        # the same strict positional pairing as the original.
        example['tokens'] = sentences[idx]['tokens']
        example['tags'] = sentences[idx]['tags']
        example['predicted_dependencies'] = sentences[idx]['predicted_dependencies']
        example['predicted_heads'] = sentences[idx]['predicted_heads']
        example['dependencies'] = sentences[idx]['dependencies']
        json_data.append(example)
    # extended_filename = origin_file.replace('.txt', '_biaffine_depparsed.json')
    extended_filename = origin_file.replace('.txt', '_depparse.json')
    with open(extended_filename, 'w') as f:
        json.dump(json_data, f)
    print('done,json_data length:', len(json_data))
    print('idx_length:', len(json_data)) # DataItem Number (== raw line count)
def main():
    """Parse every domain's labeled review files and save the results.

    Loads the biaffine dependency parser once, then for each product domain
    runs the negative and positive review files through the parser and
    writes one ``*_depparse.json`` next to each input.
    """
    args = parse_args()
    print("-------------", args.model_path)
    # Loading the AllenNLP archive is slow; done once and reused below.
    predictor = Predictor.from_path(args.model_path)
    # Earlier variant that also processed the unlabeled reviews, kept for
    # reference:
    # data = [('books/review_negative.txt', 'books/review_positive.txt', 'books/review_unlabeled'),
    #         ('dvd/review_negative.txt', 'dvd/review_positive.txt', 'dvd/review_unlabeled.txt'),
    #         ('electronics/review_negative.txt', 'electronics/review_positive.txt', 'electronics/review_unlabeled.txt'),
    #         ('kitchen/review_negative.txt', 'kitchen/review_positive.txt', 'kitchen/review_unlabeled.txt'),
    #         ('video/review_negative.txt', 'video/review_positive.txt', 'video/review_unlabeled.txt')
    #         ]
    # for neg_file, pos_file, unlabel_file in data:
    #     neg_sentences = get_dependencies(os.path.join(args.data_path, neg_file), predictor)
    #     pos_sentences = get_dependencies(os.path.join(args.data_path, pos_file), predictor)
    #     unlabel_sentences = get_dependencies(os.path.join(args.data_path, unlabel_file), predictor)
    #     print(len(neg_sentences), len(pos_sentences), len(unlabel_sentences))
    #     syntax2json(neg_sentences, os.path.join(args.data_path, neg_file))
    #     syntax2json(pos_sentences, os.path.join(args.data_path, pos_file))
    #     syntax2json(unlabel_sentences, os.path.join(args.data_path, unlabel_file))
    # (negative, positive) review files per domain, relative to --data_path.
    data = [('books/review_negative.txt', 'books/review_positive.txt'),
            ('dvd/review_negative.txt', 'dvd/review_positive.txt'),
            ('electronics/review_negative.txt', 'electronics/review_positive.txt'),
            ('kitchen/review_negative.txt', 'kitchen/review_positive.txt'),
            ('video/review_negative.txt', 'video/review_positive.txt')
            ]
    for neg_file, pos_file in data:
        neg_sentences = get_dependencies(os.path.join(args.data_path, neg_file), predictor)
        pos_sentences = get_dependencies(os.path.join(args.data_path, pos_file), predictor)
        print(len(neg_sentences), len(pos_sentences))
        syntax2json(neg_sentences, os.path.join(args.data_path, neg_file))
        syntax2json(pos_sentences, os.path.join(args.data_path, pos_file))
# Script entry point.
if __name__ == "__main__":
    main()
| [
"1146976048@qq.com"
] | 1146976048@qq.com |
187166ea2567f6b8c7bba448205dd30a929a7111 | f156beb6c5d911e86c28ea71f70f7422391a2c12 | /ipynb/biplot.py | 8dce7fa3067b700bbaf52f0c0f3842ed253917e4 | [] | no_license | knightlab-analyses/office-study | 31382dc259b1b21c3288709e5a49070186c5e66b | 506e1d037c982e23538aec4742305ccd2508d844 | refs/heads/master | 2021-01-12T14:36:42.088868 | 2017-08-20T23:46:57 | 2017-08-20T23:46:57 | 72,040,888 | 0 | 2 | null | 2017-08-20T23:46:58 | 2016-10-26T20:17:35 | Jupyter Notebook | UTF-8 | Python | false | false | 17,786 | py | from __future__ import division
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
import pandas as pd
from collections import OrderedDict
def make_biplot(samples,
                features=None,
                sample_metadata=None,
                feature_metadata=None,
                sample_color_category=None,
                feature_color_category=None,
                sample_color_dict=None,
                feature_color_dict=None,
                sample_zorder=None,
                feature_zorder=None,
                **kwargs):
    """Draw an ordination biplot: sample points plus feature arrows.

    Samples are scattered on the primary axis and (optionally) features are
    drawn as arrows from the origin on a twin axis, optionally grouped and
    colored by a metadata category.

    Parameters
    ----------
    samples : pd.DataFrame
        Sample ordination coordinates (columns 'PCA1'/'PCA2' by default).
    features : pd.DataFrame, optional
        Feature loadings drawn as arrows from the origin.
    sample_metadata, feature_metadata : pd.DataFrame, optional
        Per-sample / per-feature annotations used for grouping and coloring.
    sample_color_category, feature_color_category : str, optional
        Metadata column used to group and color samples / features.
    sample_color_dict, feature_color_dict : dict, optional
        Explicit group -> color mappings; derived from colormaps if omitted.
    sample_zorder, feature_zorder : dict, optional
        Per-group z-order overrides (feature_zorder is honored when
        feature_color_category is given; sample_zorder is currently unused).
    **kwargs
        Styling options: figure_size, samples_x/samples_y, features_x/
        features_y, samp_col/feat_col (colormap names), samp_alpha,
        samp_marker, samp_ms, samp_leg_loc, feat_alpha, arrow_width,
        arrow_head, feat_leg_loc, feature_order, sample_drop_list,
        show_color_drop, sample_drop_col, eigenvalues, x_pad, y_pad.

    Returns
    -------
    tuple
        ``(fig, [ax, ax2])`` -- the figure, the sample axis and the twin
        feature axis.

    Fixes relative to the original implementation:
    * the 'features_x'/'features_y' kwargs previously overwrote the SAMPLE
      column names (copy-paste bug); they now set the feature column names;
    * the long-removed pandas ``.ix`` indexer was replaced by ``.iloc`` --
      the original usage was purely positional, so behavior is unchanged.
    """
    # --- defaults for all styling kwargs ---
    figure_size = (15, 15)
    samples_x = 'PCA1'
    samples_y = 'PCA2'
    samp_col = 'RdGy'
    samp_alpha = 1
    samp_marker = 'o'
    samp_ms = 8
    samp_leg_loc = 2
    features_x = 'PCA1'
    features_y = 'PCA2'
    feat_col = 'Set1'
    feat_alpha = 1
    arrow_width = 0.02
    arrow_head = 0.05
    feat_leg_loc = 1
    feature_order = 0
    sample_drop_list = []
    show_color_drop = False
    sample_drop_col = ['#FFFFFF']
    eigenvalues = []
    x_pad = 0.3
    y_pad = 0.3
    for key, value in kwargs.items():
        if key == 'figure_size':
            figure_size = value
        if key == 'samples_x':
            samples_x = value
        if key == 'samples_y':
            samples_y = value
        if key == 'samp_col':
            samp_col = value
        if key == 'samp_alpha':
            samp_alpha = value
        if key == 'samp_marker':
            samp_marker = value
        if key == 'samp_ms':
            samp_ms = value
        if key == 'samp_leg_loc':
            samp_leg_loc = value
        if key == 'features_x':
            features_x = value  # FIX: original assigned samples_x here
        if key == 'features_y':
            features_y = value  # FIX: original assigned samples_y here
        if key == 'feat_col':
            feat_col = value
        if key == 'feat_alpha':
            feat_alpha = value
        if key == 'arrow_width':
            arrow_width = value
        if key == 'arrow_head':
            arrow_head = value
        if key == 'feat_leg_loc':
            feat_leg_loc = value
        if key == 'feature_order':
            # Only 0 or 1 are accepted; any other value keeps the default.
            if value == 0:
                feature_order = 0
            if value == 1:
                feature_order = 1
        if key == 'sample_drop_list':
            sample_drop_list = value
        if key == 'show_color_drop':
            show_color_drop = value
        if key == 'sample_drop_col':
            sample_drop_col = value
        if key == 'eigenvalues':
            eigenvalues = value
        if key == 'x_pad':
            x_pad = value
        if key == 'y_pad':
            y_pad = value
    # --- argument validation ---
    if not isinstance(samples, pd.core.frame.DataFrame):
        raise ValueError('`samples` must be a `pd.DataFrame`, '
                         'not %r.' % type(samples).__name__)
    if features is not None:
        if not isinstance(features, pd.core.frame.DataFrame):
            raise ValueError('`features` must be a `pd.DataFrame`, '
                             'not %r.' % type(features).__name__)
    if sample_metadata is not None:
        if not isinstance(sample_metadata, pd.core.frame.DataFrame):
            raise ValueError('`sample_metadata` must be a `pd.DataFrame`, '
                             'not %r.' % type(sample_metadata).__name__)
    if feature_metadata is not None:
        if not isinstance(feature_metadata, pd.core.frame.DataFrame):
            raise ValueError('`feature_metadata` must be a `pd.DataFrame`, '
                             'not %r.' % type(feature_metadata).__name__)
    if sample_color_dict is not None:
        if not isinstance(sample_color_dict, dict):
            raise ValueError('`sample_color_dict` must be a `dictionary`, '
                             'not %r.' % type(sample_color_dict).__name__)
    if feature_color_dict is not None:
        if not isinstance(feature_color_dict, dict):
            raise ValueError('`feature_color_dict` must be a `dictionary`, '
                             'not %r.' % type(feature_color_dict).__name__)
    if sample_metadata is not None and sample_color_dict is None:
        if sample_color_category is None:
            raise ValueError('sample_color_category must be a specified')
    if sample_metadata is not None and sample_color_dict is not None:
        if sample_color_category is None:
            raise ValueError('sample_color_category must be a specified')
    if feature_metadata is not None and feature_color_dict is not None:
        if feature_color_category is None:
            raise ValueError('feature_color_category must be a specified')
    if sample_drop_list is not None:
        if not isinstance(sample_drop_list, list):
            raise ValueError('`sample_drop_list` must be a `list`, '
                             'not %r.' % type(sample_drop_list).__name__)
    if sample_drop_col is not None:
        if not isinstance(sample_drop_col, list):
            raise ValueError('`sample_drop_col` must be a `list`, '
                             'not %r.' % type(sample_drop_col).__name__)
    # Align samples with their metadata by sorting both when indices differ.
    if sample_metadata is not None:
        if (samples.index != sample_metadata.index).any():
            samples = samples.sort_index(axis=0)
            sample_metadata = sample_metadata.sort_index(axis=0)
    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    sample_colors = plt.get_cmap(samp_col)
    feature_colors = plt.get_cmap(feat_col)
    sample_group_append = []
    colorVal = []
    # --- samples: no metadata -> single unlabeled scatter ---
    if sample_metadata is None:
        ax.plot(np.ravel(samples[samples_x]),
                np.ravel(samples[samples_y]),
                marker=samp_marker, linestyle='',
                ms=samp_ms, alpha=samp_alpha)
    # --- samples: metadata given, colors derived from the colormap ---
    if (sample_metadata is not None and sample_color_dict is None):
        sample_groups = samples.groupby(sample_metadata[sample_color_category])
        if len(sample_drop_list) > 0:
            def dropf(x):
                return x not in sample_drop_list
            index_drop = sample_metadata[sample_color_category].apply(dropf)
            samp_r = samples.loc[sample_metadata.index]
            samp_met_r = sample_metadata.loc[index_drop][sample_color_category]
            for name, group in samp_r.groupby(samp_met_r):
                sample_group_append.append(name)
            sample_group_append = sorted(list(set(sample_group_append)))
            cNorm = colors.Normalize(vmin=0,
                                     vmax=(len(sample_group_append)-1))
            scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=sample_colors)
            for index, row in enumerate(sample_group_append):
                colorVal.append(scalarMap.to_rgba(index))
            if not show_color_drop:
                sample_color_dict = dict(zip(sample_group_append, colorVal))
                sample_color_dict = OrderedDict(
                    sorted(sample_color_dict.items(),
                           key=lambda x: x[0],
                           reverse=True))
                for name, group in samp_r.groupby(samp_met_r):
                    ax.plot(np.ravel(group[samples_x]),
                            np.ravel(group[samples_y]),
                            marker=samp_marker, linestyle='', ms=samp_ms,
                            color=sample_color_dict[name],
                            label=name, alpha=samp_alpha)
            else:
                # Dropped groups are still drawn, using sample_drop_col
                # (either one shared color or one color per dropped group).
                color_drop_append = []
                if len(sample_drop_col) == 1:
                    for index in range(len(sample_drop_list)):
                        color_drop_append.append(sample_drop_col[0])
                    colorVal = colorVal + color_drop_append
                if len(sample_drop_col) == len(sample_drop_list):
                    for index in range(len(sample_drop_list)):
                        color_drop_append.append(sample_drop_col[index])
                    colorVal = colorVal + color_drop_append
                sample_group_append = list(sample_group_append)
                sample_group_append += list(sample_drop_list)
                sample_color_dict = dict(zip(sample_group_append, colorVal))
                sample_color_dict = OrderedDict(
                    sorted(sample_color_dict.items(),
                           key=lambda x: x[0],
                           reverse=True))
                for name, group in sample_groups:
                    if name not in sample_drop_list:
                        ax.plot(np.ravel(group[samples_x]),
                                np.ravel(group[samples_y]),
                                marker=samp_marker, linestyle='', ms=samp_ms,
                                color=sample_color_dict[name],
                                label=name, alpha=samp_alpha)
                # Dropped groups plotted last so they sit on top.
                for name, group in sample_groups:
                    if name in sample_drop_list:
                        ax.plot(np.ravel(group[samples_x]),
                                np.ravel(group[samples_y]),
                                marker=samp_marker, linestyle='', ms=samp_ms,
                                color=sample_color_dict[name],
                                label=name, alpha=samp_alpha)
        else:
            sample_group_append = []
            for name, group in sample_groups:
                sample_group_append.append(name)
            sample_group_append = sorted(list(set(sample_group_append)))
            cNorm = colors.Normalize(vmin=0,
                                     vmax=(len(sample_group_append)-1))
            scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=sample_colors)
            for index, row in enumerate(sample_group_append):
                colorVal.append(scalarMap.to_rgba(index))
            sample_color_dict = dict(zip(sample_group_append, colorVal))
            sample_color_dict = OrderedDict(
                sorted(sample_color_dict.items(),
                       key=lambda x: x[0],
                       reverse=True))
            for name, group in sample_groups:
                ax.plot(np.ravel(group[samples_x]),
                        np.ravel(group[samples_y]),
                        marker=samp_marker, linestyle='', ms=samp_ms,
                        color=sample_color_dict[name],
                        label=name, alpha=samp_alpha)
            sample_color_dict = None
    # --- samples: metadata given together with an explicit color dict ---
    # NOTE(review): when the colormap branch above ran with a drop list, it
    # leaves sample_color_dict populated, so this branch re-plots the same
    # groups a second time -- visually harmless but presumably unintended;
    # confirm before relying on legend handle counts.
    if (sample_metadata is not None and sample_color_dict is not None):
        sample_groups = samples.groupby(sample_metadata[sample_color_category])
        if len(sample_drop_list) > 0:
            def dropf(x):
                return x not in sample_drop_list
            index_drop = sample_metadata[sample_color_category].apply(dropf)
            samp_r = samples.loc[sample_metadata.index]
            samp_met_r = sample_metadata.loc[index_drop]
            sample_color_dict = OrderedDict(
                sorted(sample_color_dict.items(),
                       key=lambda x: x[0],
                       reverse=True))
            sample_groups = samp_r.groupby(samp_met_r[sample_color_category])
            for name, group in sample_groups:
                ax.plot(np.ravel(group[samples_x]),
                        np.ravel(group[samples_y]),
                        marker=samp_marker, linestyle='', ms=samp_ms,
                        color=sample_color_dict[name],
                        label=name, alpha=samp_alpha)
        if not sample_drop_list:
            sample_color_dict = OrderedDict(
                sorted(sample_color_dict.items(),
                       key=lambda x: x[0],
                       reverse=True))
            for name, group in sample_groups:
                ax.plot(np.ravel(group[samples_x]),
                        np.ravel(group[samples_y]),
                        marker=samp_marker, linestyle='', ms=samp_ms,
                        color=sample_color_dict[name],
                        label=name, alpha=samp_alpha)
            sample_color_dict = None
    # Twin axis hosts the feature arrows so samples and features can carry
    # separate legends.
    ax2 = ax.twinx()
    if sample_color_category is not None:
        ax.legend(title=sample_color_category, loc=samp_leg_loc, numpoints=1)
    else:
        ax.legend(loc=samp_leg_loc, numpoints=1)
    ax2.set_ylim(ax.get_ylim())
    recs = []
    feature = []
    otu_feature_append = []
    colorVal = []
    # --- features: no metadata -> plain blue arrows ---
    if (features is not None and feature_metadata is None):
        for index, row in features.iterrows():
            ax2.arrow(0, 0, row[features_x], row[features_y],
                      width=arrow_width, head_width=arrow_head,
                      alpha=feat_alpha, color='b')
    # --- features: metadata given, grouped by its first column ---
    if (features is not None and
            feature_metadata is not None and
            feature_color_category is None):
        otu_feature_append = []
        feature_groups = features.groupby(feature_metadata.columns[0])
        for name, group in feature_groups:
            otu_feature_append.append(name)
        otu_feature_append = sorted(list(set(otu_feature_append)))
        cNorm = colors.Normalize(vmin=0, vmax=(len(otu_feature_append)-1))
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=feature_colors)
        for index, row in enumerate(otu_feature_append):
            colorVal.append(scalarMap.to_rgba(index))
        feature_color_dict = dict(zip(otu_feature_append, colorVal))
        feature_color_dict = OrderedDict(
            sorted(feature_color_dict.items(),
                   key=lambda x: x[0]))
        for name, group in feature_groups:
            for i in range(group[features_x].shape[0]):
                _id = group.index[i]
                ax2.arrow(0, 0,
                          group.loc[_id, features_x],
                          group.loc[_id, features_y],
                          width=arrow_width, head_width=arrow_head,
                          alpha=feat_alpha,
                          color=feature_color_dict[name])
        # Proxy rectangles stand in for the arrows in the legend.
        for key, value in feature_color_dict.items():
            recs.append(mpatches.Rectangle((0, 0), 1, 1,
                                           fc=feature_color_dict[key],
                                           alpha=feat_alpha))
            feature.append(key)
        ax2.legend(recs, feature, loc=feat_leg_loc,
                   title=feature_color_category)
        feature_color_dict = None
    # --- features: metadata plus an explicit color category ---
    if (features is not None and
            feature_metadata is not None and
            feature_color_category is not None):
        feature_groups = features.groupby(
            feature_metadata[feature_color_category])
        if feature_color_dict is None:
            otu_feature_append = []
            feature_groups = features.groupby(
                feature_metadata[feature_color_category])
            for name, group in feature_groups:
                otu_feature_append.append(name)
            otu_feature_append = sorted(list(set(otu_feature_append)))
            cNorm = colors.Normalize(vmin=0,
                                     vmax=(len(otu_feature_append)-1))
            scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=feature_colors)
            for index, row in enumerate(otu_feature_append):
                colorVal.append(scalarMap.to_rgba(index))
            feature_color_dict = dict(zip(otu_feature_append, colorVal))
            feature_color_dict = OrderedDict(
                sorted(feature_color_dict.items(),
                       key=lambda x: x[0]))
        for name, group in feature_groups:
            for i in range(group[features_x].shape[0]):
                _id = group.index[i]
                kwds = {}
                if feature_zorder is not None:
                    kwds['zorder'] = feature_zorder[name]
                ax2.arrow(0, 0,
                          group.loc[_id, features_x],
                          group.loc[_id, features_y],
                          width=arrow_width, head_width=arrow_head,
                          alpha=feat_alpha,
                          color=feature_color_dict[name],
                          **kwds)
        for key, value in feature_color_dict.items():
            recs.append(mpatches.Rectangle((0, 0), 1, 1,
                                           fc=feature_color_dict[key],
                                           alpha=feat_alpha))
            feature.append(key)
        ax2.legend(recs, feature, loc=feat_leg_loc,
                   title=feature_color_category)
    # --- shared axis limits with proportional padding ---
    # (positional .iloc: column 0 is the x axis, column 1 the y axis)
    if features is not None:
        xmin = min([min(samples.iloc[:, 0]), min(features.iloc[:, 0])])
        xmax = max([max(samples.iloc[:, 0]), max(features.iloc[:, 0])])
        ymin = min([min(samples.iloc[:, 1]), min(features.iloc[:, 1])])
        ymax = max([max(samples.iloc[:, 1]), max(features.iloc[:, 1])])
        xpad = (xmax - xmin) * x_pad
        ypad = (ymax - ymin) * y_pad
        # feature_order selects whether samples or features draw on top.
        ax.set_zorder(ax2.get_zorder()+(1-feature_order))
        ax.patch.set_visible(False)
        ax.set_xlim(xmin - xpad, xmax + xpad)
        ax.set_ylim(ymin - ypad, ymax + ypad)
        ax2.set_xlim(xmin - xpad, xmax + xpad)
        ax2.set_ylim(ymin - ypad, ymax + ypad)
        ax2.set_yticks([])
    else:
        xmin = min([min(samples.iloc[:, 0])])
        xmax = max([max(samples.iloc[:, 0])])
        ymin = min([min(samples.iloc[:, 1])])
        ymax = max([max(samples.iloc[:, 1])])
        xpad = (xmax - xmin) * x_pad
        ypad = (ymax - ymin) * y_pad
        ax.set_xlim(xmin - xpad, xmax + xpad)
        ax.set_ylim(ymin - ypad, ymax + ypad)
        ax2.set_yticks([])
    # Label the axes with percent variance explained when eigenvalues are
    # supplied. NOTE(review): expects a numpy array (uses eigenvalues**2)
    # and requires more than two values -- confirm both before use.
    if len(eigenvalues) > 2:
        e_0 = eigenvalues[0]
        e_1 = eigenvalues[1]
        ax.set_xlabel('PC 1 ({:.2%})'.format(e_0**2/sum(eigenvalues**2)))
        ax.set_ylabel('PC 2 ({:.2%})'.format(e_1**2/sum(eigenvalues**2)))
    return fig, [ax, ax2]
| [
"jamietmorton@gmail.com"
] | jamietmorton@gmail.com |
7de46ff258d1e70d9b95ce0af43322a64519d851 | ab3575ba93b9082ce1bd928473f60657d3b3d66c | /front1/views.py | 89cc3dd44fb3824124b58bece80b6f2b77eb78bf | [] | no_license | leonardobarpe/ge1 | e592587d923c16b95fa6a5348171894ff186c953 | 4ee956590bbf16b13a2c6cd8025681df17392128 | refs/heads/master | 2020-03-19T08:49:04.594267 | 2018-07-16T05:21:25 | 2018-07-16T05:21:25 | 130,753,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from django.shortcuts import render_to_response, HttpResponseRedirect, HttpResponse, render
from django.template import RequestContext
# Create your views here.
# --------------------------------------------------------------------------- Logueo
def inicioSesion(request):
    """Render the login page (inicioSesion.html)."""
    # Commented-out permission probes kept from an earlier revision:
    # front1 = request.user.has_module_perms('front1')
    # tableroMando = request.user.has_module_perms('tableroMando')
    # locals() passes every local name (here just `request`) as the template
    # context. NOTE(review): render_to_response was removed in Django 3.0 --
    # presumably this project pins an older Django; confirm before upgrading.
    return render_to_response('inicioSesion.html', locals())
# --------------------------------------------------------------------------- Inicio
def inicio(request):
    """Render the home page (inicio.html) with the local context."""
    return render_to_response('inicio.html', locals())
"leonardobarpe@gmail.com"
] | leonardobarpe@gmail.com |
5f8bbcb4b5ff9366d6e4fb74645f50c061e4a06c | f2e914aa045c420b2d694c94b7adfb2091bc64f5 | /systemd_notifier/systemd.py | 7eef893875a77c4520fabe740905b42716f7542e | [
"Apache-2.0"
] | permissive | drbild/systemd-notifier | c20ac8b6e68e4aec184607582c3aa904fd43a4ca | 57d4f1faffd81c9f078efb9a22858ca9bfb6bf44 | refs/heads/master | 2021-03-30T18:20:07.403568 | 2017-06-12T19:37:30 | 2017-06-12T19:37:30 | 93,211,722 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | import dbus
import dbus.mainloop.glib
import gobject
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
gobject.threads_init()
from state import State
from error import SystemdError, UnknownUnitError
class DBusManager(object):
    """Thin wrapper around the systemd manager object on the system D-Bus.

    Connects to the system bus, binds the org.freedesktop.systemd1 manager
    interface, and subscribes to unit signals so that property-change
    events are delivered (systemd only emits them to subscribers).
    """
    def __init__(self):
        self.system_bus = dbus.SystemBus()
        self.systemd_object = self.system_bus.get_object('org.freedesktop.systemd1',
                                                         '/org/freedesktop/systemd1')
        self.systemd_manager = dbus.Interface(self.systemd_object, dbus_interface='org.freedesktop.systemd1.Manager')
        try:
            # Subscribe() makes systemd broadcast unit-change signals to us;
            # it doubles as a capability probe for very old systemd versions.
            self.systemd_manager.Subscribe()
        except dbus.DBusException as e:
            raise SystemdError("Systemd is not installed, or is an incompatable version. It must provide the Subscribe dbus method: version 204 is the minimum recommended version.", e)
    def fetch_unit(self, unit_name):
        """Load *unit_name* and return it wrapped in a DBusUnit.

        Raises UnknownUnitError when systemd cannot load the unit.
        """
        try:
            unit_path = self.systemd_manager.LoadUnit(unit_name)
            unit_object = self.system_bus.get_object('org.freedesktop.systemd1', str(unit_path))
            return DBusUnit(unit_name, unit_path, unit_object)
        except dbus.DBusException as e:
            raise UnknownUnitError("Unknown or unloaded systemd unit '%s'"%(unit_name), e)
    @property
    def runner(self):
        # NOTE(review): side-effecting "getter" -- accessing this property
        # blocks forever running the GLib main loop so queued D-Bus signals
        # get dispatched; confirm callers before turning it into a method.
        gobject.MainLoop().run()
class DBusUnit(object):
    """Wrapper for a single systemd unit object on the D-Bus.

    Exposes the unit's identity, forwards property lookups, and can push
    ``(unit, State)`` tuples onto a queue whenever systemd reports a
    PropertiesChanged signal for the unit.
    """
    # D-Bus interface names used for property access and signal filtering.
    IFACE_UNIT = "org.freedesktop.systemd1.Unit"
    IFACE_SERVICE = "org.freedesktop.systemd1.Service"
    IFACE_PROPS = "org.freedesktop.DBus.Properties"
    def __init__(self, name, path, dbus_object):
        self._name = name
        self._path = path
        self._dbus_object = dbus_object
        # Cached once at construction; None when the unit is not a service
        # or exposes no 'Type' property.
        self._maybe_service_type = self._service_type()
    @property
    def name(self):
        """Unit name, e.g. 'nginx.service'."""
        return self._name
    @property
    def path(self):
        """D-Bus object path of the unit."""
        return self._path
    @property
    def dbus_object(self):
        """Raw dbus proxy object for the unit."""
        return self._dbus_object
    @property
    def maybe_service_type(self):
        """Service 'Type' (e.g. 'simple', 'oneshot') or None."""
        return self._maybe_service_type
    def register_listener(self, queue):
        """Emit the current state onto *queue* and re-emit on every change.

        Signal delivery requires a running GLib main loop (see
        DBusManager.runner).
        """
        self._enqueue_state(queue)
        # Only react to changes on the Unit interface; other interfaces'
        # PropertiesChanged signals are ignored.
        self.dbus_object.connect_to_signal("PropertiesChanged",
               lambda iface, *args: self._enqueue_state(queue) if iface == DBusUnit.IFACE_UNIT else None,
               dbus_interface = DBusUnit.IFACE_PROPS)
    def on_change(self, callback):
        # NOTE(review): the stored callback is never invoked inside this
        # class -- presumably consumed by the queue reader elsewhere; confirm.
        self._change_callback = callback
    def on_each_state_change(self, callback):
        # NOTE(review): see on_change -- stored but not called here.
        self._each_state_change_callback = callback
    def property(self, name):
        """Fetch one org.freedesktop.systemd1.Unit property by *name*."""
        return self.dbus_object.Get(DBusUnit.IFACE_UNIT, name, dbus_interface=DBusUnit.IFACE_PROPS)
    def __str__(self):
        # "name" or "name (type)" when the service type is known.
        if self.maybe_service_type:
            type_label = " (%s)"%(self.maybe_service_type,)
        else:
            type_label = ""
        return "{name}{type_label}".format(name = self._name, type_label = type_label)
    def _build_state(self):
        # Snapshot the four systemd state strings plus the service type.
        return State(self.property('ActiveState'),
                     self.property('SubState'),
                     self.property('LoadState'),
                     self.property('UnitFileState'),
                     self.maybe_service_type)
    def _enqueue_state(self, queue):
        queue.put((self, self._build_state()))
    def _service_type(self):
        # GetAll on the Service interface returns an empty mapping for
        # non-service units, in which case we fall through to None.
        service_props = self.dbus_object.GetAll(DBusUnit.IFACE_SERVICE, dbus_interface=DBusUnit.IFACE_PROPS)
        if 'Type' in service_props:
            return service_props['Type']
        else:
            return None
| [
"drbild@willbild.com"
] | drbild@willbild.com |
9565c6008d359c9ef4776815146440ba81e91136 | a4f2d74559b00191454d7d3492f8d35d118332b5 | /src/atra/plot/network_air.py | 15d33b561b56963e9c5b77d2ee76eb5a2084872d | [
"MIT"
] | permissive | nfontan/argentina-transport | c4b6f06a33034ce1c3ce905f901ff5086013b38b | f1583b077844e6b20b2c81144dec0872c88bdb80 | refs/heads/master | 2023-03-18T10:23:44.580084 | 2019-08-11T22:01:34 | 2019-08-11T22:01:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | """Plot air network
"""
import os
import cartopy.crs as ccrs
import geopandas
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from atra.utils import load_config, get_axes, plot_basemap, scale_bar, plot_basemap_labels, save_fig
def main(config):
"""Read shapes, plot map
"""
data_path = config['paths']['data']
# data
output_file = os.path.join(config['paths']['figures'], 'network-air-map.png')
air_edge_file = os.path.join(data_path, 'network', 'air_edges.shp')
air_node_file = os.path.join(data_path, 'network', 'air_nodes.shp')
# air_usage_file = os.path.join(data_path, 'usage', 'air_passenger.csv')
# basemap
proj_lat_lon = ccrs.PlateCarree()
ax = get_axes()
plot_basemap(ax, data_path)
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, data_path, include_regions=False)
colors = {
'Air route': '#252525',
'Airport': '#d95f0e'
}
# edges
edges = geopandas.read_file(air_edge_file)
ax.add_geometries(
list(edges.geometry),
crs=proj_lat_lon,
linewidth=1.5,
edgecolor=colors['Air route'],
facecolor='none',
zorder=4
)
# edges merged with usage
# usage = pandas.read_csv(air_usage_file)
# edges_with_usage = edges.merge(usage[['id', 'passengers_2016']], on='id')
# nodes
nodes = geopandas.read_file(air_node_file)
ax.scatter(
list(nodes.geometry.x),
list(nodes.geometry.y),
transform=proj_lat_lon,
facecolor=colors['Airport'],
s=12,
zorder=5
)
# legend
legend_handles = [
mpatches.Patch(color=color, label=label)
for label, color in colors.items()
]
plt.legend(handles=legend_handles, loc='lower left')
# save
save_fig(output_file)
if __name__ == '__main__':
CONFIG = load_config()
main(CONFIG)
| [
"tomalrussell@gmail.com"
] | tomalrussell@gmail.com |
2264e15313f69e818f1bbdd697aae79e592592ad | 4273f162abb12ef1939271c2aabee9547ac6afee | /crowd/utils/config.py | 054ef5faf1c8d84308890a15230a3b194adf10e5 | [] | no_license | xiyuhao/subins_tutorials | 2717c47aac0adde099432e5dfd231606bf45a266 | acbe4fe16483397e9b0f8e240ca23bdca652b92d | refs/heads/master | 2023-07-28T13:42:41.445399 | 2021-09-12T11:02:37 | 2021-09-12T11:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | '''
config.py 0.0.1
Date: January 15, 2019
Last modified: June 14, 2019
Author: Subin. Gopi(subing85@gmail.com)
# Copyright(c) 2019, Subin Gopi
# All rights reserved.
# WARNING! All changes made in this file will be lost!
Description
None.
'''
def get_conig():
return 'Linux', 'maya', '2016', '2.7.5'
def get_tool_kit():
tools = {
'create': ['create', 'Create', '0.0.1'],
'publish': ['publish', 'Publish', '0.0.1']
}
return tools
| [
"subing85@gmail.com"
] | subing85@gmail.com |
6ef4885b55b2959e9db0e836280c30f7bf832629 | a2860dd0acbb7b85d30fad1be52512fa7bc4c611 | /cerebralcortex/core/file_manager/read_handler.py | f1d9eb4c750b94afc422959e5232bc2448e3825c | [
"BSD-2-Clause"
] | permissive | hippietilley/CerebralCortex-Kernel | b1783c8156744f7809c9a3810b990c45945da936 | c7dac033d9561f14bdb72430577db6ae4e3c7911 | refs/heads/master | 2020-04-18T15:15:47.199601 | 2019-01-18T16:05:14 | 2019-01-18T16:05:14 | 167,607,878 | 0 | 0 | BSD-2-Clause | 2019-01-25T20:16:54 | 2019-01-25T20:16:54 | null | UTF-8 | Python | false | false | 6,069 | py | # Copyright (c) 2018, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import gzip
import json
import traceback
from typing import List
from pympler import asizeof
from cerebralcortex.core.datatypes.datastream import DataStream, DataPoint
from cerebralcortex.core.datatypes.stream_types import StreamTypes
class ReadHandler():
def read_file(self, filepath: str) -> str:
"""
Read a file and return contents
:param filepath:
:return: file contents
:rtype: str
"""
if not filepath:
raise ValueError("File path is required field.")
with open(filepath, "r") as file:
data = file.read()
file.close()
return data
def file_processor(self, msg: dict, zip_filepath: str) -> DataStream:
"""
Process a Kafka or MySQL msg. Parse compressed files. Convert json metadata and data in DataStream object.
:param msg:
:param zip_filepath:
:return: DataStream object with metadata and data
:rtype: DataStream
"""
if not isinstance(msg["metadata"], dict):
metadata_header = json.loads(msg["metadata"])
else:
metadata_header = msg["metadata"]
identifier = metadata_header["identifier"]
owner = metadata_header["owner"]
name = metadata_header["name"]
data_descriptor = metadata_header["data_descriptor"]
execution_context = metadata_header["execution_context"]
if "annotations" in metadata_header:
annotations = metadata_header["annotations"]
else:
annotations = {}
if "stream_type" in metadata_header:
stream_type = metadata_header["stream_type"]
else:
stream_type = StreamTypes.DATASTREAM
try:
gzip_file_content = self.get_gzip_file_contents(zip_filepath + msg["filename"])
datapoints = list(map(lambda x: self.row_to_datapoint(x), gzip_file_content.splitlines()))
# self.rename_file(zip_filepath + msg["filename"])
start_time = datapoints[0].start_time
end_time = datapoints[len(datapoints) - 1].end_time
ds = DataStream(identifier,
owner,
name,
data_descriptor,
execution_context,
annotations,
stream_type,
start_time,
end_time,
datapoints)
return ds
except Exception as e:
self.logging.log(error_message="In Kafka preprocessor - Error in processing file: " + str(
msg["filename"]) + " Owner-ID: " + owner + "Stream Name: " + name + " - " + str(traceback.format_exc()),
error_type=self.logtypes.CRITICAL)
return DataStream
def row_to_datapoint(self, row: str) -> DataPoint:
"""
Format data based on mCerebrum's current GZ-CSV format into what Cerebral
Cortex expects
:param row:
:return: single DataPoint
:rtype: DataPoint
"""
ts, offset, values = row.split(',', 2)
ts = int(ts) / 1000.0
offset = int(offset)
timezone = datetime.timezone(datetime.timedelta(milliseconds=offset))
ts = datetime.datetime.fromtimestamp(ts, timezone)
return DataPoint(start_time=ts, sample=values)
def get_gzip_file_contents(self, filepath: str) -> str:
"""
Read and return gzip compressed file contents
:param filepath:
:return: gzip_file_content
:rtype: str
"""
fp = gzip.open(filepath)
gzip_file_content = fp.read()
fp.close()
gzip_file_content = gzip_file_content.decode('utf-8')
return gzip_file_content
def get_chunk_size(self, data: List[DataPoint]) -> int:
"""
get chunk size of DataPoint objects in 0.75 MB blocks. This method is computationally heavy and not scalable.
:param data:
:return: size of a list
:rtype: int
"""
if len(data) > 0:
chunk_size = 750000 / (asizeof.asizeof(data) / len(data)) # 0.75MB chunk size without metadata
return round(chunk_size)
else:
return 0
def chunks(data: str, max_len: int) -> str:
"""
Yields max_len sized chunks with the remainder in the last
:param data:
:param max_len:
"""
# TODO: default yield value needs to be set
for i in range(0, len(data), max_len):
yield data[i:i + max_len]
| [
"nasir.ali08@gmail.com"
] | nasir.ali08@gmail.com |
b8a0e0a648bc723ae6c7e0641be2840b0b6aa446 | a89b96bd2dc1878e36463cb39b4126b408db8ba3 | /프로그래머스/lv2/42578. 의상/의상.py | 4fc71004f1d3a6cbc3175838a5da1ff11ee74665 | [] | no_license | EmilyMinjuKim/Algorithm | fb2b9fec12e543f232d7148aa6859febd031dcb2 | 5f1a9211560d21cf034d426b58bbce896b31bc7e | refs/heads/master | 2023-06-11T09:59:29.435141 | 2023-06-03T02:44:35 | 2023-06-03T02:44:35 | 277,550,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from collections import Counter
def solution(clothes):
answer = 1
map = dict(Counter([type for name, type in clothes]))
map_list = list(map.values())
for i in map_list:
answer *= (i+1)
return answer-1 | [
"65878320+EmilyMinjuKim@users.noreply.github.com"
] | 65878320+EmilyMinjuKim@users.noreply.github.com |
996925f5530dd5af83fba2da9dd72a0012fcef11 | a0f27e45f598a5c4145efa44ae05edf431b7e06f | /seqmod/modules/ff.py | 4d6a8e6043e089cd4a0ed05da4145de8fd6bf3c5 | [] | no_license | cmry/seqmod | af4d2e6227247f5d3630a53818328cea493672f4 | ddc57cd36c6b6204263db770f4c98923ffb4ba0b | refs/heads/master | 2021-09-11T23:50:01.261133 | 2018-01-09T15:51:23 | 2018-01-09T15:51:23 | 113,448,571 | 0 | 0 | null | 2017-12-07T12:31:43 | 2017-12-07T12:31:43 | null | UTF-8 | Python | false | false | 5,677 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
class MLP(nn.Module):
"""
Standard MLP
"""
def __init__(self, inp_size, hid_size, nb_classes,
nb_layers=1, dropout=0.0, act='relu'):
self.inp_size, self.hid_size = inp_size, hid_size
self.nb_layers, self.nb_classes = nb_layers, nb_classes
self.dropout, self.act = dropout, act
super(MLP, self).__init__()
layers = []
for i in range(nb_layers):
layers.append(nn.Linear(inp_size, hid_size))
inp_size = hid_size
self.layers = nn.ModuleList(layers)
self.output = nn.Linear(hid_size, nb_classes)
def forward(self, inp):
"""
:param inp: torch.FloatTensor (batch_size x inp_size)
:return: torch.FloatTensor (batch_size x nb_classes)
"""
# hidden layers
for layer in self.layers:
out = layer(inp)
if self.act is not None:
out = getattr(F, self.act)(out)
if self.dropout > 0:
out = F.dropout(out, p=self.dropout, training=self.training)
inp = out
# output projection
out = self.output(out)
return out
class MaxOut(nn.Module):
def __init__(self, in_dim, out_dim, k):
"""
Implementation of MaxOut:
h_i^{maxout} = max_{j \in [1, ..., k]} x^T W_{..., i, j} + b_{i, j}
where W is in R^{D x M x K}, D is the input size, M is the output size
and K is the number of pieces to max-pool from. (i.e. i ranges over M,
j ranges over K and ... corresponds to the input dimension)
Parameters:
-----------
in_dim: int, Input dimension
out_dim: int, Output dimension
k: int, number of "pools" to max over
Returns:
--------
out: torch.Tensor (batch x k)
"""
self.in_dim, self.out_dim, self.k = in_dim, out_dim, k
super(MaxOut, self).__init__()
self.projection = nn.Linear(in_dim, k * out_dim)
def forward(self, inp):
"""
Because of the linear projection we are bound to 1-d input
(excluding batch-dim), therefore there is no need to generalize
the implementation to n-dimensional input.
"""
batch, in_dim = inp.size()
# (batch x self.k * self.out_dim) -> (batch x self.out_dim x self.k)
out = self.projection(inp).view(batch, self.out_dim, self.k)
out, _ = out.max(2)
return out
class Highway(torch.nn.Module):
"""
Reference:
https://github.com/allenai/allennlp/blob/master/allennlp/modules/highway.py
A `Highway layer <https://arxiv.org/abs/1505.00387>`_ does a gated
combination of a linear transformation and a non-linear transformation
of its input. y = g * x + (1 - g) * f(A(x)), where A
is a linear transformation, `f` is an element-wise non-linearity,
and `g` is an element-wise gate, computed as sigmoid(B(x)).
Parameters
----------
input_dim: int, The dimensionality of `x`.
num_layers: int, optional, The number of highway layers.
activation: str or class, if string it should be an activation function
from torch.nn, otherwise it should be a class that will be instantiated
with kwargs for each layer.
dropout: float, dropout rate before the nonlinearity
"""
def __init__(self, input_dim, num_layers=1, activation='ReLU', dropout=0.0,
**kwargs):
self.input_dim = input_dim
self.dropout = dropout
super(Highway, self).__init__()
layers = []
for layer in range(num_layers):
if isinstance(activation, type): # custom activation class
nonlinear = activation(**kwargs)
else: # assume string
nonlinear = getattr(nn, activation)()
linear = nn.Linear(input_dim, input_dim * 2)
# We should bias the highway layer to just carry its input forward.
# We do that by setting the bias on B(x) to be positive, because
# that means `g` will be biased to be high, to we will carry the
# input forward. The bias on `B(x)` is the second half of the bias
# vector in each Linear layer.
linear.bias[input_dim:].data.fill_(1)
linear.bias.custom = True
layers.append(linear)
layers.append(nonlinear)
self.layers = torch.nn.ModuleList(layers)
def forward(self, inputs):
current_input = inputs
for i in range(0, len(self.layers), 2):
layer, activation = self.layers[i], self.layers[i+1]
proj, linear = layer(current_input), current_input
proj = F.dropout(proj, p=self.dropout, training=self.training)
nonlinear = activation(proj[:, 0:self.input_dim])
gate = F.sigmoid(proj[:, self.input_dim:(2 * self.input_dim)])
# apply gate
current_input = gate * linear + (1 - gate) * nonlinear
return current_input
# gracefully taken from:
# https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
class GradReverse(Function):
"Implementation of GRL from DANN (Domain Adaptation Neural Network) paper"
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
"""
GRL must be placed between the feature extractor and the domain classifier
"""
return GradReverse.apply(x)
| [
"enrique.manjavacas@gmail.com"
] | enrique.manjavacas@gmail.com |
c7da84fd41d7ba4a2e690f3ba382bc6e79adc58c | 9e6204f99bfc5c1c3b4201f74ac2bcbd317f1d54 | /Flow-free/venv/bin/pip | 38f7d146bfccaeee8c120f9123840fab66b5b02b | [] | no_license | Roshmar/Python | 643b27f7692d3bd866df5ee124fc5f74649f2f2d | 2288f9511e83e06b7376403b143314643e205f00 | refs/heads/main | 2023-06-17T07:18:44.410502 | 2021-07-20T11:20:14 | 2021-07-20T11:20:14 | 387,766,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/Users/yaroslav/Desktop/FlowFree/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"martin.roshko@student.tuke.sk"
] | martin.roshko@student.tuke.sk | |
fd75ee444727d1fd69c72d0457b9ea145dcba2b1 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225401.py | 2181904c869c47b47b88084f2f73feb9b48ff6f0 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 523 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
def process_item(self, item, spider):
films_name = item['films_name']
films_type = item['films_type']
release_time = item['release_time']
output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
with open('./week01/homework02/top10.scv')
| [
"31039587+ydbB@users.noreply.github.com"
] | 31039587+ydbB@users.noreply.github.com |
646cf0ca1a5bb5bdb50c0c7f66ae267edfa44b72 | 7c02c0175af45d97556f296f81fb17b561592979 | /gridBagSizer.py | 3050b2b27bf5785b4ea1bcebbb20cb9f8e4d3f2e | [] | no_license | rahulSingh2995/wxPython | 36842fc48dddec9d20eaf74d46a106c9eb7a4522 | 5fdcc872c4b4873468fdfd1608b20031cbfc2de7 | refs/heads/master | 2022-11-24T19:38:44.193559 | 2020-07-30T04:14:53 | 2020-07-30T04:14:53 | 283,667,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | #!/usr/bin/env python
"""
ZetCode wxPython tutorial
In this example we create a rename layout
with wx.GridBagSizer.
author: Jan Bodnar
website: www.zetcode.com
last modified: July 2020
"""
import wx
class Example(wx.Frame):
def __init__(self, parent, title):
super(Example, self).__init__(parent, title=title, size=(400,150))
self.InitUI()
self.Centre()
def InitUI(self):
panel = wx.Panel(self)
sizer = wx.GridBagSizer(4,4)
text = wx.StaticText(panel, label="Rename To")
sizer.Add(text, pos=(0, 0), flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)
tc = wx.TextCtrl(panel)
sizer.Add(tc, pos=(1, 0), span=(1, 5),
flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
buttonOk = wx.Button(panel, label="Ok", size=(90, 28))
buttonClose = wx.Button(panel, label="Close", size=(90, 28))
sizer.Add(buttonOk, pos=(3, 0), flag = wx.LEFT, border=10)
sizer.Add(buttonClose, pos=(3, 4), flag=wx.RIGHT|wx.BOTTOM, border=10)
sizer.AddGrowableCol(1)
sizer.AddGrowableRow(2)
panel.SetSizer(sizer)
def main():
app = wx.App()
ex = Example(None, title='Rename')
ex.Show()
app.MainLoop()
if __name__ == '__main__':
main()
| [
"kumarrah@inpunluxu915.ls.ege.ds"
] | kumarrah@inpunluxu915.ls.ege.ds |
925a4ae81321c8cdd62cc8c22f316ef03f50b3d7 | f44616f8e5c7695fcfbd19e67228274dc9d816eb | /backend/src/controllers/task_controller.py | 10216998ced96ab6d31efcf25acae59837146d74 | [] | no_license | samharre/kanban | 496fbcd15317e6142eb444d60085ca8d21c2c575 | 2b191f16eaf776b04c1bc4d846adcf5eee83ff02 | refs/heads/master | 2023-03-12T09:21:44.803066 | 2021-03-01T18:40:03 | 2021-03-01T18:40:03 | 302,793,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | from flask import Blueprint, request, jsonify, abort
from ..auth.auth import requires_auth, get_user_id
from ..models.task import Task
from ..models.phase import Phase
from ..utils.task_utils import (
reorder_tasks_same_phase,
reorder_tasks_diff_phases
)
import sys
task_bp = Blueprint('task_controller', __name__)
def list_tasks():
user_id = get_user_id()
return Task.query.filter(Task.user_id == user_id).\
order_by(Task.phase_id, Task.order).all()
@task_bp.route('/tasks', methods=['GET'])
def get_tasks():
try:
tasks = list_tasks()
return jsonify({
'success': True,
'tasks': [task.serialize() for task in tasks]
})
except Exception:
print(sys.exc_info())
abort(500)
@task_bp.route('/phases/<int:phase_id>/tasks', methods=['GET'])
def get_tasks_per_phase(phase_id):
try:
tasks = Task.query.filter(
Task.phase_id == phase_id).order_by(Task.order).all()
return jsonify({
'success': True,
'tasks': [task.serialize() for task in tasks]
})
except Exception:
print(sys.exc_info())
abort(500)
@task_bp.route('/tasks', methods=['POST'])
@requires_auth('post:tasks')
def create_task(jwt_payload):
body = request.get_json()
if not body:
abort(400)
phase_id = body.get('phase_id')
title = body.get('title')
if not (phase_id and title):
abort(422)
phase = Phase.query.get(phase_id)
if not phase:
abort(422)
query = Task.query.filter(
Task.phase_id == phase_id, Task.user_id == jwt_payload.get('sub'))
order = query.count() + 1
description = body.get('description')
priority = body.get('priority')
due_date = body.get('due_date')
try:
task = Task(
phase_id=phase_id,
title=title,
order=order,
description=description,
priority=priority,
due_date=due_date,
user_id=jwt_payload['sub']
)
task.insert()
except Exception:
task.rollback()
print(sys.exc_info())
abort(500)
return jsonify({
'success': True,
'task': task.serialize()
})
@task_bp.route('/tasks/<int:task_id>', methods=['PATCH'])
@requires_auth('patch:tasks')
def update_task(jwt_payload, task_id):
body = request.get_json()
if not body:
abort(400)
new_phase_id = body.get('phase_id')
new_title = body.get('title')
new_order = body.get('order')
if ('phase_id' in body and not new_phase_id) \
or ('title' in body and not new_title):
abort(422)
if new_phase_id and not new_order:
abort(422)
task = Task.query.get(task_id)
if not task:
abort(404)
try:
prev_phase_id = task.phase_id
prev_order = task.order
if 'phase_id' in body:
task.phase_id = new_phase_id
if 'title' in body:
task.title = new_title
if 'description' in body:
task.description = body.get('description')
if 'priority' in body:
task.priority = body.get('priority')
if 'due_date' in body:
task.due_date = body.get('due_date')
tasks = []
if (new_order and new_order != prev_order) \
or (new_phase_id and new_phase_id != prev_phase_id):
task.order = new_order
if (new_phase_id and new_phase_id != prev_phase_id):
tasks = reorder_tasks_diff_phases(
prev_phase_id=prev_phase_id,
new_phase_id=new_phase_id,
task_id=task.id,
order_prev_phase=prev_order,
order_new_phase=new_order
)
else:
tasks = reorder_tasks_same_phase(
phase_id=prev_phase_id,
task_id=task.id,
prev_order=prev_order,
new_order=new_order
)
for task_reorderd in tasks:
task.add_task_to_session(task_reorderd)
except Exception:
task.rollback()
print(sys.exc_info())
abort(500)
else:
task.update()
tasks = list_tasks()
return jsonify({
'success': True,
'task': task.serialize(),
'tasks': [task.serialize() for task in tasks]
})
@task_bp.route('/tasks/<int:task_id>', methods=['DELETE'])
@requires_auth('delete:tasks')
def delete_task(jwt_payload, task_id):
task = Task.query.get(task_id)
if not task:
abort(404)
try:
prev_order = task.order
task.delete()
tasks = reorder_tasks_same_phase(
phase_id=task.phase_id,
task_id=task.id,
prev_order=prev_order,
new_order=None
)
for task_reorderd in tasks:
task.add_task_to_session(task_reorderd)
except Exception:
task.rollback()
print(sys.exc_info())
abort(500)
return jsonify({
'success': True,
'task_deleted': task_id
})
| [
"samiaharre@gmail.com"
] | samiaharre@gmail.com |
8536e0cce05e6fee39144b2b1c6e1b5c482b510f | 8064bbf3dadc70c3aceeecd885bc69cfddf06549 | /ZeeAnalyzer/test/runElectronPlots_Skim_v1.py | 9dc1a84ee437d62c8d259a0380e2392dbbaec102 | [] | no_license | taroni/ZeeAnalyzer | 6faf7e4d9785ab9b15559d096a2b98d5e7483be7 | 44046f7095a22a9b5486a5ab0aee2dee52b430ae | refs/heads/master | 2022-01-17T20:12:06.695267 | 2017-11-24T13:51:25 | 2017-11-24T13:51:25 | 110,087,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("TestElectrons")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag = GlobalTag(process.GlobalTag, '92X_upgrade2017_realistic_Candidate_forECALStudies', '')
# input
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
inputFilesData = cms.untracked.vstring(
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/030/00000/E69F63AA-EE8E-E711-8121-02163E019BAF.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/008329E5-368F-E711-A1CD-02163E01A21D.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/04BB9D82-398F-E711-B74B-02163E019BDF.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/407638D4-4B8F-E711-AC24-02163E01437E.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/44B91A0E-488F-E711-A372-02163E019CA5.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/5479D9DF-3C8F-E711-BCF4-02163E01A5EB.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/6496C386-518F-E711-B09E-02163E01341D.root'
)
inputFilesMC = cms.untracked.vstring(
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/00CDB4C7-5C93-E711-AF33-02163E0142CA.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/027E1441-3994-E711-BFBD-02163E01A6D8.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/02FD6F07-5D93-E711-85AC-02163E01A334.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/061B6C49-5793-E711-AF23-02163E011B7C.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/0A66322F-5793-E711-9184-02163E01A2BD.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/0EFBF8C4-5C93-E711-94C9-02163E012207.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/14FDD26B-7493-E711-8B21-001E67792532.root'
)
inputFiles = inputFilesMC
outputFile = "electron_ntuple.root"
process.source = cms.Source ("PoolSource", fileNames = inputFiles )
process.ntupler = cms.EDAnalyzer(
'ElectronPlots',
beamSpot = cms.InputTag('offlineBeamSpot'),
genEventInfoProduct = cms.InputTag('generator'),
electrons = cms.InputTag("gedGsfElectrons"),
genParticles = cms.InputTag("genParticles"),
vertices = cms.InputTag("offlinePrimaryVertices"),
conversions = cms.InputTag('allConversions'),
isMC = cms.bool(True)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string( outputFile )
)
process.load("DPGAnalysis/Skims/ZElectronSkim_cff")
process.p = cms.Path(process.zdiElectronSequence*process.ntupler)
| [
"Silvia.Taroni@cern.ch"
] | Silvia.Taroni@cern.ch |
e4ede140050fb8c241173693253719a2d0235799 | 799c9d7e1436232a02b213178ed0bda9d5c673e8 | /Chapter15/example2.py | b3c8ae550f592b84515a5257e78fd403bf0171f4 | [
"MIT"
] | permissive | KrisNguyen135/Advanced-Python-Programming-Second-Edition | a32578116805285983df8eac2dba584e0e77ea0d | e5d473e3efc5f6590028cb3f318e1f4aeb0aadd1 | refs/heads/main | 2023-08-14T18:14:09.087485 | 2021-09-19T17:57:03 | 2021-09-19T17:57:03 | 373,899,665 | 0 | 0 | MIT | 2021-06-04T16:23:55 | 2021-06-04T16:23:55 | null | UTF-8 | Python | false | false | 686 | py | import time
import threading
COUNT = 50000000
def countdown(n):
while n > 0:
n -= 1
###########################################################################
start = time.time()
countdown(COUNT)
print('Sequential program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
###########################################################################
thread1 = threading.Thread(target=countdown, args=(COUNT // 2,))
thread2 = threading.Thread(target=countdown, args=(COUNT // 2,))
start = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print('Concurrent program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
| [
"nguyenminhquan135@gmail.com"
] | nguyenminhquan135@gmail.com |
a38e00bd15b7f69cd0501f9e2a9343c1615f935c | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/83f8ec00a3cf40a78f2fd2fa2dedcd3a.py | 76cb33cce300805428c65c319f8169dd9e0ef049 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 270 | py | import re
def hey(input):
clean_input = input.strip()
if clean_input == '':
return 'Fine. Be that way!'
if clean_input.isupper():
return 'Whoa, chill out!'
if clean_input.endswith('?'):
return 'Sure.'
return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
c656944c13e116c6febc4eead60f815fe2d86c6d | d198ac05f2c83146f955f148e6b290ba50025d06 | /pyshapelets/extractors/sax_shapelets.py | ecf748bdbb7bfab98cdfcdca0f947eb2cf635977 | [
"MIT"
] | permissive | tungk/pyShapelets | 3aa3827207b3915684fefeef52b287401d858bc2 | d7e91150c17bf0f5fed55dc36d0c4d2d447e80c9 | refs/heads/master | 2020-09-06T20:42:54.004418 | 2018-08-03T12:52:28 | 2018-08-03T12:52:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,065 | py | import numpy as np
import pandas as pd
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from sax_transform import transform
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import util
from collections import defaultdict, Counter
from tqdm import trange
def random_mask(sax_timeseries, mask_size=5):
"""When discretizing a continous real-valued timeseries, the problem of
false dismissals arises. This is caused by the fact that two timeseries that
differ only by a tiny epsilon can result in two different words. To
alleviate this problem, we take random subsets of each word by masking
them. Now a trade-off between false dismissals and false positives must
be considered. The higher the mask_size, the higher the probability of
false positives.
Parameters:
-----------
* sax_timeseries (3D np.array: timeseries x sax_words x word_length)
"""
random_idx = np.random.choice(
range(sax_timeseries.shape[2]),
size=sax_timeseries.shape[2] - mask_size,
replace=False
)
return sax_timeseries[:, :, random_idx]
def create_score_table(sax_timeseries, labels, iterations=10, mask_size=5):
unique_labels = list(set(labels))
score_table = np.zeros((
sax_timeseries.shape[0],
sax_timeseries.shape[1],
len(unique_labels)
))
#TODO: If stuff ever bugs out, check here first...
for it in range(iterations):
masked_timeseries = random_mask(sax_timeseries, mask_size)
hash_table = defaultdict(list)
for ts_idx in range(masked_timeseries.shape[0]):
for sax_idx in range(masked_timeseries.shape[1]):
key = tuple(list(masked_timeseries[ts_idx, sax_idx]))
hash_table[key].append((ts_idx, sax_idx))
for bucket in hash_table:
for (ts_idx1, sax_idx) in hash_table[bucket]:
unique_idx = set([x[0] for x in hash_table[bucket]])
for idx in unique_idx:
score_table[
ts_idx1,
sax_idx,
unique_labels.index(labels[idx])
] += 1
return score_table
def extract_shapelet(timeseries, labels, alphabet_size=4, sax_length=8,
nr_candidates=25, iterations=5, mask_size=3, min_len=None,
max_len=None):
# If no min_len and max_len are provided, we fill then in ourselves
if min_len is None or min_len < sax_length:
min_len = sax_length
if max_len is None:
max_len = timeseries.shape[1]
if type(timeseries) == pd.DataFrame or type(timeseries) == pd.Series:
timeseries = timeseries.values
if type(labels) == pd.DataFrame or type(labels) == pd.Series:
labels = labels.values
unique_classes = set(labels)
classes_cntr = Counter(labels)
max_gain, max_gap = 0, 0
best_shapelet, best_dist, best_L = None, None, None
for l in trange(min_len, max_len, desc='length', position=0):
# To select the candidates, all subsequences of length l from
# all time series are created using the sliding window technique,
# and we create their corresponding SAX word and keep them in SAXList
sax_words = np.zeros((
len(timeseries),
timeseries.shape[1] - l + 1,
sax_length
))
for ts_idx, ts in enumerate(timeseries):
# Extract all possible subseries, by using a sliding window
# with shift=1
subseries = []
for k in range(len(ts) - l + 1):
subseries.append(util.z_norm(ts[k:k+l]))
# Transform all the subseries and add them to the sax_words
transformed_timeseries = transform(subseries, sax_length,
alphabet_size)
sax_words[ts_idx] = transformed_timeseries
score_table = create_score_table(sax_words, labels,
iterations=iterations,
mask_size=mask_size)
max_score_table = np.ones_like(score_table)
for c in unique_classes:
max_score_table[:, :, c] = classes_cntr[c] * iterations
rev_score_table = max_score_table - score_table
power = []
for ts_idx in range(score_table.shape[0]):
for sax_idx in range(score_table.shape[1]):
min_val, max_val = float('inf'), float('-inf')
total = 0
for class_idx in range(score_table.shape[2]):
diff = score_table[ts_idx, sax_idx, class_idx] - rev_score_table[ts_idx, sax_idx, class_idx]
if diff > max_val:
max_val = diff
if diff < min_val:
min_val = diff
total += abs(diff)
v = (total-abs(max_val)-abs(min_val)) + abs(max_val-min_val)
power.append((v, (ts_idx, sax_idx)))
top_candidates = sorted(power, key=lambda x: -x[0])[:nr_candidates]
for score, (ts_idx, sax_idx) in top_candidates:
candidate = timeseries[ts_idx][sax_idx:sax_idx+l]
L = [] # The orderline, to calculate entropy
for k in range(len(timeseries)):
D = timeseries[k, :]
dist = util.sdist(candidate, D)
L.append((dist, labels[k]))
L = sorted(L, key=lambda x: x[0])
tau, updated, new_gain, new_gap = util.best_ig(L, max_gain, max_gap)
if updated:
best_shapelet = candidate
print('Found new best shapelet of length {} with gain {} and gap {}'.format(len(best_shapelet), new_gain, new_gap))
best_dist = tau
best_L = L
max_gain = new_gain
max_gap = new_gap
return best_shapelet, best_dist, best_L, max_gain, max_gap
| [
"givdwiel.vandewiele@ugent.be"
] | givdwiel.vandewiele@ugent.be |
fc81c7273b4d500b69a80fac0199e1efe9b4913d | 06a6334ae0372f4e9e0093b7a6315e3441617a18 | /Web Service Endpoint/rekog.py | edc2e85b2e1d8c9e7ed4e751e08ea1becd629ad4 | [
"MIT"
] | permissive | labs12-rxid/DS | 75c45ad8b563b4f7ce64eaed496ffe4eb36485de | dca6b42b7bd95d977321c2dedf5f407b7aa46dc7 | refs/heads/master | 2022-10-28T03:29:12.746205 | 2019-07-30T13:46:27 | 2019-07-30T13:46:27 | 183,264,870 | 1 | 0 | MIT | 2022-09-30T18:30:50 | 2019-04-24T16:15:17 | Jupyter Notebook | UTF-8 | Python | false | false | 9,588 | py | '''
Python Script to detect imprinted text on pill images using AWS Rekognition.
Source code:
https://github.com/labs12-rxid/DS/blob/master/text_detection_AWSRekognition.ipynb
print(cv2.getBuildInformation())
'''
import urllib.request
import json
import re
import boto3
import numpy as np
from dotenv import load_dotenv
import os
import cv2
load_dotenv()
key_id = os.getenv("AWS_ACCESS_KEY_ID")
secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
reg_ion = os.getenv("AWS_DEFAULT_REGION")
client = boto3.client('rekognition', region_name=reg_ion,
aws_access_key_id=key_id,
aws_secret_access_key=secret_key)
# Filter to increase image contrast
def add_contrast(image_path):
print('add_contrast: started :', image_path)
#-----Reading the image-----------------------------------------------------
img = cv2.imread(image_path)
print('add_contrast: image read :', image_path)
#-----Converting image to LAB Color model-----------------------------------
lab= cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
print('add_contrast: converted to LAB :', image_path)
#-----Splitting the LAB image to different channels-------------------------
l, a, b = cv2.split(lab)
print('add_contrast: LAB image split :', image_path)
#-----Applying CLAHE to L-channel-------------------------------------------
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
print('add_contrast: clahe instantiated :', image_path)
cl = clahe.apply(l)
print('add_contrast: clahe applied :', image_path)
#-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
limg = cv2.merge((cl,a,b))
print('add_contrast: clahe merged :', image_path)
#-----Converting image from LAB Color model to RGB model--------------------
image_contrast = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
print('add_contrast: converted to RGB :', image_path)
return image_contrast
# ____________ Text Detection Function ______________
def post_rekog_with_filter(pic_json, con_fidence=70):
# -------------Getting list of image file names -------------
imageURL_list = pic_json.get("image_locations")
# print(f'imageURL_list {imageURL_list}')
# ------------- Text from image(s) uploaded by user -------------
all_text = []
# ------------- text read from image(s) with contrast filter -------------
all_filter_text = []
ctr1 = 10000
ctr2 = 10001
for imageURL in imageURL_list:
if imageURL != "":
# ------------- Saving image URL locally -------------
ctr1 += 2
temp_img = str(ctr1) + ".jpg"
urllib.request.urlretrieve(imageURL, temp_img)
imageFile = './' + temp_img
# ------------- Detecting text from original image ------------
with open(imageFile, 'rb') as image:
# !!!!!! WRAP THIS IN A TRY / CATCH !!!!!!!!!
print('detect started', imageFile)
response = client.detect_text(Image={'Bytes': image.read()})
print('detect completed', imageFile)
# ------------- Detected Text (List of Dictionaries) -------------
textDetections = response['TextDetections']
# ------------- Parsing Through Detected Text and
# Making list of Unique Sets of Text Dectected -------------
text_found = []
for text in textDetections:
if text['Confidence'] > con_fidence:
text_found.append(text['DetectedText'])
text_set = list(set(text_found))
# ------------- Appending detected text in image to "all_text" list --------
all_text.append(text_set)
print('parsed text :', all_text)
# ------------- Detecting text from filtered image ------------
print('filtering started :', imageFile)
filtered_img = add_contrast(imageFile)
print('filtering completed :', imageFile)
# ------------- Saving image URL locally -------------
ctr2 += 2
temp_img = str(ctr2) + ".jpg"
cv2.imwrite(temp_img, filtered_img)
imageFile2 = './' + temp_img
print('contrasted image:', imageFile2)
with open(imageFile2, 'rb') as image:
# !!!!!! WRAP THIS IN A TRY / CATCH !!!!!!!!!
print('start detecting contrasted image:', imageFile2)
response2 = client.detect_text(Image={'Bytes': image.read()})
print('detect complete - contrasted image:', imageFile2)
# ------------- Detected Text (List of Dictionaries) -------------
textDetections2 = response2['TextDetections']
# ------------- Parsing Through Detected Text and
# Making list of Unique Sets of Text Dectected -------------
text_found2 = []
for text in textDetections2:
if text['Confidence'] > con_fidence:
text_found2.append(text['DetectedText'])
text_set2 = list(set(text_found2))
# ------------- Appending detected text in image to "all_text" list ------
all_filter_text.append(text_set2)
else:
continue
# ------------- Flattening 'all_text' (list of lists) into 1 list -------------
text_list = [text for sublist in all_text for text in sublist]
text_list = list(set(text_list))
text_list2 = [text for sublist in all_filter_text for text in sublist]
text_list2 = list(set(text_list2))
# ------------- Splitting any text blob that may have digits and numbers together ----
unique_list = []
for each in text_list:
num_split = re.findall(r'[A-Za-z]+|\d+', each)
unique_list.append(num_split)
unique_list2 = []
for each in text_list2:
num_split = re.findall(r'[A-Za-z]+|\d+', each)
unique_list2.append(num_split)
# ------------- Flattening again into one list with just unique values -------------
unique_list = [text for sublist in unique_list for text in sublist]
unique_list = list(set(unique_list))
unique_list2 = [text for sublist in unique_list for text in sublist]
unique_list2 = list(set(unique_list))
# ------------- Return 'final_list' -------------
final_list = set(unique_list + unique_list2)
# If 'final_list' is empty, return empty set
if len(final_list) == 0:
return {}
return final_list
def post_rekog(pic_json, con_fidence=70):
# Getting list of image file names
imageURL_list = pic_json.get("image_locations")
# text from image(s) uploaded by user
all_text = []
# Looping through image(s)
ctr1 = 10000
for imageURL in imageURL_list:
if imageURL != "":
# Saving image URL locally
ctr1 += 1
temp_img = str(ctr1) + ".jpg"
urllib.request.urlretrieve(imageURL, temp_img)
imageFile = './' + temp_img
# ------------- Detecting text from original image ------------
with open(imageFile, 'rb') as image:
# !!!!!! WRAP THIS IN A TRY / CATCH !!!!!!!!!
print('detect started', imageFile)
response = client.detect_text(Image={'Bytes': image.read()})
print('detect completed', imageFile)
# Detected Text (List of Dictionaries)
textDetections = response['TextDetections']
# Parsing Through Detected Text and
# Making list of Unique Sets of Text Dectected
text_found = []
for text in textDetections:
if text['Confidence'] > con_fidence:
text_found.append(text['DetectedText'])
text_set = list(set(text_found))
# Appending detected text in image to "all_text" list
all_text.append(text_set)
else:
continue
# Flattening 'all_text' (list of lists) into 1 list
text_list = [text for sublist in all_text for text in sublist]
text_list = list(set(text_list))
# Splitting any text blob that may have digits and numbers together
unique_list = []
for each in text_list:
num_split = re.findall(r'[A-Za-z]+|\d+', each)
unique_list.append(num_split)
# Flattening again into one list with just unique values
unique_list = [text for sublist in unique_list for text in sublist]
unique_list = list(set(unique_list))
# Return 'final_list'
final_list = set(unique_list)
# If 'final_list' is empty return empty set
if len(final_list) == 0:
return {}
return final_list
# __________ M A I N ________________________
if __name__ == '__main__':
data = {"image_locations": ["https://s3.us-east-2.amazonaws.com/firstpythonbucketac60bb97-95e1-43e5-98e6-0ca294ec9aad/adderall.jpg", ""]}
# data = {"image_locations": ["https://raw.githubusercontent.com/ed-chin-git/ed-chin-git.github.io/master/sample_pill_image.jpg", ""]}
# data = {"image_locations": ["https://s3.us-east-2.amazonaws.com/firstpythonbucketac60bb97-95e1-43e5-98e6-0ca294ec9aad/img2b.JPG",
# "https://s3.us-east-2.amazonaws.com/firstpythonbucketac60bb97-95e1-43e5-98e6-0ca294ec9aad/img2b.JPG"]}
print(post_rekog(data))
print(post_rekog_with_filter(data))
| [
"edgardochin@gmail.com"
] | edgardochin@gmail.com |
67187dc9a9c6274d8f2701640b2cb41b338c82e7 | f034a29ce5748fdd3cf21cbdd968c3464205f5a5 | /Manager/modules/purge.py | 2fa0fa0ce288dce69789cc2b2fca389de494c5e2 | [
"MIT"
] | permissive | Royal-Devendra01/Manager | b20fad5226293faec2bcf13ebfe14a4d4e6ce088 | 0cc3e585cd79f25c0af77946074c869599def128 | refs/heads/master | 2023-04-08T07:37:07.361563 | 2021-04-13T18:22:07 | 2021-04-13T18:22:07 | 339,805,559 | 0 | 1 | MIT | 2021-04-13T08:59:59 | 2021-02-17T17:40:26 | Python | UTF-8 | Python | false | false | 4,394 | py | import html
from typing import List
from telegram import Bot, Update, ParseMode
from telegram.error import BadRequest
from telegram.ext import Filters, run_async
from telegram.utils.helpers import mention_html
from Manager import dispatcher, LOGGER
from Manager.modules.disable import DisableAbleCommandHandler
from Manager.modules.helper_funcs.chat_status import user_admin, can_delete
from Manager.modules.log_channel import loggable
@run_async
@user_admin
@loggable
def purge(bot: Bot, update: Update, args: List[str]) -> str:
msg = update.effective_message
user = update.effective_user
chat = update.effective_chat
if can_delete(chat, bot.id):
if msg.reply_to_message:
message_id = msg.reply_to_message.message_id
start_message_id = message_id - 1
delete_to = msg.message_id - 1
if args and args[0].isdigit():
new_del = message_id + int(args[0])
# No point deleting messages which haven't been written yet.
if new_del < delete_to:
delete_to = new_del
else:
if args and args[0].isdigit():
messages_to_delete = int(args[0])
if messages_to_delete < 1:
msg.reply_text("Can't purge less than 1 message.")
return ""
delete_to = msg.message_id - 1
start_message_id = delete_to - messages_to_delete
for m_id in range(delete_to, start_message_id, -1): # Reverse iteration over message ids
try:
bot.deleteMessage(chat.id, m_id)
except BadRequest as err:
if err.message == "Message can't be deleted":
bot.send_message(chat.id, "Cannot delete all messages. The messages may be too old, I might "
"not have delete rights, or this might not be a supergroup.")
elif err.message != "Message to delete not found":
LOGGER.exception("Error while purging chat messages.")
try:
msg.delete()
except BadRequest as err:
if err.message == "Message can't be deleted":
bot.send_message(chat.id, "Cannot delete all messages. The messages may be too old, I might "
"not have delete rights, or this might not be a supergroup.")
elif err.message != "Message to delete not found":
LOGGER.exception("Error while purging chat messages.")
bot.send_message(chat.id, f"Purge <code>{delete_to - start_message_id}</code> messages.",
parse_mode=ParseMode.HTML)
return (f"<b>{html.escape(chat.title)}:</b>\n"
f"#PURGE\n"
f"<b>Admin:</b> {mention_html(user.id, user.first_name)}\n"
f"Purged <code>{delete_to - start_message_id}</code> messages.")
return ""
@run_async
@user_admin
@loggable
def del_message(bot: Bot, update: Update) -> str:
if update.effective_message.reply_to_message:
user = update.effective_user
chat = update.effective_chat
if can_delete(chat, bot.id):
update.effective_message.reply_to_message.delete()
update.effective_message.delete()
return (f"<b>{html.escape(chat.title)}:</b>\n"
f"#DEL\n"
f"<b>Admin:</b> {mention_html(user.id, user.first_name)}\n"
f"Message deleted.")
else:
update.effective_message.reply_text("Whadya want to delete?")
return ""
__help__ = """
*Admins only:*
• `/del`*:* deletes the message you replied to
• `/purge`*:* deletes all messages between this and the replied to message.
• `/purge <integer X>`*:* deletes the replied message, and X messages following it if replied to a message.
• `/purge <integer X>`*:* deletes the number of messages starting from bottom. (Counts manaully deleted messages too)
"""
DELETE_HANDLER = DisableAbleCommandHandler("del", del_message, filters=Filters.group)
PURGE_HANDLER = DisableAbleCommandHandler("purge", purge, filters=Filters.group, pass_args=True)
dispatcher.add_handler(DELETE_HANDLER)
dispatcher.add_handler(PURGE_HANDLER)
__mod_name__ = "Purges"
__command_list__ = ["del", "purge"]
__handlers__ = [DELETE_HANDLER, PURGE_HANDLER]
| [
"noreply@github.com"
] | Royal-Devendra01.noreply@github.com |
def536434c3705369d9088015894352a6b0c3bfa | 85c856de997b16103d51731b3d528f7d0dd265bf | /PythonOOP/FromTeacher/Lesson3/3.3/script.py | e989fd2d25797e1dc8e2a8bb15f41a19db1ba769 | [] | no_license | NyarukouSAMA/py_geekbrains | ff3a5589b85273af9b398098ec30ce95eb452d58 | 7a216719e1cefc1ffbbb1c2bf65f1f185b43ca8f | refs/heads/master | 2021-07-04T11:19:21.850154 | 2020-12-27T22:36:29 | 2020-12-27T22:36:29 | 210,034,160 | 0 | 0 | null | 2019-10-05T06:33:36 | 2019-09-21T18:24:26 | Python | UTF-8 | Python | false | false | 70 | py | import requests
print( requests.get("http://127.0.0.1:5000/").text )
| [
"afro.detray@gmail.com"
] | afro.detray@gmail.com |
cb4d6b0eaee0bc9f16f7367045c1709809b0a754 | 45301dd6851f09c02df698e8d74f7504b6e789e9 | /datam/settings.py | 69d378c0796aebfb5814299a4e4a6541ad07b196 | [] | no_license | bambeero1/datam-csv | 3e8ba6e611f50f06307420396afc947572276270 | ea43c92d19b9f429e34d8ebf03ddc4c512972126 | refs/heads/master | 2020-07-06T00:36:16.588865 | 2019-12-24T04:44:08 | 2019-12-24T04:44:08 | 202,832,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | # -*- coding: utf-8 -*-
# Scrapy settings for haaj project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'datam'
SPIDER_MODULES = ['datam.spiders']
NEWSPIDER_MODULE = 'datam.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'haaj (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'haaj.middlewares.HaajSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'haaj.middlewares.HaajDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'haaj.pipelines.HaajPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"noreply@github.com"
] | bambeero1.noreply@github.com |
bcee99a9a701fa5486e9c1baba62c7e8182cc60d | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/worksheet/test_date_time_01.py | 50180b311c94a156c9af3597a1e11e5fb953c101 | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 6,888 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
import unittest
from datetime import datetime
from ...worksheet import Worksheet
class TestConvertDateTime(unittest.TestCase):
"""
Test the Worksheet _convert_date_time() method against dates extracted
from Excel.
"""
def setUp(self):
self.worksheet = Worksheet()
def test_convert_date_time(self):
"""Test the _convert_date_time() method."""
# Dates and corresponding numbers from an Excel file.
excel_dates = [
("1899-12-31T00:00:00.000", 0),
("1982-08-25T00:15:20.213", 30188.010650613425),
("2065-04-19T00:16:48.290", 60376.011670023145),
("2147-12-15T00:55:25.446", 90565.038488958337),
("2230-08-10T01:02:46.891", 120753.04359827546),
("2313-04-06T01:04:15.597", 150942.04462496529),
("2395-11-30T01:09:40.889", 181130.04838991899),
("2478-07-25T01:11:32.560", 211318.04968240741),
("2561-03-21T01:30:19.169", 241507.06272186342),
("2643-11-15T01:48:25.580", 271695.07529606484),
("2726-07-12T02:03:31.919", 301884.08578609955),
("2809-03-06T02:11:11.986", 332072.09111094906),
("2891-10-31T02:24:37.095", 362261.10042934027),
("2974-06-26T02:35:07.220", 392449.10772245371),
("3057-02-19T02:45:12.109", 422637.1147234838),
("3139-10-17T03:06:39.990", 452826.12962951389),
("3222-06-11T03:08:08.251", 483014.13065105322),
("3305-02-05T03:19:12.576", 513203.13834),
("3387-10-01T03:29:42.574", 543391.14563164348),
("3470-05-27T03:37:30.813", 573579.15105107636),
("3553-01-21T04:14:38.231", 603768.17683137732),
("3635-09-16T04:16:28.559", 633956.17810832174),
("3718-05-13T04:17:58.222", 664145.17914608796),
("3801-01-06T04:21:41.794", 694333.18173372687),
("3883-09-02T04:56:35.792", 724522.20596981479),
("3966-04-28T05:25:14.885", 754710.2258667245),
("4048-12-21T05:26:05.724", 784898.22645513888),
("4131-08-18T05:46:44.068", 815087.24078782403),
("4214-04-13T05:48:01.141", 845275.24167987274),
("4296-12-07T05:53:52.315", 875464.24574438657),
("4379-08-03T06:14:48.580", 905652.26028449077),
("4462-03-28T06:46:15.738", 935840.28212659725),
("4544-11-22T07:31:20.407", 966029.31343063654),
("4627-07-19T07:58:33.754", 996217.33233511576),
("4710-03-15T08:07:43.130", 1026406.3386936343),
("4792-11-07T08:29:11.091", 1056594.3536005903),
("4875-07-04T09:08:15.328", 1086783.3807329629),
("4958-02-27T09:30:41.781", 1116971.3963169097),
("5040-10-23T09:34:04.462", 1147159.3986627546),
("5123-06-20T09:37:23.945", 1177348.4009715857),
("5206-02-12T09:37:56.655", 1207536.4013501736),
("5288-10-08T09:45:12.230", 1237725.406391551),
("5371-06-04T09:54:14.782", 1267913.412671088),
("5454-01-28T09:54:22.108", 1298101.4127558796),
("5536-09-24T10:01:36.151", 1328290.4177795255),
("5619-05-20T12:09:48.602", 1358478.5068125231),
("5702-01-14T12:34:08.549", 1388667.5237100578),
("5784-09-08T12:56:06.495", 1418855.5389640625),
("5867-05-06T12:58:58.217", 1449044.5409515856),
("5949-12-30T12:59:54.263", 1479232.5416002662),
("6032-08-24T13:34:41.331", 1509420.5657561459),
("6115-04-21T13:58:28.601", 1539609.5822754744),
("6197-12-14T14:02:16.899", 1569797.5849178126),
("6280-08-10T14:36:17.444", 1599986.6085352316),
("6363-04-06T14:37:57.451", 1630174.60969272),
("6445-11-30T14:57:42.757", 1660363.6234115392),
("6528-07-26T15:10:48.307", 1690551.6325035533),
("6611-03-22T15:14:39.890", 1720739.635183912),
("6693-11-15T15:19:47.988", 1750928.6387498612),
("6776-07-11T16:04:24.344", 1781116.6697262037),
("6859-03-07T16:22:23.952", 1811305.6822216667),
("6941-10-31T16:29:55.999", 1841493.6874536921),
("7024-06-26T16:58:20.259", 1871681.7071789235),
("7107-02-21T17:04:02.415", 1901870.7111390624),
("7189-10-16T17:18:29.630", 1932058.7211762732),
("7272-06-11T17:47:21.323", 1962247.7412190163),
("7355-02-05T17:53:29.866", 1992435.7454845603),
("7437-10-02T17:53:41.076", 2022624.7456143056),
("7520-05-28T17:55:06.044", 2052812.7465977315),
("7603-01-21T18:14:49.151", 2083000.7602910995),
("7685-09-16T18:17:45.738", 2113189.7623349307),
("7768-05-12T18:29:59.700", 2143377.7708298611),
("7851-01-07T18:33:21.233", 2173566.773162419),
("7933-09-02T19:14:24.673", 2203754.8016744559),
("8016-04-27T19:17:12.816", 2233942.8036205554),
("8098-12-22T19:23:36.418", 2264131.8080603937),
("8181-08-17T19:46:25.908", 2294319.8239109721),
("8264-04-13T20:07:47.314", 2324508.8387420601),
("8346-12-08T20:31:37.603", 2354696.855296331),
("8429-08-03T20:39:57.770", 2384885.8610853008),
("8512-03-29T20:50:17.067", 2415073.8682530904),
("8594-11-22T21:02:57.827", 2445261.8770581828),
("8677-07-19T21:23:05.519", 2475450.8910360998),
("8760-03-14T21:34:49.572", 2505638.8991848612),
("8842-11-08T21:39:05.944", 2535827.9021521294),
("8925-07-04T21:39:18.426", 2566015.9022965971),
("9008-02-28T21:46:07.769", 2596203.9070343636),
("9090-10-24T21:57:55.662", 2626392.9152275696),
("9173-06-19T22:19:11.732", 2656580.9299968979),
("9256-02-13T22:23:51.376", 2686769.9332335186),
("9338-10-09T22:27:58.771", 2716957.9360968866),
("9421-06-05T22:43:30.392", 2747146.9468795368),
("9504-01-30T22:48:25.834", 2777334.9502990046),
("9586-09-24T22:53:51.727", 2807522.9540709145),
("9669-05-20T23:12:56.536", 2837711.9673210187),
("9752-01-14T23:15:54.109", 2867899.9693762613),
("9834-09-10T23:17:12.632", 2898088.9702850925),
("9999-12-31T23:59:59.000", 2958465.999988426),
]
for excel_date in excel_dates:
date = datetime.strptime(excel_date[0], "%Y-%m-%dT%H:%M:%S.%f")
got = self.worksheet._convert_date_time(date)
exp = excel_date[1]
self.assertEqual(got, exp)
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
dbf588d3937c3c8c4e0f4d1cae686ccc1d6ba96b | ae6f6096fc0e3484a2a2d7fbc472f6bcf5f6449d | /algorithm/sort/SConscript | 290525036473870c0bbe33739509ccf360c922bb | [] | no_license | 0xuye0/XYLearn | be934b746eae04b1a612010b58322db9ad54f61b | 4cf183a9fe707bb281b020b22ccc077b91d7d34c | refs/heads/master | 2021-05-28T23:28:13.098526 | 2015-09-14T13:58:57 | 2015-09-14T13:58:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | import glob
Import('env')
cpp_files = glob.glob('*.cpp')
env.StaticLibrary('sort.a', cpp_files)
| [
"xuye@xuye-HP-Pavilion-dv3-Notebook-PC.(none)"
] | xuye@xuye-HP-Pavilion-dv3-Notebook-PC.(none) | |
4166c4f30135a6518167290572a9335ba71deec7 | 2e9c7dc6e8832ec9d226e86fefed1f4274d3e8c7 | /huang/myproject/myproject/urls.py | 6fdf601637dbfd15f9e3fc059132c0f1b4e6aa91 | [] | no_license | huangyuanyuan1011/my_blog | bc6d0f2a40f7058e5f05e93d1efaa7fb024617a5 | f7cec4ca1dcedc6008eeb25a867028a345f6be5d | refs/heads/master | 2021-05-04T16:44:58.558359 | 2018-02-05T06:48:07 | 2018-02-05T06:48:07 | 120,258,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls.static import static
from myboke import views
from . import settings
urlpatterns = [
path('admin/', admin.site.urls),
# path('base/', views.base),
path('register/', views.register),
path('info/', views.info),
path('login/', views.login),
path('create/', views.create),
path('logout/', views.logout),
path('index/', views.index),
path('article/', views.article),
path('comment/', views.comment),
path('editor/', views.editor),
path('search/', views.search),
path('delete/', views.delete),
path('change_icon/',views.change_icon)
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"15270102382@163.com"
] | 15270102382@163.com |
f5053ad5d39fa304ee1eb66dc931a17b20e9e1b5 | a88479c5387402275c3a9c73cf8ac60958063ed4 | /trunk/images/icon_edit.py | 4798f55f9e757c9c072f1d8698ae4f8be5a09e21 | [] | no_license | BackupTheBerlios/guitarportfolio-svn | acf2e24f2e6a3a54961ac4d853d10f49b6da9658 | c9f719c41feaaa20baafd77d7e2711e1b317b3dd | refs/heads/master | 2021-01-10T19:26:10.559122 | 2007-07-16T10:13:56 | 2007-07-16T10:13:56 | 40,669,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | #----------------------------------------------------------------------
# This file was generated by make_images.py
#
from wx import ImageFromStream, BitmapFromImage, EmptyIcon
import cStringIO, zlib
def getData():
return zlib.decompress(
'x\xda\x01\xe1\x01\x1e\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\
\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\
\x08\x08\x08\x08|\x08d\x88\x00\x00\x01\x98IDAT8\x8d\xa5\xd2\xbdK[Q\x18\x80\
\xf1\xe7\x9c\\\xee\xd5`T\xa8\x8d\x9aBM\x16\x15D\x10.\x88\x95"(8\xa8\xe0R(\
\xed\xd4\xc5Y\x1c\x04\x1d\xfc\x07\xeaT\xa5\xa58\t.\xe2\xa0\x88 \xa2v\x10\x94\
(\x88\x11\xfc\x18\xb4V+\xe4j\xabA\xa3i\xfd\x8a\xb9\xa7K{\xa1\xc4\xe8\x05\xcf\
\xfc\xfe\x1e\xdes8B)\xc5c\x8e\xe6v\xd0\x9a\xad\xca\xbe\x8c\x07\xf7O\x7f\x8a\
\'v\xca\x96^\xdf\xfe\xf7\x1cC\xafu\x1d\xf8}\x12\xbaP\xa9\x00\x95\xad\xef\x10\
R\xb26\xd1\x15\x8c[\xa7\x9f\x84\x9b+X\xb3U*?\xd0\x8d\xe6{\x8e\x90\x06\x00\
\xf6\xd5\x19k\x93\xfd\xb6t\x83\x0b\xcc\x8fh\xb9\x02ke\x98dl\x8bdl\x0b\x00\
\xe9\xd1n\xee\xdd\xc0\xc1F\x9ckk\x12\xfb\xc2\xe0[\xe4\x08_n\x80\xd8\xc1\x0e\
\xd5\xed\xe3"\xe3\x06\xff\xe1\x1f\xa3\xa8\xd4\x0eFI\t\xfeB\x8d\xdb\xe8\x12\
\xd5\xed\xe3\x02\xe0\xce@\x1a\xbe\x89\xa2\x175\x13\x0b\xaf\x92\xf8\xbaKi\xc7\
\x82\xf87\x9b\x16\x98\xeb+w\x8d\xd3\x023\xbde\xca|\xfd\xd95\x06\x10J)\x06\
\xdaB\xc1P\xa9\xbeW\xde\xd0\xcb\xe1\xee\x14\x95\xe6\xb5+\x0c\x7f\x7fb\xd2k\
\xef\xbdx3D\x96\x9e\xcd\xc6\xc4\n\x91x\x8a\xe2\x8a\x97$\xc2_\xb8:<\xa2\xa6g\
\xf9N\xec\x04\x00\x0c\xe9\xe1|\xfe\x03\xe6\xb3\x1c\xb6#\x9bD\x16G\xf0\xfa\
\xfd\xb4\xbc_\xcf\x88\x9d@x\xf3\x17\xf5\xd3\xfd\x84\xcc&.\xf1\xa0\x12\x83x\
\x8f\xa3\x0fb\xe7\r\xde\xd6\x17(]J\xea\xca\xf2)\xf2\x19\xe8O\xf3\xc6\x1a;\
\x17^=\x84\x9d\xc0c\xce\x1f\xfa=\xcc\x11\xc7\xcf\r\x10\x00\x00\x00\x00IEND\
\xaeB`\x82\xc7.\xdf2' )
def getBitmap():
return BitmapFromImage(getImage())
def getImage():
stream = cStringIO.StringIO(getData())
return ImageFromStream(stream)
| [
"jorg@9eb3242f-7b31-0410-b1b8-c03ae08380dd"
] | jorg@9eb3242f-7b31-0410-b1b8-c03ae08380dd |
ea8762af10e7c402026821e816a600291abf5210 | 47e3ae4a2037f72c98dfabdc00eef0585da4fe6b | /news/migrations/0004_auto_20190530_1541.py | 6f4ba4bed0bc476d3e2a3bd09ce17b26f7ec3add | [] | no_license | DHUer/Software_backend | 10e7e100e78df2275101d213da87d1a9e27344a5 | 778acde229081c2639cd270f278a7dd74689b05c | refs/heads/master | 2020-05-22T11:18:05.985619 | 2019-06-15T08:09:27 | 2019-06-15T08:09:27 | 186,320,459 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | # Generated by Django 2.2.1 on 2019-05-30 07:41
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('news', '0003_auto_20190523_1624'),
]
operations = [
migrations.AlterField(
model_name='article',
name='date',
field=models.DateField(default=datetime.datetime(2019, 5, 30, 7, 41, 10, 194214, tzinfo=utc)),
),
migrations.AlterField(
model_name='collectarticle',
name='date',
field=models.DateField(default=datetime.datetime(2019, 5, 30, 7, 41, 10, 197214, tzinfo=utc)),
),
migrations.AlterField(
model_name='vocabulary',
name='date',
field=models.DateField(default=datetime.datetime(2019, 5, 30, 7, 41, 10, 195214, tzinfo=utc)),
),
migrations.CreateModel(
name='testHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cover_rate', models.CharField(max_length=500)),
('date', models.DateField(default=datetime.datetime(2019, 5, 30, 7, 41, 10, 198211, tzinfo=utc))),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.user')),
],
),
]
| [
"751925352@qq.com"
] | 751925352@qq.com |
3ce5a14ec1f89e24ee3ad1bc35f7a61bd00ee311 | a1e8d862e11be1f8aacb530bff2498f359a2be86 | /app/common/queue_managemet.py | ab42958bbb4c6e752a79dd3588913fa8749233d7 | [] | no_license | vignesh-madanan/AIforimagev2 | 475be0bd324a728290a5bc0a726eb9c043227628 | 0b7eee6711b6b13076e7f4d6d54c628b2e15abd5 | refs/heads/master | 2022-11-16T09:08:49.904985 | 2020-06-28T08:18:52 | 2020-06-28T08:18:52 | 190,850,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | import sys
import zmq
import json
import os
import logging
import traceback
from multiprocessing import Process
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ProxyManager:
def __init__(self):
self.SENDER_PORT = 3344
self.RECEIVER_PORT = 3345
self.SENDER_PROXY = f"tcp://*:{self.SENDER_PORT}"
self.RECEIVER_PROXY = f"tcp://*:{self.RECEIVER_PORT}"
self.__number_of_processes__ = 1
def main(self):
try:
context = zmq.Context(1)
# Socket facing clients
sender = context.socket(zmq.XREP)
sender.bind(self.SENDER_PROXY)
# Socket facing services
receiver = context.socket(zmq.XREQ)
receiver.bind(self.RECEIVER_PROXY)
print(f'Starting Proxy | Sender:{self.SENDER_PROXY} | Receiver{self.SENDER_PROXY}')
zmq.device(zmq.QUEUE, sender, receiver)
except Exception as e:
print(e)
print("bringing down zmq device")
traceback.print_exc()
finally:
sender.close()
receiver.close()
context.term()
def start_proxy_server(self):
self.main()
#p = Process(target=self.main)
#p.start()
#p.join()
class Sender(ProxyManager):
def __init__(self):
self.client_id = 1
self.context = zmq.Context()
super().__init__()
self.socket = self.context.socket(zmq.REQ)
self.socket.connect("tcp://localhost:%s" % self.SENDER_PORT)
def send_message(self, message):
if isinstance(message, dict):
mm = {f'self.client_id':message}
self.socket.send_json(mm)
message = self.socket.recv()
return True
raise TypeError('Message should be of type Dict')
class Receiver(ProxyManager):
def __init__(self):
self.server_id = 2
super().__init__()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.connect("tcp://localhost:%s" % self.SENDER_PORT)
def receive_message(self):
message = self.socket.recv()
message = json.dumps(message)
print("Received Message", message)
self.socket.send({self.server_id:True})
return message
if __name__ == "__main__":
if sys.argv[1] == 'proxy':
ProxyManager().start_proxy_server()
if sys.argv[1] == 'sender':
sender = Sender()
sender.send_message({'test':'TEST'})
if sys.argv[1] == 'receiver':
receiver = Receiver()
receiver.receive_message() | [
"vigneshmadanan@bitbucket.org"
] | vigneshmadanan@bitbucket.org |
aee51eaa1c337062e93dd75f47cb2d5b50be65af | bf8fbff494eaaf8046a41d676d553d4e8144ddeb | /tabServers.py | 9b5de732410be373d834a6d261328ab171f44631 | [
"MIT"
] | permissive | TonyWhitley/rFactory | 685755a2a34f3ac6d8e7f4f0c4cf06eb9cc0d000 | b556a7746fbe50e231e615136a0c8d3a491d50cf | refs/heads/master | 2023-01-05T21:25:42.490043 | 2022-03-09T16:02:22 | 2022-03-09T16:02:22 | 145,017,219 | 2 | 1 | MIT | 2022-12-27T15:37:49 | 2018-08-16T17:03:38 | Python | UTF-8 | Python | false | false | 11,480 | py | # Python 3
from multiprocessing.dummy import Pool as ThreadPool
import os
import sys
import tkinter as tk
from tkinter import ttk
from lib.MC_table import Multicolumn_Listbox
from data.rFactoryConfig import config_tabServer, serverTags
##from data import getAllServerData, getSingleServerData
import edit.serverFavourites as serverFavourites
rF2_serverNotify_path = r'..\rF2_serverNotify\steps'
if os.path.exists(rF2_serverNotify_path):
sys.path.append(rF2_serverNotify_path)
import rF2_serverNotify # pylint: disable=import-error # NOQA: E402 don't move import
NOFILTER = '---' # String for not filtering
# Development fixtures: canned server rows (used by __ServerData.fetchData's
# dev shortcut) and favourite servers mapped to their saved passwords.
dummyData = {
    'server 1': {
        'Favourite': 'N',
        'Server Name': 'server 1',
        'Track Name': 'Not yet implemented',
        'Humans': '3',
        'Maybe': '0',
        'AI': '0',
        'Max': '20',
        'Password': 'Y',
        'Version': '11112',
        'blank': ''},
    'server 2': {
        'Favourite': 'N',
        'Server Name': 'server 2',
        'Track Name': 'Not yet implemented',
        'Humans': '3',
        'Maybe': '0',
        'AI': '0',
        'Max': '20',
        'Password': '',
        'Version': '11112',
        'blank': ''},
    'server 3': {
        'Favourite': 'N',
        'Server Name': 'Need to show',
        'Track Name': 'Silverstone',
        'Humans': '0',
        'Maybe': '0',
        'AI': '5',
        'Max': '20',
        'Password': 'N',
        'Version': '11112',
        'blank': ''},
    'server 4': {
        'Favourite': 'N',
        'Server Name': 'session too',
        'Track Name': 'Oulton Park',
        'Humans': '0',
        'Maybe': '5',
        'AI': '0',
        'Max': '20',
        'Password': 'Y',
        'Version': '11112',
        'blank': ''}}
# Map of favourite server name -> stored password ('' if none).
dummyFavourites = {
    'server 1': 'password',
    'server 3': '',
    'server 4': 'XYZZY',
}
class ServerQuery:
    """Queries every known rF2 server in parallel worker threads and
    collects one summary row per responding server, keyed by name."""

    def __init__(self):
        self.serverObj = rF2_serverNotify.readServersFile()
        self.newNames = []
        self.serverData = {}
        servers = self.serverObj.getServerNames()
        # Multi-thread querying all servers to speed things up
        # make the Pool of workers
        # BUG FIX: len(servers) // 10 is 0 for fewer than 10 servers, and
        # ThreadPool(0) raises ValueError — always use at least one worker.
        pool = ThreadPool(max(1, len(servers) // 10))
        # read the servers in their own threads
        # and return the results
        results = pool.map(self.getServerData, servers)
        # close the pool and wait for the work to finish
        pool.close()
        pool.join()

    def getServerData(self, server):
        """Query one server; if it answers OK, store its summary row."""
        status, humans, AI, probables, info = self.serverObj.getPlayerCounts(
            server)
        if status == 'OK':
            _entry = {}
            _entry['Favourite'] = 'N'
            _entry['Server Name'] = server
            _entry['Track Name'] = info['map']
            _entry['Humans'] = str(humans)
            _entry['Maybe'] = str(probables)
            _entry['AI'] = str(AI)
            _entry['Max'] = str(info['max_players'])
            _entry['Password'] = str(info['password_protected'])
            _entry['Version'] = info['version']
            _entry['blank'] = ''
            if _entry['Server Name'] in dummyFavourites:
                # NOTE(review): '_serverData' looks like a stray key — the
                # surrounding code suggests this was meant to flag
                # _entry['Favourite'] = 'Y'.  Left unchanged; confirm intent.
                _entry['_serverData'] = 'Y'
                # _entry['Password'] = dummyFavourites[v['Server Name']]
            self.serverData[server] = _entry

    def getData(self):
        """Return the dict of collected server rows."""
        return self.serverData
def getSingleServerData(ident, tags):
    """Stub: intended to fetch one server's editable fields (per *tags*)
    for the right-click editor popup — not implemented yet (see the
    commented-out call in Tab.__on_right_click)."""
    pass
#########################
# The tab's public class:
#########################
class Tab:
    """Servers tab: a multicolumn table of rF2 servers plus one filter
    combobox per configured column.  Server data is only fetched when the
    tab first becomes visible (see activate)."""
    def __init__(self, parentFrame):
        """ Put this into the parent frame """
        self.parentFrame = parentFrame
        self.settings = None # PyLint
        self.activated = False
        # callback when this tab is selected
        parentFrame.bind("<Visibility>", self.activate)
        # Main table widget; the last column is shrunk to zero width later
        # (used as a hidden bookkeeping column).
        self.mc = Multicolumn_Listbox(
            parentFrame,
            config_tabServer['serverColumns'],
            striped_rows=(
                "white",
                "#f2f2f2"),
            command=self.__on_select,
            right_click_command=self.__on_right_click,
            adjust_heading_to_content=True,
            height=30,
            cell_anchor="center")
        _label = tk.Label(parentFrame, text='Getting server info...')
        _label.grid(column=0, row=0, sticky='w')
        parentFrame.update()
    def activate(self, event):
        """
        Don't actually fetch the data from servers until this tab is selected.
        """
        if self.activated:
            return # Already activated
        # Create a temporary frame with a progress message
        _info = ttk.Frame(
            self.parentFrame,
            width=120,
            height=120,
            relief='sunken',
            borderwidth=5)
        _info.grid()
        _label = tk.Label(_info, text='Getting server info...')
        _label.grid(column=0, row=0, sticky='e')
        #tk.Frame(self.parentFrame, 'Getting server info...')
        o_serverData = self.__ServerData()
        serverData = o_serverData.fetchData()
        print('%d servers found' % len(serverData))
        # calculate the column widths to fit the headings and the data
        colWidths = []
        for col in config_tabServer['serverColumns']:
            colWidths.append(len(col))
        for __, row in serverData.items():
            for col, column in enumerate(row):
                if len(row[column]) > colWidths[col]:
                    colWidths[col] = len(row[column])
        # NOTE(review): `row` leaks from the loop above — this raises
        # NameError if serverData is empty; confirm at least one server
        # is always assumed.
        for col, column in enumerate(row):
            self.mc.configure_column(col, width=colWidths[col] * 7 + 6)
        # Hide the trailing bookkeeping column.
        self.mc.configure_column(
            len(config_tabServer['serverColumns']) - 1, width=0, minwidth=0)
        # Justify the data in column 2 and 3
        self.mc.configure_column(1, anchor='w')
        self.mc.configure_column(2, anchor='w')
        self.mc.interior.grid(
            column=0, row=1, pady=2, columnspan=len(
                config_tabServer['serverColumns']))
        # Build one filter combobox per configured filter column.
        o_filter = self.__Filter(
            self.parentFrame,
            config_tabServer['serverColumns'],
            colWidths,
            o_serverData,
            self.mc)
        for _filter in config_tabServer['serverFilters']:
            o_filter.makeFilter(_filter, serverData)
        # Initial dummy filter to load data into table
        o_filter.filterUpdate(None)
        self.activated = True
        self.mc.select_row(0)
        # Kill the temporary frame with a progress message
        _info.destroy()
    def getSettings(self):
        """ Return the settings for this tab """
        # Currently returns an empty dict — selection/filters not persisted.
        settings = {}
        return settings
        # return self.settings # filters too? Probably not
    def setSettings(self, settings):
        """ Set the settings for this tab """
        if self.activated:
            serverID = settings[-1]
            i = 2 # the row for serverID
            self.mc.deselect_all() # clear what is selected.
            self.mc.select_row(i)
    def __on_select(self, data):
        # Remember the clicked row's values for later retrieval.
        self.settings = data
        print('DEBUG')
        print("called command when row is selected")
        print(data)
        print("\n")
    def __on_right_click(self, data):
        # don't change data self.settings = data
        print('DEBUG')
        print("called command when row is right clicked")
        print(data)
        print("\n")
        # Open the per-server favourites editor popup (currently with
        # hard-coded placeholder values).
        top = tk.Toplevel(self.parentFrame)
        top.title("Server editor")
        fields = serverTags
        ##data = getSingleServerData(ident=data[-1], tags=fields)
        o_tab = serverFavourites.Editor(top, 'Server 2', 1, 'password')
        # Need to init the Tab again to get fresh data.
    class __ServerData:
        """ Fetch and filter the server data """
        def __init__(self):
            self.data = None # Pylint
            self.filteredData = None
        def fetchData(self):
            """ Fetch the raw data from wherever """
            # `if 0:` disables the live ServerQuery path during development.
            if 0:
                self.data = ServerQuery().getData()
            else: # dev. shortcut - fake server data
                self.data = dummyData
            # getAllServerData(tags=config_tabServer['serverColumns'], maxWidth=20)
            return self.data
        def filterData(self, filters):
            """
            Filter items of the data dict that match all of the filter combobox selections.
            filters is a list of column name, comboBox text() function pairs """
            # NOTE(review): `_data` is built but never used — dead code.
            _data = []
            for _item, _values in self.data.items():
                _data.append(_values.items())
            self.filteredData = []
            for __, _row in self.data.items():
                _match = True
                for _filter in filters:
                    # A row matches unless some filter is set (not NOFILTER)
                    # and its column value differs.
                    if _row[_filter[0]] != _filter[1](
                    ) and _filter[1]() != NOFILTER:
                        _match = False
                        continue
                if _match:
                    _r = []
                    for colName in config_tabServer['serverColumns']:
                        _r.append(_row[colName])
                    self.filteredData.append(_r)
            return self.filteredData
        def setSelection(self, settings):
            """ Match settings to self.data, set table selection to that row """
            # tbd
            pass
    class __Filter:
        """ Filter combobox in frame """
        def __init__(self, mainWindow, columns, colWidths, o_serverData, mc):
            self.columns = columns
            self.colWidths = colWidths
            self.mainWindow = mainWindow
            self.o_serverData = o_serverData
            self.mc = mc
            self.filters = []
        def makeFilter(self, filterName, serverData):
            # One labelled combobox per filter column, offering every
            # distinct value in the data plus the NOFILTER sentinel.
            tkFilterText = tk.LabelFrame(self.mainWindow, text=filterName)
            _col = self.columns.index(filterName)
            tkFilterText.grid(column=_col, row=0, pady=0)
            s = set()
            for __, item in serverData.items():
                s.add(item[filterName])
            vals = [NOFILTER] + sorted(list(s))
            #modderFilter = tk.StringVar()
            tkComboFilter = ttk.Combobox(
                tkFilterText,
                # textvariable=modderFilter,
                # height=len(vals),
                height=10,
                width=self.colWidths[_col])
            tkComboFilter['values'] = vals
            tkComboFilter.grid(column=1, row=0, pady=5)
            tkComboFilter.current(0)
            tkComboFilter.bind("<<ComboboxSelected>>", self.filterUpdate)
            # Store the getter so filterData can read the live selection.
            self.filters.append([filterName, tkComboFilter.get])
        def filterUpdate(self, event):
            """ Callback function when combobox changes """
            serverData = self.o_serverData.filterData(self.filters)
            self.mc.table_data = serverData
            self.mc.select_row(0)
        def resetFilters(self):
            """ Reset all the filters to --- """
            # tbd
            # self.mc.select_row(0)
        def setFilters(self, settings):
            """ Set all the filters to settings """
            # tbd
            self.mc.select_row(0)
if __name__ == '__main__':
    # Standalone development entry point: host the tab in a bare Tk window.
    root = tk.Tk()
    devFrame = ttk.Frame(
        root,
        width=1200,
        height=1200,
        relief='sunken',
        borderwidth=5)
    devFrame.grid()
    devTab = Tab(devFrame)
    devTab.activate(None)
    root.mainloop()
| [
"Tony_Whitley@hotmail.com"
] | Tony_Whitley@hotmail.com |
0ca9f879ee04e0deca2c7d730207537a558a3d0c | 230378e3cb26ddf26bc172d3d041cf72be6a8cb3 | /challenge7/challenge7.py | 79d50a819d2d6ea0ca45fa99ce3ad9186c7b6adc | [] | no_license | zeeshanny/pyBiteChallenges | 9e09a5fde383df0201b27b00619d75874fc9ecfe | abfd8c6614132503c860fa0693810314b81b6e28 | refs/heads/master | 2020-03-10T11:02:41.993409 | 2018-05-09T02:38:29 | 2018-05-09T02:38:29 | 129,346,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | """
Given the provided cars dictionary:
Get all Jeeps
Get the first car of every manufacturer.
Get all vehicles containing the string Trail in their names
(should work for other grep too)
Sort the models (values) alphabetically
"""
import operator
# Exercise fixture: manufacturer -> list of model names (original ordering).
cars = {
    'Ford': ['Falcon', 'Focus', 'Festiva', 'Fairlane'],
    'Holden': ['Commodore', 'Captiva', 'Barina', 'Trailblazer'],
    'Nissan': ['Maxima', 'Pulsar', '350Z', 'Navara'],
    'Honda': ['Civic', 'Accord', 'Odyssey', 'Jazz'],
    'Jeep': ['Grand Cherokee', 'Cherokee', 'Trailhawk', 'Trackhawk']
}
def get_all_jeeps(car_data=None):
    """Return a comma separated string of Jeep models (original order).

    Generalized: *car_data* may supply an explicit manufacturer->models
    dict; it defaults to the module-level `cars` for backward compatibility.
    """
    car_data = cars if car_data is None else car_data
    return ", ".join(car_data['Jeep'])
def get_first_model_each_manufacturer(car_data=None):
    """Return the first model of every manufacturer (original ordering).

    Generalized: *car_data* may supply an explicit manufacturer->models
    dict; it defaults to the module-level `cars` for backward compatibility.
    """
    car_data = cars if car_data is None else car_data
    # Avoid shadowing the builtin name `list` (the original did).
    return [models[0] for models in car_data.values()]
def get_all_matching_models(grep='trail', car_data=None):
    """Return all models containing the case-insensitive *grep* string,
    sorted alphabetically.

    Generalized: *car_data* may supply an explicit manufacturer->models
    dict; it defaults to the module-level `cars` for backward compatibility.
    """
    car_data = cars if car_data is None else car_data
    grep = grep.lower()
    models = sum(car_data.values(), [])  # flatten list of lists
    return sorted(model for model in models if grep in model.lower())
def sort_car_models(car_data=None):
    """Return the cars dict with each manufacturer's model list sorted.

    Generalized: *car_data* may supply an explicit manufacturer->models
    dict; it defaults to the module-level `cars` for backward compatibility.
    """
    car_data = cars if car_data is None else car_data
    return {manufacturer: sorted(models)
            for manufacturer, models in car_data.items()}
| [
"zeeshan.sattar@tntp.org"
] | zeeshan.sattar@tntp.org |
d799d116e25cd1fae67137dd66a33f95dcb3703f | dfe569108641a4ff1a43cb6f84e7e052b77dafdf | /week06/week06_40740304S.py | 866d7fe2d012d51e911217e047e8442c63b97fd9 | [] | no_license | LYASDF/NTNU_TextProcessing_2021 | e79cf071809866df78b3dad668e85b57dfc51d00 | 33b6843396745782d0a411726200dd49d4ffb45b | refs/heads/master | 2023-07-14T11:42:03.573181 | 2021-08-22T15:00:01 | 2021-08-22T15:00:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import json
# 讀取 json 的程式
def jsonTextReader(jsonFilePath):
    """Load a UTF-8 JSON file and return the value of its 'text' field."""
    with open(jsonFilePath, 'r', encoding='utf-8') as handle:
        document = json.load(handle)
    return document['text']
# 將字串轉為「句子」列表的程式
def text2Sentence(inputSTR):
# replace '...' and '…' with ''
unused = ['...', '…']
for sep in unused:
inputSTR = inputSTR.replace(sep, '')
# replace ',', '、', '。', '「', '」' with separator '\n'
separators = [',', '、', '。', '「', '」']
for sep in separators:
inputSTR = inputSTR.replace(sep, '\n')
# if ',' is not located in number, replace it with '\n'
currentIndex = 0
while True:
# find next ','
currentIndex = inputSTR.find(',', currentIndex)
# ',' not find
if currentIndex == -1:
break
# ',' is located in numbers
if str.isdigit(inputSTR[currentIndex - 1]) and str.isdigit(inputSTR[currentIndex + 1]):
currentIndex += 1
continue
inputSTR = inputSTR[:currentIndex] + '\n' + inputSTR[currentIndex + 1:]
currentIndex += 1
return inputSTR.strip('\n').split('\n')
if __name__ == "__main__":
    # Path of the news.json file to read
    newsJsonPath = './example/news.json'
    # Open news.json with the JSON-reading helper above
    text = jsonTextReader(newsJsonPath)
    # Pass the extracted text to the sentence splitter; keep as newsLIST
    newsLIST = text2Sentence(text)
    # Path of the test.json file to read
    testJsonPath = './example/test.json'
    # Read the expected sentence list from test.json into testLIST
    with open(testJsonPath, 'r', encoding='utf-8') as f:
        jsonFile = json.load(f)
        testLIST = jsonFile['sentence']
    print(newsLIST)
    print(testLIST)
    # Check whether the homework requirement is met
    if newsLIST == testLIST:
        print("作業過關!")
    else:
        print("作業不過關,請回到上面修改或是貼文求助!")
| [
"79515901+40740304S@users.noreply.github.com"
] | 79515901+40740304S@users.noreply.github.com |
03f855e35fc7d8912ad53ea48111f34ad916183d | c167d527f8b0c0a37b4698d243083ce7ef523212 | /prorestaurant/myrestaurant/apps.py | 05314cf1d77342cc8acb0ca8320801e94574ea91 | [] | no_license | madhusudan-j/prorestaurant | 1bc7a96a1028deb91ec8451dfa0fcd2432f762d2 | e22819a85dc57e974d69d9daa21860af1a46f1bd | refs/heads/master | 2023-04-29T20:34:07.608661 | 2019-08-05T11:57:36 | 2019-08-05T11:57:36 | 200,229,495 | 1 | 0 | null | 2023-04-21T20:35:29 | 2019-08-02T12:18:02 | Python | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class MyrestaurantConfig(AppConfig):
    """Django AppConfig for the 'myrestaurant' application."""
    name = 'myrestaurant'
| [
"noreply@github.com"
] | madhusudan-j.noreply@github.com |
e445e69c3f4201e6a82f43d62ce2eed19fa2b935 | bb47a173f6e17e5b20f4d19830466767a47b5988 | /models/system_jobs.py | 76f165893f3bd50394463b7ab4e35ae9ae355a3b | [] | no_license | gmancoder/MailChimp-Management-Console | c0ad1d7d6995d573b962eaeb2e2194b1b2ff01d7 | 93a7fc61348c57c1c8d45f60a3614171f6307c95 | refs/heads/master | 2020-03-07T06:46:09.251318 | 2018-03-29T20:52:50 | 2018-03-29T20:52:50 | 127,331,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,980 | py | #!/usr/bin/env python
from models.shared import db
from models.imports import *
from models.exports import *
from models.tracking import *
import datetime
import os
import csv
class SystemJob(db.Model):
__tablename__ = "system_job"
id = db.Column(db.Integer, primary_key=True)
brand_id = db.Column(db.Integer, db.ForeignKey('brand.id'))
activity_type = db.Column(db.String(100))
activity_id = db.Column(db.Integer)
overall_status = db.Column(db.Integer, default=0)
status_message = db.Column(db.String(2000), nullable=True)
start_date = db.Column(db.TIMESTAMP, nullable=True)
end_date = db.Column(db.TIMESTAMP, nullable=True)
created = db.Column(db.DateTime)
created_by = db.Column(db.String(10))
updated = db.Column(db.DateTime)
updated_by = db.Column(db.String(10))
def __init__(self):
self.created = datetime.datetime.now()
self.updated = datetime.datetime.now()
def run(self):
import functions.activities as activities
self.start_date = datetime.datetime.now()
self.overall_status = 1
db.session.commit()
if self.activity_type == 'tracking_exports':
import functions.tracking as trk
activity = TrackingExportActivity.query.get(self.activity_id)
if not activities.init_activity(self, activity):
return False
export_def = TrackingExportDefinition.query.get(activity.tracking_export_definition_id)
if not activities.check_object(self, activity, export_def):
return False
if export_def.target_activity == '':
activities.process_job_error(self, activity, export_def, 'Target Activity not populated');
file_path = '%s%s' % (activities.get_ftp_path(self.brand_id, "exports"), export_def.file_path)
try:
ofh = open(file_path, 'w')
writer = csv.writer(ofh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
efh = open('%s.log' % file_path, 'w')
log_writer = csv.writer(efh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
log_writer.writerow(["Row", "RowData", "Message"])
except Exception as e:
activities.process_job_error(self, activity, export_def, str(e))
return False
status, res = trk.export_tracking_detail(export_def, writer, log_writer)
if not status:
activities.process_job_error(self, activity, export_def, res)
efh.close()
ofh.close()
return False
#Export Successful
activity.status = 2
self.overall_status = 2
self.status_message = "Export Completed"
activity.total_rows = res['total']
activity.errors = res['errors']
activity.end_date = datetime.datetime.now()
self.end_date = datetime.datetime.now()
db.session.commit()
efh.close()
ofh.close()
if(export_def.notify_addresses != None and len(export_def.notify_addresses) > 0):
status,res = activities.send_notification(export_def, self.activity_type, res)
print res
return True
if self.activity_type == "exports":
activity = ExportActivity.query.get(self.activity_id)
if not activities.init_activity(self, activity):
return False
export_def = ExportDefinition.query.get(activity.export_definition_id)
if not activities.check_object(self, activity, export_def):
return False
if export_def.fields.count() == 0:
msg = "No Fields Passed to Export"
activities.process_job_error(self, activity, export_def, msg)
return False
if export_def.target_objects.count() == 0:
msg = "No Objects to Export"
activities.process_job_error(self, activity, export_def, msg)
return False
file_path = '%s%s' % (activities.get_ftp_path(self.brand_id, "exports"), export_def.file_path)
try:
ofh = open(file_path, 'w')
writer = csv.writer(ofh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
efh = open('%s.log' % file_path, 'w')
log_writer = csv.writer(efh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
log_writer.writerow(["Row", "RowData", "Message"])
except Exception as e:
activities.process_job_error(self, activity, export_def, str(e))
return False
if export_def.target_type == "lists":
import functions.lists as lists
status, res = lists.export_lists(export_def, writer, log_writer)
elif export_def.target_type == "subscribers":
import functions.lists as lists
status, res = lists.export_subscribers(export_def, writer, log_writer)
elif export_def.target_type == "template_categories":
import functions.templates as tmpl
status, res = tmpl.export_categories(export_def, writer, log_writer)
elif export_def.target_type == "segment_subscribers":
import functions.segments as seg
status, res = seg.export_subscribers(export_def, writer, log_writer)
elif export_def.target_type == "campaign_tracking":
import functions.tracking as trk
status, res = trk.export_tracking_summary(export_def, writer, log_writer)
else:
msg = "Export target_type of '%s' not defined" % activity.target_type
activities.process_job_error(self, activity, export_def, msg)
efh.close()
ofh.close()
return False
if not status:
activities.process_job_error(self, activity, export_def, msg)
efh.close()
ofh.close()
return False
#Export Successful
activity.status = 2
self.overall_status = 2
self.status_message = "Export Completed"
activity.total_rows = res['total']
activity.errors = res['errors']
activity.end_date = datetime.datetime.now()
self.end_date = datetime.datetime.now()
db.session.commit()
efh.close()
ofh.close()
if(export_def.notify_addresses != None and len(export_def.notify_addresses) > 0):
status,res = activities.send_notification(export_def, self.activity_type, res)
print res
return True
elif self.activity_type == "imports":
activity = ImportActivity.query.get(self.activity_id)
if not activities.init_activity(self, activity):
return False
import_def = ImportDefinition.query.get(activity.import_definition_id)
if not activities.check_object(self, activity, import_def):
return False
file_path = '%s%s' % (activities.get_ftp_path(self.brand_id, "imports"), import_def.file_path)
if not os.path.exists(file_path):
msg = "File '%s' Not Found" % file_path
activities.process_job_error(self, activity, import_def, msg)
return False
try:
fh = open(file_path, 'r')
reader = csv.reader(fh, delimiter=str(import_def.file_delimiter), quoting=csv.QUOTE_ALL)
ofh = open('%s.log' % file_path, 'w')
writer = csv.writer(ofh, delimiter=",", quoting=csv.QUOTE_ALL)
writer.writerow(["Row", "RowData", "Message"])
except Exception as e:
msg = str(e)
activities.process_job_error(self, activity, import_def, msg)
#fh.close()
#ofh.close()
return False
if import_def.mappings.count() == 0:
msg = "Import contains no mappings"
writer.writerow(["0", "", msg])
activities.process_job_error(self, activity, import_def, msg)
fh.close()
ofh.close()
return False
if import_def.target_type == "lists":
import functions.lists as lists
status, res = lists.import_lists(import_def, reader, writer)
elif import_def.target_type == "subscribers":
import functions.lists as lists
status, res = lists.import_subscribers(import_def, reader, writer)
elif import_def.target_type == "template_categories":
import functions.templates as tmpl
status, res = tmpl.import_categories(import_def, reader, writer)
else:
msg = "Import target_type of '%s' not defined" % activity.target_type
writer.writerow(["0", "", msg])
activities.process_job_error(self, activity, import_def, msg)
fh.close()
ofh.close()
return False
if not status:
writer.writerow(["0", "", res])
activities.process_job_error(self, activity, import_def, msg)
fh.close()
ofh.close()
return False
#Import Successful
activity.status = 2
self.overall_status = 2
self.status_message = "Import Completed"
activity.total_rows = res['total']
activity.inserts = res['inserted']
activity.updates = res['updated']
activity.ignored = res['ignored']
activity.errors = res['errors']
activity.end_date = datetime.datetime.now()
self.end_date = datetime.datetime.now()
db.session.commit()
fh.close()
ofh.close()
if(import_def.notify_addresses != None and len(import_def.notify_addresses) > 0):
status,res = activities.send_notification(import_def, self.activity_type, res)
print res
return True
else:
self.overall_status = 3
self.status_message = "Activity type '%s' not defined" % self.activity_type
self.end_date = datetime.datetime.now()
db.session.commit()
return False
| [
"grbrewer@grbrewer.com"
] | grbrewer@grbrewer.com |
c9eb0c5a631ba807850bc235622337d71913098d | e877d2bdaee44a1c3d124be97003cbba737d28bd | /regex/urls.py | c899cbca885fbbb6cb2d417ed4c7d1bb7e7485e3 | [] | no_license | daddeo/PythonTest | 9cd0d6326ce5811ef62f73e4b7094b2ef66d4417 | df57a160b828d77e7fcfab6244f450fcd5d2238d | refs/heads/master | 2021-03-22T21:18:08.603394 | 2020-05-13T02:30:17 | 2020-05-13T02:30:17 | 247,399,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | import re
# Sample text: one URL per line.
urls = """
https://www.google.com
http://coreyms.com
https://youtube.com
https://www.nasa.gov
"""

# Groups: scheme (implicit), optional 'www.' prefix, domain name, TLD.
pattern = re.compile(r"https?://(www\.)?(\w+)(\.\w+)")

# Rewrite each URL to just 'domain.tld' (back-references to groups 2 and 3);
# the commented-out finditer demo that followed has been removed as dead code.
subbed_urls = pattern.sub(r"\2\3", urls)

print(subbed_urls)
| [
"jason.wallenfang@gmail.com"
] | jason.wallenfang@gmail.com |
d0bc437a44318504958582938e6622cdb01b23a9 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_01_20_gsh_database_red/integrate_parallel.py | 19794112e938772715aa9bad100ef9986045d7cc | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | import numpy as np
# import itertools as it
import db_functions as fn
import gsh_hex_tri_L0_16 as gsh
import h5py
import time
import sys
# Cluster worker script (Python 2): integrates one slice of the GSH x cosine
# basis products against Y, writing coefficients to coeff_prt_<tnum>.hdf5.
tnum = np.int64(sys.argv[1])

filename = 'log_integrate_parallel_%s.txt' % str(tnum)

""" Load Y vec """
f = h5py.File('var_extract_total.hdf5', 'r')
var_set = f.get('var_set')

sinphi = np.sin(var_set[:, 2])

Y = var_set[:, 4]
# BUG FIX: `f.close` (no parentheses) only referenced the bound method and
# never closed the file; call it.
f.close()

""" Initialize important variables """

# these indices are defined for the sampled db inputs
inc = 6  # degree increment for angular variables
sub2rad = inc*np.pi/180.
n_th = 60/inc  # number of theta samples for FZ
n_p1 = 360/inc  # number of phi1 samples for FZ
n_P = 90/inc  # number of Phi samples for FZ
n_p2 = 60/inc  # number of phi2 samples for FZ
N_p = 215  # number of GSH bases to evaluate
N_q = 9  # number of cosine bases to evaluate
L_th = np.pi/3.
n_eul = n_p1*n_P*n_p2

n_jobs = 10.  # number of jobs submitted to cluster

""" Calculate basis function indices """
cmax = N_p*N_q  # total number of permutations of basis functions
fn.WP(str(cmax), filename)

# cmat is the matrix containing all permutations of basis function indices
cmat = np.unravel_index(np.arange(cmax), [N_p, N_q])
cmat = np.array(cmat).T

""" Deal with the parallelization of this operation. specifically pick range
of indxmat to calculate """
n_ii = np.int64(np.ceil(np.float(cmax)/n_jobs))  # number dot products per job
fn.WP(str(n_ii), filename)

ii_stt = tnum*n_ii  # start index
if (tnum+1)*n_ii > cmax:
    ii_end = cmax
else:
    ii_end = (tnum+1)*n_ii  # end index

msg = "ii_stt = %s" % ii_stt
fn.WP(msg, filename)
msg = "ii_end = %s" % ii_end
fn.WP(msg, filename)

""" perform the orthogonal regressions """
coeff_prt = np.zeros(ii_end-ii_stt, dtype='complex128')

f = h5py.File('X_parts.hdf5', 'r')

c = 0

indxvec = gsh.gsh_basis_info()

# Quadrature cell sizes for the Euler-angle and theta discretisations.
bsz_gsh = ((np.pi**3)/3)/n_eul
bsz_cos = L_th/n_th

for ii in xrange(ii_stt, ii_end):

    msg = str(ii)
    fn.WP(msg, filename)

    st = time.time()

    p, q = cmat[ii, :]
    basis_p = f.get('p_%s' % p)[...]
    basis_q = f.get('q_%s' % q)[...]

    ep_set = np.squeeze(basis_p)*basis_q

    msg = "load time: %ss" % np.round(time.time()-st, 3)
    fn.WP(msg, filename)

    st = time.time()

    # Normalisation constants for the GSH (per degree l) and cosine bases.
    l = indxvec[p, 0]
    c_gsh = (1./(2.*l+1.))*(3./(2.*np.pi**2))

    if q == 0:
        c_cos = 1./L_th
    else:
        c_cos = 2./L_th

    c_tot = c_gsh*c_cos*bsz_gsh*bsz_cos

    # Inner product <Y, basis> with the sin(Phi) volume element.
    tmp = c_tot*np.sum(Y*ep_set.conj()*sinphi)

    del ep_set

    coeff_prt[c] = tmp

    msg = "regression time: %ss" % np.round(time.time()-st, 3)
    fn.WP(msg, filename)

    c += 1

f.close()

f = h5py.File('coeff_prt_%s.hdf5' % tnum, 'w')
f.create_dataset('coeff_prt', data=coeff_prt)
f.close()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
0b2040379f11972e753ede545d2b35e427664bc1 | 75fc303f2734fb1b220f088f1c0a49ef9557bb0c | /blog/models.py | 2d38a56ecdea8fec25f2896d57aa28100e28f769 | [] | no_license | chriszhang111/tinybook | 243c2a4f5fae55d9ec5cd004f231075647e746d7 | fdf63ef9c436fda8043c9936bd659c15d4959904 | refs/heads/master | 2021-05-11T02:38:34.649015 | 2018-01-21T19:58:49 | 2018-01-21T19:58:49 | 118,368,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | from flask_blog import db,uploaded_images
from datetime import datetime
class Blog(db.Model):
    """A blog owned by a single author (the admin)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # Foreign key to the owning author's id.
    admin = db.Column(db.Integer, db.ForeignKey('author.id'))
    #posts = db.relationship('Blog_Post',backref='blog',lazy='dynamic')
    def __init__(self,name,admin):
        self.name = name
        self.admin = admin
    def __repr__(self):
        return '<Blog %r>' % self.name
class Blog_Post(db.Model):
    """A blog article with optional header image, slug, category and
    attached comments."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(80))
    author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
    ##blog_id = db.Column(db.Integer,db.ForeignKey('blog.id'))
    body = db.Column(db.Text)
    image = db.Column(db.String(255))
    slug = db.Column(db.String(256),unique=True)
    publish_date = db.Column(db.DateTime)
    live = db.Column(db.Boolean)
    category = db.Column(db.String(70))
    comments = db.relationship('Comment',backref='blog__post',lazy='dynamic')
    # Columns indexed by the full-text search extension.
    __searchable__ = ['title','body']
    @property
    def imgsrc(self):
        # Resolve the stored image filename to its public upload URL.
        return uploaded_images.url(self.image)
    def __init__(self,title,author,body,image=None,slug=None,publish_date=None,live=True,category=None):
        self.title = title
        self.author_id = author.id
        ##self.blog_id = blog.id
        self.body = body
        self.image = image
        # Default the publication timestamp to "now" (UTC) when omitted.
        if publish_date == None:
            self.publish_date = datetime.utcnow()
        else:
            self.publish_date = publish_date
        self.live = live
        self.category = category
    def __repr__(self):
        return '<Blog article:%r>' % self.title
class Comment(db.Model):
    """A reader comment attached to a Blog_Post."""
    id = db.Column(db.Integer,primary_key=True)
    body = db.Column(db.Text)
    ########
    author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
    blog_id = db.Column(db.Integer,db.ForeignKey('blog__post.id'))
    publish_date = db.Column(db.DateTime)
    live = db.Column(db.Boolean)
    def __init__(self,body,author,blog,publish_date=None,live=True):
        self.body = body
        self.author_id = author.id
        self.blog_id = blog.id
        # BUG FIX: the publish_date argument was accepted but ignored —
        # every comment was stamped with utcnow().  Honour it when given,
        # matching Blog_Post.__init__'s behaviour.
        if publish_date is None:
            self.publish_date = datetime.utcnow()
        else:
            self.publish_date = publish_date
        self.live = live
    def __repr__(self):
        return '<Comment:%r>' % self.id
| [
"chris@chrisdeMBP.fios-router.home"
] | chris@chrisdeMBP.fios-router.home |
b217efd0f64b9d0741f845fcf771f02ebc45c190 | ea434bd9d6c2a3f2356c20b05f33a5d6f2d4d8ca | /geziyakurdi/test.py | 4c26ad334ae3a8404af040a9015df402f66da24e | [] | no_license | dicleoztur/subjectivity_detection | d0b0148849cb10cf0fd024762f80cdc275510ea2 | 7976a5480ed371676784b66d7026429f7e2360cb | refs/heads/master | 2021-01-20T19:34:04.614140 | 2016-08-09T12:28:30 | 2016-08-09T12:28:30 | 65,291,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | '''
Created on Jul 7, 2013
@author: dicle
'''
import numpy as np
import pandas as pd
from datetime import datetime
def f1():
    """Build a 600x600 random-int DataFrame with repeated a-f labels and
    write it to df.csv (space-delimited, with index and header)."""
    out_dir = "/home/dicle/Dicle/Tez/geziyakurdiproject/corpus/test/"
    values = np.random.randint(0, 10, size=360000).reshape(600, 600)
    labels = [letter for _ in range(100) for letter in 'abcdef']
    frame = pd.DataFrame(values, index=labels, columns=labels)
    frame.to_csv(out_dir + 'df.csv', index=True, header=True, sep=' ')
def f2():
    """Write a 600x600 random-int array to df.txt with np.savetxt.

    (Removed an unused `names` list that was computed and never used.)
    """
    p = "/home/dicle/Dicle/Tez/geziyakurdiproject/corpus/test/"
    A = np.random.randint(0, 10, size=360000).reshape(600, 600)
    np.savetxt(p+"df.txt", A)
def fback():
    """Read df.csv back with pandas and print its type and contents
    (Python 2 print statements)."""
    p = "/home/dicle/Dicle/Tez/geziyakurdiproject/corpus/test/"
    data = pd.read_csv(p+"df.csv")
    print type(data)
    print data
if __name__ == "__main__":
    # Time f1 (CSV via pandas) against f2 (np.savetxt), then read back.
    start = datetime.now()
    f1()
    end = datetime.now()
    print str(end-start)
    print
    f2()
    end2 = datetime.now()
    print str(end2-end)
    fback()
| [
"dicleoztur@gmail.com"
] | dicleoztur@gmail.com |
23e08a3bc648e11c09e92370a1bd21eae7196502 | c2f320b696546e0e8ad4372ba40b3efa01920b33 | /set_mat_zeros.py | 47cb05427e25f365facb78d31528f25f8fac47c6 | [] | no_license | MatthewC221/Algorithms | e785f69533787326b791327381567a7bd15480a8 | b619498d2b8b5e53b629b664fabcff0b68c10897 | refs/heads/master | 2021-01-22T21:22:19.549119 | 2019-04-08T11:19:58 | 2019-04-08T11:19:58 | 85,416,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | # Leetcode: https://leetcode.com/problems/set-matrix-zeroes/description/
# O(n) time, O(n) space. I think this is a good way to do it, it will never be O(n^2).
# Even if we approached in the manner that if we see a 0 we set its col and row to False we can reach O(n^2).
# This way, we only go through each row/col at most once at the cost of the dict.
# Beats 90%
class Solution(object):
    def setZeroes(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.

        Set the entire row and column of every zero cell to zero.
        O(m*n) time, O(m + n) extra space.  The original tracked rows/cols
        with a parallel list + "seen" dict; sets give the same dedup in one
        structure.  Also guards the empty-matrix case.
        """
        m = len(matrix)
        n = len(matrix[0]) if m else 0
        zero_rows = set()
        zero_cols = set()
        # First pass: record each row/column containing a zero exactly once.
        for i in range(m):
            for j in range(n):
                if matrix[i][j] == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        # Second pass: blank the recorded rows and columns.
        for i in zero_rows:
            for j in range(n):
                matrix[i][j] = 0
        for j in zero_cols:
            for i in range(m):
                matrix[i][j] = 0
"noreply@github.com"
] | MatthewC221.noreply@github.com |
a953715dfb4d72b0d80a6276358e69b854f3ef42 | 495e8027c9ebe0634fae72983c3d85ca22286a8b | /Homework 1/bayes.py | f65fecb9aa276068f0028a35f4c6b1572ef16088 | [] | no_license | braininahat/Advanced_Robotics_CSE668 | f0cfb1462f90a44911ce256e96aea0f648f5f243 | f0e8b14e31c9adce5d970f56f836c06ea8e042ee | refs/heads/master | 2020-03-07T12:33:41.407517 | 2018-05-17T05:51:05 | 2018-05-17T05:51:05 | 127,480,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | import numpy as np
# NOTE(review): np.empty allocates WITHOUT initialising contents — the array
# holds arbitrary values.  If a zeroed 50x50x50 state space is intended,
# np.zeros would be the safe choice; confirm downstream usage.
world = np.empty((50, 50, 50))  # Initializing state space of 50x50x50
"varunshi@buffalo.edu"
] | varunshi@buffalo.edu |
6e40eb92f7f07237b92ac81f04afb8abdeab0035 | 2e73a857b634f62ef3a939d63a9939858ee06205 | /lib/emojis.py | 96b3110e057c16be024816f0502a2f389e28c2d0 | [
"MIT"
] | permissive | LollipopGeneral/wxj | d0bf1a62dfcb613ba943042467268baf0c1b9861 | faaa16f30d4c076a4732ca4cd03032440d8cc437 | refs/heads/master | 2020-05-07T08:07:38.525752 | 2017-10-22T15:41:04 | 2017-10-22T15:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | import json
import re
from . import utils
from .common import DATADIR
DATAFILE = DATADIR / 'emojis.json'
# Two-level emoji registry keyed by emoji label; based on how emoji() consumes it:
#   'apple'     -> label mapped to markup/text returned as-is
#   'pictorial' -> label mapped to an image filename
EMOJIS = {
    'apple': {},
    'pictorial': {},
}
def load():
    """Populate EMOJIS from DATAFILE if it exists (silently a no-op otherwise)."""
    if DATAFILE.exists():
        with open(DATAFILE) as fp:
            EMOJIS.update(json.load(fp))
load()  # load the registry once at import time
def emoji(label):
    """Render *label* as emoji markup.

    'apple' entries are returned verbatim; 'pictorial' entries become an
    <img> tag pointing at the mapped image file; unknown labels fall back
    to the literal "[label]" placeholder text.
    """
    if label in EMOJIS['apple']:
        return EMOJIS['apple'][label]
    elif label in EMOJIS['pictorial']:
        filename = EMOJIS['pictorial'][label]
        # Fix: interpolate the mapped filename — the original contained a
        # literal placeholder path and left `filename` unused.
        path = utils.asset_path(f'images/emojis/{filename}')
        return f'<img src="{path}" alt="[{label}]">'
    else:
        return f'[{label}]'
EMOJI_PLACEHOLDER = re.compile(r'\[(?P<label>[^\]]+)\]')
def markup_emojis(text):
    """Replace every [label] placeholder in *text* with its emoji markup."""
    def _render(match):
        return emoji(match.group('label'))
    return EMOJI_PLACEHOLDER.sub(_render, text)
| [
"snh48live@gmail.com"
] | snh48live@gmail.com |
afe05bb5eb74325ed592e4e8709f4779a1e69f2a | 6f6a2e25b8efb195aa2bc4f89bae7ae8212c19c4 | /dictionary/dicto/migrations/0003_auto_20210529_1613.py | 169f40355008f2eee6396cfedbe3f3f616b1f222 | [] | no_license | hemantgupta0408/dictionary-django | f7a080d284490e9f9ce100a65366bf36ff6002bb | f1f5a2afa45dd2045fecadfd3102937a3862d75e | refs/heads/master | 2023-05-09T19:24:10.988571 | 2021-06-07T10:50:21 | 2021-06-07T10:50:21 | 353,572,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # Generated by Django 3.1.6 on 2021-05-29 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``dicto`` app."""
    dependencies = [
        ('dicto', '0002_auto_20210528_1724'),
    ]
    operations = [
        # Category names become unique across Words rows (max length 80).
        migrations.AlterField(
            model_name='words',
            name='category',
            field=models.CharField(max_length=80, unique=True),
        ),
        # Uploaded images are stored under the media/ directory.
        migrations.AlterField(
            model_name='words',
            name='image',
            field=models.ImageField(upload_to='media/'),
        ),
    ]
| [
"hemantgupta0404@gmail.com"
] | hemantgupta0404@gmail.com |
124ea5660380a8f75b3dcf5d39f6419f82e4cb75 | d7f4a14f0ba681cb055f4a43823678baba2bdaa9 | /Lawn.py | 619953fd419aca18b68acaa5da9e2c14340a358a | [] | no_license | JamesMensah/blablamower | 1f0b0aa76d550292cc991eb961c1f83538ce8d89 | 1ff417e2e76dd3cca10900719b4f4d9909e19aa2 | refs/heads/master | 2020-06-29T08:19:06.900518 | 2019-08-04T20:17:04 | 2019-08-04T20:17:04 | 200,484,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | class Lawn(object):
def __init__(self, upper_right_x=None, upper_right_y=None, mowers=[]):
self.upper_right_x = upper_right_x
self.upper_right_y = upper_right_y
self.mowers = mowers
def __str__(self):
return "The lawnmowers locations are now " + str(self.mowers)
| [
"jamesou@gmail.com"
] | jamesou@gmail.com |
13da54188ce83afe9954d7fdbafc47366dcacc96 | a24f4a4f6861a528b7607adce135c47b718cb045 | /cautious-barnacle/interview_questions/searching/find_prefix_in_strings.py | 896dc23be285e8eab84f8110895e4b7431a0b685 | [] | no_license | domRowan/cautious-barnacle | a8e73602f9fb35fbc4c4d741e6d6214de11fb9aa | ca2fcd40a7c8f84f0aac93be090278af1231d931 | refs/heads/master | 2020-03-30T06:19:02.817961 | 2020-01-16T13:45:29 | 2020-01-16T13:45:29 | 150,852,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | def find_prefix_in_strings(input, prefix):
first_occurance = find_occurance_of_letter(input, prefix, True, False)
last_occurance = find_occurance_of_letter(input, prefix, False, True)
return (first_occurance, last_occurance)
def find_occurance_of_letter(input, prefix, FIRST, LAST):
    """Binary-search the sorted string list *input* for an entry starting with *prefix*.

    With FIRST=True the search keeps narrowing left to find the earliest
    match; with LAST=True it narrows right for the latest match.  Returns
    the matching index, or -1 when no entry carries the prefix.
    """
    lo, hi = 0, len(input) - 1
    found = -1
    width = len(prefix)
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = input[mid][:width]
        if candidate == prefix:
            found = mid
            if FIRST:
                hi = mid - 1   # keep scanning the left half for an earlier match
            if LAST:
                lo = mid + 1   # keep scanning the right half for a later match
        elif candidate > prefix:
            hi = mid - 1
        else:
            lo = mid + 1
    return found
# Demo: print the (first, last) index range of entries carrying the "pre-" prefix.
words = ["bar", "baz", "pre-foo", "pre-quux"]  # renamed from `input` to stop shadowing the builtin
prefix = "pre-"
print(find_prefix_in_strings(words, prefix))  # parenthesized: valid in both Python 2 and 3
| [
"rowand@amazon.com"
] | rowand@amazon.com |
ec138c2141ea41be405272bce0a14e83376a4208 | c284be8567ab62e330d8828e72045a03d32c118a | /replay.py | 57db29bf80192fd2f6571888206b5f9ebf4e8e10 | [] | no_license | syun0830/tracking_via_colorizing | 134b589ad47fd6191e382e2ec17e233bc570ac2e | 56da2848b7980499b107c324ac50a29e9b66d431 | refs/heads/master | 2020-12-26T22:23:46.118327 | 2020-02-02T09:58:08 | 2020-02-02T09:58:08 | 237,667,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,284 | py | import tensorflow as tf
def stratified_sample(probs, n):
    """Draw ``n`` stratified sample indices from a 1-D weight tensor (TF1 graph code).

    The cumulative distribution of ``probs`` is split into ``n``
    equal-probability strata and one index is drawn uniformly within each
    stratum, so heavy entries are sampled more often while every stratum
    contributes one sample.  Returns an int32 tensor of ``n`` indices.
    """
    N = tf.shape(probs)[0:1]
    c = tf.cumsum(probs)
    c = c/c[-1]  # normalize the running sum into a CDF over [0, 1]
    borders = tf.linspace(0.,1.,n+1)
    right = borders[1:]  # right edge of each of the n strata
    c = tf.expand_dims(c, 0)
    right = tf.expand_dims(right, 1)
    # greater_mask[k, i] == 1 where the CDF at index i exceeds stratum k's right edge
    greater_mask = tf.cast(tf.greater(c, right), tf.int32)
    _cum_num = tf.reduce_sum(greater_mask, 1)  # count of entries past each stratum
    cum_num = tf.concat([N,_cum_num[:-1]],0)
    num = cum_num - _cum_num  # number of entries inside each stratum
    unif = tf.contrib.distributions.Uniform(low=0., high=tf.cast(num, tf.float32))
    local_inds = tf.cast(unif.sample(), tf.int32)  # uniform offset within each stratum
    begin = N - cum_num  # first absolute index of each stratum
    return local_inds + begin
class PrioritizedHistory:
    """Fixed-capacity, weight-sorted replay buffer kept entirely in TF1 variables.

    Entries live in per-name data variables; ``_weights`` is maintained in
    descending order and ``_inds`` maps each weight slot to the physical row
    holding that entry's data.  Python 2 code (``iteritems``, list-returning
    ``map``); all methods build graph ops, they do not execute anything.
    """
    def __init__(self, name_to_shape_dtype,
                 capacity = 100000,
                 device = '/gpu:0',
                 variable_collections = ['history'],
                 scope = 'history',
                 print_messages = False):
        """Create the buffer variables.

        ``name_to_shape_dtype`` maps entry-field name -> (shape, dtype); a bare
        (shape, dtype) pair is wrapped under the reserved '__singleton__' key.
        """
        variables = []
        self._capacity = capacity
        self._device = device
        self._scope = scope
        self._print_messages = print_messages
        if not isinstance(name_to_shape_dtype, dict):
            name_to_shape_dtype = {'__singleton__': name_to_shape_dtype}
        with tf.device(self._device), tf.name_scope(self._scope):
            self._histories = {}
            with tf.name_scope('data'):
                for name, (shape, dtype) in name_to_shape_dtype.iteritems():
                    self._histories[name] = tf.Variable(tf.zeros([capacity]+list(shape), dtype=dtype),
                                                        trainable = False,
                                                        collections = variable_collections,
                                                        name = name)
                    variables.append(self._histories[name])
            # Sorted (descending) priority weights; slot i's data lives at row _inds[i].
            self._weights = tf.Variable(tf.zeros([capacity], dtype=tf.float32),
                                        trainable = False,
                                        collections = variable_collections,
                                        name = 'weights')
            variables.append(self._weights)
            self._inds = tf.Variable(tf.range(capacity),
                                     trainable = False,
                                     collections = variable_collections,
                                     name = 'indices')
            variables.append(self._inds)
            self._size = tf.Variable(tf.constant(0, dtype=tf.int32),
                                     trainable = False,
                                     collections = variable_collections,
                                     name = 'size')
            variables.append(self._size)
            self.saver = tf.train.Saver(var_list=variables)
            # NOTE: relies on py2 map() returning a list; py3 would need list(map(...)).
            self.initializer = tf.group(map(lambda v: v.initializer, variables))
    def append(self, name_to_value, weight):
        """Build an op inserting an entry with priority ``weight``.

        The entry is accepted only if it outranks some stored weight; it then
        displaces the lowest-priority slot's physical row.  The returned op
        evaluates to the insertion slot, or -1 when the entry was rejected.
        """
        if not isinstance(name_to_value, dict):
            name_to_value = {'__singleton__': name_to_value}
        with tf.device(self._device), tf.name_scope(self._scope):
            weight = tf.convert_to_tensor(weight)
            name_to_value = {name: tf.convert_to_tensor(value) for name, value in name_to_value.iteritems()}
            inds = tf.where(tf.less(self._weights, weight))
            accepted = tf.greater(tf.shape(inds)[0], 0)
            def insert():
                # Insert at the first slot whose weight is lower; reuse the
                # physical row of the currently lowest-priority slot.
                ind = inds[0,0]
                ind_buf = self._inds[-1]
                ops = []
                for name, value in name_to_value.iteritems():
                    ops.append(self._histories[name][ind_buf].assign(value))
                with tf.control_dependencies(ops):
                    # Shift everything below the insertion point down one slot.
                    ops = [self._weights[(ind+1):].assign(self._weights[ind:-1]),
                           self._inds[(ind+1):].assign(self._inds[ind:-1])]
                with tf.control_dependencies(ops):
                    ops = [self._weights[ind].assign(weight),
                           self._inds[ind].assign(ind_buf),
                           self._size.assign(tf.reduce_min([self._size+1, self._capacity]))]
                with tf.control_dependencies(ops):
                    ind = tf.cast(ind, tf.int32)
                    if self._print_messages:
                        ind = tf.Print(ind, [ind], message='Entry was inserted at: ')
                        ind = tf.Print(ind, [ind_buf], message='Replaced address: ')
                    return ind
            if self._print_messages:
                return tf.cond(accepted, insert, lambda: tf.Print(-1, [], message='Entry was rejected'))
            else:
                return tf.cond(accepted, insert, lambda: -1)
    def update_weight(self, ind, weight):
        """Build an op replacing the weight at slot ``ind`` and re-sorting.

        The slot is moved to its new position (shifting the slots between old
        and new position by one) so ``_weights`` stays descending.  Evaluates
        to the slot's new index.
        """
        with tf.device(self._device), tf.name_scope(self._scope):
            ind = tf.convert_to_tensor(ind)
            if self._print_messages:
                ind = tf.Print(ind, [ind], message='Updated entry: ')
            old_weight = self._weights[ind]
            ind_buf = self._inds[ind]
            weight = tf.convert_to_tensor(weight)
            def first_less():
                # Destination when the weight grew: first slot lighter than it.
                inds = tf.where(tf.less(self._weights, weight))
                return tf.cond(tf.greater(tf.shape(inds)[0], 0),
                               lambda: tf.cast(inds[0,0], tf.int32),
                               lambda: tf.constant(self._capacity-1, dtype=tf.int32))
            def last_greater():
                # Destination when the weight shrank: last slot heavier than it.
                inds = tf.where(tf.greater(self._weights, weight))
                return tf.cond(tf.greater(tf.shape(inds)[0], 0),
                               lambda: tf.cast(inds[-1,0], tf.int32),
                               lambda: tf.constant(self._capacity-1, dtype=tf.int32))
            new_ind = tf.cond(tf.greater(weight, old_weight), first_less, last_greater)
            if self._print_messages:
                new_ind = tf.Print(new_ind, [new_ind], message='Moved to: ')
            def up():
                # Slot moves toward the end: shift the gap left by one.
                ops = [self._weights[ind:new_ind].assign(self._weights[(ind+1):(new_ind+1)]),
                       self._inds[ind:new_ind].assign(self._inds[(ind+1):(new_ind+1)])]
                return tf.group(ops)
            def down():
                # Slot moves toward the front: shift the gap right by one.
                ops = [self._weights[(new_ind+1):(ind+1)].assign(self._weights[new_ind:ind]),
                       self._inds[(new_ind+1):(ind+1)].assign(self._inds[new_ind:ind]),]
                return tf.group(ops)
            with tf.control_dependencies([ind_buf]):
                shift = tf.cond(tf.greater(new_ind, ind), up, down)
            with tf.control_dependencies([shift]):
                ops = [self._weights[new_ind].assign(weight),
                       self._inds[new_ind].assign(ind_buf)]
            with tf.control_dependencies(ops):
                return tf.identity(new_ind)
    def update_weights(self, inds, weights):
        """Build an op batch-updating several weights, then re-sorting the
        whole buffer (descending, stable) in one pass."""
        with tf.device(self._device), tf.name_scope(self._scope):
            inds = tf.convert_to_tensor(inds)
            if self._print_messages:
                inds = tf.Print(inds, [inds], message='Updated entries: ')
            weights = tf.convert_to_tensor(weights)
            updated = tf.scatter_nd_update(self._weights, tf.expand_dims(inds, -1), weights)
            sorted_inds = tf.contrib.framework.argsort(updated,
                                                       direction='DESCENDING',
                                                       stable=True)
            ops = [self._weights.assign(tf.gather(updated, sorted_inds)),
                   self._inds.assign(tf.gather(self._inds, sorted_inds))]
            return tf.group(ops)
    def sample(self, size):
        """Build ops stratified-sampling ``size`` entries by weight.

        Returns (slot indices, gathered data); the data dict collapses to a
        bare tensor when the buffer was built with a single unnamed field.
        """
        with tf.device(self._device), tf.name_scope(self._scope):
            inds = stratified_sample(self._weights[:self._size], size)
            if self._print_messages:
                inds = tf.Print(inds, [inds], message='Sampled entries: ')
            _inds = tf.gather(self._inds, inds)  # slot index -> physical data row
            if self._print_messages:
                _inds = tf.Print(_inds, [_inds], message='Sampled addresses: ')
            name_to_value = {name: tf.gather(hist, _inds) for name, hist in self._histories.iteritems()}
            if set(name_to_value.keys()) == set(['__singleton__']):
                name_to_value = name_to_value['__singleton__']
            return inds, name_to_value
if __name__=='__main__':
    # Manual smoke test: exercise append / update_weight / update_weights on a
    # tiny CPU-only buffer and print its internals after every operation.
    sess = tf.InteractiveSession()
    history = PrioritizedHistory(([1], tf.int32),
                                 device = '/cpu:0',
                                 capacity = 5,
                                 print_messages = True)
    def print_vars():
        """Dump the buffer's slot->row mapping, stored values and weights."""
        print('inds: ', history._inds.eval())
        print('values: ', history._histories['__singleton__'].eval()[:,0])
        print('weights: ', history._weights.eval())
    sess.run(history.initializer)
    print_vars()
    history.append([1], 1.).eval()
    print_vars()
    history.append([2], 2.).eval()
    print_vars()
    history.append([3], 3.).eval()
    print_vars()
    history.update_weight(1, 4.).eval()
    print_vars()
    history.append([5], 5.).eval()
    print_vars()
    history.append([6], 6.).eval()
    print_vars()
    # Buffer is full (capacity 5): this entry displaces the lowest weight.
    history.append([25], 2.5).eval()
    print_vars()
    history.update_weights([1], [3.9]).run()
    print_vars()
| [
"shunsuke.dgear@gmail.com"
] | shunsuke.dgear@gmail.com |
6ce38780ae3c06e1906fca79ab887be09b941599 | 129332d9fb1dc4d47188186a2ee29d9a82395486 | /gui/components/VolumeLevel/VolumeLevel.py | 9564362c35f99e4a2e298f8cca7644404ab12c69 | [] | no_license | Dmy54/xmachine_graph | 636f16b96e39488668cc3670789ce3346b9b90d8 | 4b7e141aa8e4c113b5c346d8cbb60b7ab790eb92 | refs/heads/master | 2023-06-26T11:01:03.429277 | 2021-07-27T11:29:38 | 2021-07-27T11:29:38 | 386,931,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ListProperty, NumericProperty
class VolumeLevel(BoxLayout):
    """Kivy widget showing a volume meter driven by the ``level`` property."""
    color = ListProperty([1, 0, 0, 1])  # RGBA display color
    level = NumericProperty(0)  # current volume level; presumably bound in the kv file — confirm
    def __init__(self, **kwargs):
        # Pure pass-through to BoxLayout; kept for symmetry with sibling widgets.
        super(VolumeLevel, self).__init__(**kwargs)
    def change_level(self, level):
        """Set the displayed volume level."""
        self.level = level
"4254965@gmail.com"
] | 4254965@gmail.com |
1bec4f69dbf58f022ceafae8fdf4ee48366b0bc6 | f051b85de74757fd70fa03735ee087d24945730d | /pdf_to_img/apps.py | a6536aeb68a4ef5c8bbe6592aa89f078c340de48 | [
"MIT"
] | permissive | karuvally/wraith | fa18e7ff646b3436f5003da4be558387ddc3e5df | a2436e67ff67094487d9b6f3c7419510c9d72b35 | refs/heads/main | 2023-08-24T09:05:52.217170 | 2021-10-21T11:26:36 | 2021-10-21T11:26:36 | 411,770,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django.apps import AppConfig
class PdfToImgConfig(AppConfig):
    """Django application configuration for the ``pdf_to_img`` app."""
    default_auto_field = 'django.db.models.BigAutoField'  # default PK type for models in this app
    name = 'pdf_to_img'
| [
"aswinbabuk@gmail.com"
] | aswinbabuk@gmail.com |
9f1ffddbff4221e8639aa9fec08053614fd6e15f | 22258dfb5e0ad56db0c690dbfde8f4af6d9509d8 | /maze.py | f88885ee8df22832dedc711dc118cd5930f20ef7 | [] | no_license | ranguera/Unity-Maze-Generator | 10fa29d9f3ec6b3fce850d941bafc4678ea022f6 | f6f9326183b7b5437bbf949fe2f0aaf783207af7 | refs/heads/master | 2021-01-12T03:37:32.972846 | 2017-02-09T22:00:36 | 2017-02-09T22:00:36 | 78,242,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,881 | py | # Code by Erik Sweet and Bill Basener
import random
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
# Maze generation by iterative depth-first backtracking: carve a random path,
# backtrack via the `history` stack when stuck, until every cell is visited.
num_rows = int(input("Rows: ")) # number of rows
num_cols = int(input("Columns: ")) # number of columns
# The array M is going to hold the array information for each cell.
# The first four coordinates tell if walls exist on those sides
# and the fifth indicates if the cell has been visited in the search.
# M(LEFT, UP, RIGHT, DOWN, CHECK_IF_VISITED) — 1 means wall open / visited.
M = np.zeros((num_rows,num_cols,5), dtype=np.uint8)
# The array image is going to be the output image to display (10x10 px per cell)
image = np.zeros((num_rows*10,num_cols*10), dtype=np.uint8)
# Set starting row and column
r = 0
c = 0
history = [(r,c)] # The history is the stack of visited locations
# Trace a path though the cells of the maze and open walls along the path.
# We do this with a while loop, repeating the loop until there is no history,
# which would mean we backtracked to the initial start.
while history:
    M[r,c,4] = 1 # designate this location as visited
    # check if the adjacent cells are valid for moving to
    check = []
    if c > 0 and M[r,c-1,4] == 0:
        check.append('L')
    if r > 0 and M[r-1,c,4] == 0:
        check.append('U')
    if c < num_cols-1 and M[r,c+1,4] == 0:
        check.append('R')
    if r < num_rows-1 and M[r+1,c,4] == 0:
        check.append('D')
    if len(check): # If there is a valid cell to move to.
        # Mark the walls between cells as open if we move
        history.append([r,c])
        move_direction = random.choice(check)
        # Open the wall on this cell's side and the matching wall on the
        # neighbor's opposite side, then step into the neighbor.
        if move_direction == 'L':
            M[r,c,0] = 1
            c = c-1
            M[r,c,2] = 1
        if move_direction == 'U':
            M[r,c,1] = 1
            r = r-1
            M[r,c,3] = 1
        if move_direction == 'R':
            M[r,c,2] = 1
            c = c+1
            M[r,c,0] = 1
        if move_direction == 'D':
            M[r,c,3] = 1
            r = r+1
            M[r,c,1] = 1
    else: # If there are no valid cells to move to.
        # retrace one step back in history if no move is possible
        r,c = history.pop()
# Open the walls at the start and finish
M[0,0,0] = 1
M[num_rows-1,num_cols-1,2] = 1
# Generate the image for display: white interior plus white gaps for open walls.
for row in range(0,num_rows):
    for col in range(0,num_cols):
        cell_data = M[row,col]
        for i in range(10*row+1,10*row+9):
            image[i,range(10*col+1,10*col+9)] = 255
        if cell_data[0] == 1: image[range(10*row+1,10*row+9),10*col] = 255
        if cell_data[1] == 1: image[10*row,range(10*col+1,10*col+9)] = 255
        if cell_data[2] == 1: image[range(10*row+1,10*row+9),10*col+9] = 255
        if cell_data[3] == 1: image[10*row+9,range(10*col+1,10*col+9)] = 255
# Display the image
plt.imshow(image, cmap = cm.Greys_r, interpolation='none')
plt.show()
"ranguera@gmail.com"
] | ranguera@gmail.com |
e1f86e42b1651b24b49a852a30e9ba287c876154 | 36126f91a2d5903483b84ba2d8be77e160803058 | /tests/test_model.py | 2fcf991147d46894fa7d917d389309988844fd6e | [
"Apache-2.0"
] | permissive | open-risk/transitionMatrix | 9962bb2656eb637ba56afc3adecf42bbe68f9593 | d05e75cbc251f01842dd8c5ce225894b988f4d99 | refs/heads/master | 2023-03-05T08:01:20.816425 | 2023-02-22T20:46:38 | 2023-02-22T20:46:38 | 110,365,127 | 73 | 29 | Apache-2.0 | 2022-12-08T11:37:12 | 2017-11-11T17:25:08 | Python | UTF-8 | Python | false | false | 5,126 | py | # encoding: utf-8
# (c) 2017-2022 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
from scipy.linalg import expm
import transitionMatrix as tm
from transitionMatrix import source_path
ACCURATE_DIGITS = 7  # decimal places used by all approximate-equality checks below
class TestTransitionMatrix(unittest.TestCase):
    '''
    Default instance (2x2 identity matrix)
    '''
    def test_instantiate_matrix(self):
        # Default constructor yields a 2x2 identity matrix.
        a = tm.TransitionMatrix()
        self.assertAlmostEqual(a[0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[0, 1], 0.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 0], 0.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 1], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        # Explicit values are stored as given.
        b = tm.TransitionMatrix([[1.0, 3.0], [1.0, 4.0]])
        self.assertAlmostEqual(b[0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(b[0, 1], 3.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(b[1, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(b[1, 1], 4.0, places=ACCURATE_DIGITS, msg=None, delta=None)
    def test_csv_io(self):
        # Round-trip through CSV preserves every entry (writes test.csv in cwd).
        a = tm.TransitionMatrix()
        a.to_csv("test.csv")
        b = tm.TransitionMatrix(csv_file="test.csv")
        self.assertAlmostEqual(a[0, 0], b[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[0, 1], b[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 0], b[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 1], b[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
    def test_json_io(self):
        # Round-trip through JSON preserves every entry (writes test.json in cwd).
        a = tm.TransitionMatrix()
        a.to_json("test.json")
        b = tm.TransitionMatrix(json_file="test.json")
        self.assertAlmostEqual(a[0, 0], b[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[0, 1], b[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 0], b[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 1], b[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
    def test_validation(self):
        # A valid matrix validates to True; each invalid case reports its
        # specific error string as the first element of the first finding.
        a = tm.TransitionMatrix()
        self.assertEqual(a.validate(), True)
        b = tm.TransitionMatrix(values=[1.0, 3.0])
        self.assertEqual(b.validate()[0][0], 'Matrix Dimensions Differ: ')
        c = tm.TransitionMatrix(values=[[0.75, 0.25], [0.0, 0.9]])
        self.assertEqual(c.validate()[0][0], 'Rowsum not equal to one: ')
        d = tm.TransitionMatrix(values=[[0.75, 0.25], [-0.1, 1.1]])
        self.assertEqual(d.validate()[0][0], 'Negative Probabilities: ')
    def test_generator(self):
        # expm of the generator must reproduce the original matrix.
        a = tm.TransitionMatrix([[1.0, 3.0], [1.0, 4.0]])
        self.assertAlmostEqual(a[0, 0], expm(a.generator())[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[0, 1], expm(a.generator())[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 0], expm(a.generator())[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a[1, 1], expm(a.generator())[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
class TestTransitionMatrixSet(unittest.TestCase):
    """Tests for TransitionMatrixSet (several stubs still to be filled in)."""
    def test_instantiate_matrix_set(self):
        # Default set is 'Incremental' and holds identity matrices in every period.
        periods = 5
        a = tm.TransitionMatrixSet(dimension=2, periods=periods)
        self.assertEqual(a.temporal_type, 'Incremental')
        self.assertAlmostEqual(a.entries[0][0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        self.assertAlmostEqual(a.entries[periods-1][0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
        pass
    def test_set_validation(self):
        a = tm.TransitionMatrixSet(dimension=2, periods=5)
        self.assertEqual(a.validate(), True)
    def test_set_cumulate_incremental(self):
        # cumulate() followed by incremental() should be an identity round-trip.
        # NOTE(review): b_set = a_set is an alias, not a copy, so this compares
        # an object with itself and the assertion is vacuous — confirm intent.
        a = tm.TransitionMatrix(values=[[0.6, 0.2, 0.2], [0.2, 0.6, 0.2], [0.2, 0.2, 0.6]])
        a_set = tm.TransitionMatrixSet(values=a, periods=3, method='Copy', temporal_type='Incremental')
        b_set = a_set
        b_set.cumulate()
        b_set.incremental()
        self.assertAlmostEqual(a_set.entries[2][0, 0], b_set.entries[2][0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
        pass
    def test_set_csv_io(self):
        # TODO: not yet implemented.
        pass
    def test_set_json_io(self):
        # TODO: not yet implemented.
        pass
if __name__ == "__main__":
    unittest.main()
| [
"openrisk@outlook.com"
] | openrisk@outlook.com |
d1df29cfcfd4dace82fa7e4e728abf9975d61641 | 94615230d5733282fb69ae5d35411c04a337d353 | /sublime-text-3/Packages/HTML-CSS-JS Prettify/src/py/utils/constants.py | 2d082675e21dc88f0f92e4c331ef81174d4f9007 | [
"Unlicense"
] | permissive | EnTeQuAk/dotfiles | fcef6a885891c3c132da3ea970dd21aee16b72c1 | b00890fa64a01b3a0e4eaaada13e90c1ef36b9e0 | refs/heads/master | 2023-01-04T21:09:37.330838 | 2019-09-16T14:49:45 | 2019-09-16T14:49:45 | 1,558,950 | 1 | 0 | Unlicense | 2023-01-04T05:01:57 | 2011-04-02T08:31:38 | Vim script | UTF-8 | Python | false | false | 623 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Various constants used by this plugin"""
from sublime import platform, version
PLATFORM = platform()  # host OS as reported by Sublime Text
SUBLIME_VERSION = int(version())  # numeric Sublime Text build
# Byte markers framing the diagnostics / prettified-code sections in the
# external prettifier process's output stream.
DIAGNOSTICS_MARKER_BEGIN = b"### HTMLPrettify diagnostics begin ###"
DIAGNOSTICS_MARKER_END = b"### HTMLPrettify diagnostics end ###"
PRETTIFIED_CODE_MARKER_BEGIN = b"### HTMLPrettify prettified code begin ###"
PRETTIFIED_CODE_MARKER_END = b"### HTMLPrettify prettified code end ###"
| [
"cg@webshox.org"
] | cg@webshox.org |
7b14555c4e4fe08c4111754baee534c46f8a907b | 357e72f2a8b7aa2a26fb02312aab9e1caefdf78a | /sc2_start/unit_based_approach.py | c8af2d9f6861824aa7c7c3e421ab1b3eecd68e66 | [] | no_license | tristan00/sc2 | a9044bae01dc6f2f1a66ebbe9cb9ada45285d202 | 9eb0c800a3e647f3f847a17243ad1e80ac7153a6 | refs/heads/master | 2020-03-21T06:51:46.163844 | 2019-01-23T03:01:49 | 2019-01-23T03:01:49 | 138,245,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,615 | py | import sc2
from sc2 import run_game, maps, Race, Difficulty, position
from sc2.player import Bot, Computer
from sc2.constants import NEXUS, PROBE, PYLON, ASSIMILATOR, GATEWAY, \
CYBERNETICSCORE, STARGATE, VOIDRAY, ZEALOT, STALKER, ROBOTICSFACILITY, \
OBSERVER, IMMORTAL, ADEPT, FORGE, SHIELDBATTERY, PHOTONCANNON, TWILIGHTCOUNCIL, \
DARKSHRINE, DARKTEMPLAR, ORBITALCOMMAND, COMMANDCENTER, DESTRUCTIBLEROCK2X4VERTICAL, \
DESTRUCTIBLEROCK2X4HORIZONTAL, DESTRUCTIBLEROCK2X6VERTICAL, DESTRUCTIBLEROCK2X6HORIZONTAL, \
DESTRUCTIBLEROCK4X4, DESTRUCTIBLEROCK6X6
import multiprocessing
import random
import asyncio
import numpy as np
import pickle
import datetime
import glob
import operator
import os
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler, LabelBinarizer
import lightgbm as lgb
import traceback
from keras import layers, models, callbacks
import tensorflow
import h5py
import pickle
import time
import pandas as pd
# --- Model / training hyperparameters ---
max_iter = 1000000
class_num = 30  # number of discrete move classes the policy network predicts
var_size = 41  # per-step game-state feature vector length
memory_size = 500  # number of past moves kept in the rolling state window
nan_replacement = -1  # sentinel used for unseen/padded state entries
max_game_training_size = 1000
perc_to_consider = .5  # fraction of best/worst games used as positive/negative examples
# Unit types allowed to act aggressively; values look like per-unit tuning
# parameters — TODO confirm their meaning against the strategy code.
aggressive_units = {
    ZEALOT: [8, 3],
    STALKER: [8, 3],
    IMMORTAL: [8, 3],
    VOIDRAY: [8, 3],
    ADEPT: [8, 3],
    DARKTEMPLAR: [8, 3],
}
# Building types a worker may be ordered to construct.
buildings = {NEXUS,PYLON,ASSIMILATOR,GATEWAY,CYBERNETICSCORE,STARGATE,ROBOTICSFACILITY,TWILIGHTCOUNCIL,DARKSHRINE}
dump_dict = dict()  # per-game record serialized to disk after each game
path = r'C:\Users\trist\Documents\sc2_bot/'  # output directory for models and game data
record_memory = 2000
def train_strat_model():
    """Retrain the move-selection network from recorded games on disk.

    Loads every ``*_data.plk`` game record, ranks games by score, labels
    moves from the top half as positive and the bottom half as negative,
    then fits (and checkpoints) a dense Keras classifier plus the fitted
    MinMaxScaler and LabelBinarizer used at inference time.
    """
    files = glob.glob(path + '*_data.plk')
    dicts = []
    # if len(files) > max_game_training_size:
    #     files = random.sample(files, max_game_training_size)
    for i in files:
        with open(i, 'rb') as f:
            dicts.append(pickle.load(f))
    # Shuffle first so equal-score games end up in a random order after the
    # stable sort by score (best games first).
    random.shuffle(dicts)
    dicts.sort(reverse = True, key = lambda x: x['score'])
    print(len(dicts))
    print([i['score'] for i in dicts])
    features = sum([i['past_moves'] for i in dicts], [])
    # random.shuffle(features)
    x = np.array([i['game_state'] for i in features])
    y = np.array([i['f'] for i in features])
    # x = x.reshape((-1, var_size * memory_size))
    x = np.squeeze(x)
    # Fit scaler/encoder on ALL games so inference-time transforms are stable.
    scaler = MinMaxScaler()
    scaler.fit(x)
    enc = LabelBinarizer(sparse_output = False)
    enc.fit(np.reshape(y, (-1, 1)))
    with open(path + 'scaler.plk', 'wb') as f:
        pickle.dump(scaler, f)
    with open(path + 'encoder.plk', 'wb') as f:
        pickle.dump(enc, f)
    # Top fraction of games = positive examples, bottom fraction = negative.
    pos_dicts = dicts[:int(len(dicts)*perc_to_consider)]
    neg_dicts = dicts[-int(len(dicts)*perc_to_consider):]
    pos_features = sum([i['past_moves'] for i in pos_dicts], [])
    neg_features = sum([i['past_moves'] for i in neg_dicts], [])
    pos_x = np.array([i['game_state'] for i in pos_features])
    neg_x = np.array([i['game_state'] for i in neg_features])
    pos_y = np.array([i['f'] for i in pos_features])
    neg_y = np.array([i['f'] for i in neg_features])
    # pos_x = pos_x.reshape((-1, var_size * memory_size))
    # neg_x = neg_x.reshape((-1, var_size * memory_size))
    pos_x = np.squeeze(pos_x)
    neg_x = np.squeeze(neg_x)
    pos_x = scaler.transform(pos_x)
    neg_x = scaler.transform(neg_x)
    pos_y = enc.transform(np.reshape(pos_y, (-1, 1)))
    neg_y = enc.transform(np.reshape(neg_y, (-1, 1)))
    x = np.vstack([pos_x, neg_x])
    pos_y = pos_y.astype(np.float32)
    # Negative games: invert the one-hot labels so the chosen move is marked 0
    # and every other move 1 (i.e. "anything but what the losing game did").
    neg_y = (neg_y == 0).astype(np.float32)
    y = np.vstack([pos_y, neg_y])
    # model = ExtraTreesClassifier(n_jobs=-1, min_samples_leaf = 50)
    # model.fit(pos_x, pos_y)
    #
    # with open(path + 'model.plk', 'wb') as f:
    #     pickle.dump(model, f)
    #
    # res = model.predict(x)
    # print(res)
    #
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=.1)
    #
    # Early stopping + best-checkpoint saving to path/dnn.h5.
    cb = [callbacks.EarlyStopping(patience=0),
          callbacks.ModelCheckpoint(path + 'dnn.h5',
                                    save_best_only=True,
                                    save_weights_only=False)]
    model = models.Sequential()
    # NOTE(review): input_dim=40 + memory_size presumably matches the
    # flattened state width produced by the bot — confirm against var_size.
    model.add(layers.Dense(1000, input_dim=40 + memory_size, activation='elu'))
    model.add(layers.Dense(1000, activation='elu'))
    model.add(layers.Dense(1000, activation='elu'))
    model.add(layers.Dense(class_num, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['mae'])
    model.fit(x_train, y_train, validation_data=(x_val, y_val), callbacks=cb, epochs=100, batch_size=512)
def get_closest(unit, unit_list):
    """Return the member of ``unit_list`` nearest to ``unit``, or None.

    Only units strictly closer than the 999 sentinel qualify, matching the
    original scan's initial best-distance; an empty list (or no unit within
    range) therefore yields None.  Ties keep the earliest list entry.
    """
    candidates = [u for u in unit_list if u.distance_to(unit) < 999]
    return min(candidates, key=lambda u: u.distance_to(unit), default=None)
def get_closest_distance(units, e_units):
    """Return the smallest pairwise distance between the two unit groups.

    Capped at the 999 sentinel, which is also the result when either group
    is empty — identical to the original scan, whose best-distance started
    at 999 and was only ever lowered.
    """
    distances = [i.distance_to(j) for i in units for j in e_units]
    # Appending the sentinel reproduces the 999 cap/default in one pass.
    return min(distances + [999])
class Strat():
    """Epsilon-greedy move policy backed by a trained Keras model.

    Falls back to uniformly random moves when the model/scaler/encoder
    artifacts cannot be loaded from disk (``use_model`` is False).
    """
    random_chance = .01  # epsilon: probability of taking a purely random move
    print('random', random_chance)

    def __init__(self):
        try:
            self.model = models.load_model(path + 'dnn.h5')
            with open(path + 'scaler.plk', 'rb') as f:
                self.scaler = pickle.load(f)
            with open(path + 'encoder.plk', 'rb') as f:
                self.encoder = pickle.load(f)
            self.use_model = True
        except Exception:  # was a bare except:; at least let KeyboardInterrupt/SystemExit propagate
            traceback.print_exc()
            self.use_model = False

    def get_move(self, move_dict, game_state):
        """Pick the next move label.

        Random with probability ``random_chance`` (or whenever no model is
        loaded); otherwise sample a move from the model's predicted
        distribution over the ``class_num`` move classes.
        """
        if random.random() < self.random_chance or not self.use_model:
            return random.choice(move_dict)
        # Flatten and scale the state exactly as at training time.
        game_state = np.expand_dims(game_state, 0)
        game_state = np.reshape(game_state, (1, -1))
        scaled_input = self.scaler.transform(game_state)
        probs = self.model.predict(scaled_input)[0]
        probs /= probs.sum()  # renormalize so np.random.choice gets a valid distribution
        next_move_index = np.random.choice(np.arange(probs.shape[0]), p=probs)
        # One-hot encode the sampled index, then map it back to a move label.
        one_hot = np.zeros((1, probs.shape[0]))
        one_hot[0, next_move_index] = 1
        return self.encoder.inverse_transform(one_hot)[0]
class UnitBot(sc2.BotAI):
    """Protoss bot whose per-step decisions are delegated to a Strat policy.

    Keeps rolling windows (``memory_size`` + 1 entries) of game states and
    chosen moves, pre-filled with sentinel values so early steps always see
    a full window.
    """

    def __init__(self, s):
        super().__init__()
        self.ts = int(datetime.datetime.now().timestamp())  # run id used in output filenames
        self.ITERATIONS_PER_MINUTE = 60
        self.MAX_WORKERS = 100
        self.memory = memory_size
        self.actions = [i for i in range(class_num)]  # discrete action ids the policy chooses from
        self.past_moves = []
        self.s = s  # Strat instance that picks moves
        self.max_score = 0
        self.games_states = []
        self.move_history = []
        self.counter = 0
        # Pre-fill state/move windows with sentinels (nan_replacement / -1).
        for i in range(memory_size + 1):
            self.games_states.append([nan_replacement for j in range(var_size - 1)])
            self.move_history.append(-1)

    def get_possible_moves(self):
        """Enumerate candidate moves: a random-destination move for every unit,
        plus a build order per (worker, building type) pair.

        Fix: the original built this list and then fell off the end,
        returning None to the caller.
        """
        possible_moves = []
        for i in self.units:
            possible_moves.append({'move_id':0, 'unit':i, 'x':random.random(), 'y':random.random()})
        for i in self.workers:
            for b in buildings:
                possible_moves.append({'move_id':1, 'unit':i, 'building_id': b, 'x':random.random(), 'y':random.random()})
        return possible_moves

    def move(self, unit, v1, v2):
        """Order ``unit`` to the map point given by fractional coordinates (v1, v2) in [0, 1]."""
        unit.move((v1 * self.game_info.map_size[0], v2 * self.game_info.map_size[1]))

    def build_building(self, u, b, v1, v2):
        """Queue construction of building type ``b`` by worker ``u`` near fractional map position (v1, v2)."""
        self.build(building=b, unit=u, near=position.Point2((v1 * self.game_info.map_size[0], v2 * self.game_info.map_size[1])))

    def get_closest_enemy_to_pos(self, x, y):
        # Intentional stub: not implemented yet.
        pass

    def read_map(self):
        """Snapshot known enemy units and own aggressive units into DataFrames
        (``self.enemy_df`` / ``self.unit_df``) of tag plus x/y position."""
        enemy_df = []
        enemy_unit_dict = {}
        for i in self.known_enemy_units:
            enemy_unit_dict[i.tag] = i
            enemy_df.append({'tag':i.tag, 'x':i.position[0], 'y':i.position[1]})
        self.enemy_df = pd.DataFrame.from_dict(enemy_df)
        unit_df = []
        unit_dict = {}
        for i in self.units:
            # NOTE(review): aggressive_units is keyed by sc2 unit-type constants,
            # not name strings, so this membership test looks like it can never
            # match — confirm intended comparison.
            if i.name.upper() in aggressive_units.keys():
                unit_dict[i.tag] = i
                unit_df.append({'tag':i.tag, 'x':i.position[0], 'y':i.position[1]})
        self.unit_df = pd.DataFrame.from_dict(unit_df)
def run_games():
    """Play one bot-vs-easy-AI game and dump the game record to disk.

    Writes the module-level ``dump_dict`` (populated during play) to
    ``<path>/<timestamp>_data.plk`` for later training.
    """
    s = Strat()
    ts = int(datetime.datetime.now().timestamp())
    a = None  # NOTE: the run_game result is captured but never used
    print('playing easy')
    a = run_game(maps.get("AbyssalReefLE"), [
        Bot(Race.Protoss, UnitBot(s)),
        Computer(Race.Terran, Difficulty.Easy)
    ], realtime=False)
    with open('{0}/{1}_data.plk'.format(path, ts), 'wb') as f:
        pickle.dump(dump_dict, f)
if __name__ == '__main__':
    # Train/play loop: retrain the policy between games, then play one game.
    # NOTE: games/wins/win_rate/difficulties are initialized but never updated.
    games = 0
    wins = 0
    win_rate = 0
    difficulties = [0]
    for i in range(20000):
        try:
            # Retrain every iteration except the very first (i % 1 == 0 is always true).
            if i % 1 == 0 and i != 0:
                train_strat_model()
        except:
            # Bare except: training failures are logged and the loop keeps playing.
            traceback.print_exc()
        run_games()
        # pool = [multiprocessing.Process(target=run_games) for i in range(6)]
        # for p in pool:
        #     p.start()
        #     time.sleep(5)
        # [p.join() for p in pool]
| [
"tristandelforge1@gmail.com"
] | tristandelforge1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.