blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
414d3c2d4416835c5ace56747881d819223bc332 | 2b32cd50d4c5069898fe5e0d4d94ab224bd0f4ce | /chat/settings.py | bc5e28ff78bad606609ec9348bfe07f56f53469e | [] | no_license | xSerioUsx78/django-react-chat | ef6ea418b2bc245049f8cc01a5b70d4ad9ed9019 | 31f9e70b62589c5e6cfe77936f1b4a3f8c8abf95 | refs/heads/main | 2023-07-15T07:02:17.737076 | 2021-08-30T17:19:44 | 2021-08-30T17:19:44 | 400,559,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | """
Django settings for chat project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
import django_heroku
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['django-react-chat.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# THIRD PARTY APPS
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'channels',
'channels_redis',
'whitenoise',
# MY APPS
'main',
'users'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication'
]
}
SITE_ID = 1
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOWED_ORIGINS = [
"http://django-react-chat.herokuapp.com"
]
ROOT_URLCONF = 'chat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ASGI_APPLICATION = 'chat.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'build', 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'users.User'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
django_heroku.settings(locals())
| [
"lomililmore@gmail.com"
] | lomililmore@gmail.com |
4f5804caf55956d84705b6be8729785adb0a9c12 | 98528563134f0dd2eb595cf2d6014f09362b7c25 | /Python/test.py | afbf8ba51808bd6e643457ca80c7a5d8c80e0d4d | [] | no_license | dwils098/Masters | ade8fc744d42d87323ddd3806e6faf3f85903456 | 6bc3e7cdce7f4ce2269f9e31655d80c24b3efde9 | refs/heads/master | 2020-04-11T03:24:10.257739 | 2015-06-11T21:46:31 | 2015-06-11T21:46:31 | 23,398,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from networkInterface import NetworkInterface
import sys
from twisted.python import log
log.startLogging(sys.stdout)
x = NetworkInterface()
fromPort = int(sys.argv[1])
toPort = int(sys.argv[2])
#action = raw_input("What to do [g]et K or [s]et K V :")
#command = action.split()
#print "command received: ", command
if sys.argv[3] == "g":
x.connect(fromPort,toPort).addCallback(x.get,"key1")
elif sys.argv[3] == "s":
x.connect(fromPort,toPort).addCallback(x.set,"key1", sys.argv[4])
from twisted.internet import reactor
reactor.run()
| [
"danywilson@TechnoCORE.local"
] | danywilson@TechnoCORE.local |
a255df5ad0169ad0db4b8be414d01d51201685f4 | b526e941382b32c1478641eeacf717ddca7688d9 | /poretools/combine.py | 830d097607aca20d8872d408854e5e5038c1ed78 | [] | no_license | monkollek/poretools | 677f1dff269ccbccb184161ccb8a33df28bbe12f | 5569170c0e210f20733974b886c458b0a4409063 | refs/heads/master | 2021-01-18T06:53:39.543962 | 2014-07-17T17:11:29 | 2014-07-17T17:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import tarfile
import sys
import Fast5File
def run(parser, args):
if args.tar_filename.endswith('.tar'):
tar = tarfile.open(args.tar_filename, mode='w')
elif args.tar_filename.endswith('.gz'):
tar = tarfile.open(args.tar_filename, mode='w:gz')
elif args.tar_filename.endswith('.bz2'):
tar = tarfile.open(args.tar_filename, mode='w:bz2')
else:
sys.stderr.write("Unrecognized FAST5 archive extension. Exiting.\n")
sys.exit()
file_count = 0
for fast5 in Fast5File.Fast5FileSet(args.files):
tar.add(fast5.filename)
fast5.close()
file_count += 1
tar.close()
sys.stderr.write("%s successfully created from %d FAST5 files.\n" % \
(args.tar_filename, file_count))
| [
"arq5x@virginia.edu"
] | arq5x@virginia.edu |
5a0f5cae22a02982c592fad023637ceb83f133fe | af9d9043a83a751f00f7b805533d87ccce330d21 | /Portfolio/Kauri.One/main.py | 4e8d91b7cd7be43c8a4d419d97bfc461181aeb85 | [] | no_license | HeCToR74/Python | e664b79593a92daa7d39d402f789812dfc59c19f | f448ec0453818d55c5c9d30aaa4f19e1d7ca5867 | refs/heads/master | 2023-03-08T13:44:19.961694 | 2022-07-03T19:23:25 | 2022-07-03T19:23:25 | 182,556,680 | 1 | 1 | null | 2023-02-28T15:30:01 | 2019-04-21T16:26:48 | HTML | UTF-8 | Python | false | false | 191 | py | from urllib import response
import requests
def get_data(url):
response = requests.get(url)
try:
return response.json()
except ConnectionError as e:
return e
| [
"v.nesterenko@chnu.edu.ua"
] | v.nesterenko@chnu.edu.ua |
2672067dada9f2117c47ed43dbb235c7e2edcc10 | 5c8290870235f060bd550a06210ff6a658b14d47 | /howfarcanigo/main.py | 76bdfb869ff92d61c7fa3ac5b1204e5df7a4505c | [
"MIT"
] | permissive | SebStrug/HowFarCanIGo | 67703341b2c636de33dcffc2680f5d3961516acd | db00c342a60000cd8b536d3aecbce99d5d726e57 | refs/heads/master | 2021-06-03T06:06:01.560120 | 2019-09-02T19:12:42 | 2019-09-02T19:12:42 | 152,883,996 | 13 | 0 | MIT | 2021-06-02T00:23:23 | 2018-10-13T15:09:10 | Python | UTF-8 | Python | false | false | 2,084 | py | # -*- coding: utf-8 -*-
import pickle
import googlemaps
import seaborn as sns; sns.set()
from mapping import configure, generate, transform, draw
## Add home marker to map, and maybe tube stops, play with having more layers etc.
## Options to pickle those hull arrays since they take so long to generate
## Options to draw map or just generate points
## Test with many, many points
if __name__ == '__main__':
# Import configuration file
API_key, origin_string, origin_coords, \
travel_mode, map_type, global_coords, \
N, cutoff_mins = configure.read_config()
print(origin_string, origin_coords['origin_lat'], origin_coords['origin_lng'])
print(travel_mode, map_type)
print(global_coords)
print(N, cutoff_mins)
# Set up client key
gmaps = googlemaps.Client(key=API_key)
# Define what we will call the data
data_name = '{}_{}map_N{}'.format(travel_mode, map_type, N)
# Import data if available for specifications
lats, lngs, travel_times = configure.import_data(data_name)
if not travel_times.any():
# If data does not exist, generate points to travel to
dest_lats, dest_lngs = generate.generate_points(\
map_type, N, \
origin_coords, global_coords)
lats, lngs, travel_times = generate.retrieve_travel_times(\
dest_lats, dest_lngs, \
API_key, travel_mode, \
**origin_coords)
# Save data to save future API calls
pickle.dump([lats, lngs, travel_times], \
open('data/coords/{}.p'.format(data_name), 'wb'))
# Transform data into concave hull arrays
grouped_coords = transform.group_coords(lats, lngs, travel_times, cutoff_mins)
cutoff_hull_arrays = transform.generate_hull_arrays(grouped_coords, num_bins=4)
transform.describe_cutoffs(cutoff_mins, grouped_coords)
# Define a colormap. Could also use `draw.pick_random_cmap(len(cutoff_mins))`
cmap = sns.cubehelix_palette(8, dark=.2, light=.8, reverse=True, as_cmap=True)
map_object = draw.draw_folium_map(cutoff_hull_arrays, \
cutoff_mins, cmap, \
**origin_coords)
# Save map
map_object.save('data/{}.html'.format(data_name))
| [
"SebStrug@users.noreply.github.com"
] | SebStrug@users.noreply.github.com |
ffbc82cc21c15662985e218c64b0c17c758ae9a2 | 0c126654013d3995b1258f512246ca33000227e3 | /mysite/settings.py | e40bab74de5582a693bb58803746f5768ca620ec | [] | no_license | sandradizdarevic2017/watsonandsecondblog | 45e7a6b33e8132670818c39006532b6cc81df899 | ecbecbe2106391163129d0e18cffcf31b9a884be | refs/heads/master | 2021-07-14T17:59:35.068290 | 2017-10-20T02:39:37 | 2017-10-20T02:39:37 | 107,623,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_xq43$pcxusw^(cm-^_s#ei4vh(dq=-9r*667j!i2-9(eqs8(8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'nickdoen2020.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"sdizdarevic@unomaha.edu"
] | sdizdarevic@unomaha.edu |
97021e424312284713ee92fee8b3cdf2a7b47b1b | f66e88aa7cf3719801a8691aec944457efd52fee | /my_portforlio/settings.py | 4790b18c4846a2cf54185be20d2b16c9c9ffd41d | [] | no_license | vominhtri1991/my_blogv3 | 05138f269a58d432ef5a5e14968331f35610004b | 656f6394f6c7fe3fd35d2a66cc252d0d96fc682f | refs/heads/master | 2022-07-19T01:24:45.634282 | 2020-05-23T12:01:55 | 2020-05-23T12:01:55 | 266,325,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,796 | py | """
Django settings for my_portforlio project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3i2-lk!s55af%fyvx8oe2u8uwjk2kthrgy1b8312p4wd_dd5*^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_blog',
'ckeditor',
'ckeditor_uploader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_portforlio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_portforlio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'dbnew.sqlite3'),
#'ENGINE': 'django.db.backends.mysql',
#'NAME': 'myblog',
#'USER': 'myblog',
#'PASSWORD': "Myblog@999",
#'HOST': "192.168.9.99",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
#TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Ho_Chi_Minh'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
SITE_ID = 1
####################################
## CKEDITOR CONFIGURATION ##
####################################
CKEDITOR_JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/2.2.4/jquery.min.js'
CKEDITOR_UPLOAD_PATH = ''
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
},
}
| [
"vominhtri1991@gmail.com"
] | vominhtri1991@gmail.com |
051f937f0a54493419ede9bea95fb0aca6c70e1c | e54c24b053f1c8f49454808ca56f46b638fce445 | /augmented_data.py | 676a06c89d20a86b1d248a993e650164dc7c32ee | [] | no_license | mehuizuizai/sequenceRecognization | 229186d51a516374f3861bd7f777184b226ff6d6 | c506ea04382740fbefe9d06d08429132499f01e0 | refs/heads/master | 2020-04-12T13:43:20.819535 | 2018-12-20T03:12:11 | 2018-12-20T03:12:11 | 162,529,936 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,541 | py | from PIL import Image, ImageEnhance, ImageOps, ImageFile
import numpy as np
import random
import threading, os, time
# RANGE_DIR = [0,1,2,3,4,5,6,7,8,9,"K","L","R","S","U","X","Y"]
RANGE_DIR = [0,1,2,3,4,5,6,7,8,9,"A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z"]
class DataAugmentation:
def randomColor(image):
"""
对图像进行颜色抖动
:param image: PIL的图像image
:return: 有颜色色差的图像image
"""
random_factor = np.random.randint(0, 31) / 10. # 随机因子
color_image = ImageEnhance.Color(image).enhance(random_factor) # 调整图像的饱和度
random_factor = np.random.randint(10, 21) / 10. # 随机因子
brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor) # 调整图像的亮度
random_factor = np.random.randint(10, 21) / 10. # 随机因1子
contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor) # 调整图像对比度
random_factor = np.random.randint(0, 31) / 10. # 随机因子
return ImageEnhance.Sharpness(contrast_image).enhance(random_factor) # 调整图像锐度
def randomGaussian(image, mean=0.2, sigma=0.3):
"""
对图像进行高斯噪声处理
:param image:
:return:
"""
def gaussianNoisy(im, mean=0.2, sigma=0.3):
"""
对图像做高斯噪音处理
:param im: 单通道图像
:param mean: 偏移量
:param sigma: 标准差
:return:
"""
for _i in range(len(im)):
im[_i] += random.gauss(mean, sigma)
return im
# 将图像转化成数组
img = np.asarray(image)
img.flags.writeable = True # 将数组改为读写模式
width, height = img.shape[:2]
img_r = gaussianNoisy(img[:, :, 0].flatten(), mean, sigma)
img_g = gaussianNoisy(img[:, :, 1].flatten(), mean, sigma)
img_b = gaussianNoisy(img[:, :, 2].flatten(), mean, sigma)
img[:, :, 0] = img_r.reshape([width, height])
img[:, :, 1] = img_g.reshape([width, height])
img[:, :, 2] = img_b.reshape([width, height])
return Image.fromarray(np.uint8(img))
@staticmethod
def saveImage(image, path):
image.save(path)
if __name__ == '__main__':
for i in RANGE_DIR:
dir = 'E:/胎号所有/训练_图片大于50resize_28/%s/' % i
# dir_ran_color = 'E:/胎号所有/训练_图片大于50resize_28_ran_color/%s/' %i
dir_ran_Gaussion ='E:/胎号所有/训练_图片大于50resize_28_ran_Gassu/%s/' %i
try:
os.listdir(dir)
except Exception:
continue
# if not os.path.exists(dir_ran_color):
# os.makedirs(dir_ran_color)
if not os.path.exists(dir_ran_Gaussion):
os.makedirs(dir_ran_Gaussion)
for rt, dirs, files in os.walk(dir):
for filename in files:
split = filename.find('.')
filename1 = dir + filename
img = Image.open(filename1)
random_color = DataAugmentation.randomColor(img)
random_Gaussion = DataAugmentation.randomGaussian(img)
# DataAugmentation.saveImage(random_color,dir_ran_color+filename[:split]+"_clor.jpg")
DataAugmentation.saveImage(random_Gaussion,dir_ran_Gaussion+filename[:split]+"_gauss.jpg")
| [
"935815369@qq.com"
] | 935815369@qq.com |
d66d586e7b16e912053b19f171aa3d4e15a341f9 | 5f86944bdf1b810a84c63adc6ed01bbb48d2c59a | /kubernetes/client/models/v1beta1_stateful_set_spec.py | 0288ff6cba3d852e235cad1c93025464292b168a | [
"Apache-2.0"
] | permissive | m4ttshaw/client-python | 384c721ba57b7ccc824d5eca25834d0288b211e2 | 4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1 | refs/heads/master | 2021-01-13T06:05:51.564765 | 2017-06-21T08:31:03 | 2017-06-21T08:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,117 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1StatefulSetSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, replicas=None, selector=None, service_name=None, template=None, volume_claim_templates=None):
"""
V1beta1StatefulSetSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'replicas': 'int',
'selector': 'V1LabelSelector',
'service_name': 'str',
'template': 'V1PodTemplateSpec',
'volume_claim_templates': 'list[V1PersistentVolumeClaim]'
}
self.attribute_map = {
'replicas': 'replicas',
'selector': 'selector',
'service_name': 'serviceName',
'template': 'template',
'volume_claim_templates': 'volumeClaimTemplates'
}
self._replicas = replicas
self._selector = selector
self._service_name = service_name
self._template = template
self._volume_claim_templates = volume_claim_templates
@property
def replicas(self):
"""
Gets the replicas of this V1beta1StatefulSetSpec.
Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.
:return: The replicas of this V1beta1StatefulSetSpec.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1beta1StatefulSetSpec.
Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.
:param replicas: The replicas of this V1beta1StatefulSetSpec.
:type: int
"""
self._replicas = replicas
@property
def selector(self):
"""
Gets the selector of this V1beta1StatefulSetSpec.
Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
:return: The selector of this V1beta1StatefulSetSpec.
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this V1beta1StatefulSetSpec.
Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
:param selector: The selector of this V1beta1StatefulSetSpec.
:type: V1LabelSelector
"""
self._selector = selector
@property
def service_name(self):
"""
Gets the service_name of this V1beta1StatefulSetSpec.
ServiceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.
:return: The service_name of this V1beta1StatefulSetSpec.
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""
Sets the service_name of this V1beta1StatefulSetSpec.
ServiceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.
:param service_name: The service_name of this V1beta1StatefulSetSpec.
:type: str
"""
if service_name is None:
raise ValueError("Invalid value for `service_name`, must not be `None`")
self._service_name = service_name
@property
def template(self):
"""
Gets the template of this V1beta1StatefulSetSpec.
Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.
:return: The template of this V1beta1StatefulSetSpec.
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""
Sets the template of this V1beta1StatefulSetSpec.
Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.
:param template: The template of this V1beta1StatefulSetSpec.
:type: V1PodTemplateSpec
"""
if template is None:
raise ValueError("Invalid value for `template`, must not be `None`")
self._template = template
@property
def volume_claim_templates(self):
"""
Gets the volume_claim_templates of this V1beta1StatefulSetSpec.
VolumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.
:return: The volume_claim_templates of this V1beta1StatefulSetSpec.
:rtype: list[V1PersistentVolumeClaim]
"""
return self._volume_claim_templates
@volume_claim_templates.setter
def volume_claim_templates(self, volume_claim_templates):
"""
Sets the volume_claim_templates of this V1beta1StatefulSetSpec.
VolumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.
:param volume_claim_templates: The volume_claim_templates of this V1beta1StatefulSetSpec.
:type: list[V1PersistentVolumeClaim]
"""
self._volume_claim_templates = volume_claim_templates
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1StatefulSetSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
8b2d00297c0cf74e62326f84649b10c60b835545 | 917dc43e2202542d9dec882032d4a07622e247a2 | /airbnb/migrations/0007_booking.py | 3da0f1d938250cc7f858cbf3dafd33b26838466c | [] | no_license | bigbird10/comp9900 | 019d9febdafaf03beaa684f633648d0a51561644 | 329ef20f8d5197757b7b1d63e308b0766aa6ea47 | refs/heads/master | 2020-08-04T09:51:44.186623 | 2019-10-29T04:10:11 | 2019-10-29T04:10:11 | 212,096,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # Generated by Django 2.2.5 on 2019-10-28 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('airbnb', '0006_auto_20191018_1850'),
]
operations = [
migrations.CreateModel(
name='Booking',
fields=[
('booking_id', models.BigIntegerField(primary_key=True, serialize=False)),
('listing_id', models.BigIntegerField()),
('guest_id', models.BigIntegerField()),
('check_in', models.DateField()),
('check_out', models.DateField()),
('total_price', models.DecimalField(blank=True, decimal_places=1, max_digits=6, null=True)),
],
),
]
| [
"dzrlpp1134@gmail.com"
] | dzrlpp1134@gmail.com |
ef047e92c0307b434c5783a939529501e2119e6b | bf616736ea66c0ce3f36f0d75d9f2951c52b74d7 | /15. Statements/Statements_test.py | fb5c56659a60f266adab80d3a66f70daea4dbb72 | [
"MIT"
] | permissive | Pratham82/Python-Programming | 40a03e163bdc6985a337a8a9638f4eb77ae43ad9 | bbe5fd9132d5cf42ed9f29c3dd758cdc2c17760c | refs/heads/master | 2021-12-12T15:13:32.018356 | 2021-12-09T18:16:43 | 2021-12-09T18:16:43 | 230,051,536 | 3 | 2 | MIT | 2021-10-06T10:11:37 | 2019-12-25T06:13:04 | Python | UTF-8 | Python | false | false | 1,491 | py | # Use for, .split(), and if to create a Statement that will print out words that start with 's':
print("Challenge 1 : ")
st = 'Print only the words that start with s in this sentence'
for word in st.split():
if word[0]=='s' or word[0]=='S':
print(word)
# Use range() to print all the even numbers from 0 to 10.
l1= list(range(0,11,2))
print(l1)
for num in range(0,11,2):
print(num)
# Use a List Comprehension to create a list of all numbers between 1 and 50 that are divisible by 3.
print("Challenge 3 : ")
list1 =[i for i in range(1,51) if i%3==0]
print(list1)
# Go through the string below and if the length of a word is even print "even!"
st1 = 'Print every word in this sentence that has an even number of letters'
print("Challenge 4 : ")
for i in st1.split():
if len(i) %2==0:
print(f"{i}: even")
# Write a program that prints the integers from 1 to 100. But for multiples of three print "Fizz" instead of the number, and for the multiples of five print "Buzz". For numbers which are multiples of both three and five print "FizzBuzz".
for n in range(1,101):
if n % 3==0 and n % 5== 0:
print("FizzBuzz")
elif n % 3 ==0:
print("Fizz")
elif n % 5 ==0:
print("Buzz")
else:
print(n)
# Use List Comprehension to create a list of the first letters of every word in the string below:
st2 = 'Create a list of the first letters of every word in this string'
list1 =[ i[0] for i in st2.split()]
print(list1) | [
"mali.prathamesh82@gmail.com"
] | mali.prathamesh82@gmail.com |
3d38eefec532924ce95ba3d71a604ad1daf3ac86 | 083e363ad724cdca84ab7adf1169df3e21bce37e | /Sentiment_Classifier/train/train_3.py | 05bb9c3bf83c9137bc34d55ab116fc040da95256 | [] | no_license | 621Alice/Oasis | c5284144373b411ea326e02c619198cfc9d65ad8 | 13d103400becc53ccb0d7f46cb542acfbc2ce9c2 | refs/heads/master | 2020-07-21T06:49:39.156283 | 2019-09-06T13:10:32 | 2019-09-06T13:10:32 | 206,772,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,012 | py | from Sentiment_Classifier.preprocessing.preprocessing_data_3labels import *
#build embedding layer
embedding_dim=200
embedding_matrix=build_embeddings(embedding_dim, word_index)
embedding_layer = Embedding(len(word_index) + 1,
embedding_dim,
weights=[embedding_matrix],
input_length=max_seq_len,
trainable=False)
sequence_input=Input(shape=(max_seq_len,), dtype='int32')
embedded_sequences=embedding_layer(sequence_input)
lstm_1 = Bidirectional(LSTM(6,recurrent_dropout=0.0,return_sequences=True,dropout=0.15))(embedded_sequences)
conv_1 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(lstm_1)
conv_1 = Dropout(0.15)(conv_1)
merge_1=Concatenate(axis=1)([ conv_1,lstm_1])
lstm_2= Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(merge_1)
conv_2 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(lstm_2)
conv_2 = Dropout(0.15)(conv_2)
merge_2=Concatenate(axis=1)([ conv_1,lstm_1,conv_2,lstm_2])
lstm_3= Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(merge_2)
conv_3 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(lstm_3)
conv_3 = Dropout(0.15)(conv_3)
merge_3=Concatenate(axis=1)([ conv_1,lstm_1,conv_2,lstm_2,conv_3,lstm_3])
lstm_4= Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(merge_3)
conv_4 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(lstm_4)
conv_4 = Dropout(0.15)(conv_3)
merge_4=Concatenate(axis=1)([ conv_1,lstm_1,conv_2,lstm_2,conv_3,lstm_3,conv_4,lstm_4])
conv_5 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(embedded_sequences)
conv_5 = Dropout(0.15)(conv_5)
lstm_5 = Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(conv_5)
merge_5=Concatenate(axis=1)([lstm_5,conv_5])
conv_6 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(merge_5)
conv_6 = Dropout(0.15)(conv_6)
lstm_6 = Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(conv_6)
merge_6=Concatenate(axis=1)([lstm_5,conv_5,lstm_6,conv_6])
conv_7 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(merge_6)
conv_7 = Dropout(0.15)(conv_7)
lstm_7 = Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(conv_7)
merge_7=Concatenate(axis=1)([lstm_5,conv_5,lstm_6,conv_6,lstm_7,conv_7])
conv_8 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(merge_7)
conv_8 = Dropout(0.15)(conv_8)
lstm_8 = Bidirectional(LSTM(6,dropout=0.15,recurrent_dropout=0.0,return_sequences=True))(conv_8)
merge_8=Concatenate(axis=1)([lstm_5,conv_5,lstm_6,conv_6,lstm_7,conv_7,lstm_8,conv_8])
lstm_9 = Bidirectional(LSTM(6,dropout=0.05,recurrent_dropout=0.0,return_sequences=True))(embedded_sequences)
lstm_10 = Bidirectional(LSTM(6,dropout=0.05,recurrent_dropout=0.0,return_sequences=True))(lstm_9)
lstm_11 = Bidirectional(LSTM(6,dropout=0.05,recurrent_dropout=0.0,return_sequences=True))(lstm_10)
lstm_12 = Bidirectional(LSTM(6,dropout=0.05,recurrent_dropout=0.0,return_sequences=True))(lstm_11)
conv_9 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(embedded_sequences)
conv_9 = MaxPooling1D(2)(conv_9)
conv_9 = Dropout(0.05)(conv_9)
conv_10 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(conv_9)
conv_10 = MaxPooling1D(2)(conv_10)
conv_10 = Dropout(0.05)(conv_10)
conv_11 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(conv_10)
conv_11 = MaxPooling1D(2)(conv_11)
conv_11 = Dropout(0.05)(conv_11)
conv_12 = Conv1D(filters=12,kernel_size=2,activation='relu',kernel_regularizer=regularizers.l2(0.0001))(conv_10)
conv_12 = MaxPooling1D(2)(conv_12)
conv_12 = Dropout(0.05)(conv_12)
merge=Concatenate(axis=1)([merge_4,merge_8,lstm_11,conv_11])
pool= MaxPooling1D(4)(merge)
drop= Dropout(0.4)(pool)
flat = Flatten()(drop)
dense = Dense(24, activation='relu')(flat)
preds = Dense(3, activation='softmax')(dense)
model = Model(sequence_input, preds)
adadelta = optimizers.Adadelta(lr=1.0, epsilon=None, decay=0.000)
model_checkpoints = callbacks.ModelCheckpoint(p+"/model/checkpoint-3labels-{val_loss:.3f}.h5", verbose=0,period=0,monitor='val_loss', save_best_only=True, save_weights_only=False, mode='auto')
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=adadelta,
metrics=['acc'])
print("Training Progress:")
model_log = model.fit(train_features_3,train_labels_3, validation_data=(val_features_3,val_labels_3),
epochs=30, batch_size=200,
callbacks=[model_checkpoints])
| [
"15251608@life.hkbu.edu.hk"
] | 15251608@life.hkbu.edu.hk |
adcca0b3d13ae93460b0d7f372d36e4665102e4c | fc5c9741ff05a816d660502b388be198f1298aeb | /broker_json/conversions.py | e781b4aff25da2653fe68a5b1d36a05fda316886 | [
"BSD-2-Clause"
] | permissive | grigorescu/broker-to-json | 85fefcfa8a5c32dbc437089f7a57c9bf830d65a1 | ed4caccffd2b7ea74e38c22b0e28ff2e840a4c47 | refs/heads/main | 2023-06-12T01:54:18.067990 | 2021-07-02T02:41:45 | 2021-07-02T02:41:45 | 381,872,737 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,750 | py | # These utilities need Broker bindings.
from . import find_broker
from .utils import get_index_types, get_record_types, get_yield_type
import datetime
import ipaddress
import json
import broker
# Broker returns native objects for Port. This will just give a string.
def fix_ports(val):
if isinstance(val, broker._broker.Port) or isinstance(val, str):
return str(val)
try:
is_tuple = isinstance(val, tuple)
# tuples are immutable
if is_tuple:
val = list(val)
for i in range(len(val)):
val[i] = fix_ports(val[i])
if is_tuple:
val = tuple(val)
except TypeError:
pass
return val
def to_json(val):
"""Convert broker types to JSON."""
if val is None:
return val
if (
isinstance(val, bool)
or isinstance(val, str)
or isinstance(val, float)
or isinstance(val, int)
or isinstance(val, bytes)
):
return val
elif isinstance(val, datetime.timedelta):
return float(val.total_seconds())
elif isinstance(val, datetime.datetime):
return float(val.timestamp())
elif isinstance(val, ipaddress.IPv4Address) or isinstance(
val, ipaddress.IPv6Address
):
return val.compressed.lower()
elif isinstance(val, ipaddress.IPv4Network) or isinstance(
val, ipaddress.IPv6Network
):
return val.compressed.lower()
elif isinstance(val, broker.Count):
return int(str(val))
elif isinstance(val, broker.Enum) or isinstance(val, broker.Port):
return str(val)
elif isinstance(val, set):
return [to_json(x) for x in val]
elif isinstance(val, tuple):
return [to_json(x) for x in val]
elif isinstance(val, dict):
data = {}
for k, v in val.items():
tmp_k = to_json(k)
if isinstance(tmp_k, list):
tmp_k = json.dumps(tmp_k)
data[tmp_k] = to_json(v)
return data
else:
raise ValueError("Unknown type", str(type(val)))
def from_json(val, type_name):
"""Convert JSON types to broker."""
if val is None:
v = val
# Native types
elif type_name in ["bool", "int", "double", "string"]:
v = val
# Wrapper types
elif type_name == "count":
v = broker.Count(val)
elif type_name == "enum":
v = broker.Enum(val)
# Network types
elif type_name == "addr":
v = ipaddress.ip_address(val)
elif type_name == "subnet":
v = ipaddress.ip_network(val)
elif type_name == "port":
num, proto = val.split("/", 1)
num = int(num)
proto = proto.upper()
if proto == "TCP":
proto = broker.Port.Protocol.TCP
elif proto == "UDP":
proto = broker.Port.Protocol.UDP
elif proto == "ICMP":
proto = broker.Port.Protocol.ICMP
else:
proto = broker.Port.Protocol.Unknown
v = broker.Port(num, proto)
# Time types
elif type_name == "interval":
v = broker.Timespan(float(val))
elif type_name == "time":
v = broker.Timestamp(float(val))
# Composite types
elif type_name.startswith("set["):
inner_type_name = type_name.split("set[", 1)[1]
inner_type_name = inner_type_name[:-1]
data = set([from_json(x, inner_type_name) for x in val])
v = broker.Data(data)
elif type_name.startswith("vector of "):
inner_type_name = type_name[10:]
data = tuple([from_json(x, inner_type_name) for x in val])
v = broker.Data(data)
elif type_name.startswith("table["):
index_types = get_index_types(type_name)
yield_type = get_yield_type(type_name)
data = {}
for k, v in val.items():
if len(index_types) > 1:
index = ()
k = json.loads(k)
for i in range(len(index_types)):
index = index + tuple([from_json(k[i], index_types[i])])
else:
index = from_json(k, index_types[0])
data[index] = from_json(v, yield_type)
return broker.Data(data)
elif type_name.startswith("record {"):
types = get_record_types(type_name)
data = []
for i in range(len(types)):
field_type = types[i]["field_type"]
if len(val) > i:
data.append(from_json(val[i], field_type))
else:
data.append(from_json(None, field_type))
return broker.Data(data)
elif type_name == "pattern":
return broker.Data(val)
else:
raise NotImplementedError("Converting type", type_name)
return v
| [
"vlad@es.net"
] | vlad@es.net |
5dc8031a372b2a037a37f7988cd5b299ae4e40cd | efc010c7b1e5d4ad0bb377335e74fbffb453c9c9 | /images_import.py | 6b7223f32f42fb822be689eff3826d2b6a22130c | [] | no_license | Michelle-lele/project1 | 43a4e96792cfea4d575947aded5391c1793f6d77 | 5a685fcd849068847ffe57dbd72f86c30114970d | refs/heads/master | 2020-05-06T20:08:35.350697 | 2019-06-20T18:21:52 | 2019-06-20T18:21:52 | 180,223,415 | 1 | 0 | null | 2019-06-09T07:54:31 | 2019-04-08T19:58:45 | HTML | UTF-8 | Python | false | false | 1,381 | py | #!/usr/bin/env python3
import os
import sys
import requests
import xml.etree.ElementTree as ET
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
key = os.getenv("GOODREADS_API_KEY")
# Get all existing book isbns from database that don't have an image
NoCoverBooks= db.execute("SELECT isbn from books WHERE cover_img IS NULL").fetchall()
#print(NoCoverBooks, file=sys.stderr)
# call GoodReads API for each isbn
for isbn in NoCoverBooks:
print(f"ISBN: {isbn[0]}", file=sys.stderr)
GetBookbyIsbn = requests.get("https://www.goodreads.com/search/index.xml?key=" + key + "&q=" + isbn[0])
if GetBookbyIsbn.status_code == 200:
root = ET.fromstring(GetBookbyIsbn.text)
for search in root.findall('search'):
for results in search.findall('results'):
for works in results.findall('work'):
for best_book in works.findall("best_book"):
for image_url in best_book.findall("image_url"):
cover_img = image_url.text
#TODO skip the GoodReads placeholder image
NewBookCoverImage = db.execute("UPDATE books SET cover_img= :cover_img WHERE isbn= :isbn",
{"cover_img": cover_img, "isbn": isbn[0]})
db.commit()
else:
print("Not Sucessfull", file=sys.stderr)
print("--END--", file=sys.stderr) | [
"fia_m@abv.bg"
] | fia_m@abv.bg |
2b2dce53205515424c5bb11c71552d4553094d37 | 3a298c93b67386392d3dee243671f2c101decf01 | /leetcode/learn-cards/array-101/12_move_zeros.py | b550db407a56d2417d7e7300073945f2bd13d3af | [] | no_license | Zahidsqldba07/coding-problems-2 | ffbc8408e4408fc846c828af2ec50a9d72e799bc | 020bffbd14ca9993f1e678181ee7df761f1533de | refs/heads/master | 2023-06-26T11:05:34.089697 | 2021-07-21T15:16:10 | 2021-07-21T15:16:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | class Solution:
def moveZeroes(self, nums):
"""
Do not return anything, modify nums in-place instead.
"""
z = 0
for i in range(len(nums)):
if nums[i] != 0:
nums[i], nums[z] = nums[z], nums[i]
z += 1 | [
"alvee.akand@outlook.com"
] | alvee.akand@outlook.com |
070f9494314e7d8a7ce8283fe45bb2b13ae5e7d8 | 9f9f4280a02f451776ea08365a3f119448025c25 | /plans/hsppw/lcut_hsp-s_070_pwde_mlpc_hs.py | b7b1a1ccf1a62ce508fefc6b8b40da3238c1b831 | [
"BSD-2-Clause"
] | permissive | dbis-uibk/hit-prediction-code | 6b7effb2313d2499f49b2b14dd95ae7545299291 | c95be2cdedfcd5d5c27d0186f4c801d9be475389 | refs/heads/master | 2023-02-04T16:07:24.118915 | 2022-09-22T12:49:50 | 2022-09-22T12:49:50 | 226,829,436 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | """Plan using all features."""
import os.path
from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import CutLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'
number_of_classes = 70
dataloader = ClassLoaderWrapper(
wrapped_loader=CutLoaderWrapper(
wrapped_loader=EssentiaLoader(
dataset_path=os.path.join(
PATH_PREFIX,
'hsp-s_acousticbrainz.parquet',
),
features=[
*common.all_no_year_list(),
],
label='yang_hit_score',
nan_value=0,
data_modifier=lambda df: compute_hit_score_on_df(
df,
pc_column='lastfm_playcount',
lc_column='lastfm_listener_count',
hit_score_column='yang_hit_score',
),
),
number_of_bins=number_of_classes,
),
labels=list(range(number_of_classes)),
)
pipeline = Pipeline([
('scale', MinMaxScaler()),
('model',
PairwiseOrdinalModel(
wrapped_model=MLPClassifier(
hidden_layer_sizes=(256, 128, 128, 128, 64),
verbose=True,
),
pairs_factor=3.,
threshold_type='average',
pair_strategy='random',
pair_encoding='delta',
threshold_sample_training=False,
)),
])
evaluator = CvEpochEvaluator(
cv=evaluations.cv(),
scoring=evaluations.metrics.ordinal_classifier_scoring(),
scoring_step_size=1,
)
result_handlers = [
print_results_as_json,
]
| [
"mikevo-uibk@famv.net"
] | mikevo-uibk@famv.net |
830af3d97141cb781619f79262939f3fe8ecfff4 | 2b4f2ab43f8ae353f82e1add9fe2c24df2f51b60 | /venv/Scripts/django-admin.py | 8a76bfb66a0c242037fc2a08c2cfe368b3b3d9af | [] | no_license | ermilovim/NewDjango | f56c74efd7bf0f8a22f53a2305415578d6f67b96 | b820bfc311be23ae9e7ce1a9e0c9b24fb84ad57b | refs/heads/master | 2021-06-03T03:36:41.611517 | 2020-09-17T17:23:22 | 2020-09-17T17:23:22 | 135,053,241 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #!E:\python\NewDjango\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"ermilovim@gmail.com"
] | ermilovim@gmail.com |
af6a681e608dc1a43decb5ac526cc86dfbccaea1 | 5fcddf2a68ad78f8cd66af363d49ead2a3b66919 | /cscs-checks/libraries/hpx/hpx_hello_world.py | 81c3cd6675b65cc82ae7818712bfc7ec8ef99702 | [
"BSD-3-Clause"
] | permissive | GiuseppeLoRe/reframe | e6c5a780d414ad34b8e1982c0e02fad642097b72 | a1e5aec54dd29925af96e4bb7095f47ea9547c5a | refs/heads/master | 2020-07-17T14:25:06.893593 | 2019-09-02T16:33:32 | 2019-09-02T16:33:32 | 192,341,923 | 0 | 0 | BSD-3-Clause | 2019-07-03T19:44:03 | 2019-06-17T12:22:52 | Python | UTF-8 | Python | false | false | 2,325 | py | import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class HelloWorldHPXCheck(rfm.RunOnlyRegressionTest):
def __init__(self):
super().__init__()
self.descr = 'HPX hello, world check'
self.valid_systems = ['daint:gpu, daint:mc', 'dom:gpu', 'dom:mc']
self.valid_prog_environs = ['PrgEnv-gnu']
self.modules = ['HPX']
self.executable = 'hello_world'
self.sourcesdir = None
self.use_multithreading = None
self.tags = {'production'}
self.maintainers = ['VH', 'JG']
def setup(self, partition, environ, **job_opts):
hellos = sn.findall(r'hello world from OS-thread \s*(?P<tid>\d+) on '
r'locality (?P<lid>\d+)', self.stdout)
if partition.fullname == 'daint:gpu':
self.num_tasks = 2
self.num_tasks_per_node = 1
self.num_cpus_per_task = 12
elif partition.fullname == 'daint:mc':
self.num_tasks = 2
self.num_tasks_per_node = 1
self.num_cpus_per_task = 36
elif partition.fullname == 'dom:gpu':
self.num_tasks = 2
self.num_tasks_per_node = 1
self.num_cpus_per_task = 12
elif partition.fullname == 'dom:mc':
self.num_tasks = 2
self.num_tasks_per_node = 1
self.num_cpus_per_task = 36
self.executable_opts = ['--hpx:threads=%s' % self.num_cpus_per_task]
# https://stellar-group.github.io/hpx/docs/sphinx/branches/master/html/terminology.html#term-locality
num_localities = self.num_tasks // self.num_tasks_per_node
assert_num_tasks = sn.assert_eq(sn.count(hellos),
self.num_tasks*self.num_cpus_per_task)
assert_threads = sn.map(lambda x: sn.assert_lt(int(x.group('tid')),
self.num_cpus_per_task), hellos)
assert_localities = sn.map(lambda x: sn.assert_lt(int(x.group('lid')),
num_localities), hellos)
self.sanity_patterns = sn.all(sn.chain([assert_num_tasks],
assert_threads,
assert_localities))
super().setup(partition, environ, **job_opts)
| [
"victorusu@gmail.com"
] | victorusu@gmail.com |
7c1a811e18ee1784ac9af8787907e23d53390186 | 3e8bba1f256e9dd30c7b0609cab2356c38289396 | /pr2_robot/scripts/project_impl_scripts/project_impl_script.py | b0c5f1b71cbdff8938bc2c71345785b94e2586ce | [] | no_license | priteshgudge/pick_place_3dperception | 1f32e3616f2472ac11c70579bd4be2ee0a5d7383 | bb6cdfb15ed306fff431e334ecc4aa2de105970d | refs/heads/master | 2021-01-02T08:25:20.746347 | 2017-09-11T21:26:03 | 2017-09-11T21:26:03 | 99,007,979 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,240 | py | #!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# Convert ROS msg to PCL data
cloud = ros_to_pcl(pcl_msg)
# Statistical Outlier Filtering
outlier_filter = cloud.make_statistical_outlier_filter()
outlier_filter.set_mean_k(50) #30
outlier_filter.set_std_dev_mul_thresh(0.5) # 0.3
cloud_filtered = outlier_filter.filter()
#########################################################################
# TODO: Voxel Grid Downsampling
vox = cloud.make_voxel_grid_filter()
# TODO: PassThrough Filter
LEAF_SIZE = 0.005 # 0.005
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
cloud_filtered = vox.filter()
##############################################################################
#TWO passthrough filters one over Z and one over X
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.3 # 0.65
axis_max = 5.0 # 1.35
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'x' # y
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.34 # -0.55
axis_max = 1.0 # + 0.55
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
##############################################################################
# RANSAC Plane Segmentation
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.015
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
# Extract inliers and outliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
##################################################################################
# Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(extracted_outliers)
kd_tree = white_cloud.make_kdtree()
#Created a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
#SetTolerances
ec.set_ClusterTolerance(0.01) # 0.015
ec.set_MinClusterSize(50) # 100
ec.set_MaxClusterSize(15000) # 5000
#Search the k-d tree for clusters
ec.set_SearchMethod(kd_tree)
#Extract indices for each discovered clusters
cluster_indices = ec.Extract()
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
#Assign a cloror corresponding to each segmented object
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([
white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])
])
#CreateNew Cloud Contaning all clusters, with unique colors
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
# TODO: Convert PCL data to ROS messages
table_pcl_msg = pcl_to_ros(extracted_inliers)
objects_pcl_msg = pcl_to_ros(extracted_outliers)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# TODO: Publish ROS messages
pcl_objects_pub.publish(objects_pcl_msg)
pcl_table_pub.publish(table_pcl_msg)
pcl_clusters_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects_list = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = extracted_outliers.extract(pts_list)
ros_cluster = pcl_to_ros(pcl_cluster)
# Compute the associated feature vector
chists = compute_color_histograms(ros_cluster, using_hsv=True)
normals = get_normals(ros_cluster)
nhists = compute_color_histograms(normals)
feature = np.concatenate((chists,nhists))
# Make the prediction
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += 0.25
object_markers_pub.publish(make_label(label, label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects_list.append(do)
# Publish the list of detected objects
#This is the output for the upcoming project
detected_objects_pub.publish(detected_objects_list)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
try:
pr2_mover(detected_objects_list)
except rospy.ROSInterruptException:
pass
def reset_pose_position(pose):
pose.position.x = 0
pose.position.y = 0
pose.position.z = 0
return pose
def reset_pose_orientation(pose):
pose.orientation.x = 0
pose.orientation.y = 0
pose.orientation.z = 0
pose.orientation.w = 0
return pose
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
# TODO: Initialize variables
TEST_SCENE_NUM = std_msgs.msg.Int32()
TEST_SCENE_NUM.data = 1
OBJECT_NAME = std_msgs.msg.String()
WHICH_ARM = std_msgs.msg.String() # green = right, red = left
PICK_POSE = geometry_msgs.msg.Pose()
PLACE_POSE = geometry_msgs.msg.Pose()
dict_list = []
centroids = []
counter = 0
output_yaml = []
# TODO: Get/Read parameters
object_list_param = rospy.get_param('/object_list')
dropbox_param = rospy.get_param('/dropbox')
rospy.loginfo('Starting pr2_mover with {} objects'.format(len(object_list_param)))
# TODO: Parse parameters into individual variables
dict_dropbox = {}
for param in dropbox_param:
dict_dropbox[param['name']] = param['position']
print "Object List Len", len(object_list)
print "Dict Dropbox",dict_dropbox
print "Object Param List", len(object_list_param)
# TODO: Rotate PR2 in place to capture side tables for the collision map
# TODO: Loop through the pick list
for obj in object_list_param:
print "Object Name:", obj['name']
OBJECT_NAME.data = obj['name']
WHICH_ARM.data = ''
reset_pose_position(PICK_POSE)
reset_pose_orientation(PICK_POSE)
reset_pose_position(PLACE_POSE)
reset_pose_orientation(PLACE_POSE)
# TODO: Get the PointCloud for a given object and obtain it's centroid
for detected in object_list:
if OBJECT_NAME.data == detected.label:
print "Detected Label:",detected.label
points_arr = ros_to_pcl(detected.cloud).to_array()
pick_pose_centroids = np.mean(points_arr, axis=0)[:3]
# TODO: Create 'place_pose' for the object
PICK_POSE.position.x = np.asscalar(pick_pose_centroids[0])
PICK_POSE.position.y = np.asscalar(pick_pose_centroids[1])
PICK_POSE.position.z = np.asscalar(pick_pose_centroids[2])
#break
# TODO: Assign the arm to be used for pick_place
if obj['group'] == 'red':
WHICH_ARM.data = 'left'
else:
WHICH_ARM.data = 'right'
PLACE_POSE.position.x = dict_dropbox[WHICH_ARM.data][0]
PLACE_POSE.position.y = dict_dropbox[WHICH_ARM.data][1]
PLACE_POSE.position.z = dict_dropbox[WHICH_ARM.data][2]
# TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format
yaml_dict = make_yaml_dict(TEST_SCENE_NUM, WHICH_ARM, OBJECT_NAME, PICK_POSE, PLACE_POSE)
output_yaml.append(yaml_dict)
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
#try:
# pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# TODO: Insert your message variables to be sent as a service request
# resp = pick_place_routine(TEST_SCENE_NUM, OBJECT_NAME, WHICH_ARM, PICK_POSE, PLACE_POSE)
# print ("Response: ",resp.success)
#except rospy.ServiceException, e:
# print "Service call failed: %s"%e
#else:
# rospy.loginfo('Cant find object: {}'.format(object_list_param[counter]['name']))
# TODO: Output your request parameters into output yaml file
send_to_yaml("output_"+ str(TEST_SCENE_NUM.data) + ".yaml", output_yaml)
if __name__ == '__main__':
# TODO: ROS node initialization
rospy.init_node('clustering', anonymous=False)
# TODO: Create Subscribers
pcl_sub = rospy.Subscriber('/pr2/world/points', pc2.PointCloud2, pcl_callback, queue_size=1)
# TODO: Create Publishers
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_clusters_pub = rospy.Publisher("/pcl_clusters", PointCloud2, queue_size=1)
# TODO: Load Model From disk
model = pickle.load(open('model.sav','rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| [
"priteshgudge@gmail.com"
] | priteshgudge@gmail.com |
8448482c0c96ec4904f8f99a504792bd67ba61c2 | 4c5329f63dbe10aec9b0e992fab0170616f0250a | /pyth.py | 85bdfea6476b79fbffbdfa1f7ef1a503d3d3cfb8 | [] | no_license | faarhann/guessingGame | 1fd3ac6d8b6a309a302efc64121a51089ef7263f | 28be449fe9460eaf816a6bcfccf7312bf8bd6d1a | refs/heads/master | 2020-03-28T14:49:54.028567 | 2018-09-12T18:54:06 | 2018-09-12T18:54:06 | 148,526,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import random
answer = random.randint(1, 10)
print("Please guess a number between 1-10: ")
guess = int(input())
numberOfGuesses = 0
while guess != answer:
numberOfGuesses+=1
if guess > answer:
print("You guessed too high and answer was {0}".format(answer))
answer = random.randint(1, 10)
print("Please guess a number between 1-10: ")
guess = int(input())
elif guess < answer:
print("You guessed too low answer was {}".format(answer))
answer = random.randint(1, 10)
"Please guess a number between 1-10: "
guess = int(input())
elif guess == answer:
break
print("You guessed it right answer was {} and number of guesses made was {}".format(answer, numberOfGuesses))
| [
"farhanmohamed@hotmail.se"
] | farhanmohamed@hotmail.se |
bd3f03426be8ceee351ecdae9121ec268fe032c3 | b3bf96d14da09fc4c25074c3d7a8e61dd859688d | /mysales/wsgi.py | 61dcada3f0824c6502da07c7ac3bc80eafb4c1c5 | [] | no_license | mcjyang/Django-Website | afd4734dbda4a650346231c3c933fe7ddf5ffbc1 | 4019114aa69fae67535aa378593d3804860a0c41 | refs/heads/master | 2021-01-20T09:54:11.900355 | 2017-05-06T02:42:36 | 2017-05-06T02:42:36 | 90,296,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
WSGI config for mysales project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysales.settings")
application = get_wsgi_application()
| [
"meng-chieh.yang@stonybrook.edu"
] | meng-chieh.yang@stonybrook.edu |
2148bdcf3f156fb3c3f541320903dd6e66cefb51 | c01a08d60003cc8dfa347c7a982a358ba8e837b1 | /data2.py | 38c3bc094e942509c837d524625da2131f4f5841 | [] | no_license | LouisG99/honda-mobility-hacks | 7a0a072d9990744000bab92ab3c8a1a904c71a31 | 5c449d6ab165a51110c34d0e23b81c5f356e1266 | refs/heads/master | 2020-04-17T10:21:07.005983 | 2019-01-20T21:26:12 | 2019-01-20T21:26:12 | 166,498,050 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | import boto3
import botocore
# Define the S3 Bucket Name
BUCKET_NAME = 'p3na-18gus.3101.027'
# Path within the S3 bucket to folder we desire
PATH = 'video-files/'
SAVE_PATH = 'downloaded-files/'
# File name we wish to download
file_name = 'Recfile P3 Edge 20181121 082855 Webcam Logitech Forward Outputiplimage.m4v'
KEY = PATH + file_name
# Establish the AWS client connection using access keys.
# Select the correct AWS resource
s3 = boto3.resource('s3',
aws_access_key_id='AKIAJJKVLCJ47OTT7FYQ',
aws_secret_access_key='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',
region_name='us-east-1'
)
# Download file from S3 bucket, and store at local location 'file_name'.
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, SAVE_PATH + file_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise | [
"utlathia@umich.edu"
] | utlathia@umich.edu |
fc37ca62494fdb9c5e7ab802b5563d6df36faff3 | b32afacd7de62e053bf823fc27d0cc57ee07c55a | /testsSDW/game_object_tests.py | 1101b4de45389a556b1320082cb95cd3824b5a90 | [
"MIT"
] | permissive | jomyhuang/sdwle | 508464c990f01b189029dfcc8e65b6617279ea4c | 9b6e916567e09c7cba4a171fe0adf0f47009a8c3 | refs/heads/master | 2021-01-20T20:36:26.327004 | 2016-08-13T13:48:31 | 2016-08-13T13:48:31 | 62,549,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,469 | py | import random
import unittest
from SDWLE.agents.basic_agents import DoNothingAgent, PredictableAgent
from SDWLE.cards.base import SecretCard
from SDWLE.cards.heroes import Malfurion, Jaina
from SDWLE.cards.minions.rogue import AnubarAmbusher
from SDWLE.engine import Game, Deck, card_lookup
from testsSDW.agents.testing_agents import CardTestingAgent, OneCardPlayingAgent, PlayAndAttackAgent
from testsSDW.testing_utils import generate_game_for, mock
from SDWLE.cards import StonetuskBoar, ArcaneIntellect, Naturalize, Abomination, NerubianEgg, SylvanasWindrunner
from SDWLE.game_objects import Bindable
from SDWLE.cards import SDW01, SDW02, SDW03, SDW04, SDWBasicA, SDWBasicH, SDWBasicT, SDWBasic01, SDWBasic02
from SDWLE.constants import GAMESTATE, CHARACTER_CLASS, MINION_TYPE, TROOP_TYPE, COLOR_TYPE, NATURE_TYPE
class TestGame(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_state_machine(self):
game = generate_game_for([SDW01, SDW02, SDW03],
[SDW03, SDW04, SDW02],
PredictableAgent, PredictableAgent, random_order=False)
game.state_init(GAMESTATE.START)
for i in range(20):
game.state_step()
self.assertEqual(game.game_ended, True)
def test_create_game(self):
card_set1 = []
card_set2 = []
test_env = self
for cardIndex in range(0, 30):
card_set1.append(card_lookup("Stonetusk Boar"))
card_set2.append(card_lookup("Novice Engineer"))
deck1 = Deck(card_set1, Malfurion())
deck2 = Deck(card_set2, Jaina())
checked_cards = []
class MockAgent1:
def do_card_check(self, cards):
test_env.assertEqual(len(cards), 5)
checked_cards.append(list(cards))
return [False, True, True, True, True]
def set_game(self, game):
pass
class MockAgent2:
def do_card_check(self, cards):
test_env.assertEqual(len(cards), 5)
checked_cards.append(list(cards))
return [False, True, True, False, True]
def set_game(self, game):
pass
agent1 = mock.Mock(spec=MockAgent1(), wraps=MockAgent1())
agent2 = mock.Mock(spec=MockAgent2(), wraps=MockAgent2())
game = Game([deck1, deck2], [agent1, agent2])
game.pre_game()
self.assertEqual(agent1.method_calls[0][0], "do_card_check", "Agent not asked to select cards")
self.assertEqual(agent2.method_calls[0][0], "do_card_check", "Agent not asked to select cards")
self.assertTrue(game.players[0].deck == deck1, "Deck not assigned to player")
self.assertTrue(game.players[1].deck == deck2, "Deck not assigned to player")
self.assertTrue(game.players[0].agent == agent1, "Agent not stored in the hearthbreaker")
self.assertTrue(game.players[1].agent == agent2, "Agent not stored in the hearthbreaker")
self.assertListEqual(checked_cards[0][1:], game.players[0].hand[1:], "Cards not retained after request")
self.assertListEqual(checked_cards[1][1:2], game.players[1].hand[1:2], "Cards not retained after request")
def test_game_start_end(self):
card_set1 = []
card_set2 = []
for cardIndex in range(0, 30):
card_set1.append(card_lookup("Stonetusk Boar"))
card_set2.append(card_lookup("Novice Engineer"))
deck1 = Deck(card_set1, Malfurion())
deck2 = Deck(card_set2, Jaina())
agent1 = mock.Mock(spec=DoNothingAgent(), wraps=DoNothingAgent())
agent2 = mock.Mock(spec=DoNothingAgent(), wraps=DoNothingAgent())
game = Game([deck1, deck2], [agent1, agent2])
game.start()
self.assertEqual(game.game_ended, True)
# def test_secrets(self):
# for secret_type in SecretCard.__subclasses__():
# random.seed(1857)
# secret = secret_type()
# game = generate_game_for(secret_type, StonetuskBoar, CardTestingAgent, DoNothingAgent)
# for turn in range(0, secret.mana * 2 - 2):
# game.play_single_turn()
#
# def assert_different():
# new_events = game.events.copy()
# new_events.update(game.other_player.hero.events)
# new_events.update(game.other_player.events)
# new_events.update(game.current_player.hero.events)
# new_events.update(game.current_player.events)
# self.assertNotEqual(events, new_events, secret.name)
#
# def assert_same():
# new_events = game.events.copy()
# new_events.update(game.current_player.hero.events)
# new_events.update(game.current_player.events)
# new_events.update(game.other_player.hero.events)
# new_events.update(game.other_player.events)
# self.assertEqual(events, new_events)
#
# game.current_player.bind("turn_ended", assert_different)
# game.other_player.bind("turn_ended", assert_same)
#
# # save the events as they are prior to the secret being played
# events = game.events.copy()
# events.update(game.other_player.hero.events)
# events.update(game.other_player.events)
# events.update(game.current_player.hero.events)
# events.update(game.current_player.events)
#
# # The secret is played, but the events aren't updated until the secret is activated
# game.play_single_turn()
#
# self.assertEqual(1, len(game.current_player.secrets))
#
# # Now the events should be changed
# game.play_single_turn()
#
# # Now the events should be reset
# game.play_single_turn()
# def test_physical_hero_attacks(self):
# game = generate_game_for(Naturalize, ArcaneIntellect, PredictableAgent, PredictableAgent)
# for turn in range(0, 4):
# game.play_single_turn()
#
# self.assertEqual(30, game.other_player.hero.health)
# self.assertEqual(0, game.other_player.hero.armor)
# self.assertEqual(29, game.current_player.hero.health)
# def test_hero_weapon_sheath(self):
# game = generate_game_for(AnubarAmbusher, StonetuskBoar, PredictableAgent, PlayAndAttackAgent)
#
# for turn in range(0, 3):
# game.play_single_turn()
#
# self.assertEqual(0, len(game.other_player.minions))
# self.assertEqual(28, game.current_player.hero.health)
#
# game.play_single_turn()
# self.assertEqual(2, len(game.current_player.minions))
# self.assertEqual(26, game.other_player.hero.health)
# def test_deathrattle_ordering(self):
# game = generate_game_for(SylvanasWindrunner, [Abomination, NerubianEgg],
# OneCardPlayingAgent, OneCardPlayingAgent)
#
# for turn in range(0, 12):
# game.play_single_turn()
#
# self.assertEqual(2, len(game.current_player.minions))
# self.assertEqual(1, len(game.other_player.minions))
# game.other_player.minions[0].health = 2
#
# game.current_player.minions[1].die(None)
# game.check_delayed()
#
# # Everything should die at once, but Sylvanas shouldn't get the Nerubian because its Deathrattle will not have
# # gone yet
#
# self.assertEqual(1, len(game.current_player.minions))
class TestBinding(unittest.TestCase):
    """Unit tests for the Bindable event mix-in (bind/unbind/bind_once)."""

    def test_bind(self):
        # A bound handler receives the trigger arguments; after unbind it
        # must no longer fire (call count stays at one).
        event = mock.Mock()
        binder = Bindable()
        binder.bind("test", event)
        binder.trigger("test", 1, 5, 6)
        event.assert_called_once_with(1, 5, 6)
        binder.unbind("test", event)
        binder.trigger("test")
        event.assert_called_once_with(1, 5, 6)

    def test_bind_once(self):
        # A bind_once handler fires exactly once, while a regular bind
        # handler keeps firing on every trigger.
        event = mock.Mock()
        event2 = mock.Mock()
        binder = Bindable()
        binder.bind_once("test", event)
        binder.bind("test", event2)
        binder.trigger("test", 1, 5, 6)
        event.assert_called_once_with(1, 5, 6)
        event2.assert_called_once_with(1, 5, 6)
        binder.trigger("test")
        event.assert_called_once_with(1, 5, 6)
        self.assertEqual(event2.call_count, 2)
| [
"jomyhuang@gmail.com"
] | jomyhuang@gmail.com |
3bee6135e23cb604bccca664854a180290560760 | f496dd1b87dd7e25d98d30791787620c05f819ee | /telegram_bot/main.py | 54c4c129cafa403fa64eb3bc6d4677237325d6a0 | [] | no_license | MaybeBaybe/rememberME | 7abe7e8c7cb1edc3fcf6223300d45b30bb03b5f9 | 300daa60d520ddf89972eb03be5bc289d745ded4 | refs/heads/master | 2023-01-02T09:39:23.953483 | 2020-10-24T20:14:06 | 2020-10-24T20:14:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | import telebot
import collections
from settings import BOT_TOKEN
# TODO: add a logger (replace the bare print() calls below)
def remember():
    """Generator-based dialog step: yield a prompt, then store the reply.

    The first next() returns the prompt text; the subsequent .send(message)
    stores the user's message text in the module-level ``buffer`` keyed by
    telegram user id, then finishes (raising StopIteration to the caller).
    """
    answer = yield "Я слушаю)"
    buffer[answer.from_user.id] = answer.text
    return answer.text
bot = telebot.TeleBot(BOT_TOKEN)
handlers = collections.defaultdict(remember)
buffer = collections.defaultdict(lambda: 'empty')
@bot.message_handler(commands=['remember'])
def remember_handler(message):
    """Start a /remember dialog: prime the per-user generator, send its prompt."""
    print(f'{message.from_user.username}:{message.text}')
    telegram_id = message.from_user.id
    # defaultdict lazily creates a fresh `remember` generator for this user;
    # next() primes it and yields the prompt text.
    answer = next(handlers[telegram_id])
    # send the received answer back to the user
    bot.send_message(chat_id=telegram_id, text=answer)
@bot.message_handler(content_types=['text'])
def insult(message):
    """Fallback text handler: feed an active dialog, otherwise reply rudely."""
    print(f'{message.from_user.username}:{message.text}')
    telegram_id = message.from_user.id
    if telegram_id in handlers:
        # if a dialog has already been started, .send() must be used to
        # pass the user's reply into the generator
        try:
            handlers[telegram_id].send(message)
        except StopIteration:
            # The generator finished: the reply is stored in `buffer`,
            # so drop the handler, echo the remembered text, and clean up.
            del handlers[telegram_id]
            bot.send_message(chat_id=telegram_id, text=buffer[telegram_id])
            del buffer[telegram_id]
            bot.send_message(chat_id=telegram_id, text="Я канеш запомнил, но ты все равно идешь нахуй.")
        return
    bot.send_message(telegram_id, "Ди на хуй")
if __name__ == '__main__':
    # Bug fix: `telebot.TeleBot` (pyTelegramBotAPI) exposes polling() /
    # infinity_polling(); start_polling() and idle() belong to the
    # python-telegram-bot library and raise AttributeError here, so use
    # the blocking polling loop that was previously commented out.
    bot.polling(none_stop=True, interval=1)
| [
"buzovv1997@gmail.com"
] | buzovv1997@gmail.com |
24d6117e0d160f98ac82f7e51715b257a18b1e74 | 4e4fefb09d812688a15bede9bad0a72ef774bda6 | /myexception.py | 0d7621c834a6dceafaf013e576837c5253aeebd9 | [] | no_license | nisarg291/demopygit | 8cad9ad8769a7124a93ad0bc603ac330a731c6ef | 136c4f527632ebf30db40f2d1b29b380258a8f75 | refs/heads/master | 2022-03-31T07:26:39.378064 | 2020-01-05T15:11:50 | 2020-01-05T15:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | try:
# Tutorial script demonstrating Python exception handling: bare except,
# matching specific exception types, try/except/else, try/finally,
# resource cleanup, and raising exceptions explicitly.

# `x` is undefined here, so print(x) raises NameError, which the bare
# except clause catches.
try:
    print(x)
except:
    print("An exception occurred")

# Multiple except clauses: the first matching type handles the error.
try:
    print(x)
except NameError:
    print("Variable x is not defined")
except:
    print("Something else went wrong")

# The else branch runs only when the try body raised nothing.
try:
    print("Hello")
except:
    print("Something went wrong")
else:
    print("Nothing went wrong")

# finally runs whether or not an exception occurred.
try:
    print(x)
except:
    print("Something went wrong")
finally:
    print("The 'try except' is finished")

# Cleaning up an external resource with finally.
try:
    f = open("demofile.txt")
    f.write("Lorum Ipsum")
except:
    print("Something went wrong when writing to the file")
finally:
    # Bug fix: if open() itself fails (e.g. the file does not exist),
    # `f` is never bound and an unguarded f.close() would raise a
    # NameError from inside finally, masking the tutorial's point.
    if 'f' in globals():
        f.close()

#Raise an exception
"""As a Python developer you can choose to throw an exception if a condition occurs.
To throw (or raise) an exception, use the raise keyword.
Example
Raise an error and stop the program if x is lower than 0:"""
x = -1
if x < 0:
    raise Exception("Sorry, no numbers below zero") # raise Exception is same as throw a exception
# The raise keyword is used to raise an exception.
# You can define what kind of error to raise, and the text to print to the user.
| [
"nisargadalja24680@gmail.com"
] | nisargadalja24680@gmail.com |
944c0b8bcaf0fd84f60cac9d5f0b950ddfc4b068 | e75f0ebd0a50b02e5c3661c1515a3b993ece30e1 | /tests/models/torch/test_torch_snet.py | 27dabb3413b37d5394deb61e6a19b7e6acc45ead | [
"BSD-3-Clause"
] | permissive | zhangchunlei0813/CATENets | 7ec12b9252f1571caed1cc8bbcf048383e6d1bd2 | e899872546e0449ac8e26de685b45ccd1c9952fc | refs/heads/main | 2023-07-10T15:10:36.322715 | 2021-08-03T11:14:32 | 2021-08-03T11:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | import pytest
from torch import nn
from catenets.datasets import load
from catenets.experiment_utils.tester import evaluate_treatments_model
from catenets.models.torch import SNet
def test_model_params() -> None:
model = SNet(
2,
binary_y=True,
n_layers_out=1,
n_units_out=2,
n_layers_r=3,
n_units_r=4,
weight_decay=0.5,
lr=0.6,
n_iter=700,
batch_size=80,
val_split_prop=0.9,
n_iter_print=10,
seed=11,
)
assert model._reps_c is not None
assert model._reps_o is not None
assert model._reps_mu0 is not None
assert model._reps_mu1 is not None
assert model._reps_prop is not None
assert model._propensity_estimator is not None
assert len(model._po_estimators) == 2
for mod in model._po_estimators:
assert len(mod.model) == 7 # 1 in + NL + 3 * n_layers_out + 1 out + NL
assert len(model._reps_c.model) == 6
assert len(model._reps_o.model) == 6
assert len(model._reps_mu0.model) == 6
assert len(model._reps_mu1.model) == 6
assert len(model._propensity_estimator.model) == 8
@pytest.mark.parametrize("nonlin", ["elu", "relu", "sigmoid", "selu", "leaky_relu"])
def test_model_params_nonlin(nonlin: str) -> None:
    """Each supported nonlinearity name must map to the matching torch module
    at index 1 (right after the input layer) of every sub-network."""
    model = SNet(2, nonlin=nonlin)
    nonlins = {
        "elu": nn.ELU,
        "relu": nn.ReLU,
        "sigmoid": nn.Sigmoid,
        "selu": nn.SELU,
        "leaky_relu": nn.LeakyReLU,
    }
    # All representation blocks, both potential-outcome heads, and the
    # propensity estimator share the configured activation.
    for mod in [
        model._reps_c,
        model._reps_o,
        model._reps_mu0,
        model._reps_mu1,
        model._reps_prop,
        model._po_estimators[0],
        model._po_estimators[1],
        model._propensity_estimator,
    ]:
        assert isinstance(mod.model[1], nonlins[nonlin])
@pytest.mark.slow
@pytest.mark.parametrize("dataset, pehe_threshold", [("twins", 0.4), ("ihdp", 1.5)])
def test_model_sanity(dataset: str, pehe_threshold: float) -> None:
    """End-to-end sanity check: 3-fold PEHE on each benchmark dataset must
    stay below a loose, dataset-specific threshold."""
    X_train, W_train, Y_train, Y_train_full, X_test, Y_test = load(dataset)
    W_train = W_train.ravel()
    model = SNet(X_train.shape[1], batch_size=1024, n_iter=1500)
    score = evaluate_treatments_model(
        model, X_train, Y_train, Y_train_full, W_train, n_folds=3
    )
    print(f"Evaluation for model SNet on {dataset} = {score['str']}")
    assert score["raw"]["pehe"][0] < pehe_threshold
def test_model_predict_api() -> None:
    """predict() must return one CATE estimate per row, optionally with both
    potential outcomes, and score() must be positive on held-out data."""
    X_train, W_train, Y_train, Y_train_full, X_test, Y_test = load("ihdp")
    W_train = W_train.ravel()
    model = SNet(X_train.shape[1], batch_size=1024, n_iter=100)
    model.fit(X_train, Y_train, W_train)
    out = model.predict(X_test)
    assert len(out) == len(X_test)
    # return_po=True additionally yields the two potential-outcome arrays.
    out, p0, p1 = model.predict(X_test, return_po=True)
    assert len(out) == len(X_test)
    assert len(p0) == len(X_test)
    assert len(p1) == len(X_test)
    score = model.score(X_test, Y_test)
    assert score > 0
| [
"noreply@github.com"
] | zhangchunlei0813.noreply@github.com |
d9fff1f67e29c8e4874cb960bb72997b315bf711 | 22fca687f976a6f5766ab0ac20f0364760a638a3 | /1-策略开发/1-开发中的策略/13-oscillator_drive/3-实盘/test/test7.py | 1b347676835789d8789d19215d685569395d0f19 | [] | no_license | crystalphi/CTA | 6d5ca789162afeb8abb914ab7db0ddca86d742e9 | e5471f93ca9bfe44eff948479238d7604f77501b | refs/heads/main | 2023-07-03T11:07:44.456413 | 2021-07-31T03:55:30 | 2021-07-31T03:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | #%%
import openpyxl
import datetime
wb = openpyxl.Workbook()
sheet = wb.active
sheet.column_dimensions["B"].number_format = "yyyy-mm-dd hh:mm:ss:ff"
sheet["B2"] = datetime.datetime.fromisoformat('2020-12-12 12:22:22:888')
wb.save("D:/CTA/1-策略开发/1-开发中的策略/14-oscillator_drive/2-实盘/datetime.xlsx") | [
"hun1982@qq.com"
] | hun1982@qq.com |
84c692400092327be8d677f99f3e4be977c1f2a3 | 7bfdb68f3803992127bf6fb5fbda0d4e27020b07 | /leetcode/1221.py | ffca7250a8903061a59abe14effc3f81b67fecbb | [] | no_license | Mesona/pythonWorkspace | f23f06e8d5070ca60af58b2d90ef8b25511bea6d | 6275e10eb73d93d12644bf39de6b81e6879c7215 | refs/heads/master | 2022-12-10T11:25:37.023147 | 2021-03-07T03:32:26 | 2021-03-07T03:32:26 | 213,464,015 | 0 | 0 | null | 2022-12-08T06:41:42 | 2019-10-07T19:03:57 | Python | UTF-8 | Python | false | false | 479 | py | # https://leetcode.com/problems/split-a-string-in-balanced-strings/submissions/
class Solution:
def balancedStringSplit(self, s: str) -> int:
L = 0
R = 0
output = 0
for i in s:
if i == "L":
L = L + 1
if i == "R":
R = R + 1
if R == L:
L = 0
R = 0
output = output + 1
return output | [
"kylemesona@gmail.com"
] | kylemesona@gmail.com |
0884b4793dda062ab281b700400beb0de77bfa84 | a3dea6cafe214ac03aeaf0b09dc66209b14b0fcb | /kaosuhoshi/login/urls.py | dd17f33f6a2737cf0c3092f3cfc997e43442729e | [] | no_license | phantomSuying/KaosuHoshino | 093b03149ea039e14321a3d7881ff73702421e2a | 15cf264c18bd48bde878b6c6af14c2e3caadefcf | refs/heads/master | 2020-03-26T15:55:33.757041 | 2018-10-17T12:45:10 | 2018-10-17T12:45:10 | 145,071,916 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.conf.urls import url,include
from login import views
urlpatterns=[
url(r'^$',views.first_page),
url(r'^/loginCheck',views.loginCheck),
] | [
"40844291+phantomSuying@users.noreply.github.com"
] | 40844291+phantomSuying@users.noreply.github.com |
0f2943085fb07ae711d23a3d3d0679f3e81beb89 | ed25190274ba9151e7455f0239c36f57d4cbc509 | /team/migrations/0003_auto_20210306_2120.py | 9c966726b57529c237ffe2db228d1f2f0b6843ea | [] | no_license | cerebro-iiitv/cerebro-backend-2021 | 8859e4d0ad4e7f0317751aaf5a8c03ee521be693 | 358ef743ef1fa0184713e6df56c640143660fdce | refs/heads/develop | 2023-03-27T00:25:33.944129 | 2021-03-28T16:23:12 | 2021-03-28T16:23:12 | 332,232,855 | 1 | 2 | null | 2021-03-28T16:23:12 | 2021-01-23T14:43:38 | Python | UTF-8 | Python | false | false | 454 | py | # Generated by Django 3.1.6 on 2021-03-06 15:50
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated schema migration: constrain ``Team.role`` to a fixed
    choice set (Lead / Co-Lead / Member)."""

    dependencies = [
        ('team', '0002_auto_20210306_2117'),
    ]

    operations = [
        migrations.AlterField(
            model_name='team',
            name='role',
            # choices are (stored_value, human_label) pairs; note the stored
            # values are inconsistently cased ('Lead' vs 'co_lead'/'member').
            field=models.CharField(choices=[('Lead', 'Lead'), ('co_lead', 'Co-Lead'), ('member', 'Member')], max_length=100),
        ),
    ]
| [
"201851150@iiitvadodara.ac.in"
] | 201851150@iiitvadodara.ac.in |
259ffa5002c8abbc9a2390914f18484009221b26 | 3ef0e8e4c47a7c86b74a25bbd67d4b12a8a5b55e | /places/views.py | 4c552ba721653070e6ab228b5df691077dfaa3b2 | [] | no_license | AlymbaevaBegimai/intourist | 054247e826a7581077260280dc4c77bcfac9c607 | 0447535ac1e97837ca2be31df409d4ea9af81977 | refs/heads/main | 2023-06-22T15:36:53.899528 | 2021-07-21T14:42:04 | 2021-07-21T14:42:04 | 381,718,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | from django.shortcuts import render, redirect, HttpResponse
from django.views.generic import FormView, DetailView
from .models import Place, Feedback
from .forms import PlaceForm, FeedbackForm
def places(request):
    """Render the list page showing every Place in the database."""
    context = {'places': Place.objects.all()}
    return render(request, 'places/places.html', context)
def create_place(request):
    """Show the Place creation form; on a valid POST, save and redirect to
    the list view.

    Bug fix: an invalid POST previously fell through and re-rendered a
    brand-new empty form, silently discarding both the user's input and the
    validation errors. The bound (invalid) form is now re-rendered instead,
    per the standard Django form-view pattern.
    """
    if request.method == "POST":
        place_form = PlaceForm(request.POST)
        if place_form.is_valid():
            place_form.save()
            return redirect(places)
    else:
        place_form = PlaceForm()
    return render(request, 'places/form.html', {'place_form': place_form})
def place(request, id):
    """Detail page for a single Place; plain-text 404 if the id is unknown."""
    try:
        place_object = Place.objects.get(id=id)
        return render(request, 'places/place.html', {'place_object': place_object})
    except Place.DoesNotExist as e:
        # NOTE(review): the raw exception text is echoed to the client;
        # fine for a learning project, but it leaks internals -- consider
        # raising Http404 instead.
        return HttpResponse(f'Not found: {e}', status=404)
def edit_place(request, id):
    """Edit an existing Place; on a valid POST, save and redirect to its
    detail page.

    Bug fix: an invalid POST previously discarded the submitted data and
    its validation errors by rebuilding an unbound form from the instance.
    The bound (invalid) form is now re-rendered instead.
    """
    place_object = Place.objects.get(id=id)
    if request.method == 'POST':
        place_form = PlaceForm(data=request.POST, instance=place_object)
        if place_form.is_valid():
            place_form.save()
            return redirect(place, id=id)
    else:
        place_form = PlaceForm(instance=place_object)
    return render(request, 'places/form.html', {'place_form': place_form})
def delete_place(request, id):
    """Delete the Place with the given id and return to the list view.

    NOTE(review): deletion happens on a plain GET with no confirmation or
    CSRF protection, and an unknown id raises Place.DoesNotExist (500);
    consider require_POST plus get_object_or_404.
    """
    place_object = Place.objects.get(id=id)
    place_object.delete()
    return redirect(places)
class FeedbackView(FormView):
    """Display and process the feedback form; redirect to /places/ on success."""
    template_name = 'places/feedback_form.html'
    form_class = FeedbackForm
    success_url = '/places/'

    def form_valid(self, form):
        # Persist the feedback before the standard redirect-on-success.
        form.save()
        return super().form_valid(form)
class FeedbackDetailView(DetailView):
    """Detail page for a single Feedback entry (looked up by pk from the URL)."""
    queryset = Feedback.objects.all()
    template_name = 'places/feedback.html'
"sally.vatanabe.2002@gmail.com"
] | sally.vatanabe.2002@gmail.com |
010044719defff9a149b002bb54cdbca81295588 | 929f00c386b8686e1c802aa622875c62d295e216 | /spikeforest/forestview/recording_views/testplotlyview.py | 61f739cfd85644eeaf32c4bad8d6f58d58d8258e | [
"Apache-2.0"
] | permissive | mhhennig/spikeforest | e0d6cbb47d15131e683545c1978abc6f99c51dc5 | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | refs/heads/master | 2020-05-31T11:03:58.438693 | 2019-06-04T18:06:37 | 2019-06-04T18:06:37 | 190,254,208 | 0 | 0 | Apache-2.0 | 2019-06-04T18:05:28 | 2019-06-04T18:05:28 | null | UTF-8 | Python | false | false | 2,824 | py | import vdomr as vd
import time
import multiprocessing
import sys
from .stdoutsender import StdoutSender
import mtlogging
import numpy as np
class TestPlotlyView(vd.Component):
    """Tab view that shows a TestPlotlyWidget once background init completes.

    Initialization runs in a separate process (see _initialize below); its
    stdout is streamed back over a multiprocessing Pipe and displayed until
    the 'result' message arrives.
    """

    def __init__(self, context):
        vd.Component.__init__(self)
        self._context = context
        self._size = (100, 100)
        self._test_plotly_widget = None  # created once init completes
        # Parent end of the pipe stays here; the child end goes to the
        # worker process.
        self._connection_to_init, connection_to_parent = multiprocessing.Pipe()
        self._init_process = multiprocessing.Process(target=_initialize, args=(context, connection_to_parent))
        self._init_process.start()
        self._init_log_text = ''
        # Start polling the pipe shortly after construction.
        vd.set_timeout(self._check_init, 0.5)

    def _on_init_completed(self, init):
        # Swap the "Initializing..." placeholder for the real widget.
        self._test_plotly_widget = TestPlotlyWidget()
        self._test_plotly_widget.setSize(self._size)
        self.refresh()

    def setSize(self, size):
        self._size = size
        if self._test_plotly_widget:
            self._test_plotly_widget.setSize(size)

    def size(self):
        return self._size

    def tabLabel(self):
        return 'Test plotly'

    def render(self):
        if self._test_plotly_widget:
            return vd.div(
                self._test_plotly_widget
            )
        else:
            # Init still running: show the accumulated worker log output.
            return vd.div(
                vd.h3('Initializing...'),
                vd.pre(self._init_log_text)
            )

    def _check_init(self):
        # Poll the worker pipe: append 'log' messages to the display, and
        # stop polling once the 'result' message arrives.
        if not self._test_plotly_widget:
            if self._connection_to_init.poll():
                msg = self._connection_to_init.recv()
                if msg['name'] == 'log':
                    self._init_log_text = self._init_log_text + msg['text']
                    self.refresh()
                elif msg['name'] == 'result':
                    self._on_init_completed(msg['result'])
                    return
            vd.set_timeout(self._check_init, 1)
class TestPlotlyWidget(vd.Component):
    """Renders a plotly line plot of cos((10x)^2), sized to its container."""

    def __init__(self):
        vd.Component.__init__(self)
        self._size = (100, 100)
        self._plot = None
        self._update_plot()

    def setSize(self, size):
        # The PlotlyPlot size is fixed at construction time, so resizing
        # rebuilds the whole plot.
        self._size = size
        self._update_plot()

    def _update_plot(self):
        # Sample cos((10x)^2) on [0, 1] and wrap it in a PlotlyPlot component.
        xx = np.linspace(0, 1, 10)
        yy = np.cos((10 * xx)**2)
        self._plot = vd.components.PlotlyPlot(
            data=dict(x=xx, y=yy),
            layout=dict(margin=dict(t=5)),
            config=dict(),
            size=self._size
        )
        self.refresh()

    def render(self):
        if not self._plot:
            return vd.div('no plot.')
        return self._plot
# Initialization in a worker thread
# NOTE(review): this bare call discards whatever mtlogging.log(root=True)
# returns; it was presumably meant as a decorator, `@mtlogging.log(root=True)`,
# on _initialize -- confirm against mtlogging's API.
mtlogging.log(root=True)
def _initialize(context, connection_to_parent):
    """Worker-process entry point: run (currently empty) setup while
    streaming stdout back to the parent, then send an empty 'result'
    message so the parent knows initialization finished."""
    with StdoutSender(connection=connection_to_parent):
        pass
    connection_to_parent.send(dict(
        name='result',
        result=dict()
    ))
| [
"jeremy.magland@gmail.com"
] | jeremy.magland@gmail.com |
9a179c09c2ccd31e9d0d55efe8784ca707ccebf0 | 2efa640e2c089a601a3c748d5ec4c80d65cb9695 | /src/ploomber/dag/dagclients.py | 45a7f88f2b788e3e2e5c68606b8dd5f15f4a8368 | [
"Apache-2.0"
] | permissive | BigRLab/ploomber | 19d35345cc8548b79f73f026674186969f1c3d4e | e2732be116507128ec900e4ef6195f529f639358 | refs/heads/master | 2023-05-08T03:37:11.384226 | 2021-05-31T20:57:37 | 2021-05-31T20:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | from inspect import isclass
from collections.abc import MutableMapping
from ploomber.tasks.abc import Task
from ploomber.products.Product import Product
from ploomber.validators.string import get_suggestion, str_to_class
class DAGClients(MutableMapping):
    """
    A dict-like object with validations

    1. __setitem__, __getitem__ work with strings (e.g., clients['SQLScript'])
    2. __setitem__ validates the key is a Task or Product subclass
    """

    def __init__(self, mapping=None):
        self._mapping = mapping or dict()

    def __getitem__(self, key):
        # String keys are resolved to the Task/Product class they name.
        resolved = str_to_class(key) if isinstance(key, str) else key

        if resolved is None:
            error = repr(key)
            suggestion = get_suggestion(key)

            # Only suggest an alternative that is actually registered.
            if suggestion and str_to_class(suggestion) in self:
                error += f'. Did you mean {suggestion!r}?'

            raise KeyError(error)

        return self._mapping[resolved]

    def __setitem__(self, key, value):
        if isinstance(key, str):
            resolved = str_to_class(key)

            if resolved is None:
                maybe = get_suggestion(key)

                msg = (f'Could not set DAG-level client {value!r}. '
                       f'{key!r} is not a valid Task or '
                       'Product class name')

                if maybe:
                    msg += f'. Did you mean {maybe!r}?'

                raise ValueError(msg)
        else:
            resolved = key

        # Keys must be Task or Product subclasses; isclass() is checked
        # first so issubclass() never receives a non-class.
        if not (isclass(resolved) and issubclass(resolved, (Task, Product))):
            raise ValueError('DAG client keys must be Tasks '
                             f'or Products, value {resolved!r} is not')

        self._mapping[resolved] = value

    def __delitem__(self, key):
        del self._mapping[key]

    def __iter__(self):
        return iter(self._mapping)

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return f'{type(self).__name__}({self._mapping!r})'
| [
"github@blancas.io"
] | github@blancas.io |
d6a1d9c427e29866cc8996e3c14bf72fd8900613 | 199654d837b74cb38057c05f76beec4963ce519f | /ui.py | b3e2185c26e0305b4e598f6aa37ca299eda2c306 | [] | no_license | jjunyeung/jjunyeung | 2288279bdad17863b30ccd4494bab157890a6397 | 30fa22aee29232661879f6876630bbc0af0306f9 | refs/heads/main | 2023-04-30T10:31:58.753892 | 2021-05-12T10:34:00 | 2021-05-12T10:34:00 | 366,678,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,826 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'capston.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtMultimediaWidgets import QVideoWidget
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1053, 845)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(370, 10, 291, 51))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(48)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(30, 90, 1011, 711))
self.tabWidget.setObjectName("tabWidget")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.video_2 = QVideoWidget(self.tab_2)
self.video_2.setGeometry(QtCore.QRect(20, 10, 451, 491))
self.video_2.setStyleSheet("")
self.video_2.setObjectName("video_2")
self.label_3 = QtWidgets.QLabel(self.video_2)
self.label_3.setGeometry(QtCore.QRect(400, 120, 111, 16))
self.label_3.setObjectName("label_3")
self.widget_2 = QVideoWidget(self.video_2)
self.widget_2.setGeometry(QtCore.QRect(20, 10, 921, 351))
self.widget_2.setObjectName("widget_2")
self.tabWidget_3 = QtWidgets.QTabWidget(self.widget_2)
self.tabWidget_3.setGeometry(QtCore.QRect(-40, -30, 971, 621))
self.tabWidget_3.setObjectName("tabWidget_3")
self.tab_8 = QtWidgets.QWidget()
self.tab_8.setObjectName("tab_8")
self.tabWidget_3.addTab(self.tab_8, "")
self.tab_9 = QtWidgets.QWidget()
self.tab_9.setObjectName("tab_9")
self.tabWidget_3.addTab(self.tab_9, "")
self.listView_5 = QtWidgets.QListView(self.tab_2)
self.listView_5.setGeometry(QtCore.QRect(540, 10, 391, 391))
self.listView_5.setObjectName("listView_5")
self.pushButton_4 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_4.setGeometry(QtCore.QRect(640, 520, 75, 23))
self.pushButton_4.setObjectName("pushButton_4")
self.tabWidget.addTab(self.tab_2, "")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.textEdit = QtWidgets.QTextEdit(self.tab_5)
self.textEdit.setGeometry(QtCore.QRect(20, 20, 961, 481))
self.textEdit.setObjectName("textEdit")
self.pushButton_14 = QtWidgets.QPushButton(self.tab_5)
self.pushButton_14.setGeometry(QtCore.QRect(100, 550, 75, 23))
self.pushButton_14.setObjectName("pushButton_14")
self.tabWidget.addTab(self.tab_5, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.pushButton_2 = QtWidgets.QPushButton(self.tab)
self.pushButton_2.setGeometry(QtCore.QRect(360, 210, 271, 231))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../OneDrive/바탕 화면/갈매기/asdf.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_2.setIcon(icon)
self.pushButton_2.setIconSize(QtCore.QSize(50, 50))
self.pushButton_2.setObjectName("pushButton_2")
self.tabWidget.addTab(self.tab, "")
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName("tab_6")
self.listView_8 = QtWidgets.QListView(self.tab_6)
self.listView_8.setGeometry(QtCore.QRect(20, 20, 951, 481))
self.listView_8.setObjectName("listView_8")
self.groupBox_7 = QtWidgets.QGroupBox(self.tab_6)
self.groupBox_7.setGeometry(QtCore.QRect(20, 510, 371, 81))
self.groupBox_7.setFlat(False)
self.groupBox_7.setCheckable(False)
self.groupBox_7.setObjectName("groupBox_7")
self.listView_9 = QtWidgets.QListView(self.groupBox_7)
self.listView_9.setGeometry(QtCore.QRect(10, 40, 256, 21))
self.listView_9.setObjectName("listView_9")
self.pushButton_15 = QtWidgets.QPushButton(self.groupBox_7)
self.pushButton_15.setGeometry(QtCore.QRect(290, 40, 75, 23))
self.pushButton_15.setObjectName("pushButton_15")
self.groupBox_8 = QtWidgets.QGroupBox(self.tab_6)
self.groupBox_8.setGeometry(QtCore.QRect(20, 590, 371, 81))
self.groupBox_8.setObjectName("groupBox_8")
self.listView_10 = QtWidgets.QListView(self.groupBox_8)
self.listView_10.setGeometry(QtCore.QRect(10, 40, 256, 21))
self.listView_10.setObjectName("listView_10")
self.pushButton_16 = QtWidgets.QPushButton(self.groupBox_8)
self.pushButton_16.setGeometry(QtCore.QRect(290, 40, 75, 23))
self.pushButton_16.setObjectName("pushButton_16")
self.pushButton_11 = QtWidgets.QPushButton(self.tab_6)
self.pushButton_11.setGeometry(QtCore.QRect(600, 550, 121, 71))
self.pushButton_11.setObjectName("pushButton_11")
self.tabWidget.addTab(self.tab_6, "")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1053, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_4.setText(_translate("MainWindow", "CCTV MASKING"))
self.label_3.setText(_translate("MainWindow", "영상 들어갈 부분"))
self.tabWidget_3.setTabText(self.tabWidget_3.indexOf(self.tab_8), _translate("MainWindow", "Tab 1"))
self.tabWidget_3.setTabText(self.tabWidget_3.indexOf(self.tab_9), _translate("MainWindow", "Tab 2"))
self.pushButton_4.setText(_translate("MainWindow", "Browse"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Image Capture"))
self.pushButton_14.setText(_translate("MainWindow", "Browse"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("MainWindow", "Training"))
self.pushButton_2.setText(_translate("MainWindow", ">"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Play"))
self.groupBox_7.setTitle(_translate("MainWindow", "Load"))
self.pushButton_15.setText(_translate("MainWindow", "Browse"))
self.groupBox_8.setTitle(_translate("MainWindow", "Save"))
self.pushButton_16.setText(_translate("MainWindow", "Browse"))
self.pushButton_11.setText(_translate("MainWindow", "Masking"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate("MainWindow", "Masking"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"noreply@github.com"
] | jjunyeung.noreply@github.com |
48df40ec5f7fc892af4b848826d7e3467d873d4d | 54a146c4088b238859bcf4be2954afb08a493e4d | /appatcher/patcher/patch.py | 22312c2c385af9c665a7ce1bf943f329146e756f | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | clementpoiret/Appatcher | 44de2c11d0563a4c91a65bdd4da6174bd18510d8 | a620829230b6a3130972a5eae4347072cbe9df31 | refs/heads/main | 2023-07-05T18:27:24.194197 | 2021-08-11T09:06:47 | 2021-08-11T09:06:47 | 393,658,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | import os
import subprocess
from pathlib import Path
import patch as patch_
def sanity_checks(apk, patch):
    """Verify that both input files exist before attempting a patch.

    Returns a (status, message) pair: (1, "OK") when both paths exist,
    otherwise (0, reason) naming the first missing input.
    """
    for path, label in ((apk, "APK"), (patch, "Patch")):
        if not path.exists():
            return 0, f"{label} not found"
    return 1, "OK"
def patch(patch_file, root):
    """Apply a unified-diff patch file to the tree rooted at ``root``.

    Uses the third-party ``patch`` library (imported as ``patch_``).
    NOTE(review): the boolean result of pset.apply() is discarded, so a
    failed patch application is silent.
    """
    pset = patch_.fromfile(patch_file)
    pset.apply(root=root)
def decompile(apk, dst):
apktoold = subprocess.Popen([
"java",
"-jar",
"appatcher/thirdparty/apktool/apktool_2.5.0.jar",
"d",
apk,
"-o",
dst,
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True)
return apktoold.communicate()
def obfuscate(dst, smob_binary="./appatcher/thirdparty/smob/smob"):
    """Run the smali obfuscator over the decompiled tree at *dst*.

    Args:
        dst: directory containing decompiled smali sources (passed via -i).
        smob_binary: path to the smob executable. Parameterized (default keeps
            the previously hard-coded bundled binary).

    Returns:
        (stdout, stderr) of the smob process, as text.
    """
    smob = subprocess.Popen(
        [smob_binary, "-p", "-i", dst],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True)
    return smob.communicate()
def recompile(src, apk, apktool_jar="appatcher/thirdparty/apktool/apktool_2.5.0.jar"):
    """Rebuild an APK at *apk* from the (patched) sources in *src* via apktool.

    Args:
        src: directory with decompiled/patched sources.
        apk: output APK path.
        apktool_jar: path to the apktool jar. Parameterized (default keeps the
            previously hard-coded bundled jar) so callers can swap versions.

    Returns:
        (stdout, stderr) of the apktool process, as text.
    """
    apktoolb = subprocess.Popen(
        ["java", "-jar", apktool_jar, "b", src, "-o", apk],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True)
    return apktoolb.communicate()
def sign(apk, dst, signer_jar="appatcher/thirdparty/uas/uber-apk-signer-1.2.1.jar"):
    """Sign the rebuilt *apk* with uber-apk-signer, writing results into *dst*.

    Args:
        apk: APK to sign (passed via -a).
        dst: output directory for the signed APK (passed via -o).
        signer_jar: path to the uber-apk-signer jar. Parameterized (default
            keeps the previously hard-coded bundled jar).

    Returns:
        (stdout, stderr) of the signer process, as text.
    """
    signer = subprocess.Popen(
        ["java", "-jar", signer_jar, "-a", apk, "-o", dst],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True)
    return signer.communicate()
| [
"poiret.clement@outlook.fr"
] | poiret.clement@outlook.fr |
0c08520810a73883b54bd1055179f58e7e018a84 | cc873161235502933845cfdaa7b2bfd9006b70c8 | /week7/coffeehouse/menu_api/migrations/0003_special_created_by.py | 5eba7622b1e13064f614216ac101479e0582a734 | [] | no_license | taddeimania/class_notes | d8a7f72ac9abf927768072a253effd35e521fb6d | 1cb321782caf9d823eee69fa43175cf31fd6b34f | refs/heads/master | 2020-04-10T03:55:44.311163 | 2016-11-08T14:22:41 | 2016-11-08T14:22:41 | 68,213,614 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-27 14:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.10.1, see file header): adds a
    # required ``created_by`` foreign key from ``Special`` to the user model.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('menu_api', '0002_auto_20161026_1447'),
    ]

    operations = [
        migrations.AddField(
            model_name='special',
            name='created_by',
            # default=1 backfills existing rows with user id 1;
            # preserve_default=False removes that default after the migration.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| [
"jtaddei@gmail.com"
] | jtaddei@gmail.com |
83cc27867d5379377ed699898acc9906449c331a | 3bce16dd91dde80998e0878c9d012b86fb503594 | /greenback/_version.py | 4d520d4f50d1b641e49e13da302baaa4dde51e3a | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jab/greenback | b5a09a67bfc861cec6c2700c46710cb26f3f69cf | 71781de9cd9058cc7c15f1f028091c875369d418 | refs/heads/master | 2023-05-28T01:29:13.045658 | 2020-06-29T08:51:23 | 2020-06-29T08:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # This file is imported from __init__.py and exec'd from setup.py
__version__ = "0.2.0+dev"
| [
"oremanj@gmail.com"
] | oremanj@gmail.com |
39034a98f5ba5a2a242618885054389e9d92c9d0 | 84f65e90a31d7e5368f4736488c48997753de005 | /forecast/prdeict.py | ea3853b79e2c525db9c2e63462fb3f76eda1f705 | [] | no_license | ulsonhu/mltest | 182e7bc3ef3c47977af9ae13a3a0b533409756b3 | 35a755fb19d0c064b7d44e1789aa23a22d5b6b92 | refs/heads/master | 2021-05-10T10:49:14.568952 | 2018-03-24T14:10:49 | 2018-03-24T14:10:49 | 118,395,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py |
# Generate our predictions for the test set.
# NOTE(review): `lin_model`, `test`, `columns` and `target` are defined in an
# earlier part of this script -- presumably a fitted scikit-learn linear model
# and a test DataFrame; confirm against the surrounding code.
lin_predictions = lin_model.predict(test[columns])
print("Predictions:", lin_predictions)
# Compute error between our test predictions and the actual values.
lin_mse = mean_squared_error(lin_predictions, test[target])
print("Computed error:", lin_mse)
"touristman5@gmail.com"
] | touristman5@gmail.com |
89c0044052529dcdc31e530ab0f8f8c0a0b478c0 | 46c378fb94298f6ce65b775f37ccad0c4b26dd53 | /Exames/Exercicio4.py | 6b7d276717b4ffd966d5cb762ef90b8584c6ebb2 | [
"MIT"
def cria_multiplos(n):
    """Return a list with the first 10 multiples of n (0*n .. 9*n).

    Raises:
        ValueError: if n is not a positive int.
    """
    # `or` short-circuits: for a non-int n the (n <= 0) comparison is never
    # evaluated, so e.g. cria_multiplos("a") raises ValueError as intended.
    # (The original bitwise `|` evaluated both operands and raised TypeError.)
    if (not isinstance(n, int)) or n <= 0:
        raise ValueError("Argumento Inválido")
    return [i * n for i in range(10)]


print(cria_multiplos(1))
"alexandrebatista84@gmail.com"
] | alexandrebatista84@gmail.com |
099f4adc7e2687a275fc38c8f553b0c310af0199 | 94f97ab0444cc1612c5ffbb9e1069e517b1a2a53 | /polygerrit-ui/app/template_test_srcs/convert_for_template_tests.py | 89bada344eb28e5bbdebc56056e1d1de0fe12d6f | [
"Apache-2.0"
] | permissive | lambdalab/gerrit | f2fd7298541473e98e80ba5e56c4b33f0049ddea | f15f958b25206d103fb900705f58db36ad3d9015 | refs/heads/master | 2021-01-20T08:08:13.432618 | 2018-02-06T03:52:47 | 2018-02-06T03:52:47 | 90,103,663 | 0 | 0 | Apache-2.0 | 2019-05-22T04:00:00 | 2017-05-03T03:21:32 | Java | UTF-8 | Python | false | false | 4,163 | py | import os, re, json
from shutil import copyfile, rmtree
# Matches the opening of a Polymer element registration: "Polymer({".
polymerRegex = r"Polymer\({"
polymerCompiledRegex = re.compile(polymerRegex)

# Captures the body of a self-invoking wrapper "(function() { ... })();"
# (DOTALL so the body may span multiple lines).
removeSelfInvokeRegex = r"\(function\(\) {\n(.+)}\)\(\);"
fnCompiledRegex = re.compile(removeSelfInvokeRegex, re.DOTALL)

# Captures the contents of an inline <script> tag (DOTALL: spans newlines).
regexBehavior = r"<script>(.+)<\/script>"
behaviorCompiledRegex = re.compile(regexBehavior, re.DOTALL)
def replaceBehaviorLikeHTML(fileIn, fileOut):
    """Extract the inline <script> body of *fileIn* and write it out as a
    JavaScript behavior file under polygerrit-ui/temp/behaviors/.

    Does nothing when *fileIn* contains no <script> block.
    """
    with open(fileIn) as source:
        contents = source.read()
    match = behaviorCompiledRegex.search(contents)
    if match is None:
        return
    out_path = "polygerrit-ui/temp/behaviors/" + fileOut.replace("html", "js")
    with open(out_path, "w+") as sink:
        sink.write(match.group(1))
def replaceBehaviorLikeJS(fileIn, fileOut):
    """Copy *fileIn* verbatim into polygerrit-ui/temp/behaviors/ as *fileOut*."""
    with open(fileIn) as source:
        contents = source.read()
    with open("polygerrit-ui/temp/behaviors/" + fileOut, "w+") as sink:
        sink.write(contents)
def generateStubBehavior(behaviorName):
    """Write a minimal stub declaring *behaviorName* as an empty Polymer behavior."""
    stub = "/** @polymerBehavior **/\n" + behaviorName + "= {};"
    with open("polygerrit-ui/temp/behaviors/" + behaviorName + ".js", "w+") as sink:
        sink.write(stub)
def replacePolymerElement (fileIn, fileOut, root):
    # Rewrite a Polymer element JS file into a goog.module for the Closure
    # compiler: strip the self-invoking function wrapper, turn "Polymer({"
    # into "exports = Polymer({" and prepend a goog.module header.
    # Also records the output path and package in the global `elements` map.
    with open(fileIn) as f:
        key = fileOut.split('.')[0]
        # Removed self invoked function
        file_str = f.read()
        file_str_no_fn = fnCompiledRegex.search(file_str)
        if file_str_no_fn:
            # Package name is the directory path with '/' replaced by '.'.
            package = root.replace("/", ".") + "." + fileOut
            with open("polygerrit-ui/temp/" + fileOut, "w+") as f:
                mainFileContents = re.sub(polymerCompiledRegex, "exports = Polymer({", file_str_no_fn.group(1)).replace("'use strict';", "")
                f.write("/** \n" \
                        "* @fileoverview \n" \
                        "* @suppress {missingProperties} \n" \
                        "*/ \n\n" \
                        "goog.module('polygerrit." + package + "')\n\n" + mainFileContents)
            # Add package and javascript to files object.
            elements[key]["js"] = "polygerrit-ui/temp/" + fileOut
            elements[key]["package"] = package
def writeTempFile(file, root):
    """Route one source file into the temp tree and the global *elements* map."""
    # gr-reporting lives directly on the window object and is handled as an
    # extern, so it is skipped here (for now at least).
    if "gr-reporting" in file:
        return
    key = file.split('.')[0]
    if key not in elements:
        # gr-app doesn't have an additional directory level.
        directory = 'gr-app' if len(root.split("/")) < 4 else root.split("/")[3]
        elements[key] = {"directory": directory}
    if file.endswith(".html") and not file.endswith("_test.html"):
        # gr-navigation is treated like a behavior rather than a standard
        # element because of the way it is added to the Gerrit object.
        if file.endswith("gr-navigation.html"):
            replaceBehaviorLikeHTML(os.path.join(root, file), file)
        else:
            elements[key]["html"] = os.path.join(root, file)
    if file.endswith(".js"):
        replacePolymerElement(os.path.join(root, file), file, root)
if __name__ == "__main__":
    # Create temp directory.
    if not os.path.exists("polygerrit-ui/temp"):
        os.makedirs("polygerrit-ui/temp")
    # Within temp directory create behavior directory.
    if not os.path.exists("polygerrit-ui/temp/behaviors"):
        os.makedirs("polygerrit-ui/temp/behaviors")
    # Map of element key -> {directory, html, js, package}, filled below.
    elements = {}
    # Go through every file in app/elements, and re-write accordingly to temp
    # directory, and also added to elements object, which is used to generate a
    # map of html files, package names, and javascript files.
    for root, dirs, files in os.walk("polygerrit-ui/app/elements"):
        for file in files:
            writeTempFile(file, root)
    # Special case for polymer behaviors we are using.
    replaceBehaviorLikeHTML("polygerrit-ui/app/bower_components/iron-a11y-keys-behavior/iron-a11y-keys-behavior.html", "iron-a11y-keys-behavior.html")
    generateStubBehavior("Polymer.IronOverlayBehavior")
    generateStubBehavior("Polymer.IronFitBehavior")
    #TODO figure out something to do with iron-overlay-behavior. it is hard-coded reformatted.
    # Persist the element map for the template test tooling.
    with open("polygerrit-ui/temp/map.json", "w+") as f:
        f.write(json.dumps(elements))
    # Behaviors are extracted (html) or copied verbatim (js).
    for root, dirs, files in os.walk("polygerrit-ui/app/behaviors"):
        for file in files:
            if file.endswith("behavior.html"):
                replaceBehaviorLikeHTML(os.path.join(root, file), file)
            elif file.endswith("behavior.js"):
                replaceBehaviorLikeJS(os.path.join(root, file), file)
"beckysiegel@google.com"
] | beckysiegel@google.com |
4d224b95c18eec6d3cf6ece324688a8669c8c455 | a1f633e1cc154f1dc8feae341598ec06d8169e5f | /moviestats.py | e3ebfa2d15441ab2f1131ea73a853ab6802af32c | [] | no_license | kartikanand/movie-stats | fda146c0843db358824caf6a088f91a44b36232c | 3f8d61e7cc2f563165e2e4bad76074c53aa5e8a9 | refs/heads/master | 2022-12-10T00:48:19.848140 | 2019-01-17T08:56:06 | 2019-01-17T08:56:06 | 165,992,955 | 0 | 0 | null | 2022-12-08T01:32:29 | 2019-01-16T07:15:11 | Python | UTF-8 | Python | false | false | 2,340 | py | from collections import defaultdict
def get_max_count_items(count_dict):
    """Return the keys of *count_dict* holding the highest count.

    A list is returned because several items can share the same
    highest count; an empty dict yields an empty list.
    """
    if not count_dict:
        return []
    top = max(count_dict.values())
    return [key for key, count in count_dict.items() if count == top]
class MovieStats:
    """Tallies how often each genre, actor and director appears in the
    movie records fed to :meth:`add_movie`."""

    def __init__(self):
        self.genre_count = defaultdict(int)
        self.actor_count = defaultdict(int)
        self.director_count = defaultdict(int)

    @staticmethod
    def _split_field(movie, field):
        # Fields look like "Name A, Name B"; split them into a list of names.
        return movie[field].strip().split(', ')

    def add_movie(self, movie):
        """Record the genres, actors and directors of one movie dict."""
        self.add_genres(self._split_field(movie, 'Genre'))
        self.add_actors(self._split_field(movie, 'Actors'))
        self.add_directors(self._split_field(movie, 'Director'))

    def add_genre(self, genre):
        self.genre_count[genre] += 1

    def add_genres(self, genre_lst):
        for name in genre_lst:
            self.add_genre(name)

    def add_actor(self, actor):
        self.actor_count[actor] += 1

    def add_actors(self, actor_lst):
        for name in actor_lst:
            self.add_actor(name)

    def add_director(self, director):
        self.director_count[director] += 1

    def add_directors(self, director_lst):
        for name in director_lst:
            self.add_director(name)

    def get_max_count_genres(self):
        return get_max_count_items(self.genre_count)

    def get_max_count_actors(self):
        return get_max_count_items(self.actor_count)

    def get_max_count_directors(self):
        return get_max_count_items(self.director_count)

    def print_stats(self):
        """Print the most frequent genres, actors and directors."""
        print('Most loved genres')
        print(self.get_max_count_genres())
        print('Most loved actors')
        print(self.get_max_count_actors())
        print('Most loved directors')
        print(self.get_max_count_directors())
| [
"kartikanand1992@gmail.com"
] | kartikanand1992@gmail.com |
7db6103d81b537c2290cfb980ed1834d89a33340 | a84fb1816328068903d6ae5d41f932b65cc7af38 | /Algorithm/code/brute_force/permutation_position.py | a713b40bff6e7d80cec2b77608e29a835af4b628 | [] | no_license | goodstart57/TIL | 3e44be012c8310dfa4e6908bfb2d504380a55ece | 46dbd3d7ce74ed6f94940afb3e4a868db8e7442e | refs/heads/master | 2022-11-08T17:41:15.001781 | 2022-10-10T08:08:57 | 2022-10-10T08:08:57 | 162,062,774 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | """
Permutation generation using recursive calls
and a minimal number of element swaps
"""
a = [1, 2, 3, 4, 5]
n = len(a)


def perm(a, n, k):
    # Generate (and print) every permutation of a[k:] in place.
    if k == n:
        print(a)
    for i in range(k, n):
        a[k], a[i] = a[i], a[k]  # move element i into position k
        perm(a, n, k + 1)
        a[k], a[i] = a[i], a[k]  # swap back to restore the original order


perm(a, n, 0)
| [
"goodstart57@gmail.com"
] | goodstart57@gmail.com |
713c2b5154472452b4241477e6f47c0611a1fe82 | 4d5e6e0a7057123ddd7cb97027e667117e1be143 | /control/type_casting_v2.py | f0aefebd5cad5c7423fdfb73587172c2743d6a1d | [] | no_license | shubhomedia/Learn_Python | cee48990c04521fcbb7dbf5ad120c69170dcd1be | 01e0a8e3dc2de87b09c963e7cb9fc5e246831ddb | refs/heads/master | 2021-07-01T08:53:51.151326 | 2021-01-02T17:31:36 | 2021-01-02T17:31:36 | 204,191,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | x = str("s1") # x will be 's1'
y = str(2)    # y will be '2'
z = str(3.0)  # z will be '3.0'
print(x, y, z)
# every value above was converted with str(), so all three print as strings
"shubhomedia@gmail.com"
] | shubhomedia@gmail.com |
059b27eba7ba6b1b392b09fcc952af85e87161e5 | 708e17ad98f3143abaf811357883e680991d711f | /python3/firstBadVer.py | fba03621599fc20dfc8176bd759a7e7ebb1065b8 | [] | no_license | yichuanma95/leetcode-solns | a363cc8e85f2e8cdd5d2cde6e976cd76d4c4ea93 | 6812253b90bdd5a35c6bfba8eac54da9be26d56c | refs/heads/master | 2021-05-24T18:05:02.588481 | 2020-10-08T00:39:58 | 2020-10-08T00:39:58 | 253,690,413 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | '''
Problem 272: First Bad Version
You are a product manager and currently leading a team to develop a new product. Unfortunately,
the latest version of your product fails the quality check. Since each version is developed based
on the previous version, all the versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which
causes all the following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad.
Implement a function to find the first bad version. You should minimize the number of calls to
the API.
Example:
Given n = 5, and version = 4 is the first bad version.
call isBadVersion(3) -> false
call isBadVersion(5) -> true
call isBadVersion(4) -> true
Then 4 is the first bad version.
'''
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Binary-search versions 1..n for the point where isBadVersion flips
        # from False to True.
        return self.bin_search_for_bad_ver(1, n)

    def bin_search_for_bad_ver(self, low, high):
        # Recursive binary search for the first bad version in [low, high].
        middle = (low + high) // 2
        # middle is bad and its predecessor is good -> middle is the answer.
        # NOTE(review): when the very first version is bad this calls
        # isBadVersion(0); presumably the API treats version 0 as good -- confirm.
        if isBadVersion(middle) and not isBadVersion(middle - 1):
            return middle
        # middle is good and its successor is bad -> the successor is the answer.
        if not isBadVersion(middle) and isBadVersion(middle + 1):
            return middle + 1
        if isBadVersion(middle):
            return self.bin_search_for_bad_ver(low, middle - 1)
        return self.bin_search_for_bad_ver(middle + 1, high)
| [
"ma.yich@husky.neu.edu"
] | ma.yich@husky.neu.edu |
fba6bc3853ad3d4853ed6461e4c967589c6920e7 | 1b4abb5e310c7ae1b2928f9ea80a6b3a8c2fb8ed | /model/ml/active_learning_unique_mincertainty.py | cd034340d19b28b3fea4a25a2635ca76c9456298 | [] | no_license | zhang-198/ExampleDrivenErrorDetection | 2e2c708665f2b57b6ac7c785604a2ac6234f7ba9 | ae8bc24fc441957d9a29e5fa4cc247f1805d8b4d | refs/heads/master | 2023-05-23T14:49:29.628520 | 2020-04-09T14:02:28 | 2020-04-09T14:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,230 | py | import pickle
from ml.active_learning.library import *
import xgboost as xgb
from sklearn.metrics import confusion_matrix
# best version
def go_to_next_column_prob1(column_id, pred_potential):
    # Pick the next column from an array of per-column "potential" scores
    # (Python 2 file: note the print statements).
    # Entries equal to -1.0 are sentinels for inactive columns.
    # First find the smallest active potential (relative to 0).
    minimum_pred = 0.0
    for column_step in range(len(pred_potential)):
        if pred_potential[column_step] != -1.0:
            if pred_potential[column_step] < minimum_pred:
                minimum_pred = pred_potential[column_step]
    # Shift everything so the smallest active value maps to >= 0,
    # then zero out the sentinel entries.
    new_potential = pred_potential - minimum_pred
    for column_step in range(len(pred_potential)):
        if pred_potential[column_step] == -1.0:
            new_potential[column_step] = 0.0
    # print str(new_potential)
    # print str(np.sum(new_potential))
    # Square and normalize so the values form a probability distribution.
    new_potential = np.square(new_potential)
    new_potential = new_potential / np.sum(new_potential)
    print "pot: " + str(new_potential) + " sum: " + str(np.sum(new_potential))
    # return np.random.choice(len(new_potential), 1, p=new_potential)[0]
    # Deterministic variant: take the column with the highest potential.
    return np.argmax(new_potential)
def go_to_next_column_prob(id_next, avg_certainty):
    """Return the column id with the lowest average certainty.

    ``id_next`` is unused but kept for interface compatibility with the
    other column-selection strategies in this module.
    """
    import operator
    # items() behaves identically on Python 2 and 3 here
    # (the original iteritems() is Python-2 only).
    return min(avg_certainty.items(), key=operator.itemgetter(1))[0]
def go_to_next_column_round(column_id):
    """Advance to the next column index, wrapping back to 0 after the last
    column of the module-level ``dataSet`` (round-robin order)."""
    nxt = column_id + 1
    return 0 if nxt == dataSet.shape[1] else nxt
def load_model(dataSet, classifier):
    """Load the pre-trained column-potential model for *dataSet*/*classifier*.

    Only a subset of datasets has a dedicated model; Salary and Book reuse
    the hospital model (marked "be careful" by the original author).
    """
    dataset_log_files = {}
    dataset_log_files[HospitalHoloClean().name] = "hospital"
    dataset_log_files[BlackOakDataSetUppercase().name] = "blackoak"
    dataset_log_files[FlightHoloClean().name] = "flight"
    # not yet
    dataset_log_files[Salary().name] = "hospital"  # be careful
    dataset_log_files[Book().name] = "hospital"  # be careful

    potential_model_dir = Config.get("column.potential.models")
    model_path = (potential_model_dir + "/model"
                  + dataset_log_files[dataSet.name] + "_" + classifier.name + ".p")
    # Use a context manager and binary mode: the original
    # pickle.load(open(...)) leaked the file descriptor and opened the
    # pickle in text mode.
    with open(model_path, "rb") as model_file:
        return pickle.load(model_file)
def add_lstm_features(data, use_lstm_only, all_matrix_train, feature_name_list):
    # Append (or substitute) pre-computed LSTM "deep" features to the
    # training feature matrix.
    # NOTE(review): the first parameter is named `data` but the body reads the
    # module-level `dataSet` instead -- callers pass the dataset here, so the
    # parameter is effectively unused; confirm before relying on it.
    lstm_path = ""
    if dataSet.name == 'Flight HoloClean':
        lstm_path = "/home/felix/SequentialPatternErrorDetection/deepfeatures/Flights/last/"
    elif dataSet.name == 'HospitalHoloClean':
        lstm_path = "/home/felix/SequentialPatternErrorDetection/deepfeatures/HospitalHoloClean/last/"
    elif dataSet.name == 'BlackOakUppercase':
        lstm_path = "/home/felix/SequentialPatternErrorDetection/deepfeatures/BlackOakUppercase/last/"
    else:
        raise Exception('We have no potential model for this dataset yet')

    all_matrix_train_deep = read_compressed_deep_features(lstm_path)
    all_matrix_test = None

    # One generated name per deep-feature column.
    feature_name_list_deep = ['deep ' + str(dfeature) for dfeature in range(all_matrix_train_deep.shape[1])]

    if use_lstm_only:
        # Replace the existing features entirely.
        all_matrix_train = all_matrix_train_deep
        feature_name_list = feature_name_list_deep
    else:
        # Append the deep features column-wise to the existing matrix.
        all_matrix_train = hstack((all_matrix_train, all_matrix_train_deep)).tocsr()
        feature_name_list.extend(feature_name_list_deep)

    return all_matrix_train, all_matrix_test, feature_name_list
# input
# ---------------------------------------------------------------------------
# Experiment configuration (Python 2 script): select a dataset, set the
# active-learning hyper-parameters, and build the list of per-column
# "potential" feature names that the main loop logs for each round.
# ---------------------------------------------------------------------------
start_time = time.time()

from ml.datasets.flights.FlightHoloClean import FlightHoloClean
#dataSet = FlightHoloClean()
from ml.datasets.hospital.HospitalHoloClean import HospitalHoloClean
#dataSet = HospitalHoloClean()
from ml.datasets.blackOak.BlackOakDataSetUppercase import BlackOakDataSetUppercase
dataSet = BlackOakDataSetUppercase()
from ml.datasets.salary_data.Salary import Salary
#dataSet = Salary()
from ml.datasets.luna.book.Book import Book
#dataSet = Book()
from ml.datasets.luna.restaurant.Restaurant import Restaurant
#dataSet = Restaurant()

# Disabled synthetic-dataset variants kept for reference:
'''
from ml.datasets.synthetic.Synthetic import Synthetic
from ml.datasets.synthetic.ReplaceError import ReplaceError
rows = 2000
datasets =[BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase(), BlackOakDataSetUppercase()]
columns = [4,4,4,4,4,4,4,4,4,4]
error_fraction = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
error_types = [ReplaceError, ReplaceError,ReplaceError, ReplaceError,ReplaceError, ReplaceError,ReplaceError, ReplaceError,ReplaceError, ReplaceError]
seed_synth = 41
dataSet = Synthetic(rows, datasets, columns, error_fraction, error_types, seed_synth)
'''

'''
from ml.datasets.synthetic.Synthetic import Synthetic
from ml.datasets.synthetic.ReplaceError import ReplaceError
rows = 2000
datasets =[BlackOakDataSetUppercase()]
columns = [4]
error_fraction = [0.9]
error_types = [ReplaceError]
seed_synth = 41
dataSet = Synthetic(rows, datasets, columns, error_fraction, error_types, seed_synth)
'''

print("read: %s seconds ---" % (time.time() - start_time))
start_time = time.time()

# Active-learning hyper-parameters.
number_of_round_robin_rounds = 2
train_fraction = 1.0
ngrams = 1
runSVD = False
use_metadata = True
use_metadata_only = False
use_lstm = False
user_error_probability = 0.00
step_size = 10
cross_validation_rounds = 1 # 1
use_change_features = True
checkN = 10 # 5
# total runs
label_iterations = 6 # 6

run_round_robin = False
if run_round_robin:
    number_of_round_robin_rounds = 10000
    label_iterations = 41
    checkN = 10

# Names of the per-column features written to the log file each round.
feature_names_potential = ['distinct_values_fraction', 'labels', 'certainty', 'certainty_stddev', 'minimum_certainty']
for i in range(100):
    feature_names_potential.append('certainty_histogram' + str(i))
feature_names_potential.append('predicted_error_fraction')
for i in range(7):
    feature_names_potential.append('icross_val' + str(i))
feature_names_potential.append('mean_cross_val')
feature_names_potential.append('stddev_cross_val')
feature_names_potential.append('training_error_fraction')
for i in range(100):
    feature_names_potential.append('change_histogram' + str(i))
feature_names_potential.append('mean_squared_certainty_change')
feature_names_potential.append('stddev_squared_certainty_change')
for i in range(10):
    feature_names_potential.append('batch_certainty_' + str(i))

if use_change_features:
    feature_names_potential.append('no_change_0')
    feature_names_potential.append('no_change_1')
    feature_names_potential.append('change_0_to_1')
    feature_names_potential.append('change_1_to_0')

print(str(feature_names_potential))

# Duplicate every feature name with an "_old" suffix (previous round's values).
size = len(feature_names_potential)
for s in range(size):
    feature_names_potential.append(feature_names_potential[s] + "_old")

# Feature-selection mask (currently keeps everything; see commented filter).
which_features_to_use = []
for feature_index in range(len(feature_names_potential)):
    if True: #not 'histogram' in feature_names_potential[feature_index]:
        which_features_to_use.append(feature_index)
print which_features_to_use

feature_names_potential = [i for j, i in enumerate(feature_names_potential) if j in which_features_to_use]

feature_gen_time = 0.0
# One outer iteration per evaluation fold: rebuild the feature matrices,
# then run the active-learning loop for (label_iterations * #columns)
# labeling rounds, logging per-round potential features to a CSV file.
for check_this in range(checkN):
    f = open("/home/felix/ExampleDrivenErrorDetection/log_progress_" + dataSet.name + "_" + str(check_this) + ".csv",
             'w+')

    train_indices, test_indices = split_data_indices(dataSet, train_fraction, fold_number=check_this)

    total_start_time = time.time()
    feature_gen_start = time.time()

    all_matrix_train, all_matrix_test, feature_name_list = create_features(dataSet, train_indices, test_indices, ngrams,
                                                                           runSVD)

    if use_metadata:
        all_matrix_train, all_matrix_test, feature_name_list = add_metadata_features(dataSet, train_indices,
                                                                                     test_indices, all_matrix_train,
                                                                                     all_matrix_test, feature_name_list,
                                                                                     use_metadata_only)

    if use_lstm:
        all_matrix_train, all_matrix_test, feature_name_list = add_lstm_features(dataSet, False, all_matrix_train,
                                                                                 feature_name_list)

    print("features: %s seconds ---" % (time.time() - start_time))

    data_result = []

    column_id = 0

    feature_matrix = all_matrix_train.tocsr()

    # Classifier selection (alternatives kept commented out).
    from ml.active_learning.classifier.XGBoostClassifier import XGBoostClassifier
    classifier = XGBoostClassifier(all_matrix_train, all_matrix_test)
    from ml.active_learning.classifier.LinearSVMClassifier import LinearSVMClassifier
    # classifier = LinearSVMClassifier(all_matrix_train, all_matrix_test)
    from ml.active_learning.classifier.NaiveBayesClassifier import NaiveBayesClassifier
    # classifier = NaiveBayesClassifier(all_matrix_train, all_matrix_test)

    # Per-cell error predictions for the train (and optionally test) split.
    all_error_status = np.zeros((all_matrix_train.shape[0], dataSet.shape[1]), dtype=bool)
    if all_matrix_test != None:
        all_error_status_test = np.zeros((all_matrix_test.shape[0], dataSet.shape[1]), dtype=bool)

    feature_gen_time = time.time() - feature_gen_start
    print("Feature Generation Time: " + str(feature_gen_time))

    save_fscore = []
    save_labels = []
    save_certainty = []
    save_fscore_general = []
    save_time = []

    # Per-column state, keyed by column id.
    our_params = {}
    train = {}
    train_target = {}
    train_chosen_ids = {}
    y_pred = {}
    certainty = {}
    min_certainty = {}
    final_gb = {}
    res = {}
    feature_array_all = {}
    zero_change_count = {}
    rounds_per_column = {}
    model = None
    pred_potential = {}
    y_next = {}
    x_next = {}
    id_next = {}
    diff_certainty = {}
    avg_certainty = {}

    for round in range(label_iterations * dataSet.shape[1]):
        print("round: " + str(round))

        if column_id in rounds_per_column:
            current_rounds = rounds_per_column[column_id]
            current_rounds += 1
            rounds_per_column[column_id] = current_rounds
        else:
            rounds_per_column[column_id] = 1

        # switch to column
        target_run, target_test = getTarget(dataSet, column_id, train_indices, test_indices)

        if rounds_per_column[column_id] == 1:
            # First visit to this column: bootstrap a labeled seed set.
            start_time = time.time()
            num_errors = 2
            train[column_id], train_target[column_id], train_chosen_ids[column_id] = create_user_start_data(feature_matrix.tocsr(), target_run,
                                                                                                            num_errors, return_ids=True)
            if train[column_id] == None:
                # No usable seed data -> mark column done and move on.
                certainty[column_id] = 1.0
                #pred_potential[column_id] = -1.0
                column_id = go_to_next_column_round(column_id)
                continue

            print("Number of errors in training: " + str(np.sum(train_target[column_id])))
            print("clustering: %s seconds ---" % (time.time() - start_time))

            # cross-validation
            start_time = time.time()
            classifier.run_cross_validation(train[column_id], train_target[column_id], num_errors, column_id)
            print("cv: %s seconds ---" % (time.time() - start_time))

            min_certainty[column_id] = 0.0
            eval_scores = np.zeros(7)
        else:
            if train[column_id] == None:
                # Column was skipped during bootstrapping: just pick the next one.
                if round < dataSet.shape[1] * number_of_round_robin_rounds:
                    column_id = go_to_next_column_round(column_id)
                else:
                    column_id = go_to_next_column_prob(id_next, avg_certainty)
                continue

            # change column
            if column_id in certainty:
                min_certainty[column_id] = np.min(np.absolute(y_pred[column_id] - 0.5))
            else:
                min_certainty[column_id] = 0.0

            diff = np.absolute(y_pred[column_id] - 0.5)
            print("min certainty: " + str(np.min(diff)))

            '''
            train[column_id], train_target[column_id], certainty[column_id], train_chosen_ids[column_id] = create_next_data(train[column_id],
            train_target[column_id],
            feature_matrix,
            target_run,
            y_pred[column_id],
            step_size,
            dataSet,
            column_id,
            user_error_probability,
            train_chosen_ids[column_id])
            '''

            # Add the batch selected at the end of the previous visit.
            train[column_id], train_target[column_id], train_chosen_ids[column_id] = add_data_next(
                train[column_id],
                train_target[column_id],
                train_chosen_ids[column_id],
                x_next[column_id],
                y_next[column_id],
                id_next[column_id])

            #print "len: " + str(len(train[column_id])) + " - " + str(len(train_target[column_id]))

            # cross-validation
            if round < dataSet.shape[1] * cross_validation_rounds:
                our_params[column_id] = classifier.run_cross_validation(train[column_id], train_target[column_id],
                                                                        num_errors, column_id)
                # print("cv: %s seconds ---" % (time.time() - start_time))

            eval_scores = classifier.run_cross_validation_eval(train[column_id], train_target[column_id], 7, column_id)

        start_time = time.time()

        # train
        # predict
        y_pred_current_prediction, res_new = classifier.train_predict(train[column_id], train_target[column_id],
                                                                      column_id)

        # Squared change of the per-cell probabilities vs the previous round.
        if column_id in y_pred:
            prediction_change_y_pred = np.square(y_pred_current_prediction - y_pred[column_id])
        else:
            prediction_change_y_pred = np.zeros(len(y_pred_current_prediction))

        y_pred[column_id] = y_pred_current_prediction

        # Select the next batch of cells for the user to label.
        x_next[column_id], y_next[column_id], diff_certainty[column_id], id_next[column_id] = create_next_part(
            feature_matrix,
            target_run,
            y_pred[column_id],
            step_size,
            dataSet,
            column_id,
            user_error_probability,
            train_chosen_ids[column_id])

        print "size x: " + str(len(x_next[column_id]))

        if column_id in res:
            no_change_0, no_change_1, change_0_to_1, change_1_to_0 = compare_change(res[column_id], res_new)
            print("no change 0: " + str(no_change_0) + " no change 1: " + str(no_change_1) + " sum no change: " + str(
                no_change_0 + no_change_1))
            print("change 0 ->1: " + str(change_0_to_1) + " change 1->0: " + str(change_1_to_0) + " sum change: " + str(
                change_0_to_1 + change_1_to_0))
        else:
            no_change_0, no_change_1, change_0_to_1, change_1_to_0 = compare_change(np.zeros(len(res_new)), res_new)

        res[column_id] = res_new
        all_error_status[:, column_id] = res[column_id]
        print("train & predict: %s seconds ---" % (time.time() - start_time))

        if all_matrix_test != None:
            y_pred_test, res_gen = classifier.predict(column_id)
            all_error_status_test[:, column_id] = res_gen

        # visualize_model(dataSet, column_id, final_gb, feature_name_list, train, target_run, res)

        print ("current train shape: " + str(train[column_id].shape))

        print ("column: " + str(column_id))
        print_stats(target_run, res[column_id])
        print_stats_whole(dataSet.matrix_is_error[train_indices, :], all_error_status, "run all")
        calc_my_fscore(dataSet.matrix_is_error[train_indices, :], all_error_status, dataSet)
        if all_matrix_test != None:
            print_stats_whole(dataSet.matrix_is_error[test_indices, :], all_error_status_test, "test general")

        # Total number of user-provided labels so far (across all columns).
        number_samples = 0
        for key, value in train.iteritems():
            if value != None:
                number_samples += value.shape[0]
        print("total labels: " + str(number_samples) + " in %: " + str(
            float(number_samples) / (dataSet.shape[0] * dataSet.shape[1])))

        sum_certainty = 0.0
        for key, value in certainty.iteritems():
            if value != None:
                sum_certainty += value
        sum_certainty /= dataSet.shape[1]
        print("total certainty: " + str(sum_certainty))

        save_fscore.append(f1_score(dataSet.matrix_is_error[train_indices, :].flatten(), all_error_status.flatten()))
        if all_matrix_test != None:
            save_fscore_general.append(
                f1_score(dataSet.matrix_is_error[test_indices, :].flatten(), all_error_status_test.flatten()))
        save_labels.append(number_samples)
        save_certainty.append(sum_certainty)

        # ------------------------------------------------------------------
        # Build the per-round feature vector that is logged for the
        # column-potential model (one row per round in the CSV).
        # ------------------------------------------------------------------
        num_hist_bin = 100

        diff = np.absolute(y_pred[column_id] - 0.5)
        certainty_here = (np.sum(diff) / len(diff)) * 2

        distinct_values_fraction = float(
            len(dataSet.dirty_pd[dataSet.dirty_pd.columns[column_id]].unique())) / float(dataSet.shape[0])

        feature_array = []
        feature_array.append(distinct_values_fraction)
        feature_array.append(train[column_id].shape[0])
        feature_array.append(certainty_here)
        avg_certainty[column_id] = certainty_here
        feature_array.append(np.std(diff))
        feature_array.append(np.min(np.absolute(y_pred[column_id] - 0.5)))

        # Histogram of the certainty values (100 bins over [0, 0.5)).
        for i in range(num_hist_bin):
            feature_array.append(float(len(
                diff[np.logical_and(diff >= i * (0.5 / num_hist_bin), diff < (i + 1) * (0.5 / num_hist_bin))])) / len(
                diff))

        predicted_error_fraction = float(np.sum(y_pred[column_id] > 0.5)) / float(len(y_pred[column_id]))
        print "predicted error fraction: " + str(predicted_error_fraction)
        feature_array.append(predicted_error_fraction)

        for score in eval_scores:
            feature_array.append(score)
        feature_array.append(np.mean(eval_scores))
        feature_array.append(np.std(eval_scores))

        training_error_fraction = float(np.sum(train_target[column_id])) / float(len(train_target[column_id]))
        print "training error fraction: " + str(training_error_fraction)
        feature_array.append(training_error_fraction)

        # Histogram of the squared prediction change (100 bins over [0, 1)).
        hist_pred_change = []
        for histogram_i in range(num_hist_bin):
            feature_array.append(float(len(prediction_change_y_pred[np.logical_and(
                prediction_change_y_pred >= histogram_i * (1.0 / num_hist_bin),
                prediction_change_y_pred < (histogram_i + 1) * (1.0 / num_hist_bin))])) / len(prediction_change_y_pred))
            hist_pred_change.append(float(len(prediction_change_y_pred[np.logical_and(
                prediction_change_y_pred >= histogram_i * (1.0 / num_hist_bin),
                prediction_change_y_pred < (histogram_i + 1) * (1.0 / num_hist_bin))])) / len(prediction_change_y_pred))

        feature_array.append(np.mean(prediction_change_y_pred))
        feature_array.append(np.std(prediction_change_y_pred))
        print "Mean Squared certainty change: " + str(np.mean(prediction_change_y_pred))

        batch_certainties = diff_certainty[column_id][id_next[column_id]]
        assert len(batch_certainties) == 10
        for batch_certainty in batch_certainties:
            feature_array.append(batch_certainty)

        # print "hist: pred: " + str(hist_pred_change)
        # plt.bar(range(100), hist_pred_change)
        # plt.show()

        if use_change_features:
            feature_array.append(no_change_0)
            feature_array.append(no_change_1)
            feature_array.append(change_0_to_1)
            feature_array.append(change_1_to_0)

        # Concatenate the current round's features with the previous round's
        # ("_old" columns) for the potential model's input vector.
        feature_vector = []
        if column_id in feature_array_all:
            if not run_round_robin:
                column_list = feature_array_all[column_id]
                column_list.append(feature_array)
                feature_array_all[column_id] = column_list
                feature_vector.extend(feature_array)
                feature_vector.extend(column_list[len(column_list) - 2])
                feature_vector_new = np.matrix(feature_vector)[0, which_features_to_use]
                '''
                if model == None:
                    model = load_model(dataSet, classifier)
                mat_potential = xgb.DMatrix(feature_vector_new, feature_names=feature_names_potential)
                pred_potential[column_id] = model.predict(mat_potential)
                print("prediction: " + str(pred_potential[column_id]))
                '''
        else:
            column_list = []
            column_list.append(feature_array)
            feature_array_all[column_id] = column_list

        # Log the feature vector plus (f1, fp, fn, tp) for this round.
        for feature_e in feature_array:
            f.write(str(feature_e) + ",")

        tn, fp, fn, tp = confusion_matrix(target_run, res[column_id]).ravel()
        # tn = float(tn) / float(len(target_run))
        fp = float(fp) / float(len(target_run))
        fn = float(fn) / float(len(target_run))
        tp = float(tp) / float(len(target_run))

        f.write(str(f1_score(target_run, res[column_id])) + "," + str(fp) + "," + str(fn) + "," + str(tp) + '\n')

        # Column selection: round-robin first, then lowest average certainty.
        if round < dataSet.shape[1] * number_of_round_robin_rounds:
            column_id = go_to_next_column_round(column_id)
        else:
            print ("start using prediction")
            column_id = go_to_next_column_prob(id_next, avg_certainty)

        current_runtime = (time.time() - total_start_time)
        print("iteration end: %s seconds ---" % current_runtime)
        save_time.append(current_runtime)

    print (save_fscore)
    print (save_fscore_general)
    print (save_labels)
    print (save_certainty)
    print (save_time)

    f.close()
| [
"neutatz@googlemail.com"
] | neutatz@googlemail.com |
6d4e4ab81c65d0f7ff8022ea8cb01c733817acb7 | e5753cbfc512c5905c5629964b22d7ba5978654c | /training_codes_and_data/transform_frame.py | dcac89686af4fc2c9d32e349277e8a465a6ff5db | [] | no_license | yalim/leap-wam-controller | a9b38f30ec52096ededb0410c3395f963c6ad912 | 7d98b6fbe98287b337930cee86c9cddab08051c2 | refs/heads/master | 2020-06-04T22:46:43.101918 | 2014-07-10T10:39:11 | 2014-07-10T10:39:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,491 | py | import numpy as np
from math import sin, cos
def transform_frame(x_list, y_list, z_list, rpy=(-2.957, 0.053, 1.516), translation=(0.904, -0.015, 0.723)):
    '''
    Transform nested lists of 3D points between two frames.

    Each of x_list/y_list/z_list is a list of samples, where each sample is a
    list of coordinates: point i of sample s is
    (x_list[s][i], y_list[s][i], z_list[s][i]).  Every point is rotated by the
    roll/pitch/yaw angles and then translated, i.e. p' = T * R * p in
    homogeneous coordinates (same order as the original code).

    Parameters:
        x_list, y_list, z_list -- nested lists of point coordinates
        rpy -- (roll, pitch, yaw) in radians; defaults to the calibration that
               was previously hard-coded
        translation -- (x, y, z) offset; defaults to the previously
               hard-coded calibration offset

    Returns:
        (x_prime, y_prime, z_prime) -- transformed coordinates with the same
        nested-list structure as the inputs
    '''
    r, p, y = rpy
    xt, yt, zt = translation
    cr, sr = cos(r), sin(r)
    cp, sp = cos(p), sin(p)
    cy, sy = cos(y), sin(y)
    # Roll-pitch-yaw rotation as a 4x4 homogeneous matrix.
    rpy_matrix = np.matrix([[cy*cp, cy*sp*sr - sy*cr, cy*sp*cr + sy*sr, 0],
                            [sy*cp, sy*sp*sr + cy*cr, sy*sp*cr - cy*sr, 0],
                            [-sp,   cp*sr,            cp*cr,            0],
                            [0,     0,                0,                1]])
    # Pure translation as a 4x4 homogeneous matrix.
    translation_matrix = np.matrix([[1, 0, 0, xt],
                                    [0, 1, 0, yt],
                                    [0, 0, 1, zt],
                                    [0, 0, 0, 1]])
    # Compose once (rotate, then translate) instead of multiplying two
    # matrices per point inside the loop.
    transform = translation_matrix * rpy_matrix

    x_prime = []
    y_prime = []
    z_prime = []
    for x_sample, y_sample, z_sample in zip(x_list, y_list, z_list):
        x_prime_sample = []
        y_prime_sample = []
        z_prime_sample = []
        for px, py, pz in zip(x_sample, y_sample, z_sample):
            transformed_point = transform * np.matrix([[px], [py], [pz], [1]])
            x_prime_sample.append(transformed_point[0, 0])
            y_prime_sample.append(transformed_point[1, 0])
            z_prime_sample.append(transformed_point[2, 0])
        x_prime.append(x_prime_sample)
        y_prime.append(y_prime_sample)
        z_prime.append(z_prime_sample)
    return x_prime, y_prime, z_prime
if __name__ == '__main__':
    # Smoke test: transforming the origin (twice) should yield the frame offset.
    X, Y, Z = transform_frame([[0], [0]], [[0], [0]], [[0], [0]])
    # BUGFIX: 'print X' is Python-2-only syntax and is a SyntaxError under
    # Python 3; the call form below works under both interpreters.
    print(X)
    print(Y)
    print(Z)
"yisleyici@droopy.(none)"
] | yisleyici@droopy.(none) |
245f48272b609bee9575197d6d95b25c22e9116e | 3aea902c5e864f23c1a7875b2cb8d8c06ef61762 | /Game.py | 781753a8dad950783a73150acdde05d406f52cf0 | [] | no_license | FinnDority/CS205-Final | e2c90ef178c3388c75c384afbcaf71b3d73bf300 | a21e59808d0c91a2d4482ad4da58a14190850453 | refs/heads/master | 2022-04-21T13:53:59.012507 | 2020-04-24T22:19:24 | 2020-04-24T22:19:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,328 | py | # Team 9 RISK
# Game can be run by itself for debugging, run the RiskGUI to setup player settings
# Imports needed modules
import functools
import glob
import pickle
import pygame
from pygame import *
from Sprites import Sprites
import Constants as c
# Class contains pygame methods
class Game():
    """Pygame front end for the RISK board.

    Holds references to the current Turn/Map/players and keeps the layered
    lists (surfaces, dice, HUD text, overlays) that display() re-blits every
    frame.  NOTE(review): indentation below was reconstructed from program
    structure; confirm against the original file.
    """

    def __init__(self, pygameWindow, Turn): #Initializes surfaces for pygame given pygame and round instances
        self.pygameWindow = pygameWindow
        # Updates current objects
        self.map = Turn.map
        self.players = Turn.players
        self.Turn = Turn
        self.numTroops = 25 #Sets number of troops
        self.selectedTerritory = None
        self.interfaceDice = [] #Contains dice results
        self.functions = [] #Contains function calls
        self.interfaceText = [] #Contains text layers for HUD
        self.surfaces = [] #Contains surface layers
        self.tempTerritoryList = [] #Contains territory layers
        self.textList = [] #Contains text overlays
        self.topLevel = [] #Used to hold help and win screen

    @property # Decorator overwrites get/set, method checks min deployment troops
    def troopCount(self):
        """During the Placement phase (0), cap the selection at the player's
        deployable troop pool; otherwise return the raw selection."""
        if self.Turn.phase == 0:
            return min(self.numTroops, self.players[self.Turn.turnCount-1].nb_troupes) #Cannot deploy more then total - 1 troops from territory
        else:
            return self.numTroops

    @troopCount.setter # Alternative corresponding decorator
    def troopCount(self, troopVal):
        """Clamp the requested troop count to the legal range for the phase."""
        if self.Turn.phase == 0: #Checks troop placement during different phases
            if troopVal < 1:
                self.numTroops = 1
                print("Too few troops")
            elif troopVal > self.players[self.Turn.turnCount - 1].nb_troupes:
                self.numTroops = self.players[self.Turn.turnCount - 1].nb_troupes
                print("Too many troops")
        else:
            if troopVal < 0:
                self.numTroops = 0
                print("Too few troops")
            elif troopVal > self.selectedTerritory.nb_troupes - 1:
                self.numTroops = self.selectedTerritory.nb_troupes - 1 #Minimum of 1 troop per territory
                print("Too many troops")
        # NOTE(review): this unconditional assignment overrides the clamped
        # values set above -- confirm whether it was meant to sit in an else
        # branch of the clamping logic.
        self.numTroops = troopVal

    # Sets a color layer on territory sprites based on player color
    def colorTerritories(self, sprites):
        """Tint each territory sprite with its owning player's color."""
        for p in self.players:
            for territories in p.territories:
                sprite = next((s for s in sprites if s.id == territories), None)
                setSurfaceColor(sprite, p.color, 255)

    # (disabled) An initial start-menu screen (menu()) was sketched here; it
    # only blitted a scaled background image and was never wired up.

    # Method initialzes map surface
    def run(self):
        """Build the base surface stack: background, HUD bar, world map."""
        self.surfaces=[]
        background = pygame.image.load(c.imagePath + c.backgroundImage).convert()
        # Auto resize to fit background
        resize = c.windowLength/background.get_width()
        w = int(resize * background.get_width())
        h = int(resize * background.get_height())
        background = pygame.transform.scale(background, (w, h))
        # Auto resize to fit base map
        worldMap = pygame.image.load(c.imagePath + c.mapImages).convert_alpha()
        resize = c.windowLength/worldMap.get_width()
        w = int(resize * worldMap.get_width())
        h = int(resize * worldMap.get_height())
        worldMap = pygame.transform.scale(worldMap, (w, h))
        # Player HUD
        barre = pygame.image.load(c.imagePath + c.bareImage).convert_alpha()
        barre = pygame.transform.scale(barre, (c.windowLength, c.windowWidth - h))
        self.functions = []
        self.surfaces.extend([[background, (0, 0)], [barre, (0, h)], [worldMap, (0, 0)]])

    # Method utilizes overlay methods to update pygameWindow
    def display(self, function = None):
        """Main game loop: handle input events, run the phase logic for the
        territory under the cursor, and re-blit all surface layers each frame.

        The `function` parameter is unused -- queued callables are taken from
        self.functions instead (NOTE(review): confirm it can be dropped).
        """
        # Loads png sprites for highlighting selected territories
        worldTerritories = glob.glob(c.mapPath + "*.png")
        territorySprites = []
        highlightedTerritories = []
        selectedTerritory = -1
        # Boolean flags for player functions
        selectFlag = False
        attackFlag = False
        helpFlag = False
        gameEnd = False
        # Format territory sprites and add to surface
        for i, j in enumerate(worldTerritories):
            surface = pygame.image.load(j).convert()
            resize = c.windowLength/surface.get_width()
            surface = pygame.transform.scale(surface, (int(resize * surface.get_width()), int(resize * surface.get_height())))
            territorySprite = Sprites(surface, j)
            initialSpriteLayer = Sprites(surface.copy(), j)
            setSurfaceColor(initialSpriteLayer, (1, 1, 1), 150)
            territorySprites.append(territorySprite)
            highlightedTerritories.append(initialSpriteLayer)
        # Creates final layer of all connected sprites
        self.colorTerritories(territorySprites)
        for i, j in enumerate(territorySprites):
            if i == 0:
                finalLayout = j.layout.copy()
            else:
                finalLayout.blit(j.layout, (0, 0))
        # Update visual troop numbers
        troopDisplay(self.textList, territorySprites, self.map)
        # Event handler
        while (not gameEnd):
            for event in pygame.event.get(): #Checks every mouse and key action in window
                if event.type == QUIT:
                    print("Ending game!")
                    gameEnd = True
                # Handling key presses
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE: #Exit program on key press
                        print("Ending game!")
                        gameEnd = True
                    # (disabled) 'k'/'l' save/load hotkeys were removed here:
                    # pygame Surface objects are not picklable, so pickling
                    # the interface state failed.
                    elif event.key == K_n: #Proceed to next round
                        try:
                            self.Turn.next()
                        except ValueError as e:
                            print(e.args)
                        self.tempTerritoryList = [] #Resets selected territory for next player
                        selectFlag = False
                        selectedTerritory = 0
                    elif event.key == K_h: #Help screen
                        helpFlag = not helpFlag
                # Handling mouse-clicks/scrolls
                elif event.type == MOUSEBUTTONDOWN:
                    try:
                        if event.button == 3: #Right mouse-click to unselect (selected) territory
                            self.tempTerritoryList = []
                            selectFlag = False
                            selectedTerritory = 0
                        elif event.button == 4: #Scroll mousewheel down to increase selected troops
                            self.troopCount += 1
                        elif event.button == 5: #Scroll mousewheel down to decrease selected troops
                            if self.troopCount > 0:
                                self.troopCount -= 1
                    except AttributeError as e:
                        print("You should select a country first ...")
                    except ValueError as e:
                        print(e.args)
            # Sends layers to surface of pygame
            for surface in self.surfaces:
                self.pygameWindow.blit(surface[0], surface[1])
            for dice in self.interfaceDice:
                self.pygameWindow.blit(dice[0], dice[1])
            self.pygameWindow.blit(finalLayout, (0, 0))
            for tempTerritoryList in self.tempTerritoryList:
                self.pygameWindow.blit(tempTerritoryList, (0, 0))
            for text in self.textList:
                self.pygameWindow.blit(text[0], text[1])
            for t in self.interfaceText:
                self.pygameWindow.blit(t[0], t[1])
            for final in self.topLevel:
                self.pygameWindow.blit(final[0], final[1])
            if self.functions != []:
                for f in self.functions:
                    f()
            # Shows victory screen if player completes domination goal
            if self.Turn.players[self.Turn.turnCount - 1].obj.getGoalStatus() == True:
                self.topLevel = []
                topLayer = pygame.Surface(self.pygameWindow.get_size())
                topLayer = topLayer.convert()
                topLayer.fill(c.black)
                topLayer.set_alpha(180)
                self.topLevel.append([topLayer, (0,0)])
                display_win(self.topLevel,self.players)
            # Uses same top layer to contain help screen
            else:
                if helpFlag:
                    self.topLevel=[]
                    topLayer = pygame.Surface(self.pygameWindow.get_size())
                    topLayer = topLayer.convert()
                    topLayer.fill(c.black)
                    topLayer.set_alpha(180)
                    self.topLevel.append([topLayer, (0,0)])
                    display_help(self.topLevel)
                else:
                    self.topLevel=[]
            # Highlight territories as cursor moves over them
            mouse = pygame.mouse.get_pos()
            try:
                tempColorValue=self.surfaces[2][0].get_at((mouse[0], mouse[1]))
            except IndexError as e:
                pass
            # Setups user GUI layout and enables player functions
            # (territory ids are encoded in the map's red channel as id + 100)
            try:
                if tempColorValue != (0,0,0,0) and tempColorValue != (0,0,0,255):
                    temptroopValID = tempColorValue[0] - 100
                    spriteLayer = next((territorySprite for territorySprite in highlightedTerritories if territorySprite.id == temptroopValID), None)
                    # Update selected territory visuals
                    if temptroopValID != selectedTerritory:
                        self.pygameWindow.blit(spriteLayer.layout, (0, 0))
                        pygame.display.update(spriteLayer.layout.get_rect())
                    # On click, check phase and territory function validity
                    click = pygame.mouse.get_pressed()
                    # Placing reinforcements on owned territories
                    if self.Turn.list_phase[self.Turn.phase] == "Placement":
                        if click[0] == 1:
                            playerTerritory = next((p for p in self.map.territories if p.id == temptroopValID), None)
                            if playerTerritory.id_player == self.Turn.turnCount:
                                self.Turn.placeTroops(playerTerritory, self.troopCount)
                                pygame.time.wait(100)
                            else:
                                print("This territory does not belong to the player!")
                    # Attacking neighboring territories with n-1 troops
                    elif self.Turn.list_phase[self.Turn.phase] == "Attack":
                        if click[0] == 1 and not selectFlag:
                            startTerritory = next((p for p in self.map.territories if p.id == temptroopValID), None)
                            self.selectedTerritory = startTerritory
                            if startTerritory.id_player == self.Turn.turnCount and startTerritory.nb_troupes > 1:
                                self.troopCount = startTerritory.nb_troupes-1
                                self.tempTerritoryList.append(spriteLayer.layout)
                                selectFlag = True
                                selectedTerritory = temptroopValID
                        elif click[0] == 1: # Selecting territory to attack
                            endTerritory = next((p for p in self.map.territories if p.id == temptroopValID), None)
                            if attackFlag and endTerritory == targetTerritory and startTerritory.nb_troupes > 1:
                                self.Turn.troopMovement(startTerritory, endTerritory, self.troopCount)
                                selectFlag = False
                                self.tempTerritoryList = []
                                attackFlag = False
                            elif attackFlag:
                                selectFlag = False
                                self.tempTerritoryList = []
                                attackFlag = False
                            elif endTerritory.id_player != self.Turn.turnCount and endTerritory.id in startTerritory.voisins: #Attack with home troops
                                try:
                                    self.interfaceDice = []
                                    attackResult, diceResults = self.Turn.attack(startTerritory, endTerritory, self.troopCount)
                                    for i,res in enumerate(diceResults):
                                        diceRolls(self, res[0], res[2], 600, territorySprites[0].layout.get_height() + 10 + i * c.diceSize * 1.1)
                                        diceRolls(self, res[1], res[3], 800, territorySprites[0].layout.get_height() + 10 + i * c.diceSize * 1.1)
                                    pygame.time.wait(100)
                                except ValueError as e:
                                    print(e.args)
                                    attackResult = False
                                    selectFlag = False
                                    self.tempTerritoryList = []
                                if attackResult: #On successful attack, update visuals
                                    sprite = next((s for s in territorySprites if s.id == temptroopValID), None)
                                    setSurfaceColor(sprite, self.Turn.players[self.Turn.turnCount - 1].color, 255)
                                    finalLayout.blit(sprite.layout,(0,0))
                                    attackFlag = True
                                    targetTerritory = endTerritory
                                    self.troopCount = startTerritory.nb_troupes - 1
                            else:
                                selectFlag = False
                                self.tempTerritoryList = []
                    # Moving troops between territories
                    elif self.Turn.list_phase[self.Turn.phase] == "Movement":
                        if click[0] == 1 and not selectFlag: #On left click select territory
                            startTerritory = next((p for p in self.map.territories if p.id == temptroopValID), None)
                            self.selectedTerritory = startTerritory
                            if startTerritory.id_player == self.Turn.turnCount and startTerritory.nb_troupes > 1:
                                self.troopCount = startTerritory.nb_troupes - 1
                                self.tempTerritoryList.append(spriteLayer.layout)
                                selectFlag = True
                                selectedTerritory = temptroopValID
                        elif click[0] == 1: #On right click unselect territory
                            endTerritory = next((p for p in self.map.territories if p.id == temptroopValID), None)
                            path = self.map.checkPathValid(self.Turn.players[self.Turn.turnCount - 1].territories, startTerritory, endTerritory)
                            selectFlag = False
                            selectedTerritory = 0
                            self.tempTerritoryList = []
                            if path and endTerritory.id != startTerritory.id:
                                self.Turn.troopMovement(startTerritory, endTerritory, self.troopCount)
                                self.Turn.next()
                    # Update troop text overlay visuals
                    self.textList = []
                    troopDisplay(self.textList, territorySprites, self.map)
            except ValueError as e:
                pass
            # Update HUD text visuals
            self.interfaceText = []
            display_hud(self.troopCount, self.interfaceText, self.Turn, (75, territorySprites[0].layout.get_height() + 10))
            pygame.display.flip()
# Returns information for text handling
def textArea(text, font, color = (0, 0, 0)):
    """Render *text* with *font* in *color* and return (surface, bounding rect)."""
    rendered = font.render(text, True, color)
    return rendered, rendered.get_rect()
# Creates clickable area for mouse interactions and overlays with text
def button(txt, xPos, yPos, width, height, ic, ac, command = None):
    """Draw a clickable rectangle with a centered label.

    Arguments:
        txt -- label drawn in the middle of the button
        xPos, yPos, width, height -- button rectangle in window coordinates
        ic, ac -- inactive / active (hovered) fill colors
        command -- optional zero-argument callable queued on Win.functions
                   when the button is left-clicked

    NOTE(review): relies on module-level names `pygameWindow` and `Win` being
    defined before this is called (they only exist once the __main__ block has
    run) -- confirm intended usage.
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    if xPos + width > mouse[0] > xPos and yPos + height > mouse[1] > yPos:
        # Hovered: draw with the active color and react to a left click.
        pygame.draw.rect(pygameWindow, ac,(xPos, yPos, width, height))
        # BUGFIX: the body referenced the undefined name `action` instead of
        # the `command` parameter, raising NameError on every hover-click.
        if click[0] == 1 and command != None:
            Win.functions.append(command)
    else:
        pygame.draw.rect(pygameWindow, ic,(xPos, yPos, width, height))
    smallText = pygame.font.Font(None, 20)
    textSurface, textBox = textArea(txt, smallText)
    # BUGFIX: `w` was undefined; the label is centered using `width`.
    textBox.center = ((xPos + (width/2)), (yPos + (height/2)))
    pygameWindow.blit(textSurface, textBox)
# Sets sprite overlay colors
def setSurfaceColor(sprite, color, alpha):
    """Recolor every non-black pixel inside sprite.bounds to *color* and set
    the whole layout surface's alpha to *alpha*.

    Black (0, 0, 0) pixels are treated as transparent background and left
    untouched.  Per-pixel get_at/set_at is slow, but this only runs during
    setup and on territory conquest, not every frame.
    """
    for x in range(0, sprite.bounds.width):
        for y in range(0, sprite.bounds.height):
            if sprite.layout.get_at((sprite.bounds.x + x, sprite.bounds.y + y)) != (0, 0, 0):
                sprite.layout.set_at((sprite.bounds.x + x, sprite.bounds.y + y), color)
    sprite.layout.set_alpha(alpha)
# Update troop visual count
def troopDisplay(textList, sprites, Map):
    """Append one centered troop-count label per territory sprite to textList."""
    font = pygame.font.Font(None, 25)
    for territorySprite in sprites:
        # Territory ids are 1-based while Map.territories is 0-based.
        territory = Map.territories[territorySprite.id - 1]
        surface, box = textArea(str(territory.nb_troupes), font)
        box.center = territorySprite.bounds.center
        textList.append([surface, box])
# Player victory screen if a player completes goals
def display_win(topLevel, players):
    """Append a '<name> wins!' banner to the topLevel overlay for every player
    whose objective is complete (normally exactly one), stacking lines
    vertically from (200, 200)."""
    font = pygame.font.Font(None, 75)
    margin = 50
    position = (200, 200)
    for player in players:
        if player.obj.getGoalStatus() == True:
            surface, box = textArea(player.name + " wins!", font, player.color)
            box.topleft = position
            position = (position[0], position[1] + margin)
            topLevel.append([surface, box])
# Adds text to top layer for help screen
def display_help(topLevel):
    """Append the help-screen key-binding lines to the topLevel overlay.

    Each line is rendered in white and stacked vertically from (200, 200)
    with a fixed margin, exactly as before.
    """
    largeText = pygame.font.Font(None, 50)
    margin = 50
    textPosition = (200, 200)
    # Refactor: the original repeated the render/position/append/advance block
    # once per line; a single loop over the help strings behaves identically.
    helpLines = [
        "'h' key: Help",
        "Left/Right Mouse-click : Select/Deselect Territory",
        "Scroll Wheel Up/Down : Increase/Decrease Troop Selection",
        "'n' key: Next phase",
        "'esc' key: quit",
    ]
    for line in helpLines:
        textSurface, textBox = textArea(line, largeText, c.white)
        textBox.topleft = textPosition
        topLevel.append([textSurface, textBox])
        textPosition = (textPosition[0], textPosition[1] + margin)
# Player interface text updates
def display_hud(troopCount, interfaceText, Turn, textPosition):
    """Rebuild the bottom-bar HUD text: round/phase/player in the left column
    and troop counters in the middle column.

    Arguments:
        troopCount -- currently selected number of troops
        interfaceText -- list of [surface, rect] pairs to append to (caller
                         clears and re-blits it every frame)
        Turn -- current Turn object (round number, phase, active player)
        textPosition -- top-left anchor of the first line
    """
    smallText = pygame.font.Font(None, 25)
    margin = 20
    # `col` was sketched for a grid layout but is unused; positions are
    # derived from textPosition below.
    col = [100, 400, 700, 1000]
    row = textPosition[1]
    # FIRST COLUMN TEXT ... position carries over to next
    textSurface, textBox = textArea("Round : " + str(Turn.num), smallText)
    textBox.topleft = (textPosition[0], textPosition[1])
    interfaceText.append([textSurface, textBox])
    textSurface, textBox = textArea("Phase : " + Turn.list_phase[Turn.phase], smallText)
    textPosition = (textPosition[0], textPosition[1] + margin + margin)
    textBox.topleft = textPosition
    interfaceText.append([textSurface, textBox])
    textSurface, textBox = textArea("Player : ",smallText)
    textPosition = (textPosition[0], textPosition[1] + margin + margin)
    textBox.topleft = textPosition
    interfaceText.append([textSurface, textBox])
    # Active player's name, rendered in their own color, next to the label.
    textSurface, textBox = textArea(Turn.players[Turn.turnCount -1 ].name, smallText, Turn.players[Turn.turnCount - 1].color)
    textBox.topleft = (textPosition[0] + 70, textPosition[1])
    interfaceText.append([textSurface, textBox])
    # MIDDLE COLUMN TEXT (walks back up from the bottom line)
    textSurface, textBox = textArea("Number of Selected Troops : " + str(troopCount), smallText)
    textPosition = (textPosition[0] + 200, textPosition[1])
    textBox.topleft = textPosition
    interfaceText.append([textSurface, textBox])
    textSurface, textBox = textArea("Available number of troops to deploy : " + str(Turn.players[Turn.turnCount - 1].nb_troupes), smallText)
    textPosition = (textPosition[0], textPosition[1] - margin - margin)
    textBox.topleft = textPosition
    interfaceText.append([textSurface, textBox])
    textSurface, textBox = textArea("Troops per turn : " + str(Turn.players[Turn.turnCount - 1].sbyturn), smallText)
    textPosition = (textPosition[0], textPosition[1] - margin - margin)
    textBox.topleft = textPosition
    interfaceText.append([textSurface, textBox])
# Updates dice visuals and shows respective losses as a column
def diceRolls(gameInstance, troopLosses, numDies, xPos, yPos):
    """Blit one row of dice faces plus one tombstone per lost troop.

    Arguments:
        gameInstance -- Game object; surfaces are kept on its interfaceDice
                        list so they are re-blitted every frame
        troopLosses -- number of troops lost in this roll (tombstones are
                       drawn leftwards from xPos)
        numDies -- iterable of die face values used to pick the sprite file
        xPos, yPos -- anchor of the first die; dice advance to the right
    """
    tempDiceLayer = []
    for i, j in enumerate(numDies): #Gets correct die sprite and resizes
        dieSprite = pygame.image.load(c.dicePath + str(j) + ".png").convert_alpha()
        resizeSprite = pygame.transform.scale(dieSprite, (c.diceSize, c.diceSize))
        tempDiceLayer.append([resizeSprite, gameInstance.pygameWindow.blit(resizeSprite, (i * c.diceSize * 1.1 + xPos, yPos))])
    for deaths in range(0, troopLosses): #Gets tombstome sprite to represent losses in a row
        tombstoneSprite = pygame.image.load(c.imagePath + c.deadImage).convert_alpha()
        resizeSprite = pygame.transform.scale(tombstoneSprite, (c.diceSize, c.diceSize))
        tempDiceLayer.append([resizeSprite, gameInstance.pygameWindow.blit(resizeSprite, (xPos - (deaths + 1) * c.diceSize * 1.1, yPos))])
    gameInstance.interfaceDice.extend(tempDiceLayer)
#### CANNOT SAVE SURFACE...
### Save and restore game state using pickle
##def saveGame(save):
## with open("saved_game", "wb") as l: #DOES NOT WORK
## print("Game has been saved")
## pickle.dump(save, l)
##
##
##def loadGame(save):
## with open("saved_game","rb") as l:
## print("Save has been loaded")
## save = pickle.load(l)
# Secondary run, used for debugging
if __name__ == "__main__":
    from tkinter import *
    import random
    import copy
    from Map import Map
    from Player import Player
    from Card import Card
    from Turn import Turn
    import Constants as c
    # Run risk with set player params
    tempMap = Map()
    turn = Turn(3, tempMap) # Turn object created given number players and map object
    turn.initialTroops() # Sets starting troops, varies depending on number of players
    turn.distributeTerritories(tempMap.territories) # Distributes territories to players from map list
    Continents = tempMap.continents
    # Initialize players (hard-coded debug roster; GUI launcher sets these
    # normally)
    turn.players[0].color = c.riskRed #c.red
    turn.players[1].color = c.riskGreen #c.green
    turn.players[2].color = c.riskBlue #c.blue
##    turn.players[3].color = c.yellow
##    turn.players[4].color = c.purple
##    turn.players[5].color = c.teal
    turn.players[0].name = "Duncan"
    turn.players[1].name = "Isaac"
    turn.players[2].name = "Lily"
##    turn.players[3].name = "Finn"
##    turn.players[4].name = "Anna"
##    turn.players[5].name = "Brianna"
    # Setup and start pygame
    pygame.init()
    pygameWindow = pygame.display.set_mode((c.windowLength, c.windowWidth))
    # Create instance of Game to contain risk objects
    try:
        gameInstance = Game(pygameWindow, turn)
##        # User in game menu until button click
##        displayFlag = False
##        while (not displayFlag):
##            gameInstance.functions.append(gameInstance.menu)
##            gameInstance.display()
        gameInstance.functions.append(gameInstance.run)
        gameInstance.display()
    except UnboundLocalError:
        # Raised from colorTerritories/setSurfaceColor when a territory
        # sprite fails to load (presumably missing map PNGs) -- TODO confirm.
        print("Colorization of map error, restart game and try again!")
| [
"kevin.yeung.1@uvm.edu"
] | kevin.yeung.1@uvm.edu |
df94b7459a1e5e973592fb5dc4ecba8d5dbddbbc | 1847b28cf4944085d93f78ed282ea56a6482787e | /blog/views.py | 6a7f74fe88ca1b3be40382529bb5ef3d9ec5c91a | [] | no_license | Slavian2015/slava_blog | 6a66bb6fb4adcb6093c3cd6b45008aa2989f0487 | b277049bc1e30b0a1ebc6057f49c0501585fa654 | refs/heads/master | 2023-03-16T01:12:57.083422 | 2021-03-15T10:11:04 | 2021-03-15T10:11:04 | 347,901,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | from django.shortcuts import render
from .models import Post, Profile
def follow_unfollow(my_id, id_to_follow):
profile = my_id
pk = id_to_follow
obj = Profile.objects.get(pk=pk)
if obj.user in profile.following.all():
print("remove :", "\n", obj.user, "\n", profile.following.all())
profile.following.remove(obj.user)
else:
print("add :", "\n", obj.user, "\n", profile.following.all())
profile.following.add(obj.user)
return
def posts_of_following_profiles(request):
    """Home feed: the post querysets of every followed profile plus the
    requesting user's own posts."""
    profile = Profile.objects.get(user=request.user)
    followed_users = list(profile.following.all())
    posts = [Profile.objects.get(user=u).post_set.all() for u in followed_users]
    posts.append(profile.profiles_posts())
    # Dev helper: enable once to seed a follow relation for this user,
    # then disable again after the first page refresh.
    # follow_unfollow(profile, 2)
    return render(request, 'home.html', {"profile": profile, "posts": posts})
def posts_of_my_profiles(request):
    """Render only the requesting user's own posts."""
    profile = Profile.objects.get(user=request.user)
    own_posts = profile.profiles_posts()
    return render(request, 'home_all.html', {"profile": profile, "posts": own_posts})
def posts_of_all_profiles(request):
    """Render every post from every profile."""
    return render(request, 'home_all.html', {"posts": Post.objects.all()})
| [
"slavaku2014@gmail.com"
] | slavaku2014@gmail.com |
f8f34d24b3dbcd96aaa7b332b9adb079dc3cca9a | 9fa48d3f7c33957399fa7f4de51b04ae6bfb019d | /features.py | 76942f1e44ff61b48f74cd0568c6c5dbeed4a296 | [] | no_license | Charlie-Lichao/Task-recognition-Myo-armband | 1c739af15e3a69ff2d7514d1859951c475f4f825 | f0e426a3d6ee121bc6716fe2a39a300b995f974b | refs/heads/master | 2020-03-27T21:33:09.861428 | 2018-09-09T17:11:36 | 2018-09-09T17:11:36 | 147,155,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,644 | py |
# coding: utf-8
# In[1]:
# coding: utf-8
import pandas as pd
import numpy as np
import os
from scipy.stats import entropy
def get_mean(signal):
    """
    Get the mean of signal

    Arguments:
        signal -- the original signal (array-like; values may be numeric or
                  numeric strings, which are converted to float)
    Return:
        ans -- mean of signal
    """
    # BUGFIX: np.float was removed in NumPy 1.24 (it was just an alias for
    # the builtin float); cast with the builtin instead.
    arr = np.asarray(signal)
    return np.mean(arr.astype(float))
def get_RMS(signal):
    """
    Get the root-mean-square of signal

    Arguments:
        signal -- the original signal (numpy array)
    Return:
        ans -- rms of signal
    """
    mean_square = np.mean(np.square(signal))
    return np.sqrt(mean_square)
def get_ZC(signal):
    """
    Get the zero-crossing rate: the fraction of adjacent sample pairs whose
    product is negative.

    Arguments:
        signal -- the original signal
    Return:
        ans -- the ratio of zero crossings to sample pairs
    """
    crossings = sum(1 for a, b in zip(signal[:-1], signal[1:]) if a * b < 0)
    return crossings / (len(signal) - 1)
def get_kurt(signal):
    """
    Get the excess kurtosis of signal (fourth standardized moment minus 3).

    Arguments:
        signal -- the original signal (numpy array)
    Return:
        ans -- the kurtosis of signal
    """
    centered = signal - get_mean(signal)
    fourth_moment = np.mean(centered ** 4)
    variance_squared = np.mean(centered ** 2) ** 2
    return (fourth_moment / variance_squared) - 3
def get_skew(signal):
    """
    Get the skewness of signal.

    Skewness is the third standardized moment: E[(x - mean)^3] / std^3
    (population/biased form, matching scipy.stats.skew with bias=True).

    Arguments:
        signal -- the original signal (array-like, numeric)
    Return:
        ans -- the skewness of signal
    """
    arr = np.asarray(signal, dtype=float)
    mean = np.mean(arr)
    # BUGFIX: the numerator previously used the *fourth* central moment
    # ((signal - mean) ** 4), which is not skewness; skew uses the third.
    m3 = np.mean((arr - mean) ** 3)
    m2 = (np.mean((arr - mean) ** 2)) ** (3 / 2)
    ans = m3 / m2
    return ans
def get_sma_numpy(signal):
    """
    Get the signal magnitude area:
    measure of the magnitude of a varying quantity.

    Generalized: the original only supported 3-axis and 8-channel input and
    returned 0 with a warning otherwise.  Summing |value| over all channels
    and rows, divided by the number of rows, is identical for 3 and 8
    channels and extends to any channel count.

    Arguments:
        signal -- 2-D array, shape (n_samples, n_channels)
    Return:
        mean over samples of the per-sample sum of absolute channel values
    """
    arr = np.asarray(signal)
    return np.sum(np.abs(arr)) / len(arr)
def get_entropy(signal):
    """
    Get the Shannon entropy of the signal's absolute values.

    The signal is first scaled by its peak absolute value; scipy's entropy
    normalizes the input to a probability distribution internally.

    Arguments:
        signal -- the original signal (numpy array)
    Return:
        ans -- the entropy of signal
    """
    scale = max(abs(signal))
    normalized = signal / scale
    return entropy(abs(normalized))
def get_rising_time(signal):
    """
    Get the rising time from 10% of the signal's peak to 90% of its peak.

    Arguments:
        signal -- single-column pandas DataFrame
    Return:
        ans -- number of samples between the first crossing of 10% of the
               peak and the first crossing of 90% of the peak (multiply by
               the sampling frequency for seconds: eda=4, bvp=64)
    """
    # Peak value (inlined equivalent of get_max_amp so the function stands
    # alone); 10%/90% thresholds derived from it.
    maxamp = signal.values.max()
    up = 0.9 * maxamp
    low = 0.1 * maxamp
    # Indicators for finding the lower and upper crossing points.
    findlow = False
    findup = False
    for i in range(len(signal)):
        # Record the first sample exceeding each threshold.
        if (findlow == False) & (signal.iloc[i].values[0] > low):
            t1 = i
            findlow = True
        if (findup == False) & (signal.iloc[i].values[0] > up):
            t2 = i
            findup = True
        if findlow & findup:
            # BUGFIX: np.float was removed in NumPy 1.24; it aliased the
            # builtin float, which is used directly here.
            ans = float(t2 - t1)
            return ans  # should multiply by freq: eda=4, bvp=64
    # NOTE(review): if the thresholds are never both crossed the original
    # raised NameError; returning None here is the explicit equivalent
    # "no rise found" outcome -- confirm callers handle it.
    return None
def get_energy(signal):
    """
    Get the energy of the signal: the sum of squared samples.

    Arguments:
        signal -- the original signal
    Return:
        ans -- energy value of signal
    """
    return sum(sample ** 2 for sample in signal)
def get_max_amp(signal):
    """
    Get the maximal value of the signal.

    Arguments:
        signal -- the original signal (pandas DataFrame/Series)
    Return:
        ans -- maximal value of signal
    """
    values = signal.values
    return values.max()
def get_std(signal):
    """
    Get the (population) standard deviation of the signal.

    Arguments:
        signal -- the original signal
    Return:
        ans -- std of signal
    """
    return np.asarray(signal).std()
def first_order_diff(X):
    """ Compute the first order difference of a time series.

    For a time series X = [x(1), x(2), ... , x(N)], its first order
    difference is:
    Y = [x(2) - x(1) , x(3) - x(2), ..., x(N) - x(N-1)]
    """
    return [curr - prev for prev, curr in zip(X[:-1], X[1:])]
def get_pfd(X):
    """Compute the Petrosian Fractal Dimension of a time series.

    PFD = log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
    where n is the series length and N_delta is the number of sign changes
    in the first-order difference of X.
    """
    # First-order difference, inlined so this function stands alone.
    D = [X[i] - X[i - 1] for i in range(1, len(X))]
    N_delta = 0  # number of sign changes in derivative of the signal
    for i in range(1, len(D)):
        if D[i] * D[i - 1] < 0:
            N_delta += 1
    n = len(X)
    # BUGFIX: missing parentheses made the denominator log10(n/n + 0.4*N_delta)
    # = log10(1 + 0.4*N_delta) (a quirk inherited from pyeeg's pfd());
    # Petrosian's formula uses log10(n / (n + 0.4 * N_delta)).
    return np.log10(n) / (np.log10(n) + np.log10(n / (n + 0.4 * N_delta)))
def get_bin_power(X, Band):
    """Return the summed spectral magnitude in each frequency band.

    Arguments:
        X -- spectrum magnitudes for a signal assumed sampled at Fs = 50 Hz;
             bin i corresponds to frequency i * Fs / len(X)
        Band -- band edge frequencies; band k spans [Band[k], Band[k+1])
    Return:
        Power -- numpy array of length len(Band) - 1 with one value per band
    """
    Fs = 50  # sampling rate hard-coded by the original implementation
    C = abs(X)
    Power = np.zeros(len(Band) - 1)
    for Freq_Index in range(0, len(Band) - 1):
        Freq = float(Band[Freq_Index])
        Next_Freq = float(Band[Freq_Index + 1])
        # BUGFIX: each band's power was assigned to the scalar name `Power`,
        # clobbering the result array so only the last band was returned;
        # store into the slot the np.zeros initialization intended.
        Power[Freq_Index] = sum(C[int(np.floor(Freq / Fs * len(X))):int(np.floor(Next_Freq / Fs * len(X)))])
    return Power
| [
"noreply@github.com"
] | Charlie-Lichao.noreply@github.com |
04ae3124b11172bb493c0bae5c96ffb6adac16d9 | 03f78d37709f6e8efa6a088045412abeb44bb615 | /viberbot/api/viber_requests/viber_unsubscribed_request.py | efd536735372d5639d45f1bb8b6d1669194394a1 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | XRave91/viber-bot-python | 5c3432992234f849fe74f89c1a2aa027dd17ad9c | b49b5d85870e22ef9999f57209a800db35d15cc1 | refs/heads/master | 2020-03-31T01:25:57.826581 | 2018-10-05T21:44:23 | 2018-10-05T21:44:23 | 151,781,062 | 0 | 0 | NOASSERTION | 2018-10-05T21:44:24 | 2018-10-05T21:41:09 | Python | UTF-8 | Python | false | false | 727 | py | from future.utils import python_2_unicode_compatible
from viberbot.api.event_type import EventType
from viberbot.api.viber_requests.viber_request import ViberRequest
class ViberUnsubscribedRequest(ViberRequest):
    """Request object for a Viber "unsubscribed" webhook event.

    Wraps the callback payload sent when a user unsubscribes from the
    bot and exposes the id of that user via the ``user_id`` property.
    """
    def __init__(self):
        super(ViberUnsubscribedRequest, self).__init__(EventType.UNSUBSCRIBED)
        # Populated by from_dict(); None until then.
        self._user_id = None

    def from_dict(self, request_dict):
        """Fill this request from the raw webhook dict and return self.

        Raises KeyError if 'user_id' is missing from the payload.
        """
        super(ViberUnsubscribedRequest, self).from_dict(request_dict)
        self._user_id = request_dict['user_id']
        return self

    @property
    def user_id(self):
        # Id of the user who unsubscribed.
        return self._user_id

    # NOTE(review): python_2_unicode_compatible is designed to decorate a
    # *class* (it rewires __str__/__unicode__); applied to a single method
    # it is a no-op under Python 3 -- confirm the intended placement.
    @python_2_unicode_compatible
    def __str__(self):
        return u"ViberUnsubscribedRequest [{0}, user_id={1}]" \
            .format(super(ViberUnsubscribedRequest, self).__str__(), self._user_id)
| [
"lidora@viber.com"
] | lidora@viber.com |
79c589ccc015f6aa137459595e6e30ecef556171 | 9ba82dd9da2822044eea61de388935384a346884 | /app.py | 92424f3610d8a36b2e501d63c331eafb83b723b4 | [] | no_license | alexdylan/app1 | 15e3067da43154446caf0e3aebb957888cbcf91a | fcb5ec5d41a3f78dde2c9ccb081fa99fedbb1a2a | refs/heads/master | 2020-03-31T10:57:30.255656 | 2018-10-08T22:53:09 | 2018-10-08T22:53:09 | 152,156,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import mysql.connector
from dueno import Dueno
from menuDueno import MenuDueno
from menuInvernadero import MenuInvernadero
from menuUsuario import MenuUsuario
from menuPlanta import MenuPlanta
from menuRegistro import MenuRegistro
# Shared MySQL connection and cursor handed to every sub-menu.
conexion = mysql.connector.connect(user='alex',password='12345',database= 'invernadero')
cursor = conexion.cursor()

# Dispatch table: menu option -> sub-menu class to instantiate.
_MENU_POR_OPCION = {
    "1": MenuDueno,
    "2": MenuInvernadero,
    "3": MenuUsuario,
    "4": MenuPlanta,
    "5": MenuRegistro,
}

while True:
    print("1) Menu Dueño")
    print("2) Menu Invernadero")
    print("3) Menu Usuario")
    print("4) Menu Planta")
    print("5) Menu Registro")
    print("0) Salir")
    op = input()
    if op == "0":
        break
    menu_cls = _MENU_POR_OPCION.get(op)
    if menu_cls is not None:
        # Instantiate the selected sub-menu with the shared connection.
        menu_cls(conexion, cursor)
| [
"omara.cruz@alumnos.udg.mx"
] | omara.cruz@alumnos.udg.mx |
e5d01453be61f2b329f66383a47ae1bd9104c98e | 288a00d2ab34cba6c389b8c2444455aee55a8a95 | /tests/data23/recipe-576938.py | 252a8acccd4b737098c0a74b541a95691ad026fb | [
"BSD-2-Clause"
] | permissive | JohannesBuchner/pystrict3 | ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb | 18b0dd369082422f9bf0f89c72e7acb53a49849c | refs/heads/master | 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | # -*- coding: iso-8859-1 -*-
# laplace.py with mpmath
# appropriate for high precision
# Talbot suggested that the Bromwich line be deformed into a contour that begins
# and ends in the left half plane, i.e., z \to \infty at both ends.
# Due to the exponential factor the integrand decays rapidly
# on such a contour. In such situations the trapezoidal rule converge
# extraordinarily rapidly.
# For example here we compute the inverse transform of F(s) = 1/(s+1) at t = 1
#
# >>> error = Talbot(1,24)-exp(-1)
# >>> error
# (3.3306690738754696e-015+0j)
#
# Talbot method is very powerful here we see an error of 3.3e-015
# with only 24 function evaluations
#
# Created by Fernando Damian Nieuwveldt
# email:fdnieuwveldt@gmail.com
# Date : 25 October 2009
#
# Adapted to mpmath and classes by Dieter Kadelka
# email: Dieter.Kadelka@kit.edu
# Date : 27 October 2009
#
# Reference
# L.N.Trefethen, J.A.C.Weideman, and T.Schmelzer. Talbot quadratures
# and rational approximations. BIT. Numerical Mathematics,
# 46(3):653 670, 2006.
from mpmath import mpf,mpc,pi,sin,tan,exp
# testfunction: Laplace-transform of exp(-t)
def F(s):
    """Test function: the Laplace transform of exp(-t), i.e. 1/(s+1)."""
    return 1.0 / (1.0 + s)
class Talbot(object):
    """Numerical inverse Laplace transform via Talbot's method.

    The Bromwich line is deformed into a contour that begins and ends in
    the left half plane; the exp(z*t) factor then makes the integrand
    decay rapidly, so the trapezoidal rule converges extraordinarily
    fast (~1e-15 error with only N=24 evaluations for F(s)=1/(s+1)).

    Reference: L.N. Trefethen, J.A.C. Weideman, T. Schmelzer,
    "Talbot quadratures and rational approximations",
    BIT Numerical Mathematics 46(3):653-670, 2006.
    """
    def __init__(self,F=F,shift=0.0):
        """Set up the inverter.

        F : Laplace-domain function to invert.  Defaults to the module's
            test function 1/(s+1) (bound at class-definition time).
        shift : shift the contour to the right when F has a pole on the
            positive real axis (e.g. shift=1 for F(s)=1/(s-1)); the
            default test function needs no shift, and the contour is not
            optimal in the shifted case since it was developed for
            singularities on the negative real axis.
        """
        self.F = F
        self.shift = shift
        # N = 24 is empirically best for the test function at double
        # precision (N = 22 or 26 gives a larger error).  With mpmath at
        # high mp.dps a much larger N keeps converging, whereas plain
        # floats eventually blow up from rounding error.
        self.N = 24
    def __call__(self,t):
        """Evaluate the inverse transform f(t) at t > 0.

        NOTE(review): for t == 0 this prints a message and returns the
        *string* "Error" instead of raising -- confirm callers expect
        that.
        """
        if t == 0:
            print("ERROR: Inverse transform can not be calculated for t=0")
            return ("Error");
        # Trapezoidal step size along the contour parameter theta.
        h = 2*pi/self.N
        ans = 0.0
        # Contour parameters from T. Schmelzer, L.N. Trefethen,
        # SIAM J. Numer. Anal. 45 (2007) 558-571.
        c1 = mpf('0.5017')
        c2 = mpf('0.6407')
        c3 = mpf('0.6122')
        c4 = mpc('0','0.2645')
        # Trapezoidal rule: evaluate the deformed-contour integrand at
        # the midpoints theta_k of each panel.
        for k in range(self.N):
            theta = -pi + (k+0.5)*h
            z = self.shift + self.N/t*(c1*theta/tan(c2*theta) - c3 + c4*theta)
            dz = self.N/t * (-c1*c2*theta/sin(c2*theta)**2 + c1/tan(c2*theta)+c4)
            ans += exp(z*t)*self.F(z)*dz
        return ((h/(2j*pi))*ans).real
| [
"johannes.buchner.acad@gmx.com"
] | johannes.buchner.acad@gmx.com |
a2ec9c682a00a06f0c9762dc40402c1247ff487b | 8bd1f4adfa846cbc465fa867fe07951ca191a881 | /src/rules.py | 13d9b4dab544e52b89e434e10d14deec6b3cfe27 | [] | no_license | yitaodong/pascal-compiler | 3649b9b23143197730bba026b1f2d08d0d3067ed | 2855e910276e3e5dbe65503d300b6f35adf92758 | refs/heads/master | 2020-05-24T18:35:38.382941 | 2019-05-20T03:17:59 | 2019-05-20T03:17:59 | 187,413,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,386 | py | from codegen.ast import Node
import sys
# META
#start = 'block'
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVISION'),
('left', 'DIV', 'MOD'),
('left', 'EQ', 'NEQ', 'LTE','LT','GT','GTE'),
('left', 'OR', 'AND'),
)
def p_program_start(t):
    'program : header SEMICOLON block DOT'
    # NOTE: in PLY the docstring above *is* the grammar rule -- never
    # edit it unless you mean to change the grammar.
    # Root production: wrap the program name (header) and the main block.
    t[0] = Node('program',t[1],t[3])
def p_header(t):
'header : PROGRAM identifier'
t[0] = t[2]
def p_block(t):
"""block : variable_declaration_part procedure_or_function statement_part
"""
t[0] = Node('block',t[1],t[2],t[3])
def p_variable_declaration_part(t):
"""variable_declaration_part : VAR variable_declaration_list
|
"""
if len(t) > 1:
t[0] = t[2]
def p_variable_declaration_list(t):
"""variable_declaration_list : variable_declaration variable_declaration_list
| variable_declaration
"""
# function and procedure missing here
if len(t) == 2:
t[0] = t[1]
else:
t[0] = Node('var_list',t[1],t[2])
def p_variable_declaration(t):
"""variable_declaration : identifier COLON type SEMICOLON"""
t[0] = Node('var',t[1],t[3])
def p_procedure_or_function(t):
"""procedure_or_function : proc_or_func_declaration SEMICOLON procedure_or_function
| """
if len(t) == 4:
t[0] = Node('function_list',t[1],t[3])
def p_proc_or_func_declaration(t):
""" proc_or_func_declaration : procedure_declaration
| function_declaration """
t[0] = t[1]
def p_procedure_declaration(t):
"""procedure_declaration : procedure_heading SEMICOLON block"""
t[0] = Node("procedure",t[1],t[3])
def p_procedure_heading(t):
""" procedure_heading : PROCEDURE identifier
| PROCEDURE identifier LPAREN parameter_list RPAREN"""
if len(t) == 3:
t[0] = Node("procedure_head",t[2])
else:
t[0] = Node("procedure_head",t[2],t[4])
def p_function_declaration(t):
""" function_declaration : function_heading SEMICOLON block"""
t[0] = Node('function',t[1],t[3])
def p_function_heading(t):
""" function_heading : FUNCTION type
| FUNCTION identifier COLON type
| FUNCTION identifier LPAREN parameter_list RPAREN COLON type"""
if len(t) == 3:
t[0] = Node("function_head",t[2])
elif len(t) == 5:
t[0] = Node("function_head",t[2],t[3])
else:
t[0] = Node("function_head",t[2],t[4],t[7])
def p_parameter_list(t):
""" parameter_list : parameter COMMA parameter_list
| parameter"""
if len(t) == 4:
t[0] = Node("parameter_list", t[1], t[3])
else:
t[0] = t[1]
def p_parameter(t):
""" parameter : identifier COLON type"""
t[0] = Node("parameter", t[1], t[3])
def p_type(t):
    """ type : TREAL
             | TINTEGER
             | TCHAR
             | TSTRING """
    # The docstring above is the PLY grammar rule for this production.
    # Normalise the type keyword to lower case so later compiler stages
    # can compare type names case-insensitively.
    t[0] = Node('type',t[1].lower())
def p_statement_part(t):
"""statement_part : BEGIN statement_sequence END"""
t[0] = t[2]
def p_statement_sequence(t):
"""statement_sequence : statement SEMICOLON statement_sequence
| statement"""
if len(t) == 2:
t[0] = t[1]
else:
t[0] = Node('statement_list',t[1],t[3])
def p_statement(t):
"""statement : assignment_statement
| statement_part
| if_statement
| while_statement
| repeat_statement
| for_statement
| procedure_or_function_call
|
"""
if len(t) > 1:
t[0] = t[1]
def p_procedure_or_function_call(t):
""" procedure_or_function_call : identifier LPAREN param_list RPAREN
| identifier """
if len(t) == 2:
t[0] = Node("function_call", t[1])
else:
t[0] = Node("function_call",t[1],t[3])
def p_param_list(t):
""" param_list : param_list COMMA param
| param """
if len(t) == 2:
t[0] = t[1]
else:
t[0] = Node("parameter_list",t[1],t[3])
def p_param(t):
""" param : expression """
t[0] = Node("parameter",t[1])
def p_if_statement(t):
    """if_statement : IF expression THEN statement ELSE statement
                    | IF expression THEN statement
    """
    # The docstring above is the PLY grammar rule for this production.
    # len(t) == 5 -> plain IF/THEN (symbols t[1]..t[4]);
    # len(t) == 7 -> IF/THEN/ELSE, with the else-branch at t[6].
    if len(t) == 5:
        t[0] = Node('if',t[2],t[4])
    else:
        t[0] = Node('if',t[2],t[4],t[6])
def p_while_statement(t):
"""while_statement : WHILE expression DO statement"""
t[0] = Node('while',t[2],t[4])
def p_repeat_statement(t):
"""repeat_statement : REPEAT statement UNTIL expression"""
t[0] = Node('repeat',t[2],t[4])
def p_for_statement(t):
"""for_statement : FOR assignment_statement TO expression DO statement
| FOR assignment_statement DOWNTO expression DO statement
"""
t[0] = Node('for',t[2],t[3],t[4],t[6])
def p_assignment_statement(t):
"""assignment_statement : identifier ASSIGNMENT expression"""
t[0] = Node('assign',t[1],t[3])
def p_expression(t):
"""expression : expression and_or expression_m
| expression_m
"""
if len(t) == 2:
t[0] = t[1]
else:
t[0] = Node('op',t[2],t[1],t[3])
def p_expression_m(t):
""" expression_m : expression_s
| expression_m sign expression_s"""
if len(t) == 2:
t[0] = t[1]
else:
t[0] = Node('op',t[2],t[1],t[3])
def p_expression_s(t):
""" expression_s : element
| expression_s psign element"""
if len(t) == 2:
t[0] = t[1]
else:
t[0] = Node('op',t[2],t[1],t[3])
def p_and_or(t):
""" and_or : AND
| OR """
t[0] = Node('and_or',t[1])
def p_psign(t):
"""psign : TIMES
| DIVISION"""
t[0] = Node('sign',t[1])
def p_sign(t):
"""sign : PLUS
| MINUS
| DIV
| MOD
| EQ
| NEQ
| LT
| LTE
| GT
| GTE
"""
t[0] = Node('sign',t[1])
def p_element(t):
    """element : identifier
               | real
               | integer
               | string
               | char
               | LPAREN expression RPAREN
               | NOT element
               | function_call_inline
    """
    # The docstring above is the PLY grammar rule for this production.
    # Disambiguate the alternatives by the production length:
    #   len(t) == 2 -> a single terminal/non-terminal
    #   len(t) == 3 -> NOT element
    #   len(t) == 4 -> parenthesised expression
    if len(t) == 2:
        t[0] = Node("element",t[1])
    elif len(t) == 3:
        # not e
        t[0] = Node('not',t[2])
    else:
        # ( e )
        t[0] = Node('element',t[2])
def p_function_call_inline(t):
""" function_call_inline : identifier LPAREN param_list RPAREN"""
t[0] = Node('function_call_inline',t[1],t[3])
def p_identifier(t):
""" identifier : IDENTIFIER """
t[0] = Node('identifier',str(t[1]).lower())
def p_real(t):
""" real : REAL """
t[0] = Node('real',t[1])
def p_integer(t):
""" integer : INTEGER """
t[0] = Node('integer',t[1])
def p_string(t):
""" string : STRING """
t[0] = Node('string',t[1])
def p_char(t):
""" char : CHAR """
t[0] = Node('char',t[1])
def p_error(t):
    """PLY syntax-error hook: report the offending line and abort.

    (PLY treats p_error specially; its docstring is NOT a grammar rule.)
    """
    # Bug fix: the original used a Python-2 print *statement*, which is
    # a SyntaxError under Python 3 even though the rest of this module
    # is Python-3 compatible.  The parenthesised call works on both.
    print("Syntax error in input, in line %d!" % t.lineno)
    sys.exit()
"noreply@github.com"
] | yitaodong.noreply@github.com |
c793af039ab512be26af087d04cee9f47f6aa452 | 7b2e5c61ffa754a2bc371e166eae67bfe92492bc | /webmotors/scrap.py | 6f205416045fbb2f71a751524faa9bfe5f1cbaf6 | [] | no_license | gustavoid/webmotors | 8fbf2aad9764a7cf165f53837baef237a028e358 | 6d4d382115f70a045ee64c0a30ec46b55c20a390 | refs/heads/main | 2023-03-24T22:41:41.853557 | 2021-03-23T14:19:06 | 2021-03-23T14:19:06 | 350,739,280 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,438 | py | import requests
import logging
import json
import logging
from random import choice
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
IMAGE_URL = "https://image.webmotors.com.br/_fotos/AnuncioUsados/gigante/"
USERS_AGENTS = [
'Mozilla/5.0 (Linux; Android 5.0.2; VK810 4G Build/LRX22G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.84 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.76.4 (KHTML, like Gecko) Version/7.0.4 Safari/537.76.4',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:40.0) Gecko/20100101 Firefox/40.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; Touch; SMJB; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64; Trident/7.0; Touch; MDDCJS; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; BOIE9;ENUS; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0',
'Mozilla/5.0 (iPad; CPU OS 8_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/6.0.51363 Mobile/12H143 Safari/600.1.4',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (Windows NT 5.1; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2503.0 Safari/537.36',
]
class Webmotors(object):
    """Scraper client for the Webmotors public search API.

    Each instance keeps its own requests.Session and page counters:
    successive calls to getCars()/getBikes() fetch consecutive result
    pages and reset back to page 1 once an empty page is returned.
    """
    def __init__(self,proxy=None,vehiclePerPage=100):
        self.__proxy = proxy
        self.__carsPageNum = 1   # next car-search page to fetch
        self.__bikePageNum = 1   # next bike-search page to fetch
        self.__vehiclePerPage = vehiclePerPage
        self.__session = requests.Session()

    @property
    def proxy(self):
        return self.__proxy

    @proxy.setter
    def proxy(self,value):
        """Install an HTTP/HTTPS proxy on the session, but only when it
        actually changes the outgoing IP address.

        value : proxy URL, e.g. "http://host:port".
        """
        try:
            # Bug fix: the original compared the two requests.Response
            # objects directly ("if ip != newIp"), which is always True
            # because Response does not define __eq__ -- so any proxy was
            # accepted.  Compare the returned IP strings instead.
            ip = requests.get("https://ifconfig.me/ip").text
            proxy = {
                "http":value,
                "https":value
            }
            newIp = requests.get("https://ifconfig.me/ip",proxies=proxy).text
            if ip != newIp:
                self.__proxy = value
                self.__session.proxies = proxy
                logger.info(f"Proxy configurado: {newIp}")
            else:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning("Nao foi possivel configurar o proxy")
        except Exception as e:
            logger.error(f"Ocorreu um erro: {str(e)}")

    def getCars(self):
        """Fetch the next page of car search results.

        Returns the list under "SearchResults" (possibly empty).  On an
        empty page the internal page counter resets to 1, otherwise it
        advances.  Network/JSON errors and non-200 responses yield [].
        """
        try:
            url = f"https://www.webmotors.com.br:443/api/search/car?url=https://www.webmotors.com.br/carros%2Festoque%3F&actualPage={self.__carsPageNum}&displayPerPage={self.__vehiclePerPage}&order=1&showMenu=true&showCount=true&showBreadCrumb=true&testAB=false&returnUrl=false"
            cookies = {"AMCV_3ADD33055666F1A47F000101%40AdobeOrg": "-1124106680%7CMCIDTS%7C18705%7CMCMID%7C08361442210490129111811084005471184982%7CMCOPTOUT-1616107905s%7CNONE%7CvVersion%7C5.2.0", "mbox": "session#778ba20bffd6441b84a07f970ef4bfdb#1616102564", "at_check": "true", "WebMotorsVisitor": "1", "AMCVS_3ADD33055666F1A47F000101%40AdobeOrg": "1", "WMLastFilterSearch": "%7B%22car%22%3A%22carros%2Festoque%3Fidcmpint%3Dt1%3Ac17%3Am07%3Awebmotors%3Abusca%3A%3Averofertas%22%2C%22bike%22%3A%22motos%2Festoque%22%2C%22estadocidade%22%3A%22estoque%22%2C%22lastType%22%3A%22car%22%2C%22cookie%22%3A%22v3%22%2C%22ano%22%3A%7B%7D%2C%22preco%22%3A%7B%7D%2C%22marca%22%3A%22%22%2C%22modelo%22%3A%22%22%7D", "WebMotorsSearchDataLayer": f"%7B%22search%22%3A%7B%22location%22%3A%7B%7D%2C%22ordination%22%3A%7B%22name%22%3A%22Mais%20relevantes%22%2C%22id%22%3A1%7D%2C%22pageNumber%2{self.__carsPageNum}%3A2%2C%22totalResults%22%3A262926%2C%22vehicle%22%3A%7B%22type%22%3A%7B%22id%22%3A1%2C%22name%22%3A%22carro%22%7D%7D%2C%22cardExhibition%22%3A%7B%22id%22%3A%221%22%2C%22name%22%3A%22Cards%20Grid%22%7D%2C%22eventType%22%3A%22paginacaoRealizada%22%7D%7D", "WebMotorsTrackingFrom": "paginacaoRealizada"}
            headers = {"GET /api/search/car?url=https": f"/www.webmotors.com.br/carros%2Festoque%3F&actualPage={self.__carsPageNum}&displayPerPage={self.__vehiclePerPage}&order=1&showMenu=true&showCount=true&showBreadCrumb=true&testAB=false&returnUrl=false HTTP/1.1", "User-Agent": choice(USERS_AGENTS), "Accept": "application/json, text/plain, */*", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate", "DNT": "1", "Connection": "close", "Sec-GPC": "1"}
            response = self.__session.get(url,headers=headers,cookies=cookies)
        except Exception as e:
            logger.error(f"Ocorreu um erro: {str(e)}")
            return []
        if response.status_code == 200:
            results = json.loads(response.text)
            if len(results["SearchResults"]) == 0:
                self.__carsPageNum = 1
                return []
            else:
                self.__carsPageNum += 1
                return results["SearchResults"]
        # Fix: the original fell through and implicitly returned None on a
        # non-200 status; [] keeps callers that iterate the result working.
        return []

    def getBikes(self):
        """Fetch the next page of bike search results.

        Same pagination/return contract as getCars().
        """
        try:
            url = f"https://www.webmotors.com.br:443/api/search/bike?url=https://www.webmotors.com.br/motos%2Festoque%3Ftipoveiculo%3Dmotos&actualPage={self.__bikePageNum}&displayPerPage={self.__vehiclePerPage}&order=1&showMenu=true&showCount=true&showBreadCrumb=true&testAB=false&returnUrl=false"
            # NOTE: the dict below deliberately mirrors the captured browser
            # cookies; duplicate keys collapse to the last occurrence.
            cookies = {"AMCV_3ADD33055666F1A47F000101%40AdobeOrg": "359503849%7CMCIDTS%7C18602%7CMCMID%7C56241706435647372388498402368390428709%7CMCOPTOUT-1607182934s%7CNONE%7CvVersion%7C5.0.1", "AMCV_3ADD33055666F1A47F000101%40AdobeOrg": "-1124106680%7CMCIDTS%7C18704%7CMCMID%7C56241706435647372388498402368390428709%7CMCOPTOUT-1615992864s%7CNONE%7CvVersion%7C5.2.0", "WebMotorsLastSearches": "%5B%7B%22route%22%3A%22carros%2Festoque%2Fvolkswagen%2Fjetta%22%2C%22query%22%3A%22%22%7D%5D", "mbox": "session#95f94e1177ac42908ac4fb1aaac3a342#1615986642", "at_check": "true", "AMCVS_3ADD33055666F1A47F000101%40AdobeOrg": "1", "WebMotorsVisitor": "1", "WMLastFilterSearch": "%7B%22car%22%3A%22carros%2Festoque%3Fidcmpint%3Dt1%3Ac17%3Am07%3Awebmotors%3Abusca%3A%3Averofertas%22%2C%22bike%22%3A%22motos%2Festoque%22%2C%22estadocidade%22%3A%22estoque%22%2C%22lastType%22%3A%22car%22%2C%22cookie%22%3A%22v3%22%2C%22ano%22%3A%7B%7D%2C%22preco%22%3A%7B%7D%2C%22marca%22%3A%22%22%2C%22modelo%22%3A%22%22%7D", "WebMotorsSearchDataLayer": "%7B%22search%22%3A%7B%22location%22%3A%7B%7D%2C%22ordination%22%3A%7B%22name%22%3A%22Mais%20relevantes%22%2C%22id%22%3A1%7D%2C%22pageNumber%22%3A1%2C%22totalResults%22%3A258704%2C%22vehicle%22%3A%7B%22type%22%3A%7B%22id%22%3A1%2C%22name%22%3A%22carro%22%7D%7D%2C%22cardExhibition%22%3A%7B%22id%22%3A%221%22%2C%22name%22%3A%22Cards%20Grid%22%7D%2C%22eventType%22%3A%22buscaRealizada%22%7D%7D", "WebMotorsTrackingFrom": "filtroRealizado"}
            headers = {"GET /api/search/bike?url=https": f"/www.webmotors.com.br/motos%2Festoque%3Ftipoveiculo%3Dmotos&actualPage={self.__bikePageNum}&displayPerPage={self.__vehiclePerPage}&order=1&showMenu=true&showCount=true&showBreadCrumb=true&testAB=false&returnUrl=false HTTP/1.1", "User-Agent": choice(USERS_AGENTS), "Accept": "application/json, text/plain, */*", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate", "DNT": "1", "Connection": "close", "Sec-GPC": "1"}
            response = self.__session.get(url,headers=headers,cookies=cookies)
        except Exception as e:
            logger.error(f"Ocorreu um erro: {str(e)}")
            return []
        if response.status_code == 200:
            results = json.loads(response.text)
            if len(results["SearchResults"]) == 0:
                self.__bikePageNum = 1
                return []
            else:
                self.__bikePageNum += 1
                return results["SearchResults"]
        # Same non-200 normalisation as getCars().
        return []
| [
"guzzt@localhost.localdomain"
] | guzzt@localhost.localdomain |
5f6f1d4d9488f159cbe77963ab23c55884831ffc | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Contests/101-200/week 200/1536. Minimum Swaps to Arrange a Binary Grid/Minimum Swaps to Arrange a Binary Grid.py | 3945b9170c8ea867c0294760570f9df5e6239462 | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Minimum Swaps to Arrange a Binary Grid
@time: 2020/08/03 04:39
"""
class Solution:
    def minSwaps(self, A: "list[list[int]]") -> int:
        """LeetCode 1536: minimum number of adjacent-row swaps so that row
        i of the binary grid has at least (m-1-i) trailing zeros, i.e. the
        grid becomes valid (all cells above the main diagonal are 0).

        Returns the swap count, or -1 when no arrangement is possible.

        Note: the original annotation was ``list(list())``, which
        *evaluates* to an empty-list object, not a type; a string type
        hint avoids that (and needs no typing import).
        """
        m, n = len(A), len(A[0])
        # res[i] = number of trailing zeros in row i (scan right-to-left;
        # A[i][~j] is A[i][-(j+1)]).
        res = [0] * m
        for i in range(m):
            for j in range(n):
                if not A[i][~j]:
                    res[i] += 1
                else:
                    break
        ret = 0
        # Greedy: row i needs >= m-1-i trailing zeros; bubble up the first
        # row below that satisfies it, counting one swap per position.
        # (The original bound an unused loop variable r via enumerate.)
        for i in range(m):
            target = m - 1 - i
            if res[i] >= target:
                continue
            for j in range(i + 1, m):
                if res[j] >= target:
                    ret += j - i
                    # Shift rows i..j-1 down by one slot (the bubble-up).
                    res[i + 1:j + 1] = res[i:j]
                    break
            else:
                # No remaining row has enough trailing zeros.
                return -1
        return ret
# Quick manual check: expected output is 3 (LeetCode 1536, example 1).
so = Solution()
print(so.minSwaps([[0, 0, 1], [1, 1, 0], [1, 0, 0]]))
| [
"905317742@qq.com"
] | 905317742@qq.com |
aea2948697eef4b3cd89e905116f4a3832e63170 | 38ac429d63369922e12e19cdda042b08b8123027 | /test/test_json_view.py | c5b01994dc9e1d97795a7c689c0a2b5dd2bb5dcb | [] | no_license | aviv-julienjehannet/collibra_apiclient | 0dfebe5df2eb929645b87eba42fab4c06ff0a6be | 10a89e7acaf56ab8c7417698cd12616107706b6b | refs/heads/master | 2021-09-12T16:52:19.803624 | 2018-04-19T01:35:20 | 2018-04-19T01:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # coding: utf-8
"""
\"Data Governance Center: REST API v2\"
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.json_view import JsonView # noqa: E501
from swagger_client.rest import ApiException
class TestJsonView(unittest.TestCase):
    """Unit test stubs for the generated JsonView model.

    Auto-generated by swagger-codegen; the construction test below is
    still a stub and needs real example values.
    """

    def setUp(self):
        # No fixtures needed for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testJsonView(self):
        """Test JsonView"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.json_view.JsonView()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"busworld08@gmail.com"
] | busworld08@gmail.com |
dd44f27d784a2604fe1e4bc747941f58c0a8e8c3 | 9cc14327257c06293ed0f985a36996a4f60b5ef1 | /extract_jims_data/src/root/nested/__init__.py | 3c5806573f59a940f25c14e21427f569791a1af4 | [] | no_license | mmmerlin/my_code | 37d18782738586ba54f9b0d8e2cddd85b98cb84c | 7ebfe43e8bde7cb7818545eb636d74c0b1ece26e | refs/heads/master | 2016-09-01T20:39:58.422935 | 2015-11-24T14:46:18 | 2015-11-24T14:46:18 | 26,291,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | import pyfits
# Dump the QE curves from the FITS results file as tab-separated text:
# one row per wavelength with columns WAVELENGTH then AMP01..AMP16.
data = pyfits.open('/mnt/hgfs/VMShared/output/QE_LSST/Jim_results/112-04_QE.fits')['QE_CURVES'].data
wl = data.field('WAVELENGTH')
# The sixteen amplifier columns follow one naming pattern -- build the
# list programmatically instead of sixteen copy-pasted assignments.
amps = [data.field('AMP%02d' % k) for k in range(1, 17)]
for i in range(len(wl)):
    # Single-argument print(...) is valid under both Python 2 and 3
    # (the original used Python-2-only print statements).
    print('\t'.join([str(wl[i])] + [str(amp[i]) for amp in amps]))
print("")
| [
"mmmerlin@ubuntu.(none)"
] | mmmerlin@ubuntu.(none) |
950413a7deae946a97ca3d90d8beb24ec762c08a | 53e31ab12fddcc0e8d3e9add10ad266ff8bd30c8 | /url lib.py | e3db666922259528b1943ac04a6fae0fa0f759c7 | [] | no_license | sneh2001/first_upload | b7caaf1067183adca69ec0de77ff6bbab909c439 | 07898cd4b7eda85ea36a5ecf3ce7634c0ec49742 | refs/heads/master | 2020-12-29T12:56:45.612903 | 2020-08-13T04:54:48 | 2020-08-13T04:54:48 | 238,614,927 | 0 | 0 | null | 2020-03-15T09:15:00 | 2020-02-06T05:27:44 | Python | UTF-8 | Python | false | false | 224 | py | import urllib.request
# Fetch a remote readme over HTTP and print the status code and body.
# Bug fix: the original mixed Python-2 print *statements* with the
# Python-3-only urllib.request module, so it could not run under either
# version; the prints are now calls, making this valid Python 3.
webUrl = urllib.request.urlopen('http://wordpress.org/plugins/about/readme.txt')
print('Result code: ' + str(webUrl.getcode()))
data = webUrl.read()  # bytes under Python 3
print(data)
| [
"noreply@github.com"
] | sneh2001.noreply@github.com |
6d02a2ddde0e821a4c5330b95d16d6cb74325b15 | 65ca852688354783630f1595853222f8ecc4668a | /RNAMultiCoucheMinus22.py | 51673d1d39a71f581feb3ad7a89ac0b46441bbf4 | [] | no_license | RobertGodin/CodePython | f15190df24b6da9f53002aeb791b63ebe2996275 | fb051d2b627cf43d55944b5f09626eb618de7411 | refs/heads/master | 2023-02-07T06:45:45.007762 | 2023-02-04T15:54:59 | 2023-02-04T15:54:59 | 133,089,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,586 | py | # -*- coding: utf-8 -*-
# Implémentation d'un RNA multi-couche, exemple avec le RNA Minus
import numpy as np
np.random.seed(42) # pour reproduire les mêmes résultats
class Couche:
    """Abstract base class for one layer of the neural network.

    X: vector, input of the layer
    Y: vector, output of the layer
    """
    def __init__(self):
        self.X = None
        self.Y = None

    def propager_une_couche(self,X):
        """Compute the output Y for a given input X (forward pass).

        X : vector of predictor variables.
        X and Y are stored on the instance for later processing
        (back-propagation).
        """
        raise NotImplementedError

    def retropropager_une_couche(self,dJ_dY,taux,trace=False):
        """Given dJ_dY, compute the derivatives w.r.t. X and the layer's
        parameters, and update the parameters with learning rate `taux`.

        dJ_dY : derivative of the cost J w.r.t. the output Y
        taux : learning rate used in the gradient-descent update
        Returns the derivative of J w.r.t. X.
        """
        raise NotImplementedError
# inherit from base class Layer
class CoucheDenseLineaire(Couche):
    """Dense linear layer: Y = XW + B (no activation)."""
    def __init__(self,n,m,init_W=None,init_B=None):
        """Initialise the layer parameters.  W and B default to values
        drawn uniformly from U(-0.5, 0.5) when init_W / init_B are not
        given.

        n : int, size of the input vector X
        m : int, size of the output vector Y
        init_W : np.array, shape (n, m), optional initial value of W
        init_B : np.array, shape (1, m), optional initial value of B

        NOTE(review): super().__init__() is not called, so self.X / self.Y
        only exist after the first forward pass -- confirm intended.
        """
        if init_W is None :
            self.W = np.random.rand(n,m) - 0.5
        else:
            self.W = init_W
        if init_B is None :
            self.B = np.random.rand(1, m) - 0.5
        else:
            self.B = init_B

    def propager_une_couche(self,X):
        """Forward pass: store X and return Y = B + X.W"""
        self.X = X
        self.Y = self.B + np.dot(self.X,self.W)
        return self.Y

    def retropropager_une_couche(self,dJ_dY,taux,trace=False):
        """Backward pass: compute dJ_dW, dJ_dB and dJ_dX for the dense
        linear layer, apply the gradient-descent update to W and B, and
        return dJ_dX.
        """
        dJ_dW = np.dot(self.X.T,dJ_dY)
        dJ_dB = dJ_dY
        # dJ_dX must use the *current* W, before the update below.
        dJ_dX = np.dot(dJ_dY,self.W.T)
        if trace:
            print("dJ_dW:",dJ_dW)
            print("dJ_dB:",dJ_dB)
            print("dJ_dX:",dJ_dX)
        # Update the parameters W and B (plain gradient descent).
        self.W -= taux * dJ_dW
        self.B -= taux * dJ_dB
        if trace:
            print("W modifié:",self.W)
            print("B modifié:",self.B)
        return dJ_dX
def erreur_quadratique(y_prediction,y):
    """Sum-of-squares error between the prediction and the target y."""
    ecart = y_prediction - y
    return np.sum(ecart ** 2)
def d_erreur_quadratique(y_prediction,y):
    """Derivative of the quadratic error w.r.t. the prediction."""
    return (y_prediction - y) * 2
class ReseauMultiCouches:
    """Multi-layer network formed by a sequence of Couche layers.

    couches : list of the network's layers, in forward order
    cout : function computing the cost J
    derivee_cout : derivative of the cost function
    """
    def __init__(self):
        self.couches = []
        self.cout = None
        self.derivee_cout = None

    def ajouter_couche(self,couche):
        # Append a layer at the end of the forward sequence.
        self.couches.append(couche)

    def specifier_J(self,cout,derivee_cout):
        """Set the cost function J and its derivative."""
        self.cout = cout
        self.derivee_cout = derivee_cout

    def propagation_donnees_ent_X(self,donnees_ent_X,trace=False):
        """Predict Y for each observation in donnees_ent_X.

        donnees_ent_X : 3D np.array of X values, one per observation;
            each X is a 2D np.array of shape (1, n).
        Returns the list of predicted Y values (one per observation).
        """
        nb_observations = len(donnees_ent_X)
        predictions_Y = []
        for indice_observation in range(nb_observations):
            XY_propage = donnees_ent_X[indice_observation]
            if trace:
                print("Valeur de X initiale:",XY_propage)
            # Feed the value forward through every layer in turn.
            for couche in self.couches:
                XY_propage = couche.propager_une_couche(XY_propage)
                if trace:
                    print("Valeur de Y après propagation pour la couche:",XY_propage)
            predictions_Y.append(XY_propage)
        return predictions_Y

    def entrainer_descente_gradiant_stochastique(self,donnees_ent_X,donnees_ent_Y,nb_epochs,taux,trace=False):
        """Train the network with stochastic gradient descent (one
        observation at a time).

        donnees_ent_X : 3D np.array of X values, one per observation;
            each X is a 2D np.array of shape (1, n).
        donnees_ent_Y : 3D np.array of Y values, one per observation;
            each Y is a 2D np.array of shape (1, m).
        nb_epochs : number of passes over the training data
        taux : learning rate
        """
        nb_observations = len(donnees_ent_X)
        # Main training loop, nb_epochs passes over the data.
        for cycle in range(nb_epochs):
            cout_total = 0
            # Stochastic gradient descent: one observation at a time.
            for indice_observation in range(nb_observations):
                # Forward pass for one observation X.
                XY_propage = donnees_ent_X[indice_observation]
                for couche in self.couches:
                    XY_propage = couche.propager_une_couche(XY_propage)
                # Accumulate the cost for this observation.
                cout_total += self.cout(XY_propage,donnees_ent_Y[indice_observation])
                # Backward pass: dJ_dX_dJ_dY carries the running dJ_dX,
                # fed as dJ_dY into the next layer down.
                dJ_dX_dJ_dY = self.derivee_cout(XY_propage,donnees_ent_Y[indice_observation])
                if trace :
                    print("dJ_dY couche finale:",dJ_dX_dJ_dY)
                for couche in reversed(self.couches):
                    dJ_dX_dJ_dY = couche.retropropager_une_couche(dJ_dX_dJ_dY,taux,trace)
            # Report the mean cost over the epoch.
            cout_moyen = cout_total/nb_observations
            print('epoch %d/%d cout_moyen=%f' % (cycle+1,nb_epochs,cout_moyen))
# A single training example, to illustrate how the "Minus" network works
donnees_ent_X = np.array([[[1,1]]])
donnees_ent_Y = np.array([[[1,0]]])
# Initial parameters of the Minus network
B1=np.array([[0.2,0.7]])
W1=np.array([[0.5,0.1],[0.3,-0.3]])
B2=np.array([[-0.2,0.5]])
W2=np.array([[0.7,-0.1],[0,0.2]])
# Define the architecture of the Minus network: two dense linear layers
un_RNA = ReseauMultiCouches()
un_RNA.specifier_J(erreur_quadratique,d_erreur_quadratique)
un_RNA.ajouter_couche(CoucheDenseLineaire(2,2,init_W=W1,init_B=B1))
un_RNA.ajouter_couche(CoucheDenseLineaire(2,2,init_W=W2,init_B=B2))
# Test the Minus network before training
predictions_Y = un_RNA.propagation_donnees_ent_X(donnees_ent_X,trace=True)
print("Prédiction initiale: ",predictions_Y)
# Train the Minus network (one epoch of SGD)
un_RNA.entrainer_descente_gradiant_stochastique(donnees_ent_X,donnees_ent_Y,nb_epochs=1,taux=0.1,trace = True)
# Test the Minus network after training
predictions_Y = un_RNA.propagation_donnees_ent_X(donnees_ent_X,trace=True)
print("Prédiction après entraînement:",predictions_Y)
| [
"godin.robert@uqam.ca"
] | godin.robert@uqam.ca |
6db5f439616bd427988a20cb6d69eab45e22ceb4 | 3d813a1ae6f6e9ca9d339b2afd36eedbda99ce5f | /spell.py | 9af166db1edbfee6084159126bb8dbe87866f5cd | [] | no_license | edwelker/search | 00555b379c21b688c60fccb0a01e8578bb169e25 | eddfe285e2209534a272a4681344db1f73205e82 | refs/heads/master | 2016-09-01T23:08:34.975830 | 2011-04-27T02:43:37 | 2011-04-27T02:43:37 | 1,592,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | from optparse import OptionParser
from lxml import etree
from damerau_levenshtein import dameraulevenshtein as spdiff
def main():
usage = "Usage: %prog [options] arg"
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Sorry, incorrect number of arguments")
tree = etree.parse('adjusted_menu.xml')
results = match(tree.xpath('//title'), args[0] )
print(results)
# need to cover
# spelling (done)
# contains
# contains with spelling
def match(xml_to_check, input):
'''Get the distance between the input and the xml tree. Run Damerau Levenshtein on
lowercase names and lowercase input.'''
adjusted_input = input.strip().lower()
results = []
for e in xml_to_check:
distance = spdiff( e.text.strip().lower(), adjusted_input )
if distance < 3:
x = (etree.tostring(e.getparent()), distance)
results.append(x)
return results
if __name__ == '__main__':
main()
| [
"eddie.welker@gmail.com"
] | eddie.welker@gmail.com |
02082689b4b0c42059dca0579e9299120b08095a | 9c34836aded5c69ac98c90a5e3a0f7f11b5e8594 | /board.py | 2b4ef27e7fb2397df6ba1593682b090c53819a07 | [] | no_license | Boissineau/RPG | 6f456d642ca5a36421f752965e4e8df2089895bf | 9d1443dc4c6e1698b6f08e6e869b68876bb38b0a | refs/heads/master | 2023-06-05T13:16:29.738083 | 2021-06-21T19:41:07 | 2021-06-21T19:41:07 | 378,438,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py |
class Board:
def __init__(self, rows, cols):
self.rows = rows - 1
self.cols = cols - 1
self.grid = [[0]*cols for i in range(rows)]
self.number_of_entities = 0
self.entities = []
def update_board(self, prev_x, prev_y, x, y):
self.grid[prev_y][prev_x] = 0
self.grid[y][x] = 1
def get_board(self):
return self.grid
def nearby(self, x, y, name):
for i in self.entities:
x2, y2 = i.get_position()
if (x == x2 - 1 or x == x2 + 1) and (y == y2 - 1 or y == y2 + 1):
return True
return False
def get_entities(self):
return self.entities
def entity_list(self, entity):
self.entities.append(entity)
| [
"brendanboissineau@gmail.com"
] | brendanboissineau@gmail.com |
d54d700958b09fbd6a63f405ac357f0258199523 | 95fc7acc1fb21f2a0ebc6c8e0cb016f95258cd10 | /py/bin/easy_install | 63dc7cbec688923f3fb95c62785b95a533457f55 | [
"MIT"
] | permissive | lin826/chat-room | 5edc631eeae04ab39fff09b95b290aac4587fc61 | f9e11ad10c9a52b066cd2553ae1c8ca2180c26f8 | refs/heads/master | 2021-01-25T08:20:12.121087 | 2017-07-21T21:41:18 | 2017-07-21T21:41:18 | 93,758,638 | 0 | 0 | null | 2017-06-08T14:25:04 | 2017-06-08T14:25:04 | null | UTF-8 | Python | false | false | 261 | #!/home/iju/Documents/chat-room/py/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"liniju826@gmail.com"
] | liniju826@gmail.com | |
3ff8f5fa4434a48ad4e6427bd8af6db22f99f475 | 7f98a67bbdea2570bc605de8263cfc9e751ef6c8 | /grover2.py | 2ef0e9beabdba82a769484e610208a25d5df71b0 | [] | no_license | claudioalvesmonteiro/quantum_computing_algorithms | a1596ac10ae26d5867715b038a5732a63f7a54ea | 00ed0d1b5b578a7c711725d05242b105cbd51e07 | refs/heads/master | 2020-07-24T19:52:32.368331 | 2019-09-17T22:13:29 | 2019-09-17T22:13:29 | 208,030,416 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | '''
QUANTUM COMPUTING STUDIES
Introduction to Qiskit
@ claudio alves monteiro 2019
clam@cin.ufpe.br
'''
#=======================#
# INITIALIZATION
#======================#
# import package
import qiskit as qk
# define nqubits
nqubits = 3
# creating a quantum register with 3 qubits
q = qk.QuantumRegister(nqubits)
# creating a classical register with nqubits(for measure)
c = qk.ClassicalRegister(nqubits)
# build quantum circuit with the qubits and classical register
circuit = qk.QuantumCircuit(q, c)
# print circuit
print(circuit)
#===============================#
# Quantum State 1
#==============================#
# not on last qubit
circuit.x(q[2])
# Hadamard on all qubits
for i in range(nqubits):
circuit.h(q[i])
#===============================#
# ORACLE
#==============================#
# multi controlled not
circuit.ccx(q[0], q[1], q[2])
#===============================#
# CONDITIONAL PHASE SHIFT
#==============================#
# Hadamard on all but last
for i in range(nqubits-1):
circuit.h(q[i])
# not in all but last
for i in range(nqubits-1):
circuit.x(q[i])
# hadamrd before last
circuit.h(q[1])
# cnot
circuit.cx(q[0], q[1])
# hadamrd before last
circuit.h(q[1])
# not in all but last
for i in range(nqubits-1):
circuit.x(q[i])
# Hadamard on all but last
for i in range(nqubits-1):
circuit.h(q[i])
#========================#
# SIMULATE ALGORITHM
#======================#
# measure
circuit.measure(q[0:2], c[0:2])
print(circuit)
# using Aer Qasm Simulator
simulator = qk.BasicAer.get_backend('qasm_simulator')
# simulate the circuit and get result
job = qk.execute(circuit, simulator)
result = job.result()
# get the aggregate binary outputs od the circuit
counts = result.get_counts(circuit)
print(counts)
#https://hiqsimulator.readthedocs.io/en/latest/quantumcomputing.html | [
"claudiomonteiro@protonmail.com"
] | claudiomonteiro@protonmail.com |
d87fc2c887b5dc9ba24c562a0b8b68721470e5ff | 8ef0ff98852c2b22e447aeee66172820c381e8a5 | /train1.py | e100b765ab9eefb0ed1c73a3f208d8e4312150aa | [] | no_license | leezqcst/LSTM-Text-Generator | 200de23121e2a1703883e6b85a8d92ffa2e46c3b | 07ba5573932a8faf690f237da9c590978e44071f | refs/heads/master | 2020-11-29T11:52:36.839451 | 2017-03-16T02:36:44 | 2017-03-16T02:36:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | import re
import keras.layers as kl
import keras.layers.core as klc
import keras.models as km
import numpy as np
# load raw text
filename = "alice.txt"
with open(filename, 'r') as f:
lines = f.readlines()[31:3370]
lines = [l if len(l) <= 2 else l[:-2] + " " for l in lines]
raw_text = "".join(lines)
# find list of characters
chars = sorted(set(raw_text)) + ['START', 'END', 'BLANK']
num_chars = len(chars)
# map characters to vectors
char_to_ind = dict((c,i) for i,c in enumerate(chars))
def char_to_vec(c):
vec = np.zeros((num_chars))
vec[char_to_ind(c)] = 1
# map vectors to characters
def vec_to_char(vec):
ind = np.argmax(vec)
return chars[ind]
# convert data tensor to string
def tensor_to_string(tensor):
s = ""
for i in range(len(tensor)):
for j in range(len(tensor[i])):
c = vec_to_char(tensor[i,j])
if len(c) == 1:
s += c
s += "\n"
return s
# split text into sentences
sentences = re.split('[\r\n]', raw_text)
for i in range(len(sentences)-1, -1, -1):
if len(sentences[i]) < 5:
del sentences[i]
# convert strings to char arrays
lines = [list(l) for l in sentences]
# add START and END to lines
lines = [['START'] + l + ['END'] for l in lines]
# force all lines to be same length
maxlen = 0
for l in lines:
if len(l) > maxlen:
maxlen = len(l)
for i in range(len(lines)):
if len(lines[i]) < maxlen:
lines[i] += ['BLANK'] * (maxlen - len(lines[i]))
# condense list of paragraphs into an np tensor
# dimensions: examples/sentences, character vectors, characters 1/0s
data = np.zeros((len(lines), maxlen, num_chars))
for i, line in enumerate(lines):
for j, c in enumerate(line):
data[i][j][char_to_ind[c]] = 1
# split data into inputs and outputs
seq_len = 100
X = np.zeros((0, seq_len, num_chars))
# create LSTM model
lstm_input = kl.Input(shape=[maxlen, num_chars])
H = kl.LSTM(256)(lstm_input)
H = kl.Dropout(0.2)(H)
lstm_output = kl.Dense(num_chars, activation='softmax')(H)
lstm = km.Model(lstm_input, lstm_output)
lstm.compile(loss="categorical_crossentropy", optimizer="adam")
| [
"weidman.matthew@gmail.com"
] | weidman.matthew@gmail.com |
ca0396a7798112fb29c61c15184dfb8305b228b4 | 52e72490c30ead79d84498f92668b8778990fb6c | /p13.py | 8b15af8c823c7dc9c7aafb852fb0fabe86359059 | [] | no_license | zincsoda/euler | b218b5c611e3b81b2deb0155623d108f1f51e324 | abd48cbbdd94215454c8cdeb7180c38b26d93b82 | refs/heads/master | 2020-06-04T09:00:49.426820 | 2013-10-30T12:37:05 | 2013-10-30T12:37:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,566 | py | #!/usr/bin/python2.6
data = "\
37107287533902102798797998220837590246510135740250\n\
46376937677490009712648124896970078050417018260538\n\
74324986199524741059474233309513058123726617309629\n\
91942213363574161572522430563301811072406154908250\n\
23067588207539346171171980310421047513778063246676\n\
89261670696623633820136378418383684178734361726757\n\
28112879812849979408065481931592621691275889832738\n\
44274228917432520321923589422876796487670272189318\n\
47451445736001306439091167216856844588711603153276\n\
70386486105843025439939619828917593665686757934951\n\
62176457141856560629502157223196586755079324193331\n\
64906352462741904929101432445813822663347944758178\n\
92575867718337217661963751590579239728245598838407\n\
58203565325359399008402633568948830189458628227828\n\
80181199384826282014278194139940567587151170094390\n\
35398664372827112653829987240784473053190104293586\n\
86515506006295864861532075273371959191420517255829\n\
71693888707715466499115593487603532921714970056938\n\
54370070576826684624621495650076471787294438377604\n\
53282654108756828443191190634694037855217779295145\n\
36123272525000296071075082563815656710885258350721\n\
45876576172410976447339110607218265236877223636045\n\
17423706905851860660448207621209813287860733969412\n\
81142660418086830619328460811191061556940512689692\n\
51934325451728388641918047049293215058642563049483\n\
62467221648435076201727918039944693004732956340691\n\
15732444386908125794514089057706229429197107928209\n\
55037687525678773091862540744969844508330393682126\n\
18336384825330154686196124348767681297534375946515\n\
80386287592878490201521685554828717201219257766954\n\
78182833757993103614740356856449095527097864797581\n\
16726320100436897842553539920931837441497806860984\n\
48403098129077791799088218795327364475675590848030\n\
87086987551392711854517078544161852424320693150332\n\
59959406895756536782107074926966537676326235447210\n\
69793950679652694742597709739166693763042633987085\n\
41052684708299085211399427365734116182760315001271\n\
65378607361501080857009149939512557028198746004375\n\
35829035317434717326932123578154982629742552737307\n\
94953759765105305946966067683156574377167401875275\n\
88902802571733229619176668713819931811048770190271\n\
25267680276078003013678680992525463401061632866526\n\
36270218540497705585629946580636237993140746255962\n\
24074486908231174977792365466257246923322810917141\n\
91430288197103288597806669760892938638285025333403\n\
34413065578016127815921815005561868836468420090470\n\
23053081172816430487623791969842487255036638784583\n\
11487696932154902810424020138335124462181441773470\n\
63783299490636259666498587618221225225512486764533\n\
67720186971698544312419572409913959008952310058822\n\
95548255300263520781532296796249481641953868218774\n\
76085327132285723110424803456124867697064507995236\n\
37774242535411291684276865538926205024910326572967\n\
23701913275725675285653248258265463092207058596522\n\
29798860272258331913126375147341994889534765745501\n\
18495701454879288984856827726077713721403798879715\n\
38298203783031473527721580348144513491373226651381\n\
34829543829199918180278916522431027392251122869539\n\
40957953066405232632538044100059654939159879593635\n\
29746152185502371307642255121183693803580388584903\n\
41698116222072977186158236678424689157993532961922\n\
62467957194401269043877107275048102390895523597457\n\
23189706772547915061505504953922979530901129967519\n\
86188088225875314529584099251203829009407770775672\n\
11306739708304724483816533873502340845647058077308\n\
82959174767140363198008187129011875491310547126581\n\
97623331044818386269515456334926366572897563400500\n\
42846280183517070527831839425882145521227251250327\n\
55121603546981200581762165212827652751691296897789\n\
32238195734329339946437501907836945765883352399886\n\
75506164965184775180738168837861091527357929701337\n\
62177842752192623401942399639168044983993173312731\n\
32924185707147349566916674687634660915035914677504\n\
99518671430235219628894890102423325116913619626622\n\
73267460800591547471830798392868535206946944540724\n\
76841822524674417161514036427982273348055556214818\n\
97142617910342598647204516893989422179826088076852\n\
87783646182799346313767754307809363333018982642090\n\
10848802521674670883215120185883543223812876952786\n\
71329612474782464538636993009049310363619763878039\n\
62184073572399794223406235393808339651327408011116\n\
66627891981488087797941876876144230030984490851411\n\
60661826293682836764744779239180335110989069790714\n\
85786944089552990653640447425576083659976645795096\n\
66024396409905389607120198219976047599490197230297\n\
64913982680032973156037120041377903785566085089252\n\
16730939319872750275468906903707539413042652315011\n\
94809377245048795150954100921645863754710598436791\n\
78639167021187492431995700641917969777599028300699\n\
15368713711936614952811305876380278410754449733078\n\
40789923115535562561142322423255033685442488917353\n\
44889911501440648020369068063960672322193204149535\n\
41503128880339536053299340368006977710650566631954\n\
81234880673210146739058568557934581403627822703280\n\
82616570773948327592232845941706525094512325230608\n\
22918802058777319719839450180888072429661980811197\n\
77158542502016545090413245809786882778948721859617\n\
72107838435069186155435662884062257473692284509516\n\
20849603980134001723930671666823555245252804609722\n\
53503534226472524250874054075591789781264330331690"
if __name__=="__main__":
sum = 0
rows = data.split('\n')
for row in rows:
sum += int(row)
print str(sum)[:10]
| [
"steve.walsh@sap.com"
] | steve.walsh@sap.com |
35f1897afb05480d4303f528cc251d5f626ea9a2 | 63d61e7aa661f529bf170eec519184c79dd624df | /hw-opt-challenges/challenges/02_One_two_buckle_my_shoe/solve.py | 84714ce43c4962545b3109b2b10a2b722c05f718 | [] | no_license | segashin/ait_crypto | eae3a8fa53cb057f3a104d35ffcc91069b699af8 | 0653d5678d9b75c0e0ce5517dc974933febfde7a | refs/heads/master | 2022-10-03T23:44:15.649160 | 2020-06-06T12:09:50 | 2020-06-06T12:09:50 | 269,969,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import os
f0 = "LabProfile-v1.crypt"
f1 = "LabProfile-v1.1.crypt"
f0 = os.path.join(os.getcwd(), f0)
f1 = os.path.join(os.getcwd(), f1)
ifile0 = open(f0, "rb")
ifile1 = open(f1, "rb")
def diffB(a):
x = ifile0.read(16)
y = ifile1.read(16)
#print(x)
#print(y)
res = ""
for i in range(16):
z = (x[i]) ^(y[i]) ^ ord(a[i])
res += chr(z)
print(res)
return res
wfile3 = open("output4.txt", "w")
pfile = open("output_ptext.txt", "w")
a = "tory Was involve"
for i in range(11):
res = diffB(a)
for i in range(100):
res = diffB(a)
a = res
wfile3.write(res)
wfile3.close()
"""
wfile3 = open("output3.txt", "w")
pfile = open("output_ptext.txt", "w")
a = "veral high-profi"
for i in range(14):
res = diffB(a)
for i in range(50):
res = diffB(a)
a = res
wfile3.write(res)
wfile3.close()
"""
| [
"segashin0810shin@outlook.jp"
] | segashin0810shin@outlook.jp |
8d605d1923f31fa182d52871222663260f4680c7 | fb87f29a2cc1997b38943191a416cc32ba095f6d | /obj/base/qcube.py | d51a9f7f28dce59eff006b59b0641e7298b01651 | [] | no_license | Lludion/QGOL | 05a6e58c69085ec89a09a2d482fce96eded70ec5 | 03b902a2fb2334a008b2ec840f094cf71b372f0d | refs/heads/main | 2023-01-08T21:50:25.577876 | 2020-11-11T10:46:42 | 2020-11-11T10:50:06 | 303,635,962 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | class QCube:
""" A container for a Cube and an amplitude (alpha) """
def __init__(self,cube=None,alpha=1):
self.cube = cube
self.alpha = alpha
def __repr__(self):
return "\\" + str(self.alpha) + ":" + str(self.cube) + "/"
| [
"ulysse.remond@outlook.fr"
] | ulysse.remond@outlook.fr |
aba749ebbea489231af3f6f5526e57d7f23a570f | 680d1419ed16071082ce02f4b3c61cff735f0ac3 | /6.0001 - Introduction to Computer Science and Programming in Python/ps1/ps1a.py | 7a9537949ce705fdfcd498cfe012570b1654231c | [] | no_license | RoboticDinosaur/MIT-Open-Courseware | 6ba7f2e98ada21d93b3907cc6be2c311c029bba1 | f12b69980aa93e357423cbd0242bb4639898cf79 | refs/heads/master | 2022-11-10T12:23:57.513996 | 2020-06-30T19:22:32 | 2020-06-30T19:22:32 | 255,319,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | """
Annual salary: 120000
Percent save: .10
Total cost: 1000000
Number of months: 183
"""
####
# Get the inputs direct to variables.
#####
annual_salary = int(input('Enter your annual salary: '))
portion_save_percent = float(input('Enter the percent of your salary to save, as a decimal: '))
total_cost = int(input('Enter the cost of your dream home: '))
monthly_salary = annual_salary / 12
portion_down_payment = total_cost * 0.25
savings = 0
r = float(0.04)
month_count = int(0)
while savings < portion_down_payment:
portion_saved = monthly_salary * portion_save_percent
roi = float(savings * r / 12)
savings += roi + portion_saved
month_count += 1
print('Number of months: ', month_count)
| [
"robert@roboticdinosaur.co.uk"
] | robert@roboticdinosaur.co.uk |
865a3b17e948be52bf5924df694dbcd39f085a16 | 75a08d9cc0feda5899859ad11df15109a89c9a44 | /src/gym_gazebo_envs/src/gym_gazebo_envs/robotEnvs/turtlebot3Envs/tasksEnvs/turtlebot3_obstacle_avoidance_v1.py | 4f16bf37b7c0e1ed3ddb3e32a38b54da22be31bb | [] | no_license | victorfdezc/rl_gym_gazebo | 9bb62547b16c70305d3ed32b3686dd98529489af | c8c748d1cd8ff6eadbc01f2d438cf6493f13185d | refs/heads/master | 2023-01-22T09:47:07.913563 | 2020-12-07T16:42:20 | 2020-12-07T16:42:20 | 302,654,654 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,930 | py | #!/usr/bin/env python
import rospy
import numpy
import random
from gym import spaces
from gym_gazebo_envs.robotEnvs.turtlebot3Envs import turtlebot3_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
'''
Register an environment by ID. IDs remain stable over time and are guaranteed to resolve
to the same environment dynamics. The goal is that results on a particular environment
should always be comparable, and not depend on the version of the code that was running.
To register an environment, we have the following arguments:
* id (str): The official environment ID
* entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
* reward_threshold (Optional[int]): The reward threshold before the task is considered solved
* nondeterministic (bool): Whether this environment is non-deterministic even after seeding
* max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of (maximum number of executed actions)
* kwargs (dict): The kwargs to pass to the environment class
'''
register(
id = 'TurtleBot3ObstacleAvoidance-v1',
entry_point = 'gym_gazebo_envs.robotEnvs.turtlebot3Envs.tasksEnvs.turtlebot3_obstacle_avoidance_v1:TurtleBot3ObstacleAvoidanceEnv',
max_episode_steps = 500
)
'''
This class is used to define a task to solve for a Turtlebot3 robot. In particular, we must define how
observations are taken, how to compute the reward, how to execute actions, when an episode has finished...
that is all the GazeboRobotEnv methods that have not been implemented yet.
Besides, we must define the attributes needed to define a Gym environment: action_space, observation_space
and reward_range.
This class is defined to make the Turtlebot3 robot avoid obstacles in the world where it moves. To do that,
each time the robot crash, it will be penalized with a very high (negative) reward, but each step the robot moves
without crashing, the reward will depend on the action taken, so for example, usually the robot will receive more
reward if the previous action was to move forward, because in this way the robot will move much faster (these rewards
can be changed in the yaml file #TODO). If you don't do that, the robot can realize that turning in one direction
always can lead to not crashing, so it would reach the maximum reward always without almost moving.
For the observations we will use the laser data. This data will be discretized, so we will have 5 laser lectures
corresponding to the following 5 laser angle ranges (remember that the angle 0 is the front of the robot): from
-90 degrees to -54 degrees, from -54 to -18, from -18 to 18, from 18 to 54 and from 54 to 90. So each one of these
5 laser ranges will have only one laser reading corresponding to the lowest reading obtained in that range.
Finally, this reading (which is a distance in meters) will be discretized again, so that we can have 2 possible
values: 0 if the lecture is less than 0.5 meters and 1 if it is not (these values can be changed from the yaml file #TODO).
Finally, the robot can take only 3 possible actions: go forward, turn left or turn right. In this way we are making
a simple environment to train the robot with a simple Q-Learning algorithm.
'''
class TurtleBot3ObstacleAvoidanceEnv(turtlebot3_env.TurtleBot3Env):
def __init__(self):
# Call the __init__ function of the parent class:
super(TurtleBot3ObstacleAvoidanceEnv, self).__init__()
#TODO: add a description of each parameter we use!!
# First we load all the parameters defined in the .yaml file.
# Actions:
self.linear_forward_speed = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/angular_speed')
self.step_time = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/step_time')
self.reset_time = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/reset_time')
# Observation:
self.max_distance = rospy.get_param("/turtlebot3_obstacle_avoidance_v1/max_distance")
self.angle_ranges = rospy.get_param("/turtlebot3_obstacle_avoidance_v1/angle_ranges")
self.min_range = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/min_range')
# Rewards:
self.forward_reward = rospy.get_param("/turtlebot3_obstacle_avoidance_v1/forward_reward")
self.turn_reward = rospy.get_param("/turtlebot3_obstacle_avoidance_v1/turn_reward")
self.end_episode_points = rospy.get_param("/turtlebot3_obstacle_avoidance_v1/end_episode_points")
# Initial states:
self.init_linear_forward_speed = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/turtlebot3_obstacle_avoidance_v1/init_linear_turn_speed')
self.initial_poses = rospy.get_param("/turtlebot3_obstacle_avoidance_v1/initial_poses")
# Now we are going to define the attributes needed to make a Gym environment.
# First we define our action_space. In this case, the action_space is discrete
# and it has 3 possible values (it must be a Space object, in this case,
# a Discrete space object):
self.action_space = spaces.Discrete(3)
# We set the reward range, that in this case can have any positive or negative value. This
# must be a tuple:
self.reward_range = (-numpy.inf, numpy.inf)
# Finally we set the observation space which is a box (in this case it is bounded but it can be
# unbounded). Specifically, a Box represents the Cartesian product of n closed intervals. Each
# interval has the form of one of [a, b], (-oo, b], [a, oo), or (-oo, oo). In this case we will
# have 5 closed intervals of the form [0,max_distance]
num_laser_readings = len(self.angle_ranges) # Number of laser ranges
high = numpy.full((num_laser_readings), self.max_distance)
low = numpy.full((num_laser_readings), 0.0)
self.observation_space = spaces.Box(low, high, dtype=numpy.float32)
#--------------------- GazeboRobotEnv Methods ---------------------#
def _set_initial_state(self):
'''
Set a initial state for the Turtlebot3. In our case, the initial state is
a linear and angular speed equal to zero, so the controllers are 'resetted'.
We will also set a random initial pose.
'''
random_pose = self.initial_poses[random.randint(0,len(self.initial_poses)-1)]
self.gazebo.setModelState("turtlebot3_burger", random_pose[0], random_pose[1], 0,0,0, random_pose[2],random_pose[3])
self.move_base( self.init_linear_forward_speed, self.init_linear_turn_speed, wait_time=self.reset_time)
return True
def _set_final_state(self): # TODO: define this method in main RobotGazeboEnv class
'''
Set a final state for the Turtlebot3. In our case, the final state is also
a linear and angular speed equal to zero.
'''
self.move_base( self.init_linear_forward_speed, self.init_linear_turn_speed, wait_time=self.reset_time)
return True
def _execute_action(self, action):
'''
This method is used to execute an action in the environment.
In this case, based on the action number given, we will set the linear and angular
speed of the Turtlebot3 base.
'''
# We convert the actions numbers to linear and angular speeds:
if action == 0: #Go forward
linear_speed = self.linear_forward_speed
angular_speed = 0.0
# We store the last action executed to compute the reward
self.last_action = "forward"
elif action == 1: #Turn Left
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "turn_left"
elif action == 2: #Turn Right
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "turn_right"
# We tell to TurtleBot3 the linear and angular speed to execute
self.move_base(linear_speed, angular_speed, wait_time=self.step_time)
def _get_obs(self):
'''
This method is used to get the observations of the environment.
In this case, our observations will be computed with the LIDAR readings. In particular,
we will discretize these readings in order to have the fewer states as possible (by decreasing
the number of laser readings to 5, and by discretizing the continuous laser readings to have only 2
possible values).
'''
# We get the laser scan data
laser_scan = self.laser_scan
# And discretize them:
discretized_observations = self._discretize_scan_observation(laser_scan)
return discretized_observations
def _is_done(self, observations): # TODO: ten cuidado con los argumentos de cada funcion... recuerda que estas funciones estan ya definidas previamente por lo que no puedes quitar o poner argumentos como uno quiera
'''
This method is used to know if a episode has finished or not. It can be based on
the observations given as argument.
In this case, we will use the laser readings to check that. If any of the readings
has a value less than a given distance, we suppose that the robot is really close of
an obstacle, so the episode must finish.
TODO: put args and returns
'''
# Initialize the variable
self.episode_done = False
# Get the laser scan data
laser_scan = self.laser_scan.ranges
min_value = min(laser_scan)
if min_value<self.min_range:
self.episode_done = True
return self.episode_done, self.episode_success
def _compute_reward(self, observations, done, success):
'''
This method is used to compute the reward to give to the agent. It can be based on
the observations or on the fact that the episode has finished or not (both of them given
as arguments).
In this case, the reward is based on the fact the agent has collided or not and on the last
action taken.
TODO: put args and returns
'''
# The reward will depend on the fact that the episode has finished or not
if not done:
# In the case the episode has not finished, the reward will depend on the last action executed
if self.last_action == "forward":
reward = self.forward_reward
else:
reward = self.turn_reward
else:
reward = self.end_episode_points
return reward
#------------------------------------------------------------------#
#------------------------ Auxiliar Methods ------------------------#
def _discretize_scan_observation(self, data):
'''
Discretize the laser scan data. To do that, first we take only 180 readings (from 360 readings) that correspond
to the range [-90,90] degrees (being 0 the front of the robot). Then we take those laser readings and we divide them
into 5 sections (each one of 36 degrees). In this way the observation will be a list with 5 elements, and each element
will have a binary value depending on the minumum distance measured in its corresponding angle range (if the lowest
measurement in that range is less than some threshold, the value of the element would be 0, and it would be 1 if it is
greater). In this way we will have discrete distances making learning faster.
'''
# We get only the distance values
laser_data = data.ranges
discretized_ranges = []
for r in self.angle_ranges:
# From each section we get the lowest value
min_value = min([laser_data[i] for i in range(r[0],r[1])])
if min_value > self.max_distance:
discretized_ranges.append(self.max_distance)
else:
discretized_ranges.append(min_value)
# We reverse the list so the first element in the list (the most left element) correspond to the
# most left angle range in the real robot: # TODO: explain this better
self.discretized_ranges = discretized_ranges[::-1]
# rospy.loginfo("Discretized obs " + str(self.discretized_ranges))
return self.discretized_ranges
#------------------------------------------------------------------#
| [
"victorfdezc1996@gmail.com"
] | victorfdezc1996@gmail.com |
380f273d10bd7fdea8606d9b9142f4d4cd0e3799 | cfaf0cea8c22bbc7c337d4a092002e26d9e821f2 | /modules/PLOTER_module.py | 018b18ef538ad4a5235833c84c4ef4006646d525 | [] | no_license | piotrlaczkowski/Data-Inspector-Advanced | 4430d6e58b1720822c7b6cfd902c365d3889a65c | 00ebee9d99400364014f5be97254d30afbaf397c | refs/heads/master | 2021-01-25T00:16:27.036944 | 2014-01-25T12:54:33 | 2014-01-25T12:54:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,804 | py | # defining character coding
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 11:17:23 2012
@author: Piotr Laczkowski piotr.laczkowski@gmail.com
SCRIPT DESCRIPTION:
This script is ment to ease the plotting process and simple data analysis.
It can be extended by its own modules in the form of python scripts.
"""
#! DIA MODULE FOR PLOTTING
#!==========================================================================
#! used to parse files more easily
from __future__ import with_statement, division
import numpy as np
from numpy import *
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.ticker import OldScalarFormatter, MaxNLocator
import matplotlib.ticker as ticker
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import pylab
from pylab import *
import PyQt4
from PyQt4 import *
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import math
import scipy
from scipy import *
#! for inset
from matplotlib.offsetbox import OffsetImage,AnnotationBbox
from matplotlib._png import read_png
#for delimiter deterction (sniffing)
import csv
#! import the MainWindow widget from the converted .ui files
from PLOT_main_window import Ui_MainWindow
class DesignerMainWindow(QtGui.QMainWindow, Ui_MainWindow):
"""Customization for Qt Designer created window"""
#!=======================================initialization=============================================
def __init__(self, parent = None):
"""Initializing some parameters at start"""
#! initialization of the superclass
super(DesignerMainWindow, self).__init__(parent)
self.setupUi(self)
#! defining short-filename that will be used as a figures title
shortpath = os.path.basename(filename)
try:
'''trying with filename and tex'''
st=u'%s'%(shortpath[:-4])
self.edit_title.setText("$"+st.replace('_','\,')+"$")
#self.edit_title.setText(shortpath[:-4])
except Exception:
'''when tex and filename does not work we will use simple title'''
self.edit_title.setText(u'TITLE')
#! drawing command
self.Draw()
#! setting tesla gauss conversion for x axis display - if necessary uncomment
#self.edit_divx.setText('1e4')
#! connecting the signals with the slots
QtCore.QObject.connect(self.btn_pythonize, QtCore.SIGNAL("clicked()"), self.SavePython)
QtCore.QObject.connect(self.btn_SaveFigs, QtCore.SIGNAL("clicked()"), self.SavePlot)
#! connecting changes of edits:
self.connect(self.edit_xlabel, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_ylabel, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_title, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_label, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_xmin, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_xmax, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_ymin, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_ymax, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_divx, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_multx, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_divy, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_multy, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_inset, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_dt, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_dH, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_dH2, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_smooth, SIGNAL('editingFinished ()'), self.on_change)
self.connect(self.edit_degree, SIGNAL('editingFinished ()'), self.on_change)
#! connecting changes of checkboxes
self.connect(self.check_grid, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_label, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_tight, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_xlim, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_ylim, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_derivate, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_invertx, SIGNAL('stateChanged(int)'), self.on_change)
self.connect(self.check_inverty, SIGNAL('stateChanged(int)'), self.on_change)
#! connecting changes of sliders
self.connect(self.slide_start, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_stop, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_xsize, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_xnr, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_xsizeticks, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_ysize, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_ynr, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_ysizeticks, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_titlesize, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_zoom, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_posx, SIGNAL('valueChanged(int)'), self.on_change)
self.connect(self.slide_posy, SIGNAL('valueChanged(int)'), self.on_change)
#! connecting changes for spins boxes
self.connect(self.combo_x, SIGNAL('currentIndexChanged(int)'), self.on_change)
self.connect(self.combo_y, SIGNAL('currentIndexChanged(int)'), self.on_change)
#! adding combo boxes items for x and y columns selection and labels for all columns
#! firs we need to detect how many columns we have
infile = open(filename,"r")
#detecting file delimiter
global delim
delimiter = csv.Sniffer().sniff(infile.readlines()[40], ['\t',',',';',' '])
delim=delimiter.delimiter
print('found delimiter=',delim)
#reading file
col_nbr=genfromtxt(filename,dtype=float,delimiter=delim,skip_header=30,skip_footer=self.slide_stop.value(),unpack=True)
print u"found %s columns in the file"%(len(col_nbr))
#! adding variables to column choice for x and y
for i in range(len(col_nbr)):
#print "i=",i+1
self.combo_x.addItem(str(i+1))
self.combo_y.addItem(str(i+1))
#setting y to start with second column and not the first one
self.combo_y.setCurrentIndex(1)
#!=======================================definitions=============================================
def on_change(self):
"""Clearing and redrawing the figure canvas after some changes"""
#! clearing
self.mpl.canvas.ax.clear()
#! redrawing
self.Draw()
def read_data(self,filename):
"""reading data from the input file depending on the selected column number
First the tab delimiter will be tried-if this does not work automatic
recognition of the delimiter will be used"""
infile = open(filename,"r")
global X
global Y
try:
X,Y=genfromtxt(infile,dtype=float,delimiter=delim,skip_header=self.slide_start.value()+10, skip_footer=self.slide_stop.value(),usecols=(self.combo_x.currentIndex(),self.combo_y.currentIndex()),unpack=True)
# print u"tab delimiter option was used"
except Exception:
X,Y=genfromtxt(infile,dtype=float,skip_header=self.slide_start.value()+10, skip_footer=self.slide_stop.value(),usecols=(self.combo_x.currentIndex(),self.combo_y.currentIndex()),unpack=True)
# print u"empyt delimiter was used - lookes for empty spaces"
return X,Y
def Draw(self):
"""Drawing X and Y on the canvas. In this definition all necessary data
correction are performed"""
#! setting up gobal variables for parsing to another funtions
global X
global Y
#!----------------------------------------------------Corrections Start
#! rerading data-file Y0 is for correction
X,Y0=self.read_data(filename)
#! derivative, linear, and B^2 corrections
R = float(self.edit_dt.text())
E = float(self.edit_dH.text())
EE = float(self.edit_dH2.text())
#! creating final for Y list after corrections
Y=[]
#! field derivative correction for Y0 saved in Y
'''to jest miejsce gdzie przemanazam wszystkie element listy X0 przez korekte derivative'''
for index,y in enumerate(Y0):
Y.append(Y0[index] + (float(X[index])*float(E))+ ((float(index)/float(len(Y0)))*float(R)) + (float(X[index])*float(X[index])*float(EE)))
#!----------------------------------------------------Corrections END
#!----------------------------------------------------Customization of the plot
#! setting up ticks numbers for X and Y from slider
majorLocatorX = MaxNLocator(self.slide_xnr.value())
majorLocatorY = MaxNLocator(self.slide_ynr.value())
#! definition of MPL
MPL=self.mpl.canvas.ax
# print u"MPL signature=",MPL
MPL.set_title(str(self.edit_title.text()),fontsize=self.slide_titlesize.value())
MPL.set_xlabel(self.edit_xlabel.text(),fontsize=self.slide_xsize.value())
MPL.set_ylabel(self.edit_ylabel.text(),fontsize=self.slide_ysize.value())
#self.ticker.set_major_locator(MaxNLocator(4))
MPL.xaxis.set_major_locator(majorLocatorX)
MPL.yaxis.set_major_locator(majorLocatorY)
ticker.ScalarFormatter(useOffset=True, useMathText=True) #- for offset - but need to be adapted
for t in MPL.get_xticklabels():
t.set_fontsize(self.slide_xsizeticks.value())
for t in MPL.get_yticklabels():
t.set_fontsize(self.slide_ysizeticks.value())
# grid verification
if self.check_grid.isChecked():
MPL.get_xaxis().grid(True)
MPL.get_yaxis().grid(True)
#!----------------------------------------------------Further corrections
# applying corrections for Y
try:
Y=[i*float(self.edit_multy.text())/float(self.edit_divy.text()) for i in Y]
except Exception:
pass
# applying corrections for X
try:
X=[i*float(self.edit_multx.text())/float(self.edit_divx.text()) for i in X]
except Exception:
pass
#___________Smoothing____________
degree = eval(str(self.edit_smooth.text()))
# print "smooth degree was set to be =", degree
def on_smooth(data,degree,dropVals=False):
smoothed=[]
for i in range(degree,len(data)-degree):
point=data[i:i+degree]
smoothed.append((sum(point)/degree))
if dropVals: return smoothed
smoothed=[smoothed[0]]* int((degree+(degree/2)))+smoothed
while len(smoothed)<len(data):smoothed.append(smoothed[-1])
return smoothed
#! derivate correction check
if self.check_derivate.isChecked():
''' when derivate of degree n is taken on y we need to make same number of points in x making x[n:]'''
Y=diff(Y, n=int(self.edit_degree.text()), axis=-1)
X=X[int(self.edit_degree.text()):]
# invert X verification
if self.check_invertx.isChecked():
# X=ma.masked_where(X<0,X)
X=ma.masked_less_equal(X,0)
X=[1./i for i in X]
#! changing xlabel
acctualx= self.edit_xlabel.text()
if acctualx[0:3]!="$1/":
newlabel="$1/ %s $"%(str(acctualx).strip('$'))
self.edit_xlabel.setText(newlabel)
# invert Y verification
if self.check_inverty.isChecked():
# Y=ma.masked_where(Y!=0,Y)
Y=ma.masked_less_equal(Y,0)
Y=[1./i for i in Y]
#! changing xlabel
acctualy= self.edit_ylabel.text()
if acctualy[0:3]!="$1/":
newlabel="$1/ %s $"%(str(acctualy).strip('$'))
self.edit_ylabel.setText(newlabel)
#! Plotting command
MPL.plot(X,on_smooth(Y,degree), 'ro-',label=str(self.edit_label.text()),linewidth = 3,picker=1)
#! limitation on axis
if self.check_xlim.isChecked():
print u"limits X are set..."
MPL.set_xlim(eval(str(self.edit_xmin.text())),eval(str(self.edit_xmax.text())))
if self.check_ylim.isChecked():
print u"limits Y are set..."
MPL.set_ylim(eval(str(self.edit_ymin.text())),eval(str(self.edit_ymax.text())))
wdir=str(os.getcwd())
if eval(str(self.edit_inset.text()))!=0:
inset = read_png(wdir + "/insets/"+str(self.edit_inset.text())+".png")
# print "inset file=",inset
imagebox = OffsetImage(inset, zoom = float(eval(str(self.slide_zoom.value())))/100)
ab = AnnotationBbox(imagebox, xy=(float(eval(str(self.slide_posx.value())))/100,float(eval(str(self.slide_posy.value())))/100), xycoords='axes fraction')
#self.axes.add_artist(ab)
MPL.add_artist(ab) #MPL=self.mpl.canvas.ax
# legend verification
if self.check_label.isChecked():
MPL.legend(shadow=True, loc=0, borderaxespad=0.,fancybox=True)
#print "legend enabled"
#! tight layout verification
if self.check_tight.isChecked():
try:
self.mpl.canvas.fig.tight_layout()
# print u"tighted"
except Exception:
# print u"exception in tight layout occured"
pass
#print "legend enabled"
self.mpl.canvas.draw()
def SavePython(self):
"""Saving data as a python script"""
print u"saving python script"
#! GUI for script name selection
shortpath = os.path.basename(filename)
save_path2 = QFileDialog.getSaveFileName(self,"", shortpath +".py")
fpy=open(save_path2,"w")
#! what will be written
fpy.write('#_________________________saved python script_______________ \n')
fpy.write('from __future__ import division \n')
fpy.write('import math \n')
fpy.write('import numpy \n')
fpy.write('import pylab \n')
fpy.write('from pylab import * \n')
fpy.flush()
#! writing datapoints
fpy.write('X=')
#fpy.write(''.join(str(X)))
fpy.write(str(X))
fpy.write('\n')
fpy.write('Y=')
fpy.write(''.join(str(Y)))
fpy.write('\n')
fpy.flush()
#writing configuration
#fpy.write('majorLocatorX=MaxNLocator('+ str(self.sliderXn.value()) +') \n')
#fpy.write('majorLocatorY=MaxNLocator('+ str(self.sliderYn.value()) +') \n')
fpy.write("xlabel('$"+ self.edit_xlabel.text()+"$') \n")
fpy.write("ylabel('$"+ self.edit_ylabel.text()+"$') \n")
#fpy.write('xaxis.set_major_locator(majorLocatorX) \n')
#fpy.write('yaxis.set_major_locator(majorLocatorY) \n')
#plotting
fpy.write("plot(X, Y, 'ro-',")
fpy.write("label='"+str(self.edit_label.text())+"', ")
fpy.write('linewidth=3, ')
fpy.write('picker=2) \n')
#fpy.write("plot(X,Y,'o--') \n")
fpy.write("show() \n")
fpy.flush()
fpy.close()
def SavePlot(self):
shortpath = os.path.basename(filename)
file_choices2 = "svg (*.svg)|*.svg"
path2 = unicode(QFileDialog.getSaveFileName(self,
'Save svg file', str(shortpath[:-5])+'.svg',
file_choices2))
if path2:
self.mpl.canvas.print_figure(path2, dpi=100)
self.statusBar().showMessage('Saved to %s' % path2, 2000)
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self,
'Save png file',str(shortpath[:-5])+ '.png',
file_choices))
if path:
self.mpl.canvas.print_figure(path, dpi=100)
self.statusBar().showMessage('Saved to %s' % path, 2000)
#!=======================================usuall command for starting GUI=============================================
def main():
app = QtGui.QApplication(sys.argv) # create the GUI application
dmw = DesignerMainWindow() # instantiate the main window
dmw.show() # show it
sys.exit(app.exec_())
#! defining used file
if __name__ == "__main__":
print u"used sys.args=",sys.argv
filename = sys.argv[1]
main()
| [
"piotr.laczkowski@gmail.com"
] | piotr.laczkowski@gmail.com |
bfa584732aff660181762e7065a9e57781df0d42 | 696d59bec58386f8daace13f0e07e3ba4cf260ea | /tt-streaming-2.py | 79b1450110610e6f4290237c9987e7306858b8bd | [] | no_license | marcosvilela/twitter-api-study | c4e54f97484246f272330f9192d31db8e543906f | dc5008dcacea8262536771a09707a7cdb170c4b1 | refs/heads/master | 2020-07-23T18:19:32.506353 | 2019-09-10T21:50:48 | 2019-09-10T21:50:48 | 207,664,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | import tweepy
import sys
#This file is not tracked because the credentials are PERSONAL. Just put your credentials file on the same directory and it will be fine
sys.path.insert(1, '/Estudos/TwitterAPI')
import tt_credentials
'''
Part 2: Cursor and pagination. With this, we can access our own tweets,
user tweets, followers or friends from a specific user
It picks up from the first part's code and improves it
'''
class twitterAuthenticator():
def authenticate(self):
#The authentication process is handled by the OAuthHandler
authentication = tweepy.OAuthHandler(tt_credentials.CONSUMER_KEY, tt_credentials.CONSUMER_SECRET)
authentication.set_access_token(tt_credentials.ACCESS_TOKEN, tt_credentials.ACCESS_TOKEN_SECRET)
return authentication
class twitterListener(tweepy.StreamListener):
def __init__(self, target_filename):
self.target_filename = target_filename
def on_data(self, data):
#This method handles the arrival of data on our streamer
f = open(self.target_filename, 'a')
f.write(data)
print(data)
return True
def on_error(self, status):
#This method handles the error while streaming
if status == 420:
#Kills the connection when error 420 (rate limit) happens
return False
print(status)
class twitterStreamer():
def __init__(self):
self.tt_authenticator = twitterAuthenticator()
def stream_tweets(self, target_filename, keywords_list):
#This method handles the authentication and streaming of tweets
#Our listener object
listener = twitterListener(target_filename)
#We create our authentication object
authentication = self.tt_authenticator.authenticate()
#The scream object, passing the listener and the authentication as arguments
stream = tweepy.Stream(authentication, listener)
#We need to filter Tweets acoording to a set of keywords or hashtags
stream.filter(track=keywords_list)
class twitterClient():
def __init__(self, tt_user=None):
self.auth = twitterAuthenticator().authenticate()
self.tt_client = tweepy.API(self.auth)
#None is default because if we don't specify a user, it defaults to ourselves
self.tt_user = tt_user
def fetch_user_tweets(self, num_tweets):
tweetlist = []
#Iterate through the tweets on a user's timeline and gets his tweets.
for tweet in tweepy.Cursor(self.tt_client.user_timeline, id=self.tt_user).items(num_tweets):
tweetlist.append(tweet)
return tweetlist
#We can also use this for getting friendlist. We'll evolve this later
#The only thing different is that we'll use the .friends attribute from the tt_client
if __name__ == '__main__':
ttclient = twitterClient('jairbolsonaro')
print(ttclient.fetch_user_tweets(1))
| [
"marcos.vilela42@hotmail.com"
] | marcos.vilela42@hotmail.com |
a3485fe706a94a1dfb07a5d08341e233d821fee3 | bac2903fab536e1cc7e5847b291c87cc1cbd41c0 | /app.py | bb2e44af3fcfd16144cbb05f9f2d7bf8fee76f94 | [] | no_license | Tssa301/301api_atividade | 21605f590c9f5d9ebe589658f8101f99ae544387 | d4514477a75d5777f37f0b49333aff8ace390981 | refs/heads/master | 2023-03-21T00:12:27.209173 | 2020-07-06T18:08:49 | 2020-07-06T18:08:49 | 277,159,574 | 1 | 0 | null | 2021-03-20T04:34:03 | 2020-07-04T17:52:13 | Python | UTF-8 | Python | false | false | 3,108 | py | from flask import Flask, request
from flask_restful import Resource, Api
from models import Pessoas, Atividades, Usuarios
from flask_httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
app = Flask(__name__)
api = Api(app)
# USUARIOS = {
# 'tiago': '123',
# 'silva': '321'
# }
# @auth.verify_password
# def verificacao(login, senha):
# print('validando usuario')
# print(USUARIOS.get(login) == senha)
# if not (login, senha):
# return False
# return USUARIOS.get(login) == senha
@auth.verify_password
def verificacao(login, senha):
if not (login, senha):
return False
return Usuarios.query.filter_by(login=login, senha=senha).first()
class Pessoa(Resource):
@auth.login_required
def get(self, nome):
pessoa = Pessoas.query.filter_by(nome=nome).first()
try:
response = {'nome':pessoa.nome,
'idade':pessoa.idade,
'id':pessoa.id
}
except AttributeError:
response = {'status': 'error',
'mensagem': 'Pessoa nao encotrada'}
return response
def put(self, nome):
pessoa = Pessoas.query.filter_by(nome=nome).first()
dados = request.json
if 'nome' in dados:
pessoa.nome = dados['nome']
if 'idade' in dados:
pessoa.idade = dados['idade']
pessoa.save()
response = {'id':pessoa.id,
'nome':pessoa.nome,
'idade':pessoa.idade
}
return response
def delete(self, nome):
pessoa = Pessoas.query.filter_by(nome=nome).first()
mensagem = 'Pessoa {} excluida com sucesso'.format(pessoa.nome)
pessoa.delete()
return {'status': 'sucesso', 'mensagem':mensagem}
class ListaPessoas(Resource):
@auth.login_required
def get(self):
pessoas = Pessoas.query.all()
response = [{'ide':i.id, 'nome':i.nome, 'idade':i.idade} for i in pessoas]
return response
def post(self):
dados = request.json
pessoa = Pessoas(nome=dados['nome'], idade=dados['idade'])
pessoa.save()
response = {'id':pessoa.id,
'nome':pessoa.nome,
'idade':pessoa.idade
}
return response
class ListaAtividades(Resource):
def get(self):
atividades = Atividades.query.all()
response = [{'id':i.id, 'nome':i.nome, 'pessoa':i.pessoa} for i in atividades]
return response
def post(self,):
dados = request.json
pessoa = Pessoas.query.filter_by(nome=dados['pessoa']).first()
atividade = Atividades(nome=dados['nome'], pessoa=pessoa)
atividade.save()
response = {'pessoa':atividade.pessoa.nome,
'nome': atividade.nome,
'id':atividade.id
}
return response
api.add_resource(Pessoa, '/pessoa/<string:nome>/')
api.add_resource(ListaPessoas, '/pessoa/')
api.add_resource(ListaAtividades, '/atividades/')
if __name__=='__main__':
app.run(debug=True) | [
"tiago.silva301@gmail.com"
] | tiago.silva301@gmail.com |
3b8d5fa6b4cced71be6df8eb6d0a7e4f9cbb5ac9 | ed1dd7bc3837cf4059a529d71f43b53d7c6a65d8 | /RosieGUI.py | cb83e9b57f6ff93bdfdee5437d9ca7db7b2e8604 | [] | no_license | amininger/rosiethor | dbc290e8684e2b1a73962af0fb84ad6c65956f1e | 789396f08e10d6e46a684622cd95e7d309d9a246 | refs/heads/master | 2021-04-28T14:25:19.807467 | 2019-03-05T16:49:21 | 2019-03-05T16:49:21 | 121,964,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,356 | py | from tkinter import *
import tkinter.font
import sys
from rosiethor import *
class RosieGUI(Frame):
def create_widgets(self):
self.grid(row=0, column=0, sticky=N+S+E+W)
self.columnconfigure(0, weight=3, minsize=600)
self.columnconfigure(1, weight=1, minsize=400)
self.columnconfigure(2, weight=1, minsize=100)
self.rowconfigure(0, weight=10, minsize=400)
self.rowconfigure(1, weight=1, minsize=50)
self.messages_list = Listbox(self, font=("Times", "12"))
self.scrollbar = Scrollbar(self.messages_list)
self.messages_list.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.messages_list.yview)
self.messages_list.grid(row=0, column=0, sticky=N+S+E+W)
self.scrollbar.pack(side=RIGHT, fill=Y)
self.script_frame = Frame(self)
self.script_frame.grid(row=0, column=1, sticky=N+S+E+W)
self.chat_entry = Entry(self, font=("Times", "16"))
self.chat_entry.bind('<Return>', lambda key: self.on_submit_click())
self.chat_entry.bind('<Up>', lambda key: self.scroll_history(-1))
self.chat_entry.bind('<Down>', lambda key: self.scroll_history(1))
self.chat_entry.grid(row=1, column=0, sticky=N+S+E+W)
self.submit_button = Button(self, text="Send", font=("Times", "24"))
self.submit_button["command"] = self.on_submit_click
self.submit_button.grid(row=1, column=1, sticky=N+S+E+W)
self.run_button = Button(self, text="Run", font=("Times", "24"))
self.run_button["command"] = self.on_run_click
self.run_button.grid(row=1, column=2, sticky=N+S+E+W)
def init_soar_agent(self, config_file):
self.agent = RosieThorAgent(self.sim, config_filename=config_file)
self.agent.connectors["language"].register_message_callback(self.receive_message)
self.agent.connect()
self.sim.start(self.agent.scene)
def create_script_buttons(self):
self.script = []
if self.agent.messages_file != None:
with open(self.agent.messages_file, 'r') as f:
self.script = [ line.rstrip('\n') for line in f.readlines() if len(line.rstrip('\n')) > 0 and line[0] != '#']
row = 0
for message in self.script:
button = Button(self.script_frame, text=message[:30], font=("Times", "16"))
button["command"] = lambda message=message: self.send_message(message)
button.grid(row=row, column=0, sticky=N+S+E+W)
row += 1
def send_message(self, message):
self.messages_list.insert(END, message)
self.chat_entry.delete(0, END)
if len(self.message_history) == 0 or self.message_history[-1] != message:
self.message_history.append(message)
self.history_index = len(self.message_history)
self.agent.connectors["language"].send_message(message)
def receive_message(self, message):
self.messages_list.insert(END, message)
def on_submit_click(self):
self.send_message(self.chat_entry.get())
def on_run_click(self):
self.agent.start()
def scroll_history(self, delta):
if self.history_index == 0 and delta == -1:
return
if self.history_index == len(self.message_history) and delta == 1:
return
self.history_index += delta
self.chat_entry.delete(0, END)
if self.history_index < len(self.message_history):
self.chat_entry.insert(END, self.message_history[self.history_index])
def on_exit(self):
self.agent.kill()
root.destroy()
def __init__(self, rosie_config, master=None):
Frame.__init__(self, master, width=800, height=600)
master.columnconfigure(0, weight=1)
master.rowconfigure(0, weight=1)
self.message_history = []
self.history_index = 0
self.create_widgets()
self.sim = Ai2ThorSimulator()
self.init_soar_agent(rosie_config)
self.create_script_buttons()
controller_gui = ControllerGUI(self.sim, master=self)
if len(sys.argv) == 1:
print("Need to specify rosie config file as argument")
else:
root = Tk()
rosie_gui = RosieGUI(sys.argv[1], master=root)
root.protocol("WM_DELETE_WINDOW", rosie_gui.on_exit)
root.mainloop()
| [
"mininger@umich.edu"
] | mininger@umich.edu |
4116173f3381c4d0ec24d7a2542a504531fa2eb0 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/__init___parts/EntryPointNotFoundException.py | cd35b92ca551fc01347a5e98978af60cbbbfdd4f | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | class EntryPointNotFoundException(TypeLoadException,ISerializable,_Exception):
"""
The exception that is thrown when an attempt to load a class fails due to the absence of an entry method.
EntryPointNotFoundException()
EntryPointNotFoundException(message: str)
EntryPointNotFoundException(message: str,inner: Exception)
"""
def add_SerializeObjectState(self,*args):
""" add_SerializeObjectState(self: Exception,value: EventHandler[SafeSerializationEventArgs]) """
pass
def remove_SerializeObjectState(self,*args):
""" remove_SerializeObjectState(self: Exception,value: EventHandler[SafeSerializationEventArgs]) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,message=None,inner=None):
"""
__new__(cls: type)
__new__(cls: type,message: str)
__new__(cls: type,message: str,inner: Exception)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
61340f6da5ef7b70273d93b3f828a320d81eb30b | 893534787465f76688507d88d4fd05e08cceac57 | /cam_calib/main_ui.py | 2297a1d5228a36919a05f4ced545ab39de667b6b | [] | no_license | alin-draghia/CameraCalib | 9015bd9cf079c5ad3b6a9196005242963b65cb4d | b89d5ea74b22ea9e698844098c9ad847ac4c503a | refs/heads/master | 2021-08-14T18:25:21.616916 | 2017-11-16T13:25:12 | 2017-11-16T13:25:12 | 110,833,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,601 | py | import os
import sys
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtOpenGL import *
from PySide.phonon import Phonon
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
import cv2
class MyVertex(QGraphicsItem):
def __init__(self):
self.edge = None
QGraphicsItem.__init__(self)
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges)
self.setCacheMode(self.DeviceCoordinateCache)
p1 = QPointF(-5,-5)
p2 = QPointF(5,5)
self.rect = QRectF(p1, p2)
def paint(self, painter, option, widget):
painter.drawRect(self.rect)
def itemChange(self, change, value):
# notify the parent(aka line) to update
p = self.parentItem()
if p:
p.prepareGeometryChange()
return QGraphicsItem.itemChange(self, change, value)
def boundingRect(self):
return self.rect
def mousePressEvent(self, event):
self.update()
QGraphicsItem.mousePressEvent(self, event)
def mouseReleaseEvent(self, event):
self.update()
QGraphicsItem.mouseReleaseEvent(self, event)
class MyEdge(QGraphicsItem):
def __init__(self, x1, y1, x2, y2, color):
QGraphicsItem.__init__(self)
self.setAcceptedMouseButtons(Qt.NoButton)
self.pen = QPen(color, 1, Qt.DashLine)
self.v1 = MyVertex()
self.v2 = MyVertex()
self.v1.setParentItem(self)
self.v2.setParentItem(self)
self.v1.setPos(x1, y1)
self.v2.setPos(x2, y2)
pass
def boundingRect(self):
r = QRectF()
if self.v1 and self.v2:
p1 = self.v1.pos()
p2 = self.v2.pos()
s = QSizeF(p2.x() - p1.x(), p2.y() - p1.y());
r = QRectF(p1, s).normalized()
return r
def paint(self, painter, option, widget):
if self.v1 and self.v2:
painter.setPen(self.pen)
p1 = self.v1.pos()
p2 = self.v2.pos()
painter.drawLine(p1,p2)
class MyGroundPlane(QGraphicsItem):
def __init__(self):
QGraphicsItem.__init__(self);
ctx = QGLContext.currentContext()
if not ctx:
raise Exception('no current gl context')
# generate the ground plane grid vbo
gp = np.zeros(shape=(9*2*2, 3+3), dtype=np.float32)
for i in range(9):
gp[i*2+0,:]=[-4.0+i, -4.0, 0.0, 1.0, 0.0, 0.0]
gp[i*2+1,:]=[-4.0+i, +4.0, 0.0, 1.0, 0.0, 0.0]
for i in range(9):
gp[9*2+i*2+0,:]=[-4.0, -4.0+i, 0.0, 0.0, 1.0, 0.0]
gp[9*2+i*2+1,:]=[+4.0, -4.0+i, 0.0, 0.0, 1.0, 0.0]
self.ground_plane_vbo = QGLBuffer(QGLBuffer.VertexBuffer)
self.ground_plane_vbo.setUsagePattern(QGLBuffer.StreamDraw)
self.ground_plane_vbo.create()
self.ground_plane_vbo.bind()
self.ground_plane_vbo.allocate(gp.tostring())
self.ground_plane_vbo.release()
# coordonate axes lines
ca = np.array([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [5.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 5.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 5.0, 0.0, 0.0, 1.0]], dtype=np.float32)
self.coord_axes_vbo = QGLBuffer(QGLBuffer.VertexBuffer)
self.coord_axes_vbo.setUsagePattern(QGLBuffer.StreamDraw)
self.coord_axes_vbo.create()
self.coord_axes_vbo.bind()
self.coord_axes_vbo.allocate(ca.tostring())
self.coord_axes_vbo.release()
# coordonate points
cp = np.array([[0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[5.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 5.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 5.0, 0.0, 0.0, 1.0]], dtype=np.float32)
self.coord_pts_vbo = QGLBuffer(QGLBuffer.VertexBuffer)
self.coord_pts_vbo.setUsagePattern(QGLBuffer.StreamDraw)
self.coord_pts_vbo.create()
self.coord_pts_vbo.bind()
self.coord_pts_vbo.allocate(cp.tostring())
self.coord_pts_vbo.release()
vs ="#version 420\n" +\
"in vec3 vert_pos;\n" +\
"in vec3 vert_color;\n" +\
"uniform mat4 mvp;\n" +\
"out vec4 color;\n" +\
"void main() {\n" +\
" gl_Position = mvp * vec4(vert_pos,1.0);" +\
" color = vec4(vert_color,1.0);" +\
"}\n"
fs ="#version 420\n" +\
"in vec4 color;\n" +\
"out vec4 frag_color;\n" +\
"void main() {\n" +\
" frag_color = color;" +\
"}\n"
self.shader_program = QGLShaderProgram()
if self.shader_program.addShaderFromSourceCode(QGLShader.Vertex, vs) and \
self.shader_program.addShaderFromSourceCode(QGLShader.Fragment, fs):
if self.shader_program.link() and \
self.shader_program.bind():
# bam
pass
else:
raise Exception("error link|bind")
else:
raise Exception("error add shader")
self.shader_program.release()
return
def boundingRect(self):
return QRectF(0,0,640,480)
def render_old_style(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, 4.0/3.0, 1.0, 20.0)
glMatrixMode(GL_MODELVIEW)
glTranslatef(0.0, 0.0, -15.0)
glRotatef(-45, 1.0, 0.0, 0.0)
glRotatef(-15, 0.0, 0.0, 1.0)
glLineWidth(1.0)
glBegin(GL_LINES)
glColor3f(1.0,0.0,0.0)
for i in range(9):
glVertex3f((-4.0 + i), -4.0, 0.0)
glVertex3f((-4.0 + i), 4.0, 0.0)
glColor3f(0.0,1.0,0.0)
for i in range(9):
glVertex3f(-4.0, (-4.0 + i), 0.0)
glVertex3f(4.0, (-4.0 + i), 0.0)
glEnd()
glLineWidth(3.0)
glBegin(GL_LINES)
glColor3f(1.0, 0.0, 0.0)
glVertex3f(0,0,0)
glVertex3f(1,0,0)
glColor3f(0.0, 1.0, 0.0)
glVertex3f(0,0,0)
glVertex3f(0,1,0)
glColor3f(0.0, 0.0, 1.0)
glVertex3f(0,0,0)
glVertex3f(0,0,1)
glEnd()
glPointSize(5.0)
glBegin(GL_POINTS)
glColor3f(1.0, 1.0, 1.0)
glVertex3f(0.0, 0.0, 0.0)
glEnd()
glPointSize(1.0)
#glBegin(GL_TRIANGLES)
#glVertex3f(-1.0, -1.0, 0.0)
#glVertex3f(1.0, -1.0, 0.0)
#glVertex3f(0.0, 1.0, 0.0)
#glEnd()
#glBegin(GL_TRIANGLES)
#glVertex3f(-1.0, -1.0, 0.0)
#glVertex3f(1.0, -1.0, 0.0)
#glVertex3f(0.0, 1.0, 0.0)
#glEnd()
return
def render_new_style(self):
if self.shader_program.bind():
self.shader_program.enableAttributeArray("vert_pos")
self.shader_program.enableAttributeArray("vert_color")
self.ground_plane_vbo.bind()
self.shader_program.setAttributeBuffer("vert_pos", GL_FLOAT, 0, 3, 6*4)
self.shader_program.setAttributeBuffer("vert_color", GL_FLOAT, 3*4, 3, 6*4)
self.ground_plane_vbo.release()
P = QMatrix4x4()
P.setToIdentity()
P.perspective(45.0, 4.0/3.0, 1.0, 20.0)
M = QMatrix4x4()
M.setToIdentity()
V = QMatrix4x4()
V.setToIdentity()
V.translate(0.0, 0.0, -15.0)
V.rotate(-45.0, 1.0, 0.0, 0.0)
V.rotate(-15.0, 0.0, 0.0, 1.0)
MVP = P*V*M
self.shader_program.setUniformValue("mvp", MVP)
glLineWidth(1.0)
glDrawArrays(GL_LINES, 0, 9*2*2)
self.coord_axes_vbo.bind()
self.shader_program.setAttributeBuffer("vert_pos", GL_FLOAT, 0, 3, 6*4)
self.shader_program.setAttributeBuffer("vert_color", GL_FLOAT, 3*4, 3, 6*4)
self.coord_axes_vbo.release()
glLineWidth(2.0)
glDrawArrays(GL_LINES, 0, 3*2)
self.coord_pts_vbo.bind()
self.shader_program.setAttributeBuffer("vert_pos", GL_FLOAT, 0, 3, 6*4)
self.shader_program.setAttributeBuffer("vert_color", GL_FLOAT, 3*4, 3, 6*4)
self.coord_pts_vbo.release()
glPointSize(8.0)
glDrawArrays(GL_POINTS, 0, 4)
self.shader_program.disableAttributeArray("vert_pos")
self.shader_program.disableAttributeArray("vert_color")
self.shader_program.release()
return
def paint(self, painter, option, widget):
painter.beginNativePainting()
#self.render_old_style()
self.render_new_style()
painter.endNativePainting()
return
class MyGui(QWidget):
def __init__(self, video_file):
QWidget.__init__(self)
self.video_file = video_file
self.videoPlayer = Phonon.VideoPlayer()
self.graphicsScene = QGraphicsScene()
self.graphicsView = QGraphicsView()
glw = QGLWidget()
glctx = glw.context()
self.graphicsView.setViewport(glw)
self.graphicsView.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.graphicsView.setScene(self.graphicsScene)
# using opencv to get the video width and height
vcap = cv2.VideoCapture(self.video_file)
w = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
h = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
vcap.release()
# need to call this so the ground plane item
# can have access to a initializa gl context
glw.glInit()
glw.makeCurrent()
self.groundPlaneItem = MyGroundPlane()
self.vanisingLines1 = [
MyEdge(100,100,100,200, Qt.blue),
MyEdge(200,100,200,200, Qt.blue)
]
self.vanisingLines2 = [
MyEdge(100,100,200,100, Qt.red),
MyEdge(100,200,200,100, Qt.red)
]
proxy = self.graphicsScene.addWidget(self.videoPlayer)
self.graphicsScene.addItem(self.groundPlaneItem )
for vline in self.vanisingLines1:
self.graphicsScene.addItem(vline)
for vline in self.vanisingLines2:
self.graphicsScene.addItem(vline)
self.videoPlayer.load(Phonon.MediaSource(self.video_file))
self.videoPlayer.play()
self.setFixedSize(w,h)
self.graphicsView.setParent(self)
self.videoPlayer.setGeometry(0,0,w,h)
self.graphicsScene.setSceneRect(0, 0, w, h)
#self.graphicsView.setSceneRect(0,0,w,h)
self.graphicsView.move(0,0)
return
if(__name__ == '__main__'):
app = QApplication([])
video_file = r'x:\DEV\Traffic\6628_h264_1_640x480.avi'
w = MyGui(video_file)
w.show();
app.exec_() | [
"alin.draghia@gmail.com"
] | alin.draghia@gmail.com |
dc68c6aa9884b9c93f4196918328a5aeadf4d6c7 | 9248a1c9e451713885b0525e5db14537bad66cd4 | /cache/country.py | 069b30bc1c4eb8efd3297d54f4969c9984bebb3a | [] | no_license | prathamesh2901/Flask_Corona_App_Read | 0f05af0c2de7470393f7768fa30fe333630dbd75 | 6073822a95bfde187290cbe5c15d8da6f5f24ca6 | refs/heads/master | 2022-09-04T13:57:23.959125 | 2020-06-01T03:43:04 | 2020-06-01T03:43:04 | 262,450,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | #!/usr/local/bin/python3
from rejson import Client, Path
class CountryCache():
def __init__(self, name):
self.name = name
def find_by_country(self):
try:
rj = Client(
host='redis',
port=6379,
decode_responses=True)
return rj.jsonget(self.name)
except:
return None
def cache(self, obj):
rj = Client(
host='redis',
port=6379,
decode_responses=True)
rj.jsonset(self.name, Path.rootPath(), obj)
| [
"prathamesh2901@gmail.com"
] | prathamesh2901@gmail.com |
beff1ee3c57d378902960da431bb43a8cc21ca03 | 2b9d2469129b1bd98d96def0b89323fff0767aff | /Debugging and Tests/lab 3 debugging and tests.py | fb0c257968c049ed02a281ee7ad69849fa6a0e28 | [] | no_license | Phred7/CSCI127-Python | 9eac713d62e8be414804ddb6a76181ec9bbaf617 | 584b97284b8731d0ebca693afcd4fadfcc0c6911 | refs/heads/master | 2021-02-08T01:07:55.654210 | 2020-03-01T05:28:34 | 2020-03-01T05:28:34 | 244,092,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | ##sentence = input("Please enter a sentence to evaluate: ");
##sentence = sentence.lower(); # convert to lowercase
##print(sentence.count("a")+sentence.count("e")+sentence.count("i")+sentence.count("o")+sentence.count("u"));
##def count_vowels_iterative(s):
## l = len(s);
## x = 0;
## v = 0;
## for i in range(0, l):
## print(x);
## if(s[(x)] == "a" or s[(x)] == "e" or s[(x)] == "i" or s[(x)] == "o" or s[(x)] == "u"):
## v=v+1
## else:
## print(False);
## x = x+1;
## return v;
##
##s = "aeioua";
##v = count_vowels_iterative(s);
###print(s[0])
##print("v", v);
def remove_iterative(x):
sTR = x.strip();
result = "";
y = 0;
for i in x:
if(x[y] == " "):
result=result;
#print(result);
else:
result=result+i;
#print(result);
y=y+1;
return result;
x = " h i there p"
print(remove_iterative(x));
| [
"noreply@github.com"
] | Phred7.noreply@github.com |
44b42dfbde5aabbad49f01d0c40eae805b3bd01f | 0d61f90e3a7877e91d72fed71b0895c7070dc046 | /final_project/.history/project/account_app/forms_20210104104511.py | 4936c5cd23264028d4004e9275b0dd27cb819201 | [] | no_license | lienusrob/final_project | 44d7d90dc0b7efc0cf55501549a5af0110d09b3b | 4164769626813f044ec2af3e7842514b5699ef77 | refs/heads/master | 2023-02-10T16:36:33.439215 | 2021-01-05T09:34:01 | 2021-01-05T09:34:01 | 325,002,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py |
from django.forms import ModelForm, widgets
from django.forms import Textarea
from .models import Contact, AnonymousReview
from django import forms
# class ContactForm(forms.Form):
# subject = forms.CharField(max_length=100)
# message = forms.CharField(widget=forms.Textarea)
# sender = forms.EmailField()
# cc_myself = forms.BooleanField(required=False)
class ReviewsFrom(forms.ModelForm)
name = forms.CharField(max_length= 100)
details = forms.CharField(widget=forms.Textarea)
date = forms.DateTimeField(required=True, input_formats=["%Y-%m-%dT%H:%M"]) | [
"lienus.rob@hotmail.de"
] | lienus.rob@hotmail.de |
6fae226dadd3e202c2775cdbf0c4c4a7859dba2f | 9b06dd37c490c1a5a107129e75299680d4b36e71 | /vaeseq/examples/play/agent.py | fd339f3890e972eb81e51bcdcd0ce81734efd572 | [
"Apache-2.0"
] | permissive | ghas-results/vae-seq | 5962c0fc5b84c03bfad666242190c54c1ca00fb6 | 0a1bace02c6bac6ab991ab8203a203d3061615ec | refs/heads/master | 2023-08-26T17:17:20.558233 | 2018-03-23T05:23:52 | 2018-03-23T05:23:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,981 | py | # Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game-playing agent."""
import abc
import sonnet as snt
import tensorflow as tf
from vaeseq import context as context_mod
from vaeseq import util
class AgentBase(context_mod.Context):
"""Base class for input agents."""
def __init__(self, hparams, name=None):
super(AgentBase, self).__init__(name=name)
self._hparams = hparams
self._num_actions = tf.TensorShape([self._hparams.game_action_space])
@property
def output_size(self):
return self._num_actions
@property
def output_dtype(self):
return tf.float32
@abc.abstractmethod
def get_variables(self):
"""Returns the variables used by this Agent."""
class RandomAgent(AgentBase):
"""Produces actions randomly, for exploration."""
def __init__(self, hparams, name=None):
super(RandomAgent, self).__init__(hparams, name=name)
self._dist = tf.distributions.Dirichlet(tf.ones(self._num_actions))
@property
def state_size(self):
return tf.TensorShape([0])
@property
def state_dtype(self):
return tf.float32
def observe(self, observation, state):
return state
def get_variables(self):
return None
def _build(self, input_, state):
del input_ # Not used.
batch_size = tf.shape(state)[0]
return self._dist.sample(batch_size), state
class TrainableAgent(AgentBase):
"""Produces actions from a policy RNN."""
def __init__(self, hparams, obs_encoder, name=None):
super(TrainableAgent, self).__init__(hparams, name=name)
self._agent_variables = None
self._obs_encoder = obs_encoder
with self._enter_variable_scope():
self._policy_rnn = util.make_rnn(hparams, name="policy_rnn")
self._project_act = util.make_mlp(
hparams, layers=[hparams.game_action_space], name="policy_proj")
@property
def state_size(self):
return dict(policy=self._policy_rnn.state_size,
action_logits=self._num_actions,
obs_enc=self._obs_encoder.output_size)
@property
def state_dtype(self):
return snt.nest.map(lambda _: tf.float32, self.state_size)
def get_variables(self):
if self._agent_variables is None:
raise ValueError("Agent variables haven't been constructed yet.")
return self._agent_variables
def observe(self, observation, state):
obs_enc = self._obs_encoder(observation)
rnn_state = state["policy"]
hidden, rnn_state = self._policy_rnn(obs_enc, rnn_state)
action_logits = self._project_act(hidden)
if self._agent_variables is None:
self._agent_variables = snt.nest.flatten(
(self._policy_rnn.get_variables(),
self._project_act.get_variables()))
if self._hparams.explore_temp > 0:
dist = tf.contrib.distributions.ExpRelaxedOneHotCategorical(
self._hparams.explore_temp,
logits=action_logits)
action_logits = dist.sample()
return dict(policy=rnn_state,
action_logits=action_logits,
obs_enc=obs_enc)
def _build(self, input_, state):
if input_ is not None:
raise ValueError("I don't know how to encode any inputs.")
return state["action_logits"], state
| [
"yury.sulsky@gmail.com"
] | yury.sulsky@gmail.com |
1f4509d10a8e05de2dbe02598f9990439939f931 | 73353f1a371ef0a778dff0f0b7cd2405f1d70f22 | /utils/config.py | a9474457636b42e64aaa0a11e78d36f964e9531a | [] | no_license | samueltenka/stronglenses | 917d818aa8e1edecf59ca290c424e588f75579de | 59189e25a65b942d01c00368326abaf00df5cec9 | refs/heads/master | 2021-01-11T00:03:21.848429 | 2017-08-19T06:23:14 | 2017-08-19T06:23:14 | 70,765,017 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | ''' author: sam tenka
date: 2016-11-20
descr: Load config data
'''
try:
with open('config.json') as f:
config = eval(f.read())
except SyntaxError:
print('Uh oh... I couldn\'t parse the config file. Is it typed correctly? --- utils.config ')
except IOError:
print('Uh oh... I couldn\'t find the config file. --- utils.config')
def get(attr, root=config):
''' Return value of specified configuration attribute. '''
node = root
for part in attr.split('.'):
node = node[part]
return node
def test():
''' Ensure reading works '''
assert(get('META.AUTHOR')=='sam tenka')
assert(get('META.AUTHOR')!='samtenka')
print('test passed!')
if __name__=='__main__':
test()
| [
"samtenka@umich.edu"
] | samtenka@umich.edu |
b928aa97ab3a5b9be31f9eba9796e4582a9dd906 | 9502244f1da84990fab430f861360d58e756b757 | /Median Filter.py | e02f1e66b7567f0539dddd7dc2b2b38c37499e21 | [] | no_license | telidevaravind/Image-Filters | 1601ab8fc5d94a75b2c22d5ef4c662f8d6a700c9 | 20020dc5090ea2d6b0049330d644afeab2fc6942 | refs/heads/master | 2020-07-31T03:24:28.441079 | 2019-09-23T23:19:52 | 2019-09-23T23:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | import numpy as np
from scipy.misc import toimage
import cv2
#from matplotlib import pyplot as plt
def median_filter(im):
im = im.flatten('F')
l = len(im)
for i in range(l):
for j in range(0, l - i - 1):
if im[j] > im[j + 1]:
im[j], im[j + 1] = im[j + 1], im[j]
return im[(l//2) +1]
def Median_Filter():
imageFileName = input("enter the image name with absolute path:\n ")
image = cv2.imread(imageFileName, 0)
# print image
# toimage(image).show()
height, width = image.shape
print ('height:\n', height)
print ('width:\n', width)
# print 'channels:\n', channels
m = input('Enter the height of filter:\n')
n = input('Enter the width of filter:\n')
m = int(m)
n = int(n)
print('The size of the filter is: %d * %d\n' % (m, n))
pad = int((n - 1) / 2)
print ('pad:', pad)
image = cv2.copyMakeBorder(image, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
Blur = np.zeros((height, width), int)
for y in np.arange(pad, height + 1):
for x in np.arange(pad, width + 1):
mod = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
# print mod
k = median_filter(mod)
# print k
Blur[y - pad, x - pad] = k
# print Blur
return toimage(Blur).show()
Median_Filter()
| [
"noreply@github.com"
] | telidevaravind.noreply@github.com |
c32c232545c877b74ff269f7a45c39e31f51cf58 | 85550d94a02496dbe6720e53b2f40fb8cbc28a74 | /site-packages/qiniu/rs/test/__init__.py | 5704743b1a519c1d8e37efd1da95ac35256222d3 | [
"Apache-2.0"
] | permissive | davidvon/pipa-pay-server | 37be5092a7973fca77b9f933f64a8a4e3e781614 | 36e3e5c896a05107ca9436416cc246571bdf3f01 | refs/heads/master | 2021-01-09T20:12:19.748383 | 2016-07-26T11:08:05 | 2016-07-26T11:08:05 | 61,994,430 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # -*- coding: utf-8 -*-
import os
import urllib
import qiniu.io
import qiniu.rs
import qiniu.conf
pic = "http://cheneya.qiniudn.com/hello_jpg"
key = 'QINIU_UNIT_TEST_PIC'
def setUp():
qiniu.conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY")
qiniu.conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY")
bucket_name = os.getenv("QINIU_TEST_BUCKET")
policy = qiniu.rs.PutPolicy(bucket_name)
uptoken = policy.token()
f = urllib.urlopen(pic)
_, err = qiniu.io.put(uptoken, key, f)
f.close()
if err is None or err.startswith('file exists'):
print err
assert err is None or err.startswith('file exists')
| [
"davidvon71@163.com"
] | davidvon71@163.com |
3051edfff178f177865fd4e6c7b0d9cd8368f1bc | 3644abd88401651989d7b8488a6ca55725c7baf7 | /Python/bubble_sort/test_bubble_sort.py | 52d2b4a69a54348958c0625adf34044badadf6f0 | [] | no_license | Dpalazzari/codeChallenges | 0e6947b9ffbdaab087fe92b178ed9a6afb11ca9f | 034b23e596492e64ef1a5fc5b49619542b47189c | refs/heads/master | 2021-01-20T08:56:47.014614 | 2017-06-19T22:25:00 | 2017-06-19T22:25:00 | 90,205,333 | 1 | 0 | null | 2017-06-19T22:25:01 | 2017-05-04T00:35:43 | Ruby | UTF-8 | Python | false | false | 422 | py | import unittest
from bubble_sort import Bubble
class BubbleSortTestCase(unittest.TestCase):
def setUp(self):
self.bubble = Bubble()
def test_it_sorts_a_short_list(self):
arr = [5, 8, 1, 6, 14, 2, 0]
result = self.bubble.sort(arr)
self.assertEqual(result, [0, 1, 2, 5, 6, 8, 14])
suite = unittest.TestLoader().loadTestsFromTestCase(BubbleSortTestCase)
unittest.TextTestRunner(verbosity=2).run(suite) | [
"drewpalazzari@hotmail.com"
] | drewpalazzari@hotmail.com |
3d4b7227617613c91a21619aa78a3d3be82ad015 | acf4a2cea9d3f86f4ca6e28a462c9e82b10f53e6 | /myapp/models.py | 887afd76402b6f23f5e2e4d3efb1324e80c4c4f5 | [] | no_license | rahilkadakia/task1 | 7332c2a05e52e32b6b095c76ad33aaae020c1106 | 96327939cc71d34d090a16caf8107d4db8105f24 | refs/heads/master | 2022-12-06T23:44:32.033348 | 2020-08-31T07:05:24 | 2020-08-31T07:05:24 | 291,640,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | from django.db import models
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
class SignUp(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True)
first_name = models.CharField(max_length=100, null=False, blank=False)
last_name = models.CharField(max_length=100, null=False, blank=False)
username = models.CharField(max_length=20, null=False, blank=False, unique=True, default='no_username')
password = models.CharField(max_length=20, null=False, blank=False, unique=True, default='no_password')
email = models.EmailField(max_length=75, null=False, blank=False, unique=True)
phone = PhoneNumberField(null=False, blank=False, unique=True)
newsletter = models.BooleanField(null=False, blank=False)
def __str__(self):
return self.user.username | [
"31380798+rahilkadakia@users.noreply.github.com"
] | 31380798+rahilkadakia@users.noreply.github.com |
947f4a2549bd2c2c272432bc2fdddf5a405255c7 | d29c0ce479b1ce92a965818543112b8d670fb5bf | /packages/python/plotly/plotly/validators/scattergl/marker/line/__init__.py | fc456a0b7cb02e723b3335e56d572fa1e90818e2 | [
"MIT"
] | permissive | Laxminarayen/plotly.py | c04ae4fdc78422d1a5c1a31717b4150fa12985e8 | b1fa50e8adfe358fc2613a17e3e723f4bd9fceeb | refs/heads/master | 2020-06-21T02:21:08.964259 | 2019-07-16T18:13:30 | 2019-07-16T18:13:30 | 197,321,197 | 1 | 0 | MIT | 2019-07-17T05:30:49 | 2019-07-17T05:30:47 | null | UTF-8 | Python | false | false | 6,526 | py | import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="widthsrc", parent_name="scattergl.marker.line", **kwargs
):
super(WidthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="width", parent_name="scattergl.marker.line", **kwargs
):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="reversescale", parent_name="scattergl.marker.line", **kwargs
):
super(ReversescaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="scattergl.marker.line", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
def __init__(
self, plotly_name="colorscale", parent_name="scattergl.marker.line", **kwargs
):
super(ColorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(
self, plotly_name="coloraxis", parent_name="scattergl.marker.line", **kwargs
):
super(ColoraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scattergl.marker.line", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
colorscale_path=kwargs.pop(
"colorscale_path", "scattergl.marker.line.colorscale"
),
**kwargs
)
import _plotly_utils.basevalidators
class CminValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmin", parent_name="scattergl.marker.line", **kwargs
):
super(CminValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmid", parent_name="scattergl.marker.line", **kwargs
):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmax", parent_name="scattergl.marker.line", **kwargs
):
super(CmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CautoValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="cauto", parent_name="scattergl.marker.line", **kwargs
):
super(CautoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="autocolorscale",
parent_name="scattergl.marker.line",
**kwargs
):
super(AutocolorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "style"),
**kwargs
)
| [
"noreply@github.com"
] | Laxminarayen.noreply@github.com |
fccacfc7b5d906a653eb3d749965e6fb19b49bc8 | 7d30160d7e40675faaf71ba011e72002cf3fe4e9 | /Whatsapp Scheduled texts.py | 9c45e17c5623149c6e0dfd46e70772ea9bfb87fe | [] | no_license | ShubhamNarandekar/Practice | 2527881aff3f49affa67a79962d190c81206c118 | ee86eec82716ff5695c25e2813ee086b53cb8ca8 | refs/heads/master | 2022-12-17T01:58:59.925832 | 2020-09-21T12:23:36 | 2020-09-21T12:23:36 | 297,330,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pywhatkit as kit
#kit.add('C:\Users\ssnar\Downloads\chromedriver.exe')
#kit.load_QRcode()
kit.sendwhatmsg('+919096336378', 'zhala ka?',23,39)
print('Sent')
| [
"noreply@github.com"
] | ShubhamNarandekar.noreply@github.com |
e369cf59813dfef92b0064e6c269a5715ba4bf69 | e804627232723ce7886b1b417e277fd274ad048c | /lambda_function.py | 8cfa55db1bf458dc472fec7a40a33a7f80c19e2c | [] | no_license | nittyan/twitter-bot | bb6cdbe0a5968a1f03a255a46d6a5c4fbde05729 | 38c6291a7925f7d737ab37f298aa079eefa5cd47 | refs/heads/main | 2023-06-06T17:05:19.996519 | 2021-07-01T12:09:49 | 2021-07-01T12:09:49 | 382,018,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | import json
import random
import boto3
import tweepy
def lambda_handler(event, context):
text = random_choice_text()
tweet(text)
return {
'statusCode': 200,
'body': json.dumps('Hello from Lambda!')
}
def tweet(text: str):
auth = tweepy.OAuthHandler('${api_key}', '${api_secret_key}')
auth.set_access_token('${access_token}',
'${access_token_secret}')
api = tweepy.API(auth)
api.update_status(text)
def random_choice_text() -> str:
db = boto3.resource('dynamodb')
table = db.Table('tweets')
res = table.scan()
items = res['Items']
texts = []
weights = []
for item in items:
texts.append(item['text'])
weights.append(int(item['weight']))
random.seed()
return random.choices(texts, k=1, weights=weights)[0]
| [
"hnittyan@gmail.com"
] | hnittyan@gmail.com |
31b85c176a0f6030e708c8a7faeb2bcacbedece9 | 5430995cc5bddfd30d1f21e05ece97858562a808 | /luminosity.py | 175dfd712d2681bb5d9d19fe60ab1c2c3dc4f2d9 | [] | no_license | arabeda/Data_Science_in_Astronomy_Luminosity_Project | 5ec087e5764ed9e9b236066191e2dc606776d2cc | ac2f89ded89568b7bd1448ac1eac686ab985ba81 | refs/heads/main | 2023-07-22T01:04:42.598364 | 2021-08-26T18:29:44 | 2021-08-26T18:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import WMAP9 as cosmo
def get_phi_from_txt():
df = pd.read_csv('DataAngleIternalPlateau.txt', sep='\t')
phi = df['phinprompt']
phi_err = df['phinprompterror']
return phi, phi_err
def get_data_from_txt():
df = pd.read_csv('repo_data.txt', sep='\t')
time = df['time']
timeerr1 = df['timeerr1']
timeerr2 = df['timeerr2']
flux = df['flux']
fluxerr1 = df['fluxerr1']
fluxerr2 = df['fluxerr2']
return flux, df, time
def get_L():
phi = -0.786642
z = 2.26
theta = 9.32499174100697
(flux, df, time) = get_data_from_txt()
k = (-0.09 * 0.3 ** (phi) + 10 ** 8 * 10000 ** (phi) * ((1 / ((1 + z))) ** (2 + (phi))))
Dl = cosmo.luminosity_distance(z)
flux = np.log(df['flux']*(np.pi*Dl**2*4*k*(1-np.cos(theta))))
time = df['time'].apply(lambda x: float(x))
time = np.log(time)
df.plot(x='time', y='flux', kind='scatter')
plt.show()
return df['flux']
if __name__ == '__main__':
get_L()
| [
"noreply@github.com"
] | arabeda.noreply@github.com |
a3a80c863a6e05bdd6810d505cfa909fd7c618fe | a0e924167ea8b12b20fbaf266cd7d51b1e444129 | /codes/synthetic.py | ad6448b0a170b6ce7cd94d5c3ce73239130a920e | [
"MIT"
] | permissive | xuhuiustc/time-series-imputation | 579c15331bea77d1cd2f344cf1a966f463f58c5b | cbbab1cbd02e200cd22f5ae0b40805bb9a15f34f | refs/heads/main | 2023-03-23T20:26:26.053309 | 2021-03-22T07:32:19 | 2021-03-22T07:32:19 | 350,239,414 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,871 | py | import numpy as np
import cvxpy as cp
import scipy
import pandas as pd
import matplotlib.pyplot as plt
from scipy.linalg import sqrtm
n = 10 # number of stocks
T_train = 100 # length of the training period (rows that may contain missing values)
T_test = 100 # length of the testing period (fully observed)
T_truetest = 1000 # length of the out-of-sample evaluation period (fully observed)
# Parameters of the Gaussian return-generating process: per-stock means are
# equally spaced in [-0.1, 0.5]; covariance is all-ones plus the identity.
mu = 0.0 + np.linspace(-0.1, 0.5, n)
cov = np.ones((n,n))+np.eye(n)
cov = 1 * cov # overall scale factor on the covariance (currently 1)
#generating in-sample returns from Gaussian
def generate_data():
    """Draw i.i.d. multivariate-Gaussian returns for the whole horizon.

    Returns an array of shape (T_train + T_test + T_truetest, n) sampled
    from N(mu, cov) using the module-level parameters.
    """
    horizon = T_train + T_test + T_truetest
    return np.random.multivariate_normal(mu, cov, horizon)
#generating mask --- MCAR
def missing(miss_prob=0.5):
    """Build an MCAR missingness mask; ``True`` marks a missing entry.

    Training-period entries are missing independently with probability
    ``miss_prob``; the test and out-of-sample rows are fully observed.
    """
    train_mask = np.random.choice([True, False], size = (T_train,n), p = [miss_prob, 1-miss_prob])
    observed_tail = np.full((T_test + T_truetest, n), False)
    return np.vstack((train_mask, observed_tail))
#generating mask --- MAR
#def missing(miss_prob1=0.5, miss_prob2=0.7):
#''True'' represents missing
# indicator = np.random.choice([True,False],size = n, p = [0.5, 0.5])
# mask = np.full((T_train,n),False)
# for i in range(n):
# if indicator[i] == True:
# mask[:,i] = np.random.choice([True, False], size = T_train, p = [miss_prob1, 1-miss_prob1])
# else:
# mask[:,i] = np.random.choice([True, False], size = T_train, p = [miss_prob2, 1-miss_prob2])
# mask = np.vstack((mask, np.full((T_test + T_truetest,n),False))) #no missing value for testing period
# return mask
#generating mask --- BLOCK
#def missing(miss_prob=0.7):
# mask = np.vstack((np.full((np.int(miss_prob*T_train),n),True),np.full((T_train - np.int(miss_prob*T_train) + T_test + T_truetest,n),False)))
# return mask
#generating mask --- MNAR
#def missing(data):
#''True'' represents missing
# mask = np.full((T_train+T_test+T_truetest,n),False)
# for i in range(n):
# for j in range(T_train):
# if np.abs(data[j,i]) > 0.3:
# mask[j,i] = True
# return mask
# Flat prior for the parameter \mu: zero precision matrix, so the prior mean
# below has no influence on the posterior.
mu_p = np.repeat(0.0,n)
covp_inv = np.diag(np.full(n,0.0))
def individualposterior(data, mask, mu0, cov0_inv):
    """Sequentially compute Gaussian posteriors of the mean vector \\mu.

    The posterior is accumulated in information (natural-parameter) form:
    a running precision matrix and precision-weighted mean, seeded by the
    prior ``(mu0, cov0_inv)``.  Rows with missing entries contribute only
    through their observed coordinates, using the known sampling covariance
    ``cov``.  One posterior (mean, covariance) pair is recorded after each
    time step t in [T_train - 1, T_train + T_test - 1], i.e. T_test + 1
    posteriors in total.

    Parameters
    ----------
    data : (T, n) array of (possibly partially missing) observations.
    mask : (T, n) boolean array; True marks a missing entry.
    mu0 : prior mean of \\mu.
    cov0_inv : prior precision matrix of \\mu (zero matrix = flat prior).

    Returns
    -------
    (meanlist, covariancelist) : lists of posterior means and covariances.
    """
    meanlist = []
    covariancelist = []
    covariance_mu_inv = np.copy(cov0_inv)
    mean_mu = np.matmul(covariance_mu_inv, mu0)
    for t in range(T_train + T_test):
        observed = ~mask[t, :]  # True where the entry is observed
        if observed.any():  # rows with everything missing carry no information
            # Precision contribution of the observed block, embedded back
            # into the full n x n matrix (zeros on missing coordinates).
            cov_inv_expand = np.zeros((n, n))
            cov_inv_expand[np.ix_(observed, observed)] = np.linalg.inv(
                cov[np.ix_(observed, observed)])
            covariance_mu_inv += cov_inv_expand
            mean_t = np.zeros(n)
            mean_t[observed] = data[t, observed]
            mean_mu += np.matmul(cov_inv_expand, mean_t)
        if t >= (T_train - 1):
            # Convert from information form back to moment form and record.
            covariance_mu = np.linalg.inv(covariance_mu_inv)
            covariancelist.append(covariance_mu)
            meanlist.append(np.matmul(covariance_mu, mean_mu))
    return meanlist, covariancelist
def consensuscforwardkl(meanlist,covariancelist,delta_r, prediction):
    """Combine Gaussian posteriors via a forward-KL-style convex program.

    Simplex weights over the candidate posteriors are chosen so that, in the
    eigenbasis of the first covariance, the weighted natural-parameter mean
    stays within a radius ``delta`` of the external ``prediction`` while the
    per-direction combined variances (epigraph variables ``gamma``) are
    minimized.  The returned consensus is the precision-weighted mixture of
    the posteriors under the optimal weights.

    Parameters
    ----------
    meanlist : list of posterior mean vectors (length num_posteriors).
    covariancelist : list of posterior covariance matrices.
    delta_r : relative anchoring radius; scaled by the largest coordinate-wise
        gap between the last posterior mean and ``prediction``.
    prediction : external point estimate the consensus mean is anchored to.

    Returns
    -------
    (solution, final_mu, final_sigma) : optimal weights, consensus mean,
    and consensus covariance.
    """
    num_posteriors = len(meanlist)
    n = len(meanlist[0])
    # Define optimization variables: simplex weights plus one epigraph
    # variable gamma[j] per eigendirection bounding the combined variance.
    weights = cp.Variable(num_posteriors)
    gamma = cp.Variable(n)
    # v, s, vt = np.linalg.svd(covariancelist[0].values)
    # Shared eigenbasis taken from the first covariance; every posterior is
    # (approximately) diagonalized in this basis below.
    s, v = np.linalg.eigh(covariancelist[0])
    s_list = []
    # s_list.append(s)
    # s, v = np.linalg.eigh(covariancelist[1].values)
    # s_list.append(s)
    # for i in np.arange(1,num_posteriors,1):
    for i in range(num_posteriors):
        # s_temp[j] = v_j' Sigma_i v_j : variance of posterior i along v_j.
        s_temp = np.zeros(n)
        for j in range(n):
            s_temp[j] = np.inner(v[:,j],np.matmul(covariancelist[i],v[:,j]))
        s_list.append(s_temp)
    # c[i, j]: natural-parameter mean of posterior i along v_j (mean/variance).
    c = np.zeros((num_posteriors,n))
    for i in range(num_posteriors):
        for j in range(n):
            c[i, j] = np.inner(v[:,j],meanlist[i])/s_list[i][j]
    sv_matrix = np.array(s_list)
    inverse_sv = 1.0 / sv_matrix  # per-direction precisions, 1 / (v_j' Sigma_i v_j)
    #objective_fun = [cp.power(cp.sum(cp.multiply(inverse_sv[:,j], weights)),-1) for j in range(n)]
    obj = cp.sum([gamma[j] for j in range(n)])
    # Run optimization
    objective = cp.Minimize(obj)
    # Anchoring radius: delta_r times the largest coordinate-wise gap between
    # the last posterior's mean and the prediction, measured in the v basis.
    delta = delta_r * max([np.abs(c[-1,j]/inverse_sv[-1,j] - v[:,j].dot(prediction)) for j in range(n)])
    constraints = [weights >= 0,
                   cp.sum(weights) == 1]
    for j in range(n):
        # Keep the combined mean along v_j within delta of the prediction;
        # both sides are multiplied by the combined precision to stay convex.
        constraints.append(cp.sum(cp.multiply(c[:,j],weights)) <= (delta + v[:,j].dot(prediction)) * cp.sum(cp.multiply(inverse_sv[:,j], weights)))
        constraints.append(cp.sum(cp.multiply(c[:,j],weights)) >= (-delta + v[:, j].dot(prediction)) * cp.sum(cp.multiply(inverse_sv[:,j], weights)))
        #constraints.append(4 + cp.power(cp.sum(cp.multiply(inverse_sv[:,j], weights))-gamma[j],2)<= cp.power(cp.sum(cp.multiply(inverse_sv[:,j], weights))+gamma[j],2))
        # Second-order-cone encoding of gamma[j] * (combined precision) >= 1,
        # i.e. gamma[j] upper-bounds the combined variance along v_j:
        # with a = sum_i w_i / s_i[j] and g = gamma[j],
        # ||(2, a - g)|| <= a + g  <=>  a * g >= 1.
        A = np.zeros((2,num_posteriors))
        B = np.zeros((2,n))
        B[1,j] = 1
        for i in range(num_posteriors):
            A[1,i] = inverse_sv[i,j]
        C = np.zeros(2)
        C[0] = 2
        constraints.append(cp.SOC(A[1,:]@weights + B[1,:]@gamma, A @ weights - B @ gamma + C))
    prob = cp.Problem(objective, constraints)
    prob.solve()
    solution = weights.value
    #print(solution)
    #print(solution)
    # Consensus in moment form: precision-weighted combination of posteriors.
    final_sigma = scipy.linalg.inv(sum([solution[i] * scipy.linalg.inv(covariancelist[i]) for i in range(num_posteriors)]))
    final_mu = final_sigma.dot(sum([solution[i] * np.inner(scipy.linalg.inv(covariancelist[i]), meanlist[i]) for i in range(num_posteriors)]))
    return solution, final_mu, final_sigma
def consensuswasserstein(meanlist, covariancelist, delta_r, prediction):
    """Two-posterior Wasserstein consensus of the first and last posteriors.

    Only ``meanlist[0]/covariancelist[0]`` and ``meanlist[-1]/
    covariancelist[-1]`` are used.  A 2-dimensional simplex weight vector is
    chosen to minimize a quadratic surrogate built from covariance traces,
    with the weight on the last posterior capped at ``delta_r``.  The
    consensus is the Wasserstein geodesic/barycenter interpolation of the
    two Gaussians under the optimal weights.

    Parameters
    ----------
    meanlist, covariancelist : candidate posterior means and covariances;
        only the first and last entries are used.
    delta_r : upper bound on the weight given to the last posterior.
    prediction : unused in this variant; kept for interface parity with the
        other consensus routines.  NOTE(review): confirm this is intentional.

    Returns
    -------
    (solution, final_mu, final_sigma) : optimal 2-vector of weights,
    consensus mean, and consensus covariance.
    """
    n = len(meanlist[0])
    weights = cp.Variable(2)
    Sigma1 = covariancelist[0]
    Sigma2 = covariancelist[-1]
    # Optimal-transport map between the two Gaussians:
    # Psi = Sigma2^{1/2} (Sigma2^{1/2} Sigma1 Sigma2^{1/2})^{-1/2} Sigma2^{1/2},
    # so that Psi Sigma1 Psi = Sigma2.  np.real discards tiny imaginary parts
    # from sqrtm round-off.
    temp = sqrtm(Sigma2) @ Sigma1 @ sqrtm(Sigma2)
    Psi = sqrtm(Sigma2) @ np.real(scipy.linalg.inv(sqrtm(temp))) @ sqrtm(Sigma2)
    # Quadratic objective matrix from covariance traces.
    P = np.zeros((2,2))
    P[0,0] = np.trace(Sigma1)
    P[1,1] = np.trace(Sigma2)
    P[0,1] = np.trace(Sigma1 @ Psi)
    P[1,0] = np.trace(Sigma1 @ Psi)
    obj = cp.quad_form(weights, P)
    constraints = [weights >= 0,
                   cp.sum(weights) == 1,
                   weights[1] <= delta_r]  # cap on the last posterior's weight
    prob = cp.Problem(cp.Minimize(obj), constraints)
    prob.solve()
    solution = weights.value
    # Wasserstein interpolation of the two Gaussians at the optimal weights.
    final_mu = meanlist[0] * solution[0] + meanlist[-1] * solution[1]
    final_sigma = (solution[0]*np.eye(n) + solution[1] * Psi) @ Sigma1 @ (solution[0]*np.eye(n) + solution[1] * Psi)
    return solution, final_mu, final_sigma
def consensuswasserstein_general(meanlist,covariancelist,delta_r, prediction):
    """Wasserstein-barycenter consensus over all candidate posteriors.

    All covariances are (approximately) diagonalized in the eigenbasis of the
    first one; simplex weights minimize a quadratic formed from the products
    of per-direction standard deviations, subject to the weighted mean lying
    within a radius ``delta`` of the external ``prediction``.  The consensus
    covariance is the squared weighted average of the per-posterior
    "square roots" in that shared basis.

    Parameters
    ----------
    meanlist : list of posterior mean vectors (length num_posteriors).
    covariancelist : list of posterior covariance matrices.
    delta_r : relative anchoring radius; scaled by the Euclidean distance
        between the last posterior mean and ``prediction``.
    prediction : external point estimate the consensus mean is anchored to.

    Returns
    -------
    (solution, final_mu, final_sigma) : optimal weights, consensus mean,
    and consensus covariance.
    """
    num_posteriors = len(meanlist)
    n = len(meanlist[0])
    # Define optimization variables
    weights = cp.Variable(num_posteriors)
    # v, s, vt = np.linalg.svd(covariancelist[0].values)
    # Shared eigenbasis from the first covariance.
    s, v = np.linalg.eigh(covariancelist[0])
    s_list = []
    # for i in np.arange(1,num_posteriors,1):
    for i in range(num_posteriors):
        # s_temp[j] = v_j' Sigma_i v_j : variance of posterior i along v_j.
        s_temp = np.zeros(n)
        for j in range(n):
            s_temp[j] = np.inner(v[:,j],np.matmul(covariancelist[i],v[:,j]))
        s_list.append(s_temp)
    sv_matrix = np.array(s_list)
    # P[i, j] = sum_k sqrt(s_i[k]) * sqrt(s_j[k]) — Gram matrix of the
    # per-direction standard deviations; gives the quadratic objective below.
    P = np.zeros((num_posteriors,num_posteriors))
    for i in range(num_posteriors):
        for j in range(num_posteriors):
            P[i,j] = np.sum(np.multiply(np.sqrt(sv_matrix[i,:]),np.sqrt(sv_matrix[j,:])))
    #obj = 0
    #for i in range(num_posteriors):
    #    for j in range(num_posteriors):
    #        obj += weights[i] * weights[j] * np.sum(np.multiply(np.sqrt(sv_matrix[i,:]),np.sqrt(sv_matrix[j,:])))
    obj = cp.quad_form(weights, P)
    # Anchoring radius relative to the last posterior's distance from the
    # prediction.
    delta = delta_r * np.linalg.norm(meanlist[-1] - prediction)
    constraints = [weights >= 0,
                   cp.sum(weights) == 1]
    # Weighted consensus mean must lie within delta of the prediction.
    temp = 0
    for i in range(num_posteriors):
        temp += weights[i]*meanlist[i]
    constraints.append(cp.norm(temp-prediction)<=delta)
    prob = cp.Problem(cp.Minimize(obj), constraints)
    prob.solve()
    solution = weights.value
    # Consensus mean: weighted average of the posterior means.
    final_mu = np.zeros(n)
    for i in range(num_posteriors):
        final_mu += solution[i]*meanlist[i]
    # Consensus covariance: square of the weighted average of the posterior
    # square-root covariances in the shared eigenbasis.
    final_sigma = np.zeros((n,n))
    for i in range(num_posteriors):
        for j in range(n):
            final_sigma += solution[i] * np.sqrt(sv_matrix[i,j]) * np.outer(v[:,j],v[:,j])
    final_sigma = final_sigma @ final_sigma
    return solution, final_mu, final_sigma
def imputation(data, mask, final_mu, final_sigma):
    """Build multiply-imputed copies of *data* using conditional Gaussian means.

    For each of the 10 imputed datasets, one unconditional mean vector is
    drawn from N(final_mu, final_sigma); fully-missing rows receive that
    draw, partially-missing rows receive the conditional mean given the
    observed entries. Depends on module-level globals ``T_train``, ``n``
    and ``cov`` (presumably the assumed data covariance — verify it should
    not be ``final_sigma`` instead).

    NOTE(review): the conditional covariance is computed but never sampled
    from, so each row is filled with its conditional mean only — confirm
    this is intended for a "multiple imputation" scheme.
    """
    n_imputations = 10  # number of multiply-imputed datasets
    total_time, num_stocks = data.shape
    completed = np.zeros((n_imputations, total_time, num_stocks))
    for k in range(n_imputations):
        filled = np.copy(data)
        draw = np.random.multivariate_normal(final_mu, final_sigma)
        for t in range(T_train):
            row_mask = mask[t, :]
            if np.all(row_mask == np.full(n, True)):
                # Entire row missing: use the unconditional draw.
                filled[t, :] = draw
            elif np.all(row_mask == np.full(n, False)):
                # Entire row observed: nothing to impute.
                pass
            else:
                miss = (row_mask == np.full(n, True))
                obs = (row_mask == np.full(n, False))
                n_miss = len(filled[t, miss])
                filled[t, miss] = np.zeros(n_miss)
                cross = cov[np.ix_(miss, obs)]
                obs_inv = np.linalg.inv(cov[np.ix_(obs, obs)])
                cond_mean = draw[miss] + np.matmul(
                    np.matmul(cross, obs_inv), filled[t, obs] - draw[obs])
                cond_cov = cov[np.ix_(miss, miss)] - np.matmul(
                    cross, np.matmul(obs_inv, np.matrix.transpose(cross)))
                filled[t, miss] = cond_mean
        completed[k, :, :] = filled
    return completed
def Greedy(data1):
    """Evaluate a mean-weighted greedy portfolio on one dataset.

    The portfolio weights are the per-asset means over the first T_train
    rows, L2-normalized. Returns are then computed on the validation
    window (next T_test rows) and the held-out window (next T_truetest
    rows). Depends on module-level globals T_train, T_test, T_truetest.

    Returns (in-sample Sharpe, out-of-sample Sharpe,
             in-sample mean return, out-of-sample mean return).
    """
    data = np.copy(data1)
    train_mean = np.mean(data[:T_train, :], axis=0)
    weights = train_mean / np.linalg.norm(train_mean, 2)

    # Validation-window daily returns.
    returns = np.zeros(T_test)
    for t in np.arange(T_train, T_train + T_test, 1):
        returns[t - T_train] = np.inner(data[t, :], weights)
    sharpe_in = np.mean(returns) / np.std(returns)

    # Held-out-window daily returns.
    returns_o = np.zeros(T_truetest)
    for t in np.arange(T_train + T_test, T_train + T_test + T_truetest, 1):
        returns_o[t - T_train - T_test] = np.inner(data[t, :], weights)
    sharpe_out = np.mean(returns_o) / np.std(returns_o)

    return sharpe_in, sharpe_out, np.mean(returns), np.mean(returns_o)
# ---------------------------------------------------------------------------
# Monte-Carlo experiment: for each synthetic dataset, impute missing values
# with three consensus posteriors (forward-KL, two-posterior Wasserstein
# barycenter, full Wasserstein barycenter) over a grid of trust radii
# delta_r, then score each imputed dataset with the Greedy portfolio rule.
# Finally plot an ECMSE (= variance + squared clipped bias) decomposition.
# ---------------------------------------------------------------------------
n_experiment = 500
m = 10          # number of multiply-imputed datasets (must match imputation())
num_delta = 10  # number of delta_r grid points
# In-sample (i) / out-of-sample (o) mean returns per (experiment,
# imputation, delta) for each of the three consensus methods.
mreturn_i_complex = np.zeros((n_experiment,m,num_delta))
mreturn_o_complex = np.zeros((n_experiment,m,num_delta))
mreturn_i_complex_wb = np.zeros((n_experiment,m,num_delta))
mreturn_o_complex_wb = np.zeros((n_experiment,m,num_delta))
mreturn_i_complex_wb_general = np.zeros((n_experiment,m,num_delta))
mreturn_o_complex_wb_general = np.zeros((n_experiment,m,num_delta))
for k in range(n_experiment):
    print(k)
    data = generate_data()
    mask = missing()
    meanlist,covariancelist = individualposterior(data, mask, mu_p, covp_inv)
    deltalist_complex = np.linspace(0.000, 1.0, num = num_delta)
    for i in range(num_delta):
        #print(i)
        # Forward-KL consensus posterior.
        _,final_mu,final_sigma = consensusforwardkl(meanlist,covariancelist,deltalist_complex[i],meanlist[0])
        completed_data = imputation(data,mask,final_mu,final_sigma)
        # Two-posterior Wasserstein-barycenter consensus.
        _,final_mu_wb,final_sigma_wb = consensuswasserstein(meanlist,covariancelist,deltalist_complex[i],meanlist[0])
        completed_data_wb = imputation(data,mask,final_mu_wb,final_sigma_wb)
        # Full multi-posterior Wasserstein-barycenter consensus.
        _,final_mu_wb_general,final_sigma_wb_general = consensuswasserstein_general(meanlist,covariancelist,deltalist_complex[i],meanlist[0])
        completed_data_wb_general = imputation(data,mask,final_mu_wb_general,final_sigma_wb_general)
        # Score every imputed dataset with the greedy portfolio rule.
        for j in range(m):
            _, _,mreturn_i_complex[k,j,i], mreturn_o_complex[k,j,i] = Greedy(completed_data[j])
            _, _,mreturn_i_complex_wb[k,j,i], mreturn_o_complex_wb[k,j,i] = Greedy(completed_data_wb[j])
            _, _,mreturn_i_complex_wb_general[k,j,i], mreturn_o_complex_wb_general[k,j,i] = Greedy(completed_data_wb_general[j])
# Flatten (experiment, imputation) into one axis per delta so bias terms
# below can be computed across all runs at once.
sds_r = np.zeros((n_experiment * m , num_delta))
for i in range(num_delta):
    sds_r[:,i] = mreturn_i_complex[:,:,i].flatten()
sds_o_r = np.zeros((n_experiment * m , num_delta))
for i in range(num_delta):
    sds_o_r[:,i] = mreturn_o_complex[:,:,i].flatten()
sds_r_wb = np.zeros((n_experiment * m , num_delta))
for i in range(num_delta):
    sds_r_wb[:,i] = mreturn_i_complex_wb[:,:,i].flatten()
sds_o_r_wb = np.zeros((n_experiment * m , num_delta))
for i in range(num_delta):
    sds_o_r_wb[:,i] = mreturn_o_complex_wb[:,:,i].flatten()
sds_r_wb_general = np.zeros((n_experiment * m , num_delta))
for i in range(num_delta):
    sds_r_wb_general[:,i] = mreturn_i_complex_wb_general[:,:,i].flatten()
sds_o_r_wb_general = np.zeros((n_experiment * m , num_delta))
for i in range(num_delta):
    sds_o_r_wb_general[:,i] = mreturn_o_complex_wb_general[:,:,i].flatten()
# Figure 1: ECMSE (variance of the in/out-of-sample gap plus squared
# clipped bias), scaled by 253*253, for all three methods together.
plt.plot(deltalist_complex,253*253*(np.mean(np.power(np.std(mreturn_i_complex-mreturn_o_complex,axis=1),2),axis=0)+np.power(np.maximum(np.mean(sds_r - sds_o_r,axis=0),0),2)),marker="o",markersize=10,label = "ECMSE-KL")
plt.plot(deltalist_complex,253*253*(np.mean(np.power(np.std(mreturn_i_complex_wb-mreturn_o_complex_wb,axis=1),2),axis=0)+np.power(np.maximum(np.mean(sds_r_wb - sds_o_r_wb,axis=0),0),2)),marker="p",markersize=10,linestyle='dashed', label = "ECMSE-WB")
plt.plot(deltalist_complex,253*253*(np.mean(np.power(np.std(mreturn_i_complex_wb_general-mreturn_o_complex_wb_general,axis=1),2),axis=0)+np.power(np.maximum(np.mean(sds_r_wb_general - sds_o_r_wb_general,axis=0),0),2)),marker="s",markersize=10,linestyle='-.', label = "ECMSE-WB-Full")
plt.xlabel(r'$\delta/\delta_{max}$',fontsize=15)
plt.legend()
plt.legend(fontsize=15)
plt.show()
# Figure 2: KL method, ECMSE split into squared-bias and variance parts.
# NOTE(review): the ECBias^2-KL curve is computed from sds_r_wb/sds_o_r_wb
# (the WB arrays) — looks like it should use sds_r/sds_o_r; confirm.
plt.plot(deltalist_complex,253*253*(np.mean(np.power(np.std(mreturn_i_complex-mreturn_o_complex,axis=1),2),axis=0)+np.power(np.maximum(np.mean(sds_r - sds_o_r,axis=0),0),2)),marker="o",markersize=10,label = "ECMSE-KL")
plt.plot(deltalist_complex,253*253*np.power(np.maximum(np.mean(sds_r_wb - sds_o_r_wb,axis=0),0),2),marker="v",markersize=10,color = '#1f77b4', label = "ECBias^2-KL")
plt.plot(deltalist_complex,253*253*np.mean(np.power(np.std(mreturn_i_complex-mreturn_o_complex,axis=1),2),axis=0),marker="^",markersize=10,color='#1f77b4',label = "ECVar-KL")
plt.xlabel(r'$\delta/\delta_{max}$',fontsize=15)
plt.legend()
plt.legend(fontsize=15)
plt.show()
# Figure 3: same decomposition for the two-posterior WB method.
plt.plot(deltalist_complex,253*253*(np.mean(np.power(np.std(mreturn_i_complex_wb-mreturn_o_complex_wb,axis=1),2),axis=0)+np.power(np.maximum(np.mean(sds_r_wb - sds_o_r_wb,axis=0),0),2)),marker="p",markersize=10,linestyle='dashed',color='#ff7f0e',label = "ECMSE-WB")
plt.plot(deltalist_complex,253*253*np.power(np.maximum(np.mean(sds_r_wb - sds_o_r_wb,axis=0),0),2),marker="v",markersize=10,color='#ff7f0e',linestyle='dashed',label = "ECBias^2-WB")
plt.plot(deltalist_complex,253*253*np.mean(np.power(np.std(mreturn_i_complex_wb-mreturn_o_complex_wb,axis=1),2),axis=0),marker="^",markersize=10,color='#ff7f0e',linestyle='dashed',label = "ECVar-WB")
plt.xlabel(r'$\delta/\delta_{max}$',fontsize=15)
plt.legend()
plt.legend(fontsize=15)
plt.show()
# Figure 4: same decomposition for the full WB method.
plt.plot(deltalist_complex,253*253*(np.mean(np.power(np.std(mreturn_i_complex_wb_general-mreturn_o_complex_wb_general,axis=1),2),axis=0)+np.power(np.maximum(np.mean(sds_r_wb_general - sds_o_r_wb_general,axis=0),0),2)),marker="s",markersize=10,linestyle='-.',color='#2ca02c',label = "ECMSE-WB-Full")
plt.plot(deltalist_complex,253*253*np.power(np.maximum(np.mean(sds_r_wb_general - sds_o_r_wb_general,axis=0),0),2),marker="v",markersize=10,color='#2ca02c',linestyle='-.',label = "ECBias^2-WB-Full")
plt.plot(deltalist_complex,253*253*np.mean(np.power(np.std(mreturn_i_complex_wb_general-mreturn_o_complex_wb_general,axis=1),2),axis=0),marker="^",markersize=10,color='#2ca02c',linestyle='-.',label = "ECVar-WB-Full")
plt.xlabel(r'$\delta/\delta_{max}$',fontsize=15)
plt.legend()
plt.legend(fontsize=15)
plt.show()
| [
"noreply@github.com"
] | xuhuiustc.noreply@github.com |
386494348a69dc42f26350743415cea70795bbb9 | a03a7935a191d63bee76fd3b85a61ee27f98904a | /test/tests/databases/bov.py | d2400c2618fb6604ee069a960e95c77fb50876f3 | [] | no_license | cchriste/visit | 57091c4a512ab87efd17c64c7494aa4cf01b7e53 | c72c413f571e56b52fb7221955219f11f4ba19e3 | refs/heads/master | 2020-04-12T06:25:27.458132 | 2015-10-12T15:41:49 | 2015-10-12T15:41:49 | 10,111,791 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,436 | py | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: bov.py
#
# Tests: mesh - 3D rectilinear, multiple domain
# plots - Pseudocolor, Subset, Label, Contour
# operators - Slice
#
# Programmer: Brad Whitlock
# Date: Fri Mar 17 14:37:45 PST 2006
#
# Modifications:
# Brad Whitlock, Thu May 4 14:02:29 PST 2006
# Added testing of INT and DOUBLE BOV files.
#
# ----------------------------------------------------------------------------
def SaveTestImage(name):
    """Save test image *name* at 500x500 — larger than the regular test
    image size, because these images contain a lot of text — then restore
    the previously active save-window settings."""
    previous = GetSaveWindowAttributes()
    attrs = SaveWindowAttributes()
    attrs.width = 500
    attrs.height = 500
    attrs.screenCapture = 0
    Test(name, attrs)
    SetSaveWindowAttributes(previous)
def TestBOVDivide(prefix, db, doSubset):
    """Open BOV database *db* and run the shared battery of checks:
    a 3D overview, a zone-count query, three labeled slices at crucial
    Z intercepts, a transparency render, and an isosurface whose tears
    would reveal bad ghost zones. *prefix* names the baseline images;
    *doSubset* selects a Subset plot of the bricks instead of a
    Pseudocolor plot of the data."""
    # Take a picture to make sure that the division took. There will be
    # a lot of bricks.
    OpenDatabase(db)
    if doSubset:
        AddPlot("Subset", "bricks")
        subAtts = SubsetAttributes()
        subAtts.legendFlag = 0
        SetPlotOptions(subAtts)
    else:
        AddPlot("Pseudocolor", "myvar")
    DrawPlots()
    v = View3DAttributes()
    v.viewNormal = (0.534598, 0.40012, 0.744385)
    v.focus = (15, 15, 15)
    v.viewUp = (-0.228183, 0.916444, -0.32873)
    v.viewAngle = 30
    v.parallelScale = 8.66025
    v.nearPlane = -17.3205
    v.farPlane = 17.3205
    v.imagePan = (0, 0)
    v.imageZoom = 1
    v.perspective = 1
    v.eyeAngle = 2
    v.centerOfRotationSet = 0
    v.centerOfRotation = (15, 15, 15)
    SetView3D(v)
    Test(prefix + "00")
    # Make sure there are the right number of zones.
    Query("NumZones")
    TestText(prefix + "01", GetQueryOutputString())
    # Let's slice a few times to make sure that crucial areas have the
    # right values
    AddPlot("Mesh", "mesh")
    AddPlot("Label", "myvar")
    L = LabelAttributes()
    L.textHeight1 = 0.03
    L.textHeight2 = 0.03
    SetPlotOptions(L)
    SetActivePlots((0,1,2))
    AddOperator("Slice")
    s = SliceAttributes()
    s.originType = s.Intercept  # Point, Intercept, Percent, Zone, Node
    s.originIntercept = 10.001
    s.normal = (0, 0, 1)
    s.axisType = s.ZAxis  # XAxis, YAxis, ZAxis, Arbitrary
    s.upAxis = (0, 1, 0)
    s.project2d = 1
    SetOperatorOptions(s)
    DrawPlots()
    v2 = GetView2D()
    v2.windowCoords = (12.0201, 13.0004, 9.99781, 10.9888)
    v2.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    v2.fullFrameActivationMode = v2.Auto  # On, Off, Auto
    v2.fullFrameAutoThreshold = 100
    SetView2D(v2)
    SaveTestImage(prefix+"02")
    # Move to another slice on the far edge that will have the max zone #
    s.originIntercept = 19.998
    SetOperatorOptions(s)
    v3 = View2DAttributes()
    v3.windowCoords = (19.2017, 20.0179, 19.1966, 20.0217)
    v3.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    v3.fullFrameActivationMode = v3.Auto  # On, Off, Auto
    v3.fullFrameAutoThreshold = 100
    SetView2D(v3)
    SaveTestImage(prefix+"03")
    # Move to another slice in the middle.
    s.originIntercept = 15.01
    SetOperatorOptions(s)
    v4 = View2DAttributes()
    v4.windowCoords = (14.6419, 15.361, 15.638, 16.365)
    v4.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    v4.fullFrameActivationMode = v4.Auto  # On, Off, Auto
    v4.fullFrameAutoThreshold = 100
    SetView2D(v4)
    SaveTestImage(prefix+"04")
    DeleteAllPlots()
    # Test that ghost zones are right by rendering semi-transparently.
    AddPlot("Pseudocolor", "myvar")
    p = PseudocolorAttributes()
    p.SetOpacityType(p.Constant)
    p.opacity = 0.25
    SetPlotOptions(p)
    DrawPlots()
    v5 = View3DAttributes()
    v5.viewNormal = (0.772475, 0.402431, 0.491255)
    v5.focus = (15, 15, 15)
    v5.viewUp = (-0.355911, 0.915018, -0.18992)
    v5.viewAngle = 30
    v5.parallelScale = 8.66025
    v5.nearPlane = -17.3205
    v5.farPlane = 17.3205
    v5.imagePan = (-0.0253114, 0.0398304)
    v5.imageZoom = 1.20806
    v5.perspective = 1
    v5.eyeAngle = 2
    v5.centerOfRotationSet = 0
    v5.centerOfRotation = (15, 15, 15)
    SetView3D(v5)
    Test(prefix+"05")
    # Zoom in on a contour plot to make sure that there are no tears.
    # This means that the ghost zones were created properly.
    ClearWindow()
    p.SetOpacityType(p.FullyOpaque)
    SetPlotOptions(p)
    AddOperator("Isosurface")
    iso = IsosurfaceAttributes()
    iso.variable = "radial"
    SetOperatorOptions(iso)
    DrawPlots()
    v6 = View3DAttributes()
    v6.viewNormal = (0.373168, 0.412282, 0.831125)
    v6.focus = (15, 15, 15)
    v6.viewUp = (-0.181836, 0.910964, -0.370244)
    v6.viewAngle = 30
    v6.parallelScale = 8.66025
    v6.nearPlane = -17.3205
    v6.farPlane = 17.3205
    v6.imagePan = (0.0994254, 0.0810457)
    v6.imageZoom = 1.94126
    v6.perspective = 1
    v6.eyeAngle = 2
    v6.centerOfRotationSet = 0
    v6.centerOfRotation = (15, 15, 15)
    SetView3D(v6)
    Test(prefix+"06")
    DeleteAllPlots()
    CloseDatabase(db)
def TestBOVType(bovtype, prefixes):
    """Run the BOV divide battery for one data type (FLOAT/DOUBLE/INT):
    first the undivided file, then the two variants that the BOV plugin
    decomposes into smaller bricks (multiple domains that can be
    processed in parallel)."""
    cases = (
        ("Reading BOV file of %s",
         "bov_test_data/%s_indices.bov", 0),
        ("Decomposing BOV of %s into smaller bricks",
         "bov_test_data/%s_indices_div.bov", 1),
        ("Decomposing BOV of %s with small header into smaller bricks",
         "bov_test_data/%s_indices_div_with_header.bov", 1),
    )
    for prefix, (section_fmt, path_fmt, subset) in zip(prefixes, cases):
        TestSection(section_fmt % bovtype)
        TestBOVDivide(prefix, data_path(path_fmt) % bovtype, subset)
def main():
    """Define helper expressions, then run the BOV tests for each type."""
    # dx/dy/dz are offsets from the dataset center (15,15,15); 'radial'
    # is the distance from that center, used by the isosurface test.
    expressions = (
        ("x", "coord(mesh)[0]"),
        ("y", "coord(mesh)[1]"),
        ("z", "coord(mesh)[2]"),
        ("dx", "x - 15."),
        ("dy", "y - 15."),
        ("dz", "z - 15."),
        ("radial", "sqrt(dx*dx + dy*dy + dz*dz)"),
    )
    for expr_name, expr_def in expressions:
        DefineScalarExpression(expr_name, expr_def)
    # Baseline image prefixes: bov_0_..bov_2_ for FLOAT, bov_3_..bov_5_
    # for DOUBLE, bov_6_..bov_8_ for INT.
    for i, bovtype in enumerate(("FLOAT", "DOUBLE", "INT")):
        base = 3 * i
        TestBOVType(bovtype, ("bov_%d_" % base,
                              "bov_%d_" % (base + 1),
                              "bov_%d_" % (base + 2)))
    Exit()

main()
| [
"bonnell@18c085ea-50e0-402c-830e-de6fd14e8384"
] | bonnell@18c085ea-50e0-402c-830e-de6fd14e8384 |
52c5abfa9f36b345e7f6976b1ad49a3d735a1b40 | ec5a6872b9f1dd7dbf08caf79336954528289f7c | /src/__kinoa__/2020-02-18_18-07-00_Exp0/model0.py | ffe063722ef465e12534f6e19306c43c5877f525 | [
"MIT"
] | permissive | oleg-panichev/WiDS-Datathon-2020-Second-place-solution | 8fbba30d96890f7cac776348bcfb06fbe2781d2b | fce85710ebb8c3cb0235d0698cc6fbb1e1ab3fa5 | refs/heads/master | 2023-07-20T05:15:17.989663 | 2022-06-08T15:31:41 | 2022-06-08T15:31:41 | 243,058,108 | 11 | 6 | MIT | 2023-07-06T21:51:25 | 2020-02-25T17:26:55 | Python | UTF-8 | Python | false | false | 26,970 | py | import datetime
import gc
import glob
import numpy as np
import os
import pandas as pd
os.environ['KMP_DUPLICATE_LIB_OK']='True' # MacOS fix for libomp issues (https://github.com/dmlc/xgboost/issues/1715)
import lightgbm as lgb
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import KFold, RepeatedKFold, GroupKFold, StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import NuSVC
from tqdm import tqdm as tqdm
from kinoa import kinoa
from scipy.stats import ttest_ind, ks_2samp
from sklearn.impute import SimpleImputer
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
# from utils import nanmin, nanmax
def dprint(*args, **kwargs):
    """Print *args* (space-joined, str()-converted) behind a
    "[YYYY-MM-DD HH:MM]" timestamp prefix; **kwargs pass through to print."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    print("[{}] ".format(stamp) + " ".join(map(str, args)), **kwargs)
# --- Run configuration and data loading ---------------------------------
dprint('PID: {}'.format(os.getpid()))

script_id = 0
data_path = '../input/'
id_col = 'encounter_id'          # row identifier shared by all csv files
target_col = 'hospital_death'    # binary prediction target

fillna_with_est = True#False     # fill missing raw values with offline estimates
train_lgbm = True
train_xgb = False
train_lr = False
# train_catboost = False

# Pseudo-labeling: optionally augment train with confidently-predicted
# unlabeled rows taken from a previous submission.
use_pl = False
pl_path = '__kinoa__/2020-01-22_16-48-56_Exp0/submission0.csv'

train = pd.read_csv(os.path.join(data_path, 'training_v2.csv'))
test = pd.read_csv(os.path.join(data_path, 'unlabeled.csv'))

if use_pl:
    df = pd.read_csv(pl_path)
    test.drop([target_col], axis=1, inplace=True)
    df_pl = test.merge(df, on=id_col, how='left')
    # Keep only high-confidence predictions and harden them to 0/1.
    df_pl = df_pl[(df_pl[target_col] < 0.1) | (df_pl[target_col] > 0.9)]
    df_pl[target_col] = np.round(df_pl[target_col].values)
    train = pd.concat([train, df_pl], axis=0)

# Categorical feature names come from the official data dictionary
# (string- and binary-typed variables).
fd = pd.read_csv(os.path.join(data_path, 'WiDS Datathon 2020 Dictionary.csv'))
fd = fd[(fd['Data Type'] == 'string') | (fd['Data Type'] == 'binary')]
cat_features = list(fd['Variable Name'].values)

# BUG FIX: the original removed entries from cat_features while iterating
# over it (for c in cat_features: ... cat_features.remove(c)), which skips
# the element immediately after each removal. Build a filtered list instead.
cat_features = [c for c in cat_features
                if c in train.columns and c != target_col]
print(f'cat_features: {cat_features} ({len(cat_features)})')
def add_noise(series, noise_level):
    """Multiply *series* elementwise by (1 + noise_level * N(0,1) noise)."""
    jitter = np.random.randn(len(series))
    return series * (1 + noise_level * jitter)
def target_encode(trn_series=None,
                  tst_series=None,
                  target=None,
                  min_samples_leaf=1,
                  smoothing=1,
                  noise_level=0):
    """
    Smoothed mean target encoding, as in Micci-Barreca (2001),
    https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf

    Each category is mapped to a blend of the global target mean (prior)
    and the per-category target mean, weighted by a sigmoid of the
    category count:

        w = 1 / (1 + exp(-(count - min_samples_leaf) / smoothing))

    Categories unseen in train fall back to the prior; optional
    multiplicative Gaussian noise (noise_level) is applied to both outputs.

    trn_series : training categorical feature as a pd.Series
    tst_series : test categorical feature as a pd.Series
    target     : target data as a pd.Series
    Returns (encoded_train_series, encoded_test_series).
    """
    assert len(trn_series) == len(target)
    assert trn_series.name == tst_series.name

    col = trn_series.name
    stats = pd.concat([trn_series, target], axis=1) \
        .groupby(by=col)[target.name].agg(["mean", "count"])

    # Sigmoid blend weight per category, then the blended average.
    weight = 1 / (1 + np.exp(-(stats["count"] - min_samples_leaf) / smoothing))
    prior = target.mean()
    stats[target.name] = prior * (1 - weight) + stats["mean"] * weight
    stats.drop(["mean", "count"], axis=1, inplace=True)

    lookup = stats.reset_index().rename(
        columns={'index': target.name, target.name: 'average'})

    def _encode(series):
        # Left-join the per-category averages, fall back to the prior for
        # unseen categories, and restore the index pd.merge drops.
        encoded = pd.merge(
            series.to_frame(col), lookup, on=col, how='left'
        )['average'].rename(col + '_mean').fillna(prior)
        encoded.index = series.index
        # Inlined add_noise(): multiplicative Gaussian jitter.
        return encoded * (1 + noise_level * np.random.randn(len(encoded)))

    return _encode(trn_series), _encode(tst_series)
# Drop columns that are constant (NaN counted as a value) — first those
# constant in train, then those constant in test; they carry no signal.
constant_cols = [c for c in train.columns
                 if train[c].nunique(dropna=False) < 2]
print(f'constant_cols in train: {constant_cols}')
train.drop(constant_cols, axis=1, inplace=True)
test.drop(constant_cols, axis=1, inplace=True)

constant_cols = [c for c in test.columns
                 if c != target_col and test[c].nunique(dropna=False) < 2]
print(f'constant_cols in test: {constant_cols}')
train.drop(constant_cols, axis=1, inplace=True)
test.drop(constant_cols, axis=1, inplace=True)
# imputer = SimpleImputer(missing_values=np.nan, strategy='median', copy=False)
# imputer.fit(train.values)
# # output is in numpy, so convert to df
# train = pd.DataFrame(imp_mean.transform(train), columns=train.columns)
# test = pd.DataFrame(imp_mean.transform(test), columns=test.columns)
# Add estimated variables to the dataset
# est_cols = [
# {
# 'name': 'weight',
# 'fillna': False,
# },
# {
# 'name': 'height',
# 'fillna': False,
# },
# {
# 'name': 'apache_4a_hospital_death_prob',
# 'fillna': False,
# },
# {
# 'name': 'apache_4a_icu_death_prob',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'urineoutput_apache',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'bmi',
# 'fillna': True, #False,
# }, # Worse
# {
# 'name': 'glucose_apache',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'age',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'gender',
# 'fillna': True,
# },
# {
# 'name': 'apache_2_diagnosis',
# 'fillna': True,
# },
# {
# 'name': 'd1_heartrate_min',
# 'fillna': False,
# },
# {
# 'name': 'd1_lactate_min',
# 'fillna': False,
# },
# {
# 'name': 'd1_wbc_min',
# 'fillna': False,
# },
# # 2020 02 17
# {
# 'name': 'hematocrit_apache',
# 'fillna': False,
# },
# {
# 'name': 'bun_apache',
# 'fillna': False,
# },
# {
# 'name': 'creatinine_apache',
# 'fillna': False,
# },
# {
# 'name': 'sodium_apache',
# 'fillna': False,
# },
# ]
# Merge per-variable estimates produced offline (features_est1/<name>_est.csv,
# where [:-8] strips the "_est.csv" suffix). Each file contributes a
# "<name>_est" column; when 'fillna' is set it is used to fill missing
# values of the raw column and then dropped, otherwise it stays as an
# extra feature.
est_cols = [
    {'name': os.path.basename(path)[:-8], 'fillna': True}
    for path in glob.glob('features_est1/*.csv')
]
dprint(f'len(est_cols): {len(est_cols)}')
print(est_cols)

for spec in est_cols:
    name = spec['name']
    df = pd.read_csv(f'features_est1/{name}_est.csv',
                     usecols=['encounter_id', name + '_est'])
    train = train.merge(df, on=id_col, how='left')
    test = test.merge(df, on=id_col, how='left')
    if spec['fillna']:
        train.loc[train[name].isnull(), name] = train[name + '_est']
        test.loc[test[name].isnull(), name] = test[name + '_est']
        train.drop([name + '_est'], axis=1, inplace=True)
        test.drop([name + '_est'], axis=1, inplace=True)
dprint(train.shape, test.shape)
# Columns that come in paired "<x>_min"/"<x>_max" form (note: the check
# replaces every 'min' occurrence, matching the original behavior).
min_max_cols = [c for c in train.columns
                if '_min' in c and c.replace('min', 'max') in train.columns]
print(f'min_max_cols: {min_max_cols} ({len(min_max_cols)})')
# Extract features
def extract_features(df):
    """Add engineered features to *df* in place and return the list of
    newly created column names.

    Depends on module-level globals ``min_max_cols`` (paired min/max
    measurement columns) and ``fillna_with_est`` (whether raw bmi is
    recomputed from weight/height or the *_est variants are added).
    """
    cols = set(df.columns)
    # (several previously-tried minmax/mean vital-sign features were left
    # commented out here and have been removed for readability)
    # Repair swapped pairs: force each *_min column to hold the smaller of
    # the (min, max) pair and *_max the larger, NaN-safe.
    for c in min_max_cols:
        vals = df[[c, c.replace('_min', '_max')]].values.copy()
        df[c] = np.nanmin(vals, axis=1)
        df[c.replace('_min', '_max')] = np.nanmax(vals, axis=1)
    # Range, midpoint and (two-sample) std for every min/max pair.
    for c in min_max_cols:
        df[c + 'max'] = df[c.replace('min', 'max')] - df[c]
        df[c.replace('min', 'mean')] = (df[c.replace('min', 'max')] + df[c])/2
        df[c.replace('min', 'std')] = np.nanstd(df[[c, c.replace('min', 'max')]].values, axis=1)
    # Ratio features built on bmi and the APACHE death probabilities.
    # df['abmi'] = df['age']*100*100*df['weight']/df['height']/df['height']
    df['abmi'] = df['age']/df['bmi']
    df['ahdp_bmi'] = df['apache_4a_hospital_death_prob']/df['bmi']
    df['aidp_bmi'] = df['apache_4a_icu_death_prob']/df['bmi']
    # df['apache_4a_hospicu_death_prob'] = df['apache_4a_hospital_death_prob'] + df['apache_4a_icu_death_prob']
    # df['apache_4a_hospicu_death_prob_m'] = df['apache_4a_hospital_death_prob'] * df['apache_4a_icu_death_prob']
    df['apache_4a_hospicu_death_prob_d'] = df['apache_4a_hospital_death_prob'] / df['apache_4a_icu_death_prob']
    # Coarse buckets used later as group-by keys for aggregate features.
    df['age_group'] = df['age']//5
    df['weight_group'] = df['weight']//5
    df['hr_a'] = df['d1_heartrate_max']/df['age']
    df['hr_w'] = df['d1_heartrate_max']/df['weight']
    if fillna_with_est:
        # Recompute bmi from (possibly estimate-filled) weight/height.
        df['bmi'] = 100*100*df['weight']/df['height']/df['height']
    else:
        # Keep raw bmi and add variants computed from the *_est columns.
        df['bmi_w_est'] = 100*100*df['weight_est']/df['height']/df['height']
        df['bmi_h_est'] = 100*100*df['weight']/df['height_est']/df['height_est']
        df['bmi_wh_est'] = 100*100*df['weight_est']/df['height_est']/df['height_est']
    # (commented-out experiments removed for readability: age_category
    # bucketing, a composite patient_id, -1 -> NaN for the APACHE death
    # probabilities, a min-heart-rate flag, temperature rescaling, and
    # concatenated bodysystem / hospital-icu id features)
    new_cols = list(set(df.columns) - cols)
    return new_cols
# Apply feature engineering, then process train+test jointly so label
# encodings and group aggregates are consistent across both frames.
new_cols = extract_features(train)
extract_features(test)
train['is_test'] = 0
test['is_test'] = 1
df_all = pd.concat([train, test], axis=0)
dprint('Label Encoder...')
# Label-encode every object-typed column (NaNs become the literal string
# 'nan' via astype(str), i.e. they get their own category).
cols = [f_ for f_ in df_all.columns if df_all[f_].dtype == 'object']
print(cols)
cnt = 0
for c in tqdm(cols):
    if c != id_col:
        # print(c)
        le = LabelEncoder()
        # nan_idx = df_all[c].isnull()
        df_all[c] = le.fit_transform(df_all[c].astype(str))
        # df_all.loc[nan_idx, c] = np.nan
        cnt += 1
        del le
dprint('len(cols) = {}'.format(cnt))
cols = set(df_all.columns)
# Group-by aggregate features: for each grouping key (gf) and each
# numeric feature (ff), attach the group's mean/std/min/max.
gfs = ['hospital_id', 'icu_id', 'age_group', 'apache_3j_diagnosis', 'gender', 'ethnicity', 'apache_3j_bodysystem', 'weight_group']#, 'icu_type']#, 'apache_2_bodysystem'] #+ \
# gfs += cat_features
# gfs = list(set(gfs))
# ['hospital_admit_source', 'icu_admit_source', 'icu_stay_type', 'icu_type', 'apache_2_bodysystem']
# ffs = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi']#, 'd1_heartrate_min']
ffs = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi', 'weight', 'height', 'd1_heartrate_min', 'h1_heartrate_min'] # 'bmi_w_est', 'bmi_h_est', 'bmi_wh_est',
# ffs += []
for gf in tqdm(gfs):
    if gf in df_all.columns:
        for ff in ffs:
            g = df_all.groupby(gf)[ff].agg(['mean', 'std', 'min', 'max']).reset_index()
            g.rename({'mean': f'{gf}_{ff}__mean', 'std': f'{gf}_{ff}__std', 'min': f'{gf}_{ff}__min', 'max': f'{gf}_{ff}__max'}, axis=1, inplace=True)
            df_all = df_all.merge(g, on=gf, how='left')
g_cols = list(set(df_all.columns) - cols)
# Split back into train/test.
train = df_all.loc[df_all['is_test'] == 0].drop(['is_test'], axis=1)
test = df_all.loc[df_all['is_test'] == 1].drop(['is_test'], axis=1)
del df_all
gc.collect()
# # Fill nans
# train.fillna(train.mean(), inplace=True)
# test.fillna(train.mean(), inplace=True)
# Full candidate feature list (id and target excluded).
features = list(train.columns.values)
features.remove(id_col)
features.remove(target_col)
# --- Cross-validation and model hyperparameters --------------------------
cnt = 0
p_buf = []
n_splits = 8
n_repeats = 1
kf = RepeatedKFold(
    n_splits=n_splits,
    n_repeats=n_repeats,
    random_state=0)
# kf = StratifiedKFold(
#     n_splits=n_splits,
#     random_state=0)
err_buf = []
undersampling = 0
lgb_params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'auc',
    'max_depth': 8,
    'learning_rate': 0.05,
    'feature_fraction': 0.85,
    'bagging_fraction': 0.85,
    'bagging_freq': 5,
    'lambda_l1': 1.0,
    'lambda_l2': 1.0,
    'verbose': -1,
    'num_threads': 4,
    'num_leaves': 20, #256
    # 'max_bin': 1312,
    # 'num_leaves': 1111,
}
xgb_params = {
    'max_depth': 9,
    'eta': 0.05,
    'objective': 'binary:logistic',
    'subsample': 0.85,
    'colsample_bytree': 0.85,
    'alpha': 1,
    'lambda': 1,
    'eval_metric': 'auc',
    'nthread': 4,
}
# Columns excluded from the model matrix (ids, target, leak-prone ids).
cols_to_drop = [
    id_col,
    target_col,
    'patient_id',
    'hospital_id',
] + ['icu_id', 'ethnicity']
# cols_to_use = features
X = train.drop(cols_to_drop, axis=1, errors='ignore')
y = train[target_col].values
X_test = test.drop(cols_to_drop, axis=1, errors='ignore')
id_test = test[id_col].values
# feature_names = ['d1_bun_min', 'd1_lactate_min', 'apache_4a_hospital_death_prob_est', 'd1_spo2_mean',
# 'bmi', 'apache_4a_icu_death_prob', 'd1_heartrate_min', 'd1_heartrate_max', 'age', 'urineoutput_apache', 'd1_temp_max', 'heart_rate_apache',
# 'd1_wbc_min', 'icu_id', 'hospital_id', 'apache_2_diagnosis', 'apache_3j_diagnosis', 'd1_platelets_min', 'pre_icu_los_days',
# 'glucose_apache', 'd1_resprate_min', 'd1_glucose_min', 'creatinine_apache', 'wbc_apache', 'd1_sodium_max'] + \
# ['bmi', 'icu_admit_source', 'apache_2_diagnosis', 'apache_3j_diagnosis', 'arf_apache', 'intubated_apache', 'ventilated_apache', 'cirrhosis', 'hepatic_failure', 'leukemia', 'solid_tumor_with_metastasis'] + \
# ['gcs_motor_apache', 'ventilated_apache', 'heart_rate_apache', 'wbc_apache', 'gcs_verbal_apache', 'arf_apache', 'glucose_apache', 'albumin_apache', 'resprate_apache', 'intubated_apache', 'map_apache', 'apache_4a_hospicu_death_prob', 'urineoutput_apache_est', 'apache_4a_icu_death_prob_est', 'glucose_apache_est', 'gcs_unable_apache', 'hematocrit_apache', 'bilirubin_apache', 'creatinine_apache', 'apache_3j_diagnosis', 'ph_apache', 'fio2_apache', 'apache_post_operative', 'apache_4a_hospital_death_prob_est', 'sodium_apache', 'apache_4a_hospital_death_prob', 'apache_2_bodysystem', 'temp_apache', 'apache_2_diagnosis', 'urineoutput_apache', 'paco2_apache', 'bun_apache'] + \
# ['h1_temp_max', 'h1_resprate_min', 'h1_diasbp_min', 'h1_spo2_max', 'h1_arterial_ph_max', 'h1_bun_max', 'h1_platelets_max', 'h1_temp_min', 'h1_calcium_min', 'h1_lactate_min', 'h1_spo2_min', 'h1_heartrate_max', 'h1_diasbp_invasive_min', 'h1_sysbp_min', 'h1_arterial_pco2_max', 'h1_inr_max', 'h1_glucose_max', 'h1_sysbp_noninvasive_max', 'h1_pao2fio2ratio_max', 'h1_arterial_pco2_min'] + \
# ['d1_temp_max', 'd1_resprate_min', 'd1_diasbp_min', 'd1_spo2_max', 'd1_arterial_ph_max', 'd1_bun_max', 'd1_platelets_max', 'd1_temp_min', 'd1_calcium_min', 'd1_lactate_min', 'd1_spo2_min', 'd1_heartrate_max', 'd1_diasbp_invasive_min', 'd1_sysbp_min', 'd1_arterial_pco2_max', 'd1_inr_max', 'd1_glucose_max', 'd1_sysbp_noninvasive_max', 'd1_pao2fio2ratio_max', 'd1_arterial_pco2_min'] + g_cols + new_cols
# # + min_max_cols
# # feature_names = [c for c in X.columns if 'apache' in c]
# # feature_names = [c for c in X.columns if 'h1_' in c]
# feature_names += [c for c in X.columns if 'icu_' in c]
# feature_names += ['aids', 'cirrhosis', 'diabetes_mellitus', 'hepatic_failure', 'immunosuppression', 'leukemia', 'lymphoma', 'solid_tumor_with_metastasis']
# feature_names = list(set(feature_names))
# # feature_names = cat_features
# # feature_names = [c for c in feature_names if c in X.columns]
# print(f'feature_names = {feature_names}')
# X = X[feature_names]
# X_test = X_test[feature_names]
# # SFS Feature selection
# # model = lgb.LGBMRegressor()
# model = lgb.LGBMClassifier()
# sfs = SFS(model,
# k_features=X.shape[1],
# forward=True,
# floating=False,
# verbose=2,
# scoring='roc_auc',
# cv=4,
# n_jobs=-1)
# sfs = sfs.fit(X, y)
# # print(sfs.subsets_)
# best_score = -1e30
# for i in sfs.subsets_.keys():
# if sfs.subsets_[i]['avg_score'] > best_score:
# best_score = sfs.subsets_[i]['avg_score']
# feature_names = sfs.subsets_[i]['feature_names']
# feature_names = list(feature_names)
# print(f'best_score: {best_score}')
# print(f'feature_names = {feature_names}')
# # feature_names = ['d1_bun_min', 'd1_lactate_min', 'apache_4a_hospital_death_prob_est', 'd1_spo2_mean']
# X = X[feature_names]
# X_test = X_test[feature_names]
# # Feature selection
# cols_to_drop = []
# for c in X.columns:
# # t = ttest_ind(
# # X[c].fillna(X[c].mean()),
# # X_test[c].fillna(X_test[c].mean()))
# t = ks_2samp(
# X[c].dropna(),
# X_test[c].dropna())
# # print(c, t)
# if t[1] < 0.001:
# print(c, t)
# cols_to_drop.append(c)
# print(f'Dropping after statistical tests: {cols_to_drop}')
# X = X.drop(cols_to_drop, axis=1, errors='ignore')
# X_test = X_test.drop(cols_to_drop, axis=1, errors='ignore')
p_test = []
for fold_i, (train_index, valid_index) in enumerate(kf.split(X, y)):
x_train = X.iloc[train_index].copy()
x_valid = X.iloc[valid_index].copy()
y_train = y[train_index]
y_valid = y[valid_index]
x_test = X_test.copy()
# Frequency encoding
for c in cat_features:
# for c in ['hospital_id']:
if c in x_train.columns:
encoding = x_train.groupby(c).size()
encoding = encoding/len(x_train)
x_train[f'{c}_fe'] = x_train[c].map(encoding)
x_valid[f'{c}_fe'] = x_valid[c].map(encoding)
x_test[f'{c}_fe'] = x_test[c].map(encoding)
# # Target encoding
# # for c in ['ethnicity', 'gender', 'hospital_admit_source', 'icu_admit_source', 'icu_stay_type', 'icu_type', 'apache_3j_bodysystem', 'apache_2_bodysystem', \
# # 'hospital_id', 'icu_id', 'age_group', 'apache_3j_diagnosis']:
# # cols = x_train.columns
# for c in tqdm(cat_features): #cat_features:
# if c in x_train.columns:
# trn, sub = target_encode(x_train[c].copy(),
# x_valid[c].copy(),
# target=train.iloc[train_index][target_col].copy(),
# min_samples_leaf=1,
# smoothing=0.1,
# noise_level=0.001)
# x_train[c + '_te'] = trn
# x_valid[c + '_te'] = sub
# # x_valid[c] = sub
# trn, sub = target_encode(x_train[c].copy(),
# x_test[c].copy(),
# target=train.iloc[train_index][target_col].copy(),
# min_samples_leaf=1,
# smoothing=0.1,
# noise_level=0.001)
# x_test[c + '_te'] = sub
# # x_train[c] = trn
# # x_test[c] = sub
feature_names = list(x_train.columns)
n_features = x_train.shape[1]
dprint(f'n_features: {n_features}')
p_valid = []
# LGBM
if train_lgbm:
params = lgb_params.copy()
# pca = PCA(n_components=144)
# x_train = pca.fit_transform(x_train)
# x_valid = pca.transform(x_valid)
# x_test_pca = pca.transform(x_test)
# feature_names = ['pca_{}'.format(i) for i in range(x_train.shape[1])]
lgb_train = lgb.Dataset(
x_train,
y_train,
feature_name=feature_names,
)
lgb_train.raw_data = None
lgb_valid = lgb.Dataset(
x_valid,
y_valid,
)
lgb_valid.raw_data = None
model = lgb.train(
params,
lgb_train,
num_boost_round=5000,
valid_sets=[lgb_valid],
early_stopping_rounds=400,
verbose_eval=100,
)
if fold_i == 0:
importance = model.feature_importance()
model_fnames = model.feature_name()
tuples = sorted(zip(model_fnames, importance), key=lambda x: x[1])[::-1]
tuples = [x for x in tuples if x[1] > 0]
print('Important features:')
for i in range(40):
if i < len(tuples):
print(i, tuples[i])
else:
break
del importance, model_fnames, tuples
p_lgbm = model.predict(x_valid, num_iteration=model.best_iteration)
p_valid.append(p_lgbm)
err = roc_auc_score(y_valid, p_lgbm)
# err_buf.append(err)
dprint('{} LGBM AUC: {:.6f}'.format(fold_i, err))
p_lgbm_test = model.predict(x_test[feature_names], num_iteration=model.best_iteration)
p_test.append(p_lgbm_test)
# XGB
if train_xgb:
params = xgb_params.copy()
dtrain = xgb.DMatrix(x_train, label=y_train)
dvalid = xgb.DMatrix(x_valid, label=y_valid)
dtest = xgb.DMatrix(x_test[feature_names])
evallist = [(dvalid, 'eval')]
bst = xgb.train(
params,
dtrain,
5000,
evallist,
early_stopping_rounds=200,
verbose_eval=100
)
p_xgb = bst.predict(dvalid, ntree_limit=bst.best_iteration)
p_valid.append(p_xgb)
err = roc_auc_score(y_valid, p_xgb)
# err_buf.append(err)
dprint('{} XGB AUC: {:.6f}'.format(fold_i, err))
p_xgb_test = bst.predict(dtest, ntree_limit=bst.best_iteration)
p_test.append(p_xgb_test)
# LR
if train_lr:
model = LogisticRegression()
model.fit(x_train.fillna(0), y_train)
p_lr = model.predict_proba(x_valid.fillna(0))[:, 1]
p_valid.append(p_lr)
dprint('{} LR AUC: {:.6f}'.format(fold_i, err))
err = roc_auc_score(y_valid, p_lr)
p_lr_test = model.predict_proba(x_test.fillna(0))
p_test.append(p_lr_test)
# Ensemble evaluation
if len(p_valid) > 1:
p_ens = np.mean(p_valid, axis=0)
err = roc_auc_score(y[valid_index], p_ens)
dprint('{} ENS AUC: {:.6f}'.format(fold_i, err))
err_buf.append(err)
# x_train = X.iloc[train_index]
# x_valid = X.iloc[valid_index]
# model = NuSVC(
# probability=True,
# kernel='poly',
# degree=4,
# gamma='auto',
# random_state=0,
# nu=0.6,
# coef0=0.05)
# model.fit(x_train, y[train_index])
# p_nusvc = model.predict_proba(x_valid)[:, 1]
# err = roc_auc_score(y[valid_index], p_nusvc)
# print('{} {} NuSVC AUC: {}'.format(v, cnt + 1, err))
# p_nusvc_test = model.predict_proba(x_test)[:, 1]
# p_mean = 0.1*p_lgbm + 0.9*p_nusvc
# err = roc_auc_score(y[valid_index], p_mean)
# print('{} {} ENS AUC: {}'.format(v, cnt + 1, err))
# p = 0.1*p_lgbm_test + 0.9*p_nusvc_test
del model, lgb_train, lgb_valid
gc.collect
# break
err_mean = np.mean(err_buf)
err_std = np.std(err_buf)
dprint('AUC: {:.6f} +/- {:.6f}'.format(err_mean, err_std))
test_preds = np.mean(p_test, axis=0)
submission = pd.DataFrame()
submission[id_col] = id_test
submission[target_col] = test_preds
submission.to_csv('submission{}.csv'.format(script_id), index=False)
# Save backup
files = [
'model{}.py'.format(script_id),
'model{}.log'.format(script_id),
'submission{}.csv'.format(script_id),
# 'feature_importance{}.txt'.format(script_id),
# 'train_weights{}.csv'.format(script_id),
]
experiment_name = 'Exp{}'.format(script_id)
params = {}
params['n_models'] = cnt
scores = {}
scores['auc_mean'] = err_mean
scores['auc_std'] = err_std
scores['kaggle'] = np.nan
other = {}
other['n_features'] = n_features
other['n_splits'] = n_splits
comments = ''
kinoa.save(
files,
experiment_name=experiment_name,
params=params,
scores=scores,
other=other,
comments=comments,
working_dir='',
sort_log_by='experiment_datetime',
sort_log_ascending=True,
columns_order={'scores.kaggle': -1, 'scores.auc_std': -2, 'scores.auc_mean': -3}
)
dprint('Done!')
| [
"pole@Olegs-MacBook-Pro.local"
] | pole@Olegs-MacBook-Pro.local |
44a9e486f57f5d09e2a7f03dcd8cab278f223b96 | 8f9f6a5348b832e9f12ef6baf6bcdd8842ff1c83 | /core/migrations/0002_profile.py | 987bf0b7af1dcd2c764ae64e1df702d401441278 | [] | no_license | jbrit/raffle | 20b48d016ac50082733c7c34f3891beb268c4eb9 | 2ee83ffe564f59bc7afd6b12740ea3a98c42986e | refs/heads/main | 2023-06-04T14:22:09.808595 | 2021-03-19T14:13:38 | 2021-03-19T14:13:38 | 343,774,020 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Generated by Django 3.1.7 on 2021-03-02 23:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email_confirmed', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"pro.ajibolaojo@gmail.com"
] | pro.ajibolaojo@gmail.com |
8eefb7f6ca23a2a3f5dfc5c8ef76c3332af3d2bd | 5bc8f9c430f6256738d557ccbd7775fc1cb880e2 | /p1_out.py | 30a6fb31251ba011b7527a872e3490aae9bc9637 | [] | no_license | sssandan/findyourway | 11b48b2357cf790c457e8ecd65a5953a18498926 | 04475f3cd841460988a20ff299bc42c9aae3c3dd | refs/heads/master | 2020-05-31T11:10:46.948409 | 2019-08-31T16:29:00 | 2019-08-31T16:29:00 | 190,256,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | from p1_maps import maps
import requests
class steps:
def __init__(self, jsonDump):
self.__dic = jsonDump
def output(self):
num = len(self.__dic["route"]["locationSequence"]) - 1
for item in range(num):
for x in self.__dic["route"]["legs"][item]["maneuvers"]:
yield x["narrative"]
class totalDistance:
def __init__(self, jsonDump):
self.__dic = jsonDump
def output(self):
dist = self.__dic['route']['distance']
yield(round(dist))
class totalTime:
def __init__(self, jsonDump):
self.__dic = jsonDump
def output(self):
time = self.__dic['route']['formattedTime']
timeList = time.split(":")
minutes = int(timeList[0]) * 60 + int(timeList[1])
yield(minutes)
class latLong:
def __init__(self, jsonDump):
self.__dic = jsonDump
def output(self):
for i in range(len(self.__dic) + 1):
lng = str(round(self.__dic['route']['locations'][i]['latLng']['lng'], 2))
lat = str(round(self.__dic['route']['locations'][i]['latLng']['lat'], 2))
if lng[0] == '-':
lng = lng[1:] + 'W'
else:
lng = lng + 'E'
if lat[0] == '-':
lat = lat[1:] + 'S'
else:
lat = lat + 'N'
finalLatLng = lat + " " + lng
yield finalLatLng
class elevation:
def __init__(self, jsonDump):
self.__dic = jsonDump
def output(self):
url = ("http://open.mapquestapi.com/elevation/v1/profile" +
"?key=A8RmTl27mFnA8Q2h8h7HNQqUCTu5APBH&shapeFormat=raw&latLngCollection=")
for i in range(len(self.__dic['route']['locationSequence'])):
latlng = (str(self.__dic['route']['locations'][i]['latLng']['lat']) + ","
+ str(self.__dic['route']['locations'][i]['latLng']['lng']) + ",")
latlng = latlng.rstrip(",")
newUrl = url + latlng
newUrl = newUrl.rstrip()
newOutput = (requests.post(newUrl)).json()
elevationValue = newOutput['elevationProfile'][0]['height']
yield(elevationValue)
| [
"noreply@github.com"
] | sssandan.noreply@github.com |
a566062b0e553c01b062f47e0df2bba4c782a803 | 22ca7332409eabae5296332f29ad808f83bdd016 | /wagtail_tuto/wagtailmd/wagtail_hooks.py | f119abb8ee411817df09d4e2428339445ab03e01 | [] | no_license | lordvcs/wagtail_sample_blog | a473c0db1200eb783f2f80143e5ae75f73944b60 | 7d3efae9841b877efd4246c45e2ed604863f6838 | refs/heads/master | 2022-12-11T04:25:11.862573 | 2018-06-25T18:00:23 | 2018-06-25T18:00:23 | 133,362,600 | 1 | 0 | null | 2022-12-08T02:05:27 | 2018-05-14T13:11:41 | Python | UTF-8 | Python | false | false | 612 | py | from django.conf import settings
from wagtail.wagtailcore import hooks
@hooks.register('insert_editor_js')
def editor_js():
s = """<script src="{0}wagtailmd/js/simplemde.min.js"></script>"""
s += """<script src="{0}wagtailmd/js/simplemde.attach.js"></script>"""
return s.format(settings.STATIC_URL)
@hooks.register('insert_editor_css')
def editor_css():
s = """<link rel="stylesheet" href="{0}wagtailmd/css/simplemde.min.css">"""
s += """<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/latest/css/font-awesome.min.css">"""
return s.format(settings.STATIC_URL) | [
"diabolicfreak@gmail.com"
] | diabolicfreak@gmail.com |
3daed57886a82e22e3b15f823cfe857e1a36fab2 | 9892312f5543eafffbd86a084daf90c8b4628a59 | /DataAnalyticsWithPython-Training/student_files/ch01_numpy/01_numpy_array.py | 93d5e279ad293f1029c006a544fff00df4df4441 | [] | no_license | jigarshah2811/Data_Analytics_ML | 1718a79f8f569a4946b56cc499b17546beb9c67d | 107197cfd3e258c1a73c6930951463392159c3ed | refs/heads/master | 2022-01-19T19:42:14.746580 | 2019-07-21T20:21:07 | 2019-07-21T20:21:07 | 197,888,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | import numpy as np
# Create NumPy Array
arr = np.array([1, 2, 3, 4, 5])
arr2 = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
arr3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
print(type(arr)) # <class 'numpy.ndarray'>
print('Shape: {0}, Size: {1}, Axes: {2}, Types: {3}, Strides: {4}'
.format(arr3.shape, arr3.size, arr3.ndim, arr3.dtype, arr3.strides)) # (4, 3) 12 2 int32 (12, 4)
print(arr3) # [[ 1 2 3][4 5 6][7 8 9][10 11 12]]
print(arr3[0]) # [1 2 3]
print(arr3[0][2]) #3
# Create NumPy Array - With zeros
arr4 = np.zeros((2, 2))
print(arr4) # [[ 0. 0.] [ 0. 0.]]
print(arr4.tolist())
# Create NumPy Array - with ones
arr5 = np.ones((2, 2))
print(arr5) # [[ 1. 1.] [ 1. 1.]]
# Create NumPy Array - with any value
arr6 = np.full((2, 2), 6)
print(arr6) # [[ 6. 6.] [ 6. 6.]]
arr7 = np.eye(2)
print(arr7) # 2x2 identity matrix
print(np.eye(3)) # [[1. 0. 0.][0. 1. 0.][0. 0. 1.]]
print(np.eye(3, 3, 1)) # defines 3 rows, 3 cols, and the diagonal offset: [[0. 1. 0.][0. 0. 1.][0. 0. 0.]]
print(np.eye(3, k=-1)) # defines 3 rows/cols, diagonal offset is negative: [[0. 0. 0.][1. 0. 0.][0. 1. 0.]]
arr8 = np.empty((2, 2))
print(arr8) # 2x2 random numbers
# Create NumPy Array - With Range
arr9 = np.array(range(11, 20)) # [11 12 13 14 15 16 17 18 19]
print(arr9)
| [
"jigasha2@cisco.com"
] | jigasha2@cisco.com |
7cd9047e4d3b20bde1d83a2b46d6a8914f287ea1 | 0d33c9f6c8f97896f966e222997ddfc178fb3b18 | /best_news/settings.py | 09002b09104a237459c0c85b022f71dc7ef641d5 | [] | no_license | eduardo-monita/scrapy-django | 613bff15e4ec5c12b567dfc7bcff5513029b093d | 6043570c951d18b38c12913ff8434e0eaf429341 | refs/heads/master | 2022-12-13T08:58:55.383888 | 2020-09-03T03:06:10 | 2020-09-03T03:06:10 | 292,448,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | """
Django settings for best_news project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-g+h$iupp^j^912m@-!1ll2udwu3!d&*u=)jsp22$z95wexo9_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'news'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'best_news.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'best_news.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = 'news/templates/'
STATIC_ROOT = os.path.join(BASE_DIR, 'news/templates')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# STATICFILES_DIRS = (
# '/news/templates',
# ) | [
"noreply@github.com"
] | eduardo-monita.noreply@github.com |
f3197d14dbee34f7d0ebfe6c8268d9e4f61c5fde | 00ccdc877771cb0cf493526d1e201e0f625bf5e7 | /dohq_teamcity/api/test_api.py | 1335616d3fe97e39870d49b309ae190556a049db | [
"MIT"
] | permissive | expobrain/teamcity | a52928045166bb5d34f4a0396cb840bfee8f43d5 | 9f04c0692a2c5b277a608c2f11cc1fb48e0c87e2 | refs/heads/master | 2020-04-13T13:11:07.270515 | 2018-10-18T01:40:06 | 2018-10-18T01:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,708 | py | # coding: utf-8
"""
TeamCity REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
from dohq_teamcity.custom.base_model import TeamCityObject
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dohq_teamcity.models.test import Test # noqa: F401,E501
from dohq_teamcity.models.tests import Tests # noqa: F401,E501
class TestApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
base_name = 'Test'
def __init__(self, api_client=None):
self.api_client = api_client
def get_tests(self, **kwargs): # noqa: E501
"""get_tests # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tests(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str locator:
:param str fields:
:return: Tests
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_tests_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.__get_tests_with_http_info(**kwargs) # noqa: E501
return data
def serve_instance(self, test_locator, **kwargs): # noqa: E501
"""serve_instance # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.serve_instance(test_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str test_locator: (required)
:param str fields:
:return: Test
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__serve_instance_with_http_info(test_locator, **kwargs) # noqa: E501
else:
(data) = self.__serve_instance_with_http_info(test_locator, **kwargs) # noqa: E501
return data
def __get_tests_with_http_info(self, **kwargs): # noqa: E501
"""get_tests # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_tests_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str locator:
:param str fields:
:return: Tests
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['locator', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tests" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'locator' in params:
query_params.append(('locator', params['locator'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/tests', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tests', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __serve_instance_with_http_info(self, test_locator, **kwargs): # noqa: E501
"""serve_instance # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__serve_instance_with_http_info(test_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str test_locator: (required)
:param str fields:
:return: Test
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['test_locator', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method serve_instance" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'test_locator' is set
if ('test_locator' not in params or
params['test_locator'] is None):
raise ValueError("Missing the required parameter `test_locator` when calling `serve_instance`") # noqa: E501
collection_formats = {}
path_params = {}
if 'test_locator' in params:
if isinstance(params['test_locator'], TeamCityObject):
path_params['testLocator'] = params['test_locator'].locator_id
else:
path_params['testLocator'] = params['test_locator'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/tests/{testLocator}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Test', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"allburov@gmail.com"
] | allburov@gmail.com |
b15b14e0a3c393b327f48b7c2211d0d7ea88c5fa | cd2d3b6be41eb9b96ecc3a22dc730325c21f22e6 | /charalog/log/qaswsq19.cgi | e5245d1c362c4cf17fe5f3a09188c1705fb8ddce | [] | no_license | cappuu/TC | c61f235349e9a68d472fa85bbea1adbef3ea154a | def08d09219e11bee2135f6b796569b769ee21c1 | refs/heads/master | 2021-09-10T19:37:33.847161 | 2018-03-31T22:56:05 | 2018-03-31T22:56:05 | 124,523,296 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 1,944 | cgi | 11월 : 남피의 기술을 <font color=red>+9</font> 개발했습니다.(15일23시38분)
10월 : 남피의 기술을 <font color=red>+7</font> 개발했습니다.(15일22시38분)
9월 : 남피의 기술을 <font color=red>+6</font> 개발했습니다.(15일21시38분)
8월 : 현재 기운이 충만한 상태입니다.(15일20시38분)
7월 : 남피의 기술을 <font color=red>+7</font> 개발했습니다.(15일19시39분)
7월 : 수확으로 <font color=red>2834</font>의 식량을 수확했습니다. [봉토추가봉록:34](15일19시39분)
7월 : [<font color=red>레벨업</font>] Lv.13이 되었다! 봉록이 <font color=red> 2950 </font>로 늘어났다!(15일19시39분)
7월 : [<font color=red>레벨업</font>] 무력이 1포인트 올랐습니다!(15일19시39분)
6월 : 남피의 기술을 <font color=red>+8</font> 개발했습니다.(15일18시38분)
5월 : 남피의 기술을 <font color=red>+9</font> 개발했습니다.(15일17시38분)
4월 : 남피의 기술을 <font color=red>+6</font> 개발했습니다.(15일16시38분)
3월 : 남피의 기술을 <font color=red>+6</font> 개발했습니다.(15일15시38분)
2월 : <font color=red>[상승] </font>:진등의 지력이 1포인트 올랐다.(15일14시40분)
2월 : 남피의 기술을 <font color=red>+8</font> 개발했습니다.(15일14시40분)
2월 : 기술치부대는 대장의 명령에 의해 남피성에 집결했습니다.(15일14시8분)
1월 : 북평의 기술을 <font color=red>+6</font> 개발했습니다.(15일13시40분)
1월 : 세금으로 <font color=red>3300</font>의 돈을 징수했습니다. [관직추가봉록:200] [봉토추가봉록:300](15일13시40분)
12월 : 북평의 기술을 <font color=red>+6</font> 개발했습니다.(15일12시39분)
11월 : 북평의 기술을 <font color=red>+9</font> 개발했습니다.(15일11시39분)
10월 : 북평의 기술을 <font color=red>+9</font> 개발했습니다.(15일10시38분)
| [
"lu2447315@gmail.com"
] | lu2447315@gmail.com |
39babd4a8480990d04555e018009e8949b8063e7 | 0cdf04165ca53cf8b85359ba18cd93240f885657 | /Portfolio Allocator/PortfolioAlloc.py | d01fb5cf91519635d298d9adda09be720fbaea03 | [] | no_license | kevroi/AlgoTrading | 2cb19373ee27e7e3adff21cdc0352b1ca387530b | 2302973de3cba232d9b363ee8177ef1ce2033d48 | refs/heads/main | 2023-08-29T01:29:26.928784 | 2021-10-21T23:08:35 | 2021-10-21T23:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | import math
import numpy as np
import pandas as pd
import requests
import xlsxwriter
from secrets import IEX_CLOUD_API_TOKEN
def chunks(lst, n):
# Produces n-sized chunks from a list
for i in range(0, len(lst), n):
yield lst[i:i+n]
portfolio_size = 10000000.0
stocks = pd.read_csv('sp_500_stocks.csv')
fund_columns = ['Ticker', 'Stock Price', 'Market Capitalization', 'Number of Shares to Buy']
fund_df = pd.DataFrame(columns=fund_columns)
counter = 0
for stock in stocks['Ticker']:
api_url = f'https://sandbox.iexapis.com/stable/stock/{stock}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
fund_df = fund_df.append(
pd.Series([stock, data['latestPrice'], data['marketCap'], 'N/A'],
index=fund_columns
),
ignore_index=True
)
counter += 1
print(f'{counter} of {len(stocks)} stocks downloaded', end='\r')
symbol_batches = list(chunks(stocks['Ticker'], 100))
symbols_strings = []
for i in symbol_batches:
symbols_strings.append(','.join(i))
for i in symbols_strings:
batch_api_url = f"https://sandbox.iexapis.com/stable/stock/market/batch?symbols={i}&types=quote&token={IEX_CLOUD_API_TOKEN}"
data = requests.get(batch_api_url).json()
for stock in i.split(','):
fund_df = fund_df.append(
pd.Series(
[
stock,
data[stock]['quote']['latestPrice'],
data[stock]['quote']['marketCap'],
'N/A'
],
index=fund_columns
),
ignore_index=True
)
index_market_val = fund_df['Market Capitalization'].sum()
fund_df['Number of Shares to Buy'] = (portfolio_size * fund_df['Market Capitalization'] / index_market_val) // fund_df['Stock Price'] # no fractional shares
writer = pd.ExcelWriter('SP500RecIndex.xlsx', engine='xlsxwriter')
fund_df.to_excel(writer, 'Recommended Trades', index=False)
# Formatting style of spreadsheet
bg_color = "#0A0A23"
font_color = "#FFFFFF"
font_name = 'Consolas'
string_format = writer.book.add_format(
{
'font_color': font_color,
'font_name': font_name,
'bg_color': bg_color,
'border': 1
}
)
dollar_format = writer.book.add_format(
{
'num_format': '$0.00',
'font_name': font_name,
'font_color': font_color,
'bg_color': bg_color,
'border': 1
}
)
int_format = writer.book.add_format(
{
'num_format': '0',
'font_name': font_name,
'font_color': font_color,
'bg_color': bg_color,
'border': 1
}
)
column_formats = {
'A': [fund_columns[0], string_format],
'B': [fund_columns[1], dollar_format],
'C': [fund_columns[2], dollar_format],
'D': [fund_columns[3], int_format]
}
for column in column_formats:
writer.sheets['Recommended Trades'].set_column(f"{column}:{column}", 18, column_formats[column][1])
writer.save() | [
"kevinroice@Kevins-Air.lan"
] | kevinroice@Kevins-Air.lan |
9bacc923ed82059fbae57f5fa103180176b09d90 | 750221e29c9c038be9f6434572fb05632edb0fb1 | /bccc/ui/item.py | 957ce9b432683c51142f0a815704a8ac97584228 | [
"Apache-2.0"
] | permissive | pombredanne/bccc | 01bd26803e157e48b4dbaafc62437e63a8bbfb59 | 9e0e0613283ef8d3539bf908e1f63bb44665cb62 | refs/heads/master | 2021-01-18T14:19:08.387808 | 2012-07-01T10:44:13 | 2012-07-01T10:44:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,446 | py | # Copyright 2012 Thomas Jost
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software stributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import bisect
import dateutil.tz
import urwid
from .util import BoxedEdit
# {{{ Basic item widget
class ItemWidget(urwid.FlowWidget):
attr_author = ("post author", "focused post author")
attr_date = ("post date", "focused post date")
attr_text = ("post text", "focused post text")
def __init__(self, id=None, author="", date="", text="", padding=0):
self._id = id
self._author = author
self._date = date
self._text = text
# Init sub-widgets
author_w = urwid.Text((" "*padding) + author, wrap="clip")
author_w = urwid.AttrMap(author_w, *self.attr_author)
date_w = urwid.Text(date, align="right", wrap="clip")
date_w = urwid.AttrMap(date_w, *self.attr_date)
text_w = urwid.Text(text)
text_w = urwid.Padding(text_w, left=4+padding, right=1)
text_w = urwid.AttrMap(text_w, *self.attr_text)
self.widgets = (author_w, date_w, text_w)
urwid.FlowWidget.__init__(self)
self._selectable = True
@property
def id(self): return self._id
@property
def author(self): return self._author
@property
def date(self): return self._date
@property
def text(self): return self._text
def keypress(self, size, key):
return key
def rows(self, size, focus=False):
return self.widgets[2].rows(size, focus) + 1
def render(self, size, focus=False):
maxcol = size[0]
# Render first line
author_col, _ = self.widgets[0].pack(focus=focus)
date_col, _ = self.widgets[1].pack(focus=focus)
canvas_head = None
if author_col + date_col <= maxcol:
# We can render them both!
canvas_author = self.widgets[0].render((maxcol-date_col,), focus)
canvas_date = self.widgets[1].render((date_col,), focus)
canv = [
(canvas_author, None, True, maxcol-date_col),
(canvas_date, None, True, date_col),
]
canvas_head = urwid.CanvasJoin(canv)
else:
# Only render author
canvas_head = self.widgets[0].render(size, focus)
# Render text
canvas_text = self.widgets[2].render(size, focus)
canv = [
(canvas_head, None, True),
(canvas_text, None, True),
]
out = urwid.CanvasCombine(canv)
return out
# }}}
# {{{ Single post/reply widget
class PostWidget(ItemWidget):
def __init__(self, post, padding=0):
self._item = post
author = post.author
date = post.published.astimezone(dateutil.tz.tzlocal()).strftime("%x - %X")
text = post.content
ItemWidget.__init__(self, post.id, author, date, text, padding)
@property
def item(self): return self._item
class ReplyWidget(PostWidget):
attr_author = ("reply author", "focused reply author")
attr_date = ("reply date", "focused reply date")
attr_text = ("reply text", "focused reply text")
def __init__(self, reply):
PostWidget.__init__(self, reply, padding=2)
self._in_reply_to = reply.in_reply_to
@property
def in_reply_to(self): return self._in_reply_to
def __lt__(self, other):
return self.item.published < other.item.published
def __eq__(self, other):
return type(other) is ReplyWidget and self.id == other.id
def __hash__(self):
return object.__hash__(self)
# }}}
# {{{ New post/reply composition widgets
class NewPostWidget(BoxedEdit):
attr_edit = ("new post text", "focused new post text")
attr_box = ("new post box", "focused new post box")
box_title = "New post"
status_base = "New post in {}"
def __init__(self, ui, channel):
self.ui = ui
self.channel = channel
super().__init__()
def update(self):
msg = self.status_base.format(self.channel.jid)
msg += " - {} characters".format(len(self.edit.edit_text))
msg += " [Alt+Enter to post, Escape to cancel and discard]"
self.ui.status.set_text(msg)
def validate(self, *args, **kwds):
text = self.edit.edit_text.strip()
if len(text) > 0:
self.channel.publish(text, *args, **kwds)
self.ui.threads_list.cancel_new_item()
def cancel(self):
self.ui.threads_list.cancel_new_item()
class NewReplyWidget(NewPostWidget):
attr_edit = ("new reply text", "focused new reply text")
attr_box = ("new reply box", "focused new reply box")
box_title = "New reply"
status_base = "New reply in {}"
def __init__(self, ui, channel, thread_id):
self.thread_id = thread_id
super().__init__(ui, channel)
def validate(self, *args, **kwds):
return super().validate(*args, in_reply_to=self.thread_id, **kwds)
# }}}
# Local Variables:
# mode: python3
# End:
| [
"schnouki@schnouki.net"
] | schnouki@schnouki.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.