blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6bfb54dc4cbe9b33ff4cf3b60028b65ef2ef074d | f34fef9e6ca75155ea14ea27807928c922b992cd | /resources/data/aces_1.0.3/python/aces_ocio/utilities.py | 12782d302b523ff947c148cf204a62dae504318e | [
"MIT"
] | permissive | Monsho/D3D12Samples | e86922b1e1fe300e9ecce8bcdf82856c028bff25 | 37aa7820fc5f1e711864a298322f03079f5d0fbc | refs/heads/master | 2023-01-05T21:49:23.538365 | 2022-12-20T23:26:42 | 2022-12-20T23:26:42 | 80,517,339 | 47 | 7 | null | null | null | null | UTF-8 | Python | false | false | 7,571 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines various package utilities objects.
"""
from __future__ import division
import itertools
import os
import re
from collections import OrderedDict
import PyOpenColorIO as ocio
__author__ = 'ACES Developers'
__copyright__ = 'Copyright (C) 2014 - 2016 - ACES Developers'
__license__ = ''
__maintainer__ = 'ACES Developers'
__email__ = 'aces@oscars.org'
__status__ = 'Production'
__all__ = ['ColorSpace',
'mat44_from_mat33',
'filter_words',
'files_walker',
'replace',
'sanitize',
'compact',
'colorspace_prefixed_name',
'unpack_default']
class ColorSpace(object):
    """
    A container for data needed to define an *OCIO* *ColorSpace*.
    """

    def __init__(self,
                 name,
                 aliases=None,
                 description=None,
                 bit_depth=ocio.Constants.BIT_DEPTH_F32,
                 equality_group='',
                 family=None,
                 is_data=False,
                 to_reference_transforms=None,
                 from_reference_transforms=None,
                 allocation_type=ocio.Constants.ALLOCATION_UNIFORM,
                 allocation_vars=None,
                 aces_transform_id=None):
        """
        Constructor for ColorSpace container class.

        Parameters
        ----------
        name : str or unicode
            Name of the colorspace.

        All other arguments are optional.
        """
        # Mutable defaults are instantiated here, per instance, so that
        # separate ColorSpace objects never share the same list object.
        self.name = name
        self.aliases = [] if aliases is None else aliases
        self.bit_depth = bit_depth
        self.description = description
        self.equality_group = equality_group
        self.family = family
        self.is_data = is_data
        self.to_reference_transforms = (
            [] if to_reference_transforms is None
            else to_reference_transforms)
        self.from_reference_transforms = (
            [] if from_reference_transforms is None
            else from_reference_transforms)
        self.allocation_type = allocation_type
        self.allocation_vars = (
            [0, 1] if allocation_vars is None else allocation_vars)
        self.aces_transform_id = aces_transform_id
def mat44_from_mat33(mat33):
    """
    Creates a 4x4 matrix from given 3x3 matrix.

    Parameters
    ----------
    mat33 : array of float
        A 3x3 matrix, as a flat sequence of 9 values in row-major order.

    Returns
    -------
    array of float
        A 4x4 matrix as a flat list of 16 values: the 3x3 block is padded
        with a zero fourth column and a (0, 0, 0, 1) fourth row.
    """
    values = list(mat33)
    return (values[0:3] + [0] +
            values[3:6] + [0] +
            values[6:9] + [0] +
            [0, 0, 0, 1])
def filter_words(words, filters_in=None, filters_out=None, flags=0):
    """
    A function to filter strings in an array.

    Parameters
    ----------
    words : array of str or unicode
        Array of strings.
    filters_in : array of str or unicode, optional
        Regex patterns; a word is kept only if it matches at least one.
    filters_out : array of str or unicode, optional
        Regex patterns; a word is dropped if it matches any of them.
    flags : int, optional
        Flags for re.search.

    Returns
    -------
    array of str or unicode
        The words that pass both filter sets.
    """
    # Rewritten with any() and without shadowing the builtin `filter`.
    filtered_words = []
    for word in words:
        # Keep-list: require at least one matching pattern when provided.
        if filters_in and not any(
                re.search(pattern, word, flags) for pattern in filters_in):
            continue
        # Drop-list: reject the word on any matching pattern.
        if filters_out and any(
                re.search(pattern, word, flags) for pattern in filters_out):
            continue
        filtered_words.append(word)
    return filtered_words
def files_walker(directory, filters_in=None, filters_out=None, flags=0):
    """
    A generator that walks a directory hierarchy, only yielding files that
    do or do not match the specified filters.

    Parameters
    ----------
    directory : str or unicode
        The starting point for directory walking.
    filters_in : array of str or unicode, optional
        Regex patterns a file path must match to be yielded.
    filters_out : array of str or unicode, optional
        Regex patterns a file path must NOT match to be yielded.
    flags : int, optional
        Flags for re.search.

    Returns
    -------
    iterable
        The next matching file path.
    """
    # `filename` instead of `file` so the builtin is not shadowed.
    for parent_directory, _directories, filenames in os.walk(
            directory, topdown=False, followlinks=True):
        for filename in filenames:
            path = os.path.join(parent_directory, filename)
            # Skip anything that is not a regular file (e.g. broken links).
            if not os.path.isfile(path):
                continue
            if not filter_words((path,), filters_in, filters_out, flags):
                continue
            yield path
def replace(string, data):
    """
    Replaces the data occurrences in the string.

    Parameters
    ----------
    string : str or unicode
        String to manipulate.
    data : dict
        Replacement occurrences, applied sequentially in iteration order.

    Returns
    -------
    str or unicode
        Manipulated string.

    Examples
    --------
    >>> patterns = {'John': 'Luke', 'Doe': 'Skywalker'}
    >>> replace('Users are: John Doe.', patterns)
    'Users are: Luke Skywalker.'
    """
    # `dict.iteritems()` only exists on Python 2; `items()` works on both
    # Python 2 and 3 and fixes an AttributeError under Python 3.
    for old, new in data.items():
        string = string.replace(old, new)
    return string
def sanitize(path):
    """
    Replaces occurrences of ' ', '(', or ')' in the string with an
    underscore.

    Parameters
    ----------
    path : str or unicode
        Path string to manipulate.

    Returns
    -------
    str or unicode
        Manipulated string.
    """
    # Each offending character maps to the same replacement, so a simple
    # sequential substitution is equivalent regardless of order.
    for token in (' ', ')', '('):
        path = path.replace(token, '_')
    return path
def compact(string):
    """
    Removes blanks, underscores, dashes and parentheses.

    Parameters
    ----------
    string : str or unicode
        String to compact.

    Returns
    -------
    str or unicode
        A compact version of that string.
    """
    # The substitutions are order-sensitive: separators first collapse to
    # underscores, runs of underscores are then merged, and finally every
    # remaining underscore is stripped.
    result = string.lower()
    for old, new in ((' ', '_'),
                     ('(', '_'),
                     (')', '_'),
                     ('.', '_'),
                     ('-', '_'),
                     ('___', '_'),
                     ('__', '_'),
                     ('_', '')):
        result = result.replace(old, new)
    return result
def colorspace_prefixed_name(colorspace):
    """
    Returns given *OCIO* colorspace prefixed name with its family name.

    Parameters
    ----------
    colorspace : ColorSpace
        ColorSpace to prefix; its `family` and `name` attributes are read.

    Returns
    -------
    str or unicode
        Family prefixed *OCIO* colorspace name.
    """
    family_prefix = colorspace.family.replace('/', ' - ')
    return '{0} - {1}'.format(family_prefix, colorspace.name)
def unpack_default(iterable, length=3, default=None):
    """
    Unpacks given iterable maintaining given length and filling missing
    entries with given default.

    Parameters
    ----------
    iterable : object
        Iterable.
    length : int
        Iterable length.
    default : object
        Filling default object.

    Returns
    -------
    iterable
        A lazy iterator of exactly `length` items.
    """
    # Pad the source with an endless stream of defaults, then truncate.
    padded = itertools.chain(iter(iterable), itertools.repeat(default))
    return itertools.islice(padded, length)
| [
"cinderella.cage@gmail.com"
] | cinderella.cage@gmail.com |
179c039c28e0cfe0de7e338e59e0ee164eb8b987 | f2098afb99c02f86bd27d890475e52ab6e81b10b | /crypto_web/wsgi.py | 738d809a3e4d63c0791d0bb41b9cca238ea62d83 | [] | no_license | samfoxcode/Cryptocurrency-Trading-Website | 544d4bf89aee242e32e9540fad20bd7c696a362b | 99f9baaed10a0228db0078d3905471a8d512f9f9 | refs/heads/master | 2021-09-13T09:12:20.373582 | 2018-04-27T15:54:13 | 2018-04-27T15:54:13 | 117,155,578 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for crypto_web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment
# already defines one (setdefault never overrides an existing value).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crypto_web.settings")
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"samf1596@gmail.com"
] | samf1596@gmail.com |
fbbbc52eab2329bda34461328893fba1754e20a0 | 70934fe6d0feed93994a98650a543832897e69ae | /sinaweibo.bak/weibo_factory.py | 95cfe4e1548a4d14a120107301c0a8c022bd7623 | [] | no_license | zy-sunshine/sunblackshineblog | d1d3f0f69d6b8c006e70a0601bc0d520ec5002bb | ea7d1e2f8477a238501cecf8e63fd3d7a2a945c9 | refs/heads/master | 2021-01-01T16:19:52.954701 | 2011-10-29T17:12:59 | 2011-10-29T17:12:59 | 2,570,586 | 1 | 0 | null | null | null | null | GB18030 | Python | false | false | 1,878 | py | #encoding=utf8
#file:weibo_factory.py
#@author:carlos
#@date:2011-2-13
#@link:tieniuzai.com
from weibopy.auth import BasicAuthHandler
from weibopy.api import API
class SinaWeibo:
    """Thin Python 2 wrapper around the Sina Weibo API via `weibopy`."""
    def __init__(self,username,password):
        self.username = username
        self.password = password
        self.source ="app key" #obtained after registering a Sina Weibo developer account and creating an application
    def getAtt(self, key):
        # Read attribute `key` from the object currently stored in self.obj,
        # returning '' (after printing the error) if it is missing.
        try:
            return self.obj.__getattribute__(key)
        except Exception, e:
            print e
            return ''
    def getAttValue(self, obj, key):
        # Same as getAtt, but reads from an explicitly supplied object.
        try:
            return obj.__getattribute__(key)
        except Exception, e:
            print e
            return ''
    def basicAuth(self):
        # Authenticate with HTTP basic auth and build the API entry point.
        source = self.source
        self.auth = BasicAuthHandler(self.username, self.password)
        self.api = API(self.auth,source=source)
    def parse_timeline(self,timeline):
        # Flatten a weibopy timeline into a list of plain dicts, one per
        # status, extracting the fields the site templates need.
        result = []
        for line in timeline:
            self.obj = line
            item ={}
            user = self.getAtt("user")
            item['mid'] = self.getAtt("id")
            item['text'] = self.getAtt("text")
            item['pic'] = self.getAtt("thumbnail_pic")
            item['author_name']= user.screen_name
            item['author_id'] = user.id
            item['author_domain'] = user.domain
            item['author_profile_image']= user.profile_image_url
            item['created_at'] = self.getAtt('created_at')
            item['source'] = self.getAtt('source')
            item['retweeted_status'] = self.getAtt('retweeted_status')
            result.append(item)
        return result
    def get_myself(self):
        # Fetch the profile of a hard-coded user id and return a small dict.
        # NOTE(review): the id 1649938837 is hard-coded; the commented-out
        # line suggests it was meant to come from the authenticated user.
        myself = self.api.get_user(id=1649938837)
        #myself = self.api.get_user(user_id=self.auth.get_username)
        self.obj = myself
        user={}
        user['profile_image_url'] = self.getAtt('profile_image_url')
        user['name']=self.getAtt("screen_name")
        user['description']=self.getAtt("description")
        use = self.auth.get_username()
        return user
    def user_timeline(self):
        # First page (10 statuses) of the authenticated user's timeline.
        timeline = self.api.user_timeline(count=10, page=1)
        result = self.parse_timeline(timeline)
        return result
| [
"zy.netsec@gmail.com"
] | zy.netsec@gmail.com |
fb9989e05ebcb95404af967bb7c263091849833b | 8f4bba3960f7ded3a84cd3482193721eeab1ceec | /tooff/13调整数组顺序奇数在偶数前.py | 706c59df28a2a91595b15d9d236fe2d9d91da16e | [] | no_license | zahidzqj/learn_python | 1e49b5faa34c0ed1bd1b3816fb9a3a8e094d7a4f | d272a2bd8ec9dea4c6edfd8faaf763bb13962348 | refs/heads/master | 2021-05-27T09:21:58.171999 | 2020-11-24T07:41:30 | 2020-11-24T07:41:30 | 128,086,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding:utf-8 -*-
'''
把数组分为两部分,组合起来
'''
class Solution:
    def reOrderArray(self, array):
        """Return `array` with odd numbers first and even numbers after,
        preserving the relative order within each group."""
        odds = [value for value in array if value % 2 == 1]
        evens = [value for value in array if value % 2 != 1]
        return odds + evens
# Manual smoke test: expect [1, 3, 2, 4] (odds first, order preserved).
a = Solution()
print(a.reOrderArray([1,2,3,4]))
| [
"920086481@qq.com"
] | 920086481@qq.com |
b027b0187d76b9df394d92412e8360f628ea0efd | 0a21cd04a2385889e5c89c35b081960cc33bc36c | /practice1/IfElseElIfExample.py | b55a5562187d01e0eb379b6353baa0264b2cc3c1 | [] | no_license | knagesh9841/PythonMarch2020 | f914111b75f7f5fccb5fa91ad1b1300c51f4b51c | a8deba81cc9565fbe701b7cdafe1c94e175638a2 | refs/heads/master | 2022-11-11T06:38:58.823454 | 2020-06-10T18:09:57 | 2020-06-10T18:09:57 | 262,742,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py |
# Demonstration script for Python if / elif / else forms.
# Read an integer from the user (raises ValueError on non-numeric input).
Number = int(input("Enter User Input"))
if Number == 1:
    print("One")
elif Number == 2:
    print("Two")
else:
    print("Wrong Input")
# One line if statement
if Number == 1: print("One")
# One line if else statement (conditional expression used for its side effect)
print("One") if Number == 1 else print("Wrong Number")
# Nested If
Number = 41
if Number > 20:
    if Number > 40:
        print("Number is Greater Than 40")
    else:
        print("Number is Greater Than 20 But Less Than 40")
"""
The pass Statement
if statements cannot be empty, but if you for some reason have an if statement with no content, put in the pass statement to avoid getting an error.
We can use it in Loop,Function and Method
"""
# `pass` makes the otherwise-empty if body syntactically valid.
if Number > 20:
    pass
| [
"knagesh9841@yahoo.com"
] | knagesh9841@yahoo.com |
e2d5d8d090aaa5e2cd9b747edc034a82d1127e9a | 80d6f8ac402be590ca217c1da43254cbc65ec363 | /tests/test_api.py | d4df669d3f694f977e4b3f7d9cdb014130600e8b | [
"Apache-2.0"
] | permissive | mmilenkoski/mlbench-core | 7c1dfcff17b7ad4d1c0f600c98424f1e1456c1bb | d8309379de3ad1731660f7963495f8d160bbdb2a | refs/heads/master | 2021-03-01T20:44:39.352068 | 2020-01-21T14:32:24 | 2020-01-21T14:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,661 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `mlbench_core.api` package."""
import pytest
import datetime
from mlbench_core.api import ApiClient
@pytest.fixture
def kubernetes_api_client_node_port(mocker):
    """CoreV1Api mock: one NodePort service (80 -> 12345) on an ExternalIP node."""
    api = mocker.patch('kubernetes.client.CoreV1Api')
    services = api.return_value.list_service_for_all_namespaces.return_value.items
    services.__getitem__.return_value.spec.type = "NodePort"
    services.__len__.return_value = 1
    services.__getitem__.return_value.spec.ports.__iter__.return_value = [
        mocker.MagicMock(node_port=12345, port=80)]
    api.return_value.list_namespaced_pod.return_value.items.__len__.return_value = 1
    addresses = api.return_value.read_node.return_value.status.addresses
    addresses.__len__.return_value = 1
    addresses.__iter__.return_value = [
        mocker.MagicMock(type="ExternalIP", address="1.1.1.1")]
    return api
@pytest.fixture
def kubernetes_api_client_node_port_internal(mocker):
    """CoreV1Api mock: one NodePort service reachable only via an InternalIP."""
    api = mocker.patch('kubernetes.client.CoreV1Api')
    services = api.return_value.list_service_for_all_namespaces.return_value.items
    services.__getitem__.return_value.spec.type = "NodePort"
    services.__len__.return_value = 1
    services.__getitem__.return_value.spec.ports.__iter__.return_value = [
        mocker.MagicMock(node_port=12345, port=80)]
    services.__getitem__.return_value.spec.ports.__len__.return_value = 1
    api.return_value.list_namespaced_pod.return_value.items.__len__.return_value = 1
    addresses = api.return_value.read_node.return_value.status.addresses
    addresses.__len__.return_value = 1
    addresses.__iter__.return_value = [
        mocker.MagicMock(type="InternalIP", address="1.1.1.1")]
    return api
@pytest.fixture
def kubernetes_api_client_clusterip(mocker):
    """CoreV1Api mock: one ClusterIP service at 1.1.1.1:12345."""
    api = mocker.patch('kubernetes.client.CoreV1Api')
    services = api.return_value.list_service_for_all_namespaces.return_value.items
    services.__getitem__.return_value.spec.type = "ClusterIP"
    services.__len__.return_value = 1
    services.__getitem__.return_value.spec.cluster_ip = "1.1.1.1"
    services.__getitem__.return_value.spec.ports.__getitem__.return_value.port = 12345
    return api
@pytest.fixture
def kubernetes_api_client_loadbalancer(mocker):
    """CoreV1Api mock: one LoadBalancer service with ingress IP 1.1.1.1."""
    api = mocker.patch('kubernetes.client.CoreV1Api')
    services = api.return_value.list_service_for_all_namespaces.return_value.items
    services.__getitem__.return_value.spec.type = "LoadBalancer"
    services.__len__.return_value = 1
    services.__getitem__.return_value.spec.ports.__getitem__.return_value.port = 12345
    services.__getitem__.return_value.status.load_balancer.ingress.ip = "1.1.1.1"
    return api
@pytest.fixture
def kubernetes_api_client_incluster(mocker):
    """CoreV1Api mock for in-cluster use: one pod with IP 1.1.1.1."""
    api = mocker.patch('kubernetes.client.CoreV1Api')
    pods = api.return_value.list_namespaced_pod.return_value.items
    pods.__len__.return_value = 1
    pods.__getitem__.return_value.status.pod_ip = "1.1.1.1"
    return api
def test_instantiation(mocker, kubernetes_api_client_node_port):
    """ApiClient as a context manager derives the NodePort endpoint URL."""
    mocker.patch('kubernetes.config.load_kube_config')
    with ApiClient(in_cluster=False) as api:
        assert api is not None
        assert api.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_nodeport_internal(mocker, kubernetes_api_client_node_port_internal):
    """A NodePort service with only an InternalIP still yields an endpoint."""
    mocker.patch('kubernetes.config.load_kube_config')
    api = ApiClient(in_cluster=False)
    assert api is not None
    assert api.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_url():
    """An explicit host:port URL bypasses all Kubernetes discovery."""
    api = ApiClient(url="1.1.1.1:12345")
    assert api is not None
    assert api.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_incluster(mocker, kubernetes_api_client_incluster):
    """In-cluster mode targets the pod IP on port 80."""
    mocker.patch('kubernetes.config.load_incluster_config')
    api = ApiClient(in_cluster=True)
    assert api is not None
    assert api.endpoint == "http://1.1.1.1:80/api/"
def test_instantiation_clusterip(mocker, kubernetes_api_client_clusterip):
    """A ClusterIP service's address and port form the endpoint."""
    mocker.patch('kubernetes.config.load_kube_config')
    api = ApiClient(in_cluster=False)
    assert api is not None
    assert api.endpoint == "http://1.1.1.1:12345/api/"
def test_instantiation_loadbalancer(mocker, kubernetes_api_client_loadbalancer):
    """A LoadBalancer service's ingress IP and port form the endpoint."""
    mocker.patch('kubernetes.config.load_kube_config')
    api = ApiClient(in_cluster=False)
    assert api is not None
    assert api.endpoint == "http://1.1.1.1:12345/api/"
def test_get_all_metrics(mocker, kubernetes_api_client_node_port):
    """get_all_metrics returns the future produced by the executor."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.get_all_metrics()
    assert future is not None
    assert future.result().json() == "a"
def test_get_run_metrics(mocker, kubernetes_api_client_node_port):
    """get_run_metrics forwards since/summarize and returns the future."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.get_run_metrics("1", since=datetime.datetime.now(), summarize=100)
    assert future is not None
    assert future.result().json() == "a"
def test_get_pod_metrics(mocker, kubernetes_api_client_node_port):
    """get_pod_metrics on a worker pod name returns the future."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.get_pod_metrics(
        "rel-mlbench-worker-0", since=datetime.datetime.now(), summarize=100)
    assert future is not None
    assert future.result().json() == "a"
def test_post_metrics(mocker, kubernetes_api_client_node_port):
    """post_metric submits the value and returns the future."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.post_metric("1", "loss", 10.0, cumulative=False)
    assert future is not None
    assert future.result().json() == "a"
def test_get_runs(mocker, kubernetes_api_client_node_port):
    """get_runs returns the future produced by the executor."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.get_runs()
    assert future is not None
    assert future.result().json() == "a"
def test_get_run(mocker, kubernetes_api_client_node_port):
    """get_run by id returns the future produced by the executor."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.get_run("1")
    assert future is not None
    assert future.result().json() == "a"
def test_create_run_official(mocker, kubernetes_api_client_node_port):
    """create_run with an official benchmark image returns the future."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.create_run(
        "test_run",
        5,
        num_cpus=4.1,
        max_bandwidth=10000,
        image='PyTorch Cifar-10 ResNet-20 Open-MPI')
    assert future is not None
    assert future.result().json() == "a"
def test_create_run_custom(mocker, kubernetes_api_client_node_port):
    """create_run with a custom image/command returns the future."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.create_run(
        "test_run",
        5,
        num_cpus=4.1,
        max_bandwidth=10000,
        custom_image_name="localhost:5000/mlbench_worker:latest",
        custom_image_command="/.openmpi/bin/mpirun /app/main.py",
        custom_image_all_nodes=False)
    assert future is not None
    assert future.result().json() == "a"
def test_get_worker_pods(mocker, kubernetes_api_client_node_port):
    """get_worker_pods returns the future produced by the executor."""
    mocker.patch('kubernetes.config.load_kube_config')
    executor = mocker.patch('concurrent.futures.ThreadPoolExecutor')
    executor.return_value.submit.return_value.result.return_value.json.return_value = "a"
    api = ApiClient(in_cluster=False)
    future = api.get_worker_pods()
    assert future is not None
    assert future.result().json() == "a"
"ralf.grubenmann@gmail.com"
] | ralf.grubenmann@gmail.com |
da211621da6e551f24f04cd2363e93f9caa9b3a0 | aaaaaad35d89034aebd53a46925ac24db8fafd67 | /chess/checkfunc.py | b700eda88fe6ff2d969d253adf94babd78b237ec | [] | no_license | SajadR2000/chess | 003179285d82d0b06fd61513060ab00c5e5224db | 0ee878c250b40829ae06db659765df33025e3d9f | refs/heads/master | 2022-09-21T13:10:15.288823 | 2022-08-31T07:39:34 | 2022-08-31T07:39:34 | 196,441,158 | 0 | 1 | null | 2020-08-04T10:34:28 | 2019-07-11T17:48:27 | Python | UTF-8 | Python | false | false | 16,811 | py | import Piece
import board
def searchpiece(x, y):
    """Return the piece standing on square (x, y), or 0 when none is there."""
    return next((piece for piece in Piece.pieceslist
                 if piece.currentx == x and piece.currenty == y), 0)
def searchsquare(x, y):
    """Return the board square object at (x, y), or 0 when not found."""
    return next((square for square in board.squareslist
                 if square.x == x and square.y == y), 0)
def check(color):
    """Return 0 (printing a message) if the `color` king is in check, else 1.

    The original version repeated the eight ray scans inline (~350 lines)
    and had a defect: after meeting an enemy piece that could NOT give
    check, the scan kept walking past it, so e.g. an enemy bishop hidden
    behind an enemy knight was falsely reported as check. Here any first
    piece on a ray terminates the scan.

    NOTE(review): enemy-king adjacency is not tested, matching the
    original behaviour -- legal positions never have adjacent kings.
    """
    if color == "black":
        king = Piece.kingb
        enemy = "white"
        # White pawns attack the black king from (x +/- 1, y + 1),
        # mirroring the pawn cases of the original diagonal scans.
        pawn_steps = ((1, 1), (-1, 1))
    elif color == "white":
        king = Piece.kingw
        enemy = "black"
        pawn_steps = ((-1, -1), (1, -1))
    else:
        # Unknown color: original fell through and returned None.
        return None
    kx = king.currentx
    ky = king.currenty
    # Pawns: only the two adjacent forward diagonals can deliver check.
    for dx, dy in pawn_steps:
        piece = searchpiece(kx + dx, ky + dy)
        if piece != 0 and piece.color == enemy and piece.ptype == "pawn":
            print(color + " is checked!")
            return 0
    # Diagonal rays: bishops and queens.
    for dx, dy in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
        if _first_piece_matches(kx, ky, dx, dy, enemy, ("queen", "bishop")):
            print(color + " is checked!")
            return 0
    # Straight rays: rooks and queens (original message has no "!").
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        if _first_piece_matches(kx, ky, dx, dy, enemy, ("queen", "rook")):
            print(color + " is checked")
            return 0
    # Knights: fixed offsets, no blocking possible; searchpiece returns 0
    # for off-board coordinates, so no bounds check is needed.
    for dx, dy in ((1, 2), (1, -2), (-1, 2), (-1, -2),
                   (2, 1), (2, -1), (-2, 1), (-2, -1)):
        piece = searchpiece(kx + dx, ky + dy)
        if piece != 0 and piece.color == enemy and piece.ptype == "knight":
            print(color + " is checked!")
            return 0
    return 1


def _first_piece_matches(kx, ky, dx, dy, enemy, checker_types):
    """Walk from the king along direction (dx, dy) on the 8x8 board.

    Return True iff the first piece reached is an enemy piece whose type
    is in `checker_types`; any other piece (either color) blocks the ray.
    """
    cx = kx + dx
    cy = ky + dy
    while 1 <= cx <= 8 and 1 <= cy <= 8:
        if searchsquare(cx, cy).state == "empty":
            cx += dx
            cy += dy
            continue
        piece = searchpiece(cx, cy)
        return piece.color == enemy and piece.ptype in checker_types
    return False
| [
"noreply@github.com"
] | SajadR2000.noreply@github.com |
9ca59564e469220e213a2fdc8a9aaeafe0aa7443 | 2a0670440263fd18454c38a4b0758ecc8edea480 | /bin/experiments.py | fb16c75a7fb72280a74258a687bed768ddc832f0 | [] | no_license | Foolius/recsyslab | 4776e79338d6edb9a84d910da05ef3f95baa5891 | b01698c180cb86baa97394f7b3b51e3c849847cb | refs/heads/master | 2020-12-24T14:52:57.761966 | 2014-01-28T14:21:36 | 2014-01-28T14:21:36 | 8,237,819 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,730 | py | import util.reader
# Benchmark script (Python 2 -- note the final `print s` statement): trains a
# collection of recommender models on the MovieLens "u.data" ratings file,
# evaluates each with several ranking metrics, and prints a LaTeX-style table.
# Statement order matters: splits and model training use fixed seeds, so any
# reordering changes the reported numbers.
r = util.reader.stringSepReader("u.data", "\t")
# Example lookup of the internal id for raw user "196" (value unused below).
internalID = r.getInternalUid("196")
import util.split
# Deterministic train/evaluation split (fixed seed for reproducibility).
trainingDict, evaluationDict = util.split.split(r.getR(), 1234567890)
# Metrics to run against every recommender; each entry is a callable.
testMetricsList = []
import util.test
testMetricsList.append(util.test.hitrate)
testMetricsList.append(util.test.precision)
testMetricsList.append(util.test.f1)
testMetricsList.append(util.test.mrhr)
testMetricsList.append(util.test.auc)
# Recommenders under test; each entry is a bound getRec callable.
recommenderList = []
import recommender.nonpersonalized
recommenderList.append(recommender.nonpersonalized.constant(
    trainingDict).getRec)
recommenderList.append(recommender.nonpersonalized.randomRec(
    trainingDict, 1234567890).getRec)
# The kNN models are trained on the matrix representation, which gets its own
# (separately seeded) train/evaluation split.
trainingMatrix, matrixEvaluationDict = (
    util.split.splitMatrix(r.getMatrix(), 123456789))
import recommender.knn
recommenderList.append(recommender.knn.itemKnn(trainingMatrix, 20).getRec)
recommenderList.append(recommender.knn.userKnn(trainingMatrix, 50).getRec)
import recommender.BPRMF
# Matrix factorisation trained with the BPR objective.
W, H = recommender.BPRMF.learnModel(r.getMaxUid(), r.getMaxIid(),
                                    0.01, 0.01, 0.01,
                                    # regularization parameter
                                    0.1,  # learning rate
                                    trainingDict,  # training dict
                                    150,  # number of features
                                    3,  # number of epochs
                                    r.numberOfTransactions)
import recommender.mf
BPRMF = recommender.mf.MFrec(W, H, trainingDict)
recommenderList.append(BPRMF.getRec)
import recommender.RankMFX
# RankMFX variant; W and H are intentionally reused/overwritten for each
# factorisation model, the fitted factors are captured inside MFrec.
W, H = recommender.RankMFX.learnModel(r.getMaxUid(), r.getMaxIid(),
                                      0.01, 0.01, 0.01,
                                      # regularization parameter
                                      0.1,  # learning rate
                                      trainingDict,  # training dict
                                      250,  # number of features
                                      5,  # number of epochs
                                      r.numberOfTransactions)
RankMFX = recommender.mf.MFrec(W, H, trainingDict)
recommenderList.append(RankMFX.getRec)
import recommender.svd
W, H = recommender.svd.learnModel(r.getMaxUid(), r.getMaxIid(),
                                  0.0002,  # learning rate
                                  trainingDict,  # training dict
                                  770,  # number of features
                                  40,  # number of epochs
                                  1000)  # number of iterations
svd = recommender.mf.MFrec(W, H, trainingDict)
recommenderList.append(svd.getRec)
import recommender.slopeone
slopeone = recommender.slopeone.slopeone(trainingDict)
recommenderList.append(slopeone.getRec)
# results[recommender][metric] -> score; -1 marks "not yet computed".
results = {}
for rec in recommenderList:
    results[rec] = {}
    for test in testMetricsList:
        results[rec][test] = -1
for rec in recommenderList:
    for test in testMetricsList:
        # auc receives the reader object as its third argument; every other
        # metric takes a top-N cutoff of 10.
        if not test.__name__ == 'auc':
            t = 10
        else:
            t = r
        # kNN recommenders were trained on the matrix split, so they are
        # evaluated against the matching matrix evaluation dict.
        if type(rec.__self__).__name__ == "userKnn" or type(
                rec.__self__).__name__ == "itemKnn":
            eva = matrixEvaluationDict
        else:
            eva = evaluationDict
        results[rec][test] = test(eva, rec, t)
# Build a LaTeX tabular body: a header row of metric names, then one row per
# recommender with each score truncated to at most 6 characters.
s = "recommender "
for test in testMetricsList:
    s += " & " + test.__name__
s += " \\\\\n"
for rec in recommenderList:
    s += type(rec.__self__).__name__
    for test in testMetricsList:
        s += " & " + str(results[rec][test])[0:6]
    s += " \\\\\n"
print s
| [
"Foolius@server.fake"
] | Foolius@server.fake |
ed3ef4adb8c67b6a07c36f3560134f91d7de49d8 | 6f74aa95c41b88b87cfb971f7d5f50584a14a7f3 | /1 course/Lab6(массивы)/Lab6(2)без дурака.py | 28e3e0730c1cd52ae17eebef996ac3236d8aba7f | [] | no_license | tursunovJr/bmstu-python | c70332f83174d99274f9e296c1cbaf040486d386 | 9899082013fa87699e2a683fe68c6b9c12156815 | refs/heads/master | 2022-05-22T10:20:56.201921 | 2020-04-25T21:23:56 | 2020-04-25T21:23:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | a = list(map(int, input('Введите элементы массива: ').split()))
# Count the negative elements of `a`.
#
# Bug fix: the original removed negatives from `a` while iterating over it,
# which skips the element following each removal (consecutive negatives are
# missed), and then printed len(a) -- the number of *remaining*, i.e.
# non-negative, elements -- under the "number of negative elements" label.
negatives = sum(1 for x in a if x < 0)
print('Количество отрицательных элементов: ', negatives)
| [
"mr.jasur09@mail.ru"
] | mr.jasur09@mail.ru |
71cf86c96f1ba847636eea61053d0a0c4d035bb1 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/desktopvirtualization/v20201102preview/host_pool.py | c86dab27732d65d459e1b272f33ec2f1fec05f50 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,255 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['HostPoolArgs', 'HostPool']
@pulumi.input_type
class HostPoolArgs:
    """Input-argument bag for the ``HostPool`` resource.

    Machine-generated by the Pulumi SDK generator (see the file header): the
    ``@pulumi.input_type`` decorator consumes the getter/setter pairs below,
    each of which simply proxies to ``pulumi.get``/``pulumi.set`` on the
    shared property bag.  Do not hand-edit the accessors.
    """
    def __init__(__self__, *,
                 host_pool_type: pulumi.Input[Union[str, 'HostPoolType']],
                 load_balancer_type: pulumi.Input[Union[str, 'LoadBalancerType']],
                 preferred_app_group_type: pulumi.Input[Union[str, 'PreferredAppGroupType']],
                 resource_group_name: pulumi.Input[str],
                 custom_rdp_property: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pool_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 max_session_limit: Optional[pulumi.Input[int]] = None,
                 personal_desktop_assignment_type: Optional[pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']]] = None,
                 registration_info: Optional[pulumi.Input['RegistrationInfoArgs']] = None,
                 ring: Optional[pulumi.Input[int]] = None,
                 sso_client_id: Optional[pulumi.Input[str]] = None,
                 sso_client_secret_key_vault_path: Optional[pulumi.Input[str]] = None,
                 sso_context: Optional[pulumi.Input[str]] = None,
                 sso_secret_type: Optional[pulumi.Input[Union[str, 'SSOSecretType']]] = None,
                 ssoadfs_authority: Optional[pulumi.Input[str]] = None,
                 start_vm_on_connect: Optional[pulumi.Input[bool]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 validation_environment: Optional[pulumi.Input[bool]] = None,
                 vm_template: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a HostPool resource.
        :param pulumi.Input[Union[str, 'HostPoolType']] host_pool_type: HostPool type for desktop.
        :param pulumi.Input[Union[str, 'LoadBalancerType']] load_balancer_type: The type of the load balancer.
        :param pulumi.Input[Union[str, 'PreferredAppGroupType']] preferred_app_group_type: The type of preferred application group type, default to Desktop Application Group
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] custom_rdp_property: Custom rdp property of HostPool.
        :param pulumi.Input[str] description: Description of HostPool.
        :param pulumi.Input[str] friendly_name: Friendly name of HostPool.
        :param pulumi.Input[str] host_pool_name: The name of the host pool within the specified resource group
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input[int] max_session_limit: The max session limit of HostPool.
        :param pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']] personal_desktop_assignment_type: PersonalDesktopAssignment type for HostPool.
        :param pulumi.Input['RegistrationInfoArgs'] registration_info: The registration info of HostPool.
        :param pulumi.Input[int] ring: The ring number of HostPool.
        :param pulumi.Input[str] sso_client_id: ClientId for the registered Relying Party used to issue WVD SSO certificates.
        :param pulumi.Input[str] sso_client_secret_key_vault_path: Path to Azure KeyVault storing the secret used for communication to ADFS.
        :param pulumi.Input[str] sso_context: Path to keyvault containing ssoContext secret.
        :param pulumi.Input[Union[str, 'SSOSecretType']] sso_secret_type: The type of single sign on Secret Type.
        :param pulumi.Input[str] ssoadfs_authority: URL to customer ADFS server for signing WVD SSO certificates.
        :param pulumi.Input[bool] start_vm_on_connect: The flag to turn on/off StartVMOnConnect feature.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[bool] validation_environment: Is validation environment.
        :param pulumi.Input[str] vm_template: VM template for sessionhosts configuration within hostpool.
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "host_pool_type", host_pool_type)
        pulumi.set(__self__, "load_balancer_type", load_balancer_type)
        pulumi.set(__self__, "preferred_app_group_type", preferred_app_group_type)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly supplied.
        if custom_rdp_property is not None:
            pulumi.set(__self__, "custom_rdp_property", custom_rdp_property)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if host_pool_name is not None:
            pulumi.set(__self__, "host_pool_name", host_pool_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if max_session_limit is not None:
            pulumi.set(__self__, "max_session_limit", max_session_limit)
        if personal_desktop_assignment_type is not None:
            pulumi.set(__self__, "personal_desktop_assignment_type", personal_desktop_assignment_type)
        if registration_info is not None:
            pulumi.set(__self__, "registration_info", registration_info)
        if ring is not None:
            pulumi.set(__self__, "ring", ring)
        if sso_client_id is not None:
            pulumi.set(__self__, "sso_client_id", sso_client_id)
        if sso_client_secret_key_vault_path is not None:
            pulumi.set(__self__, "sso_client_secret_key_vault_path", sso_client_secret_key_vault_path)
        if sso_context is not None:
            pulumi.set(__self__, "sso_context", sso_context)
        if sso_secret_type is not None:
            pulumi.set(__self__, "sso_secret_type", sso_secret_type)
        if ssoadfs_authority is not None:
            pulumi.set(__self__, "ssoadfs_authority", ssoadfs_authority)
        if start_vm_on_connect is not None:
            pulumi.set(__self__, "start_vm_on_connect", start_vm_on_connect)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if validation_environment is not None:
            pulumi.set(__self__, "validation_environment", validation_environment)
        if vm_template is not None:
            pulumi.set(__self__, "vm_template", vm_template)

    @property
    @pulumi.getter(name="hostPoolType")
    def host_pool_type(self) -> pulumi.Input[Union[str, 'HostPoolType']]:
        """
        HostPool type for desktop.
        """
        return pulumi.get(self, "host_pool_type")

    @host_pool_type.setter
    def host_pool_type(self, value: pulumi.Input[Union[str, 'HostPoolType']]):
        pulumi.set(self, "host_pool_type", value)

    @property
    @pulumi.getter(name="loadBalancerType")
    def load_balancer_type(self) -> pulumi.Input[Union[str, 'LoadBalancerType']]:
        """
        The type of the load balancer.
        """
        return pulumi.get(self, "load_balancer_type")

    @load_balancer_type.setter
    def load_balancer_type(self, value: pulumi.Input[Union[str, 'LoadBalancerType']]):
        pulumi.set(self, "load_balancer_type", value)

    @property
    @pulumi.getter(name="preferredAppGroupType")
    def preferred_app_group_type(self) -> pulumi.Input[Union[str, 'PreferredAppGroupType']]:
        """
        The type of preferred application group type, default to Desktop Application Group
        """
        return pulumi.get(self, "preferred_app_group_type")

    @preferred_app_group_type.setter
    def preferred_app_group_type(self, value: pulumi.Input[Union[str, 'PreferredAppGroupType']]):
        pulumi.set(self, "preferred_app_group_type", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="customRdpProperty")
    def custom_rdp_property(self) -> Optional[pulumi.Input[str]]:
        """
        Custom rdp property of HostPool.
        """
        return pulumi.get(self, "custom_rdp_property")

    @custom_rdp_property.setter
    def custom_rdp_property(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_rdp_property", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of HostPool.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of HostPool.
        """
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="hostPoolName")
    def host_pool_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the host pool within the specified resource group
        """
        return pulumi.get(self, "host_pool_name")

    @host_pool_name.setter
    def host_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_pool_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="maxSessionLimit")
    def max_session_limit(self) -> Optional[pulumi.Input[int]]:
        """
        The max session limit of HostPool.
        """
        return pulumi.get(self, "max_session_limit")

    @max_session_limit.setter
    def max_session_limit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_session_limit", value)

    @property
    @pulumi.getter(name="personalDesktopAssignmentType")
    def personal_desktop_assignment_type(self) -> Optional[pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']]]:
        """
        PersonalDesktopAssignment type for HostPool.
        """
        return pulumi.get(self, "personal_desktop_assignment_type")

    @personal_desktop_assignment_type.setter
    def personal_desktop_assignment_type(self, value: Optional[pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']]]):
        pulumi.set(self, "personal_desktop_assignment_type", value)

    @property
    @pulumi.getter(name="registrationInfo")
    def registration_info(self) -> Optional[pulumi.Input['RegistrationInfoArgs']]:
        """
        The registration info of HostPool.
        """
        return pulumi.get(self, "registration_info")

    @registration_info.setter
    def registration_info(self, value: Optional[pulumi.Input['RegistrationInfoArgs']]):
        pulumi.set(self, "registration_info", value)

    @property
    @pulumi.getter
    def ring(self) -> Optional[pulumi.Input[int]]:
        """
        The ring number of HostPool.
        """
        return pulumi.get(self, "ring")

    @ring.setter
    def ring(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ring", value)

    @property
    @pulumi.getter(name="ssoClientId")
    def sso_client_id(self) -> Optional[pulumi.Input[str]]:
        """
        ClientId for the registered Relying Party used to issue WVD SSO certificates.
        """
        return pulumi.get(self, "sso_client_id")

    @sso_client_id.setter
    def sso_client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sso_client_id", value)

    @property
    @pulumi.getter(name="ssoClientSecretKeyVaultPath")
    def sso_client_secret_key_vault_path(self) -> Optional[pulumi.Input[str]]:
        """
        Path to Azure KeyVault storing the secret used for communication to ADFS.
        """
        return pulumi.get(self, "sso_client_secret_key_vault_path")

    @sso_client_secret_key_vault_path.setter
    def sso_client_secret_key_vault_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sso_client_secret_key_vault_path", value)

    @property
    @pulumi.getter(name="ssoContext")
    def sso_context(self) -> Optional[pulumi.Input[str]]:
        """
        Path to keyvault containing ssoContext secret.
        """
        return pulumi.get(self, "sso_context")

    @sso_context.setter
    def sso_context(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sso_context", value)

    @property
    @pulumi.getter(name="ssoSecretType")
    def sso_secret_type(self) -> Optional[pulumi.Input[Union[str, 'SSOSecretType']]]:
        """
        The type of single sign on Secret Type.
        """
        return pulumi.get(self, "sso_secret_type")

    @sso_secret_type.setter
    def sso_secret_type(self, value: Optional[pulumi.Input[Union[str, 'SSOSecretType']]]):
        pulumi.set(self, "sso_secret_type", value)

    @property
    @pulumi.getter(name="ssoadfsAuthority")
    def ssoadfs_authority(self) -> Optional[pulumi.Input[str]]:
        """
        URL to customer ADFS server for signing WVD SSO certificates.
        """
        return pulumi.get(self, "ssoadfs_authority")

    @ssoadfs_authority.setter
    def ssoadfs_authority(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssoadfs_authority", value)

    @property
    @pulumi.getter(name="startVMOnConnect")
    def start_vm_on_connect(self) -> Optional[pulumi.Input[bool]]:
        """
        The flag to turn on/off StartVMOnConnect feature.
        """
        return pulumi.get(self, "start_vm_on_connect")

    @start_vm_on_connect.setter
    def start_vm_on_connect(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "start_vm_on_connect", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="validationEnvironment")
    def validation_environment(self) -> Optional[pulumi.Input[bool]]:
        """
        Is validation environment.
        """
        return pulumi.get(self, "validation_environment")

    @validation_environment.setter
    def validation_environment(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "validation_environment", value)

    @property
    @pulumi.getter(name="vmTemplate")
    def vm_template(self) -> Optional[pulumi.Input[str]]:
        """
        VM template for sessionhosts configuration within hostpool.
        """
        return pulumi.get(self, "vm_template")

    @vm_template.setter
    def vm_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vm_template", value)
class HostPool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
custom_rdp_property: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pool_name: Optional[pulumi.Input[str]] = None,
host_pool_type: Optional[pulumi.Input[Union[str, 'HostPoolType']]] = None,
load_balancer_type: Optional[pulumi.Input[Union[str, 'LoadBalancerType']]] = None,
location: Optional[pulumi.Input[str]] = None,
max_session_limit: Optional[pulumi.Input[int]] = None,
personal_desktop_assignment_type: Optional[pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']]] = None,
preferred_app_group_type: Optional[pulumi.Input[Union[str, 'PreferredAppGroupType']]] = None,
registration_info: Optional[pulumi.Input[pulumi.InputType['RegistrationInfoArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
ring: Optional[pulumi.Input[int]] = None,
sso_client_id: Optional[pulumi.Input[str]] = None,
sso_client_secret_key_vault_path: Optional[pulumi.Input[str]] = None,
sso_context: Optional[pulumi.Input[str]] = None,
sso_secret_type: Optional[pulumi.Input[Union[str, 'SSOSecretType']]] = None,
ssoadfs_authority: Optional[pulumi.Input[str]] = None,
start_vm_on_connect: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
validation_environment: Optional[pulumi.Input[bool]] = None,
vm_template: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents a HostPool definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] custom_rdp_property: Custom rdp property of HostPool.
:param pulumi.Input[str] description: Description of HostPool.
:param pulumi.Input[str] friendly_name: Friendly name of HostPool.
:param pulumi.Input[str] host_pool_name: The name of the host pool within the specified resource group
:param pulumi.Input[Union[str, 'HostPoolType']] host_pool_type: HostPool type for desktop.
:param pulumi.Input[Union[str, 'LoadBalancerType']] load_balancer_type: The type of the load balancer.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[int] max_session_limit: The max session limit of HostPool.
:param pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']] personal_desktop_assignment_type: PersonalDesktopAssignment type for HostPool.
:param pulumi.Input[Union[str, 'PreferredAppGroupType']] preferred_app_group_type: The type of preferred application group type, default to Desktop Application Group
:param pulumi.Input[pulumi.InputType['RegistrationInfoArgs']] registration_info: The registration info of HostPool.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[int] ring: The ring number of HostPool.
:param pulumi.Input[str] sso_client_id: ClientId for the registered Relying Party used to issue WVD SSO certificates.
:param pulumi.Input[str] sso_client_secret_key_vault_path: Path to Azure KeyVault storing the secret used for communication to ADFS.
:param pulumi.Input[str] sso_context: Path to keyvault containing ssoContext secret.
:param pulumi.Input[Union[str, 'SSOSecretType']] sso_secret_type: The type of single sign on Secret Type.
:param pulumi.Input[str] ssoadfs_authority: URL to customer ADFS server for signing WVD SSO certificates.
:param pulumi.Input[bool] start_vm_on_connect: The flag to turn on/off StartVMOnConnect feature.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[bool] validation_environment: Is validation environment.
:param pulumi.Input[str] vm_template: VM template for sessionhosts configuration within hostpool.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HostPoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a HostPool definition.
:param str resource_name: The name of the resource.
:param HostPoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
custom_rdp_property: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pool_name: Optional[pulumi.Input[str]] = None,
host_pool_type: Optional[pulumi.Input[Union[str, 'HostPoolType']]] = None,
load_balancer_type: Optional[pulumi.Input[Union[str, 'LoadBalancerType']]] = None,
location: Optional[pulumi.Input[str]] = None,
max_session_limit: Optional[pulumi.Input[int]] = None,
personal_desktop_assignment_type: Optional[pulumi.Input[Union[str, 'PersonalDesktopAssignmentType']]] = None,
preferred_app_group_type: Optional[pulumi.Input[Union[str, 'PreferredAppGroupType']]] = None,
registration_info: Optional[pulumi.Input[pulumi.InputType['RegistrationInfoArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
ring: Optional[pulumi.Input[int]] = None,
sso_client_id: Optional[pulumi.Input[str]] = None,
sso_client_secret_key_vault_path: Optional[pulumi.Input[str]] = None,
sso_context: Optional[pulumi.Input[str]] = None,
sso_secret_type: Optional[pulumi.Input[Union[str, 'SSOSecretType']]] = None,
ssoadfs_authority: Optional[pulumi.Input[str]] = None,
start_vm_on_connect: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
validation_environment: Optional[pulumi.Input[bool]] = None,
vm_template: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostPoolArgs.__new__(HostPoolArgs)
__props__.__dict__["custom_rdp_property"] = custom_rdp_property
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["host_pool_name"] = host_pool_name
if host_pool_type is None and not opts.urn:
raise TypeError("Missing required property 'host_pool_type'")
__props__.__dict__["host_pool_type"] = host_pool_type
if load_balancer_type is None and not opts.urn:
raise TypeError("Missing required property 'load_balancer_type'")
__props__.__dict__["load_balancer_type"] = load_balancer_type
__props__.__dict__["location"] = location
__props__.__dict__["max_session_limit"] = max_session_limit
__props__.__dict__["personal_desktop_assignment_type"] = personal_desktop_assignment_type
if preferred_app_group_type is None and not opts.urn:
raise TypeError("Missing required property 'preferred_app_group_type'")
__props__.__dict__["preferred_app_group_type"] = preferred_app_group_type
__props__.__dict__["registration_info"] = registration_info
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["ring"] = ring
__props__.__dict__["sso_client_id"] = sso_client_id
__props__.__dict__["sso_client_secret_key_vault_path"] = sso_client_secret_key_vault_path
__props__.__dict__["sso_context"] = sso_context
__props__.__dict__["sso_secret_type"] = sso_secret_type
__props__.__dict__["ssoadfs_authority"] = ssoadfs_authority
__props__.__dict__["start_vm_on_connect"] = start_vm_on_connect
__props__.__dict__["tags"] = tags
__props__.__dict__["validation_environment"] = validation_environment
__props__.__dict__["vm_template"] = vm_template
__props__.__dict__["application_group_references"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201102preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190123preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190123preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190924preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190924preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20191210preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20191210preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20200921preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20200921preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201019preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201019preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201110preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201110preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210114preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210114preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210201preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210201preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210309preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210309preview:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210401preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210401preview:HostPool"), 
pulumi.Alias(type_="azure-native:desktopvirtualization/v20210712:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210712:HostPool"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210903preview:HostPool"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210903preview:HostPool")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HostPool, __self__).__init__(
'azure-native:desktopvirtualization/v20201102preview:HostPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HostPool':
"""
Get an existing HostPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = HostPoolArgs.__new__(HostPoolArgs)
__props__.__dict__["application_group_references"] = None
__props__.__dict__["custom_rdp_property"] = None
__props__.__dict__["description"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["host_pool_type"] = None
__props__.__dict__["load_balancer_type"] = None
__props__.__dict__["location"] = None
__props__.__dict__["max_session_limit"] = None
__props__.__dict__["name"] = None
__props__.__dict__["personal_desktop_assignment_type"] = None
__props__.__dict__["preferred_app_group_type"] = None
__props__.__dict__["registration_info"] = None
__props__.__dict__["ring"] = None
__props__.__dict__["sso_client_id"] = None
__props__.__dict__["sso_client_secret_key_vault_path"] = None
__props__.__dict__["sso_context"] = None
__props__.__dict__["sso_secret_type"] = None
__props__.__dict__["ssoadfs_authority"] = None
__props__.__dict__["start_vm_on_connect"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["validation_environment"] = None
__props__.__dict__["vm_template"] = None
return HostPool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> pulumi.Output[Sequence[str]]:
"""
List of applicationGroup links.
"""
return pulumi.get(self, "application_group_references")
@property
@pulumi.getter(name="customRdpProperty")
def custom_rdp_property(self) -> pulumi.Output[Optional[str]]:
"""
Custom rdp property of HostPool.
"""
return pulumi.get(self, "custom_rdp_property")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of HostPool.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of HostPool.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="hostPoolType")
def host_pool_type(self) -> pulumi.Output[str]:
"""
HostPool type for desktop.
"""
return pulumi.get(self, "host_pool_type")
@property
@pulumi.getter(name="loadBalancerType")
def load_balancer_type(self) -> pulumi.Output[str]:
"""
The type of the load balancer.
"""
return pulumi.get(self, "load_balancer_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxSessionLimit")
def max_session_limit(self) -> pulumi.Output[Optional[int]]:
"""
The max session limit of HostPool.
"""
return pulumi.get(self, "max_session_limit")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="personalDesktopAssignmentType")
def personal_desktop_assignment_type(self) -> pulumi.Output[Optional[str]]:
"""
PersonalDesktopAssignment type for HostPool.
"""
return pulumi.get(self, "personal_desktop_assignment_type")
@property
@pulumi.getter(name="preferredAppGroupType")
def preferred_app_group_type(self) -> pulumi.Output[str]:
"""
The type of preferred application group type, default to Desktop Application Group
"""
return pulumi.get(self, "preferred_app_group_type")
@property
@pulumi.getter(name="registrationInfo")
def registration_info(self) -> pulumi.Output[Optional['outputs.RegistrationInfoResponse']]:
"""
The registration info of HostPool.
"""
return pulumi.get(self, "registration_info")
@property
@pulumi.getter
def ring(self) -> pulumi.Output[Optional[int]]:
"""
The ring number of HostPool.
"""
return pulumi.get(self, "ring")
@property
@pulumi.getter(name="ssoClientId")
def sso_client_id(self) -> pulumi.Output[Optional[str]]:
"""
ClientId for the registered Relying Party used to issue WVD SSO certificates.
"""
return pulumi.get(self, "sso_client_id")
@property
@pulumi.getter(name="ssoClientSecretKeyVaultPath")
def sso_client_secret_key_vault_path(self) -> pulumi.Output[Optional[str]]:
"""
Path to Azure KeyVault storing the secret used for communication to ADFS.
"""
return pulumi.get(self, "sso_client_secret_key_vault_path")
@property
@pulumi.getter(name="ssoContext")
def sso_context(self) -> pulumi.Output[Optional[str]]:
"""
Path to keyvault containing ssoContext secret.
"""
return pulumi.get(self, "sso_context")
@property
@pulumi.getter(name="ssoSecretType")
def sso_secret_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of single sign on Secret Type.
"""
return pulumi.get(self, "sso_secret_type")
@property
@pulumi.getter(name="ssoadfsAuthority")
def ssoadfs_authority(self) -> pulumi.Output[Optional[str]]:
"""
URL to customer ADFS server for signing WVD SSO certificates.
"""
return pulumi.get(self, "ssoadfs_authority")
@property
@pulumi.getter(name="startVMOnConnect")
def start_vm_on_connect(self) -> pulumi.Output[Optional[bool]]:
"""
The flag to turn on/off StartVMOnConnect feature.
"""
return pulumi.get(self, "start_vm_on_connect")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="validationEnvironment")
def validation_environment(self) -> pulumi.Output[Optional[bool]]:
"""
Is validation environment.
"""
return pulumi.get(self, "validation_environment")
@property
@pulumi.getter(name="vmTemplate")
def vm_template(self) -> pulumi.Output[Optional[str]]:
"""
VM template for sessionhosts configuration within hostpool.
"""
return pulumi.get(self, "vm_template")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
ea921a941fb058f4fb9cd37d24ba7abe103c2f09 | d52601b4998555131244532a4210c6df4100a308 | /env/bin/celeryd | 21e034cbbeb7990f3671ecc99893ae5ba94acef4 | [] | no_license | yuyude54/axf1813 | d87bd0d5b4adeafff55949d4ce917b525afa60e3 | 96db328a7b85ba1a19337f8d7c290ca270fefdd6 | refs/heads/master | 2020-04-07T08:39:25.702588 | 2018-11-19T13:19:28 | 2018-11-19T13:19:28 | 158,222,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/home/yubin/axf1813/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from celery.__main__ import _compat_worker
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(_compat_worker())
| [
"2435361886@qq.com"
] | 2435361886@qq.com | |
971c4baea9d08b0ddc5e90e340dd81be98b65aa9 | d82ac0c5732ffb474dd10ee22fd2fad6f0095760 | /create_data.py | e60cafd93ab3a7a4a1079c026fc9b1bfe108884f | [] | no_license | arp1561/gender-recognition-opencv-python | b23d3b6f5bce5d4061c93d9ef5835ece56e2571e | ffb9eea844118855a49e85ef23924394a5c154b7 | refs/heads/master | 2021-01-13T11:57:26.317109 | 2017-02-12T08:04:29 | 2017-02-12T08:04:29 | 81,705,351 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | import pandas as pd
from os import listdir
from os.path import isfile, join
import numpy
import cv2
csv = pd.read_csv("data_path_female.txt",delim_whitespace="",header=None)
csv1 = pd.read_csv("data_path_male.txt",delim_whitespace="",header=None)
images_path,labels = csv[0],csv[1]
image_list=[]
label_list=[]
for i in range(len(labels)):
image = cv2.imread(images_path[i],0)
res = cv2.resize(image,(300,300))
image_list.append(res)
label_list.append(int(labels[i]))
images_path,labels = csv1[0],csv1[1]
for i in range(len(labels)):
image = cv2.imread(images_path[i],0)
res = cv2.resize(image,(300,300))
image_list.append(res)
label_list.append(int(labels[i]))
final_image_list = numpy.asarray(image_list)
final_label_list = numpy.asarray(label_list)
f = file("images.bin","wb")
g = file("labels.bin","wb")
numpy.save(f,final_image_list)
numpy.save(g,final_label_list)
| [
"joshiarpit2@gmail.com"
] | joshiarpit2@gmail.com |
49d37cc7feb53eca058890a9769b9f2baf24e94b | 560822e14d86d3748b708c674bfba364f469a885 | /nmap.py | 6bef89dd78e4ece94516528dfc72b778c51edd40 | [] | no_license | Moein-Ataei/Website-Scanner | 37a3f2095285e0c24b51a2b323ad37be3987daa7 | 99fd3d91858a76485d34f230e87359c3e795961f | refs/heads/master | 2020-12-08T15:01:46.141685 | 2020-01-10T09:20:52 | 2020-01-10T09:20:52 | 233,011,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import os
def get_nmap(option, ip):
command = f"nmap {option} {ip}"
process = os.popen(command)
result = str(process.read())
return result
# print(get_nmap('-F', 'localhost')) | [
"moein.ataei@gmail.com"
] | moein.ataei@gmail.com |
92fb28f8d18a6dbf993bb9a9bced8f2b69a508ca | 10c2ba04e502004e2df0dfb6c70fb83daccc1b30 | /Django/example/05-choice-field/main/editors/forms.py | eff290bbaf2f5b25b5877fc758cf4ca6d68ec0d7 | [] | no_license | yakenohara/KnowHow | 5e79d0015220524f98d9f7c767d2a727534b612d | ccd2aed2d4bf9363136aa30778b5adf382b25c4d | refs/heads/master | 2023-04-05T09:37:14.612262 | 2023-03-26T05:56:40 | 2023-03-26T05:56:40 | 136,476,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from django import forms
from .models import Editor
class EditorEditForm(forms.ModelForm):
class Meta:
model = Editor
fields = (
'name',
'sex',
)
| [
"38835246+yakenohara@users.noreply.github.com"
] | 38835246+yakenohara@users.noreply.github.com |
1f0a1563ab3144ee7205b5b9bead598f298ef4cb | 3aa698ea22a7c2328f8c3302d603fa2bf6825187 | /evennia/evennia/utils/inlinefuncs.py | b548d317d2f7ae738e924c8bcb84860ff9775166 | [
"BSD-3-Clause"
] | permissive | frastlin/iacs-map | 19a28c9eefd0237fc1c1fcc6a5d87b7f592e0b43 | a8b4f740ae3d94a0e1fc51e57475080e4bc97155 | refs/heads/master | 2020-03-21T16:31:55.356494 | 2018-12-15T21:15:40 | 2018-12-15T21:15:40 | 138,775,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,220 | py | """
Inline functions (nested form).
This parser accepts nested inlinefunctions on the form
```
$funcname(arg, arg, ...)
```
embedded in any text where any arg can be another $funcname{} call.
This functionality is turned off by default - to activate,
`settings.INLINEFUNC_ENABLED` must be set to `True`.
Each token starts with "$funcname(" where there must be no space
between the $funcname and (. It ends with a matched ending parentesis.
")".
Inside the inlinefunc definition, one can use `\` to escape. This is
mainly needed for escaping commas in flowing text (which would
otherwise be interpreted as an argument separator), or to escape `}`
when not intended to close the function block. Enclosing text in
matched `\"\"\"` (triple quotes) or `'''` (triple single-quotes) will
also escape *everything* within without needing to escape individual
characters.
The available inlinefuncs are defined as global-level functions in
modules defined by `settings.INLINEFUNC_MODULES`. They are identified
by their function name (and ignored if this name starts with `_`). They
should be on the following form:
```python
def funcname (*args, **kwargs):
# ...
```
Here, the arguments given to `$funcname(arg1,arg2)` will appear as the
`*args` tuple. This will be populated by the arguments given to the
inlinefunc in-game - the only part that will be available from
in-game. `**kwargs` are not supported from in-game but are only used
internally by Evennia to make details about the caller available to
the function. The kwarg passed to all functions is `session`, the
Sessionobject for the object seeing the string. This may be `None` if
the string is sent to a non-puppetable object. The inlinefunc should
never raise an exception.
There are two reserved function names:
- "nomatch": This is called if the user uses a functionname that is
not registered. The nomatch function will get the name of the
not-found function as its first argument followed by the normal
arguments to the given function. If not defined the default effect is
to print `<UNKNOWN>` to replace the unknown function.
- "stackfull": This is called when the maximum nested function stack is reached.
When this happens, the original parsed string is returned and the result of
the `stackfull` inlinefunc is appended to the end. By default this is an
error message.
Error handling:
Syntax errors, notably not completely closing all inlinefunc
blocks, will lead to the entire string remaining unparsed.
"""
import re
from django.conf import settings
from evennia.utils import utils
# example/testing inline functions
def pad(*args, **kwargs):
"""
Inlinefunc. Pads text to given width.
Args:
text (str, optional): Text to pad.
width (str, optional): Will be converted to integer. Width
of padding.
align (str, optional): Alignment of padding; one of 'c', 'l' or 'r'.
fillchar (str, optional): Character used for padding. Defaults to a space.
Kwargs:
session (Session): Session performing the pad.
Example:
`$pad(text, width, align, fillchar)`
"""
text, width, align, fillchar = "", 78, 'c', ' '
nargs = len(args)
if nargs > 0:
text = args[0]
if nargs > 1:
width = int(args[1]) if args[1].strip().isdigit() else 78
if nargs > 2:
align = args[2] if args[2] in ('c', 'l', 'r') else 'c'
if nargs > 3:
fillchar = args[3]
return utils.pad(text, width=width, align=align, fillchar=fillchar)
def crop(*args, **kwargs):
"""
Inlinefunc. Crops ingoing text to given widths.
Args:
text (str, optional): Text to crop.
width (str, optional): Will be converted to an integer. Width of
crop in characters.
suffix (str, optional): End string to mark the fact that a part
of the string was cropped. Defaults to `[...]`.
Kwargs:
session (Session): Session performing the crop.
Example:
`$crop(text, width=78, suffix='[...]')`
"""
text, width, suffix = "", 78, "[...]"
nargs = len(args)
if nargs > 0:
text = args[0]
if nargs > 1:
width = int(args[1]) if args[1].strip().isdigit() else 78
if nargs > 2:
suffix = args[2]
return utils.crop(text, width=width, suffix=suffix)
def clr(*args, **kwargs):
"""
Inlinefunc. Colorizes nested text.
Args:
startclr (str, optional): An ANSI color abbreviation without the
prefix `|`, such as `r` (red foreground) or `[r` (red background).
text (str, optional): Text
endclr (str, optional): The color to use at the end of the string. Defaults
to `|n` (reset-color).
Kwargs:
session (Session): Session object triggering inlinefunc.
Example:
`$clr(startclr, text, endclr)`
"""
text = ""
nargs = len(args)
if nargs > 0:
color = args[0].strip()
if nargs > 1:
text = args[1]
text = "|" + color + text
if nargs > 2:
text += "|" + args[2].strip()
else:
text += "|n"
return text
# we specify a default nomatch function to use if no matching func was
# found. This will be overloaded by any nomatch function defined in
# the imported modules.
_INLINE_FUNCS = {"nomatch": lambda *args, **kwargs: "<UKNOWN>",
"stackfull": lambda *args, **kwargs: "\n (not parsed: inlinefunc stack size exceeded.)"}
# load custom inline func modules.
for module in utils.make_iter(settings.INLINEFUNC_MODULES):
try:
_INLINE_FUNCS.update(utils.callables_from_module(module))
except ImportError as err:
if module == "server.conf.inlinefuncs":
# a temporary warning since the default module changed name
raise ImportError("Error: %s\nPossible reason: mygame/server/conf/inlinefunc.py should "
"be renamed to mygame/server/conf/inlinefuncs.py (note the S at the end)." % err)
else:
raise
# remove the core function if we include examples in this module itself
#_INLINE_FUNCS.pop("inline_func_parse", None)
# The stack size is a security measure. Set to <=0 to disable.
try:
_STACK_MAXSIZE = settings.INLINEFUNC_STACK_MAXSIZE
except AttributeError:
_STACK_MAXSIZE = 20
# regex definitions
_RE_STARTTOKEN = re.compile(r"(?<!\\)\$(\w+)\(") # unescaped $funcname{ (start of function call)
# note: this regex can be experimented with at https://regex101.com/r/kGR3vE/1
_RE_TOKEN = re.compile(r"""
(?<!\\)\'\'\'(?P<singlequote>.*?)(?<!\\)\'\'\'| # single-triplets escape all inside
(?<!\\)\"\"\"(?P<doublequote>.*?)(?<!\\)\"\"\"| # double-triplets escape all inside
(?P<comma>(?<!\\)\,)| # , (argument sep)
(?P<end>(?<!\\)\))| # ) (end of func call)
(?P<start>(?<!\\)\$\w+\()| # $funcname (start of func call)
(?P<escaped> # escaped tokens to re-insert sans backslash
\\\'|\\\"|\\\)|\\\$\w+\()|
(?P<rest> # everything else to re-insert verbatim
\$(?!\w+\()|\'{1}|\"{1}|\\{1}|[^),$\'\"\\]+)""",
re.UNICODE | re.IGNORECASE | re.VERBOSE | re.DOTALL)
# Cache for function lookups.
_PARSING_CACHE = utils.LimitedSizeOrderedDict(size_limit=1000)
class ParseStack(list):
"""
Custom stack that always concatenates strings together when the
strings are added next to one another. Tuples are stored
separately and None is used to mark that a string should be broken
up into a new chunk. Below is the resulting stack after separately
appending 3 strings, None, 2 strings, a tuple and finally 2
strings:
[string + string + string,
None
string + string,
tuple,
string + string]
"""
def __init__(self, *args, **kwargs):
super(ParseStack, self).__init__(*args, **kwargs)
# always start stack with the empty string
list.append(self, "")
# indicates if the top of the stack is a string or not
self._string_last = True
def __eq__(self, other):
return (super(ParseStack).__eq__(other) and
hasattr(other, "_string_last") and self._string_last == other._string_last)
def __ne__(self, other):
return not self.__eq__(other)
def append(self, item):
"""
The stack will merge strings, add other things as normal
"""
if isinstance(item, basestring):
if self._string_last:
self[-1] += item
else:
list.append(self, item)
self._string_last = True
else:
# everything else is added as normal
list.append(self, item)
self._string_last = False
class InlinefuncError(RuntimeError):
pass
def parse_inlinefunc(string, strip=False, **kwargs):
"""
Parse the incoming string.
Args:
string (str): The incoming string to parse.
strip (bool, optional): Whether to strip function calls rather than
execute them.
Kwargs:
session (Session): This is sent to this function by Evennia when triggering
it. It is passed to the inlinefunc.
kwargs (any): All other kwargs are also passed on to the inlinefunc.
"""
global _PARSING_CACHE
if string in _PARSING_CACHE:
# stack is already cached
stack = _PARSING_CACHE[string]
elif not _RE_STARTTOKEN.search(string):
# if there are no unescaped start tokens at all, return immediately.
return string
else:
# no cached stack; build a new stack and continue
stack = ParseStack()
# process string on stack
ncallable = 0
for match in _RE_TOKEN.finditer(string):
gdict = match.groupdict()
if gdict["singlequote"]:
stack.append(gdict["singlequote"])
elif gdict["doublequote"]:
stack.append(gdict["doublequote"])
elif gdict["end"]:
if ncallable <= 0:
stack.append(")")
continue
args = []
while stack:
operation = stack.pop()
if callable(operation):
if not strip:
stack.append((operation, [arg for arg in reversed(args)]))
ncallable -= 1
break
else:
args.append(operation)
elif gdict["start"]:
funcname = _RE_STARTTOKEN.match(gdict["start"]).group(1)
try:
# try to fetch the matching inlinefunc from storage
stack.append(_INLINE_FUNCS[funcname])
except KeyError:
stack.append(_INLINE_FUNCS["nomatch"])
stack.append(funcname)
ncallable += 1
elif gdict["escaped"]:
# escaped tokens
token = gdict["escaped"].lstrip("\\")
stack.append(token)
elif gdict["comma"]:
if ncallable > 0:
# commas outside strings and inside a callable are
# used to mark argument separation - we use None
# in the stack to indicate such a separation.
stack.append(None)
else:
# no callable active - just a string
stack.append(",")
else:
# the rest
stack.append(gdict["rest"])
if ncallable > 0:
# this means not all inlinefuncs were complete
return string
if _STACK_MAXSIZE > 0 and _STACK_MAXSIZE < len(stack):
# if stack is larger than limit, throw away parsing
return string + gdict["stackfull"](*args, **kwargs)
else:
# cache the stack
_PARSING_CACHE[string] = stack
# run the stack recursively
def _run_stack(item, depth=0):
retval = item
if isinstance(item, tuple):
if strip:
return ""
else:
func, arglist = item
args = [""]
for arg in arglist:
if arg is None:
# an argument-separating comma - start a new arg
args.append("")
else:
# all other args should merge into one string
args[-1] += _run_stack(arg, depth=depth + 1)
# execute the inlinefunc at this point or strip it.
kwargs["inlinefunc_stack_depth"] = depth
retval = "" if strip else func(*args, **kwargs)
return utils.to_str(retval, force_string=True)
# execute the stack from the cache
return "".join(_run_stack(item) for item in _PARSING_CACHE[string])
#
# Nick templating
#
"""
This supports the use of replacement templates in nicks:
This happens in two steps:
1) The user supplies a template that is converted to a regex according
to the unix-like templating language.
2) This regex is tested against nicks depending on which nick replacement
strategy is considered (most commonly inputline).
3) If there is a template match and there are templating markers,
these are replaced with the arguments actually given.
@desc $1 $2 $3
This will be converted to the following regex:
\@desc (?P<1>\w+) (?P<2>\w+) $(?P<3>\w+)
Supported template markers (through fnmatch)
* matches anything (non-greedy) -> .*?
? matches any single character ->
[seq] matches any entry in sequence
[!seq] matches entries not in sequence
Custom arg markers
$N argument position (1-99)
"""
import fnmatch
_RE_NICK_ARG = re.compile(r"\\(\$)([1-9][0-9]?)")
_RE_NICK_TEMPLATE_ARG = re.compile(r"(\$)([1-9][0-9]?)")
_RE_NICK_SPACE = re.compile(r"\\ ")
class NickTemplateInvalid(ValueError):
pass
def initialize_nick_templates(in_template, out_template):
"""
Initialize the nick templates for matching and remapping a string.
Args:
in_template (str): The template to be used for nick recognition.
out_template (str): The template to be used to replace the string
matched by the in_template.
Returns:
regex (regex): Regex to match against strings
template (str): Template with markers {arg1}, {arg2}, etc for
replacement using the standard .format method.
Raises:
NickTemplateInvalid: If the in/out template does not have a matching
number of $args.
"""
# create the regex for in_template
regex_string = fnmatch.translate(in_template)
n_inargs = len(_RE_NICK_ARG.findall(regex_string))
regex_string = _RE_NICK_SPACE.sub("\s+", regex_string)
regex_string = _RE_NICK_ARG.sub(lambda m: "(?P<arg%s>.+?)" % m.group(2), regex_string)
# create the out_template
template_string = _RE_NICK_TEMPLATE_ARG.sub(lambda m: "{arg%s}" % m.group(2), out_template)
# validate the tempaltes - they should at least have the same number of args
n_outargs = len(_RE_NICK_TEMPLATE_ARG.findall(out_template))
if n_inargs != n_outargs:
print n_inargs, n_outargs
raise NickTemplateInvalid
return re.compile(regex_string), template_string
def parse_nick_template(string, template_regex, outtemplate):
"""
Parse a text using a template and map it to another template
Args:
string (str): The input string to processj
template_regex (regex): A template regex created with
initialize_nick_template.
outtemplate (str): The template to which to map the matches
produced by the template_regex. This should have $1, $2,
etc to match the regex.
"""
match = template_regex.match(string)
if match:
return outtemplate.format(**match.groupdict())
return string
| [
"brandonkeithbiggs@gmail.com"
] | brandonkeithbiggs@gmail.com |
1b167d908b1dbbae705befe87ecfb250c84f4b2a | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14606.py | 992a0ac036c6de7ae67a625b61486cbe6bb0fc17 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | # keep a Python script running even when the user tries to exit out
.pyw
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
4a2435dd1d2068244ea66fe23eb0cfd0985512d4 | 86fff122087c37b56fb70bdd114b83b1f121b45d | /glazing/migrations/0015_auto_20161113_2246.py | 59aaa2a66942b7450ab120ab423a99b9e0063a50 | [] | no_license | frankDock/pg_calculators | 034e34127a64fba806b20efac724ac7befaf64ea | 0b367b8b9ce4822c26974dda56e8e8cc4bd77187 | refs/heads/master | 2021-01-12T09:58:30.272561 | 2016-12-21T14:42:47 | 2016-12-21T14:42:47 | 76,320,179 | 0 | 0 | null | 2016-12-13T03:46:05 | 2016-12-13T03:46:05 | null | UTF-8 | Python | false | false | 2,717 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-13 22:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('glazing', '0014_auto_20161113_2227'),
]
operations = [
migrations.CreateModel(
name='SHGC',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('SHGC', models.FloatField(default=0)),
('frame_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='glazing.Frame')),
],
options={
'verbose_name': 'Frame/Glass SHGC',
'verbose_name_plural': 'Frame/Glass SHGC',
},
),
migrations.CreateModel(
name='U_Value',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('U_value', models.FloatField(default=0)),
('frame_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='glazing.Frame')),
],
options={
'verbose_name': 'Frame/Glass U-value',
'verbose_name_plural': 'Frame/Glass U-values',
},
),
migrations.RemoveField(
model_name='frame_products_join_tbl',
name='frame_id',
),
migrations.RemoveField(
model_name='frame_products_join_tbl',
name='product_id',
),
migrations.AlterModelOptions(
name='glass',
options={'verbose_name': 'Glass Product', 'verbose_name_plural': 'Glass Products'},
),
migrations.AlterModelOptions(
name='glass_category',
options={'verbose_name': 'Glass Product Category', 'verbose_name_plural': 'Glass Product Categories'},
),
migrations.AlterModelOptions(
name='glazing_project',
options={'verbose_name': 'Glazing Project', 'verbose_name_plural': 'Glazing Projects'},
),
migrations.DeleteModel(
name='Frame_Products_Join_Tbl',
),
migrations.AddField(
model_name='u_value',
name='product_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='glazing.Glass'),
),
migrations.AddField(
model_name='shgc',
name='product_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='glazing.Glass'),
),
]
| [
"alon@schematech.co.za"
] | alon@schematech.co.za |
918bf5948ed9490633028cdeed9ea000c19a2374 | 560af8e32aa77bfb6c5d837e93d0dc2dd7c9142c | /client_project/wsgi.py | fec9bea087df286ce54366f90bbc4c61b4f9094f | [] | no_license | anirudhasj441/Fake-Api | c7a4aef6bf9eadc16709fe10f4cd3b526664cd4e | 86b6e496cbecf314ef6e6366a84b8f93ce7c775b | refs/heads/master | 2023-06-26T00:00:00.914657 | 2021-07-29T06:35:39 | 2021-07-29T06:35:39 | 390,625,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for client_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'client_project.settings')
application = get_wsgi_application()
| [
"anirudhasj441@gmail.com"
] | anirudhasj441@gmail.com |
e4a88b54323db57cc2d1b09b8b6560d91291a6cd | 0bfb55b41282803db96b90e7bba73d86be7e8553 | /administration/migrations/0011_auto_20161109_1845.py | fad5aaa70cdd3596ffe76dba25b75bc51d8583f1 | [
"MIT"
] | permissive | OpenFurry/honeycomb | eebf2272f8ae95eb686ad129555dbebcf1adcd63 | c34eeaf22048948fedcae860db7c25d41b51ff48 | refs/heads/master | 2021-01-11T01:52:40.978564 | 2016-12-29T18:08:38 | 2016-12-29T18:08:38 | 70,649,821 | 2 | 2 | null | 2016-12-29T18:08:39 | 2016-10-12T01:22:38 | Python | UTF-8 | Python | false | false | 465 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-09 18:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('administration', '0010_auto_20161109_0552'),
]
operations = [
migrations.AlterField(
model_name='ban',
name='end_date',
field=models.DateField(blank=True, null=True),
),
]
| [
"madison.scott-clary@canonical.com"
] | madison.scott-clary@canonical.com |
d1daee1c0acbc6e0cea9838cfe90cf7fead22e95 | 369cef5dae249cad45ef8596fa074d21a340487e | /client1.py | fd97e2a884947f6a6ad245e043fba4ab2d564f7a | [] | no_license | Maimoi/Socket-Programming-in-Python | a2122804383f5683fb7b5f55183e77790ff558b4 | cf4a018d9b83c8b5fb60a5302cf8d3555148befd | refs/heads/master | 2022-12-30T06:06:31.881250 | 2020-10-08T21:35:01 | 2020-10-08T21:35:01 | 302,271,274 | 0 | 0 | null | 2020-10-08T21:35:02 | 2020-10-08T08:02:18 | Python | UTF-8 | Python | false | false | 505 | py | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostbyname('localhost'), 1024))
complete_info = ''
while True:
#take only 7 bytes of the message from the server
message = s.recv(7)
#if the length of the message is less than or equal to 0, jsut break the loop
if len(message) <= 0 :
break
#add the message to the complete information string
complete_info += message.decode("utf-8")
#finally print the string
print(complete_info)
| [
"himohit1990@gmail.com"
] | himohit1990@gmail.com |
32fa0e6e401ac5dbe8faf7da29ea2fd5c036a071 | 906652e6b39282668cd849f707454ac6e43952f9 | /inclass/quiz1.py | d36b6b754abda32499c73a7d049ec76b155f4a3e | [] | no_license | keenanzucker/SoftDesSp15 | 4de8d332be38ffed9d3a16fa2c02ac6f4c8fd038 | 5b6a9fee718107570b8c2f4ae1200192131f45a5 | refs/heads/master | 2021-01-20T23:36:47.714656 | 2015-04-02T17:36:56 | 2015-04-02T17:36:56 | 30,047,147 | 0 | 0 | null | 2015-01-29T23:46:20 | 2015-01-29T23:46:19 | null | UTF-8 | Python | false | false | 612 | py |
def filter_out_negative_numbers(inputList):
finalList = [];
for i in inputList:
if i >= 0:
finalList.append(i)
return finalList
#filter_out_negative_numbers([-2.0, 5, 10, -100, 5])
def in_language(word):
if word == '':
return True
numA = 0
numB = 0
for i in word:
if i == 'a':
numA += 1
print numA
elif i == 'b':
numB += 1
else:
return False
if numA == numB:
return True
else:
return False
#print in_language('aaabb')
def fib(n):
if n == 0:
return 0
if n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
for i in range (0,10):
print fib(i)
| [
"Keenan.Zucker@students.olin.edu"
] | Keenan.Zucker@students.olin.edu |
8e71cbd0f6db70a670e50a0557ef9547ac264f2d | a0c076b23d0462d038e7986497a47c2f602b1301 | /locations/models.py | d01dcf811b9659dc002650ce46c71ba8aec3b357 | [] | no_license | georgemitchell/wedding | 2159b0d2e11d50a3dbb8087d34d3fdbb8f3f0e05 | 192ec741cda49e27395d2b01f7a919ab9bbadd15 | refs/heads/master | 2022-08-28T12:13:14.332909 | 2021-06-06T05:36:52 | 2021-06-06T05:36:52 | 74,855,909 | 0 | 0 | null | 2022-08-11T14:55:32 | 2016-11-26T23:19:33 | JavaScript | UTF-8 | Python | false | false | 2,593 | py | from django.db import models
class Location:
class OldMission:
id = 1
name = "Old Mission"
static_image = "om_square.jpg"
class TraverseCity:
id = 2
name = "Traverse City"
static_image = "tc_square.jpg"
class Leelanau:
id = 3
name = "Leelanau"
static_image = "l_square.jpg"
class Category(object):
class Base:
def __init__(self):
self.destinations = []
class Hotel(Base):
id = 1
name = "Hotels"
icon = "hotel"
class BedAndBreakfast(Base):
id = 2
name = "Bed & Breakfasts"
icon = "bandb"
class Winery(Base):
id = 3
name = "Wineries"
icon = "wineglass"
class Shop(Base):
id = 4
name = "Shopping"
icon = "shop"
class Food(Base):
id = 5
name = "Food"
icon = "food"
class Attraction(Base):
id = 6
name = "Attractions"
icon = "attraction"
def __init__(self):
self.categories = {}
def append_result(self, result):
if self.categories[result.category] is None:
pass
CATEGORY_LOOKUPS = {c.id: c for c in [Category.Hotel, Category.BedAndBreakfast, Category.Winery, Category.Shop, Category.Food, Category.Attraction]}
class Destination(models.Model):
LOCATIONS = (
(0, "All Locations"),
(Location.OldMission.id, Location.OldMission.name),
(Location.TraverseCity.id, Location.TraverseCity.name),
(Location.Leelanau.id, Location.Leelanau.name),
)
CATEGORIES = (
(0, "All Categories"),
(Category.Hotel.id, Category.Hotel.name),
(Category.BedAndBreakfast.id, Category.BedAndBreakfast.name),
(Category.Winery.id, Category.Winery.name),
(Category.Shop.id, Category.Shop.name),
(Category.Food.id, Category.Food.name),
(Category.Attraction.id, Category.Attraction.name)
)
name = models.CharField(max_length=256)
address = models.CharField(max_length=256)
location = models.SmallIntegerField(choices=LOCATIONS)
category = models.SmallIntegerField(choices=CATEGORIES)
static_image = models.CharField(max_length=128)
description = models.TextField()
url = models.URLField(null=True, blank=True)
latitude = models.DecimalField(max_digits=10, decimal_places=7, null=True, blank=True)
longitude = models.DecimalField(max_digits=10, decimal_places=7, null=True, blank=True)
def icon(self):
return CATEGORY_LOOKUPS[self.category].icon
| [
"george@circos.com"
] | george@circos.com |
0aef257107cc557b028a68b2632535b522c6d2fe | 9107d8bc72e45c0c6e7942e1abb47a2c7cee38af | /mplfinance/_version.py | 109f75a8d782b9e088db1c1d9c6c7dbf09611712 | [] | no_license | rbd72369/tradier-options-plotter | ef549067f2a8511e538dbfe966078b5314737a8a | 55125e37103de2d33d65f6d52fb7baea42a782fa | refs/heads/master | 2023-02-13T19:00:18.628193 | 2021-01-09T22:07:43 | 2021-01-09T22:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
version_info = (0, 12, 7, 'alpha', 1)
_specifier_ = {'alpha': 'a','beta': 'b','candidate': 'rc','final': ''}
__version__ = '%s.%s.%s%s'%(version_info[0], version_info[1], version_info[2],
'' if version_info[3]=='final' else _specifier_[version_info[3]]+str(version_info[4]))
| [
"teddy@teddyrowan.com"
] | teddy@teddyrowan.com |
c94ad6cb9b33cd88f9403e07d6b4521d00c538a1 | b2eeda79302d04e1391ffb40fdd83de53f6b54ef | /app/user/serializers.py | a652350a1a68061fbfa1f0afb9a8e322b53ab9c4 | [
"MIT"
] | permissive | Memo2704/recipe-app-api | 7c7286140ab17d5cf83d93216ffb74de9a3b19c2 | 91b441065547991a8027cbe1496aaf5795d56a49 | refs/heads/main | 2023-05-04T08:19:07.306969 | 2021-05-24T16:56:11 | 2021-05-24T16:56:11 | 334,730,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializes the custom user model for the user API endpoints."""

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # Never echo the password back, and enforce a minimal length.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        """Create and return a user, hashing the password via the manager."""
        user_model = get_user_model()
        return user_model.objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Apply validated changes; hash a new password when one is given."""
        raw_password = validated_data.pop('password', None)
        updated_user = super().update(instance, validated_data)
        if raw_password:
            # set_password hashes the value; direct attribute assignment
            # would persist clear text.
            updated_user.set_password(raw_password)
            updated_user.save()
        return updated_user
class AuthTokenSerializer(serializers.Serializer):
    """Validates email/password credentials for the token endpoint."""

    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False
    )

    def validate(self, attrs):
        """Authenticate the supplied credentials and attach the user."""
        user = authenticate(
            request=self.context.get('request'),
            username=attrs.get('email'),
            password=attrs.get('password')
        )
        if not user:
            # Keep the failure reason generic: do not leak which field was wrong.
            raise serializers.ValidationError(
                _('Unable to authenticate with provided credentials'),
                code='authentication'
            )
        attrs['user'] = user
        return attrs
| [
"tony.arteaga27@gmail.com"
] | tony.arteaga27@gmail.com |
b757835772edf23d3477770560bbe95cf15110f8 | bf7959048edc0005e04431a0864c719adc5ea9ea | /Contests/weekly/295/D.py | 5cdedcb03259b39f36791adb5099072be5de8d27 | [] | no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | class Solution:
def minimumObstacles(self, grid) -> int:
# 0-1 bfs
m,n = len(grid), len(grid[0])
dist = [[inf]* n for _ in range(m)]
dist[0][0] = 0
q = deque([(0,0)])
while q:
x,y = q.popleft()
for dx,dy in [(1,0),(-1,0),(0,1),(0,-1)]:
if 0 <= x+dx < m and 0 <= y+dy < n:
g = grid[x][y]
if dist[x][y] + g < dist[x+dx][y+dy]:
dist[x+dx][y+dy] = dist[x][y] + g
if g == 0:
q.appendleft((x+dx,y+dy))
else:
q.append((x+dy,y+dy))
return dist[-1][-1] | [
"yohager@MBP-YHWVM7JCVJ-0141.local"
] | yohager@MBP-YHWVM7JCVJ-0141.local |
b0173056cf09e20ef265ae0bf916c84bfd972b1a | 86cd22354f2431087c9b3ff06188f071afb3eb72 | /383. Ransom Note.py | cf3b994fb40efd594dc1c7d269120b8b5583c390 | [] | no_license | tlxxzj/leetcode | 0c072a74d7e61ef4700388122f2270e46c4ac22e | 06dbf4f5b505a6a41e0d93367eedd231b611a84b | refs/heads/master | 2023-08-31T11:04:34.585532 | 2023-08-31T08:25:51 | 2023-08-31T08:25:51 | 94,386,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | class Solution:
def canConstruct(self, ransomNote: str, magazine: str) -> bool:
letters = {}
for c in magazine:
if c in letters:
letters[c] += 1
else:
letters[c] = 1
for c in ransomNote:
if letters.get(c, 0) == 0:
return False
else:
letters[c] -= 1
return True | [
"tlxxzj@qq.com"
] | tlxxzj@qq.com |
d2c1ee090b37997f129b27b79778b792d6a6c6c6 | 923fa865d84c3760e1cd8116e1d698d245d547ac | /script_with_probs.py | cbb31f968abe7aa413e4a6def66ba850761016be | [] | no_license | joyceChu2020/files_backup | 4e489c773c0b75aa21b9a75b444aa24e0df26653 | 1c63602752529734130097a99836f278e8941611 | refs/heads/master | 2023-01-11T06:03:24.847256 | 2020-11-12T21:34:58 | 2020-11-12T21:34:58 | 263,699,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | import os
import shutil
import hashlib

# Source tree to mirror and the destination root of the backup.
inputpath = 'Y:\\hypercapnia'
outputpath = 'C:\\Users\\me\\backup\\hypercapnia\\'

# Extensions that are always backed up; .mat files are only copied when the
# name starts with 'SC' (see the filter below).
file_str = ['.raw', '.dcsraw', '.bin', '.txt', '.pdf', '.doc', '.docx']

READ_SIZE = 65536  # hash files in chunks so large recordings fit in memory


def file_digest(path):
    """Return the hex SHA-256 digest of the file at *path*."""
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        chunk = f.read(READ_SIZE)
        while chunk:
            digest.update(chunk)
            chunk = f.read(READ_SIZE)
    return digest.hexdigest()


for dirpath, dirnames, filenames in os.walk(inputpath):
    # Mirror the relative directory layout under the backup root.
    structure = os.path.join(outputpath, os.path.relpath(dirpath, inputpath))
    for filen in filenames:
        is_wanted = (os.path.splitext(filen)[1] in file_str
                     or (filen.endswith('.mat') and filen.startswith('SC')))
        if not is_wanted:
            continue
        source_file = os.path.join(dirpath, filen)
        target_file = os.path.join(structure, filen)
        if os.path.exists(target_file):
            # Identical content is already backed up: skip this file.
            # (The original called next(filenames) here, which raises
            # TypeError because filenames is a list, not an iterator.)
            if file_digest(target_file) == file_digest(source_file):
                continue
            whatToDo = input("The file you are trying to back up has the same name than the existing one.\n If you want to replace the existing file (which has different content), please type Y. \n Otherwise (where I'm only assuming you want to rename the file name), please type N \n Then press ENTER")
            if whatToDo == 'Y':
                os.makedirs(structure, exist_ok=True)
                shutil.copy(source_file, structure)
            elif whatToDo == 'N':
                nameYourFile = input('please type out a new valid file name:')
                # Keep the original extension; the original code built
                # '<name>\\.txt' (a directory path), created it with
                # makedirs, and then failed copying a file onto it.
                new_file = os.path.join(structure,
                                        nameYourFile + os.path.splitext(filen)[1])
                os.makedirs(structure, exist_ok=True)
                shutil.copyfile(source_file, new_file)
        else:
            os.makedirs(structure, exist_ok=True)
            shutil.copy(source_file, structure)
    # Back up everything inside 'This'/'That' directories regardless of type.
    for dirn in dirnames:  # original had a stray comma: `for dirn in dirnames,`
        if dirn == 'This' or dirn == 'That':
            # Original referenced an undefined name `path` here.
            for a, _, c in os.walk(os.path.join(dirpath, dirn)):
                for cc in c:
                    frompath = os.path.join(a, cc)
                    sub_target = os.path.join(outputpath,
                                              os.path.relpath(a, inputpath))
                    os.makedirs(sub_target, exist_ok=True)  # original typo: exist_os
                    shutil.copy(frompath, sub_target)
| [
"noreply@github.com"
] | joyceChu2020.noreply@github.com |
b307447339363ba5a9bc02068f4df4126d5e6527 | bb824786f095fbf6e6cf627ef9c04afdb5152477 | /apps/pages/migrations/0013_partnerspage.py | e96243788314b36b0fda1ef2712514fcbd92c772 | [] | no_license | Emilnurg/anas.ru | 19000063c7db98c15261056bb9590382db362d42 | 20cee1aee02da192c9c79a51bd0898c1dba0c98f | refs/heads/master | 2021-05-20T12:09:08.155749 | 2017-12-26T13:49:12 | 2017-12-26T13:49:12 | 252,287,670 | 0 | 0 | null | 2021-03-31T19:34:29 | 2020-04-01T21:10:48 | JavaScript | UTF-8 | Python | false | false | 6,865 | py | # -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.10.7 on 2017-06-08 15:27
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0012_auto_20170531_1612'),
]
operations = [
migrations.CreateModel(
name='PartnersPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Заголовок страницы')),
('title_ru', models.CharField(max_length=255, null=True, verbose_name='Заголовок страницы')),
('title_en', models.CharField(max_length=255, null=True, verbose_name='Заголовок страницы')),
('title_fr', models.CharField(max_length=255, null=True, verbose_name='Заголовок страницы')),
('subtitle', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
('subtitle_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
('subtitle_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
('subtitle_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
('howto_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
('howto_title_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
('howto_title_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
('howto_title_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
('howto_subtitle', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
('howto_subtitle_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
('howto_subtitle_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
('howto_subtitle_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
('howto_body', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Контент блока "Как стать дилером?"')),
('howto_body_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Контент блока "Как стать дилером?"')),
('howto_body_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Контент блока "Как стать дилером?"')),
('howto_body_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Контент блока "Как стать дилером?"')),
('howto_button_caption', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
('howto_button_caption_ru', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
('howto_button_caption_en', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
('howto_button_caption_fr', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
('questions_title_left', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
('questions_title_left_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
('questions_title_left_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
('questions_title_left_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
('questions_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
('questions_title_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
('questions_title_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
('questions_title_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
('questions_subtitle', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
('questions_subtitle_ru', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
('questions_subtitle_en', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
('questions_subtitle_fr', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
],
options={
'verbose_name': 'Страница "Дилеры"',
},
),
]
| [
"wizzzet@gmail.com"
] | wizzzet@gmail.com |
d3d7772cbfdea6a07bffb594754b9505868e02e8 | 21160696434c4f0a449be88f3228bfdc02146ab8 | /RotatingWheels/ParaPostPro/Macros/Screenshots1.py | 91fc0bacc8b7a1e95bafb4cbc61a735d1bb801f4 | [] | no_license | olaafrossi/CFDRuns | 00a456bad257714643fb7c1304a1e33a3d5cce31 | 0928b4d0d77210fd2bbcd5175fce0a1a5724e3a4 | refs/heads/master | 2023-06-12T09:10:44.364992 | 2021-07-07T02:07:40 | 2021-07-07T02:07:40 | 161,521,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,487 | py | #### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# Create a new 'Render View'
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [2094, 1162]
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.StereoType = 0
renderView1.Background = [0.32, 0.34, 0.43]
# get layout
layout1 = GetLayout()
# place view in the layout
layout1.AssignView(0, renderView1)
# Properties modified on renderView1
renderView1.Background = [0.0, 0.0, 0.0]
# split cell
layout1.SplitVertical(0, 0.5)
# set active view
SetActiveView(None)
# Create a new 'Render View'
renderView2 = CreateView('RenderView')
renderView2.ViewSize = [2094, 565]
renderView2.AxesGrid = 'GridAxes3DActor'
renderView2.StereoType = 0
renderView2.Background = [0.32, 0.34, 0.43]
# place view in the layout
layout1.AssignView(2, renderView2)
# Properties modified on renderView2
renderView2.Background = [0.0, 0.0, 0.0]
# split cell
layout1.SplitHorizontal(2, 0.5)
# set active view
SetActiveView(None)
# Create a new 'Render View'
renderView3 = CreateView('RenderView')
renderView3.ViewSize = [1042, 565]
renderView3.AxesGrid = 'GridAxes3DActor'
renderView3.StereoType = 0
renderView3.Background = [0.32, 0.34, 0.43]
# place view in the layout
layout1.AssignView(6, renderView3)
# Properties modified on renderView3
renderView3.Background = [0.0, 0.0, 0.0]
# set active view
SetActiveView(renderView1)
# get active source.
fullCarReflect = GetActiveSource()
# set active source
SetActiveSource(fullCarReflect)
# show data in view
fullCarReflectDisplay = Show(fullCarReflect, renderView1)
# trace defaults for the display properties.
fullCarReflectDisplay.Representation = 'Surface'
fullCarReflectDisplay.ColorArrayName = [None, '']
fullCarReflectDisplay.OSPRayScaleArray = 'U'
fullCarReflectDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
fullCarReflectDisplay.SelectOrientationVectors = 'None'
fullCarReflectDisplay.ScaleFactor = 0.436339807510376
fullCarReflectDisplay.SelectScaleArray = 'None'
fullCarReflectDisplay.GlyphType = 'Arrow'
fullCarReflectDisplay.GlyphTableIndexArray = 'None'
fullCarReflectDisplay.DataAxesGrid = 'GridAxesRepresentation'
fullCarReflectDisplay.PolarAxes = 'PolarAxesRepresentation'
fullCarReflectDisplay.ScalarOpacityUnitDistance = 0.05310636881446335
fullCarReflectDisplay.SelectInputVectors = ['POINTS', 'U']
fullCarReflectDisplay.WriteLog = ''
# init the 'PiecewiseFunction' selected for 'OSPRayScaleFunction'
fullCarReflectDisplay.OSPRayScaleFunction.Points = [0.0, 0.0, 0.5, 0.0, 70.0, 1.0, 0.5, 0.0]
# reset view to fit data
renderView1.ResetCamera()
# set scalar coloring
ColorBy(fullCarReflectDisplay, ('POINTS', 'p'))
# rescale color and/or opacity maps used to include current data range
fullCarReflectDisplay.RescaleTransferFunctionToDataRange(True, False)
# show color bar/color legend
fullCarReflectDisplay.SetScalarBarVisibility(renderView1, True)
# get color transfer function/color map for 'p'
pLUT = GetColorTransferFunction('p')
pLUT.LockDataRange = 1
pLUT.InterpretValuesAsCategories = 0
pLUT.ShowCategoricalColorsinDataRangeOnly = 0
pLUT.RescaleOnVisibilityChange = 0
pLUT.EnableOpacityMapping = 0
pLUT.RGBPoints = [-8334.9423828125, 0.0, 0.0, 1.0, 4306.30322265625, 1.0, 0.0, 0.0]
pLUT.UseLogScale = 0
pLUT.ColorSpace = 'HSV'
pLUT.UseBelowRangeColor = 0
pLUT.BelowRangeColor = [0.0, 0.0, 0.0]
pLUT.UseAboveRangeColor = 0
pLUT.AboveRangeColor = [1.0, 1.0, 1.0]
pLUT.NanColor = [0.498039215686, 0.498039215686, 0.498039215686]
pLUT.Discretize = 1
pLUT.NumberOfTableValues = 64
pLUT.ScalarRangeInitialized = 1.0
pLUT.HSVWrap = 0
pLUT.VectorComponent = 0
pLUT.VectorMode = 'Magnitude'
pLUT.AllowDuplicateScalars = 1
pLUT.Annotations = []
pLUT.ActiveAnnotatedValues = []
pLUT.IndexedColors = []
# Rescale transfer function
pLUT.RescaleTransferFunction(-2000.0, 1500.0)
# get opacity transfer function/opacity map for 'p'
pPWF = GetOpacityTransferFunction('p')
pPWF.Points = [-8334.9423828125, 0.0, 0.5, 0.0, 4306.30322265625, 1.0, 0.5, 0.0]
pPWF.AllowDuplicateScalars = 1
pPWF.ScalarRangeInitialized = 1
# Rescale transfer function
pPWF.RescaleTransferFunction(-2000.0, 1500.0)
# set active view
SetActiveView(renderView2)
# show data in view
fullCarReflectDisplay_1 = Show(fullCarReflect, renderView2)
# trace defaults for the display properties.
fullCarReflectDisplay_1.Representation = 'Surface'
fullCarReflectDisplay_1.ColorArrayName = [None, '']
fullCarReflectDisplay_1.OSPRayScaleArray = 'U'
fullCarReflectDisplay_1.OSPRayScaleFunction = 'PiecewiseFunction'
fullCarReflectDisplay_1.SelectOrientationVectors = 'None'
fullCarReflectDisplay_1.ScaleFactor = 0.436339807510376
fullCarReflectDisplay_1.SelectScaleArray = 'None'
fullCarReflectDisplay_1.GlyphType = 'Arrow'
fullCarReflectDisplay_1.GlyphTableIndexArray = 'None'
fullCarReflectDisplay_1.DataAxesGrid = 'GridAxesRepresentation'
fullCarReflectDisplay_1.PolarAxes = 'PolarAxesRepresentation'
fullCarReflectDisplay_1.ScalarOpacityUnitDistance = 0.05310636881446335
fullCarReflectDisplay_1.SelectInputVectors = ['POINTS', 'U']
fullCarReflectDisplay_1.WriteLog = ''
# init the 'PiecewiseFunction' selected for 'OSPRayScaleFunction'
fullCarReflectDisplay_1.OSPRayScaleFunction.Points = [0.0, 0.0, 0.5, 0.0, 70.0, 1.0, 0.5, 0.0]
# reset view to fit data
renderView2.ResetCamera()
# reset view to fit data
renderView2.ResetCamera()
# set scalar coloring
ColorBy(fullCarReflectDisplay_1, ('POINTS', 'p'))
# rescale color and/or opacity maps used to include current data range
fullCarReflectDisplay_1.RescaleTransferFunction(-2000.0, 1000.0)
# show color bar/color legend
fullCarReflectDisplay_1.SetScalarBarVisibility(renderView2, True)
# set active view
SetActiveView(renderView3)
# show data in view
fullCarReflectDisplay_2 = Show(fullCarReflect, renderView3)
# trace defaults for the display properties.
fullCarReflectDisplay_2.Representation = 'Surface'
fullCarReflectDisplay_2.ColorArrayName = [None, '']
fullCarReflectDisplay_2.OSPRayScaleArray = 'U'
fullCarReflectDisplay_2.OSPRayScaleFunction = 'PiecewiseFunction'
fullCarReflectDisplay_2.SelectOrientationVectors = 'None'
fullCarReflectDisplay_2.ScaleFactor = 0.436339807510376
fullCarReflectDisplay_2.SelectScaleArray = 'None'
fullCarReflectDisplay_2.GlyphType = 'Arrow'
fullCarReflectDisplay_2.GlyphTableIndexArray = 'None'
fullCarReflectDisplay_2.DataAxesGrid = 'GridAxesRepresentation'
fullCarReflectDisplay_2.PolarAxes = 'PolarAxesRepresentation'
fullCarReflectDisplay_2.ScalarOpacityUnitDistance = 0.05310636881446335
fullCarReflectDisplay_2.SelectInputVectors = ['POINTS', 'U']
fullCarReflectDisplay_2.WriteLog = ''
# init the 'PiecewiseFunction' selected for 'OSPRayScaleFunction'
fullCarReflectDisplay_2.OSPRayScaleFunction.Points = [0.0, 0.0, 0.5, 0.0, 70.0, 1.0, 0.5, 0.0]
# reset view to fit data
renderView3.ResetCamera()
# set scalar coloring
ColorBy(fullCarReflectDisplay_2, ('POINTS', 'p'))
# rescale color and/or opacity maps used to include current data range
fullCarReflectDisplay_1.RescaleTransferFunction(-2000.0, 1000.0)
# show color bar/color legend
fullCarReflectDisplay_2.SetScalarBarVisibility(renderView3, True)
# set active view
SetActiveView(renderView2)
# get color legend/bar for pLUT in view renderView2
pLUTColorBar = GetScalarBar(pLUT, renderView2)
pLUTColorBar.Title = 'p'
pLUTColorBar.ComponentTitle = ''
pLUTColorBar.RangeLabelFormat = '%-#6.1f'
# change scalar bar placement
pLUTColorBar.WindowLocation = 'AnyLocation'
pLUTColorBar.Position = [0.8945349952061361, 0.03362831858407078]
# change scalar bar placement
pLUTColorBar.ScalarBarLength = 0.3388495575221239
# Properties modified on pLUTColorBar
pLUTColorBar.Title = 'Pressure'
pLUTColorBar.ComponentTitle = '[Pa]'
pLUTColorBar.ScalarBarLength = 0.338849557522124
# set active view
SetActiveView(renderView3)
# get color legend/bar for pLUT in view renderView3
pLUTColorBar_1 = GetScalarBar(pLUT, renderView3)
pLUTColorBar_1.Title = 'p'
pLUTColorBar_1.ComponentTitle = ''
pLUTColorBar_1.RangeLabelFormat = '%-#6.1f'
# Properties modified on pLUTColorBar_1
pLUTColorBar_1.Title = 'Pressure'
pLUTColorBar_1.ComponentTitle = '[Pa]'
# set active view
SetActiveView(renderView1)
# get color legend/bar for pLUT in view renderView1
pLUTColorBar_2 = GetScalarBar(pLUT, renderView1)
pLUTColorBar_2.Title = 'p'
pLUTColorBar_2.ComponentTitle = ''
pLUTColorBar_2.RangeLabelFormat = '%-#6.1f'
# Properties modified on pLUTColorBar_2
pLUTColorBar_2.Title = 'Pressure'
pLUTColorBar_2.ComponentTitle = '[Pa]'
# current camera placement for renderView1
renderView1.CameraPosition = [-3.80018015426134, -2.58818131712464, 1.25806138237725]
renderView1.CameraFocalPoint = [2.77871843344151, 1.96208410343195, -0.134033844665608]
renderView1.CameraViewUp = [0.14309214267827705, 0.09451836618606432, 0.9851857272399234]
renderView1.CameraParallelScale = 2.54276280729466
# current camera placement for renderView3
renderView3.CameraPosition = [-0.025547266006469727, 0.0, 8.286780353439246]
renderView3.CameraFocalPoint = [-0.025547266006469727, 0.0, -1.5377007093335786]
renderView3.CameraParallelScale = 2.5427628072946633
# current camera placement for renderView2
renderView2.CameraPosition = [-5.1369606235715475, 0.0, 0.7675079703330994]
renderView2.CameraFocalPoint = [4.687520439201278, 0.0, 0.7675079703330994]
renderView2.CameraViewUp = [0.0, 0.0, 1.0]
renderView2.CameraParallelScale = 2.5427628072946633
# save screenshot ********************************************************************************************
SaveScreenshot('/home/olaaf/FOAM/GITCases/MasterCase/ParaPostPro/Press1.png', layout1, SaveAllViews=1,
ImageResolution=[3840, 2160])
# current camera placement for renderView1
renderView1.CameraPosition = [-3.5128990775510425, -2.930115880275356, 0.9427178483607985]
renderView1.CameraFocalPoint = [2.641117052980626, 2.298205640384767, 0.09566962794755436]
renderView1.CameraViewUp = [0.09211061201414687, 0.05267769806032644, 0.9943544113048649]
renderView1.CameraParallelScale = 2.54276280729466
# current camera placement for renderView3
renderView3.CameraPosition = [-0.025547266006469727, 0.0, -5.9307311843986215]
renderView3.CameraFocalPoint = [-0.025547266006469727, 0.0, 3.893749878374207]
renderView3.CameraParallelScale = 2.5427628072946633
# current camera placement for renderView2
renderView2.CameraPosition = [-0.025547266006469727, 0.0, 7.592059929430105]
renderView2.CameraFocalPoint = [-0.025547266006469727, 0.0, -2.2324211333427137]
renderView2.CameraParallelScale = 2.5427628072946633
# save screenshot ********************************************************************************************
SaveScreenshot('/home/olaaf/FOAM/GITCases/MasterCase/ParaPostPro/Press2.png', layout1, SaveAllViews=1,
ImageResolution=[3840, 2160])
# current camera placement for renderView1
renderView1.CameraPosition = [4.004615049832183, -1.800267050337295, -0.007647342275201474]
renderView1.CameraFocalPoint = [-3.6402954829156675, 3.255249649586947, 3.5303636802975724]
renderView1.CameraViewUp = [0.3804147967519827, -0.07009382934986254, 0.9221558639943792]
renderView1.CameraParallelScale = 2.54276280729466
# current camera placement for renderView3
renderView3.CameraPosition = [-0.025547266006469727, 6.20627329884169, 0.7675079703330994]
renderView3.CameraFocalPoint = [-0.025547266006469727, -3.618207763931148, 0.7675079703330994]
renderView3.CameraViewUp = [0.0, 0.0, 1.0]
renderView3.CameraParallelScale = 2.5427628072946633
# current camera placement for renderView2
renderView2.CameraPosition = [5.37274847361473, 0.0, 0.7675079703330994]
renderView2.CameraFocalPoint = [-4.451732589158093, 0.0, 0.7675079703330994]
renderView2.CameraViewUp = [0.0, 0.0, 1.0]
renderView2.CameraParallelScale = 2.5427628072946633
# save screenshot ********************************************************************************************
SaveScreenshot('/home/olaaf/FOAM/GITCases/MasterCase/ParaPostPro/Press3.png', layout1, SaveAllViews=1,
ImageResolution=[3840, 2160])
# current camera placement for renderView1
renderView1.CameraPosition = [4.382096544598962, -2.4888179095502325, 2.3422714558002866]
renderView1.CameraFocalPoint = [-4.13191240983388, 4.334493803053219, -2.3767991788657588]
renderView1.CameraViewUp = [-0.22485613270125082, 0.3486982591614691, 0.9098622113509268]
renderView1.CameraParallelScale = 2.54276280729466
# current camera placement for renderView3
renderView3.CameraPosition = [-0.025547266006469727, 6.20627329884169, 0.7675079703330994]
renderView3.CameraFocalPoint = [-0.025547266006469727, -3.618207763931148, 0.7675079703330994]
renderView3.CameraViewUp = [0.0, 0.0, 1.0]
renderView3.CameraParallelScale = 2.5427628072946633
# current camera placement for renderView2
renderView2.CameraPosition = [5.37274847361473, 0.0, 0.7675079703330994]
renderView2.CameraFocalPoint = [-4.451732589158093, 0.0, 0.7675079703330994]
renderView2.CameraViewUp = [0.0, 0.0, 1.0]
renderView2.CameraParallelScale = 2.5427628072946633
# save screenshot ********************************************************************************************
SaveScreenshot('/home/olaaf/FOAM/GITCases/MasterCase/ParaPostPro/Press4.png', layout1, SaveAllViews=1,
ImageResolution=[3840, 2160])
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
##Shot | [
"olaaf.rossi@gmail.com"
] | olaaf.rossi@gmail.com |
71c44b270f1029386b8c8079cc4f51467a806a60 | 8690ca0028c54b62d68badf1753fc6151ae03525 | /Part3 Levels of Aggregation/esem_data/Act/tpot_mnist_pipeline_triangulateAggregationLevelParticipantSplitaggr_5_groups7.py | d83ecdf48b217bda826b409cdf96307576b5488a | [] | no_license | brains-on-code/conducting-and-analyzing-human-studies | fd74ee77fdc56cc61bdc1e0cf9bf423780f5dddc | 548e7443f4d2bdb2db1f2858289b7d3518593c59 | refs/heads/master | 2021-06-26T21:30:56.386121 | 2020-12-22T13:49:16 | 2020-12-22T13:49:16 | 195,975,817 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Auto-generated TPOT pipeline export: replace the path and separator
# placeholders below before running.
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
# Default train/test split with a fixed seed (random_state=42) for reproducibility.
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data['target'].values, random_state=42)
# Score on the training set was:1.0
# Standardize features, then fit an L2-regularized logistic regression.
exported_pipeline = make_pipeline(
    StandardScaler(),
    LogisticRegression(C=0.1, dual=False, penalty="l2")
)
exported_pipeline.fit(training_features, training_target)
# Predictions for the held-out split.
results = exported_pipeline.predict(testing_features)
| [
"fmriml@fmriml.com"
] | fmriml@fmriml.com |
777f5a7ae56cf785c70587fe44546dc3acddc9d1 | 763384b199bc62b8e8bc941b4fd9b070813092ed | /EightPuzzle/successor.py | deffdf34637bb3dfe1e17bb2599e8cb740ac3b80 | [] | no_license | bungerm/portfolio | 56c464d546527207aab5052c98fead03e241f8e5 | a4fc3c97f43a15791e8ba51863ea8d0652d5fc30 | refs/heads/main | 2023-07-27T20:16:23.395022 | 2021-09-10T14:54:00 | 2021-09-10T14:54:00 | 326,769,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | # Successor Function
# CSC 480 HW1 - Search
#
# Description: The Successor function for the Modified Eight Puzzle.
# Takes a Node and exapnds it, returning an array of child nodes.
#
# See main.py for further details
from node import Node
def GetSuccessors(n):
    """Expand node *n* for the Modified Eight Puzzle.

    Returns a list of child Nodes, one per legal move of the blank (0)
    tile, always generated in the order UP, LEFT, DOWN, RIGHT.
    """
    board = n.state

    # Locate the blank tile in the flat 3x3 board. Like the original scan,
    # this leaves the index at 8 if (unexpectedly) no blank is present.
    blank = 8
    for idx in range(9):
        if board[idx] == 0:
            blank = idx
            break

    row, col = divmod(blank, 3)
    children = []

    def swap_into(offset, move_name):
        # Build the child state by sliding the neighbouring tile into the blank.
        child = board[:]
        child[blank] = child[blank + offset]
        child[blank + offset] = 0
        children.append(Node(child, n, move_name, child[blank]))

    if row > 0:    # blank not on the top row
        swap_into(-3, "UP")
    if col > 0:    # blank not in the left column
        swap_into(-1, "LEFT")
    if row < 2:    # blank not on the bottom row
        swap_into(3, "DOWN")
    if col < 2:    # blank not in the right column
        swap_into(1, "RIGHT")
    return children
"bungerm42@gmail.com"
] | bungerm42@gmail.com |
cd5bdfb1fea846237b9755b1fbbca43280bebf1a | 747df35379e741159955673f0eb02672f79fb863 | /data_loader_module.py | 62b68e7d2f39c862edb642f6751d11af94bd514d | [] | no_license | sharax/ECE_143_Group_11 | 782f11dfc28af8959e89d788a554429254f2b369 | a0e20b0eca537d7e59d326225722bb08d851586b | refs/heads/master | 2020-04-21T11:12:28.138124 | 2019-03-16T05:14:58 | 2019-03-16T05:14:58 | 169,515,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py |
# coding: utf-8
# # Data Loader
#
# Module for loading the .csv file that contains the player attributes into a pandas dataframe
# In[1]:
def load_data():
    """Load player_attributes.csv into a pandas DataFrame.

    The first CSV column becomes the DataFrame index.

    Returns:
        pd.DataFrame: the player attributes table.
    """
    import pandas as pd
    return pd.read_csv('player_attributes.csv', index_col=[0])
| [
"noreply@github.com"
] | sharax.noreply@github.com |
7de2a615a19cb1d00955dde84742ec07a817d152 | 1e7d44d4de59c8588f932897efa68168bf57338e | /FSD/uint64.py | b162930bd6ce6fa71d79077bffa224ebe22af851 | [] | no_license | SkyToGround/SimplePythonKafkaConsumer | 413993436753f740d0906c3406c4e09962039403 | 7f180a5c5413d6bfe1d5aa89c482d9ed66945ffd | refs/heads/master | 2021-01-09T05:40:37.768416 | 2017-03-17T16:08:23 | 2017-03-17T16:08:23 | 80,808,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FSD
import flatbuffers
class uint64(object):
    """FlatBuffers accessor for the FSD.uint64 table (generated code).

    The table exposes a single vector field of uint64 values: vtable slot
    at offset 4, 8-byte elements (see Value/ValueLength below).
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsuint64(cls, buf, offset):
        # Resolve the root-table indirection and wrap it in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = uint64()
        x.Init(buf, n + offset)
        return x
    # uint64
    def Init(self, buf, pos):
        # Attach this accessor to the underlying buffer at table position pos.
        self._tab = flatbuffers.table.Table(buf, pos)
    # uint64
    def Value(self, j):
        # Element j of the Value vector; 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0
    # uint64
    def ValueLength(self):
        # Number of elements in the Value vector (0 when the field is absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
# Generated builder helpers: start the table, write the Value vector offset
# into slot 0, build the vector (8-byte elements, 8-byte alignment), and
# finish the object.
def uint64Start(builder): builder.StartObject(1)
def uint64AddValue(builder, value): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def uint64StartValueVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def uint64End(builder): return builder.EndObject()
| [
"jonas.nilsson@esss.se"
] | jonas.nilsson@esss.se |
e9a8837c8a88cc74bd105f491189015ff3bc4815 | 5218b6c657a9ce92ca3a6ba330cc5fcf38f7ebd8 | /listing2-1.py | e706f76773ad0a260d510324991948d6ce16fe4d | [] | no_license | Sterlingclark/python_project | 918a307ea7b9e1bd8a6fd03487d953f2f0649650 | ee1775affc68ae09e4829f5b9c2a63bde08541e2 | refs/heads/master | 2022-07-30T20:52:39.891049 | 2020-05-23T17:47:22 | 2020-05-23T17:47:22 | 261,255,786 | 1 | 0 | null | 2020-05-20T23:30:43 | 2020-05-04T17:48:21 | Python | UTF-8 | Python | false | false | 174 | py | room_map = [[1, 0, 0, 0, 0],
[0, 0, 0, 2, 0],
[0, 0, 0, 0, 0],
[0, 3, 0, 0, 0],
[0, 0, 0, 0, 4]
]
print(room_map)
| [
"sterlingdclark13@gmail.com"
] | sterlingdclark13@gmail.com |
9b8b6f9bda493cd1e8800f462021606cf91863d6 | 641f76328bfeb7e54f0793a18c5b7c00595b98fd | /apps/sms/serializers.py | 4d653e2794abcb0dbb3ce297e9d2919f02b8d8f4 | [
"Apache-2.0"
] | permissive | lianxiaopang/camel-store-api | 1d16060af92eb01607757c0423377a8c94c3a726 | b8021250bf3d8cf7adc566deebdba55225148316 | refs/heads/master | 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 | Apache-2.0 | 2020-02-07T14:28:35 | 2020-02-06T06:17:47 | Python | UTF-8 | Python | false | false | 207 | py | from rest_framework import serializers
from .models import SmsRecord
class SmsRecordSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing every field of the SmsRecord model."""

    class Meta:
        model = SmsRecord
        # '__all__' mirrors the model: any field later added to SmsRecord
        # is serialized automatically without touching this class.
        fields = '__all__'
"lyh@gzqichang.com"
] | lyh@gzqichang.com |
085504951890593b601aa0c70e56a87877e7ca10 | ae6cc0597c73d63885e0f54a4cbe9a16142e824e | /django_projects/mysite/ads/migrations/0003_auto_20210130_0000.py | fdbcd6c992bd8ad0d639ec2fc204cc84c1011801 | [] | no_license | fedeginer/paw | 4e97ee13baffb64f9c75caa7beffb02b468f6842 | 90453729931079c3d893c51ac614fa6c7cc5f7ee | refs/heads/master | 2023-06-09T03:59:57.560543 | 2021-06-21T17:58:40 | 2021-06-21T17:58:40 | 379,011,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | # Generated by Django 3.1.4 on 2021-01-30 00:00
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 3.1.4): introduces the Comment
    # model and wires Ad.comments as a many-to-many through it.
    # Avoid editing already-applied migrations.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('ads', '0002_auto_20210129_1725'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Comment body: validated to be at least 3 characters long.
                ('text', models.TextField(validators=[django.core.validators.MinLengthValidator(3, 'Comment must be greater than 3 characters')])),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # NOTE(review): to='ads.ad' uses a lower-case model name —
                # presumably resolves to the Ad model; verify.
                ('ad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ads.ad')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='ad',
            name='comments',
            field=models.ManyToManyField(related_name='comments_owned', through='ads.Comment', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"fedeginer@alu.comillas.edu"
] | fedeginer@alu.comillas.edu |
ccf6506cd94b33554464a83feb3802c05dd8b2a6 | 78d5ed168b813886fcd4516c90f122ae54f793db | /Leaning/NLP_project/BiLSTM-CRF命名实体识别/Batch.py | ffd48ef8749c2292bc494152a40466379433be95 | [] | no_license | LiaoBoWen/MyProject | 9919272c323d33a435eaa9f989ac73c058410e5b | 648fa01a0f0be1ace4b4b233822ccd2107ab0897 | refs/heads/master | 2020-07-21T07:55:29.301575 | 2019-11-02T03:28:19 | 2019-11-02T03:28:19 | 206,782,221 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | import numpy as np
class BatchGenerator:
    """Mini-batch iterator over a paired dataset (features X and labels y).

    Optionally shuffles once on construction and re-shuffles at every epoch
    boundary; after a wrap, batches restart from the beginning of the data.
    """

    def __init__(self, X, y, shuffle=False):
        # np.asarray leaves an existing ndarray untouched (no copy), so a
        # caller-held array is shared rather than duplicated.
        if not isinstance(X, np.ndarray):
            X = np.asarray(X)
        if not isinstance(y, np.ndarray):
            y = np.asarray(y)
        self._x = X
        self._y = y
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._number_examples = self._x.shape[0]
        self._shuffle = shuffle
        if self._shuffle:
            self._reorder()

    def _reorder(self):
        """Apply one random permutation to features and labels in lockstep."""
        order = np.random.permutation(self._number_examples)
        self._x = self._x[order]
        self._y = self._y[order]

    @property
    def x(self):
        """Feature array in its current (possibly shuffled) order."""
        return self._x

    @property
    def y(self):
        """Label array in its current (possibly shuffled) order."""
        return self._y

    @property
    def num_examples(self):
        """Total number of examples in the dataset."""
        return self._number_examples

    @property
    def epochs_completed(self):
        """How many full passes over the data have been served so far."""
        return self._epochs_completed

    def next_batch(self, batch_size):
        """Return the next (x, y) slice of `batch_size` examples."""
        begin = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._number_examples:
            # Epoch exhausted: count it, optionally reshuffle, restart at 0.
            self._epochs_completed += 1
            if self._shuffle:
                self._reorder()
            begin = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._number_examples
        end = self._index_in_epoch
        return self._x[begin:end], self._y[begin:end]
"1506349457@qq.com"
] | 1506349457@qq.com |
6114876cf3210e58b52e230be83c6c00913d2780 | 8393379be9ffd1ad803e401b846e143865cc0af3 | /Python/UseCase/列表表达式.py | 1c9f403e2f43142dd9a4f8dfebcc9cafe7d70aa6 | [] | no_license | weepwood/CodeBase | 406ce4195ede8bebc08a2c53e2a4a85e2e418e4e | a22bab83706d8c64941fe2ccd1b07f83900cf255 | refs/heads/master | 2023-07-15T22:11:27.387335 | 2021-08-25T03:09:57 | 2021-08-25T03:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | fruit = ["apple", "pear", "orange", "banana"]
# Upper-case every entry in place (index assignment via enumerate).
for position, name in enumerate(fruit):
    fruit[position] = name.upper()
print(fruit)

# Back to lower case, this time with a list comprehension.
fruit = [name.lower() for name in fruit]
print(fruit)

# Collect the names starting with "a" using an explicit loop.
filtered_friut = []
for name in fruit:
    if name.startswith("a"):
        filtered_friut.append(name)
print(filtered_friut)

# The same filtering idea written as a list comprehension (prefix "b" here).
filtered_friut2 = [name for name in fruit if name.startswith("b")]
print(filtered_friut2)
| [
"1379978893@qq.com"
] | 1379978893@qq.com |
b7750a59ddd97731a3db15d7ff43b162bbc4a22b | b9ca99a0244e5d5a07e0b27be8192ad01c4eda6c | /EIP/EIP系统所有流程.py | fad26b441598bfd879662694afe6d03b21d1d15c | [] | no_license | Boomshakal/spider | c3fdbf18f874ec9953509e4ce984b5476d25839f | e6779a3961f48325dd4992d88f88b8b3938225d7 | refs/heads/master | 2021-06-17T06:22:19.679444 | 2021-03-05T06:33:36 | 2021-03-05T06:33:36 | 154,489,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | import requests
import json
from lxml import etree
# Review-list endpoint of the EIP (enterprise portal) system.
url="http://eip.megmeet.com:8008/km/review/km_review_index/kmReviewIndex.do?"
# Number of result pages to fetch in the main loop.
maxpage=5
headers={
    # NOTE(review): hard-coded session cookie — the JSESSIONID expires, so
    # requests will likely stop working once the session is gone; refresh it
    # before running.
    "Cookie": "j_lang=zh-CN; JSESSIONID=40ABBC9A619C5860068184B1E339BC4D",
    "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
}
def get_onepage(page):
    """Fetch one page of the review list and print the <span> texts of each row.

    `page` is the 1-based page number, passed through as the `pageno` query
    parameter. Relies on the module-level `url` and `headers` (session cookie).
    """
    data = {
        "method": "list",
        "q.mydoc": "all",
        "q.j_path": "/listAll",
        "q.s_raq": "0.2347883935715236",
        "pageno": page,
        "rowsize": "30",  # rows returned per page
        "orderby": "docCreateTime",
        "ordertype": "down",
        "s_ajax": "true"
    }
    text=requests.get(url,headers=headers,params=data).text
    #print(type(text),text)
    jsons=json.loads(text)
    # assumes the JSON payload carries a 'datas' list of rows — TODO confirm;
    # if the key is missing, results is None and the loop below raises.
    results=jsons.get('datas')
    for result in results:
        # result[1]['value'] holds an HTML fragment; extract all <span> texts.
        html=result[1]['value']
        html=etree.HTML(html)
        title=html.xpath('//span/text()')
        print(title)
        #print(html)
if __name__ == '__main__':
    # Crawl pages 1..maxpage sequentially, reporting progress after each.
    for page in range(1,maxpage+1):
        get_onepage(page)
        print("第{0}页加载完成!".format(page))  # "page {0} finished loading"
| [
"362169885@qq.com"
] | 362169885@qq.com |
807a3b762ecbaf87fb4b164464a47904c275b9eb | ebc52e685c7f2dda75e31d7d46baffd9f38e31d4 | /main.py | 910a00191bf5bc16f910031181b33666a0e0aad2 | [] | no_license | andreeabea/BrainTumorDetection | 0209cf75f04206ff13f9ed2775e25bd7fc4b2cb3 | c86906d622a0c97804f6a578c32413683f0e764d | refs/heads/master | 2022-08-21T03:49:14.642670 | 2020-05-25T20:40:34 | 2020-05-25T20:40:34 | 257,714,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | import unicodedata
import easygui as easygui
from imutils import paths
from sklearn.metrics import classification_report
import augment_dataset
import config
import cv2
import numpy as np
from init import testGen, totalTest, BS, trainGen
from tensorflow.keras.models import load_model
from plot_model import plotConfusionMatrix
# Restore the trained classifier and make the input shape explicit
# (batch of 75x75 RGB images).
new_model = load_model('models/BestModel')
new_model.build((None, 75, 75, 3))
# check model architecture
new_model.summary()

# evaluate the restored model
print("Evaluation on validation set:")
loss, acc = new_model.evaluate(testGen, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))

print("[INFO] evaluating network...")
testGen.reset()
predIdxs = new_model.predict_generator(testGen, steps=(totalTest // BS) + 1)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testGen.classes, predIdxs, target_names=testGen.class_indices.keys()))
# plotConfusionMatrix(testGen, predIdxs)

"""
Initial version. Making predictions on all test images.
"""
# images = list(paths.list_images(config.TEST_PATH))
#
# imgs = []
#
# for image in images:
#     img = cv2.imread(image)
#     img = cv2.resize(img, (75, 75))
#     imgs.append(img)
#
# imgs = np.array(imgs)
# imgs = imgs/255
# predictions = new_model.predict(imgs)

# Interactive loop: keep asking for an image until the dialog is cancelled
# (fileopenbox returns None on cancel, which ends the while loop).
uni_img = easygui.fileopenbox()
while uni_img:
    # Normalize the chosen path to plain ASCII (non-ASCII characters dropped).
    img_path = unicodedata.normalize('NFKD', uni_img).encode('ascii', 'ignore')
    img_path = img_path.decode('utf-8')
    # read input image from gui
    img = cv2.imread(img_path)
    cv2.imshow("Brain MRI image", img)
    cv2.waitKey()
    # preprocess input image
    preprocessedImg = augment_dataset.extractBrain(img)
    cv2.imshow("Preprocessed image", preprocessedImg)
    cv2.waitKey()
    preprocessedImg = cv2.resize(preprocessedImg, (75, 75))
    # The model expects a batch axis and inputs scaled to [0, 1].
    imgs = []
    imgs.append(preprocessedImg)
    imgs = np.array(imgs)
    imgs = imgs / 255
    predictions = new_model.predict(imgs)
    #for i in range(0, len(imgs)):
    print(predictions[0])
    # if the probability of the tumor to exist is greater than the probability of not
    if predictions[0][0] < predictions[0][1]:
        print("yes")  # assumes class index 1 means "tumor present" — TODO confirm class order
    else:
        print("no")
    uni_img = easygui.fileopenbox()
| [
"andreea.beatrice2@yahoo.com"
] | andreea.beatrice2@yahoo.com |
c3e2f95860b1519ebb3718386bc321193130c0f1 | 507c0e2e30273e208a7a1dba9b95952f03421823 | /hello-robot/src/CounterLibrary.py | f6a4a47da152813be93d1220e875098091021bf3 | [] | no_license | emeraldgcube/ohtu-2021-2ndrepo | 1fa82d71184266780367c65ead683d4b1dc90fdf | c85e14d762f623c9a20f51461752640e211ceb6b | refs/heads/main | 2023-04-11T01:25:07.695605 | 2021-04-28T20:31:56 | 2021-04-28T20:31:56 | 346,433,084 | 0 | 0 | null | 2021-04-25T15:56:01 | 2021-03-10T17:11:44 | Python | UTF-8 | Python | false | false | 571 | py | from counter import Counter
class CounterLibrary:
    """Robot Framework style keyword library wrapping one Counter instance."""

    def __init__(self):
        self._counter = Counter()

    def increase_counter(self):
        """Advance the counter by one step."""
        self._counter.increase()

    def increment_counter_by(self, amount):
        """Add ``amount`` (anything ``int()`` accepts, e.g. a string) to the counter."""
        self._counter.increment(int(amount))

    def reset_counter(self):
        """Return the counter to its starting value."""
        self._counter.reset()

    def counter_value_should_be(self, expected):
        """Raise AssertionError unless the counter equals ``expected``."""
        wanted = int(expected)
        if self._counter.value != wanted:
            raise AssertionError(f"{self._counter.value} != {wanted}")
| [
"parssinentopi@gmail.com"
] | parssinentopi@gmail.com |
5692e1461442776306fc415db227d24cb23bc350 | e89693a2906534fa4a9d180b404cb96751302e8c | /Timing_Panel.py | 40755d697cc5406b4f8fed9f34ce2b8208e85f42 | [] | permissive | friedrich-schotte/Lauecollect | e278e00692d109e98450c27502986673bf59db6a | acfc5afe34b4df5891a0f8186b8df76625afb51d | refs/heads/master | 2022-09-12T02:59:04.363963 | 2022-08-18T16:39:01 | 2022-08-18T16:39:01 | 186,062,944 | 0 | 2 | MIT | 2020-07-20T02:04:07 | 2019-05-10T22:42:26 | Python | UTF-8 | Python | false | false | 11,389 | py | #!/usr/bin/env python
"""
Graphical User Interface for FPGA Timing System.
Author: Friedrich Schotte
Date created: 2015-05-27
Date last modified: 2022-07-31
Revision comment: Cleanup: self.timing_system.sequencer, self.timing_system.composer
"""
__version__ = "7.3.3"
from logging import warning
from traceback import format_exc
from Panel_3 import BasePanel
from reference import reference
class Timing_Panel(BasePanel):
    """Control Panel for FPGA Timing System"""
    # Imported into the class namespace so it can be used as a decorator below.
    from monitored_property import monitored_property
    # Default domain; can be overridden per instance via the constructor.
    timing_system_name = "BioCARS"

    def __init__(self, timing_system_name=None):
        # timing_system_name: domain name, e.g. "BioCARS"; None keeps the
        # class-level default.
        if timing_system_name is not None:
            self.timing_system_name = timing_system_name
        BasePanel.__init__(self)

    icon = "Timing System"

    @monitored_property
    def title(self):
        # Window title, e.g. 'Timing System [BioCARS]'.
        return "Timing System [%s]" % self.timing_system_name

    @property
    def name(self):
        # Unique panel identifier (e.g. for per-panel saved settings).
        return "Timing_Panel.%s" % self.timing_system_name

    label_width = 190  # width of the label column — presumably pixels; TODO confirm

    @property
    def parameters(self):
        # One row per controllable/monitored quantity:
        # [(label, object, attribute_name, format), {options}]
        return [
            [("Delay", self.timing_system.composer, "delay", "time"), {"choices": self.delay_choices}],
            [("Nom. Delay", self.timing_system.composer, "nom_delay", "time"), {"choices": self.delay_choices}],
            [("Mode", self.timing_system.composer, "mode", "str"), {"choices_reference": reference(self.timing_system.composer, "modes")}],
            [("Period [1-kHz cycles]", self.timing_system.composer, "trigger_period_in_1kHz_cycles", "int"), {}],
            [("Detector", self.timing_system.composer, "xdet_on", "Off/On"), {}],
            [("Pump (laser)", self.timing_system.composer, "laser_on", "Off/On"), {}],
            [("Probe (X-Ray)", self.timing_system.composer, "ms_on", "Off/On"), {}],
            [("Trans", self.timing_system.composer, "trans_on", "Off/On"), {}],
            [("Circulate", self.timing_system.composer, "pump_on", "Off/On"), {}],
            [("Trigger code", self.timing_system.composer, "transc", "binary"), {}],
            [("Image number", self.timing_system.registers.image_number, "count", "int"), {}],
            [("X-ray detector trigger count", self.timing_system.channels.xdet.trig_count, "count", "int"), {}],
            [("X-ray detector acquisition count", self.timing_system.channels.xdet.acq_count, "count", "int"), {}],
            [("X-ray scope trigger count", self.timing_system.channels.xosct.trig_count, "count", "int"), {}],
            [("X-ray scope acquisition count", self.timing_system.channels.xosct.acq_count, "count", "int"), {}],
            [("Laser scope trigger count", self.timing_system.channels.losct.trig_count, "count", "int"), {}],
            [("Laser scope acquisition count", self.timing_system.channels.losct.acq_count, "count", "int"), {}],
            [("Pass number", self.timing_system.registers.pass_number, "count", "int"), {}],
            [("Pulses", self.timing_system.registers.pulses, "count", "int"), {}],
            [("Image number increment", self.timing_system.composer, "image_number_inc", "Off/On"), {}],
            [("Pass number increment", self.timing_system.composer, "pass_number_inc", "Off/On"), {}],
            [("Queue active", self.timing_system.sequencer, "queue_active", "Not Active/Active"), {}],
            [("Acquiring", self.timing_system.sequencer, "acquiring", "Idle/Acquiring"), {}],
            [("Current queue length [seq]", self.timing_system.sequencer, "current_queue_length", "int"), {}],
            [("Current queue sequence cnt", self.timing_system.sequencer, "current_queue_sequence_count", "int"), {}],
            [("Current queue repeat count", self.timing_system.sequencer, "current_queue_repeat_count", "int"), {}],
            [("Current queue max repeat", self.timing_system.sequencer, "current_queue_max_repeat_count", "int"), {}],
            [("Default queue name", self.timing_system.sequencer, "default_queue_name", "str"), {"choices": self.queue_choices}],
            [("Current queue name", self.timing_system.sequencer, "current_queue_name", "str"), {"choices": self.queue_choices}],
            [("Next queue name", self.timing_system.sequencer, "next_queue_name", "str"), {"choices": self.queue_choices}],
            [("Next queue sequence cnt", self.timing_system.sequencer, "next_queue_sequence_count", "int"), {}],
            [("Queue length [sequences]", self.timing_system.sequencer, "queue_length", "int"), {}],
            [("Queue sequence count", self.timing_system.sequencer, "queue_sequence_count", "int"), {}],
            [("Queue repeat count", self.timing_system.sequencer, "queue_repeat_count", "int"), {}],
            [("Queue max repeat count", self.timing_system.sequencer, "queue_max_repeat_count", "int"), {}],
            [("Cache", self.timing_system.sequencer, "cache_enabled", "Disabled/Caching"), {}],
            [("Generating Packets", self.timing_system.acquisition, "generating_packets", "Idle/Generating"), {}],
            [("Updating Queues", self.timing_system.sequencer, "update_queues", "Idle/Updating"), {}],
            [("Packets generated", self.timing_system.sequencer, "cache_size", "int"), {}],
            [("Packets loaded", self.timing_system.sequencer, "remote_cache_size", "int"), {}],
            [("Sequencer Configured", self.timing_system.sequencer, "configured", "Not Configured/Configured"), {}],
            [("Sequencer Running", self.timing_system.sequencer, "running", "Stopped/Running"), {}],
            [("Sequence generator", self.timing_system.composer, "generator", "str"), {"read_only": True}],
            [("Sequence generator version", self.timing_system.composer, "generator_version", "str"), {"read_only": True}],
            [("Timing sequence version", self.timing_system.composer, "timing_sequence_version", "str"), {"read_only": True}],
            [("Heatload chopper phase", self.timing_system.registers.hlcnd, "value", "time.6"),
             {"choices": self.hlc_choices}],
            [("Heatload chop. act. phase", self.timing_system.registers.hlcad, "value", "time.6"),
             {"choices": self.hlc_choices}],
            [("High-speed chopper phase", self.timing_system.channels.hsc.delay, "value", "time.4"),
             {"choices": self.hsc_choices}],
            [("P0 shift", self.timing_system.p0_shift, "value", "time.4"), {}],
            [("X-ray delay", self.timing_system.composer, "xd", "time.6"), {}],
        ]

    # Subset of parameter labels shown by default (by label).
    standard_view = [
        "Delay",
        "Mode",
        "Pump (laser)",
        "Acquiring",
        "Sequencer Running",
    ]

    @property
    def application_buttons(self):
        # Toolbar buttons that launch related panels for this domain.
        from Panel_3 import Application_Button
        from application import application
        return [
            Application_Button(
                "Channels...",
                application(f"{self.domain_name}.Timing_Channel_Configuration_Panel.Timing_Channel_Configuration_Panel('{self.domain_name}')")
            ),
            Application_Button(
                "Clock...",
                application(f"{self.domain_name}.Timing_Clock_Configuration_Panel.Timing_Clock_Configuration_Panel('{self.domain_name}')")
            ),
            Application_Button(
                "Sequence...",
                application(f"{self.domain_name}.Configuration_Table_Panel.Configuration_Table_Panel('{self.domain_name}.sequence_modes')")
            ),
            Application_Button(
                "PP Modes...",
                application(f"{self.domain_name}.Configuration_Table_Panel.Configuration_Table_Panel('{self.domain_name}.timing_modes')")
            ),
        ]

    @property
    def application_menu_items(self):
        # Menu entries that launch the auxiliary configuration/scan panels.
        from Panel_3 import Application_Menu_Item
        from application import application
        return [
            Application_Menu_Item(
                "Setup...",
                application(f"{self.domain_name}.Timing_Setup_Panel.Timing_Setup_Panel('{self.domain_name}')")
            ),
            Application_Menu_Item(
                "Channel Configuration...",
                application(f"{self.domain_name}.Timing_Channel_Configuration_Panel.Timing_Channel_Configuration_Panel('{self.domain_name}')")
            ),
            Application_Menu_Item(
                "Calibration...",
                application(f"{self.domain_name}.Timing_Calibration_Panel.Timing_Calibration_Panel('{self.domain_name}')")
            ),
            Application_Menu_Item(
                "Clock Configuration...",
                application(f"{self.domain_name}.Timing_Clock_Configuration_Panel.Timing_Clock_Configuration_Panel('{self.domain_name}')")
            ),
            Application_Menu_Item(
                "PP Modes...",
                application(f"{self.domain_name}.Configuration_Table_Panel.Configuration_Table_Panel('{self.domain_name}.timing_modes')")
            ),
            Application_Menu_Item(
                "Sequence Modes...",
                application(f"{self.domain_name}.Configuration_Table_Panel.Configuration_Table_Panel('{self.domain_name}.sequence_modes')")
            ),
            Application_Menu_Item(
                "Configuration...",
                application(f"{self.domain_name}.Timing_Configuration_Panel.Timing_Configuration_Panel('{self.domain_name}')")
            ),
            Application_Menu_Item(
                "Delay Scan...",
                application(f"{self.domain_name}.Timing_System_Delay_Scan_Panel.Timing_System_Delay_Scan_Panel('{self.domain_name}')")
            ),
            Application_Menu_Item(
                "Laser On Scan...",
                application(f"{self.domain_name}.Timing_System_Laser_On_Scan_Panel.Timing_System_Laser_On_Scan_Panel('{self.domain_name}')")
            ),
        ]

    @property
    def timing_system(self):
        # Client proxy for the timing system of this panel's domain;
        # constructed on every access (presumably cached downstream — verify).
        from timing_system_client import timing_system_client
        return timing_system_client(self.timing_system_name)

    @property
    def domain_name(self):
        # The domain is identical to the timing-system name here.
        return self.timing_system_name

    @property
    def delay_choices(self):
        # Drop-down delay values: -100 ps, 0, then decades 100 ps .. 1 s.
        from numpy import concatenate, arange
        choices = concatenate(([-100e-12, 0], 10 ** (arange(-10, 1, 1.0))))
        return choices

    queue_choices = ["queue1", "queue2", "queue", ""]

    @property
    def hlc_choices(self):
        # Heat-load chopper phase choices: multiples of hsct from -12 to +12
        # (eps keeps the upper endpoint included in arange).
        choices = []
        from numpy import arange, finfo
        eps = finfo(float).eps
        hsct = self.timing_system.hsct
        try:
            choices = arange(-12 * hsct, +12 * hsct + eps, hsct)
        except ValueError:
            # e.g. hsct unavailable/zero makes arange raise; log and fall
            # back to an empty list.
            warning(format_exc())
        return choices

    @property
    def hsc_choices(self):
        # High-speed chopper phase choices: multiples of P0t/24 spanning
        # -12/24 to +12/24 of the P0 period.
        choices = []
        from numpy import arange, finfo
        eps = finfo(float).eps
        P0t = self.timing_system.P0t
        try:
            choices = arange(-12 * P0t / 24, 12 * P0t / 24 + eps, P0t / 24)
        except ValueError:
            # P0t unavailable/zero makes arange raise; log and fall back.
            warning(format_exc())
        return choices
if __name__ == '__main__':
    # Pick which timing-system domain this panel controls.
    timing_system_name = "BioCARS"
    # timing_system_name = "LaserLab"
    # timing_system_name = "TestBench"
    msg_format = "%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s"
    from redirect import redirect
    # Route log output to the per-domain destination for this panel.
    redirect("%s.Timing_Panel" % timing_system_name, format=msg_format)
    import wx
    # Reuse an existing wx application instance if one is already running.
    app = wx.GetApp() if wx.GetApp() else wx.App()
    self = Timing_Panel(timing_system_name)
    app.MainLoop()
| [
"friedrich.schotte@gmail.com"
] | friedrich.schotte@gmail.com |
9830e776c8a42b181f01171abd5a4f8c4c70daff | 28d82ed890654afac17c1342d24f12676685624b | /ask/migrations/0010_universalquestionlike.py | 20f8480f6ae30dbfd2df43b74d678c52a5d26603 | [] | no_license | Kudusov/AskKudusov | dd6607c3d93f888577f616edf710122fd5b98094 | 8ef18b2f44e83aa9f0ad0fe330b740a9f5338790 | refs/heads/master | 2021-10-09T10:55:15.962534 | 2018-12-26T20:20:36 | 2018-12-26T20:20:36 | 112,365,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-14 01:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11.7): adds the UniversalQuestionLike model —
    # an integer vote value (default 1) linking a Profile author to a
    # UniversalQuestion.  Avoid editing already-applied migrations.

    dependencies = [
        ('ask', '0009_universalquestion'),
    ]

    operations = [
        migrations.CreateModel(
            name='UniversalQuestionLike',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.SmallIntegerField(default=1)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ask.Profile')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ask.UniversalQuestion')),
            ],
        ),
    ]
| [
"qudusov98@mail.ru"
] | qudusov98@mail.ru |
14daa40d939878f3b3a2561eb401d6d5569a7094 | b7f2fd7651d264e2846b7425570f62e8530fb398 | /GifBiscuit/settings.py | 7e436102fa3253ffb863e39f6000873dbcbb28e8 | [] | no_license | charlierm/GifBiscuit | 468d4585fcb46dffd215304bc4a01ea97b98c02f | 5e4fd5b9865647d373f854c370ac9c1586e4e9b9 | refs/heads/master | 2016-09-10T19:18:29.794593 | 2013-01-18T19:11:47 | 2013-01-18T19:11:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,190 | py | # Django settings for GifBiscuit project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
# Fixed: IANA tz database names are "Region/City" — the previous value
# 'London/Europe' is not a recognized zone name.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is committed to the repository — for any real
# deployment it should be rotated and loaded from the environment instead.
SECRET_KEY = 'fzip%r0hvd=f=4$dil&nkkv74u8d2qrxf!6)d5)r7@n#3#!5%8'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'GifBiscuit.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'GifBiscuit.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| [
"joseph@redfern.me"
] | joseph@redfern.me |
db6802cee6da68cc71141f1e3102c152a6f20d28 | 7ee9bb43664c1c439d3408125f06714619fdc4d6 | /odev1.py | 160e6055113c482c57c6a77b4477500ffb5a8c3f | [] | no_license | sevdaghalarova/ileri_seviye_moduller | c53c8db6346da4c80b7a5c8f1fee9bb60441dcf3 | 736e9f97d36dae0125bc3ba5ca1c83e9d88c770c | refs/heads/main | 2023-01-03T05:42:49.078540 | 2020-10-19T21:54:50 | 2020-10-19T21:54:50 | 304,723,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | """Bilgisayarınızdaki tüm mp4,txt ve pdf dosyalarını os modülüyle arayın ve bunların nerede
bulunduklarını ve isimlerini ayrı ayrı
"pdf_dosyalari.txt","mp4_dosyaları.txt","txt_dosyaları.txt" adlı dosyalara kaydedin."""
import os
# Walk the Desktop tree once and record every matching file.
# Fix: the original re-opened each report file with mode "w" *inside* the
# loop, truncating it on every match, so only the last file name ever
# survived. Open each report once, up front, and write the full path
# (location + name), as the assignment text at the top of the file asks.
with open("PDF_dosyalari.txt", "w", encoding="utf-8") as pdf_file, \
        open("py_dosya.txt", "w", encoding="utf-8") as py_file:
    for klasor_yolu, klasor_islemi, dosya_islemi in os.walk("/Users/sevdaagalarova/Desktop"):
        for i in dosya_islemi:
            if i.endswith("pdf"):
                pdf_file.write(os.path.join(klasor_yolu, i) + "\n")
            elif i.endswith("py"):
                py_file.write(os.path.join(klasor_yolu, i) + "\n")
| [
"noreply@github.com"
] | sevdaghalarova.noreply@github.com |
d768465bd51e4517b1629e51404a5fcc3d2e422f | ed74957fd2b6f4d8cd0f18321f504977c6d3eff0 | /Common/__init__.py | 943f3f187ba6ee4764b58afe5e435cf48481fd74 | [
"Apache-2.0"
] | permissive | cyouwork/Python_Factory | 0c10bcdd8acee029eb25245694448130082ac2f7 | e42ba41ee823989ff5b74018b8c303050031785b | refs/heads/master | 2016-09-10T22:26:30.848669 | 2013-11-07T13:20:12 | 2013-11-07T13:20:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,128 | py | # coding=utf-8
# author=season
# import struct
# import socket
# import urllib2
# import json
# import datetime
# from ip2country import iplocate
# from dbconn import DbConn
# from logger import getlogger
# from colorLogger import coloLogger
# from subprocess import Popen, PIPE
# try:
# import xml.etree.cElementTree as ET
# except ImportError:
# import xml.etree.ElementTree as ET
#
#
# class CommonError(Exception):
# pass
#
#
# def iptoint(ip):
# return struct.unpack('!I', socket.inet_aton(ip))[0]
#
#
# def read_db(sql, dbconn):
# """从Mysql中读取数据"""
# dbconn_conf = {
# 'host': dbconn._host,
# 'port': dbconn._port,
# 'user': dbconn._user,
# 'password': dbconn._password,
# 'database': dbconn._database,
# 'sql': sql
# }
# cmd = """mysql -X -C -h {host} -P {port} -u {user} -p'{password}' \
# {database} --default-character-set=utf8 -s -e "{sql}" | iconv -t US-ASCII -c | sed 's///g'""".format(**dbconn_conf)
# return Popen(cmd, stdout=PIPE, shell=True).stdout
#
#
# def read_db_csv(sql, dbconn):
# """从Mysql中读取数据"""
# dbconn_conf = {
# 'host': dbconn._host,
# 'port': dbconn._port,
# 'user': dbconn._user,
# 'password': dbconn._password,
# 'database': dbconn._database,
# 'sql': sql
# }
# cmd = """mysql -C -h {host} -P {port} -u {user} -p'{password}' \
# {database} --default-character-set=utf8 -s -e "{sql}" """.format(**dbconn_conf)
# return Popen(cmd, stdout=PIPE, shell=True).stdout
#
#
# def xml_to_csv(source):
# '''将xml文件转成csv'''
# tree = ET.fromstring(source)
# to_list = []
# mylist = [t.getchildren() for t in tree.getchildren()]
# for i in [t for t in mylist]:
# text = [ii.text for ii in i]
# line = '"%s"' % '","'.join([x if x else 'None' for x in text])
# to_list.append(line)
# return to_list
#
#
# def csv_to_list(source):
# '''将csv文件转成list'''
# for line in source.split('\n'):
# if line:
# yield line
#
#
# def gameid_to_name(url='http://10.59.96.91/app.37wanwan.com/port/gamelist.php'):
# '''游戏id对应到游戏名'''
# request = urllib2.urlopen(url, timeout=10)
# content = json.loads(request.read(), encoding='gbk')
# return dict([(gameid, content[gameid]['name']) for gameid in content])
#
#
# _DATETIME_FORMATS = [
# '%a %b %d %H:%M:%S %Y',
# '%Y-%m-%d %H:%M:%S',
# '%Y-%m-%d %H:%M',
# '%Y-%m-%d %H',
# '%Y-%m-%d',
# '%Y%m%d%H%M%S',
# '%Y%m%d%H%M',
# '%Y%m%d%H',
# '%Y%m%d'
# ]
#
#
# def parse_datetime(in_datetime):
# if isinstance(in_datetime, datetime.datetime):
# return in_datetime
# for format in _DATETIME_FORMATS:
# try:
# return datetime.datetime.strptime(in_datetime, format)
# except ValueError, err:
# pass
# raise CommonError('Unrecognized date/time format: %r' % in_datetime)
#
#
# def bsearch(Sdict):
# Sdict = Sdict
# Keys = sorted(Sdict.keys())
# started = 0
# ended = len(Keys) - 1
#
# def wapper(key, started=started, ended=ended):
# if started > ended:
# return None
# index = (started + ended) / 2
# if Keys[index][0] <= key <= Keys[index][1]:
# return Sdict[Keys[index]]
# elif key < Keys[index][0]:
# return wapper(key, started=started, ended=index - 1)
# elif key > Keys[index][1]:
# return wapper(key, started=index + 1, ended=ended)
# return wapper
#
#
# _SPIDER_AGENTS = {
# '"Mozilla/4.0"': True,
# }
#
#
# def spider_filter(agent):
# return _SPIDER_AGENTS.get(agent, False)
#
# if __name__ == '__main__':
# import sys
# parse_datetime(sys.argv[1])
| [
"yangjy@yangjy.(none)"
] | yangjy@yangjy.(none) |
47f43bae0afd1a3e5f93f6d0f30be5940afcbe2d | 04507c6acd78ac5cb12e9ecfff5d82ad02433ab7 | /ABC 188/DD.py | 18ccb6f4e77336afe3c0698e00dabc4d31cf1224 | [] | no_license | cousin63/atcoder | 40e1bc32ce2f863155c0bc01f51dca59dc0b0118 | 133857447723736ebde7c37a79283283d19f5cf7 | refs/heads/main | 2023-04-13T16:41:00.789553 | 2021-04-25T06:59:09 | 2021-04-25T06:59:09 | 361,351,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | N, C = map(int, input().split())
# Build a sweep-line event list over day numbers: each service contributes a
# +c event on the day it starts and a -c event on the day it ends.
# `a -= 1` shifts the start so the [a, b] input becomes a half-open span
# when paired with the end day b.
event = []
for i in range(N):
    a, b, c = map(int, input().split())
    a -= 1
    event.append((a, c))
    event.append((b, -c))
event.sort()
#print(event)
# Sweep over the sorted events keeping the running daily fee.  Between two
# consecutive event days t..x the fee is constant, and each day is charged
# min(fee, C) — i.e. the per-day total is capped at C.
ans = 0
fee = 0
t = 0
for x,y in event:
    if x!=t:
        ans += min(fee, C) * (x - t)
        #print(min(fee, C) * (x - t))
        t = x
    fee += y
print(ans)
"noreply@github.com"
] | cousin63.noreply@github.com |
3ae90b4be704c71af5a0b1e338ce23517d38ce9f | c1ab0f3ccd1ae6f59a80bfcc3c2caaeed2868ba8 | /django_medcheck/core/migrations/0004_diagnosis_time.py | 3fe9f6ba308d6a050762ef67fae4fbe3415cd325 | [] | no_license | antz22/MedCheck | 0a91945f63c850b429f829b1e12e242f885ffdb9 | b75d44b13492f3dcac641504544eea3ef2e3754f | refs/heads/master | 2023-05-31T00:00:01.905437 | 2021-06-14T12:21:12 | 2021-06-14T12:21:12 | 376,289,998 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.2.3 on 2021-06-13 00:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``time`` timestamp field to the ``Diagnosis`` model."""
    dependencies = [
        ('core', '0003_user_age'),
    ]
    operations = [
        migrations.AddField(
            model_name='diagnosis',
            name='time',
            # blank+null so existing rows and admin forms need no value
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"anthonyznj@gmail.com"
] | anthonyznj@gmail.com |
d02c5e7c67ee80bc1dab4d0f1ba02e70f36ddb4a | 887a7ff30a1496e29622ddc9867a8b9a7e686a6d | /HoG/hog_python.py | 29ee4c33de66f5faa5d94a1146120a585c9aef38 | [] | no_license | DanyEle/SPD_19 | 906d6f2f4c22b41562615d0df5aa3871329c9de6 | 557e808372ed40843535bb3e2a20845e000a3753 | refs/heads/master | 2020-04-25T22:35:59.771327 | 2019-06-12T15:11:27 | 2019-06-12T15:11:27 | 173,116,473 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import math
"""
Created on Wed Jun 12 17:05:24 2019
@author: daniele
"""
def hog(img, bin_n=16, cell=8):
    """Compute a Histogram-of-Oriented-Gradients descriptor for ``img``.

    Parameters
    ----------
    img : numpy.ndarray
        Grayscale image (2-D array); gradients are taken with Sobel filters.
    bin_n : int, optional
        Number of orientation bins over [0, 2*pi). Default 16.
    cell : int, optional
        Side length in pixels of the square cells. Default 8. Pixels in a
        trailing partial cell are ignored, as in the original code.

    Returns
    -------
    numpy.ndarray
        1-D Hellinger-normalized descriptor: one ``bin_n``-bin,
        magnitude-weighted histogram per complete cell, concatenated.
    """
    # Per-pixel gradients and their polar (magnitude, angle) form.
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    # Quantize angles into bin indices (renamed from `bin`, which shadowed the builtin).
    bins = np.int32(bin_n * ang / (2 * np.pi))
    bin_cells = []
    mag_cells = []
    # BUGFIX: use integer division — on Python 3, "/" yields a float and
    # range() raises TypeError.
    for i in range(0, img.shape[0] // cell):
        for j in range(0, img.shape[1] // cell):
            bin_cells.append(bins[i*cell : i*cell+cell, j*cell : j*cell+cell])
            mag_cells.append(mag[i*cell : i*cell+cell, j*cell : j*cell+cell])
    # One magnitude-weighted orientation histogram per cell.
    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)
    # Transform to Hellinger kernel: L1-normalize, take sqrt, L2-normalize.
    eps = 1e-7
    hist /= hist.sum() + eps
    hist = np.sqrt(hist)
    # BUGFIX: math.norm does not exist (AttributeError); use the NumPy L2 norm.
    hist /= np.linalg.norm(hist) + eps
    return hist
"daniele.gadler@yahoo.it"
] | daniele.gadler@yahoo.it |
434c5a3a8380b851bf1a4918cab7b4f11323ae5e | 019d06389d7adbcc8b64932cc195a4e6e919b0d6 | /need/wizard/__init__.py | 7a784798040cad8b1bd88509130f184e7ee126fb | [] | no_license | cskoufana/integc | 6c8f45d1cb39436fb870a8f7e11d50cb559b7851 | 0ca39085d971fa9d418a95d16983bf47994a91b3 | refs/heads/master | 2021-01-12T13:30:41.353588 | 2017-06-01T09:38:39 | 2017-06-01T09:38:39 | 69,959,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | #__author__ = 'yenke'
import need_payment
| [
"yenkemarius@yahoo.fr"
] | yenkemarius@yahoo.fr |
e4cf1646e7113984bb1f78c2b637acfe96d1cc47 | fcd130826ea590414c6fa10e0a43cdd76f94619c | /server.py | 845dae38eb1a2e69cfd14ac8d0812d1056a87cab | [] | no_license | Tenlern/schedule-chatbot | ddb96d3091f1c85a8977570ebe540062ff95285c | 92ee9017ac3d1ec23dc0f6d1f069143b0d5a6d9a | refs/heads/master | 2022-04-06T23:36:11.484371 | 2020-02-14T16:11:06 | 2020-02-14T16:11:06 | 238,038,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | import chatbot
import cherrypy
import os
import logging
# Public endpoint and local bind settings; PORT is injected by Heroku.
WEBHOOK_HOST = 'reptileserver.herokuapp.com'
WEBHOOK_PORT = int(os.environ.get("PORT", 5000)) # 8443 # Telegram webhooks allow 443, 80, 88 or 8443 (the port needs to be open)
WEBHOOK_LISTEN = '0.0.0.0' # On some VPS hosts you may need to put the IP address here
WEBHOOK_SSL_CERT = './webhook_cert.pem' # Path to the SSL certificate
WEBHOOK_SSL_PRIV = './webhook_pkey.pem' # Path to the SSL private key
# Quick'n'dirty SSL certificate generation:
#
# openssl genrsa -out webhook_pkey.pem 2048
# openssl req -new -x509 -days 3650 -key webhook_pkey.pem -out webhook_cert.pem
#
# When asked for "Common Name (e.g. server FQDN or YOUR name)" you should reply
# with the same value you put in WEBHOOK_HOST
WEBHOOK_URL_BASE = "https://%s:%s" % (WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/%s/" % "ff"#(API_TOKEN) — the path would normally embed the bot token
# HTTP endpoint that receives Telegram webhook calls and feeds them to the bot.
class WebhookServer(object):
    """Accepts JSON POSTs from Telegram and dispatches them as bot updates."""
    @cherrypy.expose
    def index(self):
        headers = cherrypy.request.headers
        json_request = (
            'content-length' in headers
            and 'content-type' in headers
            and headers['content-type'] == 'application/json'
        )
        if not json_request:
            # Anything that is not a JSON webhook call is rejected outright.
            raise cherrypy.HTTPError(403)
        body_length = int(headers['content-length'])
        payload = cherrypy.request.body.read(body_length).decode("utf-8")
        update = chatbot.telebot.types.Update.de_json(payload)
        chatbot.bot.process_new_updates([update])
        return 'Hi'
# Remove any previous webhook first; setting a new one sometimes fails if an
# old webhook is still registered.
chatbot.bot.remove_webhook()
# Set webhook (currently disabled; the bot runs in polling mode below)
#chatbot.bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH, certificate=open(WEBHOOK_SSL_CERT, 'r'))
# Disable CherryPy requests log
access_log = cherrypy.log.access_log
for handler in tuple(access_log.handlers):
    access_log.removeHandler(handler)
# Configure the CherryPy server before starting it
cherrypy.config.update({
    'server.socket_host' : WEBHOOK_LISTEN,
    'server.socket_port' : WEBHOOK_PORT,
    'server.ssl_module' : 'builtin',
    #'server.ssl_certificate': WEBHOOK_SSL_CERT,
    #'server.ssl_private_key': WEBHOOK_SSL_PRIV
})
# NOTE(review): bot.polling() blocks the main thread, so cherrypy.quickstart()
# below is only reached after polling stops — webhook mode and polling mode
# appear to be mixed here; confirm which one is intended.
chatbot.bot.polling()
cherrypy.quickstart(WebhookServer(), WEBHOOK_URL_PATH, {'/': {}})
"illyash20009@yandex.ru"
] | illyash20009@yandex.ru |
6cfea04f6d1e3ce471d2bc8d7b215d276670e1e7 | 50c23c7a6cddb716c1da8ad0f71017ea105f74de | /run.py | 9c983e1e305f0f9db238889737e8d5ff0dcb05e2 | [] | no_license | dkimpara/dodgsonWinner | df56075e2077faf499bb911573f3ffbb2745b0cc | 026fd0ea66516b2078f5cc4b16956cde87e3a15a | refs/heads/master | 2022-11-30T12:01:56.347577 | 2020-08-18T05:36:48 | 2020-08-18T05:36:48 | 283,106,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from writeup.experimentCode import *
from datetime import datetime
import numpy as np
if __name__ == '__main__':
#collect_data([20,30,40], np.linspace(2.05, 2.45, 5))
#collect_data([20,30,40], np.linspace(2.23, 2.26, 2))
for m in [85, 100]:
startTime = datetime.now()
collect_data([m], np.linspace(2.1, 2.5, 9))
print(datetime.now() - startTime)
| [
"dhammakimpara@gmail.com"
] | dhammakimpara@gmail.com |
f218029fe99686b97e2ab0a354ec8ba8624a4ec6 | 0726646c823e21e32e21dd51516dd45d414916f0 | /crates/gen-wasmtime-py/tests/run.py | bb40b5c57bcb36565851aa53ce504ef8343398a9 | [
"LLVM-exception",
"Apache-2.0"
] | permissive | syrusakbary/witx-bindgen | e304a0ad4e8d2c017ad8b6aab0ed86909df6d65a | ed2f07c0d76d77d4fe31726fd48ff6f8a76d1b4c | refs/heads/main | 2023-06-20T22:34:29.612376 | 2021-07-27T16:39:29 | 2021-07-27T16:39:29 | 390,169,220 | 1 | 1 | Apache-2.0 | 2021-07-28T00:58:56 | 2021-07-28T00:58:56 | null | UTF-8 | Python | false | false | 21,211 | py | from dataclasses import dataclass
from exports.bindings import Wasm
from imports.bindings import add_host_to_linker, Host, Expected
from typing import Tuple, Optional, List
import exports.bindings as e
import imports.bindings as i
import math
import sys
import wasmtime
@dataclass
class HostState(i.HostState):
    # Host-side resource handed out to the guest; `val` is what
    # HostImpl.host_state_get reports back.
    val: int
HOST_STATE2_CLOSED = False  # flipped by HostState2.drop(); observed via host_state2_saw_close()
@dataclass
class HostState2(i.HostState2):
    val: int
    def drop(self) -> None:
        # Record deterministic destruction so the test can observe the close.
        global HOST_STATE2_CLOSED
        HOST_STATE2_CLOSED = True
@dataclass
class Markdown(i.Markdown2):
    # Toy "markdown" renderer: accumulates appended text, and render()
    # rewrites every occurrence of 'red' to 'green'.
    buf: str = ''
    def append(self, data: str) -> None:
        self.buf += data
    def render(self) -> str:
        return self.buf.replace('red', 'green')
class HostImpl(Host):
    """Host-side implementation of the ``imports`` interface.

    The wasm module under test calls back into these methods. Most are pure
    echo/roundtrip helpers; the remainder assert that the canonical-ABI glue
    delivered exactly the values the guest sent, or exercise host-owned
    resource handles (HostState/HostState2/Markdown).
    """
    # --- scalar roundtrips: return the argument unchanged -------------------
    def roundtrip_u8(self, a: int) -> int:
        return a
    def roundtrip_s8(self, a: int) -> int:
        return a
    def roundtrip_u16(self, a: int) -> int:
        return a
    def roundtrip_s16(self, a: int) -> int:
        return a
    def roundtrip_u32(self, a: int) -> int:
        return a
    def roundtrip_s32(self, a: int) -> int:
        return a
    def roundtrip_u64(self, a: int) -> int:
        return a
    def roundtrip_s64(self, a: int) -> int:
        return a
    def roundtrip_f32(self, a: float) -> float:
        return a
    def roundtrip_f64(self, a: float) -> float:
        return a
    def roundtrip_char(self, a: str) -> str:
        return a
    def multiple_results(self) -> Tuple[int, int]:
        return (4, 5)
    # set/get share mutable per-instance state across guest calls
    def set_scalar(self, a: int) -> None:
        self.scalar = a
    def get_scalar(self) -> int:
        return self.scalar
    # --- records, flags, tuples ---------------------------------------------
    def swap_tuple(self, a: Tuple[int, int]) -> Tuple[int, int]:
        return (a[1], a[0])
    def roundtrip_flags1(self, a: i.F1) -> i.F1:
        return a
    def roundtrip_flags2(self, a: i.F2) -> i.F2:
        return a
    def roundtrip_flags3(self, a: i.Flag8, b: i.Flag16, c: i.Flag32, d: i.Flag64) -> Tuple[i.Flag8, i.Flag16, i.Flag32, i.Flag64]:
        return (a, b, c, d)
    def roundtrip_record1(self, a: i.R1) -> i.R1:
        return a
    def tuple0(self, a: None) -> None:
        pass
    def tuple1(self, a: Tuple[int]) -> Tuple[int]:
        return (a[0],)
    # --- variants, options, results -----------------------------------------
    def roundtrip_option(self, a: Optional[float]) -> Optional[int]:
        if a:
            return int(a)
        return None
    def roundtrip_result(self, a: i.Expected[int, float]) -> i.Expected[float, int]:
        # Swap the payload types: Ok[int] -> Ok[float], Err[float] -> Err[int].
        if isinstance(a, i.Ok):
            return i.Ok(float(a.value))
        return i.Err(int(a.value))
    def roundtrip_enum(self, a: i.E1) -> i.E1:
        return a
    def invert_bool(self, a: bool) -> bool:
        return not a
    def variant_casts(self, a: i.Casts) -> i.Casts:
        return a
    def variant_zeros(self, a: i.Zeros) -> i.Zeros:
        return a
    def variant_typedefs(self, a: i.OptionTypedef, b: i.BoolTypedef, c: i.ResultTypedef) -> None:
        pass
    def variant_enums(self, a: bool, b: Expected[None, None], c: i.MyErrno) -> Tuple[bool, Expected[None, None], i.MyErrno]:
        # Verify the guest's values, then answer with the "opposite" ones.
        assert(a)
        assert(isinstance(b, i.Ok))
        assert(c == i.MyErrno.SUCCESS)
        return (False, i.Err(None), i.MyErrno.A)
    # --- lists and strings ---------------------------------------------------
    def list_param(self, a: bytes) -> None:
        assert(a == b'\x01\x02\x03\x04')
    def list_param2(self, a: str) -> None:
        assert(a == 'foo')
    def list_param3(self, a: List[str]) -> None:
        assert(a == ['foo', 'bar', 'baz'])
    def list_param4(self, a: List[List[str]]) -> None:
        assert(a == [['foo', 'bar'], ['baz']])
    def list_result(self) -> bytes:
        return b'\x01\x02\x03\x04\x05'
    def list_result2(self) -> str:
        return 'hello!'
    def list_result3(self) -> List[str]:
        return ['hello,', 'world!']
    def string_roundtrip(self, a: str) -> str:
        return a
    def unaligned_roundtrip1(self, a: List[int], b: List[int], c: List[int], d: List[i.Flag32], e: List[i.Flag64]) -> None:
        assert(a == [1])
        assert(b == [2])
        assert(c == [3])
        assert(d == [i.Flag32.B8])
        assert(e == [i.Flag64.B9])
    def unaligned_roundtrip2(self, a: List[i.UnalignedRecord], b: List[float], c: List[float], d: List[str], e: List[bytes]) -> None:
        assert(a == [i.UnalignedRecord(a=10, b=11)])
        assert(b == [100.0])
        assert(c == [101.0])
        assert(d == ['foo'])
        assert(e == [b'\x66'])
    # --- min/max boundary values for each integer/float width ---------------
    def list_minmax8(self, a: bytes, b: List[int]) -> Tuple[bytes, List[int]]:
        assert(a == b'\x00\xff')
        assert(b == [-(1 << (8 - 1)), (1 << (8 - 1)) - 1])
        return (a, b)
    def list_minmax16(self, a: List[int], b: List[int]) -> Tuple[List[int], List[int]]:
        assert(a == [0, (1 << 16) - 1])
        assert(b == [-(1 << (16 - 1)), (1 << (16 - 1)) - 1])
        return (a, b)
    def list_minmax32(self, a: List[int], b: List[int]) -> Tuple[List[int], List[int]]:
        assert(a == [0, (1 << 32) - 1])
        assert(b == [-(1 << (32 - 1)), (1 << (32 - 1)) - 1])
        return (a, b)
    def list_minmax64(self, a: List[int], b: List[int]) -> Tuple[List[int], List[int]]:
        assert(a == [0, (1 << 64) - 1])
        assert(b == [-(1 << (64 - 1)), (1 << (64 - 1)) - 1])
        return (a, b)
    def list_minmax_float(self, a: List[float], b: List[float]) -> Tuple[List[float], List[float]]:
        assert(a == [-3.4028234663852886e+38, 3.4028234663852886e+38, -float('inf'), float('inf')])
        assert(b == [-sys.float_info.max, sys.float_info.max, -float('inf'), float('inf')])
        return (a, b)
    # --- host-owned resource handles -----------------------------------------
    def host_state_create(self) -> i.HostState:
        return HostState(100)
    def host_state_get(self, a: i.HostState) -> int:
        assert(isinstance(a, HostState))
        return a.val
    def host_state2_create(self) -> i.HostState2:
        return HostState2(101)
    def host_state2_saw_close(self) -> bool:
        return HOST_STATE2_CLOSED
    def two_host_states(self, a: i.HostState, b: i.HostState2) -> Tuple[i.HostState, i.HostState2]:
        return (b, a)
    def host_state2_param_record(self, a: i.HostStateParamRecord) -> None:
        pass
    def host_state2_param_tuple(self, a: i.HostStateParamTuple) -> None:
        pass
    def host_state2_param_option(self, a: i.HostStateParamOption) -> None:
        pass
    def host_state2_param_result(self, a: i.HostStateParamResult) -> None:
        pass
    def host_state2_param_variant(self, a: i.HostStateParamVariant) -> None:
        pass
    def host_state2_param_list(self, a: List[i.HostState2]) -> None:
        pass
    def host_state2_result_record(self) -> i.HostStateResultRecord:
        return i.HostStateResultRecord(HostState(2))
    def host_state2_result_tuple(self) -> i.HostStateResultTuple:
        return (HostState(2),)
    def host_state2_result_option(self) -> i.HostStateResultOption:
        return HostState(2)
    def host_state2_result_result(self) -> i.HostStateResultResult:
        return i.Ok(HostState2(2))
    def host_state2_result_variant(self) -> i.HostStateResultVariant:
        return i.HostStateResultVariant0(HostState2(2))
    def host_state2_result_list(self) -> List[i.HostState2]:
        return [HostState2(2), HostState2(5)]
    def markdown2_create(self) -> i.Markdown2:
        return Markdown()
    # --- pull/push buffers ----------------------------------------------------
    def buffer_u8(self, a: i.PullBuffer[int], b: i.PushBuffer[int]) -> int:
        assert(len(a) == 1)
        assert(len(b) == 10)
        assert(a.pull() == 0)
        assert(a.pull() == None)
        b.push(1)
        b.push(2)
        b.push(3)
        return 3
    def buffer_u32(self, a: i.PullBuffer[int], b: i.PushBuffer[int]) -> int:
        assert(len(a) == 1)
        assert(len(b) == 10)
        assert(a.pull() == 0)
        assert(a.pull() == None)
        b.push(1)
        b.push(2)
        b.push(3)
        return 3
    def buffer_bool(self, a: i.PullBuffer[bool], b: i.PushBuffer[bool]) -> int:
        # Drain `a`, pushing the negation of each value; return the count.
        assert(len(a) <= len(b))
        n = 0
        while True:
            val = a.pull()
            if val is None:
                break
            b.push(not val)
            n += 1
        return n
    def buffer_mutable1(self, x: List[i.PullBuffer[bool]]) -> None:
        assert(len(x) == 1)
        assert(len(x[0]) == 5)
        assert(x[0].pull() == True)
        assert(x[0].pull() == False)
        assert(x[0].pull() == True)
        assert(x[0].pull() == True)
        assert(x[0].pull() == False)
        assert(x[0].pull() == None)
    def buffer_mutable2(self, a: List[i.PushBuffer[int]]) -> int:
        assert(len(a) == 1)
        assert(len(a[0]) > 4)
        a[0].push(1)
        a[0].push(2)
        a[0].push(3)
        a[0].push(4)
        return 4
    def buffer_mutable3(self, a: List[i.PushBuffer[bool]]) -> int:
        assert(len(a) == 1)
        assert(len(a[0]) > 3)
        a[0].push(False)
        a[0].push(True)
        a[0].push(False)
        return 3
    def buffer_in_record(self, a: i.BufferInRecord) -> None:
        pass
    def buffer_typedef(self, a: i.ParamInBufferU8, b: i.ParamOutBufferU8, c: i.ParamInBufferBool, d: i.ParamOutBufferBool) -> None:
        pass
    # --- lists embedded in records/variants/typedefs --------------------------
    def list_in_record1(self, a: i.ListInRecord1) -> None:
        pass
    def list_in_record2(self) -> i.ListInRecord2:
        return i.ListInRecord2('list_in_record2')
    def list_in_record3(self, a: i.ListInRecord3) -> i.ListInRecord3:
        assert(a.a == 'list_in_record3 input')
        return i.ListInRecord3('list_in_record3 output')
    def list_in_record4(self, a: i.ListInAlias) -> i.ListInAlias:
        assert(a.a == 'input4')
        return i.ListInRecord4('result4')
    def list_in_variant1(self, a: i.ListInVariant11, b: i.ListInVariant12, c: i.ListInVariant13) -> None:
        assert(a == 'foo')
        assert(b == i.Err('bar'))
        assert(c == i.ListInVariant130('baz'))
    def list_in_variant2(self) -> i.ListInVariant2:
        return 'list_in_variant2'
    def list_in_variant3(self, a: i.ListInVariant3) -> i.ListInVariant3:
        assert(a == 'input3')
        return 'output3'
    def errno_result(self) -> i.Expected[None, i.MyErrno]:
        return i.Err(i.MyErrno.B)
    def list_typedefs(self, a: i.ListTypedef, c: i.ListTypedef3) -> Tuple[i.ListTypedef2, i.ListTypedef3]:
        assert(a == 'typedef1')
        assert(c == ['typedef2'])
        return (b'typedef3', ['typedef4'])
    def list_of_variants(self, a: List[bool], b: List[i.Expected[None, None]], c: List[i.MyErrno]) -> Tuple[List[bool], List[i.Expected[None, None]], List[i.MyErrno]]:
        # Verify the guest's lists, then answer with complementary values.
        assert(a == [True, False])
        assert(b == [i.Ok(None), i.Err(None)])
        assert(c == [i.MyErrno.SUCCESS, i.MyErrno.A])
        return (
            [False, True],
            [i.Err(None), i.Ok(None)],
            [i.MyErrno.A, i.MyErrno.B],
        )
def run(wasm_file: str) -> None:
    """Instantiate ``wasm_file`` against HostImpl and run every test suite.

    Also checks that the module's allocation count returns to its initial
    value, i.e. all binding glue freed what it allocated.
    """
    print('Running', wasm_file)
    store = wasmtime.Store()
    module = wasmtime.Module.from_file(store.engine, wasm_file)
    linker = wasmtime.Linker(store.engine)
    linker.define_wasi()
    wasi = wasmtime.WasiConfig()
    wasi.inherit_stdout()
    wasi.inherit_stderr()
    store.set_wasi(wasi)
    # Define state imported from python and register it with our linker
    host = HostImpl()
    add_host_to_linker(linker, store, host)
    # Using the linker, instantiate the module and wrap it up in the export
    # bindings.
    wasm = Wasm(store, linker, module)
    # Run all the tests!
    allocated_bytes = wasm.allocated_bytes(store)
    wasm.run_import_tests(store)
    test_scalars(wasm, store)
    test_records(wasm, store)
    test_variants(wasm, store)
    test_lists(wasm, store)
    test_flavorful(wasm, store)
    test_invalid(wasm, store)
    test_handles(wasm, store)
    # Ensure that we properly called `free` everywhere in all the glue that we
    # needed to.
    assert(allocated_bytes == wasm.allocated_bytes(store))
def test_scalars(wasm: Wasm, store: wasmtime.Store) -> None:
    """Roundtrip every scalar type through the guest, including width extremes,
    float infinities/NaN, chars, and the guest-held mutable scalar."""
    assert(wasm.roundtrip_u8(store, 1) == 1)
    assert(wasm.roundtrip_u8(store, (1 << 8) - 1) == (1 << 8) - 1)
    assert(wasm.roundtrip_u16(store, 1) == 1)
    assert(wasm.roundtrip_u16(store, (1 << 16) - 1) == (1 << 16) - 1)
    assert(wasm.roundtrip_u32(store, 1) == 1)
    assert(wasm.roundtrip_u32(store, (1 << 32) - 1) == (1 << 32) - 1)
    assert(wasm.roundtrip_u64(store, 1) == 1)
    assert(wasm.roundtrip_u64(store, (1 << 64) - 1) == (1 << 64) - 1)
    assert(wasm.roundtrip_s8(store, 1) == 1)
    assert(wasm.roundtrip_s8(store, (1 << (8 - 1) - 1)) == (1 << (8 - 1) - 1))
    assert(wasm.roundtrip_s8(store, -(1 << (8 - 1))) == -(1 << (8 - 1)))
    assert(wasm.roundtrip_s16(store, 1) == 1)
    assert(wasm.roundtrip_s16(store, (1 << (16 - 1) - 1)) == (1 << (16 - 1) - 1))
    assert(wasm.roundtrip_s16(store, -(1 << (16 - 1))) == -(1 << (16 - 1)))
    assert(wasm.roundtrip_s32(store, 1) == 1)
    assert(wasm.roundtrip_s32(store, (1 << (32 - 1) - 1)) == (1 << (32 - 1) - 1))
    assert(wasm.roundtrip_s32(store, -(1 << (32 - 1))) == -(1 << (32 - 1)))
    assert(wasm.roundtrip_s64(store, 1) == 1)
    assert(wasm.roundtrip_s64(store, (1 << (64 - 1) - 1)) == (1 << (64 - 1) - 1))
    assert(wasm.roundtrip_s64(store, -(1 << (64 - 1))) == -(1 << (64 - 1)))
    assert(wasm.multiple_results(store) == (100, 200))
    inf = float('inf')
    assert(wasm.roundtrip_f32(store, 1.0) == 1.0)
    assert(wasm.roundtrip_f32(store, inf) == inf)
    assert(wasm.roundtrip_f32(store, -inf) == -inf)
    # NaN never compares equal, so check it via isnan instead.
    assert(math.isnan(wasm.roundtrip_f32(store, float('nan'))))
    assert(wasm.roundtrip_f64(store, 1.0) == 1.0)
    assert(wasm.roundtrip_f64(store, inf) == inf)
    assert(wasm.roundtrip_f64(store, -inf) == -inf)
    assert(math.isnan(wasm.roundtrip_f64(store, float('nan'))))
    assert(wasm.roundtrip_char(store, 'a') == 'a')
    assert(wasm.roundtrip_char(store, ' ') == ' ')
    assert(wasm.roundtrip_char(store, '🚩') == '🚩')
    wasm.set_scalar(store, 2)
    assert(wasm.get_scalar(store) == 2)
    wasm.set_scalar(store, 4)
    assert(wasm.get_scalar(store) == 4)
def test_records(wasm: Wasm, store: wasmtime.Store) -> None:
    """Roundtrip tuples, flag sets (including empty and combined flags),
    records, and 0-/1-element tuples through the guest."""
    assert(wasm.swap_tuple(store, (1, 2)) == (2, 1))
    assert(wasm.roundtrip_flags1(store, e.F1.A) == e.F1.A)
    assert(wasm.roundtrip_flags1(store, e.F1(0)) == e.F1(0))
    assert(wasm.roundtrip_flags1(store, e.F1.A | e.F1.B) == (e.F1.A | e.F1.B))
    assert(wasm.roundtrip_flags2(store, e.F2.C) == e.F2.C)
    assert(wasm.roundtrip_flags2(store, e.F2(0)) == e.F2(0))
    assert(wasm.roundtrip_flags2(store, e.F2.D) == e.F2.D)
    assert(wasm.roundtrip_flags2(store, e.F2.C | e.F2.E) == (e.F2.C | e.F2.E))
    r = wasm.roundtrip_record1(store, e.R1(8, e.F1(0)))
    assert(r.a == 8)
    assert(r.b == e.F1(0))
    r = wasm.roundtrip_record1(store, e.R1(a=0, b=e.F1.A | e.F1.B))
    assert(r.a == 0)
    assert(r.b == (e.F1.A | e.F1.B))
    wasm.tuple0(store, None)
    assert(wasm.tuple1(store, (1,)) == (1,))
def test_variants(wasm: Wasm, store: wasmtime.Store) -> None:
    """Roundtrip option/result/enum/bool values and the generated variant
    families (Casts, Zeros, typedef'd variants) through the guest."""
    assert(wasm.roundtrip_option(store, 1.) == 1)
    assert(wasm.roundtrip_option(store, None) == None)
    assert(wasm.roundtrip_option(store, 2.) == 2)
    assert(wasm.roundtrip_result(store, e.Ok(2)) == e.Ok(2))
    assert(wasm.roundtrip_result(store, e.Ok(4)) == e.Ok(4))
    assert(wasm.roundtrip_result(store, e.Err(5)) == e.Err(5))
    assert(wasm.roundtrip_enum(store, e.E1.A) == e.E1.A)
    assert(wasm.roundtrip_enum(store, e.E1.B) == e.E1.B)
    assert(wasm.invert_bool(store, True) == False)
    assert(wasm.invert_bool(store, False) == True)
    # Each Casts arm exercises a different payload-type cast (A variants).
    a1, a2, a3, a4, a5, a6 = wasm.variant_casts(store, (
        e.C1A(1),
        e.C2A(2),
        e.C3A(3),
        e.C4A(4),
        e.C5A(5),
        e.C6A(6.),
    ))
    assert(a1 == e.C1A(1))
    assert(a2 == e.C2A(2))
    assert(a3 == e.C3A(3))
    assert(a4 == e.C4A(4))
    assert(a5 == e.C5A(5))
    assert(a6 == e.C6A(6))
    # Same shapes with the B variants.
    b1, b2, b3, b4, b5, b6 = wasm.variant_casts(store, (
        e.C1B(1),
        e.C2B(2),
        e.C3B(3),
        e.C4B(4),
        e.C5B(5),
        e.C6B(6.),
    ))
    assert(b1 == e.C1B(1))
    assert(b2 == e.C2B(2))
    assert(b3 == e.C3B(3))
    assert(b4 == e.C4B(4))
    assert(b5 == e.C5B(5))
    assert(b6 == e.C6B(6))
    z1, z2, z3, z4 = wasm.variant_zeros(store, (
        e.Z1A(1),
        e.Z2A(2),
        e.Z3A(3.),
        e.Z4A(4.),
    ))
    assert(z1 == e.Z1A(1))
    assert(z2 == e.Z2A(2))
    assert(z3 == e.Z3A(3))
    assert(z4 == e.Z4A(4))
    wasm.variant_typedefs(store, None, False, e.Err(None))
def test_lists(wasm: Wasm, store: wasmtime.Store) -> None:
    """Pass bytes, strings, and (nested) string lists in and out of the guest,
    including empty and non-ASCII strings."""
    wasm.list_param(store, b'\x01\x02\x03\x04')
    wasm.list_param2(store, "foo")
    wasm.list_param3(store, ["foo", "bar", "baz"])
    wasm.list_param4(store, [["foo", "bar"], ["baz"]])
    assert(wasm.list_result(store) == b'\x01\x02\x03\x04\x05')
    assert(wasm.list_result2(store) == "hello!")
    assert(wasm.list_result3(store) == ["hello,", "world!"])
    assert(wasm.string_roundtrip(store, "x") == "x")
    assert(wasm.string_roundtrip(store, "") == "")
    assert(wasm.string_roundtrip(store, "hello ⚑ world") == "hello ⚑ world")
def test_flavorful(wasm: Wasm, store: wasmtime.Store) -> None:
    """Exercise lists embedded inside records, variants, aliases, and typedefs."""
    wasm.list_in_record1(store, e.ListInRecord1("list_in_record1"))
    assert(wasm.list_in_record2(store) == e.ListInRecord2(a="list_in_record2"))
    assert(wasm.list_in_record3(store, e.ListInRecord3("list_in_record3 input")).a == "list_in_record3 output")
    assert(wasm.list_in_record4(store, e.ListInRecord4("input4")).a == "result4")
    wasm.list_in_variant1(store, "foo", e.Err("bar"), e.ListInVariant130('baz'))
    assert(wasm.list_in_variant2(store) == "list_in_variant2")
    assert(wasm.list_in_variant3(store, "input3") == "output3")
    assert(isinstance(wasm.errno_result(store), e.Err))
    r1, r2 = wasm.list_typedefs(store, "typedef1", ["typedef2"])
    assert(r1 == b'typedef3')
    assert(r2 == ['typedef4'])
def test_invalid(wasm: Wasm, store: wasmtime.Store) -> None:
    """Call guest exports that deliberately produce out-of-range values and
    verify the Python bindings reject each with the expected error message."""
    def assert_throws(name: str, msg: str) -> None:
        # Invoke the raw export and check that `msg` appears in whichever
        # validation exception type the bindings raise.
        export = wasm.instance.exports(store)[name]
        assert(isinstance(export, wasmtime.Func))
        try:
            export(store)
            raise RuntimeError('expected exception')
        except TypeError as e:
            actual = str(e)
        except OverflowError as e:
            actual = str(e)
        except ValueError as e:
            actual = str(e)
        except IndexError as e:
            actual = str(e)
        if not msg in actual:
            print(actual)
        assert(msg in actual)
    assert_throws('invalid_bool', 'invalid variant discriminant for bool')
    assert_throws('invalid_u8', 'must be between')
    assert_throws('invalid_s8', 'must be between')
    assert_throws('invalid_u16', 'must be between')
    assert_throws('invalid_s16', 'must be between')
    assert_throws('invalid_char', 'not a valid char')
    assert_throws('invalid_e1', 'not a valid E1')
    assert_throws('invalid_handle', 'handle index not valid')
    assert_throws('invalid_handle_close', 'handle index not valid')
def test_handles(wasm: Wasm, store: wasmtime.Store) -> None:
    """Exercise guest-owned resource handles: creation, explicit drop,
    handles nested in records/tuples/options/results/variants/lists, and
    the guest-side Markdown resource."""
    # Param/result of a handle works in a simple fashion
    s: e.WasmState = wasm.wasm_state_create(store)
    assert(wasm.wasm_state_get_val(store, s) == 100)
    # Deterministic destruction is possible
    assert(wasm.wasm_state2_saw_close(store) == False)
    s2: e.WasmState2 = wasm.wasm_state2_create(store)
    assert(wasm.wasm_state2_saw_close(store) == False)
    s2.drop(store)
    assert(wasm.wasm_state2_saw_close(store) == True)
    arg1 = wasm.wasm_state_create(store)
    arg2 = wasm.wasm_state2_create(store)
    c, d = wasm.two_wasm_states(store, arg1, arg2)
    arg1.drop(store)
    arg2.drop(store)
    wasm.wasm_state2_param_record(store, e.WasmStateParamRecord(d))
    wasm.wasm_state2_param_tuple(store, (d,))
    wasm.wasm_state2_param_option(store, d)
    wasm.wasm_state2_param_option(store, None)
    wasm.wasm_state2_param_result(store, e.Ok(d))
    wasm.wasm_state2_param_result(store, e.Err(2))
    wasm.wasm_state2_param_variant(store, e.WasmStateParamVariant0(d))
    wasm.wasm_state2_param_variant(store, e.WasmStateParamVariant1(2))
    wasm.wasm_state2_param_list(store, [])
    wasm.wasm_state2_param_list(store, [d])
    wasm.wasm_state2_param_list(store, [d, d])
    c.drop(store)
    d.drop(store)
    # Handles returned inside containers must also be dropped explicitly.
    wasm.wasm_state2_result_record(store).a.drop(store)
    wasm.wasm_state2_result_tuple(store)[0].drop(store)
    opt = wasm.wasm_state2_result_option(store)
    assert(opt is not None)
    opt.drop(store)
    result = wasm.wasm_state2_result_result(store)
    assert(isinstance(result, e.Ok))
    result.value.drop(store)
    variant = wasm.wasm_state2_result_variant(store)
    print(variant)
    assert(isinstance(variant, e.WasmStateResultVariant0))
    variant.value.drop(store)
    for val in wasm.wasm_state2_result_list(store):
        val.drop(store)
    s.drop(store)
    md = e.Markdown.create(store, wasm)
    if md:
        md.append(store, "red is the best color")
        assert(md.render(store) == "green is the best color")
        md.drop(store)
if __name__ == '__main__':
    # Usage: run.py <path-to-test.wasm>
    run(sys.argv[1])
| [
"noreply@github.com"
] | syrusakbary.noreply@github.com |
a3aa786ffa9172b133c09fd175857777db8ef640 | 3eebfc8f50e025755b41be4cc84999f38c569ffd | /dataset_import/import_traffic_sign.py | 506756c81282d5dd57c00408807760c302b7e197 | [] | no_license | Dmitri00/sign_classification | af939b61ce97d1f9c32ca24853f873571303fa56 | 4f6270b13e6766d3eb60f2bb8a735cf73907f3fd | refs/heads/master | 2023-01-20T20:32:32.921243 | 2020-11-27T12:30:41 | 2020-11-27T12:30:41 | 261,696,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from datasets.import_data_pipeline import data_transforms
from datasets.import_data_pipeline import dataset_split_pipeline
from datasets.import_data_pipeline import prepare_data
from datasets.traffic_sign import TrafficSign
def import_data(root, batch_size, num_workers):
    """Build train/val data loaders for the TrafficSign dataset.

    Delegates to ``prepare_data`` using the module-default transforms and
    split pipeline; training batches are drawn randomly, validation batches
    sequentially.
    """
    samplers = {
        'train': RandomSampler,
        'val': SequentialSampler,
    }
    return prepare_data(
        TrafficSign,
        root,
        data_transforms,
        dataset_split_pipeline,
        samplers,
        batch_size,
        num_workers,
    )
| [
"dima.toichkin@gmail.com"
] | dima.toichkin@gmail.com |
7352c3a42dd59b3f2782b9efd33d692247338565 | 8a91aaa0b8700ddda0814b1d6fb94e6df47fa362 | /NetworkBuilder.py | 5476dc074dff8f514d01cad78c3eb8eeb35a990d | [] | no_license | Justtolook/AI_helloWorld | f1dd2d3451bfa8793aff867298b0f1e60a43ec6a | 86ff85e136e33c4a296642f2109a6ed24c266ad2 | refs/heads/master | 2020-04-04T16:28:32.160647 | 2018-11-04T17:50:15 | 2018-11-04T17:50:15 | 156,079,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | import tensorflow as tf
class NetworkBuilder:
    """Incrementally builds a TensorFlow (v1) graph by attaching layers.

    Each ``attach_*`` method takes the current output tensor and returns the
    new output tensor; most layer types are still unimplemented stubs.
    """
    def __init__(self):
        pass

    def attach_conv_layer(self, input_layer, output_size=32, feature_size=[5, 5], strides=[1, 1, 1, 1], padding='SAME',
                          summary=False):
        """Attach a 2-D convolution to ``input_layer`` and return its output.

        input_layer: NHWC tensor; the input channel count is read from its
            static shape. output_size: number of output channels.
        feature_size: [height, width] of the filter. summary: if True, log a
            histogram of the filter weights for TensorBoard.
        """
        input_size = input_layer.get_shape().as_list()[-1]
        conv_weights = tf.Variable(tf.random_normal([feature_size[0], feature_size[1], input_size, output_size]),
                                   name='conv_weights')
        # BUGFIX: the name keyword was previously passed (misspelled as
        # 'canv_biases') to tf.random_normal, naming the random op instead of
        # the Variable itself.
        conv_biases = tf.Variable(tf.random_normal([output_size]), name='conv_biases')
        conv_layer = tf.nn.conv2d(input_layer, conv_weights, strides, padding, name='conv_layer')
        if summary:
            tf.summary.histogram(conv_weights.name, conv_weights)
        # NOTE(review): conv_biases is created but never applied — probably
        # tf.nn.bias_add(conv_layer, conv_biases) was intended; left unchanged
        # here to preserve the current numerics. Confirm with the author.
        return conv_layer

    def attach_pooling_layer(self):
        pass

    def attach_flatten_layer(self):
        pass

    def attach_dense_layer(self):
        pass

    def attach_relu_layer(self):
        pass

    def attach_softmax_layer(self):
        pass

    def attach_sigmoid_layer(self):
        pass
| [
"hlammert@hotmail.de"
] | hlammert@hotmail.de |
fb941c30c3ff7a4afdff1dc8342dc86c3c24b634 | c14b741830ace618eb40efb28715d155f35a8be1 | /venvs/edxapp/lib/python2.7/site-packages/dealer/contrib/django/staff.py | 4d6da87c84d529069c2d93ff0d38e76887a6080d | [] | no_license | myoortcloud/kmu-edx | 6ad57e44e923e153ad2a6ad5c36f2a4b1f98ecde | 02afdda4f0ce040cd7e542d5d80c3256cd9da4b4 | refs/heads/master | 2021-06-02T17:40:57.408858 | 2014-08-06T08:58:55 | 2014-08-06T08:58:55 | 22,097,363 | 0 | 1 | null | 2020-07-25T21:13:42 | 2014-07-22T08:57:55 | Python | UTF-8 | Python | false | false | 337 | py | from .settings import BACKEND
def context_processor(request):
    """Django context processor: expose the current SCM revision as ``REVISION``."""
    return {'REVISION': BACKEND.revision}
class Middleware:
    """Django (old-style) middleware: attach the current SCM revision to each request."""
    @staticmethod
    def process_request(request):
        # Returning None lets Django continue normal request processing.
        request.revision = BACKEND.revision
| [
"dotmagi@me.com"
] | dotmagi@me.com |
112f4aa81442b844c76a7d232671c177c7dd10fc | 2196eef6085ea26e8556474c0069a8dcda020c64 | /utils/re_ranking.py | a788d4f8957e059e5d5fd0c9adec463c5845b385 | [] | no_license | VIRC-lab-csust/LSDNN | f32423fa97f0490b98c786852242ed6c64714de9 | 2e95eeeabfd98cb1c71b925e1100dcf270f5cb93 | refs/heads/master | 2020-12-11T12:00:30.462389 | 2020-06-02T11:51:13 | 2020-06-02T11:51:13 | 233,843,411 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | #!/usr/bin/env python2/python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 14:46:56 2017
@author: luohao
Modified by Houjing Huang, 2017-12-22.
- This version accepts distance matrix instead of raw features.
- The difference of `/` division between python 2 and 3 is handled.
- numpy.float16 is replaced by numpy.float32 for numerical precision.
Modified by Zhedong Zheng, 2018-1-12.
- replace sort with topK, which save about 30s.
"""
"""
CVPR2017 paper:Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
"""
"""
API
q_g_dist: query-gallery distance matrix, numpy array, shape [num_query, num_gallery]
q_q_dist: query-query distance matrix, numpy array, shape [num_query, num_query]
g_g_dist: gallery-gallery distance matrix, numpy array, shape [num_gallery, num_gallery]
k1, k2, lambda_value: parameters, the original paper is (k1=20, k2=6, lambda_value=0.3)
Returns:
final_dist: re-ranked distance, numpy array, shape [num_query, num_gallery]
"""
import numpy as np
def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=5, k2=5, lambda_value=0.3):
# The following naming, e.g. gallery_num, is different from outer scope.
# Don't care about it.
original_dist = np.concatenate(
[np.concatenate([q_q_dist, q_g_dist], axis=1),
np.concatenate([q_g_dist.T, g_g_dist], axis=1)],
axis=0)
original_dist = np.power(original_dist, 2).astype(np.float32)
original_dist = np.transpose(1. * original_dist/np.max(original_dist,axis = 0))
V = np.zeros_like(original_dist).astype(np.float32)
initial_rank = np.argsort(original_dist).astype(np.int32)
query_num = q_g_dist.shape[0]
gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]
all_num = gallery_num
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i,:k1+1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1]
fi = np.where(backward_k_neigh_index==i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate,:int(np.around(k1/2.))+1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,:int(np.around(k1/2.))+1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 2./3*len(candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i,k_reciprocal_expansion_index])
V[i,k_reciprocal_expansion_index] = 1.*weight/np.sum(weight)
original_dist = original_dist[:query_num,]
if k2 != 1:
V_qe = np.zeros_like(V,dtype=np.float32)
for i in range(all_num):
V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:],axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(np.where(V[:,i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist,dtype = np.float32)
for i in range(query_num):
temp_min = np.zeros(shape=[1,gallery_num],dtype=np.float32)
indNonZero = np.where(V[i,:] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])
jaccard_dist[i] = 1-temp_min/(2.-temp_min)
final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num,query_num:]
return final_dist
| [
"49899527+qq326823564@users.noreply.github.com"
] | 49899527+qq326823564@users.noreply.github.com |
20f99c80dbb85d01bb880545247c17ccdfa77a31 | eea5dc484360750cbf982d6dfe6da0388f8f86b9 | /api/qlua/DelLabel_pb2.py | d9d3bfe80f5b7d1dbd7a55cd48b09a199b5329f8 | [] | no_license | mzhumakhanov/mmvddss | 1032c8c33ec88b76ddc9127781fe4378355ceff7 | 22dbea4a9d76491e9cd5206c5f534d45c44d41e3 | refs/heads/master | 2021-03-12T11:02:42.261422 | 2019-01-27T22:19:43 | 2019-01-27T22:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,508 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: DelLabel.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='DelLabel.proto',
package='qlua.rpc.DelLabel',
syntax='proto3',
serialized_options=_b('\n\010qlua.rpcH\001'),
serialized_pb=_b('\n\x0e\x44\x65lLabel.proto\x12\x11qlua.rpc.DelLabel\".\n\x07Request\x12\x11\n\tchart_tag\x18\x01 \x01(\t\x12\x10\n\x08label_id\x18\x02 \x01(\x05\"\x18\n\x06Result\x12\x0e\n\x06result\x18\x01 \x01(\x08\x42\x0c\n\x08qlua.rpcH\x01\x62\x06proto3')
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='qlua.rpc.DelLabel.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chart_tag', full_name='qlua.rpc.DelLabel.Request.chart_tag', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_id', full_name='qlua.rpc.DelLabel.Request.label_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=37,
serialized_end=83,
)
_RESULT = _descriptor.Descriptor(
name='Result',
full_name='qlua.rpc.DelLabel.Result',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='qlua.rpc.DelLabel.Result.result', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=85,
serialized_end=109,
)
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Result'] = _RESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'DelLabel_pb2'
# @@protoc_insertion_point(class_scope:qlua.rpc.DelLabel.Request)
))
_sym_db.RegisterMessage(Request)
Result = _reflection.GeneratedProtocolMessageType('Result', (_message.Message,), dict(
DESCRIPTOR = _RESULT,
__module__ = 'DelLabel_pb2'
# @@protoc_insertion_point(class_scope:qlua.rpc.DelLabel.Result)
))
_sym_db.RegisterMessage(Result)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"mogilevskiyva"
] | mogilevskiyva |
4370030b9d3cbb043403f330d78a8b73c377fcb0 | e227fc8e1eafe2da377f18fe6894619fd68c4c3c | /web/chirp_web/chirp_web/urls.py | 3a96a97677c5725dad5496e67b7f9cc85949210c | [
"MIT"
] | permissive | Suloch/chirp | 5f53b7325f8707cfd2aeaa19be950f5cd26c01c5 | 6da4a4f6acf4b1948c18ff54922fa199b0bb03b2 | refs/heads/master | 2022-11-28T05:08:41.819768 | 2022-11-17T12:20:18 | 2022-11-17T12:20:18 | 144,522,337 | 26 | 5 | MIT | 2022-11-17T12:20:19 | 2018-08-13T02:58:41 | C | UTF-8 | Python | false | false | 819 | py | """chirp_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('main/', include('main.urls', namespace='main')),
]
| [
"sulochpatel1@gmail.com"
] | sulochpatel1@gmail.com |
e63d5ca6725aba6362ffd0e7cd7dd80e30a4196c | 91738798613b24b923935546795722f16d299352 | /util/trace_transform/eliminate.py | 32194dc191316e8dd455e4d874f5437a739ec012 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brad-mengchi/asplos_2021_ae | 7d0241f90eff38a3ca5ddc0e198ab4ec22cbd141 | cd9316a57ee4db85ba3ceaa362e839ab4075d0df | refs/heads/master | 2023-03-07T00:18:18.930510 | 2021-02-13T01:47:37 | 2021-02-13T01:47:37 | 318,314,352 | 0 | 2 | BSD-2-Clause | 2021-02-12T21:50:20 | 2020-12-03T20:41:20 | HTML | UTF-8 | Python | false | false | 644 | py | from serial_file import serial_file
def eliminate(path, vfc, outpath):
for (fname, vfc_traces) in vfc:
vfc_traces.reset_iterator()
sf = serial_file(path + '/' + fname, "r")
out = open(outpath + '/' + fname, "w+")
line = sf.readline()
while line:
if line[:-1].split(' ')[0] == "insts":
line = "insts = ", str(vfc_traces.get_insts())
out.writelines(line)
out.write("\n")
elif not vfc_traces.compare_trace(sf.get_linenum()):
out.write(line)
line = sf.readline()
sf.close()
out.close()
| [
"zhan2308@purdue.edu"
] | zhan2308@purdue.edu |
fd337b75f0eb10484074e08ba64c0b939849c29f | ed756885498f009618c4be880f255e5c2402d537 | /web/feeds.py | d65fd10180cb506fc5739ed0d781aa1940d95fda | [] | no_license | GunioRobot/BlancoWeb | c6d1d242960918a6170ed0b1432ac36ea686546f | cff8540c8f5bc0a372cc3500b035f1fdbbc7eab8 | refs/heads/master | 2021-01-20T11:31:50.434756 | 2011-02-21T22:43:42 | 2011-02-21T22:43:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
from django.contrib.syndication.feeds import Feed;
from web.models import Evento;
class EventoFeed(Feed):
title = "Blanco Irish Tavern";
link = "web/feeds/eventos/";
description = "Suscríbete para conocer todas nuestras fiestas"
def items(self):
return Evento.objects.all().order_by("-fecha");
def item_title(self, item):
return item.nombre
def item_description(self, item):
descripcion = "%s" % item.fecha;
descripcion += " %s" % item.hora_inicio;
descripcion += " %s" % item.info;
return descripcion; | [
"jneight@gmail.com"
] | jneight@gmail.com |
aee80ab9fe78aa22684a72ea0e812c4808a2078a | 7f6edcc1c3f485c42dde1ed5f52ee40292d3aafa | /data_proc/filter_objects.py | 47ef438576feeb38cd82df588e2f0f2780eda385 | [] | no_license | RocketWill/PositionRefineNets-data-proc | 7d06281f7ed2c1ff923eb9c2dd0bc3000f7c0c17 | 1842fe78249a056f7928060f2e4d5cffa072b881 | refs/heads/main | 2023-01-29T03:17:27.811446 | 2020-12-17T09:21:03 | 2020-12-17T09:21:03 | 322,242,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,507 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: willc
"""
import numpy as np
from utils import py_cpu_nms
import cv2
file_name = "/Volumes/Will 1/thesis/cus_ktti_vis/data_proc/data/000015_pred.txt"
class Object3d(object):
""" 3d object label """
def __init__(self, label_file_line):
self.data = label_file_line.split(" ")
data = label_file_line.split(" ")
data[1:] = [float(x) for x in data[1:]]
# extract label, truncation, occlusion
self.type = data[0] # 'Car', 'Pedestrian', ...
self.truncation = data[1] # truncated pixel ratio [0..1]
self.occlusion = int(
data[2]
) # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
self.alpha = data[3] # object observation angle [-pi..pi]
# extract 2d bounding box in 0-based coordinates
self.xmin = data[4] # left
self.ymin = data[5] # top
self.xmax = data[6] # right
self.ymax = data[7] # bottom
self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
# extract 3d bounding box information
self.h = data[8] # box height
self.w = data[9] # box width
self.l = data[10] # box length (in meters)
self.t = (data[11], data[12], data[13]) # location (x,y,z) in camera coord.
self.ry = data[14] # yaw angle (around Y-axis in camera coordinates) [-pi..pi]
self.score = 0.0
if len(data) > 15:
self.score = data[15]
# print(self.score)
def estimate_diffculty(self):
""" Function that estimate difficulty to detect the object as defined in kitti website"""
# height of the bounding box
bb_height = np.abs(self.xmax - self.xmin)
if bb_height >= 40 and self.occlusion == 0 and self.truncation <= 0.15:
return "Easy"
elif bb_height >= 25 and self.occlusion in [0, 1] and self.truncation <= 0.30:
return "Moderate"
elif (
bb_height >= 25 and self.occlusion in [0, 1, 2] and self.truncation <= 0.50
):
return "Hard"
else:
return "Unknown"
def print_object(self):
print(
"Type, truncation, occlusion, alpha: %s, %d, %d, %f"
% (self.type, self.truncation, self.occlusion, self.alpha)
)
print(
"2d bbox (x0,y0,x1,y1): %f, %f, %f, %f"
% (self.xmin, self.ymin, self.xmax, self.ymax)
)
print("3d bbox h,w,l: %f, %f, %f" % (self.h, self.w, self.l))
print(
"3d bbox location, ry: (%f, %f, %f), %f"
% (self.t[0], self.t[1], self.t[2], self.ry)
)
print("Difficulty of estimation: {}".format(self.estimate_diffculty()))
def read_label(label_filename):
lines = [line.rstrip() for line in open(label_filename)]
objects = [Object3d(line) for line in lines]
return objects
def split_predict_result(objects):
# labels, lists, prefixs(start from zero)
labels = []
objs = []
prefix = []
for idx, obj in enumerate(objects):
label_name = obj.type
if label_name not in labels:
labels.append(label_name)
prefix.append(idx)
objs.append([])
x0, y0, x1, y1, score = obj.xmin, obj.ymin, obj.xmax, obj.ymax, (obj.ymax-obj.ymin)*(obj.xmax-obj.xmin)
objs[-1].append([x0, y0, x1, y1, score])
return labels, np.asarray(objs), prefix
def objects_to_label(objects, output_name):
with open(output_name, "w") as text_file:
for obj in objects:
text = " ".join(obj.data)
text_file.write(text + "\n")
print("successfully")
def nms_box():
pass
def nms_3d():
pass
if __name__ == "__main__":
objects = read_label(file_name)
labels, objs_list, prefix = split_predict_result(objects)
# print(labels)
# print(objs_list)
# print(prefix)
image = cv2.imread("/Volumes/Will 1/thesis/cus_ktti_vis/data_proc/data/000015.png")
new_objects = []
for idx, objs in enumerate(objs_list):
# print(objs)
for obj in objs:
cv2.rectangle(image, (int(obj[0]), int(obj[1])), (int(obj[2]), int(obj[3])), (0, 255, 0), 2)
keep = (py_cpu_nms(np.asarray(objs), 0.1))
for k in keep:
new_objects.append(objects[k + prefix[idx]])
print((new_objects))
objects_to_label(new_objects, "000015_pred_mod.txt")
cv2.imwrite("./test.jpg", image) | [
"chengyong@pku.edu.cn"
] | chengyong@pku.edu.cn |
943eb7ef872e91eecdf173c1d2bcf133d8a02938 | 484f9502f2d1fa35df77df8d3a08bd2bfd2d253d | /src/testers/unittests/test_ast_utils.py | 3debbde14f5a9683c67c251be3268860790f2dd6 | [
"Apache-2.0"
] | permissive | pmeerw/Triton | 5d1c58e93ed257f06f1586a1aa542d1ba307dcbb | 82f11d6b15302e7900ed7f9eb3d686b6313d5b37 | refs/heads/master | 2020-08-04T17:01:47.442181 | 2019-09-30T08:16:44 | 2019-09-30T08:16:44 | 212,212,395 | 0 | 0 | Apache-2.0 | 2019-10-01T22:38:58 | 2019-10-01T22:38:58 | null | UTF-8 | Python | false | false | 1,494 | py | #!/usr/bin/env python2
## -*- coding: utf-8 -*-
"""Test AST utils."""
import unittest
from triton import *
class TestAstUtils(unittest.TestCase):
"""Testing the AST utilities."""
def setUp(self):
self.ctx = TritonContext()
self.ctx.setArchitecture(ARCH.X86_64)
self.astCtxt = self.ctx.getAstContext()
self.sv1 = self.ctx.newSymbolicVariable(8)
self.sv2 = self.ctx.newSymbolicVariable(8)
self.v1 = self.astCtxt.variable(self.sv1)
self.v2 = self.astCtxt.variable(self.sv2)
def test_lookingForNodes(self):
n = (((self.v1 + self.v2 * 3) + self.v2) - 1)
# Looking for variables
l = self.astCtxt.lookingForNodes(n, AST_NODE.VARIABLE)
self.assertEqual(len(l), 2)
self.assertEqual(l[0], self.v1)
self.assertEqual(l[1], self.v2)
self.assertEqual(l[0].getSymbolicVariable().getName(), self.sv1.getName())
self.assertEqual(l[1].getSymbolicVariable().getName(), self.sv2.getName())
l = self.astCtxt.lookingForNodes(n, AST_NODE.ANY)
self.assertEqual(len(l), 12)
l = self.astCtxt.lookingForNodes(n, AST_NODE.BVADD)
self.assertEqual(len(l), 2)
l = self.astCtxt.lookingForNodes(n, AST_NODE.BVSUB)
self.assertEqual(len(l), 1)
l = self.astCtxt.lookingForNodes(n, AST_NODE.BVMUL)
self.assertEqual(len(l), 1)
l = self.astCtxt.lookingForNodes(n, AST_NODE.BV)
self.assertEqual(len(l), 2)
| [
"jonathan.salwan@gmail.com"
] | jonathan.salwan@gmail.com |
085c1ca688f066be5edb8c576b783805eb8506bd | 6d3b434d5815ce245fb21e8215cca886d4d37aa0 | /FullStack/Day04/loop_test.py | 48817b89287d517cff25a7526e2c8d3956c92eeb | [] | no_license | jeen0202/Pycrawling | 6f4dba67c85a39cfd0f011c2c031ffb9bdc207ab | 49b79443e871e113c32f90858d5792db93035c86 | refs/heads/main | 2023-08-03T00:13:07.087531 | 2021-10-06T05:25:49 | 2021-10-06T05:25:49 | 383,691,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/hello_loop')
def hello_name():
value_list = ['list1', 'list2', 'list3']
return render_template('loop.html', values=value_list)
if __name__ == '__main__':
app.run("localhost", port="8085") | [
"jeen0202@korea.ac.kr"
] | jeen0202@korea.ac.kr |
8d69a931823ce83ae12e98a902bc8017db25c58c | 2411a875608eab61e0b7f0d5d825cc142dd063a1 | /nsx-gen/lib/mobclient.py | 541dd56df2c91c3f3d5dcc0e585b30d15efd2395 | [
"Apache-2.0"
] | permissive | vmware-archive/nsx-edge-gen | 02ebf1cb025ee71f6737a2082f6c368507150426 | dd25e303897a6e34f28f0eb288920b25e896f6e7 | refs/heads/master | 2023-01-19T14:39:29.229624 | 2020-11-18T17:26:15 | 2020-11-18T17:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,830 | py | #!/usr/bin/env python
# nsx-edge-gen
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Sabha Parameswaran'
import base64
import cookielib
import ssl
import requests
import re
import time
from pyquery import PyQuery
from lxml import html, etree
import urllib
import urllib2
from urllib2 import urlopen, Request
from requests.utils import quote
try:
# Python 3
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
DEBUG = False
# Grab following resource types from mob tree
# leaving first letter for some that have upper case like Cluster, Group, Datastore, Datacenter, Network
RESOURCE_TYPES = [ 'atacenter', 'atastore', 'host', 'domain', 'roup', 'luster', 'virtualwire', 'portgroup', 'etwork']
def get_context():
if get_context.context is not None:
return get_context.context
else:
raise Error(resourceType + ' config not loaded!!')
get_context.context = None
def set_context(context):
get_context.context = context
def create_url_opener():
cookies = cookielib.LWPCookieJar()
handlers = [
urllib2.HTTPHandler(debuglevel=1),
urllib2.HTTPSHandler(),
urllib2.HTTPCookieProcessor(cookies)
]
opener = urllib2.build_opener(*handlers)
return opener
def createBaseAuthToken(user, passwd):
return base64.b64encode('%s:%s' % (user, passwd))
def lookupSessionNonce(response):
pq = PyQuery(response)
vmware_session_nonce = ''
hidden_entry = pq('input:hidden')
if hidden_entry.attr('name') == 'vmware-session-nonce' :
vmware_session_nonce = hidden_entry.attr('value')
if DEBUG:
print('vmware-session-nonce: ' + vmware_session_nonce)
return vmware_session_nonce
def init_vmware_session():
context = get_context()
vcenterMobServiceInstanceUrl = '/mob/?moid=ServiceInstance&method=retrieveContent'
data = None #'vmware-session-nonce': context['vmware-session-nonce']}
cookies = None
serviceInstanceGetRespSock = queryVCenterMob(context, vcenterMobServiceInstanceUrl, 'GET', data, cookies)
serviceInstanceGetRespInfo = serviceInstanceGetRespSock.info()
cookies = serviceInstanceGetRespSock.info()['Set-Cookie']
serviceInstanceGetResp = serviceInstanceGetRespSock.read()
serviceInstanceGetRespSock.close()
if DEBUG:
print('Cookies: ' + cookies)
print('Info: ' + str(serviceInstanceGetRespInfo))
print('vCenter MOB response :\n' + str(serviceInstanceGetResp)+ '\n-----\n')
#if response.status_code != requests.codes.ok:
# raise Error('Unable to connect to vcenter, error message: ' + vcenterServiceInstanceResponse.text)
vmware_session_nonce = lookupSessionNonce(serviceInstanceGetResp)
context['vmware-session-nonce'] = vmware_session_nonce
context['vmware-cookies'] = cookies
return
def lookup_vsphere_config():
context = get_context()
init_vmware_session()
data = { 'vmware-session-nonce': context['vmware-session-nonce']}
cookies = context['vmware-cookies']
processVCenterMobRequest(context, '/mob/?moid=ServiceInstance&method=retrieveContent', 'POST', data, cookies )
return traversedMoidTree(context, data, cookies)
def refresh_vsphere_config():
context = get_context()
# if things were run within 5 seconds, return cached data..
moidMap = checkMoidMap()
if moidMap:
return moidMap
init_vmware_session()
data = { }
cookies = context['vmware-cookies']
# Now fetch everything
return traversedMoidTree(context, data, cookies)
def checkMoidMap():
context = get_context()
lastRefresh = context.get('LAST_REFRESH')
if lastRefresh and (time.time() - lastRefresh < 5):
moidMap = context['vcenterMobMap']
if moidMap:
return moidMap
return None
def traversedMoidTree(context, data, cookies):
method = 'GET'
# Fetch everything under the root group-d1
detailedMoidMap = processVCenterMobRequest(context, '/mob/?moid=group-d1', method, data, cookies)
# Now traverse the datacenter, datastore and host
datacenterMoidMap = { }
for key, entry in detailedMoidMap.iteritems():
if 'datacenter' in entry['moid']:
detailedVcenterMobUrl = '/mob/?moid=' + entry['moid']
datacenterMoidMap.update(processVCenterMobRequest(context, detailedVcenterMobUrl, method, data, cookies))
detailedMoidMap.update(datacenterMoidMap)
if DEBUG:
print('Datacenters Moid Map:\n' + str(detailedMoidMap))
hostMobUrl = '/mob/?moid=' + detailedMoidMap['host']['moid']
detailedMoidMap.update(processVCenterMobRequest(context, hostMobUrl, 'GET', data, cookies))
datastoreMoidMap = { }
for key, entry in detailedMoidMap.iteritems():
if 'datastore' in entry['moid']:
detailedVcenterMobUrl = '/mob/?moid=' + entry['moid']
datastoreMoidMap.update(processVCenterMobRequest(context, detailedVcenterMobUrl, method, data, cookies))
detailedMoidMap.update(datastoreMoidMap)
groupMoidMap = { }
for key, entry in detailedMoidMap.iteritems():
if 'group' in entry['moid']:
detailedVcenterMobUrl = '/mob/?moid=' + entry['moid']
groupMoidMap.update(processVCenterMobRequest(context, detailedVcenterMobUrl, method, data, cookies))
detailedMoidMap.update(groupMoidMap)
if DEBUG:
print('Entire Moid Map:\n' + str(detailedMoidMap))
# Save the tree locally
context['vcenterMobMap'] = detailedMoidMap
context['LAST_REFRESH'] = time.time()
return context['vcenterMobMap']
def processVCenterMobRequest(context, vcenterMobUrl, method, data, cookies):
mobRespSock = queryVCenterMob(context, vcenterMobUrl, method, data, cookies)
mobResp = mobRespSock.read()
mobRespSock.close()
if DEBUG:
print('\n\n Mob Response for url[' + vcenterMobUrl + ']:\n' + mobResp)
moidMap = generateMoidMap(mobResp, RESOURCE_TYPES)
return moidMap
def generateMoidMap(response, resourceTypes):
"""
for e in tree.xpath("//td/a[contains(text(),'datacenter')]/.."):
print('Entry: ' + str(e) + '\n\t content: ' + e.text_content() + '\n')
if e.attrib.has_key('href') and e.attrib['href'].find('datacenter') != -1:
print('Found Match 3 ............' + e.text)
if response is None or response == '':
with open('/Users/sparameswaran/workspace/nsx-edge-gen/complete_mob.html', 'r') as myfile:
#with open('/Users/sparameswaran/workspace/nsx-edge-gen/group-dump.html', 'r') as myfile:
response=myfile.read()#.replace('\n', '')
"""
response = html_decode(response)
tree = html.fromstring(response)
moidMap = {}
#for entry in tree.xpath("//td/a[contains(text(),'datacenter')]/.."):
for entry in tree.xpath("//td/a/.."):
href_and_rest = etree.tostring(entry)
if href_and_rest is None or 'alarm' in href_and_rest or 'javascript' in href_and_rest:
continue
if "onclick" in href_and_rest or 'doPath' in href_and_rest or 'query' in href_and_rest:
continue
if not any(searchString in href_and_rest for searchString in resourceTypes):
continue
if DEBUG:
print('Entry content is: ' + href_and_rest)
# for child in entry:
# print('child:' + child.tag + ', content: ' + etree.tostring(child) + ', value: ' + child.text_content() + ', complete string: ' + str(child))
# for nested_child in child:
# print('\t nestedchild:' + nested_child.tag + ', value: ' + etree.tostring(nested_child) + ' content: ' + nested_child.text_content() + ', complete string: ' + str(nested_child))
# for nested_child2 in nested_child:
# print('\t nestedchild2:' + nested_child2.tag + ', value: ' + etree.tostring(nested_child2) + ' content: ' +nested_child2.text_content() + ', complete string: ' + str(nested_child2))
"""
Sample entry:
<td class="clean"><a href="https://vcsa-01.haas-94.pez.pivotal.io/mob/?moid=dvportgroup%2d168">dvportgroup-168</a> (vxw-dvs-29-virtualwire-115-sid-5013-lswitch-test2-Services)</td>
<td><a href="https://vcsa-01.haas-94.pez.pivotal.io/mob/?moid=group%2dd1">group-d1</a> (Datacenters)</td>
<td class="html-attribute-name">class="<span class="html-attribute-value">clean</span>">
<span class="html-tag"><a class="html-attribute-name">href</a></span>
="<a class="html-attribute-value html-external-link" target="_blank" href="https://vcsa-01.haas-94.pez.pivotal.io/mob/?moid=datacenter-2">/mob/?moid=datacenter-2</a>
datacenter-2<span class="html-tag"/> (Datacenter)<span class="html-tag"/></td>
Add lazy capture for the name as in (datastore1 (3)) using .*?\( .. )
"""
match = re.search(r'href="(.*?)">(.*)</.*?\((.*)\).*', href_and_rest)
if match is not None:
href = match.group(1)
moid = match.group(2).replace('/mob/?moid=','')
mobName = match.group(3)
moidMap[mobName] = { 'moid' : moid, 'href': href }
if DEBUG:
print('Mob Name: ' + mobName + ', moid : ' + moid + ', href: ' + href )
if DEBUG:
print('Entry Map: ' + str(moidMap))
return moidMap
def lookup_moid(resourceName):
vcenterMobMap = checkMoidMap()
if not vcenterMobMap:
vcenterMobMap = refresh_vsphere_config()
if resourceName in vcenterMobMap:
return vcenterMobMap[resourceName]['moid']
if 'atastore' in resourceName:
if resourceName in vcenterMobMap:
return vcenterMobMap[resourceName]['moid']
elif 'vsan' + resourceName in vcenterMobMap:
return vcenterMobMap['vsan' + resourceName]['moid']
# Handle / and other characters
resourceName = quote(resourceName, safe='')
if resourceName in vcenterMobMap:
return vcenterMobMap[resourceName]['moid']
print('Unable to lookup Moid for resource: ' + resourceName)
return resourceName
def lookup_logicalswitch_managed_obj_name( resourceName):
vcenterMobMap = checkMoidMap()
if not vcenterMobMap:
vcenterMobMap = refresh_vsphere_config()
"""
# For logical switches, the user associated name would be something like:
moid: dvportgroup-272
name in moid map: vxw-dvs-29-virtualwire-179-sid-5029-lswitch-test4-Infra
user associated name: lswitch-test4-Infra
"""
for key in vcenterMobMap:
#print('key[{}] : {}'.format(key, str(vcenterMobMap[key])))
if resourceName in key:
return key
# If the length of the lswitch name is over 40 characters,
# then things get trimmed in the generated virtualwires
# Sample virtualwire: vxw-dvs-50-virtualwire-16-sid-5015-lswitch-edge-nsx-pipeline-sample-Dynamic-Serv
if len(resourceName) > 40:
lswitch_initial_chars = resourceName[0:5]
for key in vcenterMobMap:
#print('key[{}] : {}'.format(key, str(vcenterMobMap[key])))
if 'virtualwire' in key and lswitch_initial_chars in key:
associated_lsw_name = key[key.index(lswitch_initial_chars):]
if associated_lsw_name in resourceName:
return key
# Handle / and other characters
resourceName = quote(resourceName, safe='')
for key in vcenterMobMap:
#print('key[{}] : {}'.format(key, str(vcenterMobMap[key])))
if resourceName in key:
return key
print('Unable to lookup Moid for resource: {}'.format(resourceName))
return resourceName
def escape(html):
"""Returns the given HTML with ampersands, quotes and carets encoded."""
return mark_safe(force_unicode(html).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '''))
def html_decode(s):
"""
Returns the ASCII decoded version of the given HTML string. This does
NOT remove normal HTML tags like <p>.
"""
htmlCodes = (
("'", '''),
('"', '"'),
('>', '>'),
('<', '<'),
('&', '&')
)
for code in htmlCodes:
s = s.replace(code[1], code[0])
return s
def create_non_verify_sslcontext():
urlctx = ssl.create_default_context()
urlctx.check_hostname = False
urlctx.verify_mode = ssl.CERT_NONE
return urlctx
def queryVCenterMob(vcenter_ctx, url, method, data, cookies):
vcenterOriginUrl = 'https://' + vcenter_ctx['address']
vcenterMobUrl = vcenterOriginUrl + url
urlctx = create_non_verify_sslcontext()
opener = create_url_opener()
#data = urllib.urlencode({ 'vmware-session-nonce': context['vmware-session-nonce']})
if data is not None and method == 'POST':
req = urllib2.Request(vcenterMobUrl, data=urllib.urlencode(data))#, auth=auth, data=data, verify=False, headers=headers)
else:
req = urllib2.Request(vcenterMobUrl)
base64string = createBaseAuthToken(vcenter_ctx.get('admin_user'), vcenter_ctx.get('admin_passwd'))
#print('Url: {}'.format(vcenterMobUrl))
req.add_header('Authorization', "Basic %s" % base64string)
req.add_header('User-Agent', "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30")
req.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.3')
req.add_header('Accept-Language', 'en-US,en;q=0.8')
req.add_header("Accept", "text/html,application/xhtml+xml,application/xml,;q=0.9,*/*;q=0.8")
# req.add_header('Referer', vcenterMobUrl)
# req.add_header('Origin', vcenterOriginUrl)
# req.add_header('Host', vcenter_ctx['address'])
if cookies is not None:
req.add_header("Cookie", cookies)
req.get_method = lambda: method
sock = urllib2.urlopen(req, context=urlctx)
return sock | [
"sabhap@pivotal.io"
] | sabhap@pivotal.io |
b730461584fd1635191b6c39592d880a8c69bad4 | 9effd8eca52b8efebaf56474f6919bfe117af8ad | /Inner Classes/Inner1.py | 365db8f1c541a79593f24821fc24de71c4c083ff | [] | no_license | maliaditya/Python-code-snippets | b7aec315654f3b74328b29d1d775dde06aa3edcf | c2db8c612b68b62eab8fc962b1ebb33dd9e6689e | refs/heads/master | 2023-03-29T20:49:58.264648 | 2021-04-01T08:59:07 | 2021-04-01T08:59:07 | 267,371,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | class Mobile:
def __init__(self):
print("Mobile constructor")
class App:
def __init__(self):
print("App constructor")
def appName(self):
print("Instagram")
Mobile().App().appName() | [
"41832893+maliaditya@users.noreply.github.com"
] | 41832893+maliaditya@users.noreply.github.com |
383b76f74748990c083718d317c1ededcf0adc17 | 09611f3b130dccde3830fa73f7a1a17d88a9af4a | /Python/pi/src/map/routing.py | 0a3394171e58d98c082f3984e4e3d63c46f0eff2 | [] | no_license | Guigajks/PI-VIII | fb656b6b3cd1078c8534844a2f4d046b2143fb8f | ee4db93d15cfeaa8a572f1a26c0b4ba9670f6444 | refs/heads/master | 2020-03-25T12:45:14.240582 | 2018-12-14T20:10:00 | 2018-12-14T20:10:00 | 143,792,102 | 0 | 0 | null | 2018-12-14T20:10:01 | 2018-08-06T23:00:22 | null | UTF-8 | Python | false | false | 126 | py | from django.conf.urls import url
from . import consumer
# Channels websocket routing table: map the "ws/" endpoint to MapConsumer.
# (`url` is imported from django.conf.urls at the top of this module.)
websocket_urlpatterns = [
    url(r'^ws/$', consumer.MapConsumer),
]
"guigajks@bitbucket.org"
] | guigajks@bitbucket.org |
d793e6de7377d5ebc904c64c932ada471c962a70 | 772fea37684a53210d99a3010b92a4bd6f1317e8 | /rxcs/cs/finalRecon.py | 9e78ec3a834a8aa9cde7ea9a1f8c92619d4b7b62 | [
"BSD-2-Clause"
] | permissive | JacekPierzchlewski/RxCS | ac5ebd6721f4d261f5e9ef160a61c15d2ce020dd | 250f9ebfe9c12f49754f354e60cc511be76e4632 | refs/heads/master | 2020-12-14T16:09:15.593134 | 2017-03-22T15:53:46 | 2017-03-22T15:53:46 | 19,773,357 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,071 | py | """
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
1.0 | 20-JAN-2016 : * Version 1.0 released |br|
1.0r1 | 28-JAN-2016 : * Only real part of reconstructed signals is preserved
*License*:
BSD 2-Clause
"""
from __future__ import division
import rxcs
import numpy as np
# =================================================================
# L1 solver object
# =================================================================
class finalRecon(rxcs._RxCSobject):
    """RxCS module: final signal reconstruction from found coefficients.

    Multiplies every vector of reconstructed coefficients ('lCoeff') with a
    dictionary matrix ('mDict') to synthesise time-domain signals, stored
    both as a list ('lSig') and as a real-valued matrix ('mSig').  Parameter
    registration and validation are inherited from rxcs._RxCSobject.
    """
    def __init__(self, *args):
        rxcs._RxCSobject.__init__(self)  # Make it a RxCS object
        # Name of group of RxCS modules and module name
        self.strRxCSgroup = 'Reconstruction'
        self.strModuleName = 'Final reconstruction'
        self.__inputSignals()  # Define the input signals
        self.__parametersDefine()  # Define the parameters
        # Start L2 solver object
        # NOTE(review): L2solv is created here but never used by __engine
        # below - presumably kept for API symmetry; confirm before removing.
        self.L2solv = rxcs.auxiliary.aldkrlsL2()
    # Define the input signals (mandatory parameters)
    def __inputSignals(self):
        # Reconstructed signals coefficients
        self.paramAddMan('lCoeff', 'Reconstructed signals coefficients', noprint=1)
        self.paramType('lCoeff', list)  # Must be a list
        self.paramTypeEl('lCoeff', (np.ndarray))  # Elements must be np.ndarray
        # Signals dictionary
        self.paramAddMan('mDict', 'Dictionary matrix [time domain in 2nd dimension (columns) ]')
        self.paramType('mDict', np.ndarray)  # Must be a Numpy array
        self.paramNDimEq('mDict', 2)  # Must be 2-dimensional
    # Define the optional parameters
    def __parametersDefine(self):
        self.paramAddOpt('vInx', 'Indices of rows of the dictionary to be used in the reconstruction', noprint=1)
        self.paramType('vInx', np.ndarray)  # Must be a Numpy array
        self.paramTypeEl('vInx', (int))  # Elements must be integers
        self.paramNDimEq('vInx', 1)  # Must be 1-dimensional (it is a vector of indices)
        self.paramUnique('vInx')  # Elements must be unique
        self.paramHE('vInx', 0)  # Elements must be non-negative (assumes paramHE = 'higher or equal' - confirm against rxcs API)
        # 'Mute the output' flag
        self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
        self.paramType('bMute', int)  # Must be of int type
        self.paramAllowed('bMute', [0, 1])  # It can be either 1 or 0
    # Run the module: validate parameters, report, run the engine
    def run(self):
        self.parametersCheck()  # Check if all the needed parameters are in place and are correct
        self.parametersPrint()  # Print the values of parameters
        self.checktInputSig()  # Check if the observed signals and Theta
        # matrices are correct
        self.engineStartsInfo()  # Info that the engine starts
        self.__engine()  # Run the engine
        self.engineStopsInfo()  # Info that the engine ends
        return self.__dict__  # Return dictionary with the parameters
    # Check if the observed signals and Theta matrices are correct
    def checktInputSig(self):
        # All the elements in the vector with indices of rows of the dictionary to be used in the reconstruction
        # ('vInx') must be lower than the number of rows in the dictionary matrix
        (nRowsDict, _) = self.mDict.shape
        if self.wasParamGiven('vInx'):
            for inxRow in self.vInx:
                if inxRow >= nRowsDict:
                    strError = 'Indices of dictionary rows must be lower than the number of rows in the dictionary!'
                    raise ValueError(strError)
        return
    # Engine - reconstruct the signals from the coefficients
    def __engine(self):
        # Get the number of the signal coefficients
        nSigCoeff = len(self.lCoeff)
        # Get the number of columns (time samples) in the dictionary
        (_, nT) = self.mDict.shape
        # Reconstruct the signals
        self.lSig = []  # List with the reconstructed signals
        self.mSig = np.zeros((nSigCoeff, nT))  # Matrix with the reconstructed signals
        # Loop over all the signal coefficients
        if self.wasParamGiven('vInx'):
            # Take only a part of the dictionary which should be used to reconstruct the signal
            self.mDict_ = self.mDict[self.vInx, :]
            for inxSig in np.arange(nSigCoeff):
                vCoeff = self.lCoeff[inxSig][self.vInx]  # Keep only the coefficients that match the selected rows
                vSig = vCoeff.dot(self.mDict_)  # Generate a signal
                self.lSig.append(vSig)  # Add the signal to the list
                self.mSig[inxSig, :] = vSig.real  # Only the real part is preserved (see the 1.0r1 note in the module docstring)
        else:
            for inxSig in np.arange(nSigCoeff):
                vSig = self.lCoeff[inxSig].dot(self.mDict)  # Generate a signal
                self.lSig.append(vSig)
                self.mSig[inxSig, :] = vSig.real  # Only the real part is preserved (see the 1.0r1 note in the module docstring)
        return
| [
"jap@es.aau.dk"
] | jap@es.aau.dk |
2bef865ec48767a8816169565c3268d77904da1c | 346a7fe97299a299aa8411763094c16c6ff82e81 | /OntoED/model_base.py | 4b201e3c4593690f8d7e8ed5037c1f3a4633cc4f | [] | no_license | hanqingzhu258/OntoED | 576ad0de7b7b4d5df54be94476cbe6fb5da26d8f | 7997c92b2f75c0dc2bde6073f81566ab8bed018b | refs/heads/main | 2023-06-05T09:35:52.676659 | 2021-06-25T11:23:33 | 2021-06-25T11:23:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,919 | py | # -*- coding: utf-8 -*-
import os
import sys
import torch
from torch import optim, nn
from sklearn.metrics import f1_score, recall_score, precision_score
import settings.parameters as para
class LowResEDModel(nn.Module):
    """Abstract base for low-resource (episodic) event detection models.

    Subclasses must implement :meth:`forward`; this class supplies the
    sentence-encoder slots, the trigger-classification head and the shared
    loss / accuracy / evaluation helpers used by LowResEDFramework.
    """

    def __init__(self, support_sentence_encoder, query_sentence_encoder):
        '''
        support_sentence_encoder: Sentence encoder for support-set instances
        query_sentence_encoder: Sentence encoder for query-set instances
        '''
        nn.Module.__init__(self)
        self.support_sentence_encoder = support_sentence_encoder
        self.query_sentence_encoder = query_sentence_encoder
        self.cost = nn.CrossEntropyLoss()
        # NOTE(review): not used by the methods defined in this base class;
        # presumably available for subclasses experimenting with an MSE
        # trigger-detection loss - confirm before removing.
        self.loss_for_td = nn.MSELoss()
        # Word embedding -> trigger-label logits head.
        self.fc_trigger = nn.Sequential(
            nn.Dropout(para.DROPOUT_RATE),
            nn.Linear(para.SIZE_EMB_WORD, para.SIZE_TRIGGER_LABEL, bias=True),
            nn.ReLU(),
        )

    def forward(self, support, query, scope_support, scope_query, N, R, K, Q):
        '''
        support: Inputs of the support set.
        query: Inputs of the query set.
        N: Num of classes
        R: Ratio of instances for each class
        K: Num of instances for each class in the support set
        Q: Num of instances for each class in the query set
        return: logits
        '''
        raise NotImplementedError

    def loss(self, logits, label):
        '''
        logits: Logits with the size (..., class_num)
        label: Label with whatever size.
        return: [Loss] (A single value)
        '''
        N = logits.size(-1)
        return self.cost(logits.view(-1, N), label.view(-1))

    def accuracy(self, pred, label):
        '''
        pred: Prediction results with whatever size
        label: Label with whatever size
        return: [Accuracy] (A single value)
        '''
        return torch.mean((pred.view(-1) == label.view(-1)).type(torch.FloatTensor))

    def evaluation_metric(self, pred, label):
        '''
        pred: Prediction results with whatever size
        label: Label with whatever size
        return: (macro precision, macro F1, macro recall, micro F1)
        '''
        # sklearn operates on host (numpy-compatible) data.  Moving to the
        # CPU unconditionally is a no-op for CPU tensors and, unlike the old
        # `if para.CUDA:` test, cannot disagree with the tensors' actual
        # device (the old code crashed when para.CUDA was False but the
        # tensors lived on the GPU).
        pred = pred.detach().cpu()
        label = label.detach().cpu()
        # average="macro": unweighted mean of per-class scores.
        Precision = precision_score(y_true=label.view(-1), y_pred=pred.view(-1), average="macro")
        Recall = recall_score(y_true=label.view(-1), y_pred=pred.view(-1), average="macro")
        F1_score = f1_score(y_true=label.view(-1), y_pred=pred.view(-1), average="macro")
        F1_score_micro = f1_score(y_true=label.view(-1), y_pred=pred.view(-1), average="micro")
        return Precision, F1_score, Recall, F1_score_micro

    def concat_support_query_items(self, items_support_trigger, items_query_trigger, item_scale=2):
        '''
        Concatenate support- and query-set tensors along the instance axis.

        item_scale: 2 for label/prediction tensors (..., Max_Num),
                    3 for logit tensors (..., Max_Num, D).
        return: the concatenated tensor, flattened over batch and instances.
        '''
        if item_scale == 2:
            Max_Num = items_support_trigger.size(-1)
            items_support_trigger = items_support_trigger.view(para.SIZE_BATCH, -1, Max_Num)
            items_query_trigger = items_query_trigger.view(para.SIZE_BATCH, -1, Max_Num)
            items_trigger = torch.cat((items_support_trigger, items_query_trigger), dim=1).view(-1, Max_Num)
        elif item_scale == 3:
            D = items_support_trigger.size(-1)
            Max_Num = items_support_trigger.size(-2)
            items_support_trigger = items_support_trigger.view(para.SIZE_BATCH, -1, Max_Num, D)
            items_query_trigger = items_query_trigger.view(para.SIZE_BATCH, -1, Max_Num, D)
            items_trigger = torch.cat((items_support_trigger, items_query_trigger), dim=1).view(-1, Max_Num, D)
        else:
            # Previously fell through to an UnboundLocalError; fail loudly.
            raise ValueError('item_scale must be 2 or 3, got %r' % (item_scale,))
        return items_trigger
class LowResEDFramework:
    """Training / evaluation driver for low-resource event detection.

    Owns the train / validation / test data loaders and implements the
    episodic training loop for :class:`LowResEDModel` subclasses, combining
    an event-classification (EC) loss and a trigger-detection (TD) loss,
    with periodic validation and best-checkpoint saving.
    """

    def __init__(self, train_data_loader, val_data_loader, test_data_loader):
        '''
        train_data_loader: DataLoader for training.
        val_data_loader: DataLoader for validating.
        test_data_loader: DataLoader for testing.
        '''
        self.train_data_loader = train_data_loader
        self.val_data_loader = val_data_loader
        self.test_data_loader = test_data_loader

    def __load_model__(self, ckpt):
        '''
        ckpt: Path of the checkpoint
        return: Checkpoint dict
        '''
        if os.path.isfile(ckpt):
            checkpoint = torch.load(ckpt)
            print("Successfully loaded checkpoint '%s'" % ckpt)
            return checkpoint
        else:
            raise Exception("No checkpoint found at '%s'" % ckpt)

    def item(self, x):
        '''
        Scalar-extraction helper (kept for source compatibility with code
        written for PyTorch before and after 0.4).
        '''
        return x.item()

    def train(self, model, model_name,
              B, N_for_train, N_for_eval, R_for_train, R_for_eval, K, Q,
              ckpt_dir=para.CHECKPOINT_DIRECTORY,
              test_result_dir=para.DATA_OUTPUT_DIRECTORY,
              learning_rate=para.LR,
              lr_step_size=para.SIZE_LR_STEP,
              weight_decay=para.WEIGHT_DECAY,
              train_iter=para.TRAIN_ITER,
              val_iter=para.VAL_ITER,
              val_step=para.VAL_STEP,
              test_iter=para.TEST_ITER,
              cuda=para.CUDA,
              pretrain_model=None,
              optimizer=optim.SGD,
              noise_rate=0):
        '''
        model: a LowResEDModel instance
        model_name: Name of the model
        B: Batch size
        N_for_train / N_for_eval: Num of classes per train / eval batch
        R_for_train / R_for_eval: Ratio of instances for each class
        K: Num of instances for each class in the support set
        Q: Num of instances for each class in the query set
        ckpt_dir: Directory of checkpoints
        test_result_dir: Directory of test results
        learning_rate: Initial learning rate
        lr_step_size: Decay learning rate every lr_step_size steps
        weight_decay: Rate of decaying weight
        train_iter: Num of iterations of training
        val_iter: Num of iterations of validating
        val_step: Validate every val_step steps
        test_iter: Num of iterations of testing
        cuda: Use CUDA or not
        pretrain_model: Pre-trained checkpoint path
        '''
        print("Start training...")
        # Init: only optimize parameters that require gradients.
        parameters_to_optimize = filter(lambda x: x.requires_grad, model.parameters())
        optimizer = optimizer(parameters_to_optimize, learning_rate, weight_decay=weight_decay)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=lr_step_size)
        if pretrain_model:
            checkpoint = self.__load_model__(pretrain_model)
            model.load_state_dict(checkpoint['state_dict'])
            start_iter = checkpoint['iter'] + 1
        else:
            start_iter = 0

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            model = nn.DataParallel(model)
        if cuda:
            model = model.cuda()
        model.train()

        # Training.  The iter_* variables are running sums over the current
        # reporting window; they are reset every val_step iterations so the
        # console line shows window averages.
        best_acc = 0
        iter_loss = 0.0
        iter_right = 0.0
        iter_precision = 0.0
        iter_f1 = 0.0
        iter_f1_micro = 0.0
        iter_recall = 0.0
        iter_loss_ec = 0.0
        iter_loss_td = 0.0
        iter_right_td = 0.0
        iter_precision_td = 0.0
        iter_f1_td = 0.0
        iter_f1_td_micro = 0.0
        iter_recall_td = 0.0
        iter_sample = 0.0
        for it in range(start_iter, start_iter + train_iter):
            support, query, label, label_support_trigger, label_query_trigger, scope_support, scope_query = \
                self.train_data_loader.next_batch(B, N_for_train, R_for_train, K, Q, noise_rate=noise_rate)
            logits, pred, logits_support_trigger, pred_support_trigger, logits_query_trigger, pred_query_trigger = \
                model(support, query, scope_support, scope_query, N_for_train, R_for_train, K, Q)
            # logits.size() -> (B, N_for_train * Q, N_for_train)
            # pred.size() -> (B * N_for_train * Q)
            # logits_support_trigger.size() -> (B, N_for_train * K, W, 2)
            # pred_support_trigger.size() -> (B, N_for_train * K, W)
            # logits_query_trigger.size() -> (B, N_for_train * Q, W, 2)
            # pred_query_trigger.size() -> (B, N_for_train * Q, W)
            # support_trigger_label.size() -> (B, N_for_train * K, W)
            # query_trigger_label.size() -> (B, N_for_train * Q, W)
            if torch.cuda.device_count() > 1:
                # DataParallel wraps the model, so the helpers live on .module.
                logits_trigger = model.module.concat_support_query_items(logits_support_trigger, logits_query_trigger, 3)
                label_trigger = model.module.concat_support_query_items(label_support_trigger, label_query_trigger, 2)
                pred_trigger = model.module.concat_support_query_items(pred_support_trigger, pred_query_trigger, 2)
                loss_td = model.module.loss(logits_trigger, label_trigger)
                loss_ec = model.module.loss(logits, label)
                # Joint objective: weighted sum of event classification (EC)
                # and trigger detection (TD) losses.
                loss = para.LOSS_RATIO_FOR_EC * loss_ec + para.LOSS_RATIO_FOR_TD * loss_td
                right_td = model.module.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.module.evaluation_metric(pred_trigger, label_trigger)
                right = model.module.accuracy(pred, label)
                Precision, F1_score, Recall, F1_score_micro = model.module.evaluation_metric(pred, label)
            else:
                logits_trigger = model.concat_support_query_items(logits_support_trigger, logits_query_trigger, 3)
                label_trigger = model.concat_support_query_items(label_support_trigger, label_query_trigger, 2)
                pred_trigger = model.concat_support_query_items(pred_support_trigger, pred_query_trigger, 2)
                loss_td = model.loss(logits_trigger, label_trigger)
                loss_ec = model.loss(logits, label)
                loss = para.LOSS_RATIO_FOR_EC * loss_ec + para.LOSS_RATIO_FOR_TD * loss_td
                right_td = model.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.evaluation_metric(pred_trigger, label_trigger)
                right = model.accuracy(pred, label)
                Precision, F1_score, Recall, F1_score_micro = model.evaluation_metric(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # BUGFIX: step the LR scheduler after the optimizer (required
            # order since PyTorch 1.1).  The per-iteration decay cadence of
            # the original code is unchanged.
            scheduler.step()

            iter_loss += self.item(loss.data)
            iter_loss_ec += loss_ec
            iter_precision += Precision
            iter_recall += Recall
            iter_f1 += F1_score
            iter_f1_micro += F1_score_micro
            iter_right += self.item(right.data)
            iter_loss_td += loss_td
            iter_precision_td += Precision_td
            iter_recall_td += Recall_td
            iter_f1_td += F1_score_td
            iter_f1_td_micro += F1_score_td_micro
            iter_right_td += self.item(right_td.data)
            iter_sample += 1
            sys.stdout.write(
                '[TRAIN] step: {0:4} | loss: {1:2.6f}, '
                'loss_ec: {2:2.6f}, precision: {3:2.6f}, recall: {4:2.6f}, '
                'f1: {5:2.6f}, f1_micro: {6:2.6f}, accuracy: {7:3.2f}%, '
                'loss_td: {8:2.6f}, precision_td: {9:2.6f}, recall_td: {10:2.6f}, '
                'f1_td: {11:2.6f}, f1_td_micro: {12:2.6f}, accuracy_td: {13:3.2f}%'.format(
                    it + 1, iter_loss / iter_sample, iter_loss_ec / iter_sample, iter_precision / iter_sample, iter_recall / iter_sample,
                    iter_f1 / iter_sample, iter_f1_micro / iter_sample, 100 * iter_right / iter_sample,
                    iter_loss_td / iter_sample, iter_precision_td / iter_sample, iter_recall_td / iter_sample,
                    iter_f1_td / iter_sample, iter_f1_td_micro / iter_sample, 100 * iter_right_td / iter_sample) + '\r')
            sys.stdout.flush()
            if it % val_step == 0:
                # Reset the running-average window.
                # BUGFIX: the micro-F1 accumulators are now reset as well;
                # previously they kept growing across windows while
                # iter_sample was reset, inflating the reported micro F1.
                iter_loss = 0.
                iter_loss_ec = 0.
                iter_precision = 0.
                iter_recall = 0.
                iter_f1 = 0.
                iter_f1_micro = 0.
                iter_right = 0.
                iter_loss_td = 0.
                iter_precision_td = 0.
                iter_recall_td = 0.
                iter_f1_td = 0.
                iter_f1_td_micro = 0.
                iter_right_td = 0.
                iter_sample = 0.
            if (it + 1) % val_step == 0:
                # Periodic validation; keep the checkpoint with best accuracy.
                precision, recall, f1, f1_micro, acc, precision_td, recall_td, f1_td, f1_td_micro, acc_td, = self.eval(model, model_name, B, N_for_eval, R_for_eval, K, Q, val_iter, noise_rate=noise_rate)
                model.train()
                if acc > best_acc:
                    print('Best checkpoint')
                    if not os.path.exists(ckpt_dir):
                        os.makedirs(ckpt_dir)
                    save_path = os.path.join(ckpt_dir, model_name + '_' + str(N_for_train) + "Way-" + str(R_for_train) + "Ratio-Max" + str(K) + "Shot" + ".pth.tar")
                    torch.save({'state_dict': model.state_dict()}, save_path)
                    best_acc = acc
        print("\n======================================================================================\n")
        print("Finish training " + model_name)
        # Final test with the best checkpoint saved above.
        test_precision, test_recall, test_f1, test_f1_micro, test_acc, test_precision_td, test_recall_td, test_f1_td, test_f1_td_micro, test_acc_td = \
            self.eval(model, model_name, B, N_for_eval, R_for_eval, K, Q, test_iter,
                      ckpt=os.path.join(ckpt_dir, model_name + '_' + str(N_for_train) + "Way-" + str(R_for_train) + "Ratio-Max" + str(K) + "Shot" + '.pth.tar'), noise_rate=noise_rate)
        print("\n======================================================================================\n")
        print("Finish testing " + model_name)
        print("LR:", para.LR, "LOSS_RATIO_FOR_TD:", para.LOSS_RATIO_FOR_TD, "LOSS_RATIO_FOR_EC:",
              para.LOSS_RATIO_FOR_EC, "LOSS_RATIO_FOR_ERE:", para.LOSS_RATIO_FOR_ERE)
        print(model_name + '_' + str(N_for_train) + "Way-" + str(R_for_train) + "Ratio-Max" + str(K) + "Shot")
        print("Test precision_td: {}".format(test_precision_td))
        print("Test recall_td: {}".format(test_recall_td))
        print("Test f1_td: {}".format(test_f1_td))
        print("Test f1_td_micro: {}".format(test_f1_td_micro))
        print("Test accuracy_td: {}".format(test_acc_td))
        print("Test precision: {}".format(test_precision))
        print("Test recall: {}".format(test_recall))
        print("Test f1: {}".format(test_f1))
        print("Test f1_micro: {}".format(test_f1_micro))
        print("Test accuracy: {}".format(test_acc))

    def eval(self, model, model_name, B, N, R, K, Q, eval_iter, ckpt=None, noise_rate=0):
        '''
        model: a LowResEDModel instance
        B: Batch size
        N: Num of classes for each batch
        R: Ratio of instances
        K: Num of instances for each class in the support set
        Q: Num of instances for each class in the query set
        eval_iter: Num of iterations
        ckpt: Checkpoint path. Set as None to validate with the current model
              parameters on the validation set; a path triggers loading the
              checkpoint and evaluating on the test set.
        return: averages over eval_iter batches of
                (precision, recall, f1, f1_micro, accuracy,
                 precision_td, recall_td, f1_td, f1_td_micro, accuracy_td)
        '''
        print("")
        model.eval()
        if ckpt is None:
            eval_dataset = self.val_data_loader
        else:
            checkpoint = self.__load_model__(ckpt)
            model.load_state_dict(checkpoint['state_dict'])
            eval_dataset = self.test_data_loader
        iter_precision = 0.0
        iter_recall = 0.0
        iter_f1 = 0.0
        iter_f1_micro = 0.0
        iter_right = 0.0
        iter_precision_td = 0.0
        iter_recall_td = 0.0
        iter_f1_td = 0.0
        iter_f1_td_micro = 0.0
        iter_right_td = 0.0
        iter_sample = 0.0
        for it in range(eval_iter):
            support, query, label, label_support_trigger, label_query_trigger, scope_support, scope_query = \
                eval_dataset.next_batch(B, N, R, K, Q, noise_rate=noise_rate)
            logits, pred, logits_support_trigger, pred_support_trigger, logits_query_trigger, pred_query_trigger = \
                model(support, query, scope_support, scope_query, N, R, K, Q)
            if torch.cuda.device_count() > 1:
                label_trigger = model.module.concat_support_query_items(label_support_trigger, label_query_trigger, 2)
                pred_trigger = model.module.concat_support_query_items(pred_support_trigger, pred_query_trigger, 2)
                right_td = model.module.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.module.evaluation_metric(pred_trigger, label_trigger)
                right = model.module.accuracy(pred, label)
                Precision, F1_score, Recall, F1_score_micro = model.module.evaluation_metric(pred, label)
            else:
                label_trigger = model.concat_support_query_items(label_support_trigger, label_query_trigger, 2)
                pred_trigger = model.concat_support_query_items(pred_support_trigger, pred_query_trigger, 2)
                right_td = model.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.evaluation_metric(pred_trigger, label_trigger)
                right = model.accuracy(pred, label)
                Precision, F1_score, Recall, F1_score_micro = model.evaluation_metric(pred, label)
            iter_precision += Precision
            iter_recall += Recall
            iter_f1 += F1_score
            iter_f1_micro += F1_score_micro
            iter_right += self.item(right.data)
            iter_precision_td += Precision_td
            iter_recall_td += Recall_td
            iter_f1_td += F1_score_td
            iter_f1_td_micro += F1_score_td_micro
            iter_right_td += self.item(right_td.data)
            iter_sample += 1
            sys.stdout.write('[EVAL] step: {0:4} | '
                             'precision: {1:2.6f}, recall: {2:2.6f}, f1: {3:2.6f}, f1_micro: {4:2.6f}, accuracy: {5:3.2f}%, '
                             'precision_td: {6:2.6f}, recall_td: {7:2.6f}, f1_td: {8:2.6f}, f1_td_micro: {9:2.6f}, accuracy_td: {10:3.2f}%'.format(
                it + 1, iter_precision / iter_sample, iter_recall / iter_sample, iter_f1 / iter_sample, iter_f1_micro / iter_sample,
                100 * iter_right / iter_sample, iter_precision_td / iter_sample, iter_recall_td / iter_sample,
                iter_f1_td / iter_sample, iter_f1_td_micro / iter_sample, 100 * iter_right_td / iter_sample) + '\r')
            sys.stdout.flush()
        print("")
        return iter_precision / iter_sample, iter_recall / iter_sample, iter_f1 / iter_sample, iter_f1_micro / iter_sample, iter_right / iter_sample, iter_precision_td / iter_sample, iter_recall_td / iter_sample, iter_f1_td / iter_sample, iter_f1_td_micro / iter_sample, iter_right_td / iter_sample
class OverallEDModel(nn.Module):
    """Abstract base for standard (non-episodic) event detection models.

    Mirrors :class:`LowResEDModel` for the fully-supervised setting:
    subclasses must implement :meth:`forward`; this class provides the
    encoder slot, the trigger-classification head and the shared loss /
    accuracy / evaluation helpers.
    """

    def __init__(self, sentence_encoder):
        '''
        sentence_encoder: Sentence encoder
        '''
        nn.Module.__init__(self)
        self.sentence_encoder = sentence_encoder
        self.cost = nn.CrossEntropyLoss()
        # NOTE(review): candidates considered were MSELoss, NLLLoss and
        # KLDivLoss; loss_for_td is not used by the methods defined below.
        self.loss_for_td = nn.MSELoss()
        # Word embedding -> trigger-label logits head.
        self.fc_trigger = nn.Sequential(
            nn.Dropout(para.DROPOUT_RATE),
            nn.Linear(para.SIZE_EMB_WORD, para.SIZE_TRIGGER_LABEL, bias=True),
            nn.ReLU(),
        )

    def forward(self, inputs):
        '''
        inputs: Inputs of the overall set.
        return: logits
        '''
        raise NotImplementedError

    def loss(self, logits, label):
        '''
        logits: Logits with the size (..., class_num)
        label: Label with whatever size.
        return: [Loss] (A single value)
        '''
        N = logits.size(-1)
        return self.cost(logits.view(-1, N), label.view(-1))

    def accuracy(self, pred, label):
        '''
        pred: Prediction results with whatever size
        label: Label with whatever size
        return: [Accuracy] (A single value)
        '''
        return torch.mean((pred.view(-1) == label.view(-1)).type(torch.FloatTensor))

    def evaluation_metric(self, pred, label):
        '''
        pred: Prediction results with whatever size
        label: Label with whatever size
        return: (macro precision, macro F1, macro recall, micro F1)
        '''
        # sklearn operates on host (numpy-compatible) data.  Moving to the
        # CPU unconditionally is a no-op for CPU tensors and, unlike the old
        # `if para.CUDA:` test, cannot disagree with the tensors' actual
        # device (the old code crashed when para.CUDA was False but the
        # tensors lived on the GPU).
        pred = pred.detach().cpu()
        label = label.detach().cpu()
        # average="macro": unweighted mean of per-class scores.
        Precision = precision_score(y_true=label.view(-1), y_pred=pred.view(-1), average="macro")
        Recall = recall_score(y_true=label.view(-1), y_pred=pred.view(-1), average="macro")
        F1_score = f1_score(y_true=label.view(-1), y_pred=pred.view(-1), average="macro")
        F1_score_micro = f1_score(y_true=label.view(-1), y_pred=pred.view(-1), average="micro")
        return Precision, F1_score, Recall, F1_score_micro
class OverallEDFramework:
    """Training / evaluation driver for fully-supervised event detection.

    Non-episodic counterpart of :class:`LowResEDFramework`: batches come
    from ``next_batch_for_overall`` and the model is an
    :class:`OverallEDModel` subclass.  The loop combines an event
    classification (EC) loss and a trigger detection (TD) loss, validates
    periodically and keeps the best checkpoint.
    """

    def __init__(self, train_data_loader, val_data_loader, test_data_loader):
        '''
        train_data_loader: DataLoader for training.
        val_data_loader: DataLoader for validating.
        test_data_loader: DataLoader for testing.
        '''
        self.train_data_loader = train_data_loader
        self.val_data_loader = val_data_loader
        self.test_data_loader = test_data_loader

    def __load_model__(self, ckpt):
        '''
        ckpt: Path of the checkpoint
        return: Checkpoint dict
        '''
        if os.path.isfile(ckpt):
            checkpoint = torch.load(ckpt)
            print("Successfully loaded checkpoint '%s'" % ckpt)
            return checkpoint
        else:
            raise Exception("No checkpoint found at '%s'" % ckpt)

    def item(self, x):
        '''
        Scalar-extraction helper (kept for source compatibility with code
        written for PyTorch before and after 0.4).
        '''
        return x.item()

    def train(self, model, model_name,
              B,
              ckpt_dir=para.CHECKPOINT_DIRECTORY,
              test_result_dir=para.DATA_OUTPUT_DIRECTORY,
              learning_rate=para.LR,
              lr_step_size=para.SIZE_LR_STEP,
              weight_decay=para.WEIGHT_DECAY,
              train_iter=para.TRAIN_ITER,
              val_iter=para.VAL_ITER,
              val_step=para.VAL_STEP,
              test_iter=para.TEST_ITER,
              cuda=para.CUDA,
              pretrain_model=None,
              optimizer=optim.SGD,
              noise_rate=0):
        '''
        model: an OverallEDModel instance
        model_name: Name of the model
        B: Batch size
        ckpt_dir: Directory of checkpoints
        test_result_dir: Directory of test results
        learning_rate: Initial learning rate
        lr_step_size: Decay learning rate every lr_step_size steps
        weight_decay: Rate of decaying weight
        train_iter: Num of iterations of training
        val_iter: Num of iterations of validating
        val_step: Validate every val_step steps
        test_iter: Num of iterations of testing
        cuda: Use CUDA or not
        pretrain_model: Pre-trained checkpoint path
        '''
        print("Start training...")
        # Init: only optimize parameters that require gradients.
        parameters_to_optimize = filter(lambda x: x.requires_grad, model.parameters())
        optimizer = optimizer(parameters_to_optimize, learning_rate, weight_decay=weight_decay)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=lr_step_size)
        if pretrain_model:
            checkpoint = self.__load_model__(pretrain_model)
            model.load_state_dict(checkpoint['state_dict'])
            start_iter = checkpoint['iter'] + 1
        else:
            start_iter = 0

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            model = nn.DataParallel(model)
        if cuda:
            model = model.cuda()
        model.train()

        # Training.  The iter_* variables are running sums over the current
        # reporting window; they are reset every val_step iterations so the
        # console line shows window averages.
        best_acc = 0
        iter_loss = 0.0
        iter_right = 0.0
        iter_precision = 0.0
        iter_f1 = 0.0
        iter_f1_micro = 0.0
        iter_recall = 0.0
        iter_loss_ec = 0.0
        iter_loss_td = 0.0
        iter_right_td = 0.0
        iter_precision_td = 0.0
        iter_f1_td = 0.0
        iter_f1_td_micro = 0.0
        iter_recall_td = 0.0
        iter_sample = 0.0
        for it in range(start_iter, start_iter + train_iter):
            overall, label_event, label_trigger = self.train_data_loader.next_batch_for_overall(B, noise_rate=noise_rate)
            logits, pred, logits_trigger, pred_trigger = model(overall)
            # logits.size() -> (B, #All_Instances, N)
            # pred.size() -> (B * #All_Instances)
            # logits_trigger.size() -> (B, #All_Instances, W, 2)
            # pred_trigger.size() -> (B, #All_Instances, W)
            # trigger_label.size() -> (B, #All_Instances, W)
            if torch.cuda.device_count() > 1:
                # DataParallel wraps the model, so the helpers live on .module.
                loss_td = model.module.loss(logits_trigger, label_trigger)
                loss_ec = model.module.loss(logits, label_event)
                # Joint objective: weighted sum of EC and TD losses.
                loss = para.LOSS_RATIO_FOR_EC * loss_ec + para.LOSS_RATIO_FOR_TD * loss_td
                right_td = model.module.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.module.evaluation_metric(pred_trigger, label_trigger)
                right = model.module.accuracy(pred, label_event)
                Precision, F1_score, Recall, F1_score_micro = model.module.evaluation_metric(pred, label_event)
            else:
                loss_td = model.loss(logits_trigger, label_trigger)
                loss_ec = model.loss(logits, label_event)
                loss = para.LOSS_RATIO_FOR_EC * loss_ec + para.LOSS_RATIO_FOR_TD * loss_td
                right_td = model.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.evaluation_metric(pred_trigger, label_trigger)
                right = model.accuracy(pred, label_event)
                Precision, F1_score, Recall, F1_score_micro = model.evaluation_metric(pred, label_event)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # BUGFIX: step the LR scheduler after the optimizer (required
            # order since PyTorch 1.1).  The per-iteration decay cadence of
            # the original code is unchanged.
            scheduler.step()

            iter_loss += self.item(loss.data)
            iter_loss_ec += loss_ec
            iter_precision += Precision
            iter_recall += Recall
            iter_f1 += F1_score
            iter_f1_micro += F1_score_micro
            iter_right += self.item(right.data)
            iter_loss_td += loss_td
            iter_precision_td += Precision_td
            iter_recall_td += Recall_td
            iter_f1_td += F1_score_td
            iter_f1_td_micro += F1_score_td_micro
            iter_right_td += self.item(right_td.data)
            iter_sample += 1
            sys.stdout.write(
                '[TRAIN] step: {0:4} | loss: {1:2.6f}, '
                'loss_ec: {2:2.6f}, precision: {3:2.6f}, recall: {4:2.6f}, '
                'f1: {5:2.6f}, f1_micro: {6:2.6f}, accuracy: {7:3.2f}%, '
                'loss_td: {8:2.6f}, precision_td: {9:2.6f}, recall_td: {10:2.6f}, '
                'f1_td: {11:2.6f}, f1_td_micro: {12:2.6f}, accuracy_td: {13:3.2f}%'.format(it + 1,
                iter_loss / iter_sample, iter_loss_ec / iter_sample, iter_precision / iter_sample, iter_recall / iter_sample,
                iter_f1 / iter_sample, iter_f1_micro / iter_sample, 100 * iter_right / iter_sample,
                iter_loss_td / iter_sample, iter_precision_td / iter_sample, iter_recall_td / iter_sample,
                iter_f1_td / iter_sample, iter_f1_td_micro / iter_sample, 100 * iter_right_td / iter_sample) + '\r')
            # Single flush is sufficient (the original called flush() twice).
            sys.stdout.flush()
            if it % val_step == 0:
                # Reset the running-average window.
                # BUGFIX: the micro-F1 accumulators are now reset as well;
                # previously they kept growing across windows while
                # iter_sample was reset, inflating the reported micro F1.
                iter_loss = 0.
                iter_loss_ec = 0.
                iter_precision = 0.
                iter_recall = 0.
                iter_f1 = 0.
                iter_f1_micro = 0.
                iter_right = 0.
                iter_loss_td = 0.
                iter_precision_td = 0.
                iter_recall_td = 0.
                iter_f1_td = 0.
                iter_f1_td_micro = 0.
                iter_right_td = 0.
                iter_sample = 0.
            if (it + 1) % val_step == 0:
                # Periodic validation; keep the checkpoint with best accuracy.
                precision, recall, f1, f1_micro, acc, precision_td, recall_td, f1_td, f1_td_micro, acc_td, = self.eval(model, model_name, B, val_iter, noise_rate=noise_rate)
                model.train()
                if acc > best_acc:
                    print('Best checkpoint')
                    if not os.path.exists(ckpt_dir):
                        os.makedirs(ckpt_dir)
                    save_path = os.path.join(ckpt_dir, model_name + '_Overall' + '.pth.tar')
                    torch.save({'state_dict': model.state_dict()}, save_path)
                    best_acc = acc
        print("\n======================================================================================\n")
        print("Finish training " + model_name)
        # Final test with the best checkpoint saved above.
        test_precision, test_recall, test_f1, test_f1_micro, test_acc, test_precision_td, test_recall_td, test_f1_td, test_f1_td_micro, test_acc_td = \
            self.eval(model, model_name, B, test_iter, ckpt=os.path.join(ckpt_dir, model_name + '_Overall' + '.pth.tar'), noise_rate=noise_rate)
        print("\n======================================================================================\n")
        print("Finish testing " + model_name)
        print("LR:", para.LR, "LOSS_RATIO_FOR_TD:", para.LOSS_RATIO_FOR_TD, "LOSS_RATIO_FOR_EC:",
              para.LOSS_RATIO_FOR_EC, "LOSS_RATIO_FOR_ERE:", para.LOSS_RATIO_FOR_ERE)
        print(model_name + '_Overall')
        print("Test precision_td: {}".format(test_precision_td))
        print("Test recall_td: {}".format(test_recall_td))
        print("Test f1_td: {}".format(test_f1_td))
        print("Test f1_td_micro: {}".format(test_f1_td_micro))
        print("Test accuracy_td: {}".format(test_acc_td))
        print("Test precision: {}".format(test_precision))
        print("Test recall: {}".format(test_recall))
        print("Test f1: {}".format(test_f1))
        print("Test f1_micro: {}".format(test_f1_micro))
        print("Test accuracy: {}".format(test_acc))

    def eval(self, model, model_name, B, eval_iter, ckpt=None, noise_rate=0):
        '''
        model: an OverallEDModel instance
        B: Batch size
        eval_iter: Num of iterations
        ckpt: Checkpoint path. Set as None to validate with the current model
              parameters on the validation set; a path triggers loading the
              checkpoint and evaluating on the test set.
        return: averages over eval_iter batches of
                (precision, recall, f1, f1_micro, accuracy,
                 precision_td, recall_td, f1_td, f1_td_micro, accuracy_td)
        '''
        print("")
        model.eval()
        if ckpt is None:
            eval_dataset = self.val_data_loader
        else:
            checkpoint = self.__load_model__(ckpt)
            model.load_state_dict(checkpoint['state_dict'])
            eval_dataset = self.test_data_loader
        iter_precision = 0.0
        iter_recall = 0.0
        iter_f1 = 0.0
        iter_f1_micro = 0.0
        iter_right = 0.0
        iter_precision_td = 0.0
        iter_recall_td = 0.0
        iter_f1_td = 0.0
        iter_f1_td_micro = 0.0
        iter_right_td = 0.0
        iter_sample = 0.0
        for it in range(eval_iter):
            overall, label_event, label_trigger = eval_dataset.next_batch_for_overall(B, noise_rate=noise_rate)
            logits, pred, logits_trigger, pred_trigger = model(overall)
            if torch.cuda.device_count() > 1:
                right_td = model.module.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.module.evaluation_metric(pred_trigger, label_trigger)
                right = model.module.accuracy(pred, label_event)
                Precision, F1_score, Recall, F1_score_micro = model.module.evaluation_metric(pred, label_event)
            else:
                right_td = model.accuracy(pred_trigger, label_trigger)
                Precision_td, F1_score_td, Recall_td, F1_score_td_micro = model.evaluation_metric(pred_trigger, label_trigger)
                right = model.accuracy(pred, label_event)
                Precision, F1_score, Recall, F1_score_micro = model.evaluation_metric(pred, label_event)
            iter_precision += Precision
            iter_recall += Recall
            iter_f1 += F1_score
            iter_f1_micro += F1_score_micro
            iter_right += self.item(right.data)
            iter_precision_td += Precision_td
            iter_recall_td += Recall_td
            iter_f1_td += F1_score_td
            iter_f1_td_micro += F1_score_td_micro
            iter_right_td += self.item(right_td.data)
            iter_sample += 1
            sys.stdout.write('[EVAL] step: {0:4} | '
                             'precision: {1:2.6f}, recall: {2:2.6f}, f1: {3:2.6f}, f1_micro: {4:2.6f}, accuracy: {5:3.2f}%, '
                             'precision_td: {6:2.6f}, recall_td: {7:2.6f}, f1_td: {8:2.6f}, f1_td_micro: {9:2.6f}, accuracy_td: {10:3.2f}%'.format(
                it + 1, iter_precision / iter_sample, iter_recall / iter_sample, iter_f1 / iter_sample,
                iter_f1_micro / iter_sample, 100 * iter_right / iter_sample, iter_precision_td / iter_sample,
                iter_recall_td / iter_sample, iter_f1_td / iter_sample, iter_f1_td_micro / iter_sample,
                100 * iter_right_td / iter_sample) + '\r')
            sys.stdout.flush()
        print("")
        return iter_precision / iter_sample, iter_recall / iter_sample, iter_f1 / iter_sample, iter_f1_micro / iter_sample, iter_right / iter_sample, iter_precision_td / iter_sample, iter_recall_td / iter_sample, iter_f1_td / iter_sample, iter_f1_td_micro / iter_sample, iter_right_td / iter_sample
| [
"noreply@github.com"
] | hanqingzhu258.noreply@github.com |
b8cc6156455709a06572e21da36d30d39a079576 | ded7fb99380d36aef0e8e52b1e1bce993d956886 | /speech_trial.py | 3943216b7d6e86d2b75cd4fb4724ad92528bee63 | [] | no_license | dhairyaostwal/SurgicalGlass | 37be62e6322d2f971cffe008997b8fff470ca429 | c16c168e5d88a7b12baecd82b6a7c40e580cef0c | refs/heads/master | 2021-02-18T05:44:26.569606 | 2020-10-01T09:30:34 | 2020-10-01T09:30:34 | 245,167,207 | 0 | 0 | null | 2020-10-01T09:30:35 | 2020-03-05T13:13:52 | Python | UTF-8 | Python | false | false | 1,130 | py | import analyze as az
#import speech_trial as stt
from gtts import gTTS
import speech_recognition as sr
import os
import re
import webbrowser
import smtplib
import requests
'''import pyttsx3
engine = pyttsx3.init()
engine.say("I will speak this text")
engine.runAndWait()'''
def myCommand():
    """Listen on the default microphone and return the recognized phrase.

    The returned command is lower-cased.  If Google speech recognition cannot
    understand the audio, the user is re-prompted (recursively) until a phrase
    is recognized.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as mic:
        print('Ready...')
        recognizer.pause_threshold = 1
        # Calibrate against one second of ambient noise before listening.
        recognizer.adjust_for_ambient_noise(mic, duration=1)
        captured_audio = recognizer.listen(mic)
    try:
        command = recognizer.recognize_google(captured_audio).lower()
        print('You said: ' + command + '\n')
    except sr.UnknownValueError:
        print('Your last command couldn\'t be heard')
        # Recognition failed -- ask again and use that result instead.
        command = myCommand()
    return command
def get_text():
    """Capture one spoken phrase and analyze it with the local ``analyze`` module.

    Returns a two-element list: [key phrases, extracted entities].
    """
    text = myCommand()
    # SECURITY NOTE(review): despite the *_ENV_NAME suffix, this is a
    # hard-coded subscription key checked into source; it should be read from
    # an environment variable or secret store instead.
    SUBSCRIPTION_KEY_ENV_NAME = "e0a4ec68847644849409dce0a433d785"
    # text = stt.rec()
    #text = 'can someone pass the dissection sissors?'
    print(text)
    # az.key_phrases / az.entity_extraction come from the local ``analyze``
    # module; presumably they call the Azure Text Analytics API -- confirm.
    keys = az.key_phrases(SUBSCRIPTION_KEY_ENV_NAME,text)
    ent = az.entity_extraction(SUBSCRIPTION_KEY_ENV_NAME,text)
    print(keys)
    return [keys,ent]
# Script entry point: runs one capture/analysis cycle at import/execution time.
get_text()
"dhairya.ostwal@gmail.com"
] | dhairya.ostwal@gmail.com |
a56933bd6681e43cf345a102d75f3c814955242f | 1e35ffd787a15b827ecdf78f1c740687914cb7a8 | /People.py | 91753e6029d94a3d10a4acc406733e2e20e325c5 | [] | no_license | msaimam98/TicTacToe | 67978044607d6f5fea5dc125c3475b7e147f0066 | db714d7fafb972baf4f8d16d81a99c2176ec41ba | refs/heads/master | 2020-06-05T08:32:10.395270 | 2019-06-17T18:04:24 | 2019-06-17T18:04:24 | 192,377,254 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | class Player:
"""
Initialize a new User
"""
def __init__(self, name: str, token: int) -> None:
self.name = name
self.tracking_num = token
| [
"noreply@github.com"
] | msaimam98.noreply@github.com |
87cd7b6caedf20828b0a9a3d9035bb427d44719d | 19ebbf320b25151580ef37c3edd77174262698a3 | /jskparser/ast/type/classorinterfacetype.py | 98ad1e02b7d18ca47cae30d526e7f4380f0ae44f | [
"MIT"
] | permissive | plum-umd/java-sketch | f3cc889691aaac8a3a36e092523c1d6b32ac7206 | 3abffb73e1f0ad0f99ef03d5db90fc96c24aa84d | refs/heads/master | 2023-02-09T01:07:20.444520 | 2023-01-26T03:38:51 | 2023-01-26T03:38:51 | 33,940,151 | 21 | 9 | MIT | 2020-10-16T07:10:43 | 2015-04-14T15:29:04 | Java | UTF-8 | Python | false | false | 1,513 | py | #!/usr/bin/env python
from .type import Type
from ..typearguments import TypeArguments
class ClassOrInterfaceType(Type):
def __init__(self, kwargs={}):
if kwargs:
super(ClassOrInterfaceType, self).__init__(kwargs)
# ClassOrInterfaceType
self._scope = None
scope = kwargs.get(u'scope')
if scope:
scope.update({u'@t':u'ClassOrInterfaceType'})
self._scope = ClassOrInterfaceType(scope)
self._any = kwargs.get('any')
# TypeArguments typeArguments
self._typeArguments = TypeArguments(kwargs.get(u'typeArguments', {}))
# boolean any = false;
self._any = False
@property
def typee(self): return self
@typee.setter
def typee(self, v): pass
@property
def scope(self): return self._scope
@scope.setter
def scope(self, v): self._scope = v
@property
def anyy(self): return self._any
@anyy.setter
def anyy(self, v): self._any = v
@property
def typeArguments(self): return self._typeArguments
@typeArguments.setter
def typeArguments(self, v): self._typeArguments = v
def typeArgs(self):
return self.typeArguments.typeArguments
def isUsingDiamondOperator(self):
return self.typeArguments.isUsingDiamondOperator() if self.typeArguments else False
def __str__(self):
return '{}${}'.format(str(self.scope), self.name) if self.scope else self.name
| [
"jreeseue@gmail.com"
] | jreeseue@gmail.com |
8d88dda0b85a6bba883549b8c696f655641e1194 | 2e74c734bee0412a3108d8d3ea109dfef3ff6a1b | /config.py | 49671e39e454b61ffd498aa6ce062625f43f851f | [] | no_license | Myonpasu/DotaDraft | 534c503d6b9be7fa05fd4f21640c49e932d33657 | 11ee43146b39772b7365f28106d5d14a8456501e | refs/heads/master | 2020-03-24T19:24:35.853534 | 2018-08-13T01:25:44 | 2018-08-13T01:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | # Your Steam Web API key. These may be acquired from:
# Configuration for the Dota draft data pipeline.
# https://steamcommunity.com/dev/apikey
# NOTE: the value below is a placeholder -- replace it with your own key.
STEAM_API_KEY = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

# Name of file in which to store raw data.
MATCH_DATA_FILE = 'matches.json'

# Filtering parameters for fetching match data. Values as stated at:
# https://wiki.teamfortress.com/wiki/WebAPI/GetMatchDetails#Result_data
game_mode = 2  # Captains mode
# Can specify multiple lobby types by comma separating values.
lobby_type = 0, 7  # 0 public matchmaking, 7 ranked.
human_players = 10

# Number of picks and bans in matches from the current patch.
DRAFT_LENGTH = 22

# Match IDs from which to fetch data.
# Set "start_match_id = None" to find and use the first match of the current patch.
# Set to "start_match_id = 'latest'" to use the most recent match stored in the local database.
start_match_id = None  # Patch 7.19 begins on match 4032019767.
# Set "end_match_id = 'latest'" to use the most recent match played.
end_match_id = 'latest'

# Name of file in which to store training data.
TRAINING_DATA_FILE = 'training_data.json'

# Match IDs from which to create training data.
# Set "training_start_match_id = None" for no restriction on start.
training_start_match_id = None
# Set "training_end_match_id = None" for no restriction on end.
training_end_match_id = None

# Language for hero names.
LANGUAGE = 'english'

# Name of file in which to store hero data.
HERO_DATA_FILE = 'heroes.json'
| [
"seanandrewadamson@gmail.com"
] | seanandrewadamson@gmail.com |
67587fd02596295302eec252eab94b2bf319bc33 | 2b70814c99c9c5a75e080b6e778cd6fcb8055aed | /dojo/tools/kubebench/parser.py | 07d1cdcfa7723c50d82164d23d48d11a3286d34d | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | TarlogicSecurity/django-DefectDojo | 620688a0132d33eb9e15c1f3720afb0af8c91336 | 6a2b0e5939155554a284f90f58e6287632436ce8 | refs/heads/master | 2023-03-16T09:59:36.177034 | 2021-03-07T17:05:30 | 2021-03-07T17:05:30 | 175,589,325 | 2 | 0 | BSD-3-Clause | 2023-03-06T10:01:14 | 2019-03-14T09:26:30 | HTML | UTF-8 | Python | false | false | 2,938 | py | import json
from dojo.models import Finding
class KubeBenchParser(object):
    """DefectDojo importer for kube-bench (Kubernetes CIS benchmark) JSON reports."""

    def get_scan_types(self):
        """Scan-type identifiers handled by this parser."""
        return ["kube-bench Scan"]

    def get_label_for_scan_types(self, scan_type):
        """UI label shown for the given scan type."""
        return scan_type  # no custom label for now

    def get_description_for_scan_types(self, scan_type):
        """Human-readable description shown next to the scan type."""
        return "Import JSON reports of Kubernetes CIS benchmark scans."

    def get_findings(self, json_output, test):
        """Parse the uploaded JSON report and return the findings it contains."""
        return self.get_chapters(json.load(json_output), test)

    def get_chapters(self, tree, test):
        """Collect the findings produced by every chapter node of the report."""
        collected = []
        for chapter in tree:
            collected.extend(get_tests(chapter, test))
        return list(collected)
def get_tests(tree, test):
    """Build findings for every test group in one chapter node.

    The chapter's ``id``/``text`` fields are accumulated into a description
    string that is threaded down to the individual results.
    """
    description = ''
    if 'id' in tree:
        description += tree['id'] + " "
    if 'text' in tree:
        description += tree['text']
    description += '\n'

    collected = []
    for section in tree['tests']:
        collected.extend(get_results(section, test, description))
    return list(collected)
def get_results(tree, test, description):
    """Turn the ``results`` of one test section into findings.

    Entries for which get_item returns None (non-failing results) are skipped.
    The section's ``section``/``desc`` fields are appended to the description.
    """
    if 'section' in tree:
        description += tree['section'] + ' '
    if 'desc' in tree:
        description += tree['desc']
    description += '\n'

    findings = []
    for result in tree['results']:
        finding = get_item(result, test, description)
        if finding:
            findings.append(finding)
    return findings
def get_item(vuln, test, description):
    """Convert one kube-bench result entry into a Finding, or return None.

    Entries whose status is anything other than FAIL (case-insensitive) are
    skipped, as are entries that carry no test number.
    """
    if ('status' in vuln) and (vuln['status'].upper() != 'FAIL'):
        return None
    if 'test_number' not in vuln:
        return None

    unique_id_from_tool = vuln['test_number']

    if 'test_desc' in vuln:
        title = vuln['test_desc']
    else:
        title = 'test_desc not found'

    # 'test_number' is guaranteed present by the guard above.
    description += vuln['test_number'] + ' '
    if 'test_desc' in vuln:
        description += vuln['test_desc']
    description += '\n'
    if 'audit' in vuln:
        description += 'Audit: {}\n'.format(vuln['audit'])

    # kube-bench doesn't define severities. Since the findings are
    # vulnerabilities, we set them to Medium.
    severity = 'Medium'
    numerical_severity = Finding.get_numerical_severity(severity)

    mitigation = vuln.get('remediation', '')

    return Finding(title=title,
                   test=test,
                   active=False,
                   verified=False,
                   description=description,
                   severity=severity,
                   numerical_severity=numerical_severity,
                   mitigation=mitigation,
                   unique_id_from_tool=unique_id_from_tool,
                   static_finding=True,
                   dynamic_finding=False)
| [
"noreply@github.com"
] | TarlogicSecurity.noreply@github.com |
744f1dda5ecaac99291fd666dccddefedfbe56cf | a413b0a0ae6626363df8cce3ee8b01d7d031e33c | /prediction codes/assessions/cross_validation/9/prediction.py | facca549e97c44e4c7bc4b156eebfd7df1a7f2c4 | [] | no_license | colebrookson/covid-19 | 5f57772eb8f1991697c6ef0a224c207add76ad68 | ebb57a3bc4ea677724b248f01bed4ac45eb1f3b8 | refs/heads/master | 2023-07-10T03:55:53.219502 | 2021-08-21T22:09:18 | 2021-08-21T22:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88,001 | py | from makeHistoricalData import makeHistoricalData
from models import GBM, GLM, KNN, NN, MM_GLM, GBM_grid_search, NN_grid_search
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
import seaborn as sns
from matplotlib import colors as mcolors
from pexecute.process import ProcessLoom
import time
from sys import argv
import sys
from math import floor, sqrt
import os
# import dill
import glob
import shutil
import zipfile
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import mimetypes
import subprocess as cmd
import shelve
import matplotlib.pyplot as plt
import random
import datetime
import statistics
import tensorflow as tf
from numpy.random import seed
# Fix both numpy and TensorFlow RNG seeds so runs are reproducible.
seed(1)
tf.random.set_seed(1)

# Silence matplotlib's "too many open figures" warning (many plots are made).
plt.rcParams.update({'figure.max_open_warning': 0})

# Spatial aggregation level of the input data: 'county', 'state' or 'country'.
pivot = 'country'
r = 9 * 7 # the following day to predict
numberOfSelectedCounties = -1
# How the target series is transformed ('weeklyaverage' here).
target_mode = 'weeklyaverage'
spatial_mode = 'country'
numberOfSelectedCountiesname = 1535
push_flag = 0
# set the size of test set. validation and train sets will have 30/70 proportion from the remaining days (optional),
# the default values are |test_set| = |val_set| = r, |train_set| = the remaining days
test_size = 21
# History window length; depends on the fold index passed as argv[1]
# (presumably the cross-validation fold number -- confirm with the runner).
maxHistory = min((16 * 7 - ((2*r) -7) - ((int(argv[1]) - 6) * 7)), 5 * 7)
maxC = 100 # maximum number of covariates to be considered
# Data directory three levels above the working directory, with '/' separators.
data_address = (os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))+'/data/').replace('\\','/')
######################################################### split data to train, val, test
def splitData(numberOfCounties, main_data, target, spatial_mode, mode):
    """Split feature and target frames into time-ordered partitions.

    mode == 'val'  -> returns (X_train_train, X_train_val, X_test,
                               y_train_train, y_train_val, y_test)
    mode == 'test' -> returns (X_train, X_test, y_train, y_test)

    The last ``test_size`` time steps per county form the test set, and a gap
    of ``r - 1`` steps is left between partitions so targets cannot leak
    across the forecast horizon.  ``r`` and ``target_mode`` are module-level
    globals.

    NOTE(review): the ``numberOfCounties`` argument is immediately recomputed
    from the data; it is kept only for interface compatibility.
    """
    numberOfCounties = len(main_data['county_fips'].unique())
    main_data = main_data.sort_values(by=['date of day t', 'county_fips'])
    target = target.sort_values(by=['date of day t', 'county_fips'])

    # Partition sizes are in "time steps per county": one averaged week for
    # the weekly-average target mode, 21 daily rows otherwise.  (A dead
    # pre-assignment ``test_size = r`` and an unused per-county day count
    # were removed -- both were overwritten/unreferenced.)
    if target_mode == 'weeklyaverage':
        test_size = 1
    else:
        test_size = 21
    val_size = 1

    if mode == 'val':
        X_test = main_data.tail(test_size * numberOfCounties).copy()
        X_train_val = main_data.iloc[:-((test_size + r-1) * numberOfCounties)].tail(val_size * numberOfCounties).copy()
        X_train_train = main_data.iloc[:-((val_size + test_size + r-1) * numberOfCounties)].copy()

        y_test = target.tail(test_size * numberOfCounties).copy()
        y_train_val = target.iloc[:-((test_size + r-1) * numberOfCounties)].tail(val_size * numberOfCounties).copy()
        y_train_train = target.iloc[:-((val_size + test_size + r-1) * numberOfCounties)].copy()

        return X_train_train, X_train_val, X_test, y_train_train, y_train_val, y_test

    if mode == 'test':
        X_test = main_data.tail(test_size * numberOfCounties).copy()
        X_train = main_data.iloc[:-((test_size + r-1) * numberOfCounties)].copy()

        y_test = target.tail(test_size * numberOfCounties).copy()
        # .copy() added for consistency with the other partitions (previously
        # y_train was the only slice returned as a view).
        y_train = target.iloc[:-((test_size + r-1) * numberOfCounties)].copy()

        return X_train, X_test, y_train, y_test
########################################################### clean data
def clean_data(data, numberOfSelectedCounties, spatial_mode):
    """Restrict *data* to the first ``numberOfSelectedCounties`` counties and
    drop the name/fips columns the models must not see.

    Side effect: updates the module-level ``numberOfDays`` with the number of
    distinct dates present in the selection.
    """
    global numberOfDays

    ordered = data.sort_values(by=['county_fips', 'date of day t'])
    # -1 is a sentinel meaning "use every county in the dataset".
    if numberOfSelectedCounties == -1:
        numberOfSelectedCounties = len(ordered['county_fips'].unique())

    # Counties are kept by fips cutoff: everything up to the Nth distinct fips.
    fips_cutoff = ordered['county_fips'].unique()[numberOfSelectedCounties - 1]
    selection = ordered[ordered['county_fips'] <= fips_cutoff]
    selection = selection.reset_index(drop=True)

    if spatial_mode in ('county', 'country'):
        if pivot == 'county':
            main_data = selection.drop(['county_name', 'state_fips', 'state_name'],
                                       axis=1)  # , 'date of day t'
        elif pivot == 'state':
            main_data = selection.drop(['county_name'],
                                       axis=1)  # , 'date of day t'
        elif pivot == 'country':
            main_data = selection
    elif spatial_mode == 'state':
        main_data = selection.drop(['county_name', 'state_name'], axis=1)

    numberOfDays = len(selection['date of day t'].unique())
    return main_data
########################################################### preprocess
def preprocess(main_data, spatial_mode, validationFlag):
    """Separate the target column from *main_data* and split into partitions.

    With a truthy ``validationFlag`` six frames come back (train/val/test X
    and y); otherwise four (train/test X and y).  Uses the module-level
    ``numberOfSelectedCounties``.
    """
    key_columns = ['date of day t', 'county_fips']
    if spatial_mode == 'state':
        key_columns.append('state_fips')
    target = pd.DataFrame(main_data[key_columns + ['Target']])
    features = main_data.drop(['Target'], axis=1)

    # validationFlag is 1 if we want to have a validation set and 0 otherwise.
    if validationFlag:
        return splitData(numberOfSelectedCounties, features, target, spatial_mode, 'val')
    return splitData(numberOfSelectedCounties, features, target, spatial_mode, 'test')
################################ MASE_denominator
def mase_denominator(r, h, data, target_name, target_mode, numberOfSelectedCounties, spatial_mode):
    """Compute MASE denominators: the mean absolute error of an r-step naive
    forecast, for both the train/val split and the val/test split.

    Returns four scalars:
      (train_val, val_test, train_val_country, val_test_country)
    where the *_country values are computed on per-date national sums.
    """
    if numberOfSelectedCounties == -1:
        numberOfSelectedCounties = len(data['county_fips'].unique())
    # Split only to recover the date-stamped target frames for each partition.
    X_train_train, X_train_val, X_test, y_train_train_date, y_train_val_date, y_test_date = preprocess(data,
                                                                                                       spatial_mode, 1)
    # Stack the partitions back in time order so that shifting by r rows per
    # county lines each target up with its value r steps earlier.
    train_val_df = (y_train_train_date.append(y_train_val_date).reset_index(drop=True)).sort_values(by=['date of day t', 'county_fips'])
    val_test_df = (train_val_df.append(y_test_date).reset_index(drop=True)).sort_values(by=['date of day t', 'county_fips'])

    # "train_*" here are the naive (r-steps-earlier) reference values.
    # NOTE(review): train_val is sized with len(y_train_train_date) rather
    # than len(y_train_val_date) -- looks intentional downstream but confirm.
    train_val = train_val_df.tail(len(y_train_train_date))
    train_train = train_val_df.iloc[:-r,:].tail(len(y_train_train_date))
    test = val_test_df.tail(len(y_test_date))
    train = val_test_df.iloc[:-r,:].tail(len(y_test_date))

    # Rename so the reference and actual targets can sit side by side.
    train = train.tail(len(test)).rename(columns={'Target': 'train-Target', 'date of day t': 'train-date'})
    train_train = train_train.tail(len(train_val)).rename(
        columns={'Target': 'train-Target', 'date of day t': 'train-date'})
    train_val = train_val.rename(columns={'Target': 'val-Target', 'date of day t': 'val-date'})
    test = test.rename(columns={'Target': 'test-Target', 'date of day t': 'test-date'})

    # Absolute error of the naive forecast on the validation partition.
    df_for_train_val_MASE_denominator = pd.concat(
        [train_train.reset_index(drop=True), train_val.reset_index(drop=True)], axis=1)
    df_for_train_val_MASE_denominator['absolute-error'] = abs(df_for_train_val_MASE_denominator['val-Target'] -
                                                              df_for_train_val_MASE_denominator['train-Target'])
    # Absolute error of the naive forecast on the test partition.
    df_for_val_test_MASE_denominator = pd.concat([train.reset_index(drop=True), test.reset_index(drop=True)], axis=1)
    df_for_val_test_MASE_denominator['absolute-error'] = abs(df_for_val_test_MASE_denominator['test-Target'] -
                                                             df_for_val_test_MASE_denominator['train-Target'])
    train_val_MASE_denominator = df_for_train_val_MASE_denominator['absolute-error'].mean()
    val_test_MASE_denominator = df_for_val_test_MASE_denominator['absolute-error'].mean()

    # we need to have mase denominator based on target values for whole country (sum of target for all counties)
    # this will be used for calculation of country error
    df_for_train_val_MASE_denominator_country = df_for_train_val_MASE_denominator.groupby(['val-date']).sum()
    df_for_train_val_MASE_denominator_country['absolute-error'] = abs(
        df_for_train_val_MASE_denominator_country['val-Target'] -
        df_for_train_val_MASE_denominator_country['train-Target'])

    df_for_val_test_MASE_denominator_country = df_for_val_test_MASE_denominator.groupby(['test-date']).sum()
    df_for_val_test_MASE_denominator_country['absolute-error'] = abs(
        df_for_val_test_MASE_denominator_country['test-Target'] -
        df_for_val_test_MASE_denominator_country['train-Target'])

    train_val_MASE_denominator_country = df_for_train_val_MASE_denominator_country['absolute-error'].mean()
    val_test_MASE_denominator_country = df_for_val_test_MASE_denominator_country['absolute-error'].mean()

    return train_val_MASE_denominator, val_test_MASE_denominator, train_val_MASE_denominator_country, val_test_MASE_denominator_country
########################################################### run non-mixed methods in parallel
def parallel_run(method, X_train_train, X_train_val, y_train_train, y_train_val, best_loss, c):
    """Fit one non-mixed model in validation mode.

    Returns (validation predictions, training predictions), or (None, None)
    for an unrecognized method name.
    """
    # Dispatch table instead of an if/elif chain; lambdas keep best_loss
    # lookups lazy, exactly as the original branches did.
    runners = {
        'GBM': lambda: GBM(X_train_train, X_train_val, y_train_train, y_train_val, best_loss['GBM'], mode='val'),
        'GLM': lambda: GLM(X_train_train, X_train_val, y_train_train, y_train_val, mode='val'),
        'KNN': lambda: KNN(X_train_train, X_train_val, y_train_train, y_train_val, mode='val'),
        'NN': lambda: NN(X_train_train, X_train_val, y_train_train, y_train_val, best_loss['NN'], mode='val'),
    }
    if method in runners:
        return runners[method]()
    return None, None
########################################################### run mixed methods in parallel
def mixed_parallel_run(method, X_train, X_test, y_train, y_test, best_loss):
    """Fit one mixed (stacked) model in validation mode.

    Returns (test predictions, training predictions), or (None, None) for an
    unrecognized method name.
    """
    if method == 'MM_GLM':
        return MM_GLM(X_train, X_test, y_train, y_test, mode='val')
    if method == 'MM_NN':
        return NN(X_train, X_test, y_train, y_test, best_loss[method], mode='val')
    return None, None
########################################################### run algorithms in parallel except mixed models
def run_algorithms(X_train_dict, X_val_dict, y_train_dict, y_val_dict, best_loss, c, spatial_mode, county_fips):
    """Train the four base models (GBM, GLM, KNN, NN) in parallel processes.

    Each model receives its own per-method feature frames from the input
    dicts.  In 'country' mode the whole frame is used; in 'county' mode the
    frames are first filtered to *county_fips*.  Returns the four models'
    outputs in the fixed order GBM, GLM, KNN, NN (the order the functions
    were added to the loom).
    """
    from models import GBM, GLM, KNN, NN
    t1 = time.time()
    methods = ['GBM', 'GLM', 'KNN', 'NN']
    X_train = {method: None for method in methods}
    X_val = {method: None for method in methods}
    y_train = {method: None for method in methods}
    y_val = {method: None for method in methods}
    loom = ProcessLoom(max_runner_cap=4)
    # add the functions to the multiprocessing object, loom
    if spatial_mode == 'country':
        for method in methods:
            # Key columns are bookkeeping only and must not be fed to models.
            X_train[method] = X_train_dict[method].drop(['county_fips', 'date of day t'], axis=1)
            X_val[method] = X_val_dict[method].drop(['county_fips', 'date of day t'], axis=1)
            y_train[method] = np.array(y_train_dict[method]['Target']).reshape(-1)
            y_val[method] = np.array(y_val_dict[method]['Target']).reshape(-1)
        loom.add_function(GBM, [X_train['GBM'], X_val['GBM'], y_train['GBM'], y_val['GBM'], best_loss['GBM'], 'test'], {})
        loom.add_function(GLM, [X_train['GLM'], X_val['GLM'], y_train['GLM'], y_val['GLM'], 'test'], {})
        loom.add_function(KNN, [X_train['KNN'], X_val['KNN'], y_train['KNN'], y_val['KNN'], 'test'], {})
        loom.add_function(NN, [X_train['NN'], X_val['NN'], y_train['NN'], y_val['NN'], best_loss['NN'], 'test'], {})

    if spatial_mode == 'county':
        for method in methods:
            # Per-county mode: restrict every frame to the requested county
            # before dropping the key columns.
            X_train[method] = X_train_dict[method]
            X_train[method] = X_train[method][X_train[method]['county_fips'] == county_fips].drop(
                ['county_fips', 'date of day t'], axis=1)
            X_val[method] = X_val_dict[method]
            X_val[method] = X_val[method][X_val[method]['county_fips'] == county_fips].drop(
                ['county_fips', 'date of day t'], axis=1)
            y_train[method] = y_train_dict[method]
            y_train[method] = y_train[method][y_train[method]['county_fips'] == county_fips].drop(
                ['county_fips', 'date of day t'], axis=1)
            y_val[method] = y_val_dict[method]
            y_val[method] = y_val[method][y_val[method]['county_fips'] == county_fips].drop(
                ['county_fips', 'date of day t'], axis=1)
            y_train[method] = np.array(y_train[method]['Target']).reshape(-1)
            y_val[method] = np.array(y_val[method]['Target']).reshape(-1)
        loom.add_function(GBM, [X_train['GBM'], X_val['GBM'], y_train['GBM'], y_val['GBM'], best_loss['GBM'], 'test'], {})
        loom.add_function(GLM, [X_train['GLM'], X_val['GLM'], y_train['GLM'], y_val['GLM'], 'test'], {})
        loom.add_function(KNN, [X_train['KNN'], X_val['KNN'], y_train['KNN'], y_val['KNN'], 'test'], {})
        loom.add_function(NN, [X_train['NN'], X_val['NN'], y_train['NN'], y_val['NN'], best_loss['NN'], 'test'], {})

    # run the processes in parallel
    output = loom.execute()
    t2 = time.time()
    print('total time - run algorithms: ', t2 - t1)

    return output[0]['output'], output[1]['output'], output[2]['output'], output[3]['output']
########################################################### run mixed models in parallel
def run_mixed_models(X_train_MM, X_test_MM, y_train_MM, y_test_MM, best_loss):
    """Train the two stacked (mixed) models in parallel.

    Returns the MM_GLM output followed by the MM_NN output, in the order the
    functions were added to the loom.
    """
    from models import GBM, GLM, KNN, NN, MM_GLM
    started = time.time()
    runner = ProcessLoom(max_runner_cap=2)
    # Both stackers consume the base models' predictions as features.
    runner.add_function(MM_GLM, [X_train_MM['MM_GLM'], X_test_MM['MM_GLM'], y_train_MM['MM_GLM'], y_test_MM['MM_GLM'], 'test'], {})
    runner.add_function(NN, [X_train_MM['MM_NN'], X_test_MM['MM_NN'], y_train_MM['MM_NN'], y_test_MM['MM_NN'],
                             best_loss['MM_NN'], 'test'], {})
    # run the processes in parallel
    output = runner.execute()
    print('total time - run mixed models: ', time.time() - started)
    return output[0]['output'], output[1]['output']
####################################################################### update best loss
def update_best_loss(model_type, spatial_mode, county_fips, best_loss, X_train_train_to_use, X_train_val_to_use,
                     y_train_train, \
                     y_train_val, y_prediction_train, y_prediction, covariates, \
                     numberOfCovariates, max_c):
    """Grid-search loss functions and update *best_loss* in place.

    model_type == 'mixed_model'      -> tunes MM_NN on the stacked base-model
                                        predictions and sets best_loss['MM_NN'].
    model_type == 'none_mixed_model' -> tunes GBM and NN on the raw feature
                                        frames and sets best_loss['GBM'/'NN'].
    Returns the (mutated) best_loss dict.
    """
    # h is fixed to 1: tuning is always done on the one-step history data.
    h = 1
    if model_type == 'mixed_model':

        loom = ProcessLoom(max_runner_cap=1)
        # Cap the covariate count used for indexing the prediction dicts.
        c = numberOfCovariates
        if numberOfCovariates > max_c:
            c = max_c
        y_predictions_test, y_predictions_train = [], []
        if spatial_mode == 'county':
            # Construct the outputs for the testing dataset of the 'MM' methods
            y_predictions_test.extend(
                [y_prediction[county_fips]['GBM'][(h, c)], y_prediction[county_fips]['GLM'][(h, c)],
                 y_prediction[county_fips]['KNN'][(h, c)], y_prediction[county_fips]['NN'][(h, c)]])
        elif spatial_mode == 'country':
            y_predictions_test.extend([y_prediction['GBM'][(h, c)], y_prediction['GLM'][(h, c)],
                                       y_prediction['KNN'][(h, c)], y_prediction['NN'][(h, c)]])
        # Base-model predictions become the feature matrix of the stacker:
        # one column per base model.
        y_prediction_test_np = np.array(y_predictions_test).reshape(len(y_predictions_test), -1)
        X_test_mixedModel = pd.DataFrame(y_prediction_test_np.transpose())
        if spatial_mode == 'county':
            # Construct the outputs for the training dataset of the 'MM' methods
            y_predictions_train.extend(
                [y_prediction_train[county_fips]['GBM'][(h, c)], y_prediction_train[county_fips]['GLM'][(h, c)],
                 y_prediction_train[county_fips]['KNN'][(h, c)], y_prediction_train[county_fips]['NN'][(h, c)]])
        elif spatial_mode == 'country':
            y_predictions_train.extend([y_prediction_train['GBM'][(h, c)], y_prediction_train['GLM'][(h, c)],
                                        y_prediction_train['KNN'][(h, c)], y_prediction_train['NN'][(h, c)]])
        y_prediction_train_np = np.array(y_predictions_train).reshape(len(y_predictions_train), -1)
        X_train_mixedModel = pd.DataFrame(y_prediction_train_np.transpose())
        loom.add_function(NN_grid_search, [X_train_mixedModel, y_train_train, X_test_mixedModel, y_train_val], {})
        best_loss_output = loom.execute()
        best_loss['MM_NN'] = best_loss_output[0]['output']

    if model_type == 'none_mixed_model':
        print('check 292')
        loom = ProcessLoom(max_runner_cap=2)
        if spatial_mode == 'country':
            loom.add_function(GBM_grid_search, [X_train_train_to_use['GBM'][covariates],
                                                y_train_train, X_train_val_to_use['GBM'][covariates],
                                                y_train_val], {})
            loom.add_function(NN_grid_search, [X_train_train_to_use['NN'][covariates],
                                               y_train_train, X_train_val_to_use['NN'][covariates],
                                               y_train_val], {})
        if spatial_mode == 'county':
            # County mode: the feature dicts are additionally keyed by county
            # and history length before the method name.
            loom.add_function(GBM_grid_search, [X_train_train_to_use[county_fips][h]['GBM'][covariates],
                                                y_train_train, X_train_val_to_use[county_fips][h]['GBM'][covariates],
                                                y_train_val], {})
            loom.add_function(NN_grid_search, [X_train_train_to_use[county_fips][h]['NN'][covariates],
                                               y_train_train, X_train_val_to_use[county_fips][h]['NN'][covariates],
                                               y_train_val], {})

        best_loss_output = loom.execute()

        best_loss['GBM'], best_loss['NN'] = best_loss_output[0]['output'], best_loss_output[1]['output']

    return best_loss
###########################################################
def get_best_loss_mode(counties_best_loss_list):
    """Aggregate per-county tuned losses into a single loss per method.

    For each of GBM / NN / MM_NN: when every county produced a distinct loss
    there is no meaningful mode, so one is picked at random; otherwise the
    most common loss (statistics.mode) is used.
    """
    best_loss = {}
    for method in ('GBM', 'NN', 'MM_NN'):
        losses = counties_best_loss_list[method]
        # When the number of selected counties is small every loss can be
        # unique, in which case a mode cannot be computed.
        if len(np.unique(np.array(losses))) == len(losses):
            best_loss[method] = random.choice(losses)
        else:
            best_loss[method] = statistics.mode(losses)
    return best_loss
########################################################### generate data for best h and c
def generate_data(h, numberOfCovariates, covariates_names, numberOfSelectedCounties):
    """Rebuild the dataset for a chosen history length *h* and covariate count.

    Keeps only the top ``numberOfCovariates`` names from *covariates_names*
    (plus the forced features), expanded to all their historical lags, and
    returns X_train, X_test, y_train, y_test.

    NOTE(review): relies on module-level globals ``r``, ``test_size``,
    ``spatial_mode``, ``target_mode``, ``data_address``, ``future_features``,
    ``force_features``, ``pivot`` and ``end_date`` -- the last few are defined
    elsewhere in the file and not visible here; confirm before reuse.
    """
    data = makeHistoricalData(h, r, test_size, 'death', 'mrmr', spatial_mode, target_mode, data_address, future_features, pivot, end_date)
    data = clean_data(data, numberOfSelectedCounties, spatial_mode)
    X_train, X_test, y_train, y_test = preprocess(data, spatial_mode, 0)
    covariates = [covariates_names[i] for i in range(numberOfCovariates)]
    best_covariates = force_features.copy()
    indx_c = 0
    for covar in covariates:  # iterate through sorted covariates
        indx_c += 1
        for covariate in data.columns:  # add all historical covariates of this covariate and create a feature
            # Column names look like "<name> <lag>"; match on the base name.
            pure_name = covar.split(' ')[0]
            cov_temp = covariate.split(' ')[0]
            if pure_name == cov_temp and pure_name not in force_features:
                best_covariates.append(covariate)

    best_covariates += ['county_fips',
                        'date of day t']  # we add this two columns to use when we want break data to county_data
    X_train = X_train[best_covariates]
    X_test = X_test[best_covariates]

    return X_train, X_test, y_train, y_test
########################################################### plot validation results
def plot_results(row, col, numberOfCovariates, methods, history, errors, mode):
    """Plot validation error vs. number of covariates, one subplot per method.

    *errors* is a dict keyed by method name, each mapping (h, c) tuples to an
    error value.  One line is drawn per history length h.  The figure is saved
    under <validation_address>/plots_of_errors/<mode>.pdf.
    """
    mpl.style.use('seaborn')
    plt.rc('font', size=20)
    fig, ax = plt.subplots(row, col, figsize=(40, 40))
    colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)

    # Sort colors by hue, saturation, value and name.
    by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
                    for name, color in colors.items())
    sorted_names = [name for hsv, name in by_hsv]
    colorset = set(sorted_names[::-1])
    # Rebinding (not mutating) colorset inside the loop is safe: the loop
    # iterates the original set object.
    for item in colorset:
        if ('white' in item) or ('light' in item):
            colorset = colorset - {item}
    # NOTE(review): `colors` is built from a set, so its ordering -- and hence
    # the color picked by colors[color * 2] below -- is arbitrary across runs.
    colors = list(colorset - {'lavenderblush', 'aliceblue', 'lavender', 'azure',
                              'mintcream', 'honeydew', 'beige', 'ivory', 'snow', 'w'})
    # colors = ['tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan',
    #           'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple']
    ind = 0
    for i in range(row):
        for j in range(col):
            color = 0
            for h in history:
                errors_h = []
                # x label: covariates
                covariates_list = [c for c in range(1, numberOfCovariates + 1)][:maxC]
                # y label: errors
                for c in range(1, numberOfCovariates + 1):
                    errors_h.append(errors[methods[ind]][(h, c)])
                    if c == maxC:
                        break
                ax[i, j].plot(covariates_list, errors_h, colors[color * 2], label="h = " + str(h))
                ax[i, j].set_xlabel("Number Of Covariates")
                ax[i, j].set_ylabel(mode)
                ax[i, j].set_title(str(methods[ind]))
                ax[i, j].legend()
                ax[i, j].set_xticks(covariates_list)
                color += 1
            ind += 1
    address = validation_address + 'plots_of_errors/'
    if not os.path.exists(address):
        os.makedirs(address)
    plt.savefig(address + str(mode) + '.pdf')
########################################################### plot table for final results
def plot_table(table_data, col_labels, row_labels, name, mode):
    """Render *table_data* as a PDF table and also dump it as CSV.

    ``mode == 'val'`` writes under the validation output tree, anything else
    under the test output tree; a 'tables/' subfolder is created on demand.
    """
    figure = plt.figure()  # dpi=50 figsize=(30, 10)
    axes = figure.add_subplot(111)

    base = validation_address if mode == 'val' else test_address
    address = base + 'tables/'
    if not os.path.exists(address):
        os.makedirs(address)

    column_widths = [0.1, 0.1, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]
    rendered = plt.table(cellText=table_data,
                         colWidths=column_widths,
                         rowLabels=row_labels,
                         colLabels=col_labels,
                         loc='center',
                         cellLoc='center')
    rendered.auto_set_font_size(False)
    rendered.set_fontsize(9)
    rendered.scale(1.5, 1.5)
    axes.axis('off')
    plt.savefig(address + name + '.pdf', bbox_inches='tight')

    # Mirror the table as CSV, with the row labels in a 'method' column.
    as_frame = pd.DataFrame(table_data, columns=col_labels)
    as_frame['method'] = list(row_labels)
    as_frame.to_csv(address + name +'.csv', index = False)
########################################################### plotting mean errors (first error)
def plot_targets(method, x_axis, df, main_address):
    """Plot averaged real vs. predicted targets over time for one method.

    Saves the figure under <main_address>/procedure_of_prediction/.
    """
    mpl.style.use('default')
    plt.rc('font', size=40)
    figure, axes = plt.subplots(figsize=(60, 20))
    axes.plot(x_axis, df['average of targets'], label='Target')
    axes.plot(x_axis, df['average of predictions'], label='Prediction')
    axes.set_xlabel('date', fontsize=40)
    axes.set_ylabel('real and predicted targets for ' + str(method), fontsize=40)
    axes.legend()
    out_dir = main_address + 'procedure_of_prediction/'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    plt.savefig(out_dir + 'procedure_' + str(method) + '.pdf')
########################################################### box plots and violin plots
def box_violin_plot(X, Y, figsizes, fontsizes, name, address):
    """Draw a box plot and a violin plot of Y grouped by X, saving both as PDFs.

    *figsizes* and *fontsizes* are dicts with 'box' and 'violin' keys.
    """
    mpl.style.use('default')
    # Same rendering pipeline for both plot kinds; only the seaborn call,
    # figure size and font size differ.
    for kind, plotter in (('box', sns.boxplot), ('violin', sns.violinplot)):
        plt.figure(figsize=figsizes[kind])
        plt.rc('font', size=fontsizes[kind])
        plt.locator_params(axis='y', nbins=20)
        plotter(x=X, y=Y)
        plt.savefig(address + str(name) + kind + 'plot.pdf')
        plt.close()
########################################################### plot prediction and real values
def real_prediction_plot(df, r, test_size, target_name, target_mode, best_h, maxHistory, spatial_mode, methods,
                         future_mode, numberOfSelectedCounties):
    """Plot real vs. predicted target curves for a few sample counties.

    For each method, draws train predictions, test predictions and real
    values for New York county (fips 36061) plus two other fixed/randomly
    sampled counties, and saves one PDF per method under
    ``test_address + 'plots_of_real_prediction_values/'``.

    Parameters
    ----------
    df : dict
        Maps method name -> DataFrame with columns including
        'date of day t', 'county_fips', 'Target' and 'prediction'.
    r : int
        Forecast horizon; the last r rows of each series are the test part.
    target_name, target_mode : str
        Target variable name and transformation; control axis labels and
        date handling.
    methods : list of str
        Method names to plot (keys of ``df``).

    NOTE(review): relies on module-level globals ``test_address``,
    ``data_address`` and ``pivot``.  The parameters test_size, best_h,
    maxHistory, spatial_mode, future_mode and numberOfSelectedCounties
    appear unused in this body — confirm before removing them.
    """
    address = test_address + 'plots_of_real_prediction_values/'
    if not os.path.exists(address):
        os.makedirs(address)
    # choose the y-axis label prefix matching the target transformation
    if target_mode == 'weeklyaverage':
        label_prefix = 'Weekly averaged \n n'
    elif target_mode == 'weeklymovingaverage':
        label_prefix = 'weekly moving averaged \n n'
    elif target_mode == 'differential':
        label_prefix = 'differential \n n'
    elif target_mode == 'logarithmic':
        label_prefix = 'logarithmic \n n'
    elif target_mode == 'cumulative':
        label_prefix = 'cumulative \n n'
    else:
        label_prefix = 'N'
    if target_name == 'confirmed':
        label_suffix = 'cases'
    else:
        label_suffix = 's'
    for method in methods:
        method_prediction_df = df[method]  # this df contains real and predicted target values
        if pivot == 'county':
            county_name_df = pd.read_csv(data_address + 'fixed-data.csv')[
                ['county_fips', 'county_name']]  # we need county names for plot label
        elif pivot == 'state':
            county_name_df = pd.read_csv(data_address + 'fixed-data.csv')[
                ['state_fips', 'state_name']]  # we need county names for plot label
            county_name_df.rename(columns={'state_fips': 'county_fips', 'state_name': 'county_name'},
                                  inplace=True)
        county_name_df.drop_duplicates(subset=["county_fips", "county_name"],
                                       keep='first', inplace=True)
        df_for_plot = pd.merge(method_prediction_df, county_name_df, how='left')
        if target_mode != 'weeklyaverage':
            # shift each base date forward by the horizon r to label the predicted day
            df_for_plot['date'] = df_for_plot['date of day t'].apply(
                lambda x: datetime.datetime.strptime(x, '%m/%d/%y') + datetime.timedelta(days=r))
            df_for_plot['weekday'] = df_for_plot['date'].apply(lambda x: x.weekday())
            df_for_plot['date'] = df_for_plot['date'].apply(lambda x: datetime.datetime.strftime(x, '%m/%d/%y'))
        else:
            df_for_plot['date'] = df_for_plot['date of day t'].apply(lambda x: 'week ' + str(x + r))
        # clamp negative predictions to zero (counts cannot be negative)
        df_for_plot.loc[df_for_plot['prediction'] < 0, 'prediction'] = 0
        counties = []
        for i in [36061, 40117, 51059]:  # newyork + two random county
            if len(df_for_plot[df_for_plot['county_fips'] == i]) > 0:
                counties.append(i)
            else:
                counties = counties + random.sample(df_for_plot['county_fips'].unique().tolist(), 1)
        # size the figure width by the longest county series
        length = list()
        for county in counties:
            length.append(len(df_for_plot[df_for_plot['county_fips'] == county]))
        plot_with = max(length) + 20
        fig, ax = plt.subplots(figsize=(plot_with, 75))
        mpl.style.use('default')
        plt.rc('font', size=45)
        # one stacked subplot per sample county
        for index, county in enumerate(counties):
            plt.subplot(311 + index)
            county_df_for_plot = df_for_plot.loc[df_for_plot['county_fips'] == county]
            plt.plot(county_df_for_plot['date'][:-(r - 1)],
                     county_df_for_plot['prediction'].round()[:-(r - 1)],
                     label='Train prediction', color='forestgreen', linewidth=2.0)
            plt.plot(county_df_for_plot['date'][-r:],
                     county_df_for_plot['prediction'].round()[-r:],
                     label='Test prediction', color='dodgerblue', linewidth=2.0)
            plt.plot(county_df_for_plot['date'],
                     county_df_for_plot['Target'].round(), label='Real values',
                     color='black', linewidth=2.0)
            # if target_mode != 'cumulative':
            # plt.plot(county_df_for_plot['date'][-r:],county_df_for_plot['Target'].round()[-(2*r):-r],'-.',color='gray',label='Naive prediction',linewidth=2.0)
            if target_mode != 'weeklyaverage':
                # color weekend tick labels (Saturday=5, Sunday=6) red
                county_df_for_plot = county_df_for_plot.reset_index(drop=True)
                weekend_index = county_df_for_plot[county_df_for_plot['weekday'].isin([5, 6])].index
                for i in weekend_index:
                    plt.gca().get_xticklabels()[i].set_color("red")
            plt.xticks(rotation=65)
            fig.subplots_adjust(hspace=0.4)
            plt.ylabel(label_prefix + 'umber of ' + target_name + label_suffix)
            countyname = df_for_plot.loc[df_for_plot['county_fips'] == county, 'county_name'].unique()
            plt.title(df_for_plot.loc[df_for_plot['county_fips'] == county, 'county_name'].unique()[0])
            plt.legend()
            plt.xlabel('Date')
        plt.savefig(address + str(method) + ' real_prediction_values.pdf')
        plt.close()
########################################################### get errors for each model in each h and c
def get_errors(h, c, method, y_prediction, y_prediction_train, y_test_date, y_train_date, regular_data,
               MASE_denominator, numberOfSelectedCounties, target_name, mode):
    """Compute county- and country-level errors for one method at (h, c).

    Reverses any target transformation (cumulative / logarithmic /
    weeklymovingaverage / differential) so errors are reported on the
    original target scale, then computes MAE, MAPE, (disabled) adjusted R
    squared, second error and MASE.  In 'test' mode it additionally writes
    per-row and per-day error files and plots.

    Parameters
    ----------
    h, c : int
        History length and number of covariates (for logging and for
        regenerating regular-target data in test mode).
    method : str
        Name of the evaluated model (used in logs and file names).
    y_prediction, y_prediction_train : array-like
        Test and train predictions (sorted here to match preprocess order).
    y_test_date, y_train_date : pandas.DataFrame
        Frames with columns ['date of day t', 'county_fips', 'Target'].
    regular_data : pandas.DataFrame or None
        Regular-target data; passed in for 'val' mode, regenerated here for
        'test' mode (each method may have a different best h).
    MASE_denominator : dict
        Naive-forecast denominators keyed by 'county' and 'country'.
    numberOfSelectedCounties : int
        Number of counties, or -1 meaning "all counties in y_test_date".
    target_name : str
        Target variable name (e.g. 'death').
    mode : str
        'val' or 'test'.

    Returns
    -------
    tuple
        (meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared,
         second_error, MASE, country_errors)

    Side effects: appends a textual report to ``env_address + 'out.txt'``
    and, in test mode, writes CSVs and plots under ``test_address``.
    Relies on module-level globals (target_mode, r, test_size,
    spatial_mode, data_address, future_features, pivot, end_date, ...).
    """
    # set negative predictions to zero (targets are non-negative counts)
    y_prediction[y_prediction < 0] = 0
    # country_errors holds errors of the prediction aggregated over the whole country
    country_errors = {error: None for error in
                      ['meanAbsoluteError', 'percentageOfAbsoluteError', 'adj_r_squared', 'second_error', 'MASE']}
    # sort y_prediction and y_prediction_train like the output of the preprocess
    # function; in county and state mode their order may be cluttered
    y_train_date['prediction'] = y_prediction_train
    y_train_date = y_train_date.sort_values(by=['county_fips', 'date of day t'])
    y_prediction_train = list(y_train_date['prediction'])
    y_test_date['prediction'] = y_prediction
    y_test_date = y_test_date.sort_values(by=['county_fips', 'date of day t'])
    y_prediction = list(y_test_date['prediction'])
    y_test = np.array(y_test_date['Target']).reshape(-1)
    # in validation mode errors are computed over train + test together
    if mode == 'val':
        y_test_date = y_train_date.append(y_test_date)
        y_prediction = list(y_test_date['prediction'])
        y_test_date = y_test_date.drop(['prediction'], axis=1)
        y_test = np.array(y_test_date['Target']).reshape(-1)
    if numberOfSelectedCounties == -1:
        numberOfSelectedCounties = len(y_test_date['county_fips'].unique())
    # we need data with the regular target to return the modified target to its
    # original state; in validation mode regular data is read in main and passed
    # in to avoid redundancy, but in test mode each method has a different
    # best h, so it is regenerated here
    if target_mode not in ['regular', 'weeklyaverage']:
        if mode == 'test':
            regular_data = makeHistoricalData(h, r, test_size, target_name, 'mrmr', spatial_mode, 'regular',
                                              data_address, future_features, pivot, end_date)
            regular_data = clean_data(regular_data, numberOfSelectedCounties, spatial_mode)
            temp_1, temp_2, regular_y_train_date, regular_y_test_date = preprocess(regular_data, spatial_mode, 0)
        if mode == 'val':
            temp_1, temp_2, temp_3, regular_y_train_date, regular_y_test_date, temp_4 = preprocess(regular_data,
                                                                                                   spatial_mode, 1)
    # if target mode is cumulative we need to return the target variable to its original state
    if target_mode == 'cumulative':
        cumulative_data = y_train_date.append(y_test_date)
        cumulative_data['prediction'] = list(y_train_date['Target']) + list(y_prediction)
        cumulative_data = cumulative_data.sort_values(by=['date of day t', 'county_fips'])
        # walk backwards over the last r+1 dates, differencing consecutive days
        reverse_dates = cumulative_data['date of day t'].unique()[-(r + 1):][::-1]
        for index in range(len(reverse_dates)):
            date = reverse_dates[index]
            past_date = reverse_dates[index + 1]
            cumulative_data.loc[cumulative_data['date of day t'] == date, 'Target'] = list(
                np.array(cumulative_data.loc[cumulative_data['date of day t'] == date, 'Target']) - np.array(
                    cumulative_data.loc[cumulative_data['date of day t'] == past_date, 'Target']))
            cumulative_data.loc[cumulative_data['date of day t'] == date, 'prediction'] = list(
                np.array(cumulative_data.loc[cumulative_data['date of day t'] == date, 'prediction']) - np.array(
                    cumulative_data.loc[cumulative_data['date of day t'] == past_date, 'prediction']))
            if index == len(reverse_dates) - 2:
                break
        cumulative_data = cumulative_data.sort_values(by=['date of day t', 'county_fips'])
        y_test_date = cumulative_data.tail(r * numberOfSelectedCounties)
        y_test = np.array(y_test_date['Target']).reshape(-1)
        y_prediction = np.array(cumulative_data.tail(r * numberOfSelectedCounties)['prediction']).reshape(-1)
    # if target mode is logarithmic we need to return the target variable to its original state
    if target_mode == 'logarithmic':
        y_test = np.array(np.round(np.exp(y_test) - 1)).reshape(-1)
        y_test_date['Target'] = list(np.round(np.exp(y_test_date['Target']) - 1))
        y_prediction = np.array(np.exp(y_prediction) - 1).reshape(-1)
    # if target mode is moving average we need to return the target variable to its original state
    if target_mode == 'weeklymovingaverage':
        # past target values are needed to invert the 7-day moving average
        regular_real_predicted_target = regular_y_train_date.append(
            regular_y_test_date)  # dataframe with columns ['date of day t', 'county_fips', 'Target']
        regular_real_predicted_target['prediction'] = list(regular_y_train_date['Target']) + list(y_prediction)
        regular_real_predicted_target = regular_real_predicted_target.sort_values(by=['date of day t', 'county_fips'])
        regular_real_predicted_target = regular_real_predicted_target.tail((r + 6) * numberOfSelectedCounties)
        dates = regular_real_predicted_target['date of day t'].unique()
        for index in range(len(dates)):
            ind = index + 6
            date = dates[ind]
            # 7 * moving_average minus the six preceding daily values recovers the daily value
            regular_real_predicted_target.loc[
                regular_real_predicted_target['date of day t'] == date, 'prediction'] = list(7 * np.array(
                regular_real_predicted_target.loc[
                    regular_real_predicted_target['date of day t'] == date, 'prediction']))
            for i in range(6):
                past_date = dates[ind - (i + 1)]
                regular_real_predicted_target.loc[
                    regular_real_predicted_target['date of day t'] == date, 'prediction'] = list(np.array(
                    regular_real_predicted_target.loc[
                        regular_real_predicted_target['date of day t'] == date, 'prediction']) - np.array(
                    regular_real_predicted_target.loc[
                        regular_real_predicted_target['date of day t'] == past_date, 'prediction']))
            if ind == len(dates) - 1:
                break
        y_test_date = regular_real_predicted_target.tail(r * numberOfSelectedCounties)
        y_test = np.array(y_test_date['Target']).reshape(-1)
        y_prediction = np.array(regular_real_predicted_target.tail(r * numberOfSelectedCounties)['prediction']).reshape(
            -1)
    # if target mode is differential we need to return the target variable to its original state
    if target_mode == 'differential':
        # past target values are needed to undo the day-over-day differencing
        regular_real_predicted_target = regular_y_train_date.append(
            regular_y_test_date)  # dataframe with columns ['date of day t', 'county_fips', 'Target']
        regular_real_predicted_target['prediction'] = list(regular_y_train_date['Target']) + list(y_prediction)
        regular_real_predicted_target = regular_real_predicted_target.sort_values(by=['date of day t', 'county_fips'])
        regular_real_predicted_target = regular_real_predicted_target.tail((r + 1) * numberOfSelectedCounties)
        dates = regular_real_predicted_target['date of day t'].unique()
        for index in range(len(dates)):
            date = dates[index + 1]
            past_date = dates[index]
            regular_real_predicted_target.loc[
                regular_real_predicted_target['date of day t'] == date, 'prediction'] = list(np.array(
                regular_real_predicted_target.loc[
                    regular_real_predicted_target['date of day t'] == date, 'prediction']) + np.array(
                regular_real_predicted_target.loc[
                    regular_real_predicted_target['date of day t'] == past_date, 'prediction']))
            if index == len(dates) - 2:
                break
        y_test_date = regular_real_predicted_target.tail(r * numberOfSelectedCounties)
        y_test = np.array(y_test_date['Target']).reshape(-1)
        y_prediction = np.array(regular_real_predicted_target.tail(r * numberOfSelectedCounties)['prediction']).reshape(
            -1)
    # round predictions to their closest integer (targets are counts)
    y_prediction = np.array(y_prediction)
    if target_mode != 'weeklyaverage':
        y_prediction = np.round(y_prediction)
    # for the country error we sum all county target values per day
    y_test_date['prediction'] = y_prediction
    y_test_date_country = y_test_date.groupby(['date of day t']).sum()
    y_test_country = np.array(y_test_date_country['Target']).reshape(-1)
    y_prediction_country = np.array(y_test_date_country['prediction']).reshape(-1)
    # redirect stdout so the printed report is appended to out.txt
    orig_stdout = sys.stdout
    f = open(env_address + 'out.txt', 'a')
    sys.stdout = f
    meanAbsoluteError = mean_absolute_error(y_test, y_prediction)
    print("Mean Absolute Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % meanAbsoluteError)
    sumOfAbsoluteError = sum(abs(y_test - y_prediction))
    # NOTE(review): dividing by y_test yields inf/nan for zero targets; the
    # y_test_temp adjustment below is not applied here — confirm intended.
    percentageOfAbsoluteError = np.mean((abs(y_test - y_prediction) / y_test) * 100)
    # we change zero targets into 1 and add 1 to their predictions (for MASE)
    y_test_temp = y_test.copy()
    y_test_temp[y_test == 0] = 1
    y_prediction_temp = y_prediction.copy()
    y_prediction_temp[y_test == 0] += 1
    print("Percentage of Absolute Error of ", method, " for h =", h, "and #covariates =", c,
          ": %.2f" % percentageOfAbsoluteError)
    rootMeanSquaredError = sqrt(mean_squared_error(y_test, y_prediction))
    print("Root Mean Squared Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % rootMeanSquaredError)
    second_error = sum(abs(y_prediction - y_test))
    # adjusted R squared computation is currently disabled; reported as constant 1
    adj_r_squared = 1
    print("Adjusted R Squared Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % adj_r_squared)
    MASE_numerator = sum(abs(y_prediction_temp - y_test_temp)) / len(y_test)
    MASE = MASE_numerator / MASE_denominator['county']
    print("MASE Error of ", method, " for h =", h, "and #covariates =", c, ": %.2f" % MASE)
    print("-----------------------------------------------------------------------------------------")
    # calculate whole country error
    country_errors['meanAbsoluteError'] = mean_absolute_error(y_test_country, y_prediction_country)
    sumOfAbsoluteError = sum(abs(y_test_country - y_prediction_country))
    # BUG FIX: this previously used the county-level arrays (y_test, y_prediction),
    # merely duplicating the county MAPE; use the country aggregates instead.
    country_errors['percentageOfAbsoluteError'] = np.mean(
        (abs(y_test_country - y_prediction_country) / y_test_country) * 100)
    y_test_temp_country = y_test_country.copy()
    y_test_temp_country[y_test_country == 0] = 1
    y_prediction_temp_country = y_prediction_country.copy()
    y_prediction_temp_country[y_test_country == 0] += 1
    # adjusted R squared is disabled for the country as well (both branches yield 1)
    if len(y_test_country) - c - 1 > 0:
        country_errors['adj_r_squared'] = 1
    else:
        country_errors['adj_r_squared'] = 1
    MASE_numerator = sum(abs(y_prediction_temp_country - y_test_temp_country)) / len(y_test_country)
    country_errors['MASE'] = MASE_numerator / MASE_denominator['country']
    country_errors['second_error'] = (sum(y_prediction_country - y_test_country) / sum(y_test_country)) * 100
    # restore stdout
    sys.stdout = orig_stdout
    f.close()
    # in test mode compute per-row errors ('all_errors' files) and per-day
    # averages ('first_error' files), plus error-distribution plots
    if mode == 'test':
        orig_stdout = sys.stdout
        f = open(env_address + 'out.txt', 'a')
        sys.stdout = f
        first_error_address = test_address + 'averages_of_errors_in_each_day/'
        all_errors_address = test_address + 'all_errors/' + str(method) + '/'
        if not os.path.exists(first_error_address):
            os.makedirs(first_error_address)
        if not os.path.exists(all_errors_address):
            os.makedirs(all_errors_address)
        dataframe = pd.DataFrame(y_test_date, copy=True)
        dataframe['prediction'] = y_prediction
        dataframe['error'] = y_prediction - y_test
        dataframe['absoulte_error'] = abs(y_prediction - y_test)
        # zero targets are changed into 1 (and their predictions incremented)
        # to avoid division by zero in the percentage error
        y_test_temp = y_test.copy()
        y_test_temp[y_test == 0] = 1
        y_prediction_temp = y_prediction.copy()
        y_prediction_temp[y_test == 0] += 1
        dataframe['percentage_error'] = ((abs(y_prediction_temp - y_test_temp)) / y_test_temp) * 100
        # note: second_error is redefined here as a signed percentage in test mode
        second_error = (sum(dataframe['error']) / sum(y_test)) * 100
        dataframe.to_csv(all_errors_address + 'all_errors_' + str(method) + '.csv')
        box_violin_plot(dataframe['date of day t'], dataframe['percentage_error'],
                        figsizes={'box': (60, 30), 'violin': (100, 50)},
                        fontsizes={'box': 40, 'violin': 60}, name=str(method) + '_percentage_errors_in_each_day_',
                        address=all_errors_address)
        box_violin_plot(dataframe['date of day t'], dataframe['error'], figsizes={'box': (20, 10), 'violin': (50, 30)},
                        fontsizes={'box': 15, 'violin': 30}, name=str(method) + '_pure_errors_in_each_day_',
                        address=all_errors_address)
        dataframe['county_fips'] = dataframe['county_fips'].astype(float)
        if numberOfSelectedCounties == -1:
            numberOfSelectedCounties = len(dataframe['county_fips'])
        first_error = pd.DataFrame((dataframe.groupby(['date of day t']).sum() / numberOfSelectedCounties))
        first_error.columns = ['fips', 'average of targets', 'average of predictions', 'average of errors',
                               'average of absoulte_errors', 'average of percentage_errors']
        first_error = first_error.drop(['fips'], axis=1)
        first_error.to_csv(first_error_address + 'first_error_' + str(method) + '.csv')
        plot_targets(method, first_error.index, first_error, first_error_address)
        # save outputs in 'out.txt'
        sys.stdout = orig_stdout
        f.close()
    return meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared, second_error, MASE, country_errors
########################################################### push results to github
def push(message):
    """Commit and push all local changes to the git remote.

    Runs ``git pull``, ``git add .``, ``git commit`` and ``git push`` in the
    current working directory.  No-op unless the module-level ``push_flag``
    equals 1; failures are reported but never raised (best-effort sync).

    Parameters
    ----------
    message : str
        Commit message.  SECURITY: it is interpolated into a shell command
        line, so it must not contain untrusted input (a single quote would
        break out of the quoting and alter the command).
    """
    if push_flag == 1:
        try:
            cmd.run("git pull", check=True, shell=True)
            print("everything has been pulled")
            cmd.run("git add .", check=True, shell=True)
            cmd.run(f"git commit -m '{message}'", check=True, shell=True)
            cmd.run("git push", check=True, shell=True)
            print('pushed.')
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate, and the actual failure is no longer silently hidden
        except Exception as e:
            print('could not push:', e)
########################################################### zip some of the results
def make_zip(selected_for_email, subject):
    """Stage every PDF under the given result directories and zip the stage.

    Walks each root in ``selected_for_email``, mirrors its directory layout
    (minus the first three path components) under the module-level
    ``mail_address`` folder, copies all PDFs there, then archives the whole
    staging folder as ``<subject>.zip``.

    Parameters
    ----------
    selected_for_email : iterable of str
        Root directories whose PDF files should be collected.
    subject : str
        Base name of the resulting zip archive.
    """
    for root in selected_for_email:
        for dirpath, _dirs, _files in os.walk(root):
            # rebuild the relative path under the mail staging folder
            target = mail_address + '//' + '/'.join(dirpath.split('/')[3:])
            if not os.path.exists(target):
                os.makedirs(target)
            for pdffile in glob.iglob(os.path.join(dirpath, "*.pdf")):
                shutil.copy(pdffile, target)
    shutil.make_archive(subject, 'zip', mail_address)
########################################################### mail some of the results
def send_email(*attachments):
    """Email the given files as attachments via Gmail SMTP over SSL.

    Builds a multipart message with a fixed subject/recipients and attaches
    every path in ``attachments``, guessing each file's MIME type.

    Parameters
    ----------
    *attachments : str
        Paths of files to attach.
    """
    subject = "Server results"
    body = " "
    sender_email = "covidserver1@gmail.com"
    receiver_email = ["arezo.h1371@yahoo.com"]  # ,"arashmarioriyad@gmail.com"
    CC_email = []  # "p.ramazi@gmail.com"
    # SECURITY: hard-coded credential committed to source; should be read
    # from an environment variable or a secrets store instead.
    password = "S.123456.S"
    # Create a multipart message and set headers
    message = MIMEMultipart()
    message["From"] = sender_email
    message["To"] = ','.join(receiver_email)  # receiver_email
    message["Subject"] = subject
    message["CC"] = ','.join(CC_email)  # Recommended for mass emails
    # Add body to email
    message.attach(MIMEText(body, "plain"))
    # Add attachments
    for file_name in attachments:
        ctype, encoding = mimetypes.guess_type(file_name)
        if ctype is None or encoding is not None:
            # unknown or compressed type: send as generic binary
            ctype = 'application/octet-stream'
        maintype, subtype = ctype.split('/', 1)
        # context manager instead of bare open/close so the handle is not
        # leaked if MIME construction raises
        with open(file_name, 'rb') as f:
            payload = f.read()
        # in case of a text file
        if maintype == 'text':
            # NOTE(review): payload is bytes (file opened in 'rb'); MIMEText
            # expects str — confirm text attachments actually work here.
            part = MIMEText(payload, _subtype=subtype)
        # any other file
        else:
            part = MIMEBase(maintype, subtype)
            part.set_payload(payload)
            encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(file_name))
        message.attach(part)
    text = message.as_string()
    # Log in to server using secure context and send email
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email + CC_email, text)
########################################################## flatten
def flatten(data=None, h=None, c=None, method=None, covariates_list=None, state=1):
    """Flatten a per-county dictionary of results into one structure.

    The expected layout of ``data`` depends on ``state``:
      1: data[fips][method][(h, c)]             -> one flat list
      2: data[fips][(h, c)]                     -> one flat list
      3: data[fips][h][method][covariates_list] -> concatenated DataFrame
      4: data[fips] (DataFrames)                -> concatenated DataFrame
      5: data[fips] (sequences)                 -> one numpy array
      6: data[fips][method]                     -> one flat list

    Counties are visited in dictionary (insertion) order.
    """
    if state == 1:
        flat = []
        for fips in data:
            flat.extend(data[fips][method][(h, c)])
    elif state == 2:
        flat = []
        for fips in data:
            flat.extend(data[fips][(h, c)])
    elif state == 3:
        flat = pd.DataFrame(columns=covariates_list)
        for fips in data:
            flat = pd.concat([flat, data[fips][h][method][covariates_list]], ignore_index=True)
    elif state == 4:
        # seed an empty frame with the first county's columns, then append all
        for fips in data:
            flat = pd.DataFrame(columns=data[fips].columns.values)
            break
        for fips in data:
            flat = pd.concat([flat, data[fips]], ignore_index=True)
    elif state == 5:
        flat = []
        for fips in data:
            flat.extend(data[fips])
        flat = np.array(flat)
    elif state == 6:
        flat = []
        for fips in data:
            flat.extend(data[fips][method])
    return flat
############################################################
# we define test as function to call it when h equal to half the maxHistory or when none of the models have improved in current h
def test_process(h, r, test_size, target_name, spatial_mode, target_mode, best_h, best_c, historical_X_train, \
                 historical_X_test, historical_y_train_date, historical_y_test_date, best_loss, \
                 numberOfSelectedCounties, covariates_names, maxHistory, train_val_MASE_denominator, \
                 val_test_MASE_denominator, future_mode, test_address, env_address, mail_address):
    """Final evaluation of all methods on the test set.

    Refits the base models (GBM, GLM, KNN, NN) on the whole training set
    with their best (h, c), builds the mixed-model inputs (MM_GLM, MM_NN)
    from the base-model outputs, computes county- and country-level test
    errors with ``get_errors``, saves result tables, and (unless pivot is
    'country') plots real vs. predicted values per method.

    NOTE(review): also relies on module-level globals such as
    ``run_algorithms``, ``run_mixed_models``, ``generate_data``,
    ``plot_table``, ``push``, ``models_to_log`` and ``pivot``; some
    parameters (e.g. h, best_c, train_val_MASE_denominator, mail_address)
    appear unused in this body — confirm before removing.
    """
    columns_table_t = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
                       'adjusted R squared error',
                       'second error', 'mean absolute scaled error']
    columns_table = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
                     'adjusted R squared error',
                     'sum of absolute error', 'mean absolute scaled error']
    methods = ['GBM', 'GLM', 'KNN', 'NN', 'MM_GLM', 'MM_NN']
    none_mixed_methods = ['GBM', 'GLM', 'KNN', 'NN']
    mixed_methods = ['MM_GLM', 'MM_NN']
    df_for_prediction_plot = {method: None for method in methods}
    y_prediction = {}
    y_prediction_train = {}
    # run non-mixed methods on the whole training set with their best h and c
    X_train_dict, X_test_dict, y_train_dict, y_test_dict = {}, {}, {}, {}
    GBM, GLM, KNN, NN = run_algorithms(historical_X_train, historical_X_test, historical_y_train_date,
                                       historical_y_test_date, best_loss, 0, spatial_mode, None)
    y_prediction['GBM'], y_prediction_train['GBM'] = GBM
    y_prediction['GLM'], y_prediction_train['GLM'] = GLM
    y_prediction['KNN'], y_prediction_train['KNN'] = KNN
    y_prediction['NN'], y_prediction_train['NN'] = NN
    table_data = []
    country_table_data = []
    # county- and country-level test errors for each base model
    for method in none_mixed_methods:
        meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared, second_error, meanAbsoluteScaledError, country_errors = get_errors(
            best_h[method]['MAPE'],
            best_c[method]['MAPE'], method, y_prediction[method], y_prediction_train[method],
            historical_y_test_date[method], historical_y_train_date[method],
            None, val_test_MASE_denominator[best_h[method]['MAPE']], numberOfSelectedCounties, target_name, mode='test')
        table_data.append([best_h[method]['MAPE'], best_c[method]['MAPE'], round(meanAbsoluteError, 2),
                           round(percentageOfAbsoluteError, 2), round(adj_r_squared, 2), round(second_error, 2),
                           round(meanAbsoluteScaledError, 2)])
        country_table_data.append(
            [best_h[method]['MAPE'], best_c[method]['MAPE'], round(country_errors['meanAbsoluteError'], 2),
             round(country_errors['percentageOfAbsoluteError'], 2), round(country_errors['adj_r_squared'], 2),
             round(country_errors['second_error'], 2), round(country_errors['MASE'], 2)])
    push('a new table added')
    # keep the combined real + predicted series per base method for final plots
    for method in none_mixed_methods:
        method_real_pred_df = historical_y_train_date[method].append(historical_y_test_date[method])
        prediction = list(y_prediction_train[method]) + list(y_prediction[method])
        method_real_pred_df['prediction'] = prediction
        df_for_prediction_plot[method] = method_real_pred_df
    # generate data for non-mixed methods with the best h and c of mixed models and fit mixed models on them
    # (with the whole training set)
    y_predictions = {'MM_GLM': [], 'MM_NN': []}
    y_prediction = {}
    # table_data = []
    X_train_MM_dict, X_test_MM_dict, y_train_MM_dict, y_test_MM_dict = {}, {}, {}, {}
    y_train, y_test = {}, {}
    y_test_date = {}
    for mixed_method in mixed_methods:
        X_train, X_test, y_train_date, y_test_date[mixed_method] = generate_data(best_h[mixed_method]['MAPE'],
                                                                                 best_c[mixed_method]['MAPE'],
                                                                                 covariates_names,
                                                                                 numberOfSelectedCounties)
        y_test_date_temp = y_test_date[mixed_method]
        y_train[mixed_method] = y_train_date
        y_test[mixed_method] = y_test_date_temp
        mixed_model_covariates_names = list(X_train.columns)
        X_train_to_use = {method: None for method in methods}
        X_test_to_use = {method: None for method in methods}
        for method in none_mixed_methods:
            X_train_to_use[method] = X_train.copy()
            X_test_to_use[method] = X_test.copy()
            if method in models_to_log:
                # make temporal and some fixed covariates logarithmic
                negative_features = ['temperature', 'Retail', 'Grocery', 'Parks', 'Transit', 'Workplace', 'Residential']
                for covar in mixed_model_covariates_names:
                    if (' t' in covar) and (covar.split(' ')[0] not in negative_features) and (
                            covar not in ['county_fips', 'date of day t']):
                        X_train_to_use[method][covar] = np.log((X_train_to_use[method][covar] + 1).astype(float))
                        X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
                fix_log_list = ['total_population', 'population_density', 'area', 'median_household_income',
                                'houses_density', 'airport_distance', 'deaths_per_100000']
                for covar in fix_log_list:
                    if covar in mixed_model_covariates_names:
                        X_train_to_use[method][covar] = np.log((X_train_to_use[method][covar] + 1).astype(float))
                        X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
            X_train_dict[method] = X_train_to_use[method]
            X_test_dict[method] = X_test_to_use[method]
            y_train_dict[method] = y_train[mixed_method]
            y_test_dict[method] = y_test[mixed_method]
        GBM, GLM, KNN, NN = run_algorithms(X_train_dict, X_test_dict, y_train_dict, y_test_dict, best_loss, 0,
                                           spatial_mode, None)
        y_prediction['GBM'], y_prediction_train['GBM'] = GBM
        y_prediction['GLM'], y_prediction_train['GLM'] = GLM
        y_prediction['KNN'], y_prediction_train['KNN'] = KNN
        y_prediction['NN'], y_prediction_train['NN'] = NN
        y_predictions_test, y_predictions_train = [], []
        # Construct the outputs for the testing dataset of the 'MM' methods
        # (base-model predictions become the mixed models' input features)
        y_predictions_test.extend([y_prediction['GBM'], y_prediction['GLM'], y_prediction['KNN'], y_prediction['NN']])
        y_prediction_test_np = np.array(y_predictions_test).reshape(len(y_predictions_test), -1)
        X_test_mixedModel = pd.DataFrame(y_prediction_test_np.transpose())
        # Construct the outputs for the training dataset of the 'MM' methods
        y_predictions_train.extend(
            [y_prediction_train['GBM'], y_prediction_train['GLM'], y_prediction_train['KNN'], y_prediction_train['NN']])
        y_prediction_train_np = np.array(y_predictions_train).reshape(len(y_predictions_train), -1)
        X_train_mixedModel = pd.DataFrame(y_prediction_train_np.transpose())
        X_train_MM_dict[mixed_method] = X_train_mixedModel
        X_test_MM_dict[mixed_method] = X_test_mixedModel
        y_train_MM_dict[mixed_method] = y_train[mixed_method]
        y_test_MM_dict[mixed_method] = y_test[mixed_method]
        y_test_MM_dict[mixed_method] = np.array(y_test_MM_dict[mixed_method]['Target']).reshape(-1)
        y_train_MM_dict[mixed_method] = np.array(y_train_MM_dict[mixed_method]['Target']).reshape(-1)
    # # save the entire session
    # filename = env_address + 'test.out'
    # my_shelf = shelve.open(filename, 'n')  # 'n' for new
    # for key in dir():
    #     try:
    #         my_shelf[key] = locals()[key]
    #     except:
    #         print('ERROR shelving: {0}'.format(key))
    # my_shelf.close()
    # mixed model with linear regression and neural network
    MM_GLM, MM_NN = run_mixed_models(X_train_MM_dict, X_test_MM_dict, y_train_MM_dict, y_test_MM_dict, best_loss)
    y_prediction['MM_GLM'], y_prediction_train['MM_GLM'] = MM_GLM
    y_prediction['MM_NN'], y_prediction_train['MM_NN'] = MM_NN
    # county- and country-level test errors for the mixed models
    for mixed_method in mixed_methods:
        meanAbsoluteError, percentageOfAbsoluteError, adj_r_squared, second_error, meanAbsoluteScaledError, country_errors = get_errors(
            best_h[mixed_method]['MAPE'],
            best_c[mixed_method]['MAPE'], mixed_method, y_prediction[mixed_method], y_prediction_train[mixed_method],
            y_test_date[mixed_method], y_train[mixed_method], None,
            val_test_MASE_denominator[best_h[mixed_method]['MAPE']],
            numberOfSelectedCounties, target_name, mode='test')
        table_data.append([best_h[mixed_method]['MAPE'], best_c[mixed_method]['MAPE'], round(meanAbsoluteError, 2),
                           round(percentageOfAbsoluteError, 2),
                           round(adj_r_squared, 2), round(second_error, 2), round(meanAbsoluteScaledError, 2)])
        country_table_data.append(
            [best_h[mixed_method]['MAPE'], best_c[mixed_method]['MAPE'], round(country_errors['meanAbsoluteError'], 2),
             round(country_errors['percentageOfAbsoluteError'], 2), round(country_errors['adj_r_squared'], 2),
             round(country_errors['second_error'], 2), round(country_errors['MASE'], 2)])
    table_name = 'table_of_best_test_results'
    plot_table(table_data, columns_table_t, methods, table_name, mode='test')
    table_name = 'table_of_country_best_test_results'
    plot_table(country_table_data, columns_table_t, methods, table_name, mode='test')
    push('a new table added')
    # keep combined real + predicted series per mixed method for final plots
    for method in mixed_methods:
        method_real_pred_df = y_train[method].append(y_test[method])
        prediction = list(y_prediction_train[method]) + list(y_prediction[method])
        method_real_pred_df['prediction'] = prediction
        df_for_prediction_plot[method] = method_real_pred_df
    if pivot != 'country' :
        real_prediction_plot(df_for_prediction_plot, r, test_size, target_name, target_mode, best_h, maxHistory,
                             spatial_mode, methods, future_mode, numberOfSelectedCounties)
    # mail the test results
    selected_for_email = [test_address + '/tables', test_address + '/all_errors/NN', test_address + '/all_errors/KNN',
                          test_address + '/plots_of_real_prediction_values']
    # zip_file_name = 'test results for h =' + str(maxHistory) + ' #counties=' + str(numberOfSelectedCountiesname)
    # make_zip(selected_for_email, zip_file_name)
    # send_email(zip_file_name + '.zip')
    # # save the entire session
    # filename = env_address + 'test.out'
    # my_shelf = shelve.open(filename, 'n')  # 'n' for new
    # for key in dir():
    #     try:
    #         my_shelf[key] = locals()[key]
    #     except:
    #         print('ERROR shelving: {0}'.format(key))
    # my_shelf.close()
########################################################### main
def main(maxHistory):
    """Run the validation sweep over history lengths 1..maxHistory.

    For each history length h this builds the historical dataset, trains
    the plain models ('GBM', 'GLM', 'KNN', 'NN') and the mixed models
    ('MM_GLM', 'MM_NN') in parallel over growing covariate subsets,
    tracks the best (h, c) per method and error metric on the validation
    split, and finally runs test_process with the best configurations.

    NOTE(review): relies on module-level globals set in the __main__
    block (r, test_size, spatial_mode, target_mode, data_address, pivot,
    end_date, maxC, force_features, future_mode, numberOfSelectedCounties,
    models_to_log, *_address, ...) — confirm they are defined before call.
    """
    print("main started")
    history = [i for i in range(1, maxHistory + 1)]
    print("history: ", history)
    methods = ['GBM', 'GLM', 'KNN', 'NN', 'MM_GLM', 'MM_NN']
    none_mixed_methods = ['GBM', 'GLM', 'KNN', 'NN']
    # none_mixed_methods = ['GBM']
    mixed_methods = ['MM_GLM', 'MM_NN']
    target_name = 'death'
    # h=0 base data is used only to list covariates and size the parallel runner
    base_data = makeHistoricalData(0, r, test_size, target_name, 'mrmr', spatial_mode, target_mode, data_address,
                                   future_features, pivot, end_date)
    print("base data before clean shape: ", base_data.shape)
    base_data_before_clean_columns = base_data.columns.values
    base_data = clean_data(base_data, numberOfSelectedCounties, spatial_mode)
    print("base data after clean shape: ", base_data.shape)
    print("base data cleaned columns: ",
          [c for c in base_data_before_clean_columns if c not in base_data.columns.values])
    covariates_names = list(base_data.columns)
    covariates_names.remove('Target')
    covariates_names.remove('date of day t')
    covariates_names.remove('county_fips')
    # covariates_names.remove('daily-country-test-per-1000 t')
    numberOfCovariates = len(covariates_names)
    print('number of covariates: ', numberOfCovariates)
    # print(covariates_names)
    # predictions keyed by (h, c) = (history length, covariate count) per method
    y_prediction = {'GBM': {}, 'GLM': {}, 'KNN': {}, 'NN': {}, 'MM_GLM': {}, 'MM_NN': {}}
    y_prediction_train = {'GBM': {}, 'GLM': {}, 'KNN': {}, 'NN': {}, 'MM_GLM': {}, 'MM_NN': {}}
    error_names = ['MAPE', 'MAE', 'adj-R2', 'sec', 'MASE']
    complete_error_names = {'MAPE': 'Percentage Of Absolute Error', 'MAE': 'Mean Absolute Error',
                            'MASE': 'Mean Absolute Scaled Error', 'adj-R2': 'Adjusted R Squared Error',
                            'sec': 'Sum Of Absolute Error'}
    validation_errors = {error: {method: {} for method in methods} for error in error_names}
    # sentinel "infinity" so the first observed error always improves
    minError = {method: {error: int(1e10) for error in error_names} for method in methods}
    best_h = {method: {error: 0 for error in error_names} for method in methods}
    best_c = {method: {error: 0 for error in error_names} for method in methods}
    # best_loss = {method: None for method in ['GBM', 'NN', 'MM_NN']}
    best_loss = {'GBM': 'poisson', 'MM_NN': 'MeanAbsoluteError', 'NN': 'poisson'}
    columns_table_t = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
                       'adjusted R squared error',
                       'second error', 'mean absolute scaled error']  # table columns names
    columns_table = ['best_h', 'best_c', 'mean absolute error', 'percentage of absolute error',
                     'adjusted R squared error',
                     'sum of absolute error', 'mean absolute scaled error']  # table columns names
    train_val_MASE_denominator = {h: {key: None for key in ['county', 'country']} for h in history}
    val_test_MASE_denominator = {h: {key: None for key in ['county', 'country']} for h in history}
    historical_X_train = {}  # X_train for best h and c
    historical_X_test = {}  # X_test for best h and c
    historical_y_train = {}  # y_train for best h and c
    historical_y_test = {}  # y_test for best h and c
    historical_y_train_date = {}  # y_train for best h and c with dates info
    historical_y_test_date = {}  # y_test for best h and c with dates info
    parallel_outputs = {}
    for h in history:
        print(100 * "#")
        print("h =", h)
        data = makeHistoricalData(h, r, test_size, target_name, 'mrmr', spatial_mode, target_mode, data_address,
                                  future_features, pivot, end_date)
        print("data before clean shape:", data.shape)
        # pre-process and split the data, 'date's have dates info
        data = clean_data(data, numberOfSelectedCounties, spatial_mode)
        print("data after clean shape:", data.shape)
        X_train_train_to_use = {method: None for method in methods}
        X_train_val_to_use = {method: None for method in methods}
        X_test_to_use = {method: None for method in methods}
        X_train_train, X_train_val, X_test, y_train_train_date, y_train_val_date, y_test_date = preprocess(data,
                                                                                                           spatial_mode,
                                                                                                           1)
        # print([c for c in data.columns.values if c not in X_train_train])
        # print("X_train_train shape:", X_train_train.shape)
        # print("X_train_val shape:", X_train_val.shape)
        # print("X_test shape:", X_test.shape)
        # print("y_train_train_date shape:", y_train_train_date.shape)
        # print("y_train_val_date shape:", y_train_val_date.shape)
        # print("y_test_date shape:", y_test_date.shape)
        # print("y columns:", y_test_date.columns.values)
        if target_mode not in ['regular',
                               'weeklyaverage']:  # we need regular data to return predicted values to first state
            regular_data = makeHistoricalData(h, r, test_size, target_name, 'mrmr', spatial_mode, 'regular', data_address,
                                              future_features, pivot, end_date)
            regular_data = clean_data(regular_data, numberOfSelectedCounties, spatial_mode)
        else:
            regular_data = data
        print("regular_data shape:", regular_data.shape)
        # MASE denominators are computed once per h, for county- and country-level errors
        train_val_MASE_denominator[h]['county'], val_test_MASE_denominator[h]['county'], train_val_MASE_denominator[h][
            'country'], val_test_MASE_denominator[h]['country'] = mase_denominator(r, h,
                                                                                   regular_data, target_name,
                                                                                   target_mode,
                                                                                   numberOfSelectedCounties,
                                                                                   spatial_mode)
        # print(train_val_MASE_denominator)
        for method in methods:
            X_train_train_to_use[method] = X_train_train.copy()
            X_train_val_to_use[method] = X_train_val.copy()
            X_test_to_use[method] = X_test.copy()
            if method in models_to_log:
                # make temporal and some fixed covariates logarithmic
                negative_features = ['temperature', 'Retail', 'Grocery', 'Parks', 'Transit', 'Workplace', 'Residential']
                for covar in covariates_names:
                    if (' t' in covar) and (covar.split(' ')[0] not in negative_features):
                        # print(covar)
                        X_train_train_to_use[method][covar] = np.log(
                            (X_train_train_to_use[method][covar] + 1).astype(float))
                        X_train_val_to_use[method][covar] = np.log(
                            (X_train_val_to_use[method][covar] + 1).astype(float))
                        X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
                fix_log_list = ['total_population', 'population_density', 'area', 'median_household_income',
                                'houses_density', 'airport_distance', 'deaths_per_100000']
                for covar in fix_log_list:
                    if covar in covariates_names:
                        X_train_train_to_use[method][covar] = np.log(
                            (X_train_train_to_use[method][covar] + 1).astype(float))
                        X_train_val_to_use[method][covar] = np.log(
                            (X_train_val_to_use[method][covar] + 1).astype(float))
                        X_test_to_use[method][covar] = np.log((X_test_to_use[method][covar] + 1).astype(float))
        y_train_date = (pd.DataFrame(y_train_train_date).append(pd.DataFrame(y_train_val_date))).reset_index(drop=True)
        y_train_train = np.array(y_train_train_date['Target']).reshape(-1)
        y_train_val = np.array(y_train_val_date['Target']).reshape(-1)
        y_test = np.array(y_test_date['Target']).reshape(-1)
        y_train = np.array(
            (pd.DataFrame(y_train_train).append(pd.DataFrame(y_train_val))).reset_index(drop=True)).reshape(-1)
        print("y_train shape:", y_train.shape)
        print("y_test shape:", y_test.shape)
        # find best loss
        # print(best_loss)
        # if (h == 1):
        #     best_loss = update_best_loss('none_mixed_model', spatial_mode, None, best_loss, X_train_train_to_use,
        #                                  X_train_val_to_use, \
        #                                  y_train_train, y_train_val, None, None,
        #                                  data.columns.drop(['Target', 'date of day t', 'county_fips']), \
        #                                  numberOfCovariates, maxC)
        print(best_loss)
        print('force_features len: ', len(force_features))
        covariates_list = []
        covariates_list = force_features.copy()
        print('covariates_list len:', len(covariates_list))
        # covariates are sorted by their correlation with Target. We start from the first important covariate and
        # in each loop we add the next important one
        loom = ProcessLoom(max_runner_cap=len(base_data.columns) * len(none_mixed_methods) + 5)
        indx_c = 0
        for c in covariates_names:  # iterate through sorted covariates
            indx_c += 1
            # print('h=', h, ' c=', indx_c)
            for covariate in data.columns:  # add all historical covariates of this covariate and create a feature
                pure_name = c.split(' ')[0]
                cov_temp = covariate.split(' ')[0]
                if pure_name == cov_temp and pure_name not in force_features:
                    covariates_list.append(covariate)
            # print('covariates_list:', covariates_list)
            for method in none_mixed_methods:
                X_train_train_temp = X_train_train_to_use[method][covariates_list]
                X_train_val_temp = X_train_val_to_use[method][covariates_list]
                # print(X_train_train_temp.columns.values)
                # print('X_train_train_temp shape:', X_train_train_temp.shape)
                # print('X_train_val_temp shape:', X_train_val_temp.shape)
                # print('y_train_train shape:', y_train_train.shape)
                # print('y_train_val shape:', y_train_val.shape)
                loom.add_function(parallel_run,
                                  [method, X_train_train_temp, X_train_val_temp, y_train_train, y_train_val, best_loss,
                                   indx_c])
            if indx_c >= maxC:
                break
        print('covariates_list len:', len(covariates_list))
        print('covariates_list:', covariates_list)
        # run the processes in parallel
        parallel_outputs['non_mixed'] = loom.execute()
        # unpack outputs in the same (c, method) order they were queued
        ind = 0
        for c in range(1, numberOfCovariates + 1):
            for method in none_mixed_methods:
                y_prediction[method][(h, c)], y_prediction_train[method][(h, c)] = parallel_outputs['non_mixed'][ind][
                    'output']
                ind += 1
            if c == maxC:
                break
        # for method in none_mixed_methods:
        #     print(y_prediction[method].keys())
        #     print(np.isnan(y_prediction[method][(h, 4)]))
        #     print(y_prediction[method][(h, 4)].shape)
        # # save the entire session for each h and c
        # filename = env_address + 'validation.out'
        # my_shelf = shelve.open(filename, 'n')  # 'n' for new
        # for key in dir():
        #     try:
        #         my_shelf[key] = locals()[key]
        #     except:
        #         print('ERROR shelving: {0}'.format(key))
        # my_shelf.close()
        # find best loss
        # if h == 1:
        #     best_loss = update_best_loss('mixed_model', spatial_mode, None, best_loss, None, None, y_train_train, \
        #                                  y_train_val, y_prediction_train, y_prediction, None, \
        #                                  numberOfCovariates, maxC)
        print(best_loss)
        # initiate loom for parallel processing
        loom = ProcessLoom(max_runner_cap=len(base_data.columns) * len(mixed_methods) + 5)
        indx_c = 0
        for c in range(1, numberOfCovariates + 1):
            indx_c += 1
            for mixed_method in mixed_methods:
                y_predictions_test, y_predictions_train = [], []
                # Construct the outputs for the testing dataset of the 'MM' methods
                y_predictions_test.extend([y_prediction['GBM'][(h, c)], y_prediction['GLM'][(h, c)],
                                           y_prediction['KNN'][(h, c)], y_prediction['NN'][(h, c)]])
                y_prediction_test_np = np.array(y_predictions_test).reshape(len(y_predictions_test), -1)
                X_test_mixedModel = pd.DataFrame(y_prediction_test_np.transpose())
                # Construct the outputs for the training dataset of the 'MM' methods
                y_predictions_train.extend([y_prediction_train['GBM'][(h, c)], y_prediction_train['GLM'][(h, c)],
                                            y_prediction_train['KNN'][(h, c)], y_prediction_train['NN'][(h, c)]])
                y_prediction_train_np = np.array(y_predictions_train).reshape(len(y_predictions_train), -1)
                X_train_mixedModel = pd.DataFrame(y_prediction_train_np.transpose())
                loom.add_function(mixed_parallel_run,
                                  [mixed_method, X_train_mixedModel, X_test_mixedModel, y_train_train, y_train_val,
                                   best_loss])
            if c == maxC:
                break
        # run the processes in parallel
        parallel_outputs['mixed'] = loom.execute()
        ind = 0
        for c in range(1, numberOfCovariates + 1):
            for mixed_method in mixed_methods:
                y_prediction[mixed_method][(h, c)], y_prediction_train[mixed_method][(h, c)] = \
                    parallel_outputs['mixed'][ind]['output']
                y_prediction[mixed_method][(h, c)] = np.array(y_prediction[mixed_method][(h, c)]).ravel()
                y_prediction_train[mixed_method][(h, c)] = np.array(y_prediction_train[mixed_method][(h, c)]).ravel()
                ind += 1
            if c == maxC:
                break
        # for method in mixed_methods:
        #     print(y_prediction[method].keys())
        #     print(np.isnan(y_prediction[method][(h, 4)]))
        #     print(y_prediction[method][(h, 4)].shape)
        # # save the entire session for each h and c
        # filename = env_address + 'validation.out'
        # my_shelf = shelve.open(filename, 'n')  # 'n' for new
        # for key in dir():
        #     try:
        #         my_shelf[key] = locals()[key]
        #     except:
        #         print('ERROR shelving: {0}'.format(key))
        # my_shelf.close()
        number_of_improved_methods = 0  # we count number_of_improved_methods to run test if no method have improved in current h
        indx_c = 0
        print('force_features len: ', len(force_features))
        covariates_list = []
        covariates_list = force_features.copy()
        covariates_list.append('county_fips')
        covariates_list.append('date of day t')
        print('covariates_list len: ', len(covariates_list))
        # covariates_list = ['county_fips', 'date of day t']
        # covariates_list.extend(force_features.copy())
        # score every (h, c) configuration on the validation split and update the bests
        for c in covariates_names:  # iterate through sorted covariates
            indx_c += 1
            for covariate in data.columns:  # add all historical covariates of this covariate and create a feature
                pure_name = c.split(' ')[0]
                cov_temp = covariate.split(' ')[0]
                if pure_name == cov_temp and pure_name not in force_features:
                    covariates_list.append(covariate)
            y_val = np.array(y_train_val_date['Target']).reshape(-1)
            for method in methods:
                X_train_train_temp = X_train_train_to_use[method][covariates_list]
                X_train_val_temp = X_train_val_to_use[method][covariates_list]
                X_test_temp = X_test_to_use[method][covariates_list]
                validation_errors['MAE'][method][(h, indx_c)], validation_errors['MAPE'][method][(h, indx_c)], \
                validation_errors['adj-R2'][method][(h, indx_c)], validation_errors['sec'][method][(h, indx_c)], \
                validation_errors['MASE'][method][(h, indx_c)], country_errors = \
                    get_errors(h, indx_c, method, y_prediction[method][(h, indx_c)],
                               y_prediction_train[method][(h, indx_c)], y_train_val_date,
                               y_train_train_date, regular_data, train_val_MASE_denominator[h],
                               numberOfSelectedCounties, target_name, mode='val')
                # find best errors
                for error in error_names:
                    if validation_errors[error][method][(h, indx_c)] < minError[method][error]:
                        minError[method][error] = validation_errors[error][method][(h, indx_c)]
                        best_h[method][error] = h
                        # we should not consider force_features
                        best_c[method][error] = indx_c
                        if error == 'MAPE':
                            number_of_improved_methods += 1
                        # snapshot the winning train/test matrices (MAPE, non-mixed methods only)
                        if error == 'MAPE' and method != 'MM_GLM' and method != 'MM_NN':
                            historical_X_train[method] = (X_train_train_temp.append(X_train_val_temp)).reset_index(
                                drop=True)
                            historical_X_test[method] = X_test_temp
                            historical_y_train[method] = y_train
                            historical_y_test[method] = y_test
                            historical_y_train_date[method] = y_train_date
                            historical_y_test_date[method] = y_test_date
            if indx_c == maxC:
                break
        # # save the entire session for each h and c
        # filename = env_address + 'validation.out'
        # my_shelf = shelve.open(filename, 'n')  # 'n' for new
        # for key in dir():
        #     try:
        #         my_shelf[key] = locals()[key]
        #     except:
        #         print('ERROR shelving: {0}'.format(key))
        # my_shelf.close()
        # # save the entire session for each h
        # filename = env_address + 'validation.out'
        # my_shelf = shelve.open(filename, 'n')  # 'n' for new
        # for key in dir():
        #     try:
        #         my_shelf[key] = locals()[key]
        #     except:
        #         print('ERROR shelving: {0}'.format(key))
        # my_shelf.close()
        # push the file of outputs
        push('logs of h=' + str(h) + ' added')
        # we run test if none of models have improved in current h or if we passed half of maxhistory
        # NOTE(review): condition is hard-coded to -1, so this early-exit branch is disabled
        if (number_of_improved_methods == -1):  ###########################
            print('jump to test process')
            test_process(h, r, test_size, target_name, spatial_mode, target_mode, best_h, best_c, historical_X_train, \
                         historical_X_test, historical_y_train_date, historical_y_test_date, best_loss, \
                         numberOfSelectedCounties, covariates_names, maxHistory, train_val_MASE_denominator, \
                         val_test_MASE_denominator, future_mode, test_address, env_address, mail_address)
    # plot table for best results
    table_data = []
    for method in methods:
        table_data.append([best_h[method]['MAPE'], best_c[method]['MAPE'], round(minError[method]['MAE'], 2),
                           round(minError[method]['MAPE'], 2), round(minError[method]['adj-R2'], 2),
                           round(minError[method]['sec'], 2), round(minError[method]['MASE'], 2)])
    table_name = 'tabel_of_best_validation_results'
    plot_table(table_data, columns_table, methods, table_name, mode='val')
    # plot the results of methods on validation set
    for error in error_names:
        plot_results(3, 2, numberOfCovariates, methods, history, validation_errors[error], complete_error_names[error])
    # # mail the validation results
    # selected_for_email = [validation_address]
    # zip_file_name = 'validation results for h =' + str(maxHistory) + ' #counties=' + str(numberOfSelectedCountiesname)
    # make_zip(selected_for_email, zip_file_name)
    # # send_email(zip_file_name + '.zip')
    push('plots added')
    ################################################################################################################# test zone
    test_process(h, r, test_size, target_name, spatial_mode, target_mode, best_h, best_c, historical_X_train, \
                 historical_X_test, historical_y_train_date, historical_y_test_date, best_loss, \
                 numberOfSelectedCounties, covariates_names, maxHistory, train_val_MASE_denominator, \
                 val_test_MASE_denominator, future_mode, test_address, env_address, mail_address)
    print(best_loss)
if __name__ == "__main__":
    # CLI: first argument is the data cut-off date (integer, e.g. YYYYMMDD-style code)
    end_date = int(argv[1])
    begin = time.time()
    future_mode = False
    future_features = []
    # with a long enough horizon, social-distancing "future" features become available
    if r >= 28:
        # future_mode = True
        future_features = ['social-distancing-travel-distance-grade', 'social-distancing-encounters-grade',
                           'social-distancing-total-grade']  # sorted by their mrmr rank
    # weekly modes work in week units, so convert the day-based parameters
    if target_mode in ['weeklyaverage','augmentedweeklyaverage']:
        r //= 7
        maxHistory //= 7
        test_size //= 7
    force_features = []
    force_mode = 0  # with force_mode we determine how many future feature have to be forced (be used in all iterations)
    if future_mode:
        for f in range(force_mode):
            force_features.append('future-' + future_features[f])
    # make directories for saving the results
    validation_address = './' + str(end_date) + '/results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
        maxHistory) + '/validation/'
    test_address = './' + str(end_date) + '/results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
        maxHistory) + '/test/'
    env_address = './' + str(end_date) + '/results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
        maxHistory) + '/session_parameters/'
    mail_address = './' + str(end_date) + '/results/counties=' + str(numberOfSelectedCountiesname) + ' max_history=' + str(
        maxHistory) + '/email'
    if not os.path.exists(mail_address):
        os.makedirs(mail_address)
    if not os.path.exists(test_address):
        os.makedirs(test_address)
    if not os.path.exists(validation_address):
        os.makedirs(validation_address)
    if not os.path.exists(env_address):
        os.makedirs(env_address)
    push('new folders added')
    models_to_log = ['NN', 'GLM', 'GBM']  #, 'KNN' # models we want to make the features logarithmic for them, we remove KNN
    main(maxHistory)
    end = time.time()
    push('final results added')
    print("The total time of execution in minutes: ", round((end - begin) / 60, 2))
| [
"noreply@github.com"
] | colebrookson.noreply@github.com |
a1b24ec0a1cebfe884254cfc39cf4b663f6e3911 | 15da3bb0e353874b623b0876553f2d01c9430da6 | /security.py | 274090c6804f712a6a0be8766c207ab6af1611aa | [] | no_license | EliRibble/OSPi | 55c34385ebb2b09e9bd394dd8b78b3c84ce90cca | 46a11a7f65db91fe99bdca577cdee69bf7973913 | refs/heads/master | 2021-01-15T15:15:33.548863 | 2015-05-20T17:24:19 | 2015-05-20T17:24:19 | 35,950,721 | 0 | 0 | null | 2015-05-20T13:48:29 | 2015-05-20T13:48:28 | Python | UTF-8 | Python | false | false | 218 | py | import random
from web.session import sha1
def password_salt():
    """Return a 64-character random salt of ASCII characters 33..127.

    Security fix: the module-level ``random`` functions use the Mersenne
    Twister, whose output is predictable; salts (and anything else
    security-adjacent) should come from the OS CSPRNG, exposed here via
    ``random.SystemRandom``. ``range`` replaces the Python-2-only
    ``xrange`` and behaves identically for this use on both versions.
    """
    rng = random.SystemRandom()
    return "".join(chr(rng.randint(33, 127)) for _ in range(64))
def password_hash(password, salt):
    # Returns the hex digest of sha1(password + salt).
    # NOTE(review): SHA-1 is weak for password storage; a stretching KDF
    # (bcrypt/PBKDF2/scrypt) would be preferable, but swapping it here
    # would invalidate every already-stored hash — migrate deliberately.
    return sha1(password + salt).hexdigest()
| [
"junk@theribbles.org"
] | junk@theribbles.org |
f9186d755d160b923eb3e8c35cb07657dd2d757d | 61af06a08acd7dcad97ee0ddb86504910f9b1e45 | /decision_maker/urls.py | 4b981afd8fa6793a2fbf82fe62b016f5ee249208 | [] | no_license | C-Shi/Decision_Maker | 9c18d2b69b9f6507569852148ecaa973131bb67e | c966d652274c4a936790caf8043fa0747bbebb9b | refs/heads/master | 2020-04-10T16:21:26.014609 | 2018-12-14T09:25:48 | 2018-12-14T09:25:48 | 161,142,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | """decision_maker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in Django admin site
    path('', include('decision.urls'))  # all remaining routes are delegated to the 'decision' app
]
| [
"cshi@ualberta.ca"
] | cshi@ualberta.ca |
526a6eeb29fc2589583002f2006648e788fe06aa | e6ba1dde1f21e4817215668905565edc4616fff8 | /build/gscam/catkin_generated/pkg.develspace.context.pc.py | 1b0a36804ac16f9e95293bc043d8d3ee606623a4 | [] | no_license | shu-98/catkin_ws | dde162026e114dd6e96a90994e6699ac38e85e68 | 8b1094ee815227f3ca766db73103ae2306a948c7 | refs/heads/master | 2020-07-01T15:14:38.537402 | 2019-12-24T11:33:06 | 2019-12-24T11:33:06 | 201,206,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sou/catkin_ws/src/gscam/include;/usr/include/gstreamer-1.0;/usr/lib/x86_64-linux-gnu/gstreamer-1.0/include;/usr/include/glib-2.0;/usr/lib/x86_64-linux-gnu/glib-2.0/include".split(';') if "/home/sou/catkin_ws/src/gscam/include;/usr/include/gstreamer-1.0;/usr/lib/x86_64-linux-gnu/gstreamer-1.0/include;/usr/include/glib-2.0;/usr/lib/x86_64-linux-gnu/glib-2.0/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;image_transport;sensor_msgs;camera_calibration_parsers;camera_info_manager".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgscam;-lgstapp-1.0;-lgstbase-1.0;-lgstreamer-1.0;-lgobject-2.0;-lglib-2.0".split(';') if "-lgscam;-lgstapp-1.0;-lgstbase-1.0;-lgstreamer-1.0;-lgobject-2.0;-lglib-2.0" != "" else []
PROJECT_NAME = "gscam"
PROJECT_SPACE_DIR = "/home/sou/catkin_ws/devel"
PROJECT_VERSION = "1.0.1"
| [
"jdayeissaw@outlook.jp"
] | jdayeissaw@outlook.jp |
c4a35b1184ddc9951b0bf9e8a1ceeaccd2c708a0 | b951ee6d2de741e84f7bfe2dc5a66853c1d5cd4e | /Array/LinkedInstead.py | 5960eaa4dc231e2a7ddbf5349c752a8df806be84 | [] | no_license | Chongkai-Ma/Fundamentals-of-Python-Data-Structures | e78569f79dfad16ebc18121c250c25d91bb94754 | 170e58d23d9ee73c53b2ab596d7fcfc3e63eccc9 | refs/heads/master | 2020-09-21T03:46:04.980838 | 2019-12-04T14:18:27 | 2019-12-04T14:18:27 | 224,669,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/python3
from node import Node
head = None
for count in range(1, 10):
head = Node(count, head)
probe = head
targetItem = 5
while probe != None and targetItem != probe.data:
probe = probe.next
if probe != None:
probe.data = 88888
print ("The item has been changed")
else:
print ("The item has not been found")
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
da2c797441188d198de8c57c9a59473cbd5ed769 | c36fdb4d07aeaf0b1e568c45e8020b34c6fa5aca | /usps_integration/models/__init__.py | 136e6ed2d4acabb36fa173d1a7051621eeeb8395 | [] | no_license | planetodooofficial/usps_integration_v13 | c6cf33c49d753c44831d3f6e1da10271d37f0e82 | ad69aa442b0ee65d1b7589b7f7ca409313f908aa | refs/heads/master | 2022-12-24T10:05:39.397215 | 2020-09-30T19:14:55 | 2020-09-30T19:14:55 | 258,160,591 | 0 | 3 | null | 2020-09-30T19:14:56 | 2020-04-23T09:52:32 | Python | UTF-8 | Python | false | false | 1,098 | py | # -*- encoding: utf-8 -*-
##############################################################################
# Copyright (c) 2015 - Present Planet Odoo. All Rights Reserved
# Author: [Planet Odoo]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at:
# <http://www.gnu.org/licenses/gpl.html>.
#
##############################################################################
from . import shipping_conf
from . import delivery
from . import product
from . import sale
from . import shipping_endicia
from . import endicia
from . import shipping
from . import stock
| [
"https://Anjeel@bitbucket.org"
] | https://Anjeel@bitbucket.org |
7e57dcbf4f48f8fcfe88cb68a3ebfbe549f6d2ab | e944d912def98d7546d17c4303169f52517348ca | /interview_questions/basic/sum_of_rows_1.py | 046b5461ff003408ecb4ae700d527c671bb16873 | [] | no_license | balajich/python-crash-course | 0710854de3cd04695f969cbfe774ef336f707f48 | e62b578f7dc93f6a47fbae00dac2d496b985fe8d | refs/heads/master | 2021-07-30T16:00:45.392119 | 2021-07-29T11:41:49 | 2021-07-29T11:41:49 | 192,034,679 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | '''
Take matrix as input and returns sum of rows`
'''
import numpy as np
def rowsum(matrix):
    """
    :param matrix (list): A list of lists where each inner list represents a row.
    :returns: (list) A list containing the sum of each row.
    """
    # Comprehension form of the row-wise reduction; sum([]) == 0 covers empty rows.
    return [sum(row) for row in matrix]


print(rowsum([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))  # Should print [6, 15, 24]
"balaji.chopparapu@gmail.com"
] | balaji.chopparapu@gmail.com |
861df4598ba1fd37b569d1606e7b8b78e823be62 | 630b7ab30623f8a83d3ecc8e1ce800db4114df5b | /pf-net/model_PFNet.py | 54120c373b43d986775a8e8f8ac68c541180237e | [
"Apache-2.0"
] | permissive | 63445538/Contrib | d0c6d38a82db66a859403338a2921cbf763f7960 | 8860692e341020bb4332ff9f59b17a0c8cd9c748 | refs/heads/master | 2022-12-07T04:57:21.249293 | 2020-09-07T04:47:28 | 2020-09-07T04:47:28 | 293,422,878 | 0 | 0 | Apache-2.0 | 2020-09-07T04:36:06 | 2020-09-07T04:36:05 | null | UTF-8 | Python | false | false | 10,782 | py | import math
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid import ParamAttr
class Conv1D(fluid.dygraph.Layer):
    """1-D convolution expressed through a (1, k) Conv2D.

    The input (N, C, L) is temporarily given a dummy height axis so the
    underlying Conv2D can slide only along the length dimension; the axis
    is squeezed away again on the way out. Weights and bias are drawn
    uniformly from [-1/sqrt(fan_in), 1/sqrt(fan_in)], matching the usual
    conv default.
    """

    def __init__(self,
                 prefix,
                 num_channels=3,
                 num_filters=1,
                 size_k=1,
                 padding=0,
                 groups=1,
                 act=None):
        super(Conv1D, self).__init__()
        # Uniform-init bound: 1/sqrt(fan_in), fan_in = channels * kernel width * 1
        bound = 1. / math.sqrt(num_channels * size_k * 1)
        weight_attr = ParamAttr(
            name=prefix + "_w",
            initializer=fluid.initializer.Uniform(
                low=-bound, high=bound))
        offset_attr = ParamAttr(
            name=prefix + "_b",
            initializer=fluid.initializer.Uniform(
                low=-bound, high=bound))
        # Keep the attribute name `_conv2d` — it is part of the state-dict keys.
        self._conv2d = fluid.dygraph.Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=(1, size_k),
            stride=1,
            padding=(0, padding),
            groups=groups,
            act=act,
            param_attr=weight_attr,
            bias_attr=offset_attr)

    def forward(self, x):
        # (N, C, L) -> (N, C, 1, L) -> conv -> (N, C', 1, L') -> (N, C', L')
        widened = fluid.layers.unsqueeze(input=x, axes=[2])
        convolved = self._conv2d(widened)
        return fluid.layers.squeeze(input=convolved, axes=[2])
class Convlayer(fluid.dygraph.Layer):
    """PointNet-style feature extractor for one point-cloud scale.

    Lifts each point through shared convolutions (the first conv has a
    (1, 3) kernel that consumes the xyz axis, the rest are 1x1), then
    max-pools over all `point_scales` points at four depths and
    concatenates the pooled 1024/512/256/128-d maps along the channel
    axis (1920 channels total).
    """
    def __init__(self, point_scales):
        # point_scales: number of points in the input cloud at this scale
        # (also the pooling window, so pooling collapses the point axis).
        super(Convlayer, self).__init__()
        self.point_scales = point_scales
        self.conv1 = Conv2D(1, 64, (1, 3))
        self.conv2 = Conv2D(64, 64, 1)
        self.conv3 = Conv2D(64, 128, 1)
        self.conv4 = Conv2D(128, 256, 1)
        self.conv5 = Conv2D(256, 512, 1)
        self.conv6 = Conv2D(512, 1024, 1)
        self.maxpool = Pool2D(pool_size=(self.point_scales, 1), pool_stride=1)
        # ReLU is fused into the BatchNorm layers via act='relu'
        self.bn1 = BatchNorm(64, act='relu')
        self.bn2 = BatchNorm(64, act='relu')
        self.bn3 = BatchNorm(128, act='relu')
        self.bn4 = BatchNorm(256, act='relu')
        self.bn5 = BatchNorm(512, act='relu')
        self.bn6 = BatchNorm(1024, act='relu')

    def forward(self, x):
        # x: a point cloud; a channel axis is added so conv1 sees (N, 1, points, 3)
        x = fluid.layers.unsqueeze(x, 1)
        # x = fluid.layers.relu(self.conv1(x))
        # x = fluid.layers.relu(self.conv2(x))
        # x_128 = fluid.layers.relu(self.conv3(x))
        # x_256 = fluid.layers.relu(self.conv4(x_128))
        # x_512 = fluid.layers.relu(self.conv5(x_256))
        # x_1024 = fluid.layers.relu(self.conv6(x_512))
        x = self.bn1(self.conv1(x))
        x = self.bn2(self.conv2(x))
        x_128 = self.bn3(self.conv3(x))
        x_256 = self.bn4(self.conv4(x_128))
        x_512 = self.bn5(self.conv5(x_256))
        x_1024 = self.bn6(self.conv6(x_512))
        # max-pool over the point axis at each depth, dropping the pooled axis
        x_128 = fluid.layers.squeeze(input=self.maxpool(x_128), axes=[2])
        x_256 = fluid.layers.squeeze(input=self.maxpool(x_256), axes=[2])
        x_512 = fluid.layers.squeeze(input=self.maxpool(x_512), axes=[2])
        x_1024 = fluid.layers.squeeze(input=self.maxpool(x_1024), axes=[2])
        L = [x_1024, x_512, x_256, x_128]
        # concatenate pooled features along the channel axis -> 1920 channels
        x = fluid.layers.concat(L, 1)
        return x
class Latentfeature(fluid.dygraph.Layer):
    """Fuse per-scale Convlayer features into one latent vector.

    Runs one Convlayer per input scale, concatenates the three pooled
    feature maps, and squashes the scale axis with a 3->1 channel 1-D
    convolution followed by BatchNorm+ReLU.
    """
    def __init__(self, num_scales, each_scales_size, point_scales_list):
        super(Latentfeature, self).__init__()
        self.num_scales = num_scales
        self.each_scales_size = each_scales_size
        # point_scales_list[i]: number of points in the i-th input scale
        self.point_scales_list = point_scales_list
        self.Convlayers1 = Convlayer(point_scales=self.point_scales_list[0])
        self.Convlayers2 = Convlayer(point_scales=self.point_scales_list[1])
        self.Convlayers3 = Convlayer(point_scales=self.point_scales_list[2])
        self.conv1 = Conv1D(prefix='lf', num_channels=3, num_filters=1, size_k=1, act=None)
        self.bn1 = BatchNorm(1, act='relu')

    def forward(self, x):
        # x: list of three point clouds, one per scale
        outs = [self.Convlayers1(x[0]), self.Convlayers2(x[1]), self.Convlayers3(x[2])]
        latentfeature = fluid.layers.concat(outs, 2)
        # put the 3 per-scale maps on the channel axis so conv1 can merge them
        latentfeature = fluid.layers.transpose(latentfeature, perm=[0, 2, 1])
        # latentfeature = fluid.layers.relu(self.conv1(latentfeature))
        latentfeature = self.bn1(self.conv1(latentfeature))
        # drop the singleton channel axis -> (batch, 1920)
        latentfeature = fluid.layers.squeeze(latentfeature, axes=[1])
        return latentfeature
class PointcloudCls(fluid.dygraph.Layer):
    """Classification head on top of the multi-scale latent feature.

    Maps the 1920-d latent vector to k class log-probabilities through
    three BatchNorm+ReLU fully-connected layers and a final linear layer.
    """
    def __init__(self, num_scales, each_scales_size, point_scales_list, k=40):
        # k: number of target classes (default 40, e.g. ModelNet40)
        super(PointcloudCls, self).__init__()
        self.latentfeature = Latentfeature(num_scales, each_scales_size, point_scales_list)
        self.fc1 = Linear(1920, 1024)
        self.fc2 = Linear(1024, 512)
        self.fc3 = Linear(512, 256)
        self.fc4 = Linear(256, k)
        # self.dropout = nn.Dropout(p=0.3)
        self.bn1 = BatchNorm(1024, act='relu')
        self.bn2 = BatchNorm(512, act='relu')
        self.bn3 = BatchNorm(256, act='relu')
    def forward(self, x):
        # x: list of three point clouds (one per scale) — see Latentfeature
        x = self.latentfeature(x)
        x = self.bn1(self.fc1(x))
        x = self.bn2(self.fc2(x))
        x = self.bn3(self.fc3(x))
        # x = self.bn2(self.dropout(self.fc2(x)))
        # x = self.bn3(self.dropout(self.fc3(x)))
        x = self.fc4(x)
        # log-probabilities over the k classes
        return fluid.layers.log_softmax(x, axis=1)
class PFNetG(fluid.dygraph.Layer):
    """PF-Net point-pyramid generator.

    Decodes the 1920-d latent feature into three nested predictions:
    a 64-point skeleton (center1), a 128-point refinement (center2,
    2 offsets per skeleton point), and the final `crop_point_num`-point
    completion (fine, crop_point_num/128 offsets per center2 point).
    """
    def __init__(self, num_scales, each_scales_size, point_scales_list, crop_point_num):
        super(PFNetG, self).__init__()
        self.crop_point_num = crop_point_num
        self.latentfeature = Latentfeature(num_scales, each_scales_size, point_scales_list)
        self.fc1 = Linear(input_dim=1920, output_dim=1024, act='relu')
        self.fc2 = Linear(input_dim=1024, output_dim=512, act='relu')
        self.fc3 = Linear(input_dim=512, output_dim=256, act='relu')
        # per-level feature expanders feeding the three pyramid branches
        self.fc1_1 = Linear(input_dim=1024, output_dim=128 * 512, act='relu')
        self.fc2_1 = Linear(input_dim=512, output_dim=64 * 128, act='relu')
        self.fc3_1 = Linear(input_dim=256, output_dim=64 * 3)
        self.conv1_1 = Conv1D(prefix='g1_1', num_channels=512, num_filters=512, size_k=1, act='relu')
        self.conv1_2 = Conv1D(prefix='g1_2', num_channels=512, num_filters=256, size_k=1, act='relu')
        self.conv1_3 = Conv1D(prefix='g1_3', num_channels=256, num_filters=int((self.crop_point_num * 3) / 128),
                              size_k=1, act=None)
        self.conv2_1 = Conv1D(prefix='g2_1', num_channels=128, num_filters=6, size_k=1, act=None)
    def forward(self, x):
        # x: list of three point clouds (one per scale) — see Latentfeature
        x = self.latentfeature(x)
        x_1 = self.fc1(x)  # 1024
        x_2 = self.fc2(x_1)  # 512
        x_3 = self.fc3(x_2)  # 256
        # coarsest branch: 64 absolute xyz skeleton points
        pc1_feat = self.fc3_1(x_3)
        pc1_xyz = fluid.layers.reshape(pc1_feat, [-1, 64, 3], inplace=False)
        # middle branch: 2 xyz offsets per skeleton point (6 channels x 64)
        pc2_feat = self.fc2_1(x_1)
        pc2_feat_reshaped = fluid.layers.reshape(pc2_feat, [-1, 128, 64], inplace=False)
        pc2_xyz = self.conv2_1(pc2_feat_reshaped)  # 6x64 center2
        # finest branch: crop_point_num/128 xyz offsets per center2 point
        pc3_feat = self.fc1_1(x_1)
        pc3_feat_reshaped = fluid.layers.reshape(pc3_feat, [-1, 512, 128], inplace=False)
        pc3_feat = self.conv1_1(pc3_feat_reshaped)
        pc3_feat = self.conv1_2(pc3_feat)
        pc3_xyz = self.conv1_3(pc3_feat)  # 12x128 fine
        # broadcast-add middle offsets onto skeleton points: (B,64,1,3)+(B,64,2,3) -> 128 pts
        pc1_xyz_expand = fluid.layers.unsqueeze(pc1_xyz, axes=[2])
        pc2_xyz = fluid.layers.transpose(pc2_xyz, perm=[0, 2, 1])
        pc2_xyz_reshaped1 = fluid.layers.reshape(pc2_xyz, [-1, 64, 2, 3], inplace=False)
        pc2_xyz = fluid.layers.elementwise_add(pc1_xyz_expand, pc2_xyz_reshaped1)
        pc2_xyz_reshaped2 = fluid.layers.reshape(pc2_xyz, [-1, 128, 3], inplace=False)
        # broadcast-add fine offsets onto the 128 center2 points -> crop_point_num pts
        pc2_xyz_expand = fluid.layers.unsqueeze(pc2_xyz_reshaped2, axes=[2])
        pc3_xyz = fluid.layers.transpose(pc3_xyz, perm=[0, 2, 1])
        pc3_xyz_reshaped1 = fluid.layers.reshape(pc3_xyz, [-1, 128, int(self.crop_point_num / 128), 3], inplace=False)
        pc3_xyz = fluid.layers.elementwise_add(pc2_xyz_expand, pc3_xyz_reshaped1)
        pc3_xyz_reshaped2 = fluid.layers.reshape(pc3_xyz, [-1, self.crop_point_num, 3], inplace=False)
        return pc1_xyz, pc2_xyz_reshaped2, pc3_xyz_reshaped2  # center1 ,center2 ,fine
# class _netlocalD(nn.Module):
# def __init__(self, crop_point_num):
# super(_netlocalD, self).__init__()
# self.crop_point_num = crop_point_num
# self.conv1 = torch.nn.Conv2d(1, 64, (1, 3))
# self.conv2 = torch.nn.Conv2d(64, 64, 1)
# self.conv3 = torch.nn.Conv2d(64, 128, 1)
# self.conv4 = torch.nn.Conv2d(128, 256, 1)
# self.maxpool = torch.nn.MaxPool2d((self.crop_point_num, 1), 1)
# self.bn1 = nn.BatchNorm2d(64)
# self.bn2 = nn.BatchNorm2d(64)
# self.bn3 = nn.BatchNorm2d(128)
# self.bn4 = nn.BatchNorm2d(256)
# self.fc1 = nn.Linear(448, 256)
# self.fc2 = nn.Linear(256, 128)
# self.fc3 = nn.Linear(128, 16)
# self.fc4 = nn.Linear(16, 1)
# self.bn_1 = nn.BatchNorm1d(256)
# self.bn_2 = nn.BatchNorm1d(128)
# self.bn_3 = nn.BatchNorm1d(16)
#
# def forward(self, x):
# x = F.relu(self.bn1(self.conv1(x)))
# x_64 = F.relu(self.bn2(self.conv2(x)))
# x_128 = F.relu(self.bn3(self.conv3(x_64)))
# x_256 = F.relu(self.bn4(self.conv4(x_128)))
# x_64 = torch.squeeze(self.maxpool(x_64))
# x_128 = torch.squeeze(self.maxpool(x_128))
# x_256 = torch.squeeze(self.maxpool(x_256))
# Layers = [x_256, x_128, x_64]
# x = torch.cat(Layers, 1)
# x = F.relu(self.bn_1(self.fc1(x)))
# x = F.relu(self.bn_2(self.fc2(x)))
# x = F.relu(self.bn_3(self.fc3(x)))
# x = self.fc4(x)
# return x
#
# if __name__ == '__main__':
# def weights_init_normal(m):
# classname = m.__class__.__name__
# if classname.find("Conv2d") != -1:
# torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
# elif classname.find("Conv1d") != -1:
# torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
# elif classname.find("BatchNorm2d") != -1:
# torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
# torch.nn.init.constant_(m.bias.data, 0.0)
# elif classname.find("BatchNorm1d") != -1:
# torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
# torch.nn.init.constant_(m.bias.data, 0.0)
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# convlayer_torch = Convlayer(256)
# convlayer_torch.to(device)
# convlayer_torch.apply(weights_init_normal)
# torch.save({'state_dict': convlayer_torch.state_dict()},
# 'Checkpoints/convlayer_torch.pth')
# convlayer_pp = Convlayer_pp(256)
# input1 = torch.randn(64, 2048, 3)
# input2 = torch.randn(64, 512, 3)
# input3 = torch.randn(64, 256, 3)
# input_ = [input1, input2, input3]
# netG = _netG(3, 1, [2048, 512, 256], 1024)
# output = netG(input_)
# print(output)
| [
"caorui0303@gmail.com"
] | caorui0303@gmail.com |
e59f804daffd73963a42e77a36b5adf3f11b3299 | 2f31a0c287584c4a8db6a2622b15b4a1c613756a | /main.py | ca254ca0eeaa676f6a36eaa36e8897f1d5eb5e1c | [] | no_license | sathiz1993/GoNodePython | 41b214acc16a23a9f85de6d4c704c5ffc9a90897 | d9ae736268ec42d27ceb7b49bfb361c895ad087b | refs/heads/master | 2022-04-21T04:57:40.571992 | 2020-04-19T16:18:02 | 2020-04-19T16:18:02 | 256,806,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import csv
import json

# Load the COVID time-series dump (covid19india-style schema) as UTF-8.
with open('data.json', encoding='utf-8') as f:
    covidData = json.load(f)
dailyCase = covidData['cases_time_series']

# Build one (date, daily confirmed) row per day, after a header row.
csvData = [['Date', 'Case']]
for data in dailyCase:
    csvData.append([data['date'], data['dailyconfirmed']])

# newline='' is required by the csv module for writer files; without it the
# output contains blank interleaved rows on Windows.
with open("output.csv", "w", newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerows(csvData)
"sathiz1993@gmail.com"
] | sathiz1993@gmail.com |
484f6e5666d79ab0841dc247818ac74ca0ac5ac0 | 6c87962838139108bfbc8220c056120d18b894d0 | /main.py | 04b1d8f6b5ccbbd5c6728bc9e2bc6a299a67478c | [] | no_license | cihusss/analyzer | e2144eb0b9be89de74e076b46ba38ae8b5aa2828 | 93d23fbc983a8fc45df007b3859db08d975e0ead | refs/heads/master | 2023-01-03T15:03:06.684214 | 2020-10-27T19:37:16 | 2020-10-27T19:37:16 | 297,706,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from flask import Flask, escape, request, render_template
import skugrind
import json
app = Flask(__name__)
class Params:
    """Simple container for the scrape parameters captured from the web form."""

    def __init__(self):
        # Placeholder values; home() overwrites them when the form is posted.
        self.category, self.product, self.zipcode = 'category', 'product', 'zipcode'
@app.route('/', methods = ['GET', 'POST'])
def home():
    """Render the landing page; on a POST, capture the submitted form fields.

    Side effects: stores the submitted category/product/zipcode in the
    module-global `p` (a Params instance) and triggers `run()`.
    """
    if request.form:
        print(request.form)
        # Captured parameters are shared module-wide through `p`.
        global p
        p = Params()
        p.category = request.form.get('category')
        p.product = request.form.get('product')
        p.zipcode = request.form.get('zipcode')
        print(f'printing Params class {p.category}')
        run()
    return render_template("home.html")
def run():
    # Placeholder hook called after the form parameters are captured; it
    # currently only logs.  Presumably meant to kick off the scrape — TODO confirm.
    print ('Im running here`')
@app.route('/skugrind', methods = ['GET', 'POST'])
def scrape():
    # Triggers the skugrind scraper and reports success with a static string;
    # the scraped payload itself is not returned (see the commented line).
    skugrind.setup()
    return 'Successful scrape!'
    # return json.dumps(scraper.output_data, indent = 4)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=7777, debug=True)
| [
"tmilicevic@triadretail.com"
] | tmilicevic@triadretail.com |
637ed4dde35f2c09ea90b3423775928b81f639db | 815934cc7d4317804bb66632bb9dda7ab590ab7d | /list_of_words.py | 8c48fbc2e4a9e25d40aa2db5057967c3ad9606b4 | [] | no_license | hagom/python4inf | 68671b29c045f4b1ef07c652a7439976b313db70 | af67035558f1798ba53b9db6d13d182c453f6e21 | refs/heads/master | 2023-03-05T14:19:51.187539 | 2021-02-21T15:12:47 | 2021-02-21T15:12:47 | 340,931,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | fname = raw_input("Enter file name: ")
# Python 2 script: reads the file named by `fname` (prompted for above),
# collects every whitespace-separated word, and prints the unique words sorted.
fh = open(fname)
lst = list()
# Accumulate every word from every line.
for line in fh:
    line = line.rstrip()
    line = line.split()
    for word in line:
        lst.append(word)
lst.sort()
# De-duplicate, keeping only the first occurrence of each word.
# NOTE(review): `words in newlst` is an O(n) list scan per word, making this
# loop O(n^2) overall — a set would be linear; left as-is.
newlst = list()
for words in lst:
    if words in newlst: continue
    newlst.append(words)
newlst.sort()
print newlst
"hgonzalezmata@gmail.com"
] | hgonzalezmata@gmail.com |
c209f65866272c1949e0f237f056f599e2d6ccc6 | 61337408377db0968002acb2ee1fa9afdbc80193 | /containers/core/app/ml_engine/libs/trainer.py | 1ec282cb0bfe004de2a173b370218e55ce275810 | [
"MIT"
] | permissive | pystokes/AERecommend | b3cb3f9ca59fab8e322621590115adf7a12d5589 | 18a7a51dfa554bb5b5b6998760be28718311f321 | refs/heads/master | 2022-12-27T08:46:25.720586 | 2020-10-06T08:27:37 | 2020-10-06T08:27:37 | 293,477,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,661 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from logging import getLogger
import math
import torch
import torch.nn as nn
from torch import optim
from utils.common import CommonUtils
from utils.optimizers import Optimizers
logger = getLogger('ML_ENGINE')
class Trainer(object):
    """Runs the training/validation loop for an autoencoder-style model
    (the loss compares the model's output against its own input images)
    and periodically saves weight checkpoints under `save_dir/weights`.
    """
    def __init__(self, model, device, config, save_dir):
        self.model = model
        self.device = device
        self.config = config
        self.save_dir = save_dir
    def run(self, train_loader, validate_loader):
        """Train for `config.train.epoch` epochs with cosine-annealed LR and
        periodic warm restarts (LR reset every T_max epochs once enabled).
        """
        loss_fn = nn.MSELoss()
        optimizer = Optimizers.get_optimizer(self.config.train.optimizer, self.model.parameters())
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.config.train.optimizer.T_max)
        logger.info('Begin training')
        for epoch in range(1, self.config.train.epoch+1):
            # LR decay is held off for the first `wait_decay_epoch` epochs.
            enable_scheduler = (epoch > self.config.train.optimizer.wait_decay_epoch)
            if epoch == self.config.train.optimizer.wait_decay_epoch + 1:
                logger.info(f'Enable learning rate scheduler at Epoch: {epoch:05}')
            # Warm restart: reset LR to its initial value at each T_max boundary.
            if enable_scheduler and (epoch % self.config.train.optimizer.T_max == 1):
                for param_group in optimizer.param_groups:
                    param_group['lr'] = self.config.train.optimizer.lr
            train_loss = self._train(loss_fn, optimizer, train_loader)
            valid_loss = self._validate(loss_fn, validate_loader)
            if enable_scheduler:
                scheduler.step()
            logger.info(f'Epoch [{epoch:05}/{self.config.train.epoch:05}], Loss: {train_loss:.5f}, Val Loss: {valid_loss:.5f}')
            if epoch % self.config.train.weight_save_period == 0:
                save_path = self.save_dir.joinpath('weights', f'weight-{str(epoch).zfill(5)}_{train_loss:.5f}_{valid_loss:.5f}.pth')
                CommonUtils.save_weight(self.model, save_path)
                logger.info(f'Saved weight at Epoch : {epoch:05}')
    def _train(self, loss_fn, optimizer, train_data_loader):
        """One training epoch; returns the accumulated loss (see NOTE below)."""
        # Keep track of training loss
        train_loss = 0.
        # Train the model in each mini-batch
        self.model.train()
        for mini_batch in train_data_loader:
            # Send data to GPU device; mini_batch[1] is assumed to hold the
            # image tensor — TODO confirm against the Dataset definition.
            if self.device.type == 'cuda':
                images = mini_batch[1].to(self.device)
            else:
                images = mini_batch[1]
            # Forward (reconstruction loss against the input itself)
            optimizer.zero_grad()
            outputs = self.model(images)
            loss = loss_fn(outputs, images)
            # Backward and update weights
            loss.backward()
            optimizer.step()
            # Update training loss
            train_loss += loss.item()
        # NOTE(review): this sums per-batch *mean* losses but divides by the
        # dataset size rather than the batch count, so the reported value
        # scales inversely with batch size — confirm this is intended.
        train_loss /= len(train_data_loader.dataset)
        return train_loss
    def _validate(self, loss_fn, validate_data_loader):
        """One validation pass (no gradients); returns the accumulated loss."""
        # Keep track of validation loss
        valid_loss = 0.0
        # Not use gradient for inference
        self.model.eval()
        with torch.no_grad():
            # Validate in each mini-batch
            for mini_batch in validate_data_loader:
                # Send data to GPU device
                if self.device.type == 'cuda':
                    images = mini_batch[1].to(self.device)
                else:
                    images = mini_batch[1]
                # Forward
                outputs = self.model(images)
                loss = loss_fn(outputs, images)
                # Update validation loss
                valid_loss += loss.item()
        # NOTE(review): same normalization caveat as _train above.
        valid_loss /= len(validate_data_loader.dataset)
        return valid_loss
| [
"ml4forecast@gmail.com"
] | ml4forecast@gmail.com |
9f2f07591921cca072d28477de6b228a3f3ed8bb | 8051d0aef3fa75ccf1101292b63bd30e8718da63 | /Utils.py | 82b85d6dcf6743cd0c312a6e935e75e9e62c0dd1 | [] | no_license | z-jack/SDUSpider | b3cbca03a1dac1f98142cf852fc15bc884c60db4 | 2112c6ac44336adf7d10b515a843e004d6164848 | refs/heads/master | 2021-04-26T22:13:39.154703 | 2018-03-08T07:30:45 | 2018-03-08T07:30:45 | 124,044,509 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,136 | py | from urllib.parse import urlparse
import errno
import sys
import os
ERROR_INVALID_NAME = 123
# validation code from https://stackoverflow.com/questions/9532499
def is_pathname_valid(pathname: str) -> bool:
    """
    `True` if the passed pathname is a valid pathname for the current OS;
    `False` otherwise.
    """
    try:
        # Non-string or empty pathnames are never valid.
        if not isinstance(pathname, str) or not pathname:
            return False

        # Strip any Windows drive specifier (e.g. `C:\`): `:` is otherwise
        # forbidden in path components and would erroneously invalidate every
        # absolute Windows pathname below.
        _, pathname = os.path.splitdrive(pathname)

        # A directory guaranteed to exist: the Windows install drive
        # (%HOMEDRIVE%) there, the filesystem root elsewhere.
        root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
            if sys.platform == 'win32' else os.path.sep
        assert os.path.isdir(root_dirname)

        # Ensure exactly one trailing path separator.
        root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep

        # Probe each path component against the guaranteed directory.
        # Non-existent or unreadable components are fine; only OS complaints
        # about the *name itself* mark the pathname invalid.
        for pathname_part in pathname.split(os.path.sep):
            try:
                os.lstat(root_dirname + pathname_part)
            except OSError as exc:
                # Windows reports bad names via the fine-grained `winerror`
                # (ERROR_INVALID_NAME); POSIX via ENAMETOOLONG, some edge-case
                # OSes (SunOS, *BSD) via ERANGE.  Anything else (e.g. ENOENT)
                # is an ignorable kernel/filesystem complaint.
                if hasattr(exc, 'winerror'):
                    if exc.winerror == ERROR_INVALID_NAME:
                        return False
                elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
                    return False
    # A TypeError here almost certainly means "embedded NUL character",
    # i.e. an invalid pathname.
    except TypeError:
        return False
    # No path component drew a fatal complaint: the pathname is valid.
    # Any other exception is an unrelated fatal issue and unwinds the stack.
    else:
        return True
def is_path_creatable(pathname: str) -> bool:
    """
    `True` if the current user has sufficient permissions to create the passed
    pathname; `False` otherwise.
    """
    # A bare filename has no directory part: creation would happen in the CWD.
    parent_dir = os.path.dirname(pathname)
    if not parent_dir:
        parent_dir = os.getcwd()
    # Creating an entry requires write access to its parent directory.
    return os.access(parent_dir, os.W_OK)
def is_path_exists_or_creatable(pathname: str) -> bool:
    """
    `True` if the passed pathname is a valid pathname for the current OS _and_
    either currently exists or is hypothetically creatable; `False` otherwise.

    This function is guaranteed to _never_ raise exceptions.
    """
    try:
        # Validate first so the `os` calls below never receive a pathname
        # capable of making them raise on bad input.
        if not is_pathname_valid(pathname):
            return False
        return os.path.exists(pathname) or is_path_creatable(pathname)
    # Non-fatal filesystem complaints (connection timeouts, permission
    # issues) imply the path is inaccessible; any other exception is an
    # unrelated fatal issue and is deliberately not caught here.
    except OSError:
        return False
def is_valid_url(url):
    """Return True when *url* parses with a scheme, network location and path.

    NOTE(review): requiring a non-empty path rejects bare-domain URLs such as
    'http://example.com' — confirm that is intended before relaxing it.
    """
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc, result.path])
    # Catch only parse-related failures instead of a bare `except:`, which
    # also swallowed KeyboardInterrupt and SystemExit.
    except (ValueError, AttributeError, TypeError):
        return False
| [
"emailjiong@126.com"
] | emailjiong@126.com |
e1eeeb4c21fbd250dc62e07c598b2cc0c465f402 | 9aaad4cff922e067118644fe1234435d83320335 | /chatbot_pre.py | 422b1d794bb44b52656be59defb4b3e49cf95fad | [] | no_license | JangYoungJune/ssafy_s2_bootcamp_CUagain | 1a10997e40574fff3255fa8db0dbb8bd44205d6e | ed31b62b6c31b7b8d226e2e82aea134117d0c14e | refs/heads/master | 2020-06-18T22:43:19.722764 | 2019-07-12T00:10:23 | 2019-07-12T00:10:23 | 196,480,001 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,458 | py | from flask import Flask
from slack import WebClient
from slackeventsapi import SlackEventAdapter
import csv
#input slack token and signing_secret
SLACK_TOKEN = ''
SLACK_SIGNING_SECRET = ''
app = Flask(__name__)
pre_request_user_list = []
# /listening 으로 슬랙 이벤트를 받습니다.
slack_events_adaptor = SlackEventAdapter(SLACK_SIGNING_SECRET, "/listening", app)
slack_web_client = WebClient(token=SLACK_TOKEN)
filename_list_cubest_easy = 'easy5_list.csv'
filename_list_cubest_jeasy = 'jeasy5_list.csv'
filename_list_cubest_snack = 'snack5_list.csv'
filename_list_cubest_ice = 'ice5_list.csv'
filename_list_cubest_food = 'food5_list.csv'
filename_list_cubest_drink = 'drink5_list.csv'
filename_list_pbgoods = 'favor_pb_goods_list.csv'
filename_list_plus_event = 'plus_event_list.csv'
def get_text_from_file(filename):
    """Read a '|'-delimited product file and return 'title:price원' lines
    joined by newlines (title in column 1, price in column 2).
    """
    # Open as UTF-8 explicitly, for consistency with get_list_from_file which
    # already declares encoding='UTF-8' for files in the same directory;
    # relying on the platform default risked mojibake for the Korean titles.
    with open('C:/Users/student/Desktop/CU/'+filename, encoding='UTF-8') as file:
        reader = csv.reader(file, delimiter='|')
        list_=[]
        for row in reader:
            title = row[1]
            price = row[2]
            list_.append(title + ':' + price + '원')
        return '\n'.join(list_)
def get_list_from_file(filename):
    """Parse a '|'-delimited CSV and return [col0, col1, col2] for every
    non-empty row.
    """
    path = 'C:/Users/student/Desktop/CU/' + filename
    with open(path, 'r', encoding='UTF-8') as fp:
        reader = csv.reader(fp, delimiter='|', quotechar='"')
        return [[row[0], row[1], row[2]] for row in reader if row]
@slack_events_adaptor.on("app_mention")
def app_mentioneevent_datad(event_data):
    """Handle an @-mention of the bot and reply according to a small per-user
    conversation state machine.

    State lives in the module-global `pre_request_user_list`; each entry is
    {'channel', 'user', 'event_ts', 'request'} where 'request' names the menu
    the user is in ('NONE', 'CUBEST', 'PLUS', 'PLUSBUYONE', 'PBGOODS', ...).
    Conversations expire after two minutes of inactivity.
    """
    channel = event_data["event"]["channel"]
    text = event_data["event"]["text"]
    user = event_data["event"]["user"]
    event_ts = event_data["event"]["ts"]
    unique_user = {'channel': channel,
                   'user': user,
                   'event_ts': float(event_ts),
                   'request': 'NONE'}
    keywords = ""
    # Expire conversations idle for two minutes or more.  Rebuilding the list
    # in place replaces the original delete-by-index-while-iterating loop,
    # which skipped the entry after every deletion and could raise IndexError.
    pre_request_user_list[:] = [
        entry for entry in pre_request_user_list
        if float(event_ts) - float(entry["event_ts"]) < 120
    ]
    # Find this user's active conversation in this channel, if any.
    pre_request_user = list(filter(lambda a:a['channel']==channel and a['user']==user, pre_request_user_list))
    # Known user: route the text after the @mention through the state machine.
    if len(pre_request_user) > 0 :
        now_dic = pre_request_user[0]
        text_after = text[text.index('>')+1:].strip()
        if now_dic["request"].startswith("CUBEST") :
            list_in_CUBEST = ["간편식사","즉석조리","과자류","아이스크림","식품","음료"]
            if text_after in list_in_CUBEST:
                if text_after == "간편식사":
                    keywords = "간편식사 베스트 5:\n"
                    list_cubest_easyeat = get_text_from_file(filename_list_cubest_easy)
                    keywords += list_cubest_easyeat
                    now_dic["request"] = "NONE"
                elif text_after == "즉석조리":
                    keywords = "즉석조리 베스트 5:\n"
                    list_cubest_instant = get_text_from_file(filename_list_cubest_jeasy)
                    keywords += list_cubest_instant
                    now_dic["request"] = "NONE"
                elif text_after == "과자류":
                    keywords = "과자류 베스트 5:\n"
                    list_cubest_snack = get_text_from_file(filename_list_cubest_snack)
                    keywords += list_cubest_snack
                    now_dic["request"] = "NONE"
                elif text_after == "아이스크림":
                    keywords = "아이스크림 베스트 5:\n"
                    list_cubest_icecream = get_text_from_file(filename_list_cubest_ice)
                    keywords += list_cubest_icecream
                    now_dic["request"] = "NONE"
                elif text_after == "식품":
                    keywords = "식품 베스트 5:\n"
                    list_cubest_food = get_text_from_file(filename_list_cubest_food)
                    keywords += list_cubest_food
                    now_dic["request"] = "NONE"
                elif text_after == "음료":
                    keywords = "음료 베스트 5:\n"
                    list_cubest_drink = get_text_from_file(filename_list_cubest_drink)
                    keywords += list_cubest_drink
                    now_dic["request"] = "NONE"
            else:
                keywords = '["간편식사"/"즉석조리"/"과자류"/"아이스크림"/"식품"/"음료"] 카테고리를 입력해 CU의 베스트상품을 확인하세요!'
        elif now_dic["request"].startswith("PLUS"):
            if now_dic["request"] == "PLUS":
                list_in_PLUS = ["1+1","2+1","3+1"]
                if text_after == list_in_PLUS[0]:
                    keywords = '"1+1"를 입력했습니다! [리스트/검색:원하는검색명]을 통해 1+1제품을 확인하세요!'
                    now_dic["request"] += "BUYONE"
                elif text_after == list_in_PLUS[1]:
                    keywords = '"2+1"를 입력했습니다! [리스트/검색:원하는검색명]을 통해 2+1제품을 확인하세요!'
                    now_dic["request"] += "BUYTWO"
                elif text_after == list_in_PLUS[2]:
                    keywords = '"3+1"를 입력했습니다! [리스트/검색:원하는검색명]을 통해 3+1제품을 확인하세요!'
                    now_dic["request"] += "BUYTHREE"
                else:
                    keywords = '["1+1"/"2+1"/"3+1"]로 원하는 플러스 상품을 확인하세요!'
                    print()
            elif "BUYONE" in now_dic["request"]:
                if "검색" in text_after:
                    search_keyword = text_after.split(':')[1]
                    keywords = "1+1상품 검색 결과입니다."
                    list_buyone_list = get_list_from_file(filename_list_plus_event)
                    list_buyone_search_list = [(n[0] + ":" + n[1] + "원") for n in list_buyone_list if '1+1' == n[2] and search_keyword in n[0]]
                    list_buyone_search_text = '\n' + '\n'.join(list_buyone_search_list)
                    keywords += list_buyone_search_text
                    now_dic["request"] = "NONE"
                # Equality, not substring membership: the original
                # `text_after in "리스트"` also matched "" and single syllables.
                elif text_after == "리스트":
                    keywords = "1+1 상품 리스트 결과입니다."
                    list_buyone_list = get_list_from_file(filename_list_plus_event)
                    list_buyone_list_list = [(n[0] + ":" + n[1] + "원") for n in list_buyone_list if '1+1' == n[2]]
                    list_buyone_list_text = '\n' + '\n'.join(list_buyone_list_list)
                    keywords += list_buyone_list_text
                    now_dic["request"] = "NONE"
                else:
                    keywords = '"리스트" / "검색:원하는검색명"로 1+1상품을 확인하세요!'
            elif "BUYTWO" in now_dic["request"]:
                if "검색" in text_after:
                    search_keyword = text_after.split(':')[1]
                    keywords = "2+1상품 검색 결과입니다."
                    list_buytwo_list = get_list_from_file(filename_list_plus_event)
                    list_buytwo_search_list = [(n[0] + ":" + n[1] + "원") for n in list_buytwo_list if '2+1' == n[2] and search_keyword in n[0]]
                    list_buytwo_search_text = '\n' + '\n'.join(list_buytwo_search_list)
                    keywords += list_buytwo_search_text
                    now_dic["request"] = "NONE"
                elif text_after == "리스트":
                    keywords = "2+1 상품 리스트 결과입니다."
                    list_buytwo_list = get_list_from_file(filename_list_plus_event)
                    list_buytwo_list_list = [(n[0] + ":" + n[1] + "원") for n in list_buytwo_list if '2+1' == n[2]]
                    list_buytwo_list_text = '\n' + '\n'.join(list_buytwo_list_list)
                    keywords += list_buytwo_list_text
                    now_dic["request"] = "NONE"
                else:
                    keywords = '"리스트" / "검색:원하는검색명"로 2+1상품을 확인하세요!'
            elif "BUYTHREE" in now_dic["request"]:
                if "검색" in text_after:
                    search_keyword = text_after.split(':')[1]
                    keywords = "3+1상품 검색 결과입니다."
                    list_buythree_list = get_list_from_file(filename_list_plus_event)
                    list_buythree_search_list = [(n[0] + ":" + n[1] + "원") for n in list_buythree_list if '3+1' == n[2] and search_keyword in n[0]]
                    list_buythree_search_text = '\n' + '\n'.join(list_buythree_search_list)
                    keywords += list_buythree_search_text
                    now_dic["request"] = "NONE"
                elif text_after == "리스트":
                    keywords = "3+1 상품 리스트 결과입니다."
                    list_buythree_list = get_list_from_file(filename_list_plus_event)
                    list_buythree_list_list = [(n[0] + ":" + n[1] + "원") for n in list_buythree_list if '3+1' == n[2]]
                    list_buythree_list_text = '\n' + '\n'.join(list_buythree_list_list)
                    keywords += list_buythree_list_text
                    now_dic["request"] = "NONE"
                else:
                    keywords = '"리스트" / "검색:원하는검색명"로 3+1상품을 확인하세요!'
        elif now_dic["request"].startswith("PBGOODS"):
            if "검색" in text_after:
                search_keyword = text_after.split(':')[1]
                keywords = "PB상품 검색 결과입니다."
                list_pbgoods_search = get_list_from_file(filename_list_pbgoods)
                list_pbgoods_search_pb = [(n[1] + ":" + n[2] + "원") for n in list_pbgoods_search if 'PB_PB' == n[0] and search_keyword in n[1]]
                list_pbgoods_search_diff = [(n[1] + ":" + n[2] + "원") for n in list_pbgoods_search if 'PB_diff' == n[0] and search_keyword in n[1]]
                list_pbgoods_search = '\n' + '\n'.join(list_pbgoods_search_pb) + '\n' + '\n'.join(list_pbgoods_search_diff)
                keywords += list_pbgoods_search
                now_dic["request"] = "NONE"
            # Equality test (same substring-membership bug as "리스트" above).
            elif text_after == "인기":
                keywords = "PB상품 인기 BEST 결과입니다."
                list_pbgoods_best = get_list_from_file(filename_list_pbgoods)
                list_pbgoods_best_pb = [(n[1]+":"+n[2]+"원") for n in list_pbgoods_best if 'PB_PB'==n[0]][:5]
                list_pbgoods_best_diff = [(n[1]+":"+n[2]+"원") for n in list_pbgoods_best if 'PB_diff'==n[0]][:5]
                list_pbgoods_best = '\n'+'\n'.join(list_pbgoods_best_pb)+'\n'+'\n'.join(list_pbgoods_best_diff)
                keywords += list_pbgoods_best
                now_dic["request"] = "NONE"
            else:
                keywords = '"인기" / "검색:원하는검색명"로 PB상품을 확인하세요!'
        else:
            # State 'NONE': offer the three top-level menus.
            list_in_NONE = ["CU베스트","플러스행사","PB상품"]
            if text_after in list_in_NONE:
                if text_after == list_in_NONE[0]:
                    keywords = '"CU베스트"를 입력했습니다! 카테고리를 골라주세요 [간편식사/즉석조리/과자류/아이스크림/식품/음료]'
                    now_dic["request"] = "CUBEST"
                elif text_after == list_in_NONE[1]:
                    keywords = '"플러스행사"를 입력했습니다! 카테고리를 골라주세요 [1+1/2+1/3+1]'
                    now_dic["request"] = "PLUS"
                elif text_after == list_in_NONE[2]:
                    keywords = '"PB상품"를 입력했습니다! 카테고리를 골라주세요 [인기 / 검색:원하는검색명]'
                    now_dic["request"] = "PBGOODS"
            else:
                keywords = '"CU베스트 / 플러스행사 / PB상품"을 입력해주세요!'
        now_dic["event_ts"] = float(event_ts)
    else:
        # First contact from this user: greet them and start tracking.
        keywords = '안녕하세요 cu입니다!("CU베스트 / 플러스행사 / PB상품"을 입력해주세요!)'
        pre_request_user_list.append(unique_user)
    slack_web_client.chat_postMessage(
        channel=channel,
        text=keywords
    )
if __name__ == '__main__':
app.run('127.0.0.1', port=4040)
| [
"cyj9212@gmail.com"
] | cyj9212@gmail.com |
6c97df099274970c06b328c0561e696fe4c99d3c | d01ccb00377f68b3268cd9b132203a002c1ca7bb | /pythoncentral.io/exam7-2.py | 325dded1cc1ba7719b51cadaa87c25cb8f882c7a | [] | no_license | hrsano645/pyside_practice | c327c594171565d63db429881501423c7c02e260 | a895119aa888c3a1bde219fd59b52f24bd428f32 | refs/heads/master | 2021-01-10T10:09:34.663574 | 2015-10-15T07:15:00 | 2015-10-15T07:15:00 | 44,090,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | # coding: utf-8
from __future__ import division, print_function, absolute_import, unicode_literals
__author__ = 'hiroshi'
import sys
from PySide.QtGui import *
def main():
    """Show a checkable 'Honey-Do' list; quit the app once every item is checked."""
    # Create a Qt application
    app = QApplication(sys.argv)
    # Our main window will be a QListView
    # NOTE(review): the local name `list` shadows the builtin; harmless here
    # but worth renaming if this function ever needs the builtin.
    list = QListView()
    list.setWindowTitle('Honey-Do List')
    list.setMinimumSize(600, 400)
    # Create an empty model for the list's data
    model = QStandardItemModel(list)
    # Add some textual items
    foods = [
        'Cookie dough', # Must be store-bought
        'Hummus', # Must be homemade
        'Spaghetti', # Must be saucy
        'Dal makhani', # Must be spicy
        'Chocolate whipped cream' # Must be plentiful
    ]
    for food in foods:
        # Create an item with a caption
        item = QStandardItem(food)
        # Add a checkbox to it
        item.setCheckable(True)
        # Add the item to the model
        model.appendRow(item)
    def on_item_changed(item):
        # Closure over `model`/`app`: quits once every item is checked.
        # If the changed item is not checked, don't bother checking others
        if not item.checkState():
            return
        # Loop through the items until you get None, which
        # means you've passed the end of the list
        i = 0
        while model.item(i):
            if not model.item(i).checkState():
                return
            i += 1
        app.quit()
    model.itemChanged.connect(on_item_changed)
    # Apply the model to the list view
    list.setModel(model)
    # Show the window and run the app
    list.show()
    app.exec_()
if __name__ == '__main__':
main()
| [
"hrs.sano645@gmail.com"
] | hrs.sano645@gmail.com |
7d4fff4b0e5e0bb4d6d5e641e95456b7d4777456 | 5150675cf3df057aed75b4fa61d2e45bdd306aff | /2018-07-23_Run0055.py | 0e72474ff927658524dff98fed41785d177ccba8 | [] | no_license | Dan-Schaefer/IPP-Neural-Networks | 601a8e2e322a2437b7b90674ff5dc9577b5d7819 | 7f6dd09469f8c560072ba399114265a21c5de7c6 | refs/heads/master | 2021-10-20T03:58:13.105216 | 2019-02-25T16:08:38 | 2019-02-25T16:08:38 | 121,515,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,654 | py | #! /usr/bin/env python
'''
Trains 7D QuaLiKiz-NN with a single output (efeETG_GB)
'''
from __future__ import print_function
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.optimizers import RMSprop, adam, Adam
from keras.initializers import TruncatedNormal, glorot_normal
from keras import regularizers
from keras import backend as K
import pandas
import numpy
import sys
import os
keras.backend.clear_session()
# Define new Metric: rmse = Root Mean Square Error
def rmse(y_true, y_pred):
    """Root-mean-square error metric, computed with Keras backend tensor ops."""
    return K.sqrt(K.mean(K.square( y_true-y_pred )))
# Gets the current file name. Useful for procedurally generating output/log files.
file_name = os.path.basename(sys.argv[0][:-3])
# Define neural network parameters
batch_size = 10
#num_classes = 1
epochs = 100
# Load Data (which is in HDF5 or .h5 format)
store = pandas.HDFStore("unstable_training_gen3_7D_nions0_flat_filter8.h5")
target_df = store['/output/efeETG_GB'].to_frame() # This one is relatively easy to train
input_df = store['input']
# Puts inputs and outputs in the same pandas dataframe.
# Also only keeps overlapping entries.
joined_dataFrame = target_df.join(input_df)
# Normalize data by standard deviation and mean-centering the data
joined_dataFrame['efeETG_GB'] = (joined_dataFrame['efeETG_GB'] - joined_dataFrame['efeETG_GB'].mean()) / joined_dataFrame['efeETG_GB'].std()
joined_dataFrame['Ati'] = (joined_dataFrame['Ati'] - joined_dataFrame['Ati'].mean()) / joined_dataFrame['Ati'].std()
joined_dataFrame['Ate'] = (joined_dataFrame['Ate'] - joined_dataFrame['Ate'].mean()) / joined_dataFrame['Ate'].std()
joined_dataFrame['An'] = (joined_dataFrame['An'] - joined_dataFrame['An'].mean()) / joined_dataFrame['An'].std()
joined_dataFrame['q'] = (joined_dataFrame['q'] - joined_dataFrame['q'].mean()) / joined_dataFrame['q'].std()
joined_dataFrame['smag'] = (joined_dataFrame['smag'] - joined_dataFrame['smag'].mean()) / joined_dataFrame['smag'].std()
joined_dataFrame['x'] = (joined_dataFrame['x'] - joined_dataFrame['x'].mean()) / joined_dataFrame['x'].std()
joined_dataFrame['Ti_Te'] = (joined_dataFrame['Ti_Te'] - joined_dataFrame['Ti_Te'].mean()) / joined_dataFrame['Ti_Te'].std()
# Shuffles dataset
shuffled_joined_dataFrame = joined_dataFrame.reindex(numpy.random.permutation(
joined_dataFrame.index))
# Creates a pandas dataframe for the outputs
shuffled_clean_output_df = shuffled_joined_dataFrame['efeETG_GB']
# Creates a pandas dataframe for the inputs
shuffled_clean_input_df = shuffled_joined_dataFrame.drop('efeETG_GB', axis=1)
# Creates training dataset (90% of total data) for outputs
y_train = shuffled_clean_output_df.iloc[:int(
numpy.round(len(shuffled_clean_output_df)*0.9))]
# Creates training dataset (90% of total data) for inputs
x_train = shuffled_clean_input_df.iloc[:int(
numpy.round(len(shuffled_clean_input_df)*0.9))]
# Creates testing dataset (10% of total data) for outputs
y_test = shuffled_clean_output_df.iloc[int(
numpy.round(len(shuffled_clean_output_df)*0.9)):]
# Creates testing dataset (10% of total data) for inputs
x_test = shuffled_clean_input_df.iloc[int(
numpy.round(len(shuffled_clean_input_df)*0.9)):]
# Deletes pandas dataframes that are no longer needed
del target_df, input_df
# Closes the HDFStore. This is good practice.
store.close()
'''
# Layers (FYI)
Input_Layer = 7
Hidden_Layer_1 = 30
Hidden_Layer_2 = 30
Output_Layer = 1
'''
# Define neural network
model = Sequential()
model.add(Dense(30,
input_shape=(7,),
activation='tanh',
kernel_initializer='glorot_normal',
kernel_regularizer=regularizers.l2(0.00000001),
use_bias=True, bias_initializer='glorot_normal'))
model.add(Dense(30,
activation='tanh',
kernel_initializer='glorot_normal',
kernel_regularizer=regularizers.l2(0.00000001),
use_bias=True, bias_initializer='glorot_normal'))
model.add(Dense(1,
activation='linear'))
model.summary()
model.compile(loss='mean_squared_error', #categorical_crossentropy
optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
metrics=['accuracy', "mae", "mean_squared_error", rmse])
# Add CallBacks (including TensorBoard)
tbCallBack = keras.callbacks.TensorBoard(
log_dir='TensorBoard_logs/' + str(file_name), write_graph = False, write_images=False, write_grads=False, histogram_freq=5)
EarlyStoppingCallBack = keras.callbacks.EarlyStopping(
monitor='val_rmse', min_delta=0, patience=15, verbose=0, mode='auto')
history = model.fit(x = x_train,
y = y_train,
batch_size=batch_size,
epochs=epochs,
shuffle=True,
verbose=2,
validation_data=(x_test, y_test),
callbacks=[tbCallBack, EarlyStoppingCallBack])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
print("score")
print(score)
print("model.metrics_names")
print(model.metrics_names)
# Create output file
OutputFile = open("./Loss-Values/" +str(file_name) +".txt", "w+")
OutputFile.write("Test loss: " + str(score[0]) + "\n")
OutputFile.write("Test accuracy: " + str(score[1]) + "\n")
OutputFile.write("val_mean_absolute_error: " +str(score[2]) + "\n")
OutputFile.write("val_mean_squared_error: " +str(score[3]) + "\n")
OutputFile.write("RMSE: " +str(score[4]) + "\n")
OutputFile.close()
# creates a HDF5 file 'my_model.h5'
model.save("./Saved-Networks/" + str(file_name) +".h5")
del history
del model
| [
"danielschaefer01@gmail.com"
] | danielschaefer01@gmail.com |
e7a6da8b913047441c8ecbd61af44920ea566c1b | 95d20fe737d711cf92d68130eb59b6aef4435ec2 | /pyecharts数据可视化/中国国内生产总值.py | 20106c170698be17ce82f33a10baa3719363738d | [] | no_license | CCH21/Python | d11b93851579d85f972828c760a96c5be1f79531 | 33e218810856971f3f1f97a2b8a4c8dce761362e | refs/heads/master | 2022-04-29T11:48:01.816283 | 2022-03-17T11:53:01 | 2022-03-17T11:53:01 | 226,452,057 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | #!/usr/bin/env python3
"""Plot China's quarterly GDP year-over-year growth (2006Q1-2020Q1).

Reads four growth-rate columns (strings such as ``"6.8%"``) from
``中国国内生产总值.csv``, converts them to fractions, and renders an HTML
line chart with pyecharts.
"""
import csv

from pyecharts import Line


def _parse_rate(text):
    """Convert a percentage string like ``"6.8%"`` to a fraction (0.068).

    Uses float() instead of the original eval(): eval would execute
    arbitrary expressions coming from the CSV file.
    """
    return round(float(text[:-1]) / 100, 3)


Quarter = []
GDP = []
Primary_industry = []
Secondary_industry = []
Tertiary_industry = []

with open('中国国内生产总值.csv', 'r', newline='') as csv_in_file:
    filereader = csv.reader(csv_in_file)
    head = next(filereader)  # skip the header row
    for row_list in filereader:
        Quarter.append(row_list[0])
        GDP.append(_parse_rate(row_list[2]))
        Primary_industry.append(_parse_rate(row_list[4]))
        Secondary_industry.append(_parse_rate(row_list[6]))
        Tertiary_industry.append(_parse_rate(row_list[8]))

# Reverse so the x-axis runs oldest -> newest (source file appears to be
# ordered newest-first).
Quarter = Quarter[::-1]
GDP = GDP[::-1]
Primary_industry = Primary_industry[::-1]
Secondary_industry = Secondary_industry[::-1]
Tertiary_industry = Tertiary_industry[::-1]

line = Line('中国国内生产总值同比增长率', '时间:2006年第1季度-2020年第1季度 数据来源:东方财富网', width=1280, height=720)
line.add('国内生产总值', Quarter, GDP, is_smooth=False, mark_point=['max'], mark_line=['average'], legend_pos='right')
line.add('第一产业', Quarter, Primary_industry, is_smooth=False, mark_point=['max'], mark_line=['average'],
         legend_pos='right')
line.add('第二产业', Quarter, Secondary_industry, is_smooth=False, mark_point=['max'], mark_line=['average'],
         legend_pos='right')
line.add('第三产业', Quarter, Tertiary_industry, is_smooth=False, mark_point=['max'], mark_line=['average'],
         legend_pos='right')
line.render('中国国内生产总值.html')
| [
"1398635912@qq.com"
] | 1398635912@qq.com |
8e5b226df11a9d0ac8efbc98100db75c96cb6160 | 887ca3020218909e25b9fa07abc9e95065dc0e9b | /Ecommerce/settings.py | 285b8a9fa4ac3570e83c942e7d28d2d3820790e7 | [] | no_license | Abhi09jeet2000/ecommerceFAM | e683734fa62268f3298b11e6999b0930bffbe92a | f713c4911fcfe23a068a7b5089587df8f3c371af | refs/heads/master | 2022-11-14T10:01:08.032094 | 2020-06-19T06:42:47 | 2020-06-19T06:42:47 | 272,952,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,487 | py | """
Django settings for Ecommerce project.
Generated by 'django-admin startproject' using Django 2.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '+(o*nm)413@zm$2e7a38-yteu$oc$7*zcp&lijq@^wtjtg%=_o'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG is True; production requires real hosts.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    # Project apps: storefront and seller portal.
    'shop.apps.ShopConfig',
    'seller.apps.SellerConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accounts',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Ecommerce.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app dirs.
        'DIRS': ['Ecommerce/templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Ecommerce.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Asia/Kolkata'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Managing Media
# User-uploaded content; two separate media roots are configured.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
NEW_MEDIA_ROOT = os.path.join(BASE_DIR, 'media1')
NEW_MEDIA_URL = '/media1/'
"noreply@github.com"
] | Abhi09jeet2000.noreply@github.com |
169846b938321c8dbce20033e51e9b4ee75fa83b | fad91cfb3adc96f27f8502dc20831526bce7bd64 | /simpleaddition.py | 4cb42e8bae71ec71963f6915d4a247cbe0428d7c | [] | no_license | silvouette/kattis_solutions | a452842eda75a3e22c2c43e943b4a04f8b318e19 | 47e93b86ac8bf97997e5b2f44c80731848da83f8 | refs/heads/main | 2023-05-16T08:29:53.583357 | 2021-06-11T05:00:50 | 2021-06-11T05:00:50 | 375,909,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | int1 = int(input())
# Read the second integer from stdin and print the sum with int1 (read above).
int2 = int(input())
print(int1+int2)
"noreply@github.com"
] | silvouette.noreply@github.com |
178f1b1b9e71512abf2e9e3f7fc910ec5d1d377f | 5bd19438a14764fb2af14d3212d0f2e0633909c2 | /app.py | 33d8980f73a209a19a6a2759413c6bd23c7d5578 | [] | no_license | tjf91/flask-tutorial | 54b2629331895af77471b1bc38c3cfe3c3daadae | de20a238ce7facd01494fc9fabbbd1eb87c65193 | refs/heads/master | 2023-03-09T09:23:29.455619 | 2021-02-22T01:03:54 | 2021-02-22T01:03:54 | 341,033,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from flask import Flask,render_template, jsonify
from flask_pymongo import PyMongo

app = Flask(__name__)
app.config.from_object('config.Config')


@app.route("/")
def home():
    """Render the landing page with a hard-coded name."""
    print(app.config)
    return render_template('index.html', name='James')


@app.route('/api/hello')
def hello():
    """Simple JSON health-check endpoint."""
    return jsonify({"response": "hello"})


@app.route('/api/<id>')
def employee_details(id):
    """Render the page for one employee, identified by the URL ``id``."""
    return render_template('index.html', id=id)


if __name__ == '__main__':
    # Guard the dev server so importing this module (tests, WSGI servers)
    # does not start it as a side effect.
    app.run()
"tudorjflorea@gmail.com"
] | tudorjflorea@gmail.com |
52c0334887c6155097602a647cc48677621d9129 | 6fece37ca627612fcf9d259d4842e9cdcaf8f0e5 | /model.py | 05a77d51301e1d7e9805fdfe3669646168b09ed8 | [] | no_license | interruping/nine5mok | df93dc832534bc8721707928df2a6251fcee8dcf | bc216e71815e6f02f40fdba45326f1a51c470c73 | refs/heads/master | 2020-04-15T00:07:26.873936 | 2019-01-06T00:56:08 | 2019-01-06T00:56:08 | 164,229,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,410 | py | import tensorflow as tf
import numpy as np
from tqdm import tqdm, trange
from operator import itemgetter
from batchup import data_source
import cv2
class Nine5MokModel(object):
    """CNN that maps a board photo to a 9x9x2 per-cell occupancy grid.

    Inputs are (width, height, channel) image tensors (120x90 grayscale in
    the training scripts below); the output carries one sigmoid probability
    per board cell and stone-colour channel.
    """

    def __init__(self, sess, imgsize, imgchannel):
        """Build the full graph inside ``sess``.

        :param sess: tf.Session the model will run in.
        :param imgsize: (width, height) tuple describing the input images.
        :param imgchannel: number of image channels (1 for grayscale).
        :raises ValueError: if imgsize/imgchannel are malformed.
        """
        # Fixed validation: the original combined the checks with ``and``
        # and used ``is not`` literal comparisons, so e.g. a 3-element
        # tuple slipped through.  Require a 2-element tuple explicitly.
        if not (isinstance(imgsize, tuple) and len(imgsize) == 2):
            raise ValueError('Invalid image size. imgsize param must be tuple that has 2 length')
        if not isinstance(imgchannel, int):
            raise ValueError('Invalid image channel, imgchannel param must be interger type value')
        self._input_width = imgsize[0]
        self._input_height = imgsize[1]
        self._BOARD_SIZE = 9
        self._sess = sess
        # Pure step counter -- must not receive gradient updates
        # (was trainable=True, which lets the optimizer modify it).
        self._global_step = tf.Variable(0, name='global_step', trainable=False)
        self._input = tf.placeholder(tf.float32, shape=(None, self._input_width, self._input_height, imgchannel), name='imgs_input')
        self._is_train = tf.placeholder_with_default(False, shape=(), name='is_train')
        self._labels = tf.placeholder(tf.float32, shape=(None, self._BOARD_SIZE, self._BOARD_SIZE, 2), name='imgs_label')
        self._learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')

        # Six inception blocks, each followed by 2x2 max pooling, shrink the
        # spatial extent step by step down to 1x1 with 512 channels.
        # 120, 90 -> 60, 45
        x = self._inception_conv(self._input, 16)
        x = tf.layers.max_pooling2d(x, [2, 2], strides=2)
        # 60, 45 -> 30, 22
        x = self._inception_conv(x, 32)
        x = tf.layers.max_pooling2d(x, [2, 2], strides=2)
        # 30, 22 -> 15, 11
        x = self._inception_conv(x, 64)
        x = tf.layers.max_pooling2d(x, [2, 2], strides=2)
        # 15, 11 -> 7, 5
        x = self._inception_conv(x, 128)
        x = tf.layers.max_pooling2d(x, [2, 2], strides=2)
        # 7, 5 -> 3, 2
        x = self._inception_conv(x, 256)
        x = tf.layers.max_pooling2d(x, [2, 2], strides=2)
        # 3, 2 -> 1, 1
        x = self._inception_conv(x, 512)
        x = tf.layers.max_pooling2d(x, [2, 2], strides=2)

        x = tf.reshape(x, shape=(-1, 512))
        # Sigmoid head: independent probability per cell/colour channel.
        x = tf.layers.dense(x, 9 * 9 * 2, activation=tf.nn.sigmoid)
        x = tf.reshape(x, shape=(-1, 9, 9, 2))
        self._logits = x

        self._loss_op = tf.losses.mean_squared_error(self._labels, self._logits, reduction=tf.losses.Reduction.MEAN)
        optimizer = tf.train.AdamOptimizer(learning_rate=self._learning_rate)
        # Batch-norm moving-average updates must run with each train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self._train_op = optimizer.minimize(loss=self._loss_op, global_step=self._global_step)
        # Accuracy compares rounded sigmoid outputs against the 0/1 labels.
        acc, self._accuracy_op = tf.metrics.accuracy(labels=self._labels, predictions=tf.round(self._logits), name='accuracy_operation')

        self._loss_summary = tf.summary.scalar('loss', self._loss_op)
        self._accuracy_summary = tf.summary.scalar('accuracy', self._accuracy_op)
        # NOTE(review): 'tain' looks like a typo for 'train'; kept unchanged
        # so existing TensorBoard log locations stay stable.
        self._train_summary_writer = tf.summary.FileWriter('./logs/tain', graph_def=sess.graph_def)
        self._validation_summary_writer = tf.summary.FileWriter('./logs/validation', graph_def=sess.graph_def)

        # Optional callback invoked every N global steps while training.
        self._hook_every_n = 0
        self._hook = None
        self._saver = tf.train.Saver(max_to_keep=100)

    def _residual_conv(self, input_tensor, output_channel):
        """Residual 3x3 conv block (currently unused by __init__'s network)."""
        block_input = tf.layers.conv2d(input_tensor, output_channel, [3, 3], strides=1, padding='SAME')
        x = tf.nn.leaky_relu(block_input, alpha=0.1)
        x = tf.layers.batch_normalization(x, training=self._is_train)
        block_input = tf.layers.batch_normalization(block_input, training=self._is_train)
        # conv_block
        x = tf.layers.conv2d(x, output_channel, [3, 3], padding='SAME')
        x = tf.layers.batch_normalization(x, training=self._is_train)
        x = tf.nn.leaky_relu(x, alpha=0.1)
        x = tf.layers.conv2d(x, output_channel, [3, 3], padding='SAME')
        x = tf.layers.batch_normalization(x, training=self._is_train)
        return tf.add(x, block_input)

    def _inception_conv(self, input_tensor, output_channel):
        """Inception-style block with four parallel pipelines.

        Each pipeline produces output_channel // 4 feature maps; results
        are concatenated on the channel axis.
        """
        # Fixed: identity comparison ``is not 0`` replaced by ``!= 0``.
        if output_channel % 4 != 0:
            raise ValueError('output_channel must be multiples of 4.')
        one_channel = output_channel // 4
        ## first pipe line: 1x1 then two factorised 3x3s (1x3 + 3x1 pairs)
        first1x1conv = tf.layers.conv2d(input_tensor, one_channel, [1, 1], strides=1, padding='SAME')
        first1x1conv = tf.nn.leaky_relu(first1x1conv, alpha=0.1)
        first1x3conv = tf.layers.conv2d(first1x1conv, one_channel, [1, 3], strides=1, padding='SAME')
        first1x3conv = tf.nn.leaky_relu(first1x3conv, alpha=0.1)
        first3x1conv = tf.layers.conv2d(first1x3conv, one_channel, [3, 1], strides=1, padding='SAME')
        first3x1conv = tf.nn.leaky_relu(first3x1conv, alpha=0.1)
        first1x3conv2 = tf.layers.conv2d(first3x1conv, one_channel, [1, 3], strides=1, padding='SAME')
        first1x3conv2 = tf.nn.leaky_relu(first1x3conv2, alpha=0.1)
        first3x1conv2 = tf.layers.conv2d(first1x3conv2, one_channel, [3, 1], strides=1, padding='SAME')
        first3x1conv2 = tf.nn.leaky_relu(first3x1conv2, alpha=0.1)
        first_output = first3x1conv2
        ## second pipe line: 1x1 then one factorised 3x3
        second1x1conv = tf.layers.conv2d(input_tensor, one_channel, [1, 1], strides=1, padding='SAME')
        second1x1conv = tf.nn.leaky_relu(second1x1conv, alpha=0.1)
        second1x3conv = tf.layers.conv2d(second1x1conv, one_channel, [1, 3], strides=1, padding='SAME')
        second1x3conv = tf.nn.leaky_relu(second1x3conv, alpha=0.1)
        second3x1conv = tf.layers.conv2d(second1x3conv, one_channel, [3, 1], strides=1, padding='SAME')
        second3x1conv = tf.nn.leaky_relu(second3x1conv, alpha=0.1)
        second_output = second3x1conv
        ## third pipe line: 3x3 max pool then 1x1 projection
        third_pool = tf.layers.max_pooling2d(input_tensor, [3, 3], strides=1, padding='SAME')
        third1x1conv = tf.layers.conv2d(third_pool, one_channel, [1, 1], strides=1, padding='SAME')
        third1x1conv = tf.nn.leaky_relu(third1x1conv, alpha=0.1)
        third_output = third1x1conv
        ## fourth pipe line: plain 1x1 projection
        fourth1x1conv = tf.layers.conv2d(input_tensor, one_channel, [1, 1], strides=1, padding='SAME')
        fourth1x1conv = tf.nn.leaky_relu(fourth1x1conv, alpha=0.1)
        fourth_output = fourth1x1conv
        return tf.concat([first_output, second_output, third_output, fourth_output], axis=-1)

    def set_train_hook(self, every_n, hook):
        """Register ``hook`` to be called every ``every_n`` global steps."""
        self._hook = hook
        self._hook_every_n = every_n

    def minibatch_preprocessing(self, minibatch_filenames):
        """Load grayscale images and stack them into a (N, 120, 90, 1) batch.

        cv2 reads images as (height, width); the transpose converts each to
        the (width, height, channel) layout the input placeholder expects.
        """
        BASE_PATH = './input/train50k_s/'
        img_for_stacking = []
        for filename in minibatch_filenames:
            img = cv2.imread(BASE_PATH + filename, 0)
            if img is None:
                # cv2.imread silently returns None for missing/corrupt files;
                # fail loudly instead of crashing later inside np.reshape.
                raise FileNotFoundError('cannot read image: ' + BASE_PATH + filename)
            img = np.reshape(img, (90, 120, 1))
            img_for_stacking.append(np.transpose(img, axes=[1, 0, 2]))
        return np.stack(img_for_stacking, axis=0)

    def train(self, input_filenames, labels, batch_size, learning_rate):
        """Run one training epoch; returns (mean loss, mean accuracy)."""
        input_filename_idxs = np.array([i for i in range(0, len(input_filenames))])
        train_ds = data_source.ArrayDataSource([input_filename_idxs, labels])
        train_batch_iter = train_ds.batch_iterator(batch_size=batch_size)
        train_batch_total = len(input_filenames) // batch_size if len(input_filenames) % batch_size == 0 else len(input_filenames) // batch_size + 1
        epoch_train_loss = []
        epoch_train_accuracy = []
        train_batch_tqdm = tqdm(train_batch_iter, total=train_batch_total)
        for train_step, [minibatch_filename_idxs, minibatch_train_labels] in enumerate(train_batch_tqdm):
            train_batch_tqdm.set_description('training...')
            # List comprehension instead of itemgetter(*idxs): itemgetter
            # returns a bare string for a single-element final batch, which
            # would then be iterated character by character downstream.
            minibatch_filenames = [input_filenames[i] for i in minibatch_filename_idxs]
            minibatch_files = self.minibatch_preprocessing(minibatch_filenames)
            train_feed = {
                self._input: minibatch_files,
                self._labels: minibatch_train_labels,
                self._learning_rate: learning_rate,
                self._is_train: True,
            }
            minibatch_loss, loss_summary, _ = \
                self._sess.run([self._loss_op, self._loss_summary, self._train_op], feed_dict=train_feed)
            # Second pass without is_train: accuracy measured in inference mode.
            train_feed = {
                self._input: minibatch_files,
                self._labels: minibatch_train_labels,
            }
            minibatch_accuracy, accuracy_summary = self._sess.run([self._accuracy_op, self._accuracy_summary], feed_dict=train_feed)
            epoch_train_loss.append(minibatch_loss)
            epoch_train_accuracy.append(minibatch_accuracy)
            global_step = self._sess.run(self._global_step)
            self._train_summary_writer.add_summary(loss_summary, global_step=global_step)
            self._train_summary_writer.flush()
            self._train_summary_writer.add_summary(accuracy_summary, global_step=global_step)
            self._train_summary_writer.flush()
            train_batch_tqdm.set_postfix(minibatch_loss=minibatch_loss, minibatch_accuracy=minibatch_accuracy)
            if self._hook and global_step % self._hook_every_n == 0:
                self._hook()
        epoch_train_loss = np.mean(epoch_train_loss)
        epoch_train_accuracy = np.mean(epoch_train_accuracy)
        return epoch_train_loss, epoch_train_accuracy

    def validate(self, input_filenames, labels, batch_size):
        """Evaluate one epoch without training; returns (loss, accuracy)."""
        input_filename_idxs = np.array([i for i in range(0, len(input_filenames))])
        valid_ds = data_source.ArrayDataSource([input_filename_idxs, labels])
        valid_batch_iter = valid_ds.batch_iterator(batch_size=batch_size)
        valid_batch_total = len(input_filenames) // batch_size if len(input_filenames) % batch_size == 0 else len(input_filenames) // batch_size + 1
        epoch_valid_loss = []
        epoch_valid_accuracy = []
        valid_batch_tqdm = tqdm(valid_batch_iter, total=valid_batch_total)
        for valid_step, [minibatch_valid_filename_idxs, minibatch_valid_labels] in enumerate(valid_batch_tqdm):
            valid_batch_tqdm.set_description('validating...')
            # Same single-element-batch fix as in train().
            minibatch_valid_filenames = [input_filenames[i] for i in minibatch_valid_filename_idxs]
            minibatch_valid_files = self.minibatch_preprocessing(minibatch_valid_filenames)
            valid_feed = {
                self._input: minibatch_valid_files,
                self._labels: minibatch_valid_labels,
            }
            minibatch_loss, minibatch_accuracy = \
                self._sess.run([self._loss_op, self._accuracy_op], feed_dict=valid_feed)
            epoch_valid_loss.append(minibatch_loss)
            epoch_valid_accuracy.append(minibatch_accuracy)
            valid_batch_tqdm.set_postfix(minibatch_loss=minibatch_loss, minibatch_accuracy=minibatch_accuracy)
        epoch_valid_loss = np.mean(epoch_valid_loss)
        epoch_valid_accuracy = np.mean(epoch_valid_accuracy)
        global_step = self._sess.run(self._global_step)
        valid_loss_summary = tf.Summary()
        valid_loss_summary.value.add(tag="loss", simple_value=epoch_valid_loss)
        valid_loss_summary.value.add(tag="accuracy", simple_value=epoch_valid_accuracy)
        self._validation_summary_writer.add_summary(valid_loss_summary, global_step=global_step)
        self._validation_summary_writer.flush()
        return epoch_valid_loss, epoch_valid_accuracy

    def predict(self, input_filenames, batch_size):
        """Return stacked sigmoid outputs, shape (len(input_filenames), 9, 9, 2)."""
        input_filename_idxs = np.array([i for i in range(0, len(input_filenames))])
        predict_ds = data_source.ArrayDataSource([input_filename_idxs])
        predict_batch_iter = predict_ds.batch_iterator(batch_size=batch_size)
        predict_batch_total = len(input_filenames) // batch_size if len(input_filenames) % batch_size == 0 else len(input_filenames) // batch_size + 1
        epoch_logits = []
        predict_batch_tqdm = tqdm(predict_batch_iter, total=predict_batch_total)
        for predict_step, [minibatch_predict_filename_idxs] in enumerate(predict_batch_tqdm):
            predict_batch_tqdm.set_description('predicting...')
            # Same single-element-batch fix as in train().
            minibatch_predict_filenames = [input_filenames[i] for i in minibatch_predict_filename_idxs]
            minibatch_predict_files = self.minibatch_preprocessing(minibatch_predict_filenames)
            logit_feed = {
                self._input: minibatch_predict_files,
            }
            minibatch_logits = self._sess.run(self._logits, feed_dict=logit_feed)
            epoch_logits.append(minibatch_logits)
        return np.concatenate(epoch_logits, axis=0)

    def save(self):
        """Checkpoint all variables under ./model/, tagged with the global step."""
        self._saver.save(self._sess, './model/nine5mok', global_step=self._global_step, write_meta_graph=False)

    def restore(self, checkpoint):
        """Restore variables from an explicit checkpoint path."""
        self._saver.restore(self._sess, checkpoint)

    def restore_latest(self):
        """Restore the newest checkpoint, or freshly initialise if none exists."""
        latest_model = tf.train.latest_checkpoint('./model/')
        if not latest_model:
            self._sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
        else:
            self._saver.restore(self._sess, latest_model)
            # Local variables (the tf.metrics accuracy counters) are not in
            # the checkpoint and must always be reinitialised.
            self._sess.run(tf.local_variables_initializer())
self._sess.run(tf.local_variables_initializer())
if __name__ == '__main__':
    # Smoke test: build the graph for 120x90 grayscale inputs.
    session = tf.InteractiveSession()
    Nine5MokModel(session, (120, 90), 1)
| [
"interruping4dev@gmail.com"
] | interruping4dev@gmail.com |
787a125204b197d0d2287a4821f214e7514697d8 | 37a0d59dbc334291b205e05a6ce881acb839e41f | /example/example_model.py | 800b6834af6a2a385ad525665b56467b91335b1a | [
"Apache-2.0"
] | permissive | hoanghieu/vertabelo-sqlalchemy | c5ed3d8cedade34bc6be139acf18e3cfb4989074 | 8c588c4275ea17c64392b16f83f30d63b74ebdf8 | refs/heads/master | 2020-04-01T14:03:41.787822 | 2015-04-16T08:44:59 | 2015-04-16T08:44:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | # -*- encoding: utf-8 -*-
# begin
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, BigInteger,String, ForeignKey, Unicode, Binary, LargeBinary, Time, DateTime, Date, Text, Boolean
from sqlalchemy.orm import relationship, backref, deferred
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Order (Base):
    """A customer order; line items live in OrderItem."""
    __tablename__ = "order"

    # Surrogate primary key.
    id = Column('id', Integer, primary_key = True)
    # Order number; the database column is fixed-width CHAR(12),
    # mapped here as a plain String.
    order_no = Column('order_no', String)
    # Client who placed the order.
    client_id = Column('client_id', Integer, ForeignKey('client.id'))

    client = relationship('Client', foreign_keys=client_id)
class Product (Base):
    """A sellable item belonging to one ProductCategory."""
    __tablename__ = "product"

    # Surrogate primary key.
    id = Column('id', Integer, primary_key = True)
    # Category this product belongs to.
    product_category_id = Column('product_category_id', Integer, ForeignKey('product_category.id'))
    # Stock-keeping unit; the database column is fixed-width CHAR(10).
    sku = Column('sku', String)
    # Display name.
    name = Column('name', Unicode)
    # Price stored as an integer (unit not visible here -- presumably the
    # smallest currency denomination; confirm against the schema).
    price = Column('price', BigInteger)
    # Free-form product description.
    description = Column('description', Unicode)
    # Product image bytes; deferred so the blob is only loaded on access.
    image = deferred(Column('image', LargeBinary))

    product_category = relationship('ProductCategory', foreign_keys=product_category_id)
class OrderItem (Base):
    """One line of an Order: a product and its quantity."""
    __tablename__ = "order_item"

    # Surrogate primary key.
    id = Column('id', Integer, primary_key = True)
    # Order this line belongs to.
    order_id = Column('order_id', Integer, ForeignKey('order.id'))
    # Product being ordered.
    product_id = Column('product_id', Integer, ForeignKey('product.id'))
    # Quantity of the product in this line.
    amount = Column('amount', Integer)

    order = relationship('Order', foreign_keys=order_id)
    product = relationship('Product', foreign_keys=product_id)
class ProductCategory (Base):
    """Self-referential category tree for products."""
    __tablename__ = "product_category"

    # Surrogate primary key.
    id = Column('id', Integer, primary_key = True)
    # Category display name.
    name = Column('name', Unicode)
    # Parent category; NULL for root categories.
    parent_category_id = Column('parent_category_id', Integer, ForeignKey('product_category.id'))

    # Relationship to the parent category (self-referential).
    product_category = relationship('ProductCategory', foreign_keys=parent_category_id)
class Client (Base):
    """A customer who can place orders."""
    __tablename__ = "client"

    # Surrogate primary key.
    id = Column('id', Integer, primary_key = True)
    # Customer's full name.
    full_name = Column('full_name', Unicode)
    # Contact e-mail address.
    email = Column('email', Unicode)
# end
| [
"nabla@e-point.pl"
] | nabla@e-point.pl |
a531ad222f1ad9d58508ac1533cedda53a5082cc | e3ac2b64b641c004a2bb6698770cc30babb29a7a | /tests/test_functions.py | 064af0a89530dfad179195b36f473af1d3262f31 | [
"ISC"
] | permissive | NicCage1337/as3ninja | d03443bbc9eab9576955ec831ee999a831ec3985 | 34c169ef1dd030d38201b90f5333699540649fba | refs/heads/master | 2020-09-14T02:51:21.412269 | 2019-11-09T20:22:33 | 2019-11-09T20:22:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | # -*- coding: utf-8 -*-
from uuid import UUID
import pytest
from as3ninja.functions import *
def test_ninjafunctions_is_dict():
    """The ninja function registry must be a plain dict."""
    registry_type = type(ninjafunctions)
    assert registry_type == dict
class Test_iterfiles:
    """Tests for iterfiles(), which expands a glob pattern and yields
    parsed file contents."""

    @staticmethod
    def test_all_files():
        """Every fixture file is yielded with directory, name, extension
        and content parsed according to its type."""
        # expected python type of the parsed content, keyed by directory name
        content_types = {"json": dict, "yaml": dict, "text": str}
        loop_min = 3  # at least one file per content type must be visited
        loop_count = 0
        for dirname, filename, fileextension, fcontent in iterfiles(
            "tests/testdata/functions/iterfiles/*/*.*"
        ):
            loop_count += 1
            assert dirname in ("json", "text", "yaml")
            assert filename
            assert fileextension in ("json", "txt", "yaml")
            assert isinstance(fcontent, content_types[dirname])
        assert loop_count >= loop_min

    @staticmethod
    def test_json():
        """A ``*.json`` pattern yields dicts, including from subdirectories."""
        for dirname, filename, fcontent in iterfiles(
            "tests/testdata/functions/iterfiles/**/*.json"
        ):
            assert dirname in ("json", "json/subdir")
            assert filename
            assert isinstance(fcontent, dict)

    @staticmethod
    def test_nonexistent():
        """A pattern matching nothing raises FileNotFoundError by default."""
        with pytest.raises(FileNotFoundError):
            for dirname, filename, fcontent in iterfiles("nonexistend/**/*.json"):
                print(dirname)  # this never happens

    @staticmethod
    def test_nonexistent_missing_ok():
        """With missing_ok=True a non-matching pattern yields nothing
        instead of raising."""
        for dirname, filename, fcontent in iterfiles(
            pattern="nonexistend/**/*.json", missing_ok=True
        ):
            print(dirname)  # this never happens
def test_uuid():
    """uuid() returns a string that parses as a valid UUID."""
    generated = uuid()
    assert isinstance(generated, str)
    assert isinstance(UUID(generated), UUID)
| [
"github@simonkowallik.com"
] | github@simonkowallik.com |
35a2e4411670a59d855b9d8813a173f54c84aa9d | 7f87b6b4f1e218843fa2d03efec8840408a5a1c9 | /inv/migrations/0002_subcategoria.py | 2424586b205c9d5f539157703298df8c209e288c | [] | no_license | angiengel91/pypos | 03661a3d6d5025a0aeac90ce05c8adf8b1977823 | 359bb2f13dc2912bb32dfdc3f303e4cbcd213b11 | refs/heads/master | 2020-11-27T23:21:52.257819 | 2020-01-03T02:25:55 | 2020-01-03T02:25:55 | 229,644,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | # Generated by Django 3.0.1 on 2019-12-23 19:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); avoid hand-editing once applied.

    dependencies = [
        # Requires the (possibly custom) user model for the ``uc`` FK and the
        # initial inv migration that created Categoria.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('inv', '0001_initial'),
    ]

    operations = [
        # Subcategoria: a category subdivision, unique per (category, description).
        migrations.CreateModel(
            name='Subcategoria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('estado', models.BooleanField(default=True)),
                # fc/fm: creation and modification timestamps.
                ('fc', models.DateTimeField(auto_now_add=True)),
                ('fm', models.DateTimeField(auto_now=True)),
                # um/uc: presumably "modified by" id and "created by" FK -- confirm.
                ('um', models.IntegerField(blank=True, null=True)),
                ('Descripcion', models.CharField(help_text='Descripción de la subcategoría', max_length=100)),
                ('objCategoria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inv.Categoria')),
                ('uc', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Subcategorias',
                'unique_together': {('objCategoria', 'Descripcion')},
            },
        ),
    ]
| [
"angiengel.zapata@gmail.com"
] | angiengel.zapata@gmail.com |
92f4a128811233e9904efabf086e54f56061abfe | 391952e57c97177eb358b8750453090b84393355 | /Solutions/Task2/853506_Vlad_Lykashonok/lab2/build/lib.linux-x86_64-2.7/testModuleByVlad/core.py | 813bd25692b0e6fe3cedf932bfc66e1fd9dcafba | [] | no_license | Lykashonok/BSUIR-PYTHON-2020 | 1495a67eb2c563f15cb60c8fd3636805c25209b0 | 0fb716b8693cda9467bd34fe719c6f8ab2365aca | refs/heads/master | 2021-05-18T04:17:34.785974 | 2020-04-23T14:44:24 | 2020-04-23T14:44:24 | 251,102,642 | 0 | 0 | null | 2020-03-29T18:24:06 | 2020-03-29T18:24:05 | null | UTF-8 | Python | false | false | 4,346 | py | import unittest
import json # to check wether jsonByVlad works correctly
import time # to check memoization speed
import sys
sys.path.append('../')
from vectorByVlad.core import VectorByVlad
from jsonByVlad.core import to_json, from_json
from memoizationByVlad.core import factorial, fibonacci
from externalSortByVlad.core import external_sort
class TestJson(unittest.TestCase):
    # NOTE(review): despite the name, this case also covers VectorByVlad,
    # external_sort and the memoization helpers.

    def test_to_json(self):
        """to_json parses JSON strings identically to the stdlib json.loads."""
        self.assertEqual(to_json('{"name":null,"age":30,"city":"New York","cards":["1234","4321"]}'), json.loads('{"name":null,"age":30,"city":"New York","cards":["1234","4321"]}'))
        self.assertEqual(to_json('{"bool":false,"bool2":true,"string":"string","array":[{"obj":{}},"4321"]}'), json.loads('{"bool":false,"bool2":true,"string":"string","array":[{"obj":{}},"4321"]}'))
        # Malformed JSON (missing values/keys) must raise ValueError.
        self.assertRaises(ValueError, to_json, '{"bool":,"bool2":,"string":"string",:[{"obj":{}},"4321"]}')

    def test_from_json(self):
        """from_json serialises like the stdlib json.dumps."""
        obj = {"name":None,"age":30,"bool":False,"bool2":True,"tmp":[],"city":"New York","empty":{},"cards":["1234","4321","qwe",False,True,None,[],{}]}
        self.assertEqual(from_json(obj), json.dumps(obj))

    def test_vbv_operations(self):
        """Arithmetic, comparison, repr and indexing on VectorByVlad."""
        vector = VectorByVlad([1, 2, 3])
        self.assertEqual(str(vector), 'VBV(1, 2, 3)')
        self.assertTrue(-vector or VectorByVlad([0]) == True and vector != VectorByVlad([0]) and not vector == '2')
        self.assertEqual(vector, vector)
        self.assertNotEqual(vector, -vector)
        self.assertEqual(vector+vector-vector, vector)
        self.assertEqual(abs(VectorByVlad([0,0,0])), 0)
        # Dot product with a zero vector is zero.
        self.assertEqual(VectorByVlad([0])[0], VectorByVlad([1,2]) * VectorByVlad([0,0]) * 5)
        if vector != 2: vector[0] = 2 * vector[0]
        self.assertEqual(vector[0], 2)
        # In-place operators: 2 -> 4 -> 4 -> 8 -> 16 on the first component.
        vector += vector
        vector -= VectorByVlad([0,0,0])
        vector *= 2
        vector = vector * 2
        self.assertEqual(vector[0], 16)

    def test_vbv_raises(self):
        """Out-of-range indices and invalid operand types raise ValueError."""
        vector = VectorByVlad([1, 2, 3])
        with self.assertRaises(ValueError):
            vector[20]
            vector *= vector
            vector[-25] = 2
            vector = vector * 'qwe'
            vector[1] = 'qwe'

    def test_external_sort(self):
        """external_sort produces a numerically sorted output file."""
        def is_sorted(file):
            # NOTE(review): number_prev is never advanced inside the loop, so
            # this only verifies each line against the FIRST line -- a weaker
            # check than full sortedness.
            with open(file) as file_to_output:
                number_prev = file_to_output.readline()
                number_new = number_prev
                while number_new:
                    number_new = file_to_output.readline()
                    if number_new and int(number_new) < int(number_prev): return False
            return True
        #if buffer will be bigger than file, part of code wont be covered, cause it wont create more than 1 file
        external_sort(FILE_TO_SORT="numbers.txt", FILE_TO_OUTPUT="output.txt", BUFFER_SIZE=2000)
        self.assertTrue(is_sorted("output.txt"))

    def test_external_sort_raises(self):
        """Identical input and output paths must be rejected."""
        with self.assertRaises(ValueError):
            external_sort("output.txt", "output.txt")

    def test_memoization(self):
        """Second calls must return cached results noticeably faster.

        NOTE(review): wall-clock ratio assertions are inherently flaky on a
        loaded machine.
        """
        # calculate time for first execution
        time_passed_first = time.time()
        self.assertEqual(fibonacci(30), 832040)
        time_passed_first = (time.time() - time_passed_first)
        # calculate time for second
        time_passed_second = time.time()
        self.assertEqual(fibonacci(30), 832040)
        time_passed_second = (time.time() - time_passed_second)
        # see if second time less more than 2 times than first,
        # it means that second execution was rather faster
        # cause there wasnt any computations
        self.assertTrue(time_passed_second / time_passed_first < 0.5)
        time_passed_first = time.time()
        self.assertEqual(factorial(6), 720)
        time_passed_first = (time.time() - time_passed_first)
        time_passed_second = time.time()
        self.assertEqual(factorial(6), 720)
        time_passed_second = (time.time() - time_passed_second)
        self.assertTrue(time_passed_second / time_passed_first < 0.5)

    def test_memoization_raise(self):
        """Negative arguments must raise ValueError."""
        with self.assertRaises(ValueError):
            fibonacci(-20)
            factorial(-1)
def main():
    """Entry point: run the whole unittest suite."""
    unittest.main()

# to test with coverage type
# coverage run testModuleByVlad.py && coverage report
if __name__ == '__main__':
    # Route through main() so the entry point defined above is actually
    # used (it was previously dead code; the guard called unittest.main()
    # directly).
    main()
"dembyvlad@gmail.com"
] | dembyvlad@gmail.com |
c799bc9198fff55fc6a1642d264a35eed90b6ea0 | bfeec685e05b201b43157c803614e672d2a430a4 | /accounts/tests/test_authentication.py | f33b7e70b9abd66e7221dddd75516d6a7d7bbc79 | [] | no_license | taska-tester/python | 32e8cc535dadd5e3b4010289288ff1a0f1873566 | 7054504bda47c4bfe4410b2d2cb5884dc233d8a7 | refs/heads/main | 2023-04-01T15:38:21.685259 | 2021-04-15T05:53:32 | 2021-04-15T05:53:32 | 349,272,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from accounts.authentication import PasswordlessAuthenticationBackend
from accounts.models import Token
User = get_user_model()
class AuthenticateTest(TestCase):
    """Tests for PasswordlessAuthenticationBackend.authenticate()."""

    def test_returns_None_if_no_such_token(self):
        """An unknown token uid authenticates nobody."""
        backend = PasswordlessAuthenticationBackend()
        self.assertIsNone(backend.authenticate('no-such-token'))

    def test_returns_new_user_with_correct_email_if_token_exists(self):
        """A valid token for an unseen email creates and returns that user."""
        address = 'edith@example.com'
        issued_token = Token.objects.create(email=address)
        backend = PasswordlessAuthenticationBackend()
        authenticated = backend.authenticate(issued_token.uid)
        self.assertEqual(authenticated, User.objects.get(email=address))

    def test_returns_existing_user_with_correct_email_if_token_exists(self):
        """A valid token for an already-registered email returns that user."""
        address = 'edith@example.com'
        already_registered = User.objects.create(email=address)
        issued_token = Token.objects.create(email=address)
        backend = PasswordlessAuthenticationBackend()
        authenticated = backend.authenticate(issued_token.uid)
        self.assertEqual(authenticated, already_registered)
class GetUserTest(TestCase):
    """Tests for PasswordlessAuthenticationBackend.get_user()."""

    def test_gets_user_by_email(self):
        """get_user looks the user up by email, ignoring other accounts."""
        User.objects.create(email='another@example.com')
        wanted = User.objects.create(email='edith@example.com')
        backend = PasswordlessAuthenticationBackend()
        self.assertEqual(backend.get_user('edith@example.com'), wanted)

    def test_returns_None_if_no_user_with_that_email(self):
        """An unknown email yields None rather than raising."""
        backend = PasswordlessAuthenticationBackend()
        self.assertIsNone(backend.get_user('edith@example.com'))
| [
"jason.lai@taskaprosthetics.com"
] | jason.lai@taskaprosthetics.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.