index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,100 | 9f7f6351d1c8e5ecac307d1f0becdb44974c3dec | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import fnmatch
import os
import subprocess
# ---------------------------------------------------------------------------------------------------------------------------------------
# Create Folder
# ---------------------------------------------------------------------------------------------------------------------------------------
def main( LOCAL_DATASET_PATH_LOG, FARSIGHT_BIN_EXE, LOCAL_DATASET_PATH_DATA_DEBUG, FILE_GFP, runCopy_db_log, PROJEOPT, IMAGETYPE ):
NEW_NAME = LOCAL_DATASET_PATH_DATA_DEBUG+'/'+os.path.basename(FILE_GFP)
print "\t\t"+NEW_NAME
if( os.path.exists(NEW_NAME+'zPro_X.tif') & os.path.exists(NEW_NAME+'zPro_Y.tif') & os.path.exists(NEW_NAME+'zPro_Z.tif') & os.path.exists(NEW_NAME+'zPro_X_Re.tif') & os.path.exists(NEW_NAME+'zPro_Y_Re.tif') & os.path.exists(NEW_NAME+'zPro_Z_Re.tif') ):
print "Projection already exist"
else:
print "Projection does not exist"
#runCopy_db_log = LOCAL_DATASET_PATH_LOG +'/runCopyProjections.log'
TEMP = FARSIGHT_BIN_EXE+'/ftkMainDarpa PROJECTION '+FILE_GFP+'.nrrd '+LOCAL_DATASET_PATH_DATA_DEBUG+' '+PROJEOPT+' '+IMAGETYPE+'>> '+runCopy_db_log+' 2>&1'
TEMP2 = subprocess.Popen(TEMP, shell=True)
print 'Projection of '+FILE_GFP
TEMP2.communicate()
TEMP_FILE = open(runCopy_db_log, 'a')
TEMP_FILE.write('\nCOMMAND: '+TEMP+'\n')
TEMP_FILE.close()
#FILE = FILE.rsplit('/',1)[0]
if __name__ == "__main__":
main() |
988,101 | d05f493b9f0cd8a16e7ebd209fb7cb0b97c9678e | import pytest
import numpy as np
from pypif import pif
import random as rnd
from citrine_converters.mechanical.converter import process_files
"""
README
Format:
TEST NAME
-Description
PASS/FAIL
**note: the numbers below correspond to the order of the tests in this file.
1. test_stress_strain_both_files
-This test generates simple data with both stress and strain defined, then passes the function two files
with both stress and strain data contained in each.
PASS
2. test_stress_redefined
-This test generates one file with stress defined twice and no strain defined.
PASS
3. test_strain_redefined
-This test generates one file with strain defined twice and no stress defined.
PASS
4. test_differ_times_one_file
-This test generates one file with differing times
PASS
5. test_differ_times_two_files
-This test generates two files with differing end times
PASS
6. test_time_not_in
-This test generates one file with no time at all in it
PASS
7. test_stress_not_in
-This test generates one file with no stress given
PASS
8. test_strain_not_in
-This test generates one file with no strain given
PASS
9. test_time_not_in_two_files
-This test generates two files with no time included
PASS
10. test_stress_not_in_two_files
-This test passes two identical files that only contain strain data
PASS
11. test_strain_not_in_two_files
-This test passes in two identical files that only contain stress data
PASS
12. test_swapped_stress_strain_one_file
-This test generates one file but with stress and strain swapped in order.
PASS
13. test_swapped_stress_strain_two_files
-This test swaps the file input
PASS
14. test_process_single_file
-This tests generates an expected pif, and then compares it to the function generated pif (from one file)
PASS
15. test_process_two_filenames
-This test generates an expected pif, and then compares it to the functions generated pif (from two files)
PASS
16. test_bad_number_of_files
-This test inputs three files and zero files into the function to make sure it throws an assertion error
PASS
"""
@pytest.fixture
def generate_expected_one_file():
    """Write a simple stress/strain pif to one JSON file.

    Returns a dict with the file name and the expected pif.System."""
    fname = 'resources/simple_data.json'
    axis = np.linspace(0, 100)

    def timed_property(name):
        # stress, strain and both time axes all share the same 0..100 ramp
        return pif.Property(name=name,
                            scalars=list(axis),
                            conditions=pif.Value(name='time',
                                                 scalars=list(axis)))

    expected = pif.System(
        subSystems=None,
        properties=[timed_property('stress'), timed_property('strain')])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    return {'file_name': fname, 'expected': expected}
# NOTE(review): module-level duplicate of the one-file fixture's path;
# appears unused by the tests below -- candidate for removal after confirming.
filee = 'resources/simple_data.json'
@pytest.fixture
def generate_expected_two_files():
    """Write separate stress and strain pifs into two JSON files.

    Returns the file names and the two expected pif.System objects."""
    fname = {'stress': 'resources/simple_stress.json',
             'strain': 'resources/simple_strain.json'}
    time_axis = list(np.linspace(0, 100))
    systems = {
        'stress': pif.System(properties=[
            pif.Property(name='stress',
                         scalars=list(np.linspace(0, 100)),
                         conditions=pif.Value(name='time',
                                              scalars=time_axis))]),
        'strain': pif.System(properties=[
            pif.Property(name='strain',
                         scalars=list(np.linspace(0, 1)),
                         conditions=pif.Value(name='time',
                                              scalars=time_axis))]),
    }
    # dump each system into its own file
    for key in ('stress', 'strain'):
        with open(fname[key], 'w') as handle:
            pif.dump(systems[key], handle)
    return {'file_names': fname, 'expected': systems}
# NOTE(review): module-level duplicate of the two-file fixture's paths;
# appears unused by the tests below -- candidate for removal after confirming.
files = {'stress': 'resources/simple_stress.json',
         'strain': 'resources/simple_strain.json'}
@pytest.fixture
def generate_no_time_one_file():
    """Write a stress/strain pif with no time condition to one file."""
    fname = 'resources/simple_data_no_time.json'
    values = np.linspace(0, 100)
    expected = pif.System(
        subSystems=None,
        properties=[
            # stress carries an empty (nameless) condition, strain none at all
            pif.Property(name='stress',
                         scalars=list(values),
                         conditions=pif.Value(name=None)),
            pif.Property(name='strain',
                         scalars=list(values)),
        ])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    # only the name is needed: pifs without time are never converted
    return fname
@pytest.fixture
def generate_no_time_two_files():
    """Write stress-only and strain-only pifs, neither carrying time data."""
    fname = {'stress': 'resources/simple_stress_no_time.json',
             'strain': 'resources/simple_strain_no_time.json'}
    systems = {
        'stress': pif.System(properties=[
            pif.Property(name='stress',
                         scalars=list(np.linspace(0, 100)))]),
        'strain': pif.System(properties=[
            pif.Property(name='strain',
                         scalars=list(np.linspace(0, 1)))]),
    }
    # dump each system into its own file
    for key in ('stress', 'strain'):
        with open(fname[key], 'w') as handle:
            pif.dump(systems[key], handle)
    return {'file_names': fname, 'expected': systems}
@pytest.fixture
def generate_no_stress_one_file():
    """Write a strain-only pif (no stress property) to a single file.

    Returns the file name and the expected pif.System."""
    fname = 'resources/simple_data_no_stress.json'
    strain = np.linspace(0, 100)
    strain_time = np.linspace(0, 100)
    expected = pif.System(
        subSystems=None,
        properties=[
            pif.Property(name='strain',
                         scalars=list(strain),
                         conditions=pif.Value(
                             name='time',
                             scalars=list(strain_time)))
        ])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    # removed a stale commented-out StringIO variant of this return value
    return {
        'file_name': fname,
        'expected': expected
    }
@pytest.fixture
def generate_no_strain_one_file():
    """Write a stress-only pif (no strain property) to a single file."""
    fname = 'resources/simple_data_no_strain.json'
    axis = np.linspace(0, 100)
    expected = pif.System(
        subSystems=None,
        properties=[
            pif.Property(name='stress',
                         scalars=list(axis),
                         conditions=pif.Value(name='time',
                                              scalars=list(axis)))
        ])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    return {'file_name': fname, 'expected': expected}
@pytest.fixture
def generate_differ_times_one_file():
    """Write one pif whose stress and strain time axes end at random points."""
    fname = 'resources/differ_times.json'
    values = np.linspace(0, 100)

    def randomly_timed(name):
        # each property gets its own randomly-terminated time axis, so the
        # two axes (almost always) disagree
        return pif.Property(name=name,
                            scalars=list(values),
                            conditions=pif.Value(
                                name='time',
                                scalars=list(np.linspace(0, rnd.randint(1, 100)))))

    expected = pif.System(
        subSystems=None,
        properties=[randomly_timed('stress'), randomly_timed('strain')])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    return fname
@pytest.fixture
def generate_differ_times_two_files():
    """Write stress and strain files whose time axes end at different times."""
    fname = {'stress': 'resources/simple_stress_differ_times.json',
             'strain': 'resources/simple_strain_differ_times.json'}

    def build_system(name, top):
        # random end point makes the two files' time axes disagree
        return pif.System(properties=[
            pif.Property(name=name,
                         scalars=list(np.linspace(0, top)),
                         conditions=pif.Value(
                             name='time',
                             scalars=list(np.linspace(0, rnd.randint(1, 100)))))])

    with open(fname['stress'], 'w') as stress_file:
        pif.dump(build_system('stress', 100), stress_file)
    with open(fname['strain'], 'w') as strain_file:
        pif.dump(build_system('strain', 1), strain_file)
    return fname
@pytest.fixture
def generate_swapped_stress_strain_one_file():
    """Write one pif with the strain property listed before the stress one."""
    fname = 'resources/simple_swapped_data.json'
    axis = np.linspace(0, 100)

    def timed_property(name):
        return pif.Property(name=name,
                            scalars=list(axis),
                            conditions=pif.Value(name='time',
                                                 scalars=list(axis)))

    # strain first: the converter must find properties by name, not by order
    expected = pif.System(
        subSystems=None,
        properties=[timed_property('strain'), timed_property('stress')])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    return {'file_name': fname, 'expected': expected}
@pytest.fixture
def generate_two_files_both_stress_strain():
    """Write two files that EACH contain both a stress and a strain property."""
    fname = {'stress': 'resources/double_stress.json',
             'strain': 'resources/double_strain.json'}

    def full_system():
        # one system holding both properties on a shared 0..100 time axis
        time_axis = list(np.linspace(0, 100))
        return pif.System(properties=[
            pif.Property(name='stress',
                         scalars=list(np.linspace(0, 100)),
                         conditions=pif.Value(name='time',
                                              scalars=time_axis)),
            pif.Property(name='strain',
                         scalars=list(np.linspace(0, 1)),
                         conditions=pif.Value(name='time',
                                              scalars=time_axis))])

    with open(fname['stress'], 'w') as stress_file:
        pif.dump(full_system(), stress_file)
    with open(fname['strain'], 'w') as strain_file:
        pif.dump(full_system(), strain_file)
    return fname
@pytest.fixture
def generate_stress_redefined():
    """Write one pif that defines 'stress' twice and never defines strain."""
    fname = 'resources/stress_redefined.json'
    axis = np.linspace(0, 100)

    def stress_property():
        return pif.Property(name='stress',
                            scalars=list(axis),
                            conditions=pif.Value(name='time',
                                                 scalars=list(axis)))

    expected = pif.System(
        subSystems=None,
        properties=[stress_property(), stress_property()])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    return fname
@pytest.fixture
def generate_strain_redefined():
    """Write one pif that defines 'strain' twice and never defines stress."""
    fname = 'resources/strain_redefined.json'
    axis = np.linspace(0, 100)

    def strain_property():
        return pif.Property(name='strain',
                            scalars=list(axis),
                            conditions=pif.Value(name='time',
                                                 scalars=list(axis)))

    expected = pif.System(
        subSystems=None,
        properties=[strain_property(), strain_property()])
    with open(fname, 'w') as data:
        pif.dump(expected, data)
    return fname
# ---------------------------Begin Tests---------------------------
# NUM 1
def test_stress_strain_both_files(generate_two_files_both_stress_strain):
    """Two files that each define BOTH stress and strain must be rejected.

    Bug fix: the fixture returns a dict keyed 'stress'/'strain', but the old
    code indexed fname[0]/fname[1]; the resulting KeyError satisfied
    pytest.raises(Exception) before process_files ever ran, so the test
    passed vacuously."""
    fname = generate_two_files_both_stress_strain
    with pytest.raises(Exception):
        process_files([fname['stress'], fname['strain']])
# NUM 2
def test_stress_redefined(generate_stress_redefined):
    """Inputs one file with stress defined twice and no strain.

    pytest.raises replaces the manual try/raise/except bookkeeping; the
    converter is still required to raise IOError for this bad input."""
    fname = generate_stress_redefined
    with pytest.raises(IOError):
        process_files([fname])
# NUM 3
def test_strain_redefined(generate_strain_redefined):
    """Inputs one file with strain defined twice and no stress.

    pytest.raises replaces the manual try/raise/except bookkeeping; the
    converter is still required to raise IOError for this bad input."""
    fname = generate_strain_redefined
    with pytest.raises(IOError):
        process_files([fname])
# NUM 4
def test_differ_times_one_file(generate_differ_times_one_file):
    """A single file whose stress and strain time axes disagree must raise."""
    with pytest.raises(Exception):
        process_files([generate_differ_times_one_file])
# NUM 5
def test_differ_times_two_files(generate_differ_times_two_files):
    """Two files with conflicting time axes must be rejected.

    Bug fix: the fixture returns a dict keyed 'stress'/'strain'; the old
    fname[0]/fname[1] raised KeyError, satisfying pytest.raises before
    process_files ever ran (vacuous pass)."""
    fname = generate_differ_times_two_files
    with pytest.raises(Exception):
        process_files([fname['stress'], fname['strain']])
# NUM 6
def test_time_not_in(generate_no_time_one_file):
    """A single file that carries no time data anywhere must be rejected."""
    with pytest.raises(Exception):
        process_files([generate_no_time_one_file])
# NUM 7
def test_stress_not_in(generate_no_stress_one_file):
    """A file with no stress property must be rejected.

    Bug fix: the fixture returns {'file_name': ..., 'expected': ...}; the old
    code passed the whole dict to process_files, so any exception (e.g. a
    type error on the dict) satisfied pytest.raises without exercising the
    missing-stress path."""
    fname = generate_no_stress_one_file['file_name']
    with pytest.raises(Exception):
        process_files([fname])
# NUM 8
def test_strain_not_in(generate_no_strain_one_file):
    """A file with no strain property must be rejected.

    Bug fixes: pass the fixture's 'file_name' entry instead of the whole
    dict (the dict made the test pass for the wrong reason), and drop the
    unused 'as f' capture."""
    fname = generate_no_strain_one_file['file_name']
    with pytest.raises(Exception):
        process_files([fname])
# NUM 9
def test_time_not_in_two_files(generate_no_time_two_files):
    """Two files with no time data must be rejected.

    Bug fix: the fixture returns {'file_names': {...}, 'expected': {...}};
    the old fname[0]/fname[1] raised KeyError, satisfying pytest.raises
    before process_files ever ran (vacuous pass)."""
    fname = generate_no_time_two_files['file_names']
    with pytest.raises(Exception):
        process_files([fname['stress'], fname['strain']])
# NUM 10
def test_stress_not_in_two_files(generate_no_stress_one_file):
    """Two strain-only files must be rejected.

    Bug fix: pass the fixture's 'file_name' entry instead of the whole
    return dict, so process_files actually reads the strain-only file."""
    fname = generate_no_stress_one_file['file_name']
    with pytest.raises(Exception):
        process_files([fname, fname])
# NUM 11
def test_strain_not_in_two_files(generate_no_strain_one_file):
    """Two stress-only files must be rejected.

    Bug fix: pass the fixture's 'file_name' entry instead of the whole
    return dict, so process_files actually reads the stress-only file."""
    fname = generate_no_strain_one_file['file_name']
    with pytest.raises(Exception):
        process_files([fname, fname])
# NUM 12
def test_swapped_stress_strain_one_file(generate_swapped_stress_strain_one_file):
    """Input swapped stress strain into function"""
    einfo = generate_swapped_stress_strain_one_file
    expected = einfo['expected']
    fname = einfo['file_name']
    results = process_files([fname])
    # NOTE(review): the fixture lists strain FIRST, so expected.properties[0]
    # is strain while it is compared against results.properties[0] as stress;
    # the check only passes because the fixture uses identical
    # linspace(0, 100) data for both properties -- confirm the intended
    # pairing (matching by property name would be stronger).
    A = results.properties[0].scalars
    B = expected.properties[0].scalars
    C = results.properties[1].scalars
    D = expected.properties[1].scalars
    assert np.array_equal(A, B), \
        'Result and expected pifs differ in stress values'
    assert np.array_equal(C, D), \
        'Result and expected pifs differ in strain values'
# NUM 13
def test_swapped_stress_strain_two_files(generate_expected_two_files):
    """Passing the two files as [strain, stress] must still merge correctly.

    Fix: corrected the 'snd' typo in the strain failure message; the long
    run of metadata asserts is driven from a table so each field keeps its
    original message."""
    einfo = generate_expected_two_files
    expected = einfo['expected']
    fname = einfo['file_names']
    # deliberately swapped input order
    results = process_files([fname['strain'], fname['stress']])
    # compare the pifs property-by-property
    assert np.array_equal(results.properties[0].scalars,
                          expected['stress'].properties[0].scalars), \
        'Results and expected pifs differ in stress values'
    assert np.array_equal(results.properties[1].scalars,
                          expected['strain'].properties[0].scalars), \
        'Results and expected pifs differ in strain values'
    assert len(results.properties) == \
        len(expected['stress'].properties) + \
        len(expected['strain'].properties), \
        'The length of the result and expected properties lists do not match.'
    # every metadata field on the merged system should be unset
    unset_fields = [
        ('uid', 'Result UID should be None'),
        ('names', 'Result should not be named'),
        ('classifications', 'Result should not have any classifications.'),
        ('ids', 'Result ids should be None'),
        ('source', 'Result source should be None'),
        ('quantity', 'Result quantity should be None'),
        ('preparation', 'Result preparation should be None'),
        ('subSystems', 'Results subSystem should be None'),
        ('references', 'Results references should be None'),
        ('contacts', 'Results contacts should be None'),
        ('licenses', 'Results licenses should be None'),
        ('tags', 'Results tags should be None'),
    ]
    for attr, message in unset_fields:
        assert getattr(results, attr, None) is None, message
# NUM 14
def test_process_single_file(generate_expected_one_file):
    """process_files must reproduce the expected pif from a single file."""
    einfo = generate_expected_one_file
    expected = einfo['expected']
    results = process_files([einfo['file_name']])
    # compare the scalar data property-by-property
    assert np.array_equal(results.properties[0].scalars,
                          expected.properties[0].scalars), \
        'Result and expected pifs differ in stress values'
    assert np.array_equal(results.properties[1].scalars,
                          expected.properties[1].scalars), \
        'Result and expected pifs differ in strain values'
    assert len(results.properties) == \
        len(expected.properties), \
        'The length of the result and expected properties lists do not match.'
    # every metadata field on the resulting system should be unset
    unset_fields = [
        ('uid', 'Result UID should be None'),
        ('names', 'Result should not be named'),
        ('classifications', 'Result should not have any classifications.'),
        ('ids', 'Result ids should be None'),
        ('source', 'Result source should be None'),
        ('quantity', 'Result quantity should be None'),
        ('preparation', 'Result preparation should be None'),
        ('subSystems', 'Results subSystem should be None'),
        ('references', 'Results references should be None'),
        ('contacts', 'Results contacts should be None'),
        ('licenses', 'Results licenses should be None'),
        ('tags', 'Results tags should be None'),
    ]
    for attr, message in unset_fields:
        assert getattr(results, attr, None) is None, message
# NUM 15
def test_process_two_filenames(generate_expected_two_files):
    """process_files must merge a stress file and a strain file into one pif.

    Fix: corrected the 'snd' typo in the strain failure message; the long
    run of metadata asserts is driven from a table so each field keeps its
    original message."""
    einfo = generate_expected_two_files
    expected = einfo['expected']
    fname = einfo['file_names']
    results = process_files([fname['stress'], fname['strain']])
    # compare the pifs property-by-property
    assert np.array_equal(results.properties[0].scalars,
                          expected['stress'].properties[0].scalars), \
        'Results and expected pifs differ in stress values'
    assert np.array_equal(results.properties[1].scalars,
                          expected['strain'].properties[0].scalars), \
        'Results and expected pifs differ in strain values'
    assert len(results.properties) == \
        len(expected['stress'].properties) + \
        len(expected['strain'].properties), \
        'The length of the result and expected properties lists do not match.'
    # every metadata field on the merged system should be unset
    unset_fields = [
        ('uid', 'Result UID should be None'),
        ('names', 'Result should not be named'),
        ('classifications', 'Result should not have any classifications.'),
        ('ids', 'Result ids should be None'),
        ('source', 'Result source should be None'),
        ('quantity', 'Result quantity should be None'),
        ('preparation', 'Result preparation should be None'),
        ('subSystems', 'Results subSystem should be None'),
        ('references', 'Results references should be None'),
        ('contacts', 'Results contacts should be None'),
        ('licenses', 'Results licenses should be None'),
        ('tags', 'Results tags should be None'),
    ]
    for attr, message in unset_fields:
        assert getattr(results, attr, None) is None, message
def test_bad_number_of_files():
    """process_files must reject anything other than one or two files."""
    triple = ['resources/simple_data.json'] * 3
    with pytest.raises(Exception):
        process_files(triple)
    with pytest.raises(Exception):
        process_files([])
|
988,102 | 08363b8c1e8da250b29e0d728723f6622a6ba47a | # Resource: http://wiki.scipy.org/Cookbook/Matplotlib
import excelLoad as eL
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Scale U.S. petroleum-stock values into a small range and drop the first
# three entries; NOTE(review): the /10000, -.12, /.07 constants appear to
# normalize the series toward [0, 1] for the color thresholds below --
# confirm against the excelLoad data.
array = (eL.total_petroleum_stocks.T['United States'].values[3:]/10000 - .12 ) / .07
# 30 yearly values laid out as a 6x5 grid (1983-2012)
matrix = array.reshape(6,5)
# QUESTION 3:
# Try to create a Hinton diagram, depicting consumption, positive or negative
# and degrees
# http://matplotlib.org/examples/specialty_plots/hinton_demo.html
# Main issue was just reformatting the dataframe into proper size/value range
def hinton(matrix, max_weight=None, ax=None):
    """Draw Hinton diagram for visualizing a weight matrix.

    Each cell of *matrix* becomes a colored square whose side scales with
    sqrt(|value|); color encodes capacity thresholds (red < 0.4,
    orange < 0.7, blue < 0.9, else white) and each square is labeled with a
    year counting up from 1983.
    """
    ax = ax if ax is not None else plt.gca()
    if not max_weight:
        # round the largest magnitude up to the next power of two
        max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))
    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    year = 1983
    plt.title('U.S. Oil Reserves: 1983-2012', color='w', weight='bold')
    # Subtitles in a graph
    # http://stackoverflow.com/questions/1388450/giving-graphs-a-subtitle-in-matplotlib
    plt.figtext(.5,.04,'Oil Reserves as measured in terms of Millions of Barrels ',fontsize=10,ha='center')
    plt.figtext(.5,.01,'Red < 40% of max capacity, Orange < 70% of max capacity, Blue < 90% of max capacity',fontsize=10,ha='center')
    for (x,y),w in np.ndenumerate(matrix):
        if w < .4:
            color = 'red'
        elif w < .7:
            color = 'orange'
        elif w < .9:
            color = 'blue'
        else:
            color = 'white'
        size = np.sqrt(np.abs(w))
        # draw the cell as a rectangle centered near (x, y)
        rect = plt.Rectangle([x - size / 2.5, y - size / 2], size, size,
                             facecolor=color, edgecolor=color, label = "X")
        ax.add_patch(rect)
        # Removed unused rx/ry/cx/cy locals: the old line
        # 'cy = ry = rect.get_height()/2.0' clobbered ry with what was almost
        # certainly meant to be 'cy = ry + ...', and none of the four values
        # were ever used.
        ax.annotate(year, (x - size/4, y + size/3), color='w', weight='bold')
        year += 1
    ax.autoscale_view()
    ax.invert_yaxis()
    ax.plot(label = "U.S. Oil Reserves")
if __name__ == '__main__':
    # # plt.show()
    # print np.random.rand(20, 20) - 0.5
    # render the Hinton diagram for the petroleum-stocks matrix built above
    hinton(matrix)
    plt.show()
# # How do you map to a shared row/column, other than the above, which takes too long |
988,103 | 5c6c21278368458adbc95b26948788c9c7991320 | from django.urls import path
from apps.articles.views import main_page, SearchResultsView
# URL namespace, e.g. reverse('articles:main-page')
app_name = 'articles'
urlpatterns = [
    # function-based search page
    path('search/', main_page, name='main-page'),
    # class-based results listing
    path('results/', SearchResultsView.as_view(), name='search-results')
]
|
988,104 | ecfc4973bab9a115b7c5b5654d6fa7fb89376ef4 | import os
from html.parser import HTMLParser
import datetime
# accumulated table rows: result[i][0] is the row's bgcolor (or '' when the
# <tr> had none); the remaining entries are stripped cell texts in order
result = []
# page <title> text, captured by the parser and used to name the csv output
title = ""
class Parser(HTMLParser):
    """Collect <tr> rows and the page <title> into module-level globals.

    Each <tr> appends a new list to ``result`` whose first entry is the
    row's bgcolor attribute ('' when absent); text encountered while
    inside the row is appended by handle_data.
    """
    beginTr = False  # currently inside a <tr>
    beginTitle = False  # currently inside <title>
    row = -1  # index of the row being filled in ``result``
    def handle_starttag(self, tag, attrs):
        if tag == 'tr':
            self.beginTr = True
            self.row += 1
            flag = False  # did this row declare a bgcolor?
            for key, value in attrs:
                if key == 'bgcolor':
                    result.append([value])
                    flag = True
            if not flag:
                result.append([''])
        if tag == 'title':
            self.beginTitle = True
        pass
    def handle_endtag(self, tag):
        if tag == 'tr':
            self.beginTr = False
        if tag == 'title':
            self.beginTitle = False
    def handle_data(self, data):
        # NOTE(review): text inside any tag nested within the <tr> is also
        # captured here; the fixed-index lookups downstream rely on that.
        if self.beginTr:
            result[self.row].append(data.strip())
        if self.beginTitle:
            global title
            title = data.strip()
# parse the saved wikipedia page; fills module-level ``result`` and ``title``
with open('List of cities in China.html', 'r') as fd:
    parser = Parser()
    parser.feed(fd.read())
cities = []
# first pass: write a csv named after the page title and collect each row
# (with its bgcolor) into ``cities`` for later sorting
with open(f'{title}.csv', 'w') as fd:
    fd.write(','.join(filter(lambda k: len(k) > 0, result[0])) + '\n')
    for city in result[1:]:
        # NOTE(review): the numeric indices assume a fixed cell layout of the
        # scraped table; 'ZZZZ-none' and 'direct administration' are sentinel
        # cell values that shift which columns hold the real data -- verify
        # against the source HTML before changing anything here.
        if city[9] == 'ZZZZ-none':
            fd.write(f'{city[2]},{city[4]},{city[7]},{city[10]},{city[12]}\n')
            cities.append([city[0], city[2], city[4],
                           city[7], city[10], city[12]])
        elif city[9] == 'direct administration' or city[8] == 'ZZZZ-none':
            fd.write(f'{city[2]},{city[4]},{city[6]},{city[9]},{city[11]}\n')
            cities.append([city[0], city[2], city[4],
                           city[6], city[9], city[11]])
        elif city[10] == 'direct administration':
            fd.write(f'{city[2]},{city[5]},{city[7]},{city[10]},{city[12]}\n')
            cities.append([city[0], city[2], city[5],
                           city[7], city[10], city[12]])
        else:
            fd.write(f'{city[2]},{city[4]},{city[6]},{city[8]},{city[10]}\n')
            cities.append([city[0], city[2], city[4],
                           city[6], city[8], city[10]])
# second pass: same column selection, but only rows that declared a bgcolor
with open(f'color.csv', 'w') as fd:
    fd.write(','.join(filter(lambda k: len(k) > 0, result[0])) + '\n')
    for city in result[1:]:
        if len(city[0].strip()) > 0:
            if city[9] == 'ZZZZ-none':
                fd.write(
                    f'{city[2]},{city[4]},{city[7]},{city[10]},{city[12]}\n')
            elif city[9] == 'direct administration' or city[8] == 'ZZZZ-none':
                fd.write(
                    f'{city[2]},{city[4]},{city[6]},{city[9]},{city[11]}\n')
            elif city[10] == 'direct administration':
                fd.write(
                    f'{city[2]},{city[5]},{city[7]},{city[10]},{city[12]}\n')
            else:
                fd.write(
                    f'{city[2]},{city[4]},{city[6]},{city[8]},{city[10]}\n')
# sort the collected rows by founding date (last column, ISO yyyy-mm-dd)
cities = sorted(
    cities, key=lambda city: datetime.datetime.strptime(city[-1], '%Y-%m-%d'))
# emit a standalone html table of the rows, now ordered by founding date;
# the three branches reproduce the source page's three cell markups
with open('output.html', 'w') as out:
    content = ''
    for city in cities:
        if city[3] == 'autonomous' or city[3] == 'municipal':
            content += f'''
<tr bgcolor="{city[0]}">
<td><a href="{city[1].replace(' ','_')}" title="{city[1]}">{city[1]}</a></td>
<td><span style="font-size:125%;"><span lang="zh-CN" title="Chinese language text">{city[2]}</span></span></td>
<td><span style="font-size:85%;"><i>{city[3]}</i></span></td>
<td><span style="font-size:85%;"><i>{city[4]}</i></span></td>
<td>{city[5]}
</td></tr>
'''
        elif city[4] != 'direct administration':
            content += f'''
<tr bgcolor="{city[0]}">
<td><a href="{city[1].replace(' ','_')}" title="{city[1]}">{city[1]}</a></td>
<td><span style="font-size:125%;"><span lang="zh-CN" title="Chinese language text">{city[2]}</span></span></td>
<td><a href="{city[3]}" title="{city[3]}">{city[3]}</a></td>
<td><a href="{city[4]}" title="{city[4]}">{city[4]}</a></td>
<td>{city[5]}
</td></tr>
'''
        else:
            content += f'''
<tr bgcolor="{city[0]}">
<td><a href="{city[1].replace(' ','_')}" title="{city[1]}">{city[1]}</a></td>
<td><span style="font-size:125%;"><span lang="zh-CN" title="Chinese language text">{city[2]}</span></span></td>
<td><a href="{city[3]}" title="{city[3]}">{city[3]}</a></td>
<td><span style="font-size:85%;"><i>{city[4]}</i></span></td>
<td>{city[5]}
</td></tr>
'''
    # plain (non-f) string, so the CSS braces need no escaping
    template = '''
<!DOCTYPE html>
<html class="client-nojs" lang="en" dir="ltr">
<head>
<meta charset="UTF-8"/>
<title>Cities in China</title>
<style type="text/css">
table
{
border-collapse:collapse;
}
table, td, th
{
border:1px solid black;
}
</style>
</head>
<body>
<table class="wikitable sortable selected_now jquery-tablesorter" id="cities">
<thead><tr>
<th class="headerSort" tabindex="0" role="columnheader button" title="Sort ascending">City</th>
<th class="headerSort" tabindex="0" role="columnheader button" title="Sort ascending">Chinese</th>
<th class="headerSort" tabindex="0" role="columnheader button" title="Sort ascending">Province</th>
<th class="headerSort" tabindex="0" role="columnheader button" title="Sort ascending">Prefecture</th>
<th class="headerSort" tabindex="0" role="columnheader button" title="Sort ascending">Founded</th>
</tr></thead><tbody>''' + content + '''
</tbody><tfoot></tfoot></table>
</body>
</html>
'''
    out.write(template)
|
988,105 | 948cf36787cd95091c6cb36123a74a63d28c0b03 | from .adafruit_st77xx import Adafruit_ST77XX |
988,106 | 00da12991d425552960e2e119f847270b19d3623 | #!/usr/bin/env python2
import sys, re
# echo every stdin line reversed (surrounding whitespace stripped first)
for raw_line in sys.stdin.readlines():
    stripped = raw_line.strip()
    print(stripped[::-1])
|
988,107 | 9a84d4be830f1f49e454ced46d039cd50e938084 | # _____ ?
#
# ___ writeTofile data filename
# # Convert binary data to proper format and write it on Hard Disk
# w__ o.. ? __ __ file
# ?.w.. ?
# print("Stored blob data into: " ? "\n")
#
# ___ readBlobData empId
# ___
# sqliteConnection _ ?.c.. 'SQLite_Python.db'
# cursor _ ?.c..
# print("Connected to SQLite")
#
# sql_fetch_blob_query _ """S.. _ f.. new_employee w.. id = ?"""
# ?.e.. ? ?
# record _ ?.f_a..
# ___ row __ ?
# print("Id = ", ? 0], "Name = ", ? 1
# name _ ? 1
# photo _ ? 2
# resumeFile _ ? 3
#
# print("Storing employee image and resume on disk \n")
# photoPath _ "E:\pynative\Python\photos\db_data\\" + ? + ".jpg"
# resumePath _ "E:\pynative\Python\photos\db_data\\" + ? + "_resume.txt"
# ? p.. pP..
# ? rF.. rP..
#
# ?.c..
#
# _____ ?.E.. __ error
# print("Failed to read blob data f.. sqlite table" ?
# f..
# __ (?
# ?.c..
# print("sqlite connection is closed")
#
# ? 1
# ? 2
#
# # Output:
# #
# # Connected to SQLite
# # Id = 1 Name = Smith
# # Storing employee image and resume on disk
# #
# # Stored blob data into: E:\pynative\Python\photos\db_data\Smith.jpg
# #
# # Stored blob data into: E:\pynative\Python\photos\db_data\Smith_resume.txt
# #
# # sqlite connection is closed
# #
# # Connected to SQLite
# # Id = 2 Name = David
# # Storing employee image and resume on disk
# #
# # Stored blob data into: E:\pynative\Python\photos\db_data\David.jpg
# #
# # Stored blob data into: E:\pynative\Python\photos\db_data\David_resume.txt
# #
# # sqlite connection is closed |
988,108 | 126d9369c001990491b4a1f1a147b348c05d8ac9 | from collections import deque
class ZigzagIterator:
    """Yield elements of two lists alternately (zigzag order).

    A deque of (list, index) pairs is rotated on every call, so next() is
    O(1) instead of re-slicing the remaining list (the old ``nxt[1:]``
    copied the tail on every call).

    Fixes: the ``List[int]`` annotations referenced a name this module never
    imports, which raised NameError at class-definition time; they are now
    quoted so they are not evaluated. The superseded commented-out
    implementation was removed.
    """
    def __init__(self, v1: 'List[int]', v2: 'List[int]'):
        # queue holds (source list, next index to emit); empty lists are
        # skipped so hasNext() is a simple emptiness check
        self.q = deque()
        for v in (v1, v2):
            if v:
                self.q.append((v, 0))

    def next(self) -> int:
        # take the front list, emit its current element, and re-queue it
        # at the back if elements remain
        v, i = self.q.popleft()
        if i + 1 < len(v):
            self.q.append((v, i + 1))
        return v[i]

    def hasNext(self) -> bool:
        return len(self.q) > 0
988,109 | a84dec98d555041ee450dfadfd033252658c169c | import random
import time
import math
import os
#inp = raw_input()
print time.localtime(time.time())
def s():
return time.asctime(time.localtime(time.time()))
print s()
x = random.randint(0,100)
print x
print math.sqrt(x)
dict1 = dict()
dict1['red'] = '1'
dict1['blue']= '2'
textfile = open('test.txt')
text = textfile.readlines()
print text[5]
for i in range(1,5):
print i
for x in 'shana'[::-1]:
print x
textfile.close()
textfile2 = open('test.txt', 'a')
textfile2.write('test')
textfile2.writelines('test2\n')
textfile2.close()
textfile3 = open('test2.txt', 'w')
textfile3.writelines('check this out')
textfile3.close()
a = 9999999999999999999999999999999999
b = 2
while a > b:
a = math.sqrt(a)
print a
def
|
988,110 | 83d7f232a388bb831f29b57138c9169c40857a25 | import unittest
import inspect
from lizard import analyze_file, FileAnalyzer, get_extensions
def get_go_function_list(source_code):
    """Parse *source_code* as a Go file and return the detected functions."""
    analysis = analyze_file.analyze_source_code("a.go", source_code)
    return analysis.function_list
class Test_parser_for_Go(unittest.TestCase):
    """Unit tests for lizard's Go parser.

    Each test feeds a Go snippet through get_go_function_list and checks
    the detected functions: count, name, parameter count and cyclomatic
    complexity.
    """
    def test_empty(self):
        """Empty source yields no functions."""
        functions = get_go_function_list("")
        self.assertEqual(0, len(functions))
    def test_no_function(self):
        """Top-level statements without a func are not counted."""
        result = get_go_function_list('''
            for name in names {
                print("Hello, \(name)!")
            }
                ''')
        self.assertEqual(0, len(result))
    def test_one_function(self):
        """A bare func is detected with 0 params and complexity 1."""
        result = get_go_function_list('''
            func sayGoodbye() { }
                ''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)
        self.assertEqual(0, result[0].parameter_count)
        self.assertEqual(1, result[0].cyclomatic_complexity)
    def test_one_with_parameter(self):
        """Both parameters of the func are counted."""
        result = get_go_function_list('''
            func sayGoodbye(personName string, alreadyGreeted chan bool) { }
                ''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)
        self.assertEqual(2, result[0].parameter_count)
    def test_one_function_with_return_value(self):
        """A return type does not confuse name detection."""
        result = get_go_function_list('''
            func sayGoodbye() string { }
                ''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)
    def test_one_function_with_complexity(self):
        """An if branch raises cyclomatic complexity to 2."""
        result = get_go_function_list('''
            func sayGoodbye() { if ++diceRoll == 7 { diceRoll = 1 }}
                ''')
        self.assertEqual(2, result[0].cyclomatic_complexity)
    def test_interface(self):
        """Interface method signatures are not counted as functions."""
        result = get_go_function_list('''
            type geometry interface{
                area() float64
                perim() float64
            }
            func sayGoodbye() { }
                ''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)
    def test_interface_followed_by_a_class(self):
        """An interface followed by a class body yields no functions."""
        result = get_go_function_list('''
            type geometry interface{
                area() float64
                perim() float64
            }
            class c { }
                ''')
        self.assertEqual(0, len(result))
|
988,111 | c4dccde9b8896b7c336fa1cefc755615eca185bb | import time
import tkinter as tk
from tkinter import *
# Tkinter digital clock: a single label refreshed by a self-scheduling
# callback on the Tk event loop.
root=Tk()
root.geometry("500x200+0+0")
root.title("Real-time Digital Clock")
# Big red-on-black label that shows day name, time and date.
clock_frame=Label(root,font=('times',100,'bold'),bg='black',fg='red')
clock_frame.pack(fill='both',expand=1)
def ticks(time1=""):
    """Refresh the clock label; time1 carries the previously shown text."""
    # Get the current local time from the system
    time2= time.strftime('%A\n%I:%M:%S\n%D')
    # if the time string has changed, update it
    if time2 !=time1:
        time1=time2
        clock_frame.config(text=time2)
    # Reschedules itself every 200 ms.  NOTE(review): no argument is
    # passed, so time1 is "" on every call and config always runs.
    clock_frame.after(200,ticks)
ticks()
mainloop()
|
988,112 | c6d8b41f9f0265d57a152513fbca937e486b0d97 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib.auth import logout
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.views.generic.edit import FormView
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from scientificWork.models import Publication, Rand, Participation
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render_to_response
from datetime import datetime
from moevmCommon.models import UserProfile
# Constants
MAX_ELEMENT_PAGE = 3; # maximum number of items per page
def isint(value):
    """Return True when *value* parses as a base-10 integer, else False."""
    try:
        int(value)
    except ValueError:
        return False
    return True
def index(request):
    """Landing page; renders the static index template with no context."""
    return render(request,'scientificWork/index.html')
def competitions(request):
    """Paginated, filterable list of competition participations.

    GET filters: userName ("Last First Patronymic"), type, name, place,
    date (year or dd-MM-yyyy), reiteration, rank; button_reset clears all
    filters; page selects the page.
    """
    comp_list = Participation.objects.all()
    users = UserProfile.objects.all()
    users_with_names = User.objects.all()
    userName = ''
    t=''
    n = ''
    dt = ''
    p = ''
    r=''
    rk=''
    if request.GET:
        userName = request.GET.get('userName')
        t=request.GET.get('type')
        n = request.GET.get('name')
        p = request.GET.get('place')
        dt = request.GET.get('date')
        r = request.GET.get('reiteration')
        rk = request.GET.get('rank')
        if (userName != ''):
            # Resolve the typed name to a set of UserProfile ids (A).
            userNameList = userName.split()
            users_with_names = users_with_names.filter(last_name__icontains=userNameList[0])
            if len(userNameList) > 1: users_with_names = users_with_names.filter(first_name__icontains=userNameList[1])
            if users_with_names:
                A = []
                for item in users_with_names:
                    users = UserProfile.objects.all()
                    users = users.filter(user_id=item.id)
                    if len(userNameList) > 2:
                        users = users.filter(patronymic__icontains=userNameList[2])
                    if users.count() > 0:
                        A.append(users[0].id)
                #user_ids = users_with_names[0].id
                comp_list = comp_list.filter(user_id__in=A)
        if (t != ''): comp_list = comp_list.filter(type=t)
        if (n != ''): comp_list = comp_list.filter(name=n)
        if (p != ''): comp_list = comp_list.filter(place=p)
        if (dt != ''):
            datetime_objects = dt.split("-")
            if len(datetime_objects) == 1:
                if isint(dt):
                    comp_list = comp_list.filter(date__year=int(dt))
                else:
                    # HACK: non-numeric single token -> filter on a value
                    # that matches nothing, i.e. an empty result set.
                    comp_list = comp_list.filter(type='sdafsdfasdf');
            else:
                # NOTE(review): '%M' is minutes, not months -- dd-MM-yyyy
                # parsing presumably intended '%m'; confirm.
                datetime_objects = datetime.strptime(dt, '%d-%M-%Y').strftime('%Y-%M-%d')
                comp_list = comp_list.filter(date=datetime_objects)
        if (r != ''): comp_list = comp_list.filter(reiteration=r)
        if (rk != ''): comp_list = comp_list.filter(rank=rk)
        if 'button_reset' in request.GET:
            comp_list=Participation.objects.all()
    paginator = Paginator(comp_list, MAX_ELEMENT_PAGE)
    page = request.GET.get('page')
    try:
        comp_list = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        comp_list = paginator.page(1)
    except EmptyPage:
        comp_list = paginator.page(paginator.num_pages)
    return render(request, 'scientificWork/competitions.html',
                  {'comps': comp_list,
                   't': t,
                   'p': p,
                   'n': n,
                   'dt': dt,
                   'rk': rk,
                   'r': r,
                   'userName': userName,
                   })
def publications(request):
    """Paginated, filterable list of publications.

    Mirrors competitions(): each non-empty GET parameter adds an exact
    filter; userName resolves "Last First Patronymic" to profile ids;
    date accepts a bare year or dd-MM-yyyy; button_reset clears filters.
    """
    s = Publication.objects.all()
    users_with_names = User.objects.all()
    userName = ''
    pH = ''
    pl = ''
    tp = ''
    dt = ''
    vl = ''
    uvl = ''
    ed = ''
    nm = ''
    type = ''
    ISBN = ''
    number = ''
    editor = ''
    nameSbornik = ''
    reiteration = ''
    if request.GET:
        userName = request.GET.get('userName')
        pH = request.GET.get('publishingHouseName')
        pl = request.GET.get('place')
        tp = request.GET.get('typePublication')
        dt = request.GET.get('date')
        vl = request.GET.get('volume')
        uvl = request.GET.get('unitVolume')
        ed = request.GET.get('edition')
        nm = request.GET.get('bookName')
        type = request.GET.get('type')
        ISBN = request.GET.get('isbn')
        number = request.GET.get('number')
        editor = request.GET.get('editor')
        nameSbornik = request.GET.get('nameSbornik')
        reiteration = request.GET.get('reiteration')
        if (userName != ''):
            # Resolve the typed name to a set of UserProfile ids (A).
            userNameList = userName.split()
            users_with_names = users_with_names.filter(last_name__icontains=userNameList[0])
            if len(userNameList) > 1: users_with_names = users_with_names.filter(first_name__icontains=userNameList[1])
            if users_with_names:
                A = []
                for item in users_with_names:
                    users = UserProfile.objects.all()
                    users = users.filter(user_id=item.id)
                    if len(userNameList) > 2:
                        users = users.filter(patronymic__icontains=userNameList[2])
                    if users.count() > 0:
                        A.append(users[0].id)
                #user_ids = users_with_names[0].id
                s = s.filter(user_id__in=A)
        if (pH != ''): s = s.filter(publishingHouseName=pH)
        if (pl != ''): s = s.filter(place=pl)
        if (tp != ''): s = s.filter(typePublication=tp)
        if (dt != ''):
            datetime_objects = dt.split("-")
            if len(datetime_objects) == 1:
                if isint(dt):
                    s = s.filter(date__year=int(dt))
                else:
                    # HACK: match-nothing filter for unparseable dates.
                    s = s.filter(type='-23534fdsg')
            else:
                # NOTE(review): '%M' is minutes, not months; confirm.
                datetime_objects = datetime.strptime(dt, '%d-%M-%Y').strftime('%Y-%M-%d')
                s = s.filter(date=datetime_objects)
        if (vl != ''): s = s.filter(volume=vl)
        if (uvl != ''): s = s.filter(unitVolume=uvl)
        if (ed != ''): s = s.filter(edition=ed)
        if (nm != ''): s = s.filter(bookName=nm)
        if (type != ''): s = s.filter(type=type)
        if (ISBN != ''): s = s.filter(isbn=ISBN)
        if (number != ''): s = s.filter(number=number)
        if (editor != ''): s = s.filter(editor=editor)
        if (nameSbornik != ''): s = s.filter(nameSbornik=nameSbornik)
        if (reiteration != ''): s = s.filter(reiteration=reiteration)
        if 'button_reset' in request.GET:
            s = Publication.objects.all()
    paginator = Paginator(s, MAX_ELEMENT_PAGE)
    page = request.GET.get('page')
    try:
        s = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        s = paginator.page(1)
    except EmptyPage:
        s = paginator.page(paginator.num_pages)
    return render(request, 'scientificWork/publications.html',
                  {'notes': s,
                   'userName': userName,
                   'pH': pH,
                   'pl': pl,
                   'tp': tp,
                   'dt': dt,
                   'vl': vl,
                   'uvl': uvl,
                   'ed': ed,
                   'nm': nm,
                   'type': type,
                   'ISBN': ISBN,
                   'number': number,
                   'editor': editor,
                   'nameSbornik': nameSbornik,
                   'reiteration': reiteration
                   })
def rads(request):
    """Paginated, filterable list of R&D works (Rand).

    GET filters: name, cipher, userName ("Last First Patronymic");
    button_reset clears all filters; page selects the page.
    """
    rand_list = Rand.objects.all()
    users = UserProfile.objects.all()
    users_with_names = User.objects.all()
    userName = ''
    n = ''
    c = ''
    if request.GET:
        userName = request.GET.get('userName')
        n = request.GET.get('name')
        c = request.GET.get('cipher')
        if (n != ''): rand_list = rand_list.filter(name=n)
        if (c != ''): rand_list = rand_list.filter(cipher=c)
        if (userName != ''):
            # Resolve the typed name to a set of UserProfile ids (A),
            # same scheme as competitions()/publications().
            userNameList = userName.split(" ")
            users_with_names = users_with_names.filter(last_name__icontains=userNameList[0])
            if len(userNameList) > 1: users_with_names = users_with_names.filter(first_name__icontains=userNameList[1])
            if users_with_names:
                A = []
                for item in users_with_names:
                    users = UserProfile.objects.all()
                    users = users.filter(user_id=item.id)
                    if len(userNameList) > 2:
                        users = users.filter(patronymic__icontains=userNameList[2])
                    if users.count() > 0:
                        A.append(users[0].id)
                rand_list = rand_list.filter(user_id__in=A)
        if 'button_reset' in request.GET:
            rand_list = Rand.objects.all()
    paginator = Paginator(rand_list, MAX_ELEMENT_PAGE)
    page = request.GET.get('page')
    try:
        rand_list = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        rand_list = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        # BUG FIX: this result was previously assigned to an unused name
        # ("rands"), so out-of-range pages rendered the whole unpaginated
        # queryset instead of the last page.
        rand_list = paginator.page(paginator.num_pages)
    return render(request, 'scientificWork/rads.html', {"rands": rand_list, 'n': n, 'c': c, 'userName': userName,})
def user_login(request):
    """Authenticate POSTed credentials and start a session.

    GET renders the login form; POST authenticates and redirects on
    success, or returns a plain error response.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/scientificWork/')
            else:
                return HttpResponse("Your account is disabled.")
        else:
            # NOTE(review): printing the plaintext password to stdout is a
            # security problem -- should be removed or redacted.
            print "Invalid login details: {0}, {1}".format(username, password)
            return HttpResponse("Invalid login details supplied.")
    else:
        return render(request, 'scientificWork/login.html', {})
# Use the login_required() decorator to ensure that only authenticated users can access this view.
@login_required
def user_logout(request):
    # Only logged-in users reach this view (login_required), so we can
    # log the user out unconditionally and redirect to the index.
    logout(request)
    return HttpResponseRedirect('/scientificWork/')
def strength(request):
    """Head-count page: number of profiles in each academic state.

    Codes: 'a' aspirant, 'd' doctorant, 's' soiskatel, 'st' stajer.
    """
    counts = {}
    for label, code in (('aspirant', 'a'),
                        ('doctorant', 'd'),
                        ('soiskatel', 's'),
                        ('stajer', 'st')):
        counts[label] = UserProfile.objects.filter(academic_state=code).count()
    return render(request, 'scientificWork/strength.html', counts)
988,113 | 7c267d33729db02a5fd4fc62437c5bd82ae7266d | #!/usr/bin/env python3
#
# author: Abhishek Pandey
# date: 09-08-2020
# description: Use a trained network to predict the class for an input image.Prints the most likely classes.
#
# Use argparse Expected Call with <> indicating expected user input:
# python predict.py </path/to/image> <checkpoint>
# --top_k <return top K most likely classes>
# --category_names <path to a JSON file that maps the class values to other category names>
# --gpu
# Example command:
# python predict.py flowers/test/17/image_03864.jpg checkpoint.pth --category_names cat_to_name.json --top_k 5 --gpu True
##
#main imports
import argparse
import sys
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from time import time, sleep
import time
from collections import OrderedDict
#import torch
import torch
import torch.nn.functional as F
from torch import nn
from torch import optim
from torch.autograd import Variable
from torchvision import datasets, transforms, models
from PIL import Image
# Main program function defined below
def main():
    """CLI entry point: load a checkpoint, classify one image, print the
    top-K most likely classes and the elapsed time."""
    # start time
    startTime = time.time()
    # Creates & retrieves Command Line Arguments
    args = getArguments()
    # Set device to cuda if gpu flag is set.
    # NOTE(review): `device` is computed but never used below; the gpu
    # flag itself is passed to load_checkpoint/predict instead.
    if args.gpu==True:
        device = 'cuda'
    else:
        device = 'cpu'
    # If given, read the mapping of categories to class names
    cat_to_name = {}
    if args.category_names:
        with open(args.category_names, 'r') as f:
            cat_to_name = json.load(f)
    # Load checkpoint and get the model
    model = load_checkpoint(args.checkpoint, args.gpu)
    print(model)
    # Invert class_to_idx so model outputs map back to class labels.
    model.idx_to_class = dict([[v,k] for k, v in model.class_to_idx.items()])
    # Predict probabilities and classes
    probs, clas = predict(args.img_path, model, args.top_k, args.gpu)
    print(probs)
    print(clas)
    # Convert categories into real names
    if cat_to_name:
        clas = [cat_to_name[str(cat)] for cat in clas]
    # Print results aligned on the longest class name.
    print('\nThe top {} most likely classes are:'.format(args.top_k))
    max_name_len = len(max(clas, key=len))
    row_format ="{:<" + str(max_name_len + 2) + "}{:<.4f}"
    for prob, name in zip(probs, clas):
        print(row_format.format(name, prob))
    # Overall runtime in seconds, printed as mm:ss.
    total_time = time.time() - startTime
    print("Total Elapsed Runtime: {:.0f}m {:.0f}s".format(total_time//60, total_time % 60))
#argument parser function
def getArguments():
    """
    Retrieves and parses the command line arguments. This function returns
    these arguments as an ArgumentParser Namespace.

    Parameters:
        None -
    Returns:
        parse_args() - CLI data structure
    """
    def _str2bool(value):
        # BUG FIX: the previous `type=bool` meant bool("False") -> True,
        # because any non-empty string is truthy, so "--gpu False" silently
        # enabled the GPU.  Parse the text explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser()
    # Mandatory arguments
    parser.add_argument('img_path', type=str, help='path to input image')
    parser.add_argument('checkpoint', type=str, help='path to a saved checkpoint')
    # Optional arguments
    parser.add_argument('--top_k', type=int, default=3, dest='top_k', help='return top K most likely classes')
    parser.add_argument('--category_names', type=str, dest='category_names', help='path to a JSON file that maps the class values to other category names')
    # "--gpu" alone -> True (nargs='?' + const); "--gpu False" -> False.
    parser.add_argument('--gpu', type=_str2bool, default=False, dest='gpu', const=True, nargs='?', help='options to include cpu or cuda')
    # return parsed argument collection
    return parser.parse_args()
#Checkpoint loading function
def load_checkpoint(filepath, gpu):
    '''
    loads a model, classifier, state_dict and class_to_idx from a torch save

    Parameters:
        filepath - path to a checkpoint produced by the training script
        gpu - when True, load tensors as saved (GPU); otherwise remap all
              storages to CPU so a GPU-trained checkpoint still loads
    Returns:
        model - the reconstructed network with weights and class mapping
    '''
    if gpu==True:
        checkpoint = torch.load(filepath)
    else:
        # map_location remaps CUDA tensor storages onto the CPU.
        checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)
    model = checkpoint['model']
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    # NOTE(review): the optimizer is loaded but never returned or used.
    optimizer = checkpoint['optimizer']
    return model
#defining prediction function
def predict(image_path, model, topk, gpu):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Parameters:
        image_path - path to the input image
        model - network returned by load_checkpoint (needs idx_to_class)
        topk - number of most likely classes to return
        gpu - run inference on CUDA when True and available
    Returns:
        (probs, classes) - top-k probabilities and their class labels
    '''
    # NOTE(review): this Image.open result is immediately overwritten by
    # process_image (which opens the file again) -- redundant work.
    image = Image.open(image_path).convert('RGB')
    image = process_image(image_path)
    # Add a batch dimension: (C,H,W) -> (1,C,H,W).
    image = torch.from_numpy(image).unsqueeze_(0).float()
    if gpu==True and torch.cuda.is_available():
        toCuda = torch.device("cuda:0")
        model = model.to(toCuda)
        image = image.to(toCuda)
    else:
        toCuda = torch.device("cpu")
        model.cpu()
        image.cpu()
    model.eval()
    # Calculate class probabilities
    with torch.no_grad():
        outputs = model.forward(image)
    # Get topk probabilities and classes.  probs.exp() suggests the model
    # outputs log-probabilities (LogSoftmax) -- confirm against training.
    probs, class_idxs = outputs.topk(topk)
    probs, class_idxs = probs.to('cpu'), class_idxs.to('cpu')
    probs = probs.exp().data.numpy()[0]
    class_idxs = class_idxs.data.numpy()[0]
    #print(class_idxs)
    # Convert from indices to the actual class labels
    try:
        ## Convert from indices to the actual class labels
        classes = np.array([model.idx_to_class[idx] for idx in class_idxs])
    except KeyError:
        # NOTE(review): on KeyError `classes` is never bound, so the
        # return below raises NameError -- should re-raise or default.
        print("The key does not exist!")
    return probs, classes
# image processing function
def process_image(image_path):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array

    Parameters:
        image_path - path to the image file
    Returns:
        float array of shape (3, 224, 224), normalized with the standard
        ImageNet mean/std used by torchvision's pretrained models
    '''
    image = Image.open(image_path)
    # Resize so the SHORTER side becomes 256 px, preserving aspect ratio.
    if image.size[0] > image.size[1]:
        image.thumbnail((4500,256))
    else:
        image.thumbnail((256,4500))
    # Center-crop to 224x224, the input size the network expects.
    left_margin = (image.width -224)/2
    bottom_margin = (image.height -224)/2
    right_margin = left_margin + 224
    top_margin = bottom_margin + 224
    image = image.crop((left_margin,bottom_margin,right_margin,top_margin))
    # BUG FIX: scale 8-bit channel values into [0, 1] by dividing by 255.
    # The original divided by 225 (a typo), which skewed every pixel and
    # mismatched the ImageNet statistics applied below.
    image_new = np.array(image)/255
    mean = np.array([0.485,0.456,0.406])
    std = np.array([0.229,0.224,0.225])
    image_new = (image_new - mean)/std
    # HWC -> CHW, the channel ordering PyTorch expects.
    image_new = image_new.transpose((2,0,1))
    return image_new
#main function call
if __name__ == "__main__":
main() |
988,114 | 6b23409108d0cdfbac74f97933d4886c1722e283 | """Provide a connection."""
|
988,115 | 21354cec921692b7e21898d4f622f4a9bdd7e842 | from textblob import TextBlob
import glob
# Average Twitter-comment sentiment polarity per gender, computed once at
# import time with TextBlob.  Python 2 code: the files are opened in 'rb'
# and the raw content is split on '\n' (under Python 3 the bytes would
# need decoding first -- TODO confirm target interpreter).
femaleSentiment = []
maleSentiment = []
for comment in open(glob.glob("static/FemaleTwitterComments.txt")[0], 'rb').read().split('\n'):
    femaleSentiment.append(TextBlob(comment).sentiment.polarity)
for comment in open(glob.glob("static/MaleTwitterComments.txt")[0], 'rb').read().split('\n'):
    maleSentiment.append(TextBlob(comment).sentiment.polarity)
# Mean polarity per gender; polarity values lie in [-1, 1].
male = float(sum(maleSentiment)) / float(len(maleSentiment))
female = float(sum(femaleSentiment)) / float(len(femaleSentiment))
def getDatabase():
    """Return the precomputed mean sentiment polarity per gender."""
    return {"Male": male, "Female": female}
|
988,116 | 34fdb485970caf34c6b00e3ab3856e030e8422e5 | #!/usr/bin/python
#coding:utf-8
import urllib, json
import sys
def sendsms(appkey, mobile, tpl_id, tpl_value):
    """Send a template SMS through the juhe.cn gateway and print the
    outcome (Python 2; uses urllib.urlopen)."""
    sendurl = 'http://v.juhe.cn/sms/send' # SMS gateway URL, do not modify
    params = 'key=%s&mobile=%s&tpl_id=%s&tpl_value=%s' % \
            (appkey, mobile, tpl_id, urllib.quote(tpl_value)) # assemble query parameters
    wp = urllib.urlopen(sendurl+"?"+params)
    content = wp.read() # read the API response body
    result = json.loads(content)
    if result:
        error_code = result['error_code']
        if error_code == 0:
            # sent successfully
            smsid = result['result']['sid']
            print "sendsms success, smsid: %s" % (smsid)
        else:
            # sending failed
            print "sendsms error:(%s) %s" % (error_code, result['reason'])
    else:
        # request failed
        print "request sendsms error"
def main():
    """Entry point: read the verification code from argv[1] and SMS it."""
    if (len(sys.argv) != 2):
        print "Usage: %s CODE" % sys.argv[0]
        return
    code = sys.argv[1]
    # NOTE(review): API key and phone number are hard-coded; move them to
    # configuration or environment variables before real use.
    appkey = '54c0098217b3d609a965f08bde50bc58' # your SMS service appkey
    mobile = '13715171313' # recipient's mobile number
    tpl_id = '15668' # SMS template ID, adjust as needed
    tpl_value = '%23code%23%3D' + code # template variable, URL-encoded "#code#="
    sendsms(appkey, mobile, tpl_id, tpl_value) # request the SMS send
if __name__ == '__main__':
main()
|
988,117 | 88ab94145bcbe7fea90a8c463480b677562eedf7 | from selenium import webdriver
# Selenium smoke test: open baidu.com and read a few element properties.
driver = webdriver.Firefox()
driver.get("http://www.baidu.com")
# size (width/height) of the element with id "kw"
size = driver.find_element_by_id('kw').size
print(size)
# visible text of the element with id "cp"
text = driver.find_element_by_id("cp").text
print(text)
# value of the "type" attribute on the element with id "kw"
attribute1 = driver.find_element_by_id("kw").get_attribute('type')
print(attribute1)
# whether the element with id "kw" is currently displayed
result = driver.find_element_by_id("kw").is_displayed()
print(result)
driver.quit()
988,118 | ec6bab83b77e05ee48d772cd2afacf5f0191f4aa | import logging
from typing import List
import requests
from bs4 import BeautifulSoup
from requests.exceptions import HTTPError
from src.load_config import LOG_LEVEL, LOG_FORMAT
logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT)
logger = logging.getLogger(__name__)
def get_html_data_list(site: str) -> List[str]:
    """Return the href of every link on *site* whose target starts 'ERA5'.

    On an HTTP error the failure is logged and an empty list is returned.
    """
    result: List[str] = []
    html = requests.get(site)
    try:
        html.raise_for_status()
    except HTTPError:
        logger.error(f'cannot get html from {site} because of {html.status_code}')
        # BUG FIX: the original fell through and scraped the error page
        # anyway; bail out so callers never receive links from it.
        return result
    soup = BeautifulSoup(html.text, 'html.parser')
    for link in soup.find_all('a'):
        href = link.get('href')
        # <a> tags without an href attribute yield None; guard against the
        # AttributeError that None.startswith(...) would raise.
        if href and href.startswith('ERA5'):
            result.append(href)
    return result
|
988,119 | 015a2b452f0de72819c83489c95a52142650df3f | from django import template
from signup.access import is_coordinator
register = template.Library()
@register.simple_tag(takes_context=True)
def is_coord(context):
    """Template tag: whether the current request's user is a coordinator."""
    return is_coordinator(context['user'])
|
988,120 | 40fb8cc1b4ef2d5b2a48ea999a1776e988c920a9 | import numpy as np
class Bandit:
    """A stationary stochastic k-armed bandit.

    Each arm's true reward is drawn once from a normal distribution at
    construction; pulling an arm returns a noisy sample centred on that
    arm's true reward.
    """

    def __init__(self, k=10, mean=0, variance=1, reward_variance=1):
        self.k = k
        self.mean = mean
        self.variance = variance
        self.reward_variance = reward_variance
        # NOTE(review): numpy's third positional arg here is `scale`, a
        # standard deviation -- "variance" is a misnomer, kept for
        # behavioural compatibility.
        self.rewards = np.random.normal(self.mean, self.variance, self.k)

    def step(self, action):
        """Pull arm *action*; returns a noisy reward, raises IndexError
        for an out-of-range action."""
        if 0 <= action < self.k:
            return np.random.normal(self.rewards[action], self.reward_variance)
        raise IndexError("Select a valid action!")

    def get_action_space(self):
        """All valid arm indices, 0..k-1."""
        return np.arange(self.k)

    def get_rewards(self):
        """The arms' true mean rewards."""
        return self.rewards

    def __repr__(self):
        return f"k-Armed Bandit (Stochastic, Stationary)"
|
988,121 | d410c7634d239d9543a7c39447ae7d82acc200c2 | # When using iterdescendants() on an etree, is it ok to modify the tree?
# NOTE(review): standalone fragment -- `doc` and `tag` are not defined in
# this file.  Removes every element matching `tag` from its parent while
# iterating; presumably relies on lxml tolerating removal during
# iterfind() iteration -- confirm before reuse.
for element in doc.iterfind('.//%s'%tag):
    element.getparent().remove(element)
|
988,122 | aab85827a5d3cc96f899bd0590a84e4ac5463a1a | from typing import Dict
from django.test import TestCase
from .accessor import *
from rest_framework.test import APIClient, APITestCase
from .utils import *
class TestBlog(APITestCase):
    """End-to-end API tests for /api/blog/: register, list, search,
    update and delete, including permission checks between two users.

    Test data ownership: even-indexed entries belong to user1, odd to
    user2 (see _register_test_data).
    """
    def setUp(self):
        # Two users and five blog entries used across all tests.
        self.user1 = {
            "username": "hoge",
            "email": "hoge@hoge.hoge",
            "password": "hogehoge",
        }
        self.user2 = {
            "username": "fuga",
            "email": "fuga@fuga.fuga",
            "password": "fugafuga",
        }
        self.test_data = [
            {
                "title": "title 1",
                "description": "test description1"
            },
            {
                "title": "test1",
                "description": "Test description test"
            },
            {
                "title": "hogehoge",
                "description": "hogehogehogehoge"
            },
            {
                "title": "test2",
                "description": "Test description"
            },
            {
                "title": "fuga",
                "description": "fugafuga"
            },
        ]
        self.client = APIClient()
    def _register_test_data(self):
        """Register both users and post all five entries, alternating
        the authenticated author per entry."""
        register_users(self.client, [self.user1, self.user2, ])
        for i, test_datum in enumerate(self.test_data):
            user = self.user1 if i % 2 == 0 else self.user2
            set_auth_token(self.client, user)
            self.client.post("/api/blog/", data=test_datum)
    def test_01_register(self):
        """Posting requires auth; authenticated posts succeed for both users."""
        output_test_function(self.test_01_register)
        register_users(self.client, [self.user1, self.user2, ])
        res1 = self.client.post("/api/blog/", data=self.test_data[0])
        assert res1.status_code >= 300, f"Register blog1 Not login: \n[{res1.status_code}]: {res1.data}"
        set_auth_token(self.client, self.user1)
        res1 = self.client.post("/api/blog/", data=self.test_data[0])
        res2 = self.client.post("/api/blog/", data=self.test_data[1])
        assert res1.status_code < 300, f"Register blog1 User1: \n[{res1.status_code}]: {res1.data}"
        assert res2.status_code < 300, f"Register blog2 User1: \n[{res2.status_code}]: {res2.data}"
        set_auth_token(self.client, self.user2)
        res1 = self.client.post("/api/blog/", data=self.test_data[2])
        res2 = self.client.post("/api/blog/", data=self.test_data[3])
        assert res1.status_code < 300, f"Register blog1 User2: \n[{res1.status_code}]: {res1.data}"
        assert res2.status_code < 300, f"Register blog2 User2: \n[{res2.status_code}]: {res2.data}"
    def test_02_list(self):
        """Unfiltered list returns every registered entry."""
        output_test_function(self.test_02_list)
        self._register_test_data()
        res = self.client.get("/api/blog/")
        assert res.status_code < 300 and len(res.data) == len(
            self.test_data), f"Blog list: \n[{res.status_code}]: {res.data}"
    def test_03_find(self):
        """Filtering by id returns exactly one entry."""
        output_test_function(self.test_03_find)
        self._register_test_data()
        res = self.client.get("/api/blog/", data={"id": 1})
        assert res.status_code < 300 and len(res.data) == 1, f"Blog list: \n[{res.status_code}]: {res.data}"
        res = self.client.get("/api/blog/", data={"id": 3})
        assert res.status_code < 300 and len(res.data) == 1, f"Blog list: \n[{res.status_code}]: {res.data}"
    def test_04_keyword_search(self):
        """Title keyword search matches substrings ("test" -> 2 hits)."""
        output_test_function(self.test_04_keyword_search)
        self._register_test_data()
        res = self.client.get("/api/blog/", data={"title": "test"})
        assert res.status_code < 300 and len(res.data) == 2, f"Blog search1: \n[{res.status_code}]: {res.data}"
        res = self.client.get("/api/blog/", data={"title": "testtest"})
        assert res.status_code < 300 and len(res.data) == 0, f"Blog search2: \n[{res.status_code}]: {res.data}"
    def test_05_user_search(self):
        """Author filter: user1 owns 3 entries, user2 owns 2."""
        output_test_function(self.test_05_user_search)
        self._register_test_data()
        res = self.client.get("/api/blog/", data={"author": 1})
        assert res.status_code < 300 and len(res.data) == 3, f"Blog search1: \n[{res.status_code}]: {res.data}"
        res = self.client.get("/api/blog/", data={"author": 2})
        assert res.status_code < 300 and len(res.data) == 2, f"Blog search2: \n[{res.status_code}]: {res.data}"
    def test_06_update(self):
        """An owner can update their own entry."""
        output_test_function(self.test_06_update)
        self._register_test_data()
        set_auth_token(self.client, self.user1)
        blog_id = 1
        update_res = self.client.put(f"/api/blog/{blog_id}/", data={"title": "new title"})
        assert update_res.status_code < 300, f"Blog update1: \n[{update_res.status_code}]: {update_res.data}"
        res = self.client.get("/api/blog/", data={"id": blog_id})
        assert res.status_code < 300 and res.data[0][
            "title"] == "new title", f"Blog update1(find): \n[{res.status_code}]: {res.data}"
    def test_07_update_not_own_blog(self):
        """Updating someone else's entry is rejected and leaves it intact."""
        output_test_function(self.test_07_update_not_own_blog)
        self._register_test_data()
        set_auth_token(self.client, self.user2)
        blog_id = 1
        update_res = self.client.put(f"/api/blog/{blog_id}/", data={"title": "new title"})
        assert update_res.status_code >= 300, f"Blog update2(other user): \n[{update_res.status_code}]: {update_res.data}"
        res = self.client.get("/api/blog/", data={"id": blog_id})
        assert res.status_code < 300 and res.data[0][
            "title"] == self.test_data[0]["title"], f"Blog update2(find): \n[{res.status_code}]: {res.data}"
    def test_08_delete(self):
        """An owner can delete their own entry; it disappears from search."""
        output_test_function(self.test_08_delete)
        self._register_test_data()
        set_auth_token(self.client, self.user2)
        blog_id = 2
        res = self.client.delete(f"/api/blog/{blog_id}/")
        assert res.status_code < 300, f"Blog delete: \n[{res.status_code}]: {res.data}"
        res = self.client.get("/api/blog/", data={"id": blog_id})
        assert res.status_code < 300 and len(res.data) == 0, f"Deleted blog search: \n[{res.status_code}]: {res.data}"
    def test_09_delete_not_own_blog(self):
        """Deleting someone else's entry is rejected and leaves it intact."""
        output_test_function(self.test_09_delete_not_own_blog)
        self._register_test_data()
        set_auth_token(self.client, self.user1)
        blog_id = 2
        res = self.client.delete(f"/api/blog/{blog_id}/")
        assert res.status_code >= 300, f"Blog delete(other): \n[{res.status_code}]: {res.data}"
        res = self.client.get("/api/blog/", data={"id": blog_id})
        assert res.status_code < 300 and len(
            res.data) == 1, f"Blog search(not deleted): \n[{res.status_code}]: {res.data}"
|
class Solver:
    """Dragon-vs-sheep puzzle solver.

    The dragon eats up to `dragon_size` sheep each day.  A day with too
    few sheep empties the herd, shrinks the dragon and raises the
    `berserk` counter; five consecutive berserk days end the run.
    `days_gone` counts only fully completed (non-terminating) days.
    """

    def __init__(self, list):
        self.numberlist = self.load_numbers(list)
        self.dragon_size = 50
        self.days_gone = 0
        self.berserk = 0
        self.herd = 0

    def solve(self):
        """Simulate day by day over the loaded sheep counts."""
        for daily_count in self.numberlist:
            self.herd += int(daily_count) - self.dragon_size
            if self.herd >= 0:
                # Enough sheep: the dragon is sated and grows.
                self.berserk = 0
                self.dragon_size += 1
            else:
                # Shortage: clamp the herd at zero, dragon shrinks.
                self.herd = 0
                self.berserk += 1
                self.dragon_size -= 1
            if self.berserk == 5:
                break
            self.days_gone += 1

    def load_numbers(self, list):
        """Read one comma-separated line of counts from the file *list*."""
        with open(list) as list_file:
            return list_file.readlines()[0].split(', ')
if __name__ == '__main__':
    # Python 2 entry point: solve the shipped puzzle input and print how
    # many full days passed before the dragon's fifth berserk day.
    solver = Solver("sau.txt")
    solver.solve()
    print solver.days_gone
|
988,124 | ac4f6cf98266fd22334d56913178efc8ce0c6f17 | #!/usr/bin/python3
'''
Script to test app log config file
Created on Oct 20, 2022
Arguments
-f (or --file) FILE : Path to the file that will be used to construct the loggers
@author: riaps
'''
import argparse
import logging.config
import time
import riaps.utils.spdlog_setup as spdlog_setup
def test_loggers(loggers, msg):
for logger in loggers:
# level = loggers[logger].getEffectiveLevel()
# level_name = logging.getLevelName(level)
# print(f"logger: {logger} level: {level_name}")
loggers[logger].info(msg)
print("\n")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # -F/--file: logger configuration file to exercise.
    parser.add_argument("-F", "--file", default="riaps-log.conf")
    # -s/--spd: build loggers via riaps' spdlog-style config instead of
    # the stdlib logging.config fileConfig format.
    parser.add_argument("-s", "--spd", action='store_true')
    args = parser.parse_args()
    if args.spd:
        loggers = spdlog_setup.from_file(args.file)
    else:
        logging.config.fileConfig(args.file)
        loggers = logging.root.manager.loggerDict
        root_logger = logging.getLogger() # get the root logger
        # loggerDict may hold placeholder objects; replace each entry
        # with a real Logger instance, plus the root logger.
        for logger in loggers:
            loggers[logger] = logging.getLogger(logger)
        loggers["root"] = root_logger
    # Emit one message per second on every configured logger.
    for i in range(10):
        test_loggers(loggers, f"message: {i}")
        time.sleep(1)
|
988,125 | 39c20aaa1ae55e9db9abf3ed75c7aa4fb0019196 |
from rest_framework import serializers
from albums.models import Album
from tracks.serializers import TrackListSerializer
class AlbumSerializer(serializers.ModelSerializer):
    """Flat serializer over the Album model.

    NOTE(review): no `fields`/`exclude` declared -- DRF >= 3.3 raises an
    assertion for this; older DRF serialized all fields.  Confirm the
    project's DRF version.
    """
    class Meta:
        model = Album
class AlbumTracksSerializer(serializers.ModelSerializer):
    """Album title together with its nested track list."""
    tracks = TrackListSerializer(many=True)
    class Meta:
        model = Album
        fields = ('title', 'tracks',)
class BandAlbumSerializer(serializers.HyperlinkedModelSerializer):
    """Compact hyperlinked view of an album (url + title)."""
    class Meta:
        model = Album
        fields = ('url', 'title')
|
988,126 | 4b0ed5979087fe50255675ad346fb31f3682849c | import turtle
import random

# Shared turtle and palette used by all drawing helpers below.
t = turtle.Turtle()
colors = ["red", "green", "blue", "orange", "purple", "pink", "yellow"]

# Module-level random defaults.  BUG FIX: the X lower bound was "-00"
# (i.e. 0), inconsistent with the -200 range drawSuperPattern uses, which
# confined these coordinates to the right half of the canvas.
randnum = random.randint(1,10)
randX = random.randint(-200, 200)
randY = random.randint(-200, 200)
randoffset = random.randint(0, 100)
randwidth = random.randint(1, 150)
randheight = random.randint(1, 150)
randrotation = random.randint(0, 30)
randradius = random.randint(1, 60)
randcount = random.randint(1, 75)
def setup():
    """Size the window and ready the shared turtle for drawing.

    NOTE(review): turtle speed values are normally 0-10; 60 is clamped.
    """
    turtle.setup(width=1000, height=800, startx=0, starty=0)
    t.showturtle()
    t.speed(60)
    return
def setRandomColor():
    """Switch the shared turtle's pen to a random colour from `colors`."""
    color = random.choice(colors)
    t.color(color)
    return
def drawRectangle(newX, newY, width, height, offset, count, rotation):
    """Draw one width x height rectangle.

    The pen jumps to (newX, newY), advances `offset`, and turns right by
    360/count plus `rotation` degrees before drawing -- so repeated calls
    with the same centre fan the rectangles out into a ring.
    """
    t.penup()
    t.goto(newX, newY)
    t.forward(offset)
    t.right(360/count)
    t.right(rotation)
    t.pendown()
    # Trace the four sides.
    t.forward(width)
    t.left(90)
    t.forward(height)
    t.left(90)
    t.forward(width)
    t.left(90)
    t.forward(height)
    t.left(90)
    t.penup()
    return
def drawRectanglePattern(centerX, centerY, offset, width, height, count, rotation):
    """Draw `count` randomly coloured rectangles fanned around a centre."""
    for i in range(count):
        setRandomColor()
        drawRectangle(centerX, centerY, width, height, offset, count, rotation)
    return
def drawCircle(newX, newY, offset, radius, count):
    """Draw one circle of `radius`, offset and rotated (360/count) from
    (newX, newY) so repeated calls form a ring of circles."""
    t.penup()
    t.goto(newX, newY)
    t.forward(offset)
    t.right(360 / count)
    t.pendown()
    t.circle(radius)
    t.penup()
    return
def drawCirclePattern(centerX, centerY, offset, radius, count):
    """Draw `count` randomly coloured circles fanned around a centre."""
    for i in range(count):
        setRandomColor()
        drawCircle(centerX, centerY, offset, radius, count)
    return
def drawSuperPattern(num):
    """Draw `num` random patterns, each with fresh random parameters.

    randnum picks the kind: 1-5 draws a rectangle ring, 6-10 a circle
    ring, so both branches together cover every possible value.
    """
    for i in range(num):
        randnum = random.randint(1, 10)
        randX = random.randint(-200, 200)
        randY = random.randint(-200, 200)
        randoffset = random.randint(0, 100)
        randwidth = random.randint(1, 150)
        randheight = random.randint(1, 150)
        randrotation = random.randint(0, 30)
        randradius = random.randint(1, 60)
        randcount = random.randint(1, 75)
        if (randnum >= 1 and randnum < 6):
            drawRectanglePattern(randX, randY, randoffset, randwidth, randheight, randcount, randrotation)
        elif (randnum > 5):
            drawCirclePattern(randX, randY, randoffset, randradius, randcount)
    return
def reset():
    """Clear the whole screen (clearscreen also resets turtle state)."""
    turtle.clearscreen()
    return
def done():
    """Hand control to the Tk event loop; call once, after all drawing."""
    turtle.done()
    return
988,127 | bf6ba0543a7cd111b57c07c6b0dcf5cd2d9d6979 | import numpy as np
import viz
def diffusion_step(Xmid, t, get_mu_sigma, denoise_sigma, mask, XT, rng,
        trajectory_length, logr_grad):
    """
    Run a single reverse diffusion step
    ----------
    Parameters
    ----------
    Xmid : array
        Current value of X
    t : int
        Current timestep in diffusion
    get_mu_sigma : function handle
        maps (X, t) to the reverse-model mean and std for this step
    denoise_sigma : float or None
        when set, condition on the noisy observation XT with this noise std
    mask : bool array (raveled) or None
        when set, entries marked True are clamped to XT (inpainting)
    XT : array
        the conditioning image (observation / inpainting source)
    rng : numpy RandomState
        source of the step's Gaussian noise
    logr_grad : function handle
        function handle to d/dx log r(x), where we
        mix in r(x^(t)) = r(x=x^(t)) ** (T-t)/T into the diffusion
        where x is the image
    trajectory_length : int
        Length of the trajectory
    """
    mu, sigma = get_mu_sigma(Xmid, np.array([[t]]))
    if (denoise_sigma is not None) and (logr_grad is not None):
        print 'unverified behavior with denoise_sigma and logr_grad both on'
    if logr_grad is not None:
        # Perturb the mean with the guidance gradient, annealed so the
        # guidance strength grows as t approaches 0.
        mu += (sigma * logr_grad(Xmid) * (trajectory_length - t)
               / (1. * trajectory_length))
    # note mu, sigma have dimension
    # (n_samples, n_colors, spatial_width, spatial_width)
    if denoise_sigma is not None:
        # Product of two Gaussians: combine the model's prediction with
        # the noisy observation XT (precision-weighted average).
        sigma_new = (sigma**-2 + denoise_sigma**-2)**-0.5
        mu_new = mu * sigma_new**2 * sigma**-2 + XT * sigma_new**2 * denoise_sigma**-2
        sigma = sigma_new
        mu = mu_new
    if mask is not None:
        # Clamp known pixels exactly: mean fixed to XT, zero variance.
        mu.flat[mask] = XT.flat[mask]
        sigma.flat[mask] = 0.
    Xmid = mu + sigma*rng.normal(size=Xmid.shape)
    return Xmid
def generate_inpaint_mask(n_samples, n_colors, spatial_width):
    """
    Build a flat boolean inpainting mask over images of shape
    (n_samples, n_colors, spatial_width, spatial_width).

    The mask will be True where we keep the true image, and False where we're
    inpainting.
    """
    mask = np.zeros((n_samples, n_colors, spatial_width, spatial_width), dtype=bool)
    # simple mask -- just mask out half the image.
    # Use // so the slice index stays an int on both Python 2 and 3 (the
    # original `spatial_width/2` yields a float under Python 3 and fails as
    # a slice index).
    mask[:, :, :, spatial_width // 2:] = True
    return mask.ravel()
def generate_samples(model, get_mu_sigma, n_samples=36,
        inpaint=False, denoise_sigma=None, logr_grad=None,
        X_true=None,
        base_fname_part1="samples", base_fname_part2='',
        num_intermediate_plots=4, seed=12345):
    """
    Run the reverse diffusion process (generative model).

    Starts from Gaussian noise X^T (optionally conditioned on X_true for
    denoising/inpainting), walks t = T-1 .. 1 via diffusion_step, and saves
    image grids of the start state, a few intermediates, and the final X^0
    through viz.plot_images. Filenames are built from base_fname_part1/2
    plus tags for the active conditioning modes.
    """
    # use the same noise in the samples every time, so they're easier to
    # compare across learning
    rng = np.random.RandomState(seed)
    spatial_width = model.spatial_width
    n_colors = model.n_colors
    # set the initial state X^T of the reverse trajectory
    XT = rng.normal(size=(n_samples,n_colors,spatial_width,spatial_width))
    if denoise_sigma is not None:
        # Denoising: start from the true image corrupted with known noise.
        XT = X_true + XT*denoise_sigma
        base_fname_part1 += '_denoise%g'%denoise_sigma
    if inpaint:
        # Inpainting: clamp the known half of each image to its true values.
        mask = generate_inpaint_mask(n_samples, n_colors, spatial_width)
        XT.flat[mask] = X_true.flat[mask]
        base_fname_part1 += '_inpaint'
        if logr_grad is not None:
            base_fname_part1 += '_logrperturb'
    else:
        mask = None
    if X_true is not None:
        viz.plot_images(X_true, base_fname_part1 + '_true' + base_fname_part2)
    viz.plot_images(XT, base_fname_part1 + '_t%04d'%model.trajectory_length + base_fname_part2)
    Xmid = XT.copy()
    for t in xrange(model.trajectory_length-1, 0, -1):
        Xmid = diffusion_step(Xmid, t, get_mu_sigma, denoise_sigma, mask, XT, rng,
            model.trajectory_length, logr_grad)
        # Save a handful of evenly spaced intermediate states
        # (num_intermediate_plots of them, plus the two endpoints).
        if np.mod(model.trajectory_length-t,
            int(np.ceil(model.trajectory_length/(num_intermediate_plots+2.)))) == 0:
            viz.plot_images(Xmid, base_fname_part1 + '_t%04d'%t + base_fname_part2)
    X0 = Xmid
    viz.plot_images(X0, base_fname_part1 + '_t%04d'%0 + base_fname_part2)
|
988,128 | bc3f0f0f9c2a1df89b1f3a8b8d425d05fe659a9d | #!/usr/bin/env python
from __future__ import print_function
import pychromecast
import argparse
import subprocess
import os
import socket
import sys
__progname__ = "castctrl"
def turn_on_tv(cast):
    """Wake the TV/input by briefly launching the default media receiver
    app on the Chromecast, then quitting it again."""
    default_media_receiver = "CC1AD845"  # com.google.cast.media, the default mp4 player
    cast.start_app(default_media_receiver)
    cast.quit_app()
def play_video(url, filename, cast, port=8000):
    """Play a video on the given Chromecast.

    If `filename` is set, a throwaway web server is spawned in the current
    directory and the Chromecast is pointed at the served file; otherwise
    `url` is cast directly.

    Parameters:
        url: remote media URL (used when no filename is given).
        filename: local file (relative to cwd) to serve and play.
        cast: a pychromecast Chromecast object.
        port: port for the temporary web server.
    """
    if filename:
        # Discover the local IP the Chromecast can reach us on by opening a
        # UDP socket toward the device (no packets are actually sent).
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((cast.host, 8008))
        local_ip = s.getsockname()[0]
        s.close()
        process = subprocess.Popen(['nohup', 'python', '-m', 'SimpleHTTPServer', str(port)],
                                   stdout=open('/dev/null', 'w'),
                                   stderr=open('/tmp/cast.log', 'a'),
                                   preexec_fn=os.setpgrp)
        print("Web server started for current directory ({}) at http://{}:{}. You can stop this web server (after the video finishes) with 'kill {}'.".format(os.getcwd(), local_ip, port, process.pid), file=sys.stderr)
        # Bug fix: build the media URL from `filename` on the configured
        # `port`; the original interpolated `url` and hard-coded port 8000.
        url = "http://{}:{}/{}".format(local_ip, port, filename)
    cast.play_media(url, "video/mp4")
def pause_video(cast):
    """Pause whatever is currently playing on the given Chromecast."""
    controller = cast.media_controller
    controller.pause()
def stop_video(cast):
    # Stop playback by quitting the running receiver app on the Chromecast.
    cast.quit_app()
def main():
    """Parse command-line arguments and drive the selected Chromecast."""
    casts = pychromecast.get_chromecasts_as_dict()
    parser = argparse.ArgumentParser(prog=__progname__)
    parser.add_argument("--file", help="Filename of media to play")
    parser.add_argument("--url", help="URL of media to play. You should probably specify this if nothing else.")
    parser.add_argument("--power", help="Turn on TV and switch to Chromecast", action="store_true")
    parser.add_argument("-s", "--stop", help="Stop playback", action='store_true')
    parser.add_argument("-d", "--device", help="Select device. List devices with -D")
    parser.add_argument("-D", "--devices", help="List devices", action='store_true')
    parser.add_argument("--port", help="Specify port for web server (if you pick a local file above)", type=int, default=8000)
    args = parser.parse_args()
    if args.devices:
        print(", ".join(casts.keys()), file=sys.stderr)
        return
    if not casts:
        # Every branch below needs a device; fail gracefully instead of
        # raising StopIteration/KeyError on an empty dict.
        print("No Chromecast devices found", file=sys.stderr)
        return
    if args.device:
        cast = casts[args.device]
    else:
        # Default to an arbitrary discovered device.
        cast = casts[next(iter(casts))]
    if args.power:
        # Bug fix: the helper is named turn_on_tv; the original called the
        # undefined name power_on_tv, a guaranteed NameError.
        turn_on_tv(cast)
        return
    if args.url or args.file:
        # Bug fix: forward --port (it was parsed but never used).
        play_video(args.url, args.file, cast, port=args.port)
        return
    elif args.stop:
        stop_video(cast)
        return
if __name__ == "__main__":
main()
|
988,129 | 3099e106185ab5b5062ff35e4dc20515ee27b1f3 | from watson.modules.chatmodule import ChatModule, command_function
class BangBangModule(ChatModule):
    '''
    This is a module to contain the "!!" command, which repeats the last command a user gave
    '''

    __module_name__ = "bangbang"
    __module_description__ = "Allows users to repeat their last command"

    @command_function("!!", storable=False)
    def bangbang(self, user):
        '''
        Repeats the last command the user gave, if there was one
        '''
        previous = self.bot.state.get_last_command(user)
        if not previous:
            self.speak(user, "You have no previous commands for me to repeat")
            return
        # Re-run the command and record it again so "!!" can be chained.
        self.bot.do_command(user, previous)
        self.bot.state.store_command(user, previous)
|
988,130 | 63b12ba67c872ffbebfa28dc3f50958da760ceb9 | from rest_framework import serializers
from .models import Expense
class ExpenseSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing the Expense model's core fields for the API."""
    class Meta:
        model = Expense
        fields = ('id', 'type', 'amount', 'date_created', 'description')
988,131 | 67667789106a10dd847176dab8d3ec029813a6d2 |
class Node(object):
    """A single graph vertex: a value plus the edges incident to it."""
    def __init__(self, value):
        self.edges = []    # Edge objects touching this node
        self.value = value
class Edge(object):
    """A directed, valued connection between two Node objects."""
    def __init__(self, value, node_from, node_to):
        self.node_from = node_from  # source endpoint
        self.node_to = node_to      # destination endpoint
        self.value = value          # edge weight/label
class Graph(object):
    """Directed graph of valued Nodes connected by valued Edges."""

    def __init__(self, edges=None, nodes=None):
        # Bug fix: the original used mutable default arguments ([]), so every
        # Graph() created without explicit lists shared the same edge/node
        # lists. Use None sentinels instead.
        self.edges = [] if edges is None else edges
        self.nodes = [] if nodes is None else nodes

    def insert_node(self, new_node_value):
        """Append a new, unconnected Node holding the given value."""
        self.nodes.append(Node(new_node_value))

    def insert_edge(self, new_edge_value, node_from_val, node_to_val):
        """Insert an edge from node_from_val to node_to_val, creating any
        missing endpoint nodes on demand."""
        from_found = None
        to_found = None
        for node in self.nodes:
            if node_from_val == node.value:
                from_found = node
            if node_to_val == node.value:
                to_found = node
        if from_found is None:
            from_found = Node(node_from_val)
            self.nodes.append(from_found)
        if to_found is None:
            to_found = Node(node_to_val)
            self.nodes.append(to_found)
        new_edge = Edge(new_edge_value, from_found, to_found)
        from_found.edges.append(new_edge)
        to_found.edges.append(new_edge)
        self.edges.append(new_edge)

    def EdgeList(self):
        """Return [(edge value, from-node value, to-node value), ...]."""
        return [(e.value, e.node_from.value, e.node_to.value)
                for e in self.edges]

    def AdjencyList(self):
        """Return an adjacency list indexed by node value: adjlist[v] is a
        list of (to-node value, edge value) pairs, or None for no edges."""
        # Bug fix: size by len(nodes) + 1 (as AdjMatrix already does) so a
        # node whose value equals len(nodes) cannot raise IndexError.
        adjlist = [None] * (len(self.nodes) + 1)
        for edge in self.edges:
            pair = (edge.node_to.value, edge.value)
            if adjlist[edge.node_from.value] is not None:
                adjlist[edge.node_from.value].append(pair)
            else:
                # Bug fix: the original seeded this slot with a flat
                # [to, value] list, so the first entry's shape differed from
                # the (to, value) tuples appended afterwards.
                adjlist[edge.node_from.value] = [pair]
        return adjlist

    def AdjMatrix(self):
        """Return a (len(nodes)+1) x (len(nodes)+1) matrix of edge values,
        indexed by node value (0 means no edge)."""
        size = len(self.nodes) + 1
        adjmatrix = [[0 for _ in range(size)] for _ in range(size)]
        for edge in self.edges:
            adjmatrix[edge.node_from.value][edge.node_to.value] = edge.value
        return adjmatrix
def main():
    """Build a small demo graph and print its three representations."""
    demo = Graph()
    for value, src, dst in ((100, 1, 2), (101, 1, 3), (102, 1, 4), (103, 3, 4)):
        demo.insert_edge(value, src, dst)
    print(demo.EdgeList())
    print(demo.AdjencyList())
    print(demo.AdjMatrix())
if __name__ == '__main__':
main()
|
988,132 | 11cbd36244ddfe9f9c81f464a95b508709137fb1 | import sys
# Competitive-programming harness: feed the test case from input.txt.
sys.stdin = open("input.txt")

# Read N (position, height) pairs and order them by position.
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
arr.sort()

# Find the tallest entry; the profile is made monotone toward it from both
# sides below.
# NOTE(review): max_idx is only assigned when some height exceeds 0 -- if all
# heights were <= 0 this would raise NameError; presumably the problem
# guarantees positive heights.
max_num = 0
for i in range(N):
    if max_num < arr[i][1]:
        max_num = arr[i][1]
        max_idx = i
mid = max_idx # 3
cnt = arr[max_idx][1]  # start with the peak column itself

# Left of the peak: force heights to be non-decreasing moving right, adding
# gap-width * height for each segment.
for i in range(mid):
    if arr[i][1] <= arr[i+1][1]:
        cnt += (arr[i+1][0]-arr[i][0]) * arr[i][1]
    else:
        # Raise the next height to the current one to keep the profile
        # non-decreasing toward the peak.
        arr[i+1][1] = arr[i][1]
        cnt += (arr[i+1][0]-arr[i][0]) * arr[i][1]

# Right of the peak: same idea mirrored (non-increasing moving right).
for i in range(N-1, mid, -1):
    if arr[i][1] <= arr[i-1][1]:
        cnt += (arr[i][0]-arr[i-1][0]) * arr[i][1]
    else:
        arr[i-1][1] = arr[i][1]
        cnt += (arr[i][0]-arr[i-1][0]) * arr[i][1]
print(cnt)
|
988,133 | f7075100f3ce0cdc7b5acf0f841136294580ee57 | # Allen Institute Software License - This software license is the 2-clause BSD
# license plus a third clause that prohibits redistribution for commercial
# purposes without further permission.
#
# Copyright 2017-2018. Allen Institute. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Redistributions for commercial purposes are not permitted without the
# Allen Institute's written permission.
# For purposes of this license, commercial purposes is the incorporation of the
# Allen Institute's software into anything for which you will charge fees or
# other compensation. Contact terms@alleninstitute.org for commercial licensing
# opportunities.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from workflow_engine.simple_router import SimpleRouter
import os
import yaml
import logging
_log = logging.getLogger('workflow_engine.client_settings')
class settings_attr_dict(dict):
    """dict subclass allowing attribute-style reads (d.key).

    Missing keys return None (dict.get semantics) rather than raising
    AttributeError.
    """
    __getattr__ = dict.get
# Fallback Celery configuration (lowercase Celery setting names) used when no
# BLUE_SKY_SETTINGS YAML file is available.
_DEFAULT_SETTINGS_DICT = {
    'broker_url': 'pyamqp://blue_sky_user:blue_sky_user@message_queue:5672/',
    'result_backend': 'rpc://',
    'result_persistent': True,
    'task_serializer': 'json',
    'result_serializer': 'json',
    'result_expires': 3600,  # 1 hour in seconds
    'broker_connection_timeout': 10,
    'broker_connection_retry': True,
    'soft_time_limit': 600,
    'time_limit': 2400,
    'accept_content': ['json'],
    'worker_prefetch_multiplier': 1,
    'timezone': 'US/Pacific',
    'enable_utc': True,
    'worker_hijack_root_logger': False,
    # Retry/backoff behavior for the broker transport connection.
    'broker_transport_options': {
        'max_retries': 3,
        'interval_start': 0,
        'interval_step': 10,
        'interval_max': 30
    }
}
def load_settings_yaml():
    """Load Celery settings from the YAML file named by BLUE_SKY_SETTINGS.

    Falls back to _DEFAULT_SETTINGS_DICT when the settings file does not
    exist; any other failure (unreadable/invalid YAML) is re-raised with the
    original exception chained.

    Returns:
        settings_attr_dict: settings accessible as attributes.
    """
    blue_sky_settings = os.environ.get(
        'BLUE_SKY_SETTINGS',
        'blue_sky_settings.yml'
    )
    try:
        with open(blue_sky_settings) as f:
            settings_dict = yaml.load(f, Loader=yaml.SafeLoader)
    except FileNotFoundError:
        # Bug fix: the original assigned this default up front but then
        # raised on *any* exception, so the fallback was dead code. Use the
        # defaults when the settings file simply isn't there.
        settings_dict = _DEFAULT_SETTINGS_DICT
    except Exception as e:
        raise Exception('need to set BLUE_SKY_SETTINGS: ' + str(e)) from e
    return settings_attr_dict(settings_dict)
def configure_worker_app(
    app,
    app_name,
    worker_name=None,
    worker_names=None
):
    """Apply blue-sky settings plus queue and routing config to a Celery app.

    Args:
        app: the Celery application to configure (mutated in place).
        app_name: name handed to SimpleRouter to derive queue names.
        worker_name: single worker name (convenience alias for worker_names).
        worker_names: explicit list of worker names; takes precedence.
    """
    # Normalize the two worker-name parameters into a single list.
    if worker_names is None:
        if worker_name is None:
            worker_names = []
        else:
            worker_names = [ worker_name ]
    router = SimpleRouter(app_name)
    # Ensure the logging handler module is imported in every worker.
    app.conf.imports = (
        'workflow_engine.celery.setup_logging_handler',
    )
    app.config_from_object(load_settings_yaml())
    app.conf.task_queue_max_priority = 10
    app.conf.broker_heartbeat = 0  # Fix https://github.com/celery/celery/issues/4867
    app.conf.task_queues = router.task_queues(worker_names)
    # Route through SimpleRouter first, with a static fallback rule for the
    # workflow-node job task.
    app.conf.task_routes = (
        router.route_task,
        {
            'workflow_engine.process.workers.workflow_tasks.run_workflow_node_jobs_by_id':
            {
                'routing_key': 'at_em.#'
            }
        }
    )
|
988,134 | 1323908d0af6c74b8fb4f4804a896e4bc401934c | from pathlib import Path
import numpy as np
import cv2
import os
import errno
import json
def load_subpix_png(path, scale_factor=256.0):
    """Load a one-channel png holding sub-pixel (decimal) information.

    16-bit pngs are assumed to store fixed-point values and are divided by
    `scale_factor` after loading; other bit depths are returned unscaled.

    Args:
        path (pathlib.Path or str): file to load.
        scale_factor (float): divisor applied to 16-bit integer data.

    Raises:
        FileNotFoundError: if `path` does not point to an existing file.

    Returns:
        np.ndarray: float32 image, normalized when the source was uint16.
    """
    source = Path(path)
    if not source.is_file():
        raise FileNotFoundError(
            errno.ENOENT, os.strerror(errno.ENOENT), str(source))
    raw = cv2.imread(str(source), -1)
    original_dtype = raw.dtype
    result = raw.astype(np.float32)
    if original_dtype == np.uint16:
        result = result / scale_factor
    return result
def save_subpix_png(path, img, scale_factor=256.0):
    """Save a float one channel image as .png, keeping decimal information.

    Multiplies the image by `scale_factor` and stores it as a 16-bit png so
    sub-pixel values survive (recover them by dividing by the same factor).
    The process is lossy but keeps depth maps/disparities previewable as
    pngs. Values that would overflow uint16 trigger a warning and are
    written as 0.

    Args:
        path (str or pathlib.Path): destination path (parents are created).
        img (np.ndarray): image to store.
        scale_factor (float): factor multiplied into the values before the
            uint16 cast. Defaults to 256.0.
    """
    # Bug fix: `warnings` was used below but never imported anywhere in this
    # module, so the overflow branch raised NameError instead of warning.
    import warnings

    Path(path).parent.mkdir(parents=True, exist_ok=True)
    img = img.astype(np.float32) * scale_factor
    if np.amax(img) > (2**16)-1:
        warnings.warn("image out of range(" + str(np.amax(img)/scale_factor) +
                      "), try with a smaller scale factor. loading this file " +
                      "will results in invalid values, file: "+str(path))
        # Zero out anything that cannot be represented in 16 bits.
        img[img > (2**16)-1] = 0
    img = img.astype(np.uint16)
    cv2.imwrite(str(path), img)
def parse_occlusion_image(path, noc=False):
    """parse occlusion image and return valid mask containing either all
    pixels except pixel of which ground truth is not known or only non occluded
    pixels.
    Args:
        path ([pathlib.Path, str]): path to occlusion image
        noc (bool, optional): include only pixels visible from both views.
            Defaults to False.
    Raises:
        FileNotFoundError: occlusion image not found
    Returns:
        nd.array: mask indicating valid pixels
    """
    # cv2.imread yields channels in BGR order, so (255, 0, 0) below is blue.
    occlusion_color = cv2.imread(str(path))
    if occlusion_color is None:
        raise FileNotFoundError(errno.ENOENT, os.strerror(
            errno.ENOENT), path)
    h, w, c = occlusion_color.shape
    # regions without ground truth information are blue
    mask = np.full((h, w), fill_value=True)
    mask[np.where(np.all(occlusion_color == (255, 0, 0), axis=-1))] = False
    if noc:
        # Non-occluded mode: additionally drop everything not visible in
        # both views.
        # regions outside the other image's borders are in yellow
        mask[np.where(np.all(occlusion_color == (0, 255, 255), axis=-1))] = False
        # regions schene occluded in the right image are in red
        mask[np.where(np.all(occlusion_color == (0, 0, 255), axis=-1))] = False
        # regions schene occluded in the left image are in green
        mask[np.where(np.all(occlusion_color == (0, 255, 0), axis=-1))] = False
    return mask
def agg_paths(dataset_root_dir):
    """aggregates filepaths from the datasets and returns them in a dictionary
    format.

    Walks every experiment directory under the root, and inside each one
    every ground-truth modality directory (name contains 'Ground_truth').
    The OcclusionL listing drives the pairing: for each occlusion image a
    matching left/right rectified image and a rectified-calibration .json
    are expected to exist with the same stem.

    Args:
        dataset_root_dir ([pathlib.Path, str]): path to datasets's root directory

    Returns:
        dict: keys 'left', 'right', 'occ', 'disparity', 'depth', 'calib'
        (parallel lists of resolved Paths) and 'name' (human-readable
        sample labels built from experiment/modality/file stem).
    """
    root_dir_p = Path(dataset_root_dir)
    experiment_dirs = sorted([e for e in root_dir_p.iterdir()])
    left_paths=[]
    right_paths=[]
    occl_paths=[]
    disparity_paths=[]
    depth_paths=[]
    calib_paths=[]
    sample_name=[]
    for experiment in experiment_dirs:
        # One experiment may carry several ground-truth modalities.
        modality_gt_dirs = sorted([gt_dir for gt_dir in experiment.iterdir() if 'Ground_truth' in gt_dir.name])
        for modality_gt_dir in modality_gt_dirs:
            occl_paths.extend(sorted([p.resolve()
                for p in (modality_gt_dir/'OcclusionL').iterdir()]))
            disparity_paths.extend(sorted([p.resolve()
                for p in (modality_gt_dir/'Disparity').iterdir()]))
            depth_paths.extend(sorted([p.resolve()
                for p in (modality_gt_dir/'DepthL').iterdir()]))
            # Left/right/calibration paths are derived from the occlusion
            # file names so the lists stay aligned sample-for-sample.
            left_paths.extend(sorted([(experiment/'Left_rectified'/p.name).resolve()
                for p in (modality_gt_dir/'OcclusionL').iterdir()]))
            right_paths.extend(sorted([(experiment/'Right_rectified'/p.name).resolve()
                for p in (modality_gt_dir/'OcclusionL').iterdir()]))
            calib_paths.extend(sorted([(experiment/'Rectified_calibration'/(p.stem+'.json')).resolve()
                for p in (modality_gt_dir/'OcclusionL').iterdir()]))
            sample_name.extend(sorted([(p.parents[2]).name+' - ' + (p.parents[1]).name.split('_')[-1]+ ' - '+p.stem for p in (modality_gt_dir/'Disparity').iterdir()]))
    return {'left': left_paths,
            'right': right_paths,
            'occ': occl_paths,
            'disparity': disparity_paths,
            'depth': depth_paths,
            'calib': calib_paths,
            'name': sample_name}
|
988,135 | cb207d257daeee828eb0d35618d7b58461f42658 | # -*- coding: utf-8 -*-
# @Date : 2018-06-12 17:06:23
# @Author : GEFE (gh_efe@163.com)
# @Version : 1.0.0
# @Describe : Train a naive Bayes model on the extracted (word2vec) features
import numpy as np
import pandas as pd
from sklearn.naive_bayes import MultinomialNB,GaussianNB,BernoulliNB
from sklearn.linear_model import LinearRegression,LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Inputs: per-document word2vec features and the corresponding penalty labels.
id_context_w2v_path = r'../../data/feature/id_context_w2v.csv'
id_penalty_path = r'../../data/corpus/output/id_penalty.csv'

# Only the first 100 rows are used (quick experiment).
X = pd.read_csv(id_context_w2v_path)[:100]
# print(X)
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
# pca = PCA(n_components=100)
# X = pca.fit_transform(X)
# print(X)
Y = pd.read_csv(id_penalty_path)['penalty'][:100]
# print(Y)

# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=.2, random_state=0)
# Gaussian naive Bayes suits the continuous word2vec features.
m = GaussianNB()
m.fit(x_train, y_train)
y_pre = m.predict(x_test)

# Count misclassified samples and report the error rate.
erro_count = 0
for y1,y2 in zip(y_pre,y_test):
    if y1 == y2:
        continue
    erro_count += 1
print("错误率%.3f" % (erro_count/len(y_test)))  # prints "error rate %.3f"
|
988,136 | 5b766e70a777b1a58c16ab0e910addd26132e23c | # Generated by Django 3.1.6 on 2021-04-02 15:32
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): renames the database table
    # backing the `member` model to "loginapp_member" without changing any
    # columns. Do not hand-edit applied migrations.
    dependencies = [
        ('loginapp', '0005_auto_20210402_2053'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='member',
            table='loginapp_member',
        ),
    ]
|
988,137 | 10e3ebd64b74240cd6f2e83dbf252a9849397ab9 | # Name: Ben Koczwara
# Date: Sept.16,2013
# Purpose: to create a program that finds the prime numbers from 2 to 5000
print "Prime Numbers"
print "-------------"
print
print "This program will find all the prime numbers between 2 and 5000"
print

# Candidate pool: every integer in [2, 5000].
values = []
for x in range(2,5001):
    values.append(x)

# Sieve in place: each number reached by the outer loop has survived all
# earlier removals, so it is prime; print it, then delete its multiples from
# the pool before the iteration gets to them.
# NOTE(review): this mutates `values` while iterating over it. It works here
# because removals only ever happen at positions after the current one, but
# the pattern is fragile -- confirm before reusing. It is also O(n^2) due to
# the `in`/`remove` list scans.
for y in values:
    print y,
    for z in range(2,5001):
        num = y*z
        if num in values:
            values.remove(num)
|
988,138 | 3a07d47d8e2794e71f0768a87086821a452391f1 | import datetime
import random
import json
# One secret number per process run; wrong guesses accumulate across rounds
# and are stored alongside each saved score.
secret_no = random.randint(1, 30)
wrong_guess = []
def play_easy(name):
    """Run the 'easy' game loop: prompt with higher/lower hints until the
    player guesses `secret_no`, then persist the score to score_list.json.

    Reads the module globals `secret_no` and `wrong_guess`.
    """
    # Bug fix: `attempts` was reset to 0 at the top of every loop iteration,
    # so every recorded score claimed exactly one attempt.
    attempts = 0
    while True:
        score_list = scores()
        guess = int(input("Guess the secret number (between 1 and 30): "))
        attempts += 1
        if guess == secret_no:
            score_list.append({"attempts": attempts, "date": str(datetime.datetime.now()), "name": name, "Secret_no.": secret_no,
                               "wrong_guess": wrong_guess})
            print("You've guessed it - congratulations! It's number " + str(secret_no))
            print("Attempts needed: " + str(attempts))
            with open("score_list.json", "w") as score_file:
                score_file.write(json.dumps(score_list))
            again = input("Would you like to try again y/n ").lower()
            if again == "n":
                break
            attempts = 0  # fresh round
        elif guess > secret_no:
            print("Your guess is not correct... try something smaller")
            # Bug fix: too-high misses were never recorded before.
            wrong_guess.append(guess)
        elif guess < secret_no:
            print("Your guess is not correct... try something bigger")
            wrong_guess.append(guess)
def play_hard(name):
    """Run the 'hard' game loop: no higher/lower hints, just "wrong guess",
    until the player finds `secret_no`; then persist the score.

    Reads the module globals `secret_no` and `wrong_guess`.
    """
    # Bug fix: `attempts` was reset inside the loop, so saved scores always
    # reported a single attempt.
    attempts = 0
    while True:
        score_list = scores()
        guess = int(input("Guess the secret number (between 1 and 30): "))
        attempts += 1
        if guess == secret_no:
            score_list.append({"attempts": attempts, "date": str(datetime.datetime.now()), "name": name, "Secret_no.": secret_no,"wrong_guess": wrong_guess})
            with open("score_list.json", "w") as score_file:
                score_file.write(json.dumps(score_list))
            print(f"you guessed correct, the no. is {secret_no}")
            print(f"You guessed {attempts} times")
            again = input("Would you like to try again y/n ").lower()
            if again == "n":
                break
            attempts = 0  # fresh round
        elif guess != secret_no:
            print("Wrong guess, try again")
            wrong_guess.append(guess)
def scores():
    """Load the saved score list from score_list.json.

    Returns [] when the file does not exist yet (first run); previously this
    crashed with FileNotFoundError before any score had been written.
    """
    try:
        with open("score_list.json", "r") as score_file:
            return json.loads(score_file.read())
    except FileNotFoundError:
        return []
def top_score():
    """Return up to three saved score entries with the fewest attempts."""
    ranked = sorted(scores(), key=lambda entry: entry["attempts"])
    return ranked[:3]
name = input("What is your name ? ")
print(f"Hej {name}")
while True:
    # Bug fix: the menu prompt was asked once before the loop, so choosing
    # "C" printed the scores in an infinite loop (and A/B could never be
    # followed by a different choice). Re-prompt on every iteration.
    selection = input("Would you like to A) play the game easy mode, B) play the game hard mode, C) see the scores or type anything else to quit?")
    if selection.upper() == "A":
        play_easy(name)
    elif selection.upper() == "B":
        play_hard(name)
    elif selection.upper() == "C":
        for x in top_score():
            print(str(x["attempts"]) + " attempts and date " + str(x["date"]))
    else:
        break
|
988,139 | 1f2e7c08d4915dcaa91e346ea6e26a7676edc375 | import streamlit as st
import joblib
import numpy as np
# App title and description (markdown). Typos fixed: "addmission" ->
# "admission", "predict" -> "predicts".
st.write("""
# GRE admission prediction app
This app predicts **GRE admission chance**
""")
st.sidebar.header('Specify the input parameters')
def user_input_features():
    """Render five sidebar sliders and return their values as a (1, 5)
    numpy array: [GRE, TOEFL, university rating, SOP, CGPA].

    All sliders span [0, 1] -- inputs are expected pre-normalized
    (presumably matching the scaling used to train the model; confirm
    against the training code).
    """
    gre=st.sidebar.slider("enter the gre score in range 0 to 1" , min_value=0.0,max_value=1.0,value=0.5)
    tofel=st.sidebar.slider("enter the tofel score in range 0 to 1", min_value=0.0,max_value=1.0,value=0.5)
    University_R=st.sidebar.slider("enter the university rating in range 0 to 1", min_value=0.0, max_value=1.0,value=0.5)
    SOP=st.sidebar.slider(
        "enter the SOP in range 0 to 1", min_value=0.0, max_value=1.0,value=0.5)
    CGPA=st.sidebar.slider(
        "enter the CGPA in range 0 to 1", min_value=0.0, max_value=1.0,value=0.5)
    # 2-D shape (1 sample, 5 features) as expected by sklearn-style predict().
    feature =np.array([[gre,tofel,University_R,SOP,CGPA]])
    return feature
# Collect the inputs, echo them back, then score them with the saved model.
feature=user_input_features()
st.header("specified input parameters")
st.write(feature)
st.write('----')
# Load the pre-trained regressor serialized with joblib.
predictor = joblib.load('regressor.joblib')
prediction=predictor.predict(feature)
st.header('chance of addmission is ')
st.write(prediction)
|
988,140 | 54eb12356b5816b01d31e2147d0b427ae3642571 | import os, urllib2, sys
import json
from pprint import pprint
# Work relative to this script's own directory so the generated .html files
# land next to it regardless of where the script is launched from.
pwd = os.path.dirname(os.path.realpath(__file__))
os.chdir(pwd)
def convertJsonToHtml(jsonFile):
    # Render each {title, keywords, desc} record of the parsed JSON array as
    # a standalone HTML file named after its title, with the keywords and
    # description embedded as meta tags.
    tkdArray = jsonFile
    print("print json format: ")
    for tkdItem in tkdArray:
        title = tkdItem["title"]
        # for keywords in tkdItem["keywords"]:
        #     print(keywords)
        # NOTE(review): assumes every record carries at least three keywords
        # -- confirm against whatever produces the JSON.
        keywordsA = tkdItem["keywords"][0]
        keywordsB = tkdItem["keywords"][1]
        keywordsC = tkdItem["keywords"][2]
        desc = tkdItem["desc"]
        # The output file is named exactly `title` (no .html extension added).
        f = open(title, 'w')
        msg = """
        <html>
        <head>
        <meta name="keywords" content="%s, %s, %s">
        <meta name="description" content="%s">
        </head>
        <body>
        <pre>%s<pre>
        </body>
        </html>
        """%(keywordsA, keywordsB, keywordsC, desc, desc)
        f.write(msg)
        f.close()
def readJson(jsonFile):
    # Parse the JSON file and hand the resulting array to the HTML renderer.
    with open(jsonFile) as data_file:
        parsed = json.load(data_file)
    convertJsonToHtml(parsed)
if __name__ == '__main__':
readJson(sys.argv[1])
|
988,141 | f8b5cd3ae444a5ae9eb690be64a1f7ee6fab1de9 | # Description
# 中文
# English
# Given two binary strings, return their sum (also a binary string).
# Have you met this question in a real interview?
# Example
# Example 1:
# Input:
# a = "0", b = "0"
# Output:
# "0"
# Example 2:
# Input:
# a = "11", b = "1"
# Output:
# "100"
class Solution:
    """
    @param a: a number
    @param b: a number
    @return: the result
    """
    def addBinary(self, a, b):
        # Walk both strings right-to-left, accumulating bit + carry sums;
        # divmod splits each column sum into (new carry, output bit).
        bits = []
        i, j, carry = len(a) - 1, len(b) - 1, 0
        while i >= 0 or j >= 0 or carry:
            column = carry
            if i >= 0:
                column += int(a[i])
                i -= 1
            if j >= 0:
                column += int(b[j])
                j -= 1
            carry, bit = divmod(column, 2)
            bits.append(str(bit))
        # Bits were produced least-significant first; reverse for the result.
        return ''.join(reversed(bits))
988,142 | c45d6a0b91c87b36981d1e4622c385900d843ce9 | def cal_aver(lists,length):
    '''
    Compute the average of the first `length` values in the list.

    :param lists: the list of values
    :param length: the number of leading elements to average
    :return: the integer (floor) average of the values
    :raise: no exceptions
    :precondition: length >= 1 and len(lists) >= length
    :complexity: best case O(n), worst case O(n), where n is `length`
    '''
    total = 0
    value = 0
    final = 0
    j = 0
    # Sum the first `length` elements.
    while j< (length):
        value = lists[j]
        total = total + value
        j = j+1
    # Floor division: the fractional part of the average is discarded.
    final = total //length
    return final
def main():
    '''
    Prompt the user for a list size and its values, then print their
    integer (floor) average via cal_aver.

    :return: None
    :raise: no exceptions
    :complexity: O(n) for reading the input plus O(n) inside cal_aver
    '''
    size = int(input('Enter the size of the list'))
    the_list = [0]*size
    position = 0
    while position < size:
        the_list[position] = int(input('Value: '))
        position += 1
    print(cal_aver(the_list, size))
main()
|
988,143 | cffe85a2a6aff88bb21cdc1366b61d8d26d158ec | import requests
def find_definition(word):
    """Return the first dictionary definition of `word` from the free
    dictionaryapi.dev service.

    Tries the en_GB dictionary first and falls back to en_US on a 404.
    Returns a "not found" message containing the word when neither
    dictionary has it (or on any non-200 response).
    """
    url = f"https://api.dictionaryapi.dev/api/v2/entries/en_GB/{word}"
    response = requests.get(url)
    if response.status_code == 404: # if GB dictionary does not contain word try US dictionary
        url = f"https://api.dictionaryapi.dev/api/v2/entries/en_US/{word}"
        response = requests.get(url)
    if response.status_code == 200: # success
        # Drill into the first meaning's first definition of the first entry.
        output = response.json()
        meanings = output[0]["meanings"]
        first_meaning = meanings[0]
        definitions = first_meaning["definitions"]
        definition = definitions[0]["definition"]
        return definition
    else:
        # Definition could not be found for the given word
        return f"Definition not found, answer: {word}"
988,144 | f76dce46a68dd8dd59a26b8767bdde06b83d8b7f | class LexisMsg(object):
    # Status messages reported by the Lexis pipeline stages
    # (initialize -> process -> transfer -> backup).
    MSG_INITIAL_FAILED="Failed to initial"
    MSG_INITIAL_FINISHED="Initial stage finished"
    MSG_PROCESS_FINISHED="English process stage finished"
    MSG_PROCESS_FAILED="Failed to get English hyperlink process done"
    MSG_TRANSFER_FINISHED="Transfer process stage finished"
    MSG_TRANSFER_FAILED="Failed to transfer data to production"
    MSG_BACKUP_FINISHED="Backup data finished"
    MSG_BACKUP_FAILED="Failed to backup data"
    # NOTE(review): identical text to MSG_PROCESS_FINISHED -- confirm whether
    # the hyperlink stage was meant to have its own distinct message.
    MSG_HYPERLINK_FINISHED="English process stage finished"
988,145 | c7fc9c827817cfa51164b491508253aec3f869e7 | # The Automobile class holds general data
# about an automobile in inventory.
class Automobile:
    """General data about one automobile held in inventory."""

    def __init__(self, make, model, mileage, price):
        """Initialize the make, model, mileage and price attributes."""
        self.__make = make
        self.__model = model
        self.__mileage = mileage
        self.__price = price

    # --- Accessors ---------------------------------------------------------

    def get_make(self):
        return self.__make

    def get_model(self):
        return self.__model

    def get_mileage(self):
        return self.__mileage

    def get_price(self):
        return self.__price

    # --- Mutators ----------------------------------------------------------

    def set_make(self, make):
        self.__make = make

    def set_model(self, model):
        self.__model = model

    def set_mileage(self, mileage):
        self.__mileage = mileage

    def set_price(self, price):
        self.__price = price
# The Car class represents a car. It is a subclass
# of the Automobile class.
class Car(Automobile):
    """A car: an Automobile that additionally tracks its door count."""

    def __init__(self, make, model, mileage, price, doors):
        """Initialize the inherited Automobile data plus the door count."""
        # Delegate the shared attributes to the superclass initializer
        # (passing self explicitly, per the explicit-call style used here).
        Automobile.__init__(self, make, model, mileage, price)
        self.__doors = doors

    def get_doors(self):
        """Accessor for the door count."""
        return self.__doors

    def set_doors(self, doors):
        """Mutator for the door count."""
        self.__doors = doors
|
988,146 | 6330ed9464426c9832376cd051194899f75f5db2 | import cherrypy
import pickle
import database as db
from pour_serial_class_2 import pour_serial
from mako.template import Template
from mako.lookup import TemplateLookup
import os, os.path
import sys
# Template/static paths, the shared serial-port helper and the pickled
# database state used by every handler class below.
current_dir = os.path.dirname(os.path.abspath(__file__))
lookup = TemplateLookup(directories=['html'])
datafilename = 'data.pkl'
#ser = {}
pour_serial_obj = pour_serial()
try:
    # Load the persisted database, or start fresh on the first run.
    database = pickle.load(open(datafilename, 'r'))
except IOError:
    database = db.Database()
def save_data():
    # Persist the in-memory database to disk. Text-mode pickle targets
    # Python 2; on Python 3 pickle requires binary mode ('wb'/'rb').
    global database
    pickle.dump(database, open(datafilename, 'w'))
dbase = pickle.load(open(datafilename, 'r'))  # NOTE(review): never used, and assumes the file exists -- confirm before removing
class Server(object):
    # Root handler: serves the landing page from the html/ template dir.
    @cherrypy.expose()
    def index(self):
        tmpl = lookup.get_template('header.html')
        return tmpl.render()
class Pour(object):
    """REST resource for pours (named sequences of subpours).

    Mounted with cherrypy's MethodDispatcher, so the GET/POST/PUT/DELETE
    methods map directly to the HTTP verbs.
    """
    exposed = True

    def table(self):
        pass

    def get_subpour_names(self):
        # Map subpour id -> display name for the template dropdowns.
        return dict((num, database.subpours[num].name)
                    for num in database.subpours.keys())

    def GET(self, n=None):
        """Render the pours page, optionally focused on pour `n`."""
        tmpl = lookup.get_template('pours.html')
        args = dict(subpour_names=self.get_subpour_names(), n=n, pours=database.pours)
        return tmpl.render(**args)

    def POST(self, **args):
        """Create a new pour from form data (name + comma-separated subpours)."""
        tmpl = lookup.get_template('pours.html')
        n = str(database.next_pour())
        subpours = args['subpours'].split(", ")
        if not args['name'] or len(subpours) == 0:
            # Invalid form: just re-render the listing.
            return self.GET()
        database.pours[n] = db.PourData(name=args['name'], subpours=subpours)
        save_data()
        return tmpl.render(subpour_names=self.get_subpour_names(), n=n, pours=database.pours)

    def PUT(self, n, **args):
        """Update pour `n` in place from form data."""
        subpours = args['subpours'].split(", ")
        if not args['name'] or len(subpours) == 0:
            return self.GET()
        database.pours[n].update(subpours=subpours, name=args['name'])
        save_data()

    def DELETE(self, n):
        """Remove pour `n` and persist the change."""
        del database.pours[n]
        # Bug fix: POST/PUT persist their changes but DELETE did not, so a
        # deleted pour reappeared after a server restart.
        save_data()
class Subpour(object):
    """REST resource for individual subpour definitions."""
    exposed = True

    @cherrypy.expose()
    def table(self):
        """Render the subpours summary table fragment."""
        tmpl = lookup.get_template('subpours_table.html')
        return tmpl.render(subpours=database.subpours)

    def GET(self, n=None):
        """Render the subpours page; the form targets POST for a new
        subpour, PUT when editing an existing one."""
        global database
        tmpl = lookup.get_template('subpours.html')
        args = dict()
        args['n'] = n
        args['subpours'] = database.subpours
        if n==None:
            args['form_method'] = 'POST'
        else:
            args['form_method'] = 'PUT'
        return tmpl.render(**args)

    def POST(self, **args):
        """Create a new subpour from form data and redirect to it."""
        global database
        # HTML checkboxes are only submitted when ticked; normalize to bools.
        args['water'] = 'water' in args
        args['post_center'] = 'post_center' in args
        n = str(database.next_subpour())
        database.subpours[n] = db.SubpourData(**args)
        save_data()
        # Bug fix: removed an unused re-open/re-load of the pickle file here
        # that leaked a file handle and threw away its result.
        raise cherrypy.HTTPRedirect('/subpours/' + str(n))

    def PUT(self, n, **args):
        """Update subpour `n` from form data and redirect back to it."""
        global database
        args['water'] = 'water' in args
        args['post_center'] = 'post_center' in args
        database.subpours[n].update(**args)
        save_data()
        raise cherrypy.HTTPRedirect('/subpours/' + str(n))

    def DELETE(self, n):
        """Remove subpour `n`, persist, and redirect to the listing."""
        global database
        del database.subpours[n]
        # Bug fix: persist the deletion like POST/PUT do.
        save_data()
        raise cherrypy.HTTPRedirect('/subpours/')
class status:
    # Plain-text status endpoint: reports arduino connectivity, the water
    # temperature and, when active, the current pour duration.
    exposed = True
    def GET(self):
        if pour_serial_obj.ser is None:
            return "no arduino connected"
        elif pour_serial_obj.temperature is None:
            return "no response from arduino"
        else:
            resp = "water temp %.02f°F" % pour_serial_obj.temperature
            if pour_serial_obj.pour_time is not None:
                resp += ", pouring for %.02f seconds" % pour_serial_obj.pour_time
            return resp
class RunPour:
    # Triggers execution of stored pour `n` on the hardware.
    exposed = True
    def GET(self, n):
        # NOTE(review): send_pour is neither defined nor imported anywhere in
        # this file, so this handler will raise NameError when hit -- confirm
        # the intended call (possibly a pour_serial / pour_serial_obj method).
        send_pour([database.subpours[s] for s in database.pours[n].subpours])
# Bind locally on port 9999 and mount each REST resource with the
# MethodDispatcher so HTTP verbs dispatch to the GET/POST/PUT/DELETE
# methods defined above.
cherrypy.config.update({'server.socket_host': '127.0.0.1',
                        'server.socket_port': 9999,
                        })
# Static asset directories served directly by cherrypy.
conf = {'/css': {'tools.staticdir.on': True, 'tools.staticdir.dir': os.path.join(current_dir, 'css')},
        '/jquery-ui': {'tools.staticdir.on': True, 'tools.staticdir.dir': os.path.join(current_dir, 'jquery-ui')}}
cherrypy.tree.mount(Pour(), '/pours',
    {'/' : {'request.dispatch' : cherrypy.dispatch.MethodDispatcher()}})
cherrypy.tree.mount(Pour().table, '/pours/table')
cherrypy.tree.mount(Subpour(), '/subpours',
    {'/' : {'request.dispatch' : cherrypy.dispatch.MethodDispatcher()}})
cherrypy.tree.mount(Subpour().table, '/subpours/table')
cherrypy.tree.mount(status(), '/status',
    {'/' : {'request.dispatch' : cherrypy.dispatch.MethodDispatcher()}})
cherrypy.tree.mount(RunPour(), '/run',
    {'/' : {'request.dispatch' : cherrypy.dispatch.MethodDispatcher()}})
server = Server()
cherrypy.quickstart(server, config=conf)
|
988,147 | 3aa79edae6d86ade623d429a8bbb7c44942391cd | import sys
# Competitive-programming harness: feed the test case from input.txt.
sys.stdin = open("input.txt", "rt")

# Classic "paint house" DP: n houses, cost row p[i] = [R, G, B]; adjacent
# houses must use different colors; minimize the total painting cost.
n = int(input())
p = []
for i in range(n):
    p.append(list(map(int, input().split())))
for i in range(1, len(p)):  # accumulate row by row through the last house
    p[i][0] = p[i][0]+min(p[i-1][1],p[i-1][2])  # paint i R: add the cheaper of previous G, B
    p[i][1] = p[i][1]+min(p[i-1][0],p[i-1][2])  # paint i G: add the cheaper of previous R, B
    p[i][2] = p[i][2]+min(p[i-1][0],p[i-1][1])  # paint i B: add the cheaper of previous R, G
print(min(p[n-1][0],p[n-1][1],p[n-1][2]))
|
988,148 | 8c7a778e04b39080aa0f0ae810303360ec46fd9c | # -*- coding: utf-8 -*-
from tornado.options import define

# Command-line/config options consumed via tornado.options.parse_command_line().
define('debug', default=False, help='enable debug mode')
define('port', default=5000, help='run on this port', type=int)
|
988,149 | bc94d7f26bc00853cec7433a675cd147be2f4e19 | from __future__ import division
import os
import sys
import gdal, gdalconst
from gdalconst import *
from shapely.geometry import LineString, Polygon
#crs conversion
#from pyproj import Proj, transform
class envi_file(object):
    """Wrapper around a GDAL-opened ENVI raster whose first two bands hold
    per-pixel x/y coordinates (an IGM geolocation file).

    Opening eagerly loads bands 1 and 2 into ``self.band_x`` / ``self.band_y``.
    Adapted from: https://chris35wills.github.io/python-gdal-raster-io/
    """

    def __init__(self, file_name):
        """Open *file_name* read-only and cache size, geotransform and coordinate bands."""
        self.file_name = file_name
        self.inDs = gdal.Open(file_name, GA_ReadOnly)
        if self.inDs is None:
            print("Couldn't open this file: " + file_name)
            print('\nPerhaps you need an ENVI .hdr file?')
            sys.exit("Try again!")
        else:
            print("%s opened successfully" % file_name)
        print('~~~~~~~~~~~~~~', 'Get image size', '~~~~~~~~~~~~~~')
        self.cols = self.inDs.RasterXSize
        self.rows = self.inDs.RasterYSize
        self.bands = self.inDs.RasterCount
        print("columns: %i" % self.cols)
        print("rows: %i" % self.rows)
        print("bands: %i" % self.bands)
        print('~~~~~~~~~~~~~~')
        print('Get georeference information')
        print('~~~~~~~~~~~~~~')
        self.geotransform = self.inDs.GetGeoTransform()
        originX = self.geotransform[0]
        originY = self.geotransform[3]
        self.pixelWidth = self.geotransform[1]
        self.pixelHeight = self.geotransform[5]
        print("origin x: %i" % originX)
        print("origin y: %i" % originY)
        print("width: %2.2f" % self.pixelWidth)
        print("height: %2.2f" % self.pixelHeight)
        # Bands 1/2 carry the per-pixel x/y coordinates (IGM layout).
        self.band_x = self.inDs.GetRasterBand(1).ReadAsArray(0, 0, self.cols, self.rows)
        self.band_y = self.inDs.GetRasterBand(2).ReadAsArray(0, 0, self.cols, self.rows)

    def get_array(self, band):
        """Return raster *band* (1-based) as a 2-D array, caching it on self.image_array."""
        print('~~~~~~~~~~~~~~')
        print('Convert image to 2D array')
        print('~~~~~~~~~~~~~~')
        band = self.inDs.GetRasterBand(band)
        self.image_array = band.ReadAsArray(0, 0, self.cols, self.rows)
        print(type(self.image_array))
        print(self.image_array.shape)
        return self.image_array

    def crop_points(self, wkt_region_file, output_filenm):
        """Write 'row,col' lines for every pixel whose coordinate falls inside
        the WKT polygon stored (one geometry, first line) in *wkt_region_file*."""
        from shapely import wkt as wkt
        from shapely.geometry import Point as Point
        bound_file = open(wkt_region_file, 'r')
        wkt_region_geom = bound_file.readlines()
        bound_file.close()
        print(wkt_region_geom)
        bounding_poly = wkt.loads(wkt_region_geom[0])
        poly_bbox = bounding_poly.bounds
        out_file = open(output_filenm, 'w')
        for row in range(0, self.rows):
            for col in range(0, self.cols):
                coords_center = (self.band_x[row][col], self.band_y[row][col])
                # Cheap bbox pre-filter before the exact polygon test.
                # BUG FIX: the y value must be compared against the bbox max-y;
                # the original re-tested coords_center[0] (the x value).
                if (coords_center[0] > poly_bbox[0] and coords_center[0] < poly_bbox[2]
                        and coords_center[1] > poly_bbox[1] and coords_center[1] < poly_bbox[3]):
                    if bounding_poly.contains(Point(coords_center)):
                        out_file.write(str(row) + ',' + str(col) + '\n')
                        print("wrote point")
                    else:
                        print("rejected point")
        out_file.close()

    def crop_points_bbox(self, bbox, points_in_region):
        """Filter the 'row,col' candidates in *points_in_region* down to those whose
        coordinates fall within *bbox* (given in EPSG:28992, converted to 32631),
        padded by 10 units on every side. Returns a list of (row, col) tuples."""
        from shapely.geometry import Point as Point
        correct_bbox_min = coord_transformer(28992, 32631, bbox[0])
        correct_bbox_max = coord_transformer(28992, 32631, bbox[1])
        poly_bbox = (correct_bbox_min[0], correct_bbox_min[1], correct_bbox_max[0], correct_bbox_max[1])
        print(poly_bbox)
        out_list = []
        input_points = open(points_in_region, 'r')
        potential_points = input_points.readlines()
        input_points.close()
        for pot_pt in potential_points:
            pot_pt = pot_pt.split(',')
            row = int(pot_pt[0])
            col = int(pot_pt[1])
            coords_center = (self.band_x[row][col], self.band_y[row][col])
            # 10-unit margin keeps border pixels that straddle the bbox edge.
            if (coords_center[0] > poly_bbox[0] - 10 and coords_center[0] < poly_bbox[2] + 10
                    and coords_center[1] > poly_bbox[1] - 10 and coords_center[1] < poly_bbox[3] + 10):
                out_list.append((row, col))
        return out_list

    def selection_to_pixels(self, input_file, output_file):
        """For each 'row,col' in *input_file*, write a WKT LineString outlining that
        pixel (midpoints to its four diagonal neighbours) to *output_file*."""
        input = open(input_file, 'r')
        selected_points = input.readlines()
        input.close()
        output = open(output_file, 'w')
        output.write("geometry\n")
        output.close()
        for point_str in selected_points:
            index = point_str.replace('\n', '').split(',')
            row = int(index[0])
            col = int(index[1])
            # Border pixels lack a full neighbourhood; skip them.
            if row == 0 or row == self.rows - 1 or col == 0 or col == self.cols - 1:
                continue
            center_coords = (self.band_x[row][col], self.band_y[row][col])
            print(center_coords)
            print(row, col)
            diagonal_neighbor_coords = [(self.band_x[row - 1][col - 1], self.band_y[row - 1][col - 1]),
                                        (self.band_x[row + 1][col - 1], self.band_y[row + 1][col - 1]),
                                        (self.band_x[row + 1][col + 1], self.band_y[row + 1][col + 1]),
                                        (self.band_x[row - 1][col + 1], self.band_y[row - 1][col + 1])]
            pixel_border_pt_coords = []
            for neigbor_coords in diagonal_neighbor_coords:
                # Corner of the pixel = midpoint between centre and diagonal neighbour.
                mean_coords = ((neigbor_coords[0] + center_coords[0]) / 2.0,
                               (neigbor_coords[1] + center_coords[1]) / 2.0)
                pixel_border_pt_coords.append(mean_coords)
            pixel_border_pt_coords.append(pixel_border_pt_coords[0])  # close the ring
            # Re-opened in append mode per point (as in the original) so a crash
            # mid-run leaves earlier geometries on disk.
            output = open(output_file, 'a')
            output.write(str(LineString(pixel_border_pt_coords)) + '\n')
            output.close()

    def selection_to_pixels_bbox(self, selected_points, output_file, correction_vector,
                                 correction_unit, roof_polygon, deviations, side,
                                 building_id, roof_surf_id):
        """Append one CSV line (';'-separated) per selected pixel whose shifted
        hexagonal cell overlaps *roof_polygon* by at least 70%, recording the
        deviation-covered fraction of the cell."""
        correction_x = correction_vector[0] * correction_unit
        correction_y = correction_vector[1] * correction_unit
        for index in selected_points:
            row = index[0]
            col = index[1]
            if row == 0 or row == self.rows - 1 or col == 0 or col == self.cols - 1:
                continue
            center_coords = [self.band_x[row][col] + correction_x,
                             self.band_y[row][col] + correction_y]
            # Six surrounding pixels -> hexagonal cell footprint.
            diagonal_neighbor_coords = [
                [self.band_x[row - 1][col - 1] + correction_x, self.band_y[row - 1][col - 1] + correction_y],
                [self.band_x[row][col - 1] + correction_x, self.band_y[row][col - 1] + correction_y],
                [self.band_x[row + 1][col - 1] + correction_x, self.band_y[row + 1][col - 1] + correction_y],
                [self.band_x[row + 1][col + 1] + correction_x, self.band_y[row + 1][col + 1] + correction_y],
                [self.band_x[row][col + 1] + correction_x, self.band_y[row][col + 1] + correction_y],
                [self.band_x[row - 1][col + 1] + correction_x, self.band_y[row - 1][col + 1] + correction_y]]
            pixel_border_pt_coords = []
            pixel_border_pt_coords_28992 = []
            for neigbor_coords in diagonal_neighbor_coords:
                mean_coords = ((neigbor_coords[0] + center_coords[0]) / 2.0,
                               (neigbor_coords[1] + center_coords[1]) / 2.0)
                pixel_border_pt_coords.append(mean_coords)
                pixel_border_pt_coords_28992.append(coord_transformer(32631, 28992, mean_coords))
            pixel_border_pt_coords.append(pixel_border_pt_coords[0])
            pixel_border_pt_coords_28992.append(pixel_border_pt_coords_28992[0])
            cell = Polygon(pixel_border_pt_coords_28992)
            overlap_area_ratio = Polygon(pixel_border_pt_coords_28992).intersection(roof_polygon).area / cell.area
            if overlap_area_ratio >= 0.7:
                if deviations is None:  # idiom fix: identity test for None
                    deviation_perc = 0
                else:
                    deviation_perc = cell.intersection(deviations).area / cell.area
                output = open(output_file, 'a')
                output.write(str(building_id) + ';' + str(roof_surf_id) + ';' + side + ';' + str(row) + ';' + str(col) + ';' + str(LineString(pixel_border_pt_coords_28992)) + ';' + str(cell.area) + ';' + str(deviation_perc) + ';' + str(overlap_area_ratio) + '\n')
                output.close()
def coord_transformer(input_CRS, output_CRS, point_coords):
    """Reproject (x, y) *point_coords* from EPSG:*input_CRS* to EPSG:*output_CRS*.

    Returns the transformed coordinates as an (x, y) tuple.
    Based on: https://gis.stackexchange.com/questions/78838/
    """
    source = gdal.osr.SpatialReference()
    source.ImportFromEPSG(input_CRS)
    target = gdal.osr.SpatialReference()
    target.ImportFromEPSG(output_CRS)
    transform = gdal.osr.CoordinateTransformation(source, target)

    point = gdal.ogr.Geometry(gdal.ogr.wkbPoint)
    point.AddPoint(point_coords[0], point_coords[1])
    point.Transform(transform)
    return (point.GetX(), point.GetY())
def to_xy_file(band_x, band_y, output_filenm, start=5000, stop=5500):
    """Write rows *start*..*stop*-1 of the coordinate grids to an .xyz file
    (header 'x y z', z fixed at 0).

    Generalized: the original hard-coded rows 5000-5500; those remain the
    defaults, so existing callers are unaffected.
    """
    file = open(output_filenm, 'w')
    file.write('x y z\n')
    print(len(band_x))
    for line_index in range(start, stop):
        for col_index in range(0, len(band_x[0])):
            file.write(str(band_x[line_index][col_index]) + ' ' + str(band_y[line_index][col_index]) + ' ' + '0\n')
    file.close()
def select_to_xy_file(band_x, band_y, selection, output_filenm):
    """Write the coordinates of the pixels listed in *selection* to an .xyz file.

    Each entry of *selection* is a (col, row) tuple: the grids are indexed as
    band[row][col], i.e. band_x[coord[1]][coord[0]].
    """
    out = open(output_filenm, 'w')
    out.write('x y z\n')
    print(len(band_x))
    for col, row in selection:
        line = str(band_x[row][col]) + ' ' + str(band_y[row][col]) + ' ' + '0\n'
        out.write(line)
    out.close()
if __name__ == "__main__":
    # Ad-hoc driver: load one specific APEX IGM cube and dump its coordinate grid.
    geocorr_file = envi_file(r"C:\Users\P.A. Ruben\Desktop\Master thesis\12 - coding space\3D-Models-in-Urban-Mining\APEX_data\MM097_ROTTE_140917_a031d_calibr_cube000_igm.bsq")
    geocorr_x = geocorr_file.get_array(1)
    geocorr_y = geocorr_file.get_array(2)
    #print(geocorr_x, geocorr_y)
    #geocorr_file.crop_points('bbox_wkt.txt', 'apex_points_in_bbox_flight_3.txt')
    #geocorr_file.selection_to_pixels('apex_points_in_bbox_flight_3.txt', 'pixel_geometry_flight_3.txt')
    #print(geocorr_x[4144][370],geocorr_y[4144][370])
    # NOTE(review): to_xy_file defaults to rows 5000-5500; assumes this cube
    # has at least 5500 rows -- confirm for other inputs.
    to_xy_file(geocorr_x, geocorr_y, 'xy_coord_wgs84.xyz')
    #list_pts_luxor=[(228,4144),(369,4145),(376,4140),(379,4143),(371,4147),(288,4045),(317,4032),(298,4058),(271,4072),(463,4125),(475,4076),(475,4086),(449,4104),(479,4113),(552,4279),(657,4251),(327,4057),(331,4053),(332,4061),(359,4145),(388,4143),(381,4137)]
    #select_to_xy_file(geocorr_x, geocorr_y, list_pts_luxor, 'xy_coord_wgs84.xyz')
|
class TipoHabilidade:
    """Immutable value object naming a skill category.

    Equality and hashing are by category name, so instances can be used as
    dict keys / set members. The class constants are the canonical names.
    """
    FISICAS_E_MOTORAS = "Fisicas e Motoras"
    INTELECTUAIS_E_TECNICAS = "Intelectuais e Tecnicas"
    COMPORTAMENTAIS = "Comportamentais"

    def __init__(self, nome):
        self._nome = nome

    @property
    def nome(self) -> str:
        """Category name (read-only)."""
        return self._nome

    def __repr__(self):
        # Added for debuggability; round-trippable form.
        return "%s(%r)" % (type(self).__name__, self._nome)

    def __hash__(self):
        return hash(self.nome)

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                self.nome == other.nome
                )
988,151 | a031a1f9547e349e63e4617c00744fcca34bceae | import math
# Inline demo: upper-case the phrase and append one '!' per two characters.
praise = "You are doing great"
praise = praise.upper()
number_of_characters = len(praise)
result = praise + "!" * (number_of_characters //2)
print (result)
# Same logic, packaged as a reusable function below.
def yell(text):
    """Print *text* upper-cased, followed by one '!' per two characters."""
    shouted = text.upper()
    exclamations = "!" * (len(shouted) // 2)
    print(shouted + exclamations)
# Demonstrate the function with two sample phrases (prints to stdout).
yell ("You are doing great")
yell ("Don't repeat yourself")

# Next: splitting a restaurant cheque evenly.
def split_cheque(total, number_of_people):
    """Return each diner's share of *total*, rounded up to the next whole unit.

    Rounding up (math.ceil) guarantees the group never under-pays by a
    fraction when the bill does not divide evenly.
    """
    share = total / number_of_people
    return math.ceil(share)
# Interactive driver: read the bill and party size, then print the per-person share.
total_due = float(input("What is the total? "))
# float() so the total may contain decimals.
number_of_people = int(input("How many people? "))
# int() because you cannot have a fractional person.
amount_due = split_cheque (total_due,number_of_people)
# amount_due holds the rounded-up per-person cost returned by split_cheque.
print (f"Each person owes {amount_due}")
988,152 | 9431c2a6cee9a50f199c9e762ddeab8b8104c4fe | from functools import reduce
# Demo of the classic functional trio (map / filter / reduce) on a sample list.
sample = [1, 3, -1, 15, 9]
doubled = map(lambda value: value * 2, sample)            # built lazily; never consumed
evens = filter(lambda value: value % 2 == 0, sample)
total = reduce(lambda acc, value: acc + value, sample)
# Note: reduce's first element is NOT doubled here (seedless reduce quirk).
total_with_doubles = reduce(lambda acc, value: acc + value * 2, sample)
sum_to_100 = reduce(lambda acc, value: acc + value, range(101))
print(list(evens))
print(total)
print(total_with_doubles)
print(sum_to_100)
|
988,153 | 641c35892df568d2126f0d234ad052e8bf170de6 | from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
from hyperactive import Hyperactive
# Benchmark dataset: 569 breast-cancer samples, binary target.
data = load_breast_cancer()
X, y = data.data, data.target
def pipeline1(filter_, gbc):
    """Chain the feature filter in front of the classifier as a sklearn Pipeline."""
    steps = [("filter_", filter_), ("gbc", gbc)]
    return Pipeline(steps)
def pipeline2(filter_, gbc):
    """Baseline variant: ignore the filter and use the classifier alone."""
    return gbc
def model(opt):
    """Hyperactive objective: mean 3-fold CV score of the sampled configuration.

    *opt* supplies the booster hyper-parameters, the SelectKBest k, and which
    pipeline factory (with or without feature selection) to evaluate.
    """
    booster = GradientBoostingClassifier(
        n_estimators=opt["n_estimators"],
        max_depth=opt["max_depth"],
        min_samples_split=opt["min_samples_split"],
        min_samples_leaf=opt["min_samples_leaf"],
    )
    selector = SelectKBest(f_classif, k=opt["k"])
    estimator = opt["pipeline"](selector, booster)
    return cross_val_score(estimator, X, y, cv=3).mean()
# Discrete search space; 'pipeline' also searches over whether to use
# feature selection at all.
search_space = {
    "k": list(range(2, 30)),
    "n_estimators": list(range(10, 200, 10)),
    "max_depth": list(range(2, 12)),
    "min_samples_split": list(range(2, 12)),
    "min_samples_leaf": list(range(1, 11)),
    "pipeline": [pipeline1, pipeline2],
}
hyper = Hyperactive()
hyper.add_search(model, search_space, n_iter=30)
hyper.run()
|
988,154 | b4fe94bb1eca6a7d30273fd9bbafcfc54c0cd794 | #! /usr/bin/env python3
import unittest
# also known as chopsearch, binary chop, logarithmnic search, half-interval search.
# this is good for sorted arrays, or arrays that have seen a rotation.
# search by repeated diving the search in half
# worst case is O(log n)
# constant space
def binarysearch_bad(arr, needle, l, r):
    """Recursive binary search over arr[l:r]; returns the index of *needle* or -1.

    BUG FIX: the recursive calls previously dispatched to the sibling
    `binarysearch` function (different signature/semantics) instead of
    recursing into this function itself.
    """
    if l == r:
        return -1
    if r - l == 1:
        # One-element window: direct comparison.
        if arr[l] == needle:
            return l
        else:
            return -1
    mid = l + ((r - l) // 2)
    if (arr[mid] == needle):
        return mid
    if arr[mid] > needle:
        return binarysearch_bad(arr, needle, l, mid)
    if arr[mid] < needle:
        return binarysearch_bad(arr, needle, mid, r)
# Recursive variant using slices. Slices copy in CPython, but the logic stays
# simple; raises instead of returning a sentinel when the needle is absent.
def binarysearchrecursive(arr, needle):
    """Return the index of *needle* in sorted *arr*; raise Exception if absent."""
    pivot = len(arr) // 2
    if len(arr) == 1 and arr[0] != needle:
        raise Exception('{0} not in list'.format(needle))
    value = arr[pivot]
    if value == needle:
        return pivot
    if value > needle:
        return binarysearchrecursive(arr[:pivot], needle)
    # Offset by pivot because the right slice re-bases indices at zero.
    return pivot + binarysearchrecursive(arr[pivot:], needle)
# Iterative binary search, O(log n) time, O(1) space: each pass halves the
# candidate window, so at most log2(n) iterations are needed.
def binarysearch(arr, needle):
    """Return the index of *needle* in sorted *arr*, or -1 if not present."""
    # Half-open invariant: answer (if any) lies strictly between lo and hi.
    lo, hi = -1, len(arr)
    while lo + 1 < hi:
        mid = lo + (hi - lo) // 2
        probe = arr[mid]
        if probe == needle:
            return mid
        if probe > needle:
            hi = mid
        else:
            lo = mid
    return -1  # window collapsed without a hit
class Test(unittest.TestCase):
    """Smoke tests for both binary-search variants (found / not-found paths)."""
    def test_notfound(self):
        # Iterative variant signals absence with -1.
        self.assertEqual(binarysearch([1,2,3,4,5,6,7,8,9,10], 0), -1)
    def test_found(self):
        self.assertEqual(binarysearch([1,2,3,4,5,6,7,8,9,10], 3), 2)
    def test_notfoundrecursive(self):
        # Recursive variant signals absence by raising.
        self.assertRaises(Exception, binarysearchrecursive, [1,2,3,4,5,6,7,8,9,10], 0)
    def test_foundrecursive(self):
        self.assertEqual(binarysearchrecursive([1,2,3,4,5,6,7,8,9,10], 3), 2)

if __name__ == '__main__':
    unittest.main(verbosity=2)
|
988,155 | 57d584888068da19e5b7095be7687cd06c6c5720 | import datetime
import json
from google.appengine.ext import ndb
import webapp2
class Header(ndb.Model):
    """Contains a single HTTP header for a resource."""
    name = ndb.StringProperty()   # header name, e.g. "Cache-Control"
    value = ndb.StringProperty()  # header value
class Resource(ndb.Model):
    """Contents of a single URL."""
    path = ndb.StringProperty()                    # request path this resource serves
    content = ndb.TextProperty()                   # response body
    content_type = ndb.StringProperty()            # Content-Type header value
    include_last_modified = ndb.BooleanProperty()  # emit a Last-Modified header?
    modified_time = ndb.DateTimeProperty()         # set on every save
    expires_seconds = ndb.IntegerProperty()        # -1 means "no Expires header"
    headers = ndb.StructuredProperty(Header, repeated=True)  # extra custom headers
class ContentJsonManager(webapp2.RequestHandler):
    """JSON API used by the editor UI to read and write Resource entities."""

    def find_resource(self):
        """Return the Resource matching the request path, or None."""
        # Strip the leading /content_manger_json from the path to get the path
        # of the resource being saved.
        resource_path = self.request.path[21:]
        results = Resource.query(Resource.path == resource_path).fetch(1)
        # TODO: could return a tuple of result, path to avoid recalculating
        # the path when creating a new resource.
        if len(results) > 0:
            return results[0]
        return None

    def get(self):
        """Serialize the resource (or {} if missing) as JSON for the editor."""
        resource = self.find_resource()
        if resource is not None:
            resource_data = {
                'content': resource.content,
                'ctype': resource.content_type,
                'headers': [],
            }
            # Optional fields are only present when meaningful.
            if resource.include_last_modified:
                resource_data['incdate'] = 'true'
            if resource.expires_seconds != -1:
                resource_data['expires'] = resource.expires_seconds
            for header in resource.headers:
                # Headers travel over the wire as "name:value" strings.
                resource_data['headers'].append('%s:%s' % (
                    header.name, header.value))
        else:
            resource_data = {}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(resource_data))

    def post(self):
        """Create or update the resource at the request path from a JSON body."""
        resource = self.find_resource()
        if resource is None:
            resource = Resource()
            resource.path = self.request.path[21:]
        resource_data = json.loads(self.request.body)
        resource.content = resource_data['content']
        resource.content_type = resource_data['ctype']
        resource.include_last_modified = 'incdate' in resource_data
        resource.modified_time = datetime.datetime.now()
        if 'expires' in resource_data:
            resource.expires_seconds = int(resource_data['expires'])
        else:
            resource.expires_seconds = -1  # sentinel: no Expires header
        resource.headers = []
        for header_name_value in resource_data['headers']:
            # Headers are sent from the client JS in the form name:value.
            resource.headers.append(Header(
                name=header_name_value[:header_name_value.index(':')],
                value=header_name_value[header_name_value.index(':') + 1:]))
        resource.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write('saved resource %s' % (resource.path,))
class ContentLister(webapp2.RequestHandler):
    """HTML page listing stored resources, 10 per page, ordered by path."""

    def get(self):
        """Lists a few resources with pagination."""
        resources = []
        starting_path = self.request.get('start')
        if starting_path:
            # Fetch 11 so we know whether a "Next" link is needed.
            resources = Resource.query(Resource.path >= starting_path).order(
                Resource.path).fetch(11)
        else:
            resources = Resource.query().order(Resource.path).fetch(11)
        self.response.headers['Content-Type'] = 'text/html'
        self.response.write('<!doctype><html><head>' +
            '<title>Content Lister</title></head><body>Resources:<br>')
        for i in xrange(10):  # xrange: Python 2 (App Engine standard runtime)
            if i < len(resources):
                # TODO: constructing the path this way makes the resource
                # path a possible vector for XSS.
                self.response.write('%s ' % (resources[i].path,) +
                    '<a href="/content_manager%s">' % (
                        resources[i].path,) +
                    'Edit</a> <a href="%s">View</a><br>' % (
                        resources[i].path,))
        if len(resources) > 10:
            # The 11th entry is the first item of the next page.
            self.response.write(
                '<a href="/content_lister?start=%s">Next</a>' % (
                    resources[10].path,))
        self.response.write('</body></html>')
class ResourceRenderer(webapp2.RequestHandler):
    """Catch-all handler: serve the stored Resource for the request path."""

    def get(self):
        """Render the resource body with its configured headers, or a 404 page."""
        results = Resource.query(Resource.path == self.request.path).fetch(1)
        if len(results) < 1:
            # There was no resource with this path so return a 404.
            self.response.write(
                '<html><head><title>Not Found</title></head>' +
                '<body>Not Found</body></html>')
            self.response.headers['Content-Type'] = 'text/html'
            self.response.status = '404 Not Found'
        else:
            resource = results[0]
            self.response.write(resource.content)
            self.response.headers['Content-Type'] = \
                resource.content_type.encode('ascii', 'ignore')
            self.response.status = '200 OK'
            if resource.include_last_modified:
                # Format the modified time as Mon, 06 Jul 2015 08:47:21 GMT
                self.response.headers['Last-Modified'] = \
                    resource.modified_time.strftime(
                        '%a, %d %b %Y %H:%M:%S GMT')
            if resource.expires_seconds != -1:
                # Expiry is computed relative to "now" at serve time.
                self.response.headers['Expires'] = (datetime.datetime.now() +
                    datetime.timedelta(
                        seconds=resource.expires_seconds)).strftime(
                    '%a, %d %b %Y %H:%M:%S GMT')
            for header in resource.headers:
                # Custom headers last, so they may override the ones above.
                self.response.headers[
                    header.name.encode('ascii', 'ignore')] = \
                    header.value.encode('ascii', 'ignore')
# Route table: specific handlers first; everything else is served from the
# datastore by ResourceRenderer.
app = webapp2.WSGIApplication([
    ('/content_manager_json.*', ContentJsonManager),
    ('/content_lister.*', ContentLister),
    ('/.*', ResourceRenderer),
], debug=True)
|
988,156 | cf9ca4f2093909037dcc26d0ad10e36431b5e510 | # -*- coding: utf-8 -*-
"""Multi-level dictionary
.. module:: lib.array.multidict
:platform: Unix
:synopsis: Multi-level dictionary
.. moduleauthor:: Petr Czaderna <pc@hydratk.org>
"""
from collections import defaultdict
class MultiDict(defaultdict):
    """Arbitrarily nested dictionary.

    Missing keys auto-create another MultiDict, so chained assignment such as
    ``d['a']['b']['c'] = 1`` works at any depth without KeyError.
    """

    def __init__(self):
        # The default factory is this class itself, giving the recursion.
        super(MultiDict, self).__init__(MultiDict)

    def __repr__(self):
        # Render like a plain dict, hiding defaultdict's factory noise.
        return dict.__repr__(self)
988,157 | fd5cd4d3c3907a256b1de6a5f1ef7c6af14d652e | #!/usr/bin/env python
import haxxpkgs
import sys

if __name__ == "__main__":
    # Look up the attribute named by argv[1] in haxxpkgs, call it, print the result.
    drv = getattr(haxxpkgs, sys.argv[1])
    print(str(drv()))
988,158 | a7abd7ab0d3d481d1cd3022127c88749ca942af9 | k=int(input())
def smallest_divisor_with_odd_quotient(k):
    """Return the smallest proper divisor i of *k* (1 <= i < k) such that
    k // i is odd; return *k* itself when no such divisor exists.

    Extracted from the original inline loop so the logic is importable and
    testable; printing moved to the guarded entry point.
    """
    for i in range(1, k):
        quotient = k // i
        if k % i == 0 and quotient % 2 == 1:
            return i
    return k


if __name__ == "__main__":
    k = int(input())
    print(smallest_divisor_with_odd_quotient(k))
|
988,159 | 0b986e7ce6d5f1bf0548e7f581f260f92007a256 | import nltk
import pymorphy2
from collections import Counter
import math
import numpy
def compute_tfidf(corpus):
    """Return one {token: tf-idf score} dict per document in *corpus*.

    *corpus* is a list of token lists. tf is the length-normalised count
    within a document; idf is log10(N / document-frequency).
    """

    def term_frequencies(tokens):
        # Raw counts, normalised by document length.
        freqs = Counter(tokens)
        total = float(len(tokens))
        return {token: count / total for token, count in freqs.items()}

    def inverse_document_frequency(token):
        containing = sum(1.0 for doc in corpus if token in doc)
        return math.log10(len(corpus) / containing)

    scored = []
    for doc in corpus:
        tf = term_frequencies(doc)
        scored.append({token: tf[token] * inverse_document_frequency(token)
                       for token in tf})
    return scored
# Corpus preparation: reviews.txt holds documents separated by the literal
# token "SPLIT"; tokens are lemmatised with pymorphy2 (Russian morphology).
f = open("reviews.txt", encoding="UTF-8")
g = open("out.txt", "w", encoding="UTF-8")
analyzer = pymorphy2.MorphAnalyzer()
puncto = [',', '.', ':', '?', '«', '»', '-', '(', ')', '!', '\'', '—', ';', '”', '...']
words = []
texts = f.read().replace('\n', ' ').split("SPLIT")
normalized_texts = []
print(len(texts))
for text in texts:
    tokens = nltk.word_tokenize(text)
    normalized_words = []
    for token in tokens:
        if token in puncto: continue  # drop punctuation tokens
        # Take the most probable morphological parse and keep its lemma.
        word = analyzer.parse(token)[0]
        normalized_words.append(word.normal_form)
    normalized_texts.append(normalized_words)
tfidf = compute_tfidf(normalized_texts)
print(tfidf)
#TF-IDF for every word, sorted descending per document
for dictionary in tfidf:
    sorted_dictionary = {k: v for k, v in sorted(dictionary.items(), key=lambda item: item[1], reverse=True)}
    print(sorted_dictionary, file=g)
#Summarise of all unique words: sum tf-idf across documents and sort
all_unique_words = []
for dictionary in tfidf:
    for key in dictionary.keys():
        all_unique_words.append(key)
unique_words = numpy.unique(all_unique_words)
print(len(numpy.unique(all_unique_words)))
unique_dictionary = {}
for word in unique_words:
    unique_dictionary[word] = 0
    for dictionary in tfidf:
        # Words absent from a document contribute 0.
        unique_dictionary[word] += dictionary.get(word, 0)
sorted_dictionary = {k: v for k, v in sorted(unique_dictionary.items(), key=lambda item: item[1], reverse=True)}
print(sorted_dictionary, file=g)
988,160 | c9089d71d7665abdca419775926c57e218ed1a92 | import pymysql as p
# NOTE(review): hard-coded credentials; move to environment/config.
c = p.connect(host = 'localhost',
              user = 'root',
              password = 'Gaganmalvi@123',
              database = 'college')
a = c.cursor()
delval = input('Enter Roll Number of student whose record you want to purge. :')
# SECURITY FIX: parameterized query -- the original concatenated user input
# into the SQL string, an SQL-injection vector.
a.execute('delete from student where rno = %s', (delval,))
c.commit()
print('Successfully deleted. Exiting...')
|
988,161 | a2c45bf3a8dd60fcf03dbb792d90f50c6803d78f | import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile # get the api
import numpy as np
# NOTE(review): Python 2 script (print statements). Several names used below
# (X, z, nfft, b) are never defined -- as written this crashes; flagged inline.
fs, data = wavfile.read('track01_ijsvogel.wav')
length = data.shape[0] / fs
times = 10
fft_fs = fs / 10
print fft_fs
print fs
a = data.T[0]
# NOTE(review): range() needs an int; length * times may be a float here.
for i in range(length * times):
    print fft_fs*(i+1)
    # two channel track, get the first channel
    # 8-bit track, b is now normalized on [-1,1)
    #b=[(x/2**8.)*2-1 for x in a]
    c = fft(a[i*fft_fs:fft_fs*(i+1)]) # calculate fourier transform (complex numbers list)
    d = len(c)/2 # you only need half of the fft list (real signal symmetry)
    # NOTE(review): X, z and nfft are undefined -- this line cannot run as-is.
    X[i,:] = np.log(np.abs(z[:nfft/2]))
    plt.plot(abs(c[:(d-1)]),'r')
    plt.show()

from pylab import *
# NOTE(review): b is only defined in the commented-out line above.
specgram(b)
|
988,162 | 15c21db671993dd6ae915e3a0264b57892ab1110 | import cv2
import numpy as np
# Load the image, binarize with Otsu, and rasterize the first contour into a mask.
image = cv2.imread(r"../picture_data/change.png")
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
counters, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = counters[0]
# BUG FIX: np.zeros expects a shape tuple; passing the image array itself
# raised TypeError. Use the grayscale image's shape.
mask = np.zeros(image_gray.shape, np.uint8)
counters = cv2.drawContours(mask, [cnt], 0, 255, -1)
pixel_points = np.transpose(np.nonzero(mask))  # (row, col) of every mask pixel
cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
cv2.imshow("mask", counters)
cv2.waitKey()
cv2.destroyAllWindows()
988,163 | f968c49c2ead57c593ffa9a087b121545f6cb7bc | from django.urls import reverse, resolve
from core.dj_import import HttpRequest
from django.test.client import Client
from core.tests.base import ( SetUpBrandsCategoriesModelsMixin,
getSingleEbayCategoryMixin,
getUrlQueryStringOff,
BaseUserWebTestCase )
from ..forms import CreateSearchForm, UpdateSearchForm
from ..models import Search
from categories.models import Category
from finders.models import ItemFound
# from pprint import pprint
# Create your tests here.
class TestFormValidation(
        getSingleEbayCategoryMixin,
        SetUpBrandsCategoriesModelsMixin,
        BaseUserWebTestCase ):
    ''' Search Form Tests '''
    # helpful:
    # https://stackoverflow.com/questions/2257958/django-unit-testing-for-form-edit '''

    def test_save_redirect(self):
        #
        '''after saving the form, next page should be the detail'''
        #
        dFormData = dict(
            cTitle = 'Great Widget 1',
            cPriority = "A1",
            cKeyWords = "Blah bleh blih",
            iUser = self.user1.id )
        #
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        form.instance.iUser = self.user1
        self.assertTrue( form.is_valid() )
        # test save
        form.save()
        oSearch = Search.objects.get( cTitle = 'Great Widget 1' )
        # get_absolute_url may carry a query string; compare only the path.
        self.assertEqual(
            getUrlQueryStringOff( oSearch.get_absolute_url() )[0],
            reverse('searching:detail', kwargs={ 'pk': oSearch.id } ) )

    def test_form_valid(self):
        '''a search is valid with key words OR a category, and a well-formed priority'''
        # has key words
        dFormData = dict(
            cTitle = "My clever search 3",
            cKeyWords = "Blah bleh blih",
            cPriority = "A2",
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        if form.errors:
            # Dump errors to aid debugging when this assertion fails.
            print()
            print('form has at least one error:')
            for k, v in form.errors.items():
                print( k, ' -- ', v )
        self.assertTrue( form.is_valid() )
        # has a category
        dFormData = dict(
            cTitle = "My clever search 4",
            iDummyCategory = 10, # see core.tests
            cPriority = "A3",
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        form.user = self.user1
        self.assertTrue( form.is_valid() )
        # cPriority not good (must be letter + digit)
        dFormData = dict(
            cTitle = "My clever search 4",
            iDummyCategory = 10, # see core.tests
            cPriority = "A",
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        self.assertFalse( form.is_valid() )
        # no key words, no category -> invalid
        dFormData = dict(
            cTitle = "My clever search 5",
            cPriority = "A4",
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        self.assertFalse( form.is_valid() )
        # has an invalid category
        dFormData = dict(
            cTitle = "My clever search 6",
            iDummyCategory = 'abc',
            cPriority = "A5",
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        self.assertFalse( form.is_valid() )
        # has My Category set without an ebay category -> invalid
        #
        oCategory = Category.objects.filter( cTitle = "Capacitor Checker" )[0]
        #
        dFormData = dict(
            cTitle = "My clever search 7",
            cPriority = "A6",
            iMyCategory = oCategory.id,
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        self.assertFalse( form.is_valid() )
        # My Category plus an ebay category -> valid
        dFormData = dict(
            cTitle = "My clever search 8",
            iDummyCategory = 10, # see core.tests
            cPriority = "A7",
            iMyCategory = oCategory.id,
            iUser = self.user1 )
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        self.assertTrue( form.is_valid() )

    def test_add_stuff_already_there(self):
        '''duplicate-keyword detection (partially disabled -- see comments)'''
        #
        dFormData = dict(
            cTitle = "My clever search",
            cKeyWords = "Blah bleh blih",
            cPriority = "A8",
            iUser = self.user1 )
        #
        form = CreateSearchForm(data=dFormData)
        form.request = self.request
        form.instance.iUser = self.user1
        self.assertTrue( form.is_valid() )
        form.save()
        #
        dFormData = dict(
            cTitle = "Very clever search 1",
            cKeyWords = "Blah bleh blih", # same as above
            cPriority = "B1",
            iUser = self.user1 )
        #
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        # self.assertFalse( form.is_valid() ) # cannot test for now
        #
        dFormData = dict(
            cTitle = "Very clever search 2",
            cKeyWords = "Blah blih bleh", # same but different order
            cPriority = "B2",
            iUser = self.user1 )
        #
        form = CreateSearchForm( data = dFormData )
        form.request = self.request
        self.assertTrue( form.is_valid() ) # not comparing sets yet
        #
        '''
        print( 'cTitle:', dFormData['cTitle'] )
        if form.errors:
            print('form has at least one error:')
            for k, v in form.errors.items():
                print( k, ' -- ', v )
        else:
            print( 'no form errors at bottom!' )
        '''
|
988,164 | 28150d606d0d21f154525153cd9f60fe7aae046a | import json, os
from typing import List, Any, Dict
from sqlalchemy import Column, String, Integer, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd
Base = declarative_base()
class BookDetailsDim(Base):
    """Dimension table holding one row per book title."""
    __tablename__ = "book_details_dim"

    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String, nullable=False)   # only required descriptive field
    author = Column(String)
    publisher = Column(String)
    description = Column(String)
    primary_isbn13 = Column(String(13))
    primary_isbn10 = Column(String(10))
    # Row-insertion timestamp, assigned by the database server.
    added_on = Column(DateTime(timezone=True), server_default=func.now())
class Database:
    """Thin wrapper around a SQLAlchemy session for the books schema."""

    @staticmethod
    def get_conns(conn_id):
        """Load the settings for *conn_id* from connections.json next to this file."""
        dir_path = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(dir_path, 'connections.json')) as f:
            conns = json.load(f)
        return conns[conn_id]

    def __init__(self, conn_id) -> None:
        """Create the engine, ensure all mapped tables exist, open a session."""
        from urllib.parse import quote_plus  # local import: only needed here

        conn = self.get_conns(conn_id)
        conn_config = {
            "usr": conn['login'],
            "pw": conn['password'] or '',
            "host": conn['host'] or 'localhost',
            "db": conn['schema'] or ''
        }
        # BUG FIX: URL-encode the credentials so passwords containing
        # '@', ':' or '/' do not corrupt the connection URL.
        self.engine = create_engine(
            f"mysql+pymysql://{quote_plus(conn_config['usr'])}:{quote_plus(conn_config['pw'])}"
            f"@{conn_config['host']}/{conn_config['db']}"
        )
        session_maker = sessionmaker()
        session_maker.configure(bind=self.engine)
        Base.metadata.create_all(self.engine)
        self.session = session_maker()

    def query_table_to_df(self, table_object):
        """Return the full contents of *table_object* as a pandas DataFrame."""
        query = self.session.query(table_object)
        df = pd.read_sql(query.statement, self.session.bind)
        return df

    def add_book_details_data(self, vectors: List[Dict[str, Any]], columns) -> None:
        """Upsert one BookDetailsDim row per dict in *vectors*, taking only *columns* keys."""
        for entry in vectors:
            vec = BookDetailsDim(
                **{k: entry[k] for k in columns}
            )
            # merge == insert-or-update keyed on the primary key
            self.session.merge(vec)
        self.session.commit()
if __name__ == "__main__":
    # Smoke test: connect and show the first few book rows.
    db = Database(conn_id='books_db')
    df = db.query_table_to_df(BookDetailsDim)
    print(df.head())
|
988,165 | c6f5e90019b453bedc6cf0c3e28ebb2844073b69 | # Modules
# Bayesian updating of the posterior over a coin's heads-bias from simulated
# flips, rendered as an animated probability density function.
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
# Parameters
coin_flips = 10000   # number of simulated flips
heads_bias = 0.3     # true (hidden) probability of heads
# Discretize possible biases of coin to heads
heads_biases = np.arange(0, 1, 0.01)
# Uniform probability density function prior
prior = np.ones(len(heads_biases)) / len(heads_biases)
# pdfs[k] holds the posterior after k flips; row 0 is the prior.
pdfs = np.zeros((coin_flips, len(heads_biases)))
pdfs[0] = prior
# Flip coins and update priors
for flip in range(1, coin_flips):
    # P(B|A): Likelihood of flipped coin result for all possible biases of coin to heads
    likelihood = heads_biases if np.random.rand() < heads_bias else 1 - heads_biases
    # P(B): Evidence or overall probability of observing heads
    evidence = sum(likelihood * pdfs[flip - 1])
    # P(A|B): Posterior probability distribution after observing the coin flip
    pdfs[flip] = (likelihood * pdfs[flip - 1]) / evidence
# Set up figure to be animated
fig, ax = plt.subplots()
ax.set_xlabel('Bias of Coin to Heads')
ax.set_ylim(0, 1)
line, = ax.plot(heads_biases, pdfs[0], 'r-', linewidth=2)
# Update plot for animation
def update(i):
    # i is the flip index whose posterior is drawn; the quadratic frame
    # spacing below gives the fast-changing early updates more frames.
    ax.set_title(f'Coin Flip {i}')
    line.set_ydata(pdfs[i])
# Save animated evolution of coin bias for heads probability density function
# (frames go 0, 1, 4, 9, ... 9801 -- all below coin_flips).
anim = FuncAnimation(fig, update, frames=[x**2 for x in range(100)], interval=100)
anim.save('bayes_coin_flipping.gif', dpi=80, writer='imagemagick')
988,166 | d3cb0e384163a363ae3e323e11dbe91840a63287 | """update_coordinates.py
Updates a package's spatial coordinates (just
MultiPoint implemented) and geographic names with those taken from a csv file
with the format:
[city;country;latitude °N;longitude °E]
Usage:
update_coordinates.py <coord_file> <pkg_url>
Arguments:
<coord_file> input file
<pkg_url> The URL of the package in CKAN
"""
from docopt import docopt
import ckanapi as ck
import os

# The API key comes from the environment so it is never committed to VCS.
APIKEY = os.environ['CKAN_APIKEY_PROD1']
COORDFILE = 'SCORE_coordinates_repository_conv.csv'
#URL = 'https://data.eawag.ch/dataset/illicit-drugs-in-wastewater-score-initiative'
# Fix: the URL must be a quoted string literal; the unquoted form was a
# SyntaxError that prevented this module from importing at all.
URL = 'https://data.eawag.ch/dataset/edit/a-test-for-usability'
|
988,167 | 3396307e3df4d3ffafdbb29294fb465dc0799d59 | import sys
import csv
from collections import defaultdict
from operator import itemgetter
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
# Global table of (sequence id, sequence) tuples, filled by Genomictabulator.
SeqTable = []
# Raise the csv field-size limit: intron tables can contain very large fields.
csv.field_size_limit(1000000000)
def Genomictabulator(fasta):
    """Load every FASTA record of `fasta` into the module-level SeqTable.

    Appends (sequence id, Bio Seq) tuples -- the whole genome is held in RAM.
    """
    print >> sys.stderr, "Cargando genoma en la memoria RAM ...",
    f = open(fasta)
    for chrfa in SeqIO.parse(f, "fasta"):
        table = str(chrfa.id), chrfa.seq
        SeqTable.append(table)
    f.close()
    print >> sys.stderr, "OK"
def DR_counter(intron, ichr, strand, istart, iend, Genome):
    """Enumerate alternative intron placements implied by direct repeats (DRs).

    Starting from the reported intron, slide the splice junctions up- and
    downstream for as long as the exonic bases flanking the 5' and 3' splice
    sites are identical (a direct repeat), recording one alternative intron
    ID ("chr:start<strand>end") per shifted position.

    intron  -- intron ID string for the reported coordinates
    ichr    -- chromosome/sequence name (key into Genome)
    strand  -- "+" or "-"
    istart, iend -- intron start/end offsets on the reference sequence
    Genome  -- dict mapping sequence name -> sliceable sequence
               (NOTE(review): reverse_complement() is only available on
               Bio.Seq objects, so plain strings work for "+" only)

    Returns the list of intron IDs: the original plus one per repeat shift.

    Fix: the loops previously ended with `if <same condition>: break`, a dead
    re-check of the `while` condition evaluated immediately afterwards; the
    redundant checks were removed with no change in behavior.
    """
    introns_finded_DR = [intron]
    L = 100  # only scan up to L bases in each direction for direct repeats
    # Exonic sequence flanking the donor (SJ5*) and acceptor (SJ3*) sites:
    # *U = upstream of the junction, *D = downstream of the junction.
    SJ5U = Genome[ichr][istart-L : istart].lower()
    SJ5D = Genome[ichr][istart : istart+L].lower()
    SJ3U = Genome[ichr][iend-L : iend].lower()
    SJ3D = Genome[ichr][iend : iend+L].lower()
    if strand == "-":
        # On the minus strand the donor/acceptor roles are mirrored, so take
        # the reverse complement of the opposite flanks.
        SJ5U = Genome[ichr][iend : iend+L].lower().reverse_complement()
        SJ5D = Genome[ichr][iend-L : iend].lower().reverse_complement()
        SJ3U = Genome[ichr][istart : istart+L].lower().reverse_complement()
        SJ3D = Genome[ichr][istart-L : istart].lower().reverse_complement()
    DRU = 0  # repeat length found upstream
    DRD = 0  # repeat length found downstream
    # Walk upstream while the bases just before both junctions match; each
    # match lets the intron shift one base towards 5'. The IndexError guard
    # terminates the scan when the comparison window is exhausted.
    try:
        while SJ5U[L-1-DRU] == SJ3U[L-1-DRU]:
            DRU += 1
            if strand == "+":
                introns_finded_DR.append(ichr + ':' + str(istart-DRU) + strand + str(iend-DRU))
            elif strand == "-":
                introns_finded_DR.append(ichr + ':' + str(istart+DRU) + strand + str(iend+DRU))
    except IndexError:
        pass
    # Same scan downstream of the junctions.
    try:
        while SJ5D[DRD] == SJ3D[DRD]:
            DRD += 1
            if strand == "+":
                introns_finded_DR.append(ichr + ':' + str(istart+DRD) + strand + str(iend+DRD))
            elif strand == "-":
                introns_finded_DR.append(ichr + ':' + str(istart-DRD) + strand + str(iend-DRD))
    except IndexError:
        pass
    return introns_finded_DR
def main(NA12878, hg19):
    """Compare NA12878 introns against hg19 annotated introns.

    Both inputs are space-delimited tables whose columns are read as
    [intron_id, coverage, chr, strand, start, end, _, dinucleotides].
    Introns are matched through the direct-repeat-equivalence ID produced by
    DR_counter (a sorted, comma-joined list of equivalent intron IDs), and
    each non-canonical hg19 intron is printed as either canonical in
    NA12878 or missing from the NA12878 table.
    """
    reader1 = csv.reader(open(NA12878), delimiter = ' ')
    reader2 = csv.reader(open(hg19), delimiter = ' ')
    intron_list = set([])
    # SeqTable must already be populated by Genomictabulator().
    Genome = dict(SeqTable)
    NA12878_list = []
    hg19_list = []
    # First pass: index every NA12878 intron by its DR-equivalence ID.
    for row in reader1:
        intron = row[0]
        coverage = row[1]
        chr = row[2]
        strand = row[3]
        start = row[4]
        end = row[5]
        dn = row[7]
        DR_introns = DR_counter(intron, chr, strand, int(start), int(end), Genome)
        # Sorted join makes the ID independent of the scan order, so the same
        # physical intron maps to the same key in both files.
        DR_intron_ID = ','.join(sorted(DR_introns))
        #if dn != 'GTAG' and dn != 'GCAG' and dn != 'ATAC':
        NA12878_list.append((DR_intron_ID, [intron, chr, strand, start, end, dn, coverage]))
        intron_list.add((DR_intron_ID))
    NA12878_dict = dict(NA12878_list)
    # Second pass: look up each non-canonical hg19 intron in the NA12878 index.
    for row in reader2:
        hg19_intron = row[0]
        hg19_coverage = row[1]
        hg19_chr = row[2]
        hg19_strand = row[3]
        hg19_start = row[4]
        hg19_end = row[5]
        hg19_dn = row[7]
        DR_introns = DR_counter(hg19_intron, hg19_chr, hg19_strand, int(hg19_start), int(hg19_end), Genome)
        DR_intron_ID = ','.join(sorted(DR_introns))
        if hg19_dn != 'GTAG' and hg19_dn != 'GCAG' and hg19_dn != 'ATAC':
        #	hg19_list.append((DR_intron_ID, [intron, chr, strand, start, end, dn, coverage]))
        #	intron_list.add((DR_intron_ID))
            try:
                info = NA12878_dict[DR_intron_ID]
                #if bodymap_dict.has_key(DR_intron_ID)== True:
                NA12878_intron = info[0]
                NA12878_chr = info[1]
                NA12878_strand = info [2]
                NA12878_start = info[3]
                NA12878_end = info[4]
                NA12878_dn = info[5]
                NA12878_coverage = info[6]
                if NA12878_dn == 'GTAG' or NA12878_dn == 'GCAG' or NA12878_dn == 'ATAC':
                    print hg19_intron, "Canonical_in_hg19", hg19_dn, NA12878_dn
            except KeyError:
                # No DR-equivalent intron in NA12878 at all.
                print hg19_intron, "NA12878_only", hg19_dn, "NO"
            #pass
    #NA12878_dict = dict(NA12878_list)
    #hg19_dict = dict(hg19_list)
    #intron_list_sort = sorted(list(intron_list))
if __name__ == '__main__':
    # argv[1] = genome FASTA, argv[2] = NA12878 intron table, argv[3] = hg19 intron table
    Genomictabulator(sys.argv[1])
    main(sys.argv[2],sys.argv[3])
|
988,168 | 1ff0967ebe7a404811d7050417b95778a0a7494c | """
Studio editing view for OpenAssessment XBlock.
"""
from __future__ import absolute_import
import copy
import logging
from uuid import uuid4
import pkg_resources
import six
from six.moves import zip
from django.conf import settings
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy
from openassessment.xblock.data_conversion import (
create_rubric_dict,
make_django_template_key,
update_assessments_format
)
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.resolve_dates import resolve_dates
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.validation import validator
from voluptuous import MultipleInvalid
from xblock.core import XBlock
from xblock.fields import List, Scope
from web_fragments.fragment import Fragment
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class StudioMixin(object):
    """
    Studio editing view for OpenAssessment XBlock.
    """
    # Fallback rubric: every rubric needs at least one criterion with at least
    # one option, so an empty-labelled pair is used when nothing is configured.
    DEFAULT_CRITERIA = [
        {
            'label': '',
            'options': [
                {
                    'label': ''
                },
            ]
        }
    ]
    # User-facing labels for whether a response type is required/optional/off.
    NECESSITY_OPTIONS = {
        "required": ugettext_lazy("Required"),
        "optional": ugettext_lazy("Optional"),
        "": ugettext_lazy("None")
    }
    # Django template used by studio_view().
    STUDIO_EDITING_TEMPLATE = 'openassessmentblock/edit/oa_edit.html'
    BASE_EDITOR_ASSESSMENTS_ORDER = copy.deepcopy(DEFAULT_EDITOR_ASSESSMENTS_ORDER)
    # Since the XBlock problem definition contains only assessment
    # modules that are enabled, we need to keep track of the order
    # that the user left assessments in the editor, including
    # the ones that were disabled. This allows us to keep the order
    # that the user specified.
    editor_assessments_order = List(
        default=DEFAULT_EDITOR_ASSESSMENTS_ORDER,
        scope=Scope.content,
        help="The order to display assessments in the editor."
    )
    def studio_view(self, context=None):  # pylint: disable=unused-argument
        """
        Render the OpenAssessment XBlock for editing in Studio.
        Args:
            context: Not actively used for this view.
        Returns:
            (Fragment): An HTML fragment for editing the configuration of this XBlock.
        """
        rendered_template = get_template(
            self.STUDIO_EDITING_TEMPLATE
        ).render(self.editor_context())
        fragment = Fragment(rendered_template)
        if settings.DEBUG:
            # In DEBUG, load the individual JS sources so they are debuggable.
            self.add_javascript_files(fragment, "static/js/src/oa_shared.js")
            self.add_javascript_files(fragment, "static/js/src/oa_server.js")
            self.add_javascript_files(fragment, "static/js/src/studio")
        else:
            # TODO: switch to add_javascript_url once XBlock resources are loaded from the CDN
            js_bytes = pkg_resources.resource_string(__name__, "static/js/openassessment-studio.min.js")
            fragment.add_javascript(js_bytes.decode('utf-8'))
        js_context_dict = {
            "FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
        }
        fragment.initialize_js('OpenAssessmentEditor', js_context_dict)
        return fragment
    def editor_context(self):
        """
        Build the template context for the Studio editing view.
        Returns:
            dict with keys
            'rubric' (unicode), 'prompt' (unicode), 'title' (unicode),
            'submission_start' (unicode), 'submission_due' (unicode),
            'assessments (dict)
        """
        # In the authoring GUI, date and time fields should never be null.
        # Therefore, we need to resolve all "default" dates to datetime objects
        # before displaying them in the editor.
        __, __, date_ranges = resolve_dates(
            self.start, self.due,
            [
                (self.submission_start, self.submission_due)
            ] + [
                (asmnt.get('start'), asmnt.get('due'))
                for asmnt in self.valid_assessments
            ],
            self._
        )
        # First range is the submission window; the rest align with
        # valid_assessments in order.
        submission_start, submission_due = date_ranges[0]
        assessments = self._assessments_editor_context(date_ranges[1:])
        self.editor_assessments_order = self._editor_assessments_order_context()
        # Every rubric requires one criterion. If there is no criteria
        # configured for the XBlock, return one empty default criterion, with
        # an empty default option.
        criteria = copy.deepcopy(self.rubric_criteria_with_labels)
        if not criteria:
            criteria = self.DEFAULT_CRITERIA
        # To maintain backwards compatibility, if there is no
        # feedback_default_text configured for the xblock, use the default text
        feedback_default_text = copy.deepcopy(self.rubric_feedback_default_text)
        if not feedback_default_text:
            feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
        course_id = self.location.course_key if hasattr(self, 'location') else None
        return {
            'prompts': self.prompts,
            'prompts_type': self.prompts_type,
            'title': self.title,
            'submission_due': submission_due,
            'submission_start': submission_start,
            'assessments': assessments,
            'criteria': criteria,
            'feedbackprompt': self.rubric_feedback_prompt,
            'feedback_default_text': feedback_default_text,
            'text_response': self.text_response if self.text_response else '',
            'file_upload_response': self.file_upload_response if self.file_upload_response else '',
            'necessity_options': self.NECESSITY_OPTIONS,
            'file_upload_type': self.file_upload_type,
            'white_listed_file_types': self.white_listed_file_types_string,
            'allow_latex': self.allow_latex,
            'leaderboard_show': self.leaderboard_show,
            'editor_assessments_order': [
                make_django_template_key(asmnt)
                for asmnt in self.editor_assessments_order
            ],
            'teams_feature_enabled': self.team_submissions_enabled,
            'teams_enabled': self.teams_enabled,
            'base_asset_url': self._get_base_url_path_for_course_assets(course_id),
            'is_released': self.is_released(),
            'teamsets': self.get_teamsets(course_id),
            'selected_teamset_id': self.selected_teamset_id,
        }
@XBlock.json_handler
def update_editor_context(self, data, suffix=''): # pylint: disable=unused-argument
"""
Update the XBlock's configuration.
Args:
data (dict): Data from the request; should have the format described
in the editor schema.
Keyword Arguments:
suffix (str): Not used
Returns:
dict with keys 'success' (bool) and 'msg' (str)
"""
# Validate and sanitize the data using a schema
# If the data is invalid, this means something is wrong with
# our JavaScript, so we log an exception.
try:
data = EDITOR_UPDATE_SCHEMA(data)
except MultipleInvalid:
logger.exception('Editor context is invalid')
return {'success': False, 'msg': self._('Error updating XBlock configuration')}
# Check that the editor assessment order contains all the assessments.
current_order = set(data['editor_assessments_order'])
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
# Backwards compatibility: "staff-assessment" may not be present.
# If that is the only problem with this data, just add it manually and continue.
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
data['editor_assessments_order'].append('staff-assessment')
logger.info('Backwards compatibility: editor_assessments_order now contains staff-assessment')
else:
logger.exception('editor_assessments_order does not contain all expected assessment types')
return {'success': False, 'msg': self._('Error updating XBlock configuration')}
if not data['text_response'] and not data['file_upload_response']:
return {'success': False, 'msg': self._("Error: both text and file upload responses can't be disabled")}
if not data['text_response'] and data['file_upload_response'] == 'optional':
return {'success': False,
'msg': self._("Error: in case if text response is disabled file upload response must be required")}
if not data['file_upload_response'] and data['text_response'] == 'optional':
return {'success': False,
'msg': self._("Error: in case if file upload response is disabled text response must be required")}
# Backwards compatibility: We used to treat "name" as both a user-facing label
# and a unique identifier for criteria and options.
# Now we treat "name" as a unique identifier, and we've added an additional "label"
# field that we display to the user.
# If the JavaScript editor sends us a criterion or option without a "name"
# field, we should assign it a unique identifier.
for criterion in data['criteria']:
if 'name' not in criterion:
criterion['name'] = uuid4().hex
for option in criterion['options']:
if 'name' not in option:
option['name'] = uuid4().hex
xblock_validator = validator(self, self._)
success, msg = xblock_validator(
create_rubric_dict(data['prompts'], data['criteria']),
data['assessments'],
submission_start=data['submission_start'],
submission_due=data['submission_due'],
leaderboard_show=data['leaderboard_show']
)
if not success:
return {'success': False, 'msg': self._(u'Validation error: {error}').format(error=msg)}
# At this point, all the input data has been validated,
# so we can safely modify the XBlock fields.
self.title = data['title']
self.display_name = data['title']
self.prompts = data['prompts']
self.prompts_type = data['prompts_type']
self.rubric_criteria = data['criteria']
self.rubric_assessments = data['assessments']
self.editor_assessments_order = data['editor_assessments_order']
self.rubric_feedback_prompt = data['feedback_prompt']
self.rubric_feedback_default_text = data['feedback_default_text']
self.submission_start = data['submission_start']
self.submission_due = data['submission_due']
self.text_response = data['text_response']
self.file_upload_response = data['file_upload_response']
if data['file_upload_response']:
self.file_upload_type = data['file_upload_type']
self.white_listed_file_types_string = data['white_listed_file_types']
else:
self.file_upload_type = None
self.white_listed_file_types_string = None
self.allow_latex = bool(data['allow_latex'])
self.leaderboard_show = data['leaderboard_show']
self.teams_enabled = bool(data.get('teams_enabled', False))
self.selected_teamset_id = data.get('selected_teamset_id', '')
return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}
    @XBlock.json_handler
    def check_released(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Check whether the problem has been released.
        Args:
            data (dict): Not used
        Keyword Arguments:
            suffix (str): Not used
        Returns:
            dict with keys 'success' (bool), 'msg' (unicode), and 'is_released' (bool)
        """
        # There aren't currently any server-side error conditions we report to the client,
        # but we send success/msg values anyway for consistency with other handlers.
        return {
            'success': True, 'msg': u'',
            'is_released': self.is_released()
        }
    def _assessments_editor_context(self, assessment_dates):
        """
        Transform the rubric assessments list into the context
        we will pass to the Django template.
        Args:
            assessment_dates: List of assessment date ranges (tuples of start/end datetimes),
                aligned index-for-index with self.rubric_assessments.
        Returns:
            dict mapping template-safe assessment names to their configuration,
            plus a 'training' entry with examples and an empty template.
        """
        assessments = {}
        # zip() stops at the shorter sequence, so extra date ranges are ignored.
        for asmnt, date_range in zip(self.rubric_assessments, assessment_dates):
            # Django Templates cannot handle dict keys with dashes, so we'll convert
            # the dashes to underscores.
            template_name = make_django_template_key(asmnt['name'])
            assessments[template_name] = copy.deepcopy(asmnt)
            assessments[template_name]['start'] = date_range[0]
            assessments[template_name]['due'] = date_range[1]
        # In addition to the data in the student training assessment, we need to include two additional
        # pieces of information: a blank context to render the empty template with, and the criteria
        # for each example (so we don't have any complicated logic within the template). Though this
        # could be accomplished within the template, we are opting to remove logic from the template.
        student_training_module = self.get_assessment_module('student-training')
        # Blank example: one empty answer part per prompt.
        student_training_template = {
            'answer': {
                'parts': [
                    {'text': ''} for _ in self.prompts
                ]
            }
        }
        criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
        for criterion in criteria_list:
            criterion['option_selected'] = ""
        student_training_template['criteria'] = criteria_list
        if student_training_module:
            student_training_module = update_assessments_format([student_training_module])[0]
            example_list = []
            # Adds each example to a modified version of the student training module dictionary.
            for example in student_training_module['examples']:
                criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
                # Equivalent to a Join Query, this adds the selected option to the Criterion's dictionary, so that
                # it can be easily referenced in the template without searching through the selected options.
                for criterion in criteria_list:
                    for option_selected in example['options_selected']:
                        if option_selected['criterion'] == criterion['name']:
                            criterion['option_selected'] = option_selected['option']
                example_list.append({
                    'answer': example['answer'],
                    'criteria': criteria_list,
                })
            assessments['training'] = {'examples': example_list, 'template': student_training_template}
        # If we don't have student training enabled, we still need to render a single (empty, or default) example
        else:
            assessments['training'] = {'examples': [student_training_template], 'template': student_training_template}
        return assessments
    def _editor_assessments_order_context(self):
        """
        Create a list of assessment names in the order
        the user last set in the editor, including
        assessments that are not currently enabled.
        Returns:
            list of assessment names
        """
        # Start with the default order, to pick up any assessment types that have been added
        # since the user last saved their ordering.
        effective_order = copy.deepcopy(self.BASE_EDITOR_ASSESSMENTS_ORDER)
        # Account for changes the user has made to the default order
        user_order = copy.deepcopy(self.editor_assessments_order)
        effective_order = self._subset_in_relative_order(effective_order, user_order)
        # Account for inconsistencies between the user's order and the problems
        # that are currently enabled in the problem (These cannot be changed)
        enabled_assessments = [asmnt['name'] for asmnt in self.valid_assessments]
        # Only enabled assessments the user has actually ordered are re-applied.
        enabled_ordered_assessments = [
            assessment for assessment in enabled_assessments if assessment in user_order
        ]
        effective_order = self._subset_in_relative_order(effective_order, enabled_ordered_assessments)
        return effective_order
def _subset_in_relative_order(self, superset, subset):
"""
Returns a copy of superset, with entries that appear in subset being reordered to match
their relative ordering in subset.
"""
superset_indices = [superset.index(item) for item in subset]
sorted_superset_indices = sorted(superset_indices)
if superset_indices != sorted_superset_indices:
for index, superset_index in enumerate(sorted_superset_indices):
superset[superset_index] = subset[index]
return superset
    def _get_base_url_path_for_course_assets(self, course_key):
        """
        Returns base url path for course assets, or None when no course key
        is available.
        """
        # NOTE(review): assumes course_key provides make_asset_key(); this is
        # an Open edX CourseKey in practice -- confirm at the caller.
        if course_key is None:
            return None
        placeholder_id = uuid4().hex
        # create a dummy asset location with a fake but unique name. strip off the name, and return it
        url_path = six.text_type(course_key.make_asset_key('asset', placeholder_id).for_branch(None))
        if not url_path.startswith('/'):
            url_path = '/' + url_path
        return url_path.replace(placeholder_id, '')
def get_team_configuration(self, course_id):
"""
Returns a dict with team configuration settings.
"""
configuration_service = self.runtime.service(self, 'teams_configuration')
team_configuration = configuration_service.get_teams_configuration(course_id)
if not team_configuration:
return None
return team_configuration
def get_teamsets(self, course_id):
"""
Wrapper around get_team_configuration that returns team names only for display
"""
team_configuration = self.get_team_configuration(course_id)
if not team_configuration:
return None
return team_configuration.teamsets
|
988,169 | 5a0a350475c7fedd536416ea7a88c73d5cd29af3 | #!/usr/bin/env python
"""
You must have networkx and matplotlib installed for this program to work.
"""
# Author: Rishi Thakkar (rishirt.us@gmail.com)
try:
import matplotlib.pyplot as plt
plot_lib=True
except:
plot_lib=False
import networkx as nx
import random
import sys
import os
import shutil
class bcolors:
    """ANSI terminal escape codes used to colorize the menu output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'   # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
##### Get user inputs #####
print("Welcome to the Graph Generator!\n")
print(bcolors.HEADER + bcolors.BOLD + bcolors.UNDERLINE + "Graph Type to Use:" + bcolors.ENDC)
# Build the mode menu from a single list instead of 19 copy-pasted lines.
# The rendered text is byte-identical to the original hand-built string
# (including the original "Mmendes" spelling).
GRAPH_TYPE_NAMES = [
    "Random Graph",
    "Complete Graph",
    "Barbell Graph",
    "2D Grid Graph",
    "Dorogovtsev Goltsev Mmendes Graph",
    "Cycle Graph",
    "Circular Ladder Graph",
    "Lollipop Graph",
    "Wheel Graph",
    "Star Graph",
    "Path Graph",
    "Moebius Kantor Graph",
    "Tutte Graph",
    "Truncated Tetrahedron Graph",
    "Truncated Cube Graph",
    "Sedgewick Maze Graph",
    "Pappus Graph",
    "Bull Graph",
    "Krackhardt Kite Graph",
]
modeDescription = "".join(
    bcolors.WARNING + "Mode " + str(i) + " - " + bcolors.ENDC
    + bcolors.OKGREEN + name + bcolors.ENDC + "\n"
    for i, name in enumerate(GRAPH_TYPE_NAMES)
)
print(modeDescription)
##### Generate Graph #####
# Prompt until a valid mode is entered; build graph G and, for most modes,
# a node layout `pos` used later for plotting (mode 18 sets no layout --
# the plotting code detects that case via NameError).
while(1):
    mode = int(input("Please enter mode of graph type for generation: "))
    if mode == 0:
        nodes = int(input("Number of nodes: "))
        edgeP = float(input("Probability of edge formation: "))
        G=nx.fast_gnp_random_graph(nodes, edgeP)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 1:
        nodes = int(input("Number of nodes: "))
        G=nx.complete_graph(nodes)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 2:
        nodesL = int(input("Number of outer nodes (>= 1): "))
        nodesR = int(input("Number of nodes for connections: "))
        G=nx.barbell_graph(nodesL, nodesR)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 3:
        # Grid nodes are (row, col) tuples; `cols` is reused later when
        # flattening node ids for the output files.
        rows = int(input("Number of rows: "))
        cols = int(input("Number of cols: "))
        G=nx.grid_2d_graph(rows, cols)
        pos=nx.spectral_layout(G)
        break
    elif mode == 4:
        nodes = int(input("Number of generations (<= 5): "))
        if nodes > 5:
            print("Invalid input! Please execute script again.")
            sys.exit();
        G=nx.dorogovtsev_goltsev_mendes_graph(nodes)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 5:
        nodes = int(input("Number of nodes: "))
        G=nx.cycle_graph(nodes)
        pos=nx.circular_layout(G)
        break
    elif mode == 6:
        nodes = int(input("Number of nodes: "))
        G=nx.circular_ladder_graph(nodes)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 7:
        nodesK = int(input("Number of nodes in candy: "))
        nodesP = int(input("Number of nodes in stick: "))
        G=nx.lollipop_graph(nodesK, nodesP)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 8:
        nodes = int(input("Number of nodes: "))
        G=nx.wheel_graph(nodes)
        pos=nx.spectral_layout(G)
        break
    elif mode == 9:
        nodes = int(input("Number of nodes: "))
        G=nx.star_graph(nodes)
        pos=nx.spring_layout(G,k=1,iterations=100)
        break
    elif mode == 10:
        nodes = int(input("Number of nodes: "))
        G=nx.path_graph(nodes)
        pos=nx.circular_layout(G)
        break
    elif mode == 11:
        G=nx.moebius_kantor_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 12:
        G=nx.tutte_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 13:
        G=nx.truncated_tetrahedron_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 14:
        G=nx.truncated_cube_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 15:
        G=nx.sedgewick_maze_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 16:
        G=nx.pappus_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 17:
        G=nx.bull_graph()
        pos=nx.spectral_layout(G)
        break
    elif mode == 18:
        # Intentionally no `pos` for this mode.
        G=nx.krackhardt_kite_graph()
        break
    else:
        print("Please enter a valid number.")
costsChecker = int(input("Cost Mode (0 - random / 1 - cost of 1): "))
# Assign a weight to every edge: random in [0, 500] or a uniform cost of 1.
# Fix: use G[u][v] subscript access, which works on both networkx 1.x and
# 2.x; the old G.edge adjacency attribute was removed in networkx 2.0.
for (u, v) in G.edges():
    if costsChecker == 0:
        G[u][v]['weight'] = random.randint(0, 500)
    else:
        G[u][v]['weight'] = 1
##### Setup Enviornment ####
# Recreate the ./topology output directory from scratch on every run.
if os.path.isdir("./topology"):
    shutil.rmtree("./topology")
os.mkdir("./topology")
##### Output Files #####
# Write one cost file per node, a human-readable "gold" topology, and an edge
# list with each undirected edge listed exactly once. edgeChecker records both
# (u, v) and (v, u) so the reverse direction is skipped; keys are packed as
# u * 256 + v, which assumes fewer than 256 nodes.
edgeChecker = {}
edgeList = open("./topology/networkTopology.txt", 'w')
goldFile = open("./topology/goldNetwork.txt", 'w')
if mode != 3:
    for v in G:
        initCostFile = open("./topology/nodecosts" + str(v), 'w')
        goldFile.write("Node: " + str(v) + "\n")
        for n in G.neighbors(v):
            initCostFile.write(str(n) + " " + str(G[v][n]['weight']) + "\n")
            goldFile.write(" -> " + str(n) + ", cost = " + str(G[v][n]['weight']) + "\n")
            # Fix: the second condition used to re-test v*256 + n instead of
            # the reverse key n*256 + v, so the reverse edge was never checked
            # explicitly (it only worked because both keys were stored together).
            if v*256 + n not in edgeChecker and n*256 + v not in edgeChecker:
                edgeList.write(str(v) + " " + str(n) + "\n")
                edgeChecker[v*256 + n] = True
                edgeChecker[n*256 + v] = True
        # Fix: close each per-node file instead of leaking the handle.
        initCostFile.close()
else:
    # Grid graphs label nodes with (row, col) tuples; flatten to row*cols + col.
    for v in G:
        vid = v[0]*cols + v[1]
        initCostFile = open("./topology/nodecosts" + str(vid), 'w')
        goldFile.write("Node: " + str(vid) + "\n")
        for n in G.neighbors(v):
            nid = n[0]*cols + n[1]
            initCostFile.write(str(nid) + " " + str(G[v][n]['weight']) + "\n")
            goldFile.write(" -> " + str(nid) + ", cost = " + str(G[v][n]['weight']) + "\n")
            # Fix: edgeChecker was keyed with raw tuples here (v*256 repeats
            # the tuple!), so the integer-keyed membership test never matched
            # and every grid edge was written twice. Key on flattened ids.
            if vid*256 + nid not in edgeChecker and nid*256 + vid not in edgeChecker:
                edgeList.write(str(vid) + " " + str(nid) + "\n")
                edgeChecker[vid*256 + nid] = True
                edgeChecker[nid*256 + vid] = True
        initCostFile.close()
# Fix: close the output files so all writes are flushed to disk.
edgeList.close()
goldFile.close()
# Render the topology with matplotlib when it was importable.
# Idiom fix: test the boolean directly instead of comparing `== True`.
if plot_lib:
    plt.figure(1, figsize=(20, 20))
    try:
        pos
    except NameError:
        # No layout was computed for this mode (mode 18); let networkx choose.
        plt.axis('off')
        nx.draw_networkx(G, node_color='#A0CBE2', width=.5, with_labels=True)
    else:
        nx.draw(G, pos, node_color='#A0CBE2', width=.5, with_labels=True)
    plt.savefig("./topology/networkTopology.png") # save as png
|
988,170 | 550a528214c7bd7e45fa85cb7e76b2ff307c5b34 | import json
def handle(event, context):
    """AWS-Lambda-style handler that returns the fixed category list as JSON.

    Both `event` and `context` are accepted but unused.
    """
    payload = {"statusCode": 200, "data": ["Books", "Python", "AWS", "Java"]}
    return {
        "statusCode": 200,
        "body": json.dumps(payload),
        "isBase64Encoded": False,
    }
|
988,171 | 3752e7d77004e7a96d5f7706f258a6ddcd0d9233 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import copy
####QUESTION 1####
## a
# Load the raw data and drop every row that contains a missing value,
# reporting the removed rows by their line number in the CSV file.
data = pd.read_csv('real_estate.csv')
raw_data = data.copy()
lenth = data.shape[0]
index_in_file = []
index_in_code = []
for i in range(lenth):
    if data.iloc[i].isnull().any():
        # +2 converts the 0-based frame position to the 1-based CSV line
        # number, accounting for the header row.
        index_in_file.append(i + 2)
        index_in_code.append(i)
print('The index of removed rows in the file are ',index_in_file)
data = data.drop(index = data.index[index_in_code])
# Keep the old index as a column so prices can be looked up in raw_data later.
data = data.reset_index(drop = False)
data = data.drop(['transactiondate', 'latitude', 'longitude', 'price'], axis = 1)
##b
# Min-max normalise each feature column into [0, 1], then report the means.
features = ['age', 'nearestMRT', 'nConvenience']
for feature in features:
    minx = min(data[feature])
    maxx = max(data[feature])
    # Vectorised column update: element-wise identical to the original
    # per-row .loc loop, but a single pandas operation instead of O(n) writes.
    data[feature] = (data[feature] - minx) / (maxx - minx)
mean_feature = {}
for feature in features:
    mean_feature[feature] = np.mean(data[feature])
    print('The mean value of feature \'', feature, '\' = ', mean_feature[feature])
####QUESTION 2####
# 50/50 split: first half of the cleaned rows is the training set, second
# half the testing set.
# Fix: the original computed `lenth` and `cut` twice in a row; the duplicate
# assignments were removed.
lenth = data.shape[0]
cut = lenth // 2
print('The first row of the training set is \n', data.loc[0][1:],
      '\nThe last row of the training set is \n', data.loc[cut - 1][1:],
      '\nThe first row of the testing set is \n', data.loc[cut][1:],
      '\nThe last row of the tesing set is \n', data.loc[cut+cut-1][1:])
#split train set and test set
# Design matrix: each row is [1, age, nearestMRT, nConvenience] (bias term first).
X = np.zeros((lenth,1,4))
for i in data.index:
    X[i] = [1,data.loc[i, 'age'], data.loc[i,'nearestMRT'], data.loc[i, 'nConvenience']]
y = np.zeros((lenth,1))
for i in range(len(X)):
    raw_index = data.loc[i, 'index'] #the index in raw_data
    y[i] = raw_data.loc[raw_index, 'price']
X_train = X[0 : cut]
X_test = X[cut : cut + cut]
y_train = y[0 : cut]
y_test = y[cut : cut + cut]
####QUESTION 5####
##a
# Batch gradient descent on the loss sqrt((y - Xw)^2 / 4 + 1) - 1 for nine
# candidate step sizes, plotting the per-iteration mean training loss.
losses = []
fig, ax = plt.subplots(3, 3, figsize=(10, 10))
nIter = 400
alphas = [10, 5, 2, 1, 0.5, 0.25, 0.1, 0.05, 0.01]
for eta in alphas:
    w0 = np.ones((4, 1))
    loss_per_iter = []
    for iteration in range(nIter):
        loss_all_row = 0
        partial_matrix = np.zeros((4, 1))
        for i in range(len(X_train)):
            dot_item = float(np.dot(X_train[i], w0))
            # y[i] equals y_train[i] here since X_train/y_train are the first
            # `cut` rows of X/y.
            loss_all_row += (1 / 4 * (float(y[i]) - dot_item) ** 2 + 1) ** 0.5 - 1
            # update w0
            for j in range(len(partial_matrix)): # for each w in w0
                partial_matrix[j] += X_train[i][0][j] * (dot_item - float(y[i])) / (
                    2 * (((dot_item - float(y[i])) ** 2 + 4)) ** 0.5)
        loss_per_iter.append(loss_all_row / len(X_train))
        # Full-batch update with the averaged gradient.
        w0 = w0 - eta * (1 / len(X_train)) * partial_matrix
    losses.append(loss_per_iter)
for i, ax in enumerate(ax.flat):
    ax.plot(losses[i])
    ax.set_title(f"step size: {alphas[i]}")
plt.tight_layout()
plt.show()
# for i in range(len(losses)):
#     print('step size = ', alphas[i])
#     print(losses[i][-10:])
##c
# Re-run batch GD with eta = 0.3, tracking the full weight trajectory, then
# plot the weights over iterations and report final train/test losses.
eta = 0.3
w0 = np.ones((4,1))
w_list = []
w_list.append(w0)
for iteration in range(nIter):
    loss_all_row = 0
    partial_matrix = np.zeros((4,1))
    for i in range(len(X_train)):
        dot_item = float(np.dot(X_train[i], w0))
        loss_all_row += (1/4 * (float(y_train[i]) - dot_item)**2 + 1)**0.5 - 1
        #update w0
        for j in range(len(partial_matrix)): #for each w in w0
            partial_matrix[j] += X_train[i][0][j]*(dot_item - float(y_train[i])) / (2*(((dot_item - float(y_train[i]))**2 + 4))**0.5)
    w0 = w0 - eta * (1/len(X_train)) * partial_matrix
    w_list.append(w0)
print('The final weight vector is: \n', w_list[-1])
w_list = np.array(w_list)
# Transpose so each row of w_t is one weight's value across iterations.
w_t = w_list.T[0]
for i in range(len(w_t)):
    plt.plot(w_t[i], label = ['w_0', 'w_1', 'w_2', 'w_3'][i])
plt.legend()
plt.show()
train_loss = 0
w0 = w_list[-1]
for i in range(len(X_train)):
    dot_item = float(np.dot(X_train[i], w0))
    train_loss += (1/4 * (float(y_train[i]) - dot_item)**2 + 1)**0.5 - 1
print('The loss on the train set is ', train_loss / len(X_train))
test_loss = 0
for i in range(len(X_test)):
    dot_item = float(np.dot(X_test[i], w0))
    test_loss += (1/4 * (float(y_test[i]) - dot_item)**2 + 1)**0.5 - 1
print('The loss on the test set is ', test_loss/len(X_test))
####QUESTION 6####
##a
# Stochastic gradient descent: the weights are updated after every single
# training sample (compare Q5, which updates once per full pass).
epoch = 6
losses = []
fig, ax = plt.subplots(3, 3, figsize=(10, 10))
nIter = 400
alphas = [10, 5, 2, 1, 0.5, 0.25, 0.1, 0.05, 0.01]
for eta in alphas:
    w0 = np.ones((4, 1))
    loss_per_eta = []
    partial_matrix = np.zeros((4, 1))
    for iteration in range(epoch):
        for i in range(len(X_train)):
            dot_item = float(np.dot(X_train[i], w0))
            # update w0
            # single-sample gradient: note plain assignment (=), not +=,
            # since each step uses only this one sample
            for j in range(len(partial_matrix)): # for each w in w0
                partial_matrix[j] = X_train[i][0][j] * (dot_item - float(y_train[i])) / (
                        2 * (((dot_item - float(y_train[i])) ** 2 + 4)) ** 0.5)
            w0 = w0 - eta * partial_matrix
            # Full-training-set loss is recomputed after *every* per-sample
            # update (O(n^2) per epoch) -- presumably deliberate so the loss
            # curve has one point per update; confirm before "optimizing".
            loss_all_row = 0
            for k in range(len(X_train)):
                dot_item = float(np.dot(X_train[k], w0))
                loss_all_row += (1 / 4 * (float(y_train[k]) - dot_item) ** 2 + 1) ** 0.5 - 1
            loss_per_eta.append(loss_all_row / len(X_train))
    losses.append(loss_per_eta)
# NOTE(review): this loop rebinds ``ax`` (the axes array) to each subplot,
# same shadowing pattern as Q5 -- works, but rename for clarity.
for i, ax in enumerate(ax.flat):
    ax.plot(losses[i])
    ax.set_title(f"step size: {alphas[i]}") # plot titles
plt.tight_layout() # plot formatting
plt.show()
##c
# Re-run SGD at the chosen step size, recording the weight trajectory.
w0 = np.ones((4,1))
eta = 0.4
w_list = []
w_list.append(w0)
loss_per_eta = []
partial_matrix = np.zeros((4,1))
for iteration in range(epoch):
    for i in range(len(X_train)):
        dot_item = float(np.dot(X_train[i], w0))
        #update w0
        for j in range(len(partial_matrix)): #for each w in w0
            partial_matrix[j] = X_train[i][0][j]*(dot_item - float(y_train[i])) / (2*(((dot_item - float(y_train[i]))**2 + 4))**0.5)
        w0 = w0 - eta * partial_matrix
        w_list.append(w0)
w_list = np.array(w_list)
w_t = w_list.T[0]  # w_t[j] = trajectory of weight j across all updates
for i in range(len(w_t)):
    plt.plot(w_t[i], label = ['w_0', 'w_1', 'w_2', 'w_3'][i])
plt.legend()
plt.show()
# Final-model loss on the train and test splits.
train_loss = 0
w0 = w_list[-1]
print('The final model is \n', w0)
for i in range(len(X_train)):
    dot_item = float(np.dot(X_train[i], w0))
    train_loss += (1/4 * (float(y_train[i]) - dot_item)**2 + 1)**0.5 - 1
print('The loss on the train set is ', train_loss / len(X_train))
test_loss = 0
for i in range(len(X_test)):
    dot_item = float(np.dot(X_test[i], w0))
    test_loss += (1/4 * (float(y_test[i]) - dot_item)**2 + 1)**0.5 - 1
print('The loss on the test set is ', test_loss/len(X_test))
|
988,172 | 56fd53599e063e757c52c7ebd1f3070ffaaff31d | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# --- String basics ---------------------------------------------------------
class_name = "p5_Delinquents: "
# (Concatenating a str with a bare number raises TypeError, e.g. class_name + 5.)
student1 = "Carlos"
top_delinquents = student1 + ", Nicholas, Gionna, Gonzalo"
print(f"{class_name}{top_delinquents}")

# str + str works; numbers must be converted (or formatted) first.
same_name = "2"
third_student = 3
print(type(same_name))
print(f"Carlos {same_name}")
print(f"Carlos {third_student}")

# --- Integer input and averaging ------------------------------------------
grade1 = int(input("What did you get in p1? "))
grade2 = int(input("What did you get in p2? "))
grade3 = int(input("What did you get in p3? "))
grade4 = int(input("What did you get in p4? "))
print(f"To verify: You scored: \n{grade1}\n{grade2}\n{grade3}\n{grade4}")
# True division always yields a float, even for whole-number averages.
gpa = (grade1 + grade2 + grade3 + grade4) / 4
print(f"GPA: {gpa}")
print(type(gpa))
988,173 | 20902e8faa98576feaf4997be561e8ae9aeda0ba | # encoding:utf-8
from django.contrib.auth.models import User
from django.conf.urls import url
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from tastypie.utils import trailing_slash
from oauth2_provider.views.base import TokenView
class AuthResource(ModelResource):
    """User authentication endpoints: register, login, logout."""

    class Meta:
        queryset = User.objects.all()
        resource_name = 'auth'
        allowed_methods = ['post', 'put']
        serializer = Serializer(formats=['json'])

    def prepend_urls(self):
        # Expose /auth/login and /auth/register alongside the default routes.
        slash = trailing_slash()
        resource = self._meta.resource_name
        login_url = url(r"^(?P<resource_name>%s)/login%s$" % (resource, slash),
                        self.wrap_view('login'), name="api_login")
        register_url = url(r'^(?P<resource_name>%s)/register%s$' % (resource, slash),
                           self.wrap_view('register'), name='api_register')
        return [login_url, register_url]

    def login(self, request, *args, **kwargs):
        # Not implemented yet.
        pass

    def register(self, request, **kwargs):
        # Not implemented yet.
        pass
|
988,174 | b402a953d6dfb69640a7c1ae562ceae4040ce9f1 | from django.urls import include, path, reverse
from rest_framework import status
from rest_framework.test import APITestCase, URLPatternsTestCase
from user.models import User
from products.models import Product
class ProductsTests(APITestCase, URLPatternsTestCase):
    """End-to-end tests for the products API: list, create, detail, update,
    patch, delete, and category filtering.

    Fix: ``assertEquals`` replaced with ``assertEqual`` -- the plural form is
    a deprecated alias removed in Python 3.12.

    NOTE(review): ``path()`` takes a plain route, not a regex, so the leading
    ``r'^'`` below becomes a literal path segment -- which is why every
    hand-built URL in these tests starts with the percent-encoded ``/%5Eapi``.
    Switching to ``re_path`` (or dropping the ``^``) would let the URLs read
    ``/api/products/...``; left as-is here to keep behavior unchanged.
    """
    urlpatterns = [path(r'^api/products/', include('products.urls')),]
    """
    Product list endpoint test.
    """
    def test_should_get_all_products(self):
        user = User.objects.create(email='olivia@ovi.it')
        Product.objects.create(title='lamp', description='description', price=55)
        Product.objects.create(title='bed', description='description', price=55)
        url = reverse('products-list')
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 2)
    """
    Create product endpoint test.
    """
    def test_should_create_product(self):
        user = User.objects.create(email='olivia@ovi.it')
        url = reverse('products-list')
        self.client.force_authenticate(user=user)
        data = {'title': 'bed', 'description': 'new_description', 'price': '90'}
        response = self.client.post(url, data, format='json')
        number_of_current_products = len(Product.objects.get_queryset())
        product = Product.objects.first()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(number_of_current_products, 1)
        self.assertEqual(product.title, 'bed')
        self.assertEqual(product.description, 'new_description')
        self.assertEqual(product.price, 90)
    def test_should_not_create_product_when_no_details_are_found(self):
        user = User.objects.create(email='olivia@ovi.it')
        url = reverse('products-list')
        self.client.force_authenticate(user=user)
        response = self.client.post(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    """
    Product detail endpoint test.
    """
    def test_should_get_product_detail(self):
        user = User.objects.create(email='olivia@ovi.it')
        product = Product.objects.create(title='lamp', description='description', price=55.00)
        url = '/%5Eapi/products/product/{0}'.format(str(product.id))
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], 1)
        self.assertEqual(response.data['title'], 'lamp')
        self.assertEqual(response.data['description'], 'description')
        self.assertEqual(response.data['price'], '55.00')
    def test_should_not_get_product_detail_request_when_product_does_not_exists(self):
        user = User.objects.create(email='olivia@ovi.it')
        Product.objects.create(title='lamp', description='description', price=55.00)
        url = '/%5Eapi/products/product/{0}/'.format(str(90))
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    """
    Product put endpoint test.
    """
    def test_should_update_product(self):
        user = User.objects.create(email='olivia@ovi.it')
        product1 = Product.objects.create(title='lamp', description='description', price=55)
        data = {'title': 'bed', 'description': 'new_description', 'price': '90'}
        url = '/%5Eapi/products/product/{0}'.format(str(product1.id))
        self.client.force_authenticate(user=user)
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], 1)
        self.assertEqual(response.data['title'], 'bed')
        self.assertEqual(response.data['description'], 'new_description')
        self.assertEqual(response.data['price'], '90.00')
    def test_should_create_product_when_product_is_not_found(self):
        # PUT to a missing id upserts: the view creates the product.
        user = User.objects.create(email='olivia@ovi.it')
        data = {'title': 'bed', 'description': 'new_description', 'price': '90'}
        url = '/%5Eapi/products/product/{0}'.format(str(2))
        self.client.force_authenticate(user=user)
        response = self.client.put(url, data, format='json')
        number_of_current_products = len(Product.objects.get_queryset())
        product = Product.objects.first()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(number_of_current_products, 1)
        self.assertEqual(product.title, 'bed')
        self.assertEqual(product.description, 'new_description')
        self.assertEqual(product.price, 90)
    def test_should_not_update_product_when_nothing_to_update(self):
        user = User.objects.create(email='olivia@ovi.it')
        product1 = Product.objects.create(title='lamp', description='description', price=55)
        url = '/%5Eapi/products/product/{0}'.format(product1.id)
        self.client.force_authenticate(user=user)
        response = self.client.put(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    """
    Product patch endpoint test.
    """
    def test_should_patch_product(self):
        user = User.objects.create(email='olivia@ovi.it')
        product1 = Product.objects.create(title='lamp', description='description', price=55)
        data = {'description': 'new_description'}
        url = '/%5Eapi/products/product/{0}'.format(str(product1.id))
        self.client.force_authenticate(user=user)
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], 1)
        self.assertEqual(response.data['title'], 'lamp')
        self.assertEqual(response.data['description'], 'new_description')
        self.assertEqual(response.data['price'], '55.00')
    def test_should_not_patch_product_when_nothing_to_patch(self):
        user = User.objects.create(email='olivia@ovi.it')
        product1 = Product.objects.create(title='lamp', description='description', price=55)
        url = '/%5Eapi/products/product/{0}'.format(str(product1.id))
        self.client.force_authenticate(user=user)
        response = self.client.patch(url, format='json')
        product1_patched = Product.objects.get(pk=product1.id)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(product1_patched.description, 'description')
        self.assertEqual(product1_patched.title, 'lamp')
        self.assertEqual(product1_patched.price, 55)
    def test_should_not_patch_product_when_product_is_not_found(self):
        user = User.objects.create(email='olivia@ovi.it')
        product1 = Product.objects.create(title='lamp', description='description', price=55)
        data = {'description': 'new_description'}
        url = '/%5Eapi/products/product/{0}/'.format(str(67))
        self.client.force_authenticate(user=user)
        response = self.client.patch(url, data, format='json')
        product1_patched = Product.objects.get(pk=product1.id)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(product1_patched.description, 'description')
        self.assertEqual(product1_patched.title, 'lamp')
        self.assertEqual(product1_patched.price, 55)
    """
    Product delete endpoint test.
    """
    def test_should_delete_correct_product(self):
        user = User.objects.create(email='olivia@ovi.it')
        product1 = Product.objects.create(title='lamp', description='description', price=55)
        product2 = Product.objects.create(title='bed', description='description', price=55)
        number_of_current_products = len(Product.objects.get_queryset())
        self.assertEqual(number_of_current_products, 2)
        url = '/%5Eapi/products/product/{0}'.format(str(product1.id))
        self.client.force_authenticate(user=user)
        response = self.client.delete(url, format='json')
        number_of_current_products = len(Product.objects.get_queryset())
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(number_of_current_products, 1)
    def test_should_not_delete_product_when_product_is_not_found(self):
        user = User.objects.create(email='olivia@ovi.it')
        Product.objects.create(title='lamp', description='description', price=55)
        Product.objects.create(title='bed', description='description', price=55)
        number_of_current_products = len(Product.objects.get_queryset())
        self.assertEqual(number_of_current_products, 2)
        url = '/%5Eapi/products/product/{0}/'.format(str(8))
        self.client.force_authenticate(user=user)
        response = self.client.delete(url, format='json')
        number_of_current_products = len(Product.objects.get_queryset())
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(number_of_current_products, 2)
    def test_should_return_all_products_for_specific_category(self):
        user = User.objects.create(email='olivia@ovi.it')
        Product.objects.create(title='lamp', description='description', price=55, category="BEDROOM")
        url = '/%5Eapi/products/product/category/{0}'.format("BEDROOM")
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        self.assertEqual(response.data[0]['id'], 1)
        self.assertEqual(response.data[0]['title'], 'lamp')
        self.assertEqual(response.data[0]['description'], 'description')
        self.assertEqual(response.data[0]['price'], '55.00')
        self.assertEqual(response.data[0]['category'], 'BEDROOM')
    def test_should_return_error_when_category_do_not_exists(self):
        user = User.objects.create(email='olivia@ovi.it')
        Product.objects.create(title='lamp', description='description', price=55, category="roof")
        url = '/%5Eapi/products/product/category/{0}'.format("kitchen")
        self.client.force_authenticate(user=user)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
988,175 | 428278935f2422e2748fcb02966b2a5e599fa624 | import os
import sys
sys.path.append('modules')
import add_problem
import review
import json_to_csv
import csv_to_json
# Interactive console menu: add problems, review mistakes, or convert
# between the CSV and JSON stores.  Loops until the user picks 'q'.
menu_actions = {
    'a': add_problem.add_problem,
    'r': review.review,
    'j': json_to_csv.json_to_csv,
    'c': csv_to_json.csv_to_json,
}
active = True
os.system('color 1e')  # Windows console colors: blue background, yellow text
while active:
    print('请输入要进行的操作:')
    msg = ('(a=添加题目, r=复习错题, q=退出, c=从表格导入题目, '
           'j=从数据库导出至表格) ')
    choice = input(msg)
    if choice == 'q':
        active = False
    elif choice in menu_actions:
        menu_actions[choice]()
    else:
        # Invalid input: warn and skip the screen clear below.
        print("输入不合法")
        continue
    os.system("cls")
988,176 | 7b754b0b5c4497a6f8c1f23951b45f208b492e42 | '''
Created on Jun 22, 2010
@author: wye
'''
from af import ActivityBase
class behav():
    '''
    Describes behavior of acts: callbacks invoked by XXActivity when each
    act fires.  (Python 2 code -- print statements.)
    '''
    def __init__(self):
        '''
        Constructor
        '''
        # Shared activity-state dictionary, injected via setActDict().
        self._actdict = None
    def setActDict(self, actdict):
        # Store a reference to the activity's state dict (not a copy).
        self._actdict = actdict
    def act1Behav(self):
        # Produce the tokens that acts 2 and 3 wait on; the values match the
        # min_tok2/min_tok3 thresholds declared in XXActivity's actdict.
        self._actdict['act2']['input2']['list_tok2'] = 3
        self._actdict['act3']['input3']['list_tok3'] = 4
        print 'act1 finished'
    def act2Behav(self):
        print 'act2 finished'
    def act3Behav(self):
        print 'act3 finished'
class XXActivity (ActivityBase.ActivityBase):
    # Three-step activity state machine: act1 feeds tokens to act2 and act3.
    # Step dispatch relies on ActivityBase.run() calling step() -- not
    # visible here, so the exact driving contract is assumed.
    def __init__(self, impl):
        '''
        Constructor

        impl -- a behav-like object providing act1Behav/act2Behav/act3Behav.
        '''
        ActivityBase.ActivityBase.__init__(self)
        self.__impl = impl
        # Declarative state: per-act input sources, token thresholds, and
        # current token counts ('noN' sentinels mean "not yet produced").
        self._actdict = {'userinput':'yes',
                    'stepno':1,
                    'act1':{'input1':{'source1':'user input', 'state1':'false'}},
                    'act2':{'input2':{'source2':'act1', 'min_tok2':3, 'max_tok2':3, 'list_tok2':'no2'}},
                    'act3':{'input3':{'source3':'act1', 'min_tok3':4, 'max_tok3':4, 'list_tok3':'no3'}}}
        self.__impl.setActDict(self._actdict)
    def step(self, stepno):
        if stepno == 1:
            # NOTE(review): state1 is initialized to the *string* 'false' and
            # compared against the boolean True -- this branch can only fire
            # if some external code stores a real boolean; confirm intended.
            if self._actdict['act1']['input1']['state1'] == True:
                self._actdict['stepno'] = 2
                if self._actdict['userinput'] == 'yes':
                    self._actdict['userinput'] = 'inputted'
                self.act1()
            else:
                print 'Insufficient input 1'
        if stepno == 2:
            # Proceed only when act1 delivered exactly min_tok2 tokens.
            if self._actdict['act2']['input2']['list_tok2'] == self._actdict['act2']['input2']['min_tok2']:
                self._actdict['stepno'] = 3
                self.act2()
            else:
                print 'Insufficient input 2'
        if stepno == 3:
            if self._actdict['act3']['input3']['list_tok3'] == self._actdict['act3']['input3']['min_tok3']:
                self.act3()
            else:
                print 'Insufficient input 3'
    def act1(self):
        # Run the behaviour callback, then re-enter the base-class loop so
        # the next step is evaluated with the freshly produced tokens.
        self.__impl.act1Behav()
        self.run()
    def act2(self):
        self.__impl.act2Behav()
        self.run()
    def act3(self):
        # Terminal act: no further run() call.
        self.__impl.act3Behav()
if __name__ == "__main__":
    # Wire the behaviour implementation into the activity and start it.
    impl = behav()
    x = XXActivity(impl)
    x.run()
|
988,177 | 47a2f86e7ca36486f2d8b1269edc3f515f3ac4fd | # Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flexflow.core.flexflow_type import ActiMode, AggrMode, PoolType, DataType, LossType, MetricsType, OpType, enum_to_int, int_to_enum
class PyTorchModel(object):
    """Rebuilds a FlexFlow model graph from a serialized PyTorch description.

    The file lists one op per line:
    ``op_name, prev_op1:prev_op2:..., op_type_int, <op-specific params...>``
    """
    def __init__(self, filename):
        # op name -> FlexFlow tensor produced by that op
        self.tensor_dict = {}
        self.filename = filename

    def apply(self, ffmodel, input_tensors):
        """Replay the serialized graph onto ``ffmodel``.

        ``input_tensors`` are consumed, in file order, by the INPUT ops.
        Returns the list of tensors named by the OUTPUT op (empty if the
        file has no OUTPUT line).
        """
        # Fix: use a context manager so the file is closed even when one of
        # the asserts below fires.
        with open(self.filename, "r") as in_file:
            lines = in_file.readlines()
        output_tensors = []
        input_idx = 0
        for line in lines:
            items = line.strip().split(",")
            assert len(items) >= 3, "wrong format"
            items = [i.strip() for i in items]
            print(items)
            # op name
            op_name = items[0]
            # predecessor op names; drop empty fragments.  (Fix: the original
            # removed elements while iterating the same list, which skips
            # consecutive empty entries.)
            prev_ops_list = [i.strip() for i in items[1].split(":")]
            prev_ops_list = [i for i in prev_ops_list if i != ""]
            # op type
            op_type = int_to_enum(OpType, int(items[2]))
            if op_type == OpType.INPUT:
                assert len(prev_ops_list) == 0, "wrong format"
                self.tensor_dict[op_name] = input_tensors[input_idx]
                input_idx += 1
            elif op_type == OpType.LINEAR:
                assert len(items) == 6, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                od = int(items[3])
                activ = int_to_enum(ActiMode, int(items[4]))
                bias = bool(int(items[5]))
                self.tensor_dict[op_name] = ffmodel.dense(input=input_tensor, out_dim=od, activation=activ, use_bias=bias, name=op_name)
            elif op_type == OpType.CONV2D:
                assert len(items) == 13, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                oc = int(items[3])
                kh = int(items[4])
                kw = int(items[5])
                sh = int(items[6])
                sw = int(items[7])
                ph = int(items[8])
                pw = int(items[9])
                activ = int_to_enum(ActiMode, int(items[10]))
                group = int(items[11])
                bias = bool(int(items[12]))
                self.tensor_dict[op_name] = ffmodel.conv2d(input=input_tensor, out_channels=oc, kernel_h=kh, kernel_w=kw, stride_h=sh, stride_w=sw, padding_h=ph, padding_w=pw, activation=activ, groups=group, use_bias=bias, name=op_name)
            elif op_type == OpType.POOL2D:
                assert len(items) == 8, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                kh = int(items[3])
                sh = int(items[4])
                ph = int(items[5])
                pt = int_to_enum(PoolType, int(items[6]))
                activ = int_to_enum(ActiMode, int(items[7]))
                self.tensor_dict[op_name] = ffmodel.pool2d(input=input_tensor, kernel_h=kh, kernel_w=kh, stride_h=sh, stride_w=sh, padding_h=ph, padding_w=ph, pool_type=pt, activation=activ, name=op_name)
            elif op_type == OpType.DROPOUT:
                assert len(items) == 4, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                # Fix: original read ``item[3]`` (NameError -- undefined name).
                # NOTE(review): dropout rates are usually fractional; int()
                # may be wrong -- confirm against the serializer's format.
                r = int(items[3])
                self.tensor_dict[op_name] = ffmodel.dropout(input=input_tensor, rate=r, seed=0, name=op_name)
            elif op_type == OpType.FLAT:
                assert len(items) == 3, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                self.tensor_dict[op_name] = ffmodel.flat(input=input_tensor, name=op_name)
            elif op_type == OpType.RELU:
                assert len(items) == 3, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                self.tensor_dict[op_name] = ffmodel.relu(input=input_tensor, name=op_name)
            elif op_type == OpType.SIGMOID:
                assert len(items) == 3, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                self.tensor_dict[op_name] = ffmodel.sigmoid(input=input_tensor, name=op_name)
            elif op_type == OpType.TANH:
                assert len(items) == 3, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                self.tensor_dict[op_name] = ffmodel.tanh(input=input_tensor, name=op_name)
            elif op_type == OpType.ELU:
                assert len(items) == 3, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                self.tensor_dict[op_name] = ffmodel.elu(input=input_tensor, name=op_name)
            elif op_type == OpType.SOFTMAX:
                assert len(items) == 3, "wrong format"
                assert len(prev_ops_list) == 1, "wrong format"
                input_tensor = self.tensor_dict[prev_ops_list[0]]
                self.tensor_dict[op_name] = ffmodel.softmax(input=input_tensor, name=op_name)
            elif op_type == OpType.CONCAT:
                assert len(items) == 4, "wrong format"
                assert len(prev_ops_list) >= 2, "wrong format"
                # Fix: use a fresh local -- the original rebound the
                # ``input_tensors`` parameter here, which broke any INPUT op
                # processed after a CONCAT.
                concat_inputs = []
                for i in prev_ops_list:
                    concat_inputs.append(self.tensor_dict[i])
                ax = int(items[3])
                self.tensor_dict[op_name] = ffmodel.concat(tensors=concat_inputs, axis=ax, name=op_name)
            elif op_type == OpType.OUTPUT:
                # Collect the named predecessors as the model outputs.
                self.tensor_dict[op_name] = []
                for i in prev_ops_list:
                    self.tensor_dict[op_name].append(self.tensor_dict[i])
                output_tensors = self.tensor_dict[op_name]
            else:
                assert 0, "unknown op"
        return output_tensors
|
988,178 | c4251c46f19388a3d6c1055310407b7b545f410a | # encoding: utf-8
"""
@author:Administrator
@file: page_taskcompatible.py
@time: 2018/11/15
"""
import allure
from Base.base_page import BasePage
from Unit.tool import parse
class Page_Task_Compatible(BasePage):
    '''Page object for the standard-compatibility test page.

    Element locators come from ``parse()`` over the current .aspx source and
    are cached in ``self.page`` keyed by control id (e.g. 'B_StartTest').
    '''
    def __init__(self, AutoRead=False):
        # Locator cache; populated lazily unless AutoRead is requested.
        self.page = {}
        if AutoRead:
            self.read_page()
    def read_page(self):
        # Refresh the locator map from the live page source.
        self.page = parse(self.get_current_aspx())
    @allure.step(u'标准兼容性测试:{0}')
    def into_task_compatible(self, url):
        # Navigate to the compatibility-test page.
        self.get_url(url)
    @allure.step(u'点击开始测试')
    def click_start_test(self):
        # "Start test" button; click_wait blocks until the click settles.
        self.click_wait(self.page['B_StartTest'])
    @allure.step(u'点击上传应用')
    def click_uploadapp(self):
        # "Upload app" button.
        self.click(self.page['B_UPLoadApp'])
    @allure.step(u'取消上传文件')
    def click_close(self):
        # Cancel the file-upload dialog.
        self.click(self.page['B_close'])
988,179 | 8730ac2e93214dad8c604629d8e22f376098aad6 | # Exercicio 13
#
# Crie um programa que peça o nome do cliente, idade, endereço, email e telefone.
#
# Depois crie um menu interativo com as seguintes opções: Dados, Endereço, Contato.
#
# Se o usuário selecionar "Dados" deve aparecer o nome do cliente e a idade
#
# Se o usuário selecionar "Endereço" deve aparecer o nome do cliente e o endereço
#
# Se o usuário selecionar "Contato" deve aparecer o nome do cliente, email e o telefone
# Collect the client record, then show a menu and print the chosen view.
nome = input("Insira o nome: ")
idade = int(input("Insira a idade: "))
endereco = input("Insira o endereco: ")
email = input("Insira o email: ")
telefone = input("Insira o telefone: ")
# Fix: option 3 was labelled "Idade" although the exercise statement and the
# branch below both define it as "Contato" (email + telefone).
print("""
1. Dados
2. Endereço
3. Contato
""")
opcao = int(input("Escolha a operação: "))
if opcao == 1:
    print(nome, ",", idade)
elif opcao == 2:
    print(nome, ",", endereco)
else:
    # NOTE(review): any value other than 1/2 falls through to Contato; an
    # explicit ``elif opcao == 3`` plus an invalid-option message would match
    # the exercise statement more strictly.
    print(nome, ",", email, ",", telefone)
|
988,180 | fcb9dc924e94ded6f9619cac9777a1744aa9c04c | # -*- coding:utf-8 -*-
'''
第一个只出现一次的字符
===========================
在字符串中找出第一个只出现一次的字符。如输入"abaccdeff",则输出"b"。
'''
def firstNotRepeatingChar(s):
    """Return the first character of *s* that occurs exactly once.

    Returns None when *s* is not a non-empty str or when every character
    repeats.
    """
    if not isinstance(s, str) or len(s) == 0:
        return
    # First pass: count occurrences of each character.
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    # Second pass: the first character with count 1 wins.
    for ch in s:
        if counts[ch] == 1:
            return ch
    return None
import unittest
class TestFirstNotRepeatingChar(unittest.TestCase):
    """Unit tests for firstNotRepeatingChar."""
    def test_first_not_repeating_char(self):
        cases = [
            ('google', 'l'),      # first unique char mid-string
            ('aabccdbd', None),   # every char repeats
            ('abcdefg', 'a'),     # all unique -> first char
            (None, None),         # non-string input
        ]
        for text, expected in cases:
            self.assertEqual(firstNotRepeatingChar(text), expected)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
988,181 | 36eaca0e84733412887f4332c8e5083d0ca13312 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
def add_beautiful_subplot(fig, subplot, xlim, ylim):
    """Add a subplot with hidden spines and hand-drawn x/y axis arrows.

    fig      -- matplotlib Figure to attach the subplot to
    subplot  -- subplot spec (e.g. 111) passed to fig.add_subplot
    xlim/ylim -- data limits for the axes
    Axis labels are fixed to 'S' (x) and 'C' (y).
    """
    axes = fig.add_subplot(
        subplot, xlim=xlim, ylim=ylim
    )
    # Hide the default box so only the drawn arrows remain.
    for side in ['bottom', 'right', 'top', 'left']:
        axes.spines[side].set_visible(False)
    plt.xlabel('S', fontsize=18)
    plt.ylabel('C', fontsize=18)
    xmin, xmax = axes.get_xbound()
    ymin, ymax = axes.get_ylim()
    # Arrow-head size for the x-axis arrow, in data units.
    head_width = 1. / 50. * (ymax - ymin)
    head_length = 1. / 100. * (xmax - xmin)
    # Measure the axes' on-screen aspect ratio (inches) so the y-axis
    # arrow head looks the same as the x one despite different data scales.
    dps = fig.dpi_scale_trans.inverted()
    bbox = axes.get_window_extent().transformed(dps)
    width, height = bbox.width, bbox.height
    yhead_width = 1. / 50. * (xmax - xmin) * height / width
    yhead_length = 1. / 100. * (ymax - ymin) * width / height
    linewidth = 1.
    # Shaft thickness of the x arrow, capped at 1 data unit.
    # NOTE(review): presumably tuned visually for large y-ranges -- confirm.
    xwidth = min(1.0, (ymax - ymin) / 1000.)
    # x axis: horizontal arrow along y=0.
    axes.arrow(
        xmin, 0, xmax - xmin, 0, linewidth=linewidth, width=xwidth,
        head_width=head_width, head_length=head_length,
        length_includes_head=True, fc='k', ec='k'
    )
    # y axis: vertical arrow along x=0.
    axes.arrow(
        0, ymin, 0, ymax - ymin, linewidth=1.,
        head_width=yhead_width, head_length=yhead_length,
        length_includes_head=True, fc='k', ec='k'
    )
|
988,182 | 67faf5485d451ae309f33e47d4c81b0ec678e1b1 | from dmn import DMN_QA
if __name__=='__main__':
    # Build the Dynamic Memory Network QA system and start training.
    QA_SYS = DMN_QA()
    print('[+] QA_SYS object created')
    QA_SYS.train_model()
988,183 | b3bf4dffbcb6d988b1f74a2e5af7b78ae8c4aec8 | import numpy as np
import h5py
np.random.seed(1000)
from scipy.io import loadmat
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
n_splits=4
train_size=0.8
test_size=0.2
eeg2 = r""
class EEGSchizoDatasetBalanced():
    """EEG schizophrenia dataset with per-channel standardization.

    Loads scalars/labels from two HDF5 files in the working directory;
    ``get_data`` returns stratified train/test splits with a class-balanced
    training set.  (The original docstring said "EEG Alco Train" -- the file
    names say this is schizophrenia data.)
    """
    def __init__(self):
        # Load samples and swap axes 1/2 so the layout is
        # (samples, channels, time); ``with`` guarantees the files close.
        with h5py.File('schizo_scalars_unbalanced.h5', 'r') as h5f:
            self.spikes_seizure_eeg = h5f['dataset_schizo_scalars_unbalanced'][:]
        self.spikes_seizure_eeg = np.swapaxes(self.spikes_seizure_eeg, 1, 2)
        # Standardize each channel independently across all samples.
        scalers = {}
        for i in range(self.spikes_seizure_eeg.shape[1]):
            scalers[i] = StandardScaler()
            self.spikes_seizure_eeg[:, i, :] = scalers[i].fit_transform(self.spikes_seizure_eeg[:, i, :])
        with h5py.File('schizo_labels_unbalanced.h5', 'r') as h5f:
            self.labels_seizure_eeg = h5f['dataset_schizo_labels_unbalanced'][:]
        # positives / total
        print(str(np.sum(self.labels_seizure_eeg))+'/'+str(len(self.labels_seizure_eeg)))
    def get_data(self):
        """Return n_splits dicts with a balanced train set and the raw test split."""
        #all folds
        dataArray = list()
        sss = StratifiedShuffleSplit(n_splits=n_splits, train_size=train_size, test_size=test_size, random_state=0)
        for train_index, test_index in sss.split(self.spikes_seizure_eeg, self.labels_seizure_eeg):
            trainLabels = self.labels_seizure_eeg[train_index]
            trainValues = self.spikes_seizure_eeg[train_index]
            testLabels = self.labels_seizure_eeg[test_index]
            testValues = self.spikes_seizure_eeg[test_index]
            #BALANCING TRAINING DATA
            positivesIndices = trainLabels == 1
            positiveEEGs = trainValues[positivesIndices]
            negativeEEGs = trainValues[~positivesIndices]
            print('positiveEEGs: '+str(len(positiveEEGs)))
            print('negativeEEGs: '+str(len(negativeEEGs)))
            # Downsample both classes to the minority-class size.
            # NOTE(review): taking the *first* n of each class rather than a
            # random subset may bias the balanced set -- confirm intended.
            n = np.min([len(positiveEEGs), len(negativeEEGs)])
            print(n)
            trainValues = (np.concatenate((positiveEEGs[0:n], negativeEEGs[0:n]), axis=0))
            trainLabels = (np.concatenate((np.full((n), 1), np.full((n), 0)), axis=0))
            # Deterministic shuffle so the two classes are interleaved.
            shuffle = np.random.RandomState(seed=0).permutation(len(trainValues))
            trainValues = trainValues[shuffle]
            trainLabels = trainLabels[shuffle]
            currentSplit = {'X_train': (trainValues), 'X_test': (testValues),
                            'y_train': (trainLabels), 'y_test': (testLabels)}
            dataArray.append(currentSplit)
        return dataArray
    def __len__(self):
        return len(self.spikes_seizure_eeg)
    def __getitem__(self, idx):
        # Fix: ``torch`` was never imported at module level, so this method
        # raised NameError.  Import it lazily so the rest of the module keeps
        # working without torch installed.
        import torch
        if torch.is_tensor(idx):
            idx = idx.tolist()
        eeg = torch.tensor(self.spikes_seizure_eeg[idx])
        print('eeg size (in getitem): '+str(eeg.size()))
        label = self.labels_seizure_eeg[idx]
        sample = {'eeg': eeg, 'label': label}
        return sample
# Build the dataset, take the second stratified split, report stats, and
# dump train/test arrays to .npy files (eeg2 is '' -> current directory).
schizoDataset = EEGSchizoDatasetBalanced()
dataArray = schizoDataset.get_data()
X_train = dataArray[1]['X_train']
X_test = dataArray[1]['X_test']
y_train = dataArray[1]['y_train']
y_test = dataArray[1]['y_test']
print("Train dataset : ", X_train.shape, y_train.shape)
print("Test dataset : ", X_test.shape, y_test.shape)
print("Train dataset metrics : ", X_train.mean(), X_train.std())
print("Test dataset : ", X_test.mean(), X_test.std())
print("Nb classes : ", len(np.unique(y_train)))
print('TESTING')
np.save(eeg2 + 'X_train.npy', X_train)
np.save(eeg2 + 'y_train.npy', y_train)
np.save(eeg2 + 'X_test.npy', X_test)
np.save(eeg2 + 'y_test.npy', y_test)
988,184 | 44240a1244c99ac9fdffe69900c7aa995a2a642f | import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
import constants
"""
This is a common utils class file which can be used across py packages
"""
def gettickerdata(tickername):
    """Scrape the current price for *tickername* from the Bloomberg page.

    The company name is first resolved to its symbol via getticker();
    returns the price text prefixed with '$'.
    """
    response = requests.get(constants.bloomurl + getticker(tickername) + ':US')
    page = BeautifulSoup(response.text, 'html.parser')
    # NOTE(review): raises IndexError if no div.price is present (layout change).
    price_nodes = page.find_all('div', class_="price")
    return "$" + price_nodes[0].text
def getticker(ticker):
    """Resolve a company-name prefix to its stock symbol via MongoDB.

    Performs a case-insensitive prefix match on the Name field of the
    tickerdata collection and returns the matching Symbol.
    """
    # getDBConnection
    db = getDBConnection()
    # getTable
    tickerCollection = db['tickerdata']
    tkrlist = tickerCollection.find({"Name": {"$regex": "^" + ticker, "$options": 'i'}})
    # Candidate name prefixes: the ticker text followed by '.', ',' or ' '.
    # Fix: on Python 3 ``map`` returns an iterator, so indexing
    # ``tickrCombination[0]`` below raised TypeError -- materialize a list.
    tickrCombination = list(map((ticker + '{0}').format, [".", ",", " "]))
    # NOTE(review): cursor.count() is deprecated in modern pymongo
    # (count_documents); also, with >1 matches and none terminated by the
    # candidate characters this falls through and returns None, and with 0
    # matches tkrlist[0] raises IndexError -- confirm callers handle both.
    if tkrlist.count() > 1:
        for i in tkrlist:
            if (i['Name'][:len(tickrCombination[0])] in tickrCombination):
                return i['Symbol']
    else:
        return tkrlist[0]['Symbol']
def getDBConnection():
    """Connect to the mLab-hosted MongoDB and return the stockitdb handle.

    :return: pymongo Database object for 'stockitdb'
    """
    # Connect to mLab DB (URI kept in constants, not hard-coded here)
    client = MongoClient(constants.mongo_uri)
    # getDB
    db = client['stockitdb']
    return db
def ticker_wrapper(ticker):
    """
    This is a wrapper to get the ticker data.

    Not implemented yet -- placeholder for a future entry point.
    """
    pass
|
988,185 | 859e6e486c7e8f3996782cce821ab38d6707e447 | #!/usr/bin/env python3
import fileinput
import math
def find_sum(numbers, num_summands, target):
    """Search for ``num_summands`` distinct entries of ``numbers`` that sum
    to ``target``.

    Returns (True, summands) on success, (False, None) otherwise.  Each
    element is used at most once (the recursion only looks past the
    current index).
    """
    if num_summands == 0:
        # Base case: an empty selection works iff nothing is left to reach.
        if target == 0:
            return True, []
        return False, None
    if target < 0 or not numbers:
        # Dead end: overshot the target or ran out of candidates.
        return False, None
    for offset, candidate in enumerate(numbers):
        ok, rest = find_sum(numbers[offset + 1:], num_summands - 1, target - candidate)
        if ok:
            return True, rest + [candidate]
    return False, None
def test_task1():
    # Worked example from the AoC 2020 day-1 statement: 1721 * 299 == 514579.
    expenses = [1721, 979, 366, 299, 675, 1456]
    found_solution, summands = find_sum(expenses, 2, 2020)
    assert found_solution
    assert math.prod(summands) == 514579
    print('tests for task 1: ok')
def solve_task1():
    # Read one expense per line from stdin/argv files (fileinput), find the
    # pair summing to 2020, and print the product.
    expenses = [int(line) for line in fileinput.input()]
    found_solution, summands = find_sum(expenses, 2, 2020)
    assert found_solution
    solution = math.prod(summands)
    print(f'answer to task 1: {solution}')
def test_task2():
    # Worked example for part 2: 979 * 366 * 675 == 241861950.
    expenses = [1721, 979, 366, 299, 675, 1456]
    found_solution, summands = find_sum(expenses, 3, 2020)
    assert found_solution
    assert math.prod(summands) == 241861950
    print('tests for task 2: ok')
def solve_task2():
    # Same as task 1 but searching for a triple summing to 2020.
    expenses = [int(line) for line in fileinput.input()]
    found_solution, summands = find_sum(expenses, 3, 2020)
    assert found_solution
    solution = math.prod(summands)
    print(f'answer to task 2: {solution}')
def main():
    # Run the built-in examples before each real solve as a sanity check.
    test_task1()
    solve_task1()
    test_task2()
    solve_task2()
if __name__ == '__main__':
    main()
|
988,186 | d1d46bde9195cc0425234a907fcdd090416c87aa | import os
class Config(object):
    """Flask-style configuration container.

    SECRET_KEY comes from the environment when set; otherwise a hard-coded
    fallback is used.  NOTE(review): a committed fallback secret is unsafe
    for production deployments.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY') or\
        b'\xf2\x95\xa2\xfe\xfcQ\xf44j\xca\xf2t\xa0\x9a<\xf8'
|
988,187 | a23bb575ba70d38348b10e080673439d891047ef | from survival.components.position_component import PositionComponent
from survival.components.resource_component import ResourceComponent
from survival.entity_layer import EntityLayer
from survival.esper import World
from survival.graph_search import graph_search
from survival.settings import AGENT_VISION_RANGE
from survival.tile_layer import TileLayer
class GameMap:
    """Game-world grid combining a terrain tile layer with an entity layer,
    plus helpers for collision checks and nearby-resource discovery."""

    def __init__(self, width, height):
        # Dimensions are in grid cells.
        self.width = width
        self.height = height
        self.tile_layer = TileLayer(width, height)
        self.entity_layer = EntityLayer(width, height)

    def draw(self, camera):
        """Draw only the tiles inside the camera's visible area."""
        visible_area = camera.get_visible_area()
        self.tile_layer.draw(camera, visible_area)

    def add_entity(self, entity, pos):
        """Place `entity` at the grid cell of `pos` (a PositionComponent)."""
        self.entity_layer.add_entity(entity, pos.grid_position)

    def move_entity(self, from_pos, to_pos):
        """Relocate whatever entity occupies `from_pos` to `to_pos`."""
        self.entity_layer.move_entity(from_pos, to_pos)

    def remove_entity(self, pos):
        """Clear the entity occupying grid cell `pos`."""
        self.entity_layer.remove_entity(pos)

    def get_entity(self, pos) -> int:
        """Return the entity id at `pos`.

        NOTE(review): despite the `int` annotation this returns None for
        out-of-bounds cells (and possibly for empty ones, depending on
        EntityLayer) — callers must handle None.
        """
        if not self.in_bounds(pos):
            return None
        return self.entity_layer.get_entity(pos)

    def is_colliding(self, pos):
        """True when `pos` is blocked; out-of-bounds counts as colliding."""
        return not self.in_bounds(pos) or self.entity_layer.is_colliding(pos)

    def in_bounds(self, pos):
        """True when `pos` ([x, y]) lies inside the map rectangle."""
        return 0 <= pos[0] < self.width and 0 <= pos[1] < self.height

    def get_cost(self, pos):
        """Movement cost of the terrain tile at `pos`."""
        return self.tile_layer.get_cost(pos)

    def find_nearby_resources(self, world: World, player: int, position: PositionComponent, search_range: int = 5):
        """Scan a square window around `position` for resource entities.

        Returns a list of [entity_id, path, cost] triples where path/cost
        come from graph_search over this map.
        """
        entity_position = position.grid_position
        x_range = [entity_position[0] - search_range, entity_position[0] + search_range]
        y_range = [entity_position[1] - search_range, entity_position[1] + search_range]
        # Check if range is not out of map bounds
        if x_range[0] < 0:
            x_range[0] = 0
        if x_range[1] >= self.width:
            x_range[1] = self.width - 1
        if y_range[0] < 0:
            y_range[0] = 0
        if y_range[1] >= self.height:
            y_range[1] = self.height - 1
        found_resources = []
        # NOTE(review): range() excludes its upper bound, so the rightmost
        # column and bottom row of the clamped window are never scanned —
        # confirm whether that is intended.
        for y in range(y_range[0], y_range[1]):
            for x in range(x_range[0], x_range[1]):
                ent = self.get_entity([x, y])
                if ent == player:
                    # Skip the searching agent itself.
                    continue
                if ent is not None and world.has_component(ent, ResourceComponent):
                    res_position = world.component_for_entity(ent, PositionComponent).grid_position
                    path, cost = graph_search(self, position, tuple(res_position), world)
                    found_resources.append([ent, path, cost])
        return found_resources

    def find_nearest_resource(self, world: World, player: int, position: PositionComponent):
        """Return the [entity, path, cost] triple with the lowest path cost
        within AGENT_VISION_RANGE, or None when no resource is visible."""
        resources = self.find_nearby_resources(world, player, position, AGENT_VISION_RANGE)
        nearest = None
        for resource in resources:
            if nearest is None or resource[2] < nearest[2]:
                nearest = resource
        return nearest
|
988,188 | 3af87127c16383fd46b74f952604595f6c9fadab | import pandas as pd
import numpy as np
import requests
import logging
import datetime
import argparse
import gevent
from random import randint
import os
import yaml
class ConnectorAuthenticationError(Exception):
    """Raised when an external API (such as Google) returns a response
    indicating that the user token/authentication supplied through the
    onboarding form is broken (HTTP 401).
    """
    pass
def read_config(client_name):
    """Load <cwd>/<client_name>/config.yml and return it as a dict."""
    config_path = os.path.join(os.getcwd(), client_name, "config.yml")
    with open(config_path) as fh:
        return yaml.safe_load(fh)
class DataHandler:
    """Transform raw marketing JSON (fetched from `json_url`) into the wide
    CSV layout expected downstream and write it under the user's folder."""

    def __init__(
        self,
        json_url,
        user,
        config,
        file_name="train_dataset_for_robyn.csv",
    ):
        """
        :param json_url: URL returning {"data": [...]} with the raw rows.
        :param user: client folder name; the CSV lands in <cwd>/<user>/.
        :param config: parsed config.yml for this client (see read_config).
        :param file_name: output CSV file name.
        """
        self.config = config
        self.json_url = json_url
        self.user = user
        # BUGFIX: logging.basicConfig() returns None, so the original
        # `self.logger = logging.basicConfig(...)` made every later
        # self.logger.info()/error() call raise AttributeError.
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)
        self.columns_to_group_by = ["sourceMedium", config["colnames"].get('date_col'), "campaign", "type"]
        # pop() deliberately removes date_col from colnames so that
        # get_costs_df() never treats the date as a metric column.
        self.date_column = config["colnames"].pop("date_col")
        self.file_name = file_name
        self.campaign_type_dict = {
            "Retargeting": "Retargeting",
            "Prospecting": "Prospecting",
            "No-Targeting": "No-Targeting",
        }
        self.columns_mapping = {
            config["colnames"].get("totalcost_col"): "S",
            config["colnames"].get("impressions_col"): "I",
            config["colnames"].get("clicks_col"): "C",
        }
        # Drop entries whose source column is not configured.
        self.columns_mapping = {
            k: v for k, v in self.columns_mapping.items() if k is not None
        }

    @staticmethod
    def split_campaign(x, split_by=""):
        """Return the part of `x` after the first `split_by`, or None when it
        cannot be split (missing separator, non-string value, or the default
        empty separator which str.split rejects)."""
        try:
            return x.split(split_by)[1]
        except (AttributeError, IndexError, ValueError):
            return None

    def _verify_response(self, response, is_retry: bool = False):
        """Validate an HTTP response and return its parsed JSON body.

        Raises ConnectorAuthenticationError on 401. On 429 it sleeps a few
        seconds and resends the same request once before giving up.
        """
        if response.status_code == 401:
            data = response.json()
            error_msg = data.get("message")
            raise ConnectorAuthenticationError(error_msg)
        elif not response.ok:
            if not is_retry and response.status_code == 429:
                self.logger.info("Sleeping and trying again")
                gevent.sleep(5 + randint(0, 5))
                with requests.Session() as session:
                    response = session.send(response.request)
                return self._verify_response(response, is_retry=True)
            self.logger.error(
                "Something went wrong while getting data. "
                "Status code: %s - %s"
                % (
                    response.status_code,
                    response.text,
                )
            )
            raise Exception(
                "Something went wrong while getting data. "
                "Status code: %s - %s"
                % (
                    response.status_code,
                    response.json()["message"],
                )
            )
        return response.json()

    def get_data_from_url(self):
        """Fetch the raw rows from json_url and return them as a DataFrame."""
        res = requests.get(self.json_url)
        data = self._verify_response(res)
        data = pd.DataFrame(data["data"])
        return data

    def get_total_revenue(self, df):
        """Attach a per-date total_revenue column and aggregate the rest by
        the high-level grouping columns."""
        # Total revenue across all sources, per date.
        df_total_revenue = (
            df.groupby(self.date_column)[self.config["colnames"]["revenue_col"]]
            .sum()
            .reset_index(name="total_revenue")
        )
        # Group by high-level fields.
        df = df.groupby(self.columns_to_group_by).sum().reset_index()
        df = pd.merge(df, df_total_revenue, on=self.date_column)
        return df

    def get_fb_impressions_df(self, df, fb=None):
        """Pivot the Facebook rows (sourceMedium == `fb`) into wide columns,
        one per metric/campaign combination; returns None when the config
        does not request a Facebook split or no Facebook rows exist."""
        if self.config.get("facebook_split"):
            index_columns = list(
                set(self.columns_to_group_by + [self.date_column, "total_revenue"])
            )
            df_fb = (
                df[df.sourceMedium == fb]
                .set_index(index_columns)
                .unstack(["sourceMedium"])
            )
            if len(df_fb) > 0:
                df_fb.columns = [
                    "{}_{}".format(t, v[0].upper()) for v, t in df_fb.columns
                ]
                df_fb = df_fb.unstack(["campaign"])
                df_fb.columns = ["{}_{}".format(v, t) for v, t in df_fb.columns]
                return df_fb
        else:
            return None

    def get_costs_df(self, df, fb=None):
        """Pivot the non-Facebook metric columns into wide, per-source
        columns suffixed with the S/I/C metric code."""
        if self.config.get("facebook_split"):
            # Facebook is handled separately by get_fb_impressions_df.
            df = df.loc[df.sourceMedium != fb, :]
        columns = [self.date_column, "total_revenue", "sourceMedium"] + [
            v
            for k, v in self.config["colnames"].items()
            if v != self.config["colnames"]["revenue_col"]
        ]
        df_others = (
            df[columns]
            .groupby([self.date_column, "total_revenue", "sourceMedium"])
            .sum()
            .unstack(["sourceMedium"])
        )
        df_others.columns = [
            "{}_{}".format(t, self.columns_mapping.get(v)) for v, t in df_others.columns
        ]
        return df_others

    def transform_date_column(self, df):
        """Normalise the date column to datetime.date, accepting either
        datetime values or epoch-millisecond integers."""
        try:
            df[self.date_column] = df[self.date_column].dt.date
        except AttributeError:
            # Not datetime-like: assume epoch milliseconds.
            df[self.date_column] = df[self.date_column].apply(
                lambda x: datetime.datetime.fromtimestamp(x / 1000)
            )
            df[self.date_column] = df[self.date_column].dt.date
        return df

    def rename_campaign_type(self, df):
        """Map raw campaign names onto the configured campaign types."""
        # BUGFIX: the original condition
        # `"facebook_campaign_type_dict" and "split_campaigns_by" in self.config`
        # only tested the second key ("facebook_campaign_type_dict" is a
        # truthy literal), so a missing type dict raised KeyError below.
        if "facebook_campaign_type_dict" in self.config and "split_campaigns_by" in self.config:
            df.campaign = df.campaign.apply(
                lambda x: self.split_campaign(
                    x, split_by=self.config["split_campaigns_by"]
                )
            )
            type_dict = self.config["facebook_campaign_type_dict"]
            # Unknown campaign suffixes fall into the "Other" bucket.
            for tp in set(df.campaign.dropna()):
                if tp not in type_dict.keys():
                    type_dict[tp] = "Other"
            df.campaign = df.campaign.map(type_dict)
        df.campaign.fillna("None", inplace=True)
        return df

    def source_filter(self, df):
        """Keep only the sourceMedium values listed in the config, if any."""
        if "source_medium" in self.config:
            mask = df.sourceMedium.apply(lambda x: x in self.config["source_medium"])
            df = df.loc[mask, :]
        return df

    def data_preparation(self, df):
        """Run the full raw-rows -> wide-table pipeline and return the
        resulting DataFrame indexed by date."""
        df = self.rename_campaign_type(df)
        df = self.get_total_revenue(df)
        df = self.source_filter(df)
        df.sourceMedium = df.sourceMedium.str.replace("-", "_")
        df_fb = self.get_fb_impressions_df(
            df, fb=self.config.get("facebook_source_medium")
        )
        df_others = self.get_costs_df(df, fb=self.config.get("facebook_source_medium"))
        if df_fb is not None:
            result = pd.merge(df_others, df_fb, left_index=True, right_index=True)
        else:
            result = df_others
        result.fillna(0, inplace=True)
        # Drop columns that are all zeros.
        result = result.loc[:, (result != 0).any(axis=0)]
        result.reset_index(inplace=True)
        result = self.transform_date_column(result)
        result.set_index(self.date_column, inplace=True)
        return result

    def save_df_to_file(self, df):
        """Write `df` to <cwd>/<user>/<file_name> as CSV."""
        filepath = os.path.join(os.getcwd(), self.user, self.file_name)
        df.to_csv(filepath)
def main(arguments, config):
    """Fetch the raw JSON, reshape it, and write the training CSV."""
    handler = DataHandler(
        json_url=arguments["json_url"],
        user=arguments["user"],
        config=config,
    )
    raw = handler.get_data_from_url()
    handler.save_df_to_file(handler.data_preparation(raw))
if __name__ == "__main__":
    # CLI entry point: --json-url selects the data source, --user the client
    # folder whose config.yml drives the transformation.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--json-url",
        required=True,
        help="URL for the json data from database",
    )
    parser.add_argument(
        "--user",
        required=True,
        help="User name",
    )
    # parse_known_args tolerates unrecognised flags instead of erroring out.
    parsed, unknown = parser.parse_known_args()
    arguments = vars(parsed)
    config = read_config(arguments["user"])
    main(arguments, config)
|
988,189 | 2f2566a0ef9bcca392b56c0d26fac302a639f035 | from django.urls import path
from .views import index, pet_all, pet_detail, pet_like, pets_create, pets_delete, pets_edit
# Namespace used when reversing these routes, e.g. 'common:pet_detail'.
app_name = 'common'

urlpatterns = [
    path('', index, name='index'),
    path('pets/', pet_all, name='pet_all'),
    path('pets/details/<int:pk>/', pet_detail, name='pet_detail'),
    path('pets/like/<int:pk>/', pet_like, name='pet_like'),
    path('pets/create/', pets_create, name='pets_create'),
    path('pets/edit/<int:pk>', pets_edit, name='pets_edit'),
    path('pets/delete/<int:pk>', pets_delete, name='pets_delete'),
    # NOTE(review): duplicates 'pets/details/<int:pk>/' above under a second
    # URL and name — confirm whether both routes are intentional.
    path('pets/detail/<int:pk>', pet_detail, name='pets_detail'),
]
988,190 | ae6b1b1f7fcb0e6998a4571f8cc28b26de04a367 | import string
class TEA:
    """Tiny Encryption Algorithm (TEA) working on hex-string ciphertexts.

    The 128-bit key is split into four 32-bit words k0..k3 and each 64-bit
    block goes through the standard 32-round TEA schedule
    (delta = 0x9e3779b9).
    """

    def __init__(self, key):
        # assumes `key` is an int whose hex form is exactly 32 digits (a
        # full 128-bit key) — leading zeros would shift these slices;
        # TODO confirm with callers.
        key = hex(key)[2:]
        self.k0 = int(key[0:8],16)
        self.k1 = int(key[8:16],16)
        self.k2 = int(key[16:24],16)
        self.k3 = int(key[24:],16)
        self.test = []  # NOTE(review): never used — candidate for removal

    def padArr(self, arr):
        """Pad a list of two-digit hex strings with '00' bytes up to a
        multiple of 8 (one 64-bit TEA block)."""
        padAmt = (8-len(arr)%8)%8
        for i in range(padAmt):
            arr.append('00')
        return arr

    def encryptBlocks(self, blocks):
        """Encrypt one (v0, v1) pair of 32-bit ints; returns the new pair."""
        block0 = blocks[0]
        block1 = blocks[1]
        s = 0
        delta = 0x9e3779b9
        for i in range(32):
            s += delta
            s &= 0xffffffff  # keep the round constant within 32 bits
            block0 += (((block1<<4)&0xffffffff) + self.k0) ^ (block1 + s) ^ (((block1>>5)&0xffffffff) + self.k1)
            block0 &= 0xffffffff
            block1 += (((block0<<4)&0xffffffff) + self.k2) ^ (block0 + s) ^ (((block0>>5)&0xffffffff) + self.k3)
            block1 &= 0xffffffff
        return (block0, block1)

    def decryptBlocks(self, blocks):
        """Inverse of encryptBlocks; s starts at delta*32 = 0xC6EF3720."""
        block0 = blocks[0]
        block1 = blocks[1]
        s = 0xC6EF3720
        delta = 0x9e3779b9
        for i in range(32):
            block1 -= (((block0<<4)&0xFFFFFFFF) + self.k2) ^ (block0 + s) ^ (((block0>>5)&0xFFFFFFFF) + self.k3)
            block1 &= 0xffffffff
            block0 -= (((block1<<4)&0xFFFFFFFF) + self.k0) ^ (block1 + s) ^ (((block1>>5)&0xFFFFFFFF) + self.k1)
            block0 &= 0xffffffff
            s -= delta
            s &= 0xffffffff
        return (block0, block1)

    def encrypt(self, plaintext):
        """Encrypt a str or int; returns the ciphertext as a hex string."""
        charArr = []
        if isinstance(plaintext, str):
            # One two-digit hex string per character ordinal.
            charArr = [hex(ord(char))[2:] for char in plaintext]
        elif isinstance(plaintext,int):
            # Split the int's hex form into byte-sized chunks.
            charArr = [hex(plaintext)[i:i+2] for i in range(2, len(hex(plaintext))-1, 2)]
            if len(hex(plaintext))%2 !=0:
                charArr.append(hex(plaintext)[-1] + '0')
        charArr = self.padArr(charArr)
        #intArr consists of 32-bit ints
        intArr = [int(''.join(charArr[i:i+4]),16) for i in range(0, len(charArr), 4)]
        ciphertext = ''
        newIntArr = []
        for i in range(0, len(intArr), 2):
            blocks = self.encryptBlocks((intArr[i],intArr[i+1]))
            newIntArr.append(blocks[0])
            newIntArr.append(blocks[1])
            # rjust keeps each 32-bit word at exactly 8 hex digits.
            str0 = hex(blocks[0])[2:].rjust(8,'0')
            str1 = hex(blocks[1])[2:].rjust(8,'0')
            ciphertext += str0 + str1
        return ciphertext

    def decrypt(self, ciphertext):
        """Decrypt a hex-string ciphertext.

        Heuristic output format: if more than 80% of the decoded characters
        (ignoring up to 7 possible padding bytes) are ASCII letters, the
        plaintext is returned as text, otherwise as a '0x...' hex string.
        """
        # Split the hex ciphertext into 32-bit words.
        intArr = [int(ciphertext[i:i+8],16) for i in range(0,len(ciphertext), 8)]
        plaintext = '0x'
        plainCharArr = []
        for i in range(0, len(intArr), 2):
            blocks = self.decryptBlocks((intArr[i],intArr[i+1]))
            blocks = (blocks[0] << 32) | blocks[1]
            # NOTE(review): hex() drops leading zeros, so a block starting
            # with a zero byte yields fewer than 16 digits and misaligns the
            # per-character split below — verify against callers.
            blockhex = hex(blocks)[2:]
            plaintext += blockhex
            for i in range(0,16,2):
                plainCharArr.append(chr(int(blockhex[i:i+2],16)))
        count = 0
        # count alphabet letters in result of decryption
        for char in plainCharArr[:-7]: # up to 7 characters can be padded
            if char in string.ascii_letters:
                count +=1
        # if most of the result is alphabet letters, treat the result as a string
        # otherwise return it as a hexidecimal string
        if count > .8*(len(plainCharArr)-7):
            plaintext = ''.join(plainCharArr)
        return plaintext
|
988,191 | 5dd1cddae8f03b3f720e2ebc2c4bc4ae985955a6 | import glob
import os
import pandas as pd
import psycopg2
from sql_queries import *
def process_song_file(cursor, filepath):
    """Insert the artist and song records found in one song JSON file.

    :param cursor: Cursor used to execute statements in Postgres.
    :param filepath: Path to the song file to be processed.
    """
    record = pd.read_json(filepath, lines=True)

    # Artist first: the song row references artist_id.
    artist_columns = ['artist_id', 'artist_name', 'artist_location',
                      'artist_latitude', 'artist_longitude']
    cursor.execute(artist_table_insert, record[artist_columns].values[0].tolist())

    song_columns = ['song_id', 'title', 'artist_id', 'year', 'duration']
    cursor.execute(song_table_insert, record[song_columns].values[0].tolist())
def process_log_file(cursor, filepath):
    """
    Processes the data in a single log file: inserts time, user and
    songplay records derived from 'NextSong' events.
    :param cursor: Cursor used to execute statements in Postgres.
    :param filepath: Path to the log file to be processed.
    """
    def get_timestamp_data(df):
        # convert timestamp column to datetime
        timestamp = pd.to_datetime(df['ts'], unit='ms')
        # BUGFIX: Series.dt.week was deprecated in pandas 1.1 and removed in
        # pandas 2.0; isocalendar().week yields the same ISO week numbers.
        return (df['ts'].values,
                timestamp.dt.hour.values,
                timestamp.dt.day.values,
                timestamp.dt.isocalendar().week.values,
                timestamp.dt.month.values,
                timestamp.dt.year.values,
                timestamp.dt.weekday.values)

    # open log file
    df = pd.read_json(filepath, lines=True)

    # keep only song-play events
    df = df[df['page'] == 'NextSong']

    # insert time data records
    time_data = get_timestamp_data(df)
    column_labels = ('timestamp', 'hour', 'day', 'week', 'month', 'year', 'weekday')
    time_df = pd.DataFrame(data=dict(zip(column_labels, time_data)))
    for i, row in time_df.iterrows():
        cursor.execute(time_table_insert, list(row))

    # insert user records
    user_columns = ['userId', 'firstName', 'lastName', 'gender', 'level']
    user_df = df[user_columns]
    for i, row in user_df.iterrows():
        cursor.execute(user_table_insert, row)

    # insert songplay records
    for index, row in df.iterrows():
        # look up song_id and artist_id in the dimension tables; fall back
        # to NULLs when the song is not in the catalogue
        cursor.execute(song_select, (row.song, row.artist, row.length))
        results = cursor.fetchone()
        if results:
            song_id, artist_id = results
        else:
            song_id, artist_id = None, None
        songplay_data = (
            row['ts'], row['userId'], row['level'], song_id, artist_id, row['sessionId'], row['location'],
            row['userAgent'])
        cursor.execute(songplay_table_insert, songplay_data)
def process_data(cursor, connection, filepath, function):
    """Apply `function` to every *.json file under `filepath`, committing
    after each file and printing progress.

    :param cursor: Cursor used to interact with the database.
    :param connection: Connection to the database (committed per file).
    :param filepath: Directory tree containing the JSON files.
    :param function: Callable(cursor, path) that processes one file.
    """
    all_files = []
    for root, _dirs, _files in os.walk(filepath):
        all_files.extend(
            os.path.abspath(f) for f in glob.glob(os.path.join(root, '*.json'))
        )

    num_files = len(all_files)
    print('{} files found in {}'.format(num_files, filepath))

    for i, datafile in enumerate(all_files, 1):
        function(cursor, datafile)
        connection.commit()
        print('{}/{} files processed.'.format(i, num_files))
def main():
    """
    Populate sparkifydb with the song data under data/song_data and the
    log data under data/log_data.
    """
    connection = psycopg2.connect('host=127.0.0.1 dbname=sparkifydb user=student password=student')
    cursor = connection.cursor()
    for path, handler in (('data/song_data', process_song_file),
                          ('data/log_data', process_log_file)):
        process_data(cursor, connection, filepath=path, function=handler)
    connection.close()
# Run the full ETL when executed as a script.
if __name__ == '__main__':
    main()
|
988,192 | 0cecbd471679f50d43b4d58067f0614d0edfc70a | from . import feature_extraction
from . import prediction
from . import sequence_modeling
from . import transformation
# from recognition.minimal_text_recognition.modules import feature_extraction
# from recognition.minimal_text_recognition.modules import prediction
# from recognition.minimal_text_recognition.modules import sequence_modeling
# from recognition.minimal_text_recognition.modules import transformation
|
988,193 | 02de8f412a0c2b754745b1b776eee3f5e65fe749 | '''
Created on 02.09.2013
@author: Solonarv
Classes to represent data in a binary format.
Name is BDT = Binary Data Tags
'''
import gzip
from util import Enum
data_types = Enum(("END", "BYTE", "SHORT", "INT", "LONG", "STRING", "LIST", "COMPOUND",))
class BDTBase:
    """Abstract base for every kind of binary data tag (BDT)."""

    # Fixed encoded payload size in bytes; 0 means variable-length.
    enc_len = 0

    def __init__(self, tp):
        """Store the tag's type id (one of `data_types`)."""
        self.tp = tp

    def encode(self, compress=0):
        """Encode this tag to bytes.

        `compress` is a gzip compression level (1-9); the default 0 means
        no compression.
        """
        raw = bytes(self._encode())
        if not compress:
            return raw
        return gzip.compress(raw, compress)

    def _encode(self) -> bytearray:
        """Encode only the leading type byte; subclasses append payload."""
        return bytearray((self.tp,))

    @classmethod
    def decode(cls, data):
        """Decode one tag from `data`; byte 0 selects the concrete class."""
        tp = data[0]
        if tp == data_types.END:
            return BDTEnd()
        if tp == data_types.BYTE:
            return BDTByte(data[1])
        if tp == data_types.SHORT:
            return BDTShort.decode(data[1:])
        if tp == data_types.INT:
            return BDTInt.decode(data[1:])
        if tp == data_types.LONG:
            return BDTLong.decode(data[1:])
        if tp == data_types.STRING:
            return BDTString.decode(data[1:])
        if tp == data_types.LIST:
            return BDTList.decode(data[1:])
        if tp == data_types.COMPOUND:
            return BDTCompound.decode(data[1:])
class BDTCompound(BDTBase):
    """
    Tag that stores a String -> Tag mapping.
    It's encoded as an association list in binary form.
    """

    def __init__(self):
        super().__init__(data_types.COMPOUND)
        self._entries = {}

    def put_tag(self, key, tag):
        """
        Store a tag inside this compound tag
        """
        self._entries[key] = tag

    def put_byte(self, key, val):
        self.put_tag(key, BDTByte(val))

    def put_short(self, key, val):
        self.put_tag(key, BDTShort(val))

    def put_int(self, key, val):
        self.put_tag(key, BDTInt(val))

    def put_long(self, key, val):
        self.put_tag(key, BDTLong(val))

    def put_string(self, key, val):
        self.put_tag(key, BDTString(val))

    def get_tag(self, key):
        """
        Retrieves a tag from this compound tag
        """
        return self._entries[key]

    def get_integral(self, key):
        """
        Retrieves a byte, short, int or long from this compound tag
        (depending on what's stored)
        """
        tag = self.get_tag(key)
        if isinstance(tag, (BDTByte, BDTShort, BDTInt, BDTLong)):
            return tag.val
        else:
            raise KeyError("No integral value for key %s found in BDTCompound @%#x" % (key, id(self)))

    def get_string(self, key):
        """
        Retrieves a string from this compound tag
        """
        tag = self.get_tag(key)
        if isinstance(tag, BDTString):
            return tag.val
        else:
            raise KeyError("No string value for key %s found in BDTCompound @%#x" % (key, id(self)))

    def _encode(self) -> bytearray:
        # Layout: type byte, 4-byte big-endian entry count, then per entry:
        # 2-byte key length + key bytes, 4-byte value length, value bytes
        # (the value encoding keeps its own leading type byte).
        b = BDTBase._encode(self)
        b += bytearray(len(self._entries).to_bytes(4, 'big'))
        for k in self._entries:
            enc = self._entries[k]._encode()
            b += BDTString(k)._encode()[1:] + bytearray(len(enc).to_bytes(4, 'big')) + enc
        return b

    @classmethod
    def decode(cls, data) -> BDTBase:
        target = cls()
        ln = int.from_bytes(bytes(data[:4]), 'big')
        data = data[4:]
        for _ in range(ln):
            sl = int.from_bytes(data[:2], 'big')
            k = BDTString.decode(data[:sl + 2]).val
            data = data[sl + 2:]
            el = int.from_bytes(data[:4], 'big')
            # BUGFIX: skip the 4-byte value-length prefix before decoding;
            # the original passed data[:el+4], so the length bytes were
            # misread as the value's type byte.
            v = BDTBase.decode(data[4:el + 4])
            data = data[el + 4:]
            target.put_tag(k, v)
        # BUGFIX: the original never returned the decoded compound.
        return target
class BDTList(BDTBase):
    """
    Tag that stores a list of other tags of the same type.
    """

    def __init__(self, tp_id):
        super().__init__(data_types.LIST)
        self._entries = []
        self.target_tp = tp_id  # type id every stored tag must have

    def add_tag(self, tag):
        """Append `tag`, silently ignoring tags of the wrong type."""
        if tag.tp == self.target_tp:
            self._entries.append(tag)

    def __getitem__(self, *args, **kwargs):
        return self._entries.__getitem__(*args, **kwargs)

    def __delitem__(self, *args, **kwargs):
        return self._entries.__delitem__(*args, **kwargs)

    def __iter__(self, *args, **kwargs):
        return self._entries.__iter__(*args, **kwargs)

    def _encode(self) -> bytearray:
        # Layout: type byte, element type byte, then each element without
        # its own type byte; variable-length element types carry a 4-byte
        # length prefix, fixed-length ones are stored bare.
        b = BDTBase._encode(self)
        b += bytearray((self.target_tp,))
        # BUGFIX: the original wrote length prefixes for *fixed*-length
        # element types (enc_len > 0), while decode() below expects prefixes
        # only for variable-length ones — the condition was inverted.
        save_lengths = (tpid_to_class[self.target_tp].enc_len == 0)
        for e in self._entries:
            enc = e._encode()[1:]  # Strip type indicator from encoded entry
            if save_lengths:
                b += bytearray(len(enc).to_bytes(4, 'big'))
            b += enc
        return b

    @classmethod
    def decode(cls, data):
        ls = cls(data[0])
        data = data[1:]
        tar_class = tpid_to_class[ls.target_tp]
        enc_len = tar_class.enc_len
        while data:
            if enc_len > 0:
                # Fixed-length elements are stored back to back.
                dat = data[:enc_len]
                data = data[enc_len:]
            else:
                # Variable-length elements carry a 4-byte length prefix.
                l = int.from_bytes(data[:4], 'big')
                dat = data[4:l + 4]
                data = data[l + 4:]
            ls.add_tag(tar_class.decode(dat))
        # BUGFIX: the original never returned the decoded list.
        return ls
class BDTEnd(BDTBase):
    """Marker tag that terminates a tag compound or a list."""

    def __init__(self):
        super().__init__(data_types.END)

    def _encode(self) -> bytearray:
        # An END tag is just its type byte; no payload follows.
        return super()._encode()
class BDTIntegral(BDTBase):
    """
    Base class of the classes returned by BDTIntegral_ClassFactory:
    tags that store a big-endian integer value of some fixed byte length.
    """
def BDTIntegral_ClassFactory(int_len, tp_id, name=None):
    """
    Build a BDTIntegral subclass holding an `int_len`-byte integer value.

    :param int_len: payload size in bytes.
    :param tp_id: tag type id from `data_types`.
    :param name: class name; autogenerated when omitted.
    """
    if name is None:
        name = "BDTIntegral_len%i" % int_len
    doc = "\nTag that stores a %i-byte (%i bit) integer value.\n" % (int_len, int_len * 8)

    class result(BDTIntegral):
        __doc__ = doc

        def __init__(self, val):
            super().__init__(self.tp_id)
            # Values wrap modulo 2**(8*int_len), mimicking fixed-width ints.
            self.val = val % 256 ** self.int_len

        def _encode(self) -> bytearray:
            # BUGFIX: the byteorder argument was missing; int.to_bytes
            # requires it before Python 3.11, and decode() reads big-endian.
            return BDTBase._encode(self) + self.val.to_bytes(self.int_len, 'big')

        @classmethod
        def decode(cls, data) -> BDTBase:
            return cls(int.from_bytes(data, 'big'))

    result.__name__ = name
    result.int_len = int_len
    result.tp_id = tp_id
    result.enc_len = int_len
    return result
# Concrete fixed-width integer tag classes (all big-endian).
BDTByte = BDTIntegral_ClassFactory(1, data_types.BYTE, "BDTByte")
BDTShort = BDTIntegral_ClassFactory(2, data_types.SHORT, "BDTShort")
BDTInt = BDTIntegral_ClassFactory(4, data_types.INT, "BDTInt")
BDTLong = BDTIntegral_ClassFactory(8, data_types.LONG, "BDTLong")
class BDTString(BDTBase):
    """
    Tag that stores a string whose UTF-8 encoding fits a 2-byte length
    field (up to 65535 bytes).
    """

    def __init__(self, val):
        super().__init__(data_types.STRING)
        # BUGFIX: the length field is 2 bytes, so the maximum is 0xFFFF
        # (65535), not 0x10000 — a 65536-char string overflowed the length
        # field during encoding.
        self.val = str(val)[:0xFFFF]

    def _encode(self) -> bytearray:
        # BUGFIX: the length prefix must count encoded UTF-8 *bytes*; the
        # original wrote the character count, which desynchronised decode()
        # for any non-ASCII string.
        # NOTE(review): a near-limit string of multi-byte characters can
        # still exceed 0xFFFF bytes and overflow the 2-byte field.
        encoded = self.val.encode('utf8')
        return BDTBase._encode(self) \
            + bytearray(len(encoded).to_bytes(2, 'big')) \
            + bytearray(encoded)

    @classmethod
    def decode(cls, data):
        ln = int.from_bytes(data[:2], 'big')
        return BDTString(data[2:2 + ln].decode("utf8"))
tpid_to_class = [BDTEnd, BDTByte, BDTShort, BDTInt, BDTLong, BDTString, BDTList, BDTCompound] |
988,194 | c374ba396c9cc3f4baea010a235f5c4b09511f36 | from tkinter import *
from tkinter import messagebox
def closewindow():
    """Main-window close handler: refuse to close and nag the user instead."""
    messagebox.showinfo(title="警告",message="不要关闭,好好回答")
    return
def closeallwindow():
    """Destroy the root window, ending the program."""
    window.destroy()
def love():
    """Open the "me too" pop-up shown when the user clicks 喜欢 (like)."""
    love = Toplevel(window)
    love.geometry("300x100+520+200")
    love.title("好巧,我也是!")
    lable = Label(love,text="好巧,我也是",font=("微软雅黑",15))
    lable.pack()
    btn = Button(love,text="确定",width=10,height=2,command = closeallwindow)
    btn.pack()
    # Swallow the window-manager close event so only the 确定 (OK) button works.
    love.protocol("WM_DELETE_WINDOW",closelove)
def closelove():
    """No-op close handler: ignores the WM close event of the love window."""
    return
def nolove():
    """Open the "think it over" pop-up shown when the user clicks 不喜欢."""
    no_love = Toplevel(window)
    no_love.geometry("300x100+520+200")
    no_love.title("再考虑考虑呗!")
    lable = Label(no_love,text="再考虑考虑呗!",font=("微软雅黑",15))
    lable.pack()
    btn = Button(no_love,text="好的",width=10,height=2,command = no_love.destroy)
    btn.pack()
    # Closing via the window manager re-opens the pop-up (see closenolove).
    no_love.protocol("WM_DELETE_WINDOW",closenolove)
def closenolove():
    """Close handler that immediately re-opens the nag pop-up."""
    nolove()
# Root window setup for the prank "do you like me?" dialog.
window = Tk()
window.title("小姐姐你喜欢我吗?")
window.geometry("400x430+500+200")
# Intercept the close button so the question cannot simply be dismissed.
window.protocol("WM_DELETE_WINDOW",closewindow)
lable = Label(window,text="hey,小姐姐",font=("微软雅黑",13),fg = "red")
lable.grid(row = 0,column = 0)
lable1 = Label(window,text="你喜欢我吗?",font = ("微软雅黑",18))
lable1.grid(row = 1,column = 1,sticky = E)
# The image file must sit next to the script.
photo = PhotoImage(file="./biu.png")
imagelable = Label(window,image=photo)
imagelable.grid(row = 2,columnspan = 2)
butt = Button(window,text="喜欢",width = 15,height = 2,command = love)
butt.grid(row = 3,column = 0,sticky = W)
butt1 = Button(window,text="不喜欢",width = 10,command = nolove)
butt1.grid(row = 3,column = 1,sticky = E)
window.mainloop()
988,195 | 29ff6c32f177a5470a8547ea07371c2443dbd18e | from read_parms_input_lhs import *
from simulation_odemodel_sci import *
from plot_sci import *
from record_equilibrium import *
# Number of Latin-hypercube (LHS) partitions per parameter; tunable.
nparts = 1000
# Number of parameter samples to generate from the hypercube.
num_samples = 100
# Simulation time range, in days.
start = 0
end = 1000
# Read Parameters
beta_B,beta_D,beta_BD,b_C,b_I,phi_C,phi_I,gamma_C,gamma_I,\
delta_D,delta_B,m,q,k_n,k_i,x_D,x_B,y,z,rho_D,rho_B,\
alpha_D,alpha_D_F,alpha_D_X,alpha_D_M,alpha_D_H,alpha_B_F,alpha_B,alpha_B_X,alpha_B_M,alpha_B_H,\
alpha_H_F,alpha_H_X,alpha_H_M,alpha_H_T,alpha_F_H,alpha_X_H,alpha_M_H,\
a_B,a_D,b_B,b_D,epsilon_F,epsilon_T,epsilon_H,epsilon_M,epsilon_X,\
sigma_H,sigma_F_D_D,sigma_X_D_D,sigma_M_D_D,sigma_T_D_D,sigma_F_B_B,sigma_X_B_B,sigma_M_B_B,\
d_H_D,d_H_B,d_YDF_D,d_YDX_D,d_YDM_D,d_YDT_D,d_ZBF_B,d_ZBX_B,d_ZBM_B,\
pi_Dpu,pi_Bpu,mu_Dpu,mu_Bpu,eta_Dpu,eta_Bpu,pi_Dpr,pi_Bpr,mu_Dpr,mu_Bpr,eta_Dpr,eta_Bpr,\
pi_Dsu,pi_Bsu,mu_Dsu,mu_Bsu,eta_Dsu,eta_Bsu,pi_Dsr,pi_Bsr,mu_Dsr,mu_Bsr,eta_Dsr,eta_Bsr,\
pi_Ddu,pi_Bdu,mu_Ddu,mu_Bdu,eta_Ddu,eta_Bdu,pi_Ddr,pi_Bdr,mu_Ddr,mu_Bdr,eta_Ddr,eta_Bdr,\
zeta_Dsu,zeta_Dpu,zeta_Dsr,zeta_Dpr,theta_j_Dig,theta_n_Dig,aS_j_Dig,aS_n_Dig,aC_j_Dig,aC_n_Dig,aI_j_Dig,aI_n_Dig,\
N_Dig0, S_Dpu0, C_Dpu0, I_Dpu0, S_Bpu0, C_Bpu0, I_Bpu0, H_Dpu0, H_Bpu0, T_Dpu0, F_Dpu0, F_Bpu0, X_Dpu0, X_Bpu0, M_Dpu0, M_Bpu0,\
S_Dsu0, C_Dsu0, I_Dsu0, S_Bsu0, C_Bsu0, I_Bsu0, H_Dsu0, H_Bsu0, T_Dsu0, F_Dsu0, F_Bsu0, X_Dsu0, X_Bsu0, M_Dsu0, M_Bsu0,\
S_Ddu0, C_Ddu0, I_Ddu0, S_Bdu0, C_Bdu0, I_Bdu0, H_Ddu0, H_Bdu0, T_Ddu0, F_Ddu0, F_Bdu0, X_Ddu0, X_Bdu0, M_Ddu0, M_Bdu0,\
S_Dpr0, C_Dpr0, I_Dpr0, S_Bpr0, C_Bpr0, I_Bpr0, H_Dpr0, H_Bpr0, T_Dpr0, F_Dpr0, F_Bpr0, X_Dpr0, X_Bpr0, M_Dpr0, M_Bpr0,\
S_Dsr0, C_Dsr0, I_Dsr0, S_Bsr0, C_Bsr0, I_Bsr0, H_Dsr0, H_Bsr0, T_Dsr0, F_Dsr0, F_Bsr0, X_Dsr0, X_Bsr0, M_Dsr0, M_Bsr0,\
S_Ddr0, C_Ddr0, I_Ddr0, S_Bdr0, C_Bdr0, I_Bdr0, H_Ddr0, H_Bdr0, T_Ddr0, F_Ddr0, F_Bdr0, X_Ddr0, X_Bdr0, M_Ddr0, M_Bdr0,\
A,A_Dpu,A_Bpu,A_Dpr,A_Bpr,A_Dsu,A_Bsu,A_Dsr,A_Bsr,A_Ddu,A_Bdu,A_Ddr,A_Bdr,\
y0_u, y0_r = read_parms_input_lhs('parms_input_final.xlsx', nparts)
# Simulation
t, S_Dpu_list, C_Dpu_list, I_Dpu_list, S_Bpu_list, C_Bpu_list, I_Bpu_list, H_Dpu_list, H_Bpu_list, T_Dpu_list, F_Dpu_list, F_Bpu_list, X_Dpu_list, X_Bpu_list, M_Dpu_list, M_Bpu_list, S_Dsu_list, C_Dsu_list, I_Dsu_list, S_Bsu_list, C_Bsu_list, I_Bsu_list, H_Dsu_list, H_Bsu_list, T_Dsu_list, F_Dsu_list, F_Bsu_list, X_Dsu_list, X_Bsu_list, M_Dsu_list, M_Bsu_list, S_Ddu_list, C_Ddu_list, I_Ddu_list, S_Bdu_list, C_Bdu_list, I_Bdu_list, H_Ddu_list, H_Bdu_list, T_Ddu_list, F_Ddu_list, F_Bdu_list, X_Ddu_list, X_Bdu_list, M_Ddu_list, M_Bdu_list,\
S_Dpr_list, C_Dpr_list, I_Dpr_list, S_Bpr_list, C_Bpr_list, I_Bpr_list, H_Dpr_list, H_Bpr_list, T_Dpr_list, F_Dpr_list, F_Bpr_list, X_Dpr_list, X_Bpr_list, M_Dpr_list, M_Bpr_list, S_Dsr_list, C_Dsr_list, I_Dsr_list, S_Bsr_list, C_Bsr_list, I_Bsr_list, H_Dsr_list, H_Bsr_list, T_Dsr_list, F_Dsr_list, F_Bsr_list, X_Dsr_list, X_Bsr_list, M_Dsr_list, M_Bsr_list, S_Ddr_list, C_Ddr_list, I_Ddr_list, S_Bdr_list, C_Bdr_list, I_Bdr_list, H_Ddr_list, H_Bdr_list, T_Ddr_list, F_Ddr_list, F_Bdr_list, X_Ddr_list, X_Bdr_list, M_Ddr_list, M_Bdr_list =\
simulation_sci(nparts, start, end, num_samples, beta_B,beta_D,beta_BD,b_C,b_I,phi_C,phi_I,gamma_C,gamma_I,\
delta_D,delta_B,m,q,k_n,k_i,x_D,x_B,y,z,rho_D,rho_B,\
alpha_D,alpha_D_F,alpha_D_X,alpha_D_M,alpha_D_H,alpha_B_F,alpha_B,alpha_B_X,alpha_B_M,alpha_B_H,\
alpha_H_F,alpha_H_X,alpha_H_M,alpha_H_T,alpha_F_H,alpha_X_H,alpha_M_H,\
a_B,a_D,b_B,b_D,epsilon_F,epsilon_T,epsilon_H,epsilon_M,epsilon_X,\
sigma_H,sigma_F_D_D,sigma_X_D_D,sigma_M_D_D,sigma_T_D_D,sigma_F_B_B,sigma_X_B_B,sigma_M_B_B,\
d_H_D,d_H_B,d_YDF_D,d_YDX_D,d_YDM_D,d_YDT_D,d_ZBF_B,d_ZBX_B,d_ZBM_B,\
pi_Dpu,pi_Bpu,mu_Dpu,mu_Bpu,eta_Dpu,eta_Bpu,pi_Dpr,pi_Bpr,mu_Dpr,mu_Bpr,eta_Dpr,eta_Bpr,\
pi_Dsu,pi_Bsu,mu_Dsu,mu_Bsu,eta_Dsu,eta_Bsu,pi_Dsr,pi_Bsr,mu_Dsr,mu_Bsr,eta_Dsr,eta_Bsr,\
pi_Ddu,pi_Bdu,mu_Ddu,mu_Bdu,eta_Ddu,eta_Bdu,pi_Ddr,pi_Bdr,mu_Ddr,mu_Bdr,eta_Ddr,eta_Bdr,\
zeta_Dsu,zeta_Dpu,zeta_Dsr,zeta_Dpr,theta_j_Dig,theta_n_Dig,aS_j_Dig,aS_n_Dig,aC_j_Dig,aC_n_Dig,aI_j_Dig,aI_n_Dig,\
A,A_Dpu,A_Bpu,A_Dpr,A_Bpr,A_Dsu,A_Bsu,A_Dsr,A_Bsr,A_Ddu,A_Bdu,A_Ddr,A_Bdr,\
y0_u, y0_r)
# Calculate & Plot the mean values of compartments:
S_Dpu_mean, C_Dpu_mean, I_Dpu_mean, S_Bpu_mean, C_Bpu_mean, I_Bpu_mean, H_Dpu_mean, H_Bpu_mean, T_Dpu_mean, F_Dpu_mean, F_Bpu_mean, X_Dpu_mean, X_Bpu_mean, M_Dpu_mean, M_Bpu_mean, S_Dsu_mean, C_Dsu_mean, I_Dsu_mean, S_Bsu_mean, C_Bsu_mean, I_Bsu_mean, H_Dsu_mean, H_Bsu_mean, T_Dsu_mean, F_Dsu_mean, F_Bsu_mean, X_Dsu_mean, X_Bsu_mean, M_Dsu_mean, M_Bsu_mean, S_Ddu_mean, C_Ddu_mean, I_Ddu_mean, S_Bdu_mean, C_Bdu_mean, I_Bdu_mean, H_Ddu_mean, H_Bdu_mean, T_Ddu_mean, F_Ddu_mean, F_Bdu_mean, X_Ddu_mean, X_Bdu_mean, M_Ddu_mean, M_Bdu_mean,\
S_Dpr_mean, C_Dpr_mean, I_Dpr_mean, S_Bpr_mean, C_Bpr_mean, I_Bpr_mean, H_Dpr_mean, H_Bpr_mean, T_Dpr_mean, F_Dpr_mean, F_Bpr_mean, X_Dpr_mean, X_Bpr_mean, M_Dpr_mean, M_Bpr_mean, S_Dsr_mean, C_Dsr_mean, I_Dsr_mean, S_Bsr_mean, C_Bsr_mean, I_Bsr_mean, H_Dsr_mean, H_Bsr_mean, T_Dsr_mean, F_Dsr_mean, F_Bsr_mean, X_Dsr_mean, X_Bsr_mean, M_Dsr_mean, M_Bsr_mean, S_Ddr_mean, C_Ddr_mean, I_Ddr_mean, S_Bdr_mean, C_Bdr_mean, I_Bdr_mean, H_Ddr_mean, H_Bdr_mean, T_Ddr_mean, F_Ddr_mean, F_Bdr_mean, X_Ddr_mean, X_Bdr_mean, M_Ddr_mean, M_Bdr_mean = \
cal_sci_means(t, nparts, S_Dpu_list, C_Dpu_list, I_Dpu_list, S_Bpu_list, C_Bpu_list, I_Bpu_list, H_Dpu_list, H_Bpu_list, T_Dpu_list, F_Dpu_list, F_Bpu_list, X_Dpu_list, X_Bpu_list, M_Dpu_list, M_Bpu_list, S_Dsu_list, C_Dsu_list, I_Dsu_list, S_Bsu_list, C_Bsu_list, I_Bsu_list, H_Dsu_list, H_Bsu_list, T_Dsu_list, F_Dsu_list, F_Bsu_list, X_Dsu_list, X_Bsu_list, M_Dsu_list, M_Bsu_list, S_Ddu_list, C_Ddu_list, I_Ddu_list, S_Bdu_list, C_Bdu_list, I_Bdu_list, H_Ddu_list, H_Bdu_list, T_Ddu_list, F_Ddu_list, F_Bdu_list, X_Ddu_list, X_Bdu_list, M_Ddu_list, M_Bdu_list,\
S_Dpr_list, C_Dpr_list, I_Dpr_list, S_Bpr_list, C_Bpr_list, I_Bpr_list, H_Dpr_list, H_Bpr_list, T_Dpr_list, F_Dpr_list, F_Bpr_list, X_Dpr_list, X_Bpr_list, M_Dpr_list, M_Bpr_list, S_Dsr_list, C_Dsr_list, I_Dsr_list, S_Bsr_list, C_Bsr_list, I_Bsr_list, H_Dsr_list, H_Bsr_list, T_Dsr_list, F_Dsr_list, F_Bsr_list, X_Dsr_list, X_Bsr_list, M_Dsr_list, M_Bsr_list, S_Ddr_list, C_Ddr_list, I_Ddr_list, S_Bdr_list, C_Bdr_list, I_Bdr_list, H_Ddr_list, H_Bdr_list, T_Ddr_list, F_Ddr_list, F_Bdr_list, X_Ddr_list, X_Bdr_list, M_Ddr_list, M_Bdr_list)
## Delivery ward
plot_sci('D', S_Dpu_mean, S_Dpr_mean, S_Dsu_mean, S_Dsr_mean, S_Ddu_mean, S_Ddr_mean,\
C_Dpu_mean, C_Dpr_mean, C_Dsu_mean, C_Dsr_mean, C_Ddu_mean, C_Ddr_mean,\
I_Dpu_mean, I_Dpr_mean, I_Dsu_mean, I_Dsr_mean, I_Ddu_mean, I_Ddr_mean, t)
# Neonatal care
# plot_sci('B', S_Bpu_mean, S_Bpr_mean, S_Bsu_mean, S_Bsr_mean, S_Bdu_mean, S_Bdr_mean,\
# C_Bpu_mean, C_Bpr_mean, C_Bsu_mean, C_Bsr_mean, C_Bdu_mean, C_Bdr_mean,\
# I_Bpu_mean, I_Bpr_mean, I_Bsu_mean, I_Bsr_mean, I_Bdu_mean, I_Bdr_mean, t)
# find the final Equilibrium values & parms of compartments:
record_equilibrium('parms_output.xlsx', nparts, beta_B,beta_D,beta_BD,b_C,b_I,phi_C,phi_I,gamma_C,gamma_I,\
delta_D,delta_B,m,q,k_n,k_i,x_D,x_B,y,z,rho_D,rho_B,\
alpha_D,alpha_D_F,alpha_D_X,alpha_D_M,alpha_D_H,alpha_B_F,alpha_B,alpha_B_X,alpha_B_M,alpha_B_H,\
alpha_H_F,alpha_H_X,alpha_H_M,alpha_H_T,alpha_F_H,alpha_X_H,alpha_M_H,\
a_B,a_D,b_B,b_D,epsilon_F,epsilon_T,epsilon_H,epsilon_M,epsilon_X,\
sigma_H,sigma_F_D_D,sigma_X_D_D,sigma_M_D_D,sigma_T_D_D,sigma_F_B_B,sigma_X_B_B,sigma_M_B_B,\
d_H_D,d_H_B,d_YDF_D,d_YDX_D,d_YDM_D,d_YDT_D,d_ZBF_B,d_ZBX_B,d_ZBM_B,\
pi_Dpu,pi_Bpu,mu_Dpu,mu_Bpu,eta_Dpu,eta_Bpu,pi_Dpr,pi_Bpr,mu_Dpr,mu_Bpr,eta_Dpr,eta_Bpr,\
pi_Dsu,pi_Bsu,mu_Dsu,mu_Bsu,eta_Dsu,eta_Bsu,pi_Dsr,pi_Bsr,mu_Dsr,mu_Bsr,eta_Dsr,eta_Bsr,\
pi_Ddu,pi_Bdu,mu_Ddu,mu_Bdu,eta_Ddu,eta_Bdu,pi_Ddr,pi_Bdr,mu_Ddr,mu_Bdr,eta_Ddr,eta_Bdr,\
zeta_Dsu,zeta_Dpu,zeta_Dsr,zeta_Dpr,theta_j_Dig,theta_n_Dig,aS_j_Dig,aS_n_Dig,aC_j_Dig,aC_n_Dig,aI_j_Dig,aI_n_Dig,\
A,A_Dpu,A_Bpu,A_Dpr,A_Bpr,A_Dsu,A_Bsu,A_Dsr,A_Bsr,A_Ddu,A_Bdu,A_Ddr,A_Bdr,\
y0_u, y0_r,\
S_Dpu_list, C_Dpu_list, I_Dpu_list, S_Bpu_list, C_Bpu_list, I_Bpu_list, H_Dpu_list, H_Bpu_list, T_Dpu_list, F_Dpu_list, F_Bpu_list, X_Dpu_list, X_Bpu_list, M_Dpu_list, M_Bpu_list, S_Dsu_list, C_Dsu_list, I_Dsu_list, S_Bsu_list, C_Bsu_list, I_Bsu_list, H_Dsu_list, H_Bsu_list, T_Dsu_list, F_Dsu_list, F_Bsu_list, X_Dsu_list, X_Bsu_list, M_Dsu_list, M_Bsu_list, S_Ddu_list, C_Ddu_list, I_Ddu_list, S_Bdu_list, C_Bdu_list, I_Bdu_list, H_Ddu_list, H_Bdu_list, T_Ddu_list, F_Ddu_list, F_Bdu_list, X_Ddu_list, X_Bdu_list, M_Ddu_list, M_Bdu_list,\
S_Dpr_list, C_Dpr_list, I_Dpr_list, S_Bpr_list, C_Bpr_list, I_Bpr_list, H_Dpr_list, H_Bpr_list, T_Dpr_list, F_Dpr_list, F_Bpr_list, X_Dpr_list, X_Bpr_list, M_Dpr_list, M_Bpr_list, S_Dsr_list, C_Dsr_list, I_Dsr_list, S_Bsr_list, C_Bsr_list, I_Bsr_list, H_Dsr_list, H_Bsr_list, T_Dsr_list, F_Dsr_list, F_Bsr_list, X_Dsr_list, X_Bsr_list, M_Dsr_list, M_Bsr_list, S_Ddr_list, C_Ddr_list, I_Ddr_list, S_Bdr_list, C_Bdr_list, I_Bdr_list, H_Ddr_list, H_Bdr_list, T_Ddr_list, F_Ddr_list, F_Bdr_list, X_Ddr_list, X_Bdr_list, M_Ddr_list, M_Bdr_list)
|
988,196 | b9c04a44116fe00fbd882ea91d05f4dc4aea4221 | test = {
'name': 'Question 6',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # If this causes an error, write AssertionError
>>> check_strategy(always_roll(5)) == None
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> def fail_15_20(score, opponent_score):
... if score == 15 and opponent_score == 20:
... return 100
... return 5
>>> # If this causes an error, write AssertionError
>>> check_strategy(fail_15_20) == None
AssertionError
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> def fail_102_115(score, opponent_score):
... if score == 102 and opponent_score == 115:
... return 100
... return 5
>>> fail_102_115 == check_strategy(fail_102_115, 120)
AssertionError
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Make sure that you check all valid pairs of scores!
>>> # Scores can range from 0 to the goal score for both players.
>>> all_scores = set()
>>> def check_completeness(score, opponent_score):
... all_scores.add((score, opponent_score))
... return 5
>>> # Be specific about the error type (AssertionError, rather than Error)
>>> check_strategy(check_completeness)
>>> count = 0
>>> for score in range(100):
... for opponent_score in range(100):
... if (score, opponent_score) in all_scores:
... count += 1
>>> count
10000
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from hog import *
""",
'teardown': '',
'type': 'doctest'
}
]
}
|
988,197 | e1e4563bd1f922c07fd3877307676b154f6fc57b | #!/usr/bin/env python3
"""Extract keyboard shortcuts from an xmonad.hs and print a cheat sheet.

Scans the xmonad config for ``("M-...", command) -- comment`` key-binding
lines, collects shortcut -> explanation pairs (falling back to the command
text when no trailing ``--`` comment exists), and prints them word-wrapped
to 79 columns. Haskell ``\\``-line-continuations are re-joined first.
"""
import re
from pathlib import Path
from textwrap import wrap, indent

# xmonad config file to scan.
CONFIG = Path('/home/matthias/.xmonad/xmonad.hs')

RE_BINDING = re.compile(r'\("M-')            # line defines an M- shortcut
RE_KEY_SPLIT = re.compile(r'"\s*,\s')        # shortcut | command/comment
RE_COMMENT_SPLIT = re.compile(r'\)\s+--\s')  # command | trailing comment
RE_KEY_TIDY = re.compile(r'(,\s+)?\("')      # junk preceding the key combo


def parse_keymap(path):
    """Parse *path* and return ``(kmap, klen)``.

    ``kmap`` maps each shortcut key combination to its explanation (the
    trailing ``--`` comment, or the command itself when there is none);
    ``klen`` is the length of the longest key combination, for alignment.
    """
    kmap = {}
    klen = 0
    prev = None  # pending text from a line that ended with a backslash
    with open(path, 'r') as hndl:
        for line in hndl:
            line = line.strip()
            if line[:2] == '--':
                continue  # skip full-line Haskell comments
            if line[:1] == '\\' and prev is not None:
                # Re-join a backslash-continued pair into one logical line.
                line = prev + line[1:]
                prev = None
            if line[-1:] == '\\':
                prev = line[:-1]
            elif RE_BINDING.search(line):
                parts = RE_KEY_SPLIT.split(line)
                keyc = RE_KEY_TIDY.sub('', parts[0])
                halves = RE_COMMENT_SPLIT.split(parts[1])
                # Prefer the trailing comment; fall back to the command.
                expl = halves[1] if len(halves) > 1 else halves[0]
                klen = max(klen, len(keyc))
                kmap[keyc] = expl
    return kmap, klen


def print_cheatsheet(kmap, klen):
    """Pretty-print *kmap*, wrapping explanations to 79 columns."""
    title = 'XMONAD Shortcuts'
    print()
    print(title)
    print('=' * len(title))
    print()
    dlim = ' - '
    for skey, expl in kmap.items():
        lines = wrap(expl, 79 - len(dlim) - klen)
        print(skey.ljust(klen), dlim, lines[0], sep='')
        pfix = ' ' * (klen + len(dlim))
        for cont in lines[1:]:
            print(indent(cont, pfix))
    print()


def main():
    kmap, klen = parse_keymap(CONFIG)
    print_cheatsheet(kmap, klen)
    # Keep the terminal open until the user explicitly quits.
    while input("Press q to quit: ") != 'q':
        pass


if __name__ == '__main__':
    main()
|
988,198 | 0da1acc8d1500c6b4d7a8df03c59a0e47fb560d2 | from django.contrib import admin
from .models import City,Services,Hotel,Comments,Photos_outside,Photos_inside,Services_hotel
# Register your models here.
admin.site.register([City,Services,Hotel,Comments,Photos_outside,Photos_inside,Services_hotel]) |
988,199 | 7d466fc4840b3a17bfd7bd1a01ee733f9548220f | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUI.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 491)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 9, 781, 191))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lineEdit_1 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_1.setObjectName(_fromUtf8("lineEdit_1"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEdit_1)
self.label = QtGui.QLabel(self.horizontalLayoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.label_2 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.label_3 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_4)
self.label_5 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_5)
self.lineEdit_2 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEdit_2)
self.lineEdit_3 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_3.setObjectName(_fromUtf8("lineEdit_3"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.lineEdit_3)
self.lineEdit_4 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.lineEdit_4)
self.lineEdit_5 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_5.setObjectName(_fromUtf8("lineEdit_5"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.lineEdit_5)
self.horizontalLayout.addLayout(self.formLayout_2)
self.formLayout_3 = QtGui.QFormLayout()
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.lineEdit_6 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_6.setObjectName(_fromUtf8("lineEdit_6"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEdit_6)
self.label_6 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_6)
self.label_7 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_7)
self.label_8 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_8)
self.label_9 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_9)
self.label_10 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_10)
self.lineEdit_7 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_7.setObjectName(_fromUtf8("lineEdit_7"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEdit_7)
self.lineEdit_8 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_8.setObjectName(_fromUtf8("lineEdit_8"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.lineEdit_8)
self.lineEdit_9 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_9.setObjectName(_fromUtf8("lineEdit_9"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.lineEdit_9)
self.lineEdit_10 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_10.setObjectName(_fromUtf8("lineEdit_10"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.lineEdit_10)
self.label_21 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_21)
self.lineEdit_21 = QtGui.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit_21.setObjectName(_fromUtf8("lineEdit_21"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.FieldRole, self.lineEdit_21)
self.horizontalLayout.addLayout(self.formLayout_3)
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 310, 781, 141))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.formLayout_4 = QtGui.QFormLayout()
self.formLayout_4.setObjectName(_fromUtf8("formLayout_4"))
self.lineEdit_11 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_11.setText(_fromUtf8(""))
self.lineEdit_11.setObjectName(_fromUtf8("lineEdit_11"))
self.formLayout_4.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEdit_11)
self.label_11 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.formLayout_4.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_11)
self.label_12 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.formLayout_4.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_12)
self.label_13 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.formLayout_4.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_13)
self.label_14 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.formLayout_4.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_14)
self.label_15 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.formLayout_4.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_15)
self.lineEdit_12 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_12.setText(_fromUtf8(""))
self.lineEdit_12.setObjectName(_fromUtf8("lineEdit_12"))
self.formLayout_4.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEdit_12)
self.lineEdit_13 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_13.setText(_fromUtf8(""))
self.lineEdit_13.setObjectName(_fromUtf8("lineEdit_13"))
self.formLayout_4.setWidget(2, QtGui.QFormLayout.FieldRole, self.lineEdit_13)
self.lineEdit_14 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_14.setText(_fromUtf8(""))
self.lineEdit_14.setObjectName(_fromUtf8("lineEdit_14"))
self.formLayout_4.setWidget(3, QtGui.QFormLayout.FieldRole, self.lineEdit_14)
self.lineEdit_15 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_15.setText(_fromUtf8(""))
self.lineEdit_15.setObjectName(_fromUtf8("lineEdit_15"))
self.formLayout_4.setWidget(4, QtGui.QFormLayout.FieldRole, self.lineEdit_15)
self.horizontalLayout_2.addLayout(self.formLayout_4)
self.formLayout_5 = QtGui.QFormLayout()
self.formLayout_5.setObjectName(_fromUtf8("formLayout_5"))
self.lineEdit_16 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_16.setText(_fromUtf8(""))
self.lineEdit_16.setObjectName(_fromUtf8("lineEdit_16"))
self.formLayout_5.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEdit_16)
self.label_16 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.formLayout_5.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_16)
self.label_17 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.formLayout_5.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_17)
self.label_18 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.formLayout_5.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_18)
self.label_19 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.formLayout_5.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_19)
self.label_20 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.formLayout_5.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_20)
self.lineEdit_17 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_17.setText(_fromUtf8(""))
self.lineEdit_17.setObjectName(_fromUtf8("lineEdit_17"))
self.formLayout_5.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEdit_17)
self.lineEdit_18 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_18.setText(_fromUtf8(""))
self.lineEdit_18.setObjectName(_fromUtf8("lineEdit_18"))
self.formLayout_5.setWidget(2, QtGui.QFormLayout.FieldRole, self.lineEdit_18)
self.lineEdit_19 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_19.setText(_fromUtf8(""))
self.lineEdit_19.setObjectName(_fromUtf8("lineEdit_19"))
self.formLayout_5.setWidget(3, QtGui.QFormLayout.FieldRole, self.lineEdit_19)
self.lineEdit_20 = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
self.lineEdit_20.setText(_fromUtf8(""))
self.lineEdit_20.setObjectName(_fromUtf8("lineEdit_20"))
self.formLayout_5.setWidget(4, QtGui.QFormLayout.FieldRole, self.lineEdit_20)
self.horizontalLayout_2.addLayout(self.formLayout_5)
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(14, 272, 771, 31))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Winch Battery Calculator", None))
self.lineEdit_1.setText(_translate("MainWindow", "12", None))
self.label.setText(_translate("MainWindow", "Motor Nominal Voltage (Vnom)", None))
self.label_2.setText(_translate("MainWindow", "Motor Stall Torque at Vnom (Nm)", None))
self.label_3.setText(_translate("MainWindow", "Motor ESR (Ohm)", None))
self.label_4.setText(_translate("MainWindow", "Motor No Load Current at Vnom (A)", None))
self.label_5.setText(_translate("MainWindow", "Motor No Load RPM at Vnom (RPM)", None))
self.lineEdit_2.setText(_translate("MainWindow", "2.42", None))
self.lineEdit_3.setText(_translate("MainWindow", "0.091", None))
self.lineEdit_4.setText(_translate("MainWindow", "2.7", None))
self.lineEdit_5.setText(_translate("MainWindow", "5310", None))
self.lineEdit_6.setText(_translate("MainWindow", "0.06", None))
self.label_6.setText(_translate("MainWindow", "Spool Diameter (m)", None))
self.label_7.setText(_translate("MainWindow", "Input Voltage (V)", None))
self.label_8.setText(_translate("MainWindow", "PWM Duty Cycle (%)", None))
self.label_9.setText(_translate("MainWindow", "Battery Capacity (Ah)", None))
self.label_10.setText(_translate("MainWindow", "Motor Torque Load (%)", None))
self.lineEdit_7.setText(_translate("MainWindow", "12", None))
self.lineEdit_8.setText(_translate("MainWindow", "100", None))
self.lineEdit_9.setText(_translate("MainWindow", "80", None))
self.lineEdit_10.setText(_translate("MainWindow", "13", None))
self.label_21.setText(_translate("MainWindow", "Gear Ratio", None))
self.lineEdit_21.setText(_translate("MainWindow", "256", None))
self.label_11.setText(_translate("MainWindow", "Max Torque Available (Nm)", None))
self.label_12.setText(_translate("MainWindow", "Load on Output Shaft (Nm)", None))
self.label_13.setText(_translate("MainWindow", "Effective Motor Voltage (V)", None))
self.label_14.setText(_translate("MainWindow", "Input Current (A)", None))
self.label_15.setText(_translate("MainWindow", "Runtime (hours)", None))
self.label_16.setText(_translate("MainWindow", "Power In (W)", None))
self.label_17.setText(_translate("MainWindow", "Power Out (W)", None))
self.label_18.setText(_translate("MainWindow", "Efficiency (%)", None))
self.label_19.setText(_translate("MainWindow", "Shaft RPM (RPM)", None))
self.label_20.setText(_translate("MainWindow", "Travel Speed (km/h)", None))
self.pushButton.setText(_translate("MainWindow", "Calculate", None))
if __name__ == "__main__":
    import sys
    # Standalone preview: build the generated UI in a bare QMainWindow
    # and run the Qt event loop until the window is closed.
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.