text stringlengths 38 1.54M |
|---|
from django.contrib import admin
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Route table for the customer-management app: admin auth/CRUD views,
# user signup/login/CRUD, product entry and sharing, notifications,
# feed, and per-contact messaging.
# NOTE(review): 'userauth' and 'userlogout' are the only routes without a
# trailing slash — confirm whether APPEND_SLASH is relied on or this is an
# oversight.
urlpatterns = [
    path('admin/', views.adminview, name='adminview'),
    path('adminauth/', views.authenticateadmin, name='adminauth'),
    path('adminhome/', views.adminhome, name='adminhome'),
    path('adminlogout/', views.logoutadmin, name='adminlogout'),
    path('addingcustomers/', views.addcustomer, name='addingcustomers'),
    path('deletecustomer/<int:customerpk>/', views.deletecustomer, name='deletecustomer'),
    path('', views.index, name='index'),
    path('signings/', views.signings, name='signings'),
    path('signup/', views.signup, name='signup'),
    path('login/', views.login, name='login'),
    path('userlogin/', views.usersignin, name='userlogin'),
    path('usersignup/', views.usersignup, name='usersignup'),
    path('userhome/', views.userhome, name='userhome'),
    path('userauth', views.authenticateuser, name='userauth'),
    path('userlogout', views.userlogout, name='userlogout'),
    path('deleteuser/<int:useridpk>/', views.deleteuser, name='deleteuser'),
    path('edituser/<int:customerpk>/', views.edituser, name='edituser'),
    path('editting/<int:customerpk>/', views.editting, name='editting'),
    path('newentry/', views.takingphoto, name='newentry'),
    path('productentry/', views.productentry, name='productentry'),
    path('deleteproduct/<int:productidpk>/', views.deleteproduct, name='deleteproduct'),
    path('shareproduct/<int:productidpk>/', views.shareproduct, name='shareproduct'),
    path('notification/', views.notification, name='notification'),
    path('feed/', views.feed, name='feed'),
    path('contact/<str:contactto>/', views.contact, name='contact'),
    path('storemsg/<str:contactto>/', views.storemsg, name='storemsg'),
    path('notifytext/', views.notifytext, name='notifytext'),
]
# Development media serving was disabled; uncomment to re-enable.
#urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
##Arthur A. Burkey III
##Python Drill: PyDrill_scripting_27_idle
##Title: Daily File Transfer scripting project - Python 2.7 - IDLE
##Scenario: Your company's users create or edit a collection of text files
##throughout the day. These text files represent data about customer
##orders.
##Once per day, any files that are new, or that were edited within the
##previous 24 hours, must be sent to the home office. To facilitate this,
##these new or updated files need to be copied to a specific 'destination'
##folder on a computer, so that a special file transfer program can grab
##them and transfer them to the home office.
##The process of figuring out which files are new or recently edited, and
##copying them to the 'destination' folder, is currently being done
##manually. This is very expensive in terms of manpower.
##You have been asked to create a script that will automate this task,
##saving the company a lot of money over the long term.
##Guidelines:
##You should create two folders; one to hold the files that get created or
##modified throughout the day, and another to receive the folders that your
##script determines should be copied over daily.
##To aid in your development efforts, you should create .txt files to add
##to the first folder, using Notepad or similar program. You should also
##copy some older text files in there if you like.
##You should use files
##that you can edit, so that you can control whether they are meant to be
##detected as 'modified in the last 24 hours' by your program.
import datetime
import shutil
import os, sys
import os.path, time

# Folder users edit during the day, and the folder the transfer program
# picks files up from (see the scenario description above).
src = "C:/Users/Student/Desktop/Python/"
dst = "C:/Users/Student/Desktop/Python/copyOver/"


def modification_date(src_dir=None, dst_dir=None, window_seconds=86400):
    """Copy recently modified .txt files from src_dir to dst_dir.

    A file qualifies when its mtime falls within the last window_seconds
    (default 24 hours, matching the daily transfer requirement).  Every
    qualifying file is echoed to stdout like the original script did.

    Parameters default to the module-level src/dst folders so the original
    zero-argument call keeps working.  Returns the list of copied names.
    """
    src_dir = src if src_dir is None else src_dir
    dst_dir = dst if dst_dir is None else dst_dir
    cutoff = time.time() - window_seconds
    copied = []
    for name in os.listdir(src_dir):
        path = os.path.join(src_dir, name)
        mtime = os.path.getmtime(path)
        if mtime > cutoff:
            print(name)
            print(mtime)
            if name.endswith(".txt"):
                # BUG FIX: the original passed the bare file name to
                # shutil.copy, which only worked when the CWD happened to
                # be src; copy the full source path instead.
                shutil.copy(path, dst_dir)
                copied.append(name)
    return copied


if __name__ == "__main__":
    # Guarded so importing this module does not touch the hardcoded paths.
    print(time.time())
    print('BEFORE copy operation in Source Dir: ')
    print(os.listdir(src))
    modification_date()
    print('')
    print('AFTER copy operations in Destination Dir: ')
    # BUG FIX: re-list the destination AFTER copying; the original printed
    # a listing captured before any copies ran, so new files never showed.
    print(os.listdir(dst))
|
import re
import numpy as np
import pandas as pd
import pytest
from woodwork.datacolumn import DataColumn
from woodwork.exceptions import ColumnNameMismatchWarning, DuplicateTagsWarning
from woodwork.logical_types import (
Categorical,
CountryCode,
Datetime,
Double,
Integer,
NaturalLanguage,
Ordinal,
SubRegionCode,
ZIPCode
)
from woodwork.tests.testing_utils import to_pandas
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def test_datacolumn_init(sample_series):
    """Default init infers Categorical, keeps data/name, and has no tags."""
    data_col = DataColumn(sample_series, use_standard_tags=False)
    # Koalas doesn't support category dtype
    if not (ks and isinstance(sample_series, ks.Series)):
        sample_series = sample_series.astype('category')
    pd.testing.assert_series_equal(to_pandas(data_col.to_series()), to_pandas(sample_series))
    assert data_col.name == sample_series.name
    assert data_col.logical_type == Categorical
    assert data_col.semantic_tags == set()


def test_datacolumn_init_with_logical_type(sample_series):
    """Logical type may be a class, a snake_case string, or a CamelCase string."""
    data_col = DataColumn(sample_series, NaturalLanguage)
    assert data_col.logical_type == NaturalLanguage
    assert data_col.semantic_tags == set()
    data_col = DataColumn(sample_series, "natural_language")
    assert data_col.logical_type == NaturalLanguage
    assert data_col.semantic_tags == set()
    data_col = DataColumn(sample_series, "NaturalLanguage")
    assert data_col.logical_type == NaturalLanguage
    assert data_col.semantic_tags == set()


def test_datacolumn_init_with_semantic_tags(sample_series):
    """User-supplied tags are stored as a set."""
    semantic_tags = ['tag1', 'tag2']
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    assert data_col.semantic_tags == set(semantic_tags)


def test_datacolumn_init_wrong_series():
    """Non-series inputs (list, set) raise TypeError."""
    error = 'Series must be one of: pandas.Series, dask.Series, koalas.Series, numpy.ndarray, or pandas.ExtensionArray'
    with pytest.raises(TypeError, match=error):
        DataColumn([1, 2, 3, 4])
    with pytest.raises(TypeError, match=error):
        DataColumn({1, 2, 3, 4})
def test_datacolumn_init_with_name(sample_series, sample_datetime_series):
    """An explicit name overrides the series name, with a mismatch warning."""
    name = 'sample_series'
    changed_name = 'changed_name'
    dc_use_series_name = DataColumn(sample_series)
    assert dc_use_series_name.name == name
    assert dc_use_series_name.to_series().name == name
    warning = 'Name mismatch between sample_series and changed_name. DataColumn and underlying series name are now changed_name'
    with pytest.warns(ColumnNameMismatchWarning, match=warning):
        dc_use_input_name = DataColumn(sample_series, name=changed_name)
    assert dc_use_input_name.name == changed_name
    assert dc_use_input_name.to_series().name == changed_name
    warning = 'Name mismatch between sample_datetime_series and changed_name. DataColumn and underlying series name are now changed_name'
    with pytest.warns(ColumnNameMismatchWarning, match=warning):
        dc_with_ltype_change = DataColumn(sample_datetime_series, name=changed_name)
    assert dc_with_ltype_change.name == changed_name
    assert dc_with_ltype_change.to_series().name == changed_name


def test_datacolumn_inity_with_falsy_name(sample_series):
    """Falsy-but-not-None names (e.g. 0) are honored, not discarded."""
    # NOTE(review): "inity" looks like a typo for "init"; renaming would
    # change the collected test id, so it is left as-is here.
    falsy_name = 0
    warning = 'Name mismatch between sample_series and 0. DataColumn and underlying series name are now 0'
    with pytest.warns(ColumnNameMismatchWarning, match=warning):
        dc_falsy_name = DataColumn(sample_series.copy(), name=falsy_name)
    assert dc_falsy_name.name == falsy_name
    assert dc_falsy_name.to_series().name == falsy_name


def test_datacolumn_init_with_extension_array():
    """pandas ExtensionArrays are accepted and converted to equivalent series."""
    series_categories = pd.Series([1, 2, 3], dtype='category')
    extension_categories = pd.Categorical([1, 2, 3])
    data_col = DataColumn(extension_categories)
    series = data_col.to_series()
    assert series.equals(series_categories)
    assert series.name is None
    assert data_col.name is None
    assert data_col.dtype == 'category'
    assert data_col.logical_type == Categorical
    series_ints = pd.Series([1, 2, None, 4], dtype='Int64')
    extension_ints = pd.arrays.IntegerArray(np.array([1, 2, 3, 4], dtype="int64"), mask=np.array([False, False, True, False]))
    data_col_with_name = DataColumn(extension_ints, name='extension')
    series = data_col_with_name.to_series()
    assert series.equals(series_ints)
    assert series.name == 'extension'
    assert data_col_with_name.name == 'extension'
    series_strs = pd.Series([1, 2, None, 4], dtype='string')
    data_col_different_ltype = DataColumn(extension_ints, logical_type='NaturalLanguage')
    series = data_col_different_ltype.to_series()
    assert series.equals(series_strs)
    assert data_col_different_ltype.logical_type == NaturalLanguage
    assert data_col_different_ltype.dtype == 'string'


def test_datacolumn_init_with_numpy_array():
    """numpy arrays are accepted; dtype follows the (given or inferred) ltype."""
    numpy_array = np.array([1, 2, 3, 4])
    expected_series = pd.Series([1, 2, 3, 4], dtype='Int64')
    dc = DataColumn(numpy_array)
    assert dc.name is None
    assert dc.logical_type == Integer
    assert dc.semantic_tags == {'numeric'}
    assert dc.dtype == 'Int64'
    assert dc._series.equals(expected_series)
    dc = DataColumn(numpy_array, logical_type='NaturalLanguage', name='test_col')
    expected_series.name = 'test_col'
    assert dc.name == 'test_col'
    assert dc.logical_type == NaturalLanguage
    assert dc.semantic_tags == set()
    assert dc.dtype == 'string'
    assert dc._series.equals(expected_series.astype('string'))
def test_datacolumn_with_alternate_semantic_tags_input(sample_series):
    """Tags may be a single string or a set, not only a list."""
    semantic_tags = 'custom_tag'
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    assert data_col.semantic_tags == {'custom_tag'}
    semantic_tags = {'custom_tag', 'numeric'}
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    assert data_col.semantic_tags == semantic_tags


def test_invalid_logical_type(sample_series):
    """Non-LogicalType classes and unknown type strings are rejected."""
    error_message = "Invalid logical type specified for 'sample_series'"
    with pytest.raises(TypeError, match=error_message):
        DataColumn(sample_series, int)
    error_message = "String naturalllanguage is not a valid logical type"
    with pytest.raises(ValueError, match=error_message):
        DataColumn(sample_series, 'naturalllanguage')


def test_semantic_tag_errors(sample_series):
    """Invalid semantic_tags containers/members raise TypeError."""
    error_message = "semantic_tags must be a string, set or list"
    with pytest.raises(TypeError, match=error_message):
        DataColumn(sample_series, semantic_tags=int)
    error_message = "semantic_tags must be a string, set or list"
    with pytest.raises(TypeError, match=error_message):
        DataColumn(sample_series, semantic_tags={'index': {}, 'time_index': {}})
    error_message = "semantic_tags must contain only strings"
    with pytest.raises(TypeError, match=error_message):
        DataColumn(sample_series, semantic_tags=['index', 1])


def test_datacolumn_description(sample_series):
    """Description can be set at init and reassigned afterwards."""
    column_description = "custom description"
    data_col = DataColumn(sample_series, description=column_description)
    assert data_col.description == column_description
    new_description = "updated description text"
    data_col.description = new_description
    assert data_col.description == new_description


def test_datacolumn_description_error(sample_series):
    """Non-string descriptions raise TypeError."""
    err_msg = "Column description must be a string"
    with pytest.raises(TypeError, match=err_msg):
        DataColumn(sample_series, description=123)


def test_datacolumn_repr(sample_series):
    """repr includes name, physical type, logical type and tags."""
    data_col = DataColumn(sample_series, use_standard_tags=False)
    # Koalas doesn't support categorical
    if ks and isinstance(sample_series, ks.Series):
        dtype = 'object'
    else:
        dtype = 'category'
    assert data_col.__repr__() == f'<DataColumn: sample_series (Physical Type = {dtype}) ' \
        '(Logical Type = Categorical) (Semantic Tags = set())>'
def test_set_semantic_tags(sample_series):
    """set_semantic_tags returns a new column with replaced tags."""
    semantic_tags = {'tag1', 'tag2'}
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    assert data_col.semantic_tags == semantic_tags
    new_tags = ['new_tag']
    new_col = data_col.set_semantic_tags(new_tags)
    assert new_col is not data_col
    assert new_col.semantic_tags == set(new_tags)


def test_set_semantic_tags_with_index(sample_series):
    """The 'index' tag survives set_semantic_tags unless retain_index_tags=False."""
    semantic_tags = {'tag1', 'tag2'}
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    data_col._set_as_index()
    assert data_col.semantic_tags == {'tag1', 'tag2', 'index'}
    new_tags = ['new_tag']
    new_col = data_col.set_semantic_tags(new_tags)
    assert new_col.semantic_tags == {'index', 'new_tag'}
    new_col2 = new_col.set_semantic_tags(new_tags, retain_index_tags=False)
    assert new_col2.semantic_tags == {'new_tag'}


def test_set_semantic_tags_with_time_index(sample_datetime_series):
    """Same retention behavior for the 'time_index' tag."""
    semantic_tags = {'tag1', 'tag2'}
    data_col = DataColumn(sample_datetime_series, semantic_tags=semantic_tags, use_standard_tags=False)
    data_col._set_as_time_index()
    assert data_col.semantic_tags == {'tag1', 'tag2', 'time_index'}
    new_tags = ['new_tag']
    new_col = data_col.set_semantic_tags(new_tags)
    assert new_col.semantic_tags == {'time_index', 'new_tag'}
    new_col2 = new_col.set_semantic_tags(new_tags, retain_index_tags=False)
    assert new_col2.semantic_tags == {'new_tag'}


def test_adds_numeric_standard_tag():
    """Numeric logical types add the 'numeric' standard tag."""
    series = pd.Series([1, 2, 3])
    semantic_tags = 'custom_tag'
    logical_types = [Integer, Double]
    for logical_type in logical_types:
        data_col = DataColumn(series, logical_type=logical_type, semantic_tags=semantic_tags)
        assert data_col.semantic_tags == {'custom_tag', 'numeric'}


def test_adds_category_standard_tag():
    """Category-like logical types add the 'category' standard tag."""
    series = pd.Series([1, 2, 3])
    semantic_tags = 'custom_tag'
    logical_types = [Categorical, CountryCode, Ordinal(order=(1, 2, 3)), SubRegionCode, ZIPCode]
    for logical_type in logical_types:
        data_col = DataColumn(series, logical_type=logical_type, semantic_tags=semantic_tags)
        assert data_col.semantic_tags == {'custom_tag', 'category'}


def test_does_not_add_standard_tags():
    """use_standard_tags=False suppresses standard tags."""
    series = pd.Series([1, 2, 3])
    semantic_tags = 'custom_tag'
    data_col = DataColumn(series,
                          logical_type=Double,
                          semantic_tags=semantic_tags,
                          use_standard_tags=False)
    assert data_col.semantic_tags == {'custom_tag'}


def test_add_custom_tags(sample_series):
    """add_semantic_tags accepts str, list, and set; each call returns a new column."""
    semantic_tags = 'initial_tag'
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    new_col = data_col.add_semantic_tags('string_tag')
    assert new_col is not data_col
    assert new_col.semantic_tags == {'initial_tag', 'string_tag'}
    new_col2 = new_col.add_semantic_tags(['list_tag'])
    assert new_col2.semantic_tags == {'initial_tag', 'string_tag', 'list_tag'}
    new_col3 = new_col2.add_semantic_tags({'set_tag'})
    assert new_col3.semantic_tags == {'initial_tag', 'string_tag', 'list_tag', 'set_tag'}


def test_warns_on_setting_duplicate_tag(sample_series):
    """Re-adding existing tags emits exactly one DuplicateTagsWarning."""
    semantic_tags = ['first_tag', 'second_tag']
    data_col = DataColumn(sample_series, semantic_tags=semantic_tags, use_standard_tags=False)
    expected_message = "Semantic tag(s) 'first_tag, second_tag' already present on column 'sample_series'"
    with pytest.warns(DuplicateTagsWarning) as record:
        data_col.add_semantic_tags(['first_tag', 'second_tag'])
    assert len(record) == 1
    assert record[0].message.args[0] == expected_message
def test_set_logical_type_with_standard_tags(sample_series):
    """Changing ltype resets tags to the new type's standard tags."""
    data_col = DataColumn(sample_series,
                          logical_type=NaturalLanguage,
                          semantic_tags='original_tag',
                          use_standard_tags=True)
    new_col = data_col.set_logical_type(Categorical)
    assert isinstance(new_col, DataColumn)
    assert new_col is not data_col
    assert new_col.logical_type == Categorical
    assert new_col.semantic_tags == {'category'}


def test_set_logical_type_without_standard_tags(sample_series):
    """With standard tags off, changing ltype clears tags entirely."""
    data_col = DataColumn(sample_series,
                          logical_type=NaturalLanguage,
                          semantic_tags='original_tag',
                          use_standard_tags=False)
    new_col = data_col.set_logical_type(Categorical)
    assert isinstance(new_col, DataColumn)
    assert new_col is not data_col
    assert new_col.logical_type == Categorical
    assert new_col.semantic_tags == set()


def test_set_logical_type_retains_index_tag(sample_series):
    """'index' survives an ltype change unless retain_index_tags=False."""
    data_col = DataColumn(sample_series,
                          logical_type=NaturalLanguage,
                          semantic_tags='original_tag',
                          use_standard_tags=False)
    data_col._set_as_index()
    assert data_col.semantic_tags == {'index', 'original_tag'}
    new_col = data_col.set_logical_type(Categorical)
    assert new_col.semantic_tags == {'index'}
    new_col = data_col.set_logical_type(Categorical, retain_index_tags=False)
    assert new_col.semantic_tags == set()


def test_set_logical_type_retains_time_index_tag(sample_datetime_series):
    """Same retention behavior for 'time_index' on an ltype change."""
    data_col = DataColumn(sample_datetime_series,
                          logical_type=Datetime,
                          semantic_tags='original_tag',
                          use_standard_tags=False)
    data_col._set_as_time_index()
    assert data_col.semantic_tags == {'time_index', 'original_tag'}
    new_col = data_col.set_logical_type(Categorical)
    assert new_col.semantic_tags == {'time_index'}
    new_col = data_col.set_logical_type(Categorical, retain_index_tags=False)
    assert new_col.semantic_tags == set()
def test_reset_semantic_tags_with_standard_tags(sample_series):
    """reset restores the logical type's standard tags."""
    semantic_tags = 'initial_tag'
    data_col = DataColumn(sample_series,
                          semantic_tags=semantic_tags,
                          logical_type=Categorical,
                          use_standard_tags=True)
    new_col = data_col.reset_semantic_tags()
    assert new_col is not data_col
    assert new_col.semantic_tags == Categorical.standard_tags


def test_reset_semantic_tags_without_standard_tags(sample_series):
    """reset yields an empty tag set when standard tags are off."""
    semantic_tags = 'initial_tag'
    data_col = DataColumn(sample_series,
                          semantic_tags=semantic_tags,
                          use_standard_tags=False)
    new_col = data_col.reset_semantic_tags()
    assert new_col is not data_col
    assert new_col.semantic_tags == set()


def test_reset_semantic_tags_with_index(sample_series):
    """'index' is kept only when retain_index_tags=True."""
    semantic_tags = 'initial_tag'
    data_col = DataColumn(sample_series,
                          semantic_tags=semantic_tags,
                          use_standard_tags=False)
    data_col._set_as_index()
    new_col = data_col.reset_semantic_tags(retain_index_tags=True)
    assert new_col.semantic_tags == {'index'}
    new_col = data_col.reset_semantic_tags()
    assert new_col.semantic_tags == set()


def test_reset_semantic_tags_with_time_index(sample_datetime_series):
    """'time_index' is kept only when retain_index_tags=True."""
    semantic_tags = 'initial_tag'
    data_col = DataColumn(sample_datetime_series,
                          semantic_tags=semantic_tags,
                          use_standard_tags=False)
    data_col._set_as_time_index()
    new_col = data_col.reset_semantic_tags(retain_index_tags=True)
    assert new_col.semantic_tags == {'time_index'}
    new_col = data_col.reset_semantic_tags()
    assert new_col.semantic_tags == set()


def test_remove_semantic_tags(sample_series):
    """remove accepts str, list, and set forms of the same tag."""
    tags_to_remove = [
        'tag1',
        ['tag1'],
        {'tag1'}
    ]
    data_col = DataColumn(sample_series,
                          semantic_tags=['tag1', 'tag2'],
                          use_standard_tags=False)
    for tag in tags_to_remove:
        new_col = data_col.remove_semantic_tags(tag)
        assert new_col is not data_col
        assert new_col.semantic_tags == {'tag2'}


def test_remove_standard_semantic_tag(sample_series):
    """Removing a standard tag warns only when use_standard_tags is True."""
    # Check that warning is raised if use_standard_tags is True - tag should be removed
    data_col = DataColumn(sample_series,
                          logical_type=Categorical,
                          semantic_tags='tag1',
                          use_standard_tags=True)
    expected_message = "Removing standard semantic tag(s) 'category' from column 'sample_series'"
    with pytest.warns(UserWarning) as record:
        new_col = data_col.remove_semantic_tags(['tag1', 'category'])
    assert len(record) == 1
    assert record[0].message.args[0] == expected_message
    assert new_col.semantic_tags == set()
    # Check that warning is not raised if use_standard_tags is False - tag should be removed
    data_col = DataColumn(sample_series,
                          logical_type=Categorical,
                          semantic_tags=['category', 'tag1'],
                          use_standard_tags=False)
    with pytest.warns(None) as record:
        new_col = data_col.remove_semantic_tags(['tag1', 'category'])
    assert len(record) == 0
    assert new_col.semantic_tags == set()


def test_remove_semantic_tags_raises_error_with_invalid_tag(sample_series):
    """Removing an absent tag raises LookupError."""
    data_col = DataColumn(sample_series,
                          semantic_tags='tag1')
    error_msg = re.escape("Semantic tag(s) 'invalid_tagname' not present on column 'sample_series'")
    with pytest.raises(LookupError, match=error_msg):
        data_col.remove_semantic_tags('invalid_tagname')
def test_raises_error_setting_index_tag_directly(sample_series):
    """'index' cannot be assigned via tags; DataTable.set_index must be used."""
    error_msg = re.escape("Cannot add 'index' tag directly. To set a column as the index, "
                          "use DataTable.set_index() instead.")
    with pytest.raises(ValueError, match=error_msg):
        DataColumn(sample_series, semantic_tags='index')
    data_col = DataColumn(sample_series)
    with pytest.raises(ValueError, match=error_msg):
        data_col.add_semantic_tags('index')
    with pytest.raises(ValueError, match=error_msg):
        data_col.set_semantic_tags('index')


def test_raises_error_setting_time_index_tag_directly(sample_series):
    """Same guard for the 'time_index' tag."""
    error_msg = re.escape("Cannot add 'time_index' tag directly. To set a column as the time index, "
                          "use DataTable.set_time_index() instead.")
    with pytest.raises(ValueError, match=error_msg):
        DataColumn(sample_series, semantic_tags='time_index')
    data_col = DataColumn(sample_series)
    with pytest.raises(ValueError, match=error_msg):
        data_col.add_semantic_tags('time_index')
    with pytest.raises(ValueError, match=error_msg):
        data_col.set_semantic_tags('time_index')


def test_set_as_index(sample_series):
    """The private _set_as_index hook adds the 'index' tag in place."""
    data_col = DataColumn(sample_series)
    data_col._set_as_index()
    assert 'index' in data_col.semantic_tags


def test_set_as_time_index(sample_series):
    """The private _set_as_time_index hook adds the 'time_index' tag in place."""
    data_col = DataColumn(sample_series)
    data_col._set_as_time_index()
    assert 'time_index' in data_col.semantic_tags


def test_to_series(sample_series):
    """to_series returns the underlying series object itself."""
    data_col = DataColumn(sample_series)
    series = data_col.to_series()
    assert series is data_col._series
    pd.testing.assert_series_equal(to_pandas(series), to_pandas(data_col._series))


def test_shape(sample_series):
    """shape matches the underlying series shape (computing for dask)."""
    col = DataColumn(sample_series)
    col_shape = col.shape
    series_shape = col.to_series().shape
    if dd and isinstance(sample_series, dd.Series):
        col_shape = (col_shape[0].compute(),)
        series_shape = (series_shape[0].compute(),)
    assert col_shape == (4,)
    assert col_shape == series_shape


def test_len(sample_series):
    """len(column) delegates to the series length."""
    col = DataColumn(sample_series)
    assert len(col) == len(sample_series) == 4
def test_dtype_update_on_init(sample_datetime_series):
    """Init converts the physical dtype to match the logical type."""
    dc = DataColumn(sample_datetime_series,
                    logical_type='DateTime')
    assert dc._series.dtype == 'datetime64[ns]'


def test_dtype_update_on_ltype_change():
    """Changing the logical type converts the physical dtype."""
    dc = DataColumn(pd.Series([1, 2, 3]),
                    logical_type='Integer')
    assert dc._series.dtype == 'Int64'
    dc = dc.set_logical_type('Double')
    assert dc._series.dtype == 'float64'


def test_ordinal_requires_instance_on_init(sample_series):
    """Ordinal must be instantiated with an order; the bare class fails."""
    error_msg = 'Must use an Ordinal instance with order values defined'
    with pytest.raises(TypeError, match=error_msg):
        DataColumn(sample_series, logical_type=Ordinal)
    with pytest.raises(TypeError, match=error_msg):
        DataColumn(sample_series, logical_type="Ordinal")


def test_ordinal_requires_instance_on_update(sample_series):
    """Same Ordinal-instance requirement when updating the logical type."""
    dc = DataColumn(sample_series, logical_type="NaturalLanguage")
    error_msg = 'Must use an Ordinal instance with order values defined'
    with pytest.raises(TypeError, match=error_msg):
        dc.set_logical_type(Ordinal)
    with pytest.raises(TypeError, match=error_msg):
        dc.set_logical_type("Ordinal")


def test_ordinal_with_order(sample_series):
    """An Ordinal instance keeps its order through init and update."""
    if (ks and isinstance(sample_series, ks.Series)) or (dd and isinstance(sample_series, dd.Series)):
        pytest.xfail('Fails with Dask and Koalas - ordinal data validation not compatible')
    ordinal_with_order = Ordinal(order=['a', 'b', 'c'])
    dc = DataColumn(sample_series, logical_type=ordinal_with_order)
    assert isinstance(dc.logical_type, Ordinal)
    assert dc.logical_type.order == ['a', 'b', 'c']
    dc = DataColumn(sample_series, logical_type="NaturalLanguage")
    new_dc = dc.set_logical_type(ordinal_with_order)
    assert isinstance(new_dc.logical_type, Ordinal)
    assert new_dc.logical_type.order == ['a', 'b', 'c']


def test_ordinal_with_incomplete_ranking(sample_series):
    """Data values missing from the order raise ValueError."""
    if (ks and isinstance(sample_series, ks.Series)) or (dd and isinstance(sample_series, dd.Series)):
        pytest.xfail('Fails with Dask and Koalas - ordinal data validation not supported')
    ordinal_incomplete_order = Ordinal(order=['a', 'b'])
    error_msg = re.escape("Ordinal column sample_series contains values that are not "
                          "present in the order values provided: ['c']")
    with pytest.raises(ValueError, match=error_msg):
        DataColumn(sample_series, logical_type=ordinal_incomplete_order)


def test_ordinal_with_nan_values():
    """NaN values are tolerated by ordinal order validation."""
    nan_series = pd.Series(['a', 'b', np.nan, 'a'])
    ordinal_with_order = Ordinal(order=['a', 'b'])
    dc = DataColumn(nan_series, logical_type=ordinal_with_order)
    assert isinstance(dc.logical_type, Ordinal)
    assert dc.logical_type.order == ['a', 'b']


def test_latlong_formatting(latlongs):
    """All accepted latlong input forms normalize to the same column."""
    expected_series = pd.Series([(1, 2), (3, 4)])
    if ks and isinstance(latlongs[0], ks.Series):
        expected_series = ks.Series([[1, 2], [3, 4]])
    elif dd and isinstance(latlongs[0], dd.Series):
        expected_series = dd.from_pandas(expected_series, npartitions=2)
    expected_dc = DataColumn(expected_series, logical_type='LatLong', name='test_series')
    for series in latlongs:
        dc = DataColumn(series, logical_type='LatLong', name='test_series')
        pd.testing.assert_series_equal(to_pandas(dc.to_series()), to_pandas(expected_series))
        assert dc == expected_dc
def test_datacolumn_equality(sample_series, sample_datetime_series):
    """Equality compares name, ltype (incl. params), tags, description, metadata, and data."""
    # Check different parameters to DataColumn
    str_col = DataColumn(sample_series, logical_type='Categorical')
    str_col_2 = DataColumn(sample_series, logical_type=Categorical)
    str_col_diff_tags = DataColumn(sample_series, logical_type=Categorical, semantic_tags={'test'})
    diff_name_col = DataColumn(sample_datetime_series, logical_type=Categorical)
    diff_dtype_col = DataColumn(sample_series, logical_type=NaturalLanguage)
    diff_description_col = DataColumn(sample_series, logical_type='Categorical', description='description')
    diff_metadata_col = DataColumn(sample_series, logical_type='Categorical', metadata={'interesting_values': ['a', 'b']})
    assert str_col == str_col_2
    assert str_col != str_col_diff_tags
    assert str_col != diff_name_col
    assert str_col != diff_dtype_col
    assert str_col != diff_description_col
    assert str_col != diff_metadata_col
    # Check columns with same logical types but different parameters
    ordinal_ltype_1 = Ordinal(order=['a', 'b', 'c'])
    ordinal_ltype_2 = Ordinal(order=['b', 'a', 'c'])
    ordinal_col_1 = DataColumn(sample_series, logical_type=ordinal_ltype_1)
    ordinal_col_2 = DataColumn(sample_series, logical_type=ordinal_ltype_2)
    assert str_col != ordinal_col_1
    assert ordinal_col_1 != ordinal_col_2
    assert ordinal_col_1 == ordinal_col_1
    datetime_ltype_instantiated = Datetime(datetime_format='%Y-%m%d')
    datetime_col_format = DataColumn(sample_datetime_series, logical_type=datetime_ltype_instantiated)
    datetime_col_param = DataColumn(sample_datetime_series, logical_type=Datetime(datetime_format=None))
    datetime_col_instantiated = DataColumn(sample_datetime_series, logical_type=Datetime())
    datetime_col = DataColumn(sample_datetime_series, logical_type=Datetime)
    assert datetime_col != datetime_col_instantiated
    assert datetime_col_instantiated != datetime_col_format
    assert datetime_col_instantiated == datetime_col_param
    # Check different underlying series
    str_col = DataColumn(sample_series, logical_type='NaturalLanguage')
    changed_series = sample_series.copy().replace(to_replace='a', value='test')
    null_col = DataColumn(changed_series, logical_type='NaturalLanguage')
    # We only check underlying data for equality with pandas dataframes
    if isinstance(str_col.to_series(), pd.Series):
        assert str_col != null_col
    else:
        assert str_col == null_col


def test_datacolumn_metadata(sample_series):
    """Metadata defaults to {} and is a plain, mutable dict."""
    column_metadata = {'metadata_field': [1, 2, 3], 'created_by': 'user0'}
    data_col = DataColumn(sample_series)
    assert data_col.metadata == {}
    data_col = DataColumn(sample_series, metadata=column_metadata)
    assert data_col.metadata == column_metadata
    new_metadata = {'date_created': '1/1/19', 'created_by': 'user1'}
    data_col.metadata = {**data_col.metadata, **new_metadata}
    assert data_col.metadata == {'date_created': '1/1/19', 'metadata_field': [1, 2, 3], 'created_by': 'user1'}
    data_col.metadata.pop('created_by')
    assert data_col.metadata == {'date_created': '1/1/19', 'metadata_field': [1, 2, 3]}
    data_col.metadata['number'] = 1012034
    assert data_col.metadata == {'date_created': '1/1/19', 'metadata_field': [1, 2, 3], 'number': 1012034}


def test_datacolumn_metadata_error(sample_series):
    """Non-dict metadata raises TypeError."""
    err_msg = "Column metadata must be a dictionary"
    with pytest.raises(TypeError, match=err_msg):
        DataColumn(sample_series, metadata=123)
|
#!/usr/bin/env python3
from typing import Optional, Generator
from datetime import date, datetime
from collections import namedtuple
import re
import requests
# Remote resources: the Autorenkalender JavaScript file and the base URL
# for per-author pages on projekt-gutenberg.org.
AUTORENKALENDER_URL = "https://www.projekt-gutenberg.org/info/kalender/autoren.js"
AUTHOR_URL_BASE = "https://www.projekt-gutenberg.org/autoren/namen/"
# Zero-width match before each internal capital (CamelCase split point).
CAMEL_CASE_REGEX = re.compile(r"(?<!^)(?=[A-Z])")
# Cheap pre-filter for lines that look like Autor(...) calls.
AUTHOR_REGEX = re.compile(r"^Autor\(")
# Full parser for one Autor("last","first","YYYYMMDD","YYYYMMDD","...html"); line.
LINE_REGEX = re.compile(
    r'^Autor\("'
    r'(?P<last_name>.*)",'
    r'"(?P<first_name>.*)",'
    r'"(?P<birth_string>\d{8})",'
    r'"(?P<death_string>\d{8})",'
    r'"(?P<url>.*html)"\);$'
)
# Parsed author record; birth/death are datetime.date or None.
Author = namedtuple("Author", ["last_name", "first_name", "birth", "death", "url"])
def hyphen_name(name: str) -> str:
    """Hyphenate internal capitals in each word of *name* (CamelCase -> Camel-Case)."""
    hyphenated_words = [CAMEL_CASE_REGEX.sub("-", part) for part in name.split()]
    return " ".join(hyphenated_words)
def parse_date(string: str) -> Optional[date]:
    """Parse a YYYYMMDD string into a date; None when it is not a valid date."""
    try:
        parsed = datetime.strptime(string, "%Y%m%d")
    except ValueError:
        return None
    return parsed.date()
def same_day(left: date, right: date) -> bool:
    """True when both dates fall on the same month and day (year is ignored)."""
    return (left.month, left.day) == (right.month, right.day)
def parse_author(line: str) -> Optional[Author]:
    """Parse one Autor(...) line into an Author record, or None when it doesn't match."""
    match = LINE_REGEX.match(line)
    if not match:
        return None
    fields = match.groupdict()
    return Author(
        last_name=hyphen_name(fields["last_name"]),
        first_name=hyphen_name(fields["first_name"]),
        birth=parse_date(fields["birth_string"]),
        death=parse_date(fields["death_string"]),
        url=AUTHOR_URL_BASE + fields["url"],
    )
def even_years_alive(author) -> Optional[int]:
    """Author's age in years when today is their birthday, else None."""
    today = date.today()
    if not (author.birth and same_day(author.birth, today)):
        return None
    return today.year - author.birth.year
def even_years_dead(author) -> Optional[int]:
    """Years since death when today is the author's death day, else None."""
    today = date.today()
    if not (author.death and same_day(author.death, today)):
        return None
    return today.year - author.death.year
def print_info(author) -> None:
    """Print a markdown link plus birthday/death-day anniversary lines for *author*."""
    years_alive = even_years_alive(author)
    years_dead = even_years_dead(author)
    if not (years_alive or years_dead):
        return
    full_name = (
        author.first_name.strip() + " " + author.last_name.strip()
    ).strip()
    print("[{name}]({url})".format(name=full_name, url=author.url))
    if years_alive:
        print("{}. Geburtstag".format(years_alive))
    if years_dead:
        print("{}. Todestag".format(years_dead))
    print()
def author_lines() -> Generator[str, None, None]:
    """Yield the Autor(...) lines of the Autorenkalender JavaScript file.

    Yields nothing on any non-200 response (silent best-effort, as before).
    """
    response = requests.get(AUTORENKALENDER_URL)
    if response.status_code != 200:
        return
    for line in response.text.splitlines():
        if AUTHOR_REGEX.search(line):
            yield line
if __name__ == "__main__":
for author_line in author_lines():
author = parse_author(author_line)
if author:
print_info(author)
|
#!/usr/bin/env python
# coding: utf-8
# # U-Netを用いたUAV画像セグメンテーションについて
# ①データの確認、探索
# ②データの前処理
# ③U-Netのモデルの定義、トレーニング
# ④U-Netモデルの性能評価の確認
# OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
# OMP: Hint This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library. As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://www.intel.com/software/products/support/.
# Workaround for OMP Error #15 (duplicate OpenMP runtimes on Windows with
# torch + other native libs); must be set before the heavy imports below.
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import os, glob, sys, re
from tqdm import tqdm
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader, random_split
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2 as ToTensor
from tqdm import tqdm as tqdm
import cv2
import random
from fpathutils import copyLocaliImages, trainPairCheck
from u_net_pytorch import UNet, IoU, DiceBCELoss, DiceLoss, save_ckp, load_ckp, format_image, format_mask, saveScoreCSV, EarlyStopping, get_train_transform, LoadDataSet
from u_net_pytorch import visualize_training_predict
import seaborn as sns
import re
# Prefer the first CUDA device when available; the script later calls
# .cuda() unconditionally, so it presumably requires a GPU — TODO confirm.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(torch.cuda.is_available(),device)
# CLI: <className> <num_epochs> <batch_size> <resize_px> <datasetDirName>
className =sys.argv[1]
num_epochs=int(sys.argv[2])
BATCHSIZE = int(sys.argv[3])
resizeValue = int(sys.argv[4])
datasetDirName = sys.argv[5]
# orgDir = f"03_datasetforModel/Forest tsumura 2 50m P4Pv2_{className}/org_crop4Corner_5120_3072_Size1024_lap512_rotate_flipMirror"
# orgDir = f"03_datasetforModel/Forest tsumura 2 50m P4Pv2_{className}/org_crop4Corner_5120_3072_Size1024_lap512"
orgDir = f"uav_cnn_{className}\\{datasetDirName}"
# orgDir = sys.argv[5]
# Tile edge length parsed from the dataset directory name.
# NOTE(review): non-raw "\d" triggers a DeprecationWarning; prefer r"...".
imageSize = re.search(".*_Size(\d+)_lap.*",datasetDirName).group(1)
# os.makedirs(orgDir,exist_ok=True)
trainPairCheck(orgDir=orgDir)
# orgDir = copyLocaliImages(orgDir, copyDir=f"\\\\matsui_gpu_nsi\\datas\\uav_cnn_{className}")
imgPaths = glob.glob(os.path.join(orgDir,"*.jpg"))
# if len(imgPaths)>=5000:
# imgPaths = random.sample(imgPaths,5000)
normalize = False
# Dataset / dataloaders: random 80/20 train-validation split.
train_dataset = LoadDataSet(imgPaths, resizeValue, transform=get_train_transform(resizeValue,normalize))
print("datasets count\t",train_dataset.__len__())
split_valid_ratio = 0.2
train_size=int(np.round(train_dataset.__len__()*(1 - split_valid_ratio),0))
valid_size=int(np.round(train_dataset.__len__()*split_valid_ratio,0))
# BATCHSIZE = train_dataset.__len__()//20
# BATCHSIZE = 8
train_data, valid_data = random_split(train_dataset, [train_size, valid_size])
train_loader = DataLoader(dataset=train_data, batch_size=BATCHSIZE, shuffle=True)
val_loader = DataLoader(dataset=valid_data, batch_size=BATCHSIZE)
# Short runs (<=20 epochs) get a "Test_" model id prefix.
if num_epochs<=20:
    modelID = f"Test_data{train_dataset.__len__()}_modelResize-{str(resizeValue).zfill(4)}_Normalize{str(normalize)}_batch{BATCHSIZE}_epoch{num_epochs}_class-{className}"
else:
    modelID = f"data{train_dataset.__len__()}_imageSize-{imageSize}_modelResize-{str(resizeValue).zfill(4)}_Normalize{str(normalize)}_batch{BATCHSIZE}_epoch{num_epochs}_class-{className}"
workDir = "04_trainingModel"
workDir = os.path.join(workDir, modelID)
os.makedirs(workDir, exist_ok=True)
print(workDir)
print("Length of train data:\t\t{}".format(len(train_data)))
print("Length of validation data:\t{}".format(len(valid_data)))
print("Length of ALL data:\t\t{}".format(train_dataset.__len__()))
# ---- model / optimizer / loss / metric instances ----
model = UNet(3,1).cuda()
optimizer = torch.optim.Adam(model.parameters(),lr = 1e-3)
criterion = DiceLoss()
accuracy_metric = IoU()
valid_loss_min = np.Inf
checkpoint_path = os.path.join(workDir,'chkpoint_')
best_model_path = os.path.join(workDir,'bestmodel.pt')
total_train_loss = []
total_train_score = []
total_valid_loss = []
total_valid_score = []
# Initialise EarlyStopping.
early_stopping = EarlyStopping(patience=5, delta=0.2)
'''(translated) With delta set to 0.2, EarlyStopping increments its counter
whenever the validation loss fails to improve on the previous minimum by at
least 0.2. E.g. with a previous minimum of 0.5, a new loss at/under 0.3 counts
as an improvement and resets the counter, while a loss between 0.3 and 0.7
does not and the counter is incremented. Training stops once the patience
(number of tolerated epochs) is exhausted.
'''
losses_value = 0
for epoch in range(num_epochs):
    # Per-epoch metric buffers.
    train_loss = []
    train_score = []
    valid_loss = []
    valid_score = []
    pbar = tqdm(train_loader, desc = 'description')
    # ---- training pass ----
    for x_train, y_train, orgPath in pbar:
        x_train = torch.autograd.Variable(x_train).cuda()
        y_train = torch.autograd.Variable(y_train).cuda()
        optimizer.zero_grad()
        output = model(x_train)
        ## loss
        loss = criterion(output, y_train)
        losses_value = loss.item()
        ## accuracy (IoU)
        score = accuracy_metric(output,y_train)
        loss.backward()
        optimizer.step()
        train_loss.append(losses_value)
        train_score.append(score.item())
        pbar.set_description(f"Epoch: {epoch+1}, loss: {losses_value}, IoU: {score}")
    # ---- validation pass ----
    with torch.no_grad():
        for image,mask,orgPath in val_loader:
            image = torch.autograd.Variable(image).cuda()
            mask = torch.autograd.Variable(mask).cuda()
            output = model(image)
            ## loss
            loss = criterion(output, mask)
            losses_value = loss.item()
            ## accuracy (IoU)
            score = accuracy_metric(output,mask)
            valid_loss.append(losses_value)
            valid_score.append(score.item())
    # Periodic snapshot every 10 epochs, using the LAST validation batch.
    if epoch!=0 and epoch%10==0:
        visualize_training_predict(image,mask,output,workDir,True,True)
        saveScoreCSV(workDir,modelID,total_train_loss,total_valid_loss, total_train_score, total_valid_score)
        # NOTE(review): `checkpoint` is built at the END of each epoch below,
        # so this call saves a one-epoch-stale checkpoint — confirm intended.
        save_ckp(checkpoint, False, checkpoint_path, best_model_path)
    total_train_loss.append(np.mean(train_loss))
    total_train_score.append(np.mean(train_score))
    total_valid_loss.append(np.mean(valid_loss))
    total_valid_score.append(np.mean(valid_score))
    print(f"Train Loss: {total_train_loss[-1]}, Train IOU: {total_train_score[-1]}",f"Valid Loss: {total_valid_loss[-1]}, Valid IOU: {total_valid_score[-1]}")
    checkpoint = {
        'epoch': epoch + 1,
        'valid_loss_min': total_valid_loss[-1],
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    if epoch>=20:
        # save a rolling checkpoint (only after epoch 20)
        save_ckp(checkpoint, False, checkpoint_path, best_model_path)
        # save the best model (lowest validation loss) seen so far
        # NOTE(review): nesting reconstructed from a flattened paste — confirm
        # whether best-model saving was meant to start before epoch 20.
        if total_valid_loss[-1] <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min,total_valid_loss[-1]))
            save_ckp(checkpoint, True, checkpoint_path, best_model_path)
            valid_loss_min = total_valid_loss[-1]
            print("bestModel",checkpoint_path)
    # evaluate EarlyStopping on the epoch's mean validation loss
    early_stopping(np.mean(valid_loss))
    # stop training once EarlyStopping triggers
    if early_stopping.early_stop:
        print("Epoch EarlyStop",epoch)
        break
print("Epoch End",epoch+1)
# Persist final per-epoch scores and plot loss / IoU curves side by side.
saveScoreCSV(workDir,modelID,total_train_loss,total_valid_loss, total_train_score, total_valid_score)
plt.figure(1)
# NOTE(review): this opens a SECOND figure; plt.figure(1) above is unused.
plt.figure(figsize=(15,5))
sns.set_style(style="darkgrid")
plt.subplot(1, 2, 1)
# NOTE(review): if early stopping broke the loop, range(1, num_epochs+1) is
# longer than the collected series and lineplot raises — confirm.
sns.lineplot(x=range(1,num_epochs+1), y=total_train_loss, label="Train Loss")
sns.lineplot(x=range(1,num_epochs+1), y=total_valid_loss, label="Valid Loss")
plt.title("Loss")
plt.xlabel("epochs")
plt.ylabel("DiceLoss")
plt.subplot(1, 2, 2)
sns.lineplot(x=range(1,num_epochs+1), y=total_train_score, label="Train Score")
sns.lineplot(x=range(1,num_epochs+1), y=total_valid_score, label="Valid Score")
plt.title("Score (IoU)")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xlabel("epochs",fontsize=18)
plt.ylabel("IoU",fontsize=18)
plt.tick_params(labelsize=18)
plt.savefig(os.path.join(workDir,f"Unet_score_{modelID}.png"))
# plt.show()
plt.close()
# (4) model evaluation: reload the best checkpoint before prediction.
best_model_path = os.path.join(workDir,'bestmodel.pt')
model, optimizer, start_epoch, valid_loss_min = load_ckp(best_model_path, model, optimizer)
# Next, run the trained model over whole UAV images (tiled with overlap).
# NOTE(review): predict_orgPaths below is computed but never used — it fed a
# removed visualization draft.
predict_orgPaths = glob.glob(os.path.join(orgDir,"DJI_0065*.jpg"))
predict_orgPaths = random.sample(predict_orgPaths, 3)
# outImagePath = os.path.join(workDir,modelID,"OutImages")
# os.makedirs(outImagePath,exist_ok=True)
# (removed: large commented-out draft of visualize_predict(); see VCS history)
from predict_image import getUAVImageName, predictUAVImageCropLap, getCropLapSize
import time
# Crop / overlap sizes are encoded in the dataset directory name.
cropSize,lapSize = getCropLapSize(datasetDirName)
workDir_pred = os.path.join(workDir,"predictedUAVimgs")
os.makedirs(workDir_pred,exist_ok=True)
# Unique source UAV image names recovered from the tile file names.
UAVImageNames = [getUAVImageName(imgPath) for imgPath in imgPaths]
UAVImageNames = list(set(UAVImageNames))
for UAVImageName in tqdm(UAVImageNames):
    UAVPath = os.path.join(f"H:/マイドライブ/Forest/src//03_datasetforModel/Forest tsumura 2 50m P4Pv2_{className}/org",UAVImageName+".JPG")
    workDir_pred = os.path.join(workDir,"predictedUAVimgs_lapSize-"+str(lapSize))
    os.makedirs(workDir_pred,exist_ok=True)
    predictUAVImageCropLap(UAVimgPath=UAVPath,
                           saveDir=workDir_pred,
                           model=model,
                           resizeSize=resizeValue,
                           cropSize=int(cropSize),
                           lapSize=int(lapSize),
                           className=className)
    time.sleep(2)
# Second pass without tile overlap (lapSize 0).
# NOTE(review): this runs AFTER the loop, so it only processes the last
# UAVImageName — confirm whether it was meant to be inside the loop.
lapSize2 = 0
workDir_pred = os.path.join(workDir,"predictedUAVimgs_lapSize-"+str(lapSize2))
os.makedirs(workDir_pred,exist_ok=True)
UAVPath = os.path.join(f"H:/マイドライブ/Forest/src//03_datasetforModel/Forest tsumura 2 50m P4Pv2_{className}/org",UAVImageName+".JPG")
predictUAVImageCropLap(UAVimgPath=UAVPath,
                       saveDir=workDir_pred,
                       model=model,
                       resizeSize=resizeValue,
                       cropSize=int(cropSize),
                       lapSize=int(lapSize2),
                       className=className)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
import json
class RunCommandRequest(RpcRequest):
    """RPC request for the SWAS-OPEN 'RunCommand' API (version 2020-06-01).

    Each accessor pair reads/writes one query parameter of the request.
    """

    def __init__(self):
        super().__init__('SWAS-OPEN', '2020-06-01', 'RunCommand', 'SWAS-OPEN')
        self.set_method('POST')

    def get_WorkingDir(self):
        """Query param 'WorkingDir' (String)."""
        return self.get_query_params().get('WorkingDir')

    def set_WorkingDir(self, WorkingDir):
        """Set query param 'WorkingDir' (String)."""
        self.add_query_param('WorkingDir', WorkingDir)

    def get_Type(self):
        """Query param 'Type' (String)."""
        return self.get_query_params().get('Type')

    def set_Type(self, Type):
        """Set query param 'Type' (String)."""
        self.add_query_param('Type', Type)

    def get_CommandContent(self):
        """Query param 'CommandContent' (String)."""
        return self.get_query_params().get('CommandContent')

    def set_CommandContent(self, CommandContent):
        """Set query param 'CommandContent' (String)."""
        self.add_query_param('CommandContent', CommandContent)

    def get_Timeout(self):
        """Query param 'Timeout' (Integer)."""
        return self.get_query_params().get('Timeout')

    def set_Timeout(self, Timeout):
        """Set query param 'Timeout' (Integer)."""
        self.add_query_param('Timeout', Timeout)

    def get_WindowsPasswordName(self):
        """Query param 'WindowsPasswordName' (String)."""
        return self.get_query_params().get('WindowsPasswordName')

    def set_WindowsPasswordName(self, WindowsPasswordName):
        """Set query param 'WindowsPasswordName' (String)."""
        self.add_query_param('WindowsPasswordName', WindowsPasswordName)

    def get_InstanceId(self):
        """Query param 'InstanceId' (String)."""
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId):
        """Set query param 'InstanceId' (String)."""
        self.add_query_param('InstanceId', InstanceId)

    def get_WorkingUser(self):
        """Query param 'WorkingUser' (String)."""
        return self.get_query_params().get('WorkingUser')

    def set_WorkingUser(self, WorkingUser):
        """Set query param 'WorkingUser' (String)."""
        self.add_query_param('WorkingUser', WorkingUser)

    def get_Name(self):
        """Query param 'Name' (String)."""
        return self.get_query_params().get('Name')

    def set_Name(self, Name):
        """Set query param 'Name' (String)."""
        self.add_query_param('Name', Name)

    def get_Parameters(self):
        """Query param 'Parameters' (Map, stored JSON-encoded)."""
        return self.get_query_params().get('Parameters')

    def set_Parameters(self, Parameters):
        """Set query param 'Parameters' (Map); serialised to JSON."""
        self.add_query_param('Parameters', json.dumps(Parameters))

    def get_EnableParameter(self):
        """Query param 'EnableParameter' (Boolean)."""
        return self.get_query_params().get('EnableParameter')

    def set_EnableParameter(self, EnableParameter):
        """Set query param 'EnableParameter' (Boolean)."""
        self.add_query_param('EnableParameter', EnableParameter)
|
import threading
import traceback
import sys
import time
__author__ = 'paoolo'
def chunks(l, n):
    """Yield successive n-sized slices of the sequence l (last may be short)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def decode(val):
    """Decode a SCIP-2.0 character block into an integer.

    Each character carries 6 bits of payload (char code minus 0x30); the
    characters are concatenated MSB-first. Raises ValueError on empty input.
    """
    bits = '0b' + ''.join(format(ord(ch) - 0x30, '06b') for ch in val)
    return int(bits, 2)
class Hokuyo(object):
    """Driver for a Hokuyo URG laser range finder speaking SCIP 2.0 over a
    serial port.

    `port` is expected to expose read(n), read_byte(), write(data), close().
    read() results are compared against str values, so the port wrapper
    presumably returns text rather than bytes — TODO confirm.
    """

    SHORT_COMMAND_LEN = 5      # echo-relative reply length for short commands
    MD_COMMAND_REPLY_LEN = 20  # fixed header length of each MD scan packet

    # SCIP 2.0 command strings.
    LASER_ON = 'BM\n'
    LASER_OFF = 'QT\n'
    RESET = 'RS\n'
    VERSION_INFO = 'VV\n'
    SENSOR_STATE = 'II\n'
    SENSOR_SPECS = 'PP\n'
    SET_SCIP2 = 'SCIP2.0\n'

    # Scan-payload framing constants (floats so size math stays fractional).
    CHARS_PER_VALUE = 3.0
    CHARS_PER_LINE = 66.0
    CHARS_PER_BLOCK = 64.0

    # Angular geometry: step-index <-> degrees conversion.
    START_DEG = 119.885
    STEP_DEG = 0.35208516886930985

    START_STEP = 44
    STOP_STEP = 725

    # Reply line counts for the corresponding info commands.
    VERSION_INFO_LINES = 6
    SENSOR_STATE_LINES = 8
    SENSOR_SPECS_LINES = 9

    def __init__(self, port):
        self.__port = port
        self.__port_lock = threading.RLock()  # serialises all port I/O
        # Latest scan shared between the reader thread and consumers.
        self.__timestamp, self.__angles, self.__distances = 0, [], []
        self.__scan_lock = threading.Lock()
        self.__is_active = True
        self.__scanning_allowed = False

    def __offset(self):
        """Resynchronise after a protocol error: drain the port until a blank
        line (two '\\n') or EOF (two '') and log everything skipped."""
        count = 2
        result = ''
        self.__port_lock.acquire()
        try:
            a = self.__port.read(1)
            b = self.__port.read(1)
            while not ((a == '\n' and b == '\n') or (a == '' and b == '')):
                result += a
                a = b
                b = self.__port.read(1)
                count += 1
        finally:
            self.__port_lock.release()
        result += a
        result += b
        sys.stderr.write('READ %d EXTRA BYTES: "%s"\n' % (count, str(result)))

    def __execute_command(self, command):
        """Send a command and consume its echo (SCIP echoes every command)."""
        self.__port_lock.acquire()
        try:
            self.__port.write(command)
            result = self.__port.read(len(command))
            assert result == command
        finally:
            self.__port_lock.release()
        return result

    def __short_command(self, command, check_response=True):
        """Run a command with a fixed-length reply.

        Returns echo+reply text, or None after logging and resynchronising
        on any failure (assertion or I/O error).
        """
        result = ''
        self.__port_lock.acquire()
        try:
            try:
                result += self.__execute_command(command)
                result += self.__port.read(Hokuyo.SHORT_COMMAND_LEN)
                if check_response:
                    # '00P' is the SCIP success status + checksum.
                    assert result[-5:-2] == '00P'
                    assert result[-2:] == '\n\n'
                return result
            except BaseException as e:
                sys.stderr.write('RESULT: "%s"' % result)
                traceback.print_exc()
                self.__offset()
        finally:
            self.__port_lock.release()

    def __long_command(self, cmd, lines, check_response=True):
        """Run a command whose reply spans `lines` additional lines.

        Reads byte-by-byte via read_byte(); a None byte also counts as a
        line terminator so a stalled port cannot loop forever.
        Returns the full text, or None on error (after resync).
        """
        result = ''
        self.__port_lock.acquire()
        try:
            try:
                result += self.__execute_command(cmd)
                result += self.__port.read(4)
                if check_response:
                    assert result[-4:-1] == '00P'
                    assert result[-1:] == '\n'
                line = 0
                while line < lines:
                    char = self.__port.read_byte()
                    if not char is None:
                        char = chr(char)
                        result += char
                        if char == '\n':
                            line += 1
                    else:  # char is None
                        line += 1
                assert result[-2:] == '\n\n'
                return result
            except BaseException as e:
                sys.stderr.write('RESULT: "%s"' % result)
                traceback.print_exc()
                self.__offset()
        finally:
            self.__port_lock.release()

    def terminate(self):
        """Reset the sensor, stop the scanning loop and close the port."""
        self.reset()
        self.__is_active = False
        self.__port_lock.acquire()
        try:
            self.__port.close()
        finally:
            self.__port_lock.release()

    def laser_on(self):
        # BM: switch the laser on (required before measurements).
        return self.__short_command(Hokuyo.LASER_ON, check_response=True)

    def laser_off(self):
        # QT: switch the laser off.
        return self.__short_command(Hokuyo.LASER_OFF)

    def reset(self):
        # RS: reset sensor to power-on defaults.
        return self.__short_command(Hokuyo.RESET)

    def set_scip2(self):
        "for URG-04LX"
        return self.__short_command(Hokuyo.SET_SCIP2, check_response=False)

    def set_motor_speed(self, motor_speed=99):
        # CRnn: adjust motor speed (99 = default per SCIP).
        return self.__short_command('CR' + '%02d' % motor_speed + '\n', check_response=False)

    def set_high_sensitive(self, enable=True):
        # HS0/HS1: toggle high-sensitivity mode.
        return self.__short_command('HS' + ('1\n' if enable else '0\n'), check_response=False)

    def get_version_info(self):
        """Return the VV (version) reply text, or None on error."""
        return self.__long_command(Hokuyo.VERSION_INFO, Hokuyo.VERSION_INFO_LINES)

    def get_sensor_state(self):
        """Return the II (state) reply text, or None on error."""
        return self.__long_command(Hokuyo.SENSOR_STATE, Hokuyo.SENSOR_STATE_LINES)

    def get_sensor_specs(self):
        """Return the PP (specification) reply text, or None on error."""
        return self.__long_command(Hokuyo.SENSOR_SPECS, Hokuyo.SENSOR_SPECS_LINES)

    def __get_and_parse_scan(self, cluster_count, start_step, stop_step):
        """Read one scan payload from the port and decode it.

        Returns {angle_in_degrees: raw_distance} for each measured step.
        """
        distances = {}
        result = ''
        # Expected payload size in characters for the requested step range.
        count = ((stop_step - start_step) * Hokuyo.CHARS_PER_VALUE * Hokuyo.CHARS_PER_LINE)
        count /= (Hokuyo.CHARS_PER_BLOCK * cluster_count)
        count += 1.0 + 4.0  # paoolo(FIXME): why +4.0?
        count = int(count)
        self.__port_lock.acquire()
        try:
            result += self.__port.read(count)
        finally:
            self.__port_lock.release()
        assert result[-2:] == '\n\n'
        # Drop the trailing checksum character of every line, then rejoin the
        # 6-bit-encoded payload and decode it 3 characters per distance.
        result = result.split('\n')
        result = [line[:-1] for line in result]
        result = ''.join(result)
        i = 0
        start = (-Hokuyo.START_DEG + Hokuyo.STEP_DEG * cluster_count * (start_step - Hokuyo.START_STEP))
        for chunk in chunks(result, 3):
            distances[- ((Hokuyo.STEP_DEG * cluster_count * i) + start)] = decode(chunk)
            i += 1
        return distances

    def get_single_scan(self, start_step=START_STEP, stop_step=STOP_STEP, cluster_count=1):
        """Request one GD scan and return {angle: distance}, or None on error."""
        self.__port_lock.acquire()
        try:
            cmd = 'GD%04d%04d%02d\n' % (start_step, stop_step, cluster_count)
            self.__port.write(cmd)
            result = self.__port.read(len(cmd))
            assert result == cmd
            result += self.__port.read(4)
            assert result[-4:-1] == '00P'
            assert result[-1] == '\n'
            # 6 more characters: timestamp line.
            result = self.__port.read(6)
            assert result[-1] == '\n'
            scan = self.__get_and_parse_scan(cluster_count, start_step, stop_step)
            return scan
        except BaseException as e:
            traceback.print_exc()
            self.__offset()
        finally:
            self.__port_lock.release()

    def __get_multiple_scans(self, start_step=START_STEP, stop_step=STOP_STEP, cluster_count=1,
                             scan_interval=0, number_of_scans=0):
        """Generator over MD streaming scans; number_of_scans=0 means endless.

        NOTE(review): for a finite number_of_scans the while condition is
        False immediately (index starts at 0 and only decreases), so nothing
        is yielded — callers only use the endless default; confirm intended.
        """
        self.__port_lock.acquire()
        try:
            cmd = 'MD%04d%04d%02d%01d%02d\n' % (start_step, stop_step, cluster_count, scan_interval, number_of_scans)
            self.__port.write(cmd)
            result = self.__port.read(len(cmd))
            assert result == cmd
            result += self.__port.read(Hokuyo.SHORT_COMMAND_LEN)
            assert result[-2:] == '\n\n'
            index = 0
            while number_of_scans == 0 or index > 0:
                index -= 1
                result = self.__port.read(Hokuyo.MD_COMMAND_REPLY_LEN)
                assert result[:13] == cmd[:13]
                result = self.__port.read(6)
                assert result[-1] == '\n'
                scan = self.__get_and_parse_scan(cluster_count, start_step, stop_step)
                yield scan
        except BaseException as e:
            traceback.print_exc()
            self.__offset()
        finally:
            self.__port_lock.release()

    def enable_scanning(self, _enable_scanning):
        """Allow/forbid the background scanning loop to stream scans."""
        self.__scanning_allowed = _enable_scanning

    def __set_scan(self, scan):
        """Publish a new scan (angles/distances/timestamp) under the scan lock."""
        if scan is not None:
            timestamp = int(time.time() * 1000.0)
            angles, distances = Hokuyo.__parse_scan(scan)
            self.__scan_lock.acquire()
            try:
                self.__angles, self.__distances, self.__timestamp = angles, distances, timestamp
            finally:
                self.__scan_lock.release()

    def get_scan(self):
        """Return the latest (angles, distances, timestamp).

        When continuous scanning is disabled, performs one on-demand scan
        first; otherwise the background loop keeps the data fresh.
        """
        if not self.__scanning_allowed:
            scan = self.get_single_scan()
            self.__set_scan(scan)
        self.__scan_lock.acquire()
        try:
            return self.__angles, self.__distances, self.__timestamp
        finally:
            self.__scan_lock.release()

    def scanning_loop(self):
        """Background thread body: stream MD scans while scanning is allowed.

        NOTE(review): the port RLock acquired here is only released on the
        break path; if the scan generator ends any other way this thread
        keeps holding it — confirm intended. Indentation of the sleep was
        reconstructed (it runs every outer iteration to avoid busy-waiting).
        """
        while self.__is_active:
            if self.__scanning_allowed:
                self.__port_lock.acquire()
                for scan in self.__get_multiple_scans():
                    self.__set_scan(scan)
                    if not self.__scanning_allowed or not self.__is_active:
                        self.laser_off()
                        self.laser_on()
                        self.__port_lock.release()
                        break
            time.sleep(0.1)

    @staticmethod
    def __parse_scan(scan):
        """Split a {angle: distance} dict into parallel sorted lists."""
        angles = sorted(scan.keys())
        distances = list(map(scan.get, angles))
        return angles, distances
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# Metadata describing the schema BEFORE (pre) and AFTER (post) this migration.
pre_meta = MetaData()
post_meta = MetaData()
# 'user' table as it exists before the migration (dropped on upgrade).
user = Table('user', pre_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('fname', VARCHAR(length=128)),
    Column('lname', VARCHAR(length=128)),
    Column('nickname', VARCHAR(length=128)),
    Column('username', VARCHAR(length=128)),
    Column('password', VARCHAR(length=1024)),
    Column('email', VARCHAR(length=1024)),
    Column('permissions', VARCHAR(length=1024)),
)
# NOTE(review): every column below is flagged primary_key, producing a
# composite primary key over id/url/name/importance — confirm this matches
# the model definition; usually only 'id' should be the PK.
advertisement = Table('advertisement', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('url', String(length=200), primary_key=True, nullable=False),
    Column('name', String(length=200), primary_key=True, nullable=False),
    Column('importance', Integer, primary_key=True, nullable=False),
)
issuu = Table('issuu', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('code', Integer, primary_key=True, nullable=False),
)
def upgrade(migrate_engine):
    """Apply the migration: drop 'user', create 'advertisement' and 'issuu'."""
    # Bind the engine supplied by migrate; never create one here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    pre_meta.tables['user'].drop()
    for table_name in ('advertisement', 'issuu'):
        post_meta.tables[table_name].create()
def downgrade(migrate_engine):
    """Reverse the migration: recreate 'user', drop 'advertisement'/'issuu'."""
    # Bind the engine supplied by migrate; never create one here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    pre_meta.tables['user'].create()
    for table_name in ('advertisement', 'issuu'):
        post_meta.tables[table_name].drop()
|
import numpy as np
from scipy.special import binom
import time
from itertools import combinations_with_replacement as cwr, starmap
import matplotlib.pyplot as plt
from decimal import Decimal
""" Citation: https://gist.github.com/Juanlu001/7284462
"""
def bernstein(n, k):
    """Return the Bernstein basis polynomial b_{k,n} as a callable of x."""
    binom_coeff = binom(n, k)

    def basis(x):
        return binom_coeff * x ** k * (1 - x) ** (n - k)

    return basis
def bezier(points, res=1000, dim=3):
    """Sample a Bezier curve defined by control `points` at `res` parameter
    values in [0, 1]; returns an array of shape (res, dim)."""
    degree = len(points) - 1
    t = np.linspace(0, 1, num=res)
    curve = np.zeros((res, dim))
    for k, point in enumerate(points):
        curve += np.outer(bernstein(degree, k)(t), point)
    return curve
def casteljau(points, res, dim, deg=3):
N = Decimal(str(len(points)))
t = np.linspace(0, 1, num=res)
t = [Decimal(t) for t in t.tolist()]
if dim > 1: points = [[Decimal(str(p2)) for p2 in p1] for p1 in points.tolist()]
else: points = [Decimal(str(p1)) for p1 in points.tolist()]
curve = np.zeros((res, dim))
curve = [[Decimal(str(c2)) for c2 in c1] for c1 in curve.tolist()]
for ii in range(len(points)):
coeff = Decimal(str(binom(len(points)-1, ii)))
bp = [coeff * (ti ** Decimal(str(ii))) * (Decimal('1')-ti) ** ((N-Decimal('1'))-ii) for ti in t]
pi = points[ii]
for mm in range(len(bp)):
for nn in range(len(pi)):
curve[mm][nn] += [cr[0]*cr[1] for cr in zip(bp, pi)]
return curve
class ExpSmooth:
    """Exponential smoothing of a 2-D series x of shape (N, dims).

    Options (overridable via kwargs): 'alpha', 'beta', 'gamma' smoothing
    weights, 'res' (defaults to the series length) and 'verbose'.
    """

    def __init__(self, x, **kwargs):
        self.x = x
        self.options = {
            'gamma': 0.1,
            'alpha': 0.1,
            'beta': 0.1,
            'res': x.shape[0],
            'verbose': False
        }
        self.options.update(kwargs)

    def __modulo_value(self, key):
        # Wrap a stored option into [0, 1) so smoothing weights stay valid.
        return self.options[key] % 0.999999999999

    def exponential_single(self, alpha=None):
        """Single (simple) exponential smoothing.

        Returns (alpha, smoothed_series, L2 norm of x - smoothed).
        """
        x = self.x.copy()
        if alpha is None:
            alpha = self.__modulo_value('alpha')
        N = x.shape[0]
        S_t = np.zeros_like(x)
        S_t[0, :] = x[0, :]
        for ii in range(1, N):
            # Uses the lagged observation x[ii-1] (one-step-behind form).
            S_t[ii, :] = alpha * x[ii - 1, :] + (1.0 - alpha) * S_t[ii - 1, :]
        return alpha, S_t, np.linalg.norm(x - S_t)

    def exponential_double(self, *args, **kwargs):
        """Double (Holt) exponential smoothing: level S_t plus trend b_t.

        Accepts either (alpha, gamma) positionally or a single (alpha, gamma)
        tuple, so it can be driven by both starmap and map in fit().
        Returns (S_t, alpha, gamma, MSE) where MSE is a tanh-squashed L2 norm
        mapped into (0, 1).
        """
        x = self.x.copy()
        N = x.shape[0]
        if 'verbose' in kwargs:
            self.options['verbose'] = kwargs['verbose']
        if len(args) > 1:
            alpha, gamma = args
        elif len(args) == 1:
            alpha, gamma = args[0]
        if alpha is None:
            alpha = self.__modulo_value('alpha')
        if gamma is None:
            gamma = self.__modulo_value('gamma')
        S_t = np.zeros_like(x)
        b_t = np.zeros_like(x)
        S_t[0, :] = x[0, :]
        b_t[0, :] = x[1, :] - x[0, :]  # initial trend: first difference
        for ii in range(1, N):
            S_t[ii, :] = alpha * x[ii, :] + (1.0 - alpha) * (S_t[ii - 1, :] + b_t[ii - 1, :])
            b_t[ii, :] = gamma * (S_t[ii, :] - S_t[ii - 1, :]) + (1.0 - gamma) * b_t[ii - 1, :]
        MSE = (np.tanh(np.linalg.norm(np.subtract(x, S_t))) + 1.0) / 2.0
        if self.options['verbose']:
            print(alpha, gamma, MSE)
        return S_t, alpha, gamma, MSE

    def exponential_triple(self, *args):
        """Triple (Holt-Winters) smoothing — incomplete.

        NOTE(review): I_t starts at zeros, so the first seasonal division
        yields inf/nan, and nothing is returned. Behaviour kept unchanged
        (apart from removing a stray `pass`) pending a proper seasonal-index
        initialisation.
        """
        if len(args) > 1:
            alpha, beta, gamma = args
        elif len(args) == 1:
            alpha, beta, gamma = args[0]
        if alpha is None:
            alpha = self.__modulo_value('alpha')
        if gamma is None:
            gamma = self.__modulo_value('gamma')
        if beta is None:
            beta = self.__modulo_value('beta')
        x = self.x.copy()
        N = x.shape[0]
        L = 4  # TODO: derive the seasonal period instead of hard-coding it
        S_t = np.zeros_like(x)
        S_t[0, :] = x[0, :]
        b_t = np.zeros_like(x)
        I_t = np.zeros_like(x)
        for ii in range(1, N):
            S_t[ii, :] = alpha * (x[ii, :] / I_t[ii - L, :]) + (1.00 - alpha) * (S_t[ii - 1, :] + b_t[ii - 1, :])
            b_t[ii, :] = gamma * (S_t[ii, :] - S_t[ii - 1, :]) + (1.00 - gamma) * b_t[ii - 1, :]
            I_t[ii, :] = beta * (x[ii, :] / S_t[ii, :]) + (1.00 - beta) * I_t[ii - L, :]

    def fit(self, order='1', search_size=10, bounds=(0.000, 0.999), verbose=False, method='starmap'):
        """Grid-search (alpha, gamma) pairs and return the smoother output
        with the minimal last element (the MSE).

        BUG FIX: the 'imap' branch used to be commented out, leaving `comps`
        undefined (NameError) for method='imap' — Python 3's map() is lazy,
        exactly like Python 2's itertools.imap, so it is used here.
        NOTE: the search feeds *pairs* of parameters, so order='1'
        (exponential_single, one parameter) fails under starmap — unchanged
        from the original behaviour.
        """
        self.options['verbose'] = bool(verbose)
        pick_alg = {'1': self.exponential_single,
                    '2': self.exponential_double,
                    '3': self.exponential_triple}
        smoother = pick_alg[order]
        search_range = np.linspace(bounds[0], bounds[1], search_size)
        t0 = time.time()
        if method == 'starmap':
            comps = starmap(smoother, cwr(search_range, 2))
        elif method == 'imap':
            comps = map(smoother, cwr(search_range, 2))
        else:
            raise ValueError("unknown method: %r" % (method,))
        optimal_values = min(comps, key=lambda getter: getter[-1])
        t1 = time.time()
        total_min = int(round(t1 - t0, 3) / 60)
        total_sec = round(t1 - t0, 3) - total_min * 60.0
        print("best alpha: %.3f, best gamma: %.3f, mean squared error: %.3f"
              % (optimal_values[1], optimal_values[2], optimal_values[-1]))
        print("Total time: %.3f minutes and %.3f seconds." % (total_min, total_sec))
        return optimal_values
def test_exp():
    """Smoke test: grid-fit double smoothing on a log/linear pair and plot."""
    # x = np.random.normal(0, 0.1, (1000, 2))
    x = np.vstack((np.logspace(0.01, 0.9, 200), np.linspace(0, 100, 200))).T
    es = ExpSmooth(x)
    # NOTE(review): as written, ExpSmooth.fit only defines `comps` for
    # method='starmap' (the imap branch is commented out), so this call
    # raises NameError — confirm the intended method.
    opt = es.fit(order='2', search_size=10, bounds=(0.000, 0.999), method='imap')
    plt.figure()
    ax2 = plt.subplot()
    ax2.plot(x[:, 0], color='b')
    ax1 = plt.subplot()
    # opt[0] is the smoothed series S_t returned by exponential_double.
    ax1.plot(opt[0][:, 0], color='y', alpha=0.5, lw=5.0)
    plt.show()
def test_doub():
    """Smoke test: run double smoothing directly and overlay it on the data."""
    x = np.vstack((np.logspace(0.01, 0.9, 200), np.linspace(0, 100, 200))).T
    smoother = ExpSmooth(x)
    result = smoother.exponential_double(0.3, 0.5, verbose=True)
    data_axis = plt.subplot()
    data_axis.plot(x[:, 0], color='b')
    smooth_axis = plt.subplot()
    # result[0] is the smoothed series S_t.
    smooth_axis.plot(result[0][:, 0], color='y', alpha=0.5, lw=5.0)
    plt.show()
if __name__ == '__main__':
    # Run the grid-search smoke test by default.
    test_exp()
    # test_doub()
import sys
# NOTE(review): sys.path hacks to reach sibling packages; prefer a proper
# package layout / installable project if this grows.
sys.path.append(r'../engine')
sys.path.append(r'../rule')
import re
import os
from flask import Flask, request
import app.settings as settings
from .scanner import *
# from web.upload import handle_upload
# from web.git import clone
from web.dashboard import (
    home,
    scan_result,
    scans,
    view_file,
)
from web.upload import (
    handle_upload
)
from app import utils
# Flask app serving the project's shared template/static directories.
app = Flask(__name__,
            template_folder='../templates',
            static_folder='../static')
app.config['UPLOAD_FOLDER'] = settings.UPLOAD_FOLDER
@app.template_filter('slugify')
def _slugify(string):
    """Template filter: slugify a string; falsy input yields ''."""
    return utils.slugify(string) if string else ''
@app.template_filter('deslugify')
def _deslugify(string):
    """Template filter: reverse slugification; falsy input yields ''."""
    return utils.deslugify(string) if string else ''
@app.template_filter('relative')
def relative(string):
    """Template filter: strip the path prefix up to and including a
    64-hex-character directory component; unchanged if none is found."""
    if not string:
        return ''
    match = re.search(r'[A-Fa-f0-9]{64}[/\\]', string)
    if match is None:
        return string
    return string.split(match.group(0), 1)[1]
@app.context_processor
def _year():
    """Inject the current year into every template context."""
    return dict(year=utils.year())
@app.template_filter('js_escape')
def _js_escape(string):
    """Template filter: escape a string for embedding in JavaScript."""
    return utils.js_escape(string) if string else ''
@app.route('/', methods=['GET'])
def index():
    """Render the dashboard home page."""
    return home()
@app.route('/upload/', methods=['POST'])
def upload():
    """Accept a zip upload and start a scan on its contents."""
    return handle_upload(app, request)
@app.route('/scan/', methods=['POST'])
def scan_filename():
    """Start a scan for the filename supplied in the POST form."""
    target = request.form['filename']
    return scan(target)
@app.route('/result/', methods=['GET'])
def show_result():
    """Render the scan result for the 'filename' query parameter."""
    target = request.args.get('filename')
    return scan_result(target)
@app.route('/view_file', methods=['POST'])
def view():
    """Serve a single file's contents for the result viewer."""
    return view_file(request)
|
"""
Copyright (c) 2017-2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from math import ceil
import networkx as nx
# Concat infer : N - number of inputs to concat
# axis - dimension number for tensors concatenation
import numpy as np
from mo.graph.graph import Node
from mo.ops.op import Op
class CorrelationOp(Op):
    """Model Optimizer operation for a Caffe-style 'Correlation' layer
    (FlowNet/DispNet-type cross-correlation), providing shape inference."""
    op = 'Correlation'

    def __init__(self, graph: nx.MultiDiGraph, attrs: dict):
        mandatory_props = {
            'type': __class__.op,
            'op': __class__.op,
            'infer': CorrelationOp.corr_infer
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        # Attributes carried over from the original layer definition.
        return [
            'pad',
            'kernel_size',
            'max_displacement',
            'stride_1',
            'stride_2',
            'single_direction',
            'do_abs',
            'correlation_type'
        ]

    @staticmethod
    def corr_infer(node: Node):
        """Infer the NCHW output shape of a Correlation node from its first
        input and the layer attributes."""
        outn = node.out_node(0)
        inn = node.in_node(0)
        outn.shape = np.zeros(4, dtype=int)
        outn.shape[0] = inn.shape[0]  # batch passes through
        # NOTE(review): bottomchannels is unused below — confirm.
        bottomchannels = inn.shape[1]
        # NOTE(review): padding is applied to width only, height is taken
        # as-is — confirm this matches the target Correlation kernel.
        paddedbottomheight = inn.shape[2]
        paddedbottomwidth = inn.shape[3] + 2 * node.pad
        # NOTE(review): true division here — the reference implementation
        # presumably uses integer arithmetic; ceil() below masks the
        # difference for odd kernel sizes only — confirm.
        kernel_radius_ = (node.kernel_size - 1) / 2;
        border_size_ = node.max_displacement + kernel_radius_
        outn.shape[3] = ceil((float)(paddedbottomwidth - border_size_ * 2) / node.stride_1)
        outn.shape[2] = ceil((float)(paddedbottomheight - kernel_radius_ * 2) / node.stride_1)
        # Displacement grid: stride_2 steps within +/- max_displacement.
        neighborhood_grid_radius_ = node.max_displacement / node.stride_2
        if node.single_direction != 0:
            neighborhood_grid_width_ = neighborhood_grid_radius_ + 1
        else:
            neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1
        # One output channel per displacement pair in the grid.
        outn.shape[1] = neighborhood_grid_width_ * neighborhood_grid_width_
|
from django.views.generic import (
ListView,
CreateView,
DetailView,
UpdateView,
DeleteView
)
from django.urls import reverse, reverse_lazy
from django.db.models import Q
from django.utils.http import urlencode
from webapp.models import Product, Category
from webapp.forms import ProductForm, SearchForm
from django.contrib.auth.mixins import PermissionRequiredMixin
class IndexView(ListView):
    """Paginated product list with name/description search; products with a
    zero balance are hidden."""
    template_name = 'products/index.html'
    model = Product
    context_object_name = 'products'
    ordering = ('category', 'name')
    paginate_by = 5
    paginate_orphans = 2

    def get(self, request, **kwargs):
        # Bind the search form before the regular ListView flow runs.
        self.form = SearchForm(request.GET)
        self.search_data = self.get_search_data()
        return super().get(request, **kwargs)

    def get_search_data(self):
        """Return the validated search string, or None."""
        if not self.form.is_valid():
            return None
        return self.form.cleaned_data['search_value']

    def get_queryset(self):
        qs = super().get_queryset()
        if self.search_data:
            qs = qs.filter(Q(name__icontains=self.search_data)
                           | Q(description__icontains=self.search_data))
        return qs.exclude(balance=0)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['search_form'] = self.form
        context['categories'] = Category.objects.all()
        if self.search_data:
            # Preserve the query string across pagination links.
            context['query'] = urlencode({'search_value': self.search_data})
        return context
class ProductByCategory(ListView):
    """Products of one category (URL kwarg 'selected_category'), searchable
    and paginated; zero-balance products are hidden."""
    template_name = 'products/by_category.html'
    model = Product
    context_object_name = 'products_by_category'
    ordering = ('name')  # a plain string; Django accepts str or sequence
    paginate_by = 5
    paginate_orphans = 2

    def get(self, request, **kwargs):
        # Bind the search form before the regular ListView flow runs.
        self.form = SearchForm(request.GET)
        self.search_data = self.get_search_data()
        return super().get(request, **kwargs)

    def get_search_data(self):
        """Return the validated search string, or None."""
        if not self.form.is_valid():
            return None
        return self.form.cleaned_data['search_value']

    def get_queryset(self):
        qs = super().get_queryset()
        if self.search_data:
            qs = qs.filter(Q(name__icontains=self.search_data)
                           | Q(description__icontains=self.search_data))
        selected = self.kwargs.get('selected_category')
        return qs.exclude(balance=0).filter(category__name=selected)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['form'] = self.form
        if self.search_data:
            # Preserve the query string across pagination links.
            context['query'] = urlencode({'search_value': self.search_data})
        return context
class ProductView(DetailView):
    """Detail page for a single product."""
    model = Product
    template_name = 'products/view.html'
class CreateProductView(PermissionRequiredMixin, CreateView):
    # Product creation form; requires the 'webapp.add_product' permission.
    template_name = 'products/create.html'
    form_class = ProductForm
    model = Product
    success_url = reverse_lazy('product-list')
    permission_required = 'webapp.add_product'
class ProductUpdateView(PermissionRequiredMixin, UpdateView):
    # Edit form for an existing product; requires 'webapp.change_product'.
    form_class = ProductForm
    model = Product
    template_name = 'products/update.html'
    context_object_name = 'product'
    permission_required = 'webapp.change_product'
    def get_success_url(self):
        # Redirect back to the detail page of the product just edited.
        return reverse('product-view', kwargs={'pk': self.kwargs.get('pk')})
class ProductDeleteView(PermissionRequiredMixin, DeleteView):
    # Confirmation + delete view; requires 'webapp.delete_product'.
    model = Product
    template_name = 'products/delete.html'
    context_object_name = 'product'
    success_url = reverse_lazy('product-list')
    permission_required = 'webapp.delete_product'
|
import unittest
from LAB7 import *
class EncryptTestCase(unittest.TestCase):
    # Tests for 'LAB7'
    # NOTE(review): the class name says "Encrypt" but every test covers
    # recursive_triangle -- presumably copied from another suite; confirm.
    # Expected strings are taken as the LAB7 spec (not visible here).
    def test_recursive_triangle_1(self):
        # 2 rows of a width-4 triangle.
        self.assertEqual(recursive_triangle(2,4), ' **\n *')
    def test_recursive_triangle_5(self):
        self.assertEqual(recursive_triangle(5,5), '*****\n ****\n ***\n **\n *')
    def test_recursive_triangle_4(self):
        self.assertEqual(recursive_triangle(4,4), '****\n ***\n **\n *')
    def test_k_greater_than_n(self):
        # k > n case: first row is wider than n.
        self.assertEqual(recursive_triangle(5,4), '*****\n****\n ***\n **\n *')
# exit=False keeps an interactive session alive after the run.
if __name__ == '__main__':unittest.main(exit=False)
# implement caesar cipher
def caesar_encrypt(plaintext, k):
    """Return *plaintext* with each letter shifted by *k* (Caesar cipher).

    Letters rotate within their own case (wrapping mod 26, negative k
    allowed); every non-alphabetic character passes through unchanged.

    Improvements over the original script: the cipher is a pure, testable
    function; the builtin name ``ascii`` is no longer shadowed; the
    linear scan through the alphabet is replaced by ord() arithmetic.
    """
    out = []
    for ch in plaintext:
        if "A" <= ch <= "Z":
            out.append(chr((ord(ch) - ord("A") + k) % 26 + ord("A")))
        elif "a" <= ch <= "z":
            out.append(chr((ord(ch) - ord("a") + k) % 26 + ord("a")))
        else:
            out.append(ch)
    return "".join(out)

if __name__ == "__main__":
    # Runtime-only dependencies kept out of module scope so the cipher can
    # be imported without cs50 installed.
    import sys
    import cs50
    if len(sys.argv) != 2:
        print("Usage: python caesar.py k")
        sys.exit(1)
    # Prompt first, then parse k -- same order as the original script.
    uin = cs50.get_string("plaintext: ")
    print("ciphertext:", caesar_encrypt(uin, int(sys.argv[1])))
"""Module for scanning system values"""
import shutil
import platform
from uuid import getnode as get_mac
import getpass
import socket
import time
import threading
from datetime import datetime
import psutil
from custom_logger import logger
class SystemValue:
    """Class for getting system values"""
    def __init__(self):
        pass
    def cpu(self):
        """Get CPU usage"""
        # Blocking 1-second sample of overall CPU utilisation (percent).
        cpu = psutil.cpu_percent(interval=1)
        logger.debug('CPU: %s', str(cpu))
        return cpu
    def ram(self):
        """Get RAM usage"""
        # NOTE(review): this is the percentage of RAM still AVAILABLE, not
        # the percentage in use -- the method name is misleading.  The
        # _check_less alert in SystemScanner relies on this semantics.
        ram = psutil.virtual_memory().available * 100 / psutil.virtual_memory().total
        logger.debug('RAM: %s', str(int(ram)))
        return int(ram)
    def hdd(self):
        """Get HDD usage"""
        # Returns the FREE space (GiB) on the root filesystem.
        total, used, free = shutil.disk_usage("/")
        logger.debug('Total HDD: {} GiB'.format((total // (2**30))))
        logger.debug("Used HDD: {} GiB".format(used // (2**30)))
        logger.debug('Free HDD: {} GiB'.format((free // (2**30))))
        return free // (2**30)
    def sockets(self):
        """Get sockets count"""
        # Count of all system-wide socket connections.
        result = psutil.net_connections()
        logger.debug('SOCKETS: %s', str(len(result)))
        return len(result)
    def _lan(self):
        # Endless 1-second loop printing cumulative LAN traffic; intended
        # to run in its own thread (never returns).
        old_value = 0
        while True:
            new_value = psutil.net_io_counters().bytes_sent + psutil.net_io_counters().bytes_recv
            if old_value:
                self.lan(new_value)
            old_value = new_value
            time.sleep(1)
    def convert_to_gbit(self, value):
        """Convert bits to Gbit"""
        # value is in BYTES: /1024^3 converts to GiB, *8 to Gbit.
        return value/1024./1024./1024.*8
    def lan(self, value):
        """Print lan data"""
        print("%0.3f" % self.convert_to_gbit(value))
class SystemScanner:
    """Class for scanning system for System Values"""
    def __init__(self, func, cpu=None, ram=None, hdd=None, sockets=None):
        # func: alert callback invoked as func(check_type, measured_value).
        # cpu / sockets: alert when the sample EXCEEDS the threshold.
        # ram / hdd: alert when the sample DROPS BELOW the threshold
        #            (ram is percent available, hdd is free GiB).
        # A threshold left at None (or 0) disables that check.
        self._thread = None
        self.stop_thread = False
        self.system_value = SystemValue()
        self.callback = func
        self.cpu_value = cpu
        self.ram_value = ram
        self.hdd_value = hdd
        self.sockets_value = sockets
        # Count of consecutive out-of-range samples per metric; an alert
        # only fires after 4 consecutive hits (see _check).
        self.time_period = {}
        self.time_period['CPU'] = 0
        self.time_period['RAM'] = 0
        self.time_period['HDD'] = 0
        self.time_period['SOCKETS'] = 0
        self.info()
    def _check(self, value, user_value, check_type, less=False):
        # value: configured threshold; user_value: fresh measurement.
        # less=True inverts the comparison (alert on values below threshold).
        if value:
            condition = None
            if less:
                condition = user_value < value
            else:
                condition = user_value > value
            if condition:
                self.time_period[check_type] += 1
                # 4 consecutive bad samples (~16 s at the 4 s scan period)
                # debounce transient spikes before alerting.
                if self.time_period[check_type] >= 4:
                    logger.warning(str(check_type) + ' alert!!!!')
                    self.callback(check_type, user_value)
                    self.time_period[check_type] = 0
            else:
                # Any good sample resets the streak.
                self.time_period[check_type] = 0
    def _check_less(self, value, user_value, check_type):
        # Convenience wrapper: alert when the sample drops BELOW the threshold.
        self._check(value, user_value, check_type, True)
    def _thread_func(self):
        # Background polling loop; only configured metrics are measured.
        while not self.stop_thread:
            cpu = None
            ram = None
            hdd = None
            sockets = None
            if self.cpu_value:
                cpu = self.system_value.cpu()
            if self.ram_value:
                ram = self.system_value.ram()
            if self.hdd_value:
                hdd = self.system_value.hdd()
            if self.sockets_value:
                sockets = self.system_value.sockets()
            self._check(self.cpu_value, cpu, 'CPU')
            self._check_less(self.ram_value, ram, 'RAM')
            self._check_less(self.hdd_value, hdd, 'HDD')
            self._check(self.sockets_value, sockets, 'SOCKETS')
            time.sleep(4)
    def scan(self):
        """Start the background scanning thread."""
        self._thread = threading.Thread(target=self._thread_func)
        self._thread.start()
        logger.info('Start SystemScanner')
    def stop(self):
        """Ask the scanning thread to exit after its current 4 s cycle."""
        self.stop_thread = True
        logger.info('Stopping SystemScanner')
    def info(self):
        # One-off log of host identity: user, IP, MAC, OS and boot time.
        logger.info('USERNAME: %s', getpass.getuser())
        logger.info('IP address: %s', socket.gethostbyname(socket.gethostname()))
        logger.info('MAC address: ' + ':'.join(("%012X" % get_mac())[i:i+2] for i in range(0, 12, 2)))
        logger.info('OS: %s', platform.platform())
        boot_time = str(datetime.fromtimestamp(psutil.boot_time()).strftime('%Y-%m-%d %H:%M:%S'))
        logger.info('Server boot time: %s', boot_time)
if __name__ == '__main__':
    # Manual smoke test: alert when CPU exceeds 10 % or available RAM
    # drops below 70 %.  The scanner thread is left running (stop() is
    # commented out), so the process must be killed by hand.
    obj = SystemValue()
    # obj.cpu()
    # obj.ram()
    # obj.hdd()
    # obj.sockets()
    # obj._lan()
    def alert_func(alert_type, value):
        # Demo callback: just echo the alert to stdout.
        print('alert happened: ', alert_type, value)
    obj = SystemScanner(alert_func, ram=70, cpu=10)
    obj.scan()
    # time.sleep(10)
    # obj.stop()
|
import hashlib
import time
from flask import Blueprint, render_template, redirect, url_for, flash, session, request
from flask_login import current_user, login_user, logout_user
from aat_main.forms.auth_forms import LoginForm
from aat_main.models.account_model import AccountModel
from aat_main.utils.pillow_helper import ImageCaptchaHelper
from aat_main.utils.random_helper import RandomHelper
from aat_main.utils.smtp_helper import EmailHelper
auth_bp = Blueprint('auth_bp', __name__, url_prefix='/auth')
@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
    """Render and process the login form.

    Already-authenticated users are redirected home.  On success the user
    is remembered and redirected to ?next=... (if present) or home.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index_bp.home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = AccountModel.search_account_by_email(form.email.data)
        # if form.login_captcha.data.lower().strip() != session.get('login_captcha'):
        #     flash('Fail (Server) : Incorrect login captcha')
        #     return render_template('login.html', title='Log In', form=form)
        if not user:
            flash('Fail (Server) : Account does not exist')
            return render_template('login.html', title='Log In', form=form)
        # NOTE(review): plaintext comparison, while login_password() stores
        # an MD5 digest -- after a password reset this check cannot match
        # what the user typed; confirm the intended storage scheme.
        elif form.password.data != user.password:
            flash('Fail (Server) : Password is wrong')
            return render_template('login.html', title='Log In', form=form)
        login_user(user, remember=True)
        # Timestamps only feed the commented-out daily-credit logic below.
        create_time = time.strftime('%Y-%m-%d %H:%M:%S')
        start_time = time.strftime('%Y-%m-%d 00:00:00')
        end_time = time.strftime('%Y-%m-%d 23:59:59')
        # if len(CreditModel.check_credit_by_time(current_user.email, 'login', start_time, end_time)) == 0:
        #     CreditModel.insert_credit(current_user.email, 'login', 'today\'s first login', 0, 5, create_time)
        #     AccountModel().update_credit(current_user.email, 5)
        #     flash('Success (Server) : Today\'s first login, credit +5')
        # else:
        #     flash('Success (Server) : Login successful')
        # NOTE(review): 'next' is redirected to unvalidated -- possible
        # open redirect; consider restricting to same-host URLs.
        if next_url := request.args.get('next'):
            return redirect(next_url)
        return redirect(url_for('index_bp.home'))
    return render_template('login.html', title='Log In', form=form)
@auth_bp.route('/logout/')
def logout():
    """Log the current user out and return to the login page."""
    logout_user()
    return redirect(url_for('auth_bp.login'))
@auth_bp.route('/login/captcha/')
def login_captcha():
    """Generate an image captcha, remember its code, and return the image.

    Returns the base64-encoded image on success, or a plain error string
    on failure (the response contract expected by the front end).
    """
    try:
        code, base64_str = ImageCaptchaHelper().get_image_captcha()
        # Stored lowercase so the later comparison is case-insensitive.
        session['login_captcha'] = code.lower()
        return base64_str
    except Exception:
        # Fixed: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        return 'Fail (Server) : Login captcha generate failed'
@auth_bp.route('/login/password/', methods=['POST'])
def login_password():
    """Reset an account's password and e-mail the new one to the user.

    Generates a random 5-character password, e-mails it in plaintext, then
    stores its MD5 digest on the account.  Returns a plain status string
    for the front end.
    """
    try:
        email = request.form.get('email').strip()
        user = AccountModel.search_account_by_email(email)
        if not user:
            return 'Fail (Server) : Account is not existed'
        password = RandomHelper.generate_code(5)
        subject = 'Reset password for AAT'
        content = f"<br/>Welcome to login AAT, your password is reset as <span style='color:orange;'>{password}</span>"
        receiver_email = email
        sender_name = 'AAT team'
        sender_email = 'aat@cs.cf.ac.uk'
        EmailHelper.send_email(subject, content, receiver_email, sender_name, sender_email)
        # NOTE(review): MD5 is not a password hash -- prefer a salted KDF
        # (e.g. werkzeug.generate_password_hash).  Kept as-is because the
        # login() comparison must agree with whatever is stored.
        password = hashlib.md5(password.encode()).hexdigest()
        update_time = time.strftime('%Y-%m-%d %H:%M:%S')
        AccountModel().update_account(email, user.id, password, user.name, user.role, user.avatar, user.profile, update_time)
        return 'Success (Server) : Reset password successful'
    except Exception:
        # Fixed: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        return 'Fail (Server) : Email send failed'
|
import os
import pytest
from azureml.core import Model
from azure_utils.configuration.notebook_config import project_configuration_file
from azure_utils.configuration.project_configuration import ProjectConfiguration
from azure_utils.machine_learning.contexts.realtime_score_context import (
RealtimeScoreAKSContext,
MLRealtimeScore,
DeepRealtimeScore,
)
from azure_utils.machine_learning.contexts.workspace_contexts import WorkspaceContext
from tests.mocks.azureml.azureml_mocks import MockMLRealtimeScore, MockDeepRealtimeScore
DEEP_TRAIN_PY = """
import keras.backend as K
from keras import initializers
from keras.engine import Layer, InputSpec
from keras.engine.topology import get_source_inputs
from keras.layers import Activation
from keras.layers import AveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import ZeroPadding2D
from keras.layers import add
from keras.models import Model
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
WEIGHTS_PATH = "https://github.com/adamcasson/resnet152/releases/download/v0.1/resnet152_weights_tf.h5"
WEIGHTS_PATH_NO_TOP = "https://github.com/adamcasson/resnet152/releases/download/v0.1/resnet152_weights_tf_notop.h5"
def _obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None):
if weights != 'imagenet' and input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[0]) + ' input channels.')
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[-1]) + ' input channels.')
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError('When setting`include_top=True` '
'and loading `imagenet` weights, '
'`input_shape` should be ' +
str(default_shape) + '.')
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
'`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) + '; got '
'`input_shape=' + str(input_shape) + '`')
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
'`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) + '; got '
'`input_shape=' + str(input_shape) + '`')
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
'Got `input_shape=' + str(input_shape) + '`')
return input_shape
class Scale(Layer):
def __init__(
self,
weights=None,
axis=-1,
momentum=0.9,
beta_init="zero",
gamma_init="one",
**kwargs
):
self.momentum = momentum
self.axis = axis
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.initial_weights = weights
super(Scale, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (int(input_shape[self.axis]),)
self.gamma = K.variable(self.gamma_init(shape), name="%s_gamma" % self.name)
self.beta = K.variable(self.beta_init(shape), name="%s_beta" % self.name)
self.trainable_weights = [self.gamma, self.beta]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def call(self, x, mask=None):
input_shape = self.input_spec[0].shape
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
out = K.reshape(self.gamma, broadcast_shape) * x + K.reshape(
self.beta, broadcast_shape
)
return out
def get_config(self):
config = {"momentum": self.momentum, "axis": self.axis}
base_config = super(Scale, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def identity_block(input_tensor, kernel_size, filters, stage, block):
eps = 1.1e-5
if K.common.image_dim_ordering() == "tf":
bn_axis = 3
else:
bn_axis = 1
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
scale_name_base = "scale" + str(stage) + block + "_branch"
x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + "2a", use_bias=False)(
input_tensor
)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "2a")(x)
x = Scale(axis=bn_axis, name=scale_name_base + "2a")(x)
x = Activation("relu", name=conv_name_base + "2a_relu")(x)
x = ZeroPadding2D((1, 1), name=conv_name_base + "2b_zeropadding")(x)
x = Conv2D(
nb_filter2,
(kernel_size, kernel_size),
name=conv_name_base + "2b",
use_bias=False,
)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "2b")(x)
x = Scale(axis=bn_axis, name=scale_name_base + "2b")(x)
x = Activation("relu", name=conv_name_base + "2b_relu")(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + "2c", use_bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "2c")(x)
x = Scale(axis=bn_axis, name=scale_name_base + "2c")(x)
x = add([x, input_tensor], name="res" + str(stage) + block)
x = Activation("relu", name="res" + str(stage) + block + "_relu")(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
eps = 1.1e-5
if K.common.image_dim_ordering() == "tf":
bn_axis = 3
else:
bn_axis = 1
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
scale_name_base = "scale" + str(stage) + block + "_branch"
x = Conv2D(
nb_filter1, (1, 1), strides=strides, name=conv_name_base + "2a", use_bias=False
)(input_tensor)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "2a")(x)
x = Scale(axis=bn_axis, name=scale_name_base + "2a")(x)
x = Activation("relu", name=conv_name_base + "2a_relu")(x)
x = ZeroPadding2D((1, 1), name=conv_name_base + "2b_zeropadding")(x)
x = Conv2D(
nb_filter2,
(kernel_size, kernel_size),
name=conv_name_base + "2b",
use_bias=False,
)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "2b")(x)
x = Scale(axis=bn_axis, name=scale_name_base + "2b")(x)
x = Activation("relu", name=conv_name_base + "2b_relu")(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + "2c", use_bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "2c")(x)
x = Scale(axis=bn_axis, name=scale_name_base + "2c")(x)
shortcut = Conv2D(
nb_filter3, (1, 1), strides=strides, name=conv_name_base + "1", use_bias=False
)(input_tensor)
shortcut = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + "1")(
shortcut
)
shortcut = Scale(axis=bn_axis, name=scale_name_base + "1")(shortcut)
x = add([x, shortcut], name="res" + str(stage) + block)
x = Activation("relu", name="res" + str(stage) + block + "_relu")(x)
return x
def ResNet152(
include_top=True,
weights=None,
input_tensor=None,
input_shape=None,
large_input=False,
pooling=None,
classes=1000,
):
if weights not in {"imagenet", None}:
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization) or `imagenet` "
"(pre-training on ImageNet)."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights` as imagenet with `include_top`"
" as true, `classes` should be 1000"
)
eps = 1.1e-5
if large_input:
img_size = 448
else:
img_size = 224
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=img_size,
min_size=197,
data_format=K.image_data_format(),
require_flatten=include_top,
)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# handle dimension ordering for different backends
if K.common.image_dim_ordering() == "tf":
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3), name="conv1_zeropadding")(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1", use_bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name="bn_conv1")(x)
x = Scale(axis=bn_axis, name="scale_conv1")(x)
x = Activation("relu", name="conv1_relu")(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name="pool1")(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block="a", strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block="b")
x = identity_block(x, 3, [64, 64, 256], stage=2, block="c")
x = conv_block(x, 3, [128, 128, 512], stage=3, block="a")
for i in range(1, 8):
x = identity_block(x, 3, [128, 128, 512], stage=3, block="b" + str(i))
x = conv_block(x, 3, [256, 256, 1024], stage=4, block="a")
for i in range(1, 36):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block="b" + str(i))
x = conv_block(x, 3, [512, 512, 2048], stage=5, block="a")
x = identity_block(x, 3, [512, 512, 2048], stage=5, block="b")
x = identity_block(x, 3, [512, 512, 2048], stage=5, block="c")
if large_input:
x = AveragePooling2D((14, 14), name="avg_pool")(x)
else:
x = AveragePooling2D((7, 7), name="avg_pool")(x)
# include classification layer by default, not included for feature extraction
if include_top:
x = Flatten()(x)
x = Dense(classes, activation="softmax", name="fc1000")(x)
else:
if pooling == "avg":
x = GlobalAveragePooling2D()(x)
elif pooling == "max":
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name="resnet152")
# load weights
if weights == "imagenet":
if include_top:
weights_path = get_file(
"resnet152_weights_tf.h5",
WEIGHTS_PATH,
cache_subdir="models",
md5_hash="cdb18a2158b88e392c0905d47dcef965",
)
else:
weights_path = get_file(
"resnet152_weights_tf_notop.h5",
WEIGHTS_PATH_NO_TOP,
cache_subdir="models",
md5_hash="4a90dcdafacbd17d772af1fb44fc2660",
)
model.load_weights(weights_path, by_name=True)
if K.backend() == "theano":
layer_utils.convert_all_kernels_in_model(model)
if include_top:
maxpool = model.get_layer(name="avg_pool")
shape = maxpool.output_shape[1:]
dense = model.get_layer(name="fc1000")
layer_utils.convert_dense_weights_data_format(
dense, shape, "channels_first"
)
if K.image_data_format() == "channels_first" and K.backend() == "tensorflow":
warnings.warn(
"You are using the TensorFlow backend, yet you "
"are using the Theano "
"image data format convention "
'(`image_data_format="channels_first"`). '
"For best performance, set "
'`image_data_format="channels_last"` in '
"your Keras config "
"at ~/.keras/keras.json."
)
return model
if __name__ == "__main__":
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import os
os.makedirs("outputs", exist_ok=True)
model = ResNet152(include_top=False, input_shape=(200, 200, 3), pooling="avg", weights="imagenet")
model.save_weights("outputs/model.pkl")
"""
class MockWorkspaceCreationTests:
    """Workspace Creation Test Suite"""
    # Abstract base: concrete subclasses supply the context_type,
    # files_for_testing and realtime_score_context fixtures.
    @pytest.fixture(scope="class")
    def context_type(self):
        """
        Abstract Workspace Type Fixture - Update with Workspace Context to test
        """
        raise NotImplementedError
    @pytest.fixture(scope="class")
    def files_for_testing(self):
        """
        Abstract fixture: dict of train/score script paths for the context.
        :return:
        """
        raise NotImplementedError
    @pytest.fixture(scope="class")
    def realtime_score_context(
        self, context_type: RealtimeScoreAKSContext, files_for_testing
    ) -> RealtimeScoreAKSContext:
        """
        Get or Create Context for Testing
        :param context_type: impl of WorkspaceContext
        :param test_files: Dict of input Files
        :return:
        """
        raise NotImplementedError
    def test_mock_get_or_create(
        self,
        realtime_score_context: RealtimeScoreAKSContext,
        context_type: WorkspaceContext,
    ):
        """
        Assert Context Type and Creation
        :param realtime_score_context: Testing Context
        :param context_type: Expected Context Type
        """
        assert realtime_score_context
        assert hasattr(realtime_score_context, "_subscription_id")
        assert hasattr(realtime_score_context, "_resource_group")
        assert hasattr(realtime_score_context, "_workspace_name")
        assert hasattr(realtime_score_context, "project_configuration_file")
        assert hasattr(realtime_score_context, "score_py")
        assert hasattr(realtime_score_context, "train_py")
    def test_mock_get_or_create_model(
        self, monkeypatch, realtime_score_context: MLRealtimeScore
    ):
        """
        Model creation with all Azure round-trips stubbed out.
        :param realtime_score_context: Testing Context
        """
        # NOTE(review): @staticmethod on a local function relies on
        # staticmethod objects being directly callable (Python 3.10+);
        # confirm the supported interpreter versions.
        @staticmethod
        def mockreturn_2(
            workspace, name, id, tags, properties, version, model_framework, run_id
        ):
            # Minimal Model payload accepted by the stubbed _initialize.
            return {
                "name": "mock",
                "id": "1",
                "createdTime": "11/8/2020",
                "description": "",
                "mimeType": "a",
                "properties": "",
                "unpack": "",
                "url": "localhost",
                "version": 1,
                "experimentName": "expName",
                "runId": 1,
                "datasets": None,
                "createdBy": "mock",
                "framework": "python",
                "frameworkVersion": "1",
            }
        def mock_get_model_path_remote(model_name, version, workspace):
            # Pretend the model artifact lives in the working directory.
            return "."
        def mock_initialize(self, workspace, obj_dict):
            pass
        monkeypatch.setattr(Model, "_get", mockreturn_2)
        monkeypatch.setattr(Model, "_get_model_path_remote", mock_get_model_path_remote)
        monkeypatch.setattr(Model, "_initialize", mock_initialize)
        realtime_score_context.prepare_data(".")
        assert realtime_score_context.get_or_create_model()
        assert os.path.isfile("model.pkl")
    def test_mock_get_compute_targets(
        self, realtime_score_context: RealtimeScoreAKSContext
    ):
        """
        :param realtime_score_context: Testing Context
        """
        assert realtime_score_context.compute_targets
    def test_mock_get_webservices(
        self, realtime_score_context: RealtimeScoreAKSContext
    ):
        """
        :param realtime_score_context: Testing Context
        """
        assert realtime_score_context.webservices
    @property
    def models(self):
        """Return a dictionary where the key is model name, and value is a :class:`azureml.core.model.Model` object.
        Raises a :class:`azureml.exceptions.WebserviceException` if there was a problem interacting with
        model management service.
        :return: A dictionary of models.
        :rtype: dict[str, azureml.core.Model]
        :raises: azureml.exceptions.WebserviceException
        """
        # NOTE(review): always returns {} despite the docstring -- looks
        # like a mock override of Workspace.models; confirm.
        return {}
class TestMockDeployRTS(MockWorkspaceCreationTests):
    # Concrete suite for the (non-deep) ML realtime-score context.
    @pytest.fixture(scope="class")
    def context_type(self):
        """
        :return: the context class under test
        """
        return MLRealtimeScore
    @pytest.fixture(scope="class")
    def files_for_testing(self):
        # Training / scoring scripts this context expects.
        return {"train_py": "create_model.py", "score_py": "driver.py"}
    @pytest.fixture
    def realtime_score_context(
        self, monkeypatch, context_type: MLRealtimeScore, files_for_testing
    ) -> MLRealtimeScore:
        """
        Get or Create Context for Testing
        :param files_for_testing:
        :param context_type: impl of WorkspaceContext
        :return:
        """
        # NOTE(review): overrides the class-scoped base fixture with
        # function scope (no scope argument) -- confirm that is intended.
        def mockreturn(train_py, score_py):
            # Build a mock workspace from local project configuration
            # instead of hitting Azure.
            project_configuration = ProjectConfiguration(project_configuration_file)
            assert project_configuration.has_value("subscription_id")
            assert project_configuration.has_value("resource_group")
            assert project_configuration.has_value("workspace_name")
            ws = MockMLRealtimeScore(
                subscription_id=project_configuration.get_value("subscription_id"),
                resource_group=project_configuration.get_value("resource_group"),
                workspace_name=project_configuration.get_value("workspace_name"),
                configuration_file=project_configuration_file,
                score_py=score_py,
                train_py=train_py,
            )
            return ws
        monkeypatch.setattr(context_type, "get_or_create_workspace", mockreturn)
        return context_type.get_or_create_workspace(
            train_py=files_for_testing["train_py"],
            score_py=files_for_testing["score_py"],
        )
class TestMockDeployDeepRTS(MockWorkspaceCreationTests):
    # Concrete suite for the deep-learning realtime-score context.
    @pytest.fixture(scope="class")
    def context_type(self):
        """
        :return: the context class under test
        """
        return DeepRealtimeScore
    @pytest.fixture(scope="class")
    def files_for_testing(self):
        # Training / scoring scripts this context expects.
        return {"train_py": "create_deep_model.py", "score_py": "deep_driver.py"}
    @pytest.fixture
    def realtime_score_context(
        self, monkeypatch, context_type: MLRealtimeScore, files_for_testing
    ) -> DeepRealtimeScore:
        """
        Get or Create Context for Testing
        :param files_for_testing:
        :param context_type: impl of WorkspaceContext
        :return:
        """
        def mockreturn(train_py, score_py):
            # Build a mock deep workspace from local project configuration
            # instead of hitting Azure.
            project_configuration = ProjectConfiguration(project_configuration_file)
            assert project_configuration.has_value("subscription_id")
            assert project_configuration.has_value("resource_group")
            assert project_configuration.has_value("workspace_name")
            ws = MockDeepRealtimeScore(
                project_configuration.get_value("subscription_id"),
                project_configuration.get_value("resource_group"),
                project_configuration.get_value("workspace_name"),
                project_configuration_file,
                score_py=score_py,
                train_py=train_py,
            )
            return ws
        monkeypatch.setattr(context_type, "get_or_create_workspace", mockreturn)
        return context_type.get_or_create_workspace(
            train_py=files_for_testing["train_py"],
            score_py=files_for_testing["score_py"],
        )
    def test_mock_get_or_create_model(
        self, monkeypatch, realtime_score_context: DeepRealtimeScore
    ):
        """
        Deep-model variant: materialises the embedded training script
        (DEEP_TRAIN_PY) before stubbing the Azure Model round-trips.
        :param realtime_score_context: Testing Context
        """
        if not os.path.isfile("script/train.py"):
            os.makedirs("script", exist_ok=True)
            create_model_py = DEEP_TRAIN_PY
            with open("script/train.py", "w") as file:
                file.write(create_model_py)
        assert os.path.isfile("script/train.py")
        # NOTE(review): @staticmethod on a local function relies on
        # staticmethod objects being directly callable (Python 3.10+).
        @staticmethod
        def mockreturn_2(
            workspace, name, id, tags, properties, version, model_framework, run_id
        ):
            # Minimal Model payload accepted by the stubbed _initialize.
            return {
                "name": "mock",
                "id": "1",
                "createdTime": "11/8/2020",
                "description": "",
                "mimeType": "a",
                "properties": "",
                "unpack": "",
                "url": "localhost",
                "version": 1,
                "experimentName": "expName",
                "runId": 1,
                "datasets": None,
                "createdBy": "mock",
                "framework": "python",
                "frameworkVersion": "1",
            }
        def mock_get_model_path_remote(model_name, version, workspace):
            return "."
        def mock_initialize(self, workspace, obj_dict):
            pass
        monkeypatch.setattr(Model, "_get", mockreturn_2)
        monkeypatch.setattr(Model, "_get_model_path_remote", mock_get_model_path_remote)
        monkeypatch.setattr(Model, "_initialize", mock_initialize)
        assert realtime_score_context.get_or_create_model()
        assert os.path.isfile("outputs/model.pkl")
|
#---------Archana Bahuguna 6th Jan 14 -------------------------
# To check how a functions arguments are passed when it is
# passed as an arg to another fn
#--------- Incomplete ------
def fn2call(n):
    """Print *n*, then increment the local binding and return None.

    The increment only rebinds the local name -- the caller's integer is
    unaffected (ints are immutable).
    Fixed: Python 2 print statements converted to Python 3 print() calls,
    matching the rest of the file.
    """
    print('From inside fn2call')
    print(n)
    n += 1
    return None
def fn1(f):
    """Accept a function object *f* without calling it (demo is marked
    incomplete by the original author).

    Fixed: stray backtick after ``return None`` (a syntax error) and the
    Python 2 print statement.
    """
    print('From inside fn1')
    return None
|
# Robosample tools
import numpy as np
def func0(series):
    """Ratio of the prefix mean to the remaining-tail mean at each index.

    Index 0 divides by the mean of an empty prefix, so the first entry is
    NaN (numpy emits a RuntimeWarning), exactly as before.
    """
    ratios = [np.mean(series[:cut]) / np.mean(series[cut:])
              for cut in range(series.size)]
    return np.asarray(ratios)
#
def func1(series):
    """Ratio of the prefix mean to the overall series mean at each index.

    Index 0 averages an empty prefix, so the first entry is NaN (numpy
    emits a RuntimeWarning), exactly as before.
    """
    whole_mean = np.mean(series)
    scaled = [np.mean(series[:cut]) / whole_mean for cut in range(series.size)]
    return np.asarray(scaled)
#
def func2(series):
    """Log-sum-exp of each prefix of *series*.

    The empty prefix at index 0 yields log(0) = -inf (numpy emits a
    RuntimeWarning), exactly as before.
    """
    lse = [np.log(np.exp(series[:cut]).sum()) for cut in range(series.size)]
    return np.asarray(lse)
#
|
import os
import sys
import gc
import numpy as np
import pandas as pd
from datetime import datetime
pd.options.display.max_columns = 1000
pd.options.display.max_rows = 1000
pd.options.mode.use_inf_as_na = True
pd.options.display.float_format = '{:.3f}'.format
float_formatter = lambda x: "%.4f" % x
np.set_printoptions(formatter={'float_kind':float_formatter})
from .config import cfg
def get_logger(name):
    """Configure logging from cfg and attach a timestamped file handler.

    The log file is named ``mltools_log_<name>_<MMDD-HHMM>.txt``.
    NOTE(review): returns the ROOT logger (getLogger() with no argument);
    *name* only affects the file name -- confirm that is intended.
    """
    from logging import config, getLogger, FileHandler, Formatter
    config.dictConfig(cfg['logger_config'])
    logger = getLogger()
    t = datetime.now().strftime('%m%d-%H%M')
    fh = FileHandler('mltools_log_' + name + '_' + t + '.txt', mode='w')
    formatter = Formatter('%(asctime)s: %(message)s', datefmt='%H:%M:%S')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    return logger
def load_hparams(file_in):
    """Parse a YAML hyper-parameter file and return the resulting object.

    NOTE(review): yaml.Loader can construct arbitrary Python objects; if
    *file_in* can ever come from an untrusted source, switch to
    yaml.safe_load.
    """
    import yaml
    with open(file_in) as f:
        hparams = f.read()
    return yaml.load(hparams, Loader=yaml.Loader)
def cpu_count():
    """Return the number of CPUs on this machine (multiprocessing's count)."""
    import multiprocessing
    return multiprocessing.cpu_count()
def map_fn(func, args, n_processes=None, backend=None, **kwargs):
    '''Parallel map function. Allows selection of the backend multiprocessing
    engine to accomodate platform and resources available.
    Args:
        func: callable to execute
        args: iterable with args to pass to func
        n_processes: number of jobs (threads or processes, depending on the value
            of backend). If None, uses cfg['n_cores_default'] from .config
        backend: string; if None, uses cfg['default_map_fn_backend']
            'mp': multiprocessing.Pool
            'tpe': ThreadPoolExecutor
            'ppe': ProcessPoolExecutor
            'joblib-threads': joblib, prefer='threads'
            'joblib-processes': joblib, prefer='processes' (loky backend)
            'dask': dask.delayed
            'dask-dist': dask.distributed.Client
    Returns: list of outputs ('ppe' returns the executor's map iterator,
        as before)
    Raises:
        ValueError: for an unrecognised backend (previously an unknown
            backend silently returned None).
    '''
    def mp_map(func, args, n_workers=None):
        # Process pool; func and args must be picklable.
        from multiprocessing import Pool
        with Pool(n_workers) as p:
            output = p.map(func, args)
        return output
    def tpe_map(func, args, n_processes=None):
        # Thread pool; best for I/O-bound or GIL-releasing work.
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=n_processes) as e:
            output = list(e.map(func, args))
        return output
    def ppe_map(func, args, n_processes=None):
        from concurrent.futures import ProcessPoolExecutor
        with ProcessPoolExecutor(max_workers=n_processes) as e:
            output = e.map(func, args)
        return output
    def joblib_map(func, args, n_jobs=None, prefer='threads', verbose=10):
        from joblib import Parallel, delayed
        output = Parallel(n_jobs=n_jobs, prefer=prefer, verbose=verbose)(
            (delayed(func)(arg) for arg in args))
        return output
    def dask_map(func, args, **kwargs):
        import dask
        job_list = [dask.delayed(func)(job) for job in args]
        output = list(dask.compute(*job_list))
        return output
    def dask_dist_map(func, args, threads_per_worker=1, n_workers=None, **kwargs):
        from dask.distributed import Client
        client = Client(threads_per_worker=threads_per_worker,
                        n_workers=n_workers,
                        **kwargs)
        print('scheduler at', client.scheduler_info()['address'], end='')
        console_ip = client.scheduler_info()['address'].split(':')[1]
        print(', console ', console_ip + ':8787', sep='')
        futures = client.map(func, args)
        output = client.gather(futures)
        client.close()
        return output
    if n_processes is None:
        n_processes = cfg['n_cores_default']
    if backend is None:
        backend = cfg['default_map_fn_backend']
    if backend == 'mp':
        return mp_map(func, args, n_workers=n_processes, **kwargs)
    elif backend == 'tpe':
        return tpe_map(func, args, n_processes=n_processes, **kwargs)
    elif backend == 'ppe':
        return ppe_map(func, args, n_processes=n_processes, **kwargs)
    elif backend == 'joblib-threads':
        return joblib_map(func, args, n_jobs=n_processes, prefer='threads', **kwargs)
    elif backend == 'joblib-processes':
        return joblib_map(func, args, n_jobs=n_processes, prefer='processes', **kwargs)
    elif backend == 'dask':
        # Fix: **kwargs used to be silently dropped on this path.
        return dask_map(func, args, **kwargs)
    elif backend == 'dask-dist':
        return dask_dist_map(func, args, n_workers=n_processes, **kwargs)
    raise ValueError('unknown map_fn backend: %r' % (backend,))
|
from .duet_core import *
from .duet_envs import *
import pandas as pd
import numpy as np
def load(filename):
    """Load an .npy file as a DuetWrapper tied to a unit-sensitivity source."""
    data_source = DataSource(filename)
    env = SensEnv({data_source: 1})
    return DuetWrapper(np.load(filename), env, LInf())
def zeros(x):
    """np.zeros that accepts a wrapped shape and preserves its sensitivity env."""
    if not isinstance(x, DuetWrapper):
        return np.zeros(x)
    shape = unwrap(x)
    return DuetWrapper(np.zeros(shape), x.senv, LInf())
# def zeros(x):
# y = unwrap(x)
# r = np.zeros(y)
# return DuetWrapper(r, x.senv.truncate(0), LInf())
def exp(x):
    """Element-wise exponential that propagates Duet sensitivity wrappers."""
    if not isinstance(x, DuetWrapper):
        return np.exp(x)
    result = np.exp(unwrap(x))
    env = x.senv if hasattr(x, 'senv') else SensEnv({})
    return DuetWrapper(result, env, LInf())
# def exp(x):
# if isinstance(x,DuetWrapper):
# y = unwrap(x)
# r = np.exp(y)
# if hasattr(x, 'senv'):
# return DuetWrapper(r, x.senv.exp(), LInf())
# else:
# return DuetWrapper(r, SensEnv({}), LInf())
# else:
# return np.exp(x)
def abs(x):
    """Element-wise absolute value carrying x's sensitivity environment.

    NOTE: deliberately shadows the builtin `abs` to mirror the numpy API.
    """
    result = np.abs(unwrap(x))
    return DuetWrapper(result, get_senv(x), LInf())
def sign(x):
    """Element-wise sign carrying x's sensitivity environment."""
    result = np.sign(unwrap(x))
    return DuetWrapper(result, get_senv(x), LInf())
# def sign(x):
# y = unwrap(x)
# r = np.sign(y)
# return DuetWrapper(r, get_senv(x).truncate(2), LInf())
def sqrt(x):
    """Element-wise square root carrying x's sensitivity environment.

    Bug fix: previously called np.sqrt(x) on the (possibly wrapped) input
    instead of the unwrapped array y, unlike the sibling abs/sign helpers.
    """
    y = unwrap(x)
    r = np.sqrt(y)
    return DuetWrapper(r, get_senv(x), LInf())
# def sqrt(x):
# y = unwrap(x)
# r = np.sqrt(x)
# return DuetWrapper(r, get_senv(x).sqrt(), LInf())
def dot(x,y):
    """Dot product; the result's sensitivity env sums both operands' envs."""
    left = unwrap(x)
    right = unwrap(y)
    combined_env = get_senv(x) + get_senv(y)
    return DuetWrapper(np.dot(left, right), combined_env, LInf())
# def dot2(x,y):
# a = unwrap(x)
# b = unwrap(y)
# return DuetWrapper(np.dot(a,b), length(x)*(get_senv(x)*get_senv(y)), LInf())
def subtract(x,y):
    """Element-wise subtraction with Duet sensitivity propagation."""
    if not (isinstance(x, DuetWrapper) or isinstance(y, DuetWrapper)):
        return np.subtract(x, y)
    diff = np.subtract(unwrap(x), unwrap(y))
    return DuetWrapper(diff, get_senv(x) + get_senv(y), LInf())
def sum(x,*args,**kwargs):
    """Wrapped np.sum. L2 inputs truncate their env by the L2 bound; other
    modes drop sensitivity entirely. Shadows the builtin `sum` on purpose."""
    if not isinstance(x, DuetWrapper):
        return np.sum(x,*args,**kwargs)
    total = np.sum(unwrap(x),*args,**kwargs)
    if isinstance(x.mode, L2):
        return DuetWrapper(total, x.senv.truncate(x.mode.bound), x.mode)
    return DuetWrapper(total, SensEnv(), x.mode)
def sum2(x,*args,**kwargs):
    """Identical duplicate of `sum` kept for compatibility with callers."""
    if not isinstance(x, DuetWrapper):
        return np.sum(x,*args,**kwargs)
    total = np.sum(unwrap(x),*args,**kwargs)
    if isinstance(x.mode, L2):
        return DuetWrapper(total, x.senv.truncate(x.mode.bound), x.mode)
    return DuetWrapper(total, SensEnv(), x.mode)
# sum = np.sum
linalg = np.linalg  # re-exported; its .norm is monkey-patched further below
random = np.random  # plain re-export of numpy's RNG module
old_norm = np.linalg.norm  # keep a handle on the original implementation for `norm`
where = np.where  # NOTE(review): dead assignment -- immediately shadowed by `def where` below
# abs = np.abs
def where(a,b,c):
    """Three-argument np.where over DuetWrappers; sums all three senvs."""
    selected = np.where(a.val, b.val, c.val)
    combined_env = a.senv + b.senv + c.senv
    return DuetWrapper(selected, combined_env, LInf())
def where2(a):
    """Single-argument np.where (nonzero indices) over a DuetWrapper."""
    indices = np.where(a.val)
    return DuetWrapper(indices, a.senv, LInf())
def where3(a):
    """Plain np.where passthrough for unwrapped arrays."""
    return np.where(a)
def norm(x, ord=None, axis=None):
    """Privacy-aware np.linalg.norm: scales the sensitivity env to infinity."""
    values = unwrap(x)
    scaled_env = get_senv(x).scale(float('inf'))
    return DuetWrapper(old_norm(values, ord, axis), scaled_env, LInf())
# Patch the re-exported linalg module so callers get the wrapped norm.
linalg.norm = norm
def array_split(arr, n):
    """Split a wrapped array into n chunks; every chunk keeps arr's senv."""
    chunks = np.array_split(unwrap(arr), n)
    env = get_senv(arr)
    return np.array([DuetWrapper(chunk, env, LInf()) for chunk in chunks])
|
# coding: utf-8
# # A simple tutorial to Stellar LAbel Machine (SLAM)
#
# **Bo Zhang** (<mailto:bozhang@nao.cas.cn>), Created on Thu Jan 19 15:48:12 2017
#
#
#
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
import os
from slam.slam import Slam
from slam.diagnostic import compare_labels
from slam.apogee import aspcapStar_read, apStar_read
from slam.normalization import normalize_spectrum, normalize_spectra_block
from slam.binning import interp_pchip, interp_linear, interp_nearest
from astropy.table import Table, Column
from joblib import Parallel, delayed, dump, load
# NOTE(review): hard-coded absolute path -- this tutorial only runs as-is on
# the original author's machine.
os.chdir("/home/cham/PycharmProjects/slam/doc/example_DR10/Data")
# In[3]:
""" load catalog """
t = Table.read('../reference_labels.csv')
# Reference stellar labels (Teff, logg, [M/H]) used as the training targets.
label3 = np.array(t['Teff_{corr}', 'logg_{corr}', '[M/H]_{corr}'].to_pandas())
# In[4]:
""" define functions """
def apStar_read_interp(fp, wave_interp, i_use=0):
    """Read one apStar file and interpolate flux/ivar/mask onto wave_interp.

    For multi-visit spectra, visit column i_use is used.
    """
    spec = apStar_read(fp, full=True, meta=True, verbose=False)
    wave_rest = spec['wave']#/(1+rv/299792.458)
    multi_visit = spec.meta['NVISITS'] > 1
    flux_src = spec['flux'][:, i_use] if multi_visit else spec['flux']
    err_src = spec['flux_err'][:, i_use] if multi_visit else spec['flux_err']
    mask_src = spec['mask'][:, i_use] if multi_visit else spec['mask']
    flux_interp = interp_linear(wave_rest, flux_src, wave_interp, 0)
    ivar_interp = (1./interp_linear(wave_rest, err_src, wave_interp, 1E10))**2
    mask_interp = interp_nearest(wave_rest, mask_src, wave_interp, 1)
    return flux_interp, ivar_interp, mask_interp
def apStar_read_block(fps, wave_interp, n_jobs=1, verbose=False):
    """Read many apStar spectra in parallel and stack the results.

    Returns (flux_block, ivar_block, mask_block), one row per input file.
    """
    results = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(apStar_read_interp)(fp, wave_interp, 0) for fp in fps)
    return tuple(np.array([row[k] for row in results]) for k in range(3))
def aspcapStar_read_interp(fp, wave_interp, i_use=0):
    # Read one aspcapStar spectrum and return (flux, ivar).
    # NOTE(review): despite the name, no interpolation happens and the
    # wave_interp / i_use parameters are unused -- presumably aspcapStar
    # spectra already share a common wavelength grid; confirm.
    spec = aspcapStar_read(fp, meta=True)
    return spec['flux'], 1./spec['flux_err'].data**2.
def aspcapStar_read_block(fps, n_jobs=1, verbose=False):
    # Read many aspcapStar spectra in parallel; returns stacked (flux, ivar).
    # NOTE(review): `wave_interp` below is not a parameter -- it is the module
    # global defined later in this script, so this function only works after
    # that global has been created.
    r = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(aspcapStar_read_interp)(fp,wave_interp,0) for fp in fps)
    flux_block = np.array([_[0] for _ in r])
    ivar_block = np.array([_[1] for _ in r])
    # Replace non-finite ivar values (e.g. from zero flux_err) with zeros.
    ivar_block = np.where(np.isfinite(ivar_block), ivar_block, np.zeros_like(ivar_block))
    return flux_block, ivar_block
# In[5]:
""" define wavelength grid according to the first spectrum """
spec = aspcapStar_read(t['ID'][0], True, True)
wave_interp = spec['wave'].data
""" read all spectra """
flux_block, ivar_block = aspcapStar_read_block(t['ID'].data, n_jobs=-1)
""" normalization """
# NOTE(review): ivar_block here is passed as a boolean mask (flux_block > 0),
# not the actual inverse variances computed above -- confirm this matches the
# normalize_spectra_block API's expectation.
r1 = normalize_spectra_block(
    wave_interp, flux_block, (15200., 17000.), 30., p=(1E-8, 1E-7), q=0.7,
    eps=1E-19, ivar_block=flux_block > 0, rsv_frac=1., n_jobs=10, verbose=5)
flux_norm, flux_cont = r1
# Propagate the continuum division into the inverse variances.
ivar_norm = flux_cont**2*ivar_block
# In[6]:
""" training """
k = Slam(wave_interp, flux_norm, ivar_norm, label3)
# train: using simple user-defined hyper-parameters
# it takes ~2 min using 32 cores
k.train_pixels(method='simple', n_jobs=24, verbose=5, C=2.0, epsilon=0.1, gamma=1.)
# In[7]:
""" test """
# here simply to test SLAM on training sample
slc = slice(0, flux_norm.shape[0], 20)
# 1. derive stellar parameters using template matching
# it takes a few minutes
label_init = k.predict_labels_quick(flux_norm[slc, :], ivar_norm[slc, :], n_jobs=24)
# 2. re-fine the initial guess
# it takes an hour ...
label_refined = k.predict_labels_multi(label_init, flux_norm[slc, :], ivar_norm[slc, :], n_jobs=24)
# In[10]:
""" plot results """
fig = compare_labels(label3[slc, :], label_refined, 'ASPCAP', 'SLAM')
# In[11]:
fig
# In[ ]:
|
# Positional and keyword arguments for constructing a libcloud GCE driver.
GCE_PARAMS = ('service_account_info', '/path/to/credentialfile')
# Bug fix: zone name corrected from 'asia-northease1-c' (typo, not a valid
# GCE zone) to the real zone 'asia-northeast1-c'.
GCE_KEYWORD_PARAMS = {'project' : 'project_name', 'datacenter' : 'asia-northeast1-c'}
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestClient(unittest2.TestCase):
    """Unit tests for gcloud.pubsub.client.Client using stubbed connections."""
    def _getTargetClass(self):
        # Imported lazily so the class under test is resolved per-test.
        from gcloud.pubsub.client import Client
        return Client
    def _makeOne(self, *args, **kw):
        # Helper: construct a Client with the given constructor arguments.
        return self._getTargetClass()(*args, **kw)
    def test_list_topics_no_paging(self):
        """list_topics() with no arguments sends no paging query params."""
        from gcloud.pubsub.topic import Topic
        PROJECT = 'PROJECT'
        CREDS = _Credentials()
        CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS)
        TOPIC_NAME = 'topic_name'
        TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
        RETURNED = {'topics': [{'name': TOPIC_PATH}]}
        # Replace the connection on the client with one of our own.
        CLIENT_OBJ.connection = _Connection(RETURNED)
        # Execute request.
        topics, next_page_token = CLIENT_OBJ.list_topics()
        # Test values are correct.
        self.assertEqual(len(topics), 1)
        self.assertTrue(isinstance(topics[0], Topic))
        self.assertEqual(topics[0].name, TOPIC_NAME)
        self.assertEqual(next_page_token, None)
        self.assertEqual(len(CLIENT_OBJ.connection._requested), 1)
        req = CLIENT_OBJ.connection._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/projects/%s/topics' % PROJECT)
        self.assertEqual(req['query_params'], {})
    def test_list_topics_with_paging(self):
        """list_topics(size, token) forwards pageSize/pageToken and returns the next token."""
        from gcloud.pubsub.topic import Topic
        PROJECT = 'PROJECT'
        CREDS = _Credentials()
        CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS)
        TOPIC_NAME = 'topic_name'
        TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
        TOKEN1 = 'TOKEN1'
        TOKEN2 = 'TOKEN2'
        SIZE = 1
        RETURNED = {'topics': [{'name': TOPIC_PATH}],
                    'nextPageToken': TOKEN2}
        # Replace the connection on the client with one of our own.
        CLIENT_OBJ.connection = _Connection(RETURNED)
        # Execute request.
        topics, next_page_token = CLIENT_OBJ.list_topics(SIZE, TOKEN1)
        # Test values are correct.
        self.assertEqual(len(topics), 1)
        self.assertTrue(isinstance(topics[0], Topic))
        self.assertEqual(topics[0].name, TOPIC_NAME)
        self.assertEqual(next_page_token, TOKEN2)
        self.assertEqual(len(CLIENT_OBJ.connection._requested), 1)
        req = CLIENT_OBJ.connection._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/projects/%s/topics' % PROJECT)
        self.assertEqual(req['query_params'],
                         {'pageSize': SIZE, 'pageToken': TOKEN1})
    def test_list_subscriptions_no_paging(self):
        """list_subscriptions() hits the project endpoint with no paging params."""
        from gcloud.pubsub.subscription import Subscription
        PROJECT = 'PROJECT'
        CREDS = _Credentials()
        CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS)
        SUB_NAME = 'subscription_name'
        SUB_PATH = 'projects/%s/subscriptions/%s' % (PROJECT, SUB_NAME)
        TOPIC_NAME = 'topic_name'
        TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
        SUB_INFO = [{'name': SUB_PATH, 'topic': TOPIC_PATH}]
        RETURNED = {'subscriptions': SUB_INFO}
        # Replace the connection on the client with one of our own.
        CLIENT_OBJ.connection = _Connection(RETURNED)
        # Execute request.
        subscriptions, next_page_token = CLIENT_OBJ.list_subscriptions()
        # Test values are correct.
        self.assertEqual(len(subscriptions), 1)
        self.assertTrue(isinstance(subscriptions[0], Subscription))
        self.assertEqual(subscriptions[0].name, SUB_NAME)
        self.assertEqual(subscriptions[0].topic.name, TOPIC_NAME)
        self.assertEqual(next_page_token, None)
        self.assertEqual(len(CLIENT_OBJ.connection._requested), 1)
        req = CLIENT_OBJ.connection._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/projects/%s/subscriptions' % PROJECT)
        self.assertEqual(req['query_params'], {})
    def test_list_subscriptions_with_paging(self):
        """list_subscriptions(size, token) forwards paging params and parses sub config."""
        from gcloud.pubsub.subscription import Subscription
        PROJECT = 'PROJECT'
        CREDS = _Credentials()
        CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS)
        SUB_NAME = 'subscription_name'
        SUB_PATH = 'projects/%s/subscriptions/%s' % (PROJECT, SUB_NAME)
        TOPIC_NAME = 'topic_name'
        TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
        ACK_DEADLINE = 42
        PUSH_ENDPOINT = 'https://push.example.com/endpoint'
        TOKEN1 = 'TOKEN1'
        TOKEN2 = 'TOKEN2'
        SIZE = 1
        SUB_INFO = [{'name': SUB_PATH,
                     'topic': TOPIC_PATH,
                     'ackDeadlineSeconds': ACK_DEADLINE,
                     'pushConfig': {'pushEndpoint': PUSH_ENDPOINT}}]
        RETURNED = {'subscriptions': SUB_INFO, 'nextPageToken': TOKEN2}
        # Replace the connection on the client with one of our own.
        CLIENT_OBJ.connection = _Connection(RETURNED)
        # Execute request.
        subscriptions, next_page_token = CLIENT_OBJ.list_subscriptions(
            SIZE, TOKEN1)
        # Test values are correct.
        self.assertEqual(len(subscriptions), 1)
        self.assertTrue(isinstance(subscriptions[0], Subscription))
        self.assertEqual(subscriptions[0].name, SUB_NAME)
        self.assertEqual(subscriptions[0].topic.name, TOPIC_NAME)
        self.assertEqual(subscriptions[0].ack_deadline, ACK_DEADLINE)
        self.assertEqual(subscriptions[0].push_endpoint, PUSH_ENDPOINT)
        self.assertEqual(next_page_token, TOKEN2)
        self.assertEqual(len(CLIENT_OBJ.connection._requested), 1)
        req = CLIENT_OBJ.connection._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/projects/%s/subscriptions' % PROJECT)
        self.assertEqual(req['query_params'],
                         {'pageSize': SIZE, 'pageToken': TOKEN1})
    def test_list_subscriptions_with_topic_name(self):
        """Filtering by topic_name hits the topic's subscriptions endpoint and shares Topic objects."""
        from gcloud.pubsub.subscription import Subscription
        PROJECT = 'PROJECT'
        CREDS = _Credentials()
        CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS)
        SUB_NAME_1 = 'subscription_1'
        SUB_PATH_1 = 'projects/%s/subscriptions/%s' % (PROJECT, SUB_NAME_1)
        SUB_NAME_2 = 'subscription_2'
        SUB_PATH_2 = 'projects/%s/subscriptions/%s' % (PROJECT, SUB_NAME_2)
        TOPIC_NAME = 'topic_name'
        TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
        SUB_INFO = [{'name': SUB_PATH_1, 'topic': TOPIC_PATH},
                    {'name': SUB_PATH_2, 'topic': TOPIC_PATH}]
        TOKEN = 'TOKEN'
        RETURNED = {'subscriptions': SUB_INFO, 'nextPageToken': TOKEN}
        # Replace the connection on the client with one of our own.
        CLIENT_OBJ.connection = _Connection(RETURNED)
        # Execute request.
        subscriptions, next_page_token = CLIENT_OBJ.list_subscriptions(
            topic_name=TOPIC_NAME)
        # Test values are correct.
        self.assertEqual(len(subscriptions), 2)
        self.assertTrue(isinstance(subscriptions[0], Subscription))
        self.assertEqual(subscriptions[0].name, SUB_NAME_1)
        self.assertEqual(subscriptions[0].topic.name, TOPIC_NAME)
        self.assertTrue(isinstance(subscriptions[1], Subscription))
        self.assertEqual(subscriptions[1].name, SUB_NAME_2)
        self.assertEqual(subscriptions[1].topic.name, TOPIC_NAME)
        self.assertTrue(subscriptions[1].topic is subscriptions[0].topic)
        self.assertEqual(next_page_token, TOKEN)
        self.assertEqual(len(CLIENT_OBJ.connection._requested), 1)
        req = CLIENT_OBJ.connection._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'],
                         '/projects/%s/topics/%s/subscriptions'
                         % (PROJECT, TOPIC_NAME))
        self.assertEqual(req['query_params'], {})
    def test_topic(self):
        """Client.topic() builds a Topic bound to this client and project."""
        PROJECT = 'PROJECT'
        TOPIC_NAME = 'TOPIC_NAME'
        CREDS = _Credentials()
        client_obj = self._makeOne(project=PROJECT, credentials=CREDS)
        new_topic = client_obj.topic(TOPIC_NAME)
        self.assertEqual(new_topic.name, TOPIC_NAME)
        self.assertTrue(new_topic._client is client_obj)
        self.assertEqual(new_topic.project, PROJECT)
        self.assertEqual(new_topic.full_name,
                         'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME))
        self.assertFalse(new_topic.timestamp_messages)
class _Credentials(object):
_scopes = None
@staticmethod
def create_scoped_required():
return True
def create_scoped(self, scope):
self._scopes = scope
return self
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
|
"""
Binary CW example: Semicoherent MCMC search
==========================================================
MCMC search of a CW signal produced by a source in a binary
system using the semicoherent F-statistic.
"""
import os
import numpy as np
import pyfstat
# If False, sky priors are used
directed_search = True
# If False, ecc and argp priors are used
known_eccentricity = True
label = "PyFstatExampleSemiCoherentBinarySearchUsingMCMC"
outdir = os.path.join("PyFstat_example_data", label)
logger = pyfstat.set_up_logger(label=label, outdir=outdir)
# Properties of the GW data
data_parameters = {
    "sqrtSX": 1e-23,
    "tstart": 1000000000,
    "duration": 10 * 86400,
    "detectors": "H1",
}
# Data span and its midpoint (used as reference/orbital epochs below).
tend = data_parameters["tstart"] + data_parameters["duration"]
mid_time = 0.5 * (data_parameters["tstart"] + tend)
# Properties of the signal
# h0 is set to sqrtSX / depth, so a larger depth means a weaker signal.
depth = 0.1
signal_parameters = {
    "F0": 30.0,
    "F1": 0,
    "F2": 0,
    "Alpha": 0.15,
    "Delta": 0.45,
    "tp": mid_time,
    "argp": 0.3,
    "asini": 10.0,
    "ecc": 0.1,
    "period": 45 * 24 * 3600.0,
    "tref": mid_time,
    "h0": data_parameters["sqrtSX"] / depth,
    "cosi": 1.0,
}
data = pyfstat.BinaryModulatedWriter(
    label=label, outdir=outdir, **data_parameters, **signal_parameters
)
data.make_data()
# Priors: frequency evolution fixed at the injected values; uniform priors
# (+/-10% or half a period) on the binary orbital parameters.
theta_prior = {
    "F0": signal_parameters["F0"],
    "F1": signal_parameters["F1"],
    "F2": signal_parameters["F2"],
    "asini": {
        "type": "unif",
        "lower": 0.9 * signal_parameters["asini"],
        "upper": 1.1 * signal_parameters["asini"],
    },
    "period": {
        "type": "unif",
        "lower": 0.9 * signal_parameters["period"],
        "upper": 1.1 * signal_parameters["period"],
    },
    "tp": {
        "type": "unif",
        "lower": mid_time - signal_parameters["period"] / 2.0,
        "upper": mid_time + signal_parameters["period"] / 2.0,
    },
}
if directed_search:
    for key in "Alpha", "Delta":
        theta_prior[key] = signal_parameters[key]
else:
    theta_prior.update(
        {
            "Alpha": {
                "type": "unif",
                "lower": signal_parameters["Alpha"] - 0.01,
                "upper": signal_parameters["Alpha"] + 0.01,
            },
            "Delta": {
                "type": "unif",
                "lower": signal_parameters["Delta"] - 0.01,
                "upper": signal_parameters["Delta"] + 0.01,
            },
        }
    )
if known_eccentricity:
    for key in "ecc", "argp":
        theta_prior[key] = signal_parameters[key]
else:
    theta_prior.update(
        {
            "ecc": {
                "type": "unif",
                "lower": signal_parameters["ecc"] - 5e-2,
                "upper": signal_parameters["ecc"] + 5e-2,
            },
            "argp": {
                "type": "unif",
                "lower": signal_parameters["argp"] - np.pi / 2,
                "upper": signal_parameters["argp"] + np.pi / 2,
            },
        }
    )
# Parallel-tempering MCMC sampler settings.
ntemps = 3
log10beta_min = -1
nwalkers = 150
nsteps = [100, 200]
mcmc = pyfstat.MCMCSemiCoherentSearch(
    label=label,
    outdir=outdir,
    nsegs=10,
    sftfilepattern=data.sftfilepath,
    theta_prior=theta_prior,
    tref=signal_parameters["tref"],
    minStartTime=data_parameters["tstart"],
    maxStartTime=tend,
    nsteps=nsteps,
    nwalkers=nwalkers,
    ntemps=ntemps,
    log10beta_min=log10beta_min,
    binary=True,
)
mcmc.run(
    plot_walkers=True,
    walker_plot_args={"plot_det_stat": True, "injection_parameters": signal_parameters},
)
mcmc.plot_corner(add_prior=True, truths=signal_parameters)
mcmc.plot_prior_posterior(injection_parameters=signal_parameters)
mcmc.print_summary()
|
# -*- coding: utf-8 -*-
import time
import settings
from bson import ObjectId
from log import logger
from mongokit import Connection, IS
from ext import DynamicType, BaseModel
import datetime
from utils import get_year_month_day_str_ch
from utils import get_hour_minute_str
from utils import get_hour_minute_second_str
from utils import get_year_week_day_str
from utils import get_year_month_day_str
from utils import get_year_week_str
from utils import get_year_week_day_str_ch
from utils import get_year_week_day_str_ch_from_format_str
# Module-wide mongokit connection; all models below register themselves on it.
connection = Connection()
@connection.register
class FeedBack(BaseModel):
    """User feedback document (Python 2 / mongokit)."""
    __collection__ = "FeedBack_Col"
    structure = {
        "message": basestring,
        "username": basestring,
        "email": basestring,
        "status": int, # 0: New 1: OK 2: Do not fix
        "createtime": datetime.datetime,
        "fixtime": datetime.datetime,
        "rfs": dict #reserve fields
    }
    def create(self, message, username, email):
        """Insert a new feedback entry with status 0 (New)."""
        fb = self()
        fb['createtime'] = datetime.datetime.now()
        fb['message'] = message
        fb['username'] = username
        fb['status'] = 0
        fb['email'] = email
        # NOTE(review): fixtime is set at creation time even though nothing is
        # fixed yet -- presumably a placeholder; confirm.
        fb['fixtime'] = datetime.datetime.now()
        fb.save()
    def update_status(self, id, status):
        """Set the status of feedback `id`; stamp fixtime when marked OK."""
        # NOTE: `id` shadows the builtin of the same name.
        fb = self.find_one({"_id": id})
        fb.update({"status": int(status)})
        # NOTE(review): compares the raw argument, not int(status) -- if a
        # string "1" is passed, fixtime is never stamped; confirm intended.
        if status == 1:
            fb['fixtime'] = datetime.datetime.now()
        fb.save()
    def get_all(self):
        """Return a cursor over every feedback document."""
        return connection.FeedBack.find({})
@connection.register
class Bulletin(BaseModel):
    """Team bulletin/announcement document (schema only, no methods)."""
    __collection__ = "Bulletin_Col"
    structure = {
        "name": basestring,
        "time": datetime.datetime,
        "type": int, # 0:URL 1:Text
        "teamname": basestring,
        "text": basestring,
        "teamID":ObjectId,
        "username": basestring,
        "useremail": basestring,
        "rfs": dict #reserve fields
    }
@connection.register
class User(BaseModel):
    """Application user document (Python 2 / mongokit)."""
    __collection__ = "User_Col"
    structure = {
        "name": basestring,
        "email": basestring,
        "addtime": datetime.datetime,
        "password": basestring, # NOTE(review): stored in plain text
        "role": int, # reserve fields
        "tel_office": basestring,
        "tel_personal":basestring,
        "job_number": basestring,
        "address":basestring,
        "age":int,
        "sex":int,
        "rfs": dict #reserve fields
    }
    default_values = {
        "role": 0,
        "rfs": {}
    }
    def regist(self, email, username, password):
        """Register a new user; returns (ok, error_message)."""
        flag = self.find_by_email(email)
        if flag:
            user = self() # self() constructs a new empty document
            user["addtime"] = datetime.datetime.now()
            user['email'] = email
            user['name'] = username
            user['password'] = password
            user.save()
            return True, ""
        else:
            return False, u"此邮箱已注册"
    def find_by_email(self, email):
        # NOTE(review): misleading name -- returns True when the email is NOT
        # registered (i.e. "is free"), False when a user exists.
        user = self.find_one({'email': email})
        if user:
            return False
        else:
            return True
    def get_user_by_email(self, email):
        """Return the user document for `email`, or None."""
        user = self.find_one({'email': email})
        return user
    def login_action(self, email, password):
        """Check email/password; returns (ok, error_message)."""
        user = self.find_one({'email': email, "password":password})
        if user:
            return True, ""
        else:
            return False, u"邮箱或者密码错误"
    def add_user_by_teamadd_action(self, email, username):
        """Return the _id for `email`, creating a stub user if needed."""
        user = self.get_user_by_email(email)
        if user:
            return user['_id']
        else:
            user = self() # self() constructs a new empty document
            user["addtime"] = datetime.datetime.now()
            user["password"] = ""
            # NOTE(review): "administrator" is not declared in `structure`;
            # mongokit may reject or silently drop it -- confirm.
            user["administrator"] = False
            user['email'] = email
            # NOTE(review): structure defines "name", not "username" -- this
            # looks like a bug (the stub user gets no "name"); confirm.
            user['username'] = username
            user['administrator'] = False
            user.save()
            return user['_id']
@connection.register
class Team(BaseModel):
    """Team document holding members and daily/weekly report scheduling."""
    __collection__ = "Team_Col"
    structure = {
        "name": basestring,
        "create_time": datetime.datetime,
        "status": int, # 0:normal 1:archive 2:deleted
        "ownerID": ObjectId,
        "ownername": basestring,
        "owneremail": basestring,
        "day_or_week": int, # 0: day 1:week
        # day_detail
        "day_detail_create_time": basestring, # 12:15
        "day_detail_remind_interval": int, # Minute
        "day_detail_summary_time": basestring,
        "day_detail_create_flag": basestring, # flag create which day ; value: "2015-18-3"
        "day_detail_summary_flag": basestring, # flag summary which day ; value: "2015-18-3"
        "day_detail_notify_flag": int,
        # week detail
        "week_detail_create_time": basestring, # 3-12:15
        "week_detail_remind_interval": int, # Minute
        "week_detail_summary_time": basestring, # 5-20:00
        "week_detail_create_flag": basestring, # flag create which week ; value: "2015-18"
        "week_detail_summary_flag": basestring, # flag summary which week ; value: "2015-18"
        "week_detail_notify_flag": int,
        # task_detail
        "member_task_summary_time": basestring,
        "team_task_summary_time": basestring,
        "member_task_summary_flag": basestring,
        "team_task_summary_flag": basestring,
        # member
        # [{?},{?}]
        #     |
        #     v
        # {
        #     "name": "jack",
        #     "email":"zhangzhihe@beyondsoft.com",
        #     "role": int,
        #     "tel_personal":basestring,
        #     "job_number": basestring,
        #     "address":basestring
        # }
        # role: 0: developer 1: watcher 2: owner 3: developer&&owner 4: watcher&&owner
        "members": list, #[ObjectId], # user list
        "rfs": dict #reserve fields
    }
    required_fields = ["name"]
    default_values = {
        # NOTE(review): evaluated once at import time, so every document
        # created via defaults shares the same create_time -- confirm intended
        # (createTeam below overwrites it anyway).
        "create_time": datetime.datetime.now(),
        "status": 0,
        # day_detail
        "day_detail_create_time": "09:00", # 12:15
        # "day_detail_remind_time": "16:00", # 12:30
        "day_detail_remind_interval": 15, # Minute
        "day_detail_summary_time": "20:00",
        "day_detail_create_flag": "1970-1-1", # flag create which day ; value: "2015-18-3"
        "day_detail_summary_flag": "1970-1-1", # flag summary which day ; value: "2015-18-3"
        "day_detail_notify_flag": 0,
        # week detail
        "week_detail_create_time": "5-09:00",
        # "week_detail_remind_time": "5-16:00",
        "week_detail_remind_interval": 15,
        "week_detail_summary_time": "5-20:00",
        "week_detail_create_flag": "1970-1", # flag create which week ; value: "2015-18"
        "week_detail_summary_flag": "1970-1", # flag summary which week ; value: "2015-18"
        "week_detail_notify_flag": 0,
        # task_detail
        "member_task_summary_time": "20:00",
        "team_task_summary_time": "20:15",
        "day_or_week": 0
    }
    def createTeam(self, date):
        """Create a team from the `date` dict; returns (ok, error_message)."""
        _team = self.find_team_by_name(date['name'])
        if _team:
            return False, u"Team名称'" + date['name'] + u"'已经存在"
        else:
            team = self() # self() constructs a new empty document
            team["create_time"] = datetime.datetime.now()
            team["day_detail_create_flag"] = "1970-1-1"
            team["day_detail_summary_flag"] = "1970-1-1"
            team["day_detail_notify_flag"] = 0
            team["week_detail_create_flag"] = "1970-1"
            team["week_detail_summary_flag"] = "1970-1"
            team["week_detail_notify_flag"] = 0
            # Copy every caller-supplied field verbatim onto the document.
            for d in date:
                team[d] = date[d]
            team.save()
            return True, ""
    def find_team_by_name(self, name):
        """Return the team document named `name`, or None."""
        team = self.find_one({"name":name})
        return team
    def get_summary_week(self, year_week=None):
        """Collect all members' WeekDetails for `year_week` (default: this week)."""
        year_week = year_week or get_year_week_str()
        teamID = self['_id']
        weeks = connection.WeekDetail.find({"teamID":teamID, "year_week":year_week})
        weeks_detail = []
        for w in weeks:
            _week = {}
            username = w['username']
            _week['username'] = username
            _week['status'] = w['status']
            _week['submit_list'] = w['submit_list']
            _week['rfs'] = w['rfs']
            weeks_detail.append(_week)
        return weeks_detail
    def get_summary_day(self, year_week_day=None):
        """Collect all members' DayDetails for `year_week_day` (default: today)."""
        year_week_day = year_week_day or get_year_week_day_str()
        teamID = self['_id']
        days = connection.DayDetail.find({"teamID":teamID, "year_week_day":year_week_day})
        days_detail = []
        for d in days:
            _day = {}
            username = d['username']
            _day['username'] = username
            _day['status'] = d['status']
            _day['submit_list'] = d['submit_list']
            _day['rfs'] = d['rfs']
            days_detail.append(_day)
        return days_detail
    def get_users(self):
        """Return members as [name, email] pairs."""
        users = []
        # NOTE(review): attribute access (self.members) relies on mongokit
        # dot-notation; the rest of this module uses self['...'] -- confirm.
        for m in self.members:
            user = [m["name"], m["email"]]
            users.append(user)
        return users
    def deep_delete(self):
        """Delete this team and every document that references its teamID."""
        teamID = self['_id']
        dayDetails = connection.DayDetail.find({"teamID": teamID})
        for i in dayDetails:
            i.delete()
        weekDetails = connection.WeekDetail.find({"teamID": teamID})
        for i in weekDetails:
            i.delete()
        tasks = connection.Task.find({"teamID": teamID})
        for i in tasks:
            i.delete()
        taskMemberSummarys = connection.TaskMemberSummary.find({"teamID": teamID})
        for i in taskMemberSummarys:
            i.delete()
        taskTeamSummarys = connection.TaskTeamSummary.find({"teamID": teamID})
        for i in taskTeamSummarys:
            i.delete()
        self.delete()
@connection.register
class DayDetail(BaseModel):
    """One member's daily report document (Python 2 / mongokit)."""
    __collection__ = "DayDetail_Col"
    structure = {
        "teamID": ObjectId,
        "useremail": basestring,
        "username": basestring,
        "teamname": basestring,
        "year_week_day": basestring,
        "submit_list": list,
        # submit_list is a list; each element roughly looks like:
        # [{}]
        #  |
        #  V
        # {
        #     "pre": list,
        #     "next": list,
        #     "problem": list,
        #     "submit_time":datetime.datetime
        # }
        # entries of the pre/next lists look like:
        # [{}]
        #  |
        #  V
        # {
        #     "level": int, # 0: urgent 1: high 2: medium 3: low
        #     "type": int, # 0: work 1: study 2: growth 3: other
        #     "text": basestring
        # }
        # entries of the problem list look like:
        # [{}]
        #  |
        #  V
        # {
        #     "level": int, # 0: urgent 1: high 2: medium 3: low
        #     "type": int, # 0: work 1: study 2: growth 3: other
        #     "text": basestring
        # }
        "status": IS(
            0, # not submitted
            1, # reminded
            2, # submitted / OK
            3, # on leave
            4, # invalidated detail
            5, # late submission
            6, # reserved
            7 # reserved
        ),
        "create_time": datetime.datetime,
        "submit_time": datetime.datetime,
        "update_time": datetime.datetime,
        "deadline_time": datetime.datetime,
        "expend_time": int,
        "late": int, # 0:NO 1: YES
        "rfs": dict #reserve fields
    }
    default_values = {
        "status": 0,
        "expend_time": 8,
        "late": 0,
    }
    def find_for_notify(self, teamID, member):
        """Return today's DayDetail for `member` in team `teamID`, or None."""
        year_week_day = get_year_week_day_str()
        return self.find_one({
            "teamID":teamID,
            "username":member['name'],
            "useremail":member['email'],
            "year_week_day":year_week_day
        })
    def createByUserList(self, teamID, members, teamname, day_detail_summary_time):
        """Create today's DayDetail for each developer; returns [name, email, id] rows."""
        usersDayDetails = []
        year_week_day = get_year_week_day_str()
        # Python 2 print statement; the parameter is otherwise unused here.
        print day_detail_summary_time
        for m in members:
            # Only developers (role 0) and developer-owners (role 3) report.
            if m['role'] not in [0, 3]:
                continue
            d = {
                "teamID": teamID,
                "teamname": teamname,
                "username": m["name"],
                "useremail": m["email"],
                "year_week_day": year_week_day
            }
            # Skip members whose detail for today already exists.
            if connection.DayDetail.find_one(d):
                continue
            detail = []
            dayDetail = self()
            dayDetail["status"] = 0
            dayDetail["create_time"] = datetime.datetime.now()
            dayDetail["teamID"] = teamID
            dayDetail["teamname"] = teamname
            dayDetail["username"] = m["name"]
            dayDetail["useremail"] = m["email"]
            dayDetail["year_week_day"] = year_week_day
            dayDetail["submit_time"] = datetime.datetime.now()
            dayDetail["update_time"] = datetime.datetime.now()
            dayDetail["deadline_time"] = datetime.datetime.now()
            dayDetail["expend_time"] = 8
            dayDetail.save()
            key = dayDetail['_id']
            detail = [m["name"], m["email"], str(key)]
            usersDayDetails.append(detail)
        return usersDayDetails
    def pushDetail(self, idstr, pretext, nexttext, problem_text, expend_time):
        """Append a submission to the detail `idstr` and mark it submitted."""
        dayDetail = self.find_one({"_id": ObjectId(idstr)})
        submit = {}
        submit['pre'] = [{
            "level": 0, # 0: urgent 1: high 2: medium 3: low
            "type": 0, # 0: work 1: study 2: growth 3: other
            "text": pretext
        }]
        submit['next'] = [{
            "level": 0, # 0: urgent 1: high 2: medium 3: low
            "type": 0, # 0: work 1: study 2: growth 3: other
            "text": nexttext
        }]
        submit['submit_time'] = datetime.datetime.now()
        if problem_text:
            submit['problem'] = [{
                "level": 0, # 0: urgent 1: high 2: medium 3: low
                "type": 0, # 0: work 1: study 2: growth 3: other
                "text": problem_text
            }]
        if dayDetail:
            dayDetail.update({
                "status": 2,
                "update_time": datetime.datetime.now(),
                "submit_time": datetime.datetime.now(),
                "expend_time": int(expend_time)
            })
            dayDetail["submit_list"].append(submit)
            dayDetail.save()
            # Possibly trigger an early team summary now that this member is done.
            self._checkDetailSummary(dayDetail['year_week_day'], dayDetail['teamID'])
    def _checkDetailSummary(self, year_week_day, teamID):
        """Queue the team's day summary once every member has submitted."""
        now_year_week_day = get_year_week_day_str()
        team = connection.Team.find_one({"_id":ObjectId(teamID)})
        if team:
            # Build a zero-padded "HH:MM" string and prefix the day key so the
            # comparison below works lexicographically.
            now = datetime.datetime.now()
            _h = now.hour
            _m = now.minute
            if _h < 10:
                _h = "0" + str(_h)
            if _m < 10:
                _m = "0" + str(_m)
            now = str(_h) + ":" + str(_m)
            now = now_year_week_day + now
            team_day_detail_summary_time = team["day_detail_summary_time"]
            s_time = year_week_day + team_day_detail_summary_time
            if now > s_time:
                # Past the summary deadline: do nothing here -- presumably a
                # scheduled job handles the late summary; confirm.
                pass
            else:
                # Before the deadline: if any member is neither submitted (2)
                # nor on leave (3), the team is not done yet.
                dayDetails = self.find({"teamID": teamID, "year_week_day":year_week_day})
                for detail in dayDetails:
                    if detail['status'] not in [2, 3]:
                        return
                if now_year_week_day == year_week_day:
                    team.update({"day_detail_summary_flag":year_week_day})
                    team.save()
                # Imported here to avoid a circular import at module load.
                from db.cache import taskQueue
                day_summary = team.get_summary_day(year_week_day)
                users = team.get_users()
                _queue = {"type": "summaryDay", "users": users, "value": day_summary, "team": team['name'], "now_flag":year_week_day}
                taskQueue.put(_queue)
    def find_by_id_verbose(self, idstr):
        """Return a plain dict view of the detail `idstr`, or None."""
        dayDetail = self.find_one({"_id": ObjectId(idstr)})
        if dayDetail:
            username = dayDetail['username']
            r_day_detail = {}
            r_day_detail["year_week_day"] = dayDetail["year_week_day"]
            r_day_detail["create_time"] = dayDetail["create_time"]
            r_day_detail["status"] = dayDetail["status"]
            r_day_detail["submit_list"] = dayDetail["submit_list"]
            r_day_detail["username"] = username
            r_day_detail["rfs"] = dayDetail["rfs"]
            r_day_detail["expend_time"] = dayDetail["expend_time"]
            return r_day_detail
        else:
            return None
@connection.register
class WeekDetail(BaseModel):
__collection__ = "WeekDetail_Col"
structure = {
"teamID": ObjectId,
"useremail": basestring,
"username": basestring,
"teamname": basestring,
"year_week": basestring,
"submit_list": list,
# submit_list是一个列表,列表里面存储的数据大致如下:
# [{}]
# |
# V
# {
# "pre": list,
# "next": list,
# "problem": list,
# "submit_time":datetime.datetime
# }
# pre, next 的list如下:
# [{}]
# |
# V
# {
# "level": int, # 0:紧急 1:高 2:中 3:低
# "type": int, # 0:工作 1:学习 2:拓展 3:其他
# "text": basestring
# }
# problem 的list如下:
# [{}]
# |
# V
# {
# "level": int, # 0:紧急 1:高 2:中 3:低
# "type": int, # 0:工作 1:学习 2:拓展 3:其他
# "text": basestring
# }
"status": IS(
0, # 未提交
1, # 已提醒
2, # 已OK
3, # 已请假
4, # 失效Detail
5, # 保留字段
6, # 保留字段
7 # 保留字段
),
"create_time": datetime.datetime,
"submit_time": datetime.datetime,
"update_time": datetime.datetime,
"deadline_time": datetime.datetime,
"expend_time": int,
"late": int, # 0:NO 1: YES
"rfs": dict #reserve fields
}
default_values = {
"status": 0,
"expend_time":40
}
def find_for_notify(self, teamID, member):
year_week = get_year_week_str()
return self.find_one({
"teamID":teamID,
"username":member['name'],
"useremail":member['email'],
"year_week":year_week
})
def createByUserList(self, teamID, members, teamname, week_detail_summary_time):
weekDetails = []
year_week = get_year_week_str()
print week_detail_summary_time
for m in members:
if m['role'] not in [0, 3]:
continue
d = {
"teamID": teamID,
"teamname": teamname,
"username": m["name"],
"useremail": m["email"],
"year_week": year_week
}
if connection.DayDetail.find_one(d):
continue
detail = []
weekDetail = self()
weekDetail["status"] = 0
weekDetail["create_time"] = datetime.datetime.now()
weekDetail["teamID"] = teamID
weekDetail["teamname"] = teamname
weekDetail["username"] = m["name"]
weekDetail["useremail"] = m["email"]
weekDetail["year_week"] = year_week
weekDetail["submit_time"] = datetime.datetime.now()
weekDetail["update_time"] = datetime.datetime.now()
weekDetail["deadline_time"] = datetime.datetime.now()
weekDetail["expend_time"] = 40
weekDetail.save()
key = weekDetail['_id']
detail = [m["name"], m["email"], str(key)]
weekDetails.append(detail)
return weekDetails
def pushDetail(self, idstr, pretext, nexttext, problem_text, expend_time):
weekDetail = self.find_one({"_id": ObjectId(idstr)})
submit = {}
submit['pre'] = [{
"level": 0, # 0:紧急 1:高 2:中 3:低
"type": 0, # 0:工作 1:学习 2:拓展 3:其他
"text": pretext
}]
submit['next'] = [{
"level": 0, # 0:紧急 1:高 2:中 3:低
"type": 0, # 0:工作 1:学习 2:拓展 3:其他
"text": nexttext
}]
submit['submit_time'] = datetime.datetime.now()
if problem_text:
submit['problem'] = [{
"level": 0, # 0:紧急 1:高 2:中 3:低
"type": 0, # 0:工作 1:学习 2:拓展 3:其他
"text": problem_text
}]
if weekDetail:
weekDetail.update({
"status": 2,
"update_time": datetime.datetime.now(),
"submit_time": datetime.datetime.now(),
"expend_time": int(expend_time)
})
weekDetail["submit_list"].append(submit)
weekDetail.save()
self._checkDetailSummary(weekDetail['year_week'], weekDetail['teamID'])
def _checkDetailSummary(self, year_week, teamID):
weekDetails = self.find({"teamID": teamID, "year_week":year_week})
all_ok = True
for detail in weekDetails:
if detail['status'] not in [2, 3]:
all_ok = False
now_year_week = get_year_week_str()
team = connection.Team.find_one({"_id":ObjectId(teamID)})
if team:
if now_year_week == year_week:
team.update({"week_detail_summary_flag":year_week})
team.save()
now_time_str = str(datetime.datetime.now().isocalendar()[2]) + datetime.datetime.now().strftime('%H:%M')
team_time_str = team['week_detail_summary_time']
if not all_ok and now_year_week == year_week and now_time_str <= team_time_str:
return
from db.cache import taskQueue
week_summary = team.get_summary_week(year_week)
users = team.get_users()
_queue = {"type": "summaryWeek", "users": users, "value": week_summary, "team": team['name'], "now_flag":year_week}
taskQueue.put(_queue)
def find_by_id_verbose(self, idstr):
weekDetail = self.find_one({"_id": ObjectId(idstr)})
if weekDetail:
username = weekDetail['username']
r_week_detail = {}
r_week_detail["year_week"] = weekDetail["year_week"]
r_week_detail["create_time"] = weekDetail["create_time"]
r_week_detail["status"] = weekDetail["status"]
r_week_detail["submit_list"] = weekDetail["submit_list"]
r_week_detail["username"] = username
r_week_detail["rfs"] = weekDetail["rfs"]
r_week_detail["expend_time"] = weekDetail["expend_time"]
return r_week_detail
else:
return None
@connection.register
class Task(BaseModel):
    """A single assignable task inside a team.

    Tracks creator, publisher and current owner, plus scheduling, priority,
    completion percentage and scoring fields.
    """
    __collection__ = "Task_Col"
    structure = {
        "teamID": ObjectId,
        "useremail": basestring,       # current owner's email
        "username": basestring,        # current owner's name
        "teamname": basestring,
        "createusername": basestring,  # original creator
        "pubusername": basestring,     # who (re)assigned/published the task
        "createuseremail": basestring,
        "pubuseremail": basestring,
        "name": basestring,
        "dead_line_time": datetime.datetime,
        "explain": basestring,  # description
        "remarks": basestring,  # free-form remarks
        # A structured remarks list was planned but never adopted:
        # [{"uuid": basestring, "username": "jack.zh",
        #   "updatetime": datetime.datetime(), "text": basestring, "type": int}]
        "priority": IS(
            0,  # urgent
            1,  # high
            2,  # medium
            3,  # low
            4,  # reserved
            5,  # reserved
            6,  # reserved
            7   # reserved
        ),
        "status": IS(
            0,  # not started
            1,  # in progress
            2,  # done
            3,  # postponed
            4,  # done late
            5,  # reserved
            6,  # reserved
            7   # reserved
        ),
        "weight": IS(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        "score": float,
        "create_time": datetime.datetime,
        "update_time": datetime.datetime,
        "done_time": datetime.datetime,
        "start_time": datetime.datetime,
        "done_100": int,  # completion percentage, 0-100
        "rfs": dict  # reserve fields
    }
    default_values = {
        "name": "new task",
        "explain": "",
        "priority": 2,
        "status": 0,
        "remarks": "",
        "done_100": 0,
        "weight": 1,
        "score": 1.0
    }
    def newTask(self, teamID, teamname, username, useremail, createusername, createuseremail, pubusername, pubuseremail, name, explain, priority, dead_line_time, weight):
        """Create and persist a task, then queue a "createTask" email job.

        `dead_line_time` must be a datetime; `weight` is coerced to int.
        """
        def _priority2str(priority):
            # Map the numeric priority to its Chinese display label
            # (urgent / high / medium / low / "as you like").
            if priority == 0:
                return u"紧急"
            elif priority == 1:
                return u"高"
            elif priority == 2:
                return u"中"
            elif priority == 3:
                return u"低"
            else:
                return u"随你便"
        def _datetime2str(d):
            # Chinese "YYYY年M月D日 H:M" display string (not zero-padded).
            return str(d.year) + "年" + str(d.month) + "月" + str(d.day) + "日 " + str(d.hour) + ":" + str(d.minute)
        task = self()
        task["teamID"] = teamID
        task["teamname"] = teamname
        task['username'] = username
        task['useremail'] = useremail
        task['createusername'] = createusername
        task['createuseremail'] = createuseremail
        task['pubusername'] = pubusername
        task['pubuseremail'] = pubuseremail
        task["name"] = name
        task["dead_line_time"] = dead_line_time
        task["priority"] = priority
        task["create_time"] = datetime.datetime.now()
        task["update_time"] = datetime.datetime.now()
        # done_time is seeded with the deadline until the task is completed.
        task["done_time"] = dead_line_time
        task["start_time"] = datetime.datetime.now()
        task["explain"] = explain
        task['weight'] = int(weight)
        task.save()
        # Build the notification payload consumed by the email worker.
        createTaskEmail = {}
        createTaskEmail["teamname"] = teamname
        createTaskEmail["username"] = username
        createTaskEmail['createUsername'] = createusername
        createTaskEmail["name"] = name
        createTaskEmail["dead_line_time"] = _datetime2str(dead_line_time)
        createTaskEmail["priority"] = _priority2str(priority)
        createTaskEmail["explain"] = explain
        createTaskEmail["now"] = _datetime2str(datetime.datetime.now())
        createTaskEmail['email'] = useremail
        from settings import host_url
        createTaskEmail['url'] = host_url + "/edit/task/" + str(task['_id'])
        from db.cache import taskQueue
        _queue = {"type": "createTask", "data":createTaskEmail}
        taskQueue.put(_queue)
    def find_by_id_verbose(self, key):
        """Fetch a task by id and return a display-ready dict.

        Includes the owning team's member list; returns None when the task
        or its team cannot be found.
        """
        task = self.find_one({"_id": ObjectId(key)})
        if task:
            teamID = task['teamID']
            team = connection.Team.find_one({"_id":teamID})
            if team:
                ms = team['members']
                members = []
                for m in ms:
                    members.append({"username":m['name'], "email":m['email'], "role": m['role']})
            else:
                return None
            status = task['status']
            priority = task['priority']
            remarks = task['remarks']
            name = task['name']
            explain = task['explain']
            dead_line_time = task['dead_line_time']
            # Render datetimes as Chinese "YYYY年M月D日 H:M" strings.
            dead_line_time = str(dead_line_time.year) + "年" + str(dead_line_time.month) + "月" + str(dead_line_time.day) + "日 " + str(dead_line_time.hour) + ":" + str(dead_line_time.minute)
            createtime = task['create_time']
            # createtime is paired with its weekday index (Monday == 0).
            createtime = [str(createtime.year) + "年" + str(createtime.month) + "月" + str(createtime.day) + "日 " + str(createtime.hour) + ":" + str(createtime.minute) , createtime.weekday()]
            done_100 = task['done_100']
            taskDict = {
                "status": status,
                "dead_line_time": dead_line_time,
                "explain": explain,
                "remarks": remarks,
                "name": name,
                "createtime": createtime,
                "members": members,
                "priority": priority,
                "username": task["username"],
                "createusername": task["createusername"],
                "pubusername": task["pubusername"],
                "email": task["useremail"],
                "done_100": done_100,
                'start_time': [get_year_month_day_str_ch(task['start_time']), get_hour_minute_str(task['start_time'])],
                'done_time': [get_year_month_day_str_ch(task['done_time']), get_hour_minute_str(task['done_time'])],
            }
            return taskDict
        else:
            return None
    def updateByService(self, key, status, pub, pub_text, update_task_remarks, update_task_done_100, real_time):
        """Update a task's status/remarks/progress; optionally reassign it.

        `pub` == 1 reassigns the task to the user encoded in `pub_text`
        ("Name(email)") and queues a "pubTask" email. A task finished after
        its deadline is stored as status 4 (done late) instead of 2.
        Returns "Success" or "No Task".
        """
        def _priority2str(priority):
            # Map the numeric priority to its Chinese display label.
            if priority == 0:
                return u"紧急"
            elif priority == 1:
                return u"高"
            elif priority == 2:
                return u"中"
            elif priority == 3:
                return u"低"
            else:
                return u"随你便"
        def _datetime2str(d):
            # Chinese "YYYY年M月D日 H:M" display string (not zero-padded).
            return str(d.year) + "年" + str(d.month) + "月" + str(d.day) + "日 " + str(d.hour) + ":" + str(d.minute)
        task = self.find_one({"_id": ObjectId(key)})
        # NOTE(review): this late-completion check is repeated inside the
        # pub branch below; the second occurrence is redundant.
        if status == 2 and real_time > task["dead_line_time"]:
            status = 4
        if task:
            if pub == 1:
                if status == 2 and real_time > task["dead_line_time"]:
                    status = 4
                # pub_text has the form "Name(email)".
                useremail = pub_text.split("(")[1].split(")")[0]
                username = pub_text.split("(")[0]
                # Previous owner becomes the publisher of the reassignment.
                pubuseremail = task['useremail']
                pubusername = task['username']
                task['useremail'] = useremail
                task['username'] = username
                task['pubuseremail'] = pubuseremail
                task['pubusername'] = pubusername
                pubTaskEmail = {}
                pubTaskEmail["teamname"] = task["teamname"]
                pubTaskEmail["username"] = username
                pubTaskEmail['pubusername'] = pubusername
                pubTaskEmail["name"] = task['name']
                pubTaskEmail["dead_line_time"] = _datetime2str(task["dead_line_time"])
                pubTaskEmail["priority"] = _priority2str(task["priority"])
                pubTaskEmail["explain"] = task["explain"]
                pubTaskEmail["now"] = _datetime2str(datetime.datetime.now())
                pubTaskEmail['email'] = useremail
                from settings import host_url
                pubTaskEmail['url'] = host_url + "/edit/task/" + str(task['_id'])
                from db.cache import taskQueue
                _queue = {"type": "pubTask", "data":pubTaskEmail}
                taskQueue.put(_queue)
            task['status'] = status
            task['remarks'] = update_task_remarks
            task['done_100'] = update_task_done_100
            task['update_time'] = datetime.datetime.now()
            if status == 1:
                task['start_time'] = real_time
            elif status in [2, 4]:
                task['done_time'] = real_time
            else:
                pass
            # Completion percentage is forced to match terminal states.
            if status in [2, 4]:
                task['done_100'] = 100
            if status == 0:
                task['done_100'] = 0
            task.save()
            return "Success"
        else:
            return "No Task"
@connection.register
class TaskMemberSummary(BaseModel):
    """Per-member task summary page stored as pre-rendered HTML."""
    __collection__ = "TaskMemberSummary_Col"
    structure = {
        "teamID": ObjectId,
        "useremail": basestring,
        "teamname": basestring,
        "username": basestring,
        "createtime": datetime.datetime,
        "html": basestring,
        "rfs": dict  # reserve fields
    }
    default_values = {
        "teamname": "",
        "username": "",
        # BUG FIX: pass the callable, not a call. `datetime.datetime.now()`
        # was evaluated once at import time, so every document defaulted to
        # the same stale timestamp. mongokit invokes callable defaults per
        # document.
        "createtime": datetime.datetime.now,
        "html": ""
    }

    def newHtml(self, _db_dict):
        """Create a summary document from `_db_dict` and return its id as str."""
        tms = self()
        tms['createtime'] = datetime.datetime.now()
        for key in _db_dict:
            tms[key] = _db_dict[key]
        tms.save()
        return str(tms['_id'])
@connection.register
class TaskTeamSummary(BaseModel):
    """Team-wide task summary page stored as pre-rendered HTML."""
    __collection__ = "TaskTeamSummary_Col"
    structure = {
        "teamID": ObjectId,
        "teamname": basestring,
        "createtime": datetime.datetime,
        "html": basestring,
        "rfs": dict  # reserve fields
    }
    default_values = {
        "teamname": "",
        # BUG FIX: pass the callable, not a call. `datetime.datetime.now()`
        # was evaluated once at import time, giving every document the same
        # stale default. mongokit invokes callable defaults per document.
        "createtime": datetime.datetime.now,
        "html": ""
    }

    def newHtml(self, _db_dict):
        """Create a summary document from `_db_dict` and return its id as str."""
        tts = self()
        tts['createtime'] = datetime.datetime.now()
        for key in _db_dict:
            tts[key] = _db_dict[key]
        tts.save()
        return str(tts['_id'])
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Channel
class UserRegisterForm(UserCreationForm):
    """Signup form: username + required email + password with confirmation."""
    # Declared explicitly so email is rendered as a required EmailField.
    email = forms.EmailField()

    class Meta:
        model = User
        # List both password fields so the field set matches what
        # UserCreationForm validates (password1 + password2 confirmation);
        # the original omitted 'password2'.
        fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
    """Profile form letting a user change their username and email."""
    # Declared explicitly so email is rendered as a required EmailField.
    email = forms.EmailField()
    class Meta:
        model = User
        fields = ['username', 'email']
class ChannelUpdateForm(forms.ModelForm):
    """Form for updating only a Channel's image."""
    class Meta:
        model = Channel
        fields = ['image']
import threading as th
class PallThread(th.Thread):
    """Thread base class with a cooperative stop flag.

    Subclasses should override run() and poll `self.stop_request` to exit.
    """

    def __init__(self, *args, **kwargs):
        super(PallThread, self).__init__(*args, **kwargs)
        # BUG FIX: this was a class attribute, so one Event was shared by
        # every instance — stopping one thread "stopped" them all.
        self.stop_request = th.Event()

    def run(self):
        pass

    def stop(self):
        """Signal the thread to stop and wait for it to finish."""
        self.stop_request.set()
        # BUG FIX: `self.is_alive` (no call) is a bound method and always
        # truthy, so join() ran even for never-started threads and raised.
        if self.is_alive():
            self.join()
|
import pandas as pd
import networkx as nx
import numpy as np
import timeit
import argparse
from networkx import eccentricity
from networkx.algorithms import approximation as approx
if __name__ == "__main__":
    # Build "magic" graph features over the union of train+test questions:
    # question frequency, connected-component size, PageRank and shared
    # neighbour counts, written out as train/test feature CSVs.
    parser = argparse.ArgumentParser(description='magic features graph compute')
    parser.add_argument('--type', default="clean", choices=['clean', 'stem', 'lem', 'raw'], type=str)
    main_args = parser.parse_args()
    typ = main_args.type
    train_orig = pd.read_csv('data/{}_train.csv'.format(typ), header=0)
    test_orig = pd.read_csv('data/{}_test.csv'.format(typ), header=0)
    tic0 = timeit.default_timer()
    # Stack question1/question2 from both splits into one column so every
    # distinct question text gets one integer id (its row index).
    df1 = train_orig[['{}_question1'.format(typ)]].copy()
    df2 = train_orig[['{}_question2'.format(typ)]].copy()
    df1_test = test_orig[['{}_question1'.format(typ)]].copy()
    df2_test = test_orig[['{}_question2'.format(typ)]].copy()
    df2.rename(columns={'{}_question2'.format(typ): '{}_question1'.format(typ)}, inplace=True)
    df2_test.rename(columns={'{}_question2'.format(typ): '{}_question1'.format(typ)}, inplace=True)
    train_questions = df1.append(df2)
    train_questions = train_questions.append(df1_test)
    train_questions = train_questions.append(df2_test)
    train_questions.drop_duplicates(subset=['{}_question1'.format(typ)], inplace=True)
    train_questions.reset_index(inplace=True, drop=True)
    questions_dict = pd.Series(train_questions.index.values, index=train_questions['{}_question1'.format(typ)].values).to_dict()
    train_cp = train_orig.copy()
    test_cp = test_orig.copy()
    try:
        train_cp.drop(['qid1', 'qid2'], axis=1, inplace=True)
    except Exception:
        # qid columns are absent for some input variants.
        pass
    # Mark test rows so they can be split back out after concatenation.
    test_cp['is_duplicate'] = -1
    test_cp.rename(columns={'test_id': 'id'}, inplace=True)
    comb = pd.concat([train_cp, test_cp])
    comb['q1_hash'] = comb['{}_question1'.format(typ)].map(questions_dict)
    comb['q2_hash'] = comb['{}_question2'.format(typ)].map(questions_dict)
    added_features = []
    q1_vc = comb.q1_hash.value_counts().to_dict()
    q2_vc = comb.q2_hash.value_counts().to_dict()
    print("computing frequency")

    def try_apply_dict(x, dict_to_apply):
        # Missing keys count as zero occurrences.
        try:
            return dict_to_apply[x]
        except KeyError:
            return 0
    # map to frequency space
    comb['q1_freq'] = comb['q1_hash'].map(lambda x: try_apply_dict(x, q1_vc) + try_apply_dict(x, q2_vc))
    comb['q2_freq'] = comb['q2_hash'].map(lambda x: try_apply_dict(x, q1_vc) + try_apply_dict(x, q2_vc))
    comb['min_freq'] = np.minimum(comb['q1_freq'], comb['q2_freq'])
    comb['max_freq'] = np.maximum(comb['q1_freq'], comb['q2_freq'])
    comb['ratio_freq'] = comb['min_freq'] / np.maximum(1, comb['max_freq'])
    added_features.extend(['min_freq', 'max_freq', 'ratio_freq'])
    print("creating graph")
    g = nx.Graph()
    g.add_nodes_from(comb.q1_hash)
    g.add_nodes_from(comb.q2_hash)
    edges = list(comb[['q1_hash', 'q2_hash']].to_records(index=False))
    g.add_edges_from(edges)
    print("computing connected component")
    # BUG FIX: `connected_components` was never imported (only eccentricity
    # and approximation were), so this line raised NameError; use the
    # networkx namespace explicitly.
    res = nx.connected_components(g)
    comp_dict = {}
    for comp in res:
        size = len(comp)
        for elem in comp:
            comp_dict[elem] = size
    comb['cc_size'] = comb.apply(lambda row: comp_dict[row.q1_hash], axis=1)
    added_features.extend(['cc_size'])
    print("computing page rank")
    pr = nx.pagerank(g, alpha=0.9)
    comb['pr_q1'] = comb.apply(lambda row: pr[row.q1_hash], axis=1)
    comb['pr_q2'] = comb.apply(lambda row: pr[row.q2_hash], axis=1)
    # BUG FIX: min_pr/max_pr referenced 'ecc_q1', a column that is only
    # created further below — this raised KeyError and was semantically
    # wrong for a PageRank feature anyway.
    comb['min_pr'] = np.minimum(comb['pr_q1'], comb['pr_q2'])
    comb['max_pr'] = np.maximum(comb['pr_q1'], comb['pr_q2'])
    comb['ratio_pr'] = comb['min_pr'] / np.maximum(1, comb['max_pr'])
    added_features.extend(['min_pr', 'max_pr', 'ratio_pr'])
    print("computing intersection count")

    def get_intersection_count(row):
        # Number of questions linked to both q1 and q2 in the co-occurrence graph.
        return len(set(g.neighbors(row.q1_hash)).intersection(set(g.neighbors(row.q2_hash))))
    comb['intersection_count'] = comb.apply(lambda row: get_intersection_count(row), axis=1)
    # NOTE: despite the 'ecc' name these are neighbour (degree) counts,
    # not graph eccentricity.
    comb['ecc_q1'] = comb.apply(lambda row: len(set(g.neighbors(row.q1_hash))), axis=1)
    comb['ecc_q2'] = comb.apply(lambda row: len(set(g.neighbors(row.q2_hash))), axis=1)
    comb['min_ecc'] = np.minimum(comb['ecc_q1'], comb['ecc_q2'])
    comb['max_ecc'] = np.maximum(comb['ecc_q1'], comb['ecc_q2'])
    comb['ratio_ecc'] = comb['min_ecc'] / np.maximum(1, comb['max_ecc'])
    added_features.extend(['min_ecc', 'max_ecc', 'ratio_ecc'])
    train_comb = comb[comb['is_duplicate'] >= 0][['id'] + added_features].set_index('id')
    test_comb = comb[comb['is_duplicate'] < 0][['id'] + added_features].rename(columns={'id': 'test_id'}).set_index('test_id')
    train_comb.to_csv('../features/{}_train_magic_features_graph.csv'.format(typ))
    test_comb.to_csv('../features/{}_test_magic_features_graph.csv'.format(typ))
from flask import Blueprint, render_template
from flask_login import current_user
import os
from app.forms import Print1
import datetime
test = Blueprint('test', __name__)


@test.route('/select_pay', methods=['GET', 'POST'])
def test_select():
    """Render the print/pay selection page and store an uploaded file.

    Uploaded files are saved under static/Upload_Files with a name of the
    form "<phone>_<timestamp><original name>" so they are unique per user
    and per second.
    """
    form = Print1()
    datetimes = datetime.datetime.now()
    # Not zero-padded, e.g. "2024-3-7_9-5-1".
    now = str(datetimes.year) + "-" + str(datetimes.month) + "-" + str(datetimes.day) + "_" + str(
        datetimes.hour) + "-" + str(datetimes.minute) + "-" + str(datetimes.second)
    if form.validate_on_submit():
        print_file = form.print_file.data
        # SECURITY FIX: the client controls the filename; strip any directory
        # components so names like "../../x" cannot escape the upload folder.
        # (basename does not strip Windows "\\" separators — consider
        # werkzeug.utils.secure_filename for full hardening.)
        safe_name = os.path.basename(print_file.filename)
        new_filename = str(current_user.Tel_Number) + '_' + now + safe_name
        basepath = os.path.abspath(os.path.dirname(__file__))  # this file's directory
        parentdir = os.path.dirname(basepath)  # parent (application) directory
        upload_path = os.path.join(parentdir, 'static/Upload_Files', new_filename)
        print_file.save(upload_path)
    return render_template('select1.html', now=now, form=form)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-03 04:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the postfeed app.

    Adds default ordering to Post and Tag, renames Post's media fields and
    makes the Tag->Post relation optional. Generated code: change schema via
    a new migration rather than editing this one.

    NOTE(review): the renames post_doc->pic and post_pic->video look swapped
    relative to the field names — confirm against migration 0002's fields.
    """

    dependencies = [
        ('postfeed', '0002_auto_20170905_2350'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'ordering': ('text',)},
        ),
        migrations.AlterModelOptions(
            name='tag',
            options={'ordering': ('tag',)},
        ),
        migrations.RenameField(
            model_name='post',
            old_name='post_doc',
            new_name='pic',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='post_text',
            new_name='text',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='post_pic',
            new_name='video',
        ),
        migrations.RemoveField(
            model_name='post',
            name='post_video',
        ),
        migrations.AlterField(
            model_name='tag',
            name='post',
            field=models.ManyToManyField(blank=True, to='postfeed.Post'),
        ),
    ]
|
# Generated by Django 3.0.3 on 2020-02-10 19:57
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop GhostPost.submitDate.

    Generated code — do not edit; make further schema changes in a new
    migration.
    """

    dependencies = [
        ('ghostpost', '0006_auto_20200210_1954'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='ghostpost',
            name='submitDate',
        ),
    ]
|
#pip install google-cloud-container google-api-python-client
from google.cloud import container_v1
from googleapiclient import discovery
# Collect the external NAT IPs of every node in a GKE cluster by walking
# cluster -> instance groups -> instances via the Container and Compute APIs.
client = container_v1.ClusterManagerClient()
service = discovery.build('compute', 'v1')
# TODO(review): project_id is left empty but is required by the
# instanceGroups/instances calls below — fill in before running.
project_id = ''
ip=[]
# Older positional call form, kept for reference:
#response = client.get_cluster(project_id,zone,cluster_name)
# <project_name>/<zone>/<cluster_name> are placeholders to substitute.
response = client.get_cluster(name='projects/<project_name>/locations/<zone>/clusters/<cluster_name>')
instance_group_urls=(list(response.instance_group_urls))
for instance_group in instance_group_urls:
    # URL shape: .../zones/<zone>/instanceGroupManagers/<name>; splitting on
    # 'zones' then '/' puts the zone at index 1 and the group name at index 3.
    data=instance_group.split('zones')[1].split('/')
    compute_zone=data[1]
    instance_group_manager=data[3]
    request = service.instanceGroups().listInstances(project=project_id, zone=compute_zone, instanceGroup=instance_group_manager)
    # NOTE: rebinds `response` (previously the cluster description).
    response = request.execute()
    for instance_data in response['items']:
        instance_name=instance_data['instance'].split('/')[-1]
        # First NIC's first access config holds the external (NAT) IP —
        # assumes every node actually has an external IP; TODO confirm.
        instance_ip = service.instances().get(project=project_id,zone=compute_zone,instance=instance_name).execute()['networkInterfaces'][0]['accessConfigs'][0]['natIP']
        ip.append(instance_ip)
print(ip)
|
from django.http import Http404
from api.models import TaskList, Task
from api.serializers import TaskListSerializer2, TaskSerializer2
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from rest_framework import status
# /api/task_lists/
class TaskLists(APIView):
    """List all task lists or create a new one (/api/task_lists/)."""

    def get(self, request):
        """Return every TaskList, serialized."""
        t_lists = TaskList.objects.all()
        serializer = TaskListSerializer2(t_lists, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request):
        """Create a TaskList from the request body."""
        serializer = TaskListSerializer2(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # FIX: validation failures are client errors (400), not server
        # errors (500).
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# /api/task_lists/<int:pk>/
class TaskListsTask(APIView):
    """Retrieve, update or delete one TaskList (/api/task_lists/<pk>/)."""

    def get_object(self, pk):
        """Return the TaskList with id `pk`, or raise Http404."""
        try:
            return TaskList.objects.get(id=pk)
        except TaskList.DoesNotExist:
            raise Http404

    def get(self, request, pk):
        # Local renamed so it no longer shadows the TaskList model class.
        task_list = self.get_object(pk)
        serializer = TaskListSerializer2(task_list)
        return Response(serializer.data)

    def put(self, request, pk):
        task_list = self.get_object(pk)
        serializer = TaskListSerializer2(instance=task_list, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # FIX: validation failures are client errors (400), not 500s.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk):
        task_list = self.get_object(pk)
        task_list.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# /api/task_lists/<int:pk>/tasks/
class Tasks(APIView):
    """List or create tasks inside one list (/api/task_lists/<pk>/tasks/).

    FIX: `@permission_classes` is a decorator for function-based views and
    was a no-op on these methods — IsAuthenticated was never enforced.
    Declaring `permission_classes` on the class applies it for real.
    """
    permission_classes = (IsAuthenticated,)

    def get_object(self, pk):
        """Return the TaskList with id `pk`, or raise Http404."""
        try:
            return TaskList.objects.get(id=pk)
        except TaskList.DoesNotExist:
            raise Http404

    def get(self, request, pk):
        t_list = self.get_object(pk)
        tasks = t_list.task_set.all()
        serializer = TaskSerializer2(tasks, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, pk):
        nested = request.data.pop('task_list')
        parent = TaskList(nested['id'], nested['name'])
        # FIX: DRF serializers don't accept arbitrary model kwargs in
        # __init__ (`task_list=...` raised TypeError); the relation is
        # supplied at save() time instead.
        serializer = TaskSerializer2(data=request.data)
        if serializer.is_valid():
            serializer.save(task_list=parent)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# /api/task_lists/<int:pk>/tasks/<int:pk2>
class Task(APIView):
    """One task in a list (/api/task_lists/<pk>/tasks/<pk2>).

    NOTE: this view class shadows the `Task` model imported at module level,
    so the model's DoesNotExist exception must be reached through the
    related manager (`t_list.task_set.model`). The original
    `except Task.DoesNotExist` resolved to this view class — which has no
    DoesNotExist attribute — and raised AttributeError instead of being
    caught.
    """

    def get_object(self, pk):
        """Return the TaskList with id `pk`, or raise Http404."""
        try:
            return TaskList.objects.get(id=pk)
        except TaskList.DoesNotExist:
            raise Http404

    def get(self, request, pk, pk2):
        t_list = self.get_object(pk)
        try:
            task = t_list.task_set.get(id=pk2)
        except t_list.task_set.model.DoesNotExist as e:
            return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)
        serializer = TaskSerializer2(task)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk, pk2):
        t_list = self.get_object(pk)
        data = request.data
        try:
            task = t_list.task_set.get(id=pk2)
        except t_list.task_set.model.DoesNotExist as e:
            return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)
        nested = data.pop('task_list')
        parent = TaskList(nested['id'], nested['name'])
        serializer = TaskSerializer2(instance=task, data=data)
        if serializer.is_valid():
            serializer.save(task_list=parent)
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, pk2):
        t_list = self.get_object(pk)
        # FIX: a missing task previously escaped as an unhandled exception.
        try:
            task = t_list.task_set.get(id=pk2)
        except t_list.task_set.model.DoesNotExist as e:
            return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)
        task.delete()
        return Response({}, status=status.HTTP_204_NO_CONTENT)
class Solution:
    def threeSumMulti(self, arr: List[int], target: int) -> int:
        """Count index triples i<j<k with arr[i]+arr[j]+arr[k] == target.

        Values are bounded by 100, so we count by value frequency and split
        into the three-equal, two-equal and all-distinct cases.
        Result is taken modulo 1e9+7.
        """
        freq = [0] * 101
        for value in arr:
            freq[value] += 1
        total = 0
        # Case 1: all three values equal — C(c, 3) combinations.
        if target % 3 == 0:
            c = freq[target // 3]
            if c >= 3:
                total += c * (c - 1) * (c - 2) // 6
        # Case 2: exactly two values equal (either the smaller or larger one).
        for a in range(101):
            for b in range(a + 1, 101):
                if 2 * a + b == target and freq[a] >= 2 and freq[b] >= 1:
                    total += freq[a] * (freq[a] - 1) // 2 * freq[b]
                if a + 2 * b == target and freq[a] >= 1 and freq[b] >= 2:
                    total += freq[a] * freq[b] * (freq[b] - 1) // 2
        # Case 3: three distinct values a < b < c.
        for a in range(101):
            for b in range(a + 1, 101):
                c = target - a - b
                if b < c <= 100:
                    total += freq[a] * freq[b] * freq[c]
        return total % (10 ** 9 + 7)
|
from django.db import models
from django.utils import timezone
from users.models import User
from django.shortcuts import get_object_or_404
# Create your models here.
# Account type choices (Turkish banking terms): savings, overdraft deposit,
# standard, and retirement accounts. Stored value equals the display value.
ACCOUNT_TYPE_CHOICES = (
    ('BİRİKİM HESABI','BİRİKİM HESABI'),
    ('KREDİLİ MEVDUAT HESABI','KREDİLİ MEVDUAT HESABI'),
    ('NORMAL HESAP','NORMAL HESAP'),
    ('EMEKLİLİK HESABI','EMEKLİLİK HESABI'),
)
class Currency(models.Model):
    """A currency type (e.g. TRY, USD) referenced by accounts and transactions."""
    currency_type = models.CharField(max_length=50)
    def __str__(self):
        return self.currency_type
class Accounts(models.Model):
    """A customer bank account.

    NOTE(review): ForeignKey without on_delete implies Django < 2.0;
    `amount` is an IntegerField, so balances carry no fractional units.
    """
    iban = models.CharField(max_length=60, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    account_type = models.CharField(max_length=50, choices=ACCOUNT_TYPE_CHOICES)
    currency_type = models.ForeignKey(Currency, null=True)
    user = models.ForeignKey(User, null=True, blank=True)
    # Current balance in whole currency units.
    amount = models.IntegerField()
    def __str__(self):
        return 'with IBAN : ' + str(self.iban)
    #return 'Account owner is : ' + self.user.first_name + ' IBAN no is : ' + self.iban
class Loan(models.Model):
    """A loan attached to an account, paid off in `installment` parts."""
    account = models.ForeignKey(Accounts)
    # Percent rates, e.g. 12.50.
    interest_rate = models.DecimalField(max_digits=4, decimal_places=2, blank=True, default=0)
    # Number of installments.
    installment = models.IntegerField(default=1)
    start_date = models.DateField()
    finish_date = models.DateField()
    # Principal in whole currency units.
    amount = models.IntegerField()
    # Extra rate applied to late payments.
    delay_interest_rate = models.DecimalField(max_digits=4, decimal_places=2, blank=True, default=0)
class LoanAccountPayment(models.Model):
    """A single scheduled installment payment of a Loan."""
    account = models.ForeignKey(Accounts)
    loan = models.ForeignKey(Loan)
    # 1-based position within the loan's payment plan.
    installment_number = models.IntegerField()
    is_paid = models.BooleanField(default=False)
    # Due date of this installment.
    finish_date = models.DateField()
    is_active = models.BooleanField(default=True)
    # Installment amount in whole currency units.
    amount = models.IntegerField()
class Transaction(models.Model):
    """A money transfer between two accounts.

    `validate_transaction` / `cancel_validate_transaction` must be checked
    by the caller before `make_transaction` / `cancel_transaction`; the
    mutating methods do not re-validate.
    """
    description = models.CharField(max_length=800)
    amount = models.IntegerField()
    currency_type = models.ForeignKey(Currency)
    sourceaccount = models.ForeignKey(Accounts, related_name='source_account')
    destinationaccount = models.ForeignKey(Accounts, related_name='destination_account')
    sending_date = models.DateField(auto_now_add=True)
    is_done = models.BooleanField(default=False)

    def make_transaction(self):
        """Move `amount` from source to destination and mark the transfer done."""
        source = get_object_or_404(Accounts, pk=self.sourceaccount.id)
        destination = get_object_or_404(Accounts, pk=self.destinationaccount.id)
        source.amount = source.amount - self.amount
        source.save()
        # BUG FIX: was `self.amount + self.amount`, which replaced the
        # destination balance with twice the transfer amount instead of
        # crediting it.
        destination.amount = destination.amount + self.amount
        destination.save()
        self.is_done = True
        self.save()
        return self

    def validate_transaction(self):
        """
        It validates transaction can be made or not.
        If there is not enough money transaction cannot be made
        :param transaction:
        :return:
        """
        # NOTE(review): strictly greater-than, so a transfer of the exact
        # balance is rejected — confirm this is intended.
        if self.sourceaccount.amount > self.amount:
            return True
        else:
            return False

    def cancel_transaction(self):
        """Reverse a completed transfer and mark the transaction undone."""
        source = get_object_or_404(Accounts, pk=self.sourceaccount.id)
        destination = get_object_or_404(Accounts, pk=self.destinationaccount.id)
        source.amount = source.amount + self.amount
        source.save()
        destination.amount = destination.amount - self.amount
        destination.save()
        self.is_done = False
        self.save()
        return self

    def cancel_validate_transaction(self):
        """
        It validates transaction can be canceled,
        if there is not enough money destination account transaction cannot be cancel
        :param transaction:
        :return:
        """
        if self.destinationaccount.amount > self.amount:
            return True
        else:
            return False
|
import psycopg2
from psycopg2 import Error
# Open the module-level connection used by every helper function below.
# NOTE(review): port 4000 is unusual for PostgreSQL (default 5432) — confirm.
try:
    # Connect to an existing database
    connection = psycopg2.connect(user="postgres",
                                  password="postgres",
                                  host="127.0.0.1",
                                  port="4000",
                                  database="postgres")
    # Create a cursor to perform database operations
    cursor = connection.cursor()
    # Print PostgreSQL details
    print("PostgreSQL server information")
    print(connection.get_dsn_parameters(), "\n")
    # Executing a SQL query
    cursor.execute("SELECT version();")
    # Fetch result
    record = cursor.fetchone()
    print("You are connected to - ", record, "\n")
    cursor.close()
except (Exception, Error) as error:
    # NOTE(review): if connect() itself fails, `connection` is never bound
    # and every helper below raises NameError when called.
    print("Error while connecting to PostgreSQL", error)
def check_table_exists():
    """Return True if the public.news table already exists."""
    exists = False
    try:
        cur = connection.cursor()
        cur.execute("SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = 'news'")
        # Any returned row means the table is present.
        if cur.fetchall():
            exists = True
    except (Exception, Error) as error:
        print("Error while reading data...", error)
    return exists
def create_table():
    """Create the `news` table: URL primary key plus text and timestamp columns."""
    try:
        cursor = connection.cursor()
        create_table_query = '''CREATE TABLE news
            (URL varchar(255) PRIMARY KEY NOT NULL,
            TITLE TEXT NOT NULL,
            SUB_TITLE TEXT NOT NULL,
            ABSTRACT TEXT NOT NULL,
            DOWNLOAD_TIME timestamp NOT NULL,
            UPDATE_TIME timestamp); '''
        # Execute a command: this creates a new table
        cursor.execute(create_table_query)
        connection.commit()
        print("Table created successfully in PostgreSQL ")
    except (Exception, Error) as error:
        print("Error while creating Table", error)
def check_news_exists(link):
    """Return True if a news row with the given URL already exists.

    SECURITY FIX: the URL comes from scraped (untrusted) input; use a
    parameterized query instead of string concatenation to prevent SQL
    injection.
    """
    exists = False
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT 1 FROM news WHERE URL = %s", (link,))
        exists = cursor.fetchone() is not None
    except (Exception, Error) as error:
        print("Error while reading data...", error)
    return exists
def insert_scraped_data(link, title, sub_title, abstract, download_time):
    """Insert one scraped article row; UPDATE_TIME is left NULL.

    SECURITY FIX: all values come from scraped (untrusted) pages; use a
    parameterized INSERT with an explicit column list instead of string
    concatenation, which both broke on embedded quotes and allowed SQL
    injection.
    """
    try:
        cursor = connection.cursor()
        insert_query = ("INSERT INTO news (URL, TITLE, SUB_TITLE, ABSTRACT, DOWNLOAD_TIME) "
                        "VALUES (%s, %s, %s, %s, %s)")
        cursor.execute(insert_query, (link, title, sub_title, abstract, download_time))
        connection.commit()
        print("Data inserted successfully in PostgreSQL ")
    except (Exception, Error) as error:
        print("Error while inserting data...", error)
def update_scraped_time(link, download_time):
    """Stamp UPDATE_TIME on the row whose URL matches `link`.

    SECURITY FIX: parameterized UPDATE instead of concatenating untrusted
    scraped values into the SQL string.
    """
    try:
        cursor = connection.cursor()
        cursor.execute("UPDATE news SET UPDATE_TIME = %s WHERE URL = %s",
                       (download_time, link))
        connection.commit()
        print("Data updated successfully in PostgreSQL ")
    except (Exception, Error) as error:
        print("Error while updating data...", error)
import argparse
import os
from os.path import split,join,splitext
import xml.etree.ElementTree as ET
import re
import numpy as np
from shapely.geometry import Polygon
from debug_tool import paint_polygons
from matplotlib import pyplot as plt
def parse_point(s):
    """Parse a "(x,y)" string into a (float, float) tuple.

    re.split on "," "(" ")" yields ['', x, y, ''] for a well-formed point.
    """
    # FIX: removed a dead leading `s.split(',')` whose result was discarded.
    _, p1, p2, _ = re.split(r',|\(|\)', s)
    return (float(p1), float(p2))
def parse_line(s):
    """Parse "(x1,y1)--(x2,y2)" into a ((x1,y1), (x2,y2)) pair of points."""
    sep = s.find('--')
    return (parse_point(s[:sep]), parse_point(s[sep + 2:]))
def parse_pole(line):
    """Parse "left_line|right_line" into a (left, right) pair of lines."""
    sep = line.find('|')
    return (parse_line(line[:sep]), parse_line(line[sep + 1:]))
def parse_gt_pole(s):
    """Parse "x1,y1;x2,y2;x3,y3;x4,y4" into ((top_l, bot_l), (top_r, bot_r)).

    The four corner points are ordered by y to split top from bottom, then
    by x to split left from right.
    """
    values = [float(v) for v in re.split(';|,', s)]
    corners = sorted(zip(values[0::2], values[1::2]), key=lambda pt: pt[1])
    top_pair = sorted(corners[:2])
    bottom_pair = sorted(corners[2:])
    return ((top_pair[0], bottom_pair[0]), (top_pair[1], bottom_pair[1]))
def parse_gt(fp):
    """Parse the ground-truth XML into {sample_name: [pole, ...]}.

    Each <image> element's children carrying a 'points' attribute are parsed
    as poles; the key is the image file name without directory or extension.
    """
    tree = ET.parse(fp)
    gt_map = {}
    # FIX: Element.getchildren() was deprecated and removed in Python 3.9;
    # iterating an Element directly yields its children.
    for c in tree.getroot():
        if 'image' == c.tag:
            poles = [parse_gt_pole(p.get('points')) for p in c if 'points' in p.keys()]
            name = split(c.get('name'))[-1]
            name = splitext(name)[0]
            gt_map[name] = poles
    return gt_map
def area_of_bbox(bbox):
    """Return the area of an axis-aligned box ((x_min, y_min), (x_max, y_max)).

    Raises ValueError for an inverted box. (The original used `assert`,
    which is silently stripped under `python -O`.)
    """
    (x_min, y_min), (x_max, y_max) = bbox
    area = (x_max - x_min) * (y_max - y_min)
    if area < 0:
        raise ValueError("bbox has negative area: %r" % (bbox,))
    return area
def bbox_of_pole(pole):
    """Axis-aligned bounding box ((x_min, y_min), (x_max, y_max)) of a pole.

    A pole is ((top_left, bottom_left), (top_right, bottom_right)); the x
    extent is taken from the left/right lines respectively and the y extent
    from the left line's top and the right line's bottom, matching the
    original logic.
    """
    # FIX: removed a dead local (`pts`) that was built but never used.
    left, right = pole
    x_min = min(left[0][0], left[1][0])
    x_max = max(right[0][0], right[1][0])
    y_min = min(left[0][1], left[1][1])
    y_max = max(right[0][1], right[1][1])
    return ((x_min, y_min), (x_max, y_max))
def polygon_of_pole(pole):
    """Build a shapely Polygon from a pole's four corner points.

    Vertex order: top-left, bottom-left, bottom-right, top-right.
    Asserts that each line is ordered top-before-bottom (smaller y first).
    """
    assert pole[0][0][1] < pole[0][1][1], pole
    assert pole[1][0][1] < pole[1][1][1], pole
    points = [pole[0][0], pole[0][1], pole[1][1], pole[1][0]]
    return Polygon(points)
def calculate_iou_of_poles(pole_a, pole_b):
    """Intersection-over-union of two poles as quadrilateral polygons.

    Returns 0.0 when shapely cannot compute the intersection (e.g. an
    invalid self-intersecting polygon raises a topology error).
    """
    polygon_a = polygon_of_pole(pole_a)
    polygon_b = polygon_of_pole(pole_b)
    # print(polygon_a)
    # print(polygon_b)
    try:
        intersection = polygon_a.intersection(polygon_b)
    except Exception as e:
        # Degenerate geometry: treat as non-overlapping.
        print(e)
        # paint_polygons(polygon_a, polygon_b)
        # plt.show()
        return 0.0
    else:
        # print(intersection)
        return intersection.area/ (polygon_a.area + polygon_b.area - intersection.area)
def calculate_iou_of_bbox(boxA, boxB):
    """Intersection-over-union of two axis-aligned boxes ((x0, y0), (x1, y1))."""
    # Corners of the intersection rectangle.
    ix0 = max(boxA[0][0], boxB[0][0])
    iy0 = max(boxA[0][1], boxB[0][1])
    ix1 = min(boxA[1][0], boxB[1][0])
    iy1 = min(boxA[1][1], boxB[1][1])
    # Clamp each side to zero so disjoint boxes yield no overlap.
    interArea = abs(max(ix1 - ix0, 0) * max(iy1 - iy0, 0))
    if interArea == 0:
        return 0
    # IoU = intersection / (areaA + areaB - intersection).
    union = float(area_of_bbox(boxA) + area_of_bbox(boxB) - interArea)
    return interArea / union
# Minimum IoU for a detection to count as matching a ground-truth pole.
IOU_THRESHOLD = 0.5
# Guards the precision/recall divisions against division by zero.
EPS = 1e-9
def compare_with_groundtruth(detected_poles, ground_truth):
    """Match detections against ground truth by best IoU.

    Returns (true_detection, false_detection, not_detected) where
    true_detection pairs each matched ground-truth pole with its detection.
    """
    true_detection, not_detected = [], []
    matched = [False for _ in detected_poles]
    for gt_pole in ground_truth:
        ious = [calculate_iou_of_poles(gt_pole, det) for det in detected_poles]
        best = int(np.argmax(ious))
        if ious[best] > IOU_THRESHOLD:
            true_detection.append((gt_pole, detected_poles[best]))
            matched[best] = True
        else:
            not_detected.append(gt_pole)
    # Everything never claimed by a ground-truth pole is a false positive.
    false_detection = [det for det, hit in zip(detected_poles, matched) if not hit]
    return true_detection, false_detection, not_detected
class DetectionEvaluator:
    """Scores per-sample detection files against a parsed ground-truth map."""

    def __init__(self, gt_fp, detection_directory):
        # Sample name -> ground-truth poles, parsed once up front.
        self.gt_map = parse_gt(gt_fp)
        self.detection_map = {}
        for file_name in os.listdir(detection_directory):
            if not file_name.endswith('.txt'):
                continue
            self.evaluate(join(detection_directory, file_name))

    def __getitem__(self, key):
        return self.detection_map[key]

    def evaluate(self, detection_file_path):
        """Score one detection file and record its stats in detection_map."""
        # Sample name is the file name without directory or extension.
        sample_name = splitext(split(detection_file_path)[-1])[0]
        with open(detection_file_path, 'r') as f:
            detected_poles = [parse_pole(l) for l in f.readlines()]
        true_detection = []
        false_detection = []
        ground_truth = self.gt_map[sample_name]
        # With zero detections, every ground-truth pole is missed.
        not_detected = ground_truth
        if len(detected_poles) != 0:
            true_detection, false_detection, not_detected = compare_with_groundtruth(detected_poles, ground_truth)
        # EPS keeps the divisions safe when there are no detections or no ground truth.
        self.detection_map[sample_name] = {'true_detection': true_detection,
                                           'false_detection': false_detection,
                                           'not_detected': not_detected,
                                           'true_positive': len(true_detection),
                                           'positive': len(detected_poles),
                                           'groundtruth_count': len(ground_truth),
                                           'precision': len(true_detection) / (len(detected_poles) + EPS),
                                           'recall': len(true_detection) / (len(ground_truth) + EPS)}
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('groundtruth_path')
    parser.add_argument('detection_result_directory')
    args = parser.parse_args()
    eva = DetectionEvaluator(args.groundtruth_path, args.detection_result_directory)
    # Aggregate counts over all evaluated samples for overall precision/recall.
    true_positive = 0
    positive = 0
    groundtruth_count = 0
    for e in eva.detection_map.values():
        true_positive += e['true_positive']
        positive += e['positive']
        groundtruth_count += e['groundtruth_count']
    # NOTE(review): divides by the raw totals — raises ZeroDivisionError when
    # no detections or no ground truth were seen; confirm inputs are non-empty.
    print('precision=%f, recall=%f' % (true_positive/positive, true_positive/groundtruth_count))
|
import tkinter
from datetime import *
from tkinter import *
from tkinter import messagebox
import random
from random import randint
from tkinter import ttk
# Age-restriction entry window for the lottery app.
window = Tk()
# Bug fix: geometry is a method; assigning to it never resized the window.
window.geometry("300x300")
window.title("Ithuba Lottery: Age Restriction")
#random_no = random.randint(range(1,49),6)
lottery = []
name_ = Entry(window, width=7)
name_.place(x=60, y=50)
name_label = Label(window, text="Name:")
name_label.place(x=10, y=50)
age_num = Entry(window, text="Age:", width=7)
age_num.place(x=60, y=80)
age_label = Label(window, text="Age:")
age_label.place(x=10, y=80)
date_label = Label(window)
date_label.config(text="Date: "+datetime.now().strftime("%m/%d/%y"))
file = "Lotto.txt"
#
# def age_confirm():
# try:
# if int(age_num.get()) < 18:
# messagebox.showwarning()("Error!", "You are underage")
# if int(age_num.get()) >= 18:
#
# nameVar = name_.get()
# ageVar = age_num.get()
# dateVar = str(date_label.cget("text"))
# messagebox.showinfo("Go through","Proceed to play lotto!!!!!!!")
#
# window.destroy()
#
# except:
# if age_num is str:
# print("Enter valid input")
#
# age_button = Button(window, command=age_confirm, text="Enter Lotto")
# age_button.place(x=90,y=105)
# window.mainloop()
import tkinter
from datetime import *
from tkinter import *
from tkinter import messagebox
import random
from random import randint
from tkinter import ttk
# Age-restriction entry window for the lottery app (second, live copy).
window = Tk()
# Bug fix: geometry is a method; assigning to it never resized the window.
window.geometry("300x300")
window.title("Ithuba Lottery: Age Restriction")
# Six distinct lottery numbers drawn from 1-48.
random_no = random.sample(range(1, 49), 6)
lottery = []
name_ = Entry(window, width=7)
name_.place(x=60, y=50)
name_label = Label(window, text="Name:")
name_label.place(x=10, y=50)
age_num = Entry(window, text="Age:", width=7)
age_num.place(x=60, y=80)
age_label = Label(window, text="Age:")
age_label.place(x=10, y=80)
date_label = Label(window)
date_label.config(text="Date: "+datetime.now().strftime("%m/%d/%y"))
file = "Lotto.txt"
def age_confirm():
    """Validate the entered age: warn under-18 users, let adults proceed."""
    try:
        age = int(age_num.get())
    except ValueError:
        # Bug fix: the old bare `except` checked `age_num is str`, which is
        # always False, so bad input was silently ignored.
        print("Enter valid input")
        return
    if age < 18:
        # Bug fix: was `messagebox.showwarning()(...)`, which called the
        # dialog's return value instead of the dialog function itself.
        messagebox.showwarning("Error!", "You are underage")
    else:
        nameVar = name_.get()
        ageVar = age_num.get()
        dateVar = str(date_label.cget("text"))
        messagebox.showinfo("Go through", "Proceed to play lotto!!!!!!!")
        window.destroy()
####################################################################################################################
####################################################################################################################
####################################################################################################################
# Second window: shows the lottery numbers once the player is through.
my_window = Tk()
# Bug fix: geometry is a method; assigning to it never resized the window.
my_window.geometry("400x400")
my_window.title("Ithuba Lottery: Lottery Numbers")
#
# system_label = Label(my_window, text = "Ithuba Lottery: Lotto Numbers", font = ('Arial Black',30))
# system_label.place(x=300,y=5)
try:
    # Entry widgets for the player's six chosen numbers, plus labels.
    num_A = Entry(my_window, width=10)
    num_B = Entry(my_window, width=10)
    num_C = Entry(my_window, width=10)
    num_D = Entry(my_window, width=10)
    num_E = Entry(my_window, width=10)
    num_F = Entry(my_window, width=10)
    lb = Label(my_window, text="Enter Lotto numbers:")
    result_lb = Label(my_window)
except ValueError as e:
    # Bug fix: "Enter number" + e raised TypeError (str + exception); wrap in
    # str(). NOTE(review): widget construction never raises ValueError, so
    # this handler is effectively dead — confirm before removing.
    messagebox.showerror("Error", "Enter number" + str(e))
def lottoNumbers():
    """Draw six random numbers in 1..49 (duplicates possible) into the display vars."""
    draws = [random.randint(1, 49) for _ in range(6)]
    lottoNum1.set(draws[0])
    lottoNum2.set(draws[1])
    lottoNum3.set(draws[2])
    lottoNum4.set(draws[3])
    lottoNum5.set(draws[4])
    lottoNum6.set(draws[5])
# Tk display variables bound to the six drawn lottery numbers.
lottoNum1 = StringVar()
lottoNum2 = StringVar()
lottoNum3 = StringVar()
lottoNum4 = StringVar()
lottoNum5 = StringVar()
lottoNum6 = StringVar()
# Button on the first window that runs the age check.
age_button = Button(window, command=age_confirm, text="Enter Lotto")
age_button.place(x=90,y=105)
# label1 = LabelFrame(window, text = "Text", bd = 20, insertwidth = 1, font =("Arial",20), justify = CENTER)
# #playButton = Button(window, text = "Press to Play", command = pick)
window.mainloop()
|
import scramble.puzzle
import time
# Placeholder scramble assigned to users before they signal readiness.
DUMMY_SCRAMBLE = scramble.puzzle.Scramble('000', '0', '', '')
class Game(object):
    """One multi-player game: a sequence of puzzles built from a puzzle database."""

    def __init__(self, gid, time_limit, users, puzzle_database):
        self.gid = gid
        self.time_limit = time_limit
        self.solved = False        # current puzzle's mystery solved?
        self.solved_count = 0      # number of puzzles fully solved so far
        self.users = users
        self.users_index = dict()  # uid -> user, for O(1) lookup
        for i, user in enumerate(self.users):
            self.users_index[user.uid] = user
            user.game_name = 'Player %d' % (i + 1)
        # load puzzle database
        self.puzzles = list()
        g = 0
        for puzzle in puzzle_database.puzzles:
            p = 0
            scrambles = list()
            for s in puzzle.scrambles:
                jumble = puzzle_database.jumbles[s.name]
                # Scramble ids embed game id, puzzle index and scramble index.
                scrambl = scramble.puzzle.Scramble('%sp%ds%d' % (gid, g, p),
                                                   str(p + 1), jumble.value, jumble.jumble)
                scrambl.indices = s.keys
                if p > 0:
                    # Doubly link consecutive scrambles so callers can walk the chain.
                    scrambl.prev_scramble = scrambles[-1]
                    scrambl.prev_scramble.next_scramble = scrambl
                scrambl.mystery = s.mystery
                if scrambl.mystery:
                    # reset because it will be filled with key letters
                    scrambl.scramble = ''
                scrambles.append(scrambl)
                p = p + 1
            # NOTE(review): scramble.parser is not imported in this file;
            # presumably the scramble package exposes it — confirm.
            pzl = scramble.parser.Puzzle(puzzle.name, puzzle.seconds, scrambles)
            self.puzzles.append(pzl)
            g = g + 1
        self.scrambles_index = dict()  # pid -> scramble, for O(1) lookup
        for puzzle in self.puzzles:
            for scrambl in puzzle.scrambles:
                self.scrambles_index[scrambl.pid] = scrambl
        # set up game for first puzzle
        self.start_puzzle(0)

    def completed(self):
        """True once the puzzle index has advanced past the last puzzle."""
        return self.puzzle >= len(self.puzzles)

    def start_puzzle(self, gindx):
        """Switch to puzzle *gindx* and reset per-puzzle state."""
        self.puzzle = gindx
        if self.completed():
            return
        self.start = time.time()
        self.solved = False
        # all players start game at first scramble
        for user in self.users:
            user.scramble = DUMMY_SCRAMBLE

    def all_users_ready(self):
        """True when every user has moved off the placeholder scramble."""
        for user in self.users:
            if user.scramble == DUMMY_SCRAMBLE:
                return False
        return True

    def user_ready(self, uid):
        """Move *uid* onto the current puzzle's first scramble."""
        user = self.get_user(uid)
        user.scramble = self.puzzles[self.puzzle].scrambles[0]
        # timer for first puzzle starts when all users are ready
        self.start = time.time()

    def timer(self):
        """Seconds remaining on the current puzzle (0 when out of puzzles)."""
        try:
            elapsed = int(time.time() - self.start)
            return self.puzzles[self.puzzle].seconds - elapsed
        except IndexError:
            return 0

    def get_scramble(self, pid):
        return self.scrambles_index[pid]

    def get_user(self, uid):
        return self.users_index[uid]

    def solve(self, pid, uid):
        """Credit *uid* with solving scramble *pid* and feed the mystery word."""
        scrambl = self.get_scramble(pid)
        if scrambl.solved is not None:
            # Already solved; the first solver keeps the credit.
            return
        user = self.get_user(uid)
        scrambl.solve(user.game_name)
        # Append this scramble's key letters to the final (mystery) scramble.
        mystery = self.puzzles[self.puzzle].scrambles[-1]
        for index in scrambl.indices:
            mystery.scramble += scrambl.value[index - 1]
        self.solved = mystery.solved is not None
        if self.solved:
            self.solved_count += 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import (
setup,
find_packages,
)
# Packaging metadata for the py-geth distribution.
setup(
    name='py-geth',
    # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
    version='2.0.1',
    description="""Run Go-Ethereum as a subprocess""",
    # NOTE(review): this keyword is provided by the setuptools-markdown
    # plugin; confirm it is declared (e.g. via setup_requires) elsewhere.
    long_description_markdown_filename='README.md',
    author='Piper Merriam',
    author_email='pipermerriam@gmail.com',
    url='https://github.com/ethereum/py-geth',
    include_package_data=True,
    py_modules=['geth'],
    install_requires=[
        "semantic-version>=2.6.0",
    ],
    license="MIT",
    zip_safe=False,
    keywords='ethereum go-ethereum geth',
    packages=find_packages(exclude=["tests", "tests.*"]),
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
|
import argparse
import math
import os
import subprocess
import sys
import tempfile
import srt
# Command-line interface: input video, output directory, screenshot options.
parser = argparse.ArgumentParser(
    description="Turn a video into a picture book by taking screenshots with subtitles."
)
parser.add_argument("video", help="Input video")
parser.add_argument("out_dir", help="Output directory for screenshots")
parser.add_argument(
    "--subs",
    help="Subtitle file (default: the video's subtitle stream will be used)",
)
parser.add_argument(
    "--scale",
    default="640:-1",
    help="Arguments for FFmpeg's scale filter (default: scale width to 640px, preserving aspect ratio). An empty string disables scaling.",
)
parser.add_argument(
    "--gray",
    action="store_true",
    help="Convert screenshots to grayscale (default: %(default)s)",
)
parser.add_argument(
    "--max-gap",
    type=float,
    default=5,
    help="Maximum number of seconds between screenshots (default: %(default)s)",
)
parser.add_argument(
    "--format",
    default="jpg",
    choices=["jpg", "png"],
    help="Screenshot format (default: %(default)s)",
)
parser.add_argument(
    "--jpg-quality",
    type=int,
    default=2,
    help="JPG quality, from 2 (best, default) to 31 (worst)",
)
parser.add_argument(
    "--subtitle-style",
    help="Custom subtitle style (format as ASS `KEY=VALUE` pairs separated by commas)",
)
args = parser.parse_args()
# Check arguments
if not (2 <= args.jpg_quality <= 31):
    # Bug fix: the message said "jpq_quality"; also exit with a non-zero
    # status so shell callers can detect the failure.
    print("jpg_quality must be between 2 and 31, inclusive")
    sys.exit(1)
elif args.max_gap <= 0:
    print("max_gap must be greater than 0")
    sys.exit(1)
def parse_ffprobe(entry):
    """Return the trimmed stdout of `ffprobe -show_entries *entry*` for args.video."""
    output = subprocess.run(
        [
            "ffprobe",
            # Suppress the banner and debug info
            "-v", "error",
            # Don't print the section tags or entry keys
            "-of", "default=noprint_wrappers=1:nokey=1",
            "-show_entries",
            entry,
            args.video,  # module-level argparse namespace
        ],
        capture_output=True,
        text=True,
        check=True,  # raise CalledProcessError if ffprobe fails
    )
    return output.stdout.strip()
# Get duration (of container)
duration = float(parse_ffprobe("format=duration"))
# Get (average) FPS (first entry that is not "0/0")
# NOTE(review): if every stream reports "0/0", fps is never bound and the
# print below raises NameError — confirm inputs always have a video stream.
for f in parse_ffprobe("stream=avg_frame_rate").split("\n"):
    if f != "0/0":
        frac = f.split("/")
        fps = float(frac[0]) / float(frac[1])
        break
print(f"Guessing that FPS is {fps} and duration is {duration} seconds.")
# filename can be a video or non-SubRip subtitle file
def convert_to_srt(filename):
    """Convert *filename*'s subtitles to SRT via ffmpeg and return them parsed."""
    # Apparently, on Windows NT or later, the temporary file cannot be opened
    # by other processes while it's still open in Python. So, we have to close
    # it first.
    fd, temppath = tempfile.mkstemp(suffix=".srt")
    os.close(fd)
    try:
        # We need -y to ensure that the temporary file is overwritten
        subprocess.run(
            ["ffmpeg", "-y", "-i", filename, temppath],
            # Don't clutter the console
            capture_output=True,
            check=True,
        )
        with open(temppath) as f:
            # The file's text is read eagerly, so removing the temp file in
            # the finally block is safe even though srt.parse is lazy.
            return srt.parse(f.read())
    finally:
        os.remove(temppath)
# Get subtitles
if args.subs is None:
    print("Extracting subtitles from video... ", end="", flush=True)
    subtitles = convert_to_srt(args.video)
    print("Done.")
elif os.path.splitext(args.subs)[1] != ".srt":
    print("Converting subtitles to SRT... ", end="", flush=True)
    subtitles = convert_to_srt(args.subs)
    print("Done.")
else:
    with open(args.subs) as f:
        subtitles = srt.parse(f.read())
# Get subtitle timestamps. We also include the first and last frame.
timestamps = [0]
# We pick the middle of each subtitle so there's more room for error in case
# the calculated frame isn't accurate (e.g. due to rounding).
for sub_time in [(s.start + s.end).total_seconds() / 2 for s in subtitles] + [duration]:
    # Calculate screenshot times, adding evenly spaced screenshots if the gap
    # is too big.
    previous_timestamp = timestamps[-1]
    gap = sub_time - previous_timestamp
    screenshot_count = int(gap // args.max_gap) + 1
    timestamps.extend(
        previous_timestamp + gap * (i + 1) / screenshot_count
        for i in range(screenshot_count)
    )
# Construct video filter
vf = f"subtitles={args.subs or args.video}"
if args.subtitle_style:
    vf += f":force_style={args.subtitle_style}"
if args.scale:
    vf += f",scale='{args.scale}'"
# We pick frames to screenshot using a select filter. The select expression
# returns 0 for frames that should be ignored and -1 for frames that should be
# screenshotted. So, for frames f1, f2, ..., the expression looks like:
#
#   -eq(n, f1) - eq(n, f2) - ...
#
# where n is the current frame and eq(a, b) returns 1 if a == b and 0
# otherwise. This approach creates a long expression, which may be slow for
# long videos. But, it's simple, lets us do everything with one FFmpeg command,
# and seems to work fine if there are only a few hundred frames to screenshot.
# For more info, see:
# - SO answer that inspired this: https://stackoverflow.com/a/47199740
# - select filter: https://ffmpeg.org/ffmpeg-filters.html#select_002c-aselect
# - Expression syntax: https://www.ffmpeg.org/ffmpeg-utils.html#Expression-Evaluation
vf += ",select='" + "".join(f"-eq(n,{int(t * fps)})" for t in timestamps) + "'"
# Format specific flags
if args.format == "png" and args.gray:
    # This ensures that grayscale PNGs are single-channel, which saves space.
    format_flags = ["-pix_fmt", "gray"]
elif args.format == "jpg":
    format_flags = ["-q:v", str(args.jpg_quality)]
    if args.gray:
        # FFmpeg doesn't seem to support single-channel JPGs, so pix_fmt won't
        # work. We use the format filter instead (it wastes space by using
        # three channels, but what can you do).
        vf += ",format=gray"
else:
    format_flags = []
# Filename pattern: enough digits to number every screenshot.
digits_needed = int(math.log10(len(timestamps))) + 1
filename_pattern = f"%0{digits_needed}d.{args.format}"
os.makedirs(args.out_dir, exist_ok=True)
print(f"Extracting {len(timestamps)} screenshots.\n")
# NOTE(review): no check=True here, so an ffmpeg failure goes unnoticed
# beyond its own stderr output — confirm this is intentional.
subprocess.run(
    [
        "ffmpeg",
        # Reduce clutter
        "-hide_banner",
        "-i", args.video,
        # I'm not sure exactly why this is needed, but without it, there are
        # tons of duplicate frames.
        "-vsync", "vfr",
        "-vf", vf,
        *format_flags,
        os.path.join(args.out_dir, filename_pattern),
    ]
)
|
#This object is used to represent the response data
#This data will usually be destined for a database
from colored import fg, bg, attr
class ResultObject:
    """Holds one response's metrics, typically destined for a database row."""

    def __init__(self, respID, responseSize, statusCode, time, numHeaders,
                 numTokens):
        self.rs = attr('reset')  # ANSI reset sequence from the `colored` package
        self.respID = respID
        self.responseSize = responseSize
        self.statusCode = statusCode
        self.time = time
        self.numHeaders = numHeaders
        self.numTokens = numTokens

    # For debugging
    def printAll(self):
        """Print every field with colored labels."""
        # Bug fix: fields may be numeric; wrapping them in str() avoids a
        # TypeError from string concatenation with '+'.
        print(fg(4) + 'response ID: ' + fg(2) + str(self.respID) + self.rs)
        print(fg(4) + 'size: ' + self.rs + str(self.responseSize))
        print(fg(4) + 'status: ' + self.rs + str(self.statusCode))
        print(fg(4) + 'time: ' + self.rs + str(self.time))
        print(fg(4) + 'num_headers: ' + self.rs + str(self.numHeaders))
        print(fg(4) + 'num_tokens: ' + self.rs + str(self.numTokens))
|
from fe_code.data_structures import geometry_data
from fe_code.data_structures import process_data
class systemData:
    """Aggregates geometry/process data and solver state for the FE system."""

    def __init__(self):
        self.geometryData = geometry_data.geometryData()
        self.processesData = process_data.processData()
        self.displacements_calculated = False
        self.SIMP_calculated = False
        self.display_nodes = False
        self.original_volume_frac = 1.0
        # First entry is the column header used when exporting results.
        self.strain_energy = ['Strain energy']
        self.system_divisions = [0, 0]
        self.system_step_size = [0, 0]
        self.save_images = False
        # Idiom fix: int(0) was a redundant conversion of a literal.
        self.simp_iteration = 0

    def getGeometryData(self):
        return self.geometryData

    def incrementSimpIteration(self):
        self.simp_iteration += 1

    def getSimpIteration(self):
        return self.simp_iteration

    def setSaveImages(self, bool_in):
        self.save_images = bool_in

    def getSaveImages(self):
        return self.save_images

    def setSystemDivisions(self, divisions_in):
        self.system_divisions = divisions_in

    def getSystemDivisions(self):
        return self.system_divisions

    def setSystemStepSize(self, step_in):
        self.system_step_size = step_in

    def getSystemStepSize(self):
        return self.system_step_size

    def getConstitutiveData(self):
        # NOTE(review): self.constitutiveData is never set in __init__; this
        # raises AttributeError unless assigned externally — confirm callers.
        return self.constitutiveData

    def getProcessData(self):
        return self.processesData

    def getDisplacementsCalculatedBool(self):
        return self.displacements_calculated

    def setDisplacementsCalculatedBool(self, bool_in):
        self.displacements_calculated = bool_in

    def setSIMPCalculatedBool(self, bool_in):
        self.SIMP_calculated = bool_in

    def getSIMPCalculatedBool(self):
        return self.SIMP_calculated

    def getDisplayNodes(self):
        return self.display_nodes

    def toggleDisplayNodes(self, window_update):
        # Idiom fix: direct boolean negation instead of an if/else.
        self.display_nodes = not self.display_nodes
        window_update()

    def setVolumeFrac(self, frac_in):
        self.original_volume_frac = frac_in

    def getVolumeFrac(self):
        return self.original_volume_frac

    def appendStrainEnergy(self, strain_in):
        self.strain_energy.append(strain_in)

    def getStrainEnergy(self):
        return self.strain_energy
from lm.util import lm_tag_reader
from lm import lm_consts
import lm_tag_base
class CTag(lm_tag_base.CTag):
    """String-list tag: exposes the tag's parsed symbol strings by index."""

    def __init__(self, ctx, tag):
        super(CTag, self).__init__(ctx, tag)
        d = self.parse_tag(ctx, tag)
        self._data = []
        for info in d["symbol_list"]:
            # Missing symbols become empty strings.
            self._data.append(info["symbol"] or "")
        self._data = tuple(self._data)

    def get_val(self, idx):
        return self._data[idx]

    # Bug fix: the method takes `cls` but lacked the @classmethod decorator,
    # so CTag.get_id() raised TypeError; instance calls keep working.
    @classmethod
    def get_id(cls):
        return lm_consts.TAG_STR_LIST

    def get_ctx(self):
        # NOTE(review): self.ctx is presumably set by the base class — confirm.
        return self.ctx
import unittest
from linkedlist import LinkedList
from linkedlist import Node
# Bug fix: a stray bare `23` expression statement sat here; removed as dead code.
class TestMethods(unittest.TestCase):
    """Unit tests for LinkedList.insert / LinkedList.traverse."""

    def test1(self):
        '''
        Test on empty list.
        '''
        l = LinkedList()
        self.assertEqual(l.traverse(), [])

    def test2(self):
        '''
        Test on non-empty list of length == 1.
        '''
        l = LinkedList()
        l.insert(1)
        self.assertEqual(l.traverse(), [1])

    def test3(self):
        '''
        Test on non-empty list of length > 1.
        '''
        l = LinkedList()
        for i in range(10):
            l.insert(i)
        self.assertEqual(l.traverse(), [0,1,2,3,4,5,6,7,8,9])

if __name__ == '__main__':
    unittest.main()
from django.conf.urls import url
# from django.core.urlresolvers import reverse_lazy
from .views import *
# Legacy url() routing (Django < 2.0); "app_index" reverses to /index/.
urlpatterns = [
    url(r'^index/$', IndexView.as_view(), name="app_index"),
]
|
# Nested functions: defining functions inside another function.
def hi(name='sixbo'):
    """Demonstrate nested function definitions and their local scope."""
    print("now you are inside the hi() function")

    def greet():
        print("now you are inside the greet() function")

    def welcome():
        # Bug fix: this message wrongly said "greet()" (copy-paste error).
        print("now you are inside the welcome() function")

    # print() of a call that returns None also prints "None" — intentional
    # in this drill to show the implicit return value.
    print(greet())
    print(welcome())
    print("now you are back in the hi() function")


hi()
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, iterate_structure, binary_erosion
from scipy.signal import stft
from scipy.io import wavfile
from os.path import exists
# Minimum amplitude for a spectral point to count as a fingerprint peak.
DEFAULT_AMP_MIN = 10
# Number of binary-structure iterations defining the peak neighborhood size.
PEAK_NEIGHBORHOOD_SIZE = 20
NewWAV = 'y'
# Interactive loop: fingerprint WAV files until the user declines another run.
while NewWAV == 'y':
    try:
        FileName = input('What is the name of the WAV file?\n')
        print()
        # Skip files whose plots already exist.
        if exists('Plots/' + FileName[:-4] + ' Spectrum.png') and exists('Plots/' + FileName[:-4] + ' Fingerprint.png'):
            print("Fingerprint analysis already completed.")
            print("Find the files in the plots folder.")
            print()
        else:
            DEFAULT_FS, AudioData = wavfile.read('WAV Files/' + FileName)
            # STFT of the first (left) channel only.
            Frequencies, Times, Spectrum = stft(AudioData[:, 0], fs=DEFAULT_FS)
            # Convert to dB. NOTE(review): log10 is applied to the complex STFT
            # before taking the real part — confirm abs() wasn't intended.
            Spectrum = np.real(10 * np.log10(Spectrum))
            Spectrum[Spectrum == -np.inf] = 0
            plt.imshow(Spectrum, cmap='Spectral', extent=(min(Times), max(Times), min(Frequencies), max(Frequencies)),
                       origin='lower', aspect='auto')
            # NOTE(review): savefig after show() typically writes a blank
            # figure, since show() releases the current figure — confirm.
            plt.show()
            plt.savefig('Plots/' + FileName[:-4] + ' Spectrum.png')
            # Local-maximum peak picking over a dilated neighborhood.
            Struct = generate_binary_structure(2, 1)
            Neighborhood = iterate_structure(Struct, PEAK_NEIGHBORHOOD_SIZE)
            LocalMax = maximum_filter(Spectrum, footprint=Neighborhood) == Spectrum
            Background = (Spectrum == 0)
            ErodedBackground = binary_erosion(Background, structure=Neighborhood, border_value=1)
            # Peaks are local maxima that are not part of the background.
            DetectedPeaks = LocalMax ^ ErodedBackground
            Amps = Spectrum[DetectedPeaks]
            JIndex, IIndex = np.where(DetectedPeaks)
            Amps = Amps.flatten()
            Peaks = zip(IIndex, JIndex, Amps)
            # Keep only sufficiently loud peaks.
            Peaks = filter(lambda x: x[2] > DEFAULT_AMP_MIN, Peaks)
            FrequencyIndices = []
            TimeIndices = []
            for Peak in Peaks:
                FrequencyIndices.append(Peak[1])
                TimeIndices.append(Peak[0])
            # Overlay the peaks (the "fingerprint") on the spectrogram.
            plt.clf()
            plt.imshow(np.real(Spectrum), cmap='Spectral', extent=(min(Times), max(Times),
                       min(Frequencies), max(Frequencies)),
                       origin='lower', aspect='auto')
            plt.scatter(Times[TimeIndices], Frequencies[FrequencyIndices], s=3, c='k')
            plt.xlim(min(Times), max(Times))
            plt.ylim(min(Frequencies), max(Frequencies))
            plt.show()
            plt.savefig('Plots/' + FileName[:-4] + ' Fingerprint.png')
    except IOError:
        print('File not found.')
        Redo = input('Would you like to try again? (y/n)\n')
        print()
        # Re-prompt until a recognized answer is given.
        while Redo is not None:
            if Redo == 'y':
                break
            elif Redo == 'n':
                exit()
            else:
                Redo = input("Character not understood, please enter 'y' or 'n'.\n")
                print()
        continue
    NewWAV = input('Would you like to try another WAV File? (y/n)\n')
    print()
    # Re-prompt until 'y' or 'n' is entered.
    while NewWAV is not None:
        if NewWAV == 'y' or NewWAV == 'n':
            break
        else:
            NewWAV = input("Character not understood, please enter 'y' or 'n'.\n")
            print()
|
# -*- coding: utf-8 -*-
# GMate - Plugin Based Programmer's Text Editor
# Copyright © 2008-2009 Alexandre da Silva
#
# This file is part of Gmate.
#
# See LICENTE.TXT for licence information
import gtk
import gnomevfs
from GMATE import files
from GMATE import i18n as i
def error(message):
    """Displays on error dialog with a single stock OK button."""
    dialog = gtk.MessageDialog(None,
                               gtk.DIALOG_MODAL,
                               gtk.MESSAGE_ERROR,
                               gtk.BUTTONS_OK,
                               message)
    # Block until dismissed; the response value is not used.
    response = dialog.run()
    dialog.destroy()
def choice_ok_cancel(message, cancelDefault=False):
    """Displays an ok/cancel message dialog and returns the gtk response code."""
    # Pick which button is focused by default.
    default = gtk.RESPONSE_OK
    if cancelDefault:
        default = gtk.RESPONSE_CANCEL
    dialog = gtk.MessageDialog(None,
                               gtk.DIALOG_MODAL,
                               gtk.MESSAGE_QUESTION,
                               gtk.BUTTONS_OK_CANCEL,
                               message)
    dialog.set_default_response(default)
    response = dialog.run()
    dialog.destroy()
    return response
def choice_yes_no(message, noDefault=False):
    """Displays an yes/no message dialog and returns the gtk response code."""
    # Pick which button is focused by default.
    default = gtk.RESPONSE_YES
    if noDefault:
        default = gtk.RESPONSE_NO
    dialog = gtk.MessageDialog(None,
                               gtk.DIALOG_MODAL,
                               gtk.MESSAGE_QUESTION,
                               gtk.BUTTONS_YES_NO,
                               message)
    dialog.set_default_response(default)
    response = dialog.run()
    dialog.destroy()
    return response
# NOTE(review): this definition is shadowed by the near-identical one below
# (which uses STOCK_SAVE instead of STOCK_ADD), so this version is dead code —
# confirm which button set is wanted and delete the other.
def retrieve_new_file_name(uri=None):
    """Get the name of a file to create."""
    dialog = gtk.FileChooserDialog(action=gtk.FILE_CHOOSER_ACTION_SAVE)
    dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                       gtk.STOCK_ADD, gtk.RESPONSE_OK)
    # Default to the users home directory
    if uri is None:
        uri = files.get_user_home_uri()
    dialog.set_current_folder_uri(str(uri))
    # Get the response and the URI
    response = dialog.run()
    file_uri = dialog.get_uri()
    dialog.destroy()
    if response == gtk.RESPONSE_OK:
        if file_uri is not None:
            write = True
            # Check to be sure if the user wants to overwrite a file
            if gnomevfs.exists(file_uri):
                response = choice_yes_no(i.file_already_exists, True)
                if response == gtk.RESPONSE_NO:
                    write = False
            if write:
                # Return the new filename
                return file_uri
        else:
            raise IOError, i.no_file_specified
    # Cancelled, or user declined to overwrite.
    return None
def retrieve_new_file_name(uri=None):
    """Get the name of a file to create; returns the chosen URI or None."""
    dialog = gtk.FileChooserDialog(action=gtk.FILE_CHOOSER_ACTION_SAVE)
    dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                       gtk.STOCK_SAVE, gtk.RESPONSE_OK)
    # Default to the users home directory
    if uri is None:
        uri = files.get_user_home_uri()
    dialog.set_current_folder_uri(str(uri))
    # Get the response and the URI
    response = dialog.run()
    file_uri = dialog.get_uri()
    dialog.destroy()
    if response == gtk.RESPONSE_OK:
        if file_uri is not None:
            write = True
            # Check to be sure if the user wants to overwrite a file
            if gnomevfs.exists(file_uri):
                response = choice_yes_no(i.file_already_exists, True)
                if response == gtk.RESPONSE_NO:
                    write = False
            if write:
                # Return the new filename
                return file_uri
        else:
            raise IOError, i.no_file_specified
    # Cancelled, or user declined to overwrite.
    return None
|
#!/usr/bin/python
# -*- coding:UTF-8 -*-
# Copyright (C) 2017 - All Rights Reserved
# 模块名称: szt_ceph_type.py
# 创建日期: 2017/8/25
# 代码编写: fanwen
# 功能说明:
class ItemType:
    """Numeric codes for crush-map item types."""

    osd = 0
    host = 1
    diskcluster = 2
    root = -3
    null = -1

    @staticmethod
    def format():
        """Return the crush-map "type" section as a preformatted string."""
        return '''\
type 0 osd
type 1 host
type 2 diskcluster
type 3 root\
'''

    @staticmethod
    def get_type_name(type):
        """Map a numeric item type to its name; unknown codes yield ''."""
        names = {
            ItemType.osd: "osd",
            ItemType.host: "host",
            ItemType.diskcluster: "diskcluster",
            ItemType.root: "root",
        }
        return names.get(type, "")
# ----ItemType end
class StorageType:
    """Storage-device type labels (string constants)."""
    ssd = "SSD"  # SSD storage device
    sas = "SAS";  # ordinary mechanical (spinning) storage device
    mix = "MIX";  # mixed type
    unkown = "UNKOWN";  # unknown device (misspelling kept: it is a public name)
def mystrip(in_str):
    """Strip leading and trailing whitespace from *in_str*.

    Returns None when the argument is not a str, matching the original
    contract. Bug fix: the old index arithmetic returned the final
    whitespace character for all-whitespace input instead of ''.
    """
    if not isinstance(in_str, str):
        return
    return in_str.strip()
# if __name__ == "__main__":
# print ItemType.format().strip();
# print ItemType.get_type_name(1);
#
# print mystrip('''
# asdasda asdfasdf ''');
|
import cv2
import os
from basic_lib import Get_List
from PIL import Image
import numpy as np
# Crop images to build a video, as preparation for OpenPose.
def img_process(img, loadsize):
    """Letterbox *img* into a (loadsize, loadsize, 3) uint8 canvas,
    preserving aspect ratio and centering along the short side."""
    try:
        h, w, _ = img.shape
    except (AttributeError, ValueError):
        # Bug fix: the old bare `except` printed "hah" and then crashed with
        # NameError on h/w; fail fast with a clear message instead.
        raise ValueError("img_process expects an H x W x 3 image array")
    result = np.zeros((loadsize, loadsize, 3))
    if h >= w:
        # Tall (or square) image: fit height, pad left/right.
        w = int(w * loadsize / h)
        h = loadsize
        img = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
        bias = int((loadsize - w) / 2)
        img = np.array(img)
        result[0:h, bias:bias + w, ...] = img[0:h, 0:w, ...]
    if w > h:
        # Wide image: fit width, pad top/bottom.
        h = int(h * loadsize / w)
        w = loadsize
        img = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
        bias = int((loadsize - h) / 2)
        img = np.array(img)
        result[bias:bias + h, 0:w, ...] = img[0:h, 0:w, ...]
    result = result.astype(np.uint8)
    return result
def get_all_loc(file_path):
    """Read tab-separated integer bounding boxes, one per line.

    Each line holds int fields joined by tabs with a trailing tab before
    the newline; the resulting trailing empty field is dropped.
    """
    with open(file_path, 'r') as fh:
        rows = [line.rstrip('\n').split('\t')[:-1] for line in fh]
    return [[int(field) for field in row] for row in rows]
# Background/reference image whose width defines the letterbox size.
target_img_path = '/media/kun/Dataset/Pose/DataSet/new_data/video_06/back_ground.png'
target_img = Image.open(target_img_path).convert('RGB')
size_target = target_img.size  # (width, height); only the width is used below
img_root_path = '/media/kun/Dataset/Pose/DataSet/new_data/机械哥_bilibili/img'
name_path = '/media/kun/Dataset/Pose/DataSet/new_data/机械哥_bilibili/DensePoseProcess/img'
txt_root_path = '/media/kun/Dataset/Pose/DataSet/new_data/机械哥_bilibili/DensePoseProcess/loc.txt'
# Per-frame bounding boxes precomputed by DensePose processing.
loc_all_source = get_all_loc(txt_root_path)
_, name_all = Get_List(name_path)
name_all.sort()
# fps = 30
# fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# videoWriter = cv2.VideoWriter('/media/kun/Dataset/Pose/DataSet/new_data/机械哥_bilibili/机械哥_bilibili_cut.avi',
#                               fourcc, fps, (size_target[0],size_target[0]))
# if not videoWriter.isOpened():
#     print("video error")
#     exit(0)
img_last = None
for i in range(len(name_all)):
    img_path = os.path.join(img_root_path, name_all[i])
    img = cv2.imread(img_path)
    # Locate the person using the precomputed bounding box for this frame.
    tmp = loc_all_source[i]
    point = {'xmin': tmp[0], 'xmax': tmp[1], 'ymin': tmp[2], 'ymax': tmp[3]}
    w = point['xmax'] - point['xmin']
    h = point['ymax'] - point['ymin']
    xmin = point['xmin']
    xmax = point['xmax']
    ymin = point['ymin']
    ymax = point['ymax']
    # Only process frames with a non-degenerate box.
    if xmax > xmin and ymax > ymin:
        img = img[ymin:ymax, xmin:xmax, ...]
        img = img_process(img, size_target[0])
        img = cv2.resize(img, (int(img.shape[1]*2/3), int(img.shape[0]*2/3)), interpolation=cv2.INTER_CUBIC)
        if img_last is None:
            img_last = img
        print(1.0*i/len(name_all))  # progress fraction
        # Show the frame-to-frame difference as a quick visual sanity check.
        cv2.imshow('a', img-img_last)
        cv2.waitKey(1)
        img_last = img
# videoWriter.release()
print('finish')
|
import pygame
import sys
import random
"""© reyan mehmood All right reserved"""
# general setup
pygame.init()
clock = pygame.time.Clock()
# Setting up the main window
ScreenWidth = 1200
ScreenHeight = 700
screen = pygame.display.set_mode((ScreenWidth, ScreenHeight))
pygame.display.set_caption('Pong')
# shapes
# Bug fix: the ball's y-coordinate used ScreenWidth, which started it near
# the bottom edge instead of vertically centered.
ball = pygame.Rect(ScreenWidth / 2 - 15, ScreenHeight / 2 - 15, 30, 30)
player = pygame.Rect(ScreenWidth - 20, ScreenHeight / 2 - 70, 10, 140)
opponent = pygame.Rect(10, ScreenHeight / 2 - 70, 10, 140)
# colors
bg_color = pygame.Color('grey12')
light_grey = (200, 200, 200)
# ball speed: random initial direction on each axis
ball_x_speed = 7 * random.choice((1, -1))
ball_y_speed = 7 * random.choice((1, -1))
player_speed = 0
opponent_speed = 7
def ball_move():
    """Advance ball state: bounce off walls and paddles, reset when out of bounds."""
    global ball_x_speed, ball_y_speed
    # Bounce vertically off the top and bottom edges.
    if ball.top <= 0 or ball.bottom >= ScreenHeight:
        ball_y_speed *= -1
    # Past the left or right edge: someone missed, recenter the ball.
    if ball.left <= 0 or ball.right >= ScreenWidth:
        ball_restart()
    # Bounce horizontally off either paddle.
    if ball.colliderect(player) or ball.colliderect(opponent):
        ball_x_speed *= -1
def player_animation():
    """Move the player paddle by the current speed, clamped to the screen."""
    # Bug fix: clamping happened before the move, so the paddle could slip
    # off-screen by up to player_speed pixels each frame.
    player.y += player_speed
    if player.top <= 0:
        player.top = 0
    if player.bottom >= ScreenHeight:
        player.bottom = ScreenHeight
def opponentanimation():
    """Simple AI: track the ball's y position, clamped to the screen."""
    if opponent.top < ball.y:
        opponent.top += opponent_speed
    if opponent.bottom > ball.y:
        opponent.bottom -= opponent_speed
    # Keep the paddle fully on screen.
    if opponent.top <= 0:
        opponent.top = 0
    if opponent.bottom >= ScreenHeight:
        opponent.bottom = ScreenHeight
def ball_restart():
    """Recenter the ball and randomize its direction on each axis."""
    global ball_x_speed, ball_y_speed
    ball.center = (ScreenWidth / 2, ScreenHeight / 2)
    ball_y_speed *= random.choice((1, -1))
    ball_x_speed *= random.choice((1, -1))
while True:
    # handling input
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        # Speed deltas accumulate on keydown and are undone on keyup, so
        # holding a key moves the paddle continuously.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_DOWN:
                player_speed += 7
            if event.key == pygame.K_UP:
                player_speed -= 7
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_DOWN:
                player_speed -= 7
            if event.key == pygame.K_UP:
                player_speed += 7
    # Visual
    screen.fill(bg_color)
    pygame.draw.rect(screen, light_grey, player)
    pygame.draw.rect(screen, light_grey, opponent)
    pygame.draw.ellipse(screen, light_grey, ball)
    # Center line.
    pygame.draw.aaline(screen, light_grey, (ScreenWidth / 2, 0), (ScreenWidth / 2, ScreenHeight))
    ball.x += ball_x_speed
    ball.y += ball_y_speed
    ball_move()
    player_animation()
    opponentanimation()
    # updating the window
    pygame.display.flip()
    clock.tick(60)  # cap at 60 FPS
|
# Parses all files from WID and loads them into the DB
from django.core.management.base import BaseCommand, CommandError
import csv
from api.models import Indicator, Country, IndicatorType
class Command(BaseCommand):
    """Parse WID csv data and load configured indicators into the database."""
    help = 'Load pertinent data from WID (located in WID_DATA folder) into DB'

    def handle(self, *args, **options):
        # TODO open all country files in folder
        # NOTE(review): hard-coded absolute path only loads the AE file —
        # replace with a directory scan before wider use.
        with open('/home/olpi/projects/inequality-dashboard/WID_DATA/WID_data_AE.csv') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            line_count = 0
            # empty previous indicators
            # Indicator.objects.all().delete()
            # get agdpro_992_i AND anninc_992_i_ with p0p100
            # diff between GDP and national income => Also external revenues
            # list of all variables needed for charts
            # variables_list = ['sptinc992j']
            # percentiles_list = ['p90p100','p0p50','p50p90','p99p100']
            indicator_types = IndicatorType.objects.all()
            # Build: stat variable -> list of percentile names worth keeping.
            useful_indicator = {}
            for i in indicator_types:
                useful_indicator[i.stat_variable] = []
                for p in i.percentiles.all():
                    useful_indicator[i.stat_variable].append(p.name)
            for row in csv_reader:
                if line_count == 0:
                    # First row holds the column headers.
                    print(f'Column names are {", ".join(row)}')
                    line_count += 1
                else:
                    # Keep only (variable, percentile) pairs configured in the DB.
                    if row[1] in useful_indicator and row[2] in useful_indicator[row[1]]:
                        q = Indicator(
                            year = row[3],
                            country = Country.objects.get(code=row[0]),
                            value = row[4],
                            indicator_type = IndicatorType.objects.filter(stat_variable=row[1]).first(),
                            percentile = row[2]
                        )
                        q.save()
                    line_count += 1
            self.stdout.write(f'Processed {line_count} lines.')
            self.stdout.write(self.style.SUCCESS('Files loaded !'))
|
import os, sys
import shutil
from datetime import datetime
from chart_generate import topn_requests_donut, yearoveryear_reqeusts_volume, delete_directory
from data_fetch import data as dframe
from tweet_generate import api, tweet
## create directory to store program logs
if not os.path.exists('logs'):
    os.mkdir('logs')
# Human-readable timestamp used as the tweet's message text.
timestamp = datetime.now().strftime('%A %B %d,%Y %I:%M%p')
# Pretty-print name of this module (path half of its repr) for logging.
module = sys.modules[__name__]
pprint_module_name = ((str(module).split('from')[1]).replace('>',''))
# Charts are written to ../data/images by the chart_generate helpers.
image_folder = os.path.join(os.pardir,'data','images')
# Build all chart images up front: a year-over-year volume chart plus a
# top-N donut for each of the last year and the last week.
image_files = []
image_files.append(yearoveryear_reqeusts_volume(dframe))
for prd in ['year','week']:
    image_files.append(topn_requests_donut(dframe, period=prd))
def run_program():
    """Tweet the generated charts, then clean up images and archive the log."""
    tweet(api_object=api, files=image_files, msg=timestamp)
    delete_directory(image_folder)
    shutil.move('execution.log', os.path.join('logs', 'execution.log'))
    # Fixed typo in the status message ("sucessfully" -> "successfully").
    print('{} run successfully'.format(pprint_module_name))
## tweet charts & remove images folder after sent
# Entry point: only tweet when executed as a script, not on import.
if __name__ == "__main__":
    run_program()
|
# Generated by Django 2.2.8 on 2020-01-30 08:51
import autoslug.fields
from django.db import migrations
# Auto-generated migration: switch Post.slug to an AutoSlugField populated
# from `timestamp`. Do not hand-edit generated migrations; add a new one.
class Migration(migrations.Migration):

    dependencies = [
        ('blogs', '0005_auto_20200130_1144'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='timestamp', unique_with=('timestamp',)),
        ),
    ]
|
"""Download the pickled MNIST dataset.

Ported from Python 2: `print` statement -> print() function, and
`urllib.urlretrieve` -> `urllib.request.urlretrieve`.
"""
import urllib.request

dataset = 'mnist.pkl.gz'
origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
print('Downloading data from %s' % origin)
urllib.request.urlretrieve(origin, dataset)
|
class HangmanLetter:
    """One letter of a hangman word, hidden until it has been guessed."""

    def __init__(self, letter):
        self.letter = letter
        # Whether the letter has been revealed to the player.
        self.show = False

    def display_letter_space(self) -> str:
        """Return the letter if revealed, otherwise a blank '_'."""
        return self.letter if self.show else '_'

    def display_letter(self) -> str:
        """Return the letter unconditionally (e.g. for the answer reveal)."""
        return self.letter
|
from packet import Packet
import threading
class PacketConstructor:
    # Receive window: packets up to window_size ahead of next_seq_num are accepted.
    window_size = 10
    # Packet type codes (must match the sender's Packet encoding).
    data_type = 0
    ack_type = 1
    syn_ack_type = 2
    syn_type = 3
    """
    Reassembles a payload from simulated UDP packets using a sliding
    receive window, ACKing each in-window data packet.
    """
    def __init__(self):
        # Lowest sequence number not yet received in-order.
        self.next_seq_num = 0
        # seq_num -> payload bytes for every packet received so far.
        self.received_packets = {}
        # Set once a packet flagged is_last_packet arrives.
        self.received_last_packet = False
        # Guards the assemble/reset critical section in add_packet().
        self.payload_lock = threading.Lock()

    def reset(self):
        """Clear all reassembly state, ready for the next transfer."""
        # NOTE(review): self.payload is set here but never in __init__ and
        # never read elsewhere in this class — confirm it is still needed.
        self.payload = b''
        self.next_seq_num = 0
        self.received_packets = {}
        self.received_last_packet = False

    def received_all_packets(self):
        """True when the last packet arrived and seq numbers 0..N are contiguous."""
        if not self.received_last_packet:
            return False
        next_seq_num = 0
        for seq_num in sorted(self.received_packets):
            if not seq_num == next_seq_num:
                return False
            next_seq_num += 1
        return True

    def assemble_payload(self):
        """Concatenate all received payloads in sequence-number order."""
        payload = b''
        for seq_num in sorted(self.received_packets):
            payload += self.received_packets[seq_num]
        return payload

    def send_ack(self, conn, seq_num, destination, peer_ip_addr, peer_port):
        """Send an empty ACK packet for seq_num back to the sender."""
        p = Packet(packet_type=PacketConstructor.ack_type,
                   seq_num=seq_num,
                   peer_ip_addr=peer_ip_addr,
                   peer_port=peer_port,
                   is_last_packet=True,
                   payload=b'')
        print("sending ack " + str(seq_num))
        conn.sendto(p.to_bytes(), destination)

    def add_packet(self, p, conn, sender):
        """Record a data packet; return the full payload once complete, else None.

        ACKs every in-window data packet (including duplicates), advances
        next_seq_num past any contiguous run, and — under payload_lock —
        assembles and resets when the transfer is complete.
        """
        # NOTE(review): '<=' admits window_size + 1 distinct sequence numbers;
        # confirm whether the upper bound was meant to be exclusive.
        if p.packet_type == PacketConstructor.data_type and p.seq_num >= self.next_seq_num and p.seq_num <= (
                self.next_seq_num + PacketConstructor.window_size):
            self.send_ack(conn, p.seq_num, sender, p.peer_ip_addr, p.peer_port)
            if p.seq_num not in self.received_packets:
                self.received_packets[p.seq_num] = p.payload
                # Slide the window forward over any contiguous run.
                while self.next_seq_num in self.received_packets:
                    self.next_seq_num += 1
                if (p.is_last_packet):
                    self.received_last_packet = True
                # Assemble-and-reset must be atomic w.r.t. concurrent adds.
                self.payload_lock.acquire()
                if (self.received_all_packets()):
                    payload = self.assemble_payload()
                    self.reset()
                    self.payload_lock.release()
                    return payload
                self.payload_lock.release()
            else:
                print("got out of order packet " + str(p.seq_num))
                # TODO: store the out of order packet somewhere
        else:
            print("got an out of window packet " + str(p.seq_num))
            # TODO: ?
        return None
"""
Functions for Exploratory Data Analysis
Script containing functions used for performing
exploratory data analysis on the cleaned headers.
"""
import numpy as np
import matplotlib.pyplot as plt
import operator
from wordcloud import WordCloud, STOPWORDS
import progressbar
import util
def analyze_basic(headers):
    """ Perform basic analysis on the email data set """
    util.log_print("Performing Basic Analysis")
    total = len(headers)

    # Emails whose sender appears in their own (first) recipient entry.
    sent_to_self_count = sum(1 for header in headers if header["From"][0] in header["To"][0])
    sent_to_self_percentage = round(sent_to_self_count / total * 100, 2)
    print("{0} Emails ({1}%) were sent from the senders to themselves"
          .format(sent_to_self_count, sent_to_self_percentage))

    # Emails whose sender domain also appears among the recipients.
    valid_headers = [h for h in headers if len(h["From"][0].split("@")) == 2]
    same_domain_count = sum(
        1 for h in valid_headers
        if h["From"][0].split("@")[1].split(".")[0] in " ".join(h["To"]))
    same_domain_percentage = round(same_domain_count / total * 100, 2)
    print("{0} Emails ({1}%) were sent from the same domain".format(same_domain_count, same_domain_percentage))

    # Recipient-count breakdown.
    multiple_recipient_count = sum(1 for h in headers if len(h["To"]) > 1)
    multiple_recipient_percentage = round(multiple_recipient_count / total * 100, 2)
    print("{0} Emails ({1}%) were sent to more than one recipient"
          .format(multiple_recipient_count, multiple_recipient_percentage))

    single_recipient_count = sum(1 for h in headers if len(h["To"]) == 1)
    single_recipient_percentage = round(single_recipient_count / total * 100, 2)
    print("{0} Emails ({1}%) were sent to only one recipient"
          .format(single_recipient_count, single_recipient_percentage))
def analyze_subjects(headers, words_to_strip):
    """ Creates a word cloud of all subjects"""
    util.log_print("Running Subject Analysis")
    # Concatenate every subject line into one corpus string.
    text = " ".join(h["Subject"][0] for h in headers)
    # Strip specified words from subjects
    for unwanted in words_to_strip:
        text = text.replace(unwanted, " ")
    # Generate, display, and save the word cloud.
    word_cloud = WordCloud(width=1000, height=500, stopwords=set(STOPWORDS)).generate(text)
    plt.figure(figsize=(15, 8))
    plt.imshow(word_cloud)
    plt.axis("off")
    plt.show()
    plt.imsave("../../res/images/sub.png", word_cloud)
def analyze_content_types(headers, is_charset):
    """ Creates a pie chart of all content types or charsets"""
    if is_charset:
        util.log_print("Running Charset Analysis")
        content_types = list(map(lambda h: h["Content-Type"][1].split("=")[1], headers))
    else:
        util.log_print("Running Content Type Analysis")
        content_types = list(map(lambda h: h["Content-Type"][0], headers))
    unique_types = list(set(content_types))
    # BUG FIX: counts must come from the full content_types list; the
    # original counted within unique_types, which always yields 1 per slice.
    counts = [content_types.count(t) for t in unique_types]
    chart, ax1 = plt.subplots()
    ax1.pie(counts, labels=unique_types, autopct='%1.1f%%',
            shadow=True, startangle=90)
    ax1.axis('equal')  # equal aspect ratio keeps the pie circular
    plt.savefig("../../res/images/content" + str(is_charset) + ".png")
    plt.show()
def analyze_days(headers):
    """ Creates a bar chart showing the number of emails sent per day"""
    util.log_print("Running Day of Week Analysis")
    days_of_week = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
    # Leading token of the "Date" header is the weekday abbreviation.
    email_days = [h["Date"][0].split(",")[0] for h in headers]
    day_counts = [email_days.count(day) for day in days_of_week]
    # Display statistics
    util.display_stats(day_counts, "Statistics for days on which emails are set:")
    # Configure bar chart
    ind = np.arange(len(days_of_week))
    bar_width = 0.6
    chart, ax = plt.subplots()
    rectangles = ax.bar(ind, day_counts, bar_width, color='r', alpha=0.6)
    ax.set_ylabel("Number of emails")
    ax.set_xlabel("Day")
    ax.set_title("Number of emails per day")
    ax.set_xticks(ind + bar_width / 20)
    ax.set_xticklabels(days_of_week)
    # Annotate each bar with its count.
    for rect in rectangles:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., 1 * height,
                '%d' % int(height),
                ha='center', va='bottom')
    plt.savefig("../../res/images/days.png")
    plt.show()
def analyze_months(headers):
    """ Creates a bar chart showing the number of emails sent per month """
    util.log_print("Running Month Analysis")
    months_of_year = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    # Third space-separated token of the "Date" header is the month.
    email_months = [h["Date"][0].split(" ")[2] for h in headers]
    month_counts = [email_months.count(month) for month in months_of_year]
    # Display statistics
    util.display_stats(month_counts, "Statistics for months in which emails are sent:")
    # Configure bar chart
    ind = np.arange(len(months_of_year))
    bar_width = 0.6
    chart, ax = plt.subplots()
    rectangles = ax.bar(ind, month_counts, bar_width, color='b', alpha=0.6)
    ax.set_ylabel("Number of emails")
    ax.set_xlabel("Month")
    ax.set_title("Number of emails per month")
    ax.set_xticks(ind + bar_width / 20)
    ax.set_xticklabels(months_of_year)
    # Annotate each bar with its count.
    for rect in rectangles:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., 1 * height,
                '%d' % int(height),
                ha='center', va='bottom')
    plt.savefig("../../res/images/months.png")
    plt.show()
def analyze_years(headers):
    """ Creates a line chart showing the number of emails sent per year """
    util.log_print("Running Year Analysis")
    # Data-cleaning hack: years mangled to a "000"-prefixed form are
    # rewritten to "200X"; the "2000" <-> "x" round trip protects genuine
    # "2000" values from the middle replace.
    # NOTE(review): confirm no other malformed year patterns exist upstream.
    email_years = list(map(lambda x: x["Date"][0].split(" ")[3]
                       .replace("2000", "x")
                       .replace("000", "200")
                       .replace("x", "2000"), headers))
    unique_years = list(set(email_years))
    unique_years.sort()
    year_counts = []
    for year in unique_years:
        year_counts.append(email_years.count(year))
    # Display statistics
    util.display_stats(year_counts, "Statistics for years in which emails are sent:")
    # Configure line chart
    unique_years = list(map(int, unique_years))
    plt.plot(unique_years, year_counts, color='r', alpha=0.6)
    # Wide fixed x-range so outlier (mis-parsed) years remain visible.
    plt.xlim(1979, 2044)
    plt.ylabel('Number of Emails')
    plt.xlabel('Year')
    plt.title('Number of emails per year')
    plt.savefig("../../res/images/years.png")
    plt.show()
def analyze_times(headers):
    """ Creates a line chart showing the number of emails sent per hour """
    util.log_print("Running Time Analysis")
    # Hour component of the time token in the "Date" header.
    hours = [int(h["Date"][0].split(" ")[4].split(":")[0]) for h in headers]
    unique_hours = sorted(set(hours))
    hours_count = [hours.count(hour) for hour in unique_hours]
    # Display statistics
    util.display_stats(hours_count, "Statistics for hours in which emails are sent:")
    # Configure line chart
    plt.plot(unique_hours, hours_count, color='g', alpha=0.6)
    plt.xlim(0, 24)
    plt.ylabel('Number of Emails')
    plt.xlabel('Hour of Day')
    plt.title('Number of emails per hour')
    plt.savefig("../../res/images/hours.png")
    plt.show()
def analyze_domains(headers, top):
    """ Creates a horizontal bar chart showing the number of emails sent by the top domains """
    from collections import Counter
    util.log_print("Running Domain Analysis")
    valid_headers = [h for h in headers if len(h["From"][0].split("@")) == 2]
    domains = [h["From"][0].split("@")[1].split(".")[0] for h in valid_headers]
    # Counter counts every domain in one O(n) pass; the previous
    # domains.count() loop over unique domains was O(n^2), which is why it
    # needed a progress bar at all.
    sorted_domain_counts = Counter(domains).most_common()
    chart_domains = []
    chart_domain_counts = []
    print("Top {0} domains that sent emails:".format(top))
    for x in range(top):
        chart_domains.append(sorted_domain_counts[x][0])
        chart_domain_counts.append(sorted_domain_counts[x][1])
        # Print results
        print("{0}. {1} - {2} emails sent".format(x+1, sorted_domain_counts[x][0], sorted_domain_counts[x][1]))
    # Draw horizontal bar chart
    plt.rcdefaults()
    fig, ax = plt.subplots()
    y_pos = np.arange(len(chart_domains))
    ax.barh(y_pos, chart_domain_counts, align='center',
            color='green', ecolor='black')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(chart_domains)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Number of emails sent')
    ax.set_title('Emails Sent per Domain')
    plt.savefig("../../res/images/domainsA.png")
    plt.show()
def get_max_senders(headers, top):
    """ Creates a bar chart showing the number of emails sent by the top senders """
    from collections import Counter
    util.log_print("Running Max Senders Analysis")
    email_addresses = [h["From"][0].split("@")[0] for h in headers]
    # O(n) frequency count replaces the quadratic list.count() loop.
    sorted_address_counts = Counter(email_addresses).most_common()
    graph_emails = [addr for addr, _ in sorted_address_counts[:top]]
    graph_counts = [cnt for _, cnt in sorted_address_counts[:top]]
    # Display statistics
    util.display_stats(graph_counts, "Statistics for emails sent per person:")
    # Configure bar chart
    # NOTE(review): tight_layout() is called before the figure is created;
    # likely a no-op on the intended chart — confirm.
    plt.tight_layout()
    ind = np.arange(len(graph_emails))
    bar_width = 0.6
    chart, ax = plt.subplots()
    rectangles = ax.bar(ind, graph_counts, bar_width, color='r', alpha=0.6)
    ax.set_ylabel("Number of emails")
    ax.set_xlabel("Sender")
    ax.set_title("Number of emails per sender")
    ax.set_xticks(ind + bar_width / 20)
    ax.set_xticklabels(graph_emails)
    plt.xticks(rotation=90)
    plt.savefig("../../res/images/senders.png")
    plt.show()
|
from syntax_expr import Expr
#############################################################################
#
# Array Operators: It's not scalable to keep adding first-order operators
# at the syntactic level, so eventually we'll need some more extensible
# way to describe the type/shape/compilation semantics of array operators
#
#############################################################################
class Len(Expr):
    """len() of an array expression (`value`)."""
    _members = ['value']


class ConstArray(Expr):
    """Array of the given `shape` filled with the constant `value`."""
    _members = ['shape', 'value']


class ConstArrayLike(Expr):
    """
    Create an array with the same shape as the first arg, but with all values set
    to the second arg
    """
    _members = ['array', 'value']


class Range(Expr):
    """Sequence from `start` to `stop` advancing by `step`."""
    _members = ['start', 'stop', 'step']


class AllocArray(Expr):
    """Allocate an unfilled array of the given shape and type"""
    _members = ['shape', 'elt_type']

    def children(self):
        # Only the shape is a child expression; elt_type is a type, not an Expr.
        yield self.shape


class ArrayView(Expr):
    """Create a new view on already allocated underlying data"""
    _members = ['data', 'shape', 'strides', 'offset', 'size']

    def children(self):
        yield self.data
        yield self.shape
        yield self.strides
        yield self.offset
        yield self.size
class Ravel(Expr):
    """Flatten an array expression to one dimension."""
    _members = ['array']

    def children(self):
        # Yield (rather than return a tuple) for consistency with every other
        # array-expression node's children() implementation in this module.
        yield self.array
class Reshape(Expr):
    """Reinterpret `array` with a new `shape`."""
    _members = ['array', 'shape']

    def children(self):
        yield self.array
        yield self.shape


class Shape(Expr):
    """The shape of `array`."""
    _members = ['array']


class Strides(Expr):
    """The strides of `array`."""
    _members = ['array']


class Transpose(Expr):
    """Transpose of `array`."""
    _members = ['array']

    def children(self):
        yield self.array


class Where(Expr):
    """
    Given a boolean array, returns its true indices
    """
    _members = ['array']

    def children(self):
        yield self.array
|
"""Compute three (x, y) marker coordinates from interface-analysis data."""
import numpy as np

output = 'coordinates_v2.dat'
# Fixed y-coordinates for the three marker points.
y1 = 25.6
y2 = 18.4
y3 = 11

data_file = '../interface_analysis/6layer_surface_data.txt'
data = np.loadtxt(data_file)
center_file = '6layer_thincenter_data.txt'
center_data = np.loadtxt(center_file)

# x positions: surface centre + half width, and thin-centre equivalent.
center = data[7]
width = data[5]
x1 = center + (width/2)
x3 = x1
x2 = center_data[2] + (center_data[0]/2)

# 'with' guarantees the output file is flushed and closed (the original
# handle was never closed).
with open(output, "w") as f:
    f.write(str(x1) + '\t' + str(y1) + '\n')
    f.write(str(x2) + '\t' + str(y2) + '\n')
    f.write(str(x3) + '\t' + str(y3) + '\n')
#data = np.transpose(data)
|
import os
from pathlib import Path
import requests
def main():
    """Pin ./images/pug.png to IPFS via Pinata's pinFileToIPFS endpoint.

    Reads PINATA_API_KEY / PINATA_API_SECRET from the environment.
    Returns the requests.Response from the Pinata API.
    """
    filepath = "./images/pug.png"
    # "./images/pug.png" -> "pug.png"
    file_name = filepath.split("/")[-1]
    headers = {
        "pinata_api_key": os.getenv("PINATA_API_KEY"),
        "pinata_secret_api_key": os.getenv("PINATA_API_SECRET")
    }
    with Path(filepath).open("rb") as image_file:
        image_binary = image_file.read()
    pinata_base_url = "https://api.pinata.cloud"
    # BUG FIX: the endpoint previously lacked a leading slash, so the URL
    # concatenated to "https://api.pinata.cloudpinning/pinFileToIPFS".
    endpoint = "/pinning/pinFileToIPFS"
    response = requests.post(
        pinata_base_url + endpoint,
        files={"file": (file_name, image_binary)},
        headers=headers
    )
    return response
|
from collections import deque
def solution(values,edges,quries):
    """Process queries over a rooted tree (node 1 is the root).

    values: node values (0-indexed internally; inputs are 1-indexed).
    edges:  (parent, child) pairs, 1-indexed.
    quries: [node, w] — w == -1 returns the sum of `node`'s subtree;
            otherwise values are propagated along the path to the root
            (see secondQurie; NOTE(review): the exact intended semantics of
            the update query are unclear — confirm against the problem spec).
    """
    # Adjacency list: arr[parent] -> list of children (0-indexed).
    arr = [ [] for _ in range(len(values)+1) ]
    for i, e in enumerate(edges):
        arr[e[0]-1].append(e[1]-1)

    def firstQurie(root):
        # BFS from `root`, summing every reachable node's value.
        vis = [0]*(len(arr)+1)
        q = deque()
        q.append(root)
        vis[root]=1
        ansSum = values[root]
        while q :
            now = q.popleft();
            for nxt in arr[now]:
                if vis[nxt] !=0 : continue
                vis[nxt]=1
                ansSum+=values[nxt]
                q.append(nxt)
        return ansSum

    def secondQurie(u,w):
        # Walk from u toward the root; each step copies the parent's value
        # into the current node, and the root itself is finally set to w.
        vis = [0]*(len(arr)+1)
        q = deque()
        q.append(u)
        while q :
            now = q.popleft();
            if now == 0 :
                values[now]=w
                q.clear()
                break
            # Parent lookup by scanning the whole adjacency list each step;
            # NOTE(review): a precomputed parent array would avoid this O(V*E) scan.
            for parentIdx, nodeInf in enumerate(arr):
                if now not in nodeInf or vis[parentIdx]!=0:continue
                vis[parentIdx]=1
                values[now]=values[parentIdx]
                q.append(parentIdx)

    result = []
    for querie in quries :
        if querie[1]==-1 :
            result.append(firstQurie(querie[0]-1))
        else : secondQurie(querie[0]-1, querie[1])
    return result


# Author's smoke test.
print(solution([1,10,100,1000,10000],[[1,2],[1,3],[2,4],[2,5]],[[1,-1],[2,-1],[3,-1],[4,-1],[5,-1],[4,1000],[1,-1],[2,-1],[3,-1],[4,-1],[5,-1],[2,1],[1,-1],[2,-1],[3,-1],[4,-1],[5,-1]]))
def get_plural(number, case1, case2, case5):
    """Select the Slavic-style plural form for `number`.

    case1: 1, 21, 31, ...; case2: 2-4, 22-24, ...; case5: everything else,
    including the teens 11-19.
    """
    teens = number % 100
    if 11 <= teens <= 19:
        return case5
    last_digit = number % 10
    if last_digit == 1:
        return case1
    if last_digit in (2, 3, 4):
        return case2
    return case5
|
#!/usr/bin/python
# -*- coding: utf8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
import bs4
import requests
from datetime import datetime
import datetime
import csv
import time
import re
import json
def save_to_file(data, filename):
    """Write `data` to `filename`, replacing any existing content.

    Uses a context manager so the handle is closed even if write() raises
    (the original left the file open on error).
    """
    with open(filename, "w") as text_file:
        text_file.write(data)
def getlinks():
    """Collect every project-detail URL from the paginated listing pages."""
    addresses = []
    # 44 listing pages existed when this scraper was written.
    for counter in range(0, 45):
        print ('getting links page ' + str (counter))
        txt_doc='http://www.entwicklung.at/nc/zahlen-daten-und-fakten/projektliste/?tx_sysfirecdlist_pi1%5Btest%5D=test&tx_sysfirecdlist_pi1%5Bmode%5D=1&tx_sysfirecdlist_pi1%5Bsort%5D=uid%3A1&tx_sysfirecdlist_pi1%5Bpointer%5D=' + str(counter)
        soup = BeautifulSoup(urlopen(txt_doc), 'html.parser')
        # Keep only anchors pointing at a project-detail page.
        addresses.extend(
            tag['href'] for tag in soup.find_all('a')
            if 'href' in tag.attrs
            and "http://www.entwicklung.at/zahlen-daten-und-fakten/projektliste/?" in tag['href'])
    return addresses
def striphtml(data):
    """Delete every HTML/XML tag from `data` (non-greedy <...> match)."""
    return re.sub(r'<.*?>', '', data)
if __name__ == "__main__":
    # Crawl each project page and extract its labelled fields to JSON.
    addresses = getlinks ()
    data = []
    l = len (addresses)
    k = 1
    print ("downloaded %s addresses" % str (l))
    for address in addresses:
        elem = {}
        response = requests.get (address)
        # NOTE(review): no parser argument — bs4 emits a warning and picks
        # a platform-dependent default parser.
        soup2 = bs4.BeautifulSoup (response.text)
        # Detail pages lay out fields in 'cdcontentdiv' divs in a fixed
        # order (see the elem[...] assignments below).
        content = soup2.find_all ('div', {'cdcontentdiv'})
        # Element c3936 holds the "last updated" date.
        aktualisierung = soup2.find_all ('div', id= 'c3936')
        div = aktualisierung [0].find_all('div')
        date=div[0].contents
        Aktualisierungsdatum = date[0].replace('\xa0', '')
        elem["Link"] = address
        elem["Vertragsnummer"] = striphtml (str (content [0]))
        elem["Vertragstitel"]= striphtml (str (content [1]))
        elem["LandRegion"]= striphtml (str (content [2]))
        elem["Vertragspartner"]= striphtml (str (content [3]))
        elem["Vertragssumme"]= striphtml (str (content [4]))
        elem["Beschreibung"]= striphtml (str(content [5]))
        ts = time.time()
        # `import datetime` (done after `from datetime import datetime` at the
        # top of the file) wins, so datetime.datetime.fromtimestamp is correct.
        elem["Timestamp"] = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        elem["Aktualisierungsdatum"]=Aktualisierungsdatum
        print ("downloaded %s of %s entries" % (k, l))
        k = k+1
        data.append(elem)
    save_to_file(json.dumps(data, indent=2), 'Entwicklunghilfe.json')
|
# from django.db import models
from djongo import models
class Point(models.Model):
    """Djongo model storing a set of points serialised as a JSON string."""
    # MongoDB ObjectId used as the primary key.
    _id = models.ObjectIdField(db_column='_id', primary_key=True)
    # NOTE(review): points are stored as a JSON *string* (max 1000 chars),
    # not a structured field — confirm whether a JSONField would fit better.
    points = models.CharField(
        max_length=1000,
        verbose_name=('Json of points'),
    )

    def __str__(self):
        return self.points
|
# 1) Open Python shell in the same location like the script
# 2) >>> import prepare_jobs
# 3) >>> data = prepare_jobs.rewrite()
# 4) Access M by 'prepare_jobs.M'
import numpy as np
def rewrite():
    """Reformat shops.dat into output.dat, echo it, and return its lines.

    Fix: all three file handles are now managed with `with` blocks — the
    original leaked the input handle and the re-read handle.
    """
    # Read the raw '&'-separated, '\\'-terminated table rows.
    with open('shops.dat', 'r') as f:
        d = f.readlines()
    cities = ['Z', 'D', 'B', 'N']
    k = []
    cnt = 0
    for i in d:
        if(len(i.split('&')) > 1):
            l = []
            # Insert the city label using a cyclic counter
            l.append(cities[cnt // 4])
            # Return a new list with sales Q1, Q2, Q3
            l = l + i.split('&')[0:4]
            # Append sales Q4 (split trailing slashes)
            l.append(i.split('&')[4].split('\\')[0])
            # Prepare writeable list
            k.append(l)
            # Cycle city counter
            cnt += 1
    # Write the reformatted table with a header row.
    with open('output.dat', 'w') as output:
        output.write('City Sport Q1 Q2 Q3 Q4\n')
        for i in k:
            output.write(" ".join(i) + '\n')
    # Re-read, echo, and return the result for downstream processing.
    with open('output.dat', 'r') as filehandle:
        data = filehandle.readlines()
    for i in data:
        print(i, end='')
    return data
# Build matrix M from the quarterly sales columns (still strings here).
data = rewrite()
M = []
# Skip header line
for i in data[1:]:
    l = []
    l = i.split(' ')[2:5]
    l.append(i.split(' ')[5].split('\n')[0])
    M.append(l)
M = np.array(M)
print(M)
print(type(M))
# Four rows per city, in the fixed order rewrite() assigned its labels.
Z = M[0:4]
D = M[4:8]
B = M[8:12]
N = M[12:16]
def convert_to_float(B):
    """Convert a nested sequence of numeric strings into a float ndarray."""
    print("Converting...")
    return np.array([[float(entry) for entry in row] for row in B])
# Convert each city's sales block from strings to floats.
Z = convert_to_float(Z)
D = convert_to_float(D)
B = convert_to_float(B)
N = convert_to_float(N)
# Z = np.array([list(map(lambda x: float(x), Bi)) for Bi in Z])
|
import sys
import sqlite3
import logging
from datetime import datetime
from sales import load_sales_data
from catalog import load_catalog_by_item_id
def main():
    """Load catalog and sales CSVs into a SQLite database.

    argv: <catalog-file.csv> <sales-file.csv> <output.db>
    Returns 2 on usage error, None on success.
    """
    if len(sys.argv) < 4:
        print("Usage: {} <catalog-file.csv> <sales-file.csv> <output.db>".format(sys.argv[0]))
        return 2
    # TODO: check if files exist and are readable
    catalog_by_item_id = load_catalog_by_item_id(sys.argv[1])
    sales = load_sales_data(sys.argv[2])
    db_filename = sys.argv[3]
    # isolation_level=None -> autocommit mode; every statement commits itself.
    with sqlite3.connect(db_filename, isolation_level=None) as connection:
        print("Connection opened")
        create_tables(connection)
        print("Tables created")
        import_catalog_into_db(catalog_by_item_id, connection)
        print("Catalog imported")
        import_sales_into_db(sales, connection)
        print("Sales imported")
def create_tables(connection):
    """Create the sale and catalog tables if they do not already exist."""
    ddl_statements = (
        """
    create table if not exists sale (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        catalog_id INTEGER NOT NULL,
        country varchar(3),
        city_name varchar(60),
        sale_timestamp TEXT,
        price NUMERIC,
        FOREIGN KEY (catalog_id) REFERENCES catalog (id)
    );
    """,
        """
    create table if not exists catalog (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        item_key varchar(200) UNIQUE,
        category varchar(200)
    );
    """,
    )
    cursor = connection.cursor()
    for ddl in ddl_statements:
        cursor.execute(ddl)
def import_catalog_into_db(catalog_by_item_id, connection):
    """Upsert every catalog entry's (item_key, category) into the catalog table."""
    cursor = connection.cursor()
    rows = ((entry.item_id, entry.category_name) for entry in catalog_by_item_id.values())
    for item_key, category in rows:
        cursor.execute(
            "insert or replace into catalog (item_key, category) values (?, ?)",
            [item_key, category]
        )
def import_sales_into_db(sales, connection):
    """Insert each sale, resolving its catalog id; log sales with unknown items."""
    cursor = connection.cursor()
    for sale in sales:
        cursor.execute('SELECT id FROM CATALOG where item_key = ?', [sale.item_id])
        match = cursor.fetchone()
        if match is None:
            # Record failures in both the error file and the log.
            with open('res/errors.txt', 'a') as f:
                f.write('{}: Unable to import {} because of invalid item id.\n'.format(datetime.utcnow(), str(sale)))
            logging.error('{}: Unable to import {} because of invalid item id.\n'.format(datetime.utcnow(), str(sale)))
            continue
        cursor.execute("""
            insert or replace into sale (catalog_id, country, city_name, sale_timestamp, price) values (?, ?, ?, ?, ?)
        """, [match[0], sale.country, sale.city, sale.sale_timestamp, float(sale.price)])
# Propagate main()'s status (e.g. the usage error's 2) to the shell;
# the original discarded the return value, always exiting 0.
if __name__ == '__main__':
    sys.exit(main())
|
from cvxpy import *
from cvxpy.tests.base_test import BaseTest
class TestSolvers(BaseTest):
    """ Unit tests for solver specific behavior. """
    # NOTE(review): written against the pre-1.0 cvxpy API (star import,
    # Variable(rows, cols) positional shapes). Both tests silently no-op
    # unless the ELEMENTAL solver is installed.

    def setUp(self):
        # Scalar, vector and matrix variables shared by the tests.
        self.a = Variable(name='a')
        self.b = Variable(name='b')
        self.c = Variable(name='c')
        self.x = Variable(2, name='x')
        self.y = Variable(3, name='y')
        self.z = Variable(2, name='z')
        self.A = Variable(2, 2, name='A')
        self.B = Variable(2, 2, name='B')
        self.C = Variable(3, 2, name='C')

    def test_lp(self):
        """Tests basic LPs.
        """
        if ELEMENTAL in installed_solvers():
            # Feasibility problem: zero objective with an equality constraint.
            prob = Problem(Minimize(0), [self.x == 2])
            prob.solve(verbose=False, solver=ELEMENTAL)
            self.assertAlmostEqual(prob.value, 0)
            self.assertItemsAlmostEqual(self.x.value, [2, 2])
            # Simple bounded LP.
            prob = Problem(Minimize(-self.a), [self.a <= 1])
            prob.solve(verbose=False, solver=ELEMENTAL)
            self.assertAlmostEqual(prob.value, -1)
            self.assertAlmostEqual(self.a.value, 1)

    def test_soc(self):
        """Test SOCP representable problems.
        """
        if ELEMENTAL in installed_solvers():
            # huber, pnorm, power, inv_pos, sqrt and mixed_norm all reduce
            # to second-order cone programs.
            x = Variable(2, 2)
            prob = Problem(Minimize(huber(x)[0, 0]),
                           [x == [[0.5, -1.5], [4, 0]]])
            prob.solve(verbose=False, solver=ELEMENTAL)
            self.assertAlmostEqual(prob.value, 0.25)
            x = Variable(3)
            prob = Problem(Maximize(pnorm(x, .5)),
                           [x == [1.1, 2, .1]])
            # data = prob.get_problem_data(ELEMENTAL)
            # data['c'], data['b'], data['h'], data['A'], data['G']
            prob.solve(verbose=False, solver=ELEMENTAL)
            self.assertAlmostEqual(prob.value, 7.724231543909264, places=3)
            x = Variable()
            prob = Problem(Minimize(power(x, 1.34)), [x == 7.45])
            prob.solve(solver=ELEMENTAL, verbose=False)
            self.assertAlmostEqual(prob.value, 14.746515290825071, places=3)
            x = Variable(2, 2)
            expr = inv_pos(x)
            prob = Problem(Minimize(expr[1, 0]), [x == [[1, 2], [3, 4]]])
            prob.solve(solver=ELEMENTAL, verbose=False)
            # Constant([[1,1.0/2],[1.0/3,1.0/4]])),
            self.assertAlmostEqual(prob.value, 0.5)
            x = Variable(2, 2)
            expr = sqrt(x)
            constr = [x == [[2, 4], [16, 1]]]
            # Constant([[1.414213562373095,2],[4,1]])),
            # NOTE(review): no assertion after this solve — the sqrt case
            # only checks that solving does not raise.
            prob = Problem(Maximize(expr[0, 0]), constr)
            prob.solve(solver=ELEMENTAL, verbose=False)
            x = Variable(2, 3)
            expr = mixed_norm(x, 1, 1)
            constr = [x == [[1, 2], [3, 4], [5, 6]]]
            prob = Problem(Minimize(expr), constr)
            prob.solve(solver=ELEMENTAL, verbose=False)
            self.assertAlmostEqual(prob.value, 21)
|
"""
Program for initial training of models
"""
from variables import MAX_TITLE_LENGHT, DIMENSIONS
from variables import WORD_VEC_MODEL
from variables import TITLE_MODEL_JSON, TITLE_MODEL_H5
from variables import TITLE_ARCH1, TITLE_ARCH2, TILE_PLOT
import logging
import time
import pandas as pd
from numpy import random,shape, array
from flask import Flask
import gensim.downloader as api
from gensim.parsing.preprocessing import remove_stopwords , preprocess_string
from gensim.models import Word2Vec
from gensim.models.keyedvectors import Word2VecKeyedVectors
from keras.preprocessing.sequence import pad_sequences
from Algorithms.SiMantIcepLSTM import SiInceptionLSTM
from Algorithms.Sent2Vec import getSentVecs
from MetaData.Datasets.datasets import datasets
from models import db, Projects, Colleges
from config import SQLALCHEMY_DATABASE_URI
# Log INFO-level, timestamped lines to MetaData/traininginfo.txt.
logging.basicConfig(level=logging.INFO,filename='MetaData/'+'traininginfo.txt', format='%(asctime)s :: %(levelname)s :: %(message)s')
def lognprint(message):
    """Record `message` in the training log and echo it to the console."""
    for sink in (logging.info, print):
        sink(message)
lognprint('*'*50)
lognprint('\n\tTRAINING BEGAINS\n')
train_start_time=time.time()
lognprint('\n Creating database projects.db\n\nAdding rows to db.\n\n')
# 100 sample project titles/abstracts from the bundled datasets dict.
titles=list(datasets.keys())
abstracts=list(datasets.values())
# Throwaway Flask app solely to obtain a request context for Flask-SQLAlchemy.
app = Flask(__name__)
app.config.from_pyfile('config.py')
with app.test_request_context():
    db.init_app(app)
    db.create_all()
    # Three demo colleges, each seeded with a third of the projects,
    # split across the 2017/2018/2019 academic years.
    user1 = Colleges('test1','test1','test1','test1@email.com','test1 college,test 1 street, test city','1122334455')
    db.session.add(user1)
    for i in range(0,33):
        if i < 11 : user1.projects.append(Projects(titles[i],abstracts[i],'2017','CSE'))
        elif i < 22 : user1.projects.append(Projects(titles[i],abstracts[i],'2018','CSE'))
        else:user1.projects.append(Projects(titles[i],abstracts[i],'2019','CSE'))
    user2 = Colleges('test2','test2','test2','test2@email.com','test2 college,test 2 street, test city','6677889900')
    db.session.add(user2)
    for i in range(33,66):
        if i < 44 : user2.projects.append(Projects(titles[i],abstracts[i],'2017','CSE'))
        elif i < 55 : user2.projects.append(Projects(titles[i],abstracts[i],'2018','CSE'))
        else:user2.projects.append(Projects(titles[i],abstracts[i],'2019','CSE'))
    user3 = Colleges('test3','test3','test3','test3@email.com','test3 college,test 3 street, test city','234567890')
    db.session.add(user3)
    for i in range(66,100):
        if i < 77 : user3.projects.append(Projects(titles[i],abstracts[i],'2017','CSE'))
        elif i < 88 : user3.projects.append(Projects(titles[i],abstracts[i],'2018','CSE'))
        else:user3.projects.append(Projects(titles[i],abstracts[i],'2019','CSE'))
    db.session.commit()
lognprint('\n\n\nLoading gensims corpus and adding the vocab to our word_model\n')
temptime=time.time()
# Seed a shared KeyedVectors store with vectors trained on the text8 corpus.
word_model=Word2VecKeyedVectors(vector_size=DIMENSIONS)
corpus_model=Word2Vec(api.load('text8'),size=DIMENSIONS)
corpus_words=list(corpus_model.wv.vocab.keys())
corpus_vectors=[corpus_model.wv[word] for word in corpus_words]
word_model.add(corpus_words,corpus_vectors)
lognprint("Finished loading gensim's corpus model.\nTime taken:{t}\n".format(t=time.time()-temptime))
lognprint("Creating data-frames of Word and Sentence Trainers\n")
# MSR paraphrase pairs train the title model; the Lee corpus covers documents.
msrcsv='MetaData/'+'MSRTrainData.csv'
leecsv='MetaData/'+'LeeDocSimTrain.csv'
# NOTE(review): error_bad_lines was deprecated/removed in newer pandas
# (use on_bad_lines instead) — confirm the pinned pandas version.
tit_df=pd.read_csv(msrcsv, error_bad_lines=False)
abs_df=pd.read_csv(leecsv, error_bad_lines=False)
lognprint('Loading words to re-train word model\n')
# Tokenise every sentence/document (stop-words removed) into word lists.
new_words_list=[]
for index,row in tit_df.iterrows():
    for i in [row['Sentence1'],row['Sentence2']]:
        new_words_list.append(preprocess_string( remove_stopwords(i)))
for index,row in abs_df.iterrows():
    for i in [row['Document1'],row['Document2']]:
        new_words_list.append(preprocess_string( remove_stopwords(i)))
for i in titles:new_words_list.append(preprocess_string( remove_stopwords(i)))
for i in abstracts:new_words_list.append(preprocess_string( remove_stopwords(i)))
lognprint('Re-training with Word2Vec model with new words\n')
temp_time=time.time()
# Train a temporary model over the new vocabulary only.
new_model = Word2Vec(new_words_list, size=DIMENSIONS, window=5, min_count=1, workers=4)
lognprint('Finished temporary model with new words, adding words to word2vec model.\nTime taken {t}\n'.format(t=time.time()-temp_time))
# Merge the new vocabulary into the shared store, keeping existing vectors.
word_vecs=[]
words=[]
for lis in new_words_list:
    for word in lis:
        words.append(word)
        word_vecs.append(new_model.wv[word])
word_model.add(words,word_vecs,replace=False)
word_model.save("MetaData/"+WORD_VEC_MODEL)
Y_title=tit_df['Score']
X1_Title=[]
X2_Title=[]
for index, row in tit_df.iterrows():
sentence1=row['Sentence1']
sentence2=row['Sentence2']
tokens1=preprocess_string( remove_stopwords(sentence1) )
tokens2=preprocess_string( remove_stopwords(sentence2) )
if tokens1==[]:tokens1=['print']
if tokens2==[]:tokens2=['print']
vec_matrix1=[]
for word in tokens1:
vec=0
try:vec=word_model.wv[word]
except:vec=random.rand(DIMENSIONS)
vec_matrix1.append(vec)
vec_matrix2=[]
for word in tokens2:
vec=0
try:vec=word_model.wv[word]
except:vec=random.rand(DIMENSIONS)
vec_matrix2.append(vec)
temp1=[vec_matrix1]
temp2=[vec_matrix2]
pad_vec_matrix1=pad_sequences(temp1, padding='post', truncating='post', maxlen= MAX_TITLE_LENGHT,dtype='float64')
X1_Title.append(pad_vec_matrix1[0])
pad_vec_matrix2=pad_sequences(temp2, padding='post', truncating='post', maxlen= MAX_TITLE_LENGHT,dtype='float64')
X2_Title.append(pad_vec_matrix2[0])
tile_model= SiInceptionLSTM()
tile_model.build_model(x1=X1_Title, x2=X2_Title, Y=Y_title,
max_seq_length=MAX_TITLE_LENGHT,embedding_dim=DIMENSIONS,
arch_file_name1=TITLE_ARCH1 , arch_file_name2=TITLE_ARCH2,
plot_filename= TILE_PLOT,
modeljsonfile= TITLE_MODEL_JSON, modelh5file=TITLE_MODEL_H5)
lognprint('Training of Title model Finished')
lognprint("\n\nTotal training time : {t}\n".format(t=time.time()-train_start_time))
lognprint("Training of Word2Vec model and title model . Models can be found in the meta file\n")
lognprint("\n\n\n\t\t\t Training Completed\n\n")
lognprint('*'*50) |
class Solution:
    # @param nums, an integer[]
    # @return an integer
    def findPeakElement(self, nums):
        """Return the index of a peak element of *nums* (an element strictly
        greater than its neighbours; nums[-1] and nums[len] count as -inf).

        Binary search, O(log n). The original referenced an undefined name
        `num`, used Py2 `/` division (float indices under Py3) and only
        recomputed `mid` on some paths, so it could loop forever.
        """
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2          # floor division: valid index on Py2 and Py3
            if nums[mid] < nums[mid + 1]:
                lo = mid + 1              # ascending here -> a peak lies to the right
            else:
                hi = mid                  # descending/flat -> peak at mid or to the left
        return lo
import urllib, json
import numpy as np
import mysql.connector
def compute_boundary_limit(metricsValArray, metric):
    """Return (lower, upper) anomaly bounds for a metric: mean +/- 2 std devs.

    metricsValArray -- sequence of metric samples
    metric -- metric name (kept for interface compatibility / debugging)
    """
    # Compute mean and std once (the original recomputed each four times).
    mean = float(np.mean(metricsValArray))
    sd = float(np.std(metricsValArray))
    print("mean", mean)
    print("sd", sd)
    lbound = mean - 2 * sd
    ubound = mean + 2 * sd
    print("type of lbound", type(lbound))
    print(lbound)
    print(ubound)
    return lbound, ubound
def submitDetailstoDB(metric, lbound, ubound):
    """Insert one (metric, lower, upper) anomaly-range row into MySQL.

    Table: MetricsAnomalyRange (metricsName VARCHAR, lowerLimit DOUBLE,
    upperLimit DOUBLE, createdTime timestamp DEFAULT CURRENT_TIMESTAMP).
    """
    print("mysql connect for :", metric)
    mydb = mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="wipro@123",
        database="PlatformMonitor"
    )
    try:
        mycursor = mydb.cursor()
        # Parameterised insert; created time comes from the column default.
        sql = "INSERT INTO MetricsAnomalyRange (metricsName, lowerLimit, upperLimit) VALUES (%s, %s, %s)"
        mycursor.execute(sql, (metric, lbound, ubound))
        mydb.commit()
        print(mycursor.rowcount, "record inserted.")
    finally:
        # Always release the connection (the original leaked it on every call).
        mydb.close()
def archiveToHistory():
    """Copy all rows from MetricsAnomalyRange into MetricsAnomalyRangeHistory
    and truncate the live table, so the next run starts from a clean slate.

    A no-op when the live table is empty.
    """
    print("mysql connect for archiving")
    mydb = mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="wipro@123",
        database="PlatformMonitor"
    )
    try:
        mycursor = mydb.cursor()
        mycursor.execute("SELECT count(*) FROM MetricsAnomalyRange")
        no_of_rows = mycursor.fetchone()[0]
        print(no_of_rows)
        if no_of_rows > 0:
            print("mysql connect insert to history")
            mycursor.execute("INSERT INTO MetricsAnomalyRangeHistory SELECT * from MetricsAnomalyRange")
            mydb.commit()
            print(mycursor.rowcount, "record inserted.")
            print("truncate table")
            mycursor.execute("TRUNCATE TABLE MetricsAnomalyRange")
            mydb.commit()
    finally:
        # The original never closed the connection; ensure it is released
        # even when a SQL statement raises.
        mydb.close()
# --- script entry: archive the previous run, pull 3h of metrics from OpenTSDB,
# --- then compute and store fresh anomaly bounds per metric. (Python 2 script.)
archiveToHistory()
cpu_querystring = "&m=sum:cpu_user&m=sum:cpu_system&m=sum:cpu_idle&m=sum:cpu_nice&m=sum:cpu_wio"
memory_querystring = "&m=sum:mem_free&m=sum:mem_cached&m=sum:mem_total&m=sum:mem_shared"
#disk_querystring = "&m=sum:read_bytes&m=sum:write_bytes"
#network_querystring = "&m=sum:bytes_in&m=sum:bytes_out"
#url = "http://10.201.45.15:4242/api/query?start=1h-ago"+cpu_querystring+memory_querystring+disk_querystring+network_querystring;
url = "http://10.201.45.15:4242/api/query?start=3h-ago"+cpu_querystring+memory_querystring;
response = urllib.urlopen(url)  # Python 2 API (urllib.request.urlopen in Py3)
data = json.loads(response.read())
metricsCnt = len(data)
i=0
while i < metricsCnt :
    datapoints = data[i]['dps']      # {timestamp: value} per metric in the OpenTSDB response
    metric = data[i]['metric']
    metricsValArray = datapoints.values()
    #print metric
    print metricsValArray
    #Anomaly boundary computation
    lbound,ubound = compute_boundary_limit(metricsValArray,metric)
    #DB insertion
    submitDetailstoDB(metric,lbound,ubound)
    i+=1
|
#config file containing credentials for rds mysql instance
# SECURITY NOTE(review): database credentials should live in environment
# variables or a secrets manager, not in source control (values below are
# already redacted).
db_username = "awsuser"
db_password = "<redacted>"
db_name = "testdb"
db_endpoint = "<redacted>.amazonaws.com"
|
# -*- coding: utf-8 -*-
# Demo of str.count with optional [start, end) slice arguments.
str1="Never say Never! Never say Impossible!"
str2="浪花有意千重雪,桃李無言一隊春。\n一壺酒,一竿綸,世上如儂有幾人?"

count_never = str1.count("Never", 15)   # "Never" occurrences at index >= 15
count_e = str1.count("e", 0, 3)         # 'e' occurrences inside "Nev"
count_yi = str2.count("一")             # every "一" in the poem

print("{}\n「Never」出現{}次,「e」出現{}次".format(str1, count_never, count_e))
print("\n{}\n「一」出現{}次".format(str2, count_yi))
import datetime
import pytz
from sklearn.base import BaseEstimator, TransformerMixin
class ActualsAdder(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that joins logged actual arrival times onto
    vehicle datapoints and derives the target column `actual_seconds_from_now`."""

    def __init__(self, actuals_frame):
        # Raw actuals log as a pandas DataFrame; cleaned lazily in _load_actuals().
        self.actuals_frame = actuals_frame

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    # Join actuals to a set of vehicle datapoints
    def transform(self, vehicle_datapoints, y=None):
        """Inner-join vehicle datapoints with actuals on
        (gtfs_trip_id, service_date); keep only rows whose actual event is in
        the datapoint's future; drop join keys and NaN rows."""
        actuals = self._load_actuals()
        vehicle_datapoints["gtfs_trip_id"] = \
            vehicle_datapoints["gtfs_trip_id"].astype(str)
        # Parse ISO-8601 UTC strings into POSIX timestamps.
        # NOTE(review): the lambda parameter shadows the builtin `str`.
        vehicle_datapoints["timestamp"] = \
            vehicle_datapoints["timestamp"].apply(
                lambda str: datetime.datetime.strptime(
                    str,
                    "%Y-%m-%dT%H:%M:%S.%fZ"
                ).replace(tzinfo=pytz.UTC).timestamp()
            )
        self._add_service_date(actuals, 'time')
        self._add_service_date(vehicle_datapoints, 'timestamp')
        merged_frame = vehicle_datapoints.merge(
            actuals,
            how="inner",
            on=['gtfs_trip_id', 'service_date']
        )
        # Regression target: seconds between the datapoint and the actual event.
        merged_frame["actual_seconds_from_now"] = \
            merged_frame["time"] - merged_frame["timestamp"]
        merged_frame = merged_frame.drop(
            [
                "timestamp",
                "time",
                'service_date'
            ], axis=1
        )
        # Discard actuals that already happened relative to the datapoint.
        merged_frame = merged_frame.query("actual_seconds_from_now > 0")
        return merged_frame.dropna()

    def _add_service_date(self, dataframe, series_name):
        # Derive the transit service date from the given timestamp column
        # (mutates *dataframe* in place and also returns it).
        dataframe['service_date'] = dataframe[series_name].apply(
            self._service_date_for_timestamp
        )
        return dataframe

    # Build a dataframe from prediction analyzer logs, dropping actuals without
    # times, commuter rail trips, and duplicates (which we get because we log
    # both from dev-green and prod).
    def _load_actuals(self):
        raw_frame = self.actuals_frame.filter(
            ['event_type', 'stop_id', 'time', 'trip_id', 'vehicle_id'],
            axis=1
        )
        raw_frame['stop_id'] = raw_frame['stop_id'].astype('str')
        dropped_frame = raw_frame.dropna(subset=["time"])
        # Commuter-rail trip ids are prefixed "CR-"; keep subway trips only.
        is_subway = dropped_frame["trip_id"].apply(lambda x: x[0:3] != "CR-")
        subway_actuals = dropped_frame[is_subway].drop_duplicates()
        subway_actuals = subway_actuals.rename(
            {"trip_id": "gtfs_trip_id", "stop_id": "destination_gtfs_id"},
            axis="columns"
        )
        subway_actuals["gtfs_trip_id"] = \
            subway_actuals["gtfs_trip_id"].astype(str)
        return subway_actuals

    def _service_date_for_timestamp(self, timestamp):
        # Service days run 3 AM to 3 AM, so adjust the time by 3 hours
        adjusted_timestamp = timestamp - 10800
        utc_adjusted_time = datetime. \
            datetime. \
            utcfromtimestamp(adjusted_timestamp). \
            replace(tzinfo=pytz.UTC)
        adjusted_time = utc_adjusted_time.astimezone(
            pytz.timezone('America/New_York')
        )
        # The ordinal day number (in Eastern time) is the service-date key.
        return adjusted_time.toordinal()
|
import csv
import argparse
import re
from urllib import request
# Download a CSV web log and count image and browser hits with regex searches.
url = 'http://s3.amazonaws.com/cuny-is211-spring2015/weblog.csv'
fileOpen = request.urlopen(url)
readFile = fileOpen.read()
decFile = readFile.decode('ascii').split('\n')
for line in decFile:
    print(line)
# NOTE(review): the findall calls below run over str(decFile) — the repr of the
# whole list — so each counts occurrences across the entire log at once.
x = re.findall("\.jpg|\.JPG", str(decFile))
y = re.findall("\.gif|\.GIF", str(decFile))
z = re.findall("\.png|\.PNG", str(decFile))
m = re.findall("Mozilla", str(decFile))
c = re.findall("Chrome", str(decFile))
#i = re.findall("Internet Explorer", str(decFile))
s = re.findall("Safari", str(decFile))
print(len(m))
print(len(c))
#print(i)
print(len(s))
totalJPG = len(x)
totalGIF = len(y)
totalPNG = len(z)
totalImg = len(x) + len(y) + len(z)
print(f"Total .jpg images is {totalJPG}")
print(f"Total .gif images is {totalGIF}")
print(f"Total .png images is {totalPNG}")
# Percentage of all log lines that were image requests.
totalDecfile = len(decFile)
div = totalImg/totalDecfile * 100
print("Image requests account for {:.1f}% of all requests".format(div))
from django.db import models
# Create your models here.
class InsureType(models.Model):
    """Lookup table of insurance types offered."""
    name = models.CharField(max_length=20)
    is_active = models.BooleanField(default=True)      # soft-delete flag
    create_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = "insure_type"

    def __str__(self):
        return "%s" % (self.name)
class CarType(models.Model):
    """Lookup table of car types."""
    name = models.CharField(max_length=20 )
    is_active = models.BooleanField(default=True)      # soft-delete flag
    create_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = "car_type"

    def __str__(self):
        return "%s" % (self.name)
class Agent(models.Model):
    """Insurance agent, identified by a short code."""
    code = models.CharField(max_length=20 )
    first_name = models.CharField(max_length=200 )
    last_name = models.CharField(max_length=200 )
    is_active = models.BooleanField(default=True)      # soft-delete flag
    create_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = "agent"

    def __str__(self):
        # e.g. "A01(Jane Doe)"
        return "%s(%s %s)" %(self.code, self.first_name, self.last_name)
class Customer(models.Model):
    """End customer purchasing a service."""
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    telephone = models.CharField(max_length=10)
    create_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = "customer"

    def __str__(self):
        return "%s %s" %(self.first_name, self.last_name)
class Service(models.Model):
    """A car-insurance service record linking customer, agent, car and type.

    All foreign keys cascade on delete.
    """
    insure_type = models.ForeignKey(InsureType, on_delete=models.CASCADE)
    customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
    agent = models.ForeignKey(Agent, on_delete=models.CASCADE)
    car_type = models.ForeignKey(CarType, on_delete=models.CASCADE)
    license_plate = models.CharField(max_length=7)
    remark = models.TextField(null=True, default=None)  # optional free text

    class Meta:
        db_table = "service"

    def __str__(self):
        return "%s - %s" %(self.customer, self.agent)
from model.cluster import Grid
from programs import Haplotyper
import Readers, logging, traceback
class AllelicDiversity():
    """The class AllelicDiversity includes all methods and variables needed for calculation of the allelic diversity.
    """
    def __init__(self, pool,gffFile):
        """The constructor sets the given variables as instance variables

        :param pool: The pool to calculate the allelic diversity for
        :type pool: an instance of a :py:class`Pool.Pool` object
        :param gffFile: The path to the gff file
        :type gffFile: str. - path to the gffFile
        """
        self.gffFile = gffFile
        self.pool = pool
        #"C:/Users/Jetse/data/test/testPhenotyper/geneModelChr12.gff3", vcfFile = "C:/Users/Jetse/data/test/testPhenotyper/fictiveSNPs.vcf"

    def _getAllHaplotypesByAccession(self, contigs):
        """The method getAllHaplotypesByAccession retrieves creates a dictionary with the accession as key and the haplotype as value

        :param contigs: The contigs to get the haplotypes from
        :type contigs: an list of :py:class:`Contig.Contig` instances
        :returns: dict mapping contig key -> {accession: [haplotype, ...]}
        """
        allHaplotypes = {}
        for key in contigs:
            haplotypes = {}
            # Only contigs that actually carry SNPs contribute haplotypes.
            if len(self.allContigs[key].snps) > 0:
                # Invert the contig's haplotype->accessions mapping to
                # accession->haplotypes.
                for haplotype,accessions in self.allContigs[key].haplotypes.items():
                    for accession in accessions:
                        if accession in haplotypes:
                            haplotypes[accession].append(haplotype)
                        else:
                            haplotypes[accession] = [haplotype]
                allHaplotypes[key] = haplotypes
        return allHaplotypes

    def _parseFiles(self, chrom):
        """The method parseFiles creates the reader objects to parse all files.

        :param chrom: The chromosome to parse
        :type chrom: str

        Side effect: sets self.allContigs from the gff file and annotates the
        contigs with SNPs from self.vcfFile.
        """
        gffReader = Readers.GffReader(chrom=chrom)
        gffReader.readFile(self.gffFile)
        self.allContigs = gffReader.contigs
        vcfReader = Readers.VcfReader(self.allContigs.values())
        vcfReader.readFile(self.vcfFile)

    def getAllelicDiversity(self):
        """The method getAllelicDiversity calculates the allelic diversity and writes the output to a file.

        Runs Beagle haplotyping (on the grid or multithreaded), then writes one
        tab-separated table per vcf entry of the pool.
        """
        if Grid.useGrid == True:
            Haplotyper.executeBeagleCluster(self.pool)
        else:
            Haplotyper.executeBeagleMultiThread(self.pool)
        # NOTE(review): self.pool.vcf is iterated for keys and indexed below,
        # so it is assumed to be dict-like — confirm against Pool.
        for vcf in self.pool.vcf:
            if vcf == None:
                logging.info("Starting to calculate the allelic diversity")
                outputFile = self.pool.outputDir + "/allelicDiversity.csv"
            else:
                logging.info("calculating allelic diversity of " + vcf)
                outputFile = self.pool.outputDir + "/"+vcf + "_" + "allelicDiversity.csv"
            try:
                self.vcfFile = self.pool.vcf[vcf].getFile()
                self._parseFiles(vcf)
                haplotypes = self._getAllHaplotypesByAccession(self.allContigs)
                # NOTE(review): .values()[0] is Python-2 only (dict views are
                # not subscriptable in Python 3); raises IndexError when no
                # contig had SNPs, which is handled below.
                accessions = haplotypes.values()[0].keys()
                with open(outputFile, "w") as outWriter:
                    # Header: contig, reference haplotype, then two columns
                    # (alleles) per accession.
                    outWriter.write("contig\toriginal\t")
                    for accession in accessions: outWriter.write( accession + "_1\t" + accession + "_2\t")
                    outWriter.write("\n")
                    for contigId in self.allContigs:
                        outWriter.write(contigId + "\t")
                        try:
                            outWriter.write(self.allContigs[contigId].refHaplotype + "\t")
                        except AttributeError: outWriter.write("-\t")
                        for accession in accessions:
                            for i in range(2):
                                if contigId in haplotypes:
                                    outWriter.write(haplotypes[contigId][accession][i] + "\t")
                                else:
                                    # "-" marks contigs without haplotype data.
                                    outWriter.write("-\t")
                        outWriter.write("\n")
            except IndexError:
                if vcf == None:
                    logging.warning("No SNPs within contigs found")
                else:
                    logging.warning("No SNPs within contigs found of " + vcf)
            except Exception as ex:
                if vcf == None:
                    logging.error("an error occured during parsing the vcf file")
                else:
                    logging.error("an error occured during parsing " + vcf)
                logging.error(ex)
                traceback.print_exc()
|
import networkx as nx
def findTiers(G):
    """Partition the nodes of directed graph *G* into three tiers.

    t1 -- sources: out-edges but no in-edges
    t2 -- intermediates: both out-edges and in-edges
    t3 -- everything else (no out-edges)

    Bug fixed: the original compared the bound method itself
    (`G.in_degree == 0` / `G.in_degree > 0`) instead of calling
    `G.in_degree(u)`, so the tier tests never evaluated the degree.
    """
    t1 = []
    t2 = []
    t3 = []
    for u in G.nodes():
        out_deg = G.out_degree(u)
        in_deg = G.in_degree(u)
        if out_deg > 0 and in_deg == 0:
            t1.append(u)
        elif out_deg > 0 and in_deg > 0:
            t2.append(u)
        else:
            t3.append(u)
    return t1, t2, t3
#G = nx.read_gml('Yeast0.gml')
#t1,t2,t3 = findTiers(G) |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#http://www.linux.org.ru/forum/development/1788460
#http://ps.readthedocs.io/ru/latest/strings.html
#http://stackoverflow.com/questions/41204234/python-pyqt5-qtreewidget-sub-item
#http://ru.stackoverflow.com/questions/511955/pyqt5-Контекстное-меню-только-на-элементах-qtreewidget
#http://stackoverflow.com/questions/11608276/extract-data-from-db-then-pass-it-to-qtreeview
import sys
from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem, QApplication, QWidget
from PyQt5 import QtSql
# SQLite database file queried below.
dbPath = 'Cert2.db3'
app = QApplication(sys.argv)
con = QtSql.QSqlDatabase.addDatabase('QSQLITE')
con.setDatabaseName('Cert2.db3')
con.open()
query = QtSql.QSqlQuery()
# Join units, unit subjects and certificate dates for the 'Alapaevsk' subject.
query.exec_("""select u.Name,
us.Name,
cd.DateCre,
cd.DateExp,
cd.PodrName,
cd.UserName
from Unit as U,
UnitSubject as us,
CertDate as cd,
LinkCertORUnitSub as link
WHERE 1=1
and link.idPodr = us.id
and link.IdCert = cd.id
and U.id = us.idPodr
and us.Name = 'Alapaevsk'
""")
lst = []
if query.isActive():
    query.first()
    # Collect the 'Name' column of every result row.
    # NOTE(review): the result set has two Name columns (u.Name, us.Name);
    # which one value('Name') resolves to depends on the driver — confirm.
    while query.isValid():
        print(query.value('Name'))
        lst.append(query.value('Name'))
        query.next()
#print(lst)
con.close()
sys.exit(app.exec_())
|
"""
biosignatures: elemental signature analysis of cyanobacteria
------------------------------------------------------------
"""
from distutils.core import setup
# NOTE(review): distutils is deprecated (removed in Python 3.12); setuptools'
# setup() is the drop-in replacement when porting.
# Packages the current directory as the 'biosignatures' package, shipping its
# .py files as package data.
setup(name='biosignatures',
      packages = ['biosignatures'],
      package_dir = {'biosignatures':'.'},
      package_data={'biosignatures': ['./*.py']})
|
#!/usr/bin/env python
"""reads stdin, removes line breaks, replaces whitespace with one space, and pipes to pbcopy"""
import sys, re, os, optparse, commands
# Raised for user-facing input errors; caught in main() and reported
# through the option parser.
class UsageException(Exception): pass
def main():
    # Parse options, then either run the self-tests or pipe stdin through
    # onelinecopy; usage problems become optparse errors. (Python 2 script.)
    parser = optparse.OptionParser(usage='stdin | %prog [options]')
    (options, args) = parse_input(parser)
    if options.test:
        test()
    else:
        lines = sys.stdin.readlines()
        try:
            sys.exit(onelinecopy(lines, discard=options.discard))
        except UsageException, e:  # Python 2 except syntax
            parser.error(e)
def onelinecopy(lines, discard=None):
    # Collapse *lines* into one space-separated line and copy it to the macOS
    # clipboard via pbcopy. Returns 0 on success.
    # discard: compiled regex; lines matching it are dropped entirely.
    if len(lines) is 0:  # NOTE(review): identity test on an int; should be == 0
        raise UsageException('did not receive any input')
    if discard is not None:
        def filter(line):  # NOTE(review): shadows the builtin filter()
            if re.match(discard, line): return ''
            else: return line
        lines = map(filter, lines)
    blob = ' '.join(lines)
    blob = re.sub(r'[\s]+', ' ', blob)
    # Escape double quotes for the shell command below. NOTE(review): other
    # shell metacharacters ($, `, \) are not escaped — injection-prone.
    blob = re.sub(r'"', r'\"', blob)
    os.system('echo -n "%s" | pbcopy' % blob)
    print "copied %d bytes to clipboard" % len(blob)
    return 0
def parse_input(parser):
    # Declare CLI options and pre-compile the --discard pattern, turning a
    # bad regex into a parser error. Returns (options, args).
    parser.add_option('-t', '--test', help='run tests and exit', action='store_true')
    parser.add_option('-d', '--discard', help='discard lines that match this regular expression', type='string')
    (options, args) = parser.parse_args()
    if options.discard is not None:
        try:
            options.discard = re.compile(options.discard)
        except re.error, e:  # Python 2 except syntax
            parser.error('invalid DISCARD pattern (%s)' % e)
    return (options, args)
def test():
    # Round-trip self-test: copy fixtures through onelinecopy, then read the
    # clipboard back with pbpaste (macOS only) and compare.
    def assert_copied(exp):
        out = commands.getoutput('pbpaste').strip()
        assert exp == out, 'actual: %s' % out
    print "testing..."
    blob = """
blah,
blah, blah"""
    onelinecopy(blob.split("\n"))
    assert_copied('blah, blah, blah')
    # Second fixture exercises the --discard filter and whitespace collapsing.
    blob = """
one,
--two,
--three,
four \t\r\n """
    onelinecopy(blob.split("\n"), discard=re.compile(r'^[\s]*--'))
    assert_copied( 'one, four')
# Script entry point.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import mysql.connector
class GuxinlangPipeline(object):
    """Scrapy item pipeline that persists each scraped item into MySQL."""

    def process_item(self, item, spider):
        """Insert fields a..m of *item* into table glx and return the item.

        Fixed: the original built the SQL with % string interpolation, which
        breaks on quotes in scraped data and is SQL-injectable; it also leaked
        the connection when execute/commit raised.
        """
        db = mysql.connector.connect(host='localhost', port=3306, user='root',
                                     password='123', db='gl')
        try:
            cursor = db.cursor()
            # Parameterised insert: values are bound by the driver, not
            # interpolated into the SQL text.
            sql = ("insert into glx(a,b,c,d,e,f,g,h,i,j,k,l,m) "
                   "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
            values = tuple(item[k] for k in 'abcdefghijklm')
            print(sql)
            cursor.execute(sql, values)
            db.commit()
        finally:
            db.close()
        return item
|
from sys import stdin
s = str(stdin.readline().strip())
result = []
while(s != '.'):
array = []
flag = 0
for i in range(len(s)):
if s[i] == '(' or ')' or '[' or ']':
if len(array) == 0 and s[i] == '(' or s[i] == '[':
array.append(s[i])
print(i, array)
elif len(array) == 0 and s[i] == ')' or s[i] == ']':
flag = 1
break
elif array[-1] != s[i]:
if array[-1] == '(' or array[-1] == ')' and s[i] == '[' or s[i] == ']':
array.append(s[i])
print(i, array)
elif array[-1] == '[' or array[-1] == ']' and s[i] == '(' or s[i] == ')':
array.append(s[i])
print(i, array)
else:
array.pop()
print(i, array)
else:
array.append(s[i])
print(i, array)
else:
continue
if len(array) == 0 and flag == 0:
result.append('yes')
else:
result.append('no')
s = str(stdin.readline().strip())
print(result)
|
# create a password
import re  # the original called re.search below without importing re (NameError)

password = input("Create a password : ")
x = True
while x:
    if len(password) < 8 or len(password) > 16:
        # Message fixed to match the actual 8-16 length check above
        # (the original said "between 6 and 12").
        print("Password length between 8 and 16 please")
        break
    elif not re.search("[a-z]", password):
        print("You need at least one lower case letter")
        break
    elif not re.search("[0-9]", password):
        print("You need at least one number")
        break
    elif not re.search("[A-Z]", password):
        print("You need at least one upper case character")
        break
    elif not re.search("[$#@]", password):
        print("You need at least one special character")
        break
    elif re.search(r"\s", password):
        print("You cannot have blank spaces in your password...")
        break
    else:
        print("Valid Password")
        x = False
        break
# https://leetcode.com/problems/sort-colors/
def sortColors(nums):
    """Sort a list of 0/1/2 values in place (counting-sort pass of the Dutch
    national flag problem). Returns None, like list.sort().

    Fixes: `xrange` is Python-2 only (NameError on Py3), and the original
    demo printed the function's None return value instead of the sorted list.
    """
    counts = [0, 0, 0]
    for v in nums:
        counts[v] += 1
    idx = 0
    for color in range(3):
        for _ in range(counts[color]):
            nums[idx] = color
            idx += 1

if __name__ == '__main__':
    demo = [0, 1, 0, 2, 0, 1, 0]
    sortColors(demo)       # sorts in place
    print(demo)
|
# Copyright 2015 Dimitri Racordon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import scoped_session, create_session
from flask import current_app
# Thread-local SQLAlchemy session factory. The engine is resolved lazily from
# the current Flask app, so sessions are only created inside an app context.
db_session = scoped_session(
    lambda: create_session(bind=current_app.db_engine, autocommit=False, autoflush=False))
def db_sync():
    """Create all model tables on the current application's engine.

    Also resets the Issue table's AUTO_INCREMENT after creation on
    PostgreSQL/MySQL dialects.

    Fixed: the original ended with `models.Base.metadata...` but never bound
    the name `models` (only `mushi.core.db.models` was imported), raising
    NameError on every call.
    """
    from sqlalchemy import DDL, event

    # Import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling db_sync()
    import mushi.core.db.models as models

    event.listen(
        models.Issue.__table__,
        'after_create',
        DDL('ALTER TABLE %(table)s AUTO_INCREMENT = 1;').execute_if(
            dialect=('postgresql', 'mysql'))
    )
    models.Base.metadata.create_all(bind=current_app.db_engine)
|
from mamba import description, it, before, context
from expects import expect, have_key, be_none
import securecscc
from securecscc import origins
from specs.support import fixtures
from specs.support.matchers import be_an_uuid
# Mamba BDD specs for origins.Falco: each `with it(...)` example maps a fixture
# Falco event through Falco.create_from and asserts on one field of the
# resulting finding dict.
with description(origins.Falco) as self:
    with before.each:
        # Fresh settings + mapper for every example.
        self.settings = securecscc.Settings()
        self.mapper = origins.Falco(self.settings)

    with it('uses the source_id assigned to us from Google'):
        finding = self.mapper.create_from(fixtures.event_falco())
        expect(finding).to(have_key('source_id', self.settings.source_id()))

    with it('uses the rule as category'):
        category = 'Terminal shell in container'
        finding = self.mapper.create_from(fixtures.event_falco())
        expect(finding).to(have_key('category', category))

    with it('uses only seconds from event time'):
        # Whole-second epoch value — sub-second precision is dropped.
        event_time = 1526547969
        finding = self.mapper.create_from(fixtures.event_falco())
        expect(finding).to(have_key('event_time', event_time))

    with it('does not set any url'):
        finding = self.mapper.create_from(fixtures.event_falco())
        expect(finding).to(have_key('url', be_none))

    with it('uses an uuid as id'):
        finding = self.mapper.create_from(fixtures.event_falco())
        expect(finding).to(have_key('id', be_an_uuid()))

    with it('uses organization as asset ids'):
        finding = self.mapper.create_from(fixtures.event_falco())
        expect(finding).to(have_key('asset_ids', [self.settings.organization()]))

    with context('when building properties'):
        with it('adds output'):
            output = "A shell was spawned in a container with an attached terminal (user=root unruffled_hamilton (id=32c415f00958) shell=bash parent=<NA> cmdline=bash terminal=34816)"
            finding = self.mapper.create_from(fixtures.event_falco())
            expect(finding).to(have_key('properties', have_key('summary', output)))

        with it('adds priority'):
            finding = self.mapper.create_from(fixtures.event_falco())
            expect(finding).to(have_key('properties', have_key('priority', 'Notice')))

        with it('adds container id'):
            finding = self.mapper.create_from(fixtures.event_falco())
            expect(finding).to(have_key('properties', have_key('container.id', '32c415f00958')))

        with it('adds container name'):
            finding = self.mapper.create_from(fixtures.event_falco())
            expect(finding).to(have_key('properties', have_key('container.name', 'unruffled_hamilton')))

    with context('when creating from falco with kubernetes integration enabled'):
        with it('adds pod name to properties'):
            finding = self.mapper.create_from(fixtures.event_falco_k8s())
            expect(finding).to(have_key('properties', have_key('kubernetes.pod.name', 'falco-event-generator-6fd89678f9-cdkvz')))
|
from keras.utils.visualize_util import plot
from keras import backend as K
from keras.models import load_model
import numpy as np
import h5py, pickle, cv2
from os.path import abspath, dirname
from os import listdir
import scipy as sp
from datetime import datetime
from shutil import copyfile
from random import randint
from sklearn.metrics import confusion_matrix
# Repository root: two directory levels up from this file.
ROOT = dirname(dirname(abspath(__file__)))
def logloss(actual, preds):
    """Mean binary cross-entropy between labels *actual* and probabilities
    *preds*, with predictions clamped to [epsilon, 1 - epsilon] to avoid
    log(0).

    Changed: np.log replaces sp.log — the scipy top-level numpy aliases were
    deprecated and removed in modern SciPy releases.
    """
    epsilon = 1e-15
    ll = 0.0
    for act, pred in zip(actual, preds):
        pred = min(1 - epsilon, max(epsilon, pred))
        ll += act * np.log(pred) + (1 - act) * np.log(1 - pred)
    return -ll / len(actual)
def visualizer(modell):
    """Render the Keras model architecture (with layer shapes) to <ROOT>/vis.png."""
    plot(modell, to_file=ROOT + '/vis.png', show_shapes=True)
def get_confusion_matrix(y_true, y_pred):
    """Binarise probability outputs at the 0.5 threshold and return
    sklearn's confusion matrix against *y_true*."""
    thresholded = [1. if row[0] > 0.5 else 0. for row in y_pred]
    return confusion_matrix(y_true, thresholded)
def shuffle_in_unison(a, b):
    """Shuffle two equal-length arrays with one shared random permutation,
    keeping row i of *a* paired with row i of *b*. Returns new views."""
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
def classifyTrainGen(x=None, y=None, batch_size=256, target=False):
    """Infinite generator of (batch_x, batch_y) training batches.

    batch_y is a dict with 'domain_pred' (all zeros: source domain) and
    'classifier' (the labels, or zeros when *target* is set). Data is
    reshuffled in unison each time the epoch wraps around.

    Fixed: `xrange` is Python-2 only; `range` works on both versions.
    """
    i = 0
    x, y = shuffle_in_unison(x, y)
    while 1:
        batch_x = np.ndarray((batch_size,) + x.shape[1:], dtype=np.float32)
        batch_y = dict()
        batch_y['domain_pred'] = np.asarray([0.] * batch_size, dtype=np.float32)
        y_true = np.ndarray((batch_size,), dtype=np.float32)
        for nb in range(batch_size):
            batch_x[nb] = x[i]
            y_true[nb] = y[i] if not target else 0.
            i = (i + 1) % len(x)
            if i == 0: x, y = shuffle_in_unison(x, y)  # epoch boundary: reshuffle
        batch_y['classifier'] = y_true
        yield batch_x, batch_y
def classifyValGen(x=None, y=None, batch_size=256):
    """Infinite generator of (batch_x, batch_y) validation batches, cycling
    through *x*/*y* in order.

    Fixed: `xrange` is Python-2 only; `range` works on both versions.
    """
    i = 0
    while 1:
        batch_x = np.ndarray((batch_size,) + x.shape[1:], dtype=np.float32)
        batch_y = np.ndarray((batch_size,), dtype=np.float32)
        for nb in range(batch_size):
            batch_x[nb] = x[i]
            batch_y[nb] = y[i]
            i = (i + 1) % len(x)
            # NOTE(review): the shuffled result is discarded here — probably
            # intended as `x, y = shuffle_in_unison(x, y)`; behavior kept as-is.
            if i == 0: shuffle_in_unison(x, y)
        yield batch_x, batch_y
def gangen(x=None, batch_size=256):
    """Infinite generator of adversarial batches: inputs cycled from *x*,
    'domain_pred' labels all ones (target domain) and 'classifier' labels all
    zeros. *x* is re-permuted each time the epoch wraps.

    Fixed: `xrange` is Python-2 only; `range` works on both versions.
    """
    i = 0
    while 1:
        batch_x = np.ndarray((batch_size,) + x.shape[1:], dtype=np.float32)
        batch_y = dict()
        batch_y['domain_pred'] = np.ones(batch_size)
        batch_y['classifier'] = np.zeros(batch_size)
        for nb in range(batch_size):
            batch_x[nb] = x[i]
            i = (i + 1) % len(x)
            if i == 0: x = x[np.random.permutation(len(x))]  # epoch boundary
        yield batch_x, batch_y
def join(A, B):
    # Merge a source dataset A = (x1, y1-dict) and a target dataset
    # B = (x2, y2-dict) into one domain-discrimination set: domain label 0
    # for A's rows, 1 for B's, classifier labels zeroed.
    # NOTE(review): y1 and y2 are mutated in place ('domain_pred' overwritten).
    x1, y1 = A
    x2, y2 = B
    x = np.concatenate((x1, x2))
    y = dict()
    y1['domain_pred'] = np.zeros(len(x1), dtype=np.float32)
    y2['domain_pred'] = np.ones(len(x2), dtype=np.float32)
    y_domain = np.concatenate((y1['domain_pred'], y2['domain_pred']))
    p = np.random.permutation(len(x))
    # Permuted by p here and shuffled again inside shuffle_in_unison — double
    # shuffling; the result is still a uniform random permutation of pairs.
    x, y['domain_pred'] = shuffle_in_unison(x[p], y_domain[p])
    y['classifier'] = np.zeros(len(x), dtype=np.float32)
    return x,y
def accuracy(y_pred, y_true):
    # Print the log-loss of the first output head, then return the fraction of
    # its predictions within 0.5 of the true binary label. (Python 2 print.)
    print logloss(y_true, y_pred[0])
    acc = 0
    for i, pred in enumerate(y_pred[0]):
        acc += (np.abs(pred[0] - y_true[i]) < 0.5)
    return float(acc) / len(y_true)
def dumper(model, fname=None):
    """Persist *model* to *fname* via model.save() and return the path.

    Raises IOError naming the path when the save fails.

    Fixed: the original wrapped the call in `with open(fname, 'w')`, which
    truncated the target file and then never used the handle — model.save()
    receives the *path*, so the extra open was both redundant and destructive.
    """
    try:
        model.save(fname)
    except IOError:
        raise IOError('Unable to open: {}'.format(fname))
    return fname
from flask import Flask, request
from logging.handlers import RotatingFileHandler
from flask_restful import Api, Resource
import json,subprocess,logging,traceback
import pandas as pd
app = Flask(__name__)
api = Api(app)
#logging.basicConfig(filename='app.log', format="%(asctime)s:%(filename)s:%(message)s")
# Whitelists used to validate incoming /predict requests.
STATES_LIST = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'HI',
               'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI',
               'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV',
               'NY', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT',
               'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
PARTY_LIST = ['R', 'D', 'LIB', 'G', 'I', 'GRE', 'NPA', 'IND',
              'US Taxpayers', 'Working Class', 'Legal Marijuana Now',
              'Independence Party', 'REF', 'CON', 'Reform Party', 'WOF', 'IPO',
              'L', 'AME', 'DPD', 'Mountain', "Women's Equality Party"]
class Predict(Resource):
    """REST resource: POST /predict validates election parameters, shells out
    to data.py for the prediction, and returns its JSON result."""

    def post(self):
        """Validate the posted JSON; on success run data.py and return the
        prediction it writes to text.txt, otherwise return an error message."""
        postedData = request.get_json()
        retJson = {}
        # NOTE(review): `logger` is created only in the __main__ block; this
        # raises NameError under a WSGI server — confirm deployment model.
        logger.debug(postedData)
        # Reject up front when any required attribute is missing. (The
        # original fell through and ran the subprocess with an empty payload.)
        if not all(k in postedData for k in ('state', 'party', 'district', 'voteshare')):
            retJson["message"] = "All 4 of the attributes 'state', 'party', 'district', 'voteshare' has to be populated"
            return retJson
        state = postedData["state"]
        party = postedData["party"]
        district = postedData["district"]
        voteshare = postedData["voteshare"]
        if state not in STATES_LIST:
            retJson["message"] = "Syntax of state is Wrong or No Data for this state now."
            return retJson
        elif party not in PARTY_LIST:
            retJson["message"] = "Choose from party list in the documentation."
            return retJson
        elif int(voteshare) < 0 or int(voteshare) > 100:
            retJson["message"] = "Voteshare is the percentage value. So it should stay between 0 to 100."
            return retJson
        elif int(district) > 32:
            retJson["message"] = "Check your district"
            return retJson
        postJson = {
            "state": state,
            "party": party,
            "district": district,
            "voteshare": voteshare,
            "state_district": state + "_" + str(district)}
        stringPosted = json.dumps(postJson)
        # data.py writes its answer to text.txt; wait for completion, then
        # read the result back.
        proc = subprocess.Popen(['python', 'data.py', '--predict_params=' + stringPosted])
        proc.communicate()
        proc.wait()
        with open("text.txt") as g:
            retJson = json.load(g)
        return retJson
api.add_resource(Predict,'/predict')
if __name__ == "__main__":
    # Rotating log file (10 kB, 3 backups); DEBUG level for development.
    handler = RotatingFileHandler('app.log', maxBytes = 10000 , backupCount=3)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    app.run(host='0.0.0.0',debug=True)
|
# Read the target integer N from standard input.
N = int(input())
# 約数をリストで返す
# 約数をリストで返す
def divisor(N):
    """Return all divisors of N in ascending order, by trial division up
    to sqrt(N), collecting each divisor together with its complement."""
    small, large = [], []
    d = 1
    while d * d <= N:
        if N % d == 0:
            small.append(d)
            if d * d != N:       # avoid adding a square root twice
                large.append(N // d)
        d += 1
    return sorted(small + large)
# Find the divisor pair (a, b) with a*b == N that minimises a + b - 2
# (the minimal number of moves for an a-by-b grid).
l = divisor(N)
res = N
for i in range(len(l)):
    a, b = l[i], N//l[i]
    res = min(a+b-2, res)
print(res)
import os
import json
# Python 2 script: reduce each feature of source\UpperDarby.geojson to the
# record schema used downstream and write the list to converted\UpperDarby.geojson.
currentDirPath = os.path.dirname(__file__)
# NOTE(review): the backslash path below would be an invalid '\U' unicode
# escape under Python 3 — use os.path.join / raw strings when porting.
jsonFile = open(os.path.join(currentDirPath, 'source\UpperDarby.geojson'))
jsonString = jsonFile.read()
jsonParsed = json.loads(jsonString)
mappedRecords = []
for feature in jsonParsed["features"]:
    sourceProperties = feature["properties"]
    record = {}
    record["zipcode"] = sourceProperties["ZIP"]
    # Placeholder fields to be filled in by a later stage.
    record["cityId"] = ""
    record["percentageAdoptedStreets"] = 0
    record["totalAdoptedStreets"] = 0
    record["totalStreets"] = 0
    record["geometry"] = feature["geometry"]
    mappedRecords.append(record)
print "Exporting.."
with open(os.path.join(currentDirPath, 'converted\UpperDarby.geojson'), 'w') as outfile:
    json.dump(mappedRecords, outfile)
print "Done"
def solution(citations):
    """Return the h-index: the largest h such that at least h papers have
    h or more citations. Sorts *citations* in place (descending), matching
    the original's side effect."""
    citations.sort(reverse=True)
    best = 0
    for rank, cites in enumerate(citations, start=1):
        if cites >= rank:
            best = rank
    return best
# Sanity checks: expected h-indices are 3, 3 and 2 respectively.
print(solution([3, 0, 6, 1, 5]))
print(solution([7, 3, 3, 3, 3]))
print(solution([7, 3]))
|
import os
fName = 'Hello.txt'
fPath = 'Users/slims/Documents'
# NOTE(review): fPath has no leading separator, so the joined result is a
# *relative* path despite the variable name abPath ("absolute path").
abPath = os.path.join(fPath, fName)
print(abPath)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 17:10:14 2018
@author: OPHIR - Josh Clark
"""
import os
import pandas as pd
import xlwt
import xlrd
import re
import datetime
# 0. Set variables
#Set size of basket
sstkcount = 50
#Set borrow cost threshold
bct = 0.05
#Set total short weight
sw = -0.50
#Define Fundamental Short stocks
fs = ['KGN']
#Set fundamental short basket weight
fsw = -0.01865
#Set basket short weight (total short minus the fundamental short allocation)
bsw = sw - fsw
# 1. Get the weighted average market cap of the portfolio
os.chdir('S:\Ophir\Investment\Quant\Python\Portfolio Data\\')
# List all files and directories in current directory
# (valuation exports follow the naming pattern VAL-FTS_Ophir_YYYYMMDD.xls)
filelist = [filename for filename in os.listdir('.') if filename.startswith("VAL-FTS_Ophir_")]
# Matches the 8-digit YYYYMMDD stamp embedded in valuation file names.
date_pattern = re.compile(r'(?P<date>\d{8})')

def get_date(filename):
    """Extract the YYYYMMDD date embedded in *filename* as a datetime,
    or None when no 8-digit stamp is present."""
    found = date_pattern.search(filename)
    if found is None:
        return None
    return datetime.datetime.strptime(found.group('date'), "%Y%m%d")
# Pick the most recent valuation file by the date embedded in its name.
dates = (get_date(fn) for fn in filelist)
dates = (d for d in dates if d is not None)
last_date = max(dates)
file = 'VAL-FTS_Ophir_'+last_date.strftime('%Y%m%d')+'.xls'
# Load spreadsheet
xl = pd.ExcelFile('S:\Ophir\Investment\Quant\Python\Portfolio Data\\' + file)
# Load a sheet into a DataFrame by name: df
df = xl.parse('Sheet1')
#Get Date
date=df.at[0,'As At Date']
#Drop rows with Apps accountin them
df = df[df['Asset Name'] != 'OAMARFAPPS']
dfcash = df[df[' Analysis Group 1'] == 'Cash']
df = df[df[' Analysis Group 1'] != 'Cash']
# Collapse all cash rows into a single synthetic 'Cash' holding per portfolio.
summary=dfcash.groupby('Portfolio').sum()
summary['Average Cost'] = 1
summary['Market Value'] = 1
summary['Asset'] = 'Cash'
summary['Asset Name'] = 'Cash'
summary['Portfolio'] = 'Cash'
summary['Portfolio']=summary.index
date=df.at[0,'As At Date']
summary['As At Date'] = date
dfi=df.append(summary)
# Restrict to the OAMARF portfolio and compute portfolio weights.
df=dfi[dfi['Portfolio'] == 'OAMARF']
df['Port Weight'] = df['Market Value.1']/df['Market Value.1'].sum()
#Set portfolio FUM
OAMARFFUM = df['Market Value.1'].sum()
#Portfolio FUM minus Cash value = equities value
OAMARFFUM_EQ = OAMARFFUM - df.loc[df['Asset'] == 'Cash']['Market Value.1']
df['eqweight']=OAMARFFUM_EQ[0]
df['eqweight']=df['Market Value.1']/OAMARFFUM_EQ[0]
file = 'Bloomberg Consensus Database 3.0 (Values Only).xlsm'
# Load spreadsheet
# Open the file
xl = pd.ExcelFile('S:\Ophir\Investment\Quant\Python\External Data\\' + file)
# Get the first sheet as an object
dfbb = xl.parse(sheetname='dataimport',skiprows=3)
dfbb = dfbb.drop(dfbb.index[[0,1]])
dfbb = dfbb.loc[~dfbb.index.duplicated(keep='first')]
#df['Mcap'] = df['Asset'].map(dfbb.set_index('Unnamed: 0')['Mcap'])
file = 'mcaps.xlsx'
xl = pd.ExcelFile('S:\Ophir\Investment\Quant\Python\External Data\\' + file)
# Load a sheet into a DataFrame by name: df
dfmcaps = xl.parse('Sheet1')
# Map each holding's market cap onto the portfolio frame, then compute the
# portfolio's weighted average market cap.
df['Mcap'] = df['Asset'].map(dfmcaps.set_index('Code')['Mcap'])
wgtavmcap=df['Mcap']*df['eqweight']
wgtavmcap=wgtavmcap.sum()
# 2. import small ords
indexfile = 'Short Hedge.xlsm'
xl = pd.ExcelFile('S:\Ophir\Investment\Quant\Python\External Data\\' + indexfile)
df3 = xl.parse('XSO')
#Remove long holdings from universe
df3 = df3[-df3['Security'].isin(dfi['Asset'])]
#Remove super threshold borrow cost stocks from universe
##Import Borrow Cost
indexfile = 'Stock Borrow.xlsx'
xl = pd.ExcelFile('S:\Ophir\Investment\Quant\Python\External Data\\' + indexfile)
df5 = xl.parse('Sheet1')
df5['Security']=df5.Security.str.replace('.AX','')
df3['Indic Fee'] = df3['Security'].map(df5.set_index('Security')['Indic Fee'])
df3 = df3[df3['Indic Fee']<bct]
df3 = df3[['Security','Date','MarketCapitalisationEOD','ClosePrice','Indic Fee']]
#create new column
df3['wgtdmcap']= 'NaN'
# add nothing for first 50, then weighted av market cap thereafter
i = df3['wgtdmcap'].count()
df3 = df3.sort_values(['MarketCapitalisationEOD'], ascending=[0])
df3=df3.reset_index(drop=True)
m1=[]
# Slide a 50-stock window down the cap-sorted universe, recording each
# window's weighted average market cap.
for i in range(0,i-sstkcount):
    #Test - find weighted mcap of previous 50
    dfwgt = df3[0+i:i+sstkcount]
    dfwgt['Weight'] = dfwgt['MarketCapitalisationEOD']/dfwgt['MarketCapitalisationEOD'].sum()
    dfwgt['wgtdmcap'] = dfwgt.Weight*dfwgt.MarketCapitalisationEOD
    #bwm = basket weighted mcap
    m0 = dfwgt['wgtdmcap'].sum()
    m1.append(m0)
# Choose the window whose weighted mcap best matches the portfolio's
# (wgtavmcap is presumably in A$m, hence the *1000000 — TODO confirm units).
x=min(m1, key=lambda x:abs(x-wgtavmcap*1000000))
x=m1.index(x)
s1 = df3.iloc[x:x+sstkcount]
# Cap-weighted short weights and target share counts for the chosen basket.
s1['Short Weight'] = bsw * s1['MarketCapitalisationEOD']/s1['MarketCapitalisationEOD'].sum()
s1['Target Shares'] = round(s1['Short Weight']*OAMARFFUM/s1['ClosePrice']*100)
col_list=['Security','Target Shares','Short Weight']
#Calculate borrow cost (bc)
bc = s1['Short Weight']*s1['Indic Fee']
borrow_cost=bc.sum()
s1=s1[col_list]
s1.to_csv('S:\Ophir\Investment\Quant\Python\Output\Short Weights.csv',index=False)
#Find row where market cap
#Create Exceptions List & remove from data frame e.g. no avail or high borrow cost
# 3. Remove Portfolio Holdings
# 4. List Weighted Average Market Cap for the 50 stocks above the adjacent stock
# 5. Pick out the stock number and generate the portfolio
#
|
import json
from util_chat import send_prompt
from util_json import save_json_to_file
# Demo text for the "clear and specific instructions" prompting principle.
# Plain string literal: there is nothing to interpolate, so the original's
# unnecessary f-prefix was removed (ruff F541).  The text itself is unchanged.
text = """
You should express what you want a model to do by \
providing instructions that are as clear and \
specific as you can possibly make them. \
This will guide the model towards the desired output, \
and reduce the chances of receiving irrelevant \
or incorrect responses. Don't confuse writing a \
clear prompt with writing a short prompt. \
In many cases, longer prompts provide more clarity \
and context for the model, which can lead to \
more detailed and relevant outputs.
"""
# Ask the model to summarize the text above; this stays an f-string because
# it interpolates {text}.
prompt = f"""
Summarize the text delimited by triple backticks \
into a single sentence.
```{text}```
"""
send_prompt(prompt)
# Second demo: request structured JSON output and persist it to disk.
# (Also a plain literal - no placeholders, f-prefix removed.)
prompt = """
Generate a list of top 50 sci-fi book titles along \
with their authors and genres.
Provide a summary.
Provide the result in JSON format with the following keys:
book_id, title, author, genre, summary.
"""
rsp = send_prompt(prompt)
# NOTE(review): json.loads raises ValueError if the model reply is not
# pure JSON - there is no fallback handling here.
save_json_to_file(json.loads(rsp), "./data/top-sci-fi-books.json")
|
import numpy as np
import math
import matplotlib.pyplot as plt
def findProducts(index,x,value):
    """Return (value - x[0]) * (value - x[1]) * ... * (value - x[index-1]).

    This is the Newton-form basis polynomial term of order `index`; an
    empty product (index == 0) is 1.
    """
    result = 1
    for k in range(index):
        result *= value - x[k]
    return result
def calculateTable(x,y,n):
    """Fill `y` in place with Newton divided differences.

    Column 0 of the n-by-n table `y` holds the sample values; column
    `order` receives the order-th divided differences built from the
    previous column and the abscissae `x`.
    """
    for order in range(1, n):
        for row in range(n - order):
            numer = y[row][order - 1] - y[row + 1][order - 1]
            denom = x[row] - x[row + order]
            y[row][order] = numer / denom
def findValue(value,x,y,n):
    """Evaluate the Newton interpolating polynomial at `value`.

    `y` is the divided-difference table from calculateTable; the result is
    y[0][0] + sum_i y[0][i] * prod_{k<i}(value - x[k]).  The basis product
    is accumulated incrementally instead of recomputed per term, which
    also works elementwise when `value` is a numpy array.
    """
    basis = 1
    total = y[0][0]
    for i in range(1, n):
        basis = basis * (value - x[i - 1])
        total = total + basis * y[0][i]
    return total
def findTable(values):
    """Build the Newton divided-difference table for `values`.

    values: sequence of (x, y) sample pairs.
    Returns (x, y, n): the list of abscissae, the n-by-n divided-difference
    table (column 0 holds the sample values, filled by calculateTable) and
    the number of points.

    Generalized: `n` is now taken from len(values) instead of the previous
    hard-coded 5, so any number of samples works; behaviour for 5-point
    input is unchanged.
    """
    n = len(values)
    x = [0] * n
    y = np.zeros((n, n))
    for i in range(n):
        x[i] = values[i][0]
        y[i][0] = values[i][1]
    calculateTable(x, y, n)
    return x, y, n
def plotGraph(xx,yy,n,lower,upper):
    """Plot the interpolating polynomial together with its data points.

    xx/yy: abscissae and divided-difference table (as produced by
    calculateTable); n: number of sample points; lower/upper: y-axis limits.
    """
    # Dense evaluation grid and the polynomial values on it.
    grid = np.arange(0, 40, 0.05)
    curve = findValue(grid, xx, yy, n)
    # Original sample values live in column 0 of the table.
    samples = [yy[k][0] for k in range(n)]
    plt.scatter(xx, samples, color='red')
    plt.ylim([lower, upper])  # limiting y axis value
    plt.plot(grid, curve, color='green', linestyle='dashed', linewidth=3)
    # Axes, grid and labels.
    plt.grid(True, which='both')
    plt.axhline(y=0, color='blue')
    plt.axvline(x=0, color='blue')
    plt.xlabel('x - axis')
    plt.ylabel('y - axis')
    plt.show()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # (time [s], mass) samples to interpolate with Newton divided differences.
    mass = [[19,1203],[22,1245],[26,1378],[28,1315],[30,1475]]
    # --- 3rd-order estimate using the first 4 samples ---
    n = 4
    x = [0]*n
    dimensions = (n, n)
    y = np.zeros(dimensions)
    # BUG FIX: this loop previously ran range(1, n), leaving x[0] = 0 and
    # y[0][0] = 0, so the polynomial silently interpolated through the
    # spurious point (0, 0) instead of the first data point.
    for i in range(n):
        x[i] = mass[i][0]
        y[i][0] = mass[i][1]
    calculateTable(x,y,n)
    Result3 = findValue(25,x,y,n)
    # --- 4th-order estimate using all 5 samples ---
    n = 5
    x = [0] * n
    dimensions = (n, n)
    y = np.zeros(dimensions)
    for i in range(n):
        x[i] = mass[i][0]
        y[i][0] = mass[i][1]
    calculateTable(x, y, n)
    Result4 = findValue(25, x, y, n)
    # Relative difference between the two estimates, as a percentage.
    error = math.fabs(Result4-Result3)/Result4*100
    print("The mass at 25 sec is",Result4)
    print("Absolute Relative Error", error,"%")  # fixed "Abosulte" typo
    # BUG FIX: plotGraph takes 5 arguments; the old call plotGraph(x, y, 5)
    # raised TypeError.  Limits chosen to bracket the data (1203..1475).
    plotGraph(x, y, 5, 1100, 1600)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.mail import send_mail, EmailMessage
from optparse import make_option
from texas.models import *
from datetime import datetime
import random
import string
import time
class Command(BaseCommand):
    """Draw random lottery entries and email each winner a one-time code.

    Usage: ``manage.py <command> <tier_id> <count>``

    NOTE(review): Python 2 code (print statements) against an old Django
    management-command API (positional ``args``).
    """
    args = "<tier_id> <count>"
    help = "Send lottery notification to purchasers"
    def handle(self, *args, **options):
        # NOTE(review): tier_id is parsed but never used when filtering the
        # Chance queryset - confirm whether the filter should also match
        # chance.tier.
        tier_id = args[0]
        count = int(args[1])
        # Entries not yet drawn (no queue_code), in random database order.
        chances = Chance.objects.filter(
            queue_code__isnull=True).order_by('?')
        #chances = chances.filter(user__email='bsarsgard@slightlymad.net')
        chances = chances[:count]
        # Alphanumeric alphabet for the 10-character one-time codes.
        chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
        for chance in chances:
            print chance.user.email
            # Mark the entry as drawn by storing its code before emailing,
            # so a send failure cannot leave an un-coded winner.
            code = ''.join(random.choice(chars) for _ in range(10))
            chance.queue_code = code
            chance.save()
            event = chance.tier.occurrence.event
            site = Site.objects.get(id=settings.SITE_ID)
            subject = "Your %s lottery entry" % event.label
            from_address = "tickets@playadelfuego.org"
            to_address = [chance.user.email]
            body = """Congratulations! Your lottery entry has been selected to proceed!
Your entry will expire 72 hours from the date on this email, so please proceed as soon as you can. Once you have selected your number of tickets, your lottery code will be considered used, and you will not be able to complete a second purchase, so be sure you have everything in order before proceeding.
You will need to be logged in with the account you used to enter the lottery, so please do so before clicking the link. We also suggest logging into PayPal ahead of time to be sure your account is working.
http://%s/buy/?show=show&code=%s
Thank You!""" % (
                site.domain,
                chance.queue_code,
            )
            email = EmailMessage(subject, body, from_address, to_address)
            email.send()
            # Throttle outgoing mail to one message per second.
            time.sleep(1)
        print 'done'
|
""" utility functions"""
import re
import os
from os.path import basename
import subprocess
import gensim
import torch
from torch import nn
from evaluate import eval_rouge
from ConfManager import ConfManager
def count_data(path):
    """Return how many files in `path` are named like ``<digits>.json``."""
    is_data_file = re.compile(r'[0-9]+\.json').match
    return sum(1 for name in os.listdir(path) if is_data_file(name))
# Reserved ids for the four special tokens.
PAD = 0
UNK = 1
START = 2
END = 3
def make_vocab(wc, vocab_size):
    """Build a word -> id mapping from a word counter.

    wc: a collections.Counter-like object of word frequencies.
    The four special tokens take ids 0-3; the `vocab_size` most frequent
    words follow from id 4.  (The original also built an `id2word` dict
    that was never populated or returned; that dead local is removed.)
    """
    word2id = {
        '<pad>': PAD,
        '<unk>': UNK,
        '<start>': START,
        '<end>': END,
    }
    for i, (w, _) in enumerate(wc.most_common(vocab_size), 4):
        word2id[w] = i
    return word2id
def make_embedding(id2word, w2v_file, initializer=None):
    """Create an nn.Embedding weight initialised from a word2vec model.

    The embedding dimension is parsed from the file name, which follows
    the ``word2vec.{dim}d.{vsize}k.bin`` convention.  START/END rows are
    taken from the model's '<s>' / '<\\s>' vectors; words missing from
    the word2vec vocabulary keep their (optionally initialised) rows and
    their ids are returned in `oovs`.
    """
    name_parts = basename(w2v_file).split('.')
    emb_dim = int(name_parts[-3][:-1])  # e.g. '128d' -> 128
    w2v = gensim.models.Word2Vec.load(w2v_file).wv
    embedding = nn.Embedding(len(id2word), emb_dim).weight
    if initializer is not None:
        initializer(embedding)
    oovs = []
    with torch.no_grad():
        for idx in range(len(id2word)):
            # NOTE: id2word can be list or dict
            if idx == START:
                embedding[idx, :] = torch.Tensor(w2v['<s>'])
            elif idx == END:
                embedding[idx, :] = torch.Tensor(w2v[r'<\s>'])
            elif id2word[idx] in w2v:
                embedding[idx, :] = torch.Tensor(w2v[id2word[idx]])
            else:
                oovs.append(idx)
    return embedding, oovs
def get_gpu_memory_map():
    """Query nvidia-smi and return GPUs sorted by (utilisation, free memory).

    Returns a list of ``(gpu_id, (mem_free_mib, util_percent))`` tuples,
    least-utilised GPUs first, ties broken by most free memory (the two
    sorts exploit sorted()'s stability).
    """
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.free,utilization.gpu',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    # SECURITY FIX: parse the "free, util" CSV lines explicitly instead of
    # eval()ing tool output; int() tolerates the surrounding whitespace, so
    # the resulting (free, util) tuples are identical to what eval produced.
    gpu_info = [tuple(int(v) for v in line.split(','))
                for line in result.strip().split('\n')]
    gpu_info = dict(zip(range(len(gpu_info)), gpu_info))
    # Most free memory first, then stable-sort by utilisation ascending.
    sorted_gpu_info = sorted(gpu_info.items(), key=lambda kv: kv[1][0], reverse=True)
    sorted_gpu_info = sorted(sorted_gpu_info, key=lambda kv: kv[1][1])
    print(f'gpu_id, (mem_left, util): {sorted_gpu_info}')
    return sorted_gpu_info
# Shared configuration (provides the reference-summary directories).
cm = ConfManager()
def calc_official_rouge(dec_dir, name):
    """Run the official ROUGE evaluation over decoded summaries in `dec_dir`.

    name: 'val' selects the REF04 reference set, anything else REF11.
    Returns {'R-1': .., 'R-2': .., 'R-L': .., 'R-SU4': ..} average F-scores.

    NOTE(review): if any expected "1 ROUGE-* Average_F" line is missing from
    the eval_rouge output, the corresponding local is never bound and the
    final dict construction raises UnboundLocalError.
    """
    if name == 'val':
        ref_dir = cm.REF04
    else:
        ref_dir = cm.REF11
    print(f'{name}: ref_dir={ref_dir}')
    # File-name patterns: decoded files "<id>.dec", references
    # "<id>.<letter>.ref".
    dec_pattern = r'(\d+).dec'
    ref_pattern = '#ID#.[A-Z].ref'
    output = eval_rouge(dec_pattern, dec_dir, ref_pattern, ref_dir)
    # print(output)
    # Pull the average F-scores out of the ROUGE report text
    # (4th whitespace-separated token of each summary line).
    for line in output.split('\n'):
        if line.startswith('1 ROUGE-1 Average_F'):
            r1 = float(line.split()[3])
        if line.startswith('1 ROUGE-2 Average_F'):
            r2 = float(line.split()[3])
        if line.startswith('1 ROUGE-L Average_F'):
            rl = float(line.split()[3])
        if line.startswith('1 ROUGE-SU4 Average_F'):
            rsu4 = float(line.split()[3])
    R = {'R-1': r1, 'R-2': r2, 'R-L': rl, 'R-SU4': rsu4}
    print(R, '\n')
    return R
def print_config(config, logger=None):
    """Log (or print, when no logger is given) every attribute of `config`.

    config: any object whose attributes can be read via vars(), e.g. an
    argparse.Namespace.
    """
    lines = ["Running with the following configs:\n"]
    for key, val in vars(config).items():
        lines.append("\t{} : {}\n".format(key, str(val)))
    message = "\n" + "".join(lines) + "\n"
    if logger:
        logger.info(message)
    else:
        print(message)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.