blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f51071d12e02b45ba50607bc276a85e47ac54b4 | 671b7430e866fdb67b5234c08a195ac394ce636c | /ml/mltuts/11-outliers.py | 8d4a6ce6a0230a95d30acde1d5c8b8ce0b12ef3f | [] | no_license | ranedk/tuts | 6c84ec3bcb2042c4ee29249a3ad91ccf5bb2723e | 7bdcdd6207ca5bfd8c6e181f6f384a57c1778546 | refs/heads/master | 2023-08-07T11:36:00.490950 | 2023-07-30T09:21:50 | 2023-07-30T09:21:50 | 180,953,486 | 1 | 2 | null | 2023-05-02T00:29:10 | 2019-04-12T07:22:47 | Jupyter Notebook | UTF-8 | Python | false | false | 1,591 | py | import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
df = pd.read_csv("data/Salary.csv")
# To check the distribution (histogram with 20 bins)
df['Salary'].hist(bins=20)
# The distribution of the raw data doesn't look normal.
# Ways to normalize it: log, sqrt, cube root transforms.
df['Salary_log'] = np.log(df['Salary'])
df['Salary_log'].hist(bins=20)
# The log-transformed distribution looks fairly normal (gaussian).
df['Salary_sqrt'] = np.sqrt(df['Salary'])
df['Salary_sqrt'].hist(bins=20)
# The sqrt-transformed distribution looks gaussian with a left bias.
df['Salary_cubert'] = np.cbrt(df['Salary'])
df['Salary_cubert'].hist(bins=20)
# The cube-root-transformed distribution looks gaussian with a left bias.
# ------------------------------------------------
# Elliptic envelope: fits an ellipse around the data; everything outside
# the ellipse is treated as an outlier.
from sklearn.covariance import EllipticEnvelope
# Toy 2-D dataset: the point (100, 100) is an obvious outlier.
X = np.array([
    [100, 100],
    [1, 1],
    [2, 4],
    [4, 5],
    [6, 4],
    [8, 4],
    [6, 2],
    [4, 8],
    [3, 5],
    [7, 2]
])
# contamination=0.1 means we expect ~10% of the points to be outliers.
outlier = EllipticEnvelope(contamination=0.1).fit(X)
prediction1 = outlier.predict(X)
# prediction: array([-1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
# (100, 100) doesn't fit inside the ellipse, so it is flagged with -1.
# Applying it on the salary dataset:
# Let's try to find out if age x salary for someone is an outlier,
# i.e. someone is getting paid a lot MORE for his age.
# Doesn't capture outliers which are below a threshold.
# NOTE(review): assumes columns 1 and 2 of the CSV are age and salary --
# confirm against data/Salary.csv.
features = df.iloc[:, [1, 2]].values
outlier = EllipticEnvelope(contamination=0.1).fit(features)
prediction2 = outlier.predict(features)
df['outliers'] = prediction2
# Rows flagged as outliers (prediction == -1)
df[df['outliers'] == -1]
| [
"ranedk@gmail.com"
] | ranedk@gmail.com |
99b4530d9de437b92c524eb5dacc65c526cb7780 | 469569104ec4c17e14cf7789f2a7c1b7e165e4be | /myenv/bin/celeryd-multi | abfacae72490d7b771098239492a578c68b0a5e4 | [] | no_license | ArushiSinghal/Energy_portal | d7acbfdc378335cb7467768055391c522c366451 | 83b166cacf6b78b86e978724ce518d8a6ff3d54f | refs/heads/master | 2022-12-14T22:50:48.910099 | 2017-03-10T09:22:31 | 2017-03-10T09:22:31 | 83,199,262 | 0 | 1 | null | 2022-11-22T01:05:48 | 2017-02-26T10:06:28 | Python | UTF-8 | Python | false | false | 253 | #!/home/simran/evs/myenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from celery.__main__ import _compat_multi
if __name__ == '__main__':
    # Strip a "-script.py(w)"/".exe" suffix that setuptools wrappers add on
    # Windows, so argv[0] shows the bare command name in help/usage output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_compat_multi())
| [
"singhalsimran0@gmail.com"
] | singhalsimran0@gmail.com | |
9e20c44700047479c01f6cdeb7fbfcafb618f3b9 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/show_asset_meta_response.py | 17beda5d5d2fc1bd79e8b76d4ed6bfa0f640b853 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,451 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAssetMetaResponse(SdkResponse):
    """Response model for the ShowAssetMeta API (huaweicloud VOD SDK).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values must be masked as "****" in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'asset_info_array': 'list[AssetInfo]',
        'is_truncated': 'int',
        'total': 'int'
    }
    attribute_map = {
        'asset_info_array': 'asset_info_array',
        'is_truncated': 'is_truncated',
        'total': 'total'
    }
    def __init__(self, asset_info_array=None, is_truncated=None, total=None):
        """ShowAssetMetaResponse - a model defined in huaweicloud sdk"""
        super(ShowAssetMetaResponse, self).__init__()
        self._asset_info_array = None
        self._is_truncated = None
        self._total = None
        self.discriminator = None
        # Only assign through the property setters when a value was supplied,
        # leaving unset attributes as None.
        if asset_info_array is not None:
            self.asset_info_array = asset_info_array
        if is_truncated is not None:
            self.is_truncated = is_truncated
        if total is not None:
            self.total = total
    @property
    def asset_info_array(self):
        """Gets the asset_info_array of this ShowAssetMetaResponse.
        List of media asset information entries.
        :return: The asset_info_array of this ShowAssetMetaResponse.
        :rtype: list[AssetInfo]
        """
        return self._asset_info_array
    @asset_info_array.setter
    def asset_info_array(self, asset_info_array):
        """Sets the asset_info_array of this ShowAssetMetaResponse.
        List of media asset information entries.
        :param asset_info_array: The asset_info_array of this ShowAssetMetaResponse.
        :type: list[AssetInfo]
        """
        self._asset_info_array = asset_info_array
    @property
    def is_truncated(self):
        """Gets the is_truncated of this ShowAssetMetaResponse.
        Whether the result list was truncated. Values: 1 - this query did not return the full result set; 0 - all results were returned.
        :return: The is_truncated of this ShowAssetMetaResponse.
        :rtype: int
        """
        return self._is_truncated
    @is_truncated.setter
    def is_truncated(self, is_truncated):
        """Sets the is_truncated of this ShowAssetMetaResponse.
        Whether the result list was truncated. Values: 1 - this query did not return the full result set; 0 - all results were returned.
        :param is_truncated: The is_truncated of this ShowAssetMetaResponse.
        :type: int
        """
        self._is_truncated = is_truncated
    @property
    def total(self):
        """Gets the total of this ShowAssetMetaResponse.
        Total number of media assets matched. Note: at most 20,000 assets are counted; to obtain the exact total, submit a service ticket (https://console.huaweicloud.com/ticket/?#/ticketindex/business?productTypeId=462902cc39a04ab3a429df872021f970).
        :return: The total of this ShowAssetMetaResponse.
        :rtype: int
        """
        return self._total
    @total.setter
    def total(self, total):
        """Sets the total of this ShowAssetMetaResponse.
        Total number of media assets matched. Note: at most 20,000 assets are counted; to obtain the exact total, submit a service ticket (https://console.huaweicloud.com/ticket/?#/ticketindex/business?productTypeId=462902cc39a04ab3a429df872021f970).
        :param total: The total of this ShowAssetMetaResponse.
        :type: int
        """
        self._total = total
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes in serialized output.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force utf-8 default encoding so non-ASCII
            # (e.g. Chinese) field values serialize without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowAssetMetaResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
01f03677c1c199afc3763b2adf640bf22524e264 | 8e69eee9b474587925e22413717eb82e4b024360 | /v2.5.7/toontown/coghq/CashbotMintLavaRoomFoyer_Action01.py | b7cfc755c82949fc1587ac270f73a11b84148d12 | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 9,037 | py | from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a',
'wantDoors': 1},
1001: {'type': 'editMgr', 'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone', 'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10000: {'type': 'attribModifier', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': ''},
10001: {'type': 'attribModifier', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'attribName': 'scale',
'recursive': 1,
'typeName': 'model',
'value': 'Vec3(.955,1,1)'},
10019: {'type': 'attribModifier', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10015,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': ''},
10006: {'type': 'gear', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': -4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0},
10007: {'type': 'gear', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 4.28999996185),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': 4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0},
10009: {'type': 'gear', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 8.57999992371),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': -4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0.055},
10014: {'type': 'gear', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 12.8699998856),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': 4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0.06},
10018: {'type': 'healBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10017,
'pos': Point3(-2.03643107414, 2.34967470169, 5.46433734894),
'hpr': Vec3(34.1522636414, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10002: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(6.5, 6.5, 6.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/RoundShadow'},
10005: {'type': 'model', 'name': 'doorwayCrate',
'comment': '',
'parentEntId': 0,
'pos': Point3(27.0090961456, 0.850000023842, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10008: {'type': 'model', 'name': 'shaft',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 7.25891637802),
'hpr': Vec3(0.0, 0.0, 180.0),
'scale': Vec3(5.35842609406, 5.35842609406, 5.35842609406),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cashbotHQ/MintGearPost'},
10010: {'type': 'model', 'name': 'middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10011: {'type': 'model', 'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(-5.72357320786, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10012: {'type': 'model', 'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(5.71999979019, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10013: {'type': 'model', 'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(11.4399995804, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10015: {'type': 'model', 'name': 'crateStack',
'comment': '',
'parentEntId': 0,
'pos': Point3(-18.0376968384, 20.2023410797, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10016: {'type': 'model', 'name': 'upper',
'comment': '',
'parentEntId': 10015,
'pos': Point3(0.0, 0.0, 5.42841148376),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10017: {'type': 'model', 'name': 'copy of upper',
'comment': '',
'parentEntId': 10016,
'pos': Point3(0.0, 0.0, 5.43412637711),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10021: {'type': 'model', 'name': 'crateStack',
'comment': '',
'parentEntId': 10020,
'pos': Point3(21.064825058, 20.1899757385, 9.87216758728),
'hpr': Vec3(270.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1'},
10003: {'type': 'nodepath', 'name': 'gears',
'comment': '',
'parentEntId': 0,
'pos': Point3(-3.18650078773, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10004: {'type': 'nodepath', 'name': 'wall',
'comment': '',
'parentEntId': 0,
'pos': Point3(19.5468139648, 6.37875938416, 0.0),
'hpr': Point3(270.0, 0.0, 0.0),
'scale': Vec3(1.95812249184, 1.5, 1.79999995232)},
10020: {'type': 'nodepath', 'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [
Scenario0]} | [
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
400aae5cd47147d9b4310c759e23ff7db6ffa555 | c61ab3f95fca96c418f97463f2fbe7dcea42bf30 | /venv/Scripts/easy_install-script.py | d0d10e9ff49f95fc9aa2dea1aa02fc874c0172b7 | [] | no_license | Devilins/Labs_4_kurs | 42a3e2553762f09c8ff954c8cab463bc4d5e7099 | 3522e105a992c9ff56345ae61304f5f0d99b6972 | refs/heads/master | 2020-09-26T19:43:49.567028 | 2019-12-06T19:40:44 | 2019-12-06T19:40:44 | 226,329,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #!C:\Users\Alexandr\PycharmProjects\Labs_4_kurs\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a "-script.py(w)"/".exe" suffix that setuptools wrappers add on
    # Windows, so argv[0] shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"krivovalex4@yandex.ru"
] | krivovalex4@yandex.ru |
ab60af3ff7bcccdcbad6c642b7178894e8e9f40f | 9f8b5b5b84b2d27b392d04c0d823e8c94e2465da | /gravtools/constants/__init__.py | dbb075b547eb4199bb5424a86376c4047321ada1 | [
"MIT"
] | permissive | JWKennington/gravtools | 55f31e64100592a433fc67c520edae7578907711 | 188229ed2061958012cf7338a5eebd2ef0a399cc | refs/heads/master | 2020-09-07T10:33:51.068154 | 2019-11-11T06:10:36 | 2019-11-11T06:10:36 | 220,752,808 | 0 | 0 | MIT | 2019-11-11T06:10:37 | 2019-11-10T06:49:48 | Python | UTF-8 | Python | false | false | 110 | py | """Flatten the constants package"""
from .merger import MergerParameters
from .observatory import Observatory | [
"jameswkennington@gmail.com"
] | jameswkennington@gmail.com |
bcf64e30397c858e6d836ba934690acae6d3ab38 | 4ab14cba7b27d58ae8f6e01525141db9d9044e36 | /GMMSegmentation.py | 0c067edff0c53e6510771b936692b231ff2c4897 | [] | no_license | bairlie/FinalYearProject | 86b70b3ad40cf8cdd915b45c5707c7e2c0fbfaed | 59e7ef5a96dc4e13800b88210e4359d3f1f48811 | refs/heads/master | 2021-04-29T17:40:11.537548 | 2018-04-29T12:59:32 | 2018-04-29T12:59:32 | 121,675,849 | 0 | 0 | null | 2018-04-29T12:59:33 | 2018-02-15T19:57:48 | Python | UTF-8 | Python | false | false | 827 | py | import cv2
from pylab import *
from sklearn.mixture import GaussianMixture
from PIL import Image
def segViaGMM(pil_image, segCol1, segCol2, startX, startY):
    """Two-class image segmentation via a Gaussian mixture model.

    Fits a 2-component GMM (tied covariance) to the per-pixel colour values,
    then paints every pixel in the same cluster as the seed pixel
    (startX, startY) with segCol1 and every other pixel with segCol2.

    :param pil_image: input PIL image (converted to RGB internally)
    :param segCol1: RGB triple for the seed pixel's cluster
    :param segCol2: RGB triple for the other cluster
    :param startX: seed column (x coordinate)
    :param startY: seed row (y coordinate)
    :return: new RGB PIL image with the two-colour segmentation
    """
    pil_image = pil_image.convert('RGB')
    im = np.array(pil_image)
    im = im[:, :, ::-1].copy()  # RGB -> BGR channel order before clustering
    width, height = pil_image.size
    # One row per pixel, one column per channel.
    newdata = im.reshape(width * height, 3)
    gmm = GaussianMixture(n_components=2, covariance_type="tied")
    gmm = gmm.fit(newdata)
    cluster = gmm.predict(newdata)
    cluster = np.reshape(cluster, (height, width))
    # Vectorized fill: the original iterated np.ndenumerate over every
    # (row, col, channel) element in Python (assigning each pixel 3 times);
    # a boolean mask produces the identical result in O(H*W) C-level work.
    data = np.zeros((height, width, 3), dtype=np.uint8)
    seed_mask = cluster == cluster[startY, startX]
    data[seed_mask] = segCol1
    data[~seed_mask] = segCol2
    img = Image.fromarray(data, 'RGB')
    return img
| [
"bennairlie@138-38-242-110.eduroam.bath.ac.uk"
] | bennairlie@138-38-242-110.eduroam.bath.ac.uk |
1b1728e77103b12b30fe66b7557a05d11ef55045 | 493c5a13f60cc44ebc8876a3feebbad0154ce14d | /env/bin/pip | 7203cbfb4765077a348777dc3a1cc98317f09569 | [] | no_license | rubeendaw/heroku-django-ruben | 27736047db6608a2ea23234613a432ad0e65a893 | 57d32d61e9547a10468e4e9fd754aeefe098a37c | refs/heads/master | 2022-07-01T00:44:32.226904 | 2020-02-19T22:45:03 | 2020-02-19T22:45:03 | 241,742,444 | 0 | 0 | null | 2022-05-25T04:11:36 | 2020-02-19T22:41:15 | Python | UTF-8 | Python | false | false | 273 | #!/home/rubeendaw/Documentos/Servidor/backend_django/env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
    # Strip a "-script.py(w)"/".exe" suffix that setuptools wrappers add on
    # Windows, so argv[0] shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"rubeendaw@gmail.com"
] | rubeendaw@gmail.com | |
30e99cd125126168a62391d1dd2870494f66f8d3 | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/comparison/test_optimize11.py | 419bdafcf7b28b46a1cc0c98248bc2b40b67c8d9 | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 2,279 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    All three tests exercise constant_memory (row-streaming) mode and must
    produce output identical to the Excel-authored reference workbook.
    """
    def setUp(self):
        # Reference workbook the generated file is compared against.
        self.set_filename("optimize11.xlsx")
    def test_create_file_no_close(self):
        """Test the creation of a simple XlsxWriter file."""
        # constant_memory=True flushes rows to disk as they are written
        # instead of holding the whole worksheet in memory.
        workbook = Workbook(
            self.got_filename, {"constant_memory": True, "in_memory": False}
        )
        for i in range(1, 10):
            worksheet = workbook.add_worksheet()
            worksheet.write("A1", "Hello 1")
            worksheet.write("A2", "Hello 2")
            worksheet.write("A4", "Hello 3")
        workbook.close()
        self.assertExcelEqual()
    def test_create_file_with_close(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(
            self.got_filename, {"constant_memory": True, "in_memory": False}
        )
        for i in range(1, 10):
            worksheet = workbook.add_worksheet()
            worksheet.write("A1", "Hello 1")
            worksheet.write("A2", "Hello 2")
            worksheet.write("A4", "Hello 3")
            # Explicitly close each streamed worksheet's temp row file.
            worksheet._opt_close()
        workbook.close()
        self.assertExcelEqual()
    def test_create_file_with_reopen(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(
            self.got_filename, {"constant_memory": True, "in_memory": False}
        )
        for i in range(1, 10):
            worksheet = workbook.add_worksheet()
            # Interleave close/reopen of the streamed temp file between
            # writes; the final output must be unaffected.
            worksheet.write("A1", "Hello 1")
            worksheet._opt_close()
            worksheet._opt_reopen()
            worksheet.write("A2", "Hello 2")
            worksheet._opt_close()
            worksheet._opt_reopen()
            worksheet.write("A4", "Hello 3")
            worksheet._opt_close()
            worksheet._opt_reopen()
            worksheet._opt_close()
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
488aafc76cbbeaf3ce2a1a63768193c39aec5aae | 3d5139c1f312413cd7f5627c2d22b1d2da2c773f | /projects/task.py | e4c053ad62691fdbbc27aa562f80e5882e31fdb3 | [] | no_license | sammasamadhi/CMDB_platform | 0a3d6f6afd1624933b5700d09dd1de582e0dfb6a | 05f5c189b7de5f4fe9ea5a62fc2e3516c2150fad | refs/heads/master | 2021-06-07T23:04:59.849957 | 2016-11-24T10:00:17 | 2016-11-24T10:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,988 | py | #^v^! coding: utf-8 ^v^!
__author__ = 'Alex hao'
import models as projects_models
import hosts.models as hosts_models
from django.db import transaction
from django.utils import timezone
from backends import ansible_project_task
from CMDB_platform import settings
import threading
class Task(object):
    """Dispatches project tasks (update / check / result queries) named by the
    request's POST ``task_type`` field, running ansible jobs in background
    threads and recording per-host results in the database."""
    def __init__(self,request):
        self.request = request
        self.task_type = self.request.POST.get("task_type")
    def handle(self):
        """Dispatch to the method named by task_type.

        Raises TypeError for an unknown task_type; returns None implicitly
        when task_type is missing from the POST data.
        """
        if self.task_type:
            if hasattr(self,self.task_type):
                func = getattr(self,self.task_type)
                return func()
            else:
                raise TypeError
    @transaction.atomic
    def _ansible_task(self):
        """Run the ansible job for this task and persist per-host results.

        Expects _insert_project_logs() to have populated host_ip_list,
        bind_hosts_user_id_list and task_obj first.
        """
        try:
            result = ansible_project_task.main(self.host_ip_list,self.ansible_module,self.ansible_args) # run the project's ansible task
            for bind_host_id in self.bind_hosts_user_id_list: # store each host's result in the database
                obj = projects_models.ProjectTaskLogDetail.objects.get(child_of_task_id = self.task_obj.id,bind_host_id = bind_host_id,)
                host_ip = hosts_models.BindHostToUser.objects.get(id=bind_host_id).host.lan_ip # host_ip is a unicode string
                # An rc of 0 means the remote script succeeded; a missing rc
                # (e.g. unreachable host) is treated as failure.
                if 'rc' in result[host_ip]:
                    rc = result[host_ip]['rc']
                    if rc == 0:
                        script_result = 'success'
                    else:
                        script_result = 'failed'
                else:
                    script_result = 'failed'
                # Prefer the error message, then stdout, then a placeholder.
                if 'msg' in result[host_ip]:
                    obj.event_log = result[host_ip]['msg']
                else:
                    if result[host_ip]['stdout']:
                        obj.event_log = result[host_ip]['stdout']
                    else:
                        obj.event_log = "no return information"
                obj.result = script_result
                obj.date = timezone.now()
                obj.save()
            self.task_obj.end_time = timezone.now() # record the finish time on the ProjectTaskLog
            self.task_obj.save()
        except Exception,e:
            print e
    @transaction.atomic # commit to the database in one transaction when the function completes
    def _insert_project_logs(self):
        """Create the ProjectTaskLog row plus one placeholder
        ProjectTaskLogDetail row per bound host, and collect the host id/ip
        lists used by _ansible_task()."""
        self.bind_hosts_user_id_list = []
        self.host_ip_list = []
        for bind_host in hosts_models.BindHostToGroup.objects.get(host_group=self.project_id).bind_hosts.all():
            self.bind_hosts_user_id_list.append(bind_host.id)
            self.host_ip_list.append(bind_host.host.lan_ip)
        bind_hosts_user_id_list = set(self.bind_hosts_user_id_list)
        self.task_obj = projects_models.ProjectTaskLog(
            task_type = self.task_type,
            user_id = self.request.user.id,
            belong_to_project_id = self.project_id
            # many-to-many relations must be added after the record is created.
        )
        self.task_obj.save()
        self.task_obj.hosts.add(*bind_hosts_user_id_list) # m2m add() takes the unpacked list of ids
        for bind_host_id in bind_hosts_user_id_list: # create a log-detail row per related host
            obj = projects_models.ProjectTaskLogDetail(
                child_of_task_id = self.task_obj.id,
                bind_host_id = bind_host_id,
                # Spinner image shown until the real result replaces it.
                event_log = '<img src="/static/css/plugins/jsTree/throbber.gif" alt="loadimage">',
            )
            obj.save()
    def update(self): # project update task
        """Deploy/update the project on all bound hosts via an ansible
        'script' run; returns {'task_id': ...} for progress polling."""
        self.project_id = self.request.POST.get("projectID")
        self.ansible_module = "script" # ansible module to invoke
        project_obj = projects_models.ProjectList.objects.get(id=self.project_id)
        project_title = project_obj.jetty_name
        project_info = project_obj.project_group.name
        project_name = project_obj.jetty_root
        project_port = project_obj.jetty_port
        mysql_server = project_obj.db.ip
        mysql_database = project_obj.db_name
        mysql_user = project_obj.db_user
        mysql_passwd = project_obj.db_pd
        mem_server = project_obj.memcached.ip
        mem_port = project_obj.mem_port
        PROJECT_PATH = project_obj.project_path
        CONFIG_FILE = project_obj.conf_path
        # positional arguments passed to the ansible 'script' module's script
        ansible_script_args = ' '+ '"%s"'%project_title +\
                              ' '+ '"%s"'%project_info +\
                              ' '+ '"%s"'%project_name +\
                              ' '+ '"%s"'%project_port +\
                              ' '+ '"%s"'%mysql_server +\
                              ' '+ '"%s"'%mysql_database +\
                              ' '+ '"%s"'%mysql_user +\
                              ' '+ '"%s"'%mysql_passwd +\
                              ' '+ '"%s"'%mem_server +\
                              ' '+ '"%s"'%mem_port +\
                              ' '+ '"%s"'%PROJECT_PATH +\
                              ' '+ '"%s"'%CONFIG_FILE
        self.ansible_args = settings.ProjectUpdateScript + ansible_script_args # full ansible module argument string
        self._insert_project_logs() # create the task log records
        t = threading.Thread(target=self._ansible_task,args=()) # run in a thread so the view does not block; results are stored when done
        t.start()
        return {'task_id':self.task_obj.id}
    def check(self): # project status check task
        """Run the project status-check script on all bound hosts; returns
        {'task_id': ...} for progress polling."""
        self.project_id = self.request.POST.get("projectID")
        self.ansible_module = "script" # ansible module to invoke
        project_obj = projects_models.ProjectList.objects.get(id=self.project_id)
        project_title = project_obj.jetty_name
        project_name = project_obj.jetty_root
        ansible_script_args = ' '+ '%s'%project_title +\
                              ' '+ '%s'%project_name
        self.ansible_args = settings.ProjectCheckScript + ansible_script_args
        self._insert_project_logs() # create the task log records
        t = threading.Thread(target=self._ansible_task,args=()) # run in a thread so the view does not block; results are stored when done
        t.start()
        return {'task_id':self.task_obj.id}
    def get_project_result(self):
        """Return the per-host log rows for a task as a list of dicts
        (None implicitly when no task_id is supplied)."""
        task_id = self.request.GET.get('task_id')
        if task_id:
            res_list = projects_models.ProjectTaskLogDetail.objects.filter(child_of_task_id=task_id)
            return list(res_list.values(
                'id',
                'bind_host__host__hostname',
                'bind_host__host__wan_ip',
                'bind_host__host__lan_ip',
                'bind_host__host_user__username',
                'date',
                'event_log',
                'result',
'result',
)) | [
"75898234@qq.com"
] | 75898234@qq.com |
bf8cf478cf2fd74650e5a0f3ce59c2b7ffb105dc | 8c30809325a4e18d7036e8b16ee16d7ae5b327fa | /app.py | 37f7bb2ccb0fdc3efdbbbd492e76763ba21763fb | [] | no_license | ZedJcel/lynis-autofix | 6629db093ccbd9be49e7a199b51485ed652d8960 | 0175f36efe69948ff88055dd557f2f7eb3cb40a4 | refs/heads/master | 2023-05-24T21:09:40.245478 | 2018-01-08T12:57:07 | 2018-01-08T12:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,603 | py | import json
import logging
import platform
import sys
import shlex
import time
import os
import re
import subprocess
from StringIO import StringIO
from pprint import pprint
## Logging stuff
datum = time.strftime("%d-%m-%Y-%H-%M-%S")  # run timestamp; also names the lynis report files
# logging.basicConfig() accepts either `filename` or `stream`, not both:
# Python 3 raises ValueError when both are passed (Python 2 silently ignored
# `stream`).  Log to the file here; console output is added via the explicit
# StreamHandler below.
logging.basicConfig(filename='%s-log.log' % (datum), format='%(asctime)s - %(name)s - %(levelname)s | %(message)s |', level=logging.INFO)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s | %(message)s |')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
listtodo = []  # suggestion ids harvested from the lynis report (filled by todolist())
def fix_yes_no(question, default="yes"):
    """Ask a yes/no question on the console and return the answer as a bool.

    :param question: prompt text shown to the user
    :param default: answer assumed when the user just presses Enter:
        "yes", "no", or None (no default; keep asking)
    :return: True for yes, False for no
    :raises ValueError: if default is not "yes", "no" or None
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # raw_input() only exists on Python 2; fall back to input() on Python 3
    # so the prompt works on either interpreter.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    while True:
        sys.stdout.write(question + prompt)
        choice = read_line().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def run_shell_command(command_line):
    """Spawn a command line as a subprocess and log its lifecycle.

    :param command_line: command string, split with shlex into argv
    :return: True when the process could be spawned (regardless of its exit
        status), False when spawning itself failed (e.g. command not found)
    """
    command_line_args = shlex.split(command_line)
    logging.info('Subprocess: "' + command_line + '"')
    try:
        # BUGFIX: the original passed shell=True together with an argument
        # list; on POSIX that executes only the first token and silently
        # drops the rest.  Running the split argv with shell=False executes
        # the command as intended and avoids shell-injection issues.
        command_line_process = subprocess.Popen(
            command_line_args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        process_output, _ = command_line_process.communicate()
    except (OSError) as exception:
        logging.info('Exception occured: ' + str(exception))
        logging.info('Subprocess failed')
        return False
    else:
        # no exception was raised
        logging.info('Subprocess finished')
        return True
def fixes():
    """Interactively offer each fix collected in the global ``listtodo``.

    For every suggestion id, loads ``json/<id>.json`` and prompts the user
    (via fix_yes_no) for each entry, logging the outcome either way.
    Missing or malformed json files are reported and skipped.
    """
    for todo in listtodo:
        try:
            # `with` guarantees the file handle is closed even if json
            # parsing fails (the original left it open).
            with open('json/%s.json' % todo[0]) as data_file:
                data = json.load(data_file)
            for fix in data:
                for d in fix:
                    # NOTE(review): indexes data[d] with keys taken from each
                    # `fix` entry; assumes the json maps suggestion keys to
                    # {id, Description, command} objects -- confirm against
                    # the json files' schema.
                    if fix_yes_no('Do you want to install %s - %s?' % (data[d]['id'], data[d]['Description']), default="yes"):
                        logging.info('We installed: %s :)' % (data[d]['id']))
                    else:
                        logging.warning('%s - %s is not installed' % (data[d]['id'], data[d]['command']))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; message typo ("excist") fixed.
            logging.critical('%s.json does not exist in the json directory' % todo[0])
def lynisupdate():
    """Install or refresh the lynis checkout in /usr/local/lynis.

    Pulls the existing git checkout when present, otherwise clones it.
    Command output is discarded; failures are detected via the exit status.
    """
    if os.path.exists("/usr/local/lynis"):
        status = os.system("cd /usr/local/lynis && git pull > /dev/null 2>&1")
        logging.info('Lynis updated')
    else:
        status = os.system("sudo git clone https://github.com/CISOfy/lynis.git /usr/local/lynis > /dev/null 2>&1")
        logging.info('Lynis Installed')
    if status != 0:
        # The original `else: logging.critical(...)` branch was unreachable
        # (os.path.exists always returns a bool); report real git failures
        # via the command's exit status instead.
        logging.critical('Could not update/download lynis')
def runlynis():
    """Run a lynis system audit and extract its suggestion lines.

    Writes <datum>-report.dat and greps the suggestion entries into
    <datum>-suggestion.txt under /usr/local/lynis for todolist() to parse.
    """
    logging.info('Generate Lynis Report bare with us :-)')
    status = os.system("cd /usr/local/lynis && sudo ./lynis audit system -q --auditor 'Lynis-autofix' --report-file /usr/local/lynis/%s-report.dat > /dev/null 2>&1 && cat /usr/local/lynis/%s-report.dat | grep suggestion > /usr/local/lynis/%s-suggestion.txt " % (datum, datum, datum))
    if status != 0:
        # os.system() almost never raises, so the original bare try/except
        # could not fire; check the exit status to detect failures instead.
        logging.critical('Could not create report from lynis')
def todolist():
    """Collect suggestion ids from the generated suggestion file.

    Each report line contributes its list of regex captures (usually one id)
    to the global ``listtodo``; fixes() later reads matches[0] per entry.
    """
    # NOTE(review): [A-z-0-9] is an odd character class (A-z also spans some
    # punctuation); kept as-is to preserve the original matching behaviour.
    regex = r"suggestion\[\]=([A-z-0-9]+)\|"
    # `with` guarantees the handle is closed even on error (the original
    # opened/closed it manually and shadowed the builtin name `file`).
    with open("/usr/local/lynis/%s-suggestion.txt" % datum, "r") as report:
        for row in report:
            matches = re.findall(regex, row)
            listtodo.append(matches)
def main():
    """Entry point: verify the platform, then update, run and apply lynis.

    Exits immediately on non-Linux systems; otherwise runs the full
    update -> audit -> parse -> interactive-fix pipeline.
    """
    logging.info("Welcome to Lynis Autofix!")
    if platform.system() == "Linux":
        logging.info("Running on %s version %s" % (platform.system(), platform.release()))
    else:
        # The original `elif system != "Linux" ... else: exit()` made the
        # final else unreachable; a plain if/else covers both cases.
        logging.info("Running on %s version %s" % (platform.system(), platform.release()))
        logging.critical("%s %s not Supported!" % (platform.system(), platform.release()))
        exit()
    logging.info(40 * "-")
    lynisupdate()
    logging.info(40 * "-")
    runlynis()
    logging.info(40 * "-")
    todolist()
    logging.info(40 * "-")
    fixes()
if __name__ == "__main__":
user = os.getenv("SUDO_USER")
if user is None:
print("This program need 'sudo'")
exit()
main() | [
"chuck@tonec.nl"
] | chuck@tonec.nl |
40610b49d91d37083e53372dbc1a725ab4284308 | 949598c3ac05f7e7393fb4125464ecd6544915cc | /howmuch/article/forms.py | cdb57637f04eebd2e158ba7d81de9fef08f98433 | [] | no_license | UncleMario/howmuch | 6a3f9c06b80f83afa8c4910f172ca18d54892aab | 0758171be7a70d654174b8f133db2201b45af1b7 | refs/heads/master | 2021-03-24T12:30:41.864048 | 2013-03-30T05:46:45 | 2013-03-30T05:46:45 | 9,137,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,180 | py | from django import forms
from django.forms import ModelForm, Textarea, TextInput, Select
from howmuch.article.models import Article, Assignment
# (value, label) choices for quantity <select> widgets.  Labels are Spanish;
# each value is the lower bound of its quantity bracket.
QUANTITY_CHOICES = (
    (1,'UNO'),
    (2,'ENTRE 2 y 5'),
    (5,'ENTRE 5 y 10'),
    (10,'MAS DE 10'),
)
class ArticleForm(ModelForm):
    """Create/edit form for an Article.

    Server-managed fields (owner, dates, counters, ...) are excluded;
    visible widgets all use the Bootstrap 'span12' width class.
    """
    class Meta:
        model = Article
        exclude = ('owner','tags','date','pictures','title_url', 'comments', 'followers', 'is_active')
        widgets = {
            'price' : TextInput(attrs = {'class' : 'span12'}),
            'title' : TextInput(attrs = {'class' : 'span12'}),
            'description' : Textarea(attrs = {'class' : 'span12'}),
            'quantity' : Select(attrs = {'class' : 'span12'}),
            'category' : Select(attrs = {'class' : 'span12'}),
            'state' : Select(attrs = {'class' : 'span12'}),
        }
class OfferForm(forms.Form):
    """Offer made against an article: quantity bracket, counter-price (cprice),
    a message and up to five pictures.  Only picture1 is required.

    Each file input wires a client-side readURL() onChange handler,
    presumably to preview the chosen image — confirm against the template JS.
    """
    quantity = forms.ChoiceField(
        widget = forms.Select(attrs = {'class' : 'span12'}),
        choices = QUANTITY_CHOICES)
    cprice = forms.IntegerField(
        widget = forms.TextInput(attrs = {'class' : 'span12'}))
    message = forms.CharField(
        widget = forms.Textarea(attrs = {'class' : 'span12'}))
    picture1 = forms.ImageField(
        widget = forms.ClearableFileInput(attrs = {'class' : 'input-file',
            'onChange' : "readURL(this,'image1')"}))
    picture2 = forms.ImageField(required=False,
        widget = forms.ClearableFileInput(attrs = {'class' : 'input-file',
            'onChange' : "readURL(this,'image2')"}))
    picture3 = forms.ImageField(required=False,
        widget = forms.ClearableFileInput(attrs = {'class' : 'input-file',
            'onChange' : "readURL(this,'image3')"}))
    picture4 = forms.ImageField(required=False,
        widget = forms.ClearableFileInput(attrs = {'class' : 'input-file',
            'onChange' : "readURL(this,'image4')"}))
    picture5 = forms.ImageField(required=False,
        widget = forms.ClearableFileInput(attrs = {'class' : 'input-file',
            'onChange' : "readURL(this,'image5')"}))
class AssignmentForm(ModelForm):
    """Form for an Assignment; server-managed fields are excluded."""
    class Meta:
        model = Assignment
        exclude = ('owner', 'article', 'date', 'status', )
| [
"kayethano@gmail.com"
] | kayethano@gmail.com |
93f01551fc71c691ab7c4d7b49966cb6e2af604c | e4200b764d0b4ffba65180e54cf84b30ee84efcc | /selfdrive/boardd/boardd_setup.py | f987c7aa29e08bc7bdd5e335dc38ac0c14730201 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | kegman/openpilot | c9ba96a72d905956f02c684e065091e023942883 | 54a8614b5a6451154817a4c6c86141c96103ae47 | refs/heads/kegman-0.7 | 2022-05-22T17:07:16.656336 | 2020-01-23T16:40:55 | 2020-01-23T16:40:55 | 229,979,925 | 105 | 212 | MIT | 2022-03-13T05:47:51 | 2019-12-24T17:27:11 | C | UTF-8 | Python | false | false | 1,019 | py | import subprocess
from distutils.core import Extension, setup
from Cython.Build import cythonize
from common.cython_hacks import BuildExtWithoutPlatformSuffix
from common.basedir import BASEDIR
import os
# Location of vendored third-party libraries shipped with the repo.
PHONELIBS = os.path.join(BASEDIR, 'phonelibs')

# Pick the vendored-library directory for this machine: x86_64 hosts use
# 'x64'; any other architecture is assumed to be aarch64.
ARCH = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
ARCH_DIR = 'x64' if ARCH == "x86_64" else 'aarch64'

# Build the Cython extension wrapping can_list_to_can_capnp.
# x86_64 links the static capnp/kj archives; other arches link shared libs.
setup(name='Boardd API Implementation',
      cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
      ext_modules=cythonize(
        Extension(
          "boardd_api_impl",
          libraries=[':libcan_list_to_can_capnp.a', ':libcapnp.a', ':libkj.a'] if ARCH == "x86_64" else [':libcan_list_to_can_capnp.a', 'capnp', 'kj'],
          library_dirs=[
            './',
            PHONELIBS + '/capnp-cpp/' + ARCH_DIR + '/lib/',
            PHONELIBS + '/capnp-c/' + ARCH_DIR + '/lib/'
          ],
          sources=['boardd_api_impl.pyx'],
          language="c++",
          extra_compile_args=["-std=c++11"],
        )
      )
)
| [
"8837066+kegman@users.noreply.github.com"
] | 8837066+kegman@users.noreply.github.com |
922a4a657e71d843671559e6538adec352da9514 | e7e196631cd84f68d9a6b9cce8a497b5661968c9 | /combat.py | 3eaf0dd166a25032bbb1a3cb04cd06d2b46fd5be | [] | no_license | alabecki/Imperialist-Bastards | 5a3abb9b9702b7b7691fca3262ea4857ed5657c6 | 47e80288b6292bd65044e1cb4491498d7434637d | refs/heads/master | 2020-12-30T12:11:19.437915 | 2018-02-28T15:44:05 | 2018-02-28T15:44:05 | 91,410,823 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,939 | py |
from player_class import Player
from technologies import*
from human import Human
from AI import*
from minor_classes import*
#from AI_foreign_affairs import*
from random import*
from pprint import pprint
from copy import deepcopy
def calculate_number_of_units(player):
    """Return the total number of land/air units in the player's military
    (navy types such as frigates are not counted)."""
    land_air_types = ("infantry", "cavalry", "artillery", "irregulars", "tank", "fighter")
    return sum(player.military[unit_type] for unit_type in land_air_types)
def calculate_land_ammo_needed(p):
    """Return total ammo required by p's land/air forces.

    Each unit type consumes its own "ammo_use" rating per unit; irregulars
    consume no ammo.
    """
    ammo_needed = 0.0
    ammo_needed += p.military["infantry"] * p.infantry["ammo_use"]
    ammo_needed += p.military["cavalry"] * p.cavalry["ammo_use"]
    ammo_needed += p.military["artillery"] * p.artillery["ammo_use"]
    # Bug fix: tanks/fighters were previously billed at the cavalry/artillery
    # ammo rates; use their own rates, consistent with calculate_amphib_ammo().
    ammo_needed += p.military["tank"] * p.tank["ammo_use"]
    ammo_needed += p.military["fighter"] * p.fighter["ammo_use"]
    #print("Ammo Needed for %s: %s" % (p.name, ammo_needed))
    return ammo_needed
def calculate_oil_needed(p):
    """Return the oil consumed by p's oil-powered units (tanks and fighters)."""
    total = 0.0
    for unit, spec in (("tank", p.tank), ("fighter", p.fighter)):
        total += p.military[unit] * spec["oil_use"]
    return total
def calculate_manouver(p):
    """Total army manouver: per-type manouver ratings scaled by military
    development (artillery and irregulars contribute nothing)."""
    manouver = 0.0
    for unit, spec in (("infantry", p.infantry), ("cavalry", p.cavalry),
                       ("tank", p.tank), ("fighter", p.fighter)):
        manouver += p.military[unit] * spec["manouver"]
    # Military development grants +20% per level.
    return manouver * (1 + p.developments["military"]/5)
def calculate_oil_manouver(p):
    """Manouver contributed by oil-powered units only (tanks, fighters),
    scaled by military development."""
    base = 0.0
    for unit, spec in (("tank", p.tank), ("fighter", p.fighter)):
        base += p.military[unit] * spec["manouver"]
    return base * (1 + p.developments["military"]/5)
def calculate_oil_att(p):
    """Attack strength contributed by oil-powered units (tanks and fighters)."""
    strength = 0.0
    for unit, spec in (("tank", p.tank), ("fighter", p.fighter)):
        strength += p.military[unit] * spec["attack"]
    return strength
def calculate_oil_def(p):
    """Defence strength contributed by oil-powered units (tanks and fighters)."""
    strength = 0.0
    for unit, spec in (("tank", p.tank), ("fighter", p.fighter)):
        strength += p.military[unit] * spec["defend"]
    return strength
def distribute_losses(player, losses, num_units):
    """Randomly destroy `losses` worth of the player's army in 0.5-unit steps.

    Irregulars absorb casualties first on every iteration; a random roll then
    picks among infantry (25%), cavalry (25%), tank (20%), artillery (15%)
    and fighter (15%).  Each half-unit lost also removes 0.1 POP, milPOP and
    numLowerPOP.  Returns the remaining unit count.
    """
    while(losses >= 0.5 and num_units >= 0.5):
        #print("Losses %s , num_units %s \n" % (losses, num_units))
        # Irregulars are always expended first, before the random roll.
        if(player.military["irregulars"] >= 0.5):
            player.military["irregulars"] -= 0.5
            num_units -= 0.5
            #player.num_units -=0.5
            player.POP -= 0.1
            player.milPOP -= 0.1
            player.numLowerPOP -= 0.1
            losses -= 0.5
        loss = uniform(0, 1)
        if loss <= 0.25:
            if(player.military["infantry"] >= 0.5):
                player.military["infantry"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                # No unit of the rolled type left: re-roll next iteration.
                continue
        elif loss > 0.25 and loss <= 0.5:
            if(player.military["cavalry"] >= 0.5):
                player.military["cavalry"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
        elif loss > 0.5 and loss <= 0.7:
            if(player.military["tank"] >= 0.5):
                player.military["tank"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
        elif loss > 0.7 and loss <= 0.85:
            if(player.military["artillery"] >= 0.5):
                player.military["artillery"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                # NOTE(review): -0.2 here vs -0.1 in every other branch —
                # possibly a typo; confirm before relying on POP accounting.
                player.numLowerPOP -= 0.2
                losses -= 0.5
            else:
                continue
        elif loss > 0.85:
            if(player.military["fighter"] >= 0.5):
                player.military["fighter"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
    return num_units
def distribute_losses_amph(player, losses, num_units, current_makeup):
    """Randomly destroy `losses` worth of an amphibious force in 0.5-unit steps.

    Like distribute_losses(), but casualties come out of both the expedition's
    `current_makeup` dict and the player's overall military totals, and there
    is no irregulars branch.  Returns the updated makeup dict (mutated in
    place).

    NOTE(review): the loop condition is `losses > 0.5` (strict) while
    distribute_losses() uses `>= 0.5` — confirm the asymmetry is intended.
    """
    while(losses > 0.5 and num_units >= 0.5):
        loss = uniform(0, 1)
        if loss <= 0.25:
            if(current_makeup["infantry"] >= 0.5):
                player.military["infantry"] -=0.5
                current_makeup["infantry"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                # No unit of the rolled type in this force: re-roll.
                continue
        elif loss > 0.25 and loss <= 0.5:
            if(current_makeup["cavalry"] >= 0.5):
                player.military["cavalry"] -= 0.5
                num_units -= 0.5
                current_makeup["cavalry"] -= 0.5
                #player.num_units -=0.5
                #def_losses -= 1
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
        elif loss > 0.5 and loss <= 0.7:
            if(current_makeup["tank"] >= 0.5):
                player.military["tank"] -= 0.5
                num_units -= 0.5
                current_makeup["tank"] -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
        elif loss > 0.7 and loss <= 0.85:
            if(current_makeup["artillery"] >= 0.5):
                player.military["artillery"] -= 0.5
                current_makeup["artillery"] -= 0.5
                num_units -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
        elif loss > 0.85:
            if(current_makeup["fighter"] >= 0.5):
                player.military["fighter"] -= 0.5
                num_units -= 0.5
                current_makeup["fighter"] -= 0.5
                #player.num_units -=0.5
                player.POP -= 0.1
                player.milPOP -= 0.1
                player.numLowerPOP -= 0.1
                losses -= 0.5
            else:
                continue
    return current_makeup
def resolve_total_war(winner, p1, p2, prov, players, market, relations):
    """Resolve a total (all-or-nothing) war between p1 (attacker) and p2.

    If p1 wins: p2's developments and resources are partially stripped, its
    core provinces and most of its POP transfer to p1, diplomatic fallout is
    applied, and p2 may be removed from the game entirely.  If p2 wins: only
    stability shifts.  Mutates players/market/relations in place.
    """
    if winner == p1.name:
        p1.reputation -= 0.5
        print("%s has sucessfuly invaded %s ! \n" % (p1.name, p2.name))
        market.report.append("%s has sucessfuly invaded %s ! \n" % (p1.name, p2.name))
        # War damage: knock a development level off ~1/3 of p2's developed
        # provinces (only when it has enough of them).
        if p2.number_developments >= 4:
            opts = []
            for pr, province in p2.provinces.items():
                if province.development_level >= 1:
                    opts.append(province)
            if len(opts) >= 5:
                amount = int(len(opts)/3)
                for i in range(amount):
                    selection = choice(opts)
                    if selection.development_level >= 1:
                        selection.development_level -= 1
                    # print("As a result of the war, the development level of %s has been reduced to %s" % (selection.name, selection.development_level))
        # Resistance: each culturally-p2 province the occupier absorbs costs
        # p1 stability and may damage a random unit.
        num_resist = 0
        for p, pr in p2.provinces.items():
            if pr.culture != p1.culture and pr.culture == p2.culture:
                num_resist += 1
        num_resist = int(num_resist)
        for i in range(num_resist):
            p1.stability -= 0.2
            if p1.stability < -3:
                p1.stability = -3
            unit_types = ["infantry", "cavalry", "artillery", "tank", "fighter"]
            kind = choice(unit_types)
            if p1.military[kind] > 2:
                p1.military[kind] -= 0.04
                p1.milPOP -= 0.04
                p1.POP -= 0.04
            # print("A %s unit belogning to %s has been damaged by %s resistance fighters!" % (kind, p1.name, p2.name))
        # Strip the loser's stockpiles down to 2 of each resource/good,
        # but only where the winner already holds at least 3.
        for r, res in p2.resources.items():
            if p1.resources[r] >= 3:
                p1.resources[r] += (p2.resources[r] - 2)
                p2.resources[r] = 2
        for g, good in p2.goods.items():
            if p1.goods[g] >= 3:
                p1.goods[g] += (p2.goods[g] -2)
                p2.goods[g] = 2
        # Nobody may keep targeting the defeated nation for sphering.
        for pl in players.values():
            sphere_target_copy = deepcopy(pl.sphere_targets)
            for st in sphere_target_copy:
                if st == p2.name:
                    pl.sphere_targets.remove(st)
        # Transfer p2's capital set and every core province to p1.
        capital = p2.capital
        for c in capital:
            # print(c)
            p1.capital.add(c)
        core = p2.core_provinces()
        p2.capital = set()
        for p, pr in p2.provinces.items():
            if pr in core:
                p1.provinces[p] = pr
                p1.number_developments += pr.development_level
                pr.owner = p1.name
                if type(p1) == AI:
                    p1.resource_base[pr.resource] += pr.quality
                    p1.ai_modify_priorities_from_province(p1.provinces[pr.name].resource)
        core_keys = []
        for c in core:
            core_keys.append(c.name)
        for ck in core_keys:
            print("%s has lost %s" % (p2.name, ck))
            market.report.append("%s has lost %s" % (p2.name, ck))
            p2.number_developments -= p2.provinces[ck].development_level
            p2.provinces.pop(ck)
            # NOTE(review): `pr` is stale from the loop above (always the last
            # province iterated) and this isn't guarded by type(p2) == AI —
            # likely meant the province being popped; confirm.
            p2.resource_base[pr.resource] -= pr.quality
        # Most of the loser's population transfers; it keeps 1.2 per province.
        remains = (len(p2.provinces.keys()) * 1.2)
        p1.POP += (p2.POP - remains)
        p1.numLowerPOP += (p2.POP - remains)
        p2.POP = remains
        p2.numLowerPOP = remains
        #for k, v in p1.midPOP.items():
        #	p1.midPOP[k]["number"] += p2.midPOP[k]["number"]
        #	p1.numMidPOP += p2.midPOP[k]["number"]
        #	p1.POP += p2.midPOP[k]["number"]
        #	p2.numMidPOP -= p2.midPOP[k]["number"]
        #	p2.POP -= p2.midPOP[k]["number"]
        #	p2.midPOP[k]["number"] = 0
        # Diplomatic fallout with every third party.
        # NOTE(review): the first and third conditions are identical, so the
        # p2-relations penalty is applied twice — confirm intended.
        for p, pl in players.items():
            if len(set([p1.name, p])) == 1 or len(set([p2.name, p])) == 1:
                continue
            if relations[frozenset([p2.name, p])].relationship < 1.5:
                relations[frozenset([p1.name, p])].relationship -= 1
            if relations[frozenset([p1.name, p])].relationship < 2.5:
                relations[frozenset([p1.name, p])].relationship -= 1
            if relations[frozenset([p2.name, p])].relationship < 1.5:
                relations[frozenset([p1.name, p])].relationship -= 1
        #recalculate borders of nations:
        p1_borders = set()
        for k, v in players.items():
            if p1.check_for_border(v) == True:
                p1_borders.add(k)
        p1.borders = p1_borders
        #print("%s has lost a total war to %s" % (p2.name, p1.name))
        pause = input()  # pause so the player can read the results
        p2_borders = set()
        # If p2 survives with any province, pick it a new capital and
        # mark it defeated.
        if len(p2.provinces.keys()) >= 1:
            opts = list(p2.provinces.keys())
            ch = choice(opts)
            p2.capital.add(ch)
            p2_borders = set()
            for k, v in players.items():
                if p2.check_for_border(v) == True:
                    p2_borders.add(k)
            p2.borders = p2_borders
            p2.defeated = True
        # Nation death: scrub p2 from the market, relations and every AI's
        # target lists, then remove it from the game.
        if len(p2.provinces.keys()) == 0:
            print("%s no longer exists as a nation!" % (p2.name))
            market.report.append("%s no longer exists as a nation!" % (p2.name))
            for k, v in market.market.items():
                for i in v:
                    if i.owner == p2.name:
                        if k in p1.resources.keys():
                            p1.resources[k] += 1
                        if k in p1.goods.keys():
                            p1.goods[k] +=1
                        market.market[k].remove(i)
                        del i
            pause = input()
            relkeys = list(relations.keys())
            for r in relkeys:
                if p2.name in relations[r].relata:
                    del relations[r]
            for pl in players.values():
                if type(pl) == AI:
                    if p2.name in pl.sphere_targets:
                        pl.sphere_targets.remove(p2.name)
                    if p2 in pl.allied_target:
                        pl.allied_target.remove(p2)
                    if pl.rival_target != []:
                        if p2.name == pl.rival_target[0].name:
                            pl.rival_target = []
                    if p2.name in pl.objectives:
                        pl.objectives.remove(p2.name)
                    if p2.name in pl.embargo:
                        pl.embargo.remove(p2.name)
            #sphere_target_copy = deepcopy(pl.sphere_targets)
            #for st in sphere_target_copy:
            #if st == p2.name:
            #	pl.sphere_targets.remove(st)
            del players[p2.name]
    elif winner == p2.name:
        p1.stability -= 1
        if p1.stability < -3.0:
            p1.stability = -3.0
        p2.stability += 1
        # NOTE(review): this clamp tests/assigns p1 after raising p2 —
        # almost certainly meant p2 (same copy-paste bug as combat_outcome).
        if p1.stability > 3.0:
            p1.stability = 3.0
        print("%s has repelled %s's pitiful invasion! \n" % (p2.name, p1.name))
        market.report.append("%s has repelled %s's pitiful invasion! \n" % (p2.name, p1.name))
        # print("Will we soon see a counter invasion from %s ?" % (p2.name))
def combat_outcome(winner, p1, p2, prov, players, market, relations):
    """Apply the results of a provincial war (or delegate a total war).

    `winner` is the victor's name.  For `prov == "total"` resolution is
    delegated to resolve_total_war().  Otherwise: relations thaw by 1, p1's
    casus belli on the province is spent, and the winner either takes the
    province plus loot (p1) or gains stability at p1's expense (p2).
    """
    if prov == "total":
        resolve_total_war(winner, p1, p2, prov, players, market, relations)
        return
    if type(p1) == AI:
        p1.rival_target = []
    relata = frozenset([p1.name, p2.name])
    p1.rival_target = []
    relations[relata].relationship += 1
    # Spend every casus belli p1 held on the contested province.
    #cb_copy = deepcopy(p1.CB)
    cb_keys = []
    for cb in p1.CB:
        cb_keys.append(cb)
    for cb in cb_keys:
        if cb.province == prov.name:
            p1.CB.remove(cb)
            del cb
    if winner == p1.name:
        print("%s has sucessfuly invaded %s ! \n" % (p1.name, p2.name))
        market.report.append("%s has sucessfuly invaded %s ! \n" % (p1.name, p2.name))
        #p1.stability += 0.5
        #if p1.stability > 3:
        #	p1.stability = 3
        #maybe gain stability with Nationalism
        p2.just_attacked = 3
        # War damage: one random developed province of the loser drops a level.
        if p2.number_developments >= 2:
            opts = []
            for pr, province in p2.provinces.items():
                if province.development_level >= 1:
                    opts.append(province)
            if len(opts) >= 1:
                selection = choice(opts)
                selection.development_level -= 1
                # print("As a result of the war, the development level of %s has been reduced to %s" % (selection.name, selection.development_level))
        if prov.name in p2.provinces.keys():
            gain_province(p1, p2, prov, players, market, relations)
        else:
            p1.war_after_math(p2, players, relations, prov)
        # Winner loots roughly 30% of the loser's gold.
        loot = p2.resources["gold"]/3.33
        p1.resources["gold"] += loot
        p2.resources["gold"] -= loot
        print("%s loots %s gold from %s \n" % (p1.name, loot, p2.name))
        market.report.append("%s loots %s gold from %s \n" % (p1.name, loot, p2.name))
        if p2.type == "major" and p1.military["tank"] > 0 and prov.culture == p2.culture:
            # Bug fix: was `p2.defeated == True` — a no-op comparison
            # instead of an assignment.
            p2.defeated = True
            print("%s has been defeated by %s! " % (p2.name, p1.name))
            # Bug fix: previously re-appended the loot message here instead of
            # the defeat announcement just printed.
            market.report.append("%s has been defeated by %s! " % (p2.name, p1.name))
    elif winner == p2.name:
        p1.stability -= 0.5
        if p1.stability < -3.0:
            p1.stability = -3.0
        p2.stability += 0.5
        # Bug fix: the upper clamp previously tested/assigned p1.stability
        # right after raising p2's — clamp the defender instead.
        if p2.stability > 3.0:
            p2.stability = 3.0
        print("%s has repelled %s's pitiful invasion! \n" % (p2.name, p1.name))
        market.report.append("%s has repelled %s's pitiful invasion! \n" % (p2.name, p1.name))
    print("The war between %s and %s has ended in a white pease \n" % (p1.name, p2.name))
def gain_province(p1, p2, prov, players, market, relations):
    """Transfer the contested province `prov` from loser p2 to winner p1.

    Handles development/POP/colony bookkeeping, capital relocation for p2,
    stability hits for both sides, border recalculation, and — if p2 loses
    its last province — full removal of p2 from the game.
    """
    win_name = p1.name
    loss_name = p2.name
    print("%s has defeated %s for the province of %s \n" % (win_name, loss_name, prov.name))
    market.report.append("%s has defeated %s for the province of %s \n" % (win_name, loss_name, prov.name))
    # Losing a same-culture or civilized province can cost p2 a national
    # development level as well.
    if prov.culture == p2.culture or prov.type == "civilized":
        if p2.development_level > 2:
            p2.development_level -= 1
            possibilities = []
            for d, dev in p2.developments.items():
                if dev > 0:
                    possibilities.append(d)
            loss = choice(possibilities)
            p2.developments[loss] -= 1
            pause = input()  # pause so the player can read the results
    prov.owner = p1.name
    #new = deepcopy(p2.provinces[prov.name])
    p1.number_developments += prov.development_level
    #maybe add an option for sorchered earth
    p2.number_developments -= prov.development_level
    p1.provinces[prov.name] = prov
    if type(p1) == AI:
        p1.resource_base[prov.resource] += prov.quality
        p1.ai_modify_priorities_from_province(p1.provinces[prov.name].resource)
    if type(p2) == AI:
        p2.resource_base[prov.resource] -= prov.quality
    p2_core = p2.core_provinces()
    #p1.provinces[new.name].type = "old"
    if prov in p2_core:
        print(prov)
        p2_core.remove(prov)
    p2.provinces.pop(prov.name)
    p2_core = list(p2_core)
    # Relocate p2's capital if it just lost it (prefer a core province).
    if prov.name in p2.capital:
        #print(prov.name + "is old capital of" + p2.name)
        p2.capital.remove(prov.name)
    if len(p2.provinces.keys()) > 0 and len(p2.capital) == 0:
        if len(p2_core) > 0:
            ch = choice(p2_core)
            # print("New Capital: %s" % (ch.name))
            p2.capital.add(ch.name)
        else:
            ch = choice(list(p2.provinces.keys()))
            p2.capital.add(ch)
    # Colony bookkeeping for both sides.
    if prov.colony == True:
        p2.num_colonies -= 1
        p2.colonization += 1 + p1.num_colonies
    if p2.type == "old_empire" or p2.type == "old_minor" or prov.colony == True:
        p1.colonization -= (1 + p1.num_colonies)
        p1.provinces[prov.name].colony = True
        p1.num_colonies += 1
    # A worked province moves one lower-class POP with it.
    if prov.worked == True:
        p1.POP += 1
        p1.numLowerPOP += 1
        p2.POP -= 1
        p2.numLowerPOP -= 1
    p1.stability -= 0.15
    if p1.stability < -3.0:
        p1.stability = -3.0
    p2.stability -= 0.25
    if p2.stability < -3.0:
        p2.stability = -3.0
    # Nation death: absorb stockpiles, scrub the market, relations and every
    # AI's target lists, then remove p2 from the game.
    if len(p2.provinces.keys()) == 0:
        print("%s no longer exists as a nation!" % (p2.name))
        market.report.append("%s no longer exists as a nation!" % (p2.name))
        p1.war_after_math(p2, players, relations, prov)
        for k, v in p2.resources.items():
            p1.resources[k] += v
        for k, v in p2.goods.items():
            p1.goods[k] += v
        for k, v in market.market.items():
            for i in v:
                if i.owner == p2.name:
                    if k in p1.resources.keys():
                        p1.resources[k] += 1
                    if k in p1.goods.keys():
                        p1.goods[k] +=1
                    market.market[k].remove(i)
                    # print("removed %s %s"% (i.owner, i.kind))
                    del i
        pause = input()
        relkeys = list(relations.keys())
        for r in relkeys:
            if p2.name in relations[r].relata:
                del relations[r]
        for pl in players.values():
            if type(pl) == AI:
                if p2.name in pl.sphere_targets:
                    pl.sphere_targets.remove(p2.name)
                if p2 in pl.allied_target:
                    # NOTE(review): attribute here is `allied_targets` but the
                    # membership test above (and resolve_total_war) uses
                    # `allied_target` — one of the two is wrong; confirm.
                    pl.allied_targets.remove(p2)
                if pl.rival_target != []:
                    if p2.name == pl.rival_target[0].name:
                        pl.rival_target = []
                if p2.name in pl.objectives:
                    pl.objectives.remove(p2.name)
                if p2.name in pl.embargo:
                    pl.embargo.remove(p2.name)
        del players[p2.name]
    else:
        p1.war_after_math(p2, players, relations, prov)
        p2_borders = set()
        for k, v in players.items():
            if p2.check_for_border(v) == True:
                p2_borders.add(k)
        p2.borders = p2_borders
    #recalculate borders of nations:
    p1_borders = set()
    for k, v in players.items():
        if p1.check_for_border(v) == True:
            p1_borders.add(k)
    p1.borders = p1_borders
    print(str(prov) + " is now part of " + p1.name)
    market.report.append(str(prov) + " is now part of " + p1.name)
def calculate_amphib_num_units(player, current_makeup):
    """Return the total unit count of an amphibious force.

    `player` is unused; the parameter is kept for signature parity with the
    other amphibious helpers.
    """
    return sum(current_makeup.values())
def calculate_amphib_strength(player, forces):
    """Total attack strength of an amphibious force: count x per-type attack
    rating (keys other than the five army types are ignored)."""
    strength = 0
    for unit_type in ("infantry", "cavalry", "artillery", "tank", "fighter"):
        if unit_type in forces:
            strength += forces[unit_type] * getattr(player, unit_type)["attack"]
    return strength
def calculate_amphib_ammo(player, current_makeup):
    """Total ammo required by an amphibious force: count x per-type ammo_use."""
    ammo = 0
    for unit_type in ("infantry", "cavalry", "artillery", "tank", "fighter"):
        if unit_type in current_makeup:
            ammo += current_makeup[unit_type] * getattr(player, unit_type)["ammo_use"]
    return ammo
def calculate_amphib_man(player, current_makeup):
    """Total manouver rating of an amphibious force: count x per-type manouver.

    The original computed a unit count and carried commented-out officer/
    per-unit normalisation; the dead count accumulation has been removed
    (the commented normalisation was never applied).
    """
    manouver = 0
    for unit_type in ("infantry", "cavalry", "artillery", "tank", "fighter"):
        if unit_type in current_makeup:
            manouver += current_makeup[unit_type] * getattr(player, unit_type)["manouver"]
    #manouver = manouver * (1 + player.midPOP["officers"]["number"]/2)
    #manouver = manouver/(num_units + 0.001)
    return manouver
def calculate_amphib_oil(player, current_makeup):
    """Oil required by the oil-powered units (tanks, fighters) of an
    amphibious force."""
    oil = 0
    if "tank" in current_makeup:
        oil += current_makeup["tank"] * player.tank["oil_use"]
    if "fighter" in current_makeup:
        oil += current_makeup["fighter"] * player.fighter["oil_use"]
    return oil
def oil_amph_unit_str(player, current_makeup):
    """Attack strength contributed by the oil-powered units of an amphibious
    force (tanks and fighters).

    Bug fix: the original assigned (`=`) instead of accumulating (`+=`), so
    whichever of tank/fighter was seen last overwrote the other's
    contribution.  Now accumulates both, consistent with calculate_oil_att().
    """
    amount = 0
    if "tank" in current_makeup:
        amount += current_makeup["tank"] * player.tank["attack"]
    if "fighter" in current_makeup:
        amount += current_makeup["fighter"] * player.fighter["attack"]
    return amount
def oil_amph_unit_man(player, current_makeup):
    """Manouver contributed by the oil-powered units of an amphibious force
    (tanks and fighters).

    Bug fix: like oil_amph_unit_str(), the original overwrote `amount` with
    `=` so fighters clobbered the tank contribution; now accumulates both.
    """
    amount = 0
    if "tank" in current_makeup:
        amount += current_makeup["tank"] * player.tank["manouver"]
    if "fighter" in current_makeup:
        amount += current_makeup["fighter"] * player.fighter["manouver"]
    return amount
####################################################### Add for ALL ground battles!
def select_ground_forces(player, target):
    """Interactively ask a human player how many of each unit type to commit.

    Prompts once per unit type, shows the target's matching holdings, and
    rejects amounts above what the player owns.  Returns a dict mapping
    unit type -> committed count.
    """
    forces = {
        "infantry": 0,
        "cavalry": 0,
        "artillery": 0,
        "tank": 0,
        "fighter": 0
    }
    number = 0
    for k, v in forces.items():
        correct = False
        while correct == False:
            print("How many %s would you like to send? (you have %s)" % (k, player.military[k]))
            print("%s has %s %s" % (target.name, target.military[k], k))
            amount = input()
            # Robustness: re-prompt on non-numeric input instead of crashing.
            try:
                amount = int(amount)
            except ValueError:
                continue
            if amount > player.military[k]:
                # Bug fix: the rejection message previously printed the forces
                # entry `v` (always 0) instead of the player's actual holdings.
                print("You only have %s %s" % (player.military[k], k))
                continue
            else:
                forces[k] = amount
                number += amount
                correct = True
    return forces
def combat_against_uncivilized(player, unciv, cprov = ""):
    """Resolve an invasion of an uncivilized nation's province `cprov`.

    Round-based: the player's expedition (human-selected or AI-assembled)
    fights irregulars worth 0.65 strength each, paying ammo/oil upkeep with
    penalties for shortfalls.  On victory the province is annexed as a
    colony; on defeat the player loses stability.  `cprov` must be a
    province object (the "" default is never usable — assumed legacy).
    """
    print("The nation of %s is attacking %s !" % (player.name, unciv.name))
    cont = input()
    forces = {}
    if type(player) == Human:
        # Bug fix: naval_transport() requires a target argument; calling it
        # with only `player` raised TypeError.  The target is only consulted
        # on naval_transport's AI path, so passing unciv is safe here.
        forces = naval_transport(player, unciv)
    if type(player) == AI:
        forces = player.ai_transport_units()
    player_initial_army = calculate_amphib_num_units(player, forces)
    # NOTE: all three names alias the same dict; casualty bookkeeping mutates
    # it in place.
    player_initial_makeup = forces
    player_current_makeup = forces
    unciv_initial_army = unciv.number_irregulars
    while(True):
        player_number_units_army = calculate_amphib_num_units(player, player_current_makeup)
        player_str = calculate_amphib_strength(player, player_current_makeup)
        player_ammo = calculate_amphib_ammo(player, player_current_makeup)
        player_manouver = calculate_amphib_man(player, player_current_makeup)
        player_oil = calculate_amphib_oil(player, player_current_makeup)
        unciv_strength = unciv.number_irregulars * 0.65
        player_manouver_roll = uniform(0, 1)
        unciv_manouver_roll = uniform(0, 1)
        # Oil shortfall reduces the manouver of oil-powered units.
        o_deficit = player.resources["oil"] - player_oil
        if o_deficit < 0:
            base = oil_amph_unit_man(player, player_current_makeup)
            temp = abs(o_deficit/(player_oil * 1.5))
            penalty = base * (1 - temp)
            player_manouver -= penalty
        # Manouver contest: winner gets +20%; indirect fire adds bonus
        # artillery damage when the player out-manouvers the defenders.
        if(player_manouver + player_manouver_roll > 1.5 + unciv_manouver_roll):
            player_str = player_str * 1.20
            if "indirect_fire" in player.technologies:
                player_str += (player_current_makeup["artillery"] * player.artillery["attack"]) * 0.5
        else:
            unciv_strength = unciv_strength * 1.20
        # Ammo upkeep: shortfall scales attack strength down.
        a_deficit = player.goods["cannons"] - player_ammo
        if a_deficit < 0:
            print("%s has an ammo deficit of %s" % (player.name, abs(a_deficit)))
            penalty = abs(a_deficit/ (player_ammo * 2))
            player_str = player_str * (1 - penalty)
            player.goods["cannons"] = 0
        else:
            player.goods["cannons"] -= player_ammo
        # Oil upkeep: shortfall also reduces oil-unit attack strength.
        if o_deficit < 0:
            base = oil_amph_unit_str(player, player_current_makeup)
            temp = abs(o_deficit/(player_oil *2))
            penalty = base * (1 - temp)
            player_str -= penalty
            player.resources["oil"] = 0
        else:
            player.resources["oil"] -= player_oil
        # Each side inflicts a third of its strength as losses.
        player_losses = unciv_strength/3.0
        unciv_losses = player_str/3.0
        player_current_makeup = distribute_losses_amph(player, player_losses, player_number_units_army, player_current_makeup)
        player_number_units_army = calculate_amphib_num_units(player, player_current_makeup)
        unciv.number_irregulars -= unciv_losses
        player_str = calculate_amphib_strength(player, player_current_makeup)
        # The battle ends once either side drops below 40% of its start.
        done = False
        if(player_number_units_army < player_initial_army * 0.4):
            done = True
        if(unciv.number_irregulars < unciv_initial_army * 0.4):
            done = True
        if(done == True):
            if player_number_units_army > unciv.number_irregulars:
                # Victory: annex the province as an unworked colony.
                new = deepcopy(unciv.provinces[cprov.name])
                player.provinces[new.name] = new
                player.provinces[new.name].worked = False
                player.provinces[new.name].colony = True
                player.provinces[new.name].type = "uncivilized"
                player.POP += 1
                player.freePOP +=1
                player.numLowerPOP += 1
                player.resources["gold"] += 3
                unciv.provinces.pop(cprov.name)
                if type(player) == AI:
                    player.resource_base[new.resource] += int(new.quality)
                    player.ai_modify_priorities_from_province(player.provinces[new.name].resource)
                player.reputation -= 0.1
                player.colonization -= (1 + player.num_colonies)
                player.num_colonies += 1
                player.stability -= 0.1
                if player.stability < -3.0:
                    player.stability = -3.0
                return
            else:
                print("%s's attept to take %s has ended in failure, what an embarresment! \n" % (player.name, unciv.name))
                player.stability -= 0.5
                if player.stability < -3.0:
                    player.stability = -3.0
                unciv.number_irregulars += 1
                return
        else:
            if type(player) == Human:
                # Bug fix: the prompt referenced an undefined name
                # `number_irregulars` (NameError); use the defender's count.
                cont = input("%s, you currently have %s units, the enemy has %s units, would you like to continue the assult? (y,n)" \
                    % (player.name, str(player_number_units_army), str(unciv.number_irregulars)))
                if(cont == "n"):
                    return
            if type(player) == AI:
                # AI withdraws when clearly outmatched.
                player_str = calculate_amphib_strength(player, player_current_makeup)
                unciv_strength = unciv.number_irregulars * 0.65
                if player_str * 0.85 < unciv_strength:
                    return
def amph_combat(p1, p2, p1_forces, prov, players, market, relations):
    """Fight an amphibious war: p1's expedition `p1_forces` vs p2's home army.

    Round-based loop: compute strengths/manouver, apply ammo and oil upkeep
    penalties, exchange losses, and stop when the strength gap doubles, either
    side falls below its threshold (attacker 45%, defender 38% of start), or a
    side drops under one unit; the outcome is handed to combat_outcome().
    Humans may withdraw between rounds; the AI withdraws when outmatched.

    NOTE(review): `calculate_ammo_needed` is not defined in this module —
    presumably calculate_land_ammo_needed or a wildcard import; verify.
    """
    print("War has broken out between %s and %s ! \n" % (p1.name, p2.name))
    market.report.append("War has broken out between %s and %s ! \n" % (p1.name, p2.name))
    cont = input()  # pause so the player can read the declaration
    att_initial_army = calculate_amphib_num_units(p1, p1_forces)
    att_initial_makeup = p1_forces
    att_current_makeup = p1_forces
    def_initial_army = calculate_number_of_units(p2)
    while(True):
        def_number_units_army = calculate_number_of_units(p2)
        att_number_units_army = calculate_amphib_num_units(p1, p1_forces)
        att_str = calculate_amphib_strength(p1, p1_forces)
        def_str = p2.calculate_base_defense_strength()
        att_ammo = calculate_amphib_ammo(p1, p1_forces)
        att_oil = calculate_amphib_oil(p1, p1_forces)
        def_ammo = calculate_ammo_needed(p2)
        def_oil = calculate_oil_needed(p2)
        att_manouver = calculate_amphib_man(p1, p1_forces)
        def_manouver = calculate_manouver(p2)
        att_manouver_roll = uniform(1, 1.25)
        def_manouver_roll = uniform(1, 1.25)
        # Oil shortfalls degrade manouver before the contest.
        p1o_deficit = p1.resources["oil"] - att_oil
        if p1o_deficit < 0:
            print("%s has an oil deficit of %s" % (p1.name, abs(p1o_deficit)))
            base = oil_amph_unit_man(p1, p1_forces)
            temp = abs(p1o_deficit/((att_oil * 1.5) + 0.01))
            penalty = base * (1 - temp)
            att_manouver -= penalty
        p2o_deficit = p2.resources["oil"] - def_oil
        if p2o_deficit < 0:
            print("%s has an oil deficit of %s" % (p2.name, abs(p2o_deficit)))
            base = calculate_oil_manouver(p2)
            temp = abs(p2o_deficit/(def_oil * 1.5) + 0.01)
            # NOTE(review): this penalty for p2's deficit is subtracted from
            # the ATTACKER's manouver — almost certainly meant def_manouver.
            penalty = base * (1 - temp)
            att_manouver -= penalty
        # Normalise manouver by military development per unit.
        att_manouver = att_manouver * (((p1.developments["military"]) + 0.1)/((att_number_units_army) + 0.001))
        def_manouver = def_manouver * (((p2.developments["military"])+ 0.1)/((def_number_units_army) + 0.001))
        print("%s has %s units and base attack strength of %s \n" % (p1.name, att_number_units_army, att_str))
        print("%s has %s units and base defense strength of %s \n" % (p2.name, def_number_units_army, def_str))
        market.report.append("%s has %s units and base attack strength of %s \n" % (p1.name, att_number_units_army, att_str))
        market.report.append("%s has %s units and base defense strength of %s \n" % (p2.name, def_number_units_army, def_str))
        att_manouver = att_manouver * att_manouver_roll
        def_manouver = def_manouver * def_manouver_roll
        # 1 - 1/att_man
        print("%s manouver = %s, %s manouver = %s \n" % (p1.name, att_manouver, p2.name, def_manouver))
        market.report.append("%s manouver = %s, %s manouver = %s \n" % (p1.name, att_manouver, p2.name, def_manouver))
        # NOTE(review): the rolls were already multiplied in above, so this
        # comparison applies them a second time — confirm intended.
        if( att_manouver * att_manouver_roll) > (def_manouver * def_manouver_roll):
            difference = att_manouver/(def_manouver + 0.001)
            print("%s out-manouvers %s \n" % (p1.name, p2.name))
            market.report.append("%s out-manouvers %s \n" % (p1.name, p2.name))
            att_str = att_str * min( 1.33, difference)
        else:
            print("%s out-manouvers %s \n" % (p2.name, p1.name))
            market.report.append("%s out-manouvers %s \n" % (p2.name, p1.name))
            difference = def_manouver/(att_manouver + 0.001)
            def_str = def_str * min( 1.33, difference)
        print("%s total attack strength: %s, %s total attack strength: %s \n" % (p1.name, att_str, p2.name, def_str))
        market.report.append("%s total attack strength: %s, %s total attack strength: %s \n" % (p1.name, att_str, p2.name, def_str))
        # Ammo upkeep: shortfalls scale strength down, stockpiles are drained.
        p1a_deficit = p1.goods["cannons"] - att_ammo
        if p1a_deficit < 0:
            print("%s has an ammo deficit of %s" % (p1.name, abs(p1a_deficit)))
            market.report.append("%s has an ammo deficit of %s" % (p1.name, abs(p1a_deficit)))
            penalty = abs(p1a_deficit/ ((att_ammo * 2) + 0.01))
            att_str = att_str * (1 - penalty)
            p1.goods["cannons"] = 0
        else:
            p1.goods["cannons"] -= att_ammo
        p2a_deficit = p2.goods["cannons"] - def_ammo
        if p2a_deficit < 0:
            print("%s has an ammo deficit of %s" % (p2.name, abs(p2a_deficit)))
            market.report.append("%s has an ammo deficit of %s" % (p2.name, abs(p2a_deficit)))
            penalty = abs(p2a_deficit/ ((def_ammo * 2) + 0.01))
            def_str = def_str * (1 - penalty)
            p2.goods["cannons"] = 0
        else:
            p2.goods["cannons"] -= def_ammo
        # Oil upkeep: shortfalls remove part of the oil units' strength.
        if p1o_deficit < 0:
            print("%s has an oil deficit of %s" % (p1.name, abs(p1o_deficit)))
            market.report.append("%s has an oil deficit of %s" % (p1.name, abs(p1o_deficit)))
            base = oil_amph_unit_str(p1, p1_forces)
            temp = abs(p1o_deficit/((att_oil *2) + 0.01))
            penalty = base * (1 - temp)
            att_str -= penalty
            p1.resources["oil"] = 0
        else:
            p1.resources["oil"] -= att_oil
        p2o_deficit = p2.resources["oil"] - def_oil
        if p2o_deficit < 0:
            print("%s has an oil deficit of %s" % (p2.name, abs(p2o_deficit)))
            market.report.append("%s has an oil deficit of %s" % (p2.name, abs(p2o_deficit)))
            base = calculate_oil_def(p2)
            temp = abs(p2o_deficit/((def_oil *2) + 0.01))
            penalty = base * (1 - temp)
            def_str -= penalty
            p2.resources["oil"] = 0
        else:
            p2.resources["oil"] -= def_oil
        # Exchange losses, capped at each side's remaining unit count.
        temp = max(1, att_number_units_army * 0.333)
        loss_mod = att_str/temp
        att_losses = def_str/(loss_mod + 0.001)
        def_losses = att_str/(loss_mod + 0.001)
        if att_losses > att_number_units_army:
            # NOTE(review): chained assignment sets temp AND att_losses to the
            # unit count — probably meant `temp = att_losses - att_number_units_army`.
            temp = att_losses = att_number_units_army
            def_losses -= temp
        if def_losses > def_number_units_army:
            temp = def_losses - def_number_units_army
            att_losses -= temp
        done = False
        if att_losses < 0.50 and def_losses < 0.50:
            done = True
        print("%s losses: %s, %s losses: %s \n" % (p1.name, att_losses, p2.name, def_losses))
        market.report.append("%s losses: %s, %s losses: %s \n" % (p1.name, att_losses, p2.name, def_losses))
        att_current_makeup = distribute_losses_amph(p1, att_losses, att_number_units_army, att_current_makeup)
        att_number_units_army = calculate_amphib_num_units(p1, att_current_makeup)
        def_number_units_army = distribute_losses(p2, def_losses, def_number_units_army)
        print("%s has %s units remaining, %s has %s units remaining \n" % (p1.name, att_number_units_army, p2.name, def_number_units_army))
        market.report.append("%s has %s units remaining, %s has %s units remaining \n" % (p1.name, att_number_units_army, p2.name, def_number_units_army))
        # End conditions: 2x strength gap, attrition thresholds, or collapse.
        att_now = calculate_amphib_strength(p1, p1_forces)
        def_now = p2.calculate_base_defense_strength()
        if att_now >= def_now * 2 or def_now >= att_now * 2:
            done = True
        if(att_number_units_army < att_initial_army * 0.45):
            done = True
        if(def_number_units_army < def_initial_army * 0.38):
            done = True
        if att_number_units_army < 1 or def_number_units_army < 1:
            done = True
        if done == True:
            if att_number_units_army > def_number_units_army:
                combat_outcome(p1.name, p1, p2, prov, players, market, relations)
                return
            else:
                combat_outcome(p2.name, p1, p2, prov, players, market, relations)
                return
        else:
            if type(p1) == Human:
                cont = input("%s, you currently have %s units, the enemy has %s units, would you like to continue the assult? (y,n)" \
                    % (p1.name, att_number_units_army, def_number_units_army))
                if(cont == "n"):
                    break
            if type(p1) == AI:
                att_str = calculate_amphib_strength(p1, p1_forces)
                def_str = p2.calculate_base_defense_strength()
                if att_str * 0.85 < def_str:
                    return
def naval_transport(player, target):
    """Assemble the land force *player* loads onto transports for an
    amphibious assault against *target*.

    Returns a dict of unit name -> count.  Transport capacity is 2 units
    per frigate/ironclad and 3 per battleship.  A human player is prompted
    per unit type; an AI loads random units until it has roughly twice the
    defender's base defense strength (or runs out of units/capacity).
    """
    forces = {
        "infantry": 0,
        "cavalry": 0,
        "artillery": 0,
        "tank": 0,
        "fighter": 0
    }
    # No infantry at all -> no invasion force can be raised.
    if player.military["infantry"] < 1:
        return forces
    forces["infantry"] += 1
    transport_limit = ((player.military["frigates"] + player.military["iron_clad"]) * 2 + player.military["battle_ship"] * 3)
    if type(player) == Human:
        number = 0  # units loaded so far
        print("Your transport capacity is %s" % (transport_limit))
        for k in forces:
            correct = False
            while correct == False:
                amount = input("How many %s would you like to send? (you have %s)" % (k, player.military[k]))
                amount = int(amount)
                if number + amount > transport_limit:
                    print("The amount you specified exceeds your capacity \n")
                    continue
                elif amount > player.military[k]:
                    # BUG FIX: previously printed the already-loaded count
                    # (always 0 at this point) instead of the player's stock.
                    print("You only have %s %s" % (player.military[k], k))
                    continue
                else:
                    forces[k] = amount
                    number += amount
                    correct = True
    if type(player) == AI:
        # Load random units until twice the defender's strength is reached,
        # capacity/units run out, or 128 attempts have been made.
        target_strength = target.calculate_base_defense_strength()
        self_strength = 0
        number_units = player.num_army_units()
        tries = 0
        number = 0
        while (self_strength < (target_strength * 2) and number_units > 0.99 and tries < 128 and number <= transport_limit):
            pick = choice(["infantry", "artillery", "cavalry", "fighter", "tank"])
            if (player.military[pick] - forces[pick]) >= 1:
                forces[pick] += 1
                # player.<pick>["attack"] holds the unit type's attack value.
                self_strength += getattr(player, pick)["attack"]
                tries += 1
                number_units -= 1
                number += 1
            else:
                tries += 1
    return forces
def ai_transport_units(player, target):
    """Pick a random mix of land units for an AI amphibious force.

    Units are drawn at random until the force is roughly twice as strong as
    *target*'s base defense, the transport capacity or unit pool is
    exhausted, or 128 draw attempts have been made.  Returns a dict of
    unit name -> count.
    """
    enemy_strength = target.calculate_base_defense_strength()
    own_strength = 0
    attempts = 0
    remaining_units = player.num_army_units()
    # 2 slots per frigate/ironclad, 3 per battleship.
    capacity = ((player.military["frigates"] + player.military["iron_clad"]) * 2
                + player.military["battle_ship"] * 3)
    forces = dict.fromkeys(("infantry", "cavalry", "artillery", "tank", "fighter"), 0)
    loaded = 0
    unit_pool = ["infantry", "artillery", "cavalry", "fighter", "tank"]
    while (own_strength < (enemy_strength * 2) and remaining_units > 0.99
           and attempts < 128 and loaded <= capacity):
        pick = choice(unit_pool)
        attempts += 1
        # Only load a unit type the player still has in reserve.
        if (player.military[pick] - forces[pick]) >= 1:
            forces[pick] += 1
            # player.<pick>["attack"] is the unit type's attack stat.
            own_strength += getattr(player, pick)["attack"]
            remaining_units -= 1
            loaded += 1
    return forces
def calculate_number_of_ships(player):
    """Return the total number of warships in *player*'s navy
    (frigates + ironclads + battleships)."""
    fleet = player.military
    return fleet["frigates"] + fleet["iron_clad"] + fleet["battle_ship"]
def distribute_naval_losses(player, losses, num_units):
    """Apply *losses* (loss points) to *player*'s fleet and return the
    updated ship count *num_units*.

    Ships absorb losses cheapest-first (frigates, then ironclads, then
    battleships); per 0.5 points of loss, bigger ship types lose a smaller
    hull fraction (0.5 / 0.25 / 0.125).  Crew casualties are mirrored into
    the POP / milPOP / numLowerPOP counters at matching fractions.
    """
    limit = 0  # safety counter so the nested loops always terminate
    while(losses > 0.2 and num_units >= 0.2):
        # Frigates: half a ship per 0.5 loss points.
        while(player.military["frigates"] > 0.2 and losses > 0.2):
            player.military["frigates"] -= 0.5
            player.POP -= 0.1
            player.milPOP -= 0.1
            player.numLowerPOP -= 0.1
            num_units -= 0.5
            losses -= 0.5
            limit += 1
            if limit > 20:
                break
        # Ironclads: a quarter ship per 0.5 loss points (tougher hull).
        while(player.military["iron_clad"] >= 0.2 and losses > 0.2):
            player.military["iron_clad"] -= 0.25
            player.POP -= 0.05
            player.milPOP -= 0.05
            player.numLowerPOP -= 0.05
            num_units -= 0.25
            losses -= 0.5
            limit += 1
            if limit > 30:
                break
        # Battleships: an eighth of a ship per 0.5 loss points.
        while(player.military["battle_ship"] >= 0.2 and losses > 0.2):
            player.military["battle_ship"] -= 0.125
            player.POP -= 0.025
            player.milPOP -= 0.025
            player.numLowerPOP -= 0.025
            num_units -= 0.125
            losses -= 0.5
            limit += 1
            if limit > 40:
                break
        limit += 1
        # Hard cap: bail out even if losses remain unapplied.
        if limit > 60:
            return num_units
    return num_units
def calculate_ammo_needed_navy(player):
    """Return the total cannon ammunition the fleet consumes per battle
    round (ship count times that ship type's per-unit ammo_use)."""
    total = 0
    for ship, stats in (("frigates", player.frigates),
                        ("iron_clad", player.iron_clad),
                        ("battle_ship", player.battle_ship)):
        total += player.military[ship] * stats["ammo_use"]
    return total
def calculate_oil_needed_navy(player):
    """Return the oil the navy consumes per battle round.

    Only battleships burn oil (count times per-unit oil_use).
    """
    return player.military["battle_ship"] * player.battle_ship["oil_use"]
def naval_battle(p1, p2, market, relations, prov = " "):
    """Resolve a fleet engagement between attacker *p1* and defender *p2*.

    Fights round after round: each round pays ammo/oil upkeep (shortfalls
    scale a side's strength down, floored at 30%), trades losses, then
    checks end conditions (negligible losses, heavy attrition relative to
    the initial fleets, or annihilation).  Returns the winner's name; if
    the attacker wins and *prov* is a province of p2, it changes hands.
    """
    print("A naval battle is being fought between %s and %s !! \n" % (p1.name, p2.name))
    market.report.append("A naval battle is being fought between %s and %s !! \n" % (p1.name, p2.name))
    cont = input()  # pause so the player can read the announcement
    winner = ""
    att_initial_navy = calculate_number_of_ships(p1)
    def_initial_navy = calculate_number_of_ships(p2)
    print("%s has a fleet size of %s, %s has a fleet size of %s \n" % (p1.name, att_initial_navy, p2.name, def_initial_navy))
    market.report.append("%s has a fleet size of %s, %s has a fleet size of %s \n" % (p1.name, att_initial_navy, p2.name, def_initial_navy))
    while(True):
        att_number_units_navy = calculate_number_of_ships(p1)
        def_number_units_navy = calculate_number_of_ships(p2)
        att_ammo = calculate_ammo_needed_navy(p1)
        def_ammo = calculate_ammo_needed_navy(p2)
        att_oil = calculate_oil_needed_navy(p1)
        def_oil = calculate_oil_needed_navy(p2)
        att_str = calculate_naval_strength(p1)
        def_str = calculate_naval_strength(p2)
        print("%s has naval strength of %s, %s has naval strength of %s \n" % (p1.name, att_str, p2.name, def_str))
        market.report.append("%s has naval strength of %s, %s has naval strength of %s \n" % (p1.name, att_str, p2.name, def_str))
        # --- ammunition upkeep: a shortfall scales strength down (floor 30%) ---
        p1a_deficit = p1.goods["cannons"] - att_ammo
        if p1a_deficit < 0:
            print("%s has an ammo deficit of %s" % (p1.name, abs(p1a_deficit)))
            market.report.append("%s has an ammo deficit of %s" % (p1.name, abs(p1a_deficit)))
            penalty = abs(p1a_deficit / ((att_ammo * 2) + 0.1))
            att_str = att_str * max((1 - penalty), 0.3)
            p1.goods["cannons"] = 0
        else:
            p1.goods["cannons"] -= att_ammo
        p2a_deficit = p2.goods["cannons"] - def_ammo
        if p2a_deficit < 0:
            print("%s has an ammo deficit of %s" % (p2.name, abs(p2a_deficit)))
            market.report.append("%s has an ammo deficit of %s" % (p2.name, abs(p2a_deficit)))
            penalty = abs(p2a_deficit / ((def_ammo * 2) + 0.1))
            # BUG FIX: the defender's ammo shortage used to overwrite
            # att_str; it must penalise the defender's own strength.
            def_str = def_str * max((1 - penalty), 0.3)
            p2.goods["cannons"] = 0
        else:
            p2.goods["cannons"] -= def_ammo
        # --- oil upkeep (battleships only), same penalty scheme ---
        p1o_deficit = p1.resources["oil"] - att_oil
        if p1o_deficit < 0:
            print("%s has an oil deficit of %s" % (p1.name, abs(p1o_deficit)))
            market.report.append("%s has an oil deficit of %s" % (p1.name, abs(p1o_deficit)))
            temp = abs(p1o_deficit / ((att_oil * 2) + 0.1))
            att_str = att_str * max(1 - temp, 0.3)
            p1.resources["oil"] = 0
        else:
            p1.resources["oil"] -= att_oil
        p2o_deficit = p2.resources["oil"] - def_oil
        if p2o_deficit < 0:
            print("%s has an oil deficit of %s" % (p2.name, abs(p2o_deficit)))
            market.report.append("%s has an oil deficit of %s" % (p2.name, abs(p2o_deficit)))
            # BUG FIX: added the +0.1 guard (as on the attacker side above)
            # so a zero oil requirement cannot divide by zero.
            temp = abs(p2o_deficit / ((def_oil * 2) + 0.1))
            def_str = def_str * max(1 - temp, 0.3)
            p2.resources["oil"] = 0
        else:
            p2.resources["oil"] -= def_oil
        # BUG FIX: these four messages had two arguments but only one %s,
        # which raised TypeError at runtime.
        print("%s's effective naval strenght is: %s" % (p1.name, att_str))
        print("%s's effective naval strenght is: %s" % (p2.name, def_str))
        market.report.append("%s's effective naval strenght is: %s" % (p1.name, att_str))
        market.report.append("%s's effective naval strenght is: %s" % (p2.name, def_str))
        # NOTE(review): both loss terms use the attacker-derived loss_mod;
        # presumably intentional, but verify against the land-combat code.
        temp = max(1, att_number_units_navy * 0.25)
        loss_mod = att_str / (temp + 0.001)
        att_losses = def_str / (loss_mod + 0.001)
        def_losses = att_str / (loss_mod + 0.001)
        print("%s takes %s losses, %s takes %s losses \n" % (p1.name, att_losses, p2.name, def_losses))
        market.report.append("%s takes %s losses, %s takes %s losses \n" % (p1.name, att_losses, p2.name, def_losses))
        att_number_units_navy = distribute_naval_losses(p1, att_losses, att_number_units_navy)
        def_number_units_navy = distribute_naval_losses(p2, def_losses, def_number_units_navy)
        print("%s has %s units remaining, %s has %s units remaining \n" % (p1.name, att_number_units_navy, p2.name, def_number_units_navy))
        market.report.append("%s has %s units remaining, %s has %s units remaining \n" % (p1.name, att_number_units_navy, p2.name, def_number_units_navy))
        # End conditions: negligible losses, heavy attrition, or annihilation.
        done = False
        if att_losses < 0.50 and def_losses < 0.50:
            done = True
        if(att_number_units_navy < att_initial_navy * 0.43):
            done = True
        if(def_number_units_navy < def_initial_navy * 0.35):
            done = True
        if att_number_units_navy < 1 or def_number_units_navy < 1:
            done = True
        if(done == True):
            if att_number_units_navy > def_number_units_navy:
                print("%s had defeated %s at sea! \n" % (p1.name, p2.name))
                market.report.append("%s had defeated %s at sea! \n" % (p1.name, p2.name))
                winner = p1.name
                if prov in p2.provinces:
                    # NOTE(review): `players` is not defined in this scope;
                    # this call relies on a module-level `players` existing.
                    gain_province(p1, p2, prov, players, market, relations)
                return winner
            else:
                print("%s had defeated %s at sea! \n" % (p2.name, p1.name))
                market.report.append("%s had defeated %s at sea! \n" % (p2.name, p1.name))
                winner = p2.name
                return winner
        else:
            if type(p1) == Human:
                cont = input("%s, you currently have %s units, the enemy has %s units, would you like to continue the assult? (y,n)" \
                    % (p1.name, att_number_units_navy, def_number_units_navy))
                if(cont == "n"):
                    return p2.name
                if cont == "y":
                    continue
            if type(p1) == AI:
                # BUG FIX (consistency with the amphibious-assault AI):
                # this checked `type(p2) == AI` although the *attacker* p1
                # decides whether to press on, and used `att_str * 86`,
                # which meant the AI effectively never withdrew; the land
                # battle uses the 0.85 factor.
                att_str = calculate_naval_strength(p1)
                def_str = calculate_naval_strength(p2)
                if att_str * 0.85 < def_str:
                    return p2.name
                else:
                    continue
def amphib_prelude(player, other, annex, players, market, relations):
    """Stage an amphibious invasion by *player* against *other*.

    Raises a transport force via naval_transport(); the defender may
    intercept at sea first (a human by choice, an AI whenever its navy is
    at least as strong as the attacker's).  If the convoy gets through,
    the landing is resolved by amph_combat().
    """
    amount = naval_transport(player, other)
    # No infantry loaded -> no invasion force could be raised.
    if amount["infantry"] == 0:
        return
    if type(other) == Human:
        print("That dastardly %s is sending an armada filled with soldiers to your homeland! \n" % (player.name))
        print("His navy has %s frigates and %s ironclads. Your navy has %s frigates and %s ironclads" \
            % (player.military["frigates"], (player.military["iron_clad"]), other.military["frigates"], other.military["iron_clad"] ))
        inter = ""
        while inter != "y" and inter != "n":
            inter = input("Do you wish to send your army to intercept? (y/n)")
        if inter == "n":
            print("We will meet the enemy on the ground! \n")
            # BUG FIX: this call dropped the `annex` argument (cf. the
            # seven-argument amph_combat calls below).
            amph_combat(player, other, amount, annex, players, market, relations)
        else:
            print("Let us stop them in their tracks! \n")
            result = naval_battle(player, other, market, relations)
            if result == other.name:
                print("%s attempts to sail his army to %s has failed\n" % (player.name, other.name))
            elif result == player.name:
                print("%s has sailed his navy to %s and is about to invade! \n" % (player.name, other.name))
                # BUG FIX: this call dropped the `relations` argument.
                amph_combat(player, other, amount, annex, players, market, relations)
    elif calculate_naval_strength(other) >= calculate_naval_strength(player):
        result = naval_battle(player, other, market, relations)
        if result == other.name:
            print("%s attempts to sail his army to %s has failed\n" % (player.name, other.name))
            market.report.append("%s attempts to sail his army to %s has failed\n" % (player.name, other.name))
            return
        elif result == player.name:
            print("%s has sailed his navy to %s and is about to invade! \n" % (player.name, other.name))
            # BUG FIX: the report line duplicated the "has failed" message;
            # it now matches the printed success message.
            market.report.append("%s has sailed his navy to %s and is about to invade! \n" % (player.name, other.name))
            amph_combat(player, other, amount, annex, players, market, relations)
            return
    else:
        amph_combat(player, other, amount, annex, players, market, relations)
| [
"noreply@github.com"
] | noreply@github.com |
d0c640d81a44dfb8f33f28efc689acec9b3b0984 | 1ea5bc7b29b13166dc722816304f9b05eef7b6c9 | /utils/utils_multiWOZ_DST.py | 218764be1dea032f61c53ae8cfd590b46cc8902c | [] | no_license | yuya0430/history_extraction | a31d9cb356fb1d87ab8518383c7b325814e9c35a | fa574f46c550ce6af766a4b8205899db0178483a | refs/heads/master | 2023-03-19T01:56:51.947770 | 2021-03-12T12:22:45 | 2021-03-12T12:22:45 | 346,677,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,991 | py | import json
import torch
import torch.utils.data as data
import unicodedata
import string
import re
import random
import time
import math
import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.config import *
import ast
from collections import Counter
from collections import OrderedDict
from embeddings import GloveEmbedding, KazumaCharEmbedding
from tqdm import tqdm
import os
import pickle
from random import shuffle
from .fix_label import *
# Service (domain) registries; filled as side effects of
# get_all_slot_information / get_slot_information when the schemas are read.
EXPERIMENT_SERVICES = []  # all services from train+dev schemas; its index order is used by Dataset.preprocess_domain
TRAIN_SERVICES = []  # services present in the training schema
DEV_SERVICES = []  # services present in the dev schema
# Services held out of the train/dev slot lists and targeted at test time
# (unseen-service experiment setup; see read_langs / prepare_data_seq).
except_services = [u'Hotels_3',u'Services_3',u'Alarm_1']
class Lang:
    """Vocabulary mapping between words and integer indices.

    The special tokens PAD/SOS/EOS/UNK (indices from utils.config) are
    pre-registered; the vocabulary grows as new words are indexed.
    """

    def __init__(self):
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS", UNK_token: 'UNK'}
        self.n_words = len(self.index2word)  # Count default tokens
        # Inverse mapping, kept in sync by index_word().
        # (BUG FIX: removed a dead `self.word2index = {}` that was
        # immediately overwritten by this line.)
        self.word2index = {v: k for k, v in self.index2word.items()}

    def index_words(self, sent, type):
        """Register every word of *sent*; *type* selects its structure.

        'utter'  : whitespace-tokenized utterance string
        'slot'   : iterable of "domain-slot" strings
        'belief' : dict of {"domain-slot": value}
        """
        if type == 'utter':
            for word in sent.split():
                self.index_word(word)
        elif type == 'slot':
            for slot in sent:
                d, s = slot.split("-")
                self.index_word(d)
                for ss in s.split():
                    self.index_word(ss)
        elif type == 'belief':
            for slot, value in sent.items():
                d, s = slot.split("-")
                self.index_word(d)
                for ss in s.split():
                    self.index_word(ss)
                for v in value.split():
                    self.index_word(v)

    def index_word(self, word):
        """Add *word* to the vocabulary if it has not been seen yet."""
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.index2word[self.n_words] = word
            self.n_words += 1
class Dataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader.

    Holds one entry per dialogue turn; all fields arrive pre-extracted in
    *data_info*, a dict of parallel lists produced by read_langs/get_seq.
    Word-to-index conversion happens lazily in __getitem__.
    """
    def __init__(self, data_info, src_word2id, trg_word2id, sequicity, mem_word2id):
        """Reads source and target sequences from txt files."""
        self.ID = data_info['ID']
        self.turn_service = data_info['turn_service']
        self.turn_id = data_info['turn_id']
        self.dialog_history = data_info['dialog_history']
        self.turn_belief = data_info['turn_belief']
        self.gating_label = data_info['gating_label']
        self.turn_uttr = data_info['turn_uttr']
        self.generate_y = data_info["generate_y"]
        self.sequicity = sequicity
        self.num_total_seqs = len(self.dialog_history)
        # src/trg vocabularies are both `lang.word2index` in practice (see
        # get_seq); mem_word2id indexes belief-state tokens.
        self.src_word2id = src_word2id
        self.trg_word2id = trg_word2id
        self.mem_word2id = mem_word2id

    def __getitem__(self, index):
        """Returns one data pair (source and target)."""
        ID = self.ID[index]
        turn_id = self.turn_id[index]
        turn_belief = self.turn_belief[index]
        gating_label = self.gating_label[index]
        turn_uttr = self.turn_uttr[index]
        turn_service = self.preprocess_domain(self.turn_service[index])
        generate_y = self.generate_y[index]
        generate_y = self.preprocess_slot(generate_y, self.trg_word2id)
        context = self.dialog_history[index]
        context = self.preprocess(context, self.src_word2id)
        context_plain = self.dialog_history[index]
        item_info = {
            "ID":ID,
            "turn_id":turn_id,
            "turn_belief":turn_belief,
            "gating_label":gating_label,
            "context":context,
            "context_plain":context_plain,
            "turn_uttr_plain":turn_uttr,
            "turn_service":turn_service,
            "generate_y":generate_y,
        }
        return item_info

    def __len__(self):
        return self.num_total_seqs

    def preprocess(self, sequence, word2idx):
        """Converts words to ids."""
        # Out-of-vocabulary words map to UNK_token (from utils.config).
        story = [word2idx[word] if word in word2idx else UNK_token for word in sequence.split()]
        story = torch.Tensor(story)
        return story

    def preprocess_slot(self, sequence, word2idx):
        """Converts words to ids."""
        # One id-list per slot value, each terminated by EOS_token; padding
        # to a rectangular tensor is done later, in collate_fn.
        story = []
        for value in sequence:
            v = [word2idx[word] if word in word2idx else UNK_token for word in value.split()] + [EOS_token]
            story.append(v)
        return story

    def preprocess_memory(self, sequence, word2idx):
        """Converts (domain, slot, value) triples to id quadruples.

        Not referenced by __getitem__ above; apparently kept for the
        memory-based model variants.
        """
        story = []
        for value in sequence:
            d, s, v = value
            s = s.replace("book","").strip()
            # separate each word in value to different memory slot
            for wi, vw in enumerate(v.split()):
                idx = [word2idx[word] if word in word2idx else UNK_token for word in [d, s, "t{}".format(wi), vw]]
                story.append(idx)
        story = torch.Tensor(story)
        return story

    def preprocess_domain(self, turn_service):
        """Map a service name to its index in EXPERIMENT_SERVICES."""
        services = {}
        for idx, service in enumerate(EXPERIMENT_SERVICES):
            services[service] = idx
        return services[turn_service]
def collate_fn(data):
    """Batch a list of Dataset items into padded tensors.

    Sorts the batch by context length (descending, for
    pack_padded_sequence), pads contexts with 1s (torch.ones) and slot
    values with PAD_token, converts labels to tensors, and moves
    everything to GPU when USE_CUDA is set.
    """
    def merge(sequences):
        '''
        merge from batch * sent_len to batch * max_len
        '''
        lengths = [len(seq) for seq in sequences]
        max_len = 1 if max(lengths)==0 else max(lengths)
        # torch.ones -> every padding position holds 1.
        padded_seqs = torch.ones(len(sequences), max_len).long()
        for i, seq in enumerate(sequences):
            end = lengths[i]
            padded_seqs[i, :end] = seq[:end]
        padded_seqs = padded_seqs.detach()
        return padded_seqs, lengths

    def merge_multi_response(sequences):
        '''
        merge from batch * nb_slot * slot_len to batch * nb_slot * max_slot_len
        '''
        lengths = []
        for bsz_seq in sequences:
            length = [len(v) for v in bsz_seq]
            lengths.append(length)
        max_len = max([max(l) for l in lengths])
        padded_seqs = []
        for bsz_seq in sequences:
            pad_seq = []
            for v in bsz_seq:
                v = v + [PAD_token] * (max_len-len(v))
                pad_seq.append(v)
            padded_seqs.append(pad_seq)
        padded_seqs = torch.tensor(padded_seqs)
        lengths = torch.tensor(lengths)
        return padded_seqs, lengths

    def merge_memory(sequences):
        # Pads sequences of (d, s, t_i, word) id quadruples; not called in
        # this collate path (kept for the memory variants).
        lengths = [len(seq) for seq in sequences]
        max_len = 1 if max(lengths)==0 else max(lengths) # avoid the empty belief state issue
        padded_seqs = torch.ones(len(sequences), max_len, 4).long()
        for i, seq in enumerate(sequences):
            end = lengths[i]
            if len(seq) != 0:
                padded_seqs[i,:end,:] = seq[:end]
        return padded_seqs, lengths

    # sort a list by sequence length (descending order) to use pack_padded_sequence
    data.sort(key=lambda x: len(x['context']), reverse=True)
    # Transpose list-of-dicts into dict-of-lists.
    item_info = {}
    for key in data[0].keys():
        item_info[key] = [d[key] for d in data]
    # merge sequences
    src_seqs, src_lengths = merge(item_info['context'])
    y_seqs, y_lengths = merge_multi_response(item_info["generate_y"])
    gating_label = torch.tensor(item_info["gating_label"])
    turn_service = torch.tensor(item_info["turn_service"])
    if USE_CUDA:
        src_seqs = src_seqs.cuda()
        gating_label = gating_label.cuda()
        turn_service = turn_service.cuda()
        y_seqs = y_seqs.cuda()
        y_lengths = y_lengths.cuda()
    item_info["context"] = src_seqs
    item_info["context_len"] = src_lengths
    item_info["gating_label"] = gating_label
    item_info["turn_service"] = turn_service
    item_info["generate_y"] = y_seqs
    item_info["y_lengths"] = y_lengths
    return item_info
def read_langs(file_name, gating_dict, SLOTS, dataset, lang, mem_lang, sequicity, training, max_line = None):
    """Load one DSTC8 dialogue split and flatten it into per-turn examples.

    Each example holds the (truncated) dialogue history, the turn's belief
    state as generation targets plus per-slot gate labels, and bookkeeping
    ids.  As a side effect the vocabularies *lang* / *mem_lang* are grown.
    Returns (examples, longest_history_in_words, slot_temp), where
    slot_temp is the slot list computed for the last processed turn.
    NOTE(review): if the file contains no processable turn, slot_temp is
    unbound at the return — verify callers never hit that case.
    """
    print(("Reading from {}".format(file_name)))
    data = []
    max_resp_len, max_value_len = 0, 0
    service_counter = {}
    with open(file_name) as f:
        dials = json.load(f)
        # create vocab first
        for dial_dict in dials:
            if (args["all_vocab"] or dataset=="train") and training:
                for ti, turn in enumerate(dial_dict["turns"]):
                    lang.index_words(turn["system_utterance"], 'utter')
                    lang.index_words(turn["utterance"], 'utter')
        # determine training data ratio, default is 100%
        if training and dataset=="train" and args["data_ratio"]!=100:
            random.Random(10).shuffle(dials)
            dials = dials[:int(len(dials)*0.01*args["data_ratio"])]
        cnt_lin = 1
        for dial_dict in dials:
            dialog_history = ""
            last_belief_dict = {}
            dial_services = dial_dict["services"]
            # Filtering and counting domains
            for service in dial_dict["services"]:
                if service not in EXPERIMENT_SERVICES:
                    continue
                if service not in service_counter.keys():
                    service_counter[service] = 0
                service_counter[service] += 1
            # Unseen domain setting
            if args["only_service"] != "" and args["only_service"] not in dial_dict["services"]:
                continue
            # NOTE(review): the `continue` below only skips within this
            # inner loop, so the dialogue itself is never filtered out —
            # presumably it was meant to skip the whole dialogue; verify.
            for excepts in except_services :
                if (len(except_services) != 0 and dataset == "test" and excepts not in dial_dict["services"]) or \
                    (len(except_services) != 0 and dataset != "test" and [excepts] == dial_dict["services"]):
                    continue
            # Reading data
            for ti, turn in enumerate(dial_dict["turns"]):
                turn_service = turn["frames"][0]["service"]
                turn_id = turn["turn_idx"]
                turn_uttr = turn["system_utterance"] + " ; " + turn["utterance"]
                turn_uttr_strip = turn_uttr.strip()
                dialog_history += (turn["system_utterance"] + " ; " + turn["utterance"] + " ; ")
                # Keep only the last three turn pairs of history (each turn
                # contributes two ';'-separated segments).
                if turn_id > 2:
                    dhistory = dialog_history.split(';')
                    dialog_history = ""
                    i = len(dhistory) - 7
                    while i < len(dhistory)-1:
                        dialog_history += dhistory[i] + ";" + dhistory[i+1] + ";"
                        i += 2
                source_text = dialog_history.strip()
                turn_belief_dict = fix_general_label_error(turn["belief_state"], False, SLOTS)
                # Generate domain-dependent slot list
                slot_temp = SLOTS
                if dataset == "train":
                    # Removed because they were absent from the Single-Domain data.
                    except_services2 = except_services + [u'Travel_1', u'Weather_1']
                    if len(except_services) != 0:
                        # Drop all held-out services from slots and labels.
                        for idx, excepts in enumerate(except_services2):
                            slot_temp = [k for k in slot_temp if excepts not in k]
                            turn_belief_dict = OrderedDict([(k, v) for k, v in turn_belief_dict.items() if excepts not in k])
                    elif args["only_service"] != "":
                        slot_temp = [k for k in SLOTS if args["only_service"] in k]
                        turn_belief_dict = OrderedDict([(k, v) for k, v in turn_belief_dict.items() if args["only_service"] in k])
                elif dataset == "dev":
                    if len(except_services) != 0:
                        for idx, excepts in enumerate(except_services):
                            slot_temp = [k for k in slot_temp if excepts not in k]
                            turn_belief_dict = OrderedDict([(k, v) for k, v in turn_belief_dict.items() if excepts not in k])
                    elif args["only_service"] != "":
                        slot_temp = [k for k in SLOTS if args["only_service"] in k]
                        turn_belief_dict = OrderedDict([(k, v) for k, v in turn_belief_dict.items() if args["only_service"] in k])
                else:
                    # Test split: keep ONLY the held-out services.
                    if len(except_services) != 0:
                        s = []
                        t = []
                        for excepts in except_services:
                            s += [k for k in SLOTS if excepts in k]
                            t += [(k, v) for k, v in turn_belief_dict.items() if excepts in k]
                        slot_temp = s
                        turn_belief_dict = OrderedDict(t)
                    elif args["only_service"] != "":
                        slot_temp = [k for k in SLOTS if args["only_service"] in k]
                        turn_belief_dict = OrderedDict([(k, v) for k, v in turn_belief_dict.items() if args["only_service"] in k])
                turn_belief_list = [str(k)+'-'+str(v) for k, v in turn_belief_dict.items()]
                if (args["all_vocab"] or dataset=="train") and training:
                    mem_lang.index_words(turn_belief_dict, 'belief')
                # "Extract only the slots of the current service(s)."
                """cur_serviceのスロットだけ抜き取る"""
                for sidx, service in enumerate(dial_services):
                    if sidx == 0:
                        turn_slot = [k for k in slot_temp if service in k]
                    else:
                        turn_slot += [k for k in slot_temp if service in k]
                class_label, generate_y, slot_mask, gating_label = [], [], [], []
                start_ptr_label, end_ptr_label = [], []
                # One generation target + gate label per slot in slot_temp.
                for slot in slot_temp:
                    if slot in turn_belief_dict.keys():
                        generate_y.append(turn_belief_dict[slot])
                        if turn_belief_dict[slot] == "dontcare":
                            gating_label.append(gating_dict["dontcare"])
                        elif turn_belief_dict[slot] == "none":
                            gating_label.append(gating_dict["none"])
                        else:
                            gating_label.append(gating_dict["ptr"])
                        # Track the longest value (in characters) seen.
                        if max_value_len < len(turn_belief_dict[slot]):
                            max_value_len = len(turn_belief_dict[slot])
                    else:
                        generate_y.append("none")
                        gating_label.append(gating_dict["none"])
                data_detail = {
                    "ID":dial_dict["dialogue_id"],
                    "services":dial_dict["services"],
                    "turn_service":turn_service,
                    "turn_id":turn_id,
                    "dialog_history":source_text,
                    "turn_belief":turn_belief_list,
                    "gating_label":gating_label,
                    "turn_uttr":turn_uttr_strip,
                    'generate_y':generate_y
                }
                data.append(data_detail)
                if max_resp_len < len(source_text.split()):
                    max_resp_len = len(source_text.split())
            cnt_lin += 1
            if(max_line and cnt_lin>=max_line):
                break
    # add t{} to the lang file
    if "t{}".format(max_value_len-1) not in mem_lang.word2index.keys() and training:
        for time_i in range(max_value_len):
            mem_lang.index_words("t{}".format(time_i), 'utter')
    print("service_counter", service_counter)
    return data, max_resp_len, slot_temp
def get_seq(pairs, lang, mem_lang, batch_size, type, sequicity):
    """Wrap a list of turn dicts (from read_langs) in a DataLoader.

    *type* is True for the training split: it enables shuffling and, when
    args["imbalance_sampler"] is set, service-balanced sampling via
    ImbalancedDatasetSampler.
    """
    if(type and args['fisher_sample']>0):
        # Optional sub-sampling of the training pairs (fisher_sample > 0).
        shuffle(pairs)
        pairs = pairs[:args['fisher_sample']]
    # Transpose list-of-dicts into the dict-of-lists form Dataset expects.
    data_info = {}
    data_keys = pairs[0].keys()
    for k in data_keys:
        data_info[k] = []
    for pair in pairs:
        for k in data_keys:
            data_info[k].append(pair[k])
    dataset = Dataset(data_info, lang.word2index, lang.word2index, sequicity, mem_lang.word2index)
    if args["imbalance_sampler"] and type:
        # shuffle= must stay off when an explicit sampler is supplied.
        data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                                  batch_size=batch_size,
                                                  collate_fn=collate_fn,
                                                  sampler=ImbalancedDatasetSampler(dataset))
    else:
        data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                                  batch_size=batch_size,
                                                  shuffle=type,
                                                  collate_fn=collate_fn)
    return data_loader
def dump_pretrained_emb(word2index, index2word, dump_path):
    """Build a GloVe + Kazuma-char embedding matrix for the vocabulary and
    write it to *dump_path* as JSON (row i is the vector of word i)."""
    print("Dumping pretrained embeddings...")
    providers = [GloveEmbedding(), KazumaCharEmbedding()]
    matrix = []
    for idx in tqdm(range(len(word2index))):
        word = index2word[idx]
        # Concatenate the vectors from both embedding providers.
        vector = []
        for provider in providers:
            vector.extend(provider.emb(word, default='zero'))
        matrix.append(vector)
    with open(dump_path, 'wt') as handle:
        json.dump(matrix, handle)
def get_all_slot_information(schemas, schemas2):
    """Collect "service-slot" names from the train and dev schema lists.

    Side effect: appends every service name to the module-level
    EXPERIMENT_SERVICES list (dev services only if not already present).
    """
    slots = []
    for schema in schemas:
        EXPERIMENT_SERVICES.append(schema['service_name'])
        slots.extend('{}-{}'.format(schema['service_name'], slot['name'])
                     for slot in schema['slots'])
    for schema in schemas2:
        if schema['service_name'] not in EXPERIMENT_SERVICES:
            EXPERIMENT_SERVICES.append(schema['service_name'])
        slots.extend('{}-{}'.format(schema['service_name'], slot['name'])
                     for slot in schema['slots'])
    return slots
def get_slot_information(schemas, train = True):
    """Collect "service-slot" names from one schema list.

    Side effect: records each schema's service name in the module-level
    TRAIN_SERVICES (train=True) or DEV_SERVICES (train=False) list.
    """
    registry = TRAIN_SERVICES if train else DEV_SERVICES
    slots = []
    for schema in schemas:
        registry.append(schema['service_name'])
        for slot in schema['slots']:
            slots.append('{}-{}'.format(schema['service_name'], slot['name']))
    return slots
def prepare_data_seq(training, task="dst", sequicity=0, batch_size=100):
    """Build train/dev/test DataLoaders, vocabularies and slot lists for
    the DSTC8 DST task.

    When *training*, vocabularies are built from data (and pickled, or
    loaded if a pickle already exists); otherwise they are loaded from the
    save folder and the train loader is empty.  Returns
    (train, dev, test, test_4d, [lang, mem_lang], SLOTS_LIST, gating_dict,
    nb_train_vocab).  Note: the *task* parameter is unused here; the
    folder name uses args['task'].
    """
    eval_batch = args["eval_batch"] if args["eval_batch"] else batch_size
    file_train = 'data/dstc8/train_dials.json'
    file_dev = 'data/dstc8/dev_dials.json'
    file_test = 'data/dstc8/test_dials.json'
    # Create saving folder
    if args['path']:
        folder_name = args['path'].rsplit('/', 2)[0] + '/'
    else:
        folder_name = 'save/{}-'.format(args["decoder"])+args["addName"]+"dstc8"+str(args['task'])+'/'
    print("folder_name", folder_name)
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    # load domain-slot pairs from ontology
    schema_train = json.load(open("data/dstc8/train/schema.json", 'r'))
    schema_dev = json.load(open("data/dstc8/dev/schema.json", 'r'))
    # Loaded but not referenced below (MultiWOZ leftover).
    ontology = json.load(open("data/multi-woz/MULTIWOZ2 2/ontology.json"))
    ALL_SLOTS = get_all_slot_information(schema_train, schema_dev)
    TRAIN_SLOTS = get_slot_information(schema_train)
    DEV_SLOTS = get_slot_information(schema_dev, False)
    # Gate classes used for the per-slot gating labels.
    gating_dict = {"ptr":0, "dontcare":1, "none":2}
    # Vocabulary
    lang, mem_lang = Lang(), Lang()
    lang.index_words(ALL_SLOTS, 'slot')
    mem_lang.index_words(ALL_SLOTS, 'slot')
    lang_name = 'lang-all.pkl' if args["all_vocab"] else 'lang-train.pkl'
    mem_lang_name = 'mem-lang-all.pkl' if args["all_vocab"] else 'mem-lang-train.pkl'
    if training:
        pair_train, train_max_len, slot_train = read_langs(file_train, gating_dict, TRAIN_SLOTS, "train", lang, mem_lang, sequicity, training)
        train = get_seq(pair_train, lang, mem_lang, batch_size, True, sequicity)
        nb_train_vocab = lang.n_words
        pair_dev, dev_max_len, slot_dev = read_langs(file_dev, gating_dict, DEV_SLOTS, "dev", lang, mem_lang, sequicity, training)
        dev = get_seq(pair_dev, lang, mem_lang, eval_batch, False, sequicity)
        pair_test, test_max_len, slot_test = read_langs(file_test, gating_dict, ALL_SLOTS, "test", lang, mem_lang, sequicity, training)
        test = get_seq(pair_test, lang, mem_lang, eval_batch, False, sequicity)
        # Reuse previously pickled vocabularies if present, else dump the
        # freshly built ones.
        if os.path.exists(folder_name+lang_name) and os.path.exists(folder_name+mem_lang_name):
            print("[Info] Loading saved lang files...")
            with open(folder_name+lang_name, 'rb') as handle:
                lang = pickle.load(handle)
            with open(folder_name+mem_lang_name, 'rb') as handle:
                mem_lang = pickle.load(handle)
        else:
            print("[Info] Dumping lang files...")
            with open(folder_name+lang_name, 'wb') as handle:
                pickle.dump(lang, handle)
            with open(folder_name+mem_lang_name, 'wb') as handle:
                pickle.dump(mem_lang, handle)
        emb_dump_path = 'data/emb{}.json'.format(len(lang.index2word))
        if not os.path.exists(emb_dump_path) and args["load_embedding"]:
            dump_pretrained_emb(lang.word2index, lang.index2word, emb_dump_path)
    else:
        # Evaluation-only path: vocabularies come from the saved pickles.
        with open(folder_name+lang_name, 'rb') as handle:
            lang = pickle.load(handle)
        with open(folder_name+mem_lang_name, 'rb') as handle:
            mem_lang = pickle.load(handle)
        pair_train, train_max_len, slot_train, train, nb_train_vocab = [], 0, {}, [], 0
        pair_dev, dev_max_len, slot_dev = read_langs(file_dev, gating_dict, DEV_SLOTS, "dev", lang, mem_lang, sequicity, training)
        dev = get_seq(pair_dev, lang, mem_lang, eval_batch, False, sequicity)
        pair_test, test_max_len, slot_test = read_langs(file_test, gating_dict, ALL_SLOTS, "test", lang, mem_lang, sequicity, training)
        test = get_seq(pair_test, lang, mem_lang, eval_batch, False, sequicity)
    # Extra test loader restricted to the dev slot list ("4d" variant).
    test_4d = []
    if len(except_services) != 0:
        pair_test_4d, _, _ = read_langs(file_test, gating_dict, DEV_SLOTS, "dev", lang, mem_lang, sequicity, training)
        test_4d = get_seq(pair_test_4d, lang, mem_lang, eval_batch, False, sequicity)
    max_word = max(train_max_len, dev_max_len, test_max_len) + 1
    print("Read %s pairs train" % len(pair_train))
    print("Read %s pairs dev" % len(pair_dev))
    print("Read %s pairs test" % len(pair_test))
    print("Vocab_size: %s " % lang.n_words)
    print("Vocab_size Training %s" % nb_train_vocab )
    print("Vocab_size Belief %s" % mem_lang.n_words )
    print("Max. length of dialog words for RNN: %s " % max_word)
    print("USE_CUDA={}".format(USE_CUDA))
    SLOTS_LIST = [ALL_SLOTS, slot_train, slot_dev, slot_test]
    print("[Train Set & Dev Set Slots]: Number is {} in total".format(str(len(SLOTS_LIST[0]))))
    print(SLOTS_LIST[0])
    print("[Test Set Slots]: Number is {} in total".format(str(len(SLOTS_LIST[3]))))
    print(SLOTS_LIST[3])
    LANG = [lang, mem_lang]
    return train, dev, test, test_4d, LANG, SLOTS_LIST, gating_dict, nb_train_vocab
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
    """Samples dataset indices with probability inversely proportional to
    the frequency of each sample's class (here: its turn service), so rare
    services are drawn about as often as common ones.

    Arguments:
        indices (list, optional): subset of indices to sample from
        num_samples (int, optional): number of samples to draw per epoch
    """

    def __init__(self, dataset, indices=None, num_samples=None):
        # Default to the whole dataset and one draw per considered index.
        self.indices = list(range(len(dataset))) if indices is None else indices
        self.num_samples = len(self.indices) if num_samples is None else num_samples
        # Class frequencies over the considered indices.
        label_freq = Counter(self._get_label(dataset, idx) for idx in self.indices)
        # Each sample is weighted by the inverse frequency of its class.
        self.weights = torch.DoubleTensor(
            [1.0 / label_freq[self._get_label(dataset, idx)] for idx in self.indices])

    def _get_label(self, dataset, idx):
        # The "class" of a sample is the service of its dialogue turn.
        return dataset.turn_service[idx]

    def __iter__(self):
        drawn = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return (self.indices[i] for i in drawn)

    def __len__(self):
        return self.num_samples
| [
"yuya0430@slp.nitech.ac.jp"
] | yuya0430@slp.nitech.ac.jp |
2b43c163f93eb4de5e42d1ac901eb0746bdf8560 | 5999393c996612e303c13ec525ff77a73c2803af | /blog/admin.py | 127981512aa33c37de038669e22da78e76a9c06a | [] | no_license | keeeboooo/my-first-blog | 7623f48da0725dbb6d68fcf77d0d357f30804e43 | 2c2add8ee690b33b0c6028e4638a184efd4f85ee | refs/heads/master | 2023-08-21T22:52:39.552305 | 2021-10-20T03:06:27 | 2021-10-20T03:06:27 | 374,272,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from django.contrib import admin
from .models import Post, Comment#, Rogin
admin.site.register(Post)
admin.site.register(Comment)
#admin.site.register(Rogin)
# Register your models here.
| [
"keisuke_onepiece@icloud.com"
] | keisuke_onepiece@icloud.com |
31d6541de7c09f85091fbf2e239202c78dfb15bb | a0f1970da405d05797807a4baa96e5ef1f891608 | /python/icp_test.py | efc3583db053e84bd1e0c3a523da003f78dfca47 | [] | no_license | B-urb/generic_object_tracking | 3e042000fdf24f69df46ca3231e53ff73c9e853e | 324bab0b37532d9f0bf571c934505f66df54a3c8 | refs/heads/master | 2023-05-05T17:11:23.291990 | 2021-05-16T23:09:05 | 2021-05-16T23:09:05 | 367,958,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,514 | py | import pyrealsense2 as rs
import numpy as np
import cv2
import open3d as o3d
from datetime import datetime
import matplotlib.pyplot as plt
import time
from app.tracking_object import TrackingObject
from sklearn.cluster import KMeans
def main():
    """Replay a recorded RealSense .bag file, convert each frame into an
    Open3D point cloud, cluster it with DBSCAN, wrap each cluster in a
    TrackingObject bounding box and visualize everything live.

    Press 'q' in the OpenCV window to quit.
    """
    #out = cv2.VideoWriter('./data/output.avi', -1, 20.0,(1280, 720))
    pipeline = rs.pipeline()
    config = rs.config()
    vis = o3d.visualization.Visualizer()
    vis.create_window('PCD', width=1280, height=720)
    pointcloud = o3d.geometry.PointCloud()
    vis.add_geometry(pointcloud)
    geom_added = False
    # note: using 640 x 480 depth resolution produces smooth depth boundaries
    # using rs.format.bgr8 for color image format for OpenCV based image visualization
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
    config.enable_device_from_file("./data/realsense.bag", repeat_playback=True)
    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()
    # We will not display the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3 # 3 meter
    clipping_distance = clipping_distance_in_meters / depth_scale
    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)
    # Streaming loop
    frame_count = 0
    intrinsics = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
    pcd_old = o3d.geometry.PointCloud()  # NOTE(review): appears unused - confirm before removing
    while True:
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        img_depth = o3d.geometry.Image(depth_image)
        img_color = o3d.geometry.Image(color_image)
        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            img_color, img_depth, depth_trunc=clipping_distance_in_meters, convert_rgb_to_intensity=False)
        pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intrinsics.width, intrinsics.height, intrinsics.fx,
                                                                     intrinsics.fy,
                                                                     intrinsics.ppx, intrinsics.ppy)
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, pinhole_camera_intrinsic)
        # o3d.visualization.draw_geometries([pcd])
        #print("Update")
        # Downsample to a 1 cm voxel grid to keep clustering affordable.
        pcd = pcd.voxel_down_sample(voxel_size=0.01)
        # Flip Y and Z so the cloud matches the viewer's orientation.
        pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        print(pcd)
        start = time.time()
        labels = np.array(
            pcd.cluster_dbscan(eps=0.02, min_points=10, print_progress=False))
        # with o3d.utility.VerbosityContextManager(
        #         o3d.utility.VerbosityLevel.Debug) as cm:
        #     labels = np.array(
        #         pcd.cluster_dbscan(eps=0.02, min_points=15, print_progress=False))
        end = time.time()
        print("Clustering took: " + str((end - start) * 1000) + "ms")
        max_label = labels.max()
        objects = []
        #todo paralelize
        for i in range(0, max_label + 1):
            objects.append(TrackingObject(i))
        # Assign every non-noise point (DBSCAN label != -1) to its cluster.
        point_array = np.asarray(pcd.points)
        for i, point in enumerate(point_array):
            cluster_id = labels[i]
            #print(cluster_id)
            if cluster_id != -1:
                objects[cluster_id].points.append(point)
        for obj in objects:
            obj.calc_bounding_box(intrinsics)
        print(f"point cloud has {max_label + 1} clusters")
        # Color points by cluster id; noise (label < 0) is painted black.
        colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
        colors[labels < 0] = 0
        pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
        #o3d.visualization.draw_geometries([pcd])
        for obj in objects:
            vis.add_geometry(obj.bounding_box)
            # cv2.rectangle(color_image, obj.bounding_box[0],obj.bounding_box[1], (0, 255, 0), 2)
        cv2.imshow('bgr', color_image)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        pointcloud.points = pcd.points
        pointcloud.colors = pcd.colors
        if geom_added == False:
            vis.add_geometry(pointcloud)
            geom_added = True
        #vis.capture_screen_image("./data/test.png")
        #screenshot = np.asanyarray(buf)
        #cv.ims
        #out.write(screenshot)
        vis.update_geometry(pointcloud)
        vis.poll_events()
        vis.update_renderer()
        frame_count += 1
    vis.destroy_window()
    del vis
    #out.release()
    pipeline.stop()
def extract_cluster_boxes():
    # Placeholder - not called anywhere in this script; bounding boxes are
    # currently produced per-object via TrackingObject.calc_bounding_box()
    # inside main().
    pass
if __name__ == "__main__":
main()
| [
"bjoclurban@gmail.com"
] | bjoclurban@gmail.com |
3f95ed6087c4acea580d8a19ca0f731e1e49c8bd | 84109b3edcf5171493c98d2d50a38f3e63dcc1af | /imap-filter/MailFilter/Condition.py | 722ca84ad9fb862e9f2418a9793e216fc0f8c71e | [] | no_license | josterpi/python | 2ce665e2330e68a571a9fe085fb6551270970c3a | fe6920764511837bf9694984e61c25c8f48a9e18 | refs/heads/master | 2021-01-23T15:51:06.424206 | 2009-09-20T17:29:44 | 2009-09-20T17:29:44 | 312,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py |
from Predicate import Predicate
import email
class Condition:
    """A single mail-filter condition.

    Binds a message field name ('Subject', 'Sender', 'To', ...) to one of
    the checker methods below; ``match(email)`` then applies the predicate
    to that field. Raises KeyError for an unsupported field name.
    """

    def __init__(self, subject, predicate, object):
        dispatch = {'Subject': self.subject,
                    'Sender': self.sender,
                    'To': self.to,
                    'Cc': self.cc,
                    'To or cc': self.toOrCc,
                    'X-Spam-Flag': self.xSpamFlag,
                    'Reply-To': self.replyTo}
        if subject not in dispatch:
            raise KeyError(subject)
        self.match = dispatch[subject]
        self.predicate = Predicate(predicate, object)
        self.object = object

    def _header(self, email, name):
        # Apply the predicate to one message header value.
        return self.predicate.match(email.get(name))

    def subject(self, email):
        return self._header(email, 'Subject')

    def sender(self, email):
        return self._header(email, 'From')

    def to(self, email):
        return self._header(email, 'To')

    def cc(self, email):
        return self._header(email, 'Cc')

    def toOrCc(self, email):
        return self.to(email) or self.cc(email)

    def xSpamFlag(self, email):
        return self._header(email, 'X-Spam-Flag')

    def replyTo(self, email):
        return self._header(email, 'Reply-To')
| [
"josterhouse@gmail.com"
] | josterhouse@gmail.com |
dd3e935fc5be1a0d0f88c73d2c306de12fb40f71 | db05b94cf771785f573866fa7093fab4cee840b5 | /load_model_and_predict.py | 420f4c7fc0154f673f6308600f3831d8b6a8f38f | [] | no_license | InformationExtractNJU/BertBilstmCrf | 3757a8767b98947d5f40e906b5703bafd1920120 | 6665228880cc346f3746f64fa75020abcf1dd4a2 | refs/heads/master | 2020-11-29T06:56:08.112389 | 2020-03-06T10:24:30 | 2020-03-06T10:24:30 | 230,051,916 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,520 | py | import os
import codecs
import re
import random
import string
from tqdm import tqdm
import pandas as pd
import numpy as np
from zhon.hanzi import punctuation
from sklearn.model_selection import train_test_split
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras_contrib.layers import CRF
import tensorflow as tf
import keras
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.optimizers import Adam
# from seqeval.metrics import precision_score, recall_score, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score,classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
import re
config_path = '../../chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '../../chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '../../chinese_L-12_H-768_A-12/vocab.txt'
# 自定义tokenizer
token_dict = {}
with codecs.open(dict_path,'r',encoding='utf-8') as f:
for line in f:
token = line.strip()
token_dict[token] = len(token_dict)
class OurTokenizer(Tokenizer):
    """Character-level tokenizer: characters present in the vocabulary map
    to themselves, whitespace maps to the reserved '[unused1]' token and
    anything else becomes '[UNK]'."""

    def _tokenize(self, text):
        return [
            ch if ch in self._token_dict
            else '[unused1]' if self._is_space(ch)
            else '[UNK]'
            for ch in text
        ]
tokenizer = OurTokenizer(token_dict)
# Load the training data.
def get_train_data():
    """Read sentence/label pairs from the relation training file.

    The file alternates lines: even lines hold the sentence text, odd
    lines hold the space-separated relation tags for the preceding
    sentence.

    :return: a list of (index, sentence_text, tags_string) tuples.
    """
    train_data = []
    count = 0
    # Fix: the original opened the file and never closed it; use a
    # context manager so the handle is always released.
    with open('../train_data/sentences_relation.txt',
              encoding='utf-8-sig') as reader:
        list_data = reader.readlines()
    for i, element in enumerate(list_data):
        if i % 2 != 0:
            tags_str = list_data[i]
            text_str = list_data[i - 1]
            # Strip spaces and the newline from the sentence; replace the
            # tag line's newline with a trailing space (downstream format).
            text_str = text_str.replace(' ', '')
            text_str = text_str.replace('\n', '')
            tags_str = tags_str.replace('\n', ' ')
            train_data.append((count, text_str, tags_str))
            count = count + 1
    return train_data
# 加载训练数据
train_data = get_train_data()
print('数据读取完毕')
print(len(train_data[0]))
print(train_data[0])
train_data=train_data
bert_model = load_trained_model_from_checkpoint(config_path,checkpoint_path,seq_len = None)
# 定义模型
x1_in = keras.layers.Input(shape=(None,))
x2_in = keras.layers.Input(shape=(None,))
bert_output = bert_model([x1_in,x2_in])
sen_vector = Lambda(lambda x: x[:, 0])(bert_output) # 取出[CLS]对应的向量用来做分类
out=Dense(12,activation='sigmoid')(sen_vector)
# lstm = keras.layers.Bidirectional(keras.layers.LSTM(units = 128,return_sequences = True))(bert_output )
# drop = keras.layers.Dropout(0.4)(lstm)
# dense = keras.layers.TimeDistributed(keras.layers.Dense(128,activation='relu'))(drop)
# crf = CRF(n_tags)
# out = crf(dense)
model = keras.models.Model(inputs=[x1_in,x2_in],outputs=out)
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
model.load_weights('model/model_01.hdf5')
X1_test, X2_test = [], []
# 对训练集进行处理
maxlen=512
for d in train_data:
text = d[1][:maxlen]
y = d[2][:maxlen]
x1, x2 = tokenizer.encode(first=text)
X1_test.append(x1)
X2_test.append(x2)
# print('#'*100)
# # print (y_train_list)
X1_test = keras.preprocessing.sequence.pad_sequences(maxlen=maxlen, sequences=X1_test, padding="post", value=0)
X2_test = keras.preprocessing.sequence.pad_sequences(maxlen=maxlen, sequences=X1_test, padding="post", value=0)
test_pred = model.predict([X1_test, X2_test], verbose=1)
# print(test_pred)
write_to_txt=[]
for i in range(len(X1_test)):
# print (type(X1_test[i]))
# print (type(test_pred[i]))
# print (X1_test[i])
# print (test_pred[i])
# pred_labels = np.array(listpred2label(test_pred[i].tolist()))
pred_labels = np.array(test_pred[i].tolist())
list_pred_labels = [str(x) for x in pred_labels]
str_pred_labels = ' '.join(list_pred_labels)
print (train_data[i])
write_to_txt.append(train_data[i][1])
write_to_txt.append('\n')
write_to_txt.append(str_pred_labels)
write_to_txt.append('\n')
output_path="Test_case/output.txt"
print(write_to_txt[i])
resultwrite = open(output_path, 'w', encoding='utf-8')
resultwrite.writelines(write_to_txt) | [
"zhuwk3@mail2.sysu.edu.cn"
] | zhuwk3@mail2.sysu.edu.cn |
72b7bb7acba687c0f6f14413cd6d43962e8a3351 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/common/Lib/encodings/iso2022_jp_ext.py | 79e0c5be45183dd71284af4365cf20ec67ea90b1 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 964 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/encodings/iso2022_jp_ext.py
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless codec: encode/decode delegate directly to the C-level
    # iso2022_jp_ext codec object created above.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    # Feed-as-you-go encoder; buffering lives in _multibytecodec, which
    # reads the `codec` class attribute.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    # Feed-as-you-go decoder counterpart of IncrementalEncoder.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reading wrapper around the multibyte codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writing wrapper around the multibyte codec.
    codec = codec
def getregentry():
    """Build the CodecInfo record consumed by the codecs registry."""
    entry = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=entry.encode,
        decode=entry.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
4437c2c917c441f1c78f609a916e6a8e0ffee660 | 95472c40c7fe6c4c64d0e9582ab764fea26c0457 | /DS/Graph_AdjacencyList.py | ef752333ad01f19d6a6e1d685de22c6e5dd64043 | [] | no_license | vinay10949/DataStructuresAndAlgorithms | a156627de3e0186866ad7bcbcac9aef4690aa20b | 88d35ad74c464438a2c8d6a1570741c39a539267 | refs/heads/master | 2023-02-20T23:47:28.128680 | 2021-01-22T10:14:42 | 2021-01-22T10:14:42 | 273,527,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | class Node:
def __init__(self,v):
self.vertex=v
self.next=None
class Graph:
#initialize matrix
def __init__(self, size):
self.V=size
self.graph = [None]*size
def add_edge(self,s,d):
if s==d:
print("Both vertices are same")
return
node=Node(d)
node.next=self.graph[s]
self.graph[s]=node
node=Node(s)
node.next=self.graph[d]
self.graph[d]=node
def print_graph(self):
for row in self.graph:
for i in range(self.V):
temp=self.graph[i]
while temp:
print(temp.vertex)
temp=temp.next
print("\n")
g = Graph(3)
g.add_edge(0, 1)
g.print_graph()
| [
"vinay10949@gmail.com"
] | vinay10949@gmail.com |
f691dfe6a51e168f138b493fc932ca5d154d959c | 4c429fb781600413c1a960fbba6e4a2661a6b6ab | /visio/hauptprogramme/asciio_layout_v2.py | a8a5e225e016993ed55cd2c249610c74144531c1 | [] | no_license | dasteihn/visio | fd66fc6d60f58d010a8badb4b7d62e23923d3b12 | 17d8d023bf984cb2f3d2f655da2384092e38e28d | refs/heads/master | 2020-04-11T13:04:38.825275 | 2018-12-14T15:30:28 | 2018-12-14T15:30:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,792 | py | from asciimatics.effects import Print, Clock
from asciimatics.renderers import BarChart, FigletText, Box, ColourImageFile
from asciimatics.scene import Scene
from asciimatics.widgets import Frame, ListBox, Layout, Label, Divider
from asciimatics.screen import Screen
import sys
from pathlib import Path
import json
new_cmd = False
ROWS = 30
COLS = 80
AKT_SENDER_BOX_MAX_LENGTH = 18
STANDARD_CONFIG = Path('../../musiko/dateien/Debuggg.json')
NOTFALL_CONFIG = Path('../../config-dateien/JSON_Default_Config.json')
LOGO = "../Logo/logo-test.png"
CONFIG_MEMB_AMOUNT = 20
# 6 sender above the current sender, 6 under
DISPLAY_SENDER_SYMM = 6
debug = open("debug.txt", "w")
# Returns the current volume in percent
def get_vol():
    # Stub: always reports 40% until a real mixer backend is hooked up.
    # Used as the live data source for the BarChart in run_display().
    return 40
def load_config():
    """Load the user config, falling back to the emergency config.

    Terminates the program when neither JSON file exists.
    """
    candidates = (
        (STANDARD_CONFIG, "Nutzer-Config geladen."),
        (NOTFALL_CONFIG, "Notfall-Config geladen"),
    )
    for cfg_path, message in candidates:
        if cfg_path.exists():
            with cfg_path.open() as handle:
                loaded = json.load(handle)
            print(message)
            return loaded
    print("Katastrophaler Fehler: Keine Config-Dateien. Beende...")
    sys.exit(1)
# Collect the sender names surrounding the currently selected station.
def parse_sender(config, akt_nr):
    """Return the DISPLAY_SENDER_SYMM sender names before the current
    station (wrapping to the end of the list), the station itself, and
    the DISPLAY_SENDER_SYMM names after it (wrapping modulo the list
    size). Every collected name is also written to the debug log."""
    stationen = config["senderliste"]
    namen = []
    wrap = 1
    # Walk backwards from the current station; indices below zero wrap
    # to the tail of the sender list.
    for offset in range(1, DISPLAY_SENDER_SYMM + 1):
        pos = akt_nr - offset
        if pos >= 0:
            namen.append(stationen[pos]["sendername"])
        else:
            namen.append(stationen[CONFIG_MEMB_AMOUNT - wrap]["sendername"])
            wrap += 1
    namen.reverse()
    # The current station plus the ones following it.
    for offset in range(0, DISPLAY_SENDER_SYMM + 1):
        namen.append(stationen[(akt_nr + offset) % CONFIG_MEMB_AMOUNT]["sendername"])
    for name in namen:
        debug.write(name)
    return namen
# Resolve the five preset buttons from the config. A preset of -1
# (unassigned button) falls back to the station whose list index equals
# the button number.
def parse_presets(config):
    namen = []
    for taste, preset in enumerate(config["presetliste"][:5]):
        if preset >= 0:
            namen.append(config["senderliste"][preset]["sendername"])
        else:
            namen.append(config["senderliste"][taste]["sendername"])
            debug.write(str(taste))
    return namen
def format_sl_layout(layout):
    # Arrow markers flanking the sender list: column 0 points at the
    # current station from the left, column 2 from the right; the
    # dividers push the arrows down to the middle row.
    layout.add_widget(Divider(False, 7), 0)
    layout.add_widget(Label('>>>', 1, align='<'), 0)
    layout.add_widget(Divider(False, 7), 2)
    layout.add_widget(Label('<<<', 1, align='>'), 2)
def format_pr_layout(layout):
    # Spacer labels for the preset frame's two columns. Currently unused
    # (the call in run_display is commented out).
    layout.add_widget(Label(' ', 1), 0)
    layout.add_widget(Label(' ', 1), 1)
# Initially populate the sender list and hand the labels back for later
# updates. The dividers remain in the layout as permanent spacers.
def gen_and_add_sender_labels(layout, namen):
    """Create one Label per sender name and place them all in column 1,
    framing the current station with two blank dividers.

    :return: the created labels, so callers can rewrite their text later.
    """
    labels = [Label(name, 1) for name in namen]
    mitte = DISPLAY_SENDER_SYMM
    # Stations above the current one.
    for widget in labels[:mitte]:
        layout.add_widget(widget, 1)
    # Spacer, the current station, spacer.
    layout.add_widget(Divider(False, 1), 1)
    layout.add_widget(labels[mitte], 1)
    layout.add_widget(Divider(False, 1), 1)
    # Stations below the current one.
    for widget in labels[mitte + 1:]:
        layout.add_widget(widget, 1)
    return labels
# Fill the preset frame: button numbers in column 0, preset station
# names in column 1. Returns the name labels for later updating.
def gen_and_add_preset_labels(layout, namen):
    preset_labels = []
    # Name labels for column 1; each name is also logged.
    for name in namen:
        preset_labels.append(Label(name, 1))
        debug.write(name)
    for widget in preset_labels:
        layout.add_widget(widget, 1)
    # Button numbers 1-5 for column 0.
    for nummer in range(1, 6):
        layout.add_widget(Label(str(nummer), 1), 0)
    return preset_labels
def update_sender_labels(labs, cfg, sender_nr):
    """Refresh the sender-list labels around the newly selected station."""
    for label, name in zip(labs, parse_sender(cfg, sender_nr)):
        label.text = name
def update_preset_labels(labs, cfg):
    """Refresh the preset labels from the (possibly changed) config."""
    for label, name in zip(labs, parse_presets(cfg)):
        label.text = name
# mhuhahahahhahahahahaaaa!
def asciisierer(s):
    """Transliterate German umlauts and sharp S to plain ASCII so the
    figlet banner can render them."""
    mapping = str.maketrans({
        'ä': 'ae', 'ö': 'oe', 'ü': 'ue', 'ß': 'ss',
        'Ä': 'AE', 'Ö': 'OE', 'Ü': 'UE', 'ẞ': 'SS',
    })
    return s.translate(mapping)
def run_display(screen):
    """Build the whole TUI (preset frame, sender list, logo, station
    banner, volume bar) and hand it to asciimatics for rendering."""
    scenes = []
    AKT_SENDER = "Retro rockt!"
    # Prepare frame for the presets
    preset_frame = Frame(screen, 7, 29, can_scroll=False, title="Tastenbelegung",
                         x=0, y=10, reduce_cpu=True)
    pr_layout = Layout([10, 90], fill_frame=True)
    preset_frame.add_layout(pr_layout)
    # Prepare frame for the sender list
    sender_frame = Frame(screen, 17, 50, can_scroll=False, title="Senderliste",
                         x=30, y=0, reduce_cpu=True)
    sender_layout0 = Layout([10, 80, 10], fill_frame=True)
    sender_frame.add_layout(sender_layout0)
    # Load the json config-file
    cfg = load_config()
    # Prepare the layouts, add spaces etc
    format_sl_layout(sender_layout0)
    # No longer needed as things currently stand:
    # format_pr_layout(pr_layout)
    # Create the sender/preset labels and fill them initially; keep the
    # returned label lists for later updates.
    sender_labels = gen_and_add_sender_labels(sender_layout0, parse_sender(cfg, 0))
    preset_labels = gen_and_add_preset_labels(pr_layout, parse_presets(cfg))
    preset_frame.fix()
    sender_frame.fix()
    # Effects are all the stuff which will be shown on the display
    effects = [preset_frame, sender_frame,
               # Print(screen, Box(26, 15, True), x=54, y=0),
               Print(screen, Box(80, 8, True), x=0, y=17, speed=2),
               # Clock(screen, 68, 7, 5),
               Print(screen, ColourImageFile(screen, LOGO, 9, bg=7),
                     x=0, y=0, speed=2),
               # Figlet banner showing the (ASCII-fied) station name.
               Print(screen, FigletText(asciisierer(AKT_SENDER)), x=1, y=18),
               # Horizontal volume bar fed live by get_vol().
               Print(screen, BarChart(
                   4, 80,
                   [get_vol],
                   colour=2,
                   char=' ',
                   bg=7,
                   scale=100,
                   axes=BarChart.X_AXIS,
                   intervals=25,
                   labels=True,
                   border=False),
                   x=0, y=26, transparent=False, speed=2)]
    # Start displaying
    scenes.append(Scene(effects, -1))
    screen.play(scenes)
def main():
    # Screen.wrapper handles terminal setup/restore around run_display.
    Screen.wrapper(run_display)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to quit; close the debug log first.
        debug.close()
        pass
| [
"stanner@posteo.de"
] | stanner@posteo.de |
0add16aff9187e8e1d1558f3a79633831144f1cd | f1d35bfae9cee1376e24903a88a3d04d3d1a31b1 | /django_react/settings.py | e850a1c0332e73bdc2fe925897554a113176f5c6 | [] | no_license | joigmz/django-react-integration | 9fbe0b2c2123c9907dd39d1171329c1e5e70567e | 74f8c2df391a96a6d0fbee21a554a680ee6c5e52 | refs/heads/main | 2023-07-05T21:53:02.439892 | 2021-08-15T05:30:51 | 2021-08-15T05:30:51 | 396,192,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | """
Django settings for django_react project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY_DJANGO_REACT')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_react.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'reactapp/build'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_react.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'reactapp/build/static'),
] | [
"57118197+jose-izam99@users.noreply.github.com"
] | 57118197+jose-izam99@users.noreply.github.com |
06432ae0fe02ed8472ccb420e0ebb19fbf85e794 | 9f3e0a7f3886237a6eaaa9479e9f8244a68d2bc5 | /bdc/bdc/__init__.py | 4e8cd24b90085d1240895dd9bb85399fd5111507 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | databricks/build-tooling | e5ff02d3a9ccd967d5e3a49141118a2b7860f84d | 5869096b94696b43d382ad788474a3fc52cf8acf | refs/heads/master | 2023-08-26T15:45:42.736301 | 2018-09-12T23:10:29 | 2018-09-12T23:10:29 | 150,157,911 | 0 | 3 | null | 2018-09-24T19:35:18 | 2018-09-24T19:35:18 | null | UTF-8 | Python | false | false | 83,728 | py | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super,
filter, map, zip)
from future import standard_library
standard_library.install_aliases()
import sys
if sys.version_info[0] != 2:
print("bdc only works on Python 2. You're using Python {0}.".format(
'.'.join([str(i) for i in sys.version_info[0:3]])
))
sys.exit(1)
import subprocess
import json
from collections import namedtuple
import os
from os import path
import codecs
import re
from datetime import datetime
from ConfigParser import SafeConfigParser, NoOptionError
from enum import Enum
import master_parse
from grizzled.file import eglob
from bdc.bdcutil import *
from string import Template as StringTemplate
# We're using backports.tempfile, instead of tempfile, so we can use
# TemporaryDirectory in both Python 3 and Python 2. tempfile.TemporaryDirectory
# was added in Python 3.2.
from backports.tempfile import TemporaryDirectory
# ---------------------------------------------------------------------------
# Constants
#
# (Some constants are below the class definitions.)
# ---------------------------------------------------------------------------
VERSION = "1.24.0"
DEFAULT_BUILD_FILE = 'build.yaml'
PROG = os.path.basename(sys.argv[0])
DB_SHARD_HOME_VAR = 'DB_SHARD_HOME'
USAGE = ("""
{0}, version {1}
Usage:
{0} (--version)
{0} --info [--shell] [BUILD_YAML]
{0} (-h | --help)
{0} [-o | --overwrite] [-v | --verbose] [-d DEST | --dest DEST] [BUILD_YAML]
{0} --list-notebooks [BUILD_YAML]
{0} --upload [-v | --verbose] [-P PROF | --dprofile PROF ] SHARD_PATH [BUILD_YAML]
{0} --download [-v | --verbose] [-P PROF | --dprofile PROF ] SHARD_PATH [BUILD_YAML]
MASTER_CFG is the build tool's master configuration file.
BUILD_YAML is the build file for the course to be built. Defaults to {2}.
SHARD_PATH is the path to a folder on a Databricks shard, as supported
by the Databricks CLI. You must install databricks-cli and configure it
properly for --upload and --download to work.
Options:
-h --help Show this screen.
-d DEST --dest DEST Specify output destination. Defaults to
~/tmp/curriculum/<course_id>
-o --overwrite Overwrite the destination directory, if it exists.
-v --verbose Print what's going on to standard output.
--info Display the course name and version, and exit
--shell Used with --info, this option causes the course
name and version to be emitted as shell variables.
--list-notebooks List the full paths of all notebooks in a course
--upload Upload all notebooks to a folder on Databricks.
--download Download all notebooks from a folder on Databricks,
copying them into their appropriate locations on the
local file system, as defined in the build.yaml file.
-P PROF --dprofile PROF When uploading and downloading, pass authentication
profile PROF to the "databricks" commands. This
option corresponds exactly with the --profile
argument to "databricks".
--version Display version and exit.
""".format(PROG, VERSION, DEFAULT_BUILD_FILE))
DEFAULT_INSTRUCTOR_FILES_SUBDIR = "InstructorFiles"
DEFAULT_INSTRUCTOR_LABS_DBC = "Instructor-Labs.dbc"
DEFAULT_STUDENT_FILES_SUBDIR = "StudentFiles"
DEFAULT_STUDENT_LABS_DBC = "Labs.dbc" # in the student directory
SLIDES_SUBDIR = "Slides" # in the instructor directory
DATASETS_SUBDIR = "Datasets" # in the student directory
INSTRUCTOR_NOTES_SUBDIR = "InstructorNotes" # in the instructor directory
# Post master-parse variables (and associated regexps)
TARGET_LANG = 'target_lang'
TARGET_EXTENSION = 'target_extension'
NOTEBOOK_TYPE = 'notebook_type'
OUTPUT_DIR = 'output_dir'
PROFILE_VAR = 'profile'
VALID_PROFILES = {'amazon', 'azure'}
PROFILE_ABBREVIATIONS = {'amazon' : 'am', 'azure': 'az'}
POST_MASTER_PARSE_VARIABLES = {
TARGET_LANG: variable_ref_patterns(TARGET_LANG),
TARGET_EXTENSION: variable_ref_patterns(TARGET_EXTENSION),
NOTEBOOK_TYPE: variable_ref_patterns(NOTEBOOK_TYPE),
OUTPUT_DIR: variable_ref_patterns(OUTPUT_DIR),
PROFILE_VAR: variable_ref_patterns(PROFILE_VAR),
}
# EXT_LANG is used when parsing the YAML file.
EXT_LANG = {'.py': 'Python',
'.r': 'R',
'.scala': 'Scala',
'.sql': 'SQL'}
# LANG_EXT: Mapping of language (in lower case) to extension
LANG_EXT = dict([(v.lower(), k) for k, v in EXT_LANG.items()])
# Used to create a Scala version notebook in the top-level. This is a string
# template, with the following variables:
#
# {course_name} - the course name
# {version} - the version
# {build_timestamp} - the build timestamp, in printable format
VERSION_NOTEBOOK_TEMPLATE = """// Databricks notebook source
// MAGIC %md # Course: ${course_name}
// MAGIC * Version ${version}
// MAGIC * Built ${build_timestamp}
// MAGIC
// MAGIC Copyright \u00a9 ${year} Databricks, Inc.
"""
# The version notebook file name. Use as a format string, with {0} as the
# version number.
VERSION_NOTEBOOK_FILE = "Version-{0}.scala"
ANSWERS_NOTEBOOK_PATTERN = re.compile('^.*_answers\..*$')
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
errors = 0
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class BuildError(Exception):
    """Base class for all errors raised by this build tool."""
    pass
class UploadDownloadError(Exception):
    """Raised when uploading notebooks to, or downloading them from,
    a Databricks shard fails."""
    pass
class ConfigError(BuildError):
    """Raised for problems found in the build configuration (build.yaml)."""
    pass
class UnknownFieldsError(ConfigError):
    """Raised when a build.yaml section contains unrecognized fields."""
    def __init__(self, parent_section, section_name, unknown_keys):
        # Fix: pass this class (not its parent, ConfigError) to super(),
        # so cooperative initialization starts at the correct MRO level.
        super(UnknownFieldsError, self).__init__(
            '"{0}": Unknown fields in "{1}" section: {2}'.format(
                parent_section, section_name, ', '.join(unknown_keys)
            )
        )
class NotebookType(Enum):
    """The kinds of notebooks the build can generate from a master
    notebook (student exercises, instructor copies, answer keys)."""
    EXERCISES = 'exercises'
    INSTRUCTOR = 'instructor'
    ANSWERS = 'answers'

    @classmethod
    def default_mappings(cls):
        """
        Get a dict of NotebookType -> default directory name mappings.
        :return: the mappings
        """
        # Each member's value doubles as its default directory name.
        return dict((member, member.value) for member in cls)

    def suffix_for(self):
        """
        Get the filename suffix for the notebook type (e.g., '_exercises').
        :return: the suffix
        """
        return type(self).suffixes()[self]

    @classmethod
    def suffixes(cls):
        """
        Get a dict of NotebookType -> suffix mappings
        :return: the mappings
        """
        # The suffix is just the member value with a leading underscore.
        return dict((member, '_' + member.value) for member in cls)

    def __repr__(self):
        return 'NotebookType.' + self.name
# Lightweight immutable records used throughout the build:
#   MiscFileData    - a miscellaneous file copy spec (src/dest, template flag)
#   SlideData       - a slide deck source/destination pair
#   DatasetData     - a dataset plus its license and README files
#   MarkdownInfo    - Markdown rendering settings (HTML stylesheet)
#   NotebookHeading - notebook heading path and enabled flag
#   NotebookFooter  - notebook footer path and enabled flag
#   BundleFile      - one file inside an output bundle. NOTE(review): its
#                     typename string is 'BundleFileData', not 'BundleFile'
#                     - presumably a leftover; confirm before changing.
MiscFileData = namedtuple('MiscFileData', ('src', 'dest', 'is_template',
                                           'dest_is_dir'))
SlideData = namedtuple('SlideData', ('src', 'dest'))
DatasetData = namedtuple('DatasetData', ('src', 'dest', 'license', 'readme'))
MarkdownInfo = namedtuple('MarkdownInfo', ('html_stylesheet',))
NotebookHeading = namedtuple('NotebookHeading', ('path', 'enabled'))
NotebookFooter = namedtuple('NotebookFooter', ('path', 'enabled'))
BundleFile = namedtuple('BundleFileData', ('src', 'dest'))
class OutputInfo(DefaultStrMixin):
    """Where build output lands: the student and instructor directories
    plus the DBC file name inside each."""

    def __init__(self, student_dir, student_dbc, instructor_dir, instructor_dbc):
        self.student_dir = student_dir
        self.student_dbc = student_dbc
        self.instructor_dir = instructor_dir
        self.instructor_dbc = instructor_dbc

    @property
    def student_labs_subdir(self):
        # Labs unpack into a folder named after the DBC, minus extension.
        return self._labs_subdir(self.student_dir, self.student_dbc)

    @property
    def instructor_labs_subdir(self):
        return self._labs_subdir(self.instructor_dir, self.instructor_dbc)

    @staticmethod
    def _labs_subdir(parent, dbc_name):
        # Strip the ".dbc" extension and join under the parent directory.
        base = path.splitext(dbc_name)[0]
        return joinpath(parent, base)
class CourseInfo(DefaultStrMixin):
    """Course metadata from the build file: name, version, paths to the
    ancillary documents, copyright year, deprecation flag, type and an
    optional display title."""

    def __init__(self, name, version, class_setup, schedule, instructor_prep,
                 copyright_year, deprecated, type, title=None):
        self.name = name
        self.version = version
        self.class_setup = class_setup
        self.schedule = schedule
        self.instructor_prep = instructor_prep
        self.copyright_year = copyright_year
        self.deprecated = deprecated
        self.type = type
        # Fall back to the course name when no explicit title is given.
        self.title = title if title else name

    @property
    def course_id(self):
        """
        The course ID, which is a combination of the course name and the
        version.

        :return: the course ID string
        """
        return '{0}-{1}'.format(self.name, self.version)
class Bundle(DefaultStrMixin):
    '''
    Parsed bundle information: the zip file to generate and the files that
    go into it.
    '''
    def __init__(self, zipfile, files=None):
        '''
        :param zipfile: the zip file for the bundle
        :param files: a list of BundleFile objects
        '''
        self.zipfile = zipfile
        self.files = files or []
class NotebookDefaults(DefaultStrMixin):
    """
    Fallback settings applied to every notebook that doesn't override them.
    """
    def __init__(self, dest=None, master=None, variables=None):
        """
        Create a new NotebookDefaults object.

        :param dest: The destination value (str)
        :param master: The master parse section (dict, not MasterParseInfo)
        :param variables: Default (unexpanded) variables
        """
        self.dest = dest
        self.master = {} if master is None else master
        self.variables = {} if variables is None else variables
class MasterParseInfo(DefaultStrMixin):
    """
    Parsed master parser data for a notebook.
    """
    # Output languages the master parser can emit.
    LANGUAGES = ('python', 'scala', 'r', 'sql')

    # Recognized "master" section fields and their expected types.
    VALID_FIELDS = {
        'enabled': bool,
        'python': bool,
        'scala': bool,
        'r': bool,
        'sql': bool,
        'answers': bool,
        'exercises': bool,
        'instructor': bool,
        'heading': NotebookHeading.__class__,
        'footer': NotebookFooter.__class__,
        'encoding_in': str,
        'encoding_out': str,
        'debug': bool,
        'enable_templates': bool
    }

    VALID_HEADING_FIELDS = {
        'path': str,
        'enabled': bool
    }

    VALID_FOOTER_FIELDS = {
        'path': str,
        'enabled': bool
    }

    def __init__(self,
                 enabled=False,
                 python=True,
                 scala=True,
                 r=False,
                 sql=False,
                 answers=True,
                 exercises=True,
                 instructor=True,
                 heading=NotebookHeading(path=None, enabled=True),
                 footer=NotebookFooter(path=None, enabled=True),
                 encoding_in='UTF-8',
                 encoding_out='UTF-8',
                 target_profile=master_parse.TargetProfile.NONE,
                 enable_templates=False,
                 debug=False):
        """
        Create a new parsed master parse data object

        :param enabled: whether master parsing is enabled
        :param python: whether Python notebook generation is enabled
        :param scala: whether Scala notebook generation is enabled
        :param r: whether R notebook generation is enabled
        :param sql: whether SQL notebook generation is enabled
        :param answers: whether to generate answer notebooks
        :param exercises: whether to generate exercises notebook
        :param instructor: whether to generate instructor notebooks
        :param heading: heading information (a NotebookHeading object)
        :param footer: footer information (a NotebookFooter object)
        :param encoding_in: the encoding of the source notebooks
        :param encoding_out: the encoding to use when writing notebooks
        :param target_profile: the target profile, if any
        :param enable_templates: whether to treat Markdown cells as Mustache
               templates
        :param debug: enable/disable debug messages for the master
               parse phase
        """
        self.enabled = enabled
        self.python = python
        self.scala = scala
        self.r = r
        self.sql = sql
        self.answers = answers
        self.exercises = exercises
        self.instructor = instructor
        self.heading = heading
        self.footer = footer
        self.encoding_in = encoding_in
        self.encoding_out = encoding_out
        self.target_profile = target_profile
        self.enable_templates = enable_templates
        self.debug = debug

    def lang_is_enabled(self, lang):
        """
        Determine if a specific language is enabled.

        :param lang: the name (string) for the language, in lower case
        :return: True if it's enable, False if not
        """
        return self.__getattribute__(lang)

    def enabled_langs(self):
        """
        Return a list of the enabled languages. e.g., ['scala', 'python']

        :return: the list of enabled languages, which could be empty
        """
        return [i for i in self.LANGUAGES if self.__getattribute__(i)]

    def update_from_dict(self, d):
        """
        Update the fields in this master parse record from a dictionary.
        The dictionary should represent a master parse dictionary (e.g., as
        parsed from YAML). Keys can be missing. Extra keys are ignored.

        :param d: the dictionary
        """
        for k in self.VALID_FIELDS.keys():
            if k in d:
                if k == 'heading':
                    heading_data = d[k]
                    if isinstance(heading_data, NotebookHeading):
                        self.heading = heading_data
                    else:
                        self.heading = self._parse_heading(d[k])
                elif k == 'footer':
                    footer_data = d[k]
                    if isinstance(footer_data, NotebookFooter):
                        self.footer = footer_data
                    else:
                        self.footer = self._parse_footer(d[k])
                else:
                    self.__setattr__(k, d[k])

    @classmethod
    def extra_keys(cls, d):
        """
        Check a dictionary of master parse values for extra (unknown) keys.

        :param d: the dictionary to check
        :return: any unknown keys, or None if there aren't any.
        """
        extra = set(d.keys()) - set(cls.VALID_FIELDS.keys())
        heading = d.get('heading') or {}
        for k in (set(heading.keys()) - set(cls.VALID_HEADING_FIELDS.keys())):
            extra.add('heading.{0}'.format(k))
        # FIX: validate footer sub-keys the same way heading sub-keys are
        # validated. Previously, typos under "footer" went undetected.
        footer = d.get('footer') or {}
        for k in (set(footer.keys()) - set(cls.VALID_FOOTER_FIELDS.keys())):
            extra.add('footer.{0}'.format(k))
        if len(extra) == 0:
            extra = None
        return extra

    @classmethod
    def from_dict(cls, d):
        """
        Create a MasterParseData object from a dictionary of values.

        :param d: the dictionary.

        :return: The object. Throws exceptions on error. Extra keys are not
                 interpreted as an error here; callers can report those errors
                 with more context.
        """
        # BUG FIX: this used to call the nonexistent cls._parse_heading_data(),
        # raising AttributeError on every call, and it ignored any "footer"
        # section entirely.
        return MasterParseInfo(
            enabled=bool_field(d, 'enabled', False),
            python=bool_field(d, 'python', True),
            scala=bool_field(d, 'scala', True),
            r=bool_field(d, 'r', True),
            sql=bool_field(d, 'sql', False),
            answers=bool_field(d, 'answers', True),
            exercises=bool_field(d, 'exercises', True),
            instructor=bool_field(d, 'instructor', True),
            heading=cls._parse_heading(d.get('heading')),
            footer=cls._parse_footer(d.get('footer')),
            encoding_in=d.get('encoding_in', 'UTF-8'),
            encoding_out=d.get('encoding_out', 'UTF-8'),
            debug=bool_field(d, 'debug', False)
        )

    def to_dict(self):
        """
        Convert this object into a dictionary.

        :return: the dictionary of fields
        """
        res = {}
        res.update(self.__dict__)
        return res

    @classmethod
    def _parse_footer(cls, footer_data):
        # Build a NotebookFooter from a raw footer dict; path=None means
        # "use the built-in default footer".
        if footer_data:
            footer = NotebookFooter(
                path=footer_data.get('path', DEFAULT_NOTEBOOK_FOOTER.path),
                enabled=bool_field(footer_data, 'enabled',
                                   DEFAULT_NOTEBOOK_FOOTER.enabled)
            )
        else:
            # BUG FIX: this branch previously returned a NotebookHeading.
            footer = NotebookFooter(path=None, enabled=True)
        return footer

    @classmethod
    def _parse_heading(cls, heading_data):
        # Build a NotebookHeading from a raw heading dict; path=None means
        # "use the built-in default heading".
        if heading_data:
            heading = NotebookHeading(
                path=heading_data.get('path', DEFAULT_NOTEBOOK_HEADING.path),
                enabled=bool_field(heading_data, 'enabled',
                                   DEFAULT_NOTEBOOK_HEADING.enabled)
            )
        else:
            heading = NotebookHeading(path=None, enabled=True)
        return heading
class NotebookData(object, DefaultStrMixin):
    """
    Parsed notebook data.
    """
    def __init__(self,
                 src,
                 dest,
                 upload_download=True,
                 master=None,
                 variables=None,
                 only_in_profile=None):
        '''
        Captures parsed notebook data.

        :param src: Partial or full path to the notebook
        :param dest: Destination for the notebook, which can
                     contain variables. This value can be set
                     to `None`, as long as a destination is
                     available in the notebook defaults.
        :param upload_download: Whether upload and download are enabled
                                for this notebook.
        :param master: The master parse data.
        :param variables: Any variables for the notebook.
        :param only_in_profile: Profile to which notebook is restricted, if
                                any.
        '''
        super(NotebookData, self).__init__()
        self.src = src
        self.dest = dest
        self.master = master
        self.upload_download = upload_download
        self.variables = variables or {}
        self.only_in_profile = only_in_profile

    def master_enabled(self):
        """
        Determine whether master notebook processing is enabled for this
        notebook.

        :return: true or false
        """
        # ROBUSTNESS FIX: "master" defaults to None, so guard before
        # dereferencing (the old code raised AttributeError in that case).
        return self.master is not None and self.master.enabled

    def total_master_langs(self):
        """
        Get the number of output languages produced by the master parser
        for this notebook.

        :return: 0 if the master parser isn't enabled. Number of output
                 languages otherwise.
        """
        return len(self.master.enabled_langs()) if self.master_enabled() else 0

    def master_multiple_langs(self):
        """
        Determine whether the master parser is parsing to multiple languages
        or not.

        :return: True if master parsing is enabled and parsing to multiple
                 languages; False if master parsing is disabled or is enabled
                 but with only one output language.
        """
        # BUG FIX: this previously tested "> 0", which is True even with a
        # single output language, contradicting the documented contract.
        return self.total_master_langs() > 1
class BuildData(object, DefaultStrMixin):
    """
    Parsed build data.
    """
    def __init__(self,
                 build_file_path,
                 top_dbc_folder_name,
                 source_base,
                 output_info,
                 course_info,
                 notebooks,
                 slides,
                 datasets,
                 misc_files,
                 keep_lab_dirs,
                 markdown_cfg,
                 notebook_type_map,
                 use_profiles=False,
                 course_type=None,
                 variables=None,
                 bundle_info=None):
        """
        Create a new BuildData object.

        :param build_file_path: path to the build file, for reference
        :param top_dbc_folder_name: top-level directory in DBC, or None
        :param source_base: value of source base field
        :param output_info: info about the output directories and DBCs
        :param course_info: parsed CourseInfo object
        :param notebooks: list of parsed Notebook objects
        :param slides: parsed SlideInfo object
        :param datasets: parsed DatasetData object
        :param misc_files: parsed MiscFilesData object
        :param keep_lab_dirs: value of keep_lab_dirs setting
        :param markdown_cfg: parsed MarkdownInfo object
        :param notebook_type_map: a dict mapping notebook types to strings.
                                  Keys are from the NotebookType enum.
        :param use_profiles: whether to use Azure/Amazon build profiles
        :param course_type: the course type value, if any
        :param variables: a map of user-defined variables
        :param bundle_info: Bundle data, if any
        """
        super(BuildData, self).__init__()
        self.build_file_path = build_file_path
        # The directory containing the build file; relative paths in the
        # build are resolved against it elsewhere.
        self.course_directory = path.dirname(build_file_path)
        self.notebooks = notebooks
        self.course_info = course_info
        self.source_base = source_base
        self.output_info = output_info
        self.slides = slides
        self.datasets = datasets
        self.markdown = markdown_cfg
        self.misc_files = misc_files
        self.keep_lab_dirs = keep_lab_dirs
        self.notebook_type_map = notebook_type_map
        self.variables = variables or {}
        self.use_profiles = use_profiles
        self.course_type = course_type
        self.bundle_info = bundle_info

        if top_dbc_folder_name is None:
            top_dbc_folder_name = '${course_name}'

        # NOTE: self.course_info must be assigned before this point, because
        # the self.course_id property (used below) reads it.
        folder_vars = merge_dicts(variables, {
            'course_name': course_info.name,
            'course_version': course_info.version,
            'course_id': self.course_id,
        })

        self.top_dbc_folder_name = VariableSubstituter(
            top_dbc_folder_name
        ).substitute(
            folder_vars
        )

    @property
    def name(self):
        """The course name (shorthand for course_info.name)."""
        return self.course_info.name

    @property
    def course_id(self):
        """
        The course ID, which is a combination of the course name and the
        version.

        :return: the course ID string
        """
        return self.course_info.course_id
# ---------------------------------------------------------------------------
# Class-dependent Constants
# ---------------------------------------------------------------------------

# Defaults used when a build file omits heading/footer settings.
# path=None means "use the built-in heading/footer".
DEFAULT_NOTEBOOK_FOOTER = NotebookFooter(path=None, enabled=True)
DEFAULT_NOTEBOOK_HEADING = NotebookHeading(path=None, enabled=True)

# Always generate Databricks notebooks.
MASTER_PARSE_DEFAULTS = {
    'enabled': False,
    'add_heading': True,
    'python': True,
    'r': False,
    'scala': True,
    'sql': False,
    'answers': True,
    'instructor': True,
    'encoding_in': 'UTF-8',
    'encoding_out': 'UTF-8',
    'heading': DEFAULT_NOTEBOOK_HEADING,
    'footer': DEFAULT_NOTEBOOK_FOOTER,
    'debug': False
}
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def error(msg):
    """
    Emit an error message and bump the global error count.

    :param msg: the message to emit
    """
    global errors
    errors += 1
    emit_error(msg)
def die(msg, show_usage=False):
    """
    Emit a message to standard error, optionally write the usage, and exit.

    :param msg: the error message
    :param show_usage: True to also write the USAGE string before aborting
    """
    error(msg)
    if show_usage:
        sys.stderr.write(USAGE)
    sys.stderr.write("\n*** ABORTED\n")
    # Non-zero exit status signals failure to the calling shell.
    sys.exit(1)
def load_build_yaml(yaml_file):
    """
    Load the YAML configuration file that defines the build for a particular
    class. Returns a BuildData object. Throws ConfigError on error.

    :param yaml_file: the path to the build file to be parsed
    :return: the BuildData object, representing the parsed build.yaml
    """
    # NOTE: imported locally rather than at module level -- presumably to
    # defer the PyYAML dependency until a build file is actually loaded.
    import yaml
def required(d, key, where, error=None):
"""
Get a required key
:param d: the dictionary
:param key: the key
:param where: where in the file the key should be (for errors)
:param error: error message, or None for default
:return:
"""
v = d.get(key)
if v is None:
if error:
msg = error
else:
msg = 'Missing required "{0}" in "{1}".'.format(key, where)
raise ConfigError(msg)
return v
def parse_time_subst(dest, src, allow_lang=True, extra_vars=None):
# Handles parse-time variable substitution. Some variables are
# substituted later.
if extra_vars is None:
extra_vars = {}
base_with_ext = path.basename(src)
(base_no_ext, ext) = path.splitext(base_with_ext)
if '@' in dest:
raise ConfigError('The "@" character is disallowed in destinations.')
# A certain set of variables is expanded only after master parsing; all
# others are expanded here. Any references to post master-parse variables
# (expanded in process_master_notebook) must be explicitly preserved
# here. This logic escapes them by removing the "$" and surrounding the
# rest with @ ... @. The escaping is undone, below.
adj_dest = dest
subbed = True
while subbed:
subbed = False
for pats in POST_MASTER_PARSE_VARIABLES.values():
m = matches_variable_ref(pats, adj_dest)
while m:
var = '@{0}@'.format(m[1].replace(r'$', ''))
adj_dest = m[0] + var + m[2]
subbed = True
m = matches_variable_ref(pats, adj_dest)
fields = {
'basename': base_no_ext,
'extension': ext[1:] if ext.startswith('') else ext,
'filename': base_with_ext,
}
if allow_lang:
fields['lang'] = EXT_LANG.get(ext, "???")
fields.update(extra_vars)
adj_dest = VariableSubstituter(adj_dest).safe_substitute(fields)
# Restore escaped variables.
escaped = re.compile(r'^([^@]*)@([^@]+)@(.*)$')
m = escaped.match(adj_dest)
while m:
adj_dest = m.group(1) + '$' + m.group(2) + m.group(3)
m = escaped.match(adj_dest)
return adj_dest
def parse_dict(d, fields_spec, outer_section, section):
res = {}
for field, type in fields_spec.items():
if field not in d:
continue
if type is bool:
try:
res[field] = bool_value(d[field])
except ValueError as e:
raise ConfigError(
'{0}: Bad value for "{1}" in section "{2}": {3}'.format(
outer_section, field, section, e.message
)
)
continue
# Anything else gets copied as is for now.
res[field] = d[field]
return res
def parse_master_section(data, section_name, build_yaml_dir):
# Parse the master section, returning a (possibly partial)
# dictionary (NOT a MasterParseInfo object).
extra_keys = MasterParseInfo.extra_keys(data)
if extra_keys:
raise UnknownFieldsError(section_name, "master", extra_keys)
master = parse_dict(data, MasterParseInfo.VALID_FIELDS,
section_name, 'master')
heading = master.get('heading')
if heading:
heading = parse_dict(heading, MasterParseInfo.VALID_HEADING_FIELDS,
section_name, 'master.heading')
heading_path = heading.get('path')
if heading_path == 'DEFAULT':
heading['path'] = None
elif heading_path is not None:
# Resolve the path, relative to the build file.
if not path.isabs(heading_path):
heading_path = path.abspath(joinpath(build_yaml_dir, heading_path))
if not path.exists(heading_path):
raise ConfigError(
'Footer file "{}" does not exist.'.format(heading_path)
)
heading['path'] = heading_path
master['heading'] = heading
footer = master.get('footer')
if footer:
footer = parse_dict(footer, MasterParseInfo.VALID_FOOTER_FIELDS,
section_name, 'master.footer')
footer_path = footer.get('path')
if footer_path == 'DEFAULT':
footer['path'] = None
elif footer_path is not None:
# Resolve the path, relative to the build field.
if not path.isabs(footer_path):
footer_path = path.abspath(joinpath(build_yaml_dir, footer_path))
if not path.exists(footer_path):
raise ConfigError(
'Footer file "{}" does not exist.'.format(footer_path)
)
footer['path'] = footer_path
master['footer'] = footer
return master
    def parse_notebook_defaults(contents, section_name, build_yaml_dir):
        """
        Parse the optional "notebook_defaults" section into a
        NotebookDefaults object.

        :param contents: the full parsed build dictionary
        :param section_name: name of the defaults section
        :param build_yaml_dir: directory containing the build file
        :return: a NotebookDefaults object (empty defaults if section absent)
        :raises UnknownFieldsError: if the section has unrecognized keys
        """
        cfg = contents.get(section_name)
        if not cfg:
            return NotebookDefaults(dest=None, master=None)

        # dict_get_and_del presumably removes keys as they're consumed --
        # hence the leftover-key check below. (Verify against its definition.)
        master = parse_master_section(dict_get_and_del(cfg, 'master', {}),
                                      'notebook_defaults', build_yaml_dir)
        variables = dict_get_and_del(cfg, 'variables', {})

        res = NotebookDefaults(dest=dict_get_and_del(cfg, 'dest', None),
                               master=master, variables=variables)

        # Anything still left in cfg is an unknown field.
        if len(cfg.keys()) > 0:
            raise UnknownFieldsError("build", section_name, cfg.keys())

        return res
def parse_notebook(obj, notebook_defaults, extra_vars, build_yaml_dir):
bad_dest = re.compile('^\.\./*|^\./*')
src = required(obj, 'src', 'notebooks section')
section = 'Notebook "{0}"'.format(src)
dest = obj.get('dest', notebook_defaults.dest)
if not dest:
raise ConfigError(
('Notebook "{0}": Missing "dest" section, and no default ' +
'"dest" in notebook defaults.').format(src)
)
variables = merge_dicts(notebook_defaults.variables,
obj.get('variables', {}))
all_extra_vars = merge_dicts(extra_vars, variables)
dest = parse_time_subst(dest, src, extra_vars=all_extra_vars)
if bool_field(obj, 'skip'):
verbose('Skipping notebook {0}'.format(src))
return None
master = MasterParseInfo() # defaults
master.update_from_dict(notebook_defaults.master)
nb_master = parse_master_section(obj.get('master', {}), section,
build_yaml_dir)
master.update_from_dict(nb_master)
_, dest_ext = os.path.splitext(dest)
if master.enabled and bad_dest.match(dest):
raise ConfigError(
('Notebook "{0}": Relative destinations ("{1}") are ' +
'disallowed.').format(src, dest)
)
if master.enabled:
total_langs = len(master.enabled_langs())
if (total_langs > 1):
pat = POST_MASTER_PARSE_VARIABLES[TARGET_LANG]
if not matches_variable_ref(pat, dest):
raise ConfigError(
('Notebook "{0}": When multiple master parser languages ' +
'are used, you must substitute ${1} in the ' +
'destination.').format(
src, TARGET_LANG
)
)
else:
_, src_ext = os.path.splitext(src)
if (not dest_ext) or (dest_ext != src_ext):
raise ConfigError(
('Notebook "{0}": "master" is disabled, so "dest" should ' +
'have extension "{1}".').format(src, src_ext)
)
for pats in POST_MASTER_PARSE_VARIABLES.values():
m = matches_variable_ref(pats, dest)
if m:
raise ConfigError(
('Notebook "{0}": "{1}" found in "dest", but "master" ' +
'is disabled.').format(src, m[1])
)
prof = obj.get('only_in_profile')
if prof and (prof not in VALID_PROFILES):
raise ConfigError(
('Notebook "{0}": Bad value of "{1}" for only_in_profile. ' +
'Must be one of: {2}').format(
src, prof, ', '.join(VALID_PROFILES)
)
)
if prof and (not master.enabled):
raise ConfigError(
('Notebook "{0}": only_in_profile is set, but master is ' +
'not enabled.'.format(src))
)
nb = NotebookData(
src=src,
dest=dest,
master=master,
upload_download=bool_field(obj, 'upload_download', True),
variables=variables,
only_in_profile=prof
)
return nb
def parse_slide(obj, extra_vars):
src = required(obj, 'src', 'notebooks')
dest = required(obj, 'dest', 'notebooks')
if bool_field(obj, 'skip'):
verbose('Skipping slide {0}'.format(src))
return None
else:
return SlideData(
src=src,
dest=parse_time_subst(dest, src, allow_lang=False,
extra_vars=extra_vars)
)
def parse_bundle(obj, output_info, course_info, extra_vars):
if not obj:
return None
files = obj.get('files')
if not files:
return None
zip_vars = {
'course_name': course_info.name,
'course_version': course_info.version
}
zipfile = obj.get('zipfile')
if zipfile:
# Use safe_substitute, which leaves all other variables alone.
zipfile = StringTemplate(zipfile).safe_substitute(zip_vars)
else:
zipfile = course_info.course_id + '.zip'
file_list = []
src_vars = {}
src_vars.update(extra_vars)
src_vars.update({
'student_dbc': output_info.student_dbc,
'instructor_dbc': output_info.instructor_dbc
})
for d in files:
src = d['src']
dest = d['dest']
if not (dest or src):
raise ConfigError('"bundle" has a file with no "src" or "dest".')
if not src:
raise ConfigError('"bundle" has a file with no "src".')
if not dest:
raise ConfigError('"bundle" has a file with no "dest".')
src = StringTemplate(src).substitute(src_vars)
dest = parse_time_subst(dest, src, allow_lang=False,
extra_vars=extra_vars)
file_list.append(BundleFile(src=src, dest=dest))
return Bundle(zipfile=zipfile, files=file_list)
    def parse_misc_file(obj, extra_vars):
        """
        Parse one entry from the "misc_files" section into a MiscFileData
        object, validating source/target type combinations.

        :param obj: the raw misc-file dictionary
        :param extra_vars: extra substitution variables
        :return: a MiscFileData object, or None if the file is marked "skip"
        """
        src = required(obj, 'src', 'misc_files')
        dest = required(obj, 'dest', 'misc_files')
        if bool_field(obj, 'skip'):
            verbose('Skipping file {0}'.format(src))
            return None
        else:
            dest = parse_time_subst(dest, src, allow_lang=False, extra_vars=extra_vars)
            mf = MiscFileData(
                src=src,
                dest=dest,
                dest_is_dir=obj.get('dest_is_dir', False),
                is_template=obj.get('template', False)
            )
            # Sanity checks: A Markdown file can be translated to Markdown,
            # PDF or HTML. An HTML file can be translated to HTML or PDF.
            # is_template is disallowed for non-text files.
            # NOTE(review): this message is missing a space between
            # "template" and "but".
            if mf.is_template and (not is_text_file(src)):
                raise ConfigError(
                    ('Section misc_files: "{}" is marked as a template' +
                     "but it is not a text file.").format(src)
                )

            # We can't check to see whether the target is a directory, since
            # nothing exists yet. But if it has an extension, we can assume it
            # is not a directory.
            if (dest == '.') and (not mf.dest_is_dir):
                # A "dest" of "." (the top-level directory) is only legal
                # when explicitly flagged as a directory.
                raise ConfigError(
                    ('Section misc_files: "{}" uses a "dest" of ".", but '
                     '"dest_is_dir" is not set to true.').format(src)
                )
            elif has_extension(dest):
                # It's a file, not a directory.
                if is_markdown(src):
                    if not (is_pdf(dest) or is_html(dest) or is_markdown(dest)):
                        raise ConfigError(
                            ('Section misc_files: "{}" is Markdown, the ' +
                             'target ("{}") is not a directory and is not ' +
                             "PDF, HTML or Markdown.").format(src, dest)
                        )
                if is_html(src):
                    if not (is_pdf(dest) or is_html(dest)):
                        raise ConfigError(
                            ('Section misc_files: "{}" is HTML, the ' +
                             'target ("{}") is not a directory and is not ' +
                             "PDF or HTML.").format(src, dest)
                        )
            return mf
    def parse_dataset(obj, extra_vars, build_yaml_dir):
        """
        Parse one entry from the "datasets" section into a DatasetData
        object. Every dataset must ship with non-empty LICENSE.md and
        README.md files in its directory.

        :param obj: the raw dataset dictionary
        :param extra_vars: extra substitution variables
        :param build_yaml_dir: directory containing the build file
        :return: a DatasetData object, or None if the dataset is marked "skip"
        """
        # NOTE(review): the section label passed to required() says
        # 'notebooks', so error messages will cite the wrong section.
        src = required(obj, 'src', 'notebooks')
        dest = required(obj, 'dest', 'notebooks')
        if bool_field(obj, 'skip'):
            verbose('Skipping data set {0}'.format(src))
            return None
        else:
            src_dir = path.dirname(src)
            license = joinpath(src_dir, 'LICENSE.md')
            readme = joinpath(src_dir, 'README.md')
            p = joinpath(build_yaml_dir, src)
            if not path.exists(p):
                raise ConfigError('Dataset file "{}" does not exist'.format(p))

            # Both companion files must exist and be non-empty.
            for i in (license, readme):
                p = joinpath(build_yaml_dir, i)
                if not path.exists(p):
                    raise ConfigError(
                        'Dataset "{}": Required "{}" does not exist.'.format(
                            src, p
                        )
                    )
                if os.stat(p).st_size == 0:
                    raise ConfigError(
                        'Dataset "{}": "{}" is empty.'.format(
                            src, p
                        )
                    )
            return DatasetData(
                src=src,
                dest=parse_time_subst(dest, src, allow_lang=False, extra_vars=extra_vars),
                license=license,
                readme=readme
            )
def parse_file_section(section, parse, *args):
# Use the supplied parse function to parse each element in the
# supplied section, filtering out None results from the function.
# Convert the entire result to a tuple.
return tuple(
filter(lambda o: o != None, [parse(i, *args) for i in section])
)
def parse_markdown(obj):
if obj:
stylesheet = obj.get('html_stylesheet')
else:
stylesheet = None
return MarkdownInfo(html_stylesheet=stylesheet)
def parse_notebook_types(contents):
res = NotebookType.default_mappings()
names_to_keys = dict([(t.value, t) for t in NotebookType])
invalid_keys = set()
for k, v in contents.get('notebook_type_name', {}).items():
t = names_to_keys.get(k)
if not t:
invalid_keys.add(k)
else:
res[t] = v
if invalid_keys:
raise ConfigError(
'Unknown key(s) in "notebook_type_name" section: {0}'.format(
', '.join(invalid_keys)
))
return res
    def parse_min_version(key, value):
        """
        Parse a "*_min_version" build setting into a (major, minor) tuple.

        :param key: the name of the setting in the build file
        :param value: NOTE(review): currently unused -- the function
                      re-reads contents[key] from the enclosing scope;
                      callers pass required(...) here only to enforce
                      the key's presence.
        :return: (major, minor), or None if the key is absent
        """
        res = contents.get(key)
        if res is not None:
            if isinstance(res, float):
                # A bare 1.0 in YAML parses as a float, losing the intended
                # major/minor split, so the value must be quoted.
                raise ConfigError(
                    '"{0}" of the form <major>.<minor> must be quoted.'.format(
                        key
                    )
                )
            try:
                # Ignore the patch version.
                res = parse_version_string(res)[0:2]
            except ValueError as e:
                # NOTE: e.message is Python 2 only.
                raise ConfigError(
                    'Bad value of "{0}" for "{1}": {2}'.format(
                        res, key, e.message
                    )
                )
        return res
def parse_course_type(data, section):
course_type = data.get('type')
if not course_type:
raise ConfigError(
'Missing required "{}.type" setting in "{}"'.format(
section, yaml_file
)
)
if course_type.lower() == 'self-paced':
return master_parse.CourseType.SELF_PACED
if course_type.lower() == 'ilt':
return master_parse.CourseType.ILT
raise ConfigError(
('Unknown value of "{}" for "{}.type". Legal values are ' +
'"ilt" and "self-paced".').format(course_type, course_type)
)
    def parse_course_info(course_info_cfg, section_name):
        """
        Parse the "course_info" section into a CourseInfo object. For
        self-paced courses, the ILT-only settings (class_setup, schedule,
        instructor_prep) are discarded with a warning.

        :param course_info_cfg: the raw "course_info" dictionary
        :param section_name: the section name, for error messages
        :return: a CourseInfo object
        """
        ilt_only = {
            'class_setup': None,
            'schedule': None,
            'instructor_prep': None
        }
        name = required(course_info_cfg, 'name', section_name)
        version = required(course_info_cfg, 'version', section_name)
        ilt_only['class_setup'] = course_info_cfg.get('class_setup')
        ilt_only['schedule'] = course_info_cfg.get('schedule')
        # Note: the build-file key is "prep", not "instructor_prep".
        ilt_only['instructor_prep'] = course_info_cfg.get('prep')
        # NOTE(review): local "type" shadows the builtin of the same name.
        type = parse_course_type(course_info_cfg, section_name)
        deprecated = course_info_cfg.get('deprecated', False)
        # Default the copyright year to the current year.
        copyright_year = course_info_cfg.get('copyright_year',
                                             str(datetime.now().year))

        if type == master_parse.CourseType.SELF_PACED:
            for k, v in ilt_only.items():
                if v:
                    warning(
                        'course_info.{} is ignored for self-paced courses'.format(
                            k
                        )
                    )
                ilt_only[k] = None

        return CourseInfo(
            name=name,
            title=course_info_cfg.get('title', name),
            version=version,
            class_setup=ilt_only['class_setup'],
            schedule=ilt_only['schedule'],
            instructor_prep=ilt_only['instructor_prep'],
            type=type,
            deprecated=deprecated,
            copyright_year=copyright_year
        )
    def parse_output_info(contents):
        """
        Parse output directory/DBC settings into an OutputInfo object,
        applying defaults for anything unspecified. The DBC values must be
        simple file names, and the student and instructor directories must
        differ.

        :param contents: the full parsed build dictionary
        :return: an OutputInfo object
        :raises ConfigError: on invalid DBC names or identical directories
        """
        student_dir = contents.get('student_dir', DEFAULT_STUDENT_FILES_SUBDIR)
        instructor_dir = contents.get('instructor_dir',
                                      DEFAULT_INSTRUCTOR_FILES_SUBDIR)
        student_dbc = contents.get('student_dbc', DEFAULT_STUDENT_LABS_DBC)
        instructor_dbc = contents.get('instructor_dbc',
                                      DEFAULT_INSTRUCTOR_LABS_DBC)
        # The DBC names may not contain any directory components.
        for (k, v) in (('student_dbc', student_dbc),
                       ('instructor_dbc', instructor_dbc)):
            if path.dirname(v) != '':
                raise ConfigError(
                    '"{}" value "{}" is not a simple file name.'.format(k, v)
                )

        if student_dir == instructor_dir:
            raise ConfigError(
                ('"student_dir" and "instructor_dir" cannot be the same. ' +
                 '"student_dir" is "{0}". ' +
                 '"instructor_dir" is "{1}".').format(
                    student_dir, instructor_dir
                )
            )

        return OutputInfo(student_dir=student_dir,
                          instructor_dir=instructor_dir,
                          student_dbc=student_dbc,
                          instructor_dbc=instructor_dbc)
    # Main function logic
    verbose("Loading {0}...".format(yaml_file))
    with open(yaml_file, 'r') as y:
        contents = yaml.safe_load(y)

    # Enforce the minimum bdc version declared by the build file.
    bdc_min_version = parse_min_version(
        'bdc_min_version', required(contents, 'bdc_min_version', 'build')
    )
    cur_major_minor = parse_version_string(VERSION)[0:2]
    if bdc_min_version > cur_major_minor:
        raise ConfigError(
            ("This build requires bdc version {0}.x or greater, but " +
             "you're using bdc version {1}.").format(
                '.'.join(map(str, bdc_min_version)), VERSION
            )
        )

    variables = contents.get('variables', {})
    notebooks_cfg = required(contents, 'notebooks', 'build')
    slides_cfg = contents.get('slides', [])
    misc_files_cfg = contents.get('misc_files', [])
    datasets_cfg = contents.get('datasets', [])
    course_info_cfg = required(contents, 'course_info', 'build')
    course_info = parse_course_info(course_info_cfg, 'course_info')

    # Resolve src_base relative to the directory containing the build file.
    src_base = required(contents, 'src_base', 'build')
    build_yaml_full = path.abspath(yaml_file)
    build_yaml_dir = path.dirname(build_yaml_full)
    src_base = path.abspath(joinpath(build_yaml_dir, src_base))
    use_profiles = bool_field(contents, 'use_profiles')

    notebook_defaults = parse_notebook_defaults(contents, 'notebook_defaults',
                                                build_yaml_dir)

    if slides_cfg:
        slides = parse_file_section(slides_cfg, parse_slide, variables)
    else:
        slides = None

    if datasets_cfg:
        datasets = parse_file_section(datasets_cfg, parse_dataset, variables,
                                      build_yaml_dir)
    else:
        datasets = None

    if misc_files_cfg:
        misc_files = parse_file_section(misc_files_cfg, parse_misc_file,
                                        variables)
    else:
        misc_files = None

    if notebooks_cfg:
        notebooks = parse_file_section(notebooks_cfg, parse_notebook,
                                       notebook_defaults, variables,
                                       build_yaml_dir)

        # If there are any profiles in the notebooks, and use_profiles is off,
        # abort.
        profiles = {n.only_in_profile for n in notebooks if n.only_in_profile}
        if (not use_profiles) and (len(profiles) > 0):
            raise ConfigError(
                'At least one notebook has "only_in_profile" set, but the ' +
                'build does not specify "use_profiles: true".'
            )
    else:
        notebooks = None

    # NOTE(review): if "notebooks" is None here (an empty "notebooks"
    # section), the comprehension below raises TypeError -- confirm whether
    # an empty section is actually reachable given the required() check.
    need_master = any([n.master.enabled for n in notebooks])
    if need_master:
        # Master parsing is in use, so the build must also declare a
        # minimum master_parse version.
        required_master_min_version = parse_min_version(
            'master_parse_min_version',
            required(contents, 'master_parse_min_version', 'build',
                     error='"master_parse_min_version" is required if any ' +
                           'notebooks use the master parser.')
        )
        master_version = parse_version_string(master_parse.VERSION)[0:2]
        if required_master_min_version > master_version:
            raise ConfigError(
                ("This build requires master_parse version {0}.x or greater, " +
                 "but you're using master_parse version {1}.").format(
                    '.'.join(map(str, required_master_min_version)),
                    master_parse.VERSION
                )
            )

    output_info = parse_output_info(contents)
    bundle_info = parse_bundle(contents.get('bundle'), output_info,
                               course_info, variables)

    # Assemble everything into the final BuildData object.
    data = BuildData(
        build_file_path=build_yaml_full,
        top_dbc_folder_name=contents.get('top_dbc_folder_name'),
        course_info=course_info,
        output_info=output_info,
        notebooks=notebooks,
        slides=slides,
        datasets=datasets,
        source_base=src_base,
        misc_files=misc_files,
        keep_lab_dirs=bool_field(contents, 'keep_lab_dirs'),
        markdown_cfg=parse_markdown(contents.get('markdown')),
        notebook_type_map=parse_notebook_types(contents),
        variables=variables,
        use_profiles=use_profiles,
        bundle_info=bundle_info
    )

    return data
def parse_args():
    """
    Parse the command line parameters.

    :return: the docopt arguments dictionary, driven by the USAGE string
    """
    from docopt import docopt
    return docopt(USAGE, version=VERSION)
def expand_template(src_template_file, build, tempdir, profile):
    """
    Expand a pystache template into "tempdir", substituting the build's
    user variables, the course_info fields, and per-profile markers.

    :param src_template_file: path to the template file
    :param build: the parsed BuildData
    :param tempdir: directory in which to write the expanded file
    :param profile: the active build profile (compared against
                    VALID_PROFILES), or None
    :return: the path to the expanded output file
    """
    import pystache

    variables = {}
    if build.variables:
        variables['variables'] = build.variables

    # Every valid profile becomes a template variable: the active one gets
    # a capitalized (truthy) value, the rest get empty strings.
    for p in VALID_PROFILES:
        if profile == p:
            variables[p] = p.capitalize()
        else:
            variables[p] = ''

    # Expose every non-None course_info attribute as a string; Enum values
    # are flattened to their underlying value first.
    course_info_vars = {}
    for k, v in build.course_info.__dict__.items():
        if v is None:
            continue
        if isinstance(v, Enum):
            v = v.value
        course_info_vars[k] = str(v)
    variables['course_info'] = course_info_vars
    output = joinpath(tempdir, path.basename(src_template_file))
    with codecs.open(src_template_file, mode='r', encoding='utf8') as i:
        with codecs.open(output, mode='w', encoding='utf8') as o:
            o.write(pystache.render(i.read(), variables))
    return output
# For copy_info_files and related logic:
#
# This is a table of special source file type to target file type
# processors. If the source type has a key in this table, then it
# is processed specially, and there MUST be an entry for the target type,
# or an error occurs. If the source type has no key in this table, then
# it is just copied as is. See _get_type().
INFO_PROCESSORS = {
    # Table that maps a source type and a target type to a consistent
    # three-arg lambda (src, dest, build) for generating the target.
    #
    # src_type -> target_type -> lambda
    #
    # The type string is also the file extension.
    'md':
        {
            'html':
                lambda src, dest, build: markdown_to_html(
                    src, dest, stylesheet=build.markdown.html_stylesheet
                ),
            'pdf':
                lambda src, dest, build: markdown_to_pdf(
                    src, dest, stylesheet=build.markdown.html_stylesheet
                ),
            'md':
                lambda src, dest, build: copy(src, dest)
        },
    'html':
        {
            'pdf':
                lambda src, dest, build: html_to_pdf(src, dest),
            'html':
                lambda src, dest, build: copy(src, dest)
        }
}
def _get_type(f):
    """
    Map a file name to its INFO_PROCESSORS type key ('md', 'pdf' or
    'html'), or None for any other kind of file.
    """
    for matches, type_key in ((is_markdown, 'md'),
                              (is_pdf, 'pdf'),
                              (is_html, 'html')):
        if matches(f):
            return type_key
    return None
def _convert_and_copy_info_file(src, dest, build):
    '''
    Workhorse function: Takes the source and target, looks up how to process
    them, and processes them.

    :param src: the source file
    :param dest: the destination file (not directory)
    :param build: the parsed build information
    '''
    src_type = _get_type(src)
    dest_type = _get_type(dest)

    if src_type is None:
        # Not a special type that we have to convert. Just copy.
        copy(src, dest)
    elif dest_type is None:
        # Source type is a special type (Markdown, HTML), but we don't know
        # the destination type. This is a bug, since this error should've
        # been caught during build file parsing.
        raise BuildError(
            '(BUG: Should have been caught earlier) "{}" -> "{}".'.format(
                src, dest
            )
        )
    else:
        # Look up the src_type -> dest_type converter in INFO_PROCESSORS.
        proc = INFO_PROCESSORS.get(src_type, {}).get(dest_type, None)
        if proc is None:
            raise BuildError(
                '(BUG: No processor) "{}" -> "{}".'.format(
                    src, dest
                )
            )

        proc(src, dest, build)
def copy_info_file(src_file, target, is_template, build, profile):
    """
    Copy a file that contains some kind of readable information (e.g., a
    Markdown file, a PDF, etc.). If the file is a Markdown file, it is also
    converted to HTML and copied.

    :param src_file:    path to the source file
    :param target:      destination file, or an existing destination
                        directory; a directory triggers one output per
                        supported target type for the source's type
    :param is_template: if True, the source is run through
                        expand_template() before conversion
    :param build:       the parsed build information
    :param profile:     the build profile, passed to template expansion
    """
    with TemporaryDirectory() as tempdir:
        if is_template:
            real_src = expand_template(src_file, build, tempdir, profile)
        else:
            real_src = src_file
        # Okay to check for directory here. It should've been created already.
        if not path.isdir(target):
            # Copy and/or generate one file.
            _convert_and_copy_info_file(real_src, target, build)
        else:
            # Is a directory. What we generate depends on the input.
            # By this point, it has to exist.
            src_type = _get_type(src_file)
            if src_type is None:
                # Just a copy.
                base = path.basename(src_file)
                copy(real_src, joinpath(target, base))
            else:
                # Generate one output file per target type registered for
                # this source type (e.g., .md -> .html, .pdf and .md).
                dest_map = INFO_PROCESSORS.get(src_type)
                if dest_map is None:
                    raise BuildError(
                        '(BUG: Processor mismatch) "{}" -> "{}".'.format(
                            src_file, target
                        )
                    )
                for dest_type in dest_map.keys():
                    (base, _) = path.splitext(path.basename(src_file))
                    out = joinpath(target, base + '.' + dest_type)
                    _convert_and_copy_info_file(real_src, out, build)
def process_master_notebook(dest_root, notebook, src_path, build, master_profile):
    """
    Process a master notebook: run the master parse tool over it in a
    temporary directory, then move the generated per-language notebooks
    into the student and instructor lab directories.

    :param dest_root: top-level target directory for build
    :param notebook: the notebook data from the build YAML
    :param src_path: the pre-calculated path to the source notebook
    :param build: parsed build data
    :param master_profile: master profile, or master_parser.TargetProfile.NONE

    :return: None
    """
    verbose("notebook={0}\ndest_root={1}".format(notebook, dest_root))
    notebook_type_map = build.notebook_type_map
    student_labs_subdir = build.output_info.student_labs_subdir
    instructor_labs_subdir = build.output_info.instructor_labs_subdir
    student_dir = joinpath(dest_root, student_labs_subdir)
    instructor_dir = joinpath(dest_root, instructor_labs_subdir)

    def move_master_notebooks(master, temp_output_dir):
        """
        Move master-parsed notebooks.

        :param master: the master notebook configuration data
        :param temp_output_dir: the temporary output directory
        """
        # See if we have to move the notebooks to other paths.
        for lang in set(EXT_LANG.values()):
            lc_lang = lang.lower()
            if not master.lang_is_enabled(lc_lang):
                continue

            # This language is desired.
            # Get the file name extension for the language. Note that this
            # extension INCLUDES the ".".
            lang_ext = LANG_EXT[lc_lang]

            # The master parse tool created <notebook-basename>/<lang>/*
            # in the temporary directory. The following recursive glob pattern
            # will make finding the files simple. In this glob pattern, {0} is
            # the notebook type (e.g., "_answers"), and {1} is the file
            # extension (e.g., ".py")
            glob_template = "**/*{0}*{1}"

            # Copy all answers notebooks and exercises notebooks to the student
            # labs directory. Copy all instructor notebooks to the instructor
            # labs directory.
            types_and_targets = []
            if master.exercises:
                types_and_targets.append(
                    (NotebookType.EXERCISES, student_dir)
                )
            if master.instructor:
                types_and_targets.append(
                    (NotebookType.INSTRUCTOR, instructor_dir)
                )
            if master.answers:
                types_and_targets.append((NotebookType.ANSWERS, student_dir))

            base, _ = path.splitext(path.basename(notebook.src))
            mp_notebook_dir = joinpath(temp_output_dir, base, lc_lang)
            lang_dir = lc_lang.capitalize()
            for notebook_type, target_dir in types_and_targets:
                # Use a recursive glob pattern to find all matching notebooks.
                # Note that eglob returns a generator.
                copied = 0
                suffix = NotebookType.suffix_for(notebook_type)
                glob_pattern = glob_template.format(suffix, lang_ext)
                matches = eglob(glob_pattern, mp_notebook_dir)
                ext = LANG_EXT[lc_lang]
                fields = merge_dicts(notebook.variables, {
                    TARGET_LANG: lang_dir,
                    # Strip the leading dot from the extension. Bug fix: the
                    # original tested ext.startswith(''), which is always
                    # True; the intended guard is for the leading '.'.
                    TARGET_EXTENSION: ext[1:] if ext.startswith('.') else ext,
                    NOTEBOOK_TYPE: notebook_type_map.get(notebook_type, '')
                })
                dest_subst = VariableSubstituter(
                    notebook.dest
                ).safe_substitute(
                    fields
                )
                # The destination must be relative to the target directory.
                if dest_subst.startswith(os.path.sep):
                    dest_subst = dest_subst[len(os.path.sep):]
                for f in matches:
                    target = path.normpath(joinpath(target_dir, dest_subst))
                    copy(f, target)
                    copied += 1
                if copied == 0:
                    error('Found no generated {0} {1} notebooks for "{2}"!'.
                        format(lang, notebook_type.value, notebook.src)
                    )

    verbose("Running master parse on {0}".format(src_path))
    master = notebook.master
    extra_template_vars = {}
    extra_template_vars.update(build.variables)
    extra_template_vars.update(notebook.variables)
    with TemporaryDirectory() as tempdir:
        try:
            params = master_parse.Params(
                path=src_path,
                output_dir=tempdir,
                databricks=True,
                ipython=False,
                scala=master.scala,
                python=master.python,
                r=master.r,
                sql=master.sql,
                instructor=True,
                exercises=True,
                answers=master.answers,
                notebook_heading_path=master.heading.path,
                add_heading=master.heading.enabled,
                notebook_footer_path=master.footer.path,
                add_footer=master.footer.enabled,
                encoding_in=master.encoding_in,
                encoding_out=master.encoding_out,
                enable_verbosity=verbosity_is_enabled(),
                copyright_year=build.course_info.copyright_year,
                target_profile=master_profile,
                course_type=build.course_info.type,
                enable_debug=master.debug,
                enable_templates=master.enable_templates,
                extra_template_vars=extra_template_vars
            )
            master_parse.process_notebooks(params)
            move_master_notebooks(master, tempdir)
        except Exception as e:
            # Bug fix: format the exception itself rather than e.message,
            # which does not exist on Python 3 exceptions.
            error("Failed to process {0}\n {1}: {2}".format(
                src_path, e.__class__.__name__, e
            ))
            raise
def copy_notebooks(build, labs_dir, dest_root, profile):
    """
    Copy the notebooks to the destination directory.

    :param build:     the parsed build information
    :param labs_dir:  target directory for student labs (created here)
    :param dest_root: top-level target directory for the build
    :param profile:   active profile name ('amazon' or 'azure'), or None
                      when profiles are not in use
    """
    os.makedirs(labs_dir)

    # Map the profile name to the master parser's target profile. An
    # unknown profile is a caller bug; raise a real exception instead of
    # relying on assert, which is stripped under "python -O".
    if profile is None:
        master_profile = master_parse.TargetProfile.NONE
    elif profile == 'amazon':
        master_profile = master_parse.TargetProfile.AMAZON
    elif profile == 'azure':
        master_profile = master_parse.TargetProfile.AZURE
    else:
        raise BuildError('(BUG) Unknown profile: {}'.format(profile))

    for notebook in build.notebooks:
        src_path = joinpath(build.source_base, notebook.src)
        # Skip notebooks that belong to a different profile.
        if (profile and notebook.only_in_profile and
            notebook.only_in_profile != profile):
            info('Suppressing notebook "{}", which is {}-only.'.format(
                src_path, profile.title()
            ))
            continue

        if notebook.master_enabled():
            # Master-parsed notebooks are generated and placed by
            # process_master_notebook().
            process_master_notebook(
                dest_root=dest_root,
                notebook=notebook,
                src_path=src_path,
                build=build,
                master_profile=master_profile
            )
        else:
            dest_path = joinpath(labs_dir, notebook.dest)
            copy(src_path, dest_path)

    remove_empty_subdirectories(dest_root)
def copy_instructor_notes(build, dest_root, profile):
    """
    Find instructor notes and course guides under build.source_base and
    copy the relevant ones (converted to HTML/PDF where applicable) into
    the instructor notes subdirectory of the output tree.
    """
    # Starting at build.source_base, look for instructor notes and course
    # guides. Only keep the ones for the labs and slides we're using.
    if build.notebooks:
        notebook_dirs = set([path.dirname(n.src) for n in build.notebooks])
    else:
        notebook_dirs = set()
    if build.slides:
        slide_dirs = set([path.dirname(s.src) for s in build.slides])
    else:
        slide_dirs = set()

    def lives_under_one_of(dirs, to_match):
        # True if to_match is a leading prefix of any directory in dirs
        # (i.e., some notebook directory lives at or below to_match).
        for d in dirs:
            if d.startswith(to_match):
                return True
        return False

    # File-name patterns identifying instructor notes and course guides.
    notes_re = re.compile(r'^instructor[-_]?notes[-._]', re.IGNORECASE)
    guide_re = re.compile(r'^guide\.', re.IGNORECASE)
    full_source_base = path.abspath(build.source_base)
    for (dirpath, _, filenames) in os.walk(build.source_base):
        for f in filenames:
            # Get the file path relative to the source file. With labs
            # (notebooks), if the file matches the instructor notes regex
            # AND anywhere under one of the notebook directories, copy it.
            #
            # For instructor guides, the guide must live in one of the
            # slides directories.
            rel_dir = path.abspath(dirpath)[len(full_source_base) + 1:]
            keep = False
            if notes_re.match(f) and lives_under_one_of(notebook_dirs, rel_dir):
                keep = True
            elif guide_re.match(f) and (rel_dir in slide_dirs):
                keep = True
            if keep:
                s = joinpath(dirpath, f)
                t = joinpath(dest_root,
                             build.output_info.instructor_dir,
                             INSTRUCTOR_NOTES_SUBDIR,
                             rel_dir,
                             f)
                (base, _) = path.splitext(path.basename(f))
                verbose("Copying {0} to {1}".format(s, t))
                copy_info_file(s, t, False, build, profile)
                # HTML sources can be converted to PDF directly; Markdown
                # sources are converted to HTML first, then to PDF.
                if is_html(s):
                    html = s
                else:
                    html = None
                if is_markdown(s):
                    t = joinpath(dest_root,
                                 build.output_info.instructor_dir,
                                 INSTRUCTOR_NOTES_SUBDIR,
                                 rel_dir,
                                 base + '.html')
                    html = t
                    markdown_to_html(s, t,
                                     stylesheet=build.markdown.html_stylesheet)
                if html:
                    t = joinpath(dest_root,
                                 build.output_info.instructor_dir,
                                 INSTRUCTOR_NOTES_SUBDIR,
                                 rel_dir,
                                 base + '.pdf')
                    html_to_pdf(html, t)
                # (This continue is redundant: it is the last statement in
                # the loop body.)
                continue
def make_dbc(gendbc, build, labs_dir, dbc_path):
    """
    Create a DBC file from the labs by shelling out to the gendbc tool.

    :param gendbc:   path to the gendbc executable
    :param build:    the parsed build information
    :param labs_dir: directory containing the generated labs
    :param dbc_path: path of the DBC file to create
    """
    parent_dir = path.dirname(labs_dir)
    with working_directory(parent_dir):
        simple_labs_dir = path.basename(labs_dir)
        # Assemble the command line, inserting -v when verbose output is on.
        pieces = [gendbc]
        if verbosity_is_enabled():
            pieces.append("-v")
        pieces.extend(
            ["-f", build.top_dbc_folder_name, simple_labs_dir, dbc_path]
        )
        cmd = " ".join(pieces)
        verbose("\nIn {0}:\n{1}\n".format(parent_dir, cmd))
        rc = os.system(cmd)
        if rc != 0:
            raise BuildError("Failed to create DBC: " + cmd)
def copy_slides(build, dest_root):
    """
    Copy the slides (if any) into the instructor slides subdirectory.
    """
    if not build.slides:
        return
    for slide in build.slides:
        source = joinpath(build.source_base, slide.src)
        target = joinpath(dest_root,
                          build.output_info.instructor_dir,
                          SLIDES_SUBDIR,
                          slide.dest)
        copy(source, target)
def copy_misc_files(build, dest_root, profile):
    """
    Copy the miscellaneous files (if any).

    :param build:     the parsed build information
    :param dest_root: top-level target directory for the build
    :param profile:   the build profile, passed through to copy_info_file()
    """
    if build.misc_files:
        for f in build.misc_files:
            s = joinpath(build.course_directory, f.src)
            dest = f.dest
            if dest == '.':
                dest = dest_root
            # NOTE(review): this creates "dest" relative to the current
            # working directory, while the copy target below is rooted at
            # dest_root -- presumably these coincide in practice; confirm
            # whether os.mkdir(joinpath(dest_root, dest)) was intended.
            if f.dest_is_dir and (not path.isdir(dest)):
                os.mkdir(dest)
            t = joinpath(dest_root, dest)
            copy_info_file(s, t, f.is_template, build, profile)
def copy_datasets(build, dest_root):
    """
    Copy the datasets (if any), along with their license and README files.
    The license and README are Markdown sources, converted here to both
    HTML and PDF alongside the data.
    """
    if build.datasets:
        def target_for(file, dest):
            # Full output path for "file" inside the student datasets tree.
            return joinpath(dest_root,
                            build.output_info.student_dir,
                            DATASETS_SUBDIR,
                            dest,
                            file)

        for ds in build.datasets:
            source = joinpath(build.course_directory, ds.src)
            copy(source, target_for(path.basename(source), ds.dest))

            css = build.markdown.html_stylesheet
            # Convert each accompanying Markdown document to HTML, then
            # render that HTML to PDF.
            for i in (ds.license, ds.readme):
                source = joinpath(build.course_directory, i)
                (base, _) = path.splitext(path.basename(i))
                pdf = target_for(base + ".pdf", ds.dest)
                html = target_for(base + ".html", ds.dest)
                markdown_to_html(source, html, stylesheet=css)
                html_to_pdf(html, pdf)
def remove_empty_subdirectories(directory):
    """Recursively delete every empty subdirectory beneath *directory*."""
    # Walk bottom-up so a directory emptied by deleting its children is
    # itself seen as empty on a later iteration.
    for current_dir, _, _ in os.walk(directory, topdown=False):
        if not os.listdir(current_dir):
            verbose("Deleting empty directory {0}".format(current_dir))
            os.rmdir(current_dir)
def write_version_notebook(dir, notebook_contents, version):
    """Write *notebook_contents* to the version notebook file under *dir*."""
    notebook_path = joinpath(dir, VERSION_NOTEBOOK_FILE.format(version))
    ensure_parent_dir_exists(notebook_path)
    with codecs.open(notebook_path, 'w', encoding='UTF-8') as out:
        out.write(notebook_contents)
def bundle_course(build, dest_dir, profile):
    """
    Create the course bundle zip file described by build.bundle_info,
    collecting the listed files from the built output in *dest_dir*.

    :raises BuildError: if a listed file is missing or is a directory
    """
    from zipfile import ZipFile

    # Expand any run-time variables in zipfile and dest.
    vars = {PROFILE_VAR: profile or ''}
    t = StringTemplate(joinpath(dest_dir, build.bundle_info.zipfile))
    zip_path = t.safe_substitute(vars)
    print('Writing bundle {}'.format(zip_path))

    with ZipFile(zip_path, 'w') as z:
        for file in build.bundle_info.files:
            src = joinpath(dest_dir, file.src)
            if not (path.exists(src)):
                raise BuildError(
                    'While building bundle, cannot find "{}".'.format(src)
                )
            if path.isdir(src):
                raise BuildError(
                    'Cannot make bundle: Source "{}" is a directory'.format(
                        src
                    )
                )
            # The archive member name may also contain run-time variables.
            dest = StringTemplate(file.dest).safe_substitute(vars)
            z.write(src, dest)
def do_build(build, gendbc, base_dest_dir, profile=None):
    """
    Run one complete build pass: copy notebooks, instructor notes, slides,
    misc files and datasets; write version notebooks; generate the student
    and instructor DBC files; and optionally create the course bundle.

    :param build:         the parsed build information
    :param gendbc:        path to the gendbc executable
    :param base_dest_dir: top-level output directory
    :param profile:       profile name; output goes to a per-profile
                          subdirectory when set
    """
    if profile:
        dest_dir = joinpath(base_dest_dir, profile)
    else:
        dest_dir = base_dest_dir

    for d in (build.output_info.instructor_dir, build.output_info.student_dir):
        mkdirp(joinpath(dest_dir, d))

    # Fields substituted into the generated version notebook.
    version = build.course_info.version
    fields = merge_dicts(build.variables, {
        'course_name': build.course_info.name,
        'version': version,
        'build_timestamp': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'),
        'year': build.course_info.copyright_year,
    })
    version_notebook = VariableSubstituter(
        VERSION_NOTEBOOK_TEMPLATE
    ).substitute(
        fields
    )

    labs_full_path = joinpath(dest_dir, build.output_info.student_labs_subdir)
    copy_notebooks(build, labs_full_path, dest_dir, profile)
    copy_instructor_notes(build, dest_dir, profile)
    write_version_notebook(labs_full_path, version_notebook, version)

    student_dbc = joinpath(
        dest_dir, build.output_info.student_dir, build.output_info.student_dbc
    )
    make_dbc(gendbc=gendbc,
             build=build,
             labs_dir=labs_full_path,
             dbc_path=student_dbc)

    # Instructor labs only exist when some notebook generated instructor
    # material; skip the instructor DBC otherwise.
    instructor_labs = joinpath(
        dest_dir, build.output_info.instructor_labs_subdir
    )
    if os.path.exists(instructor_labs):
        instructor_dbc = joinpath(
            dest_dir, build.output_info.instructor_dir,
            build.output_info.instructor_dbc
        )
        write_version_notebook(instructor_labs, version_notebook, version)
        make_dbc(gendbc, build, instructor_labs, instructor_dbc)

    copy_slides(build, dest_dir)
    copy_misc_files(build, dest_dir, profile)
    copy_datasets(build, dest_dir)

    if build.bundle_info:
        bundle_course(build, dest_dir, profile)

    # Finally, remove the instructor labs folder and the student labs
    # folder.
    if not build.keep_lab_dirs:
        rm_rf(labs_full_path)
        rm_rf(instructor_labs)
def build_course(opts, build, dest_dir):
    """
    Top-level build driver: validates the course, prepares the output
    directory, and runs do_build() once (or once per profile).

    :raises BuildError: if any errors were recorded during the build
    """
    if build.course_info.deprecated:
        die('{0} is deprecated and cannot be built.'.format(
            build.course_info.name
        ))

    gendbc = find_in_path('gendbc')
    verbose('Publishing to "{0}"'.format(dest_dir))
    if path.isdir(dest_dir):
        if not opts['--overwrite']:
            die(('Directory "{0}" already exists, and you did not ' +
                 'specify --overwrite.').format(dest_dir))
        rm_rf(dest_dir)

    if not build.use_profiles:
        do_build(build, gendbc, dest_dir, profile=None)
    else:
        for profile in VALID_PROFILES:
            info('')
            info("Building profile {}".format(profile))
            do_build(build, gendbc, dest_dir, profile)

    # "errors" is presumably a module-level counter incremented by error()
    # -- it is not defined in this block; confirm against the module top.
    if errors > 0:
        raise BuildError("{0} error(s).".format(errors))

    print("\nPublished {0}, version {1} to {2}\n".format(
        build.course_info.name, build.course_info.version, dest_dir
    ))
def dbw(subcommand, args, capture_stdout=True, db_profile=None):
    """
    Invoke "databricks workspace" with specified arguments.

    :param subcommand: the "databricks workspace" subcommand
    :param args: arguments, as a list
    :param capture_stdout: True to capture and return standard output. False
                           otherwise.
    :param db_profile: The --profile argument for the "databricks" command,
                       if any; None otherwise.

    :return: A tuple of (returncode, parsed_json) on error,
             or (returncode, stdout) on success. If capture_stdout is False,
             then a successful result will return an empty string for stdout.
    """
    dbw = find_in_path('databricks')
    try:
        full_args = [dbw, 'workspace', subcommand] + args
        if db_profile:
            full_args.append('--profile')
            full_args.append(db_profile)
        verbose('+ {0}'.format(' '.join(full_args)))
        stdout_loc = subprocess.PIPE if capture_stdout else None
        # stderr is merged into stdout so error text is captured too.
        p = subprocess.Popen(full_args,
                             stdout=stdout_loc, stderr=subprocess.STDOUT)
        if capture_stdout:
            (stdout, _) = p.communicate()
            stdout = stdout.decode('UTF-8')
        else:
            stdout = ''
            p.wait()
        # Bug fix: compare the return code with ==, not "is". Identity
        # comparison against an integer literal only works by accident of
        # CPython's small-int caching.
        if p.returncode == 0:
            return (p.returncode, stdout)
        elif stdout.startswith('Error: {'):
            # The CLI prints errors as 'Error: {json...}'; strip the prefix
            # and parse the JSON payload.
            j = json.loads(stdout.replace('Error: {', '{'))
            j['message'] = j.get('message', '').replace(r'\n', '\n')
            return (p.returncode, j)
        else:
            return (p.returncode, {'error_code': 'UNKNOWN', 'message': stdout})
    except OSError as e:
        # Bug fix: format the exception itself; OSError has no .message
        # attribute on Python 3.
        return (1, {'error_code': 'OS_ERROR', 'message': str(e)})
def ensure_shard_path_exists(shard_path, db_profile):
    """
    Abort (via die()) unless *shard_path* exists in the remote workspace.
    """
    rc, res = dbw('ls', [shard_path], db_profile=db_profile)
    if rc == 0 and res.startswith(u'Usage:'):
        die('(BUG) Error in "databricks" command:\n{0}'.format(res))
    elif rc == 0:
        # Path exists.
        pass
    else:
        message = res.get('message', '?')
        if res.get('error_code', '?') == 'RESOURCE_DOES_NOT_EXIST':
            # The path genuinely does not exist -- the condition this
            # function guards against. Bug fix: the message now reports the
            # shard path itself; it previously interpolated the API error
            # text (and carried an incorrect "All good" comment).
            die('Shard path "{0}" does not exist.'.format(shard_path))
        else:
            # Some other error
            die('Unexpected error with "databricks": {0}'.format(message))
def ensure_shard_path_does_not_exist(shard_path, db_profile):
    """
    Abort (via die()) if *shard_path* already exists in the remote
    workspace.
    """
    rc, res = dbw('ls', [shard_path], db_profile=db_profile)
    if rc == 0 and res.startswith('Usage:'):
        die('(BUG) Error in "databricks" command:\n{0}'.format(res))
    elif rc == 0:
        # Listing succeeded, so the path is present -- exactly what we
        # don't want.
        die('Shard path "{0}" already exists.'.format(shard_path))
    else:
        message = res.get('message', '?')
        if res.get('error_code', '?') != 'RESOURCE_DOES_NOT_EXIST':
            # Some error other than "not found".
            die('Unexpected error with "databricks": {0}'.format(message))
        # RESOURCE_DOES_NOT_EXIST: the path is absent, as desired.
def expand_shard_path(shard_path):
    """
    Resolve *shard_path* to an absolute workspace path. Absolute paths are
    returned unchanged; relative paths are resolved against the user's
    workspace home, taken from the DB_SHARD_HOME environment variable or,
    failing that, the "home" setting in ~/.databrickscfg.
    """
    if shard_path.startswith('/'):
        return shard_path

    # Relative path. Look for DB_SHARD_HOME environment variable.
    home = os.getenv(DB_SHARD_HOME_VAR)
    if home is not None:
        # Treat a blank environment variable the same as an unset one.
        if len(home.strip()) == 0:
            home = None
    if home is None:
        # Fall back to the "home" setting in the Databricks config file.
        db_config = os.path.expanduser('~/.databrickscfg')
        if os.path.exists(db_config):
            cfg = SafeConfigParser()
            cfg.read(db_config)
            try:
                home = cfg.get('DEFAULT', 'home')
            except NoOptionError:
                pass
    if home is None:
        die(('Shard path "{0}" is relative, but environment variable {1} ' +
             'does not exist or is empty, and there is no "home" setting in ' +
             '{2}.').format(shard_path, DB_SHARD_HOME_VAR, db_config))

    # An empty relative path means "the home directory itself".
    if shard_path == '':
        shard_path = home
    else:
        shard_path = '{0}/{1}'.format(home, shard_path)

    return shard_path
def notebook_is_transferrable(nb, build):
    """
    Return True if notebook *nb* may be uploaded/downloaded. Returns False
    (after logging an informational message) when the notebook has
    upload_download disabled.
    """
    if nb.upload_download:
        return True
    full_path = path.abspath(joinpath(build.source_base, nb.src))
    info('Skipping notebook "{0}": It has upload_download disabled.'.format(
        full_path
    ))
    return False
def get_sources_and_targets(build):
    """
    Get the list of source notebooks to be uploaded/downloaded and map them
    to their target names on the shard.

    :param build: the build

    :return: A dict of source names to partial-path target names
    """
    template_data = {
        TARGET_LANG: '',
        NOTEBOOK_TYPE: '',
    }
    # Splits a file name into an optional leading "<digits>-" prefix and
    # the rest, so profile abbreviations can be inserted after the prefix.
    profile_subst_pattern = re.compile(r'^(\d*-?)(.*)$')

    def map_notebook_dest(nb):
        """
        Compute the partial target path for notebook *nb*: substitute the
        template variables into nb.dest, strip leading slashes, and insert
        a profile abbreviation into the file name when the notebook is
        profile-specific.
        """
        template_data2 = {}
        template_data2.update(template_data)
        _, ext = path.splitext(nb.src)
        if ext:
            ext = ext[1:] # skip leading '.'

        template_data2[TARGET_EXTENSION] = ext
        p = path.normpath(
            leading_slashes.sub(
                '', VariableSubstituter(nb.dest).safe_substitute(template_data2)
            )
        )
        if nb.only_in_profile:
            (dir, file) = (path.dirname(p), path.basename(p))
            m = profile_subst_pattern.match(file)
            if not m:
                new_file = '{}-{}'.format(
                    PROFILE_ABBREVIATIONS[nb.only_in_profile], file
                )
            else:
                new_file = '{}{}-{}'.format(
                    m.group(1), PROFILE_ABBREVIATIONS[nb.only_in_profile],
                    m.group(2)
                )
            p = joinpath(dir, new_file)
        return p

    leading_slashes = re.compile(r'^/+')

    # Dead code removed: a first pass tallied per-destination notebook
    # counts into a dict that was never read, and the per-notebook
    # basename/extension computations in the main loop were unused.
    res = {}
    for nb in build.notebooks:
        if not notebook_is_transferrable(nb, build):
            continue
        nb_full_path = path.abspath(joinpath(build.source_base, nb.src))
        res[nb_full_path] = map_notebook_dest(nb)
    return res
def upload_notebooks(build, shard_path, db_profile):
    """
    Upload the build's notebooks to *shard_path* in the remote workspace.
    The notebooks are first staged, under their target names, in a
    temporary directory, then uploaded in one "import_dir" call. On
    failure the partially-created remote directory is removed.
    """
    shard_path = expand_shard_path(shard_path)
    ensure_shard_path_does_not_exist(shard_path, db_profile)

    notebooks = get_sources_and_targets(build)
    try:
        with TemporaryDirectory() as tempdir:
            info("Copying notebooks to temporary directory.")
            for nb_full_path, partial_path in notebooks.items():
                if not path.exists(nb_full_path):
                    warning('Notebook "{}" does not exist. Skipping it.'.format(
                        nb_full_path
                    ))
                    continue
                temp_path = joinpath(tempdir, partial_path)
                dir = path.dirname(temp_path)
                mkdirp(dir)
                verbose('Copying "{0}" to "{1}"'.format(nb_full_path, temp_path))
                copy(nb_full_path, temp_path)

            with working_directory(tempdir):
                info("Uploading notebooks to {0}".format(shard_path))
                rc, res = dbw('import_dir', ['.', shard_path],
                              capture_stdout=False, db_profile=db_profile)
                if rc != 0:
                    raise UploadDownloadError(
                        "Upload failed: {0}".format(res.get('message', '?'))
                    )
                else:
                    info("Uploaded {0} notebooks to {1}.".format(
                        len(notebooks), shard_path
                    ))
    except UploadDownloadError as e:
        # Clean up the partially-uploaded remote directory.
        dbw('rm', [shard_path], capture_stdout=False, db_profile=db_profile)
        # NOTE(review): e.message assumes the exception class defines a
        # "message" attribute (it does not exist on Python 3 exceptions by
        # default) -- confirm against UploadDownloadError's definition.
        die(e.message)
def download_notebooks(build, shard_path, db_profile):
    """
    Download the build's notebooks from *shard_path* in the remote
    workspace back into their local source locations, warning about any
    remote files that do not correspond to notebooks in the build.
    """
    shard_path = expand_shard_path(shard_path)
    ensure_shard_path_exists(shard_path, db_profile)

    # get_sources_and_targets() returns a dict of
    # local-path -> remote-partial-path. Reverse it. Bail if there are any
    # duplicate keys, because it's supposed to be 1:1.
    remote_to_local = {}
    for local, remote in get_sources_and_targets(build).items():
        if remote in remote_to_local:
            die('(BUG): Found multiple instances of remote path "{0}"'.format(
                remote
            ))
        remote_to_local[remote] = local

    with TemporaryDirectory() as tempdir:
        info("Downloading notebooks to temporary directory")
        with working_directory(tempdir):
            rc, res = dbw('export_dir', [shard_path, '.'], db_profile=db_profile)
            if rc != 0:
                die("Download failed: {0}".format(res.get('message', '?')))

            for remote, local in remote_to_local.items():
                if not path.exists(remote):
                    warning(('Cannot find downloaded version of course ' +
                             'notebook "{0}".').format(local))
                print('"{0}" -> {1}'.format(remote, local))
                # Make sure there's a newline at the end of each file.
                move(remote, local, ensure_final_newline=True)

            # Are there any leftovers?
            leftover_files = []
            for root, dirs, files in os.walk('.'):
                for f in files:
                    leftover_files.append(path.relpath(joinpath(root, f)))
            if len(leftover_files) > 0:
                warning(("These files from {0} aren't in the build file and" +
                         " were not copied").format(shard_path))
                for f in leftover_files:
                    print("    {0}".format(f))
def list_notebooks(build):
    """Print the full source path of every notebook in the build."""
    for nb in build.notebooks:
        print(joinpath(build.source_base, nb.src))
def print_info(build, shell):
    """
    Print the course name and version: as shell variable assignments when
    *shell* is truthy, otherwise in human-readable form.
    """
    if shell:
        print('COURSE_NAME="{}"; COURSE_VERSION="{}"'.format(
            build.name, build.course_info.version
        ))
        return
    print("Course name:    {}".format(build.name))
    print("Course version: {}".format(build.course_info.version))
# ---------------------------------------------------------------------------
# Main program
# ---------------------------------------------------------------------------
def main():
    """
    Command-line entry point: parse arguments, load the build YAML, and
    dispatch to list/info/upload/download/build.
    """
    opts = parse_args()

    if opts['--verbose']:
        set_verbosity(True, verbose_prefix='bdc: ')

    course_config = opts['BUILD_YAML'] or DEFAULT_BUILD_FILE
    if not os.path.exists(course_config):
        die('{} does not exist.'.format(course_config))

    try:
        build = load_build_yaml(course_config)
        dest_dir = (
            opts['--dest'] or
            joinpath(os.getenv("HOME"), "tmp", "curriculum", build.course_id)
        )
        if opts['--list-notebooks']:
            list_notebooks(build)
        elif opts['--info']:
            print_info(build, opts['--shell'])
        elif opts['--upload']:
            upload_notebooks(build, opts['SHARD_PATH'], opts['--dprofile'])
        elif opts['--download']:
            download_notebooks(build, opts['SHARD_PATH'], opts['--dprofile'])
        else:
            build_course(opts, build, dest_dir)

    # NOTE(review): e.message assumes these exception classes define a
    # "message" attribute (it is absent on Python 3 exceptions by default)
    # -- confirm against ConfigError/BuildError's definitions.
    except ConfigError as e:
        die('Error in "{0}": {1}'.format(course_config, e.message))
    except BuildError as e:
        die(e.message)
if __name__ == '__main__':
main()
| [
"bmc@clapper.org"
] | bmc@clapper.org |
8137af58f166a6d22239abbc40893487c9bdbcd3 | b5926d11c69478742f9524d8a2533dcc5c88d57e | /图片处理/照片自动剪裁/get_img_from_baike.py | dad29b8596db6272df3c94a7e7c86b17b498555e | [
"MIT"
] | permissive | simoooncao/CourseDesign0000 | af0526f7b1c561d377f9164ae050acd2deb5168c | 3d510bb9787a9cd690e4e77b361cdafdd6ddaa66 | refs/heads/main | 2023-01-20T03:21:45.617368 | 2020-12-02T13:47:07 | 2020-12-02T13:47:07 | 317,871,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | import requests
from bs4 import BeautifulSoup
def get_img_from_baike(star):
headers = {
'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
response = requests.get('https://baike.baidu.com/search/word' , params = {'word' : star} , headers = headers)
with open('test.html' , 'w' , encoding = 'utf-8') as file:
file.write(response.text)
soap = BeautifulSoup(response.text , 'lxml')
soap1 = soap.select('.layout style')
if len(soap1) != 0:
soap = soap1[0].string
img_url = soap.split('background-image')[1].split('\'')[1]
else:
soap1 = soap.select('.summary-pic a img')
if len(soap1) != 0:
img_url = soap1[0].attrs['src']
else:
img_url = ''
return img_url
def download(file_name):
    """
    Read names (one per line) from <file_name>.txt, look up each name's
    portrait URL via get_img_from_baike(), and save the image under
    ./img_raw/<name>.jpg. Names whose lookup fails are appended to
    error.txt.
    """
    # Use context managers so both files are closed even when a request or
    # write raises; the error log was previously leaked on an exception.
    with open('error.txt', 'a') as error, \
            open(file_name + '.txt', 'r') as file:
        for line in file:
            line = line.replace('\n', '')
            if line == '':
                continue
            print(line)
            url = get_img_from_baike(line)
            if url == '':
                print('获取url失败!失败条目已记录。')
                error.write('%s\n' % line)
                continue
            print('获取url成功!')
            print(url)
            r = requests.get(url)
            with open('./img_raw/' + line + '.jpg', 'wb') as f:
                f.write(r.content)
            print('下载成功\n')
download('actor')
download('singer') | [
"noreply@github.com"
] | noreply@github.com |
0007be916107d3d43c886de09370af6c919edac6 | 37e84c832b4016b7e5b4fa6f99da00977cf010f7 | /datayes/stocks_lifecycle.py | 9c4d260ad28c77496db2287ec4da531e2d7c0c95 | [
"MIT"
] | permissive | CoderDream/python-best-practice | 24e4a09e778cc6d0ce846edf1e7b4fb66c536b0e | 40e6b5315daefb37c59daa1a1990ac1ae10f8cca | refs/heads/master | 2020-08-14T13:57:16.309356 | 2019-10-28T09:20:03 | 2019-10-28T09:20:03 | 215,180,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from pandas import DataFrame
from dataapiclient import Client
import json
client = Client()
client.init('cae5c4acc4ad4ccb93a8aaac4b8adb04363feaa9852c34d14ddd2248613b09b3')
url='/api/equity/getEqu.json?field=ticker,secShortName,listDate,delistDate&listStatusCD=L,S,DE,UN&secID=&ticker=&equTypeCD=A'
code, result = client.getData(url)
j = json.loads(result.decode())
d = DataFrame(j['data'])
d = d.set_index('ticker')
d = d[['secShortName','listDate','delistDate']]
d.to_csv('data/ticker_and _day_of_(de)list_date.csv') | [
"coderdream@gmail.com"
] | coderdream@gmail.com |
90472ae1500003128c099c82b18c65cd294fb594 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_67/run_cfg.py | b7a42574ec89cfa3b75f992ff17c74f8999faf28 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source from base_cfg with this job's slice of the
# DYJetsToLL CMG tuples. Duplicate-event checking is disabled and
# cmgStructuredPFJets collections are dropped on input.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1297.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1298.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1299.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_13.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_130.root')
    )
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
c47949e80dc18da7c3ae83d53109b924756a3750 | cc31fef8092e4ed996a8f54e199558be15fd4d0e | /adzerk_decision_sdk/models/user.py | 7edda0d54021679bb351d6aaf83243be7629f391 | [] | no_license | Sandy4321/adzerk-decision-sdk-python | 1519683aeef1357b72386d62f66e89c4246ae6b6 | d92d86d26710ada4cdc9a0e5a142f8907bbb80a6 | refs/heads/master | 2023-03-12T14:24:13.198840 | 2021-01-22T20:57:03 | 2021-01-22T20:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | # coding: utf-8
"""
Adzerk Decision API
Adzerk Decision API # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from adzerk_decision_sdk.configuration import Configuration
# Auto-generated OpenAPI model. Prefer changing the generator template over
# hand-editing this class.
class User(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'key': 'str'
    }

    attribute_map = {
        'key': 'key'
    }

    def __init__(self, key=None, local_vars_configuration=None): # noqa: E501
        """User - a model defined in OpenAPI""" # noqa: E501
        # Fall back to a default Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._key = None
        self.discriminator = None

        if key is not None:
            self.key = key

    @property
    def key(self):
        """Gets the key of this User. # noqa: E501

        The UserKey used for UserDB Targeting # noqa: E501

        :return: The key of this User. # noqa: E501
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """Sets the key of this User.

        The UserKey used for UserDB Targeting # noqa: E501

        :param key: The key of this User. # noqa: E501
        :type: str
        """

        self._key = key

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, User):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, User):
            return True

        return self.to_dict() != other.to_dict()
| [
"CrshOverride@gmail.com"
] | CrshOverride@gmail.com |
b3712377d76d579b8ddb217ced898b5037e49fdf | 931110410da99d69ee262a8ad6cf964a9e9e6a92 | /codes/city_cells.py | 42e343f18850b155cf3658914cad2c1882c8d029 | [] | no_license | azspark/DRIVER_TRAJECTORY_ANALYSIS | d20c188a82f6f8c68db64f5db3309d320123129b | ab3f63896e7a1de71e8d92ef9a0ddfc0df59da7a | refs/heads/master | 2022-08-28T17:57:17.330223 | 2020-05-31T17:39:04 | 2020-05-31T17:39:04 | 263,628,824 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from .cell import Cell
from .utils import Engrider
import numpy as np
import matplotlib.pyplot as plt
class CityCells:
    """Index, control and update the cell information"""
    def __init__(self, trajectories, geo_range=None, div_number=None):
        # geo_range and div_number are accepted but not used yet;
        # presumably they will parameterize the grid (cf. the Engrider
        # import) -- TODO confirm.
        self.update_info(trajectories)

    def _init(self):
        # Placeholder; never called within this class.
        pass

    def update_info(self, trajectories):
        # Placeholder: iterates the trajectories but records nothing yet.
        for traj in trajectories:
            pass

    def vis_city_info(self):
        # Placeholder for future visualization (matplotlib is imported at
        # module level but unused here).
        pass
"zz1254657569@gmail.com"
] | zz1254657569@gmail.com |
d82811fa7169c6fd818b1953a4e1c0085072e58f | 51d504622c8bde5096d954bf9b38789d48ba4ff7 | /Python/flask_mysql/db_connection/first_flask/friend.py | 8002c4e32a64bc2978ac466a35b0e0c2d1c09ecc | [] | no_license | BLee1126/Dojo-Assignments | 36c8fb2294c5cd6a04c065415aae12225c0eb483 | d40d8f6569b1f504d1785d8f34d27c58eab406c8 | refs/heads/main | 2023-07-10T16:00:30.863709 | 2021-08-20T16:42:10 | 2021-08-20T16:42:10 | 368,209,920 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | # import the function that will return an instance of a connection
from mysqlconnection import connectToMySQL
# model the class after the friend table from our database
class Friend:
    """Model for one row of the `friends` table."""

    def __init__(self, data):
        """Populate instance fields from a result-row dict."""
        self.id = data['id']
        self.first_name = data['first_name']
        self.last_name = data['last_name']
        self.occupation = data['occupation']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    @classmethod
    def get_all(cls):
        """Fetch every row of the friends table as Friend instances."""
        sql = "SELECT * FROM friends;"
        # connectToMySQL is given the schema name to target.
        rows = connectToMySQL('first_flask').query_db(sql)
        return [cls(row) for row in rows]

    @classmethod
    def save(cls, data):
        """Insert a new friend; `data` supplies the %(fname)s/%(lname)s/%(occ)s values."""
        sql = "INSERT INTO friends ( first_name , last_name , occupation , created_at, updated_at ) VALUES ( %(fname)s , %(lname)s , %(occ)s , NOW() , NOW() );"
        return connectToMySQL('first_flask').query_db(sql, data)
| [
"blee1126@gmail.com"
] | blee1126@gmail.com |
8293596737e9fa47b9ba92989e1082e5de3679f7 | 096d6ae171eb86fb864a667bfe11af5fcec0ad9e | /happy/cli.py | f2e7dd2b34006e096ddaa435c4ebce0303f82f4a | [
"MIT"
] | permissive | msabramo/happy | 3a3196898aad95624836499b6d2e584c45fefe7d | 7b582b039f78b6536c0f275a1412c3e570eec800 | refs/heads/master | 2023-06-09T13:58:44.315107 | 2015-02-16T03:17:49 | 2015-02-16T03:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py | """
The command-line interface for happy!
"""
import json
import os
import subprocess
import sys
import click
import happy
def _infer_tarball_url():
    """Returns the tarball URL inferred from an app.json, if present.

    Returns None when app.json is missing, is not valid JSON, or has no
    "repository" entry.  (The original crashed with TypeError on
    ``None + str`` when the key was absent, and let malformed JSON
    propagate as ValueError.)
    """
    try:
        with click.open_file('app.json', 'r') as f:
            contents = f.read()
        app_json = json.loads(contents)
    except (IOError, ValueError):
        # Missing file or unparseable JSON: nothing to infer.
        return None
    repository = app_json.get('repository')
    if not repository:
        return None
    return repository + '/tarball/master/'
def _write_app_name(app_name):
    """Writes the app name to the .happy file."""
    # Persist the running app's name so a later `happy down` can find it.
    with click.open_file('.happy', 'w') as f:
        f.write(str(app_name))
def _read_app_name():
    """Reads the app name from the .happy file."""
    try:
        with click.open_file('.happy', 'r') as state_file:
            contents = state_file.read()
    except IOError:
        # No .happy file means no app is currently tracked.
        return None
    return contents.strip()
def _delete_app_name_file():
    """Deletes the .happy file. :("""
    # Raises OSError if the file is missing; the only caller (`down`)
    # reaches this point only after _read_app_name() found a name.
    os.remove('.happy')
# Root click command group; the `up` and `down` subcommands attach to it.
@click.group(name='happy')
def cli():
    """Quickly set up and tear down Heroku apps!"""
@cli.command(name='up')
@click.option('--tarball-url', help='URL of the tarball containing app.json.')
def up(tarball_url):
    """Brings up a Heroku app."""
    # Explicit option wins; otherwise try to infer the URL from app.json.
    source = tarball_url or _infer_tarball_url()
    if not source:
        click.echo('No tarball URL found.')
        sys.exit(1)
    click.echo('Creating app... ', nl=False)
    build, name = happy.create(tarball_url=source)
    click.echo(name)
    click.echo('Building... ', nl=False)
    happy.wait(build)
    # Remember the app name for a later `happy down`.
    _write_app_name(name)
    click.echo('done')
    click.echo("It's up! :) https://%s.herokuapp.com" % name)
@cli.command(name='down')
def down():
    """Brings down a Heroku app."""
    name = _read_app_name()
    if not name:
        click.echo('No app is running.')
        sys.exit(1)
    click.echo('Destroying app %s... ' % name, nl=False)
    happy.delete(app_name=name)
    # Forget the app name now that the app is gone.
    _delete_app_name_file()
    click.echo('done')
    click.echo("It's down. :(")
| [
"joe@joefriedl.net"
] | joe@joefriedl.net |
9e72a2f8ab1adc2e387356b9dca2d04e223e20f5 | be0232a12312400d92f33c7eb2723b425be1c00b | /polestar/apps/ship/admin.py | 4d7a6a8c87476e7ffb5de28d67831215fb2bbda7 | [] | no_license | eddumpy/PS | e53add572d2403e289f60a1a7d34730c63279ca0 | b9595872f76e73c13b603245f0226f378d05de62 | refs/heads/master | 2020-05-15T18:05:51.166882 | 2019-04-27T15:28:23 | 2019-04-27T15:28:23 | 182,417,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Ship
# Bug fix: `admin.register(Ship)` only returns a decorator (meant for
# `@admin.register(Ship)` above a ModelAdmin) and registered nothing.
# `admin.site.register` actually exposes Ship in the admin site.
admin.site.register(Ship)
"eddumpleton@gmail.com"
] | eddumpleton@gmail.com |
8be394afa029f9465ec9e58d8ef925a43b7461c5 | 6204b534860e35bed2f52923538b7e50f6fc4a20 | /tests/context.py | 3a17d2ee1d10159e00ed94d4b9c5c7af16d285f7 | [
"MIT"
] | permissive | brian-mclaverty/plepy | 7d73b4257c9365ab039be82338293fbc84e4e0ef | 4c39d8723633045737ef2a5f793dbfb7403ab857 | refs/heads/master | 2022-12-15T13:56:15.919883 | 2020-09-03T21:01:06 | 2020-09-03T21:01:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | import os
import sys
# Make the package root (one directory up from tests/) importable so the
# test modules can `import plepy` without installing it.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '..')))
import plepy
"monshapiro@gmail.com"
] | monshapiro@gmail.com |
30c452b548f2ff28109da3212b07745400c34990 | 8f8d1b973bf396501442c89151558994088e5681 | /recognition/views.py | 5a9b41bca78b5f52a95fae4414ee09e21b0c495e | [] | no_license | minwoo9629/capstone_design | 0968f9fe1fb23791004ba109f0cc3ae76ad7c0a2 | 01109d454c7bc283151929d7b678e9198d8c2358 | refs/heads/master | 2022-12-09T08:37:43.481372 | 2020-11-25T05:22:22 | 2020-11-25T05:22:22 | 245,607,259 | 1 | 1 | null | 2022-12-06T20:22:16 | 2020-03-07T09:53:26 | JavaScript | UTF-8 | Python | false | false | 1,976 | py | from django.shortcuts import render
from .recognition import detect_face, test_detect
from django.contrib.auth.models import User
from student.models import Student
from lecture.models import Lecture
from attendance.models import Attendance, facial_attendance
from datetime import datetime
from time import strftime, time, localtime
from django.core.exceptions import ObjectDoesNotExist
import json
# Create your views here.
"""
detect_face(udata, lec1, ['09:00']) 리스트는 json 키값 임의로 배정해서 넣음
테스트를 위해 만든 함수
실제로 작동되는 함수는 recogniiton.py의 detect_face
"""
def recognition(request):
    """Run face detection for the demo lecture and record attendance.

    Builds a username -> photo-path map for every student, runs
    test_detect over it, then marks each student ATTEND/ABSENT for the
    hard-coded '11:00' slot: a detector score > 100 counts as present.
    Existing attendance rows for today are updated in place; missing
    rows are created.
    """
    lec1 = '1'  # hard-coded demo lecture id
    student_data = Student.objects.all()
    udata = {}
    for data in student_data:
        udata[data.username] = str(data.photo)
    # numbering[i] is the detection score for the i-th username in udata.
    numbering = test_detect(udata, lec1, ['11:00'])
    print(numbering)
    now = datetime.now()  # NOTE(review): unused
    ymd = strftime('%Y-%m-%d',localtime(time()))
    for number in range(len(numbering)):
        #print(list(udata.keys())[number])
        try:
            # Update today's existing attendance record for this student.
            user = User.objects.get(username=list(udata.keys())[number])
            attend = facial_attendance.objects.filter(time=ymd).filter(lecture_id=lec1).get(username=user)
            attend_result = json.loads(attend.result)
            if numbering[number] > 100:
                d = json.loads('{"' + '11:00' + '" : "ATTEND"}')
            else:
                d = json.loads('{"' + '11:00' + '" : "ABSENT"}')
            # Merge the 11:00 verdict into the stored JSON result map.
            attend_result.update(d)
            result = json.dumps(attend_result)
            attend.result = result
            attend.save()
            #print("1234")
        except ObjectDoesNotExist:
            # No record for today yet: create one with just the 11:00 slot.
            user = User.objects.get(username=list(udata.keys())[number])
            lec = Lecture.objects.get(id=lec1)
            if numbering[number] > 100:
                new_post = facial_attendance.objects.create(username=user, lecture=lec, result='{"' + '11:00' + '" : "ATTEND"}' )
            else:
                new_post = facial_attendance.objects.create(username=user, lecture=lec, result='{"' + '11:00' + '" : "ABSENT"}' )
    # NOTE(review): passes the bound method `udata.values`, not
    # `udata.values()` -- confirm what the template expects.
    return render(request, "check.html", {'udata':udata.values})
| [
"smw123123@naver.com"
] | smw123123@naver.com |
2b267595d14127e4b06d71f257952e8fbf9f49f2 | b1bd0990ced7a25bf016123a3063f1ba2a3d3ed9 | /util/perf.py | b88cbd3d7223b88d6268a9cfc9ad3b7e47ded370 | [
"MIT"
] | permissive | sbirch/webtalk | f0ee38a81062ee0f798cccdb3bfe870e517f9409 | 72ee81ff50bc9fe586f4007cc7213313694cf962 | refs/heads/master | 2020-05-03T12:44:56.021937 | 2013-12-30T19:17:44 | 2013-12-30T19:17:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | import time
# LIFO stack of (label, start_time) pairs; index 0 is the newest timer,
# so nested tick()/tock() pairs match up correctly.
timers = []

def tick(label=None):
    """Start a timer, optionally labelled, and push it onto the stack."""
    timers.insert(0, (label, time.time()))

def tock():
    """Stop the most recent timer and print its elapsed time.

    Durations under one second are printed in milliseconds, otherwise
    in seconds.  Fixes: `print` statements made function-call form
    (works on both Python 2 and 3) and `!= None` replaced with the
    PEP 8 identity comparison.
    """
    label, started = timers.pop(0)
    elapsed = time.time() - started
    if elapsed < 1:
        elapsed = '%.1fms' % (elapsed * 1000.0)
    else:
        elapsed = '%.2fs' % elapsed
    if label is not None:
        print('%s took %s' % (label, elapsed))
    else:
        print('%s' % elapsed)
"andrewkov@Andrews-MacBook-Air.local"
] | andrewkov@Andrews-MacBook-Air.local |
c40c6e259e4a638842d4f24d9fb439d03fa4f511 | 6551182069743e16c45b651696f71d8be26fc68a | /main/migrations/0010_stocks_price.py | 4dea1f420477f03725ef214d0bb4b757521dda6d | [] | no_license | srx64/ms105_shp_exchange | cef7c986eb078f64ff4f13dd206cb811040757e2 | 8218ee7309e8513cdb041efe7f6afaae70e09695 | refs/heads/master | 2023-07-04T23:24:16.976337 | 2021-07-13T19:47:57 | 2021-07-13T19:47:57 | 381,255,257 | 86 | 5 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Generated by Django 3.1.7 on 2021-05-06 23:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `price` FloatField (default 0)
    # to the `Stocks` model in the `main` app.
    dependencies = [
        ('main', '0009_auto_20210424_0954'),
    ]
    operations = [
        migrations.AddField(
            model_name='stocks',
            name='price',
            field=models.FloatField(default=0),
        ),
    ]
| [
"dasha.mishkina117@gmail.com"
] | dasha.mishkina117@gmail.com |
1a84d01eee6ca1ccaf632175d9d562efed8e789b | 10b7d40e5c6e35e0755e2431d83ace861ede295e | /pocket_change/rest/__init__.py | 7b0ccd6cf21e47601e762648923be675f2e227f2 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | samdfonseca/pocket_change | ddeef5bfe2dc94dca99701affd0911f098a32182 | cb8a92603e1bee0eb096806f36223998cd22d5e1 | refs/heads/master | 2021-01-16T18:50:31.695270 | 2015-06-19T04:16:01 | 2015-06-19T04:16:01 | 32,945,228 | 0 | 0 | null | 2015-03-26T18:36:30 | 2015-03-26T18:36:30 | null | UTF-8 | Python | false | false | 63 | py | from flask import Blueprint
# Shared blueprint for this package's REST views; route modules attach
# their endpoints to `api` (registration on the app happens elsewhere --
# not visible in this module).
api = Blueprint('rest', __name__)
"silas.ray@gmail.com"
] | silas.ray@gmail.com |
b056870a8e5ec22e1ecf472c5da304f774d3f3a8 | 6abfac6f7eaf411bea02af907efa63375142ee29 | /build/industrial_core/industrial_robot_simulator/catkin_generated/pkg.develspace.context.pc.py | ebfed93b24688382e39e775a9e55f060904cf22d | [] | no_license | PorkPy/catkin_ws | 4e20b0baf12386f2b26f8a24e15faed35f799c00 | 31c24379ee9f5121c06d9cf163b8e83235955a01 | refs/heads/master | 2020-04-22T21:18:58.715283 | 2019-02-14T10:25:17 | 2019-02-14T10:25:17 | 170,670,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values below are baked in by CMake at configure time.  The
# `"" != ""` guards always take the empty-list branch here because this
# package exports no include directories or libraries.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "industrial_robot_simulator"
PROJECT_SPACE_DIR = "/home/ur5/catkin_ws2/devel"
PROJECT_VERSION = "0.5.1"
| [
"dommckean@gmail.com"
] | dommckean@gmail.com |
d0548a4d828307ef22b8dfcec338d3734dfdbd6a | b7b6777bcd86311ebe36187e4bdc206b2f0a4fed | /Capstone/dashboard/views.py | af798f99e48eb8e752cb9681529e22432b06bc62 | [] | no_license | zhenyul1/Capstonedashboard | c980584a5e9a37c8bb2061eefc09f7b8106991aa | 8dce277150ccf95dde47794e7b04e4299c24113e | refs/heads/master | 2020-03-16T04:11:34.768478 | 2018-05-09T16:43:55 | 2018-05-09T16:43:55 | 132,505,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,203 | py | from django.http import JsonResponse
from django.shortcuts import render
from datetime import datetime, timedelta
import requests
import ast
from collections import defaultdict, namedtuple, OrderedDict
def home(request):
    """Render the machine-overview table.

    Fetches the machine list from the backend service, normalises the
    Python-2-flavoured repr it returns (u'' prefixes, long-int L
    suffixes) into literal_eval-able dicts, and hands namedtuples to
    the template.  Fix: the local previously named `str` shadowed the
    builtin; locals renamed.
    """
    context = {}
    response = requests.get("http://18.188.124.37/getMachines/")
    raw = response.content.decode('utf-8')
    # "}" -> "}!" marks record boundaries; strip u''/L repr artifacts;
    # [:-1] drops the trailing "!" after the last record.
    cleaned = raw.replace("}", "}!").replace("u'", "'").replace("L,", ",").replace("L}", "}")[:-1]
    machine_struct = namedtuple('machineStruct', 'last_service id name type_id desc')
    machines = []
    for record in cleaned.split("!"):
        fields = ast.literal_eval(record)
        machines.append(machine_struct(last_service=fields['last_service'],
                                       id=int(fields['id']),
                                       name=fields['name'],
                                       type_id=int(fields['type_id']),
                                       desc=fields['desc']))
    context['machines'] = machines
    return render(request, 'table.html', context)
def charts(request, machine_id):
    """Render the dashboard page for one machine.

    GET: uses a hard-coded demo window (sample data only exists for
    2018-05-06).  POST: uses the date-range picker values, falling back
    to the last 7 days when the input is missing or inverted.

    Fix: the POST fallbacks previously assigned `datetime` objects to
    `startTime`/`endTime`, which crashed in the string comparison and
    the later strptime calls; the fallbacks now produce the same
    "%Y-%m-%d %H:%M" strings the picker sends.  Dead unused locals in
    the GET branch were removed.
    """
    if request.method == 'GET':
        context = {}
        context['machine_id'] = machine_id
        # Window pinned to the day the demo data covers.
        context['startTime'] = datetime.strptime("05-06-2018 00:00:01", "%m-%d-%Y %H:%M:%S")
        context['endTime'] = datetime.strptime("05-06-2018 23:59:59", "%m-%d-%Y %H:%M:%S")
        context['startTimeStamp'] = "05-06-2018 00:00:01"
        context['endTimeStamp'] = "05-06-2018 23:59:59"
        return render(request, 'dashboard.html', context)
    # Handle the datetime-picker range submitted by the page.
    if request.method == 'POST':
        context = {}
        context['machine_id'] = machine_id
        startTime = request.POST.get('startTime')
        endTime = request.POST.get('endTime')
        if endTime is None:
            endTime = datetime.now().strftime("%Y-%m-%d %H:%M")
        if startTime is None or endTime < startTime:
            # Graceful fallback: last 7 days, as picker-format strings.
            endTime = datetime.now().strftime("%Y-%m-%d %H:%M")
            startTime = (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d %H:%M")
        # Re-encode for the backend API: "%m-%d-%Y ..." with %20 spaces.
        sentEndTime = datetime.strftime(datetime.strptime(endTime, "%Y-%m-%d %H:%M"), "%m-%d-%Y %H:%M:%S").replace(" ", "%20")
        sentStartTime = datetime.strftime(datetime.strptime(startTime, "%Y-%m-%d %H:%M"), "%m-%d-%Y %H:%M:%S").replace(" ", "%20")
        context['startTime'] = startTime
        context['endTime'] = endTime
        context['startTimeStamp'] = sentStartTime
        context['endTimeStamp'] = sentEndTime
        return render(request, 'dashboard.html', context)
# JSON endpoint backing the dashboard charts.
def get_data(request, strstartTime, strendTime, machineId):
    """Fetch sensor data for one machine/time window and shape it for
    Google Charts: gauges, time series, per-minute averages, and
    stacked normal/abnormal counts.  Times arrive as "%m-%d-%Y %H:%M:%S"
    strings (spaces re-encoded as %20 for the backend URL).
    """
    context = {}
    sentStartTime = strstartTime.replace(" ", "%20")
    sentEndTime = strendTime.replace(" ", "%20")
    url = "http://18.188.124.37/getInfoTime/?machineid=" + machineId + "&start=" + sentStartTime + "&end=" + sentEndTime
    print(url)
    response = requests.get(url)
    # NOTE(review): `str` shadows the builtin here.
    str = response.content.decode('utf-8')
    # Strip py2-repr artifacts (u'' and Decimal(...)) so literal_eval works.
    newStr = str.replace("u'", "'").replace("Decimal(", "").replace(")", "")
    machineDic = ast.literal_eval(newStr)
    # Gauge charts: min/max/avg for noise (N) and temperature (T).
    context['staticsN'] = [['Label', 'Value'], ['Minimum', float(machineDic['noise_attr']['min'])],
                           ['Maximum', float(machineDic['noise_attr']['max'])],
                           ['Average', float(machineDic['noise_attr']['avg'])]]
    context['staticsT'] = [['Label', 'Value'], ['Minimum', float(machineDic['temp_attr']['min'])],
                           ['Maximum', float(machineDic['temp_attr']['max'])],
                           ['Average', float(machineDic['temp_attr']['avg'])]]
    # Thresholds: +/-15% around the ideal value (configuration TBD).
    context['dangerN'] = float(machineDic['Ideal_noise']) * 1.15
    context['dangerT'] = float(machineDic['Ideal_temp']) * 1.15
    context['warningN'] = float(machineDic['Ideal_noise']) * 0.85
    context['warningT'] = float(machineDic['Ideal_temp']) * 0.85
    warningT = context['warningT']
    warningN = context['warningN']
    dangerT = context['dangerT']
    dangerN = context['dangerN']
    # Time series rows: [date, value, danger line, warning line].
    context['timeT'] = [[e1, float(e2), dangerT, warningT] for e1, e2 in
                        zip(machineDic['temp_data']['Date'], machineDic['temp_data']['Temp'])]
    context['timeN'] = [[e1, float(e2), dangerN, warningN] for e1, e2 in
                        zip(machineDic['noise_data']['Date'], machineDic['noise_data']['Noise'])]
    # Comparison chart: bucket readings per minute by cutting the last
    # three chars (":SS") from each date string.
    dictAvgT = defaultdict(list)
    for key, value in zip(list(map(lambda x: x[:-3], machineDic['temp_data']['Date'])),
                          machineDic['temp_data']['Temp']):
        dictAvgT[key].append(float(value))
    # NOTE(review): shallow copy -- works only because the loop below
    # REBINDS dictAvgT[key] to the average instead of mutating the lists.
    dictDisT = dictAvgT.copy()
    for key in dictAvgT.keys():
        dictAvgT[key] = sum(dictAvgT[key]) / len(dictAvgT[key])
    dictAvgN = defaultdict(list)
    # Same per-minute bucketing for the noise series.
    for key, value in zip(list(map(lambda x: x[:-3], machineDic['noise_data']['Date'])),
                          machineDic['noise_data']['Noise']):
        dictAvgN[key].append(float(value))
    dictDisN = dictAvgN.copy()
    for key in dictAvgN.keys():
        dictAvgN[key] = sum(dictAvgN[key]) / len(dictAvgN[key])
    dictAvgT = OrderedDict(sorted(dictAvgT.items()))
    dictAvgN = OrderedDict(sorted(dictAvgN.items()))
    # Pairs minutes positionally; assumes both series cover the same minutes.
    context['comparison'] = [[k1, dictAvgT[k1], dictAvgN[k2]] for k1, k2 in zip(dictAvgT.keys(), dictAvgN.keys())]
    context['comparison'].insert(0, ['Dates', 'Avg temperature', 'Avg vibration'])
    # Stacked columns: per minute, counts of [below-warning, in-band, above-danger].
    for key in dictDisT.keys():
        dictDisT[key] = [count_range_in_list(dictDisT[key], -100, warningT),
                         count_range_in_list(dictDisT[key], warningT, dangerT),
                         count_range_in_list(dictDisT[key], dangerT, 1000)]
    for key in dictDisN.keys():
        dictDisN[key] = [count_range_in_list(dictDisN[key], -100, warningN),
                         count_range_in_list(dictDisN[key], warningN, dangerN),
                         count_range_in_list(dictDisN[key], dangerN, 1000)]
    dictDisT = OrderedDict(sorted(dictDisT.items()))
    dictDisN = OrderedDict(sorted(dictDisN.items()))
    # Chart rows: [minute, in-band count, out-of-band count, annotation].
    context['stackedColT'] = [[k1, dictDisT[k1][1], dictDisT[k1][0] + dictDisT[k1][2], ''] for k1 in dictDisT.keys()]
    context['stackedColN'] = [[k1, dictDisN[k1][1], dictDisN[k1][0] + dictDisN[k1][2], ''] for k1 in dictDisN.keys()]
    return JsonResponse(context)
def count_range_in_list(li, min, max):
    """Count elements x of `li` satisfying min < x <= max.

    Note: the parameter names `min`/`max` shadow the builtins; they are
    kept unchanged for backward compatibility with keyword callers.
    """
    # Generator-sum replaces the manual counter loop.
    return sum(1 for x in li if min < x <= max)
| [
"zhenyul1@andrew.cmu.edu"
] | zhenyul1@andrew.cmu.edu |
60970ab65f2384908efc1c74b7fa6fdefbaadf46 | b6a48f9a6158bcb7e6fc75e5eacaef19250fc4c5 | /cosmos/ingestion/ingest/process/detection/src/torch_model/model/utils/config_manager.py | c5af72c9c0d77749c41e4e4151ac91a4091dc749 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | UW-COSMOS/Cosmos | dcde3be6534e411a20fcf1ff36e422fc8af2ac8a | 5ed4a4c149e03773690668437d2f93aa532453c6 | refs/heads/master | 2023-09-01T18:03:20.525760 | 2023-08-31T13:56:21 | 2023-08-31T13:56:21 | 159,849,583 | 39 | 14 | null | 2023-09-13T14:39:45 | 2018-11-30T16:24:59 | Python | UTF-8 | Python | false | false | 1,242 | py | import yaml
class Struct:
    """Attribute-style view over keyword arguments; nested dicts become Structs."""

    def __init__(self, **entries):
        for name, raw in entries.items():
            # Recursively wrap mapping values for dotted access.
            if isinstance(raw, dict):
                self.__dict__[name] = Struct(**raw)
            else:
                self.__dict__[name] = raw
class ConfigManager:
    """
    Basic config singleton for easily accessing config parameters.

    The first construction with a file path loads the YAML config;
    subsequent constructions reuse the shared instance, and attribute
    access on any ConfigManager is delegated to it.
    """

    class __Singleton:
        def __init__(self, fp):
            """
            Initialize the singleton config object.

            :param fp: path to a YAML config file; mapping values are
                wrapped in Struct for attribute access.
            """
            with open(fp) as fh:
                config = yaml.load(fh, yaml.Loader)
            for key, value in config.items():
                value2 = (Struct(**value) if isinstance(value, dict) else value)
                self.__dict__[key] = value2

        def merge(self, data):
            """Overlay `data`'s entries onto the loaded config.

            Bug fix: the original wrote `self__dict__[key]` (missing
            dot), raising NameError on every call.  Values are stored
            as-is (not Struct-wrapped), matching the original
            assignment's intent.
            """
            self.__dict__.update(data)

    # Shared singleton instance; None until first construction with a path.
    instance = None

    def __init__(self, fp=None):
        # Only the first call that supplies a real path creates the singleton.
        if (ConfigManager.instance is None) and (fp is not None):
            ConfigManager.instance = ConfigManager.__Singleton(fp)

    def __getattr__(self, item):
        # Raises AttributeError (on None) if accessed before initialisation.
        return getattr(ConfigManager.instance, item)

    def __setattr__(self, key, value):
        setattr(ConfigManager.instance, key, value)
| [
"ankur.goswami12@gmail.com"
] | ankur.goswami12@gmail.com |
6ad5bd78dfb8022244345b015e8ec44e4942cb9f | 0953f9307b506255e3b310193d8e33706974e73d | /helloworld.py | 6e3f8f51ceab2ea43232064d2e18dc5a95202a89 | [] | no_license | ginda/appengine | be80cf8febcbcf8ebc933171ddfa5e662c4187df | 8d7c9ddb79dc86faa24ea334cc8461b7b6687c95 | refs/heads/master | 2021-01-10T19:04:41.343256 | 2011-09-11T15:25:26 | 2011-09-11T15:25:26 | 419,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | import myClass
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
#class is now external
class MainPage(webapp.RequestHandler):
    # GAE webapp handler: echoes the `mode` query parameter and, unless
    # mode == "hello", benchmarks URL fetches via myClass helpers.
    def get(self):
        """Handle GET /: print the mode param and timing results as HTML."""
        self.response.headers['Content-Type'] = 'text/html'
        x = myClass.myClass()
        self.response.out.write("i got as param -mode-:"+self.request.get("mode")+"<br>\n")
        self.response.out.write("<img src=\"/images/logor.png\"><br>")
        if (self.request.get("mode") == "hello"):
            self.response.out.write("hallo a<br>")
        else:
            # Earlier benchmark targets, kept disabled:
            #self.response.out.write("vd_: "+str( x.grabmyurl("http://www.vondir.de") )+" seconds to load file<br>\n")
            #self.response.out.write("fb_: "+str( x.grabmyurl("http://www.farmbeds.com") )+" seconds to load file<br>\n")
            #self.response.out.write("fb2: "+str( x.grabmyurl("http://www.farmbeds.com/test.wsgi") )+" seconds to load file<br>\n")
            #self.response.out.write("fbxpy: "+str( x.grabmyurl("http://www.farmbeds.com/xml.wsgi") )+" seconds to load file<br>\n")
            #self.response.out.write("fbxph: "+str( x.grabmyurl("http://www.farmbeds.com/test.php") )+" seconds to load file<br>\n")
            #self.response.out.write("vdpl: "+str( x.grabmyurl("http://www.vondir.de/flash/flash_gallery/help.pl?catid=31") )+" seconds to load file<br>\n")
            #self.response.out.write("gaei: "+str( x.grabmyurl("http://avengo2.appspot.com/images/logor.png") )+" seconds to load file<br>\n")
            # grabmyurl times a direct fetch; dbreadmyurl presumably reads a
            # cached timing from the datastore -- TODO confirm in myClass.
            self.response.out.write("gaea:"+str( x.grabmyurl("http://avengo2.appspot.com/?mode=hello") )+" seconds to load file<br>\n")
            self.response.out.write("gaea: o:"+str(x.dbreadmyurl("http://avengo2.appspot.com/?mode=hello"))+" - n:"+str( x.grabmyurl("http://avengo2.appspot.com/?mode=hello") )+" seconds to load file<br>\n")
# WSGI application mapping the root URL to MainPage (debug tracebacks on).
application = webapp.WSGIApplication(
        [('/', MainPage)],
        debug=True)
def main():
    """Entry point for the GAE CGI runner."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| [
"gh@gh-Latitude-E6410"
] | gh@gh-Latitude-E6410 |
681a5ed2c794eb4e09f40f31941d9a53eb066735 | 8e38235e128a78e0126fb4c492d4d33a94a35257 | /main.py | 5362268192559938c000099d64885db04d8909dd | [] | no_license | dioNode/siliconAirApp | add7a7ee88812eb85e4d335827dedf45075ad747 | eee3a7bb86969672f64d6865e9310e57a3f0c260 | refs/heads/master | 2020-03-28T16:46:04.367037 | 2018-09-29T06:52:15 | 2018-09-29T06:52:15 | 148,726,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | from PriorityANN import PriorityANN
def main():
    """Interactive entry point: dispatch CSV export or passenger eviction."""
    ann = PriorityANN()
    menu = "What would you like to do?\n [C = Build CSV from current flights to train] \n [E = Evict passengers from flight] \n"
    while True:
        choice = input(menu).upper()
        if choice == "C":
            print("Generating CSV")
            generateCSVModel(ann)
            print("CSV generated and stored in passengerDetails.csv")
        elif choice == "E":
            showFlightEvictors(ann)
        else:
            print("Sorry that is not an option")
def generateCSVModel(priorityANN):
    # Select the hard-coded training flight and dump its passenger rows.
    priorityANN.setFlight("SQ890")
    priorityANN.writeCSV("passengerDetails.csv")
def showFlightEvictors(priorityANN):
    """Prompt for a flight number and run eviction evaluation on it."""
    valid_flights = ["SQ890", "SQ494"]
    flightNo = input("Which flight would you like to select? \n The available flights are [SQ890, SQ494]\n").upper()
    while flightNo not in valid_flights:
        print("Sorry, flight "+ flightNo + " does not exist.")
        # Bug fix: the re-prompt previously skipped .upper(), so valid
        # lowercase input looped forever; it also listed only SQ890.
        flightNo = input("Which flight would you like to select? \n The available flights are [SQ890, SQ494]\n").upper()
    priorityANN.evaluate(flightNo)
# Run the interactive menu only when executed as a script.
if __name__ == "__main__":
    main()
"dion.lao@uqconnect.edu.au"
] | dion.lao@uqconnect.edu.au |
97c3730c522f14d3e70b194878b0d860135c6b52 | def06466dadf32385b083615e46a07188ef841c2 | /web_app/primes/primes/wsgi.py | 4839f01dfbebfa726790474ac354f5d2b5730dc8 | [] | no_license | ChillarAnand/just-queue-it | ead51fa0fa14bca6276c452b32a8d4e382e37f95 | c58a214507b429d8854a1049e4b5ed6377435a82 | refs/heads/master | 2020-05-23T14:05:38.511931 | 2015-02-19T21:42:34 | 2015-02-19T21:42:34 | 31,038,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | """
WSGI config for primes project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
# Put the project root (two levels up from this file) on sys.path so the
# settings package resolves when served by a WSGI container.
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "primes.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "primes.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"anand21nanda@gmail.com"
] | anand21nanda@gmail.com |
82562838b8c6c45b78ce1a65f030dff267405be8 | 28a5cbfff030bf21d82dc28177d94b76d2801714 | /ex04_hangman/exo4.py | b579d781cc4cda750f9062587bd1d74b2620fce6 | [] | no_license | dayana-18/hi-python | ba544bb8c71bc098b6887ec703a57c20cd00fd39 | 3673578c8b7e917d38240148cba275497c359274 | refs/heads/main | 2023-08-03T16:43:09.113955 | 2021-09-23T21:56:51 | 2021-09-23T21:56:51 | 409,200,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | import random
# Pick a random word from the words.py file (stored as '", "'-separated).
# NOTE(review): the file handle is never closed.
words = open('words.py').read().strip().split('", "')
word = random.choice(words)
print(word)
# NOTE(review): the print above reveals the answer -- looks like debug output.
# The player gets 12 chances to find the word.
for i in range(0, 12):
    user_choice = input('letter :')
    # Walk through the word letter by letter.
    for letter in word :
        if user_choice == letter :
            print(user_choice) # show the position(s) where the letter occurs
        if user_choice != letter : # the guess does not match this letter
            print("_") # placeholder for the non-matching letter
            print("the letter is not here")
            # NOTE(review): this prints once per non-matching POSITION,
            # not once per wrong guess -- probably unintended.
    if user_choice == word : # the player typed the whole word
        print(word)
        print("you won")
        # NOTE(review): no `break`, so prompting continues after a win.
| [
"dayrz18@gmail.com"
] | dayrz18@gmail.com |
6abe2aa5eb03d998411e585d09fff339547d1c02 | a3b281e38dedeffb306614fd16699f1bb12a4ed3 | /extplugins/weaponlimiterbf3/weapondef.py | ede1c7005569fa8faee7b55e656944e97d9444f8 | [] | no_license | ozon/b3-plugin-weaponlimiterbf3 | c17820a629ae6d252753fe7c38b825e09fa0b26e | d913d001cb34adf24cbea6c546528b13d36b76e2 | refs/heads/master | 2020-05-17T00:57:17.636611 | 2014-02-14T00:47:28 | 2014-02-14T00:47:28 | 2,986,901 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 23,689 | py | WEAPON_NAMES_BY_ID = {
'AEK-971': {'type': 'assault rifle', 'name': 'AEK-971'},
'Weapons/AK74M/AK74': {'type': 'assault rifle', 'name': 'AK-74M', 'kit': 'assault'},
'AN-94 Abakan': {'type': 'assault rifle', 'name': 'AN-94', 'kit': 'assault'},
'Steyr AUG': {'type': 'assault rifle', 'name': 'AUG A3', 'kit': 'assault'},
'F2000': {'type': 'assault rifle', 'name': 'F2000', 'kit': 'assault'},
'FAMAS': {'type': 'assault rifle', 'name': 'FAMAS', 'kit': 'assault'},
'Weapons/G3A3/G3A3': {'type': 'assault rifle', 'name': 'G3A3', 'kit': 'assault'},
'Weapons/KH2002/KH2002': {'type': 'assault rifle', 'name': 'KH2002', 'kit': 'assault'},
'Weapons/XP1_L85A2/L85A2': {'type': 'assault rifle', 'name': 'L85A2', 'kit': 'assault'},
'M16A4': {'type': 'assault rifle', 'name': 'M16A3', 'kit': 'assault'},
'M416': {'type': 'assault rifle', 'name': 'M416', 'kit': 'assault'},
'SCAR-L': {'type': 'assault rifle', 'name': 'SCAR-L', 'kit': 'assault'},
'Defib': {'type': None, 'name': 'DEFIBRILLATOR', 'kit': 'assault'},
'M320': {'type': None, 'name': 'M320', 'kit': 'assault'},
'M26Mass': {'type': 'shotgun', 'name': 'M26 MASS', 'kit': 'assault'},
# engineer
'Weapons/A91/A91': {'type': 'carbines', 'name': 'A-91', 'kit': 'engineer'},
'Weapons/XP2_ACR/ACR': {'type': 'carbines', 'name': 'ACW-R', 'kit': 'engineer'},
'AKS-74u': {'type': 'carbines', 'name': 'AKS-74', 'kit': 'engineer'},
'Weapons/G36C/G36C': {'type': 'carbines', 'name': 'G36C', 'kit': 'engineer'},
'HK53': {'type': 'carbines', 'name': 'G53', 'kit': 'engineer'},
'M4A1': {'type': 'carbines', 'name': 'M4A1', 'kit': 'engineer'},
'Weapons/XP2_MTAR/MTAR': {'type': 'carbines', 'name': 'MTAR-21', 'kit': 'engineer'},
'QBZ-95': {'type': 'carbines', 'name': 'QBZ-95B', 'kit': 'engineer'},
'Weapons/SCAR-H/SCAR-H': {'type': 'carbines', 'name': 'SCAR-H', 'kit': 'engineer'},
'SG 553 LB': {'type': 'carbines', 'name': 'SG553', 'kit': 'engineer'},
'FIM92': {'type': 'launcher', 'name': 'FIM-92 STINGER', 'kit': 'engineer'},
'Weapons/Sa18IGLA/Sa18IGLA': {'type': 'launcher', 'name': 'SA-18 IGLA', 'kit': 'engineer'},
'FGM-148': {'type': 'launcher', 'name': 'FGM-148 JAVELIN', 'kit': 'engineer'},
'RPG-7': {'type': 'launcher', 'name': 'RPG-7V2', 'kit': 'engineer'},
'SMAW': {'type': 'launcher', 'name': 'SMAW', 'kit': 'engineer'},
'Repair Tool': {'type': '', 'name': 'REPAIR TOOL', 'kit': 'engineer'},
'M15 AT Mine': {'type': 'explosive', 'name': 'M15 AT MINE', 'kit': 'engineer'},
'EOD BOT': {'type': '', 'name': 'EOD BOT', 'kit': 'engineer'},
# supporter
'Weapons/XP2_L86/L86': {'type': 'LMG', 'name': 'L86A2', 'kit': 'support'},
'LSAT': {'type': 'LMG', 'name': 'LSAT', 'kit': 'support'},
'M240': {'type': 'LMG', 'name': 'M240B', 'kit': 'support'},
'M249': {'type': 'LMG', 'name': 'M249', 'kit': 'support'},
'M27IAR': {'type': 'LMG', 'name': 'M27 IAR', 'kit': 'support'},
'M60': {'type': 'LMG', 'name': 'M60E4', 'kit': 'support'},
'MG36': {'type': 'LMG', 'name': 'MG36', 'kit': 'support'},
'Pecheneg': {'type': 'LMG', 'name': 'PKP PECHENEG', 'kit': 'support'},
'QBB-95': {'type': 'LMG', 'name': 'QBB-95', 'kit': 'support'},
'RPK-74M': {'type': 'LMG', 'name': 'RPK-74M', 'kit': 'support'},
'Type88': {'type': 'LMG', 'name': 'TYPE 88 LMG', 'kit': 'support'},
'Weapons/Gadgets/C4/C4': {'type': 'explosive', 'name': 'C4 EXPLOSIVES', 'kit': 'support'},
'Weapons/Gadgets/Claymore/Claymore': {'type': 'explosive', 'name': 'M18 CLAYMORE', 'kit': 'support'},
# sniper
'M417': {'type': 'sniper rifle', 'name': 'M417', 'kit': 'recon'},
'JNG90': {'type': 'sniper rifle', 'name': 'JNG-90', 'kit': 'recon'},
'L96': {'type': 'sniper rifle', 'name': 'L96', 'kit': 'recon'},
'M39': {'type': 'sniper rifle', 'name': 'M39 EMR', 'kit': 'recon'},
'M40A5': {'type': 'sniper rifle', 'name': 'M40A5', 'kit': 'recon'},
'Model98B': {'type': 'sniper rifle', 'name': 'M98B', 'kit': 'recon'},
'Mk11': {'type': 'sniper rifle', 'name': 'MK11 MOD 0', 'kit': 'recon'},
'QBU-88': {'type': 'sniper rifle', 'name': 'QBU-88', 'kit': 'recon'},
'SKS': {'type': 'sniper rifle', 'name': 'SKS', 'kit': 'recon'},
'SV98': {'type': 'sniper rifle', 'name': 'SV98', 'kit': 'recon'},
'SVD': {'type': 'sniper rifle', 'name': 'SVD', 'kit': 'recon'},
'MAV': {'type': None, 'name': 'MAV', 'kit': 'recon'},
# general
'870MCS': {'type': 'shotgun', 'name': '870MCS', 'kit': 'general'},
'DAO-12': {'type': 'shotgun', 'name': 'DAO-12', 'kit': 'general'},
'jackhammer': {'type': 'shotgun', 'name': 'MK3A1', 'kit': 'general'},
'M1014': {'type': 'shotgun', 'name': 'M1014', 'kit': 'general'},
'SPAS-12': {'type': 'shotgun', 'name': 'SPAS-12', 'kit': 'general'},
'Siaga20k': {'type': 'shotgun', 'name': 'SAIGA 12K', 'kit': 'general'},
'USAS-12': {'type': 'shotgun', 'name': 'USAS-12', 'kit': 'general'},
# general smg
'Weapons/XP2_MP5K/MP5K': {'type': 'smg', 'name': 'M5K', 'kit': 'general'},
'MP7': {'type': 'smg', 'name': 'MP7', 'kit': 'general'},
'Weapons/P90/P90': {'type': 'smg', 'name': 'P90', 'kit': 'general'},
'Weapons/MagpulPDR/MagpulPDR': {'type': 'smg', 'name': 'PDW-R', 'kit': 'general'},
'PP-19': {'type': 'smg', 'name': 'PP-19', 'kit': 'general'},
'PP-2000': {'type': 'smg', 'name': 'PP-2000', 'kit': 'general'},
'Weapons/UMP45/UMP45': {'type': 'smg', 'name': 'UMP-45', 'kit': 'general'},
'AS Val': {'type': 'smg', 'name': 'AS VAL', 'kit': 'general'},
'M67': {'type': None, 'name': 'M67 GRENADE', 'kit': 'general'},
# knifekill with animation
'Knife_RazorBlade': {'type': 'all', 'name': 'ACB-90', 'kit': 'general'},
# knifekill normal
'Melee': {'type': 'all', 'name': 'ACB-90', 'kit': 'general'},
'Weapons/Knife/Knife': {'type': 'all', 'name': 'ACB-90', 'kit': 'general'},
'CrossBow': {'type': 'handgun', 'name': 'XBOW', 'kit': 'general'},
# Pistols
'M1911': {'type': 'handgun', 'name': 'M1911', 'kit': 'general'},
'Weapons/MP412Rex/MP412REX': {'type': 'handgun', 'name': 'MP412 REX', 'kit': 'general'},
'M9': {'type': 'handgun', 'name': 'M9', 'kit': 'general'},
'M93R': {'type': 'handgun', 'name': '93R', 'kit': 'general'},
'Weapons/MP443/MP443': {'type': 'handgun', 'name': 'MP443', 'kit': 'general'},
'MP443SUPP': {'type': 'handgun', 'name': 'MP443 SUPP.', 'kit': 'general'},
'Taurus .44': {'type': 'handgun', 'name': '.44 MAGNUM', 'kit': 'general'},
'Glock18': {'type': 'handgun', 'name': 'G18', 'kit': 'general'},
# others
'Death': {'type': '', 'name': 'Death', 'kit': ''},
'SoldierCollision': {'type': '', 'name': 'SoldierCollision', 'kit': ''},
'RoadKill': {'type': '', 'name': 'RoadKill', 'kit': ''},
'DamageArea': {'type': '', 'name': 'DamageArea', 'kit': ''},
'Suicide': {'type': '', 'name': 'Suicide', 'kit': ''},
# gunmaster weapons
'Weapons/MP443/MP443_GM': {'type': 'handgun', 'name': 'MP443 (Gunmster mode)', 'kit': 'general'},
'Weapons/P90/P90_GM': {'type': 'smg', 'name': 'P90 (Gunmaster Mode)', 'kit': 'general'},
}
WEAPONS_GROUPS = {
'shotguns': ('870MCS', 'DAO-12', 'jackhammer', 'M1014', 'SPAS-12', 'Siaga20k', 'USAS-12', 'M26Mass'),
'explosives': ('Weapons/Gadgets/Claymore/Claymore', 'M15 AT Mine', 'Weapons/Gadgets/C4/C4'),
}
BF4_WEAPON_NAMES_BY_ID = {
# Assault Rifles
'U_AEK971': {'type': 'assault rifle', 'name': 'AEK-971', 'kit': 'assault'},
'U_AK12': {'type': 'assault rifle', 'name': 'AK-12', 'kit': 'assault'},
'U_CZ805': {'type': 'assault rifle', 'name': 'CZ-805', 'kit': 'assault'},
'U_FAMAS': {'type': 'assault rifle', 'name': 'FAMAS', 'kit': 'assault'},
'U_GalilACE23': {'type': 'assault rifle', 'name': 'ACE-23', 'kit': 'assault'},
'U_M16A4': {'type': 'assault rifle', 'name': 'M16A4', 'kit': 'assault'},
'U_M416': {'type': 'assault rifle', 'name': 'M416', 'kit': 'assault'},
'U_QBZ951': {'type': 'assault rifle', 'name': 'QBZ-95-1', 'kit': 'assault'},
'U_SAR21': {'type': 'assault rifle', 'name': 'SAR-21', 'kit': 'assault'},
'U_SCAR-H': {'type': 'assault rifle', 'name': 'SCAR-H', 'kit': 'assault'},
'U_SteyrAug': {'type': 'assault rifle', 'name': 'AUG-A3', 'kit': 'assault'},
'U_L85a2': {'type': 'assault rifle', 'name': 'L85A2', 'kit': 'assault'},
# Carbines
'U_A91': {'type': 'carbines', 'name': 'A-91', 'kit': 'general'},
'U_ACR': {'type': 'carbines', 'name': 'ACW-R', 'kit': 'general'},
'U_AK5C': {'type': 'carbines', 'name': 'AK-5C', 'kit': 'general'},
'U_AKU12': {'type': 'carbines', 'name': 'AKU-12', 'kit': 'general'},
'U_G36C': {'type': 'carbines', 'name': 'G36C', 'kit': 'general'},
'U_GalilACE': {'type': 'carbines', 'name': 'ACE-21-CQB', 'kit': 'general'},
'U_GalilACE52': {'type': 'carbines', 'name': 'ACE-52-CQB', 'kit': 'general'},
'U_M4A1': {'type': 'carbines', 'name': 'M4', 'kit': 'general'},
'U_SG553LB': {'type': 'carbines', 'name': 'SG553', 'kit': 'general'},
'U_Type95B': {'type': 'carbines', 'name': 'Type-95B-1', 'kit': 'general'},
'U_MTAR21': {'type': 'carbines', 'name': 'MTAR-21', 'kit': 'general'},
# Designated Marksman Rifles (DMR)
'U_GalilACE53': {'type': 'DMR', 'name': 'ACE-53-SV', 'kit': 'general'},
'U_M39EBR': {'type': 'DMR', 'name': 'M39-EMR', 'kit': 'general'},
'U_MK11': {'type': 'DMR', 'name': 'MK11-MOD-0', 'kit': 'general'},
'U_QBU88': {'type': 'DMR', 'name': 'QBU-88', 'kit': 'general'},
'U_RFB': {'type': 'DMR', 'name': 'RFB', 'kit': 'general'},
'U_SCAR-HSV': {'type': 'DMR', 'name': 'SCAR-H-SV', 'kit': 'general'},
'U_SKS': {'type': 'DMR', 'name': 'SKS', 'kit': 'general'},
'U_SVD12': {'type': 'DMR', 'name': 'SVD-12', 'kit': 'general'},
# Explosive Gadgets
'U_C4': {'type': 'explosive', 'name': 'C4-Explosive', 'kit': 'recon'},
'U_C4_Support': {'type': 'explosive', 'name': 'C4-Explosive', 'kit': 'support'},
'U_Claymore': {'type': 'explosive', 'name': 'M18-Claymore', 'kit': ''},
'U_Claymore_Recon': {'type': 'explosive', 'name': 'M18-Claymore', 'kit': 'recon'},
'U_M15': {'type': 'explosive', 'name': 'M15-AT-Mine', 'kit': ''},
'U_SLAM': {'type': 'explosive', 'name': 'M2-Slam', 'kit': ''},
'U_MGL': {'type': 'explosive', 'name': 'M32-MGL', 'kit': ''},
#'Death': {'type': 'explosive', 'name': 'M224-Mortar', 'kit': ''},
'U_XM25': {'type': 'explosive', 'name': 'XM25-Airburst', 'kit': ''},
'U_XM25_Flechette': {'type': 'explosive', 'name': 'XM25-Dart', 'kit': ''},
'U_XM25_Smoke': {'type': 'explosive', 'name': 'XM25-Smoke', 'kit': ''},
'U_UCAV': {'type': 'explosive', 'name': 'UCAV', 'kit': ''},
# Hand Grenades
#'': {'type': 'grenade', 'name': 'hand-flare', 'kit': 'general'},
'U_Flashbang': {'type': 'grenade', 'name': 'M84-Flashbang', 'kit': 'general'},
'U_Grenade_RGO': {'type': 'grenade', 'name': 'RGO-Impact', 'kit': 'general'},
#'': {'type': 'grenade', 'name': 'M18-Smoke', 'kit': 'general'},
'U_M34': {'type': 'grenade', 'name': 'M34-Incendiary', 'kit': 'general'},
'U_M67': {'type': 'grenade', 'name': 'M67-Frag', 'kit': 'general'},
'U_V40': {'type': 'grenade', 'name': 'V40-Mini', 'kit': 'general'},
# Hand Guns
'U_M93R': {'type': 'handgun', 'name': '93R', 'kit': 'general'},
'U_CZ75': {'type': 'handgun', 'name': 'CZ-75', 'kit': 'general'},
'U_FN57': {'type': 'handgun', 'name': 'FN57', 'kit': 'general'},
'U_Glock18': {'type': 'handgun', 'name': 'G18', 'kit': 'general'},
'U_HK45C': {'type': 'handgun', 'name': 'Compact-45', 'kit': 'general'},
'U_M1911': {'type': 'handgun', 'name': 'M1911', 'kit': 'general'},
'U_MP412Rex': {'type': 'handgun', 'name': 'M412-Rex', 'kit': 'general'},
'U_M9': {'type': 'handgun', 'name': 'M9', 'kit': 'general'},
'U_MP443': {'type': 'handgun', 'name': 'MP443', 'kit': 'general'},
'U_P226': {'type': 'handgun', 'name': 'P226', 'kit': 'general'},
'U_QSZ92': {'type': 'handgun', 'name': 'QSZ-92', 'kit': 'general'},
'U_Taurus44': {'type': 'handgun', 'name': '44-Magnum', 'kit': 'general'},
# Light Machine Guns (LMG)
'U_LSAT': {'type': 'LMG', 'name': 'LSAT', 'kit': 'support'},
'U_M240': {'type': 'LMG', 'name': 'M240B', 'kit': 'support'},
'U_M249': {'type': 'LMG', 'name': 'M249', 'kit': 'support'},
'U_MG4': {'type': 'LMG', 'name': 'MG4', 'kit': 'support'},
'U_Pecheneg': {'type': 'LMG', 'name': 'PKP-Pecheneg', 'kit': 'support'},
'U_QBB95': {'type': 'LMG', 'name': 'QBB-95-1', 'kit': 'support'},
'U_RPK12': {'type': 'LMG', 'name': 'RPK-12', 'kit': 'support'},
'U_Type88': {'type': 'LMG', 'name': 'Type-88-LMG', 'kit': 'support'},
'U_Ultimax': {'type': 'LMG', 'name': 'U-100-MK5', 'kit': 'support'},
'U_rpk-74': {'type': 'LMG', 'name': 'RPK-74M', 'kit': 'support'},
# Personal Defense Weapons (PDW)
'U_CBJ-MS': {'type': 'PWD', 'name': 'CBJ-MS', 'kit': 'general'},
'U_JS2': {'type': 'PWD', 'name': 'JS2', 'kit': 'general'},
'U_MX4': {'type': 'PWD', 'name': 'MX4', 'kit': 'general'},
'U_P90': {'type': 'PWD', 'name': 'P90', 'kit': 'general'},
'U_MagpulPDR': {'type': 'PWD', 'name': 'PDW-R', 'kit': 'general'},
'U_PP2000': {'type': 'PWD', 'name': 'PP-2000', 'kit': 'general'},
'U_Scorpion': {'type': 'PWD', 'name': 'CZ-3A1', 'kit': 'general'},
'U_UMP45': {'type': 'PWD', 'name': 'UMP-45', 'kit': 'general'},
'U_UMP9': {'type': 'PWD', 'name': 'UMP-9', 'kit': 'general'},
'U_mp7': {'type': 'PWD', 'name': 'MP7', 'kit': 'general'},
# Rocket Launcher
'U_FGM148': {'type': 'explosive', 'name': 'FGM-148-Javelin', 'kit': 'engineer'},
'U_FIM92': {'type': 'explosive', 'name': 'FIM-92-Stinger', 'kit': 'engineer'},
'U_Sa18IGLA': {'type': 'explosive', 'name': 'SA-18-Igla', 'kit': 'engineer'},
'U_AT4': {'type': 'explosive', 'name': 'M136-CS', 'kit': 'engineer'},
'U_NLAW': {'type': 'explosive', 'name': 'MBT-LAW', 'kit': 'engineer'},
'U_RPG7': {'type': 'explosive', 'name': 'RGP-7V2', 'kit': 'engineer'},
'U_SMAW': {'type': 'explosive', 'name': 'MK153-SMAW', 'kit': 'engineer'},
'U_SRAW': {'type': 'explosive', 'name': 'FGM-172-SRAW', 'kit': 'engineer'},
'U_Starstreak': {'type': 'explosive', 'name': 'HVM-II', 'kit': 'engineer'},
# Shotguns
'U_M26Mass_Flechette': {'type': 'shotgun', 'name': 'M26-Dart', 'kit': 'general'},
'U_M26Mass_Frag': {'type': 'shotgun', 'name': 'M26-Frag', 'kit': 'general'},
'U_M26Mass': {'type': 'shotgun', 'name': 'M26-Mass', 'kit': 'general'},
'U_M26Mass_Slug': {'type': 'shotgun', 'name': 'M26-Slug', 'kit': 'general'},
'U_USAS-12_Nightvision': {'type': 'shotgun', 'name': 'USAS-12-Flir', 'kit': 'general'},
'U_870': {'type': 'shotgun', 'name': '870-MCS', 'kit': 'general'},
'U_DBV12': {'type': 'shotgun', 'name': 'DBV-12', 'kit': 'general'},
'U_HAWK': {'type': 'shotgun', 'name': 'HAWK-12G', 'kit': 'general'},
'U_M1014': {'type': 'shotgun', 'name': 'M1014', 'kit': 'general'},
'U_QBS09': {'type': 'shotgun', 'name': 'QBS-09', 'kit': 'general'},
'U_SAIGA_20K': {'type': 'shotgun', 'name': 'Saiga-12K', 'kit': 'general'},
'U_SerbuShorty': {'type': 'shotgun', 'name': 'Shorty-12G', 'kit': 'general'},
'U_SPAS12': {'type': 'shotgun', 'name': 'Spas-12', 'kit': 'general'},
'U_USAS-12': {'type': 'shotgun', 'name': 'USAS-12', 'kit': 'general'},
'U_UTAS': {'type': 'shotgun', 'name': 'UTS-15', 'kit': 'general'},
# Sniper Rifle
'U_AMR2': {'type': 'sniper', 'name': 'AMR-2', 'kit': 'recon'},
'U_AMR2_CQB': {'type': 'sniper', 'name': 'AMR-2-CQB', 'kit': 'recon'},
'U_AMR2_MED': {'type': 'sniper', 'name': 'AMR-2-MID', 'kit': 'recon'},
'U_CS-LR4': {'type': 'sniper', 'name': 'CS-LR4', 'kit': 'recon'},
'U_FY-JS': {'type': 'sniper', 'name': 'FY-JS', 'kit': 'recon'},
'U_JNG90': {'type': 'sniper', 'name': 'JNG-90', 'kit': 'recon'},
'U_M200': {'type': 'sniper', 'name': 'SRR-61', 'kit': 'recon'},
'U_M40A5': {'type': 'sniper', 'name': 'M40A5', 'kit': 'recon'},
'U_M82A3': {'type': 'sniper', 'name': 'M82A3', 'kit': 'recon'},
'U_M82A3_CQB': {'type': 'sniper', 'name': 'M82A3-CQB', 'kit': 'recon'},
'U_M82A3_MED': {'type': 'sniper', 'name': 'M82A3-MID', 'kit': 'recon'},
'U_M98B': {'type': 'sniper', 'name': 'M98B', 'kit': 'recon'},
'U_Scout': {'type': 'sniper', 'name': 'Scout-Elite', 'kit': 'recon'},
'U_SRS': {'type': 'sniper', 'name': '338-Recon', 'kit': 'recon'},
'U_SV98': {'type': 'sniper', 'name': 'SV-98', 'kit': 'recon'},
'U_L96A1': {'type': 'sniper', 'name': 'L96A1', 'kit': 'recon'},
# Knife
'Melee': {'type': None, 'name': 'Melee', 'kit': 'general'},
# Underslug Launcher
'U_M320_HE': {'type': 'explosive', 'name': 'M320-HE', 'kit': 'general'},
'U_M320_FLASH': {'type': 'explosive', 'name': 'M320-FB', 'kit': 'general'},
'U_M320_LVG': {'type': 'explosive', 'name': 'M320-LVG', 'kit': 'general'},
'U_M320_SHG': {'type': 'explosive', 'name': 'M320-Dart', 'kit': 'general'},
'U_M320_SMK': {'type': 'explosive', 'name': 'M320-SMK', 'kit': 'general'},
# Assault Rifle Underslung Launchers
'U_AEK971_M320_HE': {'type': 'explosive', 'name': 'AEK971 M320 HE', 'kit': 'assault'},
'U_AEK971_M320_FLASH': {'type': None, 'name': 'AEK971 M320 Flashbang', 'kit': 'assault'},
'U_AEK971_M320_LVG': {'type': 'explosive', 'name': 'AEK971 M320 LVG', 'kit': 'assault'},
'U_AEK971_M320_SHG': {'type': 'shotgun', 'name': 'AEK971 M320 Shotgun', 'kit': 'assault'},
'U_AEK971_M320_SMK': {'type': None, 'name': 'AEK971 M320 Smoke', 'kit': 'assault'},
'U_AK12_M320_HE': {'type': 'explosive', 'name': 'AK12 M320 HE', 'kit': 'assault'},
'U_AK12_M320_FLASH': {'type': None, 'name': 'AK12 M320 Flashbang', 'kit': 'assault'},
'U_AK12_M320_LVG': {'type': 'explosive', 'name': 'AK12 M320 LVG', 'kit': 'assault'},
'U_AK12_M320_SHG': {'type': 'shotgun', 'name': 'AK12 M320 Shotgun', 'kit': 'assault'},
'U_AK12_M320_SMK': {'type': None, 'name': 'AK12 M320 Smoke', 'kit': 'assault'},
'U_CZ805_M320_HE': {'type': 'explosive', 'name': 'CZ805 M320 HE', 'kit': 'assault'},
'U_CZ805_M320_FLASH': {'type': None, 'name': 'CZ805 M320 Flashbang', 'kit': 'assault'},
'U_CZ805_M320_LVG': {'type': 'explosive', 'name': 'CZ805 M320 LVG', 'kit': 'assault'},
'U_CZ805_M320_SHG': {'type': 'shotgun', 'name': 'CZ805 M320 Shotgun', 'kit': 'assault'},
'U_CZ805_M320_SMK': {'type': None, 'name': 'CZ805 M320 Smoke', 'kit': 'assault'},
'U_M16A4_M320_HE': {'type': 'explosive', 'name': 'M16A4 M320 HE', 'kit': 'assault'},
'U_M16A4_M320_FLASH': {'type': None, 'name': 'M16A4 M320 Flashbang', 'kit': 'assault'},
'U_M16A4_M320_LVG': {'type': 'explosive', 'name': 'M16A4 M320 LVG', 'kit': 'assault'},
'U_M416_M320_SHG': {'type': 'shotgun', 'name': 'M416 M320 Shotgun', 'kit': 'assault'},
'U_M416_M320_SMK': {'type': None, 'name': 'M416 M320 Smoke', 'kit': 'assault'},
'U_QBZ951_M320_HE': {'type': 'explosive', 'name': 'QBZ951 M320 HE', 'kit': 'assault'},
'U_QBZ951_M320_FLASH': {'type': None, 'name': 'QBZ951 M320 Flashbang', 'kit': 'assault'},
'U_QBZ951_M320_LVG': {'type': 'explosive', 'name': 'QBZ951 M320 LVG', 'kit': 'assault'},
'U_QBZ951_M320_SHG': {'type': 'shotgun', 'name': 'QBZ951 M320 Shotgun', 'kit': 'assault'},
'U_QBZ951_M320_SMK': {'type': None, 'name': 'QBZ951 M320 Smoke', 'kit': 'assault'},
'U_SAR21_M320_HE': {'type': 'explosive', 'name': 'SAR21 M320 HE', 'kit': 'assault'},
'U_SAR21_M320_FLASH': {'type': None, 'name': 'SAR21 M320 Flashbang', 'kit': 'assault'},
'U_SAR21_M320_LVG': {'type': 'explosive', 'name': 'SAR21 M320 LVG', 'kit': 'assault'},
'U_SAR21_M320_SHG': {'type': 'shotgun', 'name': 'SAR21 M320 Shotgun', 'kit': 'assault'},
'U_SAR21_M320_SMK': {'type': None, 'name': 'SAR21 M320 Smoke', 'kit': 'assault'},
'U_SCAR-H_M320_HE': {'type': 'explosive', 'name': 'SCAR-H M320 HE', 'kit': 'assault'},
'U_SCAR-H_M320_FLASH': {'type': None, 'name': 'SCAR-H M320 Flashbang', 'kit': 'assault'},
'U_SCAR-H_M320_LVG': {'type': 'explosive', 'name': 'SCAR-H M320 LVG', 'kit': 'assault'},
'U_SCAR-H_M320_SHG': {'type': 'shotgun', 'name': 'SCAR-H M320 Shotgun', 'kit': 'assault'},
'U_SCAR-H_M320_SMK': {'type': None, 'name': 'SCAR-H M320 Smoke', 'kit': 'assault'},
'U_SteyrAug_M320_HE': {'type': 'explosive', 'name': 'SteyrAug M320 HE', 'kit': 'assault'},
'U_SteyrAug_M320_FLASH': {'type': None, 'name': 'SteyrAug M320 Flashbang', 'kit': 'assault'},
'U_SteyrAug_M320_LVG': {'type': 'explosive', 'name': 'SteyrAug M320 LVG', 'kit': 'assault'},
'U_SteyrAug_M320_SHG': {'type': 'shotgun', 'name': 'SteyrAug M320 Shotgun', 'kit': 'assault'},
'U_SteyrAug_M320_SMK': {'type': None, 'name': 'SteyrAug M320 Smoke', 'kit': 'assault'},
'U_L85a2_M320_HE': {'type': 'explosive', 'name': 'L85a2 M320 HE', 'kit': 'assault'},
'U_L85a2_M320_FLASH': {'type': None, 'name': 'L85a2 M320 Flashbang', 'kit': 'assault'},
'U_L85a2_M320_LVG': {'type': 'explosive', 'name': 'L85a2 M320 LVG', 'kit': 'assault'},
'U_L85a2_M320_SHG': {'type': 'shotgun', 'name': 'L85a2 M320 Shotgun', 'kit': 'assault'},
# Assault Rifle Underslung Shotguns
'U_M416_M26_Buck': {'type': 'shotgun', 'name': 'M416 M26 Buckshot', 'kit': 'assault'},
'U_M416_M26_Flechette': {'type': 'shotgun', 'name': 'M416 M26 Flechette', 'kit': 'assault'},
'U_M416_M26_Frag': {'type': 'shotgun', 'name': 'M416 M26 Fragment', 'kit': 'assault'},
'U_M416_M26_Slug': {'type': 'shotgun', 'name': 'M416 M26 Slug', 'kit': 'assault'},
'U_SCAR-H_M26_Buck': {'type': 'shotgun', 'name': 'SCAR-H M26 Buckshot', 'kit': 'assault'},
'U_SCAR-H_M26_Flechette': {'type': 'shotgun', 'name': 'SCAR-H M26 Flechette', 'kit': 'assault'},
'U_SCAR-H_M26_Frag': {'type': 'shotgun', 'name': ' SCAR-H M26 Fragment', 'kit': 'assault'},
'U_SCAR-H_M26_Slug': {'type': 'shotgun', 'name': ' SCAR-H M26 Slug', 'kit': 'assault'},
'U_CZ805_M26_Buck': {'type': 'shotgun', 'name': ' CZ805 M26 Buckshot', 'kit': 'assault'},
'U_CZ805_M26_Flechette': {'type': 'shotgun', 'name': ' CZ805 M26 Flechette', 'kit': 'assault'},
'U_CZ805_M26_Frag': {'type': 'shotgun', 'name': ' CZ805 M26 Fragment', 'kit': 'assault'},
'U_CZ805_M26_Slug': {'type': 'shotgun', 'name': ' CZ805 M26 Slug', 'kit': 'assault'},
'U_M16A4_M26_Buck': {'type': 'shotgun', 'name': ' M16A4 M26 Buckshot', 'kit': 'assault'},
'U_M16A4_M26_Flechette': {'type': 'shotgun', 'name': ' M16A4 M26 Flechette', 'kit': 'assault'},
'U_M16A4_M26_Frag': {'type': 'shotgun', 'name': 'M16A4 M26 Fragment', 'kit': 'assault'},
'U_M16A4_M26_Slug': {'type': 'shotgun', 'name': 'M16A4 M26 Slug', 'kit': 'assault'},
# Gadgets
'U_Defib': {'type': 'gadget', 'name': 'Defibrillator', 'kit': 'assault'},
'U_PortableMedicpack': {'type': 'gadget', 'name': 'FIRST AID PACK', 'kit': 'assault'},
'U_Medkit': {'type': 'gadget', 'name': 'MEDIC BAG', 'kit': 'assault'},
'U_Repairtool': {'type': 'gadget', 'name': 'Repair-Tool', 'kit': 'engineer'},
# Misc
'Death': {'type': '', 'name': 'Death', 'kit': ''},
'DamageArea': {'type': None, 'name': 'Suicide', 'kit': ''},
'RoadKill': {'type': None, 'name': 'Road Kill', 'kit': ''},
'SoldierCollision': {'type': None, 'name': 'Suicide (Collision)', 'kit': ''},
'Suicide': {'type': None, 'name': 'Suicide', 'kit': ''},
}
BF4_WEAPON_GROUPS = {
'shotgun': [k for k,v in BF4_WEAPON_NAMES_BY_ID.items() if v.get('type') == 'shotgun'],
'handgun': [k for k,v in BF4_WEAPON_NAMES_BY_ID.items() if v.get('type') == 'handgun'],
'explosive': [k for k,v in BF4_WEAPON_NAMES_BY_ID.items() if v.get('type') == 'explosive'],
} | [
"rootdesign@gmail.com"
] | rootdesign@gmail.com |
03710aad4caae02e3b37f38eb9379396d049d532 | f5171500752e258406718a0d2f33e027e97e9225 | /Simulators/BEsim-vs/AppGenScripts/sobelTester.py | f6523f41eb1e0624d3a046d3d7a7f7aaef7af9a4 | [] | no_license | UFCCMT/behavioural_emulation | 03e0c84db0600201ccb29a843a4998dcfc17b92a | e5fa2f1262d7a72ab770d919cc3b9a849a577267 | refs/heads/master | 2021-09-25T17:55:33.428634 | 2018-10-24T15:55:37 | 2018-10-24T15:55:37 | 153,633,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import re
import csv
import highGen
import appGenerator
import os
proc=[2,4,8,16,32]
dim=[2,2,3,4,6]
rows=[320,480,640,800,1024,1280,1600]
cols=[240,320,480,600,768,1024,1200]
#proc=[4]
#dim=[2]
#size=[128]
operation=["Sobel"]
os.system("sobelInputGenerator.py")
for op in operation:
for procIndex in range(len(proc)):
for r in rows:
procNum=proc[procIndex]
highGen.highGen("input_%s_%d_%d.txt" %(op,procNum,r),"output_%s_%d_%d" %(op,procNum,r),procNum,dim[procIndex],r)
appGenerator.appGenerator("output_%s_%d_%d" %(op,procNum,r), "VS_%s_%d_%d.txt" %(op,procNum,r),procNum,dim[procIndex],r)
| [
"aravindneelakantan@gmail.com"
] | aravindneelakantan@gmail.com |
a2de12fa9022f2658a10c58f120c7721ea5711ff | a97f789530412fc1cb83170a11811f294b139ee8 | /疯狂Python讲义/codes/04/4.2/indent_error.py | a5376f270c8bc4c43ab4de3efb842b2239fcf796 | [] | no_license | baidongbin/python | 3cebf2cc342a15b38bf20c23f941e6887dac187a | 1c1398bff1f1820afdd8ddfa0c95ccebb4ee836f | refs/heads/master | 2021-07-21T19:23:32.860444 | 2020-03-07T11:55:30 | 2020-03-07T11:55:30 | 195,909,272 | 0 | 1 | null | 2020-07-21T00:51:24 | 2019-07-09T01:24:31 | Python | UTF-8 | Python | false | false | 166 | py | s_age = input("请输入您的年龄:")
age = int(s_age)
if age > 20:
print("年龄已经大于20岁了")
print("20岁以上的人应该学会承担责任...")
| [
"baidongbin@qq.com"
] | baidongbin@qq.com |
2e2165a60eee1738d27216e3431329a016dd2989 | aa75e0ccf5808737b3a29d21769f6de90d01033a | /kaoshi/6.py | 104f58cb98f90046e9f536657e57c9a41488c8f3 | [] | no_license | clarkkentzh/python | 1b2a740433bca7d73b50bd6b266d2051640b57b0 | 7fb850d0aa1d614fa1aa5a4d8368a461771410fb | refs/heads/master | 2021-07-02T05:04:34.873232 | 2017-09-20T08:14:20 | 2017-09-20T08:14:20 | 100,256,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | #coding=utf-8
import math
st = raw_input()
lists = st.split(" ")
rows = int(math.sqrt(len(lists)))
k = 0
list1 = []
list2 = []
for i in range(rows):
list1.append([])
for j in range(rows):
list1[i].append(lists[k])
k += 1
def fun1(lists):
for i in lists[0]:
list2.append(i)
del lists[0]
return lists
def fun(lists):
a = fun1(lists)
b = map(list,zip(*a))
b.reverse()
if(len(b) >= 1):
fun(b)
fun(list1)
print list2
| [
"clarkkent.z@outlook.com"
] | clarkkent.z@outlook.com |
848d2f480bd397f153f5c878357136984965741b | 970c31bbfaa5d6c9b2c4c60e9958a0ec6c002bf7 | /config/jupyter-password.py | 135fc4063f12ed050d09aec67a3316962e263884 | [
"MIT"
] | permissive | nyugoh/machine-learning-container | 2a0be540383ad382c1684febd29aae4ec172095d | 7d8a310cee304a8c6cddeebb5b3f9ff09a9b4955 | refs/heads/master | 2021-01-25T14:23:33.089705 | 2018-03-03T14:23:21 | 2018-03-03T14:23:21 | 123,692,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | from notebook.auth import passwd
import os
jupyter_config = os.path.expanduser('./jupyter_notebook_config.py')
line = "==========================================================================="
print line
print "Setting Jupyter additional configuration"
print line
print "Please set a strong password"
pwhash = passwd()
print line
print "Following will be added to %s " % (jupyter_config)
jupyter_comment_start = "# Start of lines added by jupyter-password.py"
jupyter_comment_end = "# End lines added by jupyter-passwordd.py"
jupyter_passwd_line = "c.NotebookApp.password = u'%s'" % (pwhash)
jupyter_no_browser = "c.NotebookApp.open_browser = False"
print " "
print " %s " % (jupyter_comment_start)
print " %s " % (jupyter_passwd_line)
print " %s " % (jupyter_no_browser)
print " %s " % (jupyter_comment_end)
print line
with open(jupyter_config, 'a') as file:
file.write('\n')
file.write(jupyter_comment_start + '\n')
file.write(jupyter_passwd_line + '\n')
file.write(jupyter_no_browser + '\n')
file.write(jupyter_comment_end + '\n')
| [
"nyugoh@gmail.com"
] | nyugoh@gmail.com |
d4c60b7b753d2ae5a08ffb5223c250db055a3bfe | 0cc37ea66fff1ff87fd6b01c9ea66e0392faaf1d | /centroid3.py | 471e03ca973b40dcf02c381a88dc616ad219a11e | [] | no_license | pratheeknagaraj/ssp_other_projects | 2062093930644cd90f533cf5a1948d6959f0c5e7 | 5d5e932e152b60e93424bc5ebfcee6fa9ca24fba | refs/heads/master | 2020-11-26T17:04:25.607261 | 2012-12-10T11:08:48 | 2012-12-10T11:08:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | '''
Pratheek Nagaraj
July 1, 2011
Image Processing and Centriod Project
2. Programming Actvity Part (c)
This program will process an image file and find the centroid of the object
'''
#Import Packages
from visual import *
from numpy import *
from math import *
import pyfits
import numdisplay
def centroidFunc():
#Display the Object Image
'Open the object image and display using DS9'
numdisplay.display(objectImage)
normalizedObject = modify( flatImage )
image = noise( normalizedObject )
#Bright Pixel
'Get bright pixel location'
xPos = input("Please enter the x value for the location of the bright pixel: ")
yPos = input("Please enter the y value for the location of the bright pixel: ")
print "The radius corresponds to the numer of points to be used in the calculation i.e. 1->9 and 3->49"
radius = input("Please enter the radius for the location of the bright pixel: ")
brightLoc = image[ yPos-radius-1:yPos+radius, xPos-radius-1:xPos+radius ]
#Find Centroid
'Call Centroid Function to locate the given centroid'
centroidArray = calcCentroid( brightLoc )
centroid = ( xPos + centroidArray[0], yPos + centroidArray[1] )
uncertainty = centroidArray[2]
#Output
print "The centroid of the object is: " + str(centroid)
print "The uncertainty of the calculation is: " + str(uncertainty)
'Give choice for a visual'
choice = raw_input("Would you like to see a visual (Y/N)? ")
if choice == "y" or choice == "Y" or choice == "Yes" or choice == "yes":
visualFunc( brightLoc, centroidArray[0], centroidArray[1], uncertainty )
else:
print "Goodbye!"
def calcCentroid( array ):
#Total Sum
'Use Python command to sum all elements in the array'
sumArray = sum(array);
#X Coordinate
'Create the sum of the X Weighted Values'
xSum = 0.0
'Loop through the array and weight the X Values'
weights = range( -len(array)/2+1, len(array)/2+1 )
for element in array:
pos = 0
for point in element:
xSum = xSum + weights[pos] * point
pos = pos + 1
'Find the X Coordinate by dividing by the total sum'
xCoor = xSum / sumArray
xCoor = round(xCoor,3)
#Y Coordinate
'Create the sum of the Y Weighted Values'
ySum = 0.0
'Loop through the array and weight the Y Values'
weight = len(array)/2
for element in array:
ySum = ySum + weight*sum(element)
weight = weight - 1
'Find the Y Coordinate by dividing by the total sum'
yCoor = ySum / sumArray
yCoor = round(yCoor,3)
uncertainty = 0.0
for element in array:
for element2 in element:
uncertainty = uncertainty + 2 * sqrt(element2)
uncertainty = uncertainty / sumArray
uncertainty = round( uncertainty, 3 )
return xCoor, yCoor, uncertainty
def modify( flatImage ):
#Flat Image
'Normalize the flat image using the mean'
mean = flatImage.mean()
normalizedFlat = flatImage / mean
#Normalize Object Image
'Normalize the object image using the normalized flat'
normalizedObject = objectImage / normalizedFlat
return normalizedObject
def noise( normalizedObject ):
#Noise Floor
'Get boundaties and adjust image'
x1Bound = input("Please enter left x bound for the noise floor: ")
x2Bound = input("Please enter right x bound for the noise floor: ")
y1Bound = input("Please enter lower y bound for the noise floor: ")
y2Bound = input("Please enter upper y bound for the noise floor: ")
print "---------------------------------------------"
subImage = normalizedObject[ y1Bound - 1: y2Bound, x1Bound - 1: x2Bound ]
meanValue = subImage.mean()
'New image modified with floor'
image = normalizedObject - meanValue
image[where(image < 0)]=0
return image
def visualFunc( array, x, y, unc ):
#Display a visual of the centroid calculation
'Create brightness circles with nested for loop'
maxValue = array.max()
print maxValue
posY = -len(array)/2
for row in array:
posX = -len(array)/2
for element in row:
sphere( radius = element/(maxValue*2), pos = (posX, posY), color = color.blue )
posX = posX + 1
posY = posY + 1
'Create circle for centroid'
sphere(radius = unc, pos = ( x, y, 1 ), color = color.red )
#Input
'Open the object image and the flat image'
global objectImage
objectImage = pyfits.getdata('sampleimage.fit')
global flatImage
flatImage = pyfits.getdata('flat.fit')
centroidFunc()
| [
"pratheek.nagaraj@gmail.com"
] | pratheek.nagaraj@gmail.com |
9b8ce594f608bd4ca62109570db9828ca5bfef0a | 149f6abbc2b62d9e0c71ebc8c14643a7d6313a75 | /Validaciones/valida_PoC.py | 2b4d6a1d861f385615b29129067a723c084a9d83 | [] | no_license | mcvalenti/ARXCODE | b2c7dc9cd25674fae932f9a3b339c10fcf1ae5cf | 51cd3c159ee7a9e0b909e46eb6fcb684ff52cb86 | refs/heads/master | 2021-01-17T09:27:01.373266 | 2018-08-29T19:44:59 | 2018-08-29T19:44:59 | 68,813,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,052 | py | '''
Created on 30/06/2017
Se transcriben los datos de la bibliografia
(Lei Chien ) a fin de validar que la formula
y los metodos que se utilizan para el calculo
de la PoC son correctos.
@author: mcvalenti
'''
import numpy as np
from datetime import datetime
from scipy.integrate import dblquad
from CDM.cdmParser import CDM
from TleAdmin.TLE import Tle
from Encuentro.Encuentro import Encuentro
from SistReferencia.sist_deCoordenadas import ricSis
def calcula_Poc_manual(mu_x,mu_y,sig2_xc,sig2_yc):
#-----------------------------------------------
# CAlculo de la PoC
#-----------------------------------------------
ra=0.01
exp_c=np.exp(-ra*ra/(2*np.sqrt(sig2_xc)*np.sqrt(sig2_yc)))
PoC=np.exp((-1.0/2.0)*((mu_x*mu_x/(sig2_xc))+(mu_y*mu_y/(sig2_yc))))*(1-exp_c)
PoC_int=dblquad(lambda y, x: (1.0/(2*np.sqrt(sig2_xc)*np.sqrt(sig2_yc)))*np.exp((-1.0/2.0)*((x*x/(sig2_xc))+(y*y/(sig2_yc)))), mu_x-ra, mu_x+ra, lambda y: -np.sqrt(ra*ra-(y-mu_x)*(y-mu_x))+mu_y, lambda y: np.sqrt(ra*ra-(y-mu_x)*(y-mu_x))+mu_y)
return PoC, PoC_int
def proyecta_plano_de_encuentro(rsw_vect,cov_rsw,phi):
"""
Proyecta los valores del encuentro al plano de encuentro (x,y)
-----------------------------------------------------------------
inputs
rsw_vect: vector con las posciones relativas en el sistema
(RSW) - [numpy array]
cov_rsw: ma. de Covarianza - [numpy array (matrix)]
phi: angulo entre los vectores velocidad.
"""
phi_rad=phi*np.pi/180.0
mu_x=rsw_vect[0]
mu_y=np.sqrt(rsw_vect[1]*rsw_vect[1]+rsw_vect[2]*rsw_vect[2])
sig2_xc=cov_rsw[0][0]
sig2_yc=cov_rsw[1][1]*np.cos(phi_rad/2.0)*np.cos(phi_rad/2.0)+cov_rsw[2][2]*np.sin(phi_rad/2.0)*np.sin(phi_rad/2.0)
return mu_x,mu_y,sig2_xc,sig2_yc
def valida_bibl():
"""
Evalua los valores del ejemplo de la bibliografia
tanto para la formula de PoC explicita en RSW,
como para el calculo de la integral
"""
#===============================================
# GEOMETRIA DEL ENCUENTRO (Datos del Libro)
#===============================================
#Posiciones en el TCA (ECI)
r_sat=np.array([1457.273246,1589.568484,6814.189959])
v_sat=np.array([7.001731,2.439512,0.926209])
r_deb=np.array([1457.532155,1588.932671,6814.316188])
v_deb=np.array([3.578705,6.172896,2.200215])
#-----------------------------------------------
# posicion relativa del obj2 respecto a obj1
# en el sistema de referencia RSW.
#-----------------------------------------------
dr=0.031731
ds=0.436476
dw=0.543785
# angulo entre velocidades relativas phi [grados]
phi=102.458
phi_rad=phi*np.pi/180.0
#-----------------------------------------------
# Estadistica
#-----------------------------------------------
# combinacion de errores
sig_r_sat=0.0231207
sig_r_deb=0.0363234
sig_s_sat=0.2061885
sig_s_deb=0.4102069
sig_w_sat=0.0719775
sig_w_deb=0.0341134
sig_s_comb=0.4591115
sig_w_comb=0.0796523
#-----------------------------------------------
# PLANO DE ENCUENTRO
#-----------------------------------------------
# Posicion relativa de Obj2
mu_x=dr
mu_y=0.697294
sig_x=0.0430576 # comp radial.
sig_y=0.2941297 # comp horizontal
sig2_xc=sig_r_sat*sig_r_sat+sig_r_deb*sig_r_deb
sig2_yc=sig_s_comb*sig_s_comb*np.cos(phi_rad/2.0)*np.cos(phi_rad/2.0)+sig_w_comb*sig_w_comb*np.sin(phi_rad/2.0)*np.sin(phi_rad/2.0)
# print'=========================================='
# print 'varianzas de bibliog = ', sig_x,sig_y
# print 'varianzas calculadas = ', round(np.sqrt(sig2_xc),7), round(np.sqrt(sig2_yc),7)
ra=0.01
#-----------------------------------------------
# CAlculo de la PoC
#-----------------------------------------------
exp_c=np.exp(-ra*ra/(2*sig_x*sig_y))
PoC=np.exp((-1.0/2.0)*((mu_x*mu_x/(sig_x*sig_x))+(mu_y*mu_y/(sig_y*sig_y))))*(1-exp_c)
PoC_int=dblquad(lambda y, x: (1.0/(2.0*np.pi*sig_x*sig_y))*np.exp((-1.0/2.0)*((x*x/(sig_x*sig_x))+(y*y/(sig_y*sig_y)))), mu_x-ra, mu_x+ra, lambda y: -np.sqrt(ra*ra-(y-mu_x)*(y-mu_x))+mu_y, lambda y: np.sqrt(ra*ra-(y-mu_x)*(y-mu_x))+mu_y)
Poc_bibl=0.0001807975
# dif_poc=PoC-Poc_bibl
# dif_porc=dif_poc*100.0/Poc_bibl
print '=========================================='
print 'PoC calculada en forma explicita = ','%.7e' % round(PoC,11)
print 'PoC calculada mediante una integral = ','%.7e' % round(PoC_int[0],11)
print 'PoC calculada en la bibliografia = ', '%.7e' % round(Poc_bibl,11)
# print 'Diferencia = ', dif_poc
# print 'Porcentanje de error = ', dif_porc
#===============================================
# Posicion relativa calculada a partir de r,v
#===============================================
dif_r=r_deb-r_sat
rr,i,c=ricSis(r_sat,v_sat, dif_r)
dif_r1=r_sat-r_deb
rr1,i1,c1=ricSis(r_deb,v_deb, dif_r1)
print'=========================================='
print 'Posicion relativa bibliografia = ', dr,ds,dw
print 'Posicion relativa calculada satelite = ', round(rr,7), round(i,7), round(c,7)
print 'Posicion relativa calculada deb = ', round(rr1,7), round(i1,7), round(c1,7)
def valida_cdm(cdm_archivo):
"""
Extrae datos del CDM y calcula la PoC con el metodo
de formula explicita de Lei Chen.
"""
cdm=CDM(cdm_archivo)
#=================
# Desgloce del CDM
#=================
sat_id=cdm.noradID_mision
deb_id=cdm.noradID_deb
TCA=cdm.TCA
v_sat=cdm.v_sat
v_deb=cdm.v_deb
dr=float(cdm.dr)/1000.0
ds=float(cdm.ds)/1000.0
dw=float(cdm.dw)/1000.0
var_r=float(cdm.cr_r)*0.000001
var_s=float(cdm.ct_t)*0.000001
var_w=float(cdm.cn_n)*0.000001
# poc_cdm=float(cdm.POC)
#===============================================
#Calculo el angulo entre los vectores velocidad.
#===============================================
cos_phi=np.dot(v_sat,v_deb)/(np.sqrt(np.dot(v_sat,v_sat))*np.sqrt(np.dot(v_deb,v_deb)))
phi=np.arccos(cos_phi)
#===============================================
#Calculo la Probabilidad de Colision.
#===============================================
rsw_vect=[dr,ds,dw]
cov_rsw=np.array([[var_r,0,0],[0,var_s,0],[0,0,var_w]])
mu_x,mu_y,sig2_xc,sig2_yc=proyecta_plano_de_encuentro(rsw_vect,cov_rsw,phi)
PoC,PoC_int=calcula_Poc_manual(mu_x,mu_y,sig2_xc,sig2_yc)
# print '=========================================='
# print 'Proyeccion al Plano'
# print mu_x,mu_y,sig2_xc,sig2_yc
print '=========================================='
# print 'PoC del CDM = ','%.7e' % round(poc_cdm,11)
print 'PoC calculada en forma explicita = ','%.7e' % round(PoC,11)
print 'PoC calculada mediante una integral = ','%.7e' % round(PoC_int[0],11)
print '=========================================='
#===============================================
#Calculo del Encuentro.
#===============================================
tca_epoca=datetime.strptime(TCA,"%Y-%m-%dT%H:%M:%S.%f")
tle_sat=Tle.creadoxParam(sat_id, tca_epoca)
tle_deb=Tle.creadoxParam(deb_id, tca_epoca)
n=0
encuentro = Encuentro(tle_sat,tle_deb,tca_epoca,n)
print '=========================================='
print 'Min distancia Calculada = ', encuentro.mod_minDist
print 'TCA calculado = ', encuentro.tca_c
print 'Componentes RTN del CDM: ', dr, ds, dw
print 'Componentes RTN calculadas: ', encuentro.r_comp, encuentro.s_comp, encuentro.w_comp
if __name__=='__main__':
# valida_bibl() # CARGA LOS PARAMETROS DE LA BIBLIOGRAFIA Y CALCULA PoC.
# cdm_archivo='cdmxmlBluebook.xml'
# cdm_archivo='cdmTerraPegasus10.xml'
# cdm_archivo='25922_conj_23270_JAN_2013010_1603.xml'
cdm_archivo='24903_conj_33759_JAN_2013010_1322.xml'
valida_cdm(cdm_archivo) # CARGA vel, dif RTN, varianzas RTN y calcula PoC
| [
"mcvalenti717@hotmail.com"
] | mcvalenti717@hotmail.com |
307eb8a2de903b6a21d1b1e3a272603dad316992 | f0e75512dbeabd273e28a97016cdc1e51d26b7c4 | /FPT/migrations/0025_auto_20210128_0147.py | 3cc23f11f913e8b8aa7fb262fc2f9b8c88aeddea | [] | no_license | khoa03052000/FPT-Trainning-System | 339041b533d1acd2ef82750262d7cf6573604f85 | 21411aa387c20e2151316c062c3b964a12d5e168 | refs/heads/main | 2023-02-25T16:26:08.442087 | 2021-01-15T11:12:18 | 2021-02-01T05:10:10 | 329,796,901 | 0 | 0 | null | 2021-01-15T03:47:27 | 2021-01-15T03:17:50 | null | UTF-8 | Python | false | false | 951 | py | # Generated by Django 3.1.5 on 2021-01-28 01:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FPT', '0024_auto_20210127_1021'),
]
operations = [
migrations.AlterField(
model_name='trainee',
name='dot',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='trainer',
name='education',
field=models.CharField(blank=True, default='Greenwich', max_length=50),
),
migrations.AlterField(
model_name='trainer',
name='phone',
field=models.CharField(blank=True, default='09xx', max_length=12),
),
migrations.AlterField(
model_name='trainer',
name='working_place',
field=models.CharField(blank=True, default='FPT Co.', max_length=50),
),
]
| [
"khoahtgcd191049@fpt.edu.vn"
] | khoahtgcd191049@fpt.edu.vn |
2d35ba558e65b2aa0a4c270411cd0a7207189d72 | 9cf434b6ee59ab22496ee031fb4ab145bbaff1a2 | /tranque_v1.8.4_source/backend/src/targets/migrations/0025_threshold_kind.py | 9da935043934aadd20fada38b72528d8345ff01b | [] | no_license | oliverhernandezmoreno/SourcesOH | f2ff1a5e3377f0ac1fb8b3153d99d0ee703700b7 | 5d9ca5ab1caceafd4d11207139c9e56210156ef8 | refs/heads/master | 2023-01-05T02:51:25.172103 | 2020-08-27T14:39:34 | 2020-08-27T14:39:34 | 64,422,812 | 0 | 1 | null | 2022-12-30T17:25:10 | 2016-07-28T19:33:44 | JavaScript | UTF-8 | Python | false | false | 402 | py | # Generated by Django 2.1 on 2019-06-04 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('targets', '0024_target_remote'),
]
operations = [
migrations.AddField(
model_name='threshold',
name='kind',
field=models.SlugField(blank=True, max_length=255, null=True),
),
]
| [
"oliverhernandezmoreno@gmail.com"
] | oliverhernandezmoreno@gmail.com |
3876214b0e807bb05f2093c7352f8a9df05e35b5 | 76e2190fd9d18a25645bc6c19a2d7c08d2b7a065 | /crawler.py | b4a4f3086ec24704840fc9c8170b7969b5e600d3 | [] | no_license | crubalcaba/subsbot | 655f5b624ae0207c8b9ce766af72830b42b72d30 | 9a9d760b624ddde037651cc7b5ecd88f902e8cc9 | refs/heads/master | 2020-04-16T22:40:48.035041 | 2019-01-12T21:52:13 | 2019-01-12T21:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | import requests
import re
class Episode:
id = 0
name = 'no name'
subtitles = []
def __init__(self, id, name, subtitles):
self.id = id
self.name = name
self.subtitles = subtitles
def __str__(self):
return f'[{self.id}] -> `{self.name}` -> {self.subtitles}'
class Serie:
id = 0
name = "no name"
@staticmethod
def get_series_list():
result = []
series_page = requests.get('https://www.tusubtitulo.com/series.php').text
for serie in re.findall(
'<td class="line0"><img class="icon" src="images/icon-television.png" height="16" width="16"><a href="/show/(.*?)">(.*?)<',
series_page):
result.append(Serie(id=serie[0], name=serie[1]))
return result
def __init__(self, id, name):
self.id = id
self.name = name
def __str__(self):
return f'[{self.id}] -> {self.name}'
def get_seasons(self):
result = []
temporadas_page = requests.get(f'https://www.tusubtitulo.com/show/{self.id}').text
for temporada in re.findall('<a href="#" data-season="(.*?)">(.*?)</a>', temporadas_page):
result.append(temporada[0])
return result
def get_episodes(self, season):
result = []
episodes_page = requests.get(
f'https://www.tusubtitulo.com/ajax_loadShow.php?show={self.id}&season={season}').text
for episodio_text in re.findall('<table width="80%" border="0" cellpadding="0" cellspacing="0">(.*?)</table>',
episodes_page, re.DOTALL):
for episodio in re.findall("<a href='//www.tusubtitulo.com/episodes/(\d+)/(.*?)'>(.*?)</a>", episodio_text):
subtitles = []
for download in re.findall(f'(\d+/{episodio[0]}/\d+)">', episodes_page):
lang = 'Unknow'
for i in re.findall(
f'<td width="41%" class="language">\n\s*(.*?)\s*</td>\n\s*<td width="17%">\n.*?\n\s*<td>\n\s*<img src="//www.tusubtitulo.com/images/download.png" width=16" height="16" /><a href="//www.tusubtitulo.com/updated/{download}">',
episodio_text):
lang = i
break
subtitles.append((lang, f'https://www.tusubtitulo.com/updated/{download}'))
result.append(Episode(id=episodio[0], name=episodio[2], subtitles=subtitles))
return result
| [
"akiel@aleph.engineering"
] | akiel@aleph.engineering |
6a24d87959255b844418cf43580ce215287ee4be | b9e388d2cb2b96a2286030eee9501e22e4081a5e | /yolo3_verification_code_recognition/train.py | 9925a8b849a96206b6e939d4d1588044b810444d | [
"MIT"
] | permissive | quicklysnail/yolo3-simple-application | d61bcd0fc140d3cdcb9c4b96145cd887ec274ea8 | eb7a776fe190ef0df0ea6f1f1eeefeeea73aa224 | refs/heads/master | 2021-10-25T09:07:06.843744 | 2019-04-03T09:40:17 | 2019-04-03T09:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,391 | py | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
annotation_path = 'train.txt'
log_dir = 'logs/000/'
classes_path = 'model_data/voc_classes.txt'
anchors_path = 'model_data/yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
index_map = dict(zip(class_names, range(num_classes)))
anchors = get_anchors(anchors_path)
input_shape = (416,416) # multiple of 32, hw
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 32
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
callbacks=[logging, checkpoint])
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
batch_size = 32 # note that more GPU memory is required after unfreezing the body
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=100,
initial_epoch=50,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(log_dir + 'trained_weights_final.h5')
# Further training if needed.
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
| [
"819078740@qq.com"
] | 819078740@qq.com |
df0ff49589c0a9d5fc891039c4e7a9daaa4b2b3a | bc2e9f69a78b6ad372bf7c3ea52498d5a877f45c | /Project1/Modularize/ditk.text_similarity.var_siamese.py | 6497209496becc676fea4065257a62307181d257 | [] | no_license | Parthdbz/csci548-project | ab2b45344eed9f84833f82cd1f3335fcb2f4069e | 332e2705307a0c8c61ffb6b9bbb22b1c0305affc | refs/heads/master | 2020-05-04T20:18:55.279288 | 2019-04-04T06:11:57 | 2019-04-04T06:11:57 | 179,432,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py |
class var_siamese(ditk.text_similarity):
# Any shared data strcutures or methods should be defined as part of the parent class.
# A list of shared arguments should be defined for each of the following methods and replace (or precede) *args.
# The output of each of the following methods should be defined clearly and shared between all methods implemented by members of the group.
@classmethod
@abc.abstractmethod
def read_dataset(*args, **kwargs):
pass
@classmethod
@abc.abstractmethod
def train(*args, **kwargs):
pass
@classmethod
@abc.abstractmethod
def predict(*args, **kwargs):
pass
@classmethod
@abc.abstractmethod
def evaluate(*args, **kwargs):
pass
@classmethod
def bi_LSTM.encoder
pass
@classmethod
def LSTM.decoder
pass
@classmethod
def sentence.representation
pass
@classmethod
def wasserstein2tensor
pass
@classmethod
def hadamardproduct
pass
@classmethod
def multilayerpercptron
pass
| [
"noreply@github.com"
] | noreply@github.com |
76365823d072d54826924eb954f54f08ee1178c8 | 616c3c02be31b9ae4d06bd7c5a8d4a2e7c446aa1 | /401.二进制手表.py | c1394764a8ed4675d2bc74aff7690c1c59620be7 | [] | no_license | L1nwatch/leetcode-python | 8b7c47c04ee9400d50d8b0764a544a0463df8f06 | 0484cbc3273ada25992c72105658cd67411c5d39 | refs/heads/master | 2023-01-11T14:53:15.339276 | 2023-01-11T05:24:43 | 2023-01-11T05:24:43 | 194,516,548 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | #
# @lc app=leetcode.cn id=401 lang=python3
#
# [401] 二进制手表
#
# @lc code=start
class Solution:
def readBinaryWatch(self, turnedOn: int) -> List[str]:
result = list()
for hour in range(12):
bin_hour_1 = bin(hour).count("1")
for minute in range(60):
if bin_hour_1 + bin(minute).count("1") == turnedOn:
result.append(f"{hour}:{minute:0>2d}")
return result
# @lc code=end
| [
"watch1602@gmail.com"
] | watch1602@gmail.com |
2b657faad4e21d51fb84c11930db76c245c17871 | bfd7b57fd2f5df4a96691d92d4187d9b226c447e | /allInformation_id.py | b779b66e40ffdc3b525024ed40bdfc5f7f10ee9a | [] | no_license | 3AM-dev/GetData | 1d0eab1f3a243ed06086d236078b524bf0679c67 | b6d2b6c11b4d04fd95c6f6eb1e91de47c74d6ea1 | refs/heads/master | 2023-06-09T06:22:05.302209 | 2021-07-04T11:56:19 | 2021-07-04T11:56:19 | 382,832,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | '''
作用:通过id获取所有的数据信息
版本:21/5/14修改了原来的部分,行头整理完毕
'''
import pandas as pd
import numpy as np
from pymatgen.ext.matproj import MPRester
import csv
# path = r'D:\机器学习--孙老师\data\5.12\Ti+Cr\12.csv'
# data = pd.read_csv(path)
# print(type(data))
# data=data.values
# data_num=data.shape[0]#id的个数
# data=data.tolist()#二维数组转一维数组
# data_id=[]
# for i in range(0,data_num):#data.shape[0]
# data_id.append(data[i])
# data_id=list(np.array(data_id).flatten())#转化为一维数组
# #print(data_id)
# x=[]
# print("开始注册!")
# with MPRester("K5JtfUOhunkvFWgT") as m:
# print('注册成功,开始执行!')
# for i in range(0,data_num):#ata_num
# result = m.get_data(data_id[i])
# result.insert(0,data_id[i])
# #print(result)
# x.append(result)
# if i%100==0:
# print('已经处理数据:',i)
# print('任务结束!')
#
#
# with open('D:/机器学习--孙老师/data/5.12/Ti+Cr/cr.csv', 'w', newline='') as csvfile:
# writer = csv.writer(csvfile)
# for row in x:
# writer.writerow(row)
path = r'D:\机器学习--孙老师\data\5.12\Ti+Cr\Ti+Mo.csv'
data = pd.read_csv(path)
# print(type(data))
data = data.values
data_num = data.shape[0]#id的个数
data = data.tolist()#二维数组转一维数组
data_id = []
for i in range(0, data_num):#data.shape[0]
data_id.append(data[i])
data_id = list(np.array(data_id).flatten())#转化为一维数组
#print(data_id)
x = []
print("开始注册!")
with MPRester("K5JtfUOhunkvFWgT") as m:
print('注册成功,开始执行!')
num_count = 0 # 数据条目个数
for i in range(0, data_num):#ata_num
result = m.get_data(data_id[i])
result.insert(0, data_id[i])
# 设置表头,此时result[0]里面的从文件中读的id,result[1]才是dict
if i == 0:
hang = []
for key in result[1].keys():
hang.insert(0, key)
x.append(hang) # 设置表头
# 只要数据,此时result[0]里面的从文件中读的id,result[1]才是dict
temp = []
for key, value in result[1].items():
# temp.insert(0, key)
temp.insert(0, value)
x.append(temp)
temp = []
if i% 100 == 0:
print('已经处理数据:', i)
num_count = i
print("该任务总共打印数据:", num_count)
print('任务结束!')
with open('D:/机器学习--孙老师/data/5.12/Ti+Cr/try2.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in x:
writer.writerow(row) | [
"977725368@qq.com"
] | 977725368@qq.com |
b37cde0f2e69f7b1f6aae3518ce11961dc698a7b | 133b62fc8c68b56ff0512284c5846ee94043327f | /nets/try_out_ms_net.py | 277e1f45fc2cf117eb536237453204f622927368 | [] | no_license | deanpospisil/v4cnn | 87c41d1eb5afeeaaec12350d53cb01c276277103 | b0532e9083eb023c2247e5b53833229e7e6eb740 | refs/heads/master | 2020-04-15T13:10:53.447804 | 2020-04-07T17:24:01 | 2020-04-07T17:24:01 | 56,346,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 13 16:21:25 2017
@author: dean
"""
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 29 15:06:13 2017
@author: dean
"""
from pylab import *
caffe_root = '/home/dean/caffe/' # this file should be run from {caffe_root}/examples (otherwise change this line)
import numpy as np
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
import os
os.chdir(caffe_root)
os.chdir('examples')
from caffe import layers as L, params as P
import matplotlib.pyplot as plt
caffe.set_device(0)
caffe.set_mode_gpu()
#%%
### load the solver and create train and test nets
solver = None # ignore this workaround for lmdb data (can't instantiate two solvers on the same data)
#solver = caffe.SGDSolver('/home/dean/caffe/examples/cifar10/cifar10_full_solver.prototxt')
solver = caffe.SGDSolver('/home/dean/caffe/models/msnet/solver.prototxt')
solver.net.params['region1_resmod0_conv'][0].data
#%%
for k, v in solver.net.blobs.items():
print((k, v.data.shape))
batch = 500
#solver.net.forward() # train net
#solver.test_nets[0].forward() # test net (there can be more than one)
#plt.imshow(solver.net.blobs['data'].data[:8, 0].transpose(1, 0, 2).reshape(28, 8*28), cmap='gray'); axis('off')
#print 'train labels:', solver.net.blobs['label'].data[:8]
#solver.step(1)
#
#plt.imshow(solver.net.params['conv1'][0].diff[:, 0].reshape(4, 5, 5, 5)
# .transpose(0, 2, 1, 3).reshape(4*5, 5*5), cmap='gray'); axis('off')
niter = 50000
test_interval = 100
# losses will also be stored in the log
train_loss = np.zeros(niter)
test_acc = np.zeros(int(np.ceil(niter / test_interval)))
output = np.zeros((niter, 8, 10))
# the main solver loop
for it in range(niter):
solver.step(1) # SGD by Caffe
# store the train loss
train_loss[it] = solver.net.blobs['loss'].data
if it % test_interval == 0:
correct = 0
for test_it in range(100):
solver.test_nets[0].forward()
correct += sum(solver.test_nets[0].blobs['accuracy'].data.argmax(1)
== solver.test_nets[0].blobs['label'].data)
test_acc[it // test_interval] = correct / (batch*100.)
print(correct / (batch*100.))
#%%
_, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(np.arange(niter), train_loss)
ax2.plot(test_interval * np.arange(len(test_acc)), test_acc, 'r')
ax1.set_xlabel('iteration')
ax1.set_ylabel('train loss')
ax2.set_ylabel('test accuracy')
ax2.set_title('Test Accuracy: {:.2f}'.format(test_acc[-1]))
#%%
def colorize(im):
#expects rxcxRGB
im -= np.min(im)
im /= np.max(im)
return im
image = 1
a_filter = 0
n_plot = 29
im = solver.net.blobs['data'].data[image, ...]
plt.figure(figsize=(1,1))
plt.imshow(colorize(np.copy(im).T.swapaxes(0,1)))
for n_conv in range(1,4):
t = solver.test_nets[0].blobs['conv{}'.format(n_conv + 1)].data
#conv = np.sum(np.real(np.fft.ifft2(np.fft.fft2(t)**2)[:, :, ...]), (0,1))
plt.figure(figsize=(40,2))
for i in range(1, n_plot):
plt.subplot(1, n_plot, i);
plt.imshow(t[image, a_filter+i,...]);
plt.xticks([]);plt.yticks([])
#plt.tight_layout()
#%%
for im in solver.net.params['conv1'][0].data:
t = im.T
t = t.swapaxes(0,1)
plt.figure()
plt.subplot(211)
plt.imshow(np.mean(t,-1));plt.colorbar();
plt.subplot(212)
plt.imshow(colorize(np.copy(t)))
#%%
def norm_cov(x, subtract_mean=True):
#if nxm the get cov mxm
x = x.astype(np.float64)
if subtract_mean:
x = x - np.mean(x, 0, keepdims=True)
diag_inds = np.triu_indices(x.shape[1], k=1)
numerator = np.sum(np.dot(x.T, x)[diag_inds])
vnrm = np.linalg.norm(x, axis=0, keepdims=True)
denominator = np.sum(np.multiply(vnrm.T, vnrm)[diag_inds])
norm_cov = numerator/denominator
return norm_cov
for n_conv in range(0, 5):
t = solver.net.params['conv{}'.format(n_conv + 1)][0].data
wt_cov = []
for a_filt in t:
a_filt_unrw = a_filt.reshape(a_filt.shape[0], np.product(a_filt.shape[1:]))
wt_cov.append(norm_cov(a_filt_unrw, subtract_mean=False))
plt.figure()
plt.hist(wt_cov);plt.xlim(-1,1);
#conv = np.sum(np.real(np.fft.ifft2(np.fft.fft2(t)**2)[:, :, ...]), (0,1))
| [
"dean@sarus.biostr.washington.edu"
] | dean@sarus.biostr.washington.edu |
0a7695a450556600a09b319b421f2220e47917a5 | 4f0cbdfe4b44c6a01dd7029900ee612f1466bda7 | /test.py | 3d26c742e9c6ea7a7cc37e23e677720cbf3aa83d | [] | no_license | dazedfunc0/testSpacy | 4d43ceb1dda54fb7aed5d0385f9416b997d0ecb6 | 72058f15e93e7754c9dff38a79ac2abba4e3fa2b | refs/heads/master | 2022-12-05T02:19:34.099739 | 2020-09-04T14:46:09 | 2020-09-04T14:46:09 | 292,871,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import spacy
from spacy import displacy
from spacy.lang.en.stop_words import STOP_WORDS
nlp = spacy.load("en_core_web_sm")
text = nlp('''Kraftwerk is a German band formed in Düsseldorf in 1970 by Ralf Hütter and Florian Schneider. Widely considered as innovators and pioneers of electronic music, they were among the first successful acts to popularize the genre.''')
# FILTERS OUT STOP WORDS
#print(STOP_WORDS) #prints all the stop words
#STOP_WORDS.add("lol") #adds "lol" as a stop word
nostops = []
for word in text:
if word.is_stop == False and word.is_punct == False:
nostops.append(word)
print(nostops)
print("\n")
# PRINTS EACH WORD FROM nostops
#for word in nostops:
# print(word.text)
# ADDS .pos_ .dep_ ATTRIBUTE
for word in text:
print(word.text, word.pos_, word.dep_)
#spacy.explain('advmod')
# VISUALIZES WORDS DEPENDENCY | HAS VISUALIZER OPTIONS (color, font, etc.) | SHOWS LEMMATIZED FORM OF THE WORDS
options = {'compact':True, 'bg':'#C0C0C0','color':'#000000', 'font':'Sans Serif', 'add_lemma':True}
displacy.serve(text, style="dep", options = options)
# SHOWS HOW CLOSE TWO WORDS ARE SEMANTICALLY
#for word1 in nostops:
# for word2 in nostops:
# print((word1.text, word2.text), "SImilarity is: ", word1.similarity(word2))
| [
"gsakhvadz3@gmail.com"
] | gsakhvadz3@gmail.com |
4870aa6fd357ecf7b67f52f9708a5a396f00d244 | 9112e89dc3840f6ff73a7c253c796b4530dbfb4d | /locationpro/wsgi.py | dd2581a58c2e92646ef18c938d4c71e342586984 | [] | no_license | RedAakash/DjangoLocationRESTApi | 0aa55315878b716e3bf249c79932750d95666969 | 5134a02fbaa5cdec3519d47db7ddd08ec78250e3 | refs/heads/master | 2021-09-24T20:49:14.764460 | 2019-12-24T21:09:30 | 2019-12-24T21:09:30 | 230,003,625 | 0 | 0 | null | 2021-09-22T18:17:57 | 2019-12-24T21:07:56 | Python | UTF-8 | Python | false | false | 399 | py | """
WSGI config for locationpro project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
# Standard Django WSGI bootstrap: point Django at the project's settings
# module, then expose the WSGI callable for application servers.
import os
from django.core.wsgi import get_wsgi_application
# setdefault: an externally supplied DJANGO_SETTINGS_MODULE still wins.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'locationpro.settings')
# Module-level ``application`` is the entry point WSGI servers look for.
application = get_wsgi_application()
| [
"akash@goognu.com"
] | akash@goognu.com |
8cc8e89134bba8cf346a36513c1e8a98f5c17c5b | f501c7310fa3ca46a58e44b81556d7826acfd67f | /modules/ApplicationControl/appman_gui.py | f1543d5d4e06442605c846c2bc2c5341aae7db56 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | shiva16/BCI | 7917ce910e6c8451fcbcc5cbe37866a08dd0cca7 | 00f97305481c62068ef4572c379721e8622518a7 | refs/heads/master | 2020-05-20T19:27:48.789782 | 2013-10-02T21:30:53 | 2013-10-02T21:30:53 | 37,503,971 | 1 | 0 | null | 2015-06-16T02:46:03 | 2015-06-16T02:46:03 | null | UTF-8 | Python | false | false | 30,056 | py | #!/usr/bin/python
import os
import appman
import confman
import subprocess as sp
from argparse import ArgumentParser
from ConfigParser import SafeConfigParser
from PyDragonfly import Dragonfly_Module, CMessage, copy_from_msg, copy_to_msg, MT_EXIT, MT_KILL
from time import sleep
import threading
import Dragonfly_config as rc
import re
import wx
import platform
from enthought.traits.api import HasTraits, Bool, Enum, Float, Str, List, File, \
Button, String, Instance
from enthought.traits.ui.api import Handler, View, Item, UItem, StatusItem, \
Group, HGroup, VGroup, spring, EnumEditor, ButtonEditor, TextEditor, InstanceEditor
from enthought.traits.ui.wx.animated_gif_editor import AnimatedGIFEditor
from output_stream import OutputStream
SRC_DIR = os.environ['BCI_MODULES']
CONFIG_DIR = os.environ['BCI_CONFIG']
DATA_DIR = os.environ['BCI_DATA']
def get_dirs(root, exclude=('attic',)):
    """Return the sorted names of visible subdirectories of *root*.

    Entries whose name starts with '.' are skipped, as is every name
    listed in *exclude* (default: 'attic').  Plain files are ignored.
    Raises OSError if *root* does not exist or is unreadable.
    """
    # Immutable tuple default replaces the original mutable-list default;
    # os.path.join replaces manual '/'-concatenation.
    names = sorted(
        entry for entry in os.listdir(root)
        if os.path.isdir(os.path.join(root, entry)) and not entry.startswith('.')
    )
    # Filter instead of in-place remove(), so callers' lists are never mutated.
    return [name for name in names if name not in exclude]
class Dragonfly_Read_Thread(threading.Thread):
    """Background thread that polls a Dragonfly module for messages.

    Each received message whose type is listed in ``subs`` is handed to
    ``callback(msg, data)``; when the callback returns 1 the thread stops
    itself and that value is exposed through get_result().  ``timeout`` is
    a tick budget (one tick per ~10 ms polling pass); a value <= 0 means
    poll until explicitly stopped.
    """
    def __init__(self, mod, subs, data, callback, timeout):
        # mod:      connected Dragonfly_Module to read messages from
        # subs:     message types the callback should see
        # data:     shared state dict passed to every callback invocation
        # callback: callable(msg, data) -> int; returning 1 stops the thread
        # timeout:  max polling ticks (<= 0 disables the budget)
        super(Dragonfly_Read_Thread, self).__init__()
        self.mod = mod
        self.subs = subs
        self.data = data
        self.callback = callback
        self.timeout = timeout
        self.stoprequest = threading.Event()
        self.result = 0
    def get_result(self):
        # Last value the callback returned; stays 0 if nothing matched.
        return self.result
    def stop(self):
        # Request the polling loop to exit on its next iteration.
        #print "Dragonfly_Read_Thread: Done, exiting.."
        self.stoprequest.set()
    def run(self):
        # Poll in ~10 ms ticks until stopped, the callback signals success,
        # or the tick budget is exhausted.
        if self.timeout > 0:
            timeout = self.timeout
        while not self.stoprequest.isSet():
            msg = CMessage()
            # Second argument 0 — assumed to mean a non-blocking read; confirm
            # against the Dragonfly API.
            rcv = self.mod.ReadMessage(msg, 0)
            if rcv == 1:
                if msg.GetHeader().msg_type in self.subs:
                    self.result = self.callback(msg, self.data)
                    if self.result == 1:
                        self.stop()
            sleep(.010)
            if self.timeout > 0:
                timeout = timeout - 1
                if timeout == 0:
                    #print "Timout reached"
                    self.stoprequest.set()
class SessionManager(HasTraits):
    """Traits model behind the Application Manager GUI.

    Owns the Dragonfly connection, the session start/stop state machine and
    all the widgets' backing traits.  Button handlers (_start_fired etc.)
    orchestrate pinging AppStarter hosts, launching modules, waiting for the
    executive's SESSION_CONFIG and tearing everything down again, using
    Dragonfly_Read_Thread workers to wait for specific message types.
    Note: Python 2 / wx / enthought.traits code.
    """
    # Hosting wx frame; assigned by MainWindow after construction.
    parent = Instance(wx.Frame)
    # Available session configurations (one directory each under CONFIG_DIR).
    config_dirs = get_dirs(CONFIG_DIR, exclude=['default', 'attic'])
    configs = Enum(*config_dirs)
    #monkey_dirs = get_dirs(DATA_DIR, exclude=['VideoLoggerData', 'KingKong'])
    #monkeys = Enum(*monkey_dirs)
    monkeys = Enum(['KingKong', 'Sim'])
    # Calibration choices shown in the drop-down (keyed by int id).
    calib_options = {1:'New Calibration', 2:'From Previous Session', 3:'From Specific Session'}
    calib_opts = Enum(calib_options.keys())
    calib_session = Str
    calib_session_list = List
    # Static label texts for the form.
    monkey_label = Str('Monkey:')
    config_label = Str('Configuration:')
    calib_label = Str('Calibration:')
    session_label = Str('Session:')
    # Worker thread currently waiting on a Dragonfly message (one at a time).
    rd_thread = Instance(Dragonfly_Read_Thread)
    # Enable/visibility flags for the individual widgets.
    monkeys_enabled = Bool(True)
    configs_enabled = Bool(True)
    calib_opts_enabled = Bool(True)
    calib_opts_visible = Bool(True)
    calib_session_enabled = Bool(True)
    calib_session_visible = Bool(False)
    # Toolbar buttons; handled by the matching _<name>_fired methods.
    start = Button()
    stop = Button()
    config = Button()
    modules = Button()
    kill = Button()
    # Scrolling status log shown at the bottom of the window.
    statusbar_text = output = OutputStream(max_len=3000) #Instance(OutputStream) #Str("Ready")
    start_button_label = Str('Start')
    start_enabled = Bool(True)
    stop_enabled = Bool(True)
    config_enabled = Bool(True)
    modules_enabled = Bool(True)
    kill_enabled = Bool(True)
    # Plain (non-trait) bookkeeping: current session number and the
    # module/host id maps collected while pinging.
    session_num = None
    module_id_list = None
    host_id_list = None
    multi_task_config = Bool(False)
    multi_task_file = None
    # Message types this module subscribes to on connect().
    subscriptions = [rc.MT_PING_ACK, rc.MT_APP_START_COMPLETE, rc.MT_SESSION_CONFIG,
                     rc.MT_EXIT_ACK, rc.MT_XM_END_OF_SESSION]
    # Status icons / animations and the state flags that toggle them.
    busy_anim_file = File(SRC_DIR + '/ApplicationControl/busy.gif')
    appman_busy = Bool(False)
    error_icon_file = File(SRC_DIR + '/ApplicationControl/error.gif')
    error_flag = Bool(False)
    running_icon_file = File(SRC_DIR + '/ApplicationControl/running.gif')
    session_starting = Bool(False)
    session_running = Bool(False)
    session_ending = Bool(False)
    session_interrupted = Bool(False)
    #modman_frame = None
    # Window layout: top row = selectors + buttons, bottom row = icons + log.
    view = View(VGroup(
                HGroup(
                    HGroup(
                        VGroup(Item(name='config_label', show_label=False, style='readonly'),
                               Item(name='configs', label='Session', show_label=False, enabled_when='configs_enabled')),
                        VGroup(Item(name='monkey_label', show_label=False, style='readonly'),
                               Item(name='monkeys', label='Monkey', show_label=False, enabled_when='monkeys_enabled')),
                        VGroup(Item(name='calib_label', show_label=False, style='readonly'),
                               Item(name='calib_opts', editor=EnumEditor(values=calib_options), enabled_when='calib_opts_enabled', label='CalibOpt', show_label=False),
                               visible_when='calib_opts_visible==True'),
                        VGroup(Item(name='session_label', show_label=False, style='readonly'),
                               Item(name='calib_session', width=175, editor = EnumEditor(name = 'calib_session_list'), enabled_when='calib_session_enabled', show_label=False),
                               visible_when='calib_session_visible==True'),
                        springy=True),
                    HGroup(
                        Item(name='start', editor=ButtonEditor(label_value='start_button_label'), show_label=False, enabled_when='start_enabled'),
                        Item(name='stop', show_label=False, enabled_when='stop_enabled', visible_when='stop_visible==True'),
                        Item(name='kill', show_label=False, enabled_when='kill_enabled'),
                        Item(name='config', show_label=False, enabled_when='config_enabled'),
                        #Item(name='modules', show_label=False, enabled_when='modules_enabled')
                        ),
                    ),
                HGroup(Item(name='busy_anim_file', editor=AnimatedGIFEditor(), show_label=False, visible_when='appman_busy==True'),
                       Item(name='running_icon_file', editor=AnimatedGIFEditor(), show_label=False, visible_when='session_running==True'),
                       Item(name='error_icon_file', editor=AnimatedGIFEditor(), show_label=False, visible_when='error_flag==True'),
                       Item('statusbar_text', editor=InstanceEditor(), show_label=False, resizable=True, height=100, style='custom')) #springy=True,
                #Item(name='statusbar_text', show_label=False, style='custom', resizable=True))
                ))
    #def __init__(self, **traits):
    #    HasTraits.__init__(self, **traits)
    def __init__(self):
        """Restore the last-used config/subject and sync dependent widgets."""
        super(SessionManager, self).__init__()
        last_app = appman.get_last()
        print last_app
        if last_app is not None:
            self.configs = last_app
        else:
            appman.write_last(self.configs)
        self.check_multi_task_config()
        self.get_last_subject()
    def _modules_fired(self):
        # Open the module-manager window (import deferred until first use).
        import modman
        self.modman_frame = modman.MainWindow(self.parent, -1, self.statusbar_text, self.modman_closing)
    def modman_closing(self):
        # Callback invoked by the module-manager window as it closes.
        print "modman exiting..."
        self.modman_frame = None
    #def modman_update(self):
    #    if self.modman_frame is not None:
    #        self.modman_frame.update_text(status)
    def update_status(self, status):
        """Append *status* to the GUI log and echo it to stdout."""
        self.statusbar_text.write(status + '\n')
        print "%s" % status
    def _configs_changed(self):
        # Re-evaluate multi-task mode and the remembered subject when the
        # selected configuration changes.
        self.check_multi_task_config()
        self.get_last_subject()
    def _monkeys_changed(self):
        self.update_calib_sessions()
    def _calib_opts_changed(self):
        self.update_calib_sessions()
    def get_last_subject(self):
        """Load the per-config last_subject.txt (if any) into `monkeys`."""
        last_subject_file = CONFIG_DIR + '/' + self.configs + '/last_subject.txt'
        if os.path.isfile(last_subject_file):
            f = open(last_subject_file, 'r')
            self.monkeys = f.read()
            f.close()
    def check_multi_task_config(self):
        """Detect a multi_task.config file and toggle calibration widgets.

        Multi-task configs carry their own calibration info, so the manual
        calibration selectors are hidden in that mode.
        """
        self.multi_task_file = CONFIG_DIR + '/' + self.configs + '/multi_task.config'
        if os.path.isfile(self.multi_task_file):
            self.multi_task_config = True
            self.calib_opts_visible = False
            self.calib_session_visible = False
        else:
            self.multi_task_config = False
            self.calib_opts_visible = True
            self.multi_task_file = None
            self.update_calib_sessions()
    def get_calib_sessions(self):
        # Populate calib_session_list from <DATA_DIR>/<monkey>/Raw; fall back
        # to empty on any error (e.g. missing directory).
        try:
            raw_folder = os.path.join(DATA_DIR, self.monkeys, "Raw")
            self.calib_session_list = get_dirs(raw_folder)
        except:
            self.calib_session = ""
            self.calib_session_list = []
    def update_calib_sessions(self):
        """Refresh the calibration-session drop-down to match calib_opts."""
        if (self.calib_options[self.calib_opts] == "From Previous Session") or \
           (self.calib_options[self.calib_opts] == "From Specific Session"):
            self.get_calib_sessions()
            if not self.calib_session_list:
                # No prior sessions: block Start until the user picks another option.
                self.start_enabled = False
                self.calib_session_visible = False
            else:
                self.start_enabled = True
                self.calib_session_visible = True
                print self.calib_session
                if (self.calib_options[self.calib_opts] == "From Specific Session"):
                    self.calib_session_enabled = True
                    if self.calib_session not in self.calib_session_list:
                        print "here"
                        self.calib_session = self.calib_session_list[-1]
                else:
                    # "From Previous Session": force the newest session, read-only.
                    self.calib_session_enabled = False
                    self.calib_session = self.calib_session_list[-1]
        else:
            self.start_enabled = True
            self.calib_session_visible = False
    def connect(self, server):
        """Connect to the Dragonfly MessageManager at *server* ('ip:port')."""
        self.mod = Dragonfly_Module(0, 0)
        self.mod.ConnectToMMM(server)
        for sub in self.subscriptions:
            self.mod.Subscribe(sub)
        self.mod.SendModuleReady()
        print "Connected to Dragonfly at", server
    def disconnect(self):
        self.mod.DisconnectFromMMM()
    def proc_modules_PING_ACK(self, msg, data):
        """Callback: tick off expected modules as their PING_ACKs arrive.

        Returns 1 once data['module_list'] is empty (all modules answered).
        """
        result = 0
        mdf = rc.MDF_PING_ACK()
        copy_from_msg(mdf, msg)
        if mdf.module_name in data['module_list']:
            data['module_id_list'][msg.GetHeader().src_mod_id] = mdf.module_name
            data['module_list'].remove(mdf.module_name)
        if not data['module_list']:
            result = 1
        return result
    def proc_hosts_PING_ACK(self, msg, data):
        """Callback: collect PING_ACKs from AppStarter instances per host.

        Module names look like 'AppStarter:<host>'; returns 1 when every
        expected host has answered.
        """
        result = 0
        mdf = rc.MDF_PING_ACK()
        copy_from_msg(mdf, msg)
        module_info = mdf.module_name.split(':')
        print module_info
        if (module_info[0] == "AppStarter") and (module_info[1] in data['host_list']):
            data['host_id_list'][msg.GetHeader().src_mod_id] = module_info[1]
            data['host_list'].remove(module_info[1])
        if not data['host_list']:
            result = 1
        return result
    def proc_APP_START_COMPLETE(self, msg, data):
        """Callback: returns 1 once every host reported APP_START_COMPLETE."""
        result = 0
        module_id = msg.GetHeader().src_mod_id
        if module_id in data['host_id_list']:
            data['host_id_list'].remove(module_id)
        if not data['host_id_list']:
            result = 1
        return result
    def proc_modules_EXIT_ACK(self, msg, data):
        """Callback: returns 1 once every known module acknowledged EXIT."""
        result = 0
        if msg.GetHeader().src_mod_id in data['module_id_list']:
            del data['module_id_list'][msg.GetHeader().src_mod_id]
        else:
            print "Unexpected module id: {0}".format(msg.GetHeader().src_mod_id)
        if not data['module_id_list']:
            result = 1
        return result
    def proc_MSG_RECEIVED(self, msg, data):
        # Trivial callback: any matching message ends the wait.
        return 1
    def proc_SESSION_CONFIG(self, msg, data):
        """Callback: parse the session number out of the executive's data_dir."""
        mdf = rc.MDF_SESSION_CONFIG()
        copy_from_msg(mdf, msg)
        # data_dir is expected to end in a zero-padded session number.
        m = re.match(".+\.[0]*(\d+)$", mdf.data_dir)
        if m:
            data['session_num'] = m.group(1)
        else:
            data['session_num'] = '???'
        return 1
    def subscribe_to_list(self, subs):
        for sub in subs:
            self.mod.Subscribe(sub)
        return subs
    def send_PING(self, module_name):
        # Broadcast a PING addressed to *module_name* ('*' pings everyone).
        mdf = rc.MDF_PING()
        mdf.module_name = module_name
        msg = CMessage(rc.MT_PING)
        copy_to_msg(mdf, msg)
        #return msg
        self.mod.SendMessage(msg);
    def send_APP_START(self, config):
        # Tell every AppStarter to launch the modules of *config*.
        mdf = rc.MDF_APP_START()
        mdf.config = config
        msg = CMessage(rc.MT_APP_START)
        copy_to_msg(mdf, msg)
        self.mod.SendMessage(msg)
    def disable_gui(self):
        # Lock the selection widgets while a session is starting/running.
        self.monkeys_enabled = False
        self.calib_session_enabled = False
        self.calib_opts_enabled = False
        self.configs_enabled = False
    def enable_gui(self):
        # Re-enable the selection widgets after a session ends.
        self.monkeys_enabled = True
        self.calib_opts_enabled = True
        self.configs_enabled = True
        if self.calib_options[self.calib_opts] != "From Previous Session":
            self.calib_session_enabled = True
    def wait_for_dragonfly_thread(self, raise_exception=True):
        """Spin the wx event loop until the current rd_thread finishes.

        If all session_* flags drop (the user cancelled), the thread is
        stopped and, when *raise_exception* is True, RuntimeError('Cancelled')
        is raised for the caller's except clause to handle.
        """
        while(self.rd_thread.isAlive()):
            wx.Yield()
            if (self.session_starting == False) and (self.session_ending == False) and \
               (self.session_running == False):
                self.rd_thread.stop()
                if raise_exception == True:
                    raise RuntimeError('Cancelled')
            sleep(0.010)
    def _config_fired(self):
        """Open the configuration editor for the selected session config."""
        #self.modman_frame = None
        try:
            if self.multi_task_config == True:
                root_files = {'MT' : 'multi_task.config'}
            else:
                root_files = {'XM' : 'XM.config', 'appman' : 'appman.conf'}
            frame = confman.MainWindow(self.parent, -1, CONFIG_DIR, self.configs, root_files)
        except (ValueError, IOError) as e:
            self.update_status("%s" % e)
            self.error_flag = True
    def _kill_fired(self):
        """After confirmation, broadcast KILL to forcibly stop all modules."""
        dlg = wx.MessageDialog(self.parent,
            "Do you really want to kill all running modules?",
            "Confirm Kill", wx.OK|wx.CANCEL|wx.ICON_QUESTION)
        result = dlg.ShowModal()
        dlg.Destroy()
        if result == wx.ID_OK:
            self.mod.SendSignal(MT_KILL)
            self.error_flag = False
            self.start_enabled = True
            self.module_id_list = None
            #self.kill_enabled = False
            self.update_status("Modules killed")
    def do_stop_modules(self):
        """Gracefully stop all session modules and wait for their EXIT_ACKs.

        If some modules fail to acknowledge within the timeout, the KILL
        button is enabled and an error is shown instead.
        """
        try:
            self.disable_gui()
            self.appman_busy = True
            self.error_flag = False
            self.start_enabled = False
            self.session_running = False
            self.update_status("Stopping modules...")
            self.session_ending = True
            # prep Dragonfly read thread
            data = {'module_id_list': self.module_id_list}
            self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_EXIT_ACK], data, self.proc_modules_EXIT_ACK, 300)
            self.rd_thread.start()
            appman.stop_modules(self.mod)
            self.wait_for_dragonfly_thread()
            result = self.rd_thread.get_result()
            if result == 0:
                self.kill_enabled = True
                self.update_status("Some modules did not respond: %s" % (', '.join(map(str, self.module_id_list.values()))))
                raise RuntimeError("!! Click KILL to close any modules still running !!")
            if self.session_num is not None:
                self.update_status("Session #%s is terminated" % self.session_num)
            else:
                self.update_status("Session is terminated")
            self.error_flag = False
            self.start_enabled = True
        except RuntimeError, e:
            self.update_status("%s" % e)
            self.error_flag = True
        finally:
            self.enable_gui()
            self.appman_busy = False
            self.session_ending = False
            self.module_id_list = None
    def _stop_fired(self):
        """Stop button: interrupt a running session or stop started modules."""
        if self.module_id_list is None:
            # Nothing was started by us; just send the stop broadcast.
            appman.stop_modules(self.mod)
            self.error_flag = False
            self.start_enabled = True
        #elif (self.multi_task_config == True) and (self.session_running == True):
        elif (self.session_running == True):
            # Flag the wait loop in _start_fired to break out and clean up.
            self.session_running = False
            self.session_interrupted = True
        else:
            self.do_stop_modules()
    def _start_fired(self):
        """Start/Next/Cancel button: drive the whole session state machine.

        Start: ping AppStarters, launch modules, verify with PINGs, hand
        session parameters to the executive, then wait for the session to
        end (or the user to stop it) and shut the modules down.  In
        multi-task mode this repeats once per configured task.
        """
        # advance to the next iteration of the multi_task
        if (self.multi_task_config == True) and (self.session_running == True):
            self.session_running = False
        # cancel starting session
        elif self.session_starting == True:
            self.session_starting = False
        # start new session
        else:
            try:
                self.disable_gui()
                self.appman_busy = True
                self.error_flag = False
                self.start_button_label = 'Cancel'
                self.stop_enabled = False
                #self.kill_enabled = False
                self.session_starting = True
                self.session_num = None
                self.session_interrupted = False
                self.statusbar_text.reset()
                self.update_status("Starting new session...")
                # prepare session parameters
                subject = self.monkeys
                config_list = []
                calib_session_id_list = []
                num_reps_list =[]
                self.update_calib_sessions()
                if self.multi_task_config == True:
                    # Multi-task: config names / calib ids / rep counts come
                    # from comma-separated lists in multi_task.config.
                    parser = SafeConfigParser()
                    parser.read(self.multi_task_file)
                    config = dict(parser.items('config'))
                    config_list = re.sub(r"[\n\r]+", '', config['config_names']).split(',')
                    calib_session_id_list = re.sub(r"[\n\r]+", '', config['calibration_session_ids']).split(',')
                    num_reps_list = re.sub(r"[\n\r]+", '', config['num_reps']).split(',')
                else:
                    # Single task: one config; calibration id derived from the
                    # selected session directory name (e.g. Monkey.DK.00042).
                    config_list.append(self.configs)
                    num_reps_list.append(0)
                    if (self.calib_options[self.calib_opts] == "From Specific Session"):
                        m = re.match("(\w+)\.DK\.[0]+(\d+)", self.calib_session)
                        calib_session_id = int(m.group(2))
                    elif (self.calib_options[self.calib_opts] == "From Previous Session"):
                        m = re.match("(\w+)\.DK\.[0]+(\d+)", self.calib_session_list[-1])
                        calib_session_id = int(m.group(2))
                    else:
                        calib_session_id = 0
                    calib_session_id_list.append(calib_session_id)
                #done = False # put "done" back if you want to repeat forever
                #while not done:
                for c, config in enumerate(config_list):
                    hosts = appman.get_host_modules(config)
                    # ------------------------------------------------------------------------
                    # PING all AppStarter modules
                    # ------------------------------------------------------------------------
                    self.update_status("Pinging AppStarter modules...")
                    host_list = hosts.keys()
                    self.host_id_list = {};
                    print host_list
                    # prep Dragonfly read thread
                    data = {'host_list': host_list, 'host_id_list': self.host_id_list}
                    self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_PING_ACK], data, self.proc_hosts_PING_ACK, 400)
                    self.rd_thread.start()
                    self.send_PING("AppStarter")
                    self.wait_for_dragonfly_thread()
                    result = self.rd_thread.get_result()
                    if result == 0:
                        raise RuntimeError('Did not receive response from AppStarter on hosts: %s' % (', '.join(map(str, host_list))))
                    # ------------------------------------------------------------------------
                    # Send APP_START signal to AppStarter modules
                    # ------------------------------------------------------------------------
                    self.update_status("Starting modules...")
                    # prep Dragonfly read thread
                    data = {'host_id_list': self.host_id_list.keys()}
                    self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_APP_START_COMPLETE], data, self.proc_APP_START_COMPLETE, 300)
                    self.rd_thread.start()
                    self.send_APP_START(config)
                    self.wait_for_dragonfly_thread()
                    result = self.rd_thread.get_result()
                    if result == 0:
                        raise RuntimeError('Did not receive APP_START_COMPLETE from all hosts')
                    # ------------------------------------------------------------------------
                    # PING all modules, make sure they are all running and responding
                    # ------------------------------------------------------------------------
                    self.update_status("Pinging modules..")
                    num_retries = 30
                    self.module_id_list = {};
                    module_list = []
                    for h in hosts.keys():
                        module_list = module_list + hosts[h]
                    for r in reversed(range(num_retries)):
                        # prep Dragonfly read thread
                        data = {'module_list': module_list, 'module_id_list': self.module_id_list}
                        self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_PING_ACK], data, self.proc_modules_PING_ACK, 75)
                        self.rd_thread.start()
                        self.send_PING("*")
                        self.wait_for_dragonfly_thread()
                        result = self.rd_thread.get_result()
                        if result == 0:
                            if r == 0:
                                raise RuntimeError("Did not receive response from modules: %s" % (', '.join(map(str, module_list))))
                        else:
                            break
                    # ------------------------------------------------------------------------
                    # wait for SESSION_CONFIG from executive if it's in appman.conf
                    # ------------------------------------------------------------------------
                    for m in self.module_id_list.values():
                        matched = re.search('^(executive)', m)
                        if matched:
                            break
                    if matched:
                        self.update_status("Waiting for SESSION_CONFIG from executive...")
                        # prep Dragonfly read thread
                        data = {'session_num': ''}
                        self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_SESSION_CONFIG], data, self.proc_SESSION_CONFIG, -1)
                        self.rd_thread.start()
                        # send the executive extra params it needs
                        num_reps = int(num_reps_list[c])
                        calib_sess_id = int(calib_session_id_list[c])
                        mdf = rc.MDF_XM_START_SESSION()
                        mdf.subject_name = subject
                        if num_reps > 0:
                            mdf.num_reps = num_reps
                        if calib_sess_id > 0:
                            mdf.calib_session_id = calib_sess_id
                            mdf.load_calibration = 1
                        msg = CMessage(rc.MT_XM_START_SESSION)
                        copy_to_msg(mdf, msg)
                        self.mod.SendMessage(msg)
                        self.wait_for_dragonfly_thread()
                        result = self.rd_thread.get_result()
                        if result == 0:
                            raise RuntimeError('Did not receive SESSION_CONFIG from executive')
                        self.session_num = self.rd_thread.data['session_num']
                    #
                    # All steps were successful...
                    #
                    appman.write_last(self.configs)
                    if self.multi_task_config == True:
                        self.update_status("Multi task session: Session #%s, Config '%s', NumReps %s" % (self.session_num, config, num_reps_list[c]))
                        self.disable_gui()
                        self.session_running = True
                        #
                        self.appman_busy = False
                        self.start_button_label = 'Next'
                        self.session_starting = False
                        self.stop_enabled = True
                        # wait for session to end or user to hit cancel
                        self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_XM_END_OF_SESSION], None, self.proc_MSG_RECEIVED, -1)
                        self.rd_thread.start()
                        self.wait_for_dragonfly_thread(False)
                        # stop all modules and move onto the next iteration
                        self.do_stop_modules()
                        if self.session_interrupted == True:
                            #done = True
                            self.update_status("Multi task session is terminated")
                            break;
                        # reinit variables for another iteration
                        self.appman_busy = True
                        self.session_running = False
                        self.session_starting = True
                        self.start_button_label = 'Cancel'
                        self.stop_enabled = False
                        #self.kill_enabled = False
                        self.session_num = None
                    else:
                        #done = True
                        if self.session_num is None:
                            self.update_status("Session is running")
                        else:
                            self.update_status("Session #%s is running" % self.session_num)
                        self.disable_gui()
                        self.session_running = True
                        self.start_enabled = False
                        self.appman_busy = False
                        self.session_starting = False
                        self.stop_enabled = True
                        # wait for session to end or user to hit stop
                        self.rd_thread = Dragonfly_Read_Thread(self.mod, [rc.MT_XM_END_OF_SESSION], None, self.proc_MSG_RECEIVED, -1)
                        self.rd_thread.start()
                        self.wait_for_dragonfly_thread(False)
                        if self.session_interrupted == True:
                            #done = True
                            break;
                if self.multi_task_config == True:
                    self.update_status("Multi task session is completed, terminating..")
                else:
                    if self.session_interrupted == False:
                        self.update_status("Session is completed, terminating..")
                self.do_stop_modules()
            except (RuntimeError, ValueError) as e:
                self.update_status("%s" % e)
                self.error_flag = True
                if e.message == "Cancelled":
                    self.update_status("!! Click STOP or KILL to close any modules that already started running ..")
                    self.start_enabled = False
                    self.kill_enabled = True
            finally:
                self.enable_gui()
                self.appman_busy = False
                self.start_button_label = 'Start'
                self.session_starting = False
                self.stop_enabled = True
                self.session_running = False
class MainWindow(wx.Frame):
    """Top-level wx frame hosting the SessionManager traits panel."""
    def __init__(self, mm_ip): #, sm):
        # mm_ip: Dragonfly MessageManager address as 'ip:port'.
        wx.Frame.__init__(self, None, -1, 'Application Manager', wx.DefaultPosition, \
            wx.DefaultSize, wx.CAPTION|wx.CLOSE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER|wx.MINIMIZE_BOX)
        self.sm = SessionManager()
        self.sm.connect(mm_ip)
        self.sm.parent = self
        # Embed the traits view as a sub-panel of this frame.
        self.sm.edit_traits(parent=self, kind='subpanel')
        self.Fit()
        self.Show(True)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def OnClose(self, event):
        """Confirm before closing while a session is running.

        Note the handler never calls event.Skip(), so if the user cancels
        the dialog the close request is effectively vetoed and the window
        stays open.
        """
        if self.sm.session_running == True:
            dlg = wx.MessageDialog(self,
                "There is a session running. Do you really want to exit?",
                "Confirm Exit", wx.OK|wx.CANCEL|wx.ICON_QUESTION)
            result = dlg.ShowModal()
            dlg.Destroy()
            if result == wx.ID_OK:
                self.doClose()
        else:
            self.doClose()
    def doClose(self):
        # Destroy the frame, then drop the Dragonfly connection.
        self.Destroy()
        self.sm.disconnect()
# Command-line entry point: parse the optional MessageManager address and run
# the wx GUI until the main window is closed.
if __name__ == "__main__":
    parser = ArgumentParser(description = "Starts session modules")
    # Optional positional argument: MessageManager 'ip:port' (default local).
    parser.add_argument(type=str, dest='mm_ip', nargs='?', default='127.0.0.1:7111')
    args = parser.parse_args()
    print("Using MM IP=%s" % (args.mm_ip))
    app = wx.PySimpleApp()
    frame = MainWindow(args.mm_ip)
    app.MainLoop()  # blocks until the window closes
| [
"emd@.(none)"
] | emd@.(none) |
f5230e1e1563a7726f439fe2228a5b66f1ec575e | 896da6e62703afc5cbeca27969bf9d3a813b2605 | /yodlee/models/field.py.7a29849846499503f4369b73ee04c454.tmp | 342dd9c3e8f1a7fffef53b5f91a0183f39c7ea8a | [] | no_license | webclinic017/yodlee_client | 4d150f74c8c2255b096f3765c3772c6fb0bcbe81 | 7cbf79d3367fd13da375bf7134718bae51b4e8fe | refs/heads/master | 2023-08-04T01:51:17.850386 | 2021-09-22T21:29:34 | 2021-09-22T21:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,683 | tmp | # coding: utf-8
"""
Yodlee Core APIs
This file describes the Yodlee Platform APIs, using the swagger notation. You can use this swagger file to generate client side SDKs to the Yodlee Platform APIs for many different programming languages. You can generate a client SDK for Python, Java, javascript, PHP or other languages according to your development needs. For more details about our APIs themselves, please refer to https://developer.yodlee.com/Yodlee_API/. # noqa: E501
OpenAPI spec version: 1.1.0
Contact: developer@yodlee.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from yodlee.configuration import Configuration
class Field(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'image': 'str',
'prefix': 'str',
'min_length': 'int',
'value_editable': 'str',
'is_optional': 'bool',
'suffix': 'str',
'type': 'str',
'is_value_provided': 'bool',
'name': 'str',
'id': 'str',
'value': 'str',
'max_length': 'int',
'option': 'list[Option]'
}
attribute_map = {
'image': 'image',
'prefix': 'prefix',
'min_length': 'minLength',
'value_editable': 'valueEditable',
'is_optional': 'isOptional',
'suffix': 'suffix',
'type': 'type',
'is_value_provided': 'isValueProvided',
'name': 'name',
'id': 'id',
'value': 'value',
'max_length': 'maxLength',
'option': 'option'
}
def __init__(self, image=None, prefix=None, min_length=None, value_editable=None, is_optional=None, suffix=None, type=None, is_value_provided=None, name=None, id=None, value=None, max_length=None, option=None, _configuration=None): # noqa: E501
"""Field - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._image = None
self._prefix = None
self._min_length = None
self._value_editable = None
self._is_optional = None
self._suffix = None
self._type = None
self._is_value_provided = None
self._name = None
self._id = None
self._value = None
self._max_length = None
self._option = None
self.discriminator = None
if image is not None:
self.image = image
if prefix is not None:
self.prefix = prefix
if min_length is not None:
self.min_length = min_length
if value_editable is not None:
self.value_editable = value_editable
if is_optional is not None:
self.is_optional = is_optional
if suffix is not None:
self.suffix = suffix
if type is not None:
self.type = type
if is_value_provided is not None:
self.is_value_provided = is_value_provided
if name is not None:
self.name = name
if id is not None:
self.id = id
if value is not None:
self.value = value
if max_length is not None:
self.max_length = max_length
if option is not None:
self.option = option
@property
def image(self):
"""Gets the image of this Field. # noqa: E501
Image displayed at the endsite.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li></ul> # noqa: E501
:return: The image of this Field. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this Field.
Image displayed at the endsite.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li></ul> # noqa: E501
:param image: The image of this Field. # noqa: E501
:type: str
"""
self._image = image
@property
def prefix(self):
"""Gets the prefix of this Field. # noqa: E501
The prefix string that has to be displayed before the field value.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The prefix of this Field. # noqa: E501
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""Sets the prefix of this Field.
The prefix string that has to be displayed before the field value.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param prefix: The prefix of this Field. # noqa: E501
:type: str
"""
self._prefix = prefix
@property
def min_length(self):
"""Gets the min_length of this Field. # noqa: E501
The minimum length of the login form field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The min_length of this Field. # noqa: E501
:rtype: int
"""
return self._min_length
@min_length.setter
def min_length(self, min_length):
"""Sets the min_length of this Field.
The minimum length of the login form field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param min_length: The min_length of this Field. # noqa: E501
:type: int
"""
self._min_length = min_length
@property
def value_editable(self):
"""Gets the value_editable of this Field. # noqa: E501
Indicates whether the field is editable or not.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The value_editable of this Field. # noqa: E501
:rtype: str
"""
return self._value_editable
@value_editable.setter
def value_editable(self, value_editable):
"""Sets the value_editable of this Field.
Indicates whether the field is editable or not.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param value_editable: The value_editable of this Field. # noqa: E501
:type: str
"""
self._value_editable = value_editable
@property
def is_optional(self):
"""Gets the is_optional of this Field. # noqa: E501
Indicates if a field is an optional field or a mandatory field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The is_optional of this Field. # noqa: E501
:rtype: bool
"""
return self._is_optional
@is_optional.setter
def is_optional(self, is_optional):
"""Sets the is_optional of this Field.
Indicates if a field is an optional field or a mandatory field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param is_optional: The is_optional of this Field. # noqa: E501
:type: bool
"""
self._is_optional = is_optional
@property
def suffix(self):
"""Gets the suffix of this Field. # noqa: E501
The suffix string that has to be displayed next to the field value.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The suffix of this Field. # noqa: E501
:rtype: str
"""
return self._suffix
@suffix.setter
def suffix(self, suffix):
"""Sets the suffix of this Field.
The suffix string that has to be displayed next to the field value.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param suffix: The suffix of this Field. # noqa: E501
:type: str
"""
self._suffix = suffix
@property
def type(self):
"""Gets the type of this Field. # noqa: E501
This indicates the display type of the field. For example, text box, image, etc. <br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul><b>Applicable Values</b><br> # noqa: E501
:return: The type of this Field. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Field.
This indicates the display type of the field. For example, text box, image, etc. <br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul><b>Applicable Values</b><br> # noqa: E501
:param type: The type of this Field. # noqa: E501
:type: str
"""
allowed_values = ["text", "password", "option", "checkbox", "radio", "image"] # noqa: E501
if (self._configuration.client_side_validation and
type not in allowed_values):
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def is_value_provided(self):
"""Gets the is_value_provided of this Field. # noqa: E501
Indicates that the answer to the security question already exists in the Yodlee system.Persuading the user to provide the answer to the security question again during the edit-credential flow can be avoided.<br><br><br><b>Endpoints</b>:<ul><li>GET providerAccounts?include=questions</li><li>GET providerAccounts/{providerAccountId}? include=questions</li></ul> # noqa: E501
:return: The is_value_provided of this Field. # noqa: E501
:rtype: bool
"""
return self._is_value_provided
@is_value_provided.setter
def is_value_provided(self, is_value_provided):
"""Sets the is_value_provided of this Field.
Indicates that the answer to the security question already exists in the Yodlee system.Persuading the user to provide the answer to the security question again during the edit-credential flow can be avoided.<br><br><br><b>Endpoints</b>:<ul><li>GET providerAccounts?include=questions</li><li>GET providerAccounts/{providerAccountId}? include=questions</li></ul> # noqa: E501
:param is_value_provided: The is_value_provided of this Field. # noqa: E501
:type: bool
"""
self._is_value_provided = is_value_provided
@property
def name(self):
"""Gets the name of this Field. # noqa: E501
Name of the field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The name of this Field. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Field.
Name of the field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param name: The name of this Field. # noqa: E501
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this Field. # noqa: E501
Identifier for the field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The id of this Field. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Field.
Identifier for the field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param id: The id of this Field. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
id is not None and len(id) > 2147483647):
raise ValueError("Invalid value for `id`, length must be less than or equal to `2147483647`") # noqa: E501
if (self._configuration.client_side_validation and
id is not None and len(id) < 1):
raise ValueError("Invalid value for `id`, length must be greater than or equal to `1`") # noqa: E501
self._id = id
@property
def value(self):
"""Gets the value of this Field. # noqa: E501
Value expected from the user for the field. This will be blank and is expected to be filled and sent back when submitting the login or MFA information.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The value of this Field. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this Field.
Value expected from the user for the field. This will be blank and is expected to be filled and sent back when submitting the login or MFA information.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param value: The value of this Field. # noqa: E501
:type: str
"""
self._value = value
@property
def max_length(self):
"""Gets the max_length of this Field. # noqa: E501
The maximum length of the login form field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The max_length of this Field. # noqa: E501
:rtype: int
"""
return self._max_length
@max_length.setter
def max_length(self, max_length):
"""Sets the max_length of this Field.
The maximum length of the login form field.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param max_length: The max_length of this Field. # noqa: E501
:type: int
"""
self._max_length = max_length
@property
def option(self):
"""Gets the option of this Field. # noqa: E501
Provides the different values that are available for the user to choose. This field is applicable for drop-down or radio field types.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:return: The option of this Field. # noqa: E501
:rtype: list[Option]
"""
return self._option
@option.setter
def option(self, option):
"""Sets the option of this Field.
Provides the different values that are available for the user to choose. This field is applicable for drop-down or radio field types.<br><br><b>Endpoints</b>:<ul><li>GET providerAccounts/{providerAccountId}</li><li>GET providers/{providerId}</li></ul> # noqa: E501
:param option: The option of this Field. # noqa: E501
:type: list[Option]
"""
self._option = option
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Field, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Field):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Field):
return True
return self.to_dict() != other.to_dict()
| [
"jordan-hamill@hotmail.com"
] | jordan-hamill@hotmail.com |
3d45c93d651f9f216389e7eb639666305b5005ab | 712358ba6c93ccc4251b1385792a1b50e2d4a5e9 | /main.py | cbce87ec5ed95465fe57b8603b9772dfed4cf86c | [] | no_license | JW312k/GTest | 4e8fb0429336d3e090b08b2d4b64f84cecec0d75 | 3998bbc61d7fae38b27ca6ac492caeb3b4718119 | refs/heads/main | 2023-02-15T05:34:06.737966 | 2021-01-05T23:55:56 | 2021-01-05T23:55:56 | 325,429,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import tkinter
root = tkinter.Tk()
tkinter.Label(root, text = "Hello World!").pack()##fdsafds
root.mainloop()
#abcde | [
"noreply@github.com"
] | noreply@github.com |
1180c2df653973dfeb4478f34ad3c39fd22cab39 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/workflows/v1beta/workflows-v1beta-py/google/cloud/workflows_v1beta/types/__init__.py | 66aec79fe2b77723f73afe591aafa1edbbb647c0 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .workflows import (
CreateWorkflowRequest,
DeleteWorkflowRequest,
GetWorkflowRequest,
ListWorkflowsRequest,
ListWorkflowsResponse,
OperationMetadata,
UpdateWorkflowRequest,
Workflow,
)
__all__ = (
'CreateWorkflowRequest',
'DeleteWorkflowRequest',
'GetWorkflowRequest',
'ListWorkflowsRequest',
'ListWorkflowsResponse',
'OperationMetadata',
'UpdateWorkflowRequest',
'Workflow',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
45bf48ffd215e02b0acf52e2a9c28c2383657a7c | 3ed85ff2d80d1e562e8dfccf3f197ae2c4719a29 | /login/migrations/0002_auto_20181219_0937.py | 21f84eb22870a139d1a58203018928d3c471812f | [] | no_license | zhoubin0422/mysite | 2d78ebdb25a83a0d4132edb8c0c6d96abba08145 | d2d4343c868d2c113813436e84e860adc2552e34 | refs/heads/master | 2020-04-12T10:03:57.872486 | 2019-04-16T05:21:59 | 2019-04-16T05:21:59 | 162,417,639 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | # Generated by Django 2.1.4 on 2018-12-19 09:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('login', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ConfirmString',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=256)),
('c_time', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': '确认码',
'verbose_name_plural': '确认码',
'ordering': ['-c_time'],
},
),
migrations.AddField(
model_name='user',
name='has_confirmed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='confirmstring',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='login.User'),
),
]
| [
"wanwu@iMac.local"
] | wanwu@iMac.local |
d72980cb2fe713c566d88566215d5efb3856abaa | f724c102966787cad1c588a77908a5c922aab4f0 | /catalog/migrations/0002_bookinstance_borrower.py | de65debcb893f64f2c3fb3ee65d5b2e77ab6cc7d | [] | no_license | erbilsilik/local-library | c198de959dfacd03aebc30cde021219abbafa235 | 2f3da157a7db0a2fc591de42e45fbfbd34b939f1 | refs/heads/master | 2020-04-17T13:23:29.554042 | 2019-01-20T19:46:53 | 2019-01-20T19:46:53 | 166,613,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | # Generated by Django 2.1.5 on 2019-01-20 02:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bookinstance',
name='borrower',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| [
"erbil.silik@kolayik.com"
] | erbil.silik@kolayik.com |
faff0c419d4ae3f2cdb59c26b7f93ca5fb1e738d | f1f23eb040142d8aa4f443a8142e00ad7827655d | /app.py | eac87fa47c133c1204fc03b3a824eb71311948ed | [] | no_license | xfoxawy/Simple-Flask-Restful-Api | 66cc5adc04bff480382c4f9d0a3811e7c4a348d0 | 1f08cac21b00bac1b3c8221aa5bc39700db58c9a | refs/heads/master | 2021-05-03T06:47:02.770571 | 2016-09-22T00:47:04 | 2016-09-22T00:47:04 | 68,850,620 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py | from ConfigParser import ConfigParser
from os import path
from flask import Flask, request
from models import db, User, Achievements, Statistics
from flask_jsonschema import JsonSchema, ValidationError
from flask_migrate import Migrate
from custom_exceptions import AlreadyExists
from helpers import respond
from simple_auth import requires_auth
config = ConfigParser()
config.read('db.conf')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+mysqldb://"+config.get('DB', 'user')+":"+config.get('DB', 'password')+"@"+config.get('DB', 'host')+"/"+config.get('DB', 'db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JSONSCHEMA_DIR'] = path.join(app.root_path, 'schemas')
jsonschema = JsonSchema(app)
db.init_app(app)
migrate = Migrate(app, db)
"""jsonschema error handler"""
@app.errorhandler(ValidationError)
def on_validation_error(e):
return respond({'error': e.message}, 400)
#routes
api = '/api/v1'
@app.route(api+'/user', methods=['POST'])
@requires_auth
@jsonschema.validate('user', 'create')
def createUser():
req = request.get_json()
try:
newuser = User(username=req['username'], email=req['email'])
db.session.add(newuser)
db.session.commit()
return respond({'user': newuser.columns_to_dict()}, 201)
except AlreadyExists as e:
return respond({'error': e.message}, 400)
@app.route(api+'/user/<string:account_id>', methods=['GET'])
@requires_auth
def getUser(account_id):
try:
user = User.query.filter_by(account_id=account_id).first()
response = respond({'user': user.columns_to_dict()}, 200)
except:
response = respond({'error': "user not found"}, 404)
return response
@app.route(api+'/user/<string:account_id>', methods=['PUT'])
@requires_auth
@jsonschema.validate('user', 'update')
def updateUser(account_id):
req = request.get_json()
try:
user = User.query.filter_by(account_id=account_id).first()
user.update(req)
db.session.commit()
return respond({'user': user.columns_to_dict()}, 201)
except AlreadyExists as e:
return respond({"error": e.message}, 400)
@app.route(api+'/user/<string:account_id>/<string:info>', methods=['GET'])
@requires_auth
def getUserInfo(account_id, info):
if info not in ['statistics', 'achievements']:
return respond({"error": "invalid url"}, 404)
else:
if info == 'achievements':
return getUserAchievements(account_id=account_id)
elif info == 'statistics':
return getUserStastistics(account_id=account_id)
def getUserAchievements(account_id):
try:
user = User.query.filter_by(account_id=account_id).options(db.load_only("id")).first()
achievements = Achievements.query.filter_by(user_id=user.id).all()
result = {}
for index in range(len(achievements)):
result[index] = achievements[index].columns_to_dict()
return respond({'achievements': result}, 200)
except:
return respond({'error': "user not found"}, 404)
def getUserStastistics(account_id):
try:
user = User.query.filter_by(account_id=account_id).options(db.load_only("id")).first()
statistics = Statistics.query.filter_by(user_id=user.id).order_by(Statistics.created_at.desc()).first()
res = {"statistics": statistics.columns_to_dict()}
return respond(res, 200)
except:
return respond({"error": "user not found"}, 404)
@app.route(api+'/user/<string:account_id>/achievements', methods=['POST'])
@requires_auth
@jsonschema.validate('achievements', 'create')
def updateUserAchievements(account_id):
try:
req = request.get_json()
user = User.query.filter_by(account_id=account_id).options(db.load_only("id")).first()
achiev = Achievements(user_id=user.id, achievement=req['achievement'])
db.session.add(achiev)
db.session.commit()
resp = {"success": "achievement has been added successfully"}
return respond(resp, 201)
except:
return respond({"error": "user not found"}, 404)
@app.route(api+'/user/<string:account_id>/statistics', methods=['POST'])
@requires_auth
@jsonschema.validate('statistics', 'create')
def updateUserStatistics(account_id):
try:
req = request.get_json()
user = User.query.filter_by(account_id=account_id).options(db.load_only("id")).first()
stats = Statistics(user_id=user.id, wins=req['wins'], losses=req['losses'], score=req['score'], level=req['level'])
db.session.add(stats)
db.session.commit()
resp = {"success": "statistics has been added successfully"}
return respond(resp, 201)
except:
return respond({"error": "user not found"}, 404)
if __name__ == '__main__':
app.run(debug=True)
| [
"xfoxawy@gmail.com"
] | xfoxawy@gmail.com |
6ca15290a3c7833d6b943fbf62dcac382d49c0ca | e820ca03c4aaa4b6e3f6690d63cfe88849730c18 | /experiments/examples_complexityTestByOptimization/test_basicFeatures.py | 1704ccc5044ce22bac654c1ed250f14fa45c7c1a | [] | no_license | milan-rybar/inspiration-triggered-search | 00a90c8089d6b7d771cfe9249c7f059f692ebdc2 | 45ffa8ca1e5689dabae4eeaa1034d0125867610b | refs/heads/master | 2021-01-21T21:40:09.524218 | 2016-04-12T19:42:15 | 2016-04-12T19:42:15 | 42,721,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | #!/usr/bin/python
import sys
sys.path.append("../../scripts/")
from ITS import *
import os
import timeit
import cPickle
def loadDb(filename):
return cPickle.load(open(filename, 'rb'))
# settings
jobID = 0
workingDirectory = ""
def runExperiment(image, imageIndex, trial):
printMessage = "image index: " + str(imageIndex) + ", trial: " + str(trial)
print "START: ", printMessage
# create experiment directory
directory = createNumberedDirectory(os.path.join(workingDirectory, "test_basicFeatures/" + str(imageIndex) + "/trial_"))
# prepare experiment
a = its.AlgorithmTemplate()
a.PhenotypeFunction = its.ImagePhenotype(256)
# feature
a.Features.append(its.DistanceInPixelSpace(image))
# aggregation function
a.Attach(its.Sum())
# statistics about the whole population
allStats = its.InformationForStatistics()
allStats.StoreIndividuals = True
a.Attach(allStats)
# additional features
globalFeatures = its.FeaturesStatistics()
globalFeatures.Features.append(its.NormalizedVariance())
globalFeatures.Features.append(its.MaxAbsLaplacian())
globalFeatures.Features.append(its.Tenengrad())
globalFeatures.Features.append(its.Choppiness())
globalFeatures.Features.append(its.RelaxedSymmetry())
globalFeatures.Features.append(its.GlobalContrastFactor())
globalFeatures.Features.append(its.JpegImageComplexity())
a.Attach(globalFeatures)
# check everything is ready
if not a.Init(): return
startTime = timeit.default_timer()
# run experiment
for i in range(6):
if i > 0:
# save current temporal results
allStats.Save(os.path.join(directory, "temp_statistics"))
globalFeatures.Save(os.path.join(directory, "temp_features"))
a.Population.Save(os.path.join(directory, "temp_population"))
a.RunGenerations(100)
stopTime = timeit.default_timer()
# save information for statistics
allStats.Save(os.path.join(directory, "statistics"))
# save additional features values
globalFeatures.Save(os.path.join(directory, "features"))
# save the last population
a.Population.Save(os.path.join(directory, "lastPopulation"))
# save time of the experiment
with open(os.path.join(directory, "time"), "w") as f:
f.write(str(stopTime - startTime))
# remove temporal files
os.remove(os.path.join(directory, "temp_statistics"))
os.remove(os.path.join(directory, "temp_features"))
os.remove(os.path.join(directory, "temp_population"))
print "END: ", printMessage
if __name__ == '__main__':
dbFilename = "ImagesDatabase_BasicFeatures.dat"
db = loadDb(dbFilename)
jobID = int(os.environ.get("PBS_ARRAYID", "0"))
workingDirectory = os.environ.get("PC2WORK", ".")
imageIndex = jobID % len(db)
runExperiment(db[imageIndex], imageIndex, jobID / len(db) + 1)
| [
"kontakt@milanrybar.cz"
] | kontakt@milanrybar.cz |
024ba811a9a4912ee7e3a9bb618fa97c3b813238 | 7567ed407980771b907d179b705b3cad8606e108 | /python/python_and_functions/currency_convert.py | e2f0c639dc78996931dbff51342b65b14b21726d | [] | no_license | ocb-dev-04/projects.personal.cv | 211679ffb78fda1af9d4636d0ffe04c8b434b33c | 93638cff84cc1d0e31d3b897881552b48311f10b | refs/heads/master | 2020-04-17T23:27:35.121485 | 2019-01-23T05:34:32 | 2019-01-23T05:34:32 | 167,036,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | # currency convert
# a currency converter that asks for a quantity of money,
# asks for the currency for which it will be exchanged,
# gives the option to enter the value of said currency
# and launches the result
# dolar function
def dolarF():
print ("What is dolar price?")
global Dprice
Dprice = input()
Dprice = float(Dprice)
global total
total = cash / Dprice
print ("You have ", total, "dolare(s)")
# euro function
def euroF():
print ("What is euro price?")
global Eprice
Eprice = input()
Eprice = float(Eprice)
global total
total = cash / Eprice
print ("You have ", total, " euro(s)")
# esterlina function
def esterlinaF():
print ("What is esterlina price?")
global ESprice
ESprice = input()
ESprice = float(ESprice)
global total
total = cash / ESprice
print ("You have: ", total, " esterlina(s)")
# main options
def main():
print ("How many cash you have? (DOP)")
global cash
cash = input()
cash = float(cash)
print ("What change you wanna?")
print ("1. Dolar")
print ("2. Euros")
print ("3. Esterlina")
print ("0. Quit")
global change
change = input()
change = int (change)
# filter options
if(change == 1):
# call dollar function
dolarF()
elif(change == 2):
# call euro function
euroF()
elif(change == 3):
# call esterlina function
esterlinaF()
elif(change==0):
print ("Thanks for using currency convert")
else:
print ("This opcion not exist!!!")
print ("Wanna convert again? (yes/no)")
global res
res = input()
res = res.lower()
againConvert()
def againConvert():
while(res!="no"):
main()
# call main function
main() | [
"42700726+ocb-dev-04@users.noreply.github.com"
] | 42700726+ocb-dev-04@users.noreply.github.com |
58270a7c262944cd188186aa67ab970c20b93094 | 7bb9f2e6e8993c6104c1109c1c2714e331c09ac2 | /toolbox/workload/forms.py | e1b7346061cdb45ffd663c20b22b963dac2ebc2f | [] | no_license | oinopion/toolbox | 6a775156cb20660f2d92e1d825e4cbabc9df3be7 | a8df57ee6f2343aaaa512703da74dae5fa3d4cfd | refs/heads/master | 2021-01-19T18:32:54.484006 | 2011-12-22T15:00:48 | 2011-12-22T15:00:48 | 3,033,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # encoding: utf-8
from django import forms
from django.forms import fields
from toolbox.workload.models import Assignment
from workload.grid import date_range_inclusive
class AssignmentForm(forms.ModelForm):
beginnig = fields.DateField(
widget=forms.DateInput(attrs={'class': 'date-picker'}))
end = fields.DateField(
widget=forms.DateInput(attrs={'class': 'date-picker'}))
next = fields.CharField(widget=forms.HiddenInput())
class Meta:
exclude = ['date']
model = Assignment
def save(self, commit=True):
dates = date_range_inclusive(self.cleaned_data['beginnig'],
self.cleaned_data['end'],
exclude_weekends=True)
for date in dates:
Assignment.objects.create(**{
'date': date,
'person': self.cleaned_data['person'],
'project': self.cleaned_data['project'],
})
| [
"tomek@hauru.eu"
] | tomek@hauru.eu |
ae475c352c3eb1b9ae6b620daca1c2efa721b298 | 051393bec70541a8780382bcdbf8b0bc8e048862 | /index.py | bc3f48625426f88f3f7a4b1c02f2f00e87deb37c | [] | no_license | ptq204/CS419-ImageRetrieval | dfb0eb974edd9ebeae71349d77d4fe0785876ee5 | 08276aec1bbc2875f6d2f73b600e847be6fe6e40 | refs/heads/master | 2020-11-30T15:32:19.918800 | 2020-01-04T04:32:55 | 2020-01-04T04:32:55 | 230,430,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | from image_processing.clustering import Cluster
import argparse
import glob
import cv2
import numpy as np
import csv
from collections import defaultdict
import pickle, pprint
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--bovw", required = True,
help = "Path to the bag of visual words directory")
ap.add_argument("-c", "--clusters", required = True,
help = "Path to where the clusters will be stored")
ap.add_argument("-i", "--index", required = True,
help = "Path to where the index will be stored")
args = vars(ap.parse_args())
# Build inverted index
with open(args["clusters"], "rb") as f_read:
cluster = pickle.load(f_read)
f_read.close()
bovw = {}
with open(args["bovw"]) as f:
reader = csv.reader(f)
for row in reader:
features = [int(x) for x in row[1:]]
bovw[row[0]] = features
f.close()
centers = cluster.cluster_centers_
centers_size = len(centers)
index = {}
for label in range(centers_size):
for imageID in bovw:
if label in bovw[imageID]:
if label not in index:
index[label] = [(imageID, bovw[imageID].count(label))]
else:
index[label].append((imageID, bovw[imageID].count(label)))
with open(args["index"], "wb") as f_out:
pickle.dump(index, f_out, pickle.HIGHEST_PROTOCOL)
f_out.close() | [
"quyenpt3@vng.com.vn"
] | quyenpt3@vng.com.vn |
c7a7e5e435b04371bd70f14f21f4ee9042776921 | baaa3a4d6cc9f9e465726ba11647ca9a3e431a7e | /IN1000/Trix 8/rorbu.py | 3a078705eb9f12b50d3fe290938296e121a276a1 | [] | no_license | MehCheniti/Python | 671d430868e1b8eeafabd7ecb1cf6684ab5c6858 | 1e1dc38dc874e03cb248664ff48f3cb2d497c6e5 | refs/heads/master | 2022-05-28T03:05:31.336202 | 2022-03-11T19:55:46 | 2022-03-11T19:55:46 | 206,413,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | from gjest import Gjest
class Rorbu:
def __init__(self):
self._gjester = []
def nyGjest(self, gjestObjekt):
self._gjester.append(gjestObjekt)
for gjester in self._gjester:
gjester.underhold(1)
def fortellVits(self, heltall):
for gjester in self._gjester:
gjester.underhold(heltall)
def hvorMorsomtHarViDet(self):
for gjester in self._gjester:
if gjester.hentUnderholdsningsverdi() / len(self._gjester) < 200:
return "Kjedelig kveld."
elif 200 < gjester.hentUnderholdsningsverdi() / len(self._gjester) < 400:
return "Dette var jo litt gøy."
elif 400 < gjester.hentUnderholdsningsverdi() / len(self._gjester) < 600:
return "Dette var artig!"
elif gjester.hentUnderholdsningsverdi() / len(self._gjester) > 600:
return "Dra på Lopphavet - bi dæ godtar no så gyt e!"
def hentAntallGjester(self):
return len(self._gjester)
| [
"mehdiwx91@hotmail.com"
] | mehdiwx91@hotmail.com |
91aca319ba44c8ce8d330c73d614813e76cc844f | 4e0a4ce19538edf645e073d15c478f074b890748 | /src/problem036/solution036.py | 64c31501ce501d4ab0aaabd68fbbcce7e9b6b0d0 | [] | no_license | smilliken/projecteuler | d0d4aa9a48a64573e94b057ba7e0e554adb0adf2 | 1faf00f9bf49e989a4f05140a50e2dcb4c66cccb | refs/heads/master | 2021-01-19T06:25:36.794876 | 2015-10-28T22:55:03 | 2015-10-28T22:55:03 | 1,786,272 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | def main():
print(sum([n for n in xrange(1, 1000001)
if str(n) == str(n)[::-1] and bin(n)[2:] == bin(n)[2:][::-1]]))
if __name__ == '__main__':
main()
| [
"scott@deltaex.com"
] | scott@deltaex.com |
328177def650b351652fbc6438abfbe85888e7bc | eb0cf6fdfbf36623e058f609b12eea291a3629eb | /comment/views.py | 473efdb1b1021ed4a98efb2d53bb56d48bf82505 | [] | no_license | spygg/liusblog | a6a951bb021cb1a4e39558487048ffe2da18b453 | 7a4cae60575ab321ad24b2ded35b5070fae879fa | refs/heads/master | 2021-01-20T11:00:59.128494 | 2018-09-29T14:23:07 | 2018-09-29T14:23:07 | 83,941,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | # from django.shortcuts import render, redirect
# from django.contrib.contenttypes.fields import GenericForeignKey
# from django.contrib.contenttypes.models import ContentType
from comment.forms import CommentForm
from .models import Comment
# from django.urls import reverse
from django.http import JsonResponse
from comment.templatetags.comment_tag import get_comment_number, get_comment_user_number
from django.contrib.contenttypes.models import ContentType
def comment(request):
# refer = request.META.get('HTTP_REFERER', reverse('home'))
commentForm = CommentForm(request.POST, user=request.user)
data = {}
if commentForm.is_valid():
comm = Comment()
comm.user = commentForm.cleaned_data['user']
comm.content = commentForm.cleaned_data['content']
# 获取对象
comm.content_object = commentForm.cleaned_data['content_object']
comm.top_comment_id = commentForm.cleaned_data['top_comment_id']
comm.root_object_id = commentForm.cleaned_data['object_id']
comm.save()
data['status'] = 'SUCCESS'
data['username'] = comm.user.username
data['created_time'] = comm.created_time.strftime(
'%Y-%m-%d, %H:%M:%S')
data['content'] = comm.content
data['top_comment_id'] = comm.top_comment_id
data['pk'] = comm.pk
try:
data['reply_username'] = Comment.objects.get(
id=comm.object_id).user.username
except:
data['reply_username'] = ''
# 获取评论文章
content_type = commentForm.cleaned_data['content_type']
object_id = commentForm.cleaned_data['object_id']
model_class = ContentType.objects.get(model=content_type).model_class()
model_obj = model_class.objects.get(pk=object_id)
data['comment_user_number'] = get_comment_user_number(model_obj)
data['comment_number'] = get_comment_number(model_obj)
else:
data['status'] = 'ERROR'
data['message'] = list(commentForm.errors.values())[0]
return JsonResponse(data)
| [
"behindmeisyou@gmail.com"
] | behindmeisyou@gmail.com |
c33f29d71bbf135ea10ec41aa87c6f4a64b32f7e | 62179a165ec620ba967dbc20016e890978fbff50 | /tests/torch/modules/seq2seq/seq2seq_base.py | ebe3e13913b31bd5beac08c8b2640c3364faf5eb | [
"Apache-2.0"
] | permissive | openvinotoolkit/nncf | 91fcf153a96f85da166aacb7a70ca4941e4ba4a4 | c027c8b43c4865d46b8de01d8350dd338ec5a874 | refs/heads/develop | 2023-08-24T11:25:05.704499 | 2023-08-23T14:44:05 | 2023-08-23T14:44:05 | 263,687,600 | 558 | 157 | Apache-2.0 | 2023-09-14T17:06:41 | 2020-05-13T16:41:05 | Python | UTF-8 | Python | false | false | 3,173 | py | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from torch.nn.functional import log_softmax
PAD = 0
class Seq2Seq(nn.Module):
"""
Generic Seq2Seq module, with an encoder and a decoder.
"""
def __init__(self, encoder=None, decoder=None, batch_first=False):
"""
Constructor for the Seq2Seq module.
:param encoder: encoder module
:param decoder: decoder module
:param batch_first: if True the model uses (batch, seq, feature)
tensors, if false the model uses (seq, batch, feature) tensors
"""
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.batch_first = batch_first
def encode(self, inputs, lengths):
"""
Applies the encoder to inputs with a given input sequence lengths.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param lengths: vector with sequence lengths (excluding padding)
"""
return self.encoder(inputs, lengths)
def decode(self, inputs, context, inference=False):
"""
Applies the decoder to inputs, given the context from the encoder.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param context: context from the encoder
:param inference: if True inference mode, if False training mode
"""
return self.decoder(inputs, context, inference)
def generate(self, inputs, context, beam_size):
"""
Autoregressive generator, works with SequenceGenerator class.
Executes decoder (in inference mode), applies log_softmax and topK for
inference with beam search decoding.
:param inputs: tensor with inputs to the decoder
:param context: context from the encoder
:param beam_size: beam size for the generator
returns: (words, logprobs, scores, new_context)
words: indices of topK tokens
logprobs: log probabilities of topK tokens
scores: scores from the attention module (for coverage penalty)
new_context: new decoder context, includes new hidden states for
decoder RNN cells
"""
logits, scores, new_context = self.decode(inputs, context, True)
logprobs = log_softmax(logits, dim=-1)
logprobs, words = logprobs.topk(beam_size, dim=-1)
return words, logprobs, scores, new_context
def forward(self, input_encoder, input_enc_len, input_decoder):
raise NotImplementedError
| [
"noreply@github.com"
] | noreply@github.com |
1fe3e745416e5576ce7054c230de3a7a3ec43e8c | e809e86044044c1b40877f25629244443323bc14 | /7.py | 77af4f1947b5fb48b6d612e2d1c99a5ecf3b5ab7 | [] | no_license | LHR13/Holiday | 0f13e2c81096a476310435c6ef7e739105e1ba7c | 307e02cb1a6ff8bced2b18f864753ce99957dd39 | refs/heads/master | 2020-04-17T07:03:43.577724 | 2019-01-31T13:46:02 | 2019-01-31T13:46:02 | 166,351,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34 | py | a=[1,2,3,4,5]
b=a.copy()
print(b)
| [
"1309184697@qq.com"
] | 1309184697@qq.com |
03756a7acb99e8907d2bf21186f702c06e303a3b | 731c136992f98cab61508b9e5661afbd491962b6 | /Sort/Sort.py | 2f1338d9504c5dc5d5304e321cc3d067484b1d45 | [] | no_license | yangze01/py_LeetCode | c311235dbe1053c68694aea04fe29296ccb3a6e2 | 2b7213d00e2e482379a2f160b0d8e267a7951599 | refs/heads/master | 2021-01-20T06:03:53.852486 | 2017-12-08T01:30:26 | 2017-12-08T01:30:26 | 101,479,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,980 | py | #coding=utf8
import sys
"""
算法复习开始: 八大排序算法
"""
def bubble_sort(list):
"""
冒泡排序
:param list:
:return:
"""
length = len(list)
# 第一级遍历
for index in range(length):
# 第二级遍历
for j in range(1, length - index):
if list[j-1] > list[j]:
# 交换两者数据
list[j-1], list[j] = list[j], list[j-1]
return list
def bubble_sort_flag(list):
"""
改进冒泡排序,如果已经是顺序的,则不用进行排序,直接返回结果
:param list:
:return:
"""
length = len(list)
for index in range(length):
# 标志位
flag = True
for j in range(1, length - index):
if list[j - 1] > list[j]:
list[j - 1], list[j] = list[j], list[j - 1]
flag = False
if flag:
return list
return list
def selection_sort(list):
"""
选择排序,每次将序列中最小或者最大的元素找出来,
然后放在序列的起始位置
:param list:
:return:
"""
n = len(list)
for i in range(0, n):
min_index = i
for j in range(i + 1, n):
if list[j] < list[min_index]:
min_index = j
list[min_index], list[i] = list[i], list[min_index]
return list
def insert_sort(list):
"""
插入排序,通过构建有序序列,对于未排序的数据,
在已排序序列中从后向前扫描,找到相应位置并插入。
步骤
1. 从第一个元素开始,该元素可以认为已经被排序
2. 取出下一个元素,在已经排序的序列中从后向前扫描
3. 如果该元素(已排序)大于新元素,将该元素移到下一位置
4. 重复步骤3, 直到找到已排序的元素小于或者等于新元素的位置
5. 将新元素插入到该位置后
6. 重复步骤2-5
:param list:
:return:
"""
n = len(list)
for i in range(1, n):
# 后一个元素跟前一个元素比较
# 如果比前一个小
if list[i] < list[i - 1]:
# 将这个数取出
temp = list[i]
# 保存下标
index = i
# 从后往前一次比较每个元素
for j in range(i - 1, -1, -1):
# 和比取出元素大的元素交换
if list[j] > temp:
list[j + 1] = list[j]
index = j
else:
break
# 插入元素
list[index] = temp
return list
def insert_sort2(lists):
"""
插入排序
:param lists:
:return:
"""
# 插入排序
count = len(lists)
# 每次遍历已经排好序的部分,生成结果。
for i in range(1, count):
# 记录当前元素
key = lists[i]
j = i - 1
# 从已经排好序的元素开始,遍历当前元素应该插入到哪一个
while j >= 0:
if lists[j] > key:
lists[j + 1] = lists[j]
lists[j] = key
j -= 1
return lists
# def insert_sort3(lists):
# count = len(lists)
# for i in range(1, count):
# # 记录当前元素
# key = lists[i]
# j = i - 1
# while j >= 0:
# if lists[j] > key:
# lists[j+1] = lists[j]
# lists[j] = key
# j -= 1
# return lists
def shell_sort(lists):
"""
希尔排序,每次以一定的步长(跳过等距的数)进行排序,直至步长为1.
:param list:
:return:
"""
n = len(lists)
# 初始步长
gap = round(n/2)
while gap > 0:
for i in range(gap, n):
# 每个步长进行插入排序
temp = lists[i]
j = i
# 插入排序
# while j >= gap and list[j - gap] > temp:
# list[j] = list[j - gap]
while j >= gap and lists[j - gap] > temp:
lists[j] = lists[j - gap]
j -= gap
lists[j] = temp
# 得到新的步长
gap = round(gap / 2)
return lists
# 递归方法实现归并排序
def merge_sort(lists):
# 认为长度不大于1的数列是有序的
if len(lists) <= 1:
return lists
# 二分列表
middle = len(lists) // 2
left = merge_sort(lists[:middle])
right = merge_sort(lists[middle:])
# 最后一次合并
return merge(left, right)
# 合并
def merge(left, right):
l,r=0,0
result=[]
while l<len(left) and r<len(right):
if left[l] <right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
# print(l,r)
result += left[l:]
result += right[r:]
return result
# 迭代方法实现归并排序
def merge_sort2(lists):
length = len(lists)
step = 1
# 步长为1, 2, 4, 8, ..., 一直合并下去
while step <= length:
offset = step << 1
for index in range(0, length, offset):
merge2(lists, index, min(index+step, length-1), min(index+offset-1, length-1))
step = offset
def merge2(lists, head1, head2, tail2):
# 合并两个排好序的区间:[head1, tail1]与[head2, tail2]
tail1 = head2 - 1
start = head1
index = 0
tmp = [0] * (tail2-head1+1)
while head1 <= tail1 or head2 <= tail2:
if head1 > tail1:
tmp[index] = lists[head2]
elif head2 > tail2:
tmp[index] = lists[head1]
else:
if lists[head1] <= lists[head2]:
tmp[index] = lists[head1]
else:
tmp[index] = lists[head2]
if head1 <= tail1 and tmp[index] == lists[head1]:
head1 += 1
else:
head2 += 1
index += 1
for i in range(start, tail2 + 1):
lists[i] = tmp[i-start]
# 快速排序 递归
def quick_sort(lists, left, right):
if left >= right:
return lists
key = lists[left]
low = left
high = right
while left < right:
while left < right and lists[right] >= key:
right -= 1
lists[left] = lists[right]
while left < right and lists[left] <= key:
left += 1
lists[right] = lists[left]
lists[right] = key
quick_sort(lists, low, left - 1)
quick_sort(lists, left + 1, high)
return lists
# 快速排序
def quick_sort2(lists):
less = []
pivotList = []
more = []
# 递归出口
if len(lists) <= 1:
return lists
else:
# 第一个值为基准
pivot = lists[0]
for i in lists:
# 将比base小的值放到less里面
if i < pivot:
less.append(i)
# 将比base大的值放到More里面
elif i > pivot:
more.append(i)
else:
pivotList.append(i)
less = quick_sort2(less)
more = quick_sort2(more)
return less + pivotList + more
def adjust_heap(lists, i, size):
# print(1)
lchild = 2 * i + 1 # i的左孩子节点序号
rchild = 2 * i + 2 # i的右孩子节点序号
max = i
if i <= size/2:
if lchild < size and lists[lchild] > lists[max]:
max = lchild
if rchild < size and lists[rchild] > lists[max]:
max = rchild
if max != i:
lists[i], lists[max] = lists[max], lists[i]
adjust_heap(lists, max, size) # 避免调整之后以max为父节点的子树不是堆
def build_heap(lists, size):
for i in range(0, (int(size/2)))[::-1]:
adjust_heap(lists, i, size)
def heap_sort(lists):
size = len(lists)
build_heap(lists, size)
for i in range(0, size)[::-1]:
lists[0], lists[i] = lists[i], lists[0]
adjust_heap(lists, 0, i)
return lists
if __name__ == "__main__":
# print(1)
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("bubble_sort")
print(bubble_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("bubble_sort2")
print(bubble_sort_flag(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("selection sort")
print(bubble_sort_flag(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("insert sort")
print(insert_sort2(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("shell sort")
print(shell_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("merge sort")
print(merge_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("merge sort2")
merge_sort2(lists)
print(lists)
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("quick sort")
print(quick_sort(lists, 0, len(lists)-1))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("heap sort")
print(heap_sort(lists))
| [
"858848101@qq.com"
] | 858848101@qq.com |
fb02e325830bbbcc9b696029cfd80d733729e0f4 | 7e835e6323d5f03ad5ca6e97ad74858905a58afd | /models/efficient.py | 2715aef7632c8432400d58aece54ba3e9ba2ff7a | [] | no_license | shatalinra/dnn-watermark-detection | b43c67c2fe5c0e00e0fe4c7be48bc317b124e00d | 02d5d24767ce4929b05737cf3f53f197148a0f9a | refs/heads/main | 2023-06-09T18:04:54.952359 | 2021-07-04T05:03:17 | 2021-07-04T05:03:17 | 371,747,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | import tensorflow as tf
import logging
from pathlib import Path
backbone = tf.keras.applications.EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
backbone.trainable = False
class StopIfFileExists(tf.keras.callbacks.Callback):
"""Callback that terminates training when certain file existss."""
def __init__(self, filepath):
self._filepath = Path(filepath)
def on_batch_end(self, batch, logs=None):
if self._filepath.is_file():
self.model.stop_training = True
def preprocess(image):
image = tf.image.resize(image, (224, 224))
images = tf.expand_dims(image, 0)
outputs = backbone(255 * images, training = False)
return outputs[0]
def train_model(dataset):
model = tf.keras.Sequential(name = "efficient")
model.add(tf.keras.layers.Dense(320, input_shape=(7,7,1280))) # our data should be efficient embedding
model.add(tf.keras.layers.Activation('sigmoid'))
#model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Dense(80))
model.add(tf.keras.layers.Activation('sigmoid'))
#model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Dense(20))
model.add(tf.keras.layers.Activation('sigmoid'))
#model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Dense(5))
model.add(tf.keras.layers.Activation('sigmoid'))
# model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Dense(1))
model.add(tf.keras.layers.Activation('sigmoid'))
model.add(tf.keras.layers.GlobalMaxPool2D(name="pool")) # pick the location with most propability for watermark
model.summary(print_fn=lambda x: logging.info(x))
batch_size = 32
dataset_size = 81280 # not known apriori, so update it on changing training data size
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(0.001, decay_steps=5*dataset_size/batch_size, decay_rate=0.95, staircase=True)
model.compile(loss=tf.losses.BinaryCrossentropy(), optimizer=tf.optimizers.Adam(learning_rate = lr_schedule))
stop = StopIfFileExists('stop.txt')
history = model.fit(dataset.batch(batch_size), epochs=200, verbose=2, callbacks=[stop])
return model, history.history["loss"]
| [
"shatalinra@gmail.com"
] | shatalinra@gmail.com |
52e3890295583481ef97f847efae1012461483f2 | 37b79725b9bdeeeea097c50de242af8c78223d52 | /ansible/roles/db/molecule/default/tests/test_default.py | 0869703afead515317c4812b3edc5a6f149e839e | [
"BSD-3-Clause",
"MIT"
] | permissive | Otus-DevOps-2021-06/pawsy-foxicute_infra | c5a5d18e974e667d15399e6bb56919ef227f8a86 | af850b598086bfe3049aefcb3f7e6db8bbd257fc | refs/heads/main | 2023-06-11T03:31:15.806332 | 2021-06-25T12:09:31 | 2021-06-25T12:09:31 | 377,172,266 | 0 | 0 | MIT | 2021-06-25T12:09:33 | 2021-06-15T13:27:32 | HCL | UTF-8 | Python | false | false | 922 | py | """Role testing files using testinfra."""
def test_hosts_file(host):
"""Validate /etc/hosts file."""
f = host.file("/etc/hosts")
assert f.exists
assert f.user == "root"
assert f.group == "root"
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
mongo = host.service("mongod")
assert mongo.is_running
assert mongo.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
config_file = host.file('/etc/mongod.conf')
assert config_file.contains('bindIp: 0.0.0.0')
assert config_file.is_file
# Тестрирование порта
def test_http(host):
mongod = host.addr("0.0.0.0")
assert mongod.port(27017).is_reachable
| [
"wily.mister.fox@gmail.com"
] | wily.mister.fox@gmail.com |
522be82d7acccbcab57ccd20f972dcc97a489fd3 | c44379e9a48a5f8eea6b73e205871d0965b18abe | /Digital_Recognizer-Keras.py | c15d34912ddfd221daf7c04450ce89e67b228dbe | [] | no_license | mehdi-saeedi/Machine_Learning_Py | 4d1f69238dc5319b498a89b5e298577b31e6b556 | 55316cac316187a5e137a9e3d0b0f468155502c7 | refs/heads/master | 2021-08-05T22:38:30.159524 | 2017-11-01T14:14:23 | 2017-11-01T14:14:23 | 106,041,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,749 | py | # Create your first MLP in Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy
import pandas as pd
from sklearn.model_selection import train_test_split
from keras import optimizers
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from keras.utils.np_utils import to_categorical
from keras.optimizers import RMSprop
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import regularizers
from sklearn import preprocessing
from keras.optimizers import Adam
import Augmentor
# fix random seed for reproducibility
np.random.seed(7)
directory = '../../Datasets/Digital_Recognizer/'
train_input = pd.read_csv(directory + 'train.csv')
train_target = train_input['label']
train_input.drop(['label'], axis=1, inplace=True)
train_input = train_input.astype('float32')
train_input = train_input / 255.
train_target = to_categorical(train_target, 10)
print(train_target.shape)
train_input = train_input.values
batch_size = 256
epochs = 100
dropout = 0.05
num_classes = 10
X_train, X_cv, y_train, y_cv = train_test_split(train_input, train_target, test_size=0.20, random_state=0)
X_train = X_train.reshape(-1, 28, 28, 1)
X_cv = X_cv.reshape(-1, 28, 28, 1)
input_shape = (28, 28, 1)
conv_model = Sequential()
conv_model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu',
input_shape=input_shape))
conv_model.add(BatchNormalization())
conv_model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
conv_model.add(BatchNormalization())
conv_model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
conv_model.add(BatchNormalization())
conv_model.add(MaxPooling2D(strides=(2, 2)))
conv_model.add(Dropout(dropout))
conv_model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
conv_model.add(BatchNormalization())
conv_model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
conv_model.add(BatchNormalization())
conv_model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
conv_model.add(BatchNormalization())
conv_model.add(MaxPooling2D(strides=(2, 2)))
conv_model.add(Dropout(dropout))
conv_model.add(Flatten())
conv_model.add(Dense(512, activation='relu'))
conv_model.add(Dropout(dropout))
conv_model.add(Dense(1024, activation='relu'))
conv_model.add(Dropout(dropout))
conv_model.add(Dense(10, activation='softmax'))
# print summary of the conv_model
conv_model.summary()
datagen = ImageDataGenerator(zoom_range = 0.1,
height_shift_range = 0.1,
width_shift_range = 0.1,
rotation_range = 10)
# Compile conv_model
conv_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4), metrics=['accuracy']) #RMSprop()
hist = conv_model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
steps_per_epoch=len(X_train)//batch_size,
epochs=epochs,
verbose=2, #1 for ETA, 0 for silent
validation_data=(X_cv,y_cv))
# hist = conv_model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False,
# validation_data=(X_cv, y_cv))
# evaluate the conv_model
scores = conv_model.evaluate(X_train, y_train)
print("\n%s: %.2f%%" % (conv_model.metrics_names[1], scores[1] * 100))
# evaluate the conv_model
scores = conv_model.evaluate(X_cv, y_cv)
print("\n%s: %.2f%%" % (conv_model.metrics_names[1], scores[1] * 100))
# check the wrong images
p_cv = np.round(conv_model.predict(X_cv)).argmax(axis=1)
wrong_pixels = X_cv[p_cv != y_cv.argmax(axis=1)]
wrong_y = conv_model.predict(wrong_pixels)
print('[CV]: number of wrong items is:', len(wrong_pixels), 'out of', len(X_cv))
# evaluate test data
test_input = pd.read_csv(directory + 'test.csv')
test_input = test_input.astype('float32')
test_input = test_input.values
test_input = test_input / 255.
test_input = test_input.reshape(-1, 28, 28, 1)
p_test = np.round(conv_model.predict(test_input)).argmax(axis=1)
# write to a file
out_df = pd.DataFrame(
{'ImageId': np.arange(1, test_input.shape[0] + 1), 'Label': p_test}).to_csv(
'out.csv', header=True, index=False)
#visually check 100 wrong cases
f, axarr = plt.subplots(10, 20)
for i in range(0, 10):
for j in range(0, 10):
idx = np.random.randint(0, wrong_pixels.shape[0])
axarr[i][j].imshow(wrong_pixels[idx, :].reshape(28, 28), cmap=cm.Greys_r)
tit = str(wrong_y[idx, :].argmax())
axarr[i][j + 10].text(0.5, 0.5, tit)
axarr[i][j].axis('off')
axarr[i][j + 10].axis('off')
plt.show()
| [
"32145191+mehdi-saeedi@users.noreply.github.com"
] | 32145191+mehdi-saeedi@users.noreply.github.com |
7d83694389e19e9af3efec1f4b2681bc8a1a7b4c | 3dabf0a2da69806a0299d0c5d93d9ac4255b3868 | /VNL/PythonScripts/Scripts/ProjectionPlotter.py | 44c7ac1bbddfb5ab449aa4bae89ed500112e6f99 | [] | no_license | rwiuff/Nanomechanics-for-graphene-membranes | b55945f95d87e2aad95aa695d79d3a93d11f0214 | b7360f43ea7e7e0dcedc5f910636a81e5dc1eb2e | refs/heads/master | 2021-09-12T15:47:21.866702 | 2018-04-18T07:04:55 | 2018-04-18T07:04:55 | 104,201,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,835 | py | # =====================================================================
#
# By Frederik Grunnet Kristensen, Christoffer Vendelbo Sorensen
# & Rasmus Wiuff, s163977@student.dtu.dk
#
# Script for creating projected mode plots and trendlines
#
# =====================================================================
# -------------------------------------------------------------
# Load libraries, ProjectedPhononBandsDisplacement and switch
# matplotlib backend for HPC compatibility
# -------------------------------------------------------------
import matplotlib.pyplot as plt
from NanoLanguage import *
from pylab import *
import pickle
import numpy as np
import numpy.polynomial.polynomial as poly
from scipy.optimize import curve_fit
# -------------------------------------------------------------
# Create data arrays
# -------------------------------------------------------------
myfile = np.array([])
configuration = np.array([])
dynamical_matrix = np.array([])
n_modes = np.array([])
# -------------------------------------------------------------
# Load data from dynamical matrices
# -------------------------------------------------------------
nof = 10
for i in range(nof + 1):
if i == nof:
break
elif i >= 9:
# Choose file
myfile = np.append(myfile,
'{}nmDynamicalMatrix.hdf5'.format(i + 1))
else:
myfile = np.append(myfile,
'0{}nmDynamicalMatrix.hdf5'.format(i + 1))
# Load configuration with calculator
configuration = np.append(configuration, nlread(
myfile[i], BulkConfiguration)[-1])
# Load DynamicalMatrix
dynamical_matrix = np.append(
dynamical_matrix, nlread(myfile[i], DynamicalMatrix)[-1])
# Vibrational state to project onto
n_modes = np.append(n_modes,
(len(configuration[i]) -
len(dynamical_matrix[i].constraints())) * 3)
# projection_vibration = numpy.zeros((n_modes),dtype=float)
# Display loaded matrices and modes
print('+-------------------------------------------------------+')
for i in range(nof):
pstring = "| The File {} contains {:4d} modes |".format(
myfile[i], int(n_modes[i]))
print(pstring)
print('+-------------------------------------------------------+')
# -------------------------------------------------------------
# Make projection vectors
# -------------------------------------------------------------
projection_vibration = {}
for i in range(nof):
constrained = dynamical_matrix[i].constraints()
tmp = numpy.zeros((n_modes[i]), dtype=float)
tmp[2::3] = 1 # project on z-motion
projection_vibration[i] = tmp
# print 'Projecting on: ', projection_vibration[i], n_modes[i]
# -------------------------------------------------------------
# Set qpoint
# -------------------------------------------------------------
fractional_qpoints = [0.0, 0.0, 0.0]
# -------------------------------------------------------------
# Load pickled data
# -------------------------------------------------------------
# Create data dictionaries
qpoints = {}
frequency_list = {}
projection = {}
anti_projection = {}
RMS = {}
# Load data into dictionaries
print("+=========================+")
print("| Loading datafiles |")
print("|-------------------------|")
with open('qpoints.pickle', 'rb') as handle:
qpoints = pickle.load(handle)
print("| (1/5): Q-points |")
with open('frequency_list.pickle', 'rb') as handle:
frequency_list = pickle.load(handle)
print("| (2/5): Frequency list |")
with open('projection.pickle', 'rb') as handle:
projection = pickle.load(handle)
print("| (3/5): Projection |")
with open('anti_projection.pickle', 'rb') as handle:
anti_projection = pickle.load(handle)
print("| (4/5): Anti projection |")
with open('RMS.pickle', 'rb') as handle:
RMS = pickle.load(handle)
print("| (5/5): RMS |")
print("+=========================+")
# -------------------------------------------------------------
# Define colormaps
# -------------------------------------------------------------
cmap, norm = cm.get_cmap('brg'), None # 'hot','brg','seismic'
# -------------------------------------------------------------
# Plot projections
# -------------------------------------------------------------
figure()
# plot with color and without/with variable point size
myscale = {}
for i in range(nof):
myscale[i] = projection[i] # /numpy.max(projection)
for i in range(nof):
# print numpy.max(projection[i])
plotmode = 1
if plotmode == 0:
scatter(numpy.repeat(np.array([i + 1]), n_modes[i]),
frequency_list[i].inUnitsOf(
eV).flatten() * 1000, c=myscale[i], s=150, marker='o',
edgecolor='none', cmap=cmap, norm=norm)
elif plotmode == 1:
scatter(numpy.repeat(np.array([i + 1]), n_modes[i]),
frequency_list[i].inUnitsOf(eV).flatten() * 1000,
c=myscale[i], s=15 + myscale[i] * 120, marker='o',
edgecolor='none', cmap=cmap, norm=norm)
# colorbar
cb = colorbar() # colorbar(ticks=[-1, 0, 1], orientation='vertical')
cb.set_label('projection', fontsize=12)
tick_locator = matplotlib.ticker.MaxNLocator(nbins=10, prune=None)
cb.locator = tick_locator
cb.update_ticks()
# Set x-ticks
kticks = [w * 1 for w in range(nof + 2)]
ticklabels = ['%i nm' % w for w in range(nof + 1)]
xticks(kticks, ticklabels)
grid(kticks)
plt.subplots_adjust(left=None, bottom=None, right=0.97, top=None,
wspace=None, hspace=None)
# -------------------------------------------------------------
# Fit 1/r^2 plot for mode 0
# -------------------------------------------------------------
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.zeros(10)
for i in range(nof):
y[i] = np.sort(frequency_list[i].inUnitsOf(
eV).flatten() * 1000)[0]
# Define reciprocal function
def reciproc(x, a, n):
return a * (1 / (x**n))
# Fit reciprocal curve
popt, pcov = curve_fit(reciproc, x, y, p0=(1, 2))
perr = np.sqrt(np.diag(pcov))
x_rec0 = np.linspace(x[0], x[-1], num=len(x) * 10)
y_rec0 = reciproc(x_rec0, *popt)
# Create legend label
rec0label = r'$a={:.3f}\pm{:.3f}$' '\n' r'$n={:.3f}\pm{:.3f}$'.format(
popt[0], perr[0], popt[1], perr[1])
# -------------------------------------------------------------
# Fit 1/r^2 plot for mode 1
# -------------------------------------------------------------
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.zeros(10)
for i in range(nof):
y[i] = np.sort(frequency_list[i].inUnitsOf(
eV).flatten() * 1000)[1]
# Define reciprocal function
def reciproc(x, a, n):
return a * (1 / (x**n))
# Fit reciprocal curve
popt, pcov = curve_fit(reciproc, x, y, p0=(1, 2))
perr = np.sqrt(np.diag(pcov))
x_rec1 = np.linspace(x[0], x[-1], num=len(x) * 10)
y_rec1 = reciproc(x_rec1, *popt)
# Create legend label
rec1label = r'$a={:.3f}\pm{:.3f}$' '\n' r'$n={:.3f}\pm{:.3f}$'.format(
popt[0], perr[0], popt[1], perr[1])
# -------------------------------------------------------------
# Show or save plots
# -------------------------------------------------------------
# Print menu
rec = r'$a \cdot \frac{1}{x^n}$'
showsave = 0
menu = np.array(["Show plot ",
"Show zoomed and fitted plot",
"Save plot ",
"Save zoomed and fitted plot"])
while showsave == 0:
print("+================================+")
print("| Show or save plot? |")
print("|--------------------------------|")
for i in range(len(menu)):
print("| {:d}. {:s} |".format(i + 1, menu[i]))
print("+================================+")
while not (np.any(showsave == np.arange(len(menu)) + 1)):
while True:
try:
showsave = float(input("Choose an option: "))
break
except ValueError:
print("Only integers accepted")
pass
except NameError:
print("Only integers accepted")
except TypeError:
print("Only integers accepted")
# Define graph ranges
ymin, ymax = -10, 20
xmin, xmax = 0.5326636405016747, 10.565891401841617
xlim(xmin, xmax)
# Show plot
if showsave == 1:
ylabel('$\omega$ [meV]')
ymin, ymax = -25, 225
ylim(ymin, ymax)
plt.show()
# Show fitted plot
elif showsave == 2:
ylim(ymin, ymax)
ylabel('$\omega$ [meV]')
ax = plt.gca()
ax.plot(x_rec0, y_rec0, 'b-', label=rec0label)
ax.plot(x_rec1, y_rec1, 'r-', label=rec1label)
ax.legend(loc=3, title=rec)
plt.show()
# Save plot
elif showsave == 3:
ylabel('$\omega$ [meV]')
print("+=====================================+")
print("| Saving plots |")
print("|-------------------------------------|")
print("| (1/2): FrequencyModeProjections.eps |")
savefig('FrequencyModeProjections.eps')
print("| (2/2): FrequencyModeProjections.png |")
savefig('FrequencyModeProjections.png')
print("+=====================================+")
# Save fitted plot
elif showsave == 4:
ylim(ymin, ymax)
ylabel('$\omega$ [meV]')
ax = plt.gca()
ax.plot(x_rec0, y_rec0, 'b-', label=rec0label)
ax.plot(x_rec1, y_rec1, 'r-', label=rec1label)
ax.legend(loc=3, title=rec)
print("+============================================+")
print("| Saving plots |")
print("|--------------------------------------------|")
print("| (1/2): FrequencyModeProjectionsZoomFit.eps |")
savefig('FrequencyModeProjectionsZoomFit.eps')
print("| (2/2): FrequencyModeProjectionsZoomFit.png |")
savefig('FrequencyModeProjectionsZoomFit.png')
print("+============================================+")
| [
"rwiuff@gmail.com"
] | rwiuff@gmail.com |
6c7f03635b074646f1d5fd9c357980c3721e9310 | b7ac30f1c28718a07fbf6353513453314b86ee63 | /app/models.py | 84332c05e43fa21bb8e983d81c242f63f8226cb8 | [] | no_license | yash2608-code/booklib | 568b358b3d4fb2b9ce88aba4c58a7c5b14ffadd8 | 4bdaeb4e7bd00bc044efb80bd4cff93c1a7fb7ce | refs/heads/master | 2023-06-12T20:32:10.569970 | 2021-07-01T06:52:43 | 2021-07-01T06:52:43 | 379,820,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from django.db import models
# Create your models here.
class Seller(models.Model):
fname = models.CharField(max_length=255,default="Firstname")
lname = models.CharField(max_length=255,default="Lastname")
email = models.EmailField(unique=True)
passwd = models.CharField(max_length=225,default="Password")
class Category(models.Model):
Cname = models.CharField(max_length=255,default="Book Category")
def __str__(self):
return self.Cname
class Book(models.Model):
Book_name = models.CharField(max_length=255,default="BookName")
Author_name = models.CharField(max_length=255,default="Author name")
Book_category = models.ForeignKey(Category, on_delete=models.CASCADE)
Book_img = models.ImageField(upload_to="bookimages/",default="abc.jpg")
Book = models.FileField(upload_to="books/",default="book1.pdf")
BookPrice = models.FloatField(default=0.0) | [
"yashpoojara268@gmail.com"
] | yashpoojara268@gmail.com |
45a933ae54870562a49b7ca56df522e6a7a24a32 | fc561c26499cdb93b612fef9cfbd133348b27012 | /madlibs.py | e477a047bd7c6457a13e7ca15b91021fb710d8ac | [] | no_license | pollardk123/lab-05-js-py | 1db44340ae2ba25227cd854359c8e85203465871 | c3b00c5b771469bd367c0500c5af87509bf9d418 | refs/heads/main | 2023-08-22T04:50:09.244308 | 2021-10-07T01:59:28 | 2021-10-07T01:59:28 | 413,547,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | noun = input("pizza")
adjective = input("ridiculous")
adverb = input("slowly")
verb = input("running")
print("On a tropical island" + str(noun) + ",")
print("Underneath a" + str(adjective) +" lava moon.")
print("hangin'" + str(adverb) +"with the hula dancers, ")
print(str(verb) + "questions cause' they all got answers.")
| [
"pollardk836@gmail.com"
] | pollardk836@gmail.com |
2823bede91bf3a0573ff1ded78288e184fb2f586 | 758d17014e8200bb127fdb0a7e1c9a1e235efb33 | /tests/integration_tests/test_suite.py | ca0b9e89765d37c69e79b75b93247ea4388986a9 | [
"MIT"
] | permissive | mohsinkhansymc/mindsdb | 4de9a0fcc9c4c7693c5bd223a73b52d2ee7002b9 | 84376b50a9ea2fa695f5288479170cd73e147fae | refs/heads/master | 2020-07-22T00:47:19.395496 | 2019-09-09T03:46:10 | 2019-09-09T03:46:10 | 207,019,270 | 0 | 1 | MIT | 2019-09-07T19:57:32 | 2019-09-07T19:57:31 | null | UTF-8 | Python | false | false | 1,462 | py | from run_example import run_example
from generated_data_tests import *
import multiprocessing
import os
# Run the CI tests
os.system('cd ..; cd ci_tests; python3 full_test.py')
# Run the example datassts
datasets = [{
'name':'default_of_credit',
'sample':True,
'expect_accuracy_above':72
},{
'name':'imdb_movie_review',
'sample':False,
'expect_accuracy_above':83
},{
'name':'cifar_100',
'sample':True,
'expect_accuracy_above': 40 # For full dataset: 69
}]
for dataset in datasets:
dataset_name = dataset['name']
res = run_example(dataset_name, sample=dataset['sample'])
acc = res['accuracy']
ex_acc = dataset['expect_accuracy_above']
if acc < ex_acc:
print('\n\n\n============WARNING===============\n\n\n')
print(f'Expected an accuracy above {ex_acc} for dataset {dataset_name}.')
print(f'Got accuracy of {acc} instead.')
print('\n\n\n==================================\n\n\n')
else:
print('\n\n\n============SUCCESS===============\n\n\n')
print(f'Example dataset {dataset_name}, ran with success')
print(f'Got accuracy of {acc} !')
print('\n\n\n==================================\n\n\n')
# Run the generated data tests
test_one_label_prediction_wo_strings()
test_timeseries()
test_multilabel_prediction()
test_one_label_prediction()
#with multiprocessing.Pool(max(len(datasets),6)) as pool:
# pool.map(run_example,datasets)
| [
"jorge.torres.maldonado@gmail.com"
] | jorge.torres.maldonado@gmail.com |
23f2a6b0b4e2f106262cc73d2106fbdca425701e | 58072d424233d2ccbd0cefd4275fa4fba05e6d6c | /exercises/exl.py | 1bdb4bb38accc354c5adb812765b5a5e92c3d039 | [] | no_license | nvalladares/python-work | c98588588ad70dc11a21a5ddfb7606b65c8b35df | df08c2ff14a0a61ecd42ba67f623f33a10481e8d | refs/heads/master | 2021-01-10T05:48:44.253637 | 2015-11-08T05:59:52 | 2015-11-08T05:59:52 | 44,711,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,191 | py | # ------ Exercise 1
# print "Hello world!"
# print "Hello again"
# print "I like typing this."
# print "This is fun."
# print 'Yay! printing.'
# print "I'd much rather You 'not'."
# print 'I "said" do not touch this.'
# print "Cats are awesome!"
# print "cats are better than dogs!"
# -------- Exercise 2
# A comment, this is so you can read your program later.
# Anything after the # is ignored by python.
# print "I could have code like this." # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code:
# print "This won't run."
# print "This will run."
# --------- Exercise 3
# prints "I will now count my chickens"
# print "I will now count my chickens:"
# adds 25+30 and divides the answer by 6 and prints the answer = 30
# print "Hens", 25 + 30 / 6
# * and % take precedence over -, so we first evaluate 25 * 3 % 4. * and % have the same priority and associativity from left to right, so we evaluate from left to right, starting with 25 * 3. This yields 75. Now we evaluate 75 % 4, yielding 3. Finally, 100 - 3 is 97.
# print "Roosters", 100 - 25 * 3 % 4
# prints "Now I will count the eggs"
# print "Now I will count the eggs:"
# the answer is 7. The % (modulo) operator yields the remainder from the division of the first argument by the second. The numeric arguments are first converted to a common type. A zero right argument raises the ZeroDivisionError exception. The arguments may be floating point numbers, e.g., 3.14%0.7 equals 0.34 (since 3.14 equals 4*0.7 + 0.34.) The modulo operator always yields a result with the same sign as its second operand (or zero); the absolute value of the result is strictly smaller than the absolute value of the second operand [2].
# print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
# print "Is it true that 3 + 2 < 5 - 7?"
# print 3 + 2 < 5 - 7
# print "What is 3 + 2?", 3 + 2
# print "What is 5 - 7?", 5 - 7
# print "Oh, that's why it's False."
# print "How about some more."
# print "Is it greater?", 5 > -2
# print "Is it greater or equal?", 5 >= -2
# print "Is it less or equal?", 5 <= -2
# -------- Exercise 4
# variable 'cars' has a value of 100
# cars = 100
# variable for 'space in a car' has a value of 4.0
# space_in_a_car = 4.0
# variable for 'drivers' has a value of 30
# drivers = 30
# variable for 'passengers' has a value of 90
# passengers = 90
# var 'cars not driven' value is the difference of 'cars' - 'drivers'
# cars_not_driven = cars - drivers
# var for 'cars driven' is equal to 'drivers'
# cars_driven = drivers
# var 'carpool capacity' is the product of 'cars driven' * 'space in a car'
# carpool_capacity = cars_driven * space_in_a_car
# var 'average of passengers per car' takes average of 'passengers' divided by 'cars driven'
# average_passengers_per_car = passengers / cars_driven
# outputs value of var 'cars' (100)
# print "There are", cars, "cars available."
# outputs value of var 'drivers' (30)
# print "There are only", drivers, "drivers available."
# outputs difference of 'cars' - 'drivers'
# print "There will be", cars_not_driven, "empty cars today."
# outputs value of var 'carpool capacity' which is the product of 'cars driven' * 'space in a car'
# print "We can transport", carpool_capacity, "people today."
# outputs value of var 'passengers' (90)
# print "We have", passengers, "to carpool today."
# outputs average of 'passengers' divided by 'cars driven'
# print "We need to put about", average_passengers_per_car, "in each car."
# --------------- Exercise 5
# name = 'Frodo Baggins'
# age = 33 # when I left the Shire
# height = 42 # inches
# weight = 80 # lbs
# eyes = 'Blue'
# teeth = 'White'
# hair = 'Brown'
# print "Let's talk about %s." % name
# print "He is %d years old." % age
# print "He's %d inches tall." % height
# print "He's %d pounds heavy." % weight
# print "Actually that's not too heavy."
# print "He's got %s eyes and %s hair." % (eyes, hair)
# print "His teeth are usually %s depending on how close to Mordor he is." % teeth
# print "If I add %d, %d, and %d I get %d." % (
# age, height, weight, age + height + weight)
# print "Height converted from inches to centimeters is %d" % (height * 2.54)
# print "Weight converted from pounds to kilograms is %d" % (weight / 2.2)
# print round(1.98999)
# These are format specifiers. The %d specifier refers specifically to a decimal (base 10) integer. The %s specifier refers to a Python string. It is used for string formatting
# --------------- Exercise 6
# var 'x' = 'there are 10 types of people'
# x = "There are %d types of people." % 10
# var 'binary' = 'binary'
# binary = "binary"
# var 'do_not' = 'don't
# do_not = "don't"
# var 'y' = 'Those who understand binary and those who don't'
# y = "Those who know %s and those who %s." % (binary, do_not)
# prints value of 'x'
# print x
# prints value of 'y'
# print y
# prints value of 'x'
# print "I said: %r." % x
# prints value of 'y'
# print "I also said: '%s'." % y
# var 'hilarious' has a value of 'False'
# hilarious = False
# joke evaluation prints value of 'r'
# joke_evaluation = "Isn't that joke so funny?! %r"
# print joke_evaluation % hilarious
# w = "This is the left side of..."
# e = "a string with a right side."
# print w + e
# ------------ Exercise 7
print "Mary had a little lamb."
print "Its fleece was white as %s." % 'snow' #'snow' isn't a variable, it's a strig
print "And everywhere that Mary went."
print "." * 10 # what'd that do? It printed "." 10 times
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
# with comma = "Cheese Burger"
# without comma = "Cheese
# Burger"
# --------------- Exercise 8
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
"I had this thing.",
"That you could type up right.",
"But it didn't sing.",
"So I said good night."
)
| [
"nvalladares120@gmail.com"
] | nvalladares120@gmail.com |
6e0e35a66e1e9f4a715acf41a8f04081eec0fb17 | 74aabb616e81cada88a96e74aa87ef276478d534 | /libs/functions/generate_api.py | dba15c7110e007c290b7272ef62049feb890f4b9 | [
"MIT"
] | permissive | mbachtell/covid-data-model | b0362b6b6666c86f06a8e388201b32e81e7f2ad3 | 695cb7fbbedc2f5e97a22fefc44f99db8e88f588 | refs/heads/master | 2022-04-21T16:43:48.257096 | 2020-04-14T05:20:05 | 2020-04-14T05:20:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | from datetime import datetime
from api.can_predictions import (
CANPredictionAPIRow,
CANPredictionAPI,
_Projections,
_HospitalBeds,
)
from libs.datasets import results_schema as rc
from libs.constants import NULL_VALUE
def _format_date(input_date):
if not input_date:
raise Exception("Can't format a date that doesn't exist")
if isinstance(input_date, str):
# note if this is already in iso format it will be grumpy. maybe use dateutil
datetime_obj = datetime.strptime(input_date, "%m/%d/%Y %H:%M")
return datetime_obj.isoformat()
if isinstance(input_date, datetime):
return input_date.isoformat()
raise Exception("Invalid date type when converting to api")
def _get_date_or_none(panda_date_or_none):
""" Projection Null value is a string NULL so if this date value is a string,
make it none. Otherwise convert to the python datetime. Example
of this being null is when there is no bed shortfall, the shortfall dates is none """
if isinstance(panda_date_or_none, str):
return None
return _format_date(panda_date_or_none.to_pydatetime())
def _get_or_none(value):
if isinstance(value, str) and value == NULL_VALUE:
return None
else:
return value
def generate_api_for_projection_row(county_row):
peak_date = _get_date_or_none(county_row[rc.PEAK_HOSPITALIZATIONS])
shortage_start_date = _get_date_or_none(county_row[rc.HOSPITAL_SHORTFALL_DATE])
_hospital_beds = _HospitalBeds(
peakDate=peak_date,
shortageStartDate=shortage_start_date,
peakShortfall=_get_or_none(county_row[rc.PEAK_HOSPITALIZATION_SHORTFALL]),
)
_projections = _Projections(hospitalBeds=_hospital_beds)
county_result = CANPredictionAPIRow(
stateName=county_row[rc.STATE],
countyName=county_row[rc.COUNTY],
fips=county_row[rc.FIPS],
lastUpdatedDate=_format_date(county_row[rc.LAST_UPDATED]),
projections=_projections,
)
return county_result
def generate_api_for_projection(projection):
api_results = []
for index, county_row in projection.iterrows():
county_result = generate_api_for_projection_row(county_row)
api_results.append(county_result)
return CANPredictionAPI(data=api_results)
| [
"noreply@github.com"
] | noreply@github.com |
7374152c465ffef2efce65bb95b73399c51a8efa | 984680c2d4b4ecf65fb1228d6c80ea8efc774979 | /store/tests/test_urls.py | b420ae77023f1412246328095c458bc328772dcd | [] | no_license | NeoVic2006/Summit_project | ba37da2e9d98e3823b40801def303c077b204bc1 | bb6a87a950e251b83d44296b0d230317ec43056c | refs/heads/main | 2023-07-22T08:48:00.981271 | 2021-08-30T22:34:23 | 2021-08-30T22:34:23 | 396,090,417 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | from django.test import SimpleTestCase
from django.urls import reverse, resolve
from store.views import Categories, CategoryUpdate, CategoryDetail, CategoryCreate, Products, cart_summary, cart_add
class TestUrls(SimpleTestCase):
# path('', Categories.as_view(), name='list_category'),
def test_list_category_url_is_resolves(self):
url = reverse('store:list_category')
self.assertEqual(resolve(url).func.view_class, Categories) # testing Class View Url
# path('cart/', cart_summary, name='cart'),
def test_cart_url_is_resolves(self):
url = reverse('store:cart')
self.assertEqual(resolve(url).func, cart_summary) # testing function View Url
# path('<slug:slug>/', CategoryDetail.as_view(), name='category_details'),
def test_category_details_url_is_resolves(self):
url = reverse('store:category_details', args=['some-slug'])
self.assertEqual(resolve(url).func.view_class, CategoryDetail) # testing Slug parametr
| [
"NeoVic2006@gmail.com"
] | NeoVic2006@gmail.com |
8f50db5e80b2f43463b2b5b774b8d15a91d726a6 | e213657e54a2d1b84fdb26a195045f2617972a5b | /scripts/Chizulum-Nnodu.py | 1f1cd9400e8664ad37364a02a4c05c6f9c0c6dfa | [] | no_license | Wenykeny/internship_hngi7 | 0bb9d1dcad7a5d87009a630052fc90497eab3f87 | 4bd38eab2333ee094c90bf8e9e0d555d0cac92bb | refs/heads/master | 2022-10-02T02:11:48.175312 | 2020-06-05T17:03:41 | 2020-06-05T17:03:41 | 268,345,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | def intro():
print("Hello World, this is Chizulum Nnodu with HNGi7 ID HNG-01348 and email cin2@students.calvin.edu using python for stage 2 task")
intro();
| [
"moisesnt2@gmail.com"
] | moisesnt2@gmail.com |
0667f97fb57c8c12e435d2f0e0d28df739385605 | fcf3c983043273c4e57ac33330efaa0a9e5643a2 | /model-optimizer/mo/front/mxnet/extractors/utils_test.py | 070d5323122452347c77478d42a838fab10ae476 | [
"Apache-2.0"
] | permissive | p3tromyz0n/dldt | e7ab259848c90fdffd1395eaf5cf53ecd2b1e2f3 | 669bee86e580cbbc8ef40b440ab195ba2cbf5142 | refs/heads/2018 | 2020-05-15T13:03:47.748654 | 2019-03-14T10:13:27 | 2019-03-14T10:13:27 | 158,445,061 | 0 | 1 | Apache-2.0 | 2019-04-19T15:24:15 | 2018-11-20T20:07:50 | C++ | UTF-8 | Python | false | false | 6,599 | py | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
import mxnet as mx
from mo.front.mxnet.extractors.utils import AttrDictionary
from mo.front.mxnet.extractors.utils import load_params
class TestAttrDictionary(unittest.TestCase):
def testBool(self):
attrs = {
"global_pool": "True"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(True, global_pool)
def testBoolAsDigits(self):
attrs = {
"global_pool": "1"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(True, global_pool)
def testBoolWithoutAttr(self):
attrs = {
"something": "1"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(False, global_pool)
def testStrAttr(self):
attrs = {
"something": "Val"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.str("something", "Text")
self.assertEqual("Val", attr)
def testStrAttrWithoutAttr(self):
attrs = {
"something2": "Val"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.str("something", "Text")
self.assertEqual("Text", attr)
def testFloatAttr(self):
attrs = {
"something": "0.5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 0.1)
self.assertEqual(0.5, attr)
def testFloatWithoutAttr(self):
attrs = {
"something2": "0.5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 0.1)
self.assertEqual(0.1, attr)
def testIntAttr(self):
attrs = {
"something": "5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 1)
self.assertEqual(5, attr)
def testIntWithoutAttr(self):
attrs = {
"something2": "5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 1)
self.assertEqual(1, attr)
def testTupleAttr(self):
attrs = {
"something": "(5,6,7)"
}
attr_dict = AttrDictionary(attrs)
a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
self.assertEqual(5, a)
self.assertEqual(6, b)
self.assertEqual(7, c)
def testTupleWithoutAttr(self):
attrs = {
"something2": "(5,6,7)"
}
attr_dict = AttrDictionary(attrs)
a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
self.assertEqual(1, a)
self.assertEqual(2, b)
self.assertEqual(3, c)
def testTupleWithEmptyTupleAttr(self):
attrs = {
"something2": "()"
}
attr_dict = AttrDictionary(attrs)
a, b = attr_dict.tuple("something", int, (2, 3))
self.assertEqual(2, a)
self.assertEqual(3, b)
def testTupleWithEmptyListAttr(self):
attrs = {
"something2": "[]"
}
attr_dict = AttrDictionary(attrs)
a, b = attr_dict.tuple("something", int, (2, 3))
self.assertEqual(2, a)
self.assertEqual(3, b)
def testListAttr(self):
attrs = {
"something": "5,6,7"
}
attr_dict = AttrDictionary(attrs)
l = attr_dict.list("something", int, [1, 2, 3])
self.assertEqual(5, l[0])
self.assertEqual(6, l[1])
self.assertEqual(7, l[2])
def testListWithoutAttr(self):
attrs = {
"something2": "5,6,7"
}
attr_dict = AttrDictionary(attrs)
l = attr_dict.list("something", int, [1, 2, 3])
self.assertEqual(1, l[0])
self.assertEqual(2, l[1])
self.assertEqual(3, l[2])
class TestUtils(unittest.TestCase):
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_params(self, mock_nd_load):
mock_nd_load.return_value = {'arg:conv0_weight': mx.nd.array([1, 2], dtype='float32'),
'arg:conv1_weight': mx.nd.array([2, 3], dtype='float32'),
'aux:bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
model_params = load_params("model.params")
self.assertTrue('conv0_weight' in model_params._param_names)
self.assertTrue('conv1_weight' in model_params._param_names)
self.assertTrue('bn_data_mean' in model_params._aux_names)
self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_args_nd(self, mock_nd_load):
mock_nd_load.return_value = {'conv0_weight': mx.nd.array([1, 2], dtype='float32'),
'conv1_weight': mx.nd.array([2, 3], dtype='float32')}
model_params = load_params("args_model.nd", data_names=('data1', 'data2'))
self.assertTrue('conv0_weight' in model_params._param_names)
self.assertTrue('conv1_weight' in model_params._param_names)
self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_auxs_nd(self, mock_nd_load):
mock_nd_load.return_value = {'bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
model_params = load_params("auxs_model.nd")
self.assertTrue('bn_data_mean' in model_params._aux_names)
self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
| [
"44090433+openvino-pushbot@users.noreply.github.com"
] | 44090433+openvino-pushbot@users.noreply.github.com |
57604a15ec5e0ce185af0e028d6b12c25ef35e93 | 78ab40c6e97f102be445b98fd8c3f908cf6f1609 | /tests/get_config.py | 517b3d63936cd46cf1b06f5351bc9133825982b0 | [] | no_license | meaqese/AutoImporter | a058ac4cc36de15d1711a41f381d34ee6b96a1e5 | bee28800503d5f471dd2415ba8e146e95dc2df62 | refs/heads/master | 2023-04-22T02:21:08.420179 | 2021-05-05T15:08:10 | 2021-05-05T15:08:10 | 290,337,456 | 0 | 1 | null | 2020-08-26T02:51:39 | 2020-08-25T22:30:46 | Python | UTF-8 | Python | false | false | 403 | py | import autoimporter
from pathlib import Path
from pytest import raises
def test_main():
config = autoimporter.get_config(Path('../sandbox/config.yaml'))
assert config == {
'import': ['blocks/*/**', 'blocks/*'],
'destination': 'styles.scss'
}
def test_file_not_found():
with raises(FileNotFoundError):
autoimporter.get_config(Path('../sandbox/config.ymlqqq'))
| [
"meaqese@protonmail.com"
] | meaqese@protonmail.com |
a4ce1a8fed608f1f033770d83238d074c118d376 | fbb7f9695b96c984fa2f43c4a9650fff6b09dcac | /src/main.py | 99a2c68105d4eecee391e97dc85d3fb6557ff128 | [
"MIT"
] | permissive | aevear/Stonktastic | db5b685d599ff46062f1d3a5cf7f7c1c0f4754e1 | 69a7b33c29492c8d76f5bec892eefb6606c2eaab | refs/heads/main | 2023-02-25T21:13:26.607786 | 2021-01-31T17:38:01 | 2021-01-31T17:38:01 | 329,738,692 | 3 | 1 | MIT | 2021-01-20T16:49:49 | 2021-01-14T21:28:10 | HTML | UTF-8 | Python | false | false | 1,947 | py | """
.. module:: main
:synopsis: Runs stonks backend with options for partial runs
.. moduleauthor:: Alexander Hahn <github.com/aevear>
.. moduleauthor:: Kody Richardson <github.com/aevear>
"""
import sys
from stonktastic.config.config import maxHistory, stockToOpt
from stonktastic.databaseCode import initializeStockDatabase, nameListGen
from stonktastic.machinelearning import (
memoryMain,
overallEval,
polyRegMain,
ranForMain,
runHistory,
)
from stonktastic.optimization import runPolyRegOptimization, runRanForOptimization
import logging
logging.getLogger('tensorflow').disabled = True
def main(option):
"""
**Initializes Program**
Args:
options (str) : value provided by the user to select which aspect of the program to run. Options are listed below
======= ===========
Options Description
======= ===========
Full Erases all data and re-initializes and runs all stages of the project:
Init Initializes the database and fills it with data from Yahoo Finance:
Train Runs all three machine learning model on data loaded into the db.:
Stats Preforms analytics and predictions for each model for each stock.:
Opt Preforms the full optimization aspect of the project on whichever stonk is provided in the *config.csv* file:
======= ===========
"""
if option == "full":
initializeStockDatabase()
polyRegMain()
memoryMain()
ranForMain()
runHistory()
overallEval()
if option == "init":
initializeStockDatabase()
else:
nameListGen()
if option == "train":
polyRegMain()
memoryMain()
ranForMain()
if option == "stats":
runHistory()
overallEval()
if option == "opt":
runPolyRegOptimization(stockToOpt)
runRanForOptimization(stockToOpt)
if __name__ == "__main__":
main(sys.argv[1])
| [
"xan.hahn@gmail.com"
] | xan.hahn@gmail.com |
389e956262735deadbff885eaace35377b9672fa | cbcbb04be207839cab8d26d352f64cc505a5eec9 | /virtual/lib/python3.6/site-packages/virtualenv/create/creator.py | 45075dae96cf5208adec66636979324cfeb1a046 | [
"MIT"
] | permissive | Antony-me/perfect-pitch | 8c61b7d6de1d00fddff5c2feea0293eb85ea8f92 | a2de0adaa3a22844390627459796b823e4ac8e71 | refs/heads/main | 2023-01-10T23:25:46.931458 | 2020-11-02T20:34:52 | 2020-11-02T20:34:52 | 308,002,503 | 0 | 0 | MIT | 2020-11-02T07:58:34 | 2020-10-28T12:01:00 | Python | UTF-8 | Python | false | false | 8,501 | py | from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import os
import sys
from abc import ABCMeta, abstractmethod
from argparse import ArgumentTypeError
from ast import literal_eval
from collections import OrderedDict
from textwrap import dedent
from six import add_metaclass
from virtualenv.discovery.cached_py_info import LogCmd
from virtualenv.info import WIN_CPYTHON_2
from virtualenv.util.path import Path, safe_delete
from virtualenv.util.six import ensure_str, ensure_text
from virtualenv.util.subprocess import run_cmd
from virtualenv.version import __version__
from .pyenv_cfg import PyEnvCfg
HERE = Path(os.path.abspath(__file__)).parent
DEBUG_SCRIPT = HERE / "debug.py"
class CreatorMeta(object):
def __init__(self):
self.error = None
@add_metaclass(ABCMeta)
class Creator(object):
"""A class that given a python Interpreter creates a virtual environment"""
def __init__(self, options, interpreter):
"""Construct a new virtual environment creator.
:param options: the CLI option as parsed from :meth:`add_parser_arguments`
:param interpreter: the interpreter to create virtual environment from
"""
self.interpreter = interpreter
self._debug = None
self.dest = Path(options.dest)
self.clear = options.clear
self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
self.app_data = options.app_data
def __repr__(self):
return ensure_str(self.__unicode__())
def __unicode__(self):
return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args()))
def _args(self):
return [
("dest", ensure_text(str(self.dest))),
("clear", self.clear),
]
@classmethod
def can_create(cls, interpreter):
"""Determine if we can create a virtual environment.
:param interpreter: the interpreter in question
:return: ``None`` if we can't create, any other object otherwise that will be forwarded to \
:meth:`add_parser_arguments`
"""
return True
@classmethod
def add_parser_arguments(cls, parser, interpreter, meta, app_data):
"""Add CLI arguments for the creator.
:param parser: the CLI parser
:param app_data: the application data folder
:param interpreter: the interpreter we're asked to create virtual environment for
:param meta: value as returned by :meth:`can_create`
"""
parser.add_argument(
"dest",
help="directory to create virtualenv at",
type=cls.validate_dest,
)
parser.add_argument(
"--clear",
dest="clear",
action="store_true",
help="remove the destination directory if exist before starting (will overwrite files otherwise)",
default=False,
)
@abstractmethod
def create(self):
"""Perform the virtual environment creation."""
raise NotImplementedError
@classmethod
def validate_dest(cls, raw_value):
"""No path separator in the path, valid chars and must be write-able"""
def non_write_able(dest, value):
common = Path(*os.path.commonprefix([value.parts, dest.parts]))
raise ArgumentTypeError(
"the destination {} is not write-able at {}".format(dest.relative_to(common), common),
)
# the file system must be able to encode
# note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
encoding = sys.getfilesystemencoding()
refused = OrderedDict()
kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
for char in ensure_text(raw_value):
try:
trip = char.encode(encoding, **kwargs).decode(encoding)
if trip == char:
continue
raise ValueError(trip)
except ValueError:
refused[char] = None
if refused:
raise ArgumentTypeError(
"the file system codec ({}) cannot handle characters {!r} within {!r}".format(
encoding,
"".join(refused.keys()),
raw_value,
),
)
if os.pathsep in raw_value:
raise ArgumentTypeError(
"destination {!r} must not contain the path separator ({}) as this would break "
"the activation scripts".format(raw_value, os.pathsep),
)
value = Path(raw_value)
if value.exists() and value.is_file():
raise ArgumentTypeError("the destination {} already exists and is a file".format(value))
if (3, 3) <= sys.version_info <= (3, 6):
# pre 3.6 resolve is always strict, aka must exists, sidestep by using os.path operation
dest = Path(os.path.realpath(raw_value))
else:
dest = Path(os.path.abspath(str(value))).resolve() # on Windows absolute does not imply resolve so use both
value = dest
while dest:
if dest.exists():
if os.access(ensure_text(str(dest)), os.W_OK):
break
else:
non_write_able(dest, value)
base, _ = dest.parent, dest.name
if base == dest:
non_write_able(dest, value) # pragma: no cover
dest = base
return str(value)
def run(self):
if self.dest.exists() and self.clear:
logging.debug("delete %s", self.dest)
safe_delete(self.dest)
self.create()
self.set_pyenv_cfg()
self.setup_ignore_vcs()
def set_pyenv_cfg(self):
self.pyenv_cfg.content = OrderedDict()
self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix
self.pyenv_cfg["implementation"] = self.interpreter.implementation
self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
self.pyenv_cfg["virtualenv"] = __version__
def setup_ignore_vcs(self):
"""Generate ignore instructions for version control systems."""
# mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs
git_ignore = self.dest / ".gitignore"
if not git_ignore.exists():
git_ignore.write_text(
dedent(
"""
# created by virtualenv automatically
*
""",
).lstrip(),
)
# Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the
# subinclude directive from root, at which point on might as well ignore the directory itself, see
# https://www.selenic.com/mercurial/hgignore.5.html for more details
# Bazaar - does not support ignore files in sub-directories, only at root level via .bzrignore
# Subversion - does not support ignore files, requires direct manipulation with the svn tool
@property
def debug(self):
"""
:return: debug information about the virtual environment (only valid after :meth:`create` has run)
"""
if self._debug is None and self.exe is not None:
self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data)
return self._debug
# noinspection PyMethodMayBeStatic
def debug_script(self):
return DEBUG_SCRIPT
def get_env_debug_info(env_exe, debug_script, app_data):
env = os.environ.copy()
env.pop(str("PYTHONPATH"), None)
with app_data.ensure_extracted(debug_script) as debug_script:
cmd = [str(env_exe), str(debug_script)]
if WIN_CPYTHON_2:
cmd = [ensure_text(i) for i in cmd]
logging.debug(str("debug via %r"), LogCmd(cmd))
code, out, err = run_cmd(cmd)
# noinspection PyBroadException
try:
if code != 0:
result = literal_eval(out)
else:
result = json.loads(out)
if err:
result["err"] = err
except Exception as exception:
return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
if "sys" in result and "path" in result["sys"]:
del result["sys"]["path"][0]
return result
| [
"antonymunyasia993@gmail.com"
] | antonymunyasia993@gmail.com |
5083bd67060492a1bf8d4c3a8b2f3127faed1128 | be5e162e547ab24e6dde59d5d3ffec483c285e62 | /PostingsReader.py | 5ea7735d253c6b26b5d84413de33bc3e92953965 | [] | no_license | chrisb66110/BackEndRI | e0eb8eb3db61a9cdaf70e4c7b7cddc24a7972221 | 29fb176fcfac04e376ed7ed4ffe09b342807d529 | refs/heads/master | 2020-06-04T11:15:21.240049 | 2019-07-02T05:17:41 | 2019-07-02T05:17:41 | 191,989,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | import re
class PostingsReader:
def __init__(self, docName):
docName = str(docName)
self.fileName = './DocumentosProcesados/postings/' + docName + '.txt'
# Se lee el archivo .wtd
self.file = open(self.fileName, "r")
string = self.file.read()
self.fileLines = string.split("\n")
# Metodo que devuelve el archivo
def getLinesPostings(self):
return self.fileLines
# Metodo que devuelve un diccionario de lo que tiene el portings.txt
# Llave: termino, aliasDocumento
# Valor: numeroLinea, peso
def getDicPostings(self):
postings = dict()
numberLine = 0
for line in self.fileLines:
lineWithoutSpace = re.sub(r'\s+', ' ', line)
lineDiv = lineWithoutSpace.split(" ")
if len(lineDiv) >= 3:
llave = lineDiv[0], lineDiv[1]
postings[llave] = numberLine, float(lineDiv[2])
numberLine = numberLine + 1
return postings
if __name__ == '__main__':
tokReader = PostingsReader('postings')
wtd = tokReader.getDicPostings()
for word in wtd:
print(str(word) + " " + str(wtd[word]))
| [
"christofer.rodriguez@ucr.ac.cr"
] | christofer.rodriguez@ucr.ac.cr |
cb25979d06470980fc05da61a37f53a1a344aef2 | 2c5dc3bd95c6d2fbf0868defeb1c1f52608902fa | /Assignment 6/1.py | d6524fe78bd41c9fc121510756f2788c74ff0860 | [] | no_license | MohamedZakariaa/Python-MySQL | fe6745a3c8e9702976d6fc50e6cb9c6491dab285 | a18fe472d8cbf7662162f0731f13715425257ab9 | refs/heads/master | 2022-03-11T09:37:42.085525 | 2018-05-04T17:07:16 | 2018-05-04T17:07:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import mysql.connector
connection = mysql.connector.connect(host='localhost',database='python_mysql',user='root',password='aaditya')
cur = connection.cursor();
itemcode = input("Enter ItemCode > ")
itemtype = input("Enter ItemType > ")
description = input("Enter Description > ")
price = input("Enter Price > ")
reorderlevel = input("Enter ReOrderlevel > ")
quantityonhand=input("Enter QuantityOnHand > ")
category = input("Enter Category > ")
cur.execute('INSERT INTO item VALUES(%s,%s,%s,%s,%s,%s,%s)',
(itemcode,itemtype,description,price,reorderlevel,quantityonhand,category))
connection.commit()
connection.close()
| [
"noreply@github.com"
] | noreply@github.com |
9fd480bd7227b91fc716c94c68a0e585dde1809c | 4d27b9bbe4a6360db7fdb792c95c50b355411496 | /Admin/migrations/0003_feedbackandcomplaints.py | 0258c2fffa0b1980239a14cebd6627b672f53c03 | [] | no_license | Urmila29/Artist-Hub | f9d17448c2f858b8702aaf3054ae5d5d3ed40ce1 | ca7e5557e1060e911b0c408afe9d1a2c24d2cf49 | refs/heads/master | 2023-09-02T23:03:07.575336 | 2021-11-17T10:07:27 | 2021-11-17T10:07:27 | 427,591,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # Generated by Django 2.0 on 2021-11-11 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Admin', '0002_aadmin_otp'),
]
operations = [
migrations.CreateModel(
name='FeedbackAndComplaints',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('User_Name', models.CharField(default='Name', max_length=100)),
('Email', models.EmailField(max_length=200)),
('Subject', models.CharField(default='Feedback', max_length=100)),
('Message', models.TextField(max_length=5000)),
],
),
]
| [
"sanurmi0129@gmail.com"
] | sanurmi0129@gmail.com |
f68979d9dc09bccdf5c7da0eb41b1131d065d623 | b415c705d572d40e00c577319c0b5c0129ffbdc4 | /pyGame/game_functions.py | 0a7d072e05340842a7a5f27a0e8357bb7f2d181f | [] | no_license | soulding/get_python | 6b839f63372e4b479c086251c8507a7f3c3dcb0f | fe5c52b7afd7f32f696d8c618285f124562d8b68 | refs/heads/master | 2020-04-16T02:00:13.239531 | 2019-02-12T03:53:56 | 2019-02-12T03:53:56 | 165,194,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,135 | py | import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
def check_events(ai_settings, screen, stats, play_button, ship, aliens, bullets):
    """Drain the pygame event queue and dispatch each event to its handler."""
    for evt in pygame.event.get():
        if evt.type == pygame.QUIT:
            sys.exit()
        elif evt.type == pygame.KEYDOWN:
            check_keydown_events(evt, ai_settings, screen, ship, bullets)
        elif evt.type == pygame.KEYUP:
            check_keyup_events(evt, ship)
        elif evt.type == pygame.MOUSEBUTTONDOWN:
            # Mouse clicks only matter for the Play button.
            click_x, click_y = pygame.mouse.get_pos()
            check_play_button(ai_settings, screen, stats, play_button, ship, aliens, bullets, click_x, click_y)
def check_play_button(ai_settings, screen, stats, play_button, ship, aliens, bullets, mouse_x, mouse_y):
    """Start a fresh game when Play is clicked while no game is running."""
    hit = play_button.rect.collidepoint(mouse_x, mouse_y)
    if not hit or stats.game_active:
        return
    # Hide the cursor for the duration of play and reset all round state.
    pygame.mouse.set_visible(False)
    stats.reset_stats()
    stats.game_active = True
    # Clear any leftovers from the previous round, then rebuild the board.
    aliens.empty()
    bullets.empty()
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
def check_keydown_events(event, ai_settings, screen, ship, bullets):
    """React to a key press: set a movement flag, fire, or quit."""
    key = event.key
    if key == pygame.K_q:
        sys.exit()
    if key == pygame.K_RIGHT:
        ship.moving_right = True
    elif key == pygame.K_LEFT:
        ship.moving_left = True
    elif key == pygame.K_SPACE:
        fire_bullet(ai_settings, screen, ship, bullets)
def check_keyup_events(event, ship):
    """React to a key release by clearing the matching movement flag."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = False
    if event.key == pygame.K_LEFT:
        ship.moving_left = False
def update_screen(ai_settings, screen, stats, ship, aliens, bullets, play_button):
    """Redraw the whole frame, then flip to make it visible."""
    # Repaint the background every frame.
    screen.fill(ai_settings.bg_color)
    # Bullets go under the ship/aliens, matching the original draw order.
    for shot in bullets.sprites():
        shot.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    # The Play button is only shown while no game is in progress.
    if not stats.game_active:
        play_button.draw_button()
    pygame.display.flip()
def update_bullets(ai_settings, screen, ship, aliens, bullets):
    """Advance every bullet, drop off-screen ones, then resolve alien hits."""
    bullets.update()
    # Iterate over a copy so removing from the group mid-loop is safe.
    for shot in bullets.copy():
        if shot.rect.bottom <= 0:
            bullets.remove(shot)
    check_bullet_alien_collisions(ai_settings, screen, ship, aliens, bullets)
def check_bullet_alien_collisions(ai_settings, screen, ship, aliens, bullets):
    """Kill every bullet/alien pair that overlaps; respawn the fleet once empty."""
    # Both colliding sprites are removed from their groups (True, True).
    pygame.sprite.groupcollide(bullets, aliens, True, True)
    if len(aliens) == 0:
        # Wave cleared: wipe remaining bullets and spawn a new fleet.
        bullets.empty()
        create_fleet(ai_settings, screen, ship, aliens)
def fire_bullet(ai_settings, screen, ship, bullets):
    """Spawn one bullet from the ship unless the on-screen cap is reached."""
    if len(bullets) >= ai_settings.bullet_allowed:
        return
    bullets.add(Bullet(ai_settings, screen, ship))
def get_number_aliens_x(ai_settings, alien_width):
    """Return how many aliens fit on one row.

    Leaves one alien-width margin on each side and one alien-width gap
    between neighbours, so each alien occupies two widths of space.
    """
    usable_width = ai_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
    """Build one alien and place it at grid position (alien_number, row_number)."""
    invader = Alien(ai_settings, screen)
    width, height = invader.rect.width, invader.rect.height
    # One alien-width margin plus two widths per column; same scheme vertically.
    invader.x = width + 2 * width * alien_number
    invader.rect.x = invader.x
    invader.rect.y = height + 2 * height * row_number
    aliens.add(invader)
def get_number_rows(ai_settings, ship_height, alien_height):
    """Return how many alien rows fit between the top margin and the ship."""
    # Reserve three alien heights of margin plus the ship's height.
    usable_height = (ai_settings.screen_height - 3 * alien_height - ship_height)
    return int(usable_height / (2 * alien_height))
def create_fleet(ai_settings, screen, ship, aliens):
    """Fill the top of the screen with a full grid of aliens."""
    # Use a throwaway alien just to measure sprite dimensions.
    prototype = Alien(ai_settings, screen)
    per_row = get_number_aliens_x(ai_settings, prototype.rect.width)
    rows = get_number_rows(ai_settings, ship.rect.height, prototype.rect.height)
    for row in range(rows):
        for col in range(per_row):
            create_alien(ai_settings, screen, aliens, col, row)
def ship_hit(ai_settings, stats, screen, ship, aliens, bullets):
    """Respond to the ship being hit: spend a life or end the game."""
    if stats.ship_limit <= 1:
        # Out of ships: stop the game and bring the cursor back.
        stats.game_active = False
        pygame.mouse.set_visible(True)
        return
    stats.ship_limit -= 1
    # Wipe the board and restart the wave with the ship recentred.
    aliens.empty()
    bullets.empty()
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
    # Brief pause so the player registers the hit.
    sleep(0.5)
def check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets):
    """Treat any alien reaching the bottom edge like a ship collision."""
    bottom = screen.get_rect().bottom
    for invader in aliens.sprites():
        if invader.rect.bottom >= bottom:
            ship_hit(ai_settings, stats, screen, ship, aliens, bullets)
            # One hit per frame is enough; stop scanning.
            break
def update_aliens(ai_settings, stats, screen, ship, aliens, bullets):
    """Move the fleet (reversing at screen edges) and handle collisions
    with the ship or the bottom of the screen."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    # Ship/alien contact costs a life.
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, stats, screen, ship, aliens, bullets)
    check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets)
def check_fleet_edges(ai_settings, aliens):
    """Reverse the fleet as soon as any alien touches a screen edge."""
    # any() short-circuits on the first alien at an edge, exactly like the
    # original loop-and-break.
    if any(invader.check_edges() for invader in aliens.sprites()):
        change_fleet_direction(ai_settings, aliens)
def change_fleet_direction(ai_settings, aliens):
    """Drop the whole fleet one step and flip its horizontal direction."""
    for invader in aliens.sprites():
        invader.rect.y += ai_settings.fleet_drop_speed
    ai_settings.fleet_direction *= -1
| [
"dzy0112@163.com"
] | dzy0112@163.com |
6e52d10b29499aa35b90812a85ffec4595d8550c | 1ed647263e9be7b8cd5fb06905121840f96f0a04 | /app.py | 8148f9b59f32b8f3bf4eecded141c7d3f55586fd | [] | no_license | JesNatTer/flask-blog | f595a73ce4e1bb3f0645d39f7c4d0782aa57cf19 | bcb80854ebac47e4218444dfe8d7e6a77ebe43b0 | refs/heads/master | 2023-06-24T05:14:58.511603 | 2021-07-30T12:05:25 | 2021-07-30T12:05:25 | 390,381,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,036 | py | import hmac
import sqlite3
import datetime
from flask import Flask, request, jsonify
from flask_jwt import JWT, jwt_required, current_identity
from flask_cors import CORS
class User(object):
    """Minimal identity record consumed by flask_jwt's auth callbacks."""

    def __init__(self, id, username, password):
        self.id = id                # primary key from the user table
        self.username = username    # login name
        self.password = password    # NOTE(review): stored in plaintext here
def fetch_users():
    """Load every row of the user table as User objects.

    Columns in blog.db's user table: user_id, first_name, last_name,
    username, password — only id/username/password are kept.
    """
    with sqlite3.connect('blog.db') as conn:
        rows = conn.cursor().execute("SELECT * FROM user").fetchall()
    return [User(row[0], row[3], row[4]) for row in rows]
users = fetch_users()
def init_user_table():
    """Create the `user` table in blog.db if it does not already exist.

    Uses the connection as a context manager (commit/rollback) for
    consistency with init_post_table, and still closes it explicitly —
    the original left the connection open if the CREATE raised.
    """
    with sqlite3.connect('blog.db') as conn:
        print("Opened database successfully")
        conn.execute("CREATE TABLE IF NOT EXISTS user(user_id INTEGER PRIMARY KEY AUTOINCREMENT,"
                     "first_name TEXT NOT NULL,"
                     "last_name TEXT NOT NULL,"
                     "username TEXT NOT NULL,"
                     "password TEXT NOT NULL)")
        print("user table created successfully")
    conn.close()
def init_post_table():
    """Create the `post` table in blog.db if it does not already exist."""
    with sqlite3.connect('blog.db') as conn:
        conn.execute("CREATE TABLE IF NOT EXISTS post (id INTEGER PRIMARY KEY AUTOINCREMENT,"
                     "title TEXT NOT NULL,"
                     "content TEXT NOT NULL,"
                     "date_created TEXT NOT NULL)")
        print("blog table created successfully.")
# Ensure both tables exist before the app starts serving requests.
init_user_table()
init_post_table()
# Index users both ways for flask_jwt's authenticate/identity callbacks below.
username_table = { u.username: u for u in users }
userid_table = { u.id: u for u in users }
def authenticate(username, password):
    """flask_jwt callback: return the User if username/password match, else None."""
    candidate = username_table.get(username, None)
    if candidate is None:
        return None
    # Constant-time comparison to avoid leaking password length/prefix timing.
    if hmac.compare_digest(candidate.password.encode('utf-8'), password.encode('utf-8')):
        return candidate
def identity(payload):
    """flask_jwt callback: map a decoded JWT payload to its User object."""
    return userid_table.get(payload['identity'], None)
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend
app.debug = True  # NOTE(review): debug mode must be disabled in production
app.config['SECRET_KEY'] = 'super-secret'  # NOTE(review): secret should not live in source
jwt = JWT(app, authenticate, identity)  # exposes /auth and wires the callbacks above
@app.route('/protected')
@jwt_required()
def protected():
    # Demo endpoint: returns the repr of the authenticated User.
    return '%s' % current_identity
@app.route('/user-registration/', methods=["POST"])
def user_registration():
    """Register a new user from form data and persist it to the user table."""
    response = {}
    if request.method == "POST":
        form = request.form
        first_name = form['first_name']
        last_name = form['last_name']
        username = form['username']
        # NOTE(review): password is stored in plaintext; it should be hashed.
        password = form['password']
        with sqlite3.connect("blog.db") as conn:
            conn.cursor().execute("INSERT INTO user("
                                  "first_name,"
                                  "last_name,"
                                  "username,"
                                  "password) VALUES(?, ?, ?, ?)",
                                  (first_name, last_name, username, password))
            conn.commit()
        response["message"] = "success"
        response["status_code"] = 201
    return response
@app.route('/create-blog/', methods=["POST"])
@jwt_required()
def create_blog():
    """Create a blog post from form data (JWT-protected)."""
    response = {}
    if request.method == "POST":
        title = request.form['title']
        content = request.form['content']
        created_at = datetime.datetime.now()
        with sqlite3.connect('blog.db') as conn:
            conn.cursor().execute("INSERT INTO post("
                                  "title,"
                                  "content,"
                                  "date_created) VALUES(?, ?, ?)",
                                  (title, content, created_at))
            conn.commit()
        response["status_code"] = 201
        response['description'] = "Blog post added succesfully"
    return response
@app.route('/get-blogs/', methods=["GET"])
def get_blogs():
    """Return every blog post as raw rows."""
    response = {}
    with sqlite3.connect("blog.db") as conn:
        rows = conn.cursor().execute("SELECT * FROM post").fetchall()
        response['status_code'] = 200
        response['data'] = rows
    return response
@app.route("/delete-post/<int:post_id>")
@jwt_required()
def delete_post(post_id):
    """Delete the blog post with the given id (JWT-protected).

    Fix: the SQL is parameterized instead of built by string concatenation;
    the route converter already guarantees an int, but concatenating values
    into SQL is an injection-prone pattern that should never be copied.
    """
    response = {}
    with sqlite3.connect("blog.db") as conn:
        cursor = conn.cursor()
        cursor.execute("DELETE FROM post WHERE id=?", (post_id,))
        conn.commit()
    response['status_code'] = 200
    response['message'] = "blog post deleted successfully."
    return response
@app.route('/edit-post/<int:post_id>/', methods=["PUT"])
@jwt_required()
def edit_post(post_id):
    """Partially update a blog post's title and/or content (JWT-protected).

    Accepts a JSON body; only the keys present are written. Fixes: the
    original opened an unused outer connection plus one connection per
    field — this version reuses a single connection. Response keys and
    messages are unchanged for callers.
    """
    response = {}
    if request.method == "PUT":
        incoming_data = dict(request.json)
        with sqlite3.connect('blog.db') as conn:
            cursor = conn.cursor()
            if incoming_data.get("title") is not None:
                cursor.execute("UPDATE post SET title =? WHERE id=?",
                               (incoming_data["title"], post_id))
                conn.commit()
                response['message'] = "Update was successfully"
                response['status_code'] = 200
            if incoming_data.get("content") is not None:
                cursor.execute("UPDATE post SET content =? WHERE id=?",
                               (incoming_data["content"], post_id))
                conn.commit()
                response["content"] = "Content updated successfully"
                response["status_code"] = 200
    return response
@app.route('/get-post/<int:post_id>/', methods=["GET"])
def get_post(post_id):
    """Return a single blog post by id.

    Fix: the SQL is parameterized instead of built by string concatenation,
    matching the safe style used by the insert endpoints.
    """
    response = {}
    with sqlite3.connect("blog.db") as conn:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM post WHERE id=?", (post_id,))
        response["status_code"] = 200
        response["description"] = "Blog post retrieved successfully"
        response["data"] = cursor.fetchone()
    return jsonify(response)
| [
"jessenterblanche@gmail.com"
] | jessenterblanche@gmail.com |
18b6822b158feb02278f7ca0e91fdab621f5ee7b | 305b6376dfd2efaf77befcd0f250c45d1abc78cd | /implementations/stargan/datasets.py | c65141102f0010abc251c13a0c5abf92bfd84203 | [] | no_license | Liuhongzhi2018/Pytorch-GAN | 3a8d0355e452de5569c3eca62e67e751e2b33101 | c18f1c1b91fc59d5e4828f36060259e92baa6a6c | refs/heads/master | 2020-11-25T17:57:48.072967 | 2020-06-07T15:57:43 | 2020-06-07T15:57:43 | 228,781,769 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import glob
import random
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
class CelebADataset(Dataset):
    """CelebA face-attribute dataset.

    Yields (image_tensor, label_tensor) pairs where the label is a binary
    vector over `attributes`. The last 2000 images form the test split.
    """

    def __init__(self, root, transforms_=None, mode="train", attributes=None):
        # transforms_ is a list of torchvision transforms, composed in order.
        self.transform = transforms.Compose(transforms_)
        self.selected_attrs = attributes
        self.files = sorted(glob.glob("%s/*.jpg" % root))
        # Train split: everything except the last 2000 images; otherwise the last 2000.
        self.files = self.files[:-2000] if mode == "train" else self.files[-2000:]
        # Attribute annotation file under root/txt/ (first match is used).
        self.label_path = glob.glob("%s/txt/*.txt" % root)[0]
        self.annotations = self.get_annotations()

    def get_annotations(self):
        """Extracts annotations for CelebA as {filename: [0/1, ...]} over selected_attrs."""
        annotations = {}
        lines = [line.rstrip() for line in open(self.label_path, "r")]
        print(self.label_path)
        # Line 0 is the image count, line 1 the attribute-name header,
        # the rest are one "<filename> <v1> <v2> ..." row per image.
        self.label_names = lines[1].split()
        for _, line in enumerate(lines[2:]):
            filename, *values = line.split()
            labels = []
            for attr in self.selected_attrs:
                idx = self.label_names.index(attr)
                # Raw values are "1"/"-1"; map to 1/0.
                labels.append(1 * (values[idx] == "1"))
            annotations[filename] = labels
        return annotations

    def __getitem__(self, index):
        # Wrap-around indexing lets callers sample beyond len(self.files).
        filepath = self.files[index % len(self.files)]
        # NOTE(review): splitting on "/" assumes POSIX paths — confirm, or use os.path.basename.
        filename = filepath.split("/")[-1]
        img = self.transform(Image.open(filepath))
        label = self.annotations[filename]
        label = torch.FloatTensor(np.array(label))
        return img, label

    def __len__(self):
        return len(self.files)
| [
"liuhongzhi_006@163.com"
] | liuhongzhi_006@163.com |
4bf01b7ae1c62134c913b6119a7902635486c910 | f44c9ab8a25c5f4a2811fc1e77a59cdce2fe588c | /analysis/check_audio_problems.py | 790ab272d5f3822830562109658a06f5fe559128 | [] | no_license | vejmelkam/StimPackC | 645e1137ef057379971054778cf45f7a9d89ed07 | b82dbbf267073017be3202996906fd0fe900e89e | refs/heads/master | 2021-01-10T20:39:14.301366 | 2011-08-24T17:39:54 | 2011-08-24T17:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | #!/usr/bin/env python
import sys
import string  # retained: previously used for string.find (removed below)

# Read all lines from the VLC log file named on the command line.
# Fix: use a with-block so the handle is closed even on error.
with open(sys.argv[1], "r") as f:
    lines = f.readlines()

# Count occurrences of VLC's "dropping buffer" audio message.
# Fix: string.find(line, ...) does not exist in Python 3 (AttributeError);
# the membership test is the portable equivalent of "found at index >= 0".
found = 0
for line in lines:
    if "dropping buffer" in line:
        found += 1

print("\n **** check audio problems script ****")
print("VLC log contains %d lines." % len(lines))

# A handful of drops is normal across 4 videos; many suggests real trouble.
if found < 20:
    print("Audio problems noted %d times, no problem for 4 videos." % found)
else:
    print("Audio problems noted %d times !!! Check audio log and question subject." % found)
| [
"devnull@localhost"
] | devnull@localhost |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.