blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ef62be11a2c541a6cf473f35f4d0effb8dc905be | e38eda106e821c06d34674dd1b670e2b236cbbf3 | /python/GIT_kalman/sensor_fusion.py | 2493d88ccf181528a1d19279f9fa1c9303a3e82c | [] | no_license | genius-Y/RFID_localization | 45412acd79821618bfd44f04644e85009404f01f | f683cc63e525dcbdcd947930f0130e802c3b9f41 | refs/heads/master | 2021-08-30T20:59:18.640200 | 2017-12-19T12:29:48 | 2017-12-19T12:29:48 | 115,241,000 | 0 | 1 | null | 2017-12-24T04:58:55 | 2017-12-24T04:58:54 | null | UTF-8 | Python | false | false | 2,305 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sensor_fusion.py
#
# Copyright 2017 Nishant <Nishant@NISHANT>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from numpy import array, asarray
from numpy.random import randn
from filterpy.kalman import KalmanFilter
import matplotlib.pyplot as plt
import numpy as np
import book_plots as bp
import numpy.random as random
def fusion_test(wheel_sigma, ps_sigma, do_plot=True):
    """Fuse two position sensors with a single Kalman filter and plot.

    Simulates an object moving at 1 m/s whose position is measured by a
    wheel-odometry sensor and a second position sensor, each with its own
    noise level, then runs a 2-state (position, velocity) Kalman filter
    over both measurements at once.

    Args:
        wheel_sigma: measurement noise std-dev of the wheel sensor.
        ps_sigma: measurement noise std-dev of the position sensor.
        do_plot: kept for interface compatibility; plotting always runs.
    """
    dt = 0.1
    # Two measurements per step, both of the position component.
    kf = KalmanFilter(dim_x=2, dim_z=2)
    kf.F = array([[1., dt], [0., 1.]])  # constant-velocity transition
    kf.H = array([[1., 0.], [1., 0.]])  # each sensor observes position
    kf.x = array([[0.], [1.]])
    # BUG FIX: the original used `kf.Q *= array(...)`, which multiplies the
    # identity Q element-wise and silently zeroes the explicitly written
    # (dt**2)/2 off-diagonal terms.  Assign the discrete white-noise
    # process matrix directly instead.
    kf.Q = array([[(dt**3)/3, (dt**2)/2],
                  [(dt**2)/2, dt       ]]) * 0.02
    kf.P *= 100
    kf.R[0, 0] = wheel_sigma**2
    kf.R[1, 1] = ps_sigma**2
    random.seed(1123)
    xs, zs, nom = [], [], []
    for i in range(1, 100):
        # Ground truth is position == i; corrupt with each sensor's noise.
        m0 = i + randn()*wheel_sigma
        m1 = i + randn()*ps_sigma
        z = array([[m0], [m1]])
        kf.predict()
        kf.update(z)
        xs.append(kf.x.T[0])
        zs.append(z.T[0])
        nom.append(i)
    xs = array(xs)
    zs = array(zs)
    nom = array(nom)
    res = nom - xs[:, 0]  # residual of the filtered position estimate
    #print('fusion std: {:.3f}'.format(np.std(res)))
    ts = np.arange(0.1, 10, .1)
    bp.plot_measurements(ts, zs[:, 0], label='Wheel')
    plt.plot(ts, zs[:, 1], ls='--', label='Pos Sensor')
    bp.plot_filter(ts, xs[:, 0], label='Kalman filter')
    plt.legend(loc=4)
    plt.ylim(0, 100)
    bp.set_labels(x='time (sec)', y='meters')
    plt.show()
fusion_test(1.5, 3.0)
| [
"nishant2006in@gmail.com"
] | nishant2006in@gmail.com |
33673f2e8184b5c2020942244cf7cd80e01a59c1 | ad0e853db635edc578d58891b90f8e45a72a724f | /python/ray/train/_internal/checkpoint.py | 82c25febb8b0d79b04a829842a8758c8d245538a | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | ericl/ray | 8c93fc713af3b753215d4fe6221278700936e2db | e9a1c6d814fb1a81033809f56695030d651388f5 | refs/heads/master | 2023-08-31T11:53:23.584855 | 2023-06-07T21:04:28 | 2023-06-07T21:04:28 | 91,077,004 | 2 | 4 | Apache-2.0 | 2023-01-11T17:19:10 | 2017-05-12T09:51:04 | Python | UTF-8 | Python | false | false | 11,154 | py | import os
import logging
from pathlib import Path
from typing import Callable, Dict, List, Optional, Type, Union
from ray.air import Checkpoint, CheckpointConfig, session
from ray.air._internal.checkpoint_manager import CheckpointStorage
from ray.air._internal.checkpoint_manager import (
_CheckpointManager as CommonCheckpointManager,
)
from ray.air._internal.checkpoint_manager import _TrackedCheckpoint
from ray.train._internal.session import TrainingResult
from ray.train._internal.utils import construct_path
from ray.train.constants import (
CHECKPOINT_RANK_KEY,
TRAIN_CHECKPOINT_SUBDIR,
TUNE_CHECKPOINT_ID,
TUNE_INSTALLED,
CHECKPOINT_METADATA_KEY,
LAZY_CHECKPOINT_MARKER_FILE,
)
from ray.air.constants import TIMESTAMP
# Tune is an optional dependency: fall back to ``None`` so Tune-specific
# code paths fail only if they are actually exercised.
if TUNE_INSTALLED:
    from ray import tune
else:
    tune = None
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def load_checkpoint_from_path(checkpoint_to_load: Union[str, Path]) -> Checkpoint:
    """Load a :class:`Checkpoint` from a directory on disk.

    Raises:
        ValueError: if the (user-expanded) path does not exist.
    """
    resolved = Path(checkpoint_to_load).expanduser()
    if resolved.exists():
        return Checkpoint.from_directory(str(resolved))
    raise ValueError(f"Checkpoint path {resolved} does not exist.")
class CheckpointManager(CommonCheckpointManager):
    """Manages checkpoint processing, writing, and loading.

    A ``checkpoints`` directory is created in the ``run_dir`` and contains
    all the checkpoint files.

    The full default path will be:
    ``~/ray_results/train_<datestring>/run_<run_id>/checkpoints/checkpoint_<checkpoint_id>``

    Attributes:
        latest_checkpoint_dir: Path to the file directory for the
            checkpoints from the latest run. Configured through
            ``start_training``.
        latest_checkpoint_filename: Filename for the latest checkpoint.
        next_checkpoint_path: Path to the next checkpoint to persist from
            the latest run.
        best_checkpoint_path: Path to the best persisted checkpoint from
            the latest run.
        latest_checkpoint_id: The id of the most recently saved checkpoint.
        latest_checkpoint: The latest saved checkpoint. This checkpoint may
            not be saved to disk.
    """

    # Tells the common base manager to persist checkpoints that were
    # reported in memory.
    _persist_memory_checkpoints = True

    def __init__(
        self,
        run_dir: Optional[Path] = None,
        checkpoint_strategy: Optional[CheckpointConfig] = None,
    ):
        self.run_dir = run_dir
        super().__init__(checkpoint_strategy=checkpoint_strategy)
        self._validate_checkpoint_strategy()

    def _validate_checkpoint_strategy(self):
        # Default the scoring attribute to the report timestamp so that
        # "best checkpoint" degenerates to "most recent" when the user
        # supplies no metric.
        if self._checkpoint_strategy.checkpoint_score_attribute is None:
            self._checkpoint_strategy.checkpoint_score_attribute = TIMESTAMP

    def _load_checkpoint(
        self, checkpoint_to_load: Optional[Union[Dict, str, Path, Checkpoint]]
    ) -> Optional[Checkpoint]:
        """Load the checkpoint dictionary from the input dict or path."""
        if checkpoint_to_load is None:
            return None
        if isinstance(checkpoint_to_load, Dict):
            return Checkpoint.from_dict(checkpoint_to_load)
        if isinstance(checkpoint_to_load, Checkpoint):
            return checkpoint_to_load
        else:
            # Load checkpoint from path.
            return load_checkpoint_from_path(checkpoint_to_load)

    def _process_checkpoint(
        self,
        checkpoint_result: TrainingResult,
        decode_checkpoint_fn: Callable,
    ) -> _TrackedCheckpoint:
        """Turn one worker's reported TrainingResult into a tracked checkpoint."""
        checkpoint_data = checkpoint_result.data
        checkpoint_metadata = checkpoint_result.metadata or {}
        checkpoint_rank = checkpoint_metadata.get(CHECKPOINT_RANK_KEY, 0)
        if isinstance(checkpoint_data, str):
            # The worker reported a directory path: rehydrate it with the
            # concrete Checkpoint subclass recorded in the metadata.
            checkpoint_class: Type[Checkpoint] = checkpoint_metadata[
                CHECKPOINT_METADATA_KEY
            ].checkpoint_type
            checkpoint_data = checkpoint_class.from_directory(checkpoint_data)
            checkpoint_data._metadata = checkpoint_metadata[CHECKPOINT_METADATA_KEY]
        else:
            # TODO(ml-team): Remove once we remove Backend.decode_data
            checkpoint_data = decode_checkpoint_fn(checkpoint_data)
        score_attr = self._checkpoint_strategy.checkpoint_score_attribute
        # When checkpoints are ranked/kept by a metric, that metric must
        # have been included in the worker's report.
        if (
            self._checkpoint_strategy.num_to_keep != 0
            and score_attr not in checkpoint_metadata
        ):
            raise ValueError(
                f"Unable to persist checkpoint for "
                f"checkpoint_score_attribute: "
                f"{score_attr}. "
                f"Include this attribute in the call to "
                f"`session.report()`."
            )
        return _TrackedCheckpoint(
            dir_or_data=checkpoint_data,
            checkpoint_id=self._latest_checkpoint_id,
            storage_mode=CheckpointStorage.MEMORY,
            metrics={score_attr: checkpoint_metadata.get(score_attr, 0.0)},
            rank=checkpoint_rank,
        )

    def _process_checkpoints(
        self,
        checkpoint_results: List[TrainingResult],
        decode_checkpoint_fn: Callable,
    ) -> None:
        """Ray Train entrypoint. Perform all processing for a checkpoint."""
        if self._checkpoint_strategy._checkpoint_keep_all_ranks:
            # Keep one tracked checkpoint per reporting worker.
            tracked_checkpoints = [
                self._process_checkpoint(checkpoint_result, decode_checkpoint_fn)
                for checkpoint_result in checkpoint_results
            ]
        else:
            # Get checkpoint from first worker.
            tracked_checkpoints = [
                self._process_checkpoint(checkpoint_results[0], decode_checkpoint_fn)
            ]
        self.register_checkpoints(checkpoints=tracked_checkpoints)

    def _get_next_checkpoint_path(self) -> Optional[Path]:
        """Path to the next checkpoint to persist."""
        checkpoint_path = _construct_checkpoint_path_name(
            self._latest_checkpoint_id + 1
        )
        return self.latest_checkpoint_dir.joinpath(checkpoint_path)

    def on_start_training(
        self,
        checkpoint_strategy: Optional[CheckpointConfig],
        run_dir: Path,
        latest_checkpoint_id: Optional[int] = 0,
    ):
        # (Re)configure the manager at the start of a training run.
        checkpoint_strategy = checkpoint_strategy or CheckpointConfig()
        self._checkpoint_strategy = checkpoint_strategy
        self._validate_checkpoint_strategy()
        self.run_dir = run_dir
        self._latest_checkpoint_id = latest_checkpoint_id or 0

    # Train-specific attributes
    @property
    def latest_checkpoint(self):
        # May be None before the first checkpoint is registered.
        if not self._latest_memory_checkpoint:
            return None
        return self._latest_memory_checkpoint.dir_or_data

    @property
    def latest_checkpoint_dir(self) -> Optional[Path]:
        """Path to the latest checkpoint directory."""
        checkpoint_dir = Path(TRAIN_CHECKPOINT_SUBDIR)
        return construct_path(checkpoint_dir, self.run_dir)

    @property
    def latest_checkpoint_file_name(self) -> Optional[str]:
        """Filename to use for the latest checkpoint."""
        if self._latest_checkpoint_id > 0:
            return _construct_checkpoint_path_name(self._latest_checkpoint_id)
        else:
            return None

    @property
    def next_checkpoint_path(self) -> Optional[Path]:
        """Path to the next checkpoint to persist."""
        checkpoint_file = _construct_checkpoint_path_name(
            self._latest_checkpoint_id + 1
        )
        return self.latest_checkpoint_dir.joinpath(checkpoint_file)

    @property
    def best_checkpoint_path(self) -> Optional[Path]:
        """Path to the best persisted checkpoint."""
        if self._best_persisted_checkpoint:
            return Path(self._best_persisted_checkpoint.dir_or_data)
        else:
            return None

    @property
    def latest_checkpoint_id(self) -> Optional[int]:
        """The checkpoint id of most recently saved checkpoint.

        If no checkpoint has been saved yet, then return None.
        """
        checkpoint_id = self._latest_checkpoint_id
        if checkpoint_id == 0:
            return None
        else:
            return checkpoint_id
class TuneCheckpointManager(CheckpointManager):
    """CheckpointManager variant used when training runs under Ray Tune.

    Delegates persistence to ``tune.checkpoint_dir`` and tags checkpoints
    with a Tune checkpoint id so interrupted trials can be resumed.
    """

    def __init__(
        self,
        run_dir: Optional[Path] = None,
        checkpoint_strategy: Optional[CheckpointConfig] = None,
    ):
        super().__init__(run_dir, checkpoint_strategy)
        # Name of the marker dropped by the Trainable. If a worker detects
        # the presence of the marker in the trial dir, it will use lazy
        # checkpointing.
        self._lazy_marker_path = None
        if tune.is_session_enabled():
            self._lazy_marker_path = (
                Path(session.get_trial_dir()) / LAZY_CHECKPOINT_MARKER_FILE
            )
            # Touch the marker file (contents are irrelevant).
            with open(self._lazy_marker_path, "w"):
                pass

    def _load_checkpoint(
        self, checkpoint_to_load: Optional[Union[Dict, str, Path, Checkpoint]]
    ) -> Optional[Union[Dict, Checkpoint]]:
        loaded_checkpoint = super()._load_checkpoint(checkpoint_to_load)
        assert not loaded_checkpoint or isinstance(loaded_checkpoint, Checkpoint)
        # `latest_checkpoint_id` will be the id assigned to the next checkpoint,
        # which should be one more than the loaded checkpoint's id
        # If no checkpoint is loaded, initialize this to 0
        self._latest_checkpoint_id = (
            getattr(loaded_checkpoint, TUNE_CHECKPOINT_ID, -1) + 1
        )
        return loaded_checkpoint

    def add_tune_checkpoint_id(self, checkpoint: Checkpoint):
        # Store the checkpoint_id in the file so that the Tune trial can be
        # resumed after failure or cancellation.
        setattr(checkpoint, TUNE_CHECKPOINT_ID, self._latest_checkpoint_id)

    def _process_persistent_checkpoint(self, checkpoint: _TrackedCheckpoint):
        self.add_tune_checkpoint_id(checkpoint.dir_or_data)
        # Train may choose not to commit a checkpoint, but make sure the
        # checkpoint is always committed for Tuning purpose.
        # After this is committed, checkpoint.dir_or_path will become a string,
        # which will prevent this checkpoint from being commtted again in the
        # subsequent super()._process_persistent_checkpoint() call.
        with tune.checkpoint_dir(step=self._latest_checkpoint_id) as checkpoint_dir:
            path = Path(checkpoint_dir)
            checkpoint.commit(path)
        return super()._process_persistent_checkpoint(checkpoint)

    @property
    def latest_checkpoint_dir(self) -> Optional[Path]:
        # Tune owns checkpoint directories; this is intentionally unsupported.
        raise NotImplementedError

    @property
    def next_checkpoint_path(self) -> Optional[Path]:
        return None

    def _get_next_checkpoint_path(self) -> Optional[Path]:
        return None

    def __del__(self):
        # Best-effort cleanup of the lazy-checkpointing marker file.
        try:
            assert self._lazy_marker_path
            os.remove(str(self._lazy_marker_path))
        except Exception:
            pass
        return super().__del__()
def _construct_checkpoint_path_name(checkpoint_id: int) -> str:
return f"checkpoint_{checkpoint_id:06d}"
| [
"noreply@github.com"
] | ericl.noreply@github.com |
63e5371afc1cc3437b3bb7655c3a94e25a2e7ffb | d7e64b27bf2d039e27d3fa28cbe9ba501777dba3 | /ex4_3.py | cf80e789f0ee31daf77807310c02cb5804b208d0 | [] | no_license | midfielder16/Pypyhihi | 3ac371e5a9af18bc2e0d0517edc4cd03ea9094af | b66fba9441fe5315f1fe826380610da48ddbf279 | refs/heads/master | 2021-05-13T21:29:28.618011 | 2018-01-06T08:06:12 | 2018-01-06T08:06:12 | 116,465,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | #!/usr/bin/env python3
def solve(words):
    """Return a list with the "score" of each word in ``words``.

    Scoring is case-insensitive: a=1, b=2, ..., z=26, and a word's score
    is the sum of its letters' values, so ``attitude`` scores 100.
    Non-alphabetic characters raise ValueError, as in the original.
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    return [
        sum(letters.index(ch) + 1 for ch in word.lower())
        for word in words
    ]
def main():
    """Print the scores for a fixed demo list of words."""
    demo = ['numpy', 'django', 'saltstack', 'discipline',
            'Python', 'FAMILUG', 'pymi']
    print(solve(demo))
if __name__ == "__main__":
main()
| [
"phamdai0102@gmail.com"
] | phamdai0102@gmail.com |
50d7896ca2a3fd81c7a3a5b423c105fc094df359 | 0f2112a0e198cb0275c002826854c836bbfb5bdf | /pywicta/image/__init__.py | ebc8b9794d95d617edc05784841fc62efa089799 | [
"MIT"
] | permissive | jeremiedecock/pywi-cta | a7f98ae59beb1adecb25623153c13e5bc70e5560 | 1185f7dfa48d60116472c12ffc423be78a250fc9 | refs/heads/master | 2021-04-15T12:06:03.723786 | 2019-03-21T02:33:15 | 2019-03-21T02:33:15 | 126,397,380 | 0 | 1 | MIT | 2018-10-16T12:17:52 | 2018-03-22T21:31:45 | Python | UTF-8 | Python | false | false | 158 | py | """Image functions
This package contains additional image processing functions.
"""
from . import hillas_parameters
from . import signal_to_border_distance
| [
"jd.jdhp@gmail.com"
] | jd.jdhp@gmail.com |
af58e053d0181b09b9a4ce94a927ba7d70a13686 | 8e527d9cce7373b50fa549c5463da0a26f6f65bb | /数组/面试题3题目二.py | ce45ef6e2e68597d3309edcafce548f1a1dcd2dc | [] | no_license | GarvenYu/Algorithm-DataStructure | 5888551d02ce1843020b4184b6d97e258fd9dd6b | f66500f22315eb8cfd3c2c84a6627def09560c22 | refs/heads/master | 2020-03-23T03:03:59.018969 | 2019-05-17T01:06:19 | 2019-05-17T01:06:19 | 141,007,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!usr/bin/env python
# -*- coding: UTF-8 -*-
data_list = [2,3,4,5,1,2]
def find_duplication(data_list):
    """Find a duplicated value in ``data_list`` without modifying it.

    Assumes the list has length n and every value lies in [1, n-1], so at
    least one value must repeat (pigeonhole).  Binary-searches on the
    *value* range: count how many elements fall into each half; the half
    holding more elements than it has distinct values contains a
    duplicate.  O(n log n) time, O(1) extra space.

    Returns:
        The duplicated value (also printed, preserving the original
        behavior), -1 if no duplicate exists in the searched range, or
        the string 'list is null' for an empty input (kept for backward
        compatibility).
    """
    if not data_list:
        return 'list is null'

    def occurrences(lo, hi):
        # Number of elements of data_list whose value lies in [lo, hi].
        return sum(1 for value in data_list if lo <= value <= hi)

    start = 1
    end = len(data_list) - 1
    while end >= start:
        mid = (end - start) // 2 + start
        count = occurrences(start, mid)
        if end == start:
            # Range narrowed to a single value; duplicated iff it occurs
            # more than once.
            duplication = start if count > 1 else -1
            print('重复元素 %d' % duplication)
            # BUG FIX: the original printed the result but returned None;
            # now the duplicate is also returned to the caller.
            return duplication
        if count > (mid - start + 1):
            # More elements in [start, mid] than distinct slots: the
            # duplicate is in the lower half.
            end = mid
        else:
            start = mid + 1
def count_range(data_list, start, end):
    """Count how many elements of ``data_list`` lie in [start, end].

    Runs in O(n); used by find_duplication once per bisection step.
    """
    return sum(1 for value in data_list if start <= value <= end)
def find_duplication_more_time(data_list):
    """Print each duplicated element with its occurrence count.

    Brute-force O(n^2) variant; note it prints once per *occurrence* of a
    duplicated value, matching the original behavior.
    """
    for value in data_list:
        occurrences = data_list.count(value)
        if occurrences > 1:
            print('重复元素 %d 重复次数 %d' % (value, occurrences))
def find_duplication_change(data_list):
    """Print a duplicated value by rehoming each value v to index v.

    Mutates ``data_list`` in place.  Assumes every value is a valid index
    (0 <= v < len(data_list)); otherwise an IndexError is possible.
    O(n) time, O(1) extra space.
    """
    for idx in range(len(data_list)):
        # Keep swapping until slot idx holds the value idx.
        while data_list[idx] != idx:
            value = data_list[idx]
            if value == data_list[value]:
                # value already occupies its home slot: it is a duplicate.
                print('重复元素 %d' % value)
                break
            data_list[idx], data_list[value] = data_list[value], value
# Exercise the implementations on the shared demo list.
find_duplication(data_list)
find_duplication_more_time(data_list)
find_duplication_change(data_list) | [
"1747603726@qq.com"
] | 1747603726@qq.com |
c7e51a7fe2300853ae4c4b706fcaf24f7d19f4fa | 0e2a4b3254978e6664a1fa1b216f81e26c46cbed | /configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco_mm2021_official.py | 2f24b29ebd62857e222d66baf551b48857076ee2 | [
"Apache-2.0"
] | permissive | Yannik-H/mm2021 | 34f4b38f71c7e15fbf6b9b5fd0ac814b459d0a05 | c8afb3e138c0097148c86b66e51a72e1acc4550d | refs/heads/main | 2023-05-28T01:29:40.006142 | 2021-06-18T04:43:48 | 2021-06-18T04:43:48 | 370,870,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,814 | py | model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=515,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.1,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
CLASSES = ('Jansport', 'molsion', 'guess', 'Goodbaby', 'coach', 'meizu', 'cocacola', 'moncler', 'qingyang', 'zippo', 'lego', 'decathlon', 'adidas', 'seiko', 'vrbox', 'moschino', 'palmangels', 'uniqlo', 'bose', 'baishiwul', 'rayban', 'd_wolves', 'laneige', 'hotwind', 'skechers', 'anta', 'kingston', 'wuliangye', 'disney', 'pinkfong', 'lancome', 'Versace', 'FGN', 'fortnite', 'titoni', 'innisfree', 'levis', 'robam', 'dior', 'GUND', 'paulfrank', 'wodemeilirizhi', 'thehistoryofwhoo', 'bejirog', 'mg', 'VANCLEEFARPELS', 'Stussy', 'alexandermcqueen', 'inman', 'nikon', 'dove', 'jiangshuweishi', 'durex', 'thombrowne', 'emerson', 'erdos', 'iwc', 'Anna_sui', 'nanjiren', 'emiliopucci', 'ugg', 'vacheronconstantin', 'gloria', '3M', 'bally', 'asics', 'lamborghini', 'dyson', 'christopher_kane', 'basichouse', 'casio', 'moco', 'acne', 'ysl', 'aptamil', 'BASF', 'okamoto', 'FridaKahlo', 'Specialized', 'bolon', 'jack_wolfskin', 'jeep', 'cartier', 'mlb', 'jimmythebull', 'zhejiangweishi', 'jeanrichard', 'stuartweitzman', 'baleno', 'montblanc', 'guerlain', 'cainiaoguoguo', 'bear', 'monsterenergy', 'Aquabeads', 'marcjacobs', 'ELLE', 'nfl', 'Levis_AE', 'chigo', 'snoopy', 'hla', 'jimmychoo', 'otterbox', 'simon', 'lovemoschino', 'armani', 'playboy', 'sulwhasoo', 'lv', 'dkny', 'vatti', 'lenovo', 'offwhite', 'eddrac', 'semir', 'ihengima', 'panerai', 'sergiorossi', 'mulberry', 'tissot', 'parker', 'loreal', 'columbia', 'Lululemon', 'samsung', 'liquidpalisade', 'Amii', '3concepteyes', 'miffy', 'vancleefarpels', 'lachapelle', 'kobelco', 'PATAGONIA', 'theexpendables', 'lincoln', 'chloe', 'jnby', 'rapha', 'beautyBlender', 'gentlemonster', 'chaumet', 'banbao', 'vans', 'linshimuye', 'shaxuan', 'liangpinpuzi', 'lux', 'stanley', 'philips', 'brioni', 'hp', 'edwin', 'peskoe', 'eral', 'pantene', 'gree', 'nxp', 'bandai', 'shelian', 'HarleyDavidson_AE', 'abercrombiefitch', 'goldlion', 'keds', 'samanthathavasa', 'nintendo', 'be_cheery', 'mujosh', 'anessa', 'snidel', 'erke', 'furla', 'Josiny', 'tomford', 
'jaegerlecoultre', 'dissona', 'wodemeiliriji', 'brabus', 'moony', 'gucci', 'miumiu', 'vanguard', 'THINKINGPUTTY', 'LAMY', 'bobdog', 'pigeon', 'celine', 'bulgari', 'shiseido', 'joyong', 'vlone', 'dell', 'deli', 'canon', 'karenwalker', 'musenlin', 'volcom', 'amass', 'SANDVIK', 'dhc', 'mcm', 'GOON', 'bvlgari', 'beats', 'ny', 'ports', 'omron', 'only', 'razer', 'siemens', 'clinique', 'ccdd', 'zara', 'esteelauder', 'OTC', 'blackberry', 'bottegaveneta', 'suzuki', 'yili', 'fsa', 'jackjones', 'wonderflower', 'MaxMara', 'nissan', 'makeupforever', 'hublot', 'belle', 'jissbon', 'monchichi', 'youngor', 'PopSockets', 'hengyuanxiang', 'motorhead', 'mistine', 'jeanswest', 'versace', 'chromehearts', 'HUGGIES', 'Belif', 'aux', 'office', 'ferragamo', 'arsenal', 'yonghui', 'Yamaha', 'converse', 'sk2', 'evisu', 'newbalance', 'thermos', 'camel', 'KielJamesPatrick', 'alibaba', 'rimowa', 'newera', 'anello', 'flyco', 'LG', 'longines', 'dolcegabbana', 'YEARCON', 'mentholatum', 'VW', 'uno', 'peacebird', 'Miss_sixty', 'toryburch', 'cdgplay', 'hisense', 'fjallraven', 'mindbridge', 'katespade', 'nike', 'metersbonwe', 'chaoneng', 'zhoudafu', 'seven7', 'PXG', 'haier', 'headshoulder', 'loewe', 'safeguard', 'CanadaGoose', 'Jmsolution', 'mac', 'hellokitty', 'Thrasher', 'zebra', 'emblem', 'girdear', 'KTM', 'alexanderwang', 'metallica', 'ThinkPad', 'moussy', 'tiantainwuliu', 'leader', 'angrybirds', 'thenorthface', 'kipling', 'dazzle', 'bioderma', 'grumpycat', 'avene', 'longchamp', 'tesla', 'wechat', 'cree', 'chenguang', 'vivo', 'ochirly', 'walmart', 'manchesterunited', 'ecco', 'doraemon', 'toshiba', 'tencent', 'eland', 'juicycouture', 'swarovski', 'VDL', 'supor', 'moutai', 'ironmaiden', 'konka', 'intel', 'burberry', 'septwolves', 'nipponpaint', 'HARRYPOTTER', 'Montblanc', 'fila', 'pepsicola', 'citizen', 'airjordan', 'fresh', 'TOUS', 'balenciaga', 'omega', 'fendi', 'honda', 'xiaomi', 'oakley', 'FESTO', 'ahc', 'CommedesGarcons', 'perfect', 'darlie', 'OralB', 'kappa', 'instantlyageless', 'OPPO', 
'royalstar', 'esprit', 'tommyhilfiger', 'olay', 'kanahei', 'Levistag', '361du', 'lee', 'onitsukatiger', 'henkel', 'miui', 'michael_kors', 'Aape', 'leaders', 'libai', 'hunanweishi', 'Auby', 'asus', 'nestle', 'rolex', 'barbie', 'PawPatrol', 'tata', 'chowtaiseng', 'markfairwhale', 'puma', 'Herschel', 'joeone', 'baojianshipin', 'naturerepublic', 'kans', 'prada', 'kiehls', 'piaget', 'toread', 'bosideng', 'castrol', 'apple', 'buick', 'ck', 'mobil', 'lanvin', 'Bosch', 'chanel', 'cpb', 'wanda', 'hermes', 'patekphilippe', 'toray', 'toyota', 'lindafarrow', 'peppapig', 'lacoste', 'gap', 'porsche', 'Mexican', 'christianlouboutin', 'goldsgym', 'heronpreston', 'UnderArmour', 'warrior', 'benz', 'Duke', 'lets_slim', 'huawei', 'volvo', 'rejoice', 'TommyHilfiger', 'versacetag', 'pierrecardin', 'tries', 'sandisk', 'veromoda', 'Y-3', 'yuantong', 'ford', 'beaba', 'lining', 'stdupont', 'hotwheels', 'teenagemutantninjaturtles', 'montagut', 'hollister', 'panasonic', 'hikvision', 'hugoboss', 'ThomasFriends', 'skf', 'MANGO', 'miiow', 'DanielWellington', 'hera', 'tagheuer', 'starbucks', 'KOHLER', 'baishiwuliu', 'gillette', 'beijingweishi', 'diesel', 'pandora', 'sony', 'tumi', 'etam', 'CHAMPION', 'tcl', 'arcteryx', 'aokang', 'kboxing', 'kenzo', 'audi', 'mansurgavriel', 'house_of_hello', 'pampers', 'opple', 'samsonite', 'nanoblock', 'xtep', 'charles_keith', 'CCTV', 'PJmasks', 'threesquirrels', 'Dickies', 'tudor', 'goyard', 'pinarello', 'tiffany', 'lanyueliang', 'daphne', 'nba', 'SUPERME', 'juzui', 'MURATA', 'valentino', 'bmw', 'franckmuller', 'zenith', 'oldnavy', 'sum37', 'holikaholika', 'girardperregaux', 'bull', 'PINKFLOYD', 'zhoushengsheng', 'givenchy', 'baidu', 'nanfu', 'skyworth', 'snp', 'tsingtao', 'MCM', '3t', 'hyundai', 'jiaodan', 'Budweiser', 'triangle', 'satchi', 'lexus', 'balabala', 'teenieweenie', 'midea', 'FivePlus', 'reddragonfly', 'ralphlauren')
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=3,
workers_per_gpu=3,
train=dict(
type=dataset_type,
ann_file=data_root + 'coco_mm2021/annotations/train.json',
img_prefix=data_root + 'coco_mm2021/train_all/',
pipeline=train_pipeline,
classes=CLASSES),
val=dict(
type=dataset_type,
ann_file=data_root + 'coco_mm2021/annotations/val.json',
img_prefix=data_root + 'coco_mm2021/train_all/',
pipeline=test_pipeline,
classes=CLASSES),
test=dict(
type=dataset_type,
ann_file=data_root + 'coco_mm2021/annotations/test.json',
img_prefix=data_root + 'coco_mm2021/test/',
pipeline=test_pipeline,
classes=CLASSES))
# Evaluate on the validation split every epoch with COCO bbox mAP.
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# Step LR schedule: linear warmup for 5k iters, decay at epochs 16 and 22.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=5000,
    warmup_ratio=0.0001,
    step=[16, 22])
# "2x" schedule in mmdetection terms.
total_epochs = 24
# Save a checkpoint after every epoch.
checkpoint_config = dict(interval=1)
# Log every iteration to both plain text and TensorBoard.
log_config = dict(
    interval=1,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
custom_hooks = [dict(type='NumClassCheckHook')]
# Distributed training backend.
dist_params = dict(backend='nccl')
log_level = 'INFO'
# Start from scratch: no pretrained detector weights, no resume point.
load_from = None
resume_from = None
workflow = [('train', 1)]
work_dir = "/data/mmdet/ACM_MM_2021/faster_rcnn_r101_fpn_2x_coco_mm2021_official"
gpu_ids = range(0, 8) | [
"huangyuning@megvii.com"
] | huangyuning@megvii.com |
936ac1a26cc0f0c3c4098e4dab5068c152183601 | 786de89be635eb21295070a6a3452f3a7fe6712c | /root/tags/V00-03-00/SConscript | 79a8398cab91d01f66746f757727ba8c866b37e9 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package root
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
# SCons injects shared build variables into this script's namespace.
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving some or all parameters.
#
# ROOT version this package links against.
root_ver = "5.34.25"
# Install prefix of the external ROOT build (one variant per Python version).
PREFIX = pjoin('$SIT_EXTERNAL_SW', "root", root_ver + "-$PYTHON")
INCDIR = "include/root"
LIBDIR = "lib"
LINKLIBS = "lib*.so*"
# ROOT component libraries exposed to dependent packages.
PKGLIBS = "Core Cint RIO Net Hist Graf Graf3d Gpad Tree Rint Postscript Matrix Physics MathCore Thread m dl"
BINDIR = "bin"
LINKBINS = "root root.exe rootcint root-config"
PYDIR = "lib"
LINKPY = "*.py libPyROOT.so*"
# standardExternalPackage reads the configuration variables defined above
# directly out of locals().
standardExternalPackage('root', **locals())
| [
"gapon@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | gapon@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 | |
38a7fca7aa7911336a605a76d7fe26a7822d15be | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_galleries_operations.py | 78b2fc6273345d2b9c8cba61fa9f1031eb1862b9 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 23,103 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic type variable for the deserialized response payload.
T = TypeVar('T')
# Signature of the optional ``cls`` callback used to transform responses.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleriesOperations:
    """GalleriesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2018_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: This class is generated by AutoRest (see the file header); manual
    # edits will be lost when the client code is regenerated.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery: "_models.Gallery",
        **kwargs
    ) -> "_models.Gallery":
        # Initial PUT of the long-running create/update operation; the public
        # entry point is begin_create_or_update() below.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Gallery"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(gallery, 'Gallery')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200, 201 and 202 each carry a serialized Gallery body.
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('Gallery', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Gallery', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Gallery', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery: "_models.Gallery",
        **kwargs
    ) -> AsyncLROPoller["_models.Gallery"]:
        """Create or update a Shared Image Gallery.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery. The allowed characters are alphabets
         and numbers with dots and periods allowed in the middle. The maximum length is 80 characters.
        :type gallery_name: str
        :param gallery: Parameters supplied to the create or update Shared Image Gallery operation.
        :type gallery: ~azure.mgmt.compute.v2018_06_01.models.Gallery
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Gallery or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Gallery]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Gallery"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # No continuation token: issue the initial PUT now; with a token we
        # instead resume polling from the saved state further below.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery=gallery,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('Gallery', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        gallery_name: str,
        **kwargs
    ) -> "_models.Gallery":
        """Retrieves information about a Shared Image Gallery.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery.
        :type gallery_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Gallery, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2018_06_01.models.Gallery
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Gallery"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Gallery', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        **kwargs
    ) -> None:
        # Initial DELETE of the long-running delete operation; the public
        # entry point is begin_delete() below.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Accepted status codes for the delete request; no body to deserialize.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        gallery_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Delete a Shared Image Gallery.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery to be deleted.
        :type gallery_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # No continuation token: issue the initial DELETE now; with a token we
        # instead resume polling from the saved state further below.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.GalleryList"]:
        """List galleries under a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GalleryList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.GalleryList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string, so no extra params.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('GalleryList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries'}  # type: ignore
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.GalleryList"]:
        """List galleries under a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GalleryList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.GalleryList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string, so no extra params.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('GalleryList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries'}  # type: ignore
| [
"noreply@github.com"
] | hivyas.noreply@github.com |
615fef9648aee32aaeb84628d396b43e34836624 | bcc8d79a101f72d325c7a5d48332fbf7940f6c43 | /tests/runtests.py | fc9cf2447a4dc7b7a476e476a2f6beeb7a0b16db | [
"BSD-Attribution-HPND-disclaimer",
"OLDAP-2.8"
] | permissive | jammann/cyrus-sasl | 143aa38b9214b18179f39b687628d5d19c25b9c3 | e1b60cff154da38f01877cec9cb9ec4a43bf268b | refs/heads/master | 2021-06-12T01:12:57.638825 | 2021-02-07T09:33:45 | 2021-05-13T11:05:54 | 190,206,306 | 0 | 0 | NOASSERTION | 2019-06-04T13:23:40 | 2019-06-04T13:23:39 | null | UTF-8 | Python | false | false | 8,377 | py | #!/usr/bin/python3
import argparse
import base64
import os
import shutil
import signal
import subprocess
import time
from string import Template
def setup_socket_wrappers(testdir):
    """Prepare socket_wrapper/nss_wrapper so tests can fake host networking.

    Creates the wrapper work directory and a fake hosts file mapping
    127.0.0.9 to host.realm.test, then returns the environment variables
    needed to preload both wrapper libraries.

    Raises Exception if pkg-config cannot find either wrapper library.
    """
    wrapdir = os.path.join(testdir, 'w')
    os.makedirs(wrapdir)
    # Both wrapper libraries must be discoverable through pkg-config.
    for pkg, label in (('socket_wrapper', 'Socket Wrappers'),
                       ('nss_wrapper', 'NSS Wrappers')):
        check = subprocess.run(['pkg-config', '--exists', pkg])
        if check.returncode != 0:
            raise Exception('{} not available'.format(label))
    hosts = os.path.join(wrapdir, 'hosts')
    with open(hosts, 'w+') as conffile:
        conffile.write('127.0.0.9 host.realm.test')
    return {'LD_PRELOAD': 'libsocket_wrapper.so libnss_wrapper.so',
            'SOCKET_WRAPPER_DIR': wrapdir,
            'SOCKET_WRAPPER_DEFAULT_IFACE': '9',
            'NSS_WRAPPER_HOSTNAME': 'host.realm.test',
            'NSS_WRAPPER_HOSTS': hosts}
KERBEROS_CONF = '''
[libdefaults]
default_realm = REALM.TEST
dns_lookup_realm = false
dns_lookup_kdc = false
rdns = false
ticket_lifetime = 24h
forwardable = yes
default_ccache_name = FILE://${TESTDIR}/ccache
udp_preference_limit = 1
[domain_realm]
.realm.test = REALM.TEST
realm.test = REALM.TEST
[realms]
REALM.TEST = {
kdc = 127.0.0.9
admin_server = 127.0.0.9
acl_file = ${TESTDIR}/kadm.acl
dict_file = /usr/share/dict/words
admin_keytab = ${TESTDIR}/kadm.keytab
database_name = ${TESTDIR}/kdc.db
key_stash_file = ${TESTDIR}/kdc.stash
}
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[logging]
kdc = FILE:${TESTDIR}/kdc.log
admin_server = FILE:${TESTDIR}/kadm.log
default = FILE:${TESTDIR}/krb5.log
'''
def setup_kdc(testdir, env):
    """Initialize a throwaway Kerberos realm and start a KDC process.

    Renders the krb5/KDC configuration under *testdir*, creates the
    REALM.TEST database, starts ``krb5kdc`` in its own session, and
    provisions a client principal ("user") and a service principal
    ("test/host.realm.test") with keytabs.

    Returns a ``(kdc_process, env)`` tuple; *env* is updated with
    KRB5_CONFIG, KRB5_CLIENT_KTNAME and KRB5_KTNAME so child processes
    pick them up.
    """
    krbconf = os.path.join(testdir, 'krb.conf')
    env['KRB5_CONFIG'] = krbconf
    kenv = {'KRB5_KDC_PROFILE': krbconf,
            'PATH': '/sbin:/bin:/usr/sbin:/usr/bin'}
    kenv.update(env)
    # KDC/KRB5 CONFIG
    templ = Template(KERBEROS_CONF)
    text = templ.substitute({'TESTDIR': testdir})
    with open(krbconf, 'w+') as conffile:
        conffile.write(text)
    testlog = os.path.join(testdir, 'kdc.log')
    # Keep the log handle in a context manager so it is closed once setup
    # completes (the previous code leaked the file descriptor).
    with open(testlog, 'a') as log:
        subprocess.check_call([
            "kdb5_util", "create",
            "-r", "REALM.TEST", "-s", "-P", "password"
        ], stdout=log, stderr=log, env=kenv, timeout=5)
        # preexec_fn=os.setsid puts the KDC in its own process group so the
        # caller can tear it down with os.killpg().
        kdc = subprocess.Popen(['krb5kdc', '-n'], env=kenv,
                               preexec_fn=os.setsid)
        time.sleep(5)
        # Add a user and generate a keytab
        keytab = os.path.join(testdir, "user.keytab")
        subprocess.check_call([
            "kadmin.local", "-q",
            "addprinc -randkey user"
        ], stdout=log, stderr=log, env=kenv, timeout=5)
        subprocess.check_call([
            "kadmin.local", "-q",
            "ktadd -k {} user".format(keytab)
        ], stdout=log, stderr=log, env=kenv, timeout=5)
        env['KRB5_CLIENT_KTNAME'] = keytab
        # Add a service and generate a keytab
        keytab = os.path.join(testdir, "test.keytab")
        subprocess.check_call([
            "kadmin.local", "-q",
            "addprinc -randkey test/host.realm.test"
        ], stdout=log, stderr=log, env=kenv, timeout=5)
        subprocess.check_call([
            "kadmin.local", "-q",
            "ktadd -k {} test/host.realm.test".format(keytab)
        ], stdout=log, stderr=log, env=kenv, timeout=5)
        env['KRB5_KTNAME'] = keytab
    return kdc, env
def _start_pair(kenv, srv_extra=(), cli_extra=()):
    """Launch the GSSAPI server helper then the client helper and wait for both.

    Waits for the server to print its readiness line before starting the
    client.  Returns the two finished Popen objects ``(cli, srv)``; on a
    wait timeout both processes are killed first (their returncode is then
    ``None``, which callers treat as a failure).
    """
    srv = subprocess.Popen(["../tests/t_gssapi_srv"] + list(srv_extra),
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, env=kenv)
    srv.stdout.readline()  # Wait for srv to say it is ready
    cli = subprocess.Popen(["../tests/t_gssapi_cli"] + list(cli_extra),
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, env=kenv)
    try:
        cli.wait(timeout=5)
        srv.wait(timeout=5)
    except Exception as e:
        print("Failed on {}".format(e))
        cli.kill()
        srv.kill()
    return cli, srv

def gssapi_basic_test(kenv):
    """SASL/GSSAPI: plain client/server authentication must succeed."""
    try:
        cli, srv = _start_pair(kenv)
        if cli.returncode != 0 or srv.returncode != 0:
            raise Exception("CLI ({}): {} --> SRV ({}): {}".format(
                cli.returncode, cli.stderr.read().decode('utf-8'),
                srv.returncode, srv.stderr.read().decode('utf-8')))
    except Exception as e:
        print("FAIL: {}".format(e))
        return
    print("PASS: CLI({}) SRV({})".format(
        cli.stdout.read().decode('utf-8').strip(),
        srv.stdout.read().decode('utf-8').strip()))

def gssapi_channel_binding_test(kenv):
    """SASL/GSSAPI: authentication with matching channel bindings succeeds."""
    try:
        bindings = base64.b64encode("MATCHING CBS".encode('utf-8'))
        cli, srv = _start_pair(kenv, srv_extra=("-c", bindings),
                               cli_extra=("-c", bindings))
        if cli.returncode != 0 or srv.returncode != 0:
            raise Exception("CLI ({}): {} --> SRV ({}): {}".format(
                cli.returncode, cli.stderr.read().decode('utf-8'),
                srv.returncode, srv.stderr.read().decode('utf-8')))
    except Exception as e:
        print("FAIL: {}".format(e))
        return
    print("PASS: CLI({}) SRV({})".format(
        cli.stdout.read().decode('utf-8').strip(),
        srv.stdout.read().decode('utf-8').strip()))

def gssapi_channel_binding_mismatch_test(kenv):
    """SASL/GSSAPI: mismatched channel bindings must make authentication fail."""
    result = "FAIL"
    try:
        srv_bindings = base64.b64encode("SRV CBS".encode('utf-8'))
        cli_bindings = base64.b64encode("CLI CBS".encode('utf-8'))
        cli, srv = _start_pair(kenv, srv_extra=("-c", srv_bindings),
                               cli_extra=("-c", cli_bindings))
        if cli.returncode != 0 or srv.returncode != 0:
            cli_err = cli.stderr.read().decode('utf-8').strip()
            srv_err = srv.stderr.read().decode('utf-8').strip()
            # The expected outcome: the server rejects the client.
            if "authentication failure" in srv_err:
                result = "PASS"
            raise Exception("CLI ({}): {} --> SRV ({}): {}".format(
                cli.returncode, cli_err, srv.returncode, srv_err))
    except Exception as e:
        print("{}: {}".format(result, e))
        return
    print("FAIL: This test should fail [CLI({}) SRV({})]".format(
        cli.stdout.read().decode('utf-8').strip(),
        srv.stdout.read().decode('utf-8').strip()))
def gssapi_tests(testdir):
    """Run all SASL/GSSAPI test scenarios against a scratch KDC.

    Sets up socket/NSS wrappers and a throwaway Kerberos realm, runs the
    three client/server scenarios, then tears the KDC process group down.
    """
    env = setup_socket_wrappers(testdir)
    kdc, kenv = setup_kdc(testdir, env)
    kenv['KRB5_TRACE'] = os.path.join(testdir, 'trace.log')
    print('GSSAPI BASIC:')
    print('    ', end='')
    gssapi_basic_test(kenv)
    print('GSSAPI CHANNEL BINDING:')
    print('    ', end='')
    gssapi_channel_binding_test(kenv)
    # Heading typo fixed: was 'MISMTACH'.
    print('GSSAPI CHANNEL BINDING MISMATCH:')
    print('    ', end='')
    gssapi_channel_binding_mismatch_test(kenv)
    # krb5kdc was started via setsid, so signal its whole process group.
    os.killpg(kdc.pid, signal.SIGTERM)
if __name__ == "__main__":
    # Command-line entry point: pick (or reset) the scratch test directory
    # and run the GSSAPI suite inside it.
    parser = argparse.ArgumentParser(description='Cyrus SASL Tests')
    parser.add_argument('--testdir',
                        default=os.path.join(os.getcwd(), '.tests'),
                        help="Directory for running tests")
    testdir = vars(parser.parse_args())['testdir']
    # Always start from an empty directory.
    if os.path.exists(testdir):
        shutil.rmtree(testdir)
    os.makedirs(testdir)
    gssapi_tests(testdir)
| [
"quanah@symas.com"
] | quanah@symas.com |
baa999a066d99e042ba17c78e03035a147bb3e8a | b60fcc546c96167f344cf1d9b594f22665033b07 | /hackerearth/monk_welcome.py | fb43c33b6405157fb5453cf67016a9123b9b52b8 | [] | no_license | sajidhasan/codehub | b5d6ef9cdc973f10e8366a490559c9723bdbfb81 | 09a3afbf9a50bf0b6707e4b68a9c520956cadc4c | refs/heads/master | 2021-01-19T17:54:37.686732 | 2017-04-15T12:57:43 | 2017-04-15T12:57:43 | 88,346,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | n = int(input())
# Read two rows of n integers each and print their pairwise sums on one
# line, space separated.  (n itself is read on the preceding line.)
n1 = [int(x) for x in input().split()]
n2 = [int(y) for y in input().split()]
for a, b in zip(n1[:n], n2[:n]):
    print(a + b, end=" ")
| [
"md.sajidhasan@yahoo.com"
] | md.sajidhasan@yahoo.com |
3708646594f6b5faaae779b645a6d86b6c963022 | d25d76e1f260d77c0c269a1f2be7ea687ce3b4f3 | /DjangoProject/webtoonList/urls.py | 5b2919f65f353bbb3e088ab36e398b26f1ba0e2f | [] | no_license | ParkJeongseop/MyPrecious-Webtoon | bcc384fa38bf55958315db237652bc3dfa89d723 | 1d37a9c9806131561b81ef312c453480956c0754 | refs/heads/master | 2022-04-19T04:04:36.648099 | 2020-04-18T10:36:53 | 2020-04-18T10:36:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from django.contrib import admin
from django.urls import path
from . import views
# Namespace for reverse() lookups, e.g. reverse('webtoonList:rated').
app_name = "webtoonList"
# Route table: each list/subscription endpoint maps to its view in views.py.
urlpatterns = [
    path('rated_list/', views.Rated, name='rated'),
    path('rating_list/', views.Rating, name='rating'),
    path('search_list/', views.Search, name='search'),
    path('random_list/', views.Random, name='random'),
    path('subscribe/', views.subscribe, name='subscribe'),
    path('subscribe_list/', views.subscribe_list, name='subscribe_list'),
]
| [
"gjdigj@naver.com"
] | gjdigj@naver.com |
c6a61039657070e5790c9afa76b3ff803ab6d2c2 | d4d52da302b3139e6a41fd818b0d226f1af7f253 | /src/profile.py | 820d0e8ece78f31eb2f4628e7676f724fd082587 | [
"Apache-2.0"
] | permissive | Bmcgarry194/presume | 80873cfd6fde1abe98d63c2e449cfb0dde0be78f | 736c988000d705309eedb42c721cd11a6fcc894d | refs/heads/master | 2020-03-28T16:35:37.505054 | 2018-08-28T21:12:28 | 2018-08-28T21:12:28 | 148,710,769 | 0 | 0 | Apache-2.0 | 2018-09-13T23:43:49 | 2018-09-13T23:43:48 | null | UTF-8 | Python | false | false | 1,521 | py | """Profile class object
"""
import datetime as dt
class Profile(object):
    """A resume profile: contact details plus skills and history sections.

    Mutator methods update ``_touched_date`` so callers can tell when the
    profile was last modified.
    """
    def __init__(self, name, phone_num=None, email=None):
        """Create a profile for *name* with optional phone number and email."""
        self.name = name
        self.email = email
        self.phone_num = phone_num
        self.address = {}  # dict with street, city, state, zip as keys
        self.skills = set()  # set of tuples
        self.education = []  # list of dicts
        self.socialmedia = []  # list of dicts
        self.description = []  # list of dicts
        self.misc = []  # list of dicts with label for obj type
        self._created_date = dt.datetime.today()
        self._touched_date = dt.datetime.today()
        print(f"Profile created for {self.name} at {self._created_date}.")
    def __repr__(self):
        return f"Profile(name={self.name}, created={self._created_date})"
    def edit_address(self, street=None, city=None, state=None, zip=None):
        """Update any provided address fields; unset fields are left alone.

        Values are stored as strings.  The ``zip`` parameter name is kept
        for backward compatibility with keyword callers even though it
        shadows the builtin inside this method.
        """
        # Map field name -> supplied value explicitly instead of eval()-ing
        # parameter names (eval was fragile and a security smell).
        updates = {'street': street, 'city': city, 'state': state, 'zip': zip}
        for field, value in updates.items():
            if value is not None:
                self.address[field] = str(value)
        self._touched_date = dt.datetime.today()
    def edit_skills(self, skill_list):
        """Add every skill in *skill_list* to the skill set (duplicates ignored)."""
        self.skills.update(set(skill_list))
        self._touched_date = dt.datetime.today()  # consistent with edit_address
    def edit_education(self, skill_list):
        # TODO: not yet implemented.
        pass
    def edit_socialmedia(self, skill_list):
        # TODO: not yet implemented.
        pass
    def edit_description(self, skill_list):
        # TODO: not yet implemented.
        pass
    def edit_misc(self, skill_list):
        # TODO: not yet implemented.
        pass
| [
"mnguyenngo@gmail.com"
] | mnguyenngo@gmail.com |
ea7268caec41e72c88d0e6f39b48cd9c882ae543 | 2e4ea01ecefa389b0894d60a61c956b293c7ce24 | /first/venv/Lib/site-packages/pydash/helpers.py | ab87826f02749a9adec268b5c73257913bada890 | [] | no_license | anndrozd4321/diplomchik | b24fcd4fc0b5c57416540459d18eb5a105c8bf3f | cfd67fc1ce8003670db3666e2598cc4f0f3c7210 | refs/heads/master | 2023-05-31T08:17:55.451502 | 2021-06-01T17:35:43 | 2021-06-01T17:35:43 | 372,907,977 | 0 | 0 | null | 2021-06-05T21:44:50 | 2021-06-01T17:15:44 | Python | UTF-8 | Python | false | false | 8,667 | py | # -*- coding: utf-8 -*-
"""Generic utility methods not part of main API."""
from __future__ import absolute_import
from functools import wraps
import inspect
from operator import attrgetter, itemgetter
import warnings
import pydash as pyd
from ._compat import PY2, Iterable, Mapping, Sequence, getfullargspec, iteritems, string_types
# Sentinel class: callers compare against the NoValue singleton below with
# ``is`` to detect "argument not supplied".
class _NoValue(object):
    """
    Represents an unset value.
    Used to differentiate between an explicit ``None`` and an unset value.
    """
    pass
#: Singleton object that differentiates between an explicit ``None`` value and
#: an unset value.
NoValue = _NoValue()
def callit(iteratee, *args, **kwargs):
    """Invoke `iteratee` with only as many positional arguments as it supports.

    The supported arity is taken from the ``argcount`` keyword when given,
    otherwise inspected via :func:`getargcount`.
    """
    total = len(args)
    if "argcount" in kwargs:
        argcount = kwargs["argcount"]
    else:
        argcount = getargcount(iteratee, total)
    return iteratee(*args[: min(total, argcount)])
def getargcount(iteratee, maxargs):
    """Return argument count of iteratee function.

    Precedence: an explicit ``_argcount`` attribute, then 1 for types and
    builtins, then runtime inspection (capped by callers at `maxargs`).
    """
    if hasattr(iteratee, "_argcount"):
        # Optimization feature where argcount of iteratee is known and properly
        # set by initiator.
        return iteratee._argcount
    if isinstance(iteratee, type) or pyd.is_builtin(iteratee):
        # Only pass single argument to type iteratees or builtins.
        argcount = 1
    else:
        # Default to 1 in case inspection fails below.
        argcount = 1
        try:
            argcount = _getargcount(iteratee, maxargs)
        except TypeError:  # pragma: no cover
            # PY2: Python2.7 throws a TypeError on classes that have __call__() defined but Python3
            # doesn't. So if we fail with TypeError here, try iteratee as iteratee.__call__.
            if PY2 and hasattr(iteratee, "__call__"):  # noqa: B004
                try:
                    argcount = _getargcount(iteratee.__call__, maxargs)
                except TypeError:
                    pass
    return argcount
def _getargcount(iteratee, maxargs):
    # Resolution order: inspect.signature (most accurate), then
    # getfullargspec, finally fall back to assuming `maxargs`.
    argcount = None
    try:
        # PY2: inspect.signature was added in Python 3.
        # Try to use inspect.signature when possible since it works better for our purpose of
        # getting the iteratee argcount since it takes into account the "self" argument in callable
        # classes.
        sig = inspect.signature(iteratee)
    except (TypeError, ValueError, AttributeError):
        pass
    else:  # pragma: no cover
        # A *args parameter means "any number of arguments", so only trust
        # the signature when there is no VAR_POSITIONAL parameter.
        if not any(
            param.kind == inspect.Parameter.VAR_POSITIONAL for param in sig.parameters.values()
        ):
            argcount = len(sig.parameters)
    if argcount is None:
        argspec = getfullargspec(iteratee)
        if argspec and not argspec.varargs:  # pragma: no cover
            # Use inspected arg count.
            argcount = len(argspec.args)
    if argcount is None:
        # Assume all args are handleable.
        argcount = maxargs
    return argcount
def iteriteratee(obj, iteratee=None, reverse=False):
    """Yield ``(result, item, key, obj)`` tuples by applying `iteratee` to `obj`.

    When `iteratee` is None the identity function is used; otherwise it is
    normalized through ``pyd.iteratee``. With ``reverse=True`` items are
    visited in reverse order (the iterable is materialized to do so).
    """
    if iteratee is None:
        cbk = pyd.identity
        argcount = 1
    else:
        cbk = pyd.iteratee(iteratee)
        # Iteratees may accept up to three arguments: (item, key, obj).
        argcount = getargcount(cbk, maxargs=3)
    items = iterator(obj)
    if reverse:
        items = reversed(tuple(items))
    for key, item in items:
        yield (callit(cbk, item, key, obj, argcount=argcount), item, key, obj)
def iterator(obj):
    """Return a ``(key, value)`` iterator appropriate for `obj`'s type.

    Dicts and dict-likes yield their items, other iterables are enumerated,
    and arbitrary objects fall back to iterating their ``__dict__``.
    """
    if isinstance(obj, dict):
        return iteritems(obj)
    if hasattr(obj, "iteritems"):
        return obj.iteritems()  # noqa: B301
    if hasattr(obj, "items"):
        return iter(obj.items())
    if isinstance(obj, Iterable):
        return enumerate(obj)
    return iteritems(getattr(obj, "__dict__", {}))
def base_get(obj, key, default=NoValue):
    """
    Safely get an item by `key` from a sequence, mapping, or plain object.

    Args:
        obj (list|dict|object): Sequence, mapping, or object to retrieve item from.
        key (mixed): Key, index, or attribute name identifying which item to retrieve.
        default (mixed, optional): Value to return if `key` is not found in `obj`.
            When omitted (left as the ``NoValue`` sentinel), a missing key raises
            ``KeyError`` instead.

    Returns:
        mixed: `obj[key]`, `obj.key`, or `default`.

    Raises:
        KeyError: If `obj` is missing key, index, or attribute and no default value provided.
    """
    if isinstance(obj, dict):
        value = _base_get_dict(obj, key, default=default)
    elif not isinstance(obj, (Mapping, Sequence)) or (
        isinstance(obj, tuple) and hasattr(obj, "_fields")
    ):
        # Don't use getattr for dict/list objects since we don't want class methods/attributes
        # returned for them but do allow getattr for namedtuple.
        value = _base_get_object(obj, key, default=default)
    else:
        value = _base_get_item(obj, key, default=default)
    if value is NoValue:
        # Raise if there's no default provided.
        raise KeyError('Object "{0}" does not have key "{1}"'.format(repr(obj), key))
    return value
def _base_get_dict(obj, key, default=NoValue):
    """Look up `key` in dict `obj`, retrying with ``int(key)`` only on a miss.

    BUG FIX: the integer-key fallback previously ran unconditionally (flattened
    indentation), so an already-found value could be overwritten — e.g.
    ``{'1': 'x'}`` looked up with ``'1'`` returned `default`. It now runs only
    when the original key is absent.
    """
    value = obj.get(key, NoValue)
    if value is NoValue:
        value = default
        if not isinstance(key, int):
            # Try integer key fallback.
            try:
                value = obj.get(int(key), default)
            except Exception:
                pass
    return value
def _base_get_item(obj, key, default=NoValue):
    """Try ``obj[key]``, then ``obj[int(key)]`` for non-int keys, else `default`."""
    result = default
    try:
        result = obj[key]
    except Exception:
        if not isinstance(key, int):
            # Retry with an integer index; int() itself may also fail here.
            try:
                result = obj[int(key)]
            except Exception:
                pass
    return result
def _base_get_object(obj, key, default=NoValue):
    """Get `key` from object `obj` via item access first, then attribute access.

    BUG FIX: attribute access previously ran unconditionally (flattened
    indentation) and could overwrite a value that item access had already
    found. ``getattr`` is now attempted only when item access missed.
    """
    value = _base_get_item(obj, key, default=NoValue)
    if value is NoValue:
        value = default
        try:
            value = getattr(obj, key)
        except Exception:
            pass
    return value
def base_set(obj, key, value, allow_override=True):
    """
    Assign `value` to `obj` at `key`.

    Dicts get a plain key assignment; lists are index-assigned, padding with
    ``None`` and appending when `key` is at or beyond the current end; any
    other (non-None) object receives the value via ``setattr``. When
    `allow_override` is false, existing entries/attributes are left untouched.

    Args:
        obj (list|dict|object): Object to assign value to.
        key (mixed): Key or index to assign to.
        value (mixed): Value to assign.

    Returns:
        mixed: `obj`, mutated in place.
    """
    if isinstance(obj, dict):
        if allow_override or key not in obj:
            obj[key] = value
        return obj
    if isinstance(obj, list):
        index = int(key)
        if index < len(obj):
            if allow_override:
                obj[index] = value
        else:
            if index > len(obj):
                # Pad with None so the appended value lands at `index`.
                obj[:] = (obj + [None] * index)[:index]
            obj.append(value)
        return obj
    if obj is not None and (allow_override or not hasattr(obj, key)):
        setattr(obj, key, value)
    return obj
def parse_iteratee(iteratee_keyword, *args, **kwargs):
    """Locate an iteratee passed either via keyword or as the last positional arg.

    Returns a ``(iteratee, args)`` tuple; when the iteratee came from the
    positional arguments it is removed from the returned `args`.
    """
    iteratee = kwargs.get(iteratee_keyword)
    candidate = args[-1]
    if iteratee is None and (
        callable(candidate)
        or isinstance(candidate, string_types)
        or isinstance(candidate, dict)
        or candidate is None
    ):
        return (candidate, args[:-1])
    return (iteratee, args)
class iterator_with_default(object):
    """A wrapper around an iterator object that provides a default.

    Note: ``__next__`` evaluates ``self.next_default()`` eagerly as the
    fallback argument to ``next()``, so the default is consumed on the *first*
    call regardless of whether the iterator still has items. The default is
    therefore only ever yielded when the wrapped collection is empty.
    """
    def __init__(self, collection, default):
        self.iter = iter(collection)  # underlying iterator
        self.default = default  # yielded only if the very first next() misses
    def __iter__(self):
        return self
    def next_default(self):
        """Return the stored default once; subsequent calls return ``NoValue``."""
        ret = self.default
        self.default = NoValue
        return ret
    def __next__(self):
        # next_default() runs before next(), consuming the default here.
        ret = next(self.iter, self.next_default())
        if ret is NoValue:
            raise StopIteration
        return ret
    # PY2 compatibility alias.
    next = __next__
def deprecated(func):  # pragma: no cover
    """
    Decorator marking `func` as deprecated: each call to the wrapped function
    emits a ``DeprecationWarning`` before delegating to the original.
    """
    message = "Call to deprecated function {0}.".format(func.__name__)

    @wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(message, category=DeprecationWarning, stacklevel=3)
        return func(*args, **kwargs)

    return wrapper
| [
"8729194@gmail.com"
] | 8729194@gmail.com |
ee7fa4bdcb42b27e7553046d7b66df83a48854d0 | d443822ac386c57631c76957f71119aaaee401a1 | /52.34.tamrin3.part2.jalase8.py | b71d51e0e42a0bac57b93988101808a402dce9da | [] | no_license | pani-ps1/pythonclass-sematech-1400-2-30 | f21760b017c649e13690b7418143ce47f1e9359d | 27e12c5ccdb641330613837c0da9b1823b51f460 | refs/heads/main | 2023-06-23T04:03:03.396908 | 2021-07-20T12:58:31 | 2021-07-20T12:58:31 | 371,335,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from time import time
start = time()
#code here
num = int(input("Enter a number: "))

def _is_prime(n):
    """Return True when n (> 1) has no divisor other than 1 and itself."""
    if n <= 1:
        return False
    # Checking up to sqrt(n) suffices: any factor pair has one member <= sqrt(n).
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True

# Messages are kept identical to the original script's output.
if _is_prime(num):
    print(num, "is a prime number")
else:
    print(num, "is not a prime number")
print(f'Time taken to run: {time() - start} seconds')
| [
"noreply@github.com"
] | pani-ps1.noreply@github.com |
13503ebcc55265b979496e9ada75c127158884ff | 2016c8a303357460f001c9af2f871cde630da2a1 | /app/BasicFlask/bello.py | 4a2b0fe0ed0d31a118b7d10cee8327a607733f83 | [] | no_license | JinalKothari7/FlaskProject | 65878d29457e5571c561ca7aa4f3a59b0d76b653 | ed57885e1d7c13f2487312238b3c172c2b21d6f2 | refs/heads/master | 2020-04-03T15:12:33.632958 | 2018-10-30T09:24:25 | 2018-10-30T09:24:25 | 155,355,032 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from flask import Flask, redirect, url_for
app = Flask(__name__)

@app.route("/admin")
def hello_admin():
    """Admin landing page."""
    return "Administrator area, user not allowed"

@app.route('/guest/<guest>')
def hello_guest(guest):
    """Greet a guest user by name (no admin rights)."""
    return 'Guest user %s not having admin rights' % guest

@app.route('/user/<name>')
def hello_user(name):
    """Dispatch /user/<name>: 'admin' redirects to the admin view, everyone
    else to the guest view. Note url_for() resolves by *function name*, so the
    view function names above must not change."""
    if name=='admin':
        return redirect(url_for('hello_admin'))
    else:
        return redirect(url_for('hello_guest',guest=name))

if __name__ == "__main__":
    app.run(debug=True)  # debug mode: auto-reload + interactive debugger
| [
"jinalkothari7@gmail.com"
] | jinalkothari7@gmail.com |
76a23d2b0081abeaf686956c012f6b0d7e1e4ec9 | 17a125437085b56ec88f45b8af702deee81df8d5 | /scriptRedeNeuralv3.py | f7dd9250217013acc73501b8b94e840a8775b422 | [] | no_license | NikollasGabriel/CNN-Cancer | ed6315dbc9953af8cab4d1fb3c664d52fd1fdbed | 978966ad79cf24aa502888f1539452d4510e4087 | refs/heads/master | 2020-09-07T14:02:34.111266 | 2019-11-10T21:17:01 | 2019-11-10T21:17:01 | 220,803,122 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | from PIL import Image, ImageOps
import numpy as np
import os
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
imagens = []
class_names = []  # ground-truth class name taken from each image's parent folder
path = 'C:/Users/BlueTerror/Desktop/CNN_Cancer/Base/'
for folder in os.listdir(path):
    for file in os.listdir(os.path.join(path, folder)):
        desired_size = 128
        im = Image.open(path + folder + '/' + file)
        old_size = im.size  # old_size[0] is in (width, height) format
        ratio = float(desired_size)/max(old_size)
        new_size = tuple([int(x*ratio) for x in old_size])
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS there) — kept as-is for the original environment.
        im = im.resize(new_size, Image.ANTIALIAS)
        # Paste the resized image centered on a square grayscale canvas,
        # then equalize the histogram.
        new_im = Image.new("L", (desired_size, desired_size), (255))
        new_im.paste(im, ((desired_size-new_size[0])//2,(desired_size-new_size[1])//2))
        new_im = ImageOps.equalize(new_im)
        imagens.append(new_im)
        class_names.append(folder)
imagens = np.array(imagens, dtype='float') / 255.0
# BUG FIX: the labels must be the per-image class names collected above.
# Previously `np.array(labels)` wrapped the LabelEncoder *object* itself, so
# no real labels were ever encoded. Convert class names to int labels:
labels = preprocessing.LabelEncoder()
train_vals = np.array(class_names)
int_encoded = labels.fit_transform(train_vals)
# Reshape to a column vector to prepare for one-hot encoding.
reshape_intEncoded = int_encoded.reshape(-1,1)
X_train, X_valid, y_train, y_valid = train_test_split(imagens, reshape_intEncoded, test_size=0.1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
# assumes exactly two class folders exist under `path` — TODO confirm
y_train = to_categorical(y_train, num_classes=2)
y_valid = to_categorical(y_valid, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)
"nikollas_gabriel@hotmail.com"
] | nikollas_gabriel@hotmail.com |
1682304c26e922c51a7a97e58a3082bfd379714f | d630b63b2917d6ef4b82dde06e4ac1e9df9cdc0b | /GUI - Max/tkinter_test.py | 30d7948504a1dfb9931e0a6202fa9dc0eafcb6d4 | [] | no_license | ProgrammeringOTGDKlassen/D-klassen-Programmering | eb0a42cb2f8b5128075d640855c39358bc62e151 | 9f19ab33fb0769d1b9e507fea4182cab0eccfcfc | refs/heads/master | 2023-03-11T09:48:09.889450 | 2023-02-23T11:23:06 | 2023-02-23T11:23:06 | 161,041,550 | 5 | 0 | null | 2020-05-03T22:10:28 | 2018-12-09T13:20:45 | Python | UTF-8 | Python | false | false | 1,249 | py | import tkinter as tk
class My_GUI(tk.Frame):
    """Demo Tk frame that lays out rows of buttons in nested sub-frames."""
    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.build_GUI()
    def change(self):
        # NOTE(review): self.lbl_name is never created because its assignment
        # in build_GUI() is commented out below; calling this method would
        # raise AttributeError.
        self.lbl_name.config(text = "Her er din mor kommet, AAAAAAAAAAAD!")
    def build_GUI(self):
        """Build the widget hierarchy: a left column of buttons (V) and a
        right container (H) holding a top button row (HT) and bottom area (HB)."""
        self.pack(side = tk.BOTTOM)
        self.V = tk.Frame(self)     # left column container
        self.H = tk.Frame(self)     # right container
        self.HT = tk.Frame(self.H)  # right / top row
        self.HB = tk.Frame(self.H)  # right / bottom area
        self.V.pack(side = tk.LEFT)
        self.H.pack(side = tk.RIGHT)
        self.HT.pack(side = tk.TOP)
        self.HB.pack(side = tk.BOTTOM)
        for i in range(4):
            b = tk.Button(self.V, text = 'Du er fucking grim luder')
            b.pack(side = tk.TOP)
        for i in range(2):
            b = tk.Button(self.HT, text = 'hej med dig, hej med dig, hej')
            b.pack(side = tk.TOP)
        # NOTE(review): the canvas is created but never packed, so it is not shown.
        c = tk.Canvas(self.HB, width = 500, height = 500)
        # self.lbl_name = tk.Label(self, text = 'Her står der jeiner i en label')
        # self.but_change = tk.Button(self, text = 'Hej med dig', command = self.change, background = '#DB0000' )
        # self.lbl_name.pack(side = tk.TOP)
        # self.but_change.pack(side = tk.TOP)
# Instantiate the GUI (creates a default Tk root) and enter the event loop.
app = My_GUI()
app.mainloop()
"maxx0758@gmail.com"
] | maxx0758@gmail.com |
36afed350dc654a83b8e472afb0b6814cc4e4512 | 4d6dc5b6ea15e6527cc3f27a56cd9018caaee2dc | /Toodoose/urls.py | 5e43dc20c16e8daad317900650a9c817f9094aab | [] | no_license | sparkle6596/Todoose-django | fee2206b68e5b6111c96d4157e3a924a1842c82c | d0a79be28520b1cc51ea2c210d722fe1cd4c9e3d | refs/heads/main | 2023-06-26T15:37:21.816774 | 2021-08-01T14:32:12 | 2021-08-01T14:32:12 | 391,650,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """Toodoose URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('todoo/', include("todoo.urls"))  # delegate /todoo/ URLs to the todoo app
]
| [
"ambilykr07@gmail.com"
] | ambilykr07@gmail.com |
802d5bd849ffd09f8d9ff24045318b72d5c13469 | 7b8536dae0eb5be7dbb9fde766b9f3d6a339d452 | /indicators/core/tests/test_core_utils.py | 5c682136fbbdd9b68dc768c27a65c3a535f5eebd | [] | no_license | EURITO/Pivot | b441c04f135afd1ef1fe162a2ad6ba5f84110a1d | 30269b62d118756f0e0436b3d90714d65ebe06f9 | refs/heads/master | 2023-05-08T11:32:21.415112 | 2021-05-27T10:24:01 | 2021-05-27T10:24:01 | 270,629,833 | 0 | 0 | null | 2021-05-27T10:24:02 | 2020-06-08T10:43:31 | Python | UTF-8 | Python | false | false | 773 | py | from unittest import mock
from indicators.core.core_utils import object_getter
def test_object_getter():
    """object_getter yields the module's objects unchanged when not geo-split."""
    expected = ["1", "two", "THREE"]
    fake_module = mock.MagicMock()
    fake_module.get_objects.return_value = expected
    assert next(object_getter(fake_module)) == expected
@mock.patch("indicators.core.core_utils.get_geo_lookup")
def test_object_getter_split(mocked_lookup):
    """With geo_split=True, one boolean membership mask is yielded per country."""
    fake_module = mock.MagicMock()
    fake_module.get_objects.return_value = [
        {"id": "1"},
        {"id": "two"},
        {"id": "THREE"},
    ]
    mocked_lookup.return_value = {"FR": {"1", "THREE"}, "DE": {"two", "1"}}
    expected = [
        ([True, False, True], "FR"),
        ([True, True, False], "DE"),
    ]
    assert list(object_getter(fake_module, geo_split=True)) == expected
"noreply@github.com"
] | EURITO.noreply@github.com |
2b9678f7feffadc0f01155374b2199b363accae9 | 9b08877236508bff591c93fa9f27a327d24d023a | /model2.py | 5b2d4a7aa821ab1988e6c69c2d3f92ec769beb32 | [
"MIT"
] | permissive | EuridiceX/Behavioral-Cloning | 10953d79d13a18a2562798654450f4a38fec2a5c | f460daa251c896a3f27581fa144c8be85625c634 | refs/heads/master | 2022-04-04T11:31:09.766930 | 2020-02-03T09:49:26 | 2020-02-03T09:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,614 | py | #importing libraries
import csv
import os
import cv2
import sklearn
import numpy as np
from math import ceil
from random import shuffle
from keras.models import Sequential
from keras.layers import Flatten,Dense,Conv2D
from keras.layers import Lambda,Dropout
from keras.layers.pooling import MaxPooling2D
from keras.regularizers import l1
from keras.layers import Cropping2D
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Collect the recorded samples from driving_log.csv (one row per frame:
# center/left/right image paths plus steering angle etc.).
samples = []
with open('./data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader)  # skip the CSV header row
    for line in reader:
        samples.append(line)
# Split data: 80% training / 20% validation.
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
#use a generator to load data and preprocess it on the fly
def generator(samples, batch_size=32):
    """Yield shuffled (images, angles) batches indefinitely for Keras fit_generator.

    Each CSV sample contributes three images (center/left/right camera) with a
    steering correction of +/-0.2 applied to the side cameras, so every yielded
    batch holds up to 3 * batch_size images.
    """
    num_samples = len(samples)
    correction=0.2
    while 1: # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # Rebuild local image paths from the recorded absolute paths.
                name = './data/IMG/'+batch_sample[0].split('/')[-1]
                name_left='./data/IMG/'+batch_sample[1].split('/')[-1]
                name_right='./data/IMG/'+batch_sample[2].split('/')[-1]
                center_image =cv2.imread(name)
                left_image =cv2.imread(name_left)
                right_image =cv2.imread(name_right)
                #print(center_image.shape)
                # cv2 loads BGR; convert to RGB to match drive-time input.
                center_image=cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)
                #flipp the center image
                #image_flipped = np.fliplr( center_image)
                left_image=cv2.cvtColor(left_image, cv2.COLOR_BGR2RGB)
                right_image=cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB)
                center_angle = float(batch_sample[3])
                #add flipped steering angle
                #measurement_flipped = -center_angle
                left_angle=center_angle+correction
                right_angle=center_angle-correction
                images.append(center_image)
                #images.append(image_flipped)
                images.append(left_image)
                images.append(right_image)
                angles.append(center_angle)
                #angles.append(measurement_flipped)
                angles.append(left_angle)
                angles.append(right_angle)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
# Set our batch size
batch_size=32
# NOTE(review): ch/row/col are defined but never used below — confirm before removing.
ch, row, col = 3, 80, 320
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
#define the model
model=Sequential()
# normalization: scale pixel values into [-0.5, 0.5]
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
# crop 50 px of sky (top) and 25 px of hood (bottom) to keep only the road
model.add(Cropping2D(cropping=((50,25),(0,0))))
# NVIDIA end-to-end driving architecture: 5 conv layers + 4 dense layers
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="elu"))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="elu"))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="elu"))
model.add(Conv2D(64, (3, 3), activation="elu"))
model.add(Conv2D(64, (3, 3), activation="elu"))
model.add(Flatten())
model.add(Dense(100,activation='elu'))
#add a regularization method
model.add(Dropout(0.25))
model.add(Dense(50,activation='elu'))
model.add(Dense(10,activation='elu'))
model.add(Dense(1))
# regression output (steering angle): MSE loss with Adam optimizer
model.compile(loss='mse',optimizer='adam')
#store the loss values during the training
history_object=model.fit_generator(train_generator,
            steps_per_epoch=ceil(len(train_samples)/batch_size),
            validation_data=validation_generator,
            validation_steps=ceil(len(validation_samples)/batch_size),
            epochs=5, verbose=1)
#save the model
model.save('model.h5')
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('model2_flipped.png')
"julie.botnari@gmail.com"
] | julie.botnari@gmail.com |
afbec97b1c9d34f73ceab3845b07f37693580dcc | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nncolman.py | b4eb8ea4b99751f343c4618a341e4a0f9d07a3cc | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 64 | py | ii = [('WadeJEB.py', 4), ('ClarGE3.py', 15), ('HogaGMM2.py', 7)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
27619945df92c6b3707340826fc65800128307f7 | d32a72999274016ab1e7a10832b623fdd6c0e9fa | /server.py | 5d812e7eb55cfc97a78ea0927d08d6bbfbdae12f | [
"MIT"
] | permissive | sarayut-u/pyload01 | e4c603d1b2c8bc69eb0d93db0b7efa8e6fdb3b96 | 404d2fcd9a0a7951276f748f7041b9fb28ae97ab | refs/heads/master | 2020-07-07T16:41:30.518367 | 2019-08-20T16:04:32 | 2019-08-20T16:04:32 | 203,409,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | #!/usr/bin/python3
import threading
import time
import requests
exitFlag = 0
class myThread (threading.Thread):
    """Worker thread that polls the IP echo API `counter` times via seeip_api()."""
    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID  # numeric id (informational only; not used in run())
        self.name = name          # label used in log output
        self.counter = counter    # number of HTTP requests to perform
    def run(self):
        seeip_api(self.name, self.counter)
def seeip_api(threadName, counter):
    """Poll the IP echo endpoint `counter` times, printing each JSON response.

    `threadName` is only used as a log label. A truthy module-level `exitFlag`
    aborts the remaining iterations.
    """
    while counter:
        if exitFlag:
            # BUG FIX: the original called `threadName.exit()`, but threadName
            # is a plain string, so that would raise AttributeError. Simply
            # return to stop this worker's loop.
            return
        response = requests.get('http://34.80.123.23/api/myip')
        print ( "[%s] API/JSON response is %s" % ( threadName, response.json () ) )
        counter -= 1
def main():
    """Generate workload to http container
    in somewhere.

    Spawns 10 worker threads, each issuing 10 requests, and waits for all of
    them to finish.
    """
    # Create new threads
    threads = []
    for i in range(10):
        # NOTE(review): every thread gets the same label "Thread-1"; the loop
        # index i is only stored as the (unused) threadID — confirm whether
        # per-thread labels were intended.
        threads.append( myThread(i, "Thread-1", 10) )
    # Start new Threads
    for t in threads:
        t.start()
    # Wait for all workers to complete before reporting.
    for t in threads:
        t.join()
    print ("Application processing...")
# Script entry point.
if __name__ == '__main__':
    main()
| [
"max@theairtime.com"
] | max@theairtime.com |
4483b9cf6f80e231f9a1794f4794ac5c58df8f67 | fe2d277608ecd020c9e869a48e66c2431e7123b3 | /bin/theano-nose | e1d5e0b89b3cfe53bf45f5215c18b9d5e360b9eb | [] | no_license | kakshay21/ML | f1775e86fde546405d107e07c916738476628343 | 700155575a1dfdf6bd8e640740f059edb8449bdb | refs/heads/master | 2022-11-01T20:00:25.962776 | 2022-10-30T09:56:26 | 2022-10-30T09:56:26 | 79,920,131 | 0 | 1 | null | 2022-10-30T09:56:26 | 2017-01-24T14:42:42 | Python | UTF-8 | Python | false | false | 10,969 | #!/Users/AKSHAY/Desktop/MLC/bin/python
"""
This script should behave the same as the `nosetests` command.
The reason for its existence is that on some systems, it may not be obvious to
find where nosetests is installed in order to run it in a different process.
It is also used to load the KnownFailure plugin, in order to hide
KnownFailureTests error messages. Use --without-knownfailure to
disable that plugin.
`run_tests_in_batch.py` will in turn call back this script in another process.
"""
from __future__ import print_function
__authors__ = "Olivier Delalleau, Pascal Lamblin, Eric Larsen"
__contact__ = "delallea@iro"
import logging
_logger = logging.getLogger('theano.bin.theano-nose')
import os
import nose
import textwrap
import sys
from nose.plugins import Plugin
def main():
    """Strip theano-nose specific options out of sys.argv, then delegate the
    remaining arguments to nose (optionally via the batch/time-profile runner).

    Returns nose's exit status (or the batch runner's return value).
    """
    # Handle the --theano arguments
    if "--theano" in sys.argv:
        i = sys.argv.index("--theano")
        import theano
        sys.argv[i] = theano.__path__[0]
    # Many Theano tests suppose device=cpu, so we need to raise an
    # error if device==gpu.
    # I don't know how to do this check only if we use theano-nose on
    # Theano tests. So I make an try..except in case the script get
    # reused elsewhere.
    # We should not import theano before call nose.main()
    # As this cause import problem with nosetests.
    # Should we find a way to don't modify sys.path?
    if not os.path.exists('theano/__init__.py'):
        try:
            from theano import config
            if config.device != "cpu":
                raise ValueError("Theano tests must be run with device=cpu."
                        " This will also run GPU tests when possible.\n"
                        " If you want GPU-related tests to run on a"
                        " specific GPU device, and not the default one,"
                        " you should use the init_gpu_device theano flag.")
        except ImportError:
            pass
    # Handle --batch[=n] arguments; the options are removed from sys.argv so
    # nose never sees them.
    batch_args = [arg for arg in sys.argv if arg.startswith('--batch')]
    for arg in batch_args:
        sys.argv.remove(arg)
    batch_size = None
    if len(batch_args):
        if len(batch_args) > 1:
            _logger.warn(
                'Multiple --batch arguments detected, using the last one '
                'and ignoring the first ones.')
        batch_arg = batch_args[-1]
        elems = batch_arg.split('=', 1)
        if len(elems) == 2:
            batch_size = int(elems[1])
    # Handle the --debug-batch argument.
    display_batch_output = False
    if '--debug-batch' in sys.argv:
        if not batch_args:
            raise AssertionError(
                'You can only use the --debug-batch argument with the '
                '--batch[=n] option')
        while '--debug-batch' in sys.argv:
            sys.argv.remove('--debug-batch')
        sys.argv += ['--verbose', '--nocapture', '--detailed-errors']
        display_batch_output = True
    # Handle --time_prof arguments
    time_prof_args = [arg for arg in sys.argv if arg=='--time-profile']
    for arg in time_prof_args:
        sys.argv.remove(arg)
    # Time-profiling and batch modes are handled by a separate runner that
    # calls this script back per batch.
    if time_prof_args or batch_args:
        from theano.tests import run_tests_in_batch
        return run_tests_in_batch.main(
            theano_nose=os.path.realpath(__file__),
            batch_size=batch_size,
            time_profile=bool(time_prof_args),
            display_batch_output=display_batch_output)
    # Non-batch mode.
    addplugins = []
    # We include KnownFailure plugin by default, unless
    # it is disabled by the "--without-knownfailure" arg.
    if '--without-knownfailure' not in sys.argv:
        try:
            from numpy.testing.noseclasses import KnownFailure
            addplugins.append(KnownFailure())
        except ImportError:
            _logger.warn(
                'KnownFailure plugin from NumPy could not be imported. '
                'Use --without-knownfailure to disable this warning.')
    else:
        sys.argv.remove('--without-knownfailure')
    # When 'theano-nose' is called-back under the time-profile option, an
    # instance of the custom Nosetests plugin class 'DisabDocString' (see
    # below) is loaded. The latter ensures that the test name will not be
    # replaced in display by the first line of the documentation string.
    if '--disabdocstring' in sys.argv:
        addplugins.append(DisabDocString())
    try:
        if addplugins:
            ret = nose.main(addplugins=addplugins)
        else:
            ret = nose.main()
        return ret
    except TypeError as e:
        # NOTE(review): TypeError.message only exists on Python 2; on Python 3
        # this attribute access would itself raise AttributeError. This script
        # predates Python 3 support — confirm before reuse.
        if "got an unexpected keyword argument 'addplugins'" in e.message:
            # This means nose is too old and does not support plugins
            _logger.warn(
                'KnownFailure plugin from NumPy can\'t'
                ' be used as nosetests is too old. '
                'Use --without-knownfailure to disable this warning.')
            nose.main()
        else:
            raise
def help():
    # Print theano-nose's own usage text; all non-local options are passed
    # through to nosetests (see ``nosetests -h``).
    help_msg = """
    This script behaves mostly the same as the `nosetests` command.
    The main difference is that it loads automatically the
    KnownFailure plugin, in order to hide KnownFailureTests error
    messages. It also supports executing tests by batches.
    Local options:
        --help, -h: Displays this help.
        --batch[=n]:
            If specified without option '--time-profile', do not run all
            the tests in one run, but split the execution in batches of
            `n` tests each. Default n is 100.
        --time-profile:
            Each test will be run and timed separately and the results will
            be deposited in the files 'timeprof_sort', 'timeprof_nosort'
            and 'timeprof_rawlog' in the current directory. If the
            '--batch[=n]' option is also specified, notification of the
            progresses will be made to standard output after every group of
            n tests. Otherwise, notification will occur after every group
            of 100 tests.
            The files 'timeprof_sort' and 'timeprof_nosort' both contain one
            record for each test and comprise the following fields:
            - test running-time
            - nosetests sequential test number
            - test name
            - name of class to which test belongs (if any), otherwise full
              information is contained in test name
            - test outcome ('OK', 'SKIPPED TEST', 'FAILED TEST' or
              'FAILED PARSING')
            In 'timeprof_sort', test records are sorted according to
            running-time whereas in 'timeprof_nosort' records are reported
            according to sequential number. The former classification is the
            main information source for time-profiling. Since tests belonging
            to same or close classes and files have close sequential, the
            latter may be used to identify duration patterns among the tests
            numbers. A full log is also saved as 'timeprof_rawlog'.
        --without-knownfailure: Do not load the KnownFailure plugin.
        --theano: This parameter is replaced with the path to the theano
                  library. As theano-nose is a wrapper to nosetests, it
                  expects a path to the tests to run.
                  If you do not know where theano is installed, use this
                  option to have it inserted automatically.
        --debug-batch:
            Use this parameter to run nosetests with options '--verbose',
            '--nocapture' and '--detailed-errors' and show the output of
            nosetests during batch execution. This can be useful to debug
            situations where re-running only the failed tests after batch
            execution is not working properly. This option can only be used
            in conjunction with the '--batch=[n]' argument.
    The other options will be passed to nosetests, see ``nosetests -h``.
    """
    print(textwrap.dedent(help_msg))
class DisabDocString(Plugin):
    """
    When activated, a custom Nosetests plugin created through this class
    will preclude automatic replacement in display of the name of the test
    by the first line in its documentation string.
    Sources:
    http://nose.readthedocs.org/en/latest/developing.html
    http://nose.readthedocs.org/en/latest/further_reading.html
    http://www.siafoo.net/article/54
    https://github.com/nose-devs/nose/issues/294
    http://python-nose.googlecode.com/svn/trunk/nose/plugins/base.py
    Nat Williams:
    https://github.com/Merino/nose-description-fixer-plugin/commit/
    df94596f29c04fea8001713dd9b04bf3720aebf4
    """
    enabled = False # plugin disabled by default
    score = 2000 # high score ensures priority over other plugins
    def __init__(self):
        # 'super.__init__(self):' would have achieved exactly the same
        if self.name is None:
            self.name = self.__class__.__name__.lower()
        if self.enableOpt is None:
            self.enableOpt = ("enable_plugin_%s"
                              % self.name.replace('-', '_'))
    def options(self, parser, env):
        env_opt = 'NOSE_WITH_%s' % self.name.upper()
        # latter expression to be used if plugin called from the command line
        parser.add_option("--%s" % self.name,
                          # will be called with Nosetests 'main' or 'run'
                          # function's' argument '--disabdocstring'
                          action="store_true",
                          dest=self.enableOpt,
                          # the latter entails that the boolean self.enableOpt
                          # is set to 'True' when plugin is called through a
                          # function's argument
                          default=env.get(env_opt),
                          # entails that plugin will be enabled when command
                          # line trigger 'env_opt' will be activated
                          help="Enable plugin %s: %s [%s]" %
                          (self.__class__.__name__,
                           self.help(), env_opt))
    def configure(self, options, conf):
        # Called by nose after option parsing; enables the plugin when the
        # corresponding command-line/env trigger was set.
        self.conf = conf
        # plugin will be enabled when called through argument
        self.enabled = getattr(options, self.enableOpt)
    def describeTest(self, test):
        # 'describeTest' is also called when the test result in Nosetests calls
        # 'test.shortDescription()' and can thus be used to alter the display.
        # Returning False suppresses the docstring-based description, so nose
        # keeps showing the actual test name.
        return False
# Entry point: show the local help for -h/--help, otherwise delegate to main()
# and exit with its return code.
if __name__ == '__main__':
    if '--help' in sys.argv or '-h' in sys.argv:
        help()
    else:
        result = main()
        sys.exit(result)
| [
"ka686018@yahoo.in"
] | ka686018@yahoo.in | |
e712cecd7885aa93e95dc49f0e113dfcf9bf86f5 | ea9679356a503a41663aa87a60b7d1bf4df93d8f | /defaultParser.py | 88ef3f639c89a9616c46a6b7a924c26fb29fa058 | [] | no_license | BPfuelb/cnsm2019-flow-data-streaming-client | 6a0e528079b171e655859b3151475ef6b53db408 | d7f5111f41e9ad85d9c374382385c53816b9803c | refs/heads/master | 2020-09-09T08:35:14.785946 | 2019-10-17T14:00:22 | 2019-10-17T14:00:22 | 221,401,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,412 | py | '''
default command line parameter parser
'''
import argparse
def printFlags(flags):
    """Pretty-print every parsed command line parameter, one per line,
    framed by horizontal rules."""
    title = 'Flags'
    rule = '-' * ((80 - len(title)) // 2)
    print('{} {} {}'.format(rule, title, rule))
    for name, value in sorted(vars(flags).items()):
        print('\t * {}: {}'.format(name, value))
    print('-' * 80)
def create_default_parser():
    ''' create a parser with default parameters for most experiments

    Returns an argparse.ArgumentParser preconfigured with dropout, layer,
    training, feature-selection, class-weighting, filtering, bucketing and
    output options; callers still invoke parse_args() themselves.
    '''
    parser = argparse.ArgumentParser()
    #-------------------------------------------------------- DROPTOUT PARAMETER
    parser.add_argument('--dropout_hidden', type=float, default=1)
    parser.add_argument('--dropout_input', type=float, default=1)
    #----------------------------------------------------------- LAYER PARAMETER
    parser.add_argument('--layers', type=int, nargs='+', default=[1000, 1000, 1000])
    #----------------------------------- BATCH_SIZE, LEARNING_RATE, EPOCHS, ETC.
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--learning_rate', type=float, default=0.0001)
    parser.add_argument('--log_frequency', type=int, default=50)
    # Each entry is '<feature index>-<enabled flag>'; the trailing comment
    # names the flow attribute at that index.
    default_features = [
        '0-1', # src_addr
        '1-1', # dst_addr
        # '2-' # packets
        # '3-' # bytes
        # '4-' # first switched
        # '5-' # last switched
        '6-1', # src_port
        '7-1', # dst_port
        '8-1', # tcp_flags
        '9-1', # protocol
        # '10-' # export host
        # '11-' # flow seq number
        # '12-' # duration
        # '13-' # bitrate
        '14-0', # src_country_code
        '15-0', # src_longitude
        '16-0', # src_latitude
        '17-1', # src_asn
        '18-1', # src_network
        '19-0', # src_prefix_len
        '20-1', # src_vlan
        '21-1', # src_locality
        '22-0', # dst_country_code
        '23-0', # dst_longitude
        '24-0', # dst_latitude
        '25-1', # dst_asn
        '26-1', # dst_network
        '27-0', # dst_prefix_len
        '28-1', # dst_vlan
        '29-1', # dst_locality
        # '30-' # year
        '31-0', # month
        '32-0', # day
        '33-0', # hour
        '34-0', # minute
        '35-0', # second
    ]
    #------------------------------------------------------------------ FEATURES
    parser.add_argument('--features', type=str, nargs='+', default=default_features,
                        help='select features for training and testing')
    parser.add_argument('--cw_method', type=int, default=0,
                        help='0 = standard class weighting; 1 = under-sampling')
    parser.add_argument('--feature_filter', type=str, nargs='*', default='',
                        help=('set filter functions: "feature_key;lambda x: <bool> "'
                              ' e.g.,["(6,);lambda x: x == 53.", "(7,);lambda x: x == 53."]'
                              ' or ["(6,7);lambda x,y: x == 53. or y == 53."]'))
    parser.add_argument('--boundaries_bps', type=float, nargs='+', default=[0., 50., 8000.],
                        help='set boundaries for bps')
    parser.add_argument('--boundaries_duration', type=float, nargs='+', default=[0., 100., 200.],
                        help='set boundaries for duration')
    #----------------------------------------------------- CSV OUTPUTS FOR PLOTS
    parser.add_argument('--output_file', type=str, default='out.csv')
    return parser
| [
"christoph.hardegen@informatik.hs-fulda.de"
] | christoph.hardegen@informatik.hs-fulda.de |
bb6980a6ff85f540bb196b1fe83ef777674696ef | 37139a766b66bc3450f0bc0fb6c425a28a935241 | /proyecto/migrations/0008_apartado_clientes.py | 727d5a1b87624684b0ee808e37a330adf00de9d8 | [] | no_license | lulu-spec/djangoDboo | a3bcb07f10cf0d4046c82c4804a355cc389d3f41 | df6c6e82c755254be17eaeda92380500e3751098 | refs/heads/master | 2023-01-31T07:40:30.163379 | 2020-12-13T00:52:10 | 2020-12-13T00:52:10 | 320,953,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 3.1.4 on 2020-12-10 22:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('proyecto', '0007_auto_20201210_1632'),
]
operations = [
migrations.AddField(
model_name='apartado',
name='clientes',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='proyecto.cliente'),
preserve_default=False,
),
]
| [
"mariluzmar0994@hotmail.com"
] | mariluzmar0994@hotmail.com |
a7cd4b5ffbeb6ee60e8b2e5ab66e327c69f5794e | 3268d19a08c55e3e03e9769fca4626445a3e3219 | /setup.py | 3f22c3dfa58392389434587693e87ff24d1be412 | [
"Apache-2.0"
] | permissive | ancostas/misc-utils-py | 5d02eaaf6d7a69531bf39ba80dee9290c53589da | b4e546f7cd9ee63367194949a5c9ee2c7c2f8a38 | refs/heads/master | 2020-11-30T18:39:19.937826 | 2019-12-27T18:30:51 | 2019-12-27T18:30:50 | 230,456,427 | 0 | 0 | Apache-2.0 | 2019-12-27T14:18:31 | 2019-12-27T14:18:30 | null | UTF-8 | Python | false | false | 2,233 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Setup script for miscutils."""
from __future__ import absolute_import
from __future__ import print_function
from glob import glob
import os
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def _find_version_line_in_file(file_path: str) -> str:
with open(str(file_path), "r") as fileh:
version_lines = [line for line in fileh.readlines() if line.startswith("VERSION")]
if len(version_lines) != 1:
raise ValueError(f"Unable to determine 'VERSION' in {file_path}")
return version_lines[0]
def _lookup_local_module_version(file_path: str) -> str:
path_to_init = os.path.join(str(file_path), "__init__.py")
version_tuple = eval(_find_version_line_in_file(path_to_init).split("=")[-1])
return ".".join([str(x) for x in version_tuple])
version = _lookup_local_module_version(os.path.join(os.path.dirname(__file__), "src", "miscutils"))
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="misc-utils-py",
version=version,
license="Apache License, Version 2.0",
description="Misc utilities for python projects",
long_description=long_description,
long_description_content_type="text/markdown",
author="David Bradford",
author_email="david.bradford@mongodb.com",
url="https://github.com/dbradf/misc-utils-py",
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
install_requires=[
"pylibversion >= 0.1",
"python-json-logger >= 0.1",
"structlog >= 19"
],
entry_points={"console_scripts": []},
)
| [
"david.bradford@mongodb.com"
] | david.bradford@mongodb.com |
03ca2c229c9326cecf6473f179abc05ab7093b26 | 3c9a9dca8e80e51885a2f3cb39a3a272adcdecaa | /__init__.py | 2c4c6f66ff0852cbde896dd41ece6ae1a1e9c34c | [] | no_license | jkennedy74/bellybutton | 2d08ed1a054a3178e37ced2dc878520079ea043b | b7043587bad890fec894bbf48e51a5845cc74ee4 | refs/heads/master | 2020-03-24T19:23:17.083302 | 2018-08-09T20:24:25 | 2018-08-09T20:24:25 | 142,923,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | const-naming-style="camelCase"
| [
"jkennedy@firstlook.biz"
] | jkennedy@firstlook.biz |
47aedceb25e986a3e5d3aae64be46cd960624d18 | 81f128c1d3ffc57ea35053a0f42bc3adb8ac820d | /MxShop/db_tools/import_goods_data.py | 71e3cfa1b711929ae1f857f0ba8333e70073b35b | [] | no_license | tminlun/tminlun-MxShop | f06816b5f596cffb7fa634891a70567055de1bf9 | a1ccf4b05edd8b47ad716fe65072b5be6e501e50 | refs/heads/master | 2022-12-10T11:08:28.043339 | 2019-04-15T15:12:44 | 2019-04-15T15:12:44 | 176,200,320 | 0 | 0 | null | 2022-12-08T01:43:02 | 2019-03-18T03:47:29 | Python | UTF-8 | Python | false | false | 2,099 | py | # _*_ encoding:utf-8 _*_
__author__: '田敏伦'
__date__: '2019/2/27 0027 20:38'
# 导入goods的数据
import sys
import os
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(pwd + "../")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MxShop.settings')
import django
django.setup()
from db_tools.data.product_data import row_data
from goods.models import Goods,GoodsCategory,GoodsImage
for goods_detail in row_data:
goods = Goods()
goods.name = goods_detail["name"]
# replace("¥", "")把¥替换成 ""
goods.market_price = float(int(goods_detail["market_price"].replace("¥", "").replace("元", "")))
goods.shop_price = float(int(goods_detail["sale_price"].replace("¥", "").replace("元", "")))
# 如果内容不为None传递给goods_brief ,否则: else(为None) 把None转换为"",传递给goods_brief
goods.goods_brief = goods_detail["desc"] if goods_detail["desc"] is not None else ""
goods.goods_desc = goods_detail["goods_desc"] if goods_detail["goods_desc"] is not None else ""
# 取第一张作为封面图 [如果有就传递值,如果没有(else)传递""。image在数据库默认为str ]
goods.goods_front_image = goods_detail["images"][0] if goods_detail["images"] else ""
# 取第三级分类,作为商品的分类
category_name = goods_detail["categorys"][-1]
# 选用filter不用get。因为filter没有匹配的返回空字符串,不会抛异常,get会抛异常(只能传外键给goods.category,直接传str会出错)
category = GoodsCategory.objects.filter(name=category_name)
print(category[0]) # category是一个对象,goods.category需要字符串,category[0]返回对象的字符串
if category:
goods.category = category[0] # 当前数据的商品的分类
goods.save()
# 商品的图片
for good_image in goods_detail["images"]:
goods_image_instance = GoodsImage()
goods_image_instance.image = good_image
goods_image_instance.goods = goods # 上面有遍历每一个goods
goods_image_instance.save() | [
"you@example.com"
] | you@example.com |
76f45a142ec79323b453d5fb3aceaf97a763aae5 | 74a8c75ae5f7dda7e713282c688ab59e55465d64 | /liquid_level_detector/ROI/Range_Of_Interest.py | 257512571798e1fc248e7eee2ec576392c384503 | [] | no_license | Devalda/ObjectDetector | a1ce0fa3a8d0850192395a875a5908ce826e8de5 | 58ffec845b64189eb08b301e867b2c6072fd60f7 | refs/heads/main | 2023-06-18T17:36:29.419577 | 2021-07-22T05:01:12 | 2021-07-22T05:01:12 | 373,008,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,886 | py | import imutils
import numpy as np
import cv2 as C
import cv2
# coloring
img = C.imread("../../images/proris_wthflash.png")
gray = C.cvtColor(img, C.COLOR_BGR2GRAY)
lab = C.cvtColor(img, C.COLOR_BGR2LAB)
l, a, b = C.split(lab)
# CLAHE
clahe = C.createCLAHE(clipLimit=3.0, tileGridSize=(5, 5))
cl = clahe.apply(l)
limg = C.merge((cl, a, b))
final = C.cvtColor(limg, C.COLOR_LAB2BGR)
gray = C.cvtColor(final, C.COLOR_BGR2GRAY)
# blur
blur_median = C.medianBlur(gray, 5)
blur_gauss = C.GaussianBlur(img, (7, 7), 2)
# tracing
th_adaptive = C.adaptiveThreshold(blur_median, 255, C.ADAPTIVE_THRESH_GAUSSIAN_C, C.THRESH_BINARY, 11, 2)
canny_blur_gray = C.Canny(blur_gauss, 55, 100)
# closing operation
kernel = np.ones((1, 1))
imgDial = C.dilate(canny_blur_gray, kernel, iterations=3)
imgThres = C.erode(imgDial, kernel, iterations=2)
# make ROI
cnts = C.findContours(gray, C.RETR_EXTERNAL, C.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
#
# for c in cnts:v
# x,y,w,h = C.boundingRect(c)
# ROI = img[y:y+h, x:x+w]
slice = img[230:410, 110:430]
grey = C.cvtColor(slice, C.COLOR_BGR2GRAY)
blur_slice = C.medianBlur(grey, 5)
th_adaptive = C.adaptiveThreshold(blur_slice, 255, C.ADAPTIVE_THRESH_GAUSSIAN_C, C.THRESH_BINARY, 15, 2)
blur = C.GaussianBlur(slice, (5, 5), 0)
th_otsu = C.threshold(blur, 0, 255, C.THRESH_BINARY)
# modification - threshold
bottle_gray = cv2.cvtColor(slice , cv2.COLOR_BGR2GRAY)
bottle_gray = cv2.split(bottle_gray)[0]
bottle_gray = cv2.GaussianBlur(bottle_gray, (3, 3), 1)
cv2.imshow("gray", bottle_gray)
bottle_threshold = cv2.adaptiveThreshold(bottle_gray,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,41,5)
bottle_threshold = cv2.bitwise_not(bottle_threshold)
bottle_threshold = imutils.skeletonize(bottle_threshold, size=(3, 3))
kernel = np.ones((3,3), np.uint8)
bottle_threshold = cv2.dilate(bottle_threshold, kernel, iterations=2)
bottle_threshold = cv2.erode(bottle_threshold, kernel, iterations=1)
cv2.imshow("kotak th ", bottle_threshold)
# RGB filter (black and white)
black = np.zeros((bottle_threshold.shape[0], bottle_threshold.shape[1], 3), np.uint8)
cv2.imshow("black", black)
black1 = cv2.rectangle(black,(0, 90),(290, 450),(255, 255, 255), -1)
gray = cv2.cvtColor(black,cv2.COLOR_BGR2GRAY)
ret,b_mask = cv2.threshold(gray,127,255, 0)
fin = cv2.bitwise_and(bottle_threshold,bottle_threshold,mask = b_mask) #masking image
cv2.imshow("mask",fin)
# bbox on ROI
contours = C.findContours(grey, C.RETR_EXTERNAL, C.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
areas = [C.contourArea(contour) for contour in contours]
(contours, areas) = zip(*sorted(zip(contours, areas), key=lambda a: a[1]))
bottle_clone = img.copy()
x, y, w, h = C.boundingRect(contours[-1])
print(w, h)
x = 110
y = 230
bbox = C.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
C.imshow("image", bbox)
C.waitKey(0)
| [
"briandevalda45@gmail.com"
] | briandevalda45@gmail.com |
a95902c6e18ce1bc91769f463fdcadd5edc5103a | b776894e97c2cedb791cb6d860865908d13b6fa9 | /op_app/Controllers/logDetailControllerClass.py | 87f3670e671f689c175562fb968b544499703355 | [] | no_license | yezimai/v11 | 30463acf9cd4c2b9bd43eb0c722947804a08c36e | a4029afb169962b0f041ac2dc9e5c03a61cba8ee | refs/heads/master | 2021-09-04T00:32:13.951114 | 2018-01-13T10:05:39 | 2018-01-13T10:05:39 | 116,007,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,704 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from op_app.logger.log import runlog
from op_app.Model.base.baseModelClass import BaseDbModelClass
from op_app.Model.base.operation_dbModelClass import Operation_dbModelClass
from op_app.logger.log import dblog, runlog
from op_app.lib import appConfigDetailClass, pub
import json
from op_app.Extends.paramikoTool import ParamikoTool
import os
import sys
class LogDetailControllerClass(object):
def __init__(self, request):
# super(LogDetailControllerClass, self).__init__()
self.request = request
self.uid = self.request.user.id
self.project_id = self.request.GET.get('project_id', '')
self.env_id = self.request.GET.get('env_id', '')
self.app_id = self.request.GET.get('app_id', '')
self.ip = self.request.GET.get('ip', '')
self.server_type = self.request.GET.get('server_type', '')
self.server_id = self.request.GET.get('server_id', '')
self.action = self.request.GET.get('action', '')
self.line_num = self.request.GET.get('line_num', '100')
def getLogDetail(self):
# 判断前端传来的参数
if self.project_id == '' or self.env_id == '' or self.app_id == '' or self.ip == ''\
or self.action == ''or self.server_type == '' or self.server_id == '':
# print('p_id,e_id,a_id,ip,action,server_type,server_id,',self.project_id,self.env_id,self.app_id,self.ip,self.action,self.server_type,self.server_id)
print('invaild action args from web...')
runlog.error("[ERROR] --wrong action args from web-----, Catch exception:, file: [ %s ], line: [ %s ]"
%(__file__,sys._getframe().f_lineno))
return 'invaild action args from web...'
else:
#判断用户是否有权限,没有权限返回判断结果
if not pub.hasAccessPermission(self.uid, self.server_type, self.server_id):
return 'no permissions to view'
# 获取app的用户名和密码
instance = appConfigDetailClass.appConfigDetail()
res, status = instance.AppConfigDetail(self.server_type, self.app_id, self.server_id, self.env_id, self.project_id)
print('logapp--res',res)
if not status:
runlog.error("the APP Data is null, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
res_dic = dict()
for i in res:
res_dic['install_user'] = i[1]
res_dic['app_type'] = i[2]
res_dic['appdir'] = i[3]
res_dic['app_name'] = i[6]
res_dic['pass'] = i[7]
res_dic['sshport'] = i[8]
res_dic['sshuser'] = i[9]
# 获取远程服务器日志目录指定行数的内容
# print('dict_res',res_dic)
if res_dic['app_type'] == '1': # app类型是tomcat
if self.action == 'start':
log_file_path = '{}/logs/catalina.out'.format(res_dic['appdir'])
# print('00---00',log_file_path)
elif self.action == 'stop':
log_file_path = '/home/{}/service_manage/{}/scripts/log/stopapp.sh.log'.format(\
res_dic['sshuser'], res_dic['app_name'])
elif self.action == 'log_show':
logdir_id = self.request.GET.get('logdir_id', '')
file_name = self.request.GET.get('file_name','')
print('nnnnnnnnnnnn',logdir_id,file_name)
if logdir_id == '' or file_name == '':
runlog.error("the logdir_id or filename is null, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
return 'invaild logdir_id to find the logfile'
# 找到app对应的日志目录
log_dir = instance.app_LogDir(self.app_id, self.server_type, self.server_id, logdir_id)
if len(log_dir) == 0:
runlog.error("the logdir is null, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
return 'invaild logdir to find the logfile'
log_file_path = '{}/{}'.format(log_dir[0][0],file_name)
print('logggggggg',log_file_path)
else:
runlog.error("the APP type is not tomcat, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
return 'invaild action to find the logfile'
pt = ParamikoTool() # 实例化对象后,获取日志默认行数
log_res = pt.getlogrow(self.ip, res_dic['sshport'], res_dic['sshuser'], \
res_dic['pass'], log_file_path, self.line_num)
# print('-------------',log_res)
# 记录用户访问记录
audit_log = Operation_dbModelClass()
audit_log.inserToLog(self.request.user.username, self.ip, 'access', self.request.path, log_file_path)
return log_res
else:
runlog.info("the APP type is not tomcat, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
return 'just for app-type like tomcat..'
def logDir(self):
# 返回的字典格式
data = {
'project_id': self.project_id,
'env_id': self.env_id,
'app_id': self.app_id,
'server_type': self.server_type,
'server_id': self.server_id,
'ip': self.ip,
}
if self.app_id == '':
runlog.error("[ERROR] -getlogdir-wrong action args from web-----, Catch exception:, file: [ %s ], line: [ %s ]"
%(__file__,sys._getframe().f_lineno))
if not pub.hasAccessPermission(self.uid, self.server_type, self.server_id):
return ['no permissions to view']
# 获取app的用户名和密码
instance = appConfigDetailClass.appConfigDetail()
res, status= instance.AppConfigDetail(self.server_type, self.app_id, self.server_id, self.env_id,
self.project_id)
print('logapp--res', res)
if not status:
runlog.error("the APP Data is null, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
data['user'] = res[0][1]
res_dir = instance.whole_appLogDirs(self.app_id, self.server_type, self.server_id) # 通过appid查找出对应的所有的日志目录
print('whole_appLogDirs------>\033[42;1m%s\033[0m' % res_dir)
if len(res_dir) == 0:
print('nono is logdir...')
return {}
res_list = []
for i in res_dir:
res_dic = dict()
res_dic['id'] = i[1]
res_dic['dir'] = i[0]
res_list.append(res_dic)
data['logdirs'] = res_list
return data
def getlogInfo(self): # 获取app日志目录下所有日志的详细信息,大小,拥有者,修改时间等
logdir_id = self.request.GET.get('logdir_id', '')
# 判断前端传来的参数
if self.project_id == '' or self.env_id == '' or self.app_id == '' or self.ip == ''\
or self.server_type == '' or self.server_id == '' or logdir_id == '':
# print(self.project_id,self.env_id,self.app_id,self.ip,self.server_type,self.server_id,logdir_id)
# print('getLogInfo-error-invaild action args from web...')
runlog.error("[ERROR] -getLogInfo-error-wrong action args from web-----, Catch exception:, file: [ %s ], line: [ %s ]"
%(__file__,sys._getframe().f_lineno))
return 'invaild action args from web...'
if not pub.hasAccessPermission(self.uid, self.server_type, self.server_id):
runlog.error("[ERROR] --no permission-----, Catch exception:, file: [ %s ], line: [ %s ]"
%(__file__,sys._getframe().f_lineno))
return {}
instance = appConfigDetailClass.appConfigDetail()
res_dir = instance.app_LogDir(self.app_id, self.server_type, self.server_id, logdir_id)
if len(res_dir) == 0:
return {}
# 获取app的用户名和密码
instance = appConfigDetailClass.appConfigDetail()
res, Status = instance.AppConfigDetail(self.server_type, self.app_id, self.server_id, self.env_id,
self.project_id)
#print('pppppppppppplogapp--res', res)
if not Status:
runlog.error("the APP Data is null, file: [ %s ], line: [ %s ]" % (
__file__, sys._getframe().f_lineno))
pk = ParamikoTool() # 实例化对象,调用方法后将文件大小按时间排序展示
data_res, status = pk.getDirInfo(self.ip, res[0][8], res[0][9], res[0][7], res_dir[0][0])
# '''data='-rw-rw-r-- 1 beehive beehive 22900 Feb 8 2017 catalina.2017-02-08.log\n
# -rw-rw-r-- 1 beehive beehive 171910 Feb 9 2017 catalina.2017-02-09.log\n
#print('2222data_res------>\033[31;1m%s\033[0m' % data_res)
if not status:
runlog.error("getDirInfo is null, file: [ %s ], line: [ %s ],error:[ %s ]" % (
__file__, sys._getframe().f_lineno, data_res))
final_data_dic = dict() # 将得到的结果按字典的格式返回
final_data_dic['data'] = data_res
return final_data_dic
| [
"41815224@qq.com"
] | 41815224@qq.com |
c181c377eb1e273a3a25ff4c7ff5f76d0397776a | f7c632518cf38979033cf1b65c6345567ae92c85 | /data_interpreter/src/Plotter.py | ec7303cd17110796e2a91c665a937459fcba2bdc | [] | no_license | cmccachern/avr-datalogger | 536cf67e729152ae4c0d1d48c7fcb6d7a5f2924b | c3fe6287406a175df05d00b751abc111baca8960 | refs/heads/master | 2016-09-05T12:56:20.715826 | 2014-07-06T22:09:22 | 2014-07-06T22:09:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import matplotlib.pyplot as pp
from sys import *
with open(argv[1]) as f:
content = f.readlines();
pp.plot(content)
pp.ylabel("Temperature (degrees C)")
pp.xlabel("Reading cardinality");
#pp.savefig("/home/dragostea/Dev/Robotics/UnderwaterSensor/graph.png", bbox_inches="tight")
pp.show()
exit(0)
| [
"carey@mccachern.com"
] | carey@mccachern.com |
261ffee0c6511b641ad995f1b5f6dedeb8295843 | e1f22142d7b70c96a7f166c4f2524674d5920abb | /user1_gen&crypt.py | 884be484148a6d7653a454b047a0b3da8e935575 | [] | no_license | Devastor/Encryption | ac7674df8cb955be03c532670a3e1adf03ee6515 | 2afe9a562d9e8ba20678766643b39e15c6d320ea | refs/heads/master | 2022-12-20T23:22:12.548360 | 2020-09-23T06:04:35 | 2020-09-23T06:04:35 | 297,869,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
# creation 256 bit session key
sessionKey = Random.new().read(32) # 256 bit
# encryption AES of the message
f = open('sampleText.txt', 'rb')
sample = f.read()
f.close()
iv = Random.new().read(16) # 128 bit
obj = AES.new(sessionKey, AES.MODE_CFB, iv)
cipherText = iv + obj.encrypt(sample)
f = open('sampleText.txt', 'wb')
f.write(bytes(cipherText))
f.close()
# encryption RSA of the session key
publicKey = RSA.importKey(open('User2PublicKey.txt', 'rb').read())
cipherRSA = PKCS1_OAEP.new(publicKey)
sessionKey = cipherRSA.encrypt(sessionKey)
f = open('sessionKey.txt', 'wb')
f.write(bytes(sessionKey))
f.close()
| [
"noreply@github.com"
] | Devastor.noreply@github.com |
646a43ca2d39919df54829b20142ff6a4c080961 | d22e16f3c82796d1693a6cb68abd9da085082b2f | /Ejercicios Básicos - Python/Trabajo_Ejercicio7.py | 36e925e02b8eb86fb2dbd993729fb2188951906b | [] | no_license | NeisserMS/Java-Course-UPAO | 9582c2cbdaa99ae0ea9cd7e89d10a11bbb8f425f | a9042c7037b35a29554fd8d359d90c4fabd73c36 | refs/heads/master | 2023-07-11T21:30:02.758652 | 2021-08-23T00:11:59 | 2021-08-23T00:11:59 | 256,020,840 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | ancho = float(input("Ingrese el ancho de la pared: "))
largo = float(input("Ingrese el largo de la Pared: "))
area = ancho*largo
arena = area*0.125
print("El area de la Pared es: ", area)
print("Los metros cubicos de arena que se usaran son: ", arena)
| [
"rko_619_fbi@hotmail.com"
] | rko_619_fbi@hotmail.com |
f14c818163547663498f1c30ab35e595e270d58a | 096b6100b16b2f991bd8a6d5751c27052297a3e6 | /Python for CP & DS Algo/string_var.py | 4b6d8310a3c308cdb5a9728ea601a2b583db93c3 | [] | no_license | barnamensch056/DS_Algo_CP | 97d7dcfa60dac00aba05a9325090079efebcd9a9 | 483bfb334bf5f22787bd415720c6e54d1b1fb4b9 | refs/heads/master | 2023-03-26T23:34:44.098063 | 2021-03-30T14:38:41 | 2021-03-30T14:38:41 | 319,090,940 | 0 | 1 | null | 2021-03-13T14:23:12 | 2020-12-06T17:29:20 | C++ | UTF-8 | Python | false | false | 342 | py |
s = input()
for i in range(len(s)):
command=list(input().split())
if command[0]=='isalnum':
print(s.isalnum())
elif command[0]=='isalpha':
print(s.isalpha())
elif command[0]=='isdigit':
print(s.isdigit())
elif command[0]=='islower':
print(s.islower())
else:
print(s.isupper())
| [
"barnadipdey@gmail.com"
] | barnadipdey@gmail.com |
11274cc402825d95bf627aeac68680ce4d901613 | 2f6243b14aee7be367d39221ae3ff9ac325388b9 | /daily_report/doctor_report_no_dependency.py | 7545981af5683fcff60df8e086250774bf744db2 | [] | no_license | yp-silence/data_analyse | 23f66c4d36de669e5a157146d1d6cc3ddf03c9b3 | d228d2c222e250b537440294ed54c0023a6137f6 | refs/heads/master | 2023-05-05T08:37:05.587179 | 2021-05-12T17:15:06 | 2021-05-12T17:15:06 | 366,787,478 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,177 | py | import pandas as pd
import pymysql
import time
import click
def get_current_date_str(ft_str='%Y-%m-%d'):
"""
获取当前日期字符串
:return:
"""
date_str = time.strftime(ft_str, time.localtime())
return date_str
def _get_db_config(is_md=False):
if is_md:
config_dict = {'user': 'root',
'password': 'hoNGbX0Q5G',
'host': '49.235.244.137',
'database': None,
'port': 3306
}
else:
config_dict = {'user': 'lljhyadmin',
'password': 'ljhy6284!fmysqlke23Wtkj88',
'host': '10n0m1gr7lo.mysql.rds.aliyuncs.com',
'database': None,
'port': 3306
}
return config_dict
def _get_connect(is_md=False):
connect_params = _get_db_config(is_md=is_md)
print(f'connect_params:{connect_params}')
connect = pymysql.Connection(**connect_params)
cursor = connect.cursor(cursor=pymysql.cursors.DictCursor)
return connect, cursor
def fetch_data(sql_str: str, use_df: bool = True, is_md=False):
print(f'query sql: {sql_str}')
connect, cursor = _get_connect(is_md=is_md)
af_rows = cursor.execute(sql_str)
print(f'total fetch {af_rows} 条记录~')
if use_df:
df = pd.DataFrame(cursor.fetchall())
df.fillna(0, inplace=True)
print(f'df:{df.head()}')
cursor.close()
connect.close()
return df
else:
data_dict = cursor.fetchall()
print(f'data_dict:{data_dict}')
cursor.close()
connect.close()
return data_dict
def get_first_date_to_target_opt(start_date, end_date, target=20, is_md=False, to_excel=True, brand_id=10000347,
merge=False):
"""10001070"""
sql = f"""
SELECT
doctor.name name,
user_rel.doctor_id,
user_rel.create_date date_time
FROM user.usr_doctor_user_rel user_rel inner join sec.sec_app_doctor doctor
on user_rel.doctor_id = doctor.id
WHERE user_rel.state > 0 and user_rel.brand_id = {brand_id}
and
user_rel.create_date between '{start_date}' AND '{end_date}'
order by user_rel.create_date
"""
df = fetch_data(sql, is_md=is_md)
count_df = df.groupby(['name', 'doctor_id']).agg({'date_time': 'count'}).reset_index()
count_df.rename(columns={'date_time': "累计新增报道数"}, inplace=True)
if target == 1:
count_df['统计区间'] = "[" + start_date + '→' + end_date + ")"
count_df.rename(columns={"name": "医生姓名"}, inplace=True)
target_df = count_df
else:
# 统计首次达到阈值的时间
tmp = df.groupby(['name', 'doctor_id']).shift(target - 1).reset_index()
# 去除 nan索引的记录
tmp = tmp.dropna()
print(f'tmp:{tmp}')
df = df.loc[tmp['index'].values]
target_df = df.groupby(['name', 'doctor_id']).agg({"date_time": 'min'}).reset_index()
target_df = count_df.merge(target_df, on=['name', 'doctor_id'], how='inner')
target_df.rename(columns={"name": "医生姓名", "date_time": "首次达到阈值时间"}, inplace=True)
target_df['新增报道人数阈值'] = target
target_df['统计区间'] = "[" + start_date + '→' + end_date + ")"
print(f'total find {len(target_df)}条记录')
doctor_ids = target_df['doctor_id'].values.tolist()
if len(doctor_ids) == 0:
doctor_ids = '1=0'
else:
doctor_ids = ["'" + str(val) + "'" for val in doctor_ids]
print(f'doctor_ids:{doctor_ids}')
detail_df = get_detail_info(doctor_ids, start_date, end_date, brand_id, is_md=is_md)
if to_excel:
if merge:
io = pd.ExcelWriter(f'{get_current_date_str()}医生报道量统计.xlsx')
target_df.drop(labels='doctor_id', inplace=True, axis=1)
sheet_name = '新增报道数统计' if target == 1 else '首次达到阈值统计'
target_df.to_excel(io, index=False, sheet_name=sheet_name)
detail_df.to_excel(io, index=False, sheet_name='详细信息')
io.sheets[sheet_name].set_column(0, target_df.shape[1], 20)
io.sheets['详细信息'].set_column(0, detail_df.shape[1], 20)
io.close()
else:
file_name = f"{start_date}-{end_date}医生新增报道量统计.xlsx" if target == 1 else f"{start_date}-{end_date}医生报道量阈值统计.xlsx"
io = pd.ExcelWriter(file_name)
target_df.drop(labels=['doctor_id'], axis=1, inplace=True)
sheet_name = '新增报道数统计' if target == 1 else '首次达到阈值统计'
target_df.to_excel(io, index=False, sheet_name=sheet_name)
io.sheets[sheet_name].set_column(0, target_df.shape[1], 20)
io.close()
io = pd.ExcelWriter(f"{start_date}-{end_date}医生报道详情统计.xlsx")
detail_df.to_excel(io, sheet_name='详详细信息', index=False)
io.sheets['详详细信息'].set_column(0, target_df.shape[1], 20)
io.close()
def get_detail_info(doctor_ids, start_date, end_date, brand_id=10000347, is_md=False):
"""brand_id=10000347 --> 若邻医生互联网医院"""
if isinstance(doctor_ids, list):
doctor_ids = ','.join(doctor_ids)
sql = f"""
select
doctor.name HCP姓名, usr.user_name NP姓名,if(usr.user_sex = 1, '男', '女') 性别, usr.bill_id 联系电话,
usr_rel.create_date 成功报道时间
from user.usr_doctor_user_rel usr_rel
inner join user.usr_user usr on usr_rel.user_id = usr.user_id
left join sec.sec_app_doctor doctor on usr_rel.doctor_id = doctor.id
where doctor_id in ({doctor_ids})
and usr_rel.brand_id = {brand_id}
and usr_rel.create_date between '{start_date}' AND '{end_date}'
order by HCP姓名, 成功报道时间 desc
"""
detail_df = fetch_data(sql, is_md=is_md)
print(f'total find {len(detail_df)}条记录')
if len(detail_df) > 0:
return detail_df
else:
return pd.DataFrame(columns=['HCP姓名', 'NP姓名', '性别', '联系电话', '成功报道时间'])
@click.command()
@click.option('--start_date', help='开始日期', default='2016-01-01 00:00:00')
@click.option('--end_date', help='结束日期', default='2021-04-23 18:00:00')
@click.option('--is_md', help='是否明德', default=False)
@click.option('--target', help='阈值', default=1)
@click.option('--to_excel', help='是否生成excel', default=True)
@click.option('--merge', help='是否合并excel', default=False)
@click.option('--brand_id', help='机构id', default=10000347)
def run(start_date, end_date, is_md, target, to_excel, merge, brand_id):
get_first_date_to_target_opt(start_date, end_date, is_md=is_md, target=target,
to_excel=to_excel, merge=merge, brand_id=brand_id)
if __name__ == '__main__':
run()
| [
"157241092@qq.com"
] | 157241092@qq.com |
2c7298807af89842b5a10136ac195bd2b10ad79c | bfc85df8b20a56b48e0190cf2664bd64acb6b294 | /core/deploy.py | d3591f4760eef9467bc8e4e763896350a315bde6 | [] | no_license | Tengtiantian/pytorch-deep-image-matting | 95e2b5a7f64282e4a6c2758a72a1e4d080628127 | 7bb692d942346d99c8ca3ca1af8e964891c964c0 | refs/heads/master | 2021-08-27T22:51:39.582281 | 2021-08-17T09:52:59 | 2021-08-17T09:52:59 | 191,011,389 | 0 | 1 | null | 2021-08-17T09:53:00 | 2019-06-09T13:43:00 | Python | UTF-8 | Python | false | false | 10,487 | py | import torch
import argparse
import torch.nn as nn
import net
import cv2
import os
from torchvision import transforms
import torch.nn.functional as F
import numpy as np
import time
def get_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--size_h', type=int, required=True, help="height size of input image")
parser.add_argument('--size_w', type=int, required=True, help="width size of input image")
parser.add_argument('--imgDir', type=str, required=True, help="directory of image")
parser.add_argument('--trimapDir', type=str, required=True, help="directory of trimap")
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--resume', type=str, required=True, help="checkpoint that model resume from")
parser.add_argument('--saveDir', type=str, required=True, help="where prediction result save to")
parser.add_argument('--alphaDir', type=str, default='', help="directory of gt")
parser.add_argument('--stage', type=int, required=True, choices=[0,1,2,3], help="backbone stage")
parser.add_argument('--not_strict', action='store_true', help='not copy ckpt strict?')
parser.add_argument('--crop_or_resize', type=str, default="whole", choices=["resize", "crop", "whole"], help="how manipulate image before test")
parser.add_argument('--max_size', type=int, default=1600, help="max size of test image")
args = parser.parse_args()
print(args)
return args
def gen_dataset(imgdir, trimapdir):
sample_set = []
img_ids = os.listdir(imgdir)
img_ids.sort()
cnt = len(img_ids)
cur = 1
for img_id in img_ids:
img_name = os.path.join(imgdir, img_id)
trimap_name = os.path.join(trimapdir, img_id)
assert(os.path.exists(img_name))
assert(os.path.exists(trimap_name))
sample_set.append((img_name, trimap_name))
return sample_set
def compute_gradient(img):
x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
grad = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
grad=cv2.cvtColor(grad, cv2.COLOR_BGR2GRAY)
return grad
# inference once for image, return numpy
def inference_once(args, model, scale_img, scale_trimap, aligned=True):
if aligned:
assert(scale_img.shape[0] == args.size_h)
assert(scale_img.shape[1] == args.size_w)
normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean = [0.485, 0.456, 0.406],std = [0.229, 0.224, 0.225])
])
scale_img_rgb = cv2.cvtColor(scale_img, cv2.COLOR_BGR2RGB)
# first, 0-255 to 0-1
# second, x-mean/std and HWC to CHW
tensor_img = normalize(scale_img_rgb).unsqueeze(0)
scale_grad = compute_gradient(scale_img)
#tensor_img = torch.from_numpy(scale_img.astype(np.float32)[np.newaxis, :, :, :]).permute(0, 3, 1, 2)
tensor_trimap = torch.from_numpy(scale_trimap.astype(np.float32)[np.newaxis, np.newaxis, :, :])
tensor_grad = torch.from_numpy(scale_grad.astype(np.float32)[np.newaxis, np.newaxis, :, :])
if args.cuda:
tensor_img = tensor_img.cuda()
tensor_trimap = tensor_trimap.cuda()
tensor_grad = tensor_grad.cuda()
#print('Img Shape:{} Trimap Shape:{}'.format(img.shape, trimap.shape))
input_t = torch.cat((tensor_img, tensor_trimap / 255.), 1)
# forward
if args.stage <= 1:
# stage 1
pred_mattes, _ = model(input_t)
else:
# stage 2, 3
_, pred_mattes = model(input_t)
pred_mattes = pred_mattes.data
if args.cuda:
pred_mattes = pred_mattes.cpu()
pred_mattes = pred_mattes.numpy()[0, 0, :, :]
return pred_mattes
# forward for a full image by crop method
def inference_img_by_crop(args, model, img, trimap):
# crop the pictures, and forward one by one
h, w, c = img.shape
origin_pred_mattes = np.zeros((h, w), dtype=np.float32)
marks = np.zeros((h, w), dtype=np.float32)
for start_h in range(0, h, args.size_h):
end_h = start_h + args.size_h
for start_w in range(0, w, args.size_w):
end_w = start_w + args.size_w
crop_img = img[start_h: end_h, start_w: end_w, :]
crop_trimap = trimap[start_h: end_h, start_w: end_w]
crop_origin_h = crop_img.shape[0]
crop_origin_w = crop_img.shape[1]
#print("startH:{} startW:{} H:{} W:{}".format(start_h, start_w, crop_origin_h, crop_origin_w))
if len(np.where(crop_trimap == 128)[0]) <= 0:
continue
# egde patch in the right or bottom
if crop_origin_h != args.size_h or crop_origin_w != args.size_w:
crop_img = cv2.resize(crop_img, (args.size_w, args.size_h), interpolation=cv2.INTER_LINEAR)
crop_trimap = cv2.resize(crop_trimap, (args.size_w, args.size_h), interpolation=cv2.INTER_LINEAR)
# inference for each crop image patch
pred_mattes = inference_once(args, model, crop_img, crop_trimap)
if crop_origin_h != args.size_h or crop_origin_w != args.size_w:
pred_mattes = cv2.resize(pred_mattes, (crop_origin_w, crop_origin_h), interpolation=cv2.INTER_LINEAR)
origin_pred_mattes[start_h: end_h, start_w: end_w] += pred_mattes
marks[start_h: end_h, start_w: end_w] += 1
# smooth for overlap part
marks[marks <= 0] = 1.
origin_pred_mattes /= marks
return origin_pred_mattes
# forward for a full image by resize method
def inference_img_by_resize(args, model, img, trimap):
    """Downscale image and trimap to the network size, run one forward
    pass, then upscale the prediction back to the original resolution."""
    height, width, _ = img.shape
    net_size = (args.size_w, args.size_h)
    small_img = cv2.resize(img, net_size, interpolation=cv2.INTER_LINEAR)
    small_tri = cv2.resize(trimap, net_size, interpolation=cv2.INTER_LINEAR)
    pred = inference_once(args, model, small_img, small_tri)
    restored = cv2.resize(pred, (width, height), interpolation=cv2.INTER_LINEAR)
    assert (restored.shape == trimap.shape)
    return restored
# forward a whole image
def inference_img_whole(args, model, img, trimap):
    """Run inference on the full image at a network-friendly resolution.

    Each side is rounded down to a multiple of 32 (capped at
    ``args.max_size``) before inference, and the prediction is resized
    back to the original resolution afterwards.
    """
    h, w, c = img.shape
    # Clamp to at least 32: for h (or w) < 32, h - (h % 32) is 0 and
    # cv2.resize would be asked for a zero-sized output and fail.
    new_h = min(args.max_size, max(32, h - (h % 32)))
    new_w = min(args.max_size, max(32, w - (w % 32)))
    # resize for network input, to Tensor
    scale_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    scale_trimap = cv2.resize(trimap, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    pred_mattes = inference_once(args, model, scale_img, scale_trimap, aligned=False)
    # resize to origin size
    origin_pred_mattes = cv2.resize(pred_mattes, (w, h), interpolation = cv2.INTER_LINEAR)
    assert(origin_pred_mattes.shape == trimap.shape)
    return origin_pred_mattes
def main():
    """Evaluate a matting model over a dataset of (image, trimap) pairs.

    Loads a checkpointed VGG16 model, runs whole/crop/resize inference
    according to ``args.crop_or_resize``, writes the predicted mattes to
    ``args.saveDir`` and, when ``args.alphaDir`` is set, reports SAD/MSE
    against ground-truth alphas (MSE is normalized by the number of
    unknown-region pixels).
    """
    print("===> Loading args")
    args = get_args()
    print("===> Environment init")
    #os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    if args.cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    model = net.VGG16(args)
    ckpt = torch.load(args.resume)
    if args.not_strict:
        model.load_state_dict(ckpt['state_dict'], strict=False)
    else:
        model.load_state_dict(ckpt['state_dict'], strict=True)
    if args.cuda:
        model = model.cuda()
    print("===> Load dataset")
    dataset = gen_dataset(args.imgDir, args.trimapDir)
    mse_diffs = 0.
    sad_diffs = 0.
    cnt = len(dataset)
    cur = 0
    t0 = time.time()
    for img_path, trimap_path in dataset:
        img = cv2.imread(img_path)
        # Trimaps are single-channel; keep only the first channel.
        trimap = cv2.imread(trimap_path)[:, :, 0]
        assert(img.shape[:2] == trimap.shape[:2])
        img_info = (img_path.split('/')[-1], img.shape[0], img.shape[1])
        cur += 1
        print('[{}/{}] {}'.format(cur, cnt, img_info[0]))
        with torch.no_grad():
            torch.cuda.empty_cache()
            if args.crop_or_resize == "whole":
                origin_pred_mattes = inference_img_whole(args, model, img, trimap)
            elif args.crop_or_resize == "crop":
                origin_pred_mattes = inference_img_by_crop(args, model, img, trimap)
            else:
                origin_pred_mattes = inference_img_by_resize(args, model, img, trimap)
        # only attention unknown region
        origin_pred_mattes[trimap == 255] = 1.
        origin_pred_mattes[trimap == 0 ] = 0.
        # origin trimap
        pixel = float((trimap == 128).sum())
        # eval if gt alpha is given
        if args.alphaDir != '':
            alpha_name = os.path.join(args.alphaDir, img_info[0])
            assert(os.path.exists(alpha_name))
            alpha = cv2.imread(alpha_name)[:, :, 0] / 255.
            assert(alpha.shape == origin_pred_mattes.shape)
            #x1 = (alpha[trimap == 255] == 1.0).sum() # x3
            #x2 = (alpha[trimap == 0] == 0.0).sum() # x5
            #x3 = (trimap == 255).sum()
            #x4 = (trimap == 128).sum()
            #x5 = (trimap == 0).sum()
            #x6 = trimap.size # sum(x3,x4,x5)
            #x7 = (alpha[trimap == 255] < 1.0).sum() # 0
            #x8 = (alpha[trimap == 0] > 0).sum() #
            #print(x1, x2, x3, x4, x5, x6, x7, x8)
            #assert(x1 == x3)
            #assert(x2 == x5)
            #assert(x6 == x3 + x4 + x5)
            #assert(x7 == 0)
            #assert(x8 == 0)
            # MSE is averaged over the unknown region only.
            mse_diff = ((origin_pred_mattes - alpha) ** 2).sum() / pixel
            sad_diff = np.abs(origin_pred_mattes - alpha).sum()
            mse_diffs += mse_diff
            sad_diffs += sad_diff
            print("sad:{} mse:{}".format(sad_diff, mse_diff))
        origin_pred_mattes = (origin_pred_mattes * 255).astype(np.uint8)
        res = origin_pred_mattes.copy()
        # only attention unknown region
        res[trimap == 255] = 255
        res[trimap == 0 ] = 0
        if not os.path.exists(args.saveDir):
            os.makedirs(args.saveDir)
        cv2.imwrite(os.path.join(args.saveDir, img_info[0]), res)
    print("Avg-Cost: {} s/image".format((time.time() - t0) / cnt))
    if args.alphaDir != '':
        print("Eval-MSE: {}".format(mse_diffs / cur))
        print("Eval-SAD: {}".format(sad_diffs / cur))
# Script entry point.
if __name__ == "__main__":
    main()
| [
"1432249204@qq.com"
] | 1432249204@qq.com |
150031906408644576efe4932f757a1e0abf4fa8 | ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f | /harness/determined/cli/sso.py | 026b6b502e98d88856d356e481ab9bf2cf8167e6 | [
"Apache-2.0"
] | permissive | determined-ai/determined | 9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e | 8239b1993f4f44390f4e88901ffaf3b12429b83c | refs/heads/main | 2023-08-21T12:13:36.651298 | 2023-08-21T08:34:16 | 2023-08-21T08:34:16 | 253,846,879 | 2,531 | 330 | Apache-2.0 | 2023-09-14T21:54:17 | 2020-04-07T16:12:29 | Go | UTF-8 | Python | false | false | 5,240 | py | import sys
import webbrowser
from argparse import Namespace
from getpass import getpass
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Callable, List
from urllib.parse import parse_qs, urlparse
from determined.common import api
from determined.common.api import authentication
from determined.common.declarative_argparse import Arg, Cmd
from determined.errors import EnterpriseOnlyError
CLI_REDIRECT_PORT = 49176
def handle_token(master_url: str, token: str) -> None:
    """Resolve the user owning ``token``, persist it and make them active."""
    headers = {"Cookie": "auth={token}".format(token=token)}
    user = api.get(master_url, "/users/me", headers=headers, authenticated=False).json()
    username = user["username"]
    store = authentication.TokenStore(master_url)
    store.set_token(username, token)
    store.set_active(username)
    print("Authenticated as {}.".format(username))
def make_handler(master_url: str, close_cb: Callable[[int], None]) -> Any:
    """Build a request-handler class that captures the SSO redirect token.

    ``close_cb`` is invoked with 0 on success and 1 on failure once a
    single GET request has been processed.
    """

    class TokenAcceptHandler(BaseHTTPRequestHandler):
        def do_GET(self) -> None:
            """Serve a GET request."""
            try:
                query = parse_qs(urlparse(self.path).query)
                token = query["token"][0]
                handle_token(master_url, token)
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(b"You can close this window now.")
                close_cb(0)
            except Exception as e:
                print("Error authenticating: {}.".format(e))
                close_cb(1)

        def log_message(self, format: Any, *args: List[Any]) -> None:  # noqa: A002
            # Silence server logging.
            return

    return TokenAcceptHandler
def sso(parsed_args: Namespace) -> None:
    """Sign on through one of the master's SSO providers.

    Normally opens the provider's URL in a browser and waits for the
    localhost redirect; with ``--headless`` (or when no browser can be
    opened) the user pastes the redirect URL manually instead.

    Raises:
        EnterpriseOnlyError: if the master exposes no SSO provider data.
    """
    master_info = api.get(parsed_args.master, "info", authenticated=False).json()
    try:
        sso_providers = master_info["sso_providers"]
    except KeyError:
        raise EnterpriseOnlyError("No SSO providers data")
    if not sso_providers:
        print("No SSO providers found.")
        return
    elif not parsed_args.provider:
        # Without an explicit provider we can only proceed when exactly
        # one is configured.
        if len(sso_providers) > 1:
            print("Provider must be specified when multiple are available.")
            return
        matched_provider = sso_providers[0]
    else:
        matching_providers = [
            p for p in sso_providers if p["name"].lower() == parsed_args.provider.lower()
        ]
        if not matching_providers:
            ps = ", ".join(p["name"].lower() for p in sso_providers)
            print("Provider {} unsupported. (Providers found: {})".format(parsed_args.provider, ps))
            return
        elif len(matching_providers) > 1:
            print("Multiple SSO providers found with name {}.".format(parsed_args.provider))
            return
        matched_provider = matching_providers[0]
    sso_url = matched_provider["sso_url"] + "?relayState=cli"
    if not parsed_args.headless:
        if webbrowser.open(sso_url):
            print(
                "Your browser should open and prompt you to sign on;"
                " if it did not, please visit {}".format(sso_url)
            )
            print("Killing this process before signing on will cancel authentication.")
            # Block until the provider redirects back with a token; the
            # handler stores it and then exits the process via close_cb.
            with HTTPServer(
                ("localhost", CLI_REDIRECT_PORT),
                make_handler(parsed_args.master, lambda code: sys.exit(code)),
            ) as httpd:
                return httpd.serve_forever()
        print("Failed to open Web Browser. Falling back to --headless CLI mode.")
    example_url = f"Example: 'http://localhost:{CLI_REDIRECT_PORT}/?token=v2.public.[long_str]'"
    print(
        f"Please open this URL in your browser: '{sso_url}'\n"
        "After authenticating, copy/paste the localhost URL "
        f"from your browser into the prompt.\n{example_url}"
    )
    token = None
    while not token:
        # getpass hides the pasted URL since it contains the auth token.
        user_input_url = getpass(prompt="\n(hidden) localhost URL? ")
        try:
            token = parse_qs(urlparse(user_input_url).query)["token"][0]
            handle_token(parsed_args.master, token)
        except (KeyError, IndexError):
            print(f"Could not extract token from localhost URL. {example_url}")
def list_providers(parsed_args: Namespace) -> None:
    """Print the names of all SSO providers configured on the master.

    Raises:
        EnterpriseOnlyError: if the master exposes no SSO provider data.
    """
    master_info = api.get(parsed_args.master, "info", authenticated=False).json()
    try:
        sso_providers = master_info["sso_providers"]
    except KeyError:
        raise EnterpriseOnlyError("No SSO providers data")
    # Truthiness check, consistent with the style used in sso() above.
    if not sso_providers:
        print("No SSO providers found.")
        return
    print("Available providers: " + ", ".join(provider["name"] for provider in sso_providers) + ".")
# fmt: off
# Declarative CLI tree: an `auth` command group exposing `login`
# (optionally with -p/--provider and --headless) and `list-providers`.
args_description = [
    Cmd("auth", None, "manage auth", [
        Cmd("login", sso, "sign on with an auth provider", [
            Arg("-p", "--provider", type=str,
                help="auth provider to use (not needed if the Determined master only supports"
                " one provider)"),
            Arg("--headless", action="store_true", help="force headless cli auth")
        ]),
        Cmd("list-providers", list_providers, "lists the available auth providers", []),
    ])
] # type: List[Any]
# fmt: on
| [
"noreply@github.com"
] | determined-ai.noreply@github.com |
4046e96f55b809722ee7d0abfafa746c1b62d3c5 | a5c7e7d1a271811b50b454d0c7cafe4b52e25d6a | /tests/model/test_proposal_forward.py | 884b84b0cb321ee0d6d74a1b7a207ed6621709b9 | [] | no_license | UW-COSMOS/mmmask-rcnn | adcc80d7a2854c5e74f2904441467dcac98eba41 | 225acd5dcde2ab225761ce83f5c9b42e33fdd27c | refs/heads/master | 2020-04-08T21:13:49.922387 | 2019-02-11T15:01:18 | 2019-02-11T15:01:18 | 159,735,593 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #cls_branch [1x24x60x60]
#bbox_branch [1x48x 60x60]
import torch
from model.proposal.proposal_layer import ProposalLayer
# Smoke test: push random score/regression maps through a ProposalLayer.
img_size = 500
min_size = 16
ratios = [1, 0.5, 2]  # anchor aspect ratios
scales = [64, 128, 256]  # anchor scales
layer = ProposalLayer(ratios, scales, image_size=img_size, min_size=min_size)
# 3 ratios x 3 scales = 9 anchors per cell: 18 = 9*2 class scores and
# 36 = 9*4 box deltas, on a 60x60 feature map.
rdata_cls = torch.rand(1, 18, 60, 60)
rdata_bbox = torch.rand(1, 36, 60, 60)
print(layer(rdata_cls, rdata_bbox)) | [
"joshuawmcgrath@gmail.com"
] | joshuawmcgrath@gmail.com |
3c061588bc9be7b65d25ad688bc573c0d434a9f0 | f481ef52fc8032c832c4ac4cd77bf61b76ed1419 | /web-mining/webming/spam/test.py | 514f26f8f15d6da48200944a3d36184fad70bd16 | [] | no_license | HaJiang/ucas-grade1 | 72e6f51b62d9c2ee8702220030912f9de067c68f | 847563b915d504ba8fd3dcc5822a8978e06000e3 | refs/heads/master | 2020-04-13T22:00:52.682050 | 2018-11-28T08:37:07 | 2018-11-28T08:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | def test():
return "test"
| [
"1635066070@qq.com"
] | 1635066070@qq.com |
d679e1bf6f86193efd75feb92ed2af8d5449b323 | e3cbfa486e960005790953fe02dfbbf52f7d2a6a | /Python/defi.py | 7ca5ef3a9d00b7cd958cc1b20e88995cbe2df04e | [] | no_license | rjpawar/Virtual-Stock-Market-Simulator | 64ff6212e97d204afe015310e405e79f1444cbf8 | ebc33d8032b3798629c84378c05c5cb867ac76d3 | refs/heads/master | 2022-12-14T16:46:44.471194 | 2020-09-07T11:05:23 | 2020-09-07T11:05:23 | 293,494,725 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | import nltk
import random
#from nltk.corpus import movie_reviews
from nltk.classify.scikitlearn import SklearnClassifier
import pickle
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
    """Majority-vote ensemble over several NLTK-style classifiers."""

    def __init__(self, *classifiers):
        self._classifiers = classifiers

    def classify(self, features):
        """Return the label chosen by the most sub-classifiers."""
        votes = [clf.classify(features) for clf in self._classifiers]
        return mode(votes)

    def confidence(self, features):
        """Return the fraction of sub-classifiers voting for the winner."""
        votes = [clf.classify(features) for clf in self._classifiers]
        winner = mode(votes)
        return votes.count(winner) / len(votes)
# Load the previously pickled (document, label) pairs.
documents_f = open('C:\Python\Python36-32\pickled_algos\documents.pickle', 'rb')
documents = pickle.load(documents_f)
documents_f.close()
# Load the pickled feature vocabulary (top-5k words, per the filename).
word_features5k_f = open('C:\Python\Python36-32\pickled_algos\word_features5k.pickle', 'rb')
word_features = pickle.load(word_features5k_f)
word_features5k_f.close()
def find_features(document):
    """Map ``document`` to a {word: bool} feature dict over the vocabulary.

    Each word in the global ``word_features`` vocabulary maps to True when
    it occurs in the tokenized document.
    """
    # Tokenize once into a set: membership tests become O(1) instead of
    # scanning the token list once per vocabulary word.
    words = set(word_tokenize(document))
    return {w: (w in words) for w in word_features}
# Vectorize every document into a feature dict, keeping its label.
featuresets = [(find_features(rev), category) for (rev, category) in documents]
##featuresets_f = open("pickled_algos/featuresets.pickle", "rb")
##featuresets = pickle.load(featuresets_f)
##featuresets_f.close()
random.shuffle(featuresets)
##print(len(featuresets))
# After shuffling: first 10000 examples train, the remainder test.
testing_set = featuresets[10000:]
training_set = featuresets[:10000]
##print (featuresets)
# Load the five pre-trained classifiers that form the voting ensemble.
open_file = open('C:\Python\Python36-32\pickled_algos\originalnaivebayes5k.pickle', 'rb')
classifier = pickle.load(open_file)
##print("Original Naive Bayes Algo accuracy percent:", (nltk.classify.accuracy(classifier, testing_set))*100)
open_file.close()
open_file = open('C:\Python\Python36-32\pickled_algos\MNB_classifier5k.pickle', 'rb')
MNB_classifier = pickle.load(open_file)
##print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)
open_file.close()
open_file = open('C:\Python\Python36-32\pickled_algos\BernoulliNB_classifier5k.pickle', 'rb')
BernoulliNB_classifier = pickle.load(open_file)
##print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)
open_file.close()
open_file = open('C:\Python\Python36-32\pickled_algos\LogisticRegression_classifier5k.pickle', 'rb')
LogisticRegression_classifier = pickle.load(open_file)
##print("LogisticRegression_classifier accuracy percent:", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)
open_file.close()
open_file = open('C:\Python\Python36-32\pickled_algos\LinearSVC_classifier5k.pickle', 'rb')
LinearSVC_classifier = pickle.load(open_file)
##print("LinearSVC_classifier accuracy percent:", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)
open_file.close()
##open_file = open("pickled_algos/SGDC_classifier5k.pickle", "rb")
##SGDC_classifier = pickle.load(open_file)
##open_file.close()
# Combine all loaded classifiers into the majority-vote ensemble.
voted_classifier = VoteClassifier(
    classifier,
    LinearSVC_classifier,
    MNB_classifier,
    BernoulliNB_classifier,
    LogisticRegression_classifier)
def sentiment(text):
    """Return the voting ensemble's sentiment label for ``text``."""
    return voted_classifier.classify(find_features(text))
# Classify the text in input.txt and write the predicted label to
# output.txt. Context managers replace the original's duplicate
# open('input.txt') (which leaked a file object) and the stray
# close() calls after the with-block.
with open('input.txt') as fp, open('output.txt', 'w') as F:
    fchar = fp.read(1)
    if not fchar:
        print("File empty")
    else:
        fp.seek(0)
        inp = fp.readlines()
        ip = ' '.join(inp)
        F.write(sentiment(ip))
| [
"noreply@github.com"
] | rjpawar.noreply@github.com |
4d34ac4c1ce6980cddd4b57993d10246b4aeaa18 | 5bfe5defb30968ea039003d40afb03e393040f2c | /NumberBasesChallenge.py | 3848da7270093c8cb9423377b49e1b6d0245f025 | [] | no_license | Beck-Haywood/Number-Bases | 52820bcbf57c6c05b1c962565a6a2242458085de | ca7c98e1725eeb960cdb05512af4e410ee75b0ca | refs/heads/master | 2022-05-17T05:46:29.098673 | 2020-04-11T23:10:18 | 2020-04-11T23:10:18 | 254,971,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,709 | py | import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
    """Decode given digits in given base to number in base 10.

    digits: str -- string representation of number (in given base),
        case-insensitive
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)

    Raises AssertionError when base is out of range or a digit is not
    valid in the given base."""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Full 36-symbol alphabet: the original table stopped at 'F', which
    # broke decoding for bases 17-36 despite the assertion above.
    alphabet = string.digits + string.ascii_uppercase
    output = 0
    for char in digits.upper():
        # Position in the alphabet is the digit's numeric value.
        value = alphabet.index(char)
        # The original silently accepted digits outside the base
        # (e.g. decode('F', 2) returned 15); reject them instead.
        assert value < base, 'invalid digit {!r} for base {}'.format(char, base)
        output = output * base + value
    return output
def encode(number, base):
    """Encode given number in base 10 to digits in given base.

    number: int -- integer representation of number (in base 10); strings
        convertible with int() are accepted as well
    base: int -- base to convert to
    return: str -- lowercase string representation of number (in given
        base)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert int(number) >= 0, 'number is negative: {}'.format(number)
    alphabet = string.digits + string.ascii_uppercase
    number = int(number)
    # The original returned '' for 0 because the division loop never ran.
    if number == 0:
        return '0'
    digits = []
    while number > 0:
        number, remainder = divmod(number, base)
        digits.append(alphabet[remainder].lower())
    digits.reverse()
    return ''.join(digits)
def convert(digits, base1, base2):
    """Convert given digits in base1 to digits in base2.

    digits: str -- string representation of number (in base1),
        case-insensitive
    base1: int -- base of given number
    base2: int -- base to convert to
    return: str -- lowercase string representation of number (in base2)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
    assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
    alphabet = string.digits + string.ascii_uppercase
    # digit character -> numeric value, for all 36 possible digits
    # (the original table also had a dead 'a': 36 entry that could never
    # be reached because every char is upper-cased first).
    values = {char: index for index, char in enumerate(alphabet)}
    # Parse the input: base1 -> integer.
    number = 0
    for char in digits:
        char = char.upper()
        assert char in values, 'Found unknown character!'
        value = values[char]
        assert value < base1, 'Found digit outside base!'
        number = number * base1 + value
    # The original returned '' when the parsed value was 0.
    if number == 0:
        return '0'
    # Re-encode: integer -> base2.
    out = []
    while number:
        number, value = divmod(number, base2)
        out.append(alphabet[value])
    return ''.join(reversed(out)).lower()
def main():
    """Read command-line arguments and convert given digits between bases."""
    import sys
    args = sys.argv[1:]  # Ignore script file name
    # Guard clause: anything but exactly three arguments prints usage.
    if len(args) != 3:
        print('Usage: {} digits base1 base2'.format(sys.argv[0]))
        print('Converts digits from base1 to base2')
        return
    digits = args[0]
    base1 = int(args[1])
    base2 = int(args[2])
    result = convert(digits, base1, base2)
    print('{} in base {} is {} in base {}'.format(digits, base1, result, base2))
if __name__ == '__main__':
    # Ad-hoc smoke check; the real CLI entry point main() is commented out.
    #main()
    # print(decode('11', 2))
    print(convert('101010', 2, 16))#'10'
# print(encode('10', 16)) | [
"beck.haywood@students.makeschool.com"
] | beck.haywood@students.makeschool.com |
01f149a939d7ee4687c0ce58037ed05278e16865 | 04142fdda9b3fb29fb7456d5bc3e504985f24cbe | /mmcv/cnn/bricks/upsample.py | 78fb5bf371712d13a72edf5d57151dca8fce6953 | [
"Apache-2.0"
] | permissive | open-mmlab/mmcv | 419e301bbc1d7d45331d67eccfd673f290a796d5 | 6e9ee26718b22961d5c34caca4108413b1b7b3af | refs/heads/main | 2023-08-31T07:08:27.223321 | 2023-08-28T09:02:10 | 2023-08-28T09:02:10 | 145,670,155 | 5,319 | 1,900 | Apache-2.0 | 2023-09-14T02:37:16 | 2018-08-22T07:05:26 | Python | UTF-8 | Python | false | false | 3,299 | py | # Copyright (c) OpenMMLab. All rights reserved.
import inspect
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import xavier_init
from mmengine.registry import MODELS
# Plain interpolation upsampling is served by nn.Upsample under both
# names; build_upsample_layer later reuses the registry key as ``mode``.
MODELS.register_module('nearest', module=nn.Upsample)
MODELS.register_module('bilinear', module=nn.Upsample)
@MODELS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    Packs an ``nn.Conv2d`` that expands the channel count by
    ``scale_factor ** 2`` together with ``F.pixel_shuffle`` to obtain a
    simple learned upsampling.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the conv layer used to
            expand the channels.
    """

    def __init__(self, in_channels: int, out_channels: int, scale_factor: int,
                 upsample_kernel: int):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # One pixel-shuffle output channel needs scale_factor**2 conv
        # channels; 'same' padding keeps the spatial size unchanged.
        expanded_channels = out_channels * scale_factor * scale_factor
        self.upsample_conv = nn.Conv2d(
            in_channels,
            expanded_channels,
            upsample_kernel,
            padding=(upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        expanded = self.upsample_conv(x)
        return F.pixel_shuffle(expanded, self.scale_factor)
def build_upsample_layer(cfg: Dict, *args, **kwargs) -> nn.Module:
    """Build upsample layer.

    Args:
        cfg (dict): The upsample layer config, which should contain:
            - type (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable to
                deconv.
            - layer args: Args needed to instantiate a upsample layer.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created upsample layer.

    Raises:
        TypeError: if ``cfg`` is not a dict.
        KeyError: if ``cfg`` has no ``type`` key or the type cannot be
            resolved in the registry.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(
            f'the cfg dict must contain the key "type", but got {cfg}')
    # Work on a copy so the caller's cfg dict is left untouched.
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if inspect.isclass(layer_type):
        upsample = layer_type
    # Switch registry to the target scope. If `upsample` cannot be found
    # in the registry, fallback to search `upsample` in the
    # mmengine.MODELS.
    else:
        with MODELS.switch_scope_and_registry(None) as registry:
            upsample = registry.get(layer_type)
        if upsample is None:
            raise KeyError(f'Cannot find {upsample} in registry under scope '
                           f'name {registry.scope}')
        if upsample is nn.Upsample:
            # The registry key ('nearest'/'bilinear') doubles as the
            # nn.Upsample interpolation mode.
            cfg_['mode'] = layer_type
    layer = upsample(*args, **kwargs, **cfg_)
    return layer
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
6dcb79e87264e7fd776726af5dd6a9f0300ca220 | a27ff9812aa21679252170c7aac62a2c3b2878e8 | /primeinterval.py | f8ed8d0472198f454343310e429642af5d81dbf7 | [] | no_license | gauthamej/Gautham | 486ad198d41b465969a1aa24f79adc68b2077284 | af5151272fd922798ac3175ccb7f65180a2bfe6a | refs/heads/master | 2020-06-20T02:40:56.298830 | 2019-09-19T17:00:20 | 2019-09-19T17:00:20 | 196,963,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | a=input()
# `a` holds the raw input line read above, expected as "low high".
# Print every prime p with low < p <= high, separated by spaces.
b = a.split()
low = int(b[0])
high = int(b[1])
for i in range(low + 1, high + 1):
    # 0, 1 and negatives are not prime; the original trial-division
    # loop printed them because range(2, i) is empty for i < 3... wait,
    # specifically for i <= 2 the inner loop never breaks. Skip i < 2.
    if i < 2:
        continue
    for j in range(2, i):
        if i % j == 0:
            break
    else:
        print(i, end=" ")
| [
"noreply@github.com"
] | gauthamej.noreply@github.com |
4a953255a429a2f40fc5395ffe98711f2211609e | 57528fa8666322b1d5de1f798b8432b3e75538a4 | /application/conf/base.py | 9049bc50affcc2b14ed285f9982aeba1a47de29e | [] | no_license | sprymak/django-project-template | 125e444f2918e10fbb00440b054f434a7df92963 | 0f7a4ca02e2423b370b8c2d642bcaad55bd34e1d | refs/heads/master | 2021-01-10T03:38:40.475457 | 2016-03-04T09:40:30 | 2016-03-04T09:40:30 | 47,358,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | import os
import sys
PROJECT_NAME = '{{ project_name }}'
def get_env_var(setting, default=None):
    """Return the value of environment variable ``setting``.

    Falls back to ``default`` when the variable is unset; when no default
    is given either, raises Django's ImproperlyConfigured.
    """
    if setting in os.environ:
        return os.environ[setting]
    if default is not None:
        return default
    # Normally you should not import ANYTHING from Django directly into
    # your settings, but ImproperlyConfigured is an exception.
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Environment variable '%s' is not set" % setting)
_server_software = get_env_var('SERVER_SOFTWARE', '')
# Development mode: explicit DEVELOPER env var, a `manage.py runserver`
# invocation, or a Dev*/WSGIServer server string.
# NOTE: map() must be materialized -- in Python 3 a map object never
# compares equal to a list, so the original comparison was always False.
IS_DEV = bool(get_env_var('DEVELOPER', '')) or (
    (list(map(os.path.basename, sys.argv[:2])) == ['manage.py', 'runserver']) or
    _server_software.startswith('Dev') or
    _server_software.startswith('WSGIServer'))
IS_TEST = 'test' in sys.argv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
APPLICATION_PATH = os.path.dirname(os.path.dirname(__file__))
BASE_DIR = os.path.dirname(APPLICATION_PATH)
APPS_PATH = os.path.join(APPLICATION_PATH, 'apps')
LIB_PATH = os.path.join(APPLICATION_PATH, 'lib')
# Data directory contains variable data files. This includes spool directories
# and files, administrative and logging data, and transient and temporary files.
# Data directory is specified here in order to make it possible to place the
# application to a read-only environment.
DATA_PATH = get_env_var('DATA_PATH', os.path.join(APPLICATION_PATH, 'var'))
CACHE_PATH = os.path.join(DATA_PATH, 'cache')
LOG_PATH = os.path.join(DATA_PATH, 'log')
TEMP_PATH = os.path.join(DATA_PATH, 'tmp')
LOG_FILENAME = '.'.join([PROJECT_NAME, 'log'])
PYTHON_EGG_CACHE = os.path.join(CACHE_PATH, 'eggs')
CELERY_ENABLED = False
| [
"sprymak@metacorus.com"
] | sprymak@metacorus.com |
4c01598bd3ce8e425239d1a95c041aee4a95176a | 1474556add836a089547339698cd4efabee528ca | /CNN_stress/stress_pipeline.py | fdc03d82848d4475cb06051d71de8d7fbc9e34d9 | [] | no_license | Caleta-Team-UCA/Baby-Stress | c3e77af7cc826bce12029cc924f93dcd36728374 | a92bff60e1c1ad9b02aeb895352ec842691e6664 | refs/heads/master | 2023-06-09T03:38:16.108161 | 2021-06-25T08:17:36 | 2021-06-25T08:17:36 | 376,797,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | import depthai as dai
import cv2
import typer
import numpy as np
from time import monotonic
def define_pipeline(face_detection_blob_path: str, stress_classifier_blob_path: str):
    """Create a DepthAI pipeline wiring a face-detection network into a
    stress classifier, with XLink "input"/"output" streams for the host."""
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_3)

    # Neural-network nodes.
    face_net = pipeline.createNeuralNetwork()
    face_net.setBlobPath(face_detection_blob_path)
    resizer = pipeline.createImageManip()
    resizer.initialConfig.setResize(224, 224)
    stress_net = pipeline.createNeuralNetwork()
    stress_net.setBlobPath(stress_classifier_blob_path)

    # Host <-> device link nodes.
    host_in = pipeline.createXLinkIn()
    host_in.setStreamName("input")
    host_out = pipeline.createXLinkOut()
    host_out.setStreamName("output")

    # Wiring: input -> face detector -> stress classifier -> output.
    # NOTE(review): the ImageManip resize node is created but left
    # unlinked, matching the original (its links were commented out).
    host_in.out.link(face_net.input)
    face_net.out.link(stress_net.input)
    stress_net.out.link(host_out.input)
    return pipeline
def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:
    """Resize ``arr`` and flatten it in planar (channel-first) order."""
    resized = cv2.resize(arr, shape)
    channel_first = resized.transpose(2, 0, 1)
    return channel_first.flatten()
def process_video(
    face_detection_blob_path: str,
    stress_classifier_blob_path: str,
    video_path: str,
):
    """Stream a video file through the DepthAI pipeline, printing each
    inference result; press 'q' to stop early."""
    pipeline = define_pipeline(face_detection_blob_path, stress_classifier_blob_path)
    with dai.Device(pipeline) as device:
        q_in = device.getInputQueue(name="input")
        # Fixed typo: the DepthAI API method is getOutputQueue, not
        # getOuputQueue (the original raised AttributeError here).
        q_out = device.getOutputQueue(name="output")
        cap = cv2.VideoCapture(video_path)
        while cap.isOpened():
            read_correctly, frame = cap.read()
            if not read_correctly:
                break
            # Pack the frame into a 300x300 planar ImgFrame for the device.
            img = dai.ImgFrame()
            img.setData(to_planar(frame, (300, 300)))
            img.setTimestamp(monotonic())
            img.setWidth(300)
            img.setHeight(300)
            # Send the packed ImgFrame (the original sent the raw numpy
            # frame and left ``img`` unused).
            q_in.send(img)
            output = q_out.get()
            print(output)
            if cv2.waitKey(1) == ord("q"):
                break
        # Release the capture handle instead of leaking it.
        cap.release()
def main(
    face_detection_blob_path: str = "/home/users/ucadatalab_group/javierj/Baby-Stress/.Models/face_detection.blob",
    stress_classifier_blob_path: str = "/home/users/ucadatalab_group/javierj/Baby-Stress/.Models/resnet_imagenet/frozen_graph.blob",
    video_path: str = "/home/users/ucadatalab_group/javierj/SHARED/baby-stress/videos/21-center-3.mp4",
):
    # Typer CLI entry point; the defaults point at cluster-local paths.
    process_video(face_detection_blob_path, stress_classifier_blob_path, video_path)
# Let Typer parse CLI options into main()'s keyword arguments.
if __name__ == "__main__":
    typer.run(main)
| [
"javijj99@gmail.com"
] | javijj99@gmail.com |
9bd0c39eba20297e9ffe85547dafddafd3c75a51 | e1a6bda06cff21efce944cff6fa13a2939f2b6b5 | /testPython.py | a252cf0b7d5cb57c29d2e3f7483ee7b6671d104c | [] | no_license | pfite/CADcommands | f59bf1dacd163b313d989fb0561cdfebae7d1061 | 32db61ab7073b9899d448fde676c596850a1d0e9 | refs/heads/master | 2020-03-23T05:56:25.068799 | 2018-07-16T18:46:53 | 2018-07-16T18:46:53 | 141,178,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | import tkinter
from tkinter import ttk
from tkinter import *
from tkinter.scrolledtext import ScrolledText
import codecs
outs = 1
pot = 1
callAmount = 1
def whatToDo():
    """Read the three entry fields, compare draw odds (outs/47) against
    the price of the call, and display call/fold advice in the window."""
    line_1 = ''
    line_2 = ''
    line_3 = ''
    if outs_entry.get() != '':
        outs = int(outs_entry.get())
        pot = int(pot_entry.get())
        callAmount = int(callAmount_entry.get())
        line_1 = "Your pot odds are " + str(round((outs/47), 2))
        line_2 = "Your hand equity is " + str(round((callAmount/pot), 2))
        # Decide on the unrounded values, then build the label once
        # (the original duplicated the Label creation in both branches).
        if (outs / 47) > callAmount / pot:
            line_3 = "You should call."
        else:
            line_3 = "You should fold.\n"
        panel = ttk.Label(content, text = line_1 + '\n' + line_2 + '\n' + line_3).grid(row =5, column = 1)
    else:
        print('empty')
        panel = ttk.Label(root, text = line_1 + '\n' + line_2 + '\n' + line_3).grid(row=0,column=0, sticky = W, padx = (550,0), pady = (0,150))
# --- Build the main window and its widgets ---
root = Tk()
content = ttk.Frame(root)
frame = ttk.Frame(content, borderwidth=5, relief="sunken")
content.grid(column=0, row=0, sticky=(N, E), rowspan = 3,)
frame.grid(column=0, row=0, columnspan=10, rowspan=10, sticky = (N,W))
# Row labels for the three input fields (rows 3-4 are spacers).
one = ttk.Label(content, text="Outs: ").grid(row=0, sticky =W)
two = ttk.Label(content, text="Pot: ").grid(row=1, sticky=W)
three = ttk.Label(content, text="Amount to call ").grid(row=2, pady = (40,5),sticky = W)
four = ttk.Label(content, text=" ").grid(row=3, sticky = W)
five = ttk.Label(content, text=" ").grid(row=4, sticky = W)
# Entry widgets read by whatToDo().
outs_entry = ttk.Entry(content, width = 25)
pot_entry = ttk.Entry(content, width = 25)
callAmount_entry = ttk.Entry(content, width = 25)
# NOTE(review): Subject and MessageBody are created but never gridded
# or read anywhere in this file.
Subject = ttk.Entry(content, width = 60)
MessageBody = Text(content, height = 8)
outs_entry.grid(row=0, column=1, pady = (10,5), sticky = W)
pot_entry.grid(row=1, column=1,sticky = W)
callAmount_entry.grid(row=2, column= 1, pady = (40,5), sticky = W)
# The button triggers the odds calculation defined above.
Send = Button(content, text = "Should I fold?", width = 20, bg = "grey", command = whatToDo)
Send.grid(row =6, column = 1, pady = (0,10))
root.mainloop()
| [
"noreply@github.com"
] | pfite.noreply@github.com |
ab4ee919a1dcdddb3b33985c251661f195172c83 | 1bf98c6c47e882b524f93b990eaeeef9f620bb33 | /code/mnist/backend/mnist/model.py | aa0cdd2a71f785ef2c1055672f127ac2821f5f17 | [] | no_license | LublinIT/meet-js-lublin-7 | 2bd00caaf60eb36232846d2c63e60b6c65fb7c80 | e896e66d0a56c761052168413c097998066e83ea | refs/heads/master | 2021-08-31T12:07:53.848204 | 2017-12-21T08:02:46 | 2017-12-21T08:02:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | import tensorflow as tf
# Softmax Regression Model
def regression(x):
    """Single softmax layer over 784-dim inputs; returns (y, [W, b])."""
    weights = tf.Variable(tf.zeros([784, 10]), name="W")
    bias = tf.Variable(tf.zeros([10]), name="b")
    probabilities = tf.nn.softmax(tf.matmul(x, weights) + bias)
    return probabilities, [weights, bias]
# Multilayer Convolutional Network
def convolutional(x, keep_prob):
    """Two conv/max-pool stages, a 1024-unit dense layer with dropout and
    a 10-way softmax readout; returns (y, [all trainable variables]).

    x: flattened 28x28 input batch (reshaped internally).
    keep_prob: dropout keep probability applied to the dense layer.
    """
    def conv2d(x, W):
        # Stride-1 'same' convolution.
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    def max_pool_2x2(x):
        # 2x2 max pooling halves each spatial dimension.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)
    def bias_variable(shape):
        # Small positive bias to avoid dead ReLUs at init.
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    # First Convolutional Layer
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Second Convolutional Layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Densely Connected Layer
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Readout Layer
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    return y, [W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2]
| [
"patryk.omiotek@gmail.com"
] | patryk.omiotek@gmail.com |
d777bd7774d2cc10172aa57d2f669bc44ef4859e | ddc460c39af3bc5e3383a75f6733dca048ebdfe2 | /mvc/Model.py | 2dbd97b7d3c3a9c0d71ac3c13a15721e8b82605f | [] | no_license | corentingosselin/mastermind | 31c7855009e1ea22fbc8ac394bfdd91a339773ff | c8c132e7f5f6805cb17c18e2aa8b907963fd007d | refs/heads/master | 2023-03-02T18:58:39.530793 | 2021-02-13T15:32:35 | 2021-02-13T15:32:35 | 338,605,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | #
# Model.class
#
# @Description:
#
# The Model contains all data of the game,
# Also, some useful methods to access data faster
#
#
from Pos import Pos
class Model:
def __init__(self):
self.started = False
# Size of 4 max
self.answer = [None] * 4
self.color_selected = None
self.slots = [[None] * 4 for _ in range(10)]
self.currentLine = 0
self.maxLine = 10
def set_slot(self, slot, color):
self.slots[self.currentLine][slot] = color
# Get colors of the current line
def get_colors(self):
return self.slots[self.currentLine]
def get_colors_line(self, line):
return self.slots[line]
def next_line(self):
self.currentLine += 1
def has_won(self):
# Better solution
currentAnswer = self.slots[self.currentLine]
return currentAnswer == self.answer
#for i in currentAnswer:
# if currentAnswer[i] != self.answer[i]:
# return False
#return True
| [
"coco_gigpn@hotmail.com"
] | coco_gigpn@hotmail.com |
77c0f1d1ffcd59663e23c8a8588befa6618e51a6 | 762891c38f58db6b292e394bbe623c1b3d13f787 | /test/test_data_utils.py | 999e5a46e6c08c66429de53422cadb44cd580daf | [] | no_license | myxa/PandemicDataHack | 9c1fc1461bb2b17fc9613c22f198165895b8b520 | b0c1293238455e0313b423f4431a580f4c1b67b3 | refs/heads/main | 2023-03-07T07:41:52.063254 | 2021-02-20T17:30:50 | 2021-02-20T18:12:25 | 322,592,872 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 57 | py |
def test_download_from_google_disc():
assert False
| [
"ekaterinaaleksandrova7058@gmail.com"
] | ekaterinaaleksandrova7058@gmail.com |
607e0d42eb75d425c8153e3f6ce3aa530ecf5c86 | aa118568f6ecc7c10ab7947a65cd117e9610cdba | /contact/models.py | 05b2d783858d321c0e6cd3249ce925bbdf6cd9b0 | [] | no_license | mstelwach/dj-address-book-app | 147765182e25268b39b140264b155597c3c1f85e | 564ebb927562582eac44e87edde04aadde285337 | refs/heads/master | 2020-07-21T08:24:27.423020 | 2019-11-14T13:07:05 | 2019-11-14T13:07:05 | 206,796,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | from django.core.validators import RegexValidator
from django.db import models
from account.models import Account
class Contact(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
account = models.ForeignKey(Account, on_delete=models.CASCADE)
birth_year = models.DateField(null=True, blank=True)
site_url = models.CharField(max_length=50, blank=True)
def __str__(self):
return 'Account: {} | First Name: {} | Last Name: {}'.format(self.account.username,
self.first_name,
self.last_name)
LABEL_PHONE = [
('home', 'Home'),
('work', 'Work'),
('mobile_phone', 'Mobile Phone'),
('primary', 'Primary'),
('pager', 'Pager')
]
only_numbers = RegexValidator(r'^[0-9]*$', 'Only numbers are allowed.')
class Phone(models.Model):
person = models.ForeignKey(Contact, on_delete=models.CASCADE, editable=False)
phone = models.CharField(max_length=50, validators=[only_numbers])
label = models.CharField(max_length=16, choices=LABEL_PHONE, default=LABEL_PHONE[2][0])
def __str__(self):
return '{}'.format(self.phone)
LABEL_EMAIL = [
('home', 'Home'),
('work', 'Work'),
]
class Email(models.Model):
person = models.ForeignKey(Contact, on_delete=models.CASCADE, editable=False)
email = models.EmailField()
label = models.CharField(max_length=16, choices=LABEL_EMAIL, default=LABEL_EMAIL[0][0])
def __str__(self):
return '{}'.format(self.email)
SOCIAL_PROFILES = [
('twitter', 'Twitter'),
('facebook', 'Facebook'),
('linkedin', 'Linkedin'),
('instagram', 'Instagram')
]
class SocialProfile(models.Model):
person = models.ForeignKey(Contact, on_delete=models.CASCADE, editable=False)
name = models.CharField(max_length=16, choices=SOCIAL_PROFILES)
profile = models.CharField(max_length=50)
def __str__(self):
return '{} -- {}'.format(self.name, self.profile)
| [
"m.stelwach@gmail.com"
] | m.stelwach@gmail.com |
112f04109b752654ef34bf7922a64953ed12ac74 | 947b9be5fa37b8f6f23a2a6f583c54ae94cb3aa4 | /info.py | 3f255a1b93aa36887653d025343dafa3bbfd19c4 | [] | no_license | umutbasal/riots | 4b5caf26156ffc0af763bb7d6e133a0df71ad133 | c48210d6c0b5765451fefc0459441d00b299b49b | refs/heads/master | 2020-04-11T14:13:10.166072 | 2018-12-13T20:18:59 | 2018-12-13T20:18:59 | 161,846,156 | 0 | 0 | null | 2018-12-14T22:16:53 | 2018-12-14T22:16:52 | null | UTF-8 | Python | false | false | 1,000 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
# UML Schema
class MetaData:
def __init__(self, location, photo, feedback,description, sensors):
self.location = location
self.photo = photo
self.feedback = feedback
self.description = description
self.sensors = sensors
self.db = "data.json"
def dataFile(self, data):
"""
Write Json data to the file
"""
with open(self.db, 'r') as file:
decodedFile = json.loads(file.read())
data['user_id'] = decodedFile[-1]['user_id'] + 1 if len(decodedFile) else 0
decodedFile.append(data)
file.close()
with open(self.db, 'w') as file:
encodedFile = json.dumps(decodedFile)
file.write(encodedFile)
file.close()
def send(self):
"""
Get json data
"""
self.dataFile({ "time": time.time(), "location": self.location, "photo": self.photo, "feedback": self.feedback, "description": self.description, "sensors": self.sensors }) | [
"rootgebruiker@gmail.com"
] | rootgebruiker@gmail.com |
777eb40ed21ee0d19af77905e76a9196639a19d6 | 20ac11c2d5f1487a218442475f64ee92b330e718 | /src/tests/test_command_20.py | f1879f8bef46abdb0b1ea769d1f7af320522ad5a | [] | no_license | DimitarTerzov/cWeb_thai_dev | b007c869f22b55a677a0ab881dcdf3125707a543 | 9a3983fbeb5cd4a7891a3744c2cd14d2b1683e7d | refs/heads/master | 2022-07-30T15:13:29.933678 | 2020-05-20T08:09:05 | 2020-05-20T08:09:05 | 260,390,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from utils import temporary_file
from command_20.validator_20 import command20
CONTENT = [
u'<Speakers>\n', # 0
u'<Speaker id="spk14" name="Paul Ruseau"/>\n', # 1
u'</Speakers>\n', # 2
u'<Episode>\n', # 3
u'<Section type="report" startTime="0" endTime="3489.288">\n', # 4
u'<Turn speaker="spk2" startTime="86.623" endTime="88.487">\n', # 5
u'<Sync time="86.623"/>\n', # 6
u"<initial> error1 </initial>> Good evening once again.\n", # 7
u'<Sync time="88.487"/>\n', # 8
u"<initial> error2 </initial> And we're here to once again\n", # 9
u'</Turn>\n', # 10
u'<Turn speaker="spk3" startTime="85.725" endTime="86.623">\n', # 11
u'<Sync time="85.725"/>\n', # 12
u'[lipsmack] <lang:Spanish> ČSSD wrong tags in code error 9 </lang:Spanish> Over\n', # 13
u'<Sync time="86.623"/>\n', # 14
u'<Background time="4.263" type="other" level="high"/>\n', # 15
u'Flag of the United <lang:respect> States of America\n', # 16
u'<Sync time="88.487"/>\n', # 17
u"We strongly believe it l\'<initial> ONU </initial> is important,\n", # 18
u'<Sync time="94.373"/> \n', # 19
u'<Sync time="3415.916"/>\n', # 20
u'<Background time="4.263" type="other" level="high"/>\n', # 21
u'Your presence here to <lang:body> listen to the ten candidates.\n', # 22
u'</Turn>\n', # 23
u'<Turn startTime="3415.916" endTime="3416.904">\n', # 24
u'<Sync time="3415.916"/>\n', # 25
u'</Turn>\n', # 26
u'</Section>\n', # 27
u'</Episode>\n', # 28
u'</Trans>\n', # 29
]
EXCLUDE = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 14,
15, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29]
CATCH = [9, 13, 16, 22]
def test_command_20(tmpdir):
file_ = temporary_file(tmpdir, CONTENT)
found = command20(file_)
for key in sorted(found.keys()):
print(key, found[key])
for row in EXCLUDE:
assert row not in found
for row in CATCH:
assert row in found
#assert 0
| [
"dimiterat@gmail.com"
] | dimiterat@gmail.com |
82dc17c898372d0871fa9c8edbaeb483147592b8 | f7deb23c08fa89c73e44e7bf865f8c03ef6e20f6 | /demo_aca_tsp.py | 4dcdae41b42607e6103fed2320a653b160dadc39 | [] | no_license | qinhew/MY_D_Python | b85a0263376826b1d29a7fd4fc327fc4a72d42f6 | 51bf823c742743291b8720541b498313c4760367 | refs/heads/master | 2022-12-19T03:42:52.181513 | 2020-09-30T02:56:03 | 2020-09-30T02:56:03 | 298,992,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | from sko.ACA import ACA_TSP
import numpy as np
from scipy import spatial
import pandas as pd
import matplotlib.pyplot as plt
num_points = 25
points_coordinate = np.random.rand(
num_points, 2) # generate coordinate of points
# 计算两个输入集合的距离,通过metric参数指定计算距离的不同方式得到不同的距离度量值
distance_matrix = spatial.distance.cdist(
points_coordinate, points_coordinate, metric='euclidean')
def cal_total_distance(routine):
num_points, = routine.shape
return sum([distance_matrix[routine[i % num_points], routine[(i + 1) % num_points]] for i in range(num_points)])
# %% Do ACA
aca = ACA_TSP(func=cal_total_distance, n_dim=num_points,
size_pop=50, max_iter=200,
distance_matrix=distance_matrix)
best_x, best_y = aca.run()
# %% Plot
fig, ax = plt.subplots(1, 2)
best_points_ = np.concatenate([best_x, [best_x[0]]])
best_points_coordinate = points_coordinate[best_points_, :]
ax[0].plot(best_points_coordinate[:, 0], best_points_coordinate[:, 1], 'o-r')
pd.DataFrame(aca.y_best_history).cummin().plot(ax=ax[1])
plt.show()
| [
"270240626@qq.com"
] | 270240626@qq.com |
8ea30a904ab9f58fdaab56a29948018b7f7f89eb | c669a1c79814a634e51f1e40ff588dd71ca82044 | /fw.py | 1d081a76b8781cfeda2b099d5ff94b696df8b2dd | [] | no_license | Lukas-VanDyke/Firewall | 51c60b22b3c7b888d1b577496df14b853c73dd65 | ddf4d04308e9b4991d1c707a445db77b057cc1d3 | refs/heads/master | 2021-01-18T18:04:40.722722 | 2017-03-31T16:54:13 | 2017-03-31T16:54:13 | 86,841,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | import sys
filenameConfig = sys.argv[1]
rules = []
try:
# Read config file
configFile = open(filenameConfig, "r")
counter = 1
rule = configFile.readline()
while rule != "":
commentCheck = rule.split()
if len(commentCheck) > 0:
if commentCheck[0].startswith('#') == False:
if len(rule.split()) > 0:
# Append line number
rule = str(counter) + " " + rule
#Parsing rules
parsedRules = rule.split()
if len(rule.split()) > 6:
sys.stderr.write("Incorrect configuration file format\n")
raise
if parsedRules[len(parsedRules) - 1] == "":
del parsedRules[len(parsedRules) - 1]
IP = parsedRules[3]
if IP == "*":
mask = ""
else:
IP = IP.replace("/", ".")
stringIP = ""
parsedIP = IP.split(".")
for i in range(4):
num = parsedIP[i]
num = int(num)
stringIP += '{0:08b}'.format(num)
mask = stringIP[:int(parsedIP[4])]
parsedRules[3] = mask
ports = parsedRules[4]
if ports == "*":
portList = []
else:
portList = ports.split(",")
parsedRules[4] = portList
if len(parsedRules) == 6:
parsedRules[5] = "1"
else:
parsedRules.append("0")
rules.append(parsedRules)
rule = configFile.readline()
counter += 1
# Read packet file
packet = sys.stdin.readline()
counter = 1
while packet != "":
# Change IP to hex string format
parsedPacket = packet.split()
if parsedPacket[len(parsedPacket) - 1] == "":
del parsedPacket[len(parsedPacket) - 1]
if len(parsedPacket) != 4:
sys.stderr.write("Incorrect packet format\n")
raise
IP = parsedPacket[1]
stringIP = ""
parsedIP = IP.split(".")
for i in range(4):
num = parsedIP[i]
num = int(num)
stringIP += '{0:08b}'.format(num)
parsedPacket[1] = stringIP
ruleFound = False
# Determine packet action
for r in rules:
if parsedPacket[0] == r[1]:
if parsedPacket[2] in r[4] or len(r[4]) == 0:
if parsedPacket[3] == r[5] or r[5] == "0":
# Compare packet IP with rule mask
if parsedPacket[1].startswith(r[3]):
action = r[2]
lineNum = r[0]
ruleFound = True
break
if not ruleFound:
action = "drop"
lineNum = -1
# Program output
result = action
if lineNum == -1:
result += "() " + parsedPacket[0] + " " + IP + " " + parsedPacket[2] + " " + parsedPacket[3]
else:
result += "(" + lineNum + ") " + parsedPacket[0] + " " + IP + " " + parsedPacket[2] + " " + parsedPacket[3]
sys.stdout.write(result + "\n")
packet = sys.stdin.readline()
counter += 1
except:
sys.stderr.write("Something went wrong, ending execution \n")
| [
"noreply@github.com"
] | Lukas-VanDyke.noreply@github.com |
2bf15b5b3b236eaf6ddb6156eb54a8bc9affd56f | 00285488980a0d72f85629a35eba73deb706fbef | /insert_word.py | 85500c1de6d0dfb6d46004ec8af696b1fc15a1b6 | [] | no_license | abc412685726/AID1907 | 674fda030552f89a4211a692781c0d23295f895f | 5350115aab1d022dae0813193fdb995f4872a372 | refs/heads/master | 2023-06-22T22:55:49.346718 | 2021-07-21T07:23:45 | 2021-07-21T07:23:45 | 387,210,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import pymysql
f=open('dict.txt','r')
db=pymysql.connect(user='root',
passwd='123456',
database='dict',
charset='utf8')
cur=db.cursor()
sql = "insert into words (word,mean) values (%s,%s);"
for line in f:
list01=line.split(' ',1)
word=list01[0]
mean=list01[1].strip()
cur.execute(sql,[word,mean])
try:
db.commit()
except Exception as e:
db.rollback()
cur.close()
db.close()
| [
"412685726@qq.com"
] | 412685726@qq.com |
72e5e14706059b1675c5cc2b0013ef0c073e5ede | 90f989e782d8be1f24050c6a31bc8d7224aa88a0 | /files/preprocess_files.py | f89c99b2dc874b0a362802b59511dc213145f7af | [] | no_license | arnicas/TopicsPythonGephi | 761ca1492edbe282a9fb7829897cc4a6d68a5d17 | f5a1531039d9e1e8c4cff486c4e81bf86ae005e1 | refs/heads/master | 2016-08-03T14:22:23.112255 | 2014-10-23T14:26:06 | 2014-10-23T14:26:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,940 | py | """
Usage: python preprocess_files.py [original_dir] [new_dir] [part_of_speech]
Examples of Tokenizing, Cleaning, Rewriting with Parts of Speech
This notebook assumes you installed nltk and pattern already. (@arnicas, Aug 2014)
"""
import os
from pattern.en import tag as ptag
import ununicode
import string
import sys
def print_all_tags(text):
for word, tag in ptag(text):
print word, tag
# Make it more general - pass in the Part of Speech you want
def tag_by_line(line, tag):
# matching the first two chars of the tag - so verbs are vb, etc.
words = []
for word,foundtag in ptag(line):
if foundtag[0:2]== tag: # first 2 chars are the base tag
words.append(word)
return words
def cleanup(text):
""" This function cleans up the text a bit, and removes newlines."""
data = text.decode('utf8')
data = ununicode.toascii(data) # a useful function that does some simple unicode replacements
data = data.replace('\r', '') # windows newline in some files
data = data.replace('\n', ' ') # turn newline into space
data = data.strip() # strip remaining white space chars from edges
return data
def cleanup_and_pos(text, pos='VB'):
""" Run the cleanup function, convert to a list of verbs, clean out misc stuff left."""
other_things_to_strip = [] # put your own list of strings here, based on reading outputs
data = cleanup(text)
verbs = tag_by_line(data, pos) # returns a list of verbs
cleanverbs = [verb for verb in verbs if verb not in string.punctuation]
cleanverbs = [word for word in cleanverbs if word not in other_things_to_strip]
newdata = ' '.join(cleanverbs) # make it a single string
return newdata
def rewrite_all(dirname, newdir, funct=cleanup_and_pos, pos='VB'):
""" Take in a director of original docs, a new directory name, and a function to apply to each."""
if not os.path.exists(newdir):
print 'Making directory', newdir
os.makedirs(newdir)
for (dirpath, dirname, filenames) in os.walk(dirname):
for file in filenames:
print dirpath, file
with open(os.path.join(dirpath,file), 'r') as myfile:
data=myfile.read()
newdata = funct(data, pos=pos)
newfilename = os.path.basename(file)
with open(os.path.join(newdir, newfilename), 'wb') as temp_file:
temp_file.write(newdata.encode('utf8'))
print 'Wrote ', os.path.join(newdir, newfilename)
def main():
if len(sys.argv) != 4:
print 'Usage: python preprocess_files.py [original_dir] [new_dir] [part_of_speech]'
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
POS = sys.argv[3]
rewrite_all(input_dir, output_dir, funct=cleanup_and_pos, pos=POS)
print 'Wrote out new files to ', output_dir
if __name__ == "__main__":
main()
| [
"arnicas@gmail.com"
] | arnicas@gmail.com |
538d305b13a78c28e4dbcf06d1c2aa0a456506b8 | 0453885fd8159617734294ea2d5fd6dde44878c5 | /mysite/mysite/urls.py | b2914beb0be2fdb7958b77feadb01cb46c562f45 | [] | no_license | kristan-dev/python-django-rest-template | 5ff827eac07c3ec2c496d6010d9630b4acdb05a4 | fc522ece9add8bfc42837ec72e3597abdb3ae52c | refs/heads/master | 2022-12-20T17:07:04.632263 | 2020-08-30T07:18:17 | 2020-08-30T07:18:17 | 291,420,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('myapi.urls')),
]
| [
"kristan.sangalang@gmail.com"
] | kristan.sangalang@gmail.com |
e8ac13fc07b18a2640abd0a15d06f2f5ed3b98e9 | dc4b9b2de30e012df640110f2e046c27c7785b3a | /blog/handler/admin.py | b5817614d6e473d14153f2d82a458e8f1b2690e9 | [] | no_license | leopallas/i733.com | 84a6ac5303a735f89633d5584aa694bdd9df94d2 | 51baf15aa8f4b4d3fbcf4465c19d8fd70355f73c | refs/heads/master | 2021-01-23T20:17:05.325135 | 2014-06-26T07:45:20 | 2014-06-26T07:45:20 | 18,587,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,406 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Leo Xu <leopallas@gmail.com>
# Version: 1.0
# Create: 2014-05-30 16:03
# Copyright 2014 LEO
import hashlib
import datetime
from tornado.web import authenticated
from blog.handler.basic import BaseHandler, responseJson, SECURE_COOKIE
from blog.model.models import User, Post, Term, TermRelationship, Option, TermTaxonomy
from blog.form.forms import AdminLoginForm, PageAddForm, TermAddForm, PostAddForm, UserEditForm
from blog.util import get_datetime_from_date_now
class AdminLoginHandler(BaseHandler):
    """Render the admin sign-in page and authenticate posted credentials."""

    def get(self):
        # An already-authenticated session skips the form entirely.
        if self.current_user:
            return self.redirect('/admin/home')
        self.render("admin/login.html", form=AdminLoginForm(self))

    def post(self):
        form = AdminLoginForm(self)
        if form.validate():
            # Stored passwords are md5 hex digests, so hash the submitted
            # value before comparing it with the persisted one.
            digest = unicode(hashlib.md5(form.password.data).hexdigest(), 'utf-8')
            account = self.db.query(User).filter_by(login=form.username.data).first()
            if account is not None and account.password == digest:
                self.set_secure_cookie(SECURE_COOKIE, account.login)
                return self.redirect('/admin/home')
            form.password.errors.append('Username or Password is wrong')
        return self.render("admin/login.html", form=form)
class AdminLogoutHandler(BaseHandler):
    """End the admin session and send the visitor back to the front page."""

    @authenticated
    def get(self):
        # Dropping the auth cookie is all that is needed to log out.
        self.clear_cookie(SECURE_COOKIE)
        self.redirect("/")
class AdminHomeHandler(BaseHandler):
    """Dashboard landing page of the admin area."""

    @authenticated
    def get(self):
        return self.render('admin/home.html')

    @authenticated
    def post(self):
        # No POST behaviour yet; kept as a placeholder.
        pass
class AdminProfileHandler(BaseHandler):
    """Let the signed-in admin update display name, email and password."""

    @authenticated
    def get(self):
        # Pre-fill the form with the current account values.
        form = UserEditForm(self)
        form.displayname.process_data(self.current_user.display_name)
        form.email.process_data(self.current_user.email)
        return self.render('admin/profile.html', form=form, success=None)

    @authenticated
    def post(self):
        form = UserEditForm(self)
        if form.validate():
            # The stored password is an md5 hex digest, so hash the typed
            # "old password" before comparing (same scheme as the login view).
            old_digest = unicode(hashlib.md5(form.password0.data).hexdigest(), 'utf-8')
            user = self.db.query(User).get(self.current_user.id)
            if user and user.password == old_digest:
                user.display_name = form.displayname.data
                user.email = form.email.data
                # Bug fix: persist the *hashed* new password. The previous
                # code stored the raw text, which made login impossible
                # afterwards because AdminLoginHandler compares md5 digests.
                user.password = unicode(hashlib.md5(form.password1.data).hexdigest(), 'utf-8')
                self.db.commit()
                return self.render('admin/profile.html', form=form, success=user)
            else:
                form.password0.errors.append('The Old Password is wrong')
        # Bug fix: also re-render when form validation fails, instead of
        # silently returning an empty response as before.
        form.displayname.process_data(self.current_user.display_name)
        form.email.process_data(self.current_user.email)
        return self.render('admin/profile.html', form=form, success=None)
class AdminSettingHandler(BaseHandler):
    """Display and persist the site-wide options shown on the settings page."""

    # Option keys accepted from the settings form; keeps post() free of the
    # long repeated request.arguments / update_or_add_option boilerplate.
    _OPTION_KEYS = (
        'blogname', 'blogdescription', 'users_can_register', 'admin_email',
        'comments_notify', 'posts_per_rss', 'rss_use_excerpt',
        'default_category', 'users_can_comment', 'posts_per_page',
        'posts_per_recent_post', 'posts_per_recent_comment',
    )

    @authenticated
    def get(self):
        return self.render('admin/setting.html', setting=self.options, success=None)

    def update_or_add_option(self, key, value):
        """Upsert a single row of the options table (no commit here)."""
        option = self.db.query(Option).filter_by(name=key).first()
        if option:
            option.value = value
        else:
            self.db.add(Option(name=key, value=value))

    @authenticated
    def post(self):
        try:
            for key in self._OPTION_KEYS:
                self.update_or_add_option(key, self.request.arguments[key][0])
            self.db.commit()
            # Refresh the cached options so the page reflects the new values.
            self.update_options()
        except Exception:
            # A missing form field or failed commit must not leave the
            # session half-modified; roll it back. (The previous bare
            # "except:" also swallowed SystemExit/KeyboardInterrupt and
            # left pending changes in the session.)
            self.db.rollback()
            return self.render('admin/setting.html', setting=self.options, success=None)
        return self.render('admin/setting.html', setting=self.options, success=self.options)
# class AmdinPageListHandler(BaseHandler):
# @authenticated
# def get(self):
# form = PageAddForm(self)
# pages = self.db.query(Post).filter_by(type='page').all()
# return self.render('admin/page.html', form=form, pages=pages)
# @authenticated
# def post(self):
# pass
#
# class AmdinPageAddHandler(BaseHandler):
# @authenticated
# def get(self):
# form = PageAddForm(self)
# form.parent.query = self.db.query(Post).filter_by(type='page', status='enabled',parent=0).order_by(Post.title)
# return self.render('admin/page_add.html', form=form)
# @authenticated
# def post(self):
# form = PageAddForm(self)
# form.parent.query = self.db.query(Post).filter_by(type='page', status='enabled',parent=0).order_by(Post.title)
# if form.validate():
# title = form.title.data
# desc = form.description.data
# parent = form.parent.data.id if form.parent.data else 0
# order = form.order.data if form.parent.data else 0
# page = Post(title=title, content=desc, parent=parent, type='page',
# author=self.current_user.id, status='enabled',
# authorname=self.current_user.displayname,
# order=order,
# comment_count=0)
# self.db.add(page)
# self.db.commit()
# return self.redirect('/admin/page/list')
# return self.render("admin/page_add.html",form=form)
#
# class AmdinPageDeleteHandler(BaseHandler):
# @authenticated
# def get(self, pid):
# self.db.delete(self.db.query(Post).get(pid))
# return self.redirect('/admin/page/list')
# @authenticated
# def post(self, pid):
# pass
#
# class AmdinPageEditHandler(BaseHandler):
# @authenticated
# def get(self, pid):
# form = PageAddForm(self)
# current = self.db.query(Post).get(pid)
# #form.parent.query = self.db.query(Page).filter(Page.status=='enabled',Page.parent==0,Page.id!=current.id).order_by(Page.title)
# form.title.process_data(current.title)
# form.description.process_data(current.content)
# form.order.process_data(current.order)
# form.parent.process_data(self.db.query(Post).get(current.parent))
# form.parent.query = self.db.query(Post).filter(Post.status=='enabled').filter(Post.parent==0).filter(Post.id!=current.id).filter(Post.type=='page').order_by(Post.title)
# return self.render('admin/page_edit.html', form=form, current=current)
# @authenticated
# def post(self, pid):
# form = PageAddForm(self)
# current = self.db.query(Post).get(pid)
# form.parent.query = self.db.query(Post).filter(Post.status=='enabled').filter(Post.parent==0).filter(Post.id!=current.id).filter(Post.type=='page').order_by(Post.title)
# if form.validate():
# current.title = form.title.data
# current.content = form.description.data
# current.parent = form.parent.data.id if form.parent.data else 0
# current.order = form.order.data
# self.db.commit()
# return self.redirect('/admin/page/list')
# return self.render("admin/page_edit.html",form=form, current=current)
#
class AdminPostListHandler(BaseHandler):
    """List all published posts, newest first, for the admin overview."""

    @authenticated
    def get(self):
        published = (self.db.query(Post)
                     .filter_by(status='publish', type='post')
                     .order_by(Post.date.desc())
                     .all())
        return self.render('admin/post_list.html', posts=published)

    @authenticated
    def post(self):
        pass
class AdminPostAddHandler(BaseHandler):
    """Create a new blog post and bind its tags and categories to it."""

    @authenticated
    def get(self):
        form = PostAddForm(self)
        # Default the publish date to today.
        form.date.process_data(datetime.date.today())
        form.parent.query = self.db.query(Post).filter_by(status='publish', type='post').order_by(Post.title)
        return self.render('admin/post_add.html', form=form, setting=self.options)

    @authenticated
    def post(self):
        form = PostAddForm(self)
        form.parent.query = self.db.query(Post).filter_by(status='publish', type='post').order_by(Post.title)
        if form.validate():
            title = form.title.data
            # Combine the chosen date with the current clock time.
            datetime_now = get_datetime_from_date_now(form.date.data)
            content = form.content.data
            parent = form.parent.data.id if form.parent.data else 0
            new_tags = []
            try:
                # Tags arrive as one comma-separated field and are optional.
                new_tags = self.request.arguments['new_tags'][0].split(',')
            except KeyError:
                pass
            post_categories = self.request.arguments['post_category[]']
            post = Post(title=title,
                        author=self.current_user.id,
                        date=datetime_now,
                        date_gmt=datetime_now,
                        content=content,
                        excerpt=content[0:8],
                        modified=datetime_now,
                        modified_gmt=datetime_now,
                        to_ping=u'',
                        pinged=u'',
                        content_filtered=u'',
                        parent=parent,
                        status='publish',
                        type='post'
                        )
            self.db.add(post)
            self.db.commit()
            self.bind_tags(new_tags, post.id)
            self.bind_category(post_categories, post.id)
            return self.redirect('/admin/post/list')
        return self.render("admin/post_add.html", form=form, setting=self.options)

    def bind_tags(self, tags, post_id):
        """Attach each tag name to post_id, creating missing Term rows."""
        # Drop empty strings left over from splitting "" or "a,,b".
        tags = [c for c in tags if len(c) > 0]
        for term in tags:
            termname = term.strip()
            termslug = termname.lower()
            # Bug fix: look existing tags up by the taxonomy column. The
            # previous filter compared Term.term_taxonomy_id (an id column)
            # with the string 'post_tag', which never matched an existing
            # tag and so created a duplicate Term on every save.
            tmpc = self.db.query(Term).filter(Term.slug == termslug).filter(Term.taxonomy == 'post_tag').first()
            if tmpc is None:
                # First use of this tag: create the Term row.
                tag = Term(name=termname, slug=termslug, parent=0, taxonomy='post_tag', count=0)
                self.db.add(tag)
                self.db.commit()
                termid = tag.id
            else:
                termid = tmpc.id
            # Bind the tag to the post and bump its usage counter.
            tr = TermRelationship(post_id=post_id, term_id=termid, term_order=0)
            self.db.add(tr)
            tag = self.db.query(Term).get(termid)
            tag.count = tag.count + 1
            self.db.commit()

    def bind_category(self, categorys, post_id):
        """Attach the selected category ids to post_id and bump their counters."""
        # If the user picked anything besides the default category, drop the
        # default so posts are not double-filed under it.
        if len(categorys) > 1:
            try:
                categorys.remove(self.options['default_category'])
            except ValueError:
                pass
        for c in categorys:
            # Bind the category to the post.
            tr = TermRelationship(post_id=post_id, term_id=int(c), term_order=0)
            self.db.add(tr)
            category = self.db.query(Term).get(int(c))
            category.count = category.count + 1
            self.db.commit()
#
# class AmdinPostDeleteHandler(BaseHandler):
# @authenticated
# def get(self, pid):
# self.db.delete(self.db.query(Post).get(pid))
# terms = self.db.query(Term).join(Term_Relationship,Term.id==Term_Relationship.term_id).filter(Term_Relationship.post_id==pid).all()
# for term in terms:
# term.count = term.count -1
#
# self.db.query(Term_Relationship).filter_by(post_id=pid).delete()
# self.db.commit()
# return self.redirect('/admin/post/list')
# @authenticated
# def post(self, pid):
# pass
#
# class AmdinPostEditHandler(BaseHandler):
# @authenticated
# def get(self, pid):
# form = PostAddForm(self)
# current = self.db.query(Post).get(pid)
# current_terms = self.db.query(Term).join(Term_Relationship,Term.id==Term_Relationship.term_id).filter(Term_Relationship.post_id==pid).distinct().all()
# current_tags = [term.name for term in current_terms if term.taxonomy=='post_tag']
# current_tags_str = ",".join(current_tags)
# current_categorys = [str(term.id) for term in current_terms if term.taxonomy=='category']
# current_categorys_str = ",".join(current_categorys)
# form.title.process_data(current.title)
# form.content.process_data(current.content)
# form.date.process_data(current.date.date())
# form.parent.process_data(self.db.query(Post).get(current.parent))
# form.parent.query = self.db.query(Post).filter(Post.status=='enabled').filter(Post.type=='page').order_by(Post.title)
# return self.render('admin/post_edit.html', form=form, current=current, current_tags=current_tags_str, current_categorys=current_categorys_str, setting=self.options)
# @authenticated
# def post(self, pid):
# form = PostAddForm(self)
# current = self.db.query(Post).get(pid)
# current_terms = self.db.query(Term).join(Term_Relationship,Term.id==Term_Relationship.term_id).filter(Term_Relationship.post_id==pid).distinct().all()
# current_tags = [term.name for term in current_terms if term.taxonomy=='post_tag']
# current_tags_str = ",".join(current_tags)
# current_categorys = [str(term.id) for term in current_terms if term.taxonomy=='category']
# current_categorys_str = ",".join(current_categorys)
# form.parent.query = self.db.query(Post).filter(Post.status=='enabled').filter(Post.type=='page').order_by(Post.title)
# if form.validate():
# title = form.title.data
# content = form.content.data
# parent = form.parent.data.id if form.parent.data else 0
# new_tags =[]
# try:
# new_tags = self.request.arguments['new_tags'][0].split(',')
# except:
# pass
# post_categorys = self.request.arguments['post_category[]']
# # update the tags and categorys relationship
# self.bindHander_tags(new_tags, current.id, current_tags)
# self.bindHander_categorys(post_categorys, current.id, current_categorys)
# # update the current Post
# current.title = title
# current.content = content
# current.parent = parent
# current.author = self.current_user.id
# if current.date.date() != form.date.data:
# current.date = GetDatetimeFromDatenow(form.date.data,current.date)
# self.db.commit()
# return self.redirect('/admin/post/list')
# return self.render('admin/post_edit.html', form=form, current=current, current_tags=current_tags_str, current_categorys=current_categorys_str, setting=self.options)
# def bindHander_tags(self, tags, post_id, current_tags):
# current_tag_slugs = [c.strip().lower() for c in current_tags]
# new_tag_slugs = [c.strip().lower() for c in tags]
# # skip null string
# new_tag_slugs = [c for c in new_tag_slugs if len(c) > 0]
# new_set = set(new_tag_slugs);
# old_set = set(current_tag_slugs);
# diff_set = new_set ^ old_set;
# for tag in diff_set:
# if tag in old_set: # remove relationship
# term = self.db.query(Term).filter(Term.slug == tag).filter(Term.taxonomy=='post_tag').first();
# self.db.query(Term_Relationship).filter_by(post_id=post_id, term_id=term.id).delete()
# term.count = term.count-1
# else: # bind, or new and then bind
# # check if it exists.
# tmpc = self.db.query(Term).filter(Term.slug == tag).filter(Term.taxonomy=='post_tag').first();
# if tmpc == None: # new a tag
# term_id = 0;
# # find the term_id with it's name
# index = new_tag_slugs.index(tag);
# tag = Term(name=tags[index], slug=tag, parent=0, taxonomy='post_tag', count=0)
# self.db.add(tag)
# self.db.commit()
# term_id = tag.id
# else:
# term_id = tmpc.id
# # bind tag with post
# tr = Term_Relationship(post_id=post_id, term_id=term_id, term_order=0)
# self.db.add(tr)
# # add count
# ctag = self.db.query(Term).get(term_id)
# ctag.count = ctag.count+1
# self.db.commit()
# def bindHander_categorys(self, categorys, post_id, current_categorys):
# if len(categorys) > 1:
# try:
# categorys.remove(self.options['default_category'])
# except:
# pass
# old_categorys = [int(c) for c in current_categorys]
# new_categorys = [int(c) for c in categorys]
# oldc = set(old_categorys);
# newc = set(new_categorys);
# diffc = oldc ^ newc;
# for term_id in diffc:
# category = self.db.query(Term).get(term_id)
# if term_id in oldc:#remove relationship
# self.db.query(Term_Relationship).filter_by(post_id=post_id, term_id=term_id).delete()
# category.count = category.count-1
# else:
# # bind tag with post
# tr = Term_Relationship(post_id=post_id, term_id=term_id, term_order=0)
# self.db.add(tr)
# category.count = category.count+1
# self.db.commit()
import json
import datetime
from sqlalchemy.ext.declarative import DeclarativeMeta
class AlchemyEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize SQLAlchemy model instances.

    Objects whose class was produced by the declarative metaclass are
    flattened into a plain dict of their public attributes; datetimes are
    rendered as "YYYY-MM-DD HH:MM:SS" strings and values that json cannot
    encode become None.  Anything else is delegated to the base encoder.
    """

    def default(self, obj):
        if not isinstance(obj.__class__, DeclarativeMeta):
            # Not an SQLAlchemy declarative model: standard behaviour.
            return json.JSONEncoder.default(self, obj)
        serialized = {}
        public_names = (n for n in dir(obj)
                        if not n.startswith('_') and n != 'metadata')
        for name in public_names:
            value = getattr(obj, name)
            if type(value) is datetime.datetime:
                value = value.strftime("%Y-%m-%d %H:%M:%S")
            try:
                json.dumps(value)  # probe: raises TypeError for non-encodable values
            except TypeError:
                serialized[name] = None
            else:
                serialized[name] = value
        return serialized
class CategoryListHandler(BaseHandler):
    """Endpoint returning every category term as a JSON list."""

    def get(self):
        pass

    @responseJson
    def post(self):
        # All taxonomy rows flagged as categories, ordered by term id.
        taxonomy_rows = (self.db.query(TermTaxonomy)
                         .filter_by(taxonomy='category')
                         .order_by(TermTaxonomy.term_id)
                         .all())
        term_ids = [row.term_id for row in taxonomy_rows]
        terms = self.db.query(Term).filter(Term.id.in_(term_ids)).all()
        # Escape "</" so the payload is safe to embed inside a <script> tag.
        payload = json.dumps(terms, cls=AlchemyEncoder).replace("</", "<\\/")
        self.write(payload)
class AdminCategoryListHandler(BaseHandler):
    """Admin page listing all categories plus an add-category form."""

    @authenticated
    def get(self):
        form = TermAddForm(self)
        category_query = self.db.query(Term).filter_by(taxonomy='category')
        # Only top-level categories (parent == 0) may be chosen as a parent.
        form.parent.query = (category_query.filter_by(parent=0)
                             .order_by(Term.name))
        all_categories = category_query.order_by(Term.name).all()
        # NB: the template expects the (misspelled) "categorys" variable.
        return self.render('admin/category.html', form=form,
                           categorys=all_categories)

    @authenticated
    def post(self):
        pass
#
# class AmdinCategoryAddHandler(BaseHandler):
# @authenticated
# def get(self):
# pass
# @authenticated
# def post(self):
# form = TermAddForm(self)
# form.parent.query = self.db.query(Term).filter_by(taxonomy='category',parent=0).order_by(Term.name)
# if form.validate():
# name = form.name.data
# desc = form.description.data
# parent = form.parent.data.id if form.parent.data else 0
# page = Term(name=name.strip(), slug=name.strip().lower(),description=desc, parent=parent, taxonomy='category', count=0)
# self.db.add(page)
# self.db.commit()
# return self.redirect('/admin/category/list')
# categorys = self.db.query(Term).filter_by(taxonomy='category').order_by(Term.name).all()
# return self.render("admin/category.html", form=form, categorys=categorys)
#
# class AmdinCategoryQuickAddHandler(BaseHandler):
# @authenticated
# def get(self):
# pass
# @authenticated
# def post(self):
# name = self.request.arguments['name'][0]
# parents=self.request.arguments['parent'][0].split(",")
# if len(name)>0 and len(parents) == 1:
# tmpc = self.db.query(Term).filter(Term.slug == name.strip().lower()).filter(Term.taxonomy=='category').all();
# if len(tmpc) != 0:
# self.write("error")
# return None;
# parent = int(parents[0])
# is_allow_insert = False
# if parent == 0:
# is_allow_insert = True
# else:
# parent_term = self.db.query(Term).get(parent)
# # check if it already exist. and its parent is fist stage
# if parent_term and parent_term.parent == 0:
# is_allow_insert = True
# if is_allow_insert:
# category = Term(name=name, slug=name.strip().lower(), parent=int(parent), taxonomy='category', count=0)
# self.db.add(category)
# self.db.commit()
# else:
# self.write("Name is null, Or it only have one parent category!")
#
# class AmdinCategoryDeleteHandler(BaseHandler):
# @authenticated
# def get(self, cid):
# self.db.query(Term_Relationship).filter_by(term_id=cid).delete()
# self.db.delete(self.db.query(Term).get(cid))
# return self.redirect('/admin/category/list')
# @authenticated
# def post(self, cid):
# pass
#
# class AmdinCategoryEditHandler(BaseHandler):
# @authenticated
# def get(self, cid):
# form = TermAddForm(self)
# current = self.db.query(Term).get(cid)
# form.parent.query = self.db.query(Term).filter(Term.taxonomy=='category').filter(Term.parent==0).filter(Term.id!=current.id).order_by(Term.name)
# form.name.process_data(current.name)
# form.description.process_data(current.description)
# form.parent.process_data(current.parent)
# return self.render('admin/category_edit.html', form=form, current=current)
# @authenticated
# def post(self, cid):
# form = TermAddForm(self)
# current = self.db.query(Term).get(cid)
# form.parent.query = self.db.query(Term).filter(Term.taxonomy=='category').filter(Term.parent==0).filter(Term.id!=current.id).order_by(Term.name)
# if form.validate():
# current.name = form.name.data.strip()
# current.slug = current.name.strip().lower()
# current.description = form.description.data
# current.parent = form.parent.data.id if form.parent.data else 0
# self.db.commit()
# return self.redirect('/admin/category/list')
# return self.render("admin/category_edit.html",form=form, current=current) | [
"leopallas@gmail.com"
] | leopallas@gmail.com |
697a14ba16fec12bc6822c838c5c9307b462870a | 4d7f743f871860e64f7e1e057b32c8af76fe98ff | /nmtlab/utils/vocab.py | 893b2a2fcfcb1d9146383a199150e541bf465ee5 | [
"MIT"
] | permissive | MarkWuNLP/nmtlab | 8a822c7d2385f885509b9b3e5d039b8fc38562ad | da9c28126336528fc6b85f2d424632ad227a3682 | refs/heads/master | 2022-02-21T14:05:10.523962 | 2019-10-05T08:32:21 | 2019-10-05T08:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torchtext.vocab
import pickle
from collections import Counter, defaultdict
DEFAULT_SPECIAL_TOKENS = ["<null>", "<s>", "</s>", "UNK"]
class Vocab(torchtext.vocab.Vocab):
    """Token vocabulary with pickling support and an <unk> fallback.

    ``itos`` maps index -> token and ``stoi`` maps token -> index.  With
    ``picklable=False`` the ``stoi`` map is a defaultdict that resolves
    unknown tokens to index 3 (the UNK slot in ``DEFAULT_SPECIAL_TOKENS``);
    a plain dict is used otherwise so the instance can be pickled.
    """

    def __init__(self, path=None, unk_token="UNK", picklable=False):
        self._unk_token = unk_token
        self.itos = []
        if picklable:
            self.stoi = {}
        else:
            # Unknown tokens silently map to index 3 (the UNK token).
            self.stoi = defaultdict(lambda: 3)
        if path:
            self.load(path)

    def size(self):
        """Return the number of tokens in the vocabulary."""
        return len(self.itos)

    def initialize(self, special_tokens=None):
        """Reset the vocabulary to contain only the special tokens."""
        if special_tokens is None:
            special_tokens = DEFAULT_SPECIAL_TOKENS
        # Copy so that a later add() cannot mutate the caller's list
        # (previously this aliased the module-level DEFAULT_SPECIAL_TOKENS).
        self.itos = list(special_tokens)
        self._build_vocab_map()

    def build(self, txt_path, limit=None, special_tokens=None, char_level=False, field=None, delim="\t"):
        """Build the vocabulary from a text corpus.

        Args:
            txt_path: path to the corpus, one sample per line.
            limit: maximum vocabulary size, including special tokens.
            special_tokens: tokens placed at the head of the vocabulary;
                defaults to DEFAULT_SPECIAL_TOKENS.
            char_level: split lines into characters instead of words.
            field: if given, use only that column of each ``delim``-separated line.
            delim: column separator used together with ``field``.
        """
        vocab_counter = Counter()
        # Context manager so the corpus file is always closed
        # (the previous implementation leaked the file handle).
        with open(txt_path) as corpus:
            for line in corpus:
                line = line.strip()
                if field is not None:
                    line = line.split(delim)[field]
                if char_level:
                    # NOTE(review): Python 2-style str handling; on Python 3
                    # this only works if the line is bytes — confirm.
                    words = [w.encode("utf-8") for w in line.decode("utf-8")]
                else:
                    words = line.split(" ")
                vocab_counter.update(words)
        if special_tokens is None:
            special_tokens = DEFAULT_SPECIAL_TOKENS
        if limit is not None:
            final_items = vocab_counter.most_common()[:limit - len(special_tokens)]
        else:
            final_items = vocab_counter.most_common()
        # Deterministic order: frequency (descending), then token.
        final_items.sort(key=lambda x: (-x[1], x[0]))
        final_words = [x[0] for x in final_items]
        self.itos = special_tokens + final_words
        self._build_vocab_map()

    def set_vocab(self, unique_tokens, special_tokens=True):
        """Replace the vocabulary, optionally prefixing the default specials."""
        if special_tokens:
            self.itos = DEFAULT_SPECIAL_TOKENS + unique_tokens
        else:
            self.itos = unique_tokens
        self._build_vocab_map()

    def add(self, token):
        """Append ``token`` to the vocabulary if it is not already present."""
        if token not in self.stoi:
            self.itos.append(token)
            # The token was just appended, so its index is the last slot
            # (avoids the previous O(n) list.index scan).
            self.stoi[token] = len(self.itos) - 1

    def save(self, path):
        """Pickle the token list to ``path``."""
        # Close the file deterministically (the old code leaked the handle).
        with open(path, "wb") as f:
            pickle.dump(self.itos, f)

    def load(self, path):
        """Restore the token list previously written by :meth:`save`."""
        with open(path, "rb") as f:
            self.itos = pickle.load(f, encoding='utf-8')
        self._build_vocab_map()

    def _build_vocab_map(self):
        # Rebuild the token -> index map from the current token list.
        self.stoi.update({tok: i for i, tok in enumerate(self.itos)})

    def encode(self, tokens):
        """Map a sequence of tokens to their indexes."""
        return list(map(self.encode_token, tokens))

    def encode_token(self, token):
        """Map one token to its index, falling back to the UNK index."""
        if token in self.stoi:
            return self.stoi[token]
        else:
            return self.stoi[self._unk_token]

    def decode(self, indexes):
        """Map a sequence of indexes back to tokens."""
        return list(map(self.decode_token, indexes))

    def decode_token(self, index):
        """Map one index back to its token; out-of-range yields the UNK token."""
        return self.itos[index] if index < len(self.itos) else self._unk_token

    def contains(self, token):
        """Return True if ``token`` is in the vocabulary."""
        return token in self.stoi

    def get_list(self):
        """Return the underlying index -> token list."""
        return self.itos
| [
"raphael@uaca.com"
] | raphael@uaca.com |
e2666f46d0fa645869fb18bacd4d1c27777b444b | 1c2a0411f9cd24ad3076a7c85baeb7d72cd91c6f | /app_meta/admin.py | 680fe8ae84b8d8946d1f698f52786b3a719551e4 | [] | no_license | eshandas/django_scrapy_project_template | a98b75f5da5a5eeba16fd51be0347ad28432ce79 | 5eb2a2787727569e03f24a9f4bd75add37ddf9bb | refs/heads/master | 2021-01-19T13:06:18.712611 | 2017-04-29T15:48:08 | 2017-04-29T15:48:08 | 88,064,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.contrib import admin
from .models import (
KeyValueSetting,
)
# Expose the key/value settings model in the Django admin site.
admin.site.register(KeyValueSetting)
| [
"eshandasnit@gmail.com"
] | eshandasnit@gmail.com |
0ff47b51128d4c3f179c4ff101481282d1461151 | 47fabc7be3769cb1d2d17369efe2048818158477 | /test/test_multinomial_splitting.py | fb9f8105646ef967184adf4470dbd210056f4169 | [
"Apache-2.0"
] | permissive | jpeyhardi/GLM | 35ae651c4aa9771fec63b7c151858e0555a80c07 | 6f0fd763aec2a0ccdef3901b71ed990f20119510 | refs/heads/master | 2021-09-26T08:50:08.938073 | 2018-10-28T13:22:24 | 2018-10-28T13:22:24 | 125,999,551 | 0 | 0 | Apache-2.0 | 2018-03-20T10:21:13 | 2018-03-20T10:21:09 | C++ | UTF-8 | Python | false | false | 1,223 | py | from statiskit import (linalg,
core,
glm)
from statiskit.data import glm as data
import unittest
from nose.plugins.attrib import attr
import math
@attr(linux=True,
      osx=True,
      win=True,
      level=0)
class TestMultinomialSplittingRegression(unittest.TestCase):
    # Loads the shared 'KN03' dataset once for the whole test class; the
    # only actual test is currently commented out, so this class exercises
    # just the construction/teardown path.
    @classmethod
    def setUpClass(cls):
        """Test Multinomial splitting regression construction"""
        cls._data = data.load('KN03')
    # def test_Fisher_estimation(self):
    #     """Test negative binomial regression Fisher estimation"""
    #     data = self._data.extract(explanatories=range(len(self._data.components) - 1),
    #                               response=-1)
    #     fe = glm.negative_binomial_estimation(algo='Fisher',
    #                                           data=data,
    #                                           kappa=1.274892646)
    #     self.assertAlmostEqual(2 * fe.estimated.loglikelihood(data), -1093.15, places=2)
    #     # self.assertAlmostEqual(2 * fe.estimated.loglikelihood(data), -1093.61, places=2)
    @classmethod
    def tearDownClass(cls):
        # Releases the dataset loaded in setUpClass.
        """Test Negative Binomial regression deletion"""
        del cls._data
"pfernique@gmail.com"
] | pfernique@gmail.com |
cf661184d8a46c7e786c11af458683f5b375343b | 8eed105bdcadc62826dda87ded2f0c2c3a6036aa | /3.py | 9514e2328296e7d83aed485a99d6eba4ac917b21 | [] | no_license | tatti2501/lesson1 | 1700b7a2a7818cf58ea624d05a4db9332a3d66b3 | cf61a28325ea774035e5b8aec9c3a32cfaf00d1b | refs/heads/master | 2020-05-24T17:00:33.707474 | 2019-05-18T15:02:25 | 2019-05-18T15:02:25 | 187,371,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | name=input('Введите ваше имя: ')
print(f'Привет, {name}! Как дела?') | [
"twinki2525@mail.ru"
] | twinki2525@mail.ru |
4703b8150ce9bee2ac6c320fc6e7a787b634d0f3 | 463721581c8454b594be74c454da13cbfb422bf8 | /neuralnet.py | 3fd839e16b87f10e19b06fac0121a0e77ec40288 | [] | no_license | mohdfawad99/first_neural_network | a080546c61bbb11c4685323874ac44ea7e433a53 | b65e80eadaab322c24015cdd8b04d6b167d4a0c3 | refs/heads/master | 2022-09-01T19:33:04.784770 | 2020-05-20T18:35:01 | 2020-05-20T18:35:01 | 265,653,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,420 | py | import numpy as np
class NeuralNetwork(object):
    """A fully connected two-layer network: one sigmoid hidden layer and a
    linear output unit, trained with plain batch gradient descent.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Layer sizes.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weights drawn from N(0, fan_in**-0.5) for each layer.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation used by the hidden layer only.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

    def train(self, features, targets):
        """Run one averaged gradient-descent step over a batch.

        Arguments
        ---------
        features: 2D array, one data record per row.
        targets: 1D array of target values.
        """
        n_records = features.shape[0]
        grad_ih = np.zeros(self.weights_input_to_hidden.shape)
        grad_ho = np.zeros(self.weights_hidden_to_output.shape)
        for record, target in zip(features, targets):
            # Forward pass, then accumulate this record's gradient.
            outputs, hidden = self.forward_pass_train(record)
            grad_ih, grad_ho = self.backpropagation(
                outputs, hidden, record, target, grad_ih, grad_ho)
        self.update_weights(grad_ih, grad_ho, n_records)

    def forward_pass_train(self, X):
        """Forward pass for one record; returns (final_outputs, hidden_outputs)."""
        hidden_outputs = self.activation_function(
            np.dot(X, self.weights_input_to_hidden))
        # The output unit is linear (regression), so no activation here.
        final_outputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
        return final_outputs, hidden_outputs

    def backpropagation(self, final_outputs, hidden_outputs, X, y,
                        delta_weights_i_h, delta_weights_h_o):
        """Accumulate weight gradients for a single (X, y) record.

        Arguments
        ---------
        final_outputs: network output from the forward pass.
        hidden_outputs: hidden-layer activations from the forward pass.
        X: input record. y: its target value.
        delta_weights_i_h / delta_weights_h_o: running gradient accumulators.
        """
        # Linear output unit: its error term is just the raw error.
        output_error_term = y - final_outputs
        # Propagate the error back through W_ho, then scale by the
        # sigmoid derivative h * (1 - h).
        hidden_error = np.dot(output_error_term,
                              self.weights_hidden_to_output.T)
        hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
        delta_weights_i_h += hidden_error_term * X[:, None]
        delta_weights_h_o += output_error_term * hidden_outputs[:, None]
        return delta_weights_i_h, delta_weights_h_o

    def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
        """Apply the batch-averaged gradient step to both weight matrices."""
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records

    def run(self, features):
        """Predict the output for ``features`` (1D array of feature values)."""
        hidden_outputs = self.activation_function(
            np.dot(features, self.weights_input_to_hidden))
        return np.dot(hidden_outputs, self.weights_hidden_to_output)
#########################################################
# Set your hyperparameters here
##########################################################
iterations = 5500     # number of training iterations
learning_rate = 0.6   # gradient-descent step size
hidden_nodes = 20     # units in the hidden layer
output_nodes = 1      # single (regression) output unit
| [
"noreply@github.com"
] | mohdfawad99.noreply@github.com |
cdb15796c9920683d73091825b00f99644a7dc2d | 9c2309bc4a948188c7967f1510581ea66b19350f | /solutions/algo-expert/hard/find-range.py | ddfabf56d74f7ee2dd047efe3887dae51e2f1598 | [] | no_license | QimanWang/interview-prep | 6ca88c209a96c2e25169407d5e9f1b0a096b0fa8 | afcc958bd6076c786dcfe7a01a92ee7005224519 | refs/heads/master | 2020-03-11T04:29:16.940769 | 2020-03-04T17:32:34 | 2020-03-04T17:32:34 | 129,777,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | '''
some kind of bst steps
'''
def searchForRange(array, target):
    """Return [first, last] indices of ``target`` in sorted ``array``.

    Both entries stay -1 when the target is absent.
    """
    bounds = [-1, -1]
    hi = len(array) - 1
    # Two binary searches: one biased left (first occurrence) and one
    # biased right (last occurrence); each writes its slot of ``bounds``.
    for go_left in (True, False):
        alteredBst(array, target, 0, hi, bounds, go_left)
    return bounds
def alteredBst(ary, tgt, l, r, finalRange, goLeft):
    """Binary search for the extreme occurrence of ``tgt`` in sorted ``ary``.

    Searches the closed range [l, r].  With ``goLeft`` True the index of
    the first occurrence is written into ``finalRange[0]``; otherwise the
    last occurrence goes into ``finalRange[1]``.  ``finalRange`` is left
    untouched when ``tgt`` is absent.
    """
    lo, hi = l, r
    while lo <= hi:
        mid = (lo + hi) // 2
        value = ary[mid]
        if value < tgt:
            lo = mid + 1
            continue
        if value > tgt:
            hi = mid - 1
            continue
        # value == tgt: is mid already the extreme occurrence?
        if goLeft:
            if mid == 0 or ary[mid - 1] != tgt:
                finalRange[0] = mid
                return
            hi = mid - 1
        else:
            if mid == len(ary) - 1 or ary[mid + 1] != tgt:
                finalRange[1] = mid
                return
            lo = mid + 1
# Quick manual check: the run of 45s spans indices 4..9, so expect [4, 9].
ary = [0,1,21,33,45,45,45,45,45,45,61,71,71]
tgt = 45
print(searchForRange(ary,tgt))
| [
"qimanwang@northwesternmutual.com"
] | qimanwang@northwesternmutual.com |
3e1c5077ecb243ee0421f0cc303f389c21b8d623 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqpt/fanstats1d.py | ca82eee313ce110542aa85b5984e92415bd750ea | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 18,404 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class FanStats1d(Mo):
"""
A class that represents the most current statistics for fan in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.eqpt.FanStats1d", "fan")
counter = CounterMeta("pwm", CounterCategory.GAUGE, "pwm", "pulse width modulation")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "pwmLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pwmMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pwmMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pwmAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pwmSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "pwmTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pwmThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "pwmTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pwmTr"
meta._counters.append(counter)
counter = CounterMeta("speed", CounterCategory.GAUGE, "rpm", "speed")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "speedLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "speedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "speedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "speedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "speedSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "speedTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "speedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "speedTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "speedTr"
meta._counters.append(counter)
meta.moClassName = "eqptFanStats1d"
meta.rnFormat = "CDeqptFanStats1d"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current fan stats in 1 day"
meta.writeAccessMask = 0x80080000000001
meta.readAccessMask = 0x80080000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqpt.Fan")
meta.superClasses.add("cobra.model.eqpt.FanStats")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.rnPrefixes = [
('CDeqptFanStats1d', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pwmAvg", "pwmAvg", 8243, PropCategory.IMPLICIT_AVG)
prop.label = "pulse width modulation average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmAvg", prop)
prop = PropMeta("str", "pwmLast", "pwmLast", 8240, PropCategory.IMPLICIT_LASTREADING)
prop.label = "pulse width modulation current value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmLast", prop)
prop = PropMeta("str", "pwmMax", "pwmMax", 8242, PropCategory.IMPLICIT_MAX)
prop.label = "pulse width modulation maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmMax", prop)
prop = PropMeta("str", "pwmMin", "pwmMin", 8241, PropCategory.IMPLICIT_MIN)
prop.label = "pulse width modulation minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmMin", prop)
prop = PropMeta("str", "pwmSpct", "pwmSpct", 8244, PropCategory.IMPLICIT_SUSPECT)
prop.label = "pulse width modulation suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmSpct", prop)
prop = PropMeta("str", "pwmThr", "pwmThr", 8246, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "pulse width modulation thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pwmThr", prop)
prop = PropMeta("str", "pwmTr", "pwmTr", 8248, PropCategory.IMPLICIT_TREND)
prop.label = "pulse width modulation trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmTr", prop)
prop = PropMeta("str", "pwmTrBase", "pwmTrBase", 8247, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "pulse width modulation trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmTrBase", prop)
prop = PropMeta("str", "pwmTtl", "pwmTtl", 8245, PropCategory.IMPLICIT_TOTAL)
prop.label = "pulse width modulation total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmTtl", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "speedAvg", "speedAvg", 8264, PropCategory.IMPLICIT_AVG)
prop.label = "speed average value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedAvg", prop)
prop = PropMeta("str", "speedLast", "speedLast", 8261, PropCategory.IMPLICIT_LASTREADING)
prop.label = "speed current value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedLast", prop)
prop = PropMeta("str", "speedMax", "speedMax", 8263, PropCategory.IMPLICIT_MAX)
prop.label = "speed maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedMax", prop)
prop = PropMeta("str", "speedMin", "speedMin", 8262, PropCategory.IMPLICIT_MIN)
prop.label = "speed minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedMin", prop)
prop = PropMeta("str", "speedSpct", "speedSpct", 8265, PropCategory.IMPLICIT_SUSPECT)
prop.label = "speed suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("speedSpct", prop)
prop = PropMeta("str", "speedThr", "speedThr", 8267, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "speed thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("speedThr", prop)
prop = PropMeta("str", "speedTr", "speedTr", 8269, PropCategory.IMPLICIT_TREND)
prop.label = "speed trend"
prop.isOper = True
prop.isStats = True
meta.props.add("speedTr", prop)
prop = PropMeta("str", "speedTrBase", "speedTrBase", 8268, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "speed trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("speedTrBase", prop)
prop = PropMeta("str", "speedTtl", "speedTtl", 8266, PropCategory.IMPLICIT_TOTAL)
prop.label = "speed total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("speedTtl", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("EqptSlotToEPg", "EPG", "cobra.model.fv.EPg"))
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Construct the managed object under *parentMoOrDn*.

        This MO has no naming properties, so an empty naming-value list is
        forwarded to the base ``Mo`` initializer.
        """
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
26abe393261a86288211f6bc9fd241563a9b60ce | 119a85a388fe436361530fbb47932e704d749557 | /PEAK-0.5a4dev_r2085/build/lib.macosx-10.6-x86_64-2.7/peak/util/signature.py | dbd8a3fbf7c6e8f609c9564bba8de27bd211ea3d | [
"Python-2.0"
] | permissive | chrisrgunn/cs156project | 014d5b05c6bf0e08ab8bd0dea525057d0e65b9a7 | e5414a37f9793c8b0674695b948482b559b18ea6 | refs/heads/master | 2021-01-19T14:09:49.046539 | 2017-05-24T02:10:29 | 2017-05-24T02:10:29 | 88,128,762 | 0 | 2 | null | 2017-05-04T23:49:09 | 2017-04-13T05:36:10 | Python | UTF-8 | Python | false | false | 1,270 | py | """Crude introspection of call signatures"""
import protocols; from protocols import adapt, Interface
from inspect import getargspec
from types import FunctionType, MethodType
__all__ = 'ISignature', 'getPositionalArgs'
class ISignature(Interface):
# XXX There should be a lot more here than this...
def getPositionalArgs():
"""Return a sequence of positional argument names"""
def getCallable():
"""Return the callable object"""
class FunctionAsSignature(protocols.Adapter):
protocols.advise(
instancesProvide=[ISignature],
asAdapterForTypes=[FunctionType]
)
def getPositionalArgs(self):
return getargspec(self.subject)[0]
def getCallable(self):
return self.subject
class MethodAsSignature(FunctionAsSignature):
protocols.advise(
instancesProvide=[ISignature],
asAdapterForTypes=[MethodType]
)
def __init__(self, ob):
self.funcSig = adapt(ob.im_func, ISignature)
self.offset = ob.im_self is not None
self.subject = ob
def getPositionalArgs(self):
return self.funcSig.getPositionalArgs()[self.offset:]
def getPositionalArgs(ob):
return adapt(ob,ISignature).getPositionalArgs()
| [
"chrisrgunn@gmail.com"
] | chrisrgunn@gmail.com |
70abc95ff75e0c7d60d786de6f31f6b67a50d02a | fa9af03a1971b51876e5c7e620f7f0b04e544363 | /python/awesome-python3-webapp/www/test.py | b5a916a2f0f0683d091348fbe1a5dc8b008a8be3 | [
"MIT"
] | permissive | summyer/common-source | 853ff2af160ac7beed23f15d7e31572cf0676e25 | 47331c4b8c035cb21c2b90a693b7005cad7b268b | refs/heads/master | 2021-09-07T22:31:31.718564 | 2018-03-02T07:56:51 | 2018-03-02T07:56:51 | 118,387,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 1 21:02:02 2018
@author: Administrator
"""
import time ,uuid
class Student:
    """Tiny demo class: ``name`` is a class attribute shared by every instance."""

    name = 'sxk'  # looked up on the class when instances have no own `name`

    def __init__(self):
        # No per-instance state; instances read `name` straight off the class.
        pass
print(Student.name)
print(Student().name)
print ('%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex))
import orm
from models import User, Blog, Comment
import asyncio
async def test(loop):
    """Smoke-test the ORM: open a MySQL pool and insert one hard-coded User.

    NOTE(review): credentials and DB name are hard-coded for local testing;
    running this twice may violate the unique email constraint — confirm.
    """
    await orm.create_pool(loop,user='root',password='123456',db='awesome')
    u = User(name='Test', email='test@1sd23.com',passwd='123456',image="about:blank")
    print(11)  # debug marker showing the pool was created
    await u.save()
loop = asyncio.get_event_loop()
loop.run_until_complete(test(loop))
#
# 记得查看model中属性和表属性间的映射关系
# 解决运行出现的bug
| [
"sunxiakun@163.com"
] | sunxiakun@163.com |
8b208fa42ddbdffc9d002a42666e45e8d5fedcc3 | 4897acf2134ff1df8b577915b679739367fe1aad | /migrations/versions/2d9e1ec59a5c_initial_migration.py | 7aaef51d1e06c57ccd539cca0aa348af9f2870dc | [
"MIT"
] | permissive | KageniJK/blog | 5dc00e8d3dd437c4ef57020f5c01aacc0cb29685 | 6ee3557bbad53198efbab1ccd02a82f3fa3b9864 | refs/heads/master | 2020-03-28T16:52:53.258812 | 2018-09-17T02:28:23 | 2018-09-17T02:28:23 | 148,737,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | """initial migration
Revision ID: 2d9e1ec59a5c
Revises:
Create Date: 2018-09-16 19:23:32.451802
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d9e1ec59a5c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the initial schema: roles, users, posts and comments tables.

    Autogenerated by Alembic; table creation order satisfies the foreign-key
    dependencies (roles -> users -> posts -> comments).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('roles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('avatar', sa.String(), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('password_hashed', sa.String(length=255), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # email is unique; username is indexed but (perhaps surprisingly) not unique
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=100), nullable=True),
    sa.Column('post', sa.Text(), nullable=True),
    sa.Column('date_posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # NOTE(review): comments link only to posts, not to an author user — confirm intended.
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comment', sa.String(length=255), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the initial schema, dropping tables in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comments')
    op.drop_table('posts')
    # indexes must go before their table
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_table('roles')
    # ### end Alembic commands ###
| [
"james.kageni@gmail.com"
] | james.kageni@gmail.com |
0f5a67f833f3ce942196efe83b16f4f42942dccb | 20a715529a3ce4534544ca5f798cce792cff50a6 | /scripts/cores.py | e7664f6bbeb9326ff380c6936c4cc73f7425d85b | [
"MIT"
] | permissive | budes/2048.py | 3512b37028d119bdbdd8ee6cccdf6afc53573f49 | 6f5c5525c52adb21b5caebd68aa87ea5aa3f0510 | refs/heads/main | 2023-07-08T04:23:33.171346 | 2021-08-02T23:37:03 | 2021-08-02T23:37:03 | 392,122,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | class Cores():
def __init__(self, valor):
if type(valor) == str:
valor = int(valor)
self.valor = valor
# Todas as cores ANSI que achei
self.cores = [cor for cor in range(31, 37)] + [cor for cor in range(91, 97)]
self.DeterminaCor()
def DeterminaCor(self):
cont = 0
aux = self.valor
# Procura saber sua potencia de 2
while aux != 1:
aux //= 2
cont += 1
if cont > len(self.cores):
cont %= len(self.cores)
return f'\033[1;{self.cores[cont]}m {self.valor} \033[m'
if __name__ == '__main__':
    # Quick manual check; colorama.init() makes ANSI escapes work on Windows.
    import colorama
    colorama.init()
    inst = Cores(131072)
    print(inst.DeterminaCor())
| [
"noreply@github.com"
] | budes.noreply@github.com |
055680806c6f2f1e7c1a86f9b17a5faee994923a | c77e9b3e5255db04a2d4a23b3b4d04b1e805d86d | /tests/test_io.py | 0b431b24afdb2e1d6a122cfb15704b751d539b6a | [
"Apache-2.0"
] | permissive | flolas/giraffez | 8cfb910386ad5a693f4ad6074e9eae4df59623f5 | e07b34907b1587248bc175b852a28ae0a9667425 | refs/heads/master | 2020-05-22T11:41:58.723083 | 2017-02-28T13:29:08 | 2017-02-28T13:29:08 | 84,695,437 | 0 | 1 | null | 2017-03-12T02:56:09 | 2017-03-12T02:56:09 | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
import os
import pytest
import giraffez
@pytest.mark.usefixtures('tmpfiles')
class TestInputOutput(object):
    """Smoke tests for ``giraffez.Writer`` using the ``tmpfiles`` fixture."""
    def test_writer(self, tmpfiles):
        # Plain text writer accepts str payloads.
        with giraffez.Writer(tmpfiles.output_file) as f:
            f.write("value1|value2|value3\n")
    def test_writer_gzip(self, tmpfiles):
        # Gzip archive writer takes bytes, hence the b"" literal.
        with giraffez.Writer(tmpfiles.output_file, archive=True, use_gzip=True) as f:
            f.write(b"value1|value2|value3\n")
| [
"christopher.marshall@capitalone.com"
] | christopher.marshall@capitalone.com |
4cf2a8b84c3cdd0ebae529ac5397255b44f2e9ee | 5f2103b1083b088aed3f3be145d01a770465c762 | /406. Queue Reconstruction by Height.py | 54dbb0fb8a1dbe5530f49b27d210c81d690d7a0e | [] | no_license | supersj/LeetCode | 5605c9bcb5ddcaa83625de2ad9e06c3485220019 | 690adf05774a1c500d6c9160223dab7bcc38ccc1 | refs/heads/master | 2021-01-17T17:23:39.585738 | 2017-02-27T15:08:42 | 2017-02-27T15:08:42 | 65,526,089 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | from operator import itemgetter
# todo insert order thinking
# todo insert order thinking
class Solution1(object):
    """First attempt at LeetCode 406 (queue reconstruction by height).

    Kept for reference; the insert-based approach below is simpler.
    """
    def reconstructQueue(self, people):
        """
        :type people: List[List[int]]
        :rtype: List[List[int]]
        """
        # Sort in place by (count, height) ascending.
        people.sort(key = itemgetter(1,0))
        result = []
        start = 0
        # Everyone with count == 0 can be placed immediately, in height order.
        for ele in people:
            if ele[1] == 0:
                result.append(ele)
                start += 1
            else:
                break
        _last = start       # insertion position used for the previous element
        _lastlevel = 0      # count value of the previous element
        for i in range(start,len(people)):
            cnt = people[i][1]
            if cnt != _lastlevel:
                # new count level: restart position tracking
                _last = 0
                _lastlevel = cnt
            # Walk `result` until cnt people of height >= ours have been passed.
            _index = 0
            for num in result:
                if cnt == 0:
                    break
                if num[0] >= people[i][0]:
                    cnt -= 1
                _index += 1
            # NOTE(review): presumably keeps equal-count elements in relative
            # order by never inserting at or before the previous slot — confirm.
            _last = max(_last+1,_index)
            result.insert(_last,people[i])
        return result
class Solution(object):
    """LeetCode 406: reconstruct a queue from (height, k) pairs, where k is
    the number of people in front with height >= this person's height."""

    def reconstructQueue(self, people):
        """
        :type people: List[List[int]] -- each element is [height, k]
        :rtype: List[List[int]] -- the reconstructed queue

        Sort by height descending (ties by k ascending), then insert each
        person at index k: everyone already placed is at least as tall, so
        exactly k taller-or-equal people end up in front.
        """
        # One sort with a compound key replaces the original's two stable
        # sorts; the resulting order is identical.
        people.sort(key=lambda x: (-x[0], x[1]))
        result = []
        for person in people:
            result.insert(person[1], person)
        # (Removed a leftover debug `print(people)` that spammed stdout.)
        return result
# Ad-hoc smoke run of Solution; the return value is discarded.
p = [[8,2],[4,2],[4,5],[2,0],[7,2],[1,4],[9,1],[3,1],[9,0],[1,0]]
hh = Solution()
hh.reconstructQueue(p)
"ml@ml.ml"
] | ml@ml.ml |
c7ded4c2a8ded00f26177d9fbb4563a25996e336 | c15db704dd7973d634ffd4bee81e9a6e173af9b5 | /venv/Scripts/easy_install-3.7-script.py | 8f060131245de7cc3ac16529b96c6e62732ab053 | [] | no_license | guowenzhuang/stats-gov | d4e780fa0d1b42141aca47e91121a7adce8da968 | 777b38b4a1d4bcd26b5ce5f75978765499c2410c | refs/heads/master | 2020-07-04T09:28:30.518799 | 2019-08-14T00:14:20 | 2019-08-14T00:14:20 | 202,240,953 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!D:\PycharmProjects\tjsj\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Generated setuptools wrapper: strip any "-script.py"/".exe" suffix so
    # pkg_resources sees the console-script name, then run the entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"qq123456"
] | qq123456 |
a52de2fb1424ea5d399877b53b6f048575a62dee | c7a1c1ae40e9d95dfb92251dcfbf3c5010e6ba81 | /essentials-gpiozero/02-LEDBasics/ch2listing4.py | 182bec077a8cdc43f6efc791966c9e6f6b7cdd87 | [] | no_license | pranavlathigara/Raspberry-Pi-DIY-Projects | efd18e2e5b9b8369bb1a5f5418782480cf9bc729 | 0c14c316898d4d06015912ac4a8cb7b71a3980c0 | refs/heads/master | 2021-04-06T09:14:28.088223 | 2018-02-19T00:15:22 | 2018-02-19T00:15:22 | 124,649,553 | 1 | 2 | null | 2018-03-10T11:30:59 | 2018-03-10T11:30:59 | null | UTF-8 | Python | false | false | 953 | py | from gpiozero import TrafficHat
from time import sleep
th = TrafficHat()

try:
    while True:
        # Green phase: traffic may flow.
        th.lights.green.on()
        # Bug fix: the original used typographic quotes (“...”), which is a
        # SyntaxError in Python 3; replaced with plain double quotes.
        print("Press the button to stop the lights!")
        # Busy-wait until the pedestrian button is pressed.
        while not th.button.is_pressed:
            pass
        # Button has been pressed!
        th.lights.green.off()
        # Amber on for a couple of seconds
        th.lights.amber.on()
        sleep(2)
        th.lights.amber.off()
        # Red phase: pedestrians cross while the buzzer sounds.
        th.lights.red.on()
        # Buzz the buzzer 20 times with 0.1 second intervals
        th.buzzer.blink(0.1, 0.1, 20, False)
        sleep(1)
        th.lights.red.off()
        # Blink amber 4 times with 0.5 second intervals before going green again.
        th.lights.amber.blink(0.5, 0.5, 4, False)
except KeyboardInterrupt:
    exit()
"tdamdouni@users.noreply.github.com"
] | tdamdouni@users.noreply.github.com |
190f211715f297791f19884662e5515fbe8aa0c7 | f245c87e84335dbe21ca131edfeeb73657b73522 | /heroku_env/lib/python3.6/hmac.py | d891186d82f2eb3bd000526cb93d8517e52368f9 | [] | no_license | fornesarturo/heroku | f42e2f11a0fe3f56d60831e2f961485f26ccc6fc | 7db6e9d533e5598cfcdabe2d5b38bd04eb297019 | refs/heads/master | 2021-01-21T11:00:01.005049 | 2017-05-27T13:45:16 | 2017-05-27T13:45:16 | 91,718,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | /home/fornesarturo/anaconda3/lib/python3.6/hmac.py | [
"fornesarturo@gmail.com"
] | fornesarturo@gmail.com |
5e97ee335b85ed1562ba97122a365eef2a05f7ff | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py | b969be415dd9b367021b7757c9c76ce0e0c6acb4 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 15,153 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from typing import Any, Dict, List, Optional, Tuple
import dateparser
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
''' CLIENT CLASS '''
class Client(BaseClient):
    """HTTP client for the FireEye Detection on Demand REST API.

    Thin wrappers around ``BaseClient._http_request`` (which handles proxy,
    SSL verification and the auth header); no Demisto logic lives here.
    """
    def get_file_reputation(self, file: str) -> Dict[str, Any]:
        # GET /hashes/{md5} -- look up a single hash's reputation.
        return self._http_request(
            method='GET',
            url_suffix=f'/hashes/{file}'
        )
    def get_health(self) -> Dict[str, Any]:
        # GET /health -- liveness/auth check used by test-module.
        return self._http_request(
            method='GET',
            url_suffix='/health'
        )
    def submit_file(self, files: Dict[str, Any], data: Dict[str, Any]) -> Dict[str, Any]:
        # POST /files -- multipart upload of a sample plus optional form fields.
        return self._http_request(
            method='POST',
            url_suffix='/files',
            files=files,
            data=data
        )
    def submit_urls(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # POST /urls -- NOTE(review): the payload is deliberately passed via
        # `files=` (multipart form parts) with `data=None`; presumably the API
        # expects the url list as a form part — confirm before changing.
        return self._http_request(
            method='POST',
            url_suffix='/urls',
            files=data,
            data=None
        )
    def get_report_url(self, report_id: str, expiration: int) -> Dict[str, Any]:
        # GET /presigned-url/{report_id}?expiry=N -- time-limited download link.
        return self._http_request(
            method='GET',
            url_suffix=f'/presigned-url/{report_id}',
            params={
                'expiry': expiration
            }
        )
    def report_status(self, report_id: str, extended: str) -> Dict[str, Any]:
        # GET /reports/{report_id}?extended=... -- scan status / report body.
        return self._http_request(
            method='GET',
            url_suffix=f'/reports/{report_id}',
            params={
                'extended': extended
            }
        )
    def report_artifact(self, report_id: str, artifact_type: str) -> Dict[str, Any]:
        # GET /artifacts/{report_id}?type=... -- returns raw bytes
        # (resp_type='content'), not JSON.
        return self._http_request(
            method='GET',
            url_suffix=f'/artifacts/{report_id}',
            params={
                'type': artifact_type,
            },
            resp_type='content'
        )
''' HELPER FUNCTIONS '''
def convert_to_demisto_severity(severity: str) -> int:
    """Translate a FireEye severity name into a Cortex XSOAR severity level.

    Raises KeyError for any name outside the four known severities,
    matching the original lookup behaviour.
    """
    severity_levels = {
        'Low': 1,       # low severity
        'Medium': 2,    # medium severity
        'High': 3,      # high severity
        'Critical': 4,  # critical severity
    }
    return severity_levels[severity]
def arg_to_int(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Convert a command argument to ``int``.

    Accepts an int, or a string of digits.  Returns None when *arg* is None
    and not required; raises ValueError for anything unparseable.
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, int):
        return arg
    if isinstance(arg, str):
        if arg.isdigit():
            return int(arg)
        raise ValueError(f'Invalid number: "{arg_name}"="{arg}"')
    raise ValueError(f'Invalid number: "{arg_name}"')
def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Convert a command argument to a Unix timestamp (seconds, int).

    Accepts a numeric value, a digit string, or a date string (ISO8601 or a
    relative expression such as "3 days") parsed via dateparser in UTC.
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, (int, float)):
        # Truncate floats to whole seconds.
        return int(arg)
    if isinstance(arg, str):
        if arg.isdigit():
            # A string of digits is already a timestamp.
            return int(arg)
        # Handles ISO8601 (e.g. 2019-10-23T00:00:00) and relative phrases.
        parsed = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if parsed is None:
            raise ValueError(f'Invalid date: {arg_name}')
        return int(parsed.timestamp())
    raise ValueError(f'Invalid date: "{arg_name}"')
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
    """Validate connectivity/auth for the integration Test button.

    Cortex XSOAR treats any return value other than 'ok' as an error message
    shown in the UI, so auth failures are translated to a readable string.
    """
    try:
        # A successful /health call proves both reachability and a valid key.
        client.get_health()
    except DemistoException as e:
        if 'Forbidden' in str(e):
            return 'Authorization Error: make sure API Key is correctly set'
        else:
            # Anything else (network, 5xx, ...) surfaces as-is.
            raise e
    return 'ok'
def get_hashes_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]:
    """Look up one or more MD5 hashes and build DBotScore / File outputs.

    Returns (readable markdown, context outputs, raw per-file data) for
    ``return_outputs``.  Only MD5 input is accepted; SHA256 values found in
    engine results are hoisted onto the file entry and scored too.
    """
    hashes = argToList(args.get('md5_hashes'))
    if len(hashes) == 0:
        raise ValueError('hash(es) not specified')
    # Validate every hash up-front so a single bad value fails the command.
    for hash in hashes:
        if md5Regex.match(hash):
            continue
        raise Exception('Invalid hash. Only MD5 is supported.')
    dbot_score_list: List[Dict[str, Any]] = []
    file_standard_list: List[Dict[str, Any]] = []
    file_data_list: List[Dict[str, Any]] = []
    for hash in hashes:
        file_data = client.get_file_reputation(hash)
        # Normalise the API's lower-case 'md5' key to XSOAR's 'MD5'.
        file_data['MD5'] = file_data['md5']
        del file_data['md5']
        engines = file_data.get('engine_results', {})
        # Hoist any per-engine sha256 up onto the file entry (last one wins).
        for key in engines.keys():
            if engines[key].get('sha256'):
                file_data['SHA256'] = engines[key].get('sha256')
                del engines[key]['sha256']
        # If the outer `is_malicious` is set to True, assume the score should be bad
        # Otherwise, default to unknown unless at least one engine has returned a verdict besides `not_found`
        if file_data['is_malicious']:
            score = 3  # bad
        else:
            score = 0  # unknown
            for key in engines.keys():
                verdict = engines[key].get('verdict', 'not_found')
                if verdict != "not_found" and verdict != "malicious":
                    score = 1  # good
                    break
        dbot_score = {
            'Indicator': hash,
            'Vendor': 'FireEye DoD',
            'Type': 'file',
            'Score': score
        }
        file_standard_context = {
            'MD5': hash,
        }
        if score == 3:
            # if score is bad must add DBotScore Vendor and Description
            file_standard_context['Malicious'] = {
                'Vendor': 'FireEye DoD'
            }
        filedata = {}
        # NOTE: filedata['FireEyeDoD'] aliases file_data, so the dels below
        # also remove the keys from file_data itself.
        filedata['FireEyeDoD'] = file_data
        filedata['MD5'] = file_data['MD5']
        del filedata['FireEyeDoD']['MD5']
        if file_data.get('SHA256'):
            # Score the SHA256 as a second indicator with the same verdict.
            dbot_score_sha256 = {
                'Indicator': file_data.get('SHA256'),
                'Vendor': 'FireEye DoD',
                'Type': 'file',
                'Score': score
            }
            dbot_score_list.append(dbot_score_sha256)
            filedata['SHA256'] = file_data['SHA256']
            file_standard_context['SHA256'] = file_data['SHA256']
            del filedata['FireEyeDoD']['SHA256']
        file_standard_list.append(file_standard_context)
        dbot_score_list.append(dbot_score)
        file_data_list.append(filedata)
    outputs = {
        'DBotScore(val.Vendor == obj.Vendor && val.Indicator == obj.Indicator)': dbot_score_list,
        outputPaths['file']: file_standard_list,
        'File(val.MD5 == obj.MD5 || val.SHA256 == obj.SHA256)': file_data_list
    }
    readable_output = tableToMarkdown('FireEye DoD Results', file_standard_list, headers=["MD5", "SHA256", "Malicious"])
    return (
        readable_output,
        outputs,
        file_data_list
    )
def generate_report_url(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, dict]:
    """Build a pre-signed, time-limited download URL for a finished report.

    ``expiration`` (hours) is required and must lie in [1, 8760].
    Returns (readable markdown, empty context, raw API response).
    """
    report_id = str(args.get('report_id'))
    expiration = arg_to_int(arg=args.get('expiration'), arg_name='expiration', required=True)
    # Guard clauses: missing/zero value first, then out-of-range.
    if not expiration:
        raise ValueError('Expiration not specified or not a number.')
    if not 1 <= expiration <= 8760:
        raise ValueError('Expiration must be between 1 and 8760 hours.')
    report = client.get_report_url(report_id=report_id, expiration=expiration)
    url = report.get('presigned_report_url')
    return (
        f'Report {report_id} is available [here]({url})',
        {},
        report,
    )
def submit_file_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, dict]:
    """Upload a war-room file (by entryID) for detonation.

    Returns (readable markdown, context outputs, raw scan response); the
    scan is tracked in context under FireEyeDoD.Scan keyed by report_id.
    """
    entry_id = demisto.args().get('entryID')
    file_entry = demisto.getFilePath(entry_id)  # returns {'name': ..., 'path': ...}
    file_name = file_entry['name']
    file_path = file_entry['path']
    # NOTE(review): the file handle is handed to requests and never explicitly
    # closed; it is released only when garbage-collected.
    files = {'file': (file_name, open(file_path, 'rb'))}
    # Optional parameters to send along with the file
    optional_params = ['password', 'param', 'screenshot', 'video', 'fileExtraction', 'memoryDump', 'pcap']
    data = {}
    for param in optional_params:
        value = demisto.args().get(param)
        if value:
            data[param] = value
    scan = client.submit_file(files=files, data=data)
    scan['filename'] = file_name
    # Replace the raw API 'status' with the polling-friendly overall_status.
    del scan['status']
    scan['overall_status'] = 'RUNNING'
    report_id = scan.get('report_id')
    readable_output = (
        f'Started analysis of {file_name} with FireEye Detection on Demand.'
        f'Results will be published to report id: {report_id}'
    )
    outputs = {
        'FireEyeDoD.Scan(val.report_id == obj.report_id)': scan
    }
    return (
        readable_output,
        outputs,
        scan
    )
def submit_urls_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, dict]:
    """Submit one or more URLs to FireEye DoD for analysis.

    Returns (readable markdown, context outputs, raw scan response); the
    scan is tracked in context under FireEyeDoD.Scan keyed by report_id.
    """
    urls = argToList(args.get('urls'))
    if len(urls) == 0:
        # Bug fix: the original raised 'hash(es) not specified' — a copy/paste
        # from the hashes command; this command validates URLs.
        raise ValueError('url(s) not specified')
    # Format the URLs into a JSON-style string list, which the API understands
    # (simplified from the original map/lambda/replace construction).
    formatted_urls = "[" + ",".join(f'"{url}"' for url in urls) + "]"
    data = {'urls': formatted_urls}
    scan = client.submit_urls(data=data)
    # Replace the raw API 'status' with the polling-friendly overall_status.
    del scan['status']
    scan['overall_status'] = 'RUNNING'
    report_id = scan.get('report_id')
    readable_output = (
        f'Started analysis of {urls} with FireEye Detection on Demand.'
        f'Results will be published to report id: {report_id}'
    )
    outputs = {
        'FireEyeDoD.Scan(val.report_id == obj.report_id)': scan
    }
    return (
        readable_output,
        outputs,
        scan
    )
def get_reports_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]:
    """Fetch scan status for one or more report ids.

    Optionally attaches a screenshot (as an image entry) and/or an artifact
    archive (as a file entry) for each report via demisto.results side effects.
    Returns (readable markdown, context outputs, report list).
    """
    report_id_list = argToList(args.get('report_ids', []))
    extended = args.get('extended_report', "False")
    screenshot = args.get('get_screenshot', "false")
    artifact = args.get('get_artifact', "")
    if len(report_id_list) == 0:
        raise ValueError('report_id(s) not specified')
    report_list: List[Dict[str, Any]] = []
    for report_id in report_id_list:
        report = client.report_status(report_id=report_id, extended=extended)
        if screenshot.lower() == "true":
            # Bug fix: the original reassigned `screenshot` itself to the binary
            # artifact, so on every later iteration bytes.lower() != "true" and
            # screenshots were silently skipped for all subsequent reports.
            screenshot_data = client.report_artifact(report_id=report_id, artifact_type="screenshot")
            stored_img = fileResult('screenshot.gif', screenshot_data)
            demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'],
                             'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''})
        if artifact != "":
            artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact)
            stored_artifacts = fileResult('artifacts.zip', artifacts)
            demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'],
                             'File': stored_artifacts['File'], 'FileID': stored_artifacts['FileID'], 'Contents': ''})
        report_list.append(report)
    readable_output = tableToMarkdown('Scan status', report_list)
    outputs = {
        'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list
    }
    return (
        readable_output,
        outputs,
        report_list
    )
''' MAIN FUNCTION '''
def main() -> None:
    """Parse integration params, build the API client, dispatch the command."""
    api_key = demisto.params().get('apikey')
    # get the service API url
    base_url = demisto.params()['url']
    # BaseClient handles SSL verification; the UI checkbox param is "insecure".
    verify_certificate = not demisto.params().get('insecure', False)
    # BaseClient likewise honours the system proxy settings when proxy=True.
    proxy = demisto.params().get('proxy', False)
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        # DoD authenticates with a custom header rather than Basic auth.
        headers = {
            'feye-auth-key': f'{api_key}'
        }
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers=headers,
            proxy=proxy)
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            demisto.results(result)
        elif demisto.command() == 'fireeye-dod-get-hashes':
            return_outputs(*get_hashes_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-get-reports':
            return_outputs(*get_reports_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-submit-file':
            return_outputs(*submit_file_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-submit-urls':
            return_outputs(*submit_urls_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-get-report-url':
            return_outputs(*generate_report_url(client, demisto.args()))
    except Exception as e:
        # NOTE(review): re-raising here bypasses the conventional
        # return_error() pattern (left commented out below); changing it
        # would alter how errors surface, so it is kept as-is.
        raise e
        # demisto.error(traceback.format_exc())  # print the traceback
        # return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Also run under XSOAR script runners, where __name__ is 'builtins'/'__builtin__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
f4977b940309c3db80b8b7122ffe5463cb13ab45 | 7a4173164c3f2a29f89e8daa22f20a9ddf50cc2a | /build/common_msgs-jade-devel/trajectory_msgs/cmake/trajectory_msgs-genmsg-context.py | e1e80e98ac47c4d2a1c9a331c514fcfb58ca9d60 | [] | no_license | temburuyk/y_ws | 10325d370de5a1da6bea88d71950261dfd8cadc3 | 2b2ec0e81bc31f5ed20c73bcd2faebef8a99b1d1 | refs/heads/master | 2021-01-20T08:30:27.648840 | 2017-05-10T19:20:06 | 2017-05-10T19:20:06 | 90,156,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/yashwant/y_ws/src/common_msgs-jade-devel/trajectory_msgs/msg/JointTrajectory.msg;/home/yashwant/y_ws/src/common_msgs-jade-devel/trajectory_msgs/msg/JointTrajectoryPoint.msg;/home/yashwant/y_ws/src/common_msgs-jade-devel/trajectory_msgs/msg/MultiDOFJointTrajectory.msg;/home/yashwant/y_ws/src/common_msgs-jade-devel/trajectory_msgs/msg/MultiDOFJointTrajectoryPoint.msg"
services_str = ""
pkg_name = "trajectory_msgs"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "trajectory_msgs;/home/yashwant/y_ws/src/common_msgs-jade-devel/trajectory_msgs/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg;geometry_msgs;/home/yashwant/y_ws/src/common_msgs-jade-devel/geometry_msgs/msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"temburuyk@gmail.com"
] | temburuyk@gmail.com |
044d41548c3993057f71bf76ba54f7ce14c448b4 | 51417e013df3269356674c5731cce307ce60de48 | /assistant.py | 2af5201137c161961488f8c8eebfee0c6561905a | [] | no_license | shawavisek35/WordMasterIronMan | a3455d265dd7a0d8e1d3c59bc2bd9ed187c2f526 | 3227c40add8c9f0175307a9e29b1c45a0bea0669 | refs/heads/master | 2021-07-17T08:04:15.035793 | 2020-10-01T02:54:41 | 2020-10-01T02:54:41 | 215,551,076 | 2 | 1 | null | 2020-10-01T02:54:42 | 2019-10-16T13:09:27 | Python | UTF-8 | Python | false | false | 3,355 | py | import pyttsx3
import datetime
import speech_recognition as sr
import random
import time
file = "game.txt"
# Load the word list once at startup.  Fix: use `with` so the file handle is
# closed immediately (the original `open()` left it open for the whole run).
with open(file, 'r') as game_file:
    games = game_file.read().split("\n")

engine = pyttsx3.init('sapi5')  # initialise the Microsoft SAPI5 TTS backend
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)  # voice 0: David (male) on Windows
engine.setProperty('rate', 150)  # speaking speed, words per minute
def speak(audio):
    """Queue `audio` on the TTS engine and block until it has been spoken."""
    engine.say(audio)
    engine.runAndWait()
def takeCommand():
    """Listen on the microphone and return the recognised text.

    Returns the literal string "None" (not the None object) when recognition
    fails; callers compare/substring-match against the returned string.
    """
    #it takes audio from microphone and returns a string
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening.............")
        #r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing..............")
        # Google Web Speech API, Indian English model.
        query = r.recognize_google(audio, language="en-in")
        print(f"you said.......{query}\n")
    except Exception as e:
        print(f"say that again please........{e}")
        return "None"
    return query
def wishMe():
    """Speak a time-of-day greeting followed by the assistant's intro line."""
    hour = datetime.datetime.now().hour
    if 5 <= hour < 12:
        greeting = "Good Morning"
    elif 12 <= hour < 16:
        greeting = "Good Afternoon"
    else:
        greeting = "Good Evening"
    speak(greeting)
    speak("Hello Sir I am Friday How can i help you!")
def gameEngine(l):
    """Run the voice word-guessing game over word list `l`; return the score.

    For each word, 3-4 letter positions are masked; the player has 20 seconds
    to say the full word (+10 correct, -5 wrong or timed out).  The game ends
    when the player answers "no" to continuing or the list is exhausted.
    """
    t = "yes"
    score = 0
    k = 0
    while (t != "no") and k < len(l):
        b = str(l[k])
        k = k + 1
        c = list(b)
        d = random.randint(3, 4)
        # Bug fix: the original tried to retry duplicate indices by mutating
        # the for-loop variable (`i = i - 1`), which is a no-op in Python, so
        # fewer letters than intended could be hidden.  random.sample picks
        # distinct positions directly (capped at the word length).
        l2 = random.sample(range(len(c)), min(d, len(c)))
        speak("The word is......")
        print(f"Word number {k} : ", end=" ")
        for j in range(len(c)):
            if j in l2:
                print("__", end=" ")
            else:
                print(c[j], end=" ")
        print("Guess the correct word : ")
        start = time.time()
        f = takeCommand()
        end = time.time()
        s = end - start
        if s <= 20:
            # NOTE(review): comparison is case-insensitive on the guess only;
            # assumes the word list is lower-case — confirm game.txt contents.
            if f.lower() == b:
                score = score + 10
                print(f":) Correct Guess! Score : {score}")
                speak("Wohoo! Correct Guess......")
                speak(f"Your score is {score}")
            else:
                score = score - 5
                print(f":( Wrong Guess! Score : {score}")
                speak(f"Ahhhhh! Wrong Guess ")
                speak(f"Your score is {score}")
        else:
            print("Sorry You missed it the time is over.........")
            speak("Sorry you are out of time try another question")
            score = score - 5
        speak("Would you like to continue")
        t = takeCommand()
    print('-'*15, "Game Over", '-'*15, '\n')
    return score
if __name__ == '__main__':
    wishMe()
    # NOTE(review): this loop never exits on its own; quitting requires
    # Ctrl+C.  Commands other than "game"/"play" are silently ignored.
    while True:
        query = takeCommand()
        if (("game" in query) or ('play' in query)):
            speak("Welcome to the game of word master")
            print('*'*15,"||Welcome to Word Master ||",'*'*15,'\n')
            run = gameEngine(games)
            print(f"Your Final score is : {run}\n")
            print("*"*15,"Thanks For Playing","*"*15)
            speak(f"Your Final score is : {run}\n")
            speak("Thanks For Playing")
| [
"shawavisek35@gmail.com"
] | shawavisek35@gmail.com |
7d41ee9b36e2d17373fbc0b8ffe80d82ccb59f6b | 0dd0f1944ffbaf12d5abe130e2d7d6d33b3ffb2a | /exercises/chapter1/exercise13.py | 82f3fba8be84a7b198cab155348e3a80e3c8b997 | [] | no_license | faroeq33/pythonworkshop | dbba888c3e5a1e61486c5438d63b8c9778774bcf | 69372c32f0ed912c3a5d41761e86dfeff0bd56a0 | refs/heads/master | 2023-08-10T20:04:50.587766 | 2021-09-26T11:57:42 | 2021-09-26T11:57:42 | 410,543,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | over_18 = True
print(type(over_18))
over_21 = False
over_18, over_21 = True, False
print(over_18 and over_21)
print(over_18 or over_21)
print(not over_21 or (over_21 or over_18))
| [
"alves_1044@hotmail.com"
] | alves_1044@hotmail.com |
1f330a243eabf5b8c046f3eeffcee642a856d548 | 0937646b6ce9249a8d193987f308ce398dc28bd1 | /104API/104API.py | 5ec24357b53855d4c9189223fbb28b268e8829ff | [] | no_license | barry800414/JobTitleNLP | 98622d02b25b1418f28698f7d772c8de96642032 | b379c2052447e6483d17f5db51fb918b37ac7a52 | refs/heads/master | 2021-06-08T19:36:39.044757 | 2016-10-21T03:11:10 | 2016-10-21T03:11:10 | 66,043,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | #!/usr/bin/env python3
# invoke 104 API to get all 104 jobs
import sys
import requests
import json
from getCat import getL3ID
API_URL = "http://www.104.com.tw/i/apis/jobsearch.cfm"
def getJobsByCatID(catID, verbose=0):
    """Fetch every job in 104 category `catID` across all result pages.

    Returns {job_id: job_dict}; best-effort — any request/parse error is
    printed to stderr and whatever was collected so far is returned.
    """
    jobs = dict()
    # First request asks only for the id column ("J") to learn the page count.
    payload = {
        "cat": catID,
        "role": 1,
        "fmt": 8,
        "cols": "J"
    }
    try:
        r = requests.get(API_URL, params = payload)
        if verbose >= 1:
            print(r.url, r.status_code)
        p = r.json()
        nPage = int(p['TOTALPAGE'])
        # Pages are 1-based on the API side.
        for i in range(0, nPage):
            jobs.update(__getJobsByCatID(catID, i+1, verbose))
    except Exception as e:
        print(e, file=sys.stderr)
    return jobs
def __getJobsByCatID(catID, page, verbose=0):
    """Fetch a single result page of a category (helper of getJobsByCatID).

    page    -- 1-based page number
    verbose -- >= 2 prints the request URL and HTTP status code
    Returns a dict mapping job id ('J') to
    {'title', 'cat', 'company_name'}; empty on error (logged to stderr).
    """
    jobs = dict()
    payload = {
        "cat": catID,
        "role": 1,
        "fmt": 8,
        "cols": "J,JOB,JOBCAT_DESCRIPT,NAME",
        "page": page
    }
    try:
        r = requests.get(API_URL, params = payload)
        if verbose >= 2:
            print(r.url, r.status_code)
        p = r.json()
        for d in p['data']:
            # JOBCAT_DESCRIPT is '@'-separated; drop the literal marker
            # entry ("類目") and keep the real category names.
            cat = [c for c in d['JOBCAT_DESCRIPT'].split('@') if c != "類目"]
            jobs[d['J']] = { "title": d['JOB'], "cat": cat, 'company_name': d['NAME'] }
    except Exception as e:
        # A failed page is skipped rather than aborting the whole crawl.
        print(e, file=sys.stderr)
    return jobs
if __name__ == '__main__':
    # Usage: 104API.py <categoryJsonFile> <outJsonFile>
    if len(sys.argv) != 3:
        print('Usage:', sys.argv[0], 'category outJsonFile', file=sys.stderr)
        exit(-1)
    with open(sys.argv[1], 'r') as f:
        rawCat = json.load(f)
    # getL3ID presumably flattens the raw category tree into
    # {category id: category name} -- see the getCat module.
    cat = getL3ID(rawCat)
    # all job category ids
    allJobs = dict()
    for i, (catID, catName) in enumerate(cat.items()):
        print('(%d/%d) Start crawling Category %s(%s):' % (i+1, len(cat), catName, catID), end='', flush=True)
        jobs = getJobsByCatID(catID)
        print('%d' % len(jobs), flush=True)
        allJobs[catName] = jobs
    # ensure_ascii=False keeps the Chinese titles human-readable on disk.
    with open(sys.argv[2], 'w') as f:
        json.dump(allJobs, f, indent=1, ensure_ascii=False)
| [
"barry800414@gmail.com"
] | barry800414@gmail.com |
f8356305f3dfeea075f0d0d3eea9504e757a78b8 | 75d5d899f0091e65b4b89b0eed58bdc562cb0f96 | /recharging/apps.py | 2cab36f2047b18624a5a2ecee9d4184566484ecf | [] | no_license | sowmya-matsa/phonepe | 21568d55c6beb5ce9790bd28517831c5fb70b8f7 | bac69e5ae52f00f1bb933b517b4dc330575fd8a4 | refs/heads/main | 2023-05-06T09:20:34.743756 | 2021-06-01T12:22:25 | 2021-06-01T12:22:25 | 372,738,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.apps import AppConfig
class RechargingConfig(AppConfig):
    """Django application configuration for the 'recharging' app."""
    # Default primary-key field type for models defined in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'recharging'
| [
"sowmya.matsa@gmail.com"
] | sowmya.matsa@gmail.com |
325c6d40ef1f149c2dca061c17b5f36867213530 | bce19f6720149c92ff055cdf57323db20c831626 | /Data_collections/list/list_comprehensin.py | 5245cf0ddf639cbc1206b772842a8cb73af282d1 | [] | no_license | ashinsukumaran/pythonprograms | f365ec242a8371aa27b57870d39c08a19fd3c701 | f79dbcf4d64a77c38f9e8b252035a36664b69972 | refs/heads/master | 2023-07-25T02:13:54.894314 | 2021-09-11T04:38:55 | 2021-09-11T04:38:55 | 402,320,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | # a=[1,3,5,7,9,8]
# A list comprehension replaces the equivalent append loop:
#   s = []
#   for n in numbers:
#       s.append(n * 5)
numbers=[1,3,5,7,9,8]
s = [5 * value for value in numbers]
print(s)
| [
"ashinsukumaran@gmail.com"
] | ashinsukumaran@gmail.com |
832f446fb1e15a7288064696f0c6f7f0f007b296 | 78b5447907992b5c0d26ec1559efba2e42ae6341 | /python/pandas-1/test_main.py | a1f49cf8efa6318e36b732f83441cb7e8ecc5d4d | [] | no_license | rpedrodasilva10/codenation | 842d18ed51a636ed37298f4fb10869789d2a975b | 8fd190af44d3818c06b6c51efb8d3e9a1a7de986 | refs/heads/master | 2020-04-17T06:17:16.834420 | 2019-05-29T02:29:22 | 2019-05-29T02:29:22 | 166,318,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import sanity_checks as sc
import main as m
# Smoke tests: each exercise part_N() from main is validated by the matching
# sanity check; sc.part_N(...) returns a truthy value on success.
def test_0():
    assert sc.part_0(m.part_0())
def test_1():
    assert sc.part_1(m.part_1())
def test_2():
    assert sc.part_2(m.part_2())
def test_3():
    assert sc.part_3(m.part_3())
def test_4():
    assert sc.part_4(m.part_4())
def test_5():
    assert sc.part_5(m.part_5())
def test_6():
    assert sc.part_6(m.part_6())
def test_7():
    assert sc.part_7(m.part_7())
| [
"rpedrodasilva10@gmail.com"
] | rpedrodasilva10@gmail.com |
592db6f6d70a8bff481119c1ab133e7005c03980 | 8a318931d633306cf38262e4eb409d18d9a37965 | /INICIANTE/1018 - Cédulas.py | 6e0bca3087d9269211bc3e23e2df0e460c167ca6 | [] | no_license | Jhonathanalvesbr/URI | 1f8820a1461140ecae8ffb9fc061c85a53e8faf8 | 85648534c01b5ade3cb2e878fb4cd40badbf494b | refs/heads/master | 2020-03-11T14:36:11.935023 | 2018-07-26T19:35:52 | 2018-07-26T19:35:52 | 130,059,567 | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 1,581 | py | # -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
# URI 1018 - Cedulas: read an amount and print, for every banknote
# denomination from largest to smallest, how many notes are needed
# (greedy change-making; output format matches the judge exactly).
dinheiro = int(input())
if (dinheiro > 0) and (dinheiro < 1000000):
    print(dinheiro)
    # One greedy loop replaces seven copy-pasted if/else blocks; integer
    # floor division (//) also avoids the float round-trip of int(x / d).
    restante = dinheiro
    for nota in (100, 50, 20, 10, 5, 2, 1):
        quantidade = restante // nota
        restante -= quantidade * nota
        print("%d nota(s) de R$ %d,00" % (quantidade, nota))
| [
"noreply@github.com"
] | Jhonathanalvesbr.noreply@github.com |
a3ddfd87f910aeddaeb2fdccc180e2928ab42be7 | bc441bb06b8948288f110af63feda4e798f30225 | /object_store_sdk/model/notify/subscriber_pb2.py | 692c20775b418cfdc51ab0f6e6720297f4eb1271 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 5,376 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: subscriber.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_store_sdk.model.notify import subscribe_info_pb2 as object__store__sdk_dot_model_dot_notify_dot_subscribe__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='subscriber.proto',
package='notify',
syntax='proto3',
serialized_options=_b('Z@go.easyops.local/contracts/protorepo-models/easyops/model/notify'),
serialized_pb=_b('\n\x10subscriber.proto\x12\x06notify\x1a\x32object_store_sdk/model/notify/subscribe_info.proto\"\xab\x01\n\nSubscriber\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x61\x64min\x18\x02 \x01(\t\x12\x10\n\x08\x63\x61llback\x18\x03 \x01(\t\x12\x0f\n\x07\x65nsName\x18\x04 \x01(\t\x12\x0f\n\x07procNum\x18\x05 \x01(\x05\x12\x0f\n\x07msgType\x18\x06 \x01(\x05\x12\r\n\x05retry\x18\x07 \x01(\x05\x12,\n\rsubscribeInfo\x18\x08 \x03(\x0b\x32\x15.notify.SubscribeInfoBBZ@go.easyops.local/contracts/protorepo-models/easyops/model/notifyb\x06proto3')
,
dependencies=[object__store__sdk_dot_model_dot_notify_dot_subscribe__info__pb2.DESCRIPTOR,])
_SUBSCRIBER = _descriptor.Descriptor(
name='Subscriber',
full_name='notify.Subscriber',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='notify.Subscriber.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='admin', full_name='notify.Subscriber.admin', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='callback', full_name='notify.Subscriber.callback', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ensName', full_name='notify.Subscriber.ensName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='procNum', full_name='notify.Subscriber.procNum', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msgType', full_name='notify.Subscriber.msgType', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retry', full_name='notify.Subscriber.retry', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribeInfo', full_name='notify.Subscriber.subscribeInfo', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=252,
)
_SUBSCRIBER.fields_by_name['subscribeInfo'].message_type = object__store__sdk_dot_model_dot_notify_dot_subscribe__info__pb2._SUBSCRIBEINFO
DESCRIPTOR.message_types_by_name['Subscriber'] = _SUBSCRIBER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Subscriber = _reflection.GeneratedProtocolMessageType('Subscriber', (_message.Message,), {
'DESCRIPTOR' : _SUBSCRIBER,
'__module__' : 'subscriber_pb2'
# @@protoc_insertion_point(class_scope:notify.Subscriber)
})
_sym_db.RegisterMessage(Subscriber)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
f9ff122fcd931f462d919729a4c829331046a291 | b33b3969566b0208f4037c0b9a4aab6ec729bfe7 | /code/pde/6d1.py | 14cc6f49759585147a60535fd237eff0d9cbef30 | [] | no_license | cbtxs/cbt | 78cccc2248adb4561ceb443dcd19d4b4160aaffd | d9223abfa609e7022b31d2077b10c54a6344f9a0 | refs/heads/master | 2022-12-24T09:55:04.736353 | 2022-12-15T04:53:06 | 2022-12-15T04:53:06 | 195,201,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | import numpy as np
def f(x, y):
    """Initial temperature profile: sin(pi*x) * cos(pi*y)."""
    return np.sin(np.pi * x) * np.cos(np.pi * y)
def g(x, y, t):
    """Exact solution: the initial profile damped by exp(-pi^2 * t / 8)."""
    decay = np.exp(-t * np.pi * np.pi / 8)
    return np.sin(np.pi * x) * np.cos(np.pi * y) * decay
# Crank-Nicolson-style implicit scheme on the unit square, advanced to
# tmx = 1 on an (n+1)x(n+1) grid.  Judging from the stencil weights and the
# exp(-pi^2 t/8) decay of g, this solves u_t = c*laplacian(u) with c = 1/16
# -- TODO confirm against the course notes.
n=int(input('n='))
h0,h1=1/n,1/n
tmx=1
xdate=np.linspace(0,1,n+1)
ydate=np.linspace(0,1,n+1)
tdate=np.linspace(0,tmx,n+1)
# A: left-hand (implicit) operator on the flattened grid; row i*(n+1)+j
# corresponds to grid node (i, j).
A=np.zeros([(n+1)**2,(n+1)**2])
for i in range(n+1):
    for j in range(n+1):
        if i==0 or i==n:
            # x-boundary rows: identity row (value pinned by the RHS).
            A[i*(n+1)+j,i*(n+1)+j]=1
        elif j==0:
            # y=0 boundary: u(i,0) - u(i,1) = 0 (one-sided zero difference).
            A[i*(n+1)+j,i*(n+1)+j]=1
            A[i*(n+1)+j,i*(n+1)+j+1]=-1
        elif j==n:
            # y=1 boundary: u(i,n) - u(i,n-1) = 0.
            A[i*(n+1)+j,i*(n+1)+j]=1
            A[i*(n+1)+j,i*(n+1)+j-1]=-1
        else:
            # Interior 5-point stencil with Crank-Nicolson half weights.
            A[i*(n+1)+j,i*(n+1)+j+1]=-h0/(32*h1**2)
            A[i*(n+1)+j,i*(n+1)+j-1]=-h0/(32*h1**2)
            A[i*(n+1)+j,(i+1)*(n+1)+j]=-h0/(32*h1**2)
            A[i*(n+1)+j,(i-1)*(n+1)+j]=-h0/(32*h1**2)
            A[i*(n+1)+j,i*(n+1)+j]=1+h0/(8*h1**2)
# B: right-hand (explicit) operator; boundary rows stay all-zero, giving
# the boundary equations above a zero right-hand side.
B=np.zeros([(n+1)**2,(n+1)**2])
for i in range(n+1)[1:-1]:
    for j in range(n+1)[1:-1]:
        B[i*(n+1)+j,i*(n+1)+j+1]=h0/(32*h1**2)
        B[i*(n+1)+j,i*(n+1)+j-1]=h0/(32*h1**2)
        B[i*(n+1)+j,(i+1)*(n+1)+j]=h0/(32*h1**2)
        B[i*(n+1)+j,(i-1)*(n+1)+j]=h0/(32*h1**2)
        B[i*(n+1)+j,i*(n+1)+j]=1-h0/(8*h1**2)
# Initial condition sampled on the mesh; transpose + flatten matches the
# row layout used when assembling A and B (flat index = i*(n+1)+j).
x_d,y_d=np.meshgrid(xdate,ydate)
u=f(x_d,y_d)
u=u.T
u=u.flatten()
# ccc: spectral radius of inv(A) @ B (stability check); computed but unused.
aa=np.linalg.inv(A)
bb=np.matmul(aa,B)
b=np.linalg.eig(bb)
ccc=abs(b[0]).max()
# Time stepping: solve A u^{k+1} = B u^k at every step.
for t1 in tdate[1:]:
    tmp=np.matmul(B,u)
    u=np.linalg.solve(A,tmp)
u=u.reshape(n+1,n+1)
u=u.T
# Compare against the exact solution g at the final time.
u_true=g(x_d,y_d,tmx)
# er: interior mean absolute error; computed but not printed.
er=np.linalg.norm(u[1:-1,1:-1]-u_true[1:-1,1:-1],ord=1)/(n-1)**2
err=abs(u-u_true).max()
print(err)
print(u)
| [
"chen742609031@163.com"
] | chen742609031@163.com |
58f05acbef9b1357f030cea3f83233397803dc13 | 828787d94932df409b647001fd8a56d6c69e13bd | /test/test_txEngine.py | 0f4348e53c40e0abac71f1e1a120b3fbea470599 | [] | no_license | ravijain056/GEMAC | bc8eab5bf25059ba479450bd414a98cf076dc018 | a34478cd0977ede330c77f259d5ed4f52a2b6efe | refs/heads/master | 2020-05-21T23:56:00.312261 | 2019-07-29T18:00:13 | 2019-07-29T18:00:13 | 58,395,981 | 4 | 3 | null | 2016-12-25T10:22:01 | 2016-05-09T17:42:05 | Python | UTF-8 | Python | false | false | 9,711 | py | from myhdl import block, instance, delay, ResetSignal, Signal, intbv,\
StopSimulation, now
from myhdl.conversion import verify
from gemac.intrafaces import TxGMII_Interface, TxFlowInterface
from gemac.txEngine import txengine
from gemac.interfaces import TxFIFOClientInterface
from random import randrange
import pytest
datastream = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0,
0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0]
def clkwait(clk, count=1):
    """Generator: yield clk.posedge once per clock cycle to be waited."""
    remaining = count
    while remaining:
        yield clk.posedge
        remaining -= 1
@pytest.fixture()
def setuptb():
    """Build the txengine DUT plus clock/reset drivers.

    Returns the testbench block factory together with the interface
    objects the individual tests drive and observe.
    """
    txclientintf = TxFIFOClientInterface()
    txgmii_intf = TxGMII_Interface()
    txflowintf = TxFlowInterface()
    txconfig = Signal(intbv(0)[32:])
    # Fix: 'async' became a reserved word in Python 3.7, so the original
    # ResetSignal(1, active=0, async=True) is a SyntaxError; MyHDL renamed
    # the keyword argument to 'isasync'.
    reset = ResetSignal(1, active=0, isasync=True)
    @block
    def testbench():
        dutinst = txengine(txclientintf, txgmii_intf, txflowintf,
                           txconfig, reset)
        @instance
        def hostclkdriver():
            # Free-running clock: toggle every 5 simulation time units.
            while True:
                txclientintf.clk.next = not txclientintf.clk
                yield delay(5)
        @instance
        def resetonstart():
            # Assert (active-low) reset for two clocks, then release.
            reset.next = 0
            yield clkwait(txclientintf.clk, count=2)
            reset.next = 1
            yield clkwait(txclientintf.clk, count=2)
        return dutinst, hostclkdriver, resetonstart
    return testbench, txclientintf, txgmii_intf, txflowintf, txconfig
def test_inbandfcs(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
@block
def test():
tbinst = tb()
print("Testing Inter-FrameGap Delay %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x30000000
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream)
@instance
def tbcheck():
yield clkwait(txclientintf.clk, count=10)
yield txclientintf.ack.posedge
yield clkwait(txclientintf.clk, count=2)
for i in range(len(datastream)):
yield txclientintf.clk.posedge
assert txgmii_intf.data == datastream[i]
return tbinst, tbstim, tbcheck
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=2000)
testInst.quit_sim()
def test_crc32(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
@block
def test():
tbinst = tb()
print("Testing CRC-32 Calculation %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x10000000
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream)
yield txclientintf.clk.posedge
crc32 = 0
for i in range(3, -1, -1):
yield txclientintf.clk.posedge
crc32 = (txgmii_intf.data << (i * 8)) | crc32
assert crc32 == 0x70C2A128
return tbinst, tbstim
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=2000)
testInst.quit_sim()
def test_preamble(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
@block
def test():
tbinst = tb()
print("Testing Preamble Addition %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x10000000
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream)
yield clkwait(txclientintf.clk, count=120)
@instance
def tbcheck():
yield clkwait(txclientintf.clk, count=16)
for _ in range(7):
assert txgmii_intf.data == 0x55
yield txclientintf.clk.posedge
assert txgmii_intf.data == 0xD5
return tbinst, tbstim, tbcheck
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=2000)
testInst.quit_sim()
def test_ifgdelay(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
@block
def test():
tbinst = tb()
print("Testing Transmit Normal %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x32000000
txclientintf.ifgdelay.next = 20
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream)
yield txclientintf.tx(datastream)
@instance
def tbcheck():
yield clkwait(txclientintf.clk, count=14)
yield txclientintf.dv.negedge
count = 0
while not txclientintf.data == 0xAA:
count += 1
yield txclientintf.clk.posedge
assert count == 20
return tbinst, tbstim, tbcheck
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=3000)
testInst.quit_sim()
def test_padding(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
@block
def test():
tbinst = tb()
print("Testing padding %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x10000000
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream[:20])
@instance
def tbcheck():
yield clkwait(txclientintf.clk, count=14)
yield txclientintf.dv.negedge
yield clkwait(txclientintf.clk, count=2)
count = 0
while txgmii_intf.dv:
yield txclientintf.clk.posedge
count += 1
assert count + 20 == 64
return tbinst, tbstim, tbcheck
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=2000)
testInst.quit_sim()
def test_maxframesize(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
datastream = [randrange(256) for _ in range(1600)]
@block
def test():
tbinst = tb()
print("Testing Max Permitted Length Restriction %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x10000000
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream)
@instance
def tbcheck():
yield clkwait(txclientintf.clk, count=10)
yield txgmii_intf.dv.posedge
erred = False
while(txgmii_intf.dv):
yield txclientintf.clk.posedge
if (txgmii_intf.err):
erred = True
assert erred
return tbinst, tbstim, tbcheck
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=20000)
testInst.quit_sim()
def test_jumboframe(setuptb):
tb, txclientintf, txgmii_intf, txflowintf, txconfig = setuptb
datastream = [randrange(256) for _ in range(1700)]
@block
def test():
tbinst = tb()
print("Testing Jumbo Frames %s" % tbinst)
@instance
def tbstim():
yield clkwait(txclientintf.clk, count=10)
txconfig.next = 0x50000000
yield clkwait(txclientintf.clk, count=2)
yield txclientintf.tx(datastream)
@instance
def tbcheck():
yield clkwait(txclientintf.clk, count=20)
yield txgmii_intf.dv.negedge
assert not txgmii_intf.err
return tbinst, tbstim, tbcheck
testInst = test()
testInst.config_sim(trace=False)
testInst.run_sim(duration=20000)
testInst.quit_sim()
def test_convertible():
    """Check the txengine design converts to Verilog: verify_convert()
    returns 0 on success with the iverilog backend."""
    @block
    def test():
        txclientintf = TxFIFOClientInterface()
        txgmii_intf = TxGMII_Interface()
        txflowintf = TxFlowInterface()
        txconfig = Signal(intbv(0)[32:])
        # Fix: 'async' is reserved since Python 3.7 (SyntaxError here);
        # MyHDL renamed the ResetSignal keyword argument to 'isasync'.
        reset = ResetSignal(1, active=0, isasync=True)
        dutinst = txengine(txclientintf, txgmii_intf, txflowintf,
                           txconfig, reset)
        print("Testing Convertibility %s" % dutinst)
        @instance
        def hostclkdriver():
            # Free-running clock toggling every 5 simulation time units.
            txclientintf.clk.next = 0
            while True:
                yield delay(5)
                txclientintf.clk.next = not txclientintf.clk
        @instance
        def testlogic():
            # Pulse reset, then stop the simulation shortly afterwards.
            reset.next = 0
            yield delay(15)
            reset.next = 1
            yield delay(20)
            print("Converted! %d" % now())
            raise StopSimulation
        return dutinst, testlogic, hostclkdriver
    testInst = test()
    verify.simulator = 'iverilog'
    assert testInst.verify_convert() == 0
| [
"ravijain056@gmail.com"
] | ravijain056@gmail.com |
0ecbf4138102c4055dd133fdab32236f77f95ea4 | 96f922b05c93587c3d0822dfb55530dbb7994884 | /Day 1/crowsnest.py | eb62a7a921af125e33c88937b38b3825d1a1f806 | [] | no_license | Ricareng1/python_class | a73bb107cadcf3036195777bb168db64606b1b07 | 6d48f8c7b3e6ee3b292d00af0c73ad67eafa7103 | refs/heads/master | 2023-07-01T09:25:52.292063 | 2021-08-11T22:02:51 | 2021-08-11T22:02:51 | 394,319,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | #!/usr/bin/env python3
"""
Author : t18 <me@wsu.com>
Date : 8/9/2021
Purpose:
"""
import argparse
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Crow's nest -- choose the correct article",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('word', metavar='word', help='A word')
    return parser.parse_args()
# --------------------------------------------------
def main():
    """Set the article based on the noun and print the lookout's cry."""
    args = get_args()
    word = args.word
    # Guard against an empty string (argparse accepts word=""), which
    # would make word[0] raise IndexError; '' gets the article 'a'.
    article = 'an' if word and word[0].lower() in 'aeiou' else 'a'
    print(f'Ahoy, Captain, {article} {word} off the larboard bow!')
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
"ricareng1"
] | ricareng1 |
40dcbecf7f281aed9dcb30300876afe764f694bd | 0c1d6b8dff8bedfffa8703015949b6ca6cc83f86 | /lib/worklists/operator/CT/v3.0/business/VDSL_4+2/WLAN_Multi/script.py | 9a9b6c6cbee9a6edf93585bdaa4fbf2b5d34cecf | [] | no_license | samwei8/TR069 | 6b87252bd53f23c37186c9433ce4d79507b8c7dd | 7f6b8d598359c6049a4e6cb1eb1db0899bce7f5c | refs/heads/master | 2021-06-21T11:07:47.345271 | 2017-08-08T07:14:55 | 2017-08-08T07:14:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,798 | py | #coding:utf-8
# -----------------------------rpc --------------------------
import os
import sys
DEBUG_UNIT = False
if (DEBUG_UNIT):
g_prj_dir = os.path.dirname(__file__)
parent1 = os.path.dirname(g_prj_dir)
parent2 = os.path.dirname(parent1)
parent3 = os.path.dirname(parent2)
parent4 = os.path.dirname(parent3) # tr069v3\lib
parent5 = os.path.dirname(parent4) # tr069v3\
sys.path.insert(0, parent4)
sys.path.insert(0, os.path.join(parent4, 'common'))
sys.path.insert(0, os.path.join(parent4, 'worklist'))
sys.path.insert(0, os.path.join(parent4, 'usercmd'))
sys.path.insert(0, os.path.join(parent5, 'vendor'))
from TR069.lib.common.event import *
from TR069.lib.common.error import *
from time import sleep
import TR069.lib.common.logs.log as log
g_prj_dir = os.path.dirname(__file__)
parent1 = os.path.dirname(g_prj_dir)
parent2 = os.path.dirname(parent1) # dir is system
try:
i = sys.path.index(parent2)
if (i !=0):
# stratege= boost priviledge
sys.path.pop(i)
sys.path.insert(0, parent2)
except Exception,e:
sys.path.insert(0, parent2)
import _Common
reload(_Common)
from _Common import *
import _WLANMulti
reload(_WLANMulti)
from _WLANMulti import WLANMulti
import _WLANMultiWANSetUP
reload(_WLANMultiWANSetUP)
from _WLANMultiWANSetUP import WLANMultiWANSetUP
def test_script(obj):
"""
"""
sn = obj.sn # 取得SN号
DeviceType = "VDSL" # 绑定tr069模板类型.只支持ADSL\LAN\EPON三种
rollbacklist = [] # 存储工单失败时需回退删除的实例.目前缺省是不开启回退
# 初始化日志
obj.dict_ret.update(str_result=u"开始执行工单:%s........\n" %
os.path.basename(os.path.dirname(__file__)))
# data传参
WEPKeyIndex = obj.dict_data.get("WEPKeyIndex")[0]
WEPEncryptionLevel = obj.dict_data.get("WEPEncryptionLevel")[0]
WEPKey = obj.dict_data.get("WEPKey")[0]
# WAN部分参数
PVC_OR_VLAN1 = obj.dict_data.get("PVC_OR_VLAN1")[0]
PVC_OR_VLAN2 = obj.dict_data.get("PVC_OR_VLAN2")[0]
PVC_OR_VLAN3 = obj.dict_data.get("PVC_OR_VLAN3")[0]
PVC_OR_VLAN4 = obj.dict_data.get("PVC_OR_VLAN4")[0]
Username1 = obj.dict_data.get("Username1")[0]
Password1 = obj.dict_data.get("Password1")[0]
Username2 = obj.dict_data.get("Username2")[0]
Password2 = obj.dict_data.get("Password2")[0]
WANEnable_Switch = obj.dict_data.get("WANEnable_Switch")[0]
# WLAN个数
Num = 4
BeaconType = 'Basic'
BasicAuthenticationMode = 'Both'
# LANDevice.{i}.WLANConfiguration.{i}.节点参数
dict_root = {'X_CT-COM_SSIDHide':[0, 'Null'],
'X_CT-COM_RFBand':[0, 'Null'],
'X_CT-COM_ChannelWidth':[0, 'Null'],
'X_CT-COM_GuardInterval':[0, 'Null'],
'X_CT-COM_RetryTimeout':[0, 'Null'],
'X_CT-COM_Powerlevel':[0, 'Null'],
'X_CT-COM_PowerValue':[0, 'Null'],
'X_CT-COM_APModuleEnable':[0, 'Null'],
'X_CT-COM_WPSKeyWord':[0, 'Null'],
'Enable':[1, '1'],
'Channel':[0, 'Null'],
'SSID':[0, 'Null'],
'BeaconType':[1, BeaconType],
'Standard':[0, 'Null'],
'WEPKeyIndex':[1, WEPKeyIndex],
'KeyPassphrase':[0, 'Null'],
'WEPEncryptionLevel':[1, WEPEncryptionLevel],
'BasicAuthenticationMode':[1, BasicAuthenticationMode],
'WPAEncryptionModes':[0, 'Null'],
'WPAAuthenticationMode':[0, 'Null'],
'IEEE11iEncryptionModes':[0, 'Null'],
'IEEE11iAuthenticationMode':[0, 'Null'],
'BasicDataTransmitRates':[0, 'Null'],
'OperationalDataTransmitRates':[0, 'Null']}
# WLANConfiguration.{i}.WEPKey.{i}.节点参数(WEP关心)
dict_WEPKey = {'WEPKey':[1, WEPKey]}
# WLANConfiguration.{i}.PreSharedKey.{i}.节点参数(WPA关心)
dict_PreSharedKey = {}
# 无线设置(第一个不修改,第二个设置为WEP,第三和第四个设置为不加密)
ret, ret_data = WLANMulti(obj, sn, Num, dict_root,
dict_WEPKey, dict_PreSharedKey={},
change_account=0,
rollbacklist=rollbacklist)
# 将工单脚本执行结果返回到OBJ的结果中
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
# 如果执行失败,统一调用回退机制(缺省是关闭的)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"工单:%s执行结束\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
# 直接新建四条WAN连接
# 第一条IP_Routed,PPPOE,INTERNET 绑定LAN1和WLAN1
# 第二条IP_Routed,PPPOE,INTERNET 绑定LAN2和WLAN3
# 第三条PPPoE_Bridged INTERNET 绑定LAN3和WLAN3
# 第四条PPPoE_Bridged INTERNET 绑定LAN4和WLAN4
LAN1 = 'InternetGatewayDevice.LANDevice.1.LANEthernetInterfaceConfig.1'
LAN2 = 'InternetGatewayDevice.LANDevice.1.LANEthernetInterfaceConfig.2'
LAN3 = 'InternetGatewayDevice.LANDevice.1.LANEthernetInterfaceConfig.3'
LAN4 = 'InternetGatewayDevice.LANDevice.1.LANEthernetInterfaceConfig.4'
WLAN1 = 'InternetGatewayDevice.LANDevice.1.WLANConfiguration.1'
WLAN2 = 'InternetGatewayDevice.LANDevice.1.WLANConfiguration.2'
WLAN3 = 'InternetGatewayDevice.LANDevice.1.WLANConfiguration.3'
WLAN4 = 'InternetGatewayDevice.LANDevice.1.WLANConfiguration.4'
# 第一条WAN的 WANDSLLinkConfig节点参数
if PVC_OR_VLAN1 == "":
PVC_OR_VLAN1_flag = 0
else:
PVC_OR_VLAN1_flag = 1
dict_wanlinkconfig1 = {'Enable':[1, '1'],
'Mode':[PVC_OR_VLAN1_flag, '2'],
'VLANIDMark':[PVC_OR_VLAN1_flag, PVC_OR_VLAN1]}
# 第一条WAN的WANPPPConnection节点参数
# 注意:X_CT-COM_IPMode节点有些V4版本没有做,所以不能使能为1.实际贝曼工单也是没有下发的
dict_wanpppconnection1 = {'Enable':[1, '1'],
'ConnectionType':[1, 'IP_Routed'],
'Name':[0, 'Null'],
'Username':[1, Username1],
'Password':[1, Password1],
'X_CT-COM_LanInterface':[1, LAN1+','+WLAN1],
'X_CT-COM_ServiceList':[1, 'INTERNET'],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null']}
# 第二条WAN的 WANDSLLinkConfig节点参数
if PVC_OR_VLAN2 == "":
PVC_OR_VLAN2_flag = 0
else:
PVC_OR_VLAN2_flag = 1
dict_wanlinkconfig2 = {'Enable':[1, '1'],
'Mode':[PVC_OR_VLAN2_flag, '2'],
'VLANIDMark':[PVC_OR_VLAN2_flag, PVC_OR_VLAN2]}
# 第二条WAN的WANPPPConnection节点参数
dict_wanpppconnection2 = {'Enable':[1, '1'],
'ConnectionType':[1, 'IP_Routed'],
'Name':[0, 'Null'],
'Username':[1, Username2],
'Password':[1, Password2],
'X_CT-COM_LanInterface':[1, LAN2+','+WLAN2],
'X_CT-COM_ServiceList':[1, 'INTERNET'],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null']}
# 第三条WAN的 WANDSLLinkConfig节点参数
if PVC_OR_VLAN3 == "":
PVC_OR_VLAN3_flag = 0
else:
PVC_OR_VLAN3_flag = 1
dict_wanlinkconfig3 = {'Enable':[1, '1'],
'Mode':[PVC_OR_VLAN3_flag, '2'],
'VLANIDMark':[PVC_OR_VLAN3_flag, PVC_OR_VLAN3]}
# 第三条WAN的WANPPPConnection节点参数
dict_wanpppconnection3 = {'Enable':[1, '1'],
'ConnectionType':[1, 'PPPoE_Bridged'],
'Name':[0, 'Null'],
'Username':[0, 'Null'],
'Password':[0, 'Null'],
'X_CT-COM_LanInterface':[1, LAN3+','+WLAN3],
'X_CT-COM_ServiceList':[1, 'INTERNET'],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null']}
# 第四条WAN的 WANDSLLinkConfig节点参数
if PVC_OR_VLAN4 == "":
PVC_OR_VLAN4_flag = 0
else:
PVC_OR_VLAN4_flag = 1
dict_wanlinkconfig4 = {'Enable':[1, '1'],
'Mode':[PVC_OR_VLAN4_flag, '2'],
'VLANIDMark':[PVC_OR_VLAN4_flag, PVC_OR_VLAN4]}
# 第四条WAN的WANPPPConnection节点参数
dict_wanpppconnection4 = {'Enable':[1, '1'],
'ConnectionType':[1, 'PPPoE_Bridged'],
'Name':[0, 'Null'],
'Username':[0, 'Null'],
'Password':[0, 'Null'],
'X_CT-COM_LanInterface':[1, LAN4+','+WLAN4],
'X_CT-COM_ServiceList':[1, 'INTERNET'],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null']}
# 第一条PPPoE WAN连接开通
ret, ret_data = WLANMultiWANSetUP(obj, sn, WANEnable_Switch,
DeviceType, 'PPPoE',
PVC_OR_VLAN1, dict_wanlinkconfig1,
dict_wanpppconnection1,
dict_wanipconnection={},
change_account=0,
rollbacklist=rollbacklist)
# 将工单脚本执行结果返回到OBJ的结果中
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
# 如果执行失败,统一调用回退机制(缺省是关闭的)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"工单:%s执行结束\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
# 第二条PPPoE WAN连接开通
sleep(2)
ret, ret_data = WLANMultiWANSetUP(obj, sn, WANEnable_Switch,
DeviceType, 'PPPoE',
PVC_OR_VLAN2, dict_wanlinkconfig2,
dict_wanpppconnection2,
dict_wanipconnection={},
change_account=0,
rollbacklist=rollbacklist)
# 将工单脚本执行结果返回到OBJ的结果中
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
# 如果执行失败,统一调用回退机制(缺省是关闭的)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"工单:%s执行结束\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
# 第三条PPPoE_Bridged WAN连接开通
sleep(2)
ret, ret_data = WLANMultiWANSetUP(obj, sn, WANEnable_Switch,
DeviceType, 'PPPoE_Bridged',
PVC_OR_VLAN3, dict_wanlinkconfig3,
dict_wanpppconnection3,
dict_wanipconnection={},
change_account=0,
rollbacklist=rollbacklist)
# 将工单脚本执行结果返回到OBJ的结果中
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
# 如果执行失败,统一调用回退机制(缺省是关闭的)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"工单:%s执行结束\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
# 第四条PPPoE_Bridged WAN连接开通
sleep(2)
ret, ret_data = WLANMultiWANSetUP(obj, sn, WANEnable_Switch,
DeviceType, 'PPPoE_Bridged',
PVC_OR_VLAN4, dict_wanlinkconfig4,
dict_wanpppconnection4,
dict_wanipconnection={},
change_account=1,
rollbacklist=rollbacklist)
# 将工单脚本执行结果返回到OBJ的结果中
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
# 如果执行失败,统一调用回退机制(缺省是关闭的)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"工单:%s执行结束\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
if __name__ == '__main__':
log_dir = g_prj_dir
log.start(name="nwf", directory=log_dir, level="DebugWarn")
log.set_file_id(testcase_name="tr069")
obj = MsgWorklistExecute(id_="1")
obj.sn = "2013012901"
dict_data= {"WEPKeyIndex":("1","1"),"WEPEncryptionLevel":("40-bit","2"),
"WEPKey":("0123456789","3"),
"PVC_OR_VLAN1":("71","4"),"Username1":("TW71","5"),
"Password1":("TW71","6"),
"PVC_OR_VLAN2":("72","7"),"Username2":("TW72","8"),
"Password2":("TW72","9"), "PVC_OR_VLAN3":("73","10"),
"PVC_OR_VLAN4":("74","11"),"WANEnable_Switch":("1","12")}
obj.dict_data = dict_data
try:
ret = test_script(obj)
if ret == ERR_SUCCESS:
print u"测试成功"
else:
print u"测试失败"
print "****************************************"
print obj.dict_ret["str_result"]
except Exception, e:
print u"测试异常"
| [
"zhaojunhhu@gmail.com"
] | zhaojunhhu@gmail.com |
b8fa076cc99e5cf3599e0e0a1304b8534441495f | 2a83c0a7e35ebbf11c54d0c52393508260f2d386 | /NumberGame/propagators.py | 92476b49fe0e755fdac94dc739eb600e74b420b2 | [] | no_license | YijingChen622/GameAI | 67ed53254a80c94ff773df2eea3b052411e1fec1 | 5eeb819cad25e6af7ccd0118ca6ed01f18ce3d2e | refs/heads/master | 2023-04-06T04:42:20.873107 | 2021-04-07T12:05:14 | 2021-04-07T12:05:14 | 355,518,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,559 | py | #Look for #IMPLEMENT tags in this file. These tags indicate what has
#to be implemented to complete problem solution.
'''This file will contain different constraint propagators to be used within
bt_search.
propagator == a function with the following template
propagator(csp, newly_instantiated_variable=None)
==> returns (True/False, [(Variable, Value), (Variable, Value) ...]
csp is a CSP object---the propagator can use this to get access
to the variables and constraints of the problem. The assigned variables
can be accessed via methods, the values assigned can also be accessed.
newly_instaniated_variable is an optional argument.
if newly_instantiated_variable is not None:
then newly_instantiated_variable is the most
recently assigned variable of the search.
else:
progator is called before any assignments are made
in which case it must decide what processing to do
prior to any variables being assigned. SEE BELOW
The propagator returns True/False and a list of (Variable, Value) pairs.
Return is False if a deadend has been detected by the propagator.
in this case bt_search will backtrack
return is true if we can continue.
The list of variable values pairs are all of the values
the propagator pruned (using the variable's prune_value method).
bt_search NEEDS to know this in order to correctly restore these
values when it undoes a variable assignment.
NOTE propagator SHOULD NOT prune a value that has already been
pruned! Nor should it prune a value twice
PROPAGATOR called with newly_instantiated_variable = None
PROCESSING REQUIRED:
for plain backtracking (where we only check fully instantiated
constraints)
we do nothing...return true, []
for forward checking (where we only check constraints with one
remaining variable)
we look for unary constraints of the csp (constraints whose scope
contains only one variable) and we forward_check these constraints.
for gac we establish initial GAC by initializing the GAC queue
with all constaints of the csp
PROPAGATOR called with newly_instantiated_variable = a variable V
PROCESSING REQUIRED:
for plain backtracking we check all constraints with V (see csp method
get_cons_with_var) that are fully assigned.
for forward checking we forward check all constraints with V
that have one unassigned variable left
for gac we initialize the GAC queue with all constraints containing V.
var_ordering == a function with the following template
var_ordering(csp)
==> returns Variable
csp is a CSP object---the heuristic can use this to get access to the
variables and constraints of the problem. The assigned variables can be
accessed via methods, the values assigned can also be accessed.
var_ordering returns the next Variable to be assigned, as per the definition
of the heuristic it implements.
'''
def prop_BT(csp, newVar=None):
    '''Plain backtracking "propagation": do no propagation at all, and only
    verify constraints touching newVar whose scope is fully assigned.
    Returns (ok, pruned) where pruned is always the empty list.'''
    if not newVar:
        # Pre-search call: nothing has been assigned, nothing to check.
        return True, []
    for constraint in csp.get_cons_with_var(newVar):
        if constraint.get_n_unasgn() != 0:
            # Still has unassigned variables: cannot be checked yet.
            continue
        assigned_values = [v.get_assigned_value() for v in constraint.get_scope()]
        if not constraint.check(assigned_values):
            return False, []
    return True, []
def prop_FC(csp, newVar=None):
    '''Forward checking: for every constraint with exactly one unassigned
    variable, prune the values of that variable that have no support.
    Returns (ok, pruned) where pruned lists every (Variable, Value) pair
    removed, so bt_search can restore them on backtrack. ok is False as
    soon as some variable's domain becomes empty (dead-end).'''
    # Only constraints involving newVar need re-checking after an
    # assignment; with no newVar this is the initial full pass.
    constraints = csp.get_cons_with_var(newVar) if newVar else csp.get_all_cons()
    pruned = []
    for constraint in constraints:
        if constraint.get_n_unasgn() != 1:
            continue
        var = constraint.get_unasgn_vars()[0]
        for val in var.cur_domain():
            if constraint.has_support(var, val):
                continue
            # No satisfying assignment uses this value: prune it.
            pruned.append((var, val))
            var.prune_value(val)
            if var.cur_domain_size() == 0:
                # Domain wipe-out: signal a dead-end to the caller.
                return False, pruned
    return True, pruned
def prop_GAC(csp, newVar=None):
    '''GAC propagation. With newVar=None, establish initial GAC over every
    constraint of the csp; otherwise enforce GAC starting from the
    constraints containing newVar. Returns (ok, pruned) where pruned lists
    every (Variable, Value) pair removed; ok is False on a domain wipe-out.'''
    # Seed the GAC queue.
    queue = csp.get_cons_with_var(newVar) if newVar else csp.get_all_cons()
    pruned = []
    while queue:
        constraint = queue.pop(0)
        for var in constraint.get_scope():
            for val in var.cur_domain():
                if constraint.has_support(var, val):
                    continue
                # Unsupported value: prune it and remember it for restore.
                pruned.append((var, val))
                var.prune_value(val)
                if var.cur_domain_size() == 0:
                    # Dead-end detected.
                    return False, pruned
                # var's domain shrank, so every constraint over var must
                # be re-checked; re-queue the ones not already pending.
                for affected in csp.get_cons_with_var(var):
                    if affected not in queue:
                        queue.append(affected)
    return True, pruned
def ord_mrv(csp):
    '''Return the next Variable to assign according to the Minimum
    Remaining Values heuristic: the unassigned variable with the smallest
    current domain.

    Improvement over the original: a single O(n) min() pass instead of
    building and sorting a (variable, size) list (O(n log n)). Ties are
    broken identically — min(), like a stable sort, keeps the first
    minimal variable in csp.get_all_unasgn_vars() order.
    '''
    return min(csp.get_all_unasgn_vars(), key=lambda var: var.cur_domain_size())
| [
"chenyijing0622@163.com"
] | chenyijing0622@163.com |
cae067658d839dcdf28dcaed240a09dd92cd5a67 | 0c770dd944b531f75f0c204ffd93a7aee9f82c6d | /myapp.py | 650a2efd52a3dc63f871147bae708acb548b93b3 | [] | no_license | flowertigertail/test2 | 723066be00cd48139e1289ed106491302c819509 | a18418f11c41d0be60e08c6b09a7798f47a01178 | refs/heads/main | 2023-01-24T12:34:40.717148 | 2020-12-09T10:54:04 | 2020-12-09T10:54:04 | 319,714,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from flask import Flask, redirect, url_for, request
# Minimal Flask application exposing a single POST endpoint.
app = Flask(__name__)
# POST /change: redirect the client to the URL supplied in the form
# field "url".
# NOTE(review): redirecting to a caller-supplied URL is an open-redirect
# risk — consider validating the target against an allow-list.
@app.route('/change', methods = ['POST'])
def change():
    return redirect(request.form['url'])
if __name__ == '__main__':
    # Run the development server with debug mode disabled.
    app.run(debug=False)
| [
"noreply@github.com"
] | flowertigertail.noreply@github.com |
b039221976eb40e60a708a5b94fa29dbcb24495d | af9ea83d9d5ab6c979c2ef699748c6ed3e350d8b | /prac_07/programming_language.py | 92dc75f8d785a8bb945b0f146ca50b4ce44d6311 | [] | no_license | BelleMardy/all_pracs | f24af67d3e3448cd63883944b039d7a5d2fa09b4 | ad250a3d07036eba556e13f341590fbcd100bb3d | refs/heads/master | 2021-01-19T17:02:10.097914 | 2017-10-04T23:53:43 | 2017-10-04T23:53:43 | 101,037,408 | 0 | 0 | null | 2017-09-18T00:40:42 | 2017-08-22T07:59:14 | Python | UTF-8 | Python | false | false | 1,695 | py | """
CP1404/CP5632 Practical - Suggested Solution
Programming Language class with tests.
"""
# Fragments reused when composing the __str__ summary line.
R = "Reflection"
T = "Typing"
F = "First appeared in"


class ProgrammingLanguage:
    """Holds descriptive facts about a single programming language."""

    def __init__(self, name, typing, reflection, year):
        """Store the language's name, typing discipline ("Static" or
        "Dynamic"), reflection support flag and year of first appearance."""
        self.name = name
        self.typing = typing
        self.reflection = reflection
        self.year = year

    def __str__(self):
        """Return a one-line, human-readable summary of this language."""
        template = "{}, {} {}, {} = {}, {} {}."
        return template.format(self.name, self.typing, T, R, F,
                               self.reflection, self.year)

    def is_dynamic(self):
        """Return True when this language is dynamically typed."""
        return self.typing == "Dynamic"
# def run_tests():
# """Run simple tests/demos on ProgrammingLanguage class."""
# java = ProgrammingLanguage("Java", "Static", True, 1995)
# c_plus_plus = ProgrammingLanguage("C++", "Static", False, 1983)
# python = ProgrammingLanguage("Python", "Dynamic", True, 1991)
# visual_basic = ProgrammingLanguage("Visual Basic", "Static", False, 1991)
# ruby = ProgrammingLanguage("Ruby", "Dynamic", True, 1995)
#
#
# languages = [java, c_plus_plus, python, visual_basic, ruby]
# print(languages)
#
# print("The dynamically typed languages are:")
# for language in languages:
# if language.is_dynamic():
# print("Dynamic {}.".format(language.name))
# else:
# print("Not dynamic {}.".format(language.name))
#
#
# if __name__ == "__main__":
# run_tests() | [
"belle.belle@my.jcu.edu.au"
] | belle.belle@my.jcu.edu.au |
4de2647970b30aaf19b15a211f6ede47721708ae | b22ce7abc1f22969625200ca6fefb2d567806c91 | /testdemo/urls.py | 5ecc1872f97a7303451b9ec3cc7f88578ad46b0d | [] | no_license | GurusirTCR/test | 6d24ee8e3db819fc1f556fa4535d7c4ecd22df57 | 40ec4e7677d5794062740e98d19cef556c71ff8d | refs/heads/master | 2023-06-05T08:37:19.121138 | 2021-06-24T08:01:38 | 2021-06-24T08:01:38 | 379,855,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """testdemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from myapp import views
# URL routes for the project: the Django admin site, plus the app's
# index view served at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index),
]
| [
"tcrguru0@gmail.com"
] | tcrguru0@gmail.com |
cc47c3164ea6c1538720c0b5b3a5eb55b18ab862 | 7838895d7d8b9cb5c0db4ac788b76d3150b6322f | /wavenet.py | eaaeccdd1c93bdfdbeea50dfdab42e8268afc457 | [] | no_license | MindofMind/Music | 50c5e8fe4fa440d1375209ccf3467c9cd93a5437 | 25fc067190fb1ded0e7591f55a97a8a2bd12b8c6 | refs/heads/master | 2022-11-19T09:06:42.684143 | 2020-07-24T10:25:29 | 2020-07-24T10:25:29 | 282,181,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | K.clear_session()
def simple_wavenet():
    """Build a small WaveNet-style Keras model: stacked causal dilated
    Conv1D layers (dilation doubling per layer) followed by dense layers
    and a softmax over the vocabulary.

    NOTE(review): relies on module-level globals ``no_of_timesteps`` and
    ``n_vocab`` (plus Keras layers and numpy) being defined/imported
    elsewhere in the file — confirm before reuse.
    """
    no_of_kernels=64
    # Presumably sized so the dilated stack's receptive field roughly
    # covers no_of_timesteps — TODO confirm.
    num_of_blocks= int(np.sqrt(no_of_timesteps)) - 1 #no. of stacked conv1d layers
    model = Sequential()
    for i in range(num_of_blocks):
        # Dilation doubles each layer (1, 2, 4, ...); 'causal' padding keeps
        # each output dependent only on past inputs.
        model.add(Conv1D(no_of_kernels,3,dilation_rate=(2**i),padding='causal',activation='relu'))
    model.add(Conv1D(1, 1, activation='relu', padding='causal'))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(n_vocab, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    return model
"noreply@github.com"
] | MindofMind.noreply@github.com |
1b33a0d2e211750824ab74b353f3eec8b0a32f06 | 6e2dfbf50c1def19cd6ae8e536a2ddb954a5ad63 | /predict.py | 3e96f112a8763f74066d46caa470404c48356c44 | [
"BSD-3-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | boshining/NeuronBlocks | 9d71f087772eb17c3a4130d0374818cfd80d976f | 74fbb8658fb3f1cffea5c9bc84b2a1da59c20dd9 | refs/heads/master | 2020-05-27T16:24:10.244042 | 2019-08-06T07:37:55 | 2019-08-06T07:37:55 | 188,699,703 | 0 | 0 | MIT | 2019-08-06T08:19:55 | 2019-05-26T15:23:06 | Python | UTF-8 | Python | false | false | 3,096 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from settings import ProblemTypes, version
import os
import argparse
import logging
from ModelConf import ModelConf
from problem import Problem
from LearningMachine import LearningMachine
def main(params):
    """Run prediction with a previously trained model.

    Loads the model configuration, restores the saved Problem definition,
    rebuilds the LearningMachine with the saved weights, and writes the
    predictions for ``conf.predict_data_path`` to
    ``conf.predict_output_path``.

    Args:
        params: parsed command-line arguments (see the argparse setup in
            ``__main__``); at minimum ``conf_path`` and ``mode`` are used.

    Raises:
        Exception: if the saved problem file does not exist, or if a
            target named in ``predict_fields_post_check`` is missing from
            the training-time output dictionary.
    """
    conf = ModelConf('predict', params.conf_path, version, params, mode=params.mode)
    # Rebuild the Problem (problem type, input types, tokenization options)
    # from the configuration; its saved state is loaded from disk below.
    problem = Problem('predict', conf.problem_type, conf.input_types, None,
        with_bos_eos=conf.add_start_end_for_seq, tagging_scheme=conf.tagging_scheme, tokenizer=conf.tokenizer,
        remove_stopwords=conf.remove_stopwords, DBC2SBC=conf.DBC2SBC, unicode_fix=conf.unicode_fix)
    if os.path.isfile(conf.saved_problem_path):
        problem.load_problem(conf.saved_problem_path)
        logging.info("Problem loaded!")
        logging.debug("Problem loaded from %s" % conf.saved_problem_path)
    else:
        raise Exception("Problem does not exist!")
    # Each post-check entry has the form "<field>@<target>"; verify the
    # referenced target exists in the training-time output dictionary.
    if len(conf.predict_fields_post_check) > 0:
        for field_to_chk in conf.predict_fields_post_check:
            field, target = field_to_chk.split('@')
            if not problem.output_dict.has_cell(target):
                raise Exception("The target %s of %s does not exist in the training data." % (target, field_to_chk))
    # Build the model without fresh initialization, then restore weights
    # from the previously trained checkpoint.
    lm = LearningMachine('predict', conf, problem, vocab_info=None, initialize=False, use_gpu=conf.use_gpu)
    lm.load_model(conf.previous_model_path)
    logging.info('Predicting %s with the model saved at %s' % (conf.predict_data_path, conf.previous_model_path))
    lm.predict(conf.predict_data_path, conf.predict_output_path, conf.predict_file_columns, conf.predict_fields)
    logging.info("Predict done! The predict result: %s" % conf.predict_output_path)
if __name__ == "__main__":
    # Command-line entry point: collect the prediction options, validate
    # the required configuration path, then run the prediction.
    arg_parser = argparse.ArgumentParser(description='Prediction')
    arg_parser.add_argument("--conf_path", type=str, help="configuration path")
    arg_parser.add_argument("--predict_data_path", type=str, help='specify another predict data path, instead of the one defined in configuration file')
    arg_parser.add_argument("--previous_model_path", type=str, help='load model trained previously.')
    arg_parser.add_argument("--predict_output_path", type=str, help='specify another prediction output path, instead of conf[outputs][save_base_dir] + conf[outputs][predict_output_name] defined in configuration file')
    arg_parser.add_argument("--log_dir", type=str)
    arg_parser.add_argument("--batch_size", type=int, help='batch_size of each gpu')
    arg_parser.add_argument("--mode", type=str, default='normal', help='normal|philly')
    arg_parser.add_argument("--force", type=bool, default=False, help='Allow overwriting if some files or directories already exist.')
    arg_parser.add_argument("--disable_log_file", type=bool, default=False, help='If True, disable log file')
    arg_parser.add_argument("--debug", type=bool, default=False)
    cli_params, _ = arg_parser.parse_known_args()
    assert cli_params.conf_path, 'Please specify a configuration path via --conf_path'
    if cli_params.debug is True:
        # Imported lazily so normal runs do not need the debugger module.
        import debugger
    main(cli_params)
"shoulinjun@126.com"
] | shoulinjun@126.com |
f994c3294b2ebe1af566741258c0d6bcdee0c87e | 97eaf6c173ba6a329656d3b7446a9dbe026ff8eb | /python/idddb-venv/bin/pip3.5 | cf587db4ac51bda8e580ac4a6f5cbfbd1009b496 | [] | no_license | cvp21594/IDDDB | fc1e2ac10ab4983eecbcbcd49dc37089c6af68df | 12cb7a3d10df8c9766fb3eb1b2485fd831d6f265 | refs/heads/master | 2021-01-01T03:41:48.668258 | 2016-05-09T20:18:57 | 2016-05-09T20:18:57 | 56,807,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | 5 | #!/home/caleb/code/umkc/IDDDB/python/idddb-venv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Auto-generated console entry point for pip inside a virtualenv.
    # Strip setuptools wrapper suffixes ("-script.pyw" / ".exe") from
    # argv[0] so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    # Delegate to pip's CLI and exit with its status code.
    sys.exit(main())
| [
"caleb@chasecaleb.com"
] | caleb@chasecaleb.com |
753c80af4b7220ecbf7d03bf6d09ba99f4bc4f6f | f06e24e81d4ec23e658000d0f2e078e94a2cefc3 | /review.py | 71ace411b1f6a016836db8557afb5a1a586f67d4 | [] | no_license | 1798317135/note | ec64061671ec76fff1b95c58e082e247dd2986e0 | f0097241a19b437fd24695d20f006c38514be9ca | refs/heads/master | 2022-04-10T21:23:04.532601 | 2020-03-22T23:53:48 | 2020-03-22T23:53:48 | 174,942,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,322 | py | ######################### ubuntu复习 ###############################
# ************ 8.2 号
# ------ 基本常识
# / 根目录
# /root 超级管理员目录 系统文件所在目录
# /boot 系统启动文件目录
# /bin 存放二进制文件
# /sbin 存放系统二进制文件
# /etc 存放系统配置文件
# /dev 存放设备有关的文件 驱动等
# /home 用户目录 存放每个用户的个人数据 除了root用户
# /tmp 临时文件目录
# /usr 存放个人的程序和配置文件等 apt 工具会管理这个目录
# /usr/bin 存放个人程序
# /usr/sbin 存放超级管理的管理层序
# /usr/share 存放共享数据
# /usr/lib 存放程序的函数库文件
# /usr/local 存放手动安装的软件 里面有usr同样的目录结构
# /usr/src 用户存放下载的安装包等
# /opt 存放实验的程序 安装到此目录的程序和配置文件会在统一文件下
# ~ 用户的家目录
# . 开头的文件是隐藏文件
# . 当前目录
# .. 上级目录
# - 可以用cd 在最近使用的两个文件夹切换
# ./文件 执行文件
#
#shutdown 关闭或者重启电脑
# -r 重新启动 now 现在关闭
# pwd 查看当前路径
# who 查看当前登陆的用户列表
# whoami 查看当前登陆的用户
# which 查看执行程序所在的目录路径
# exit 退出用户
# su 用户名 切换用户不切换当前用户
# su - 用户名 切换用户并且切换到当前目录的家目录
# su 切换到超级管理员用户
# top 查看cpu的使用情况
# ps -aux 查看进程的详细信息
# date 查看时间
# cal -y 查看日历
#
#
# ls 查看文件夹目录
# -a 显示隐藏文件
# -l 列表的方式显示
# -h 现实文件大小
# * 任意字符 任意长度
# ? 单个字符
# 【】匹配其中的任意一个字符
# touch 创建文件 如果存在修改日期
# mkdir 创建目录
# -p 第归创建目录
# rm 删除文件
# -r 删除目录
# -f 强制删除不提示
# tree 树桩图结构目录
# -d 之显示目录
# -f 之显示文件
# cp 复制民命令
# -i 覆盖提示
# -r 复制目录
# mv 移动文件或者目录 可以改名
# -ℹ 覆盖提示
# cat 查看文件内容
# -n 显示所有行的行号
# -b 不显示空行的行号
# more 分批查看文件内容
# grep 搜索文本
# -n 显示搜索的行号
# -v 显示其他
# ssh -p 22 user@122.117.135 远程连接
# -p 端口号
#
# scp -P 22 本地文件 user@127.4154.45:远程路径
# scp -P 22 -r user@127.0.0.1:远程目录 本地路径
# -r 如果复制文件
# d 代表目录 -代表为文件
# r 可读权限 x 可执行权限 w 可写权限
# chmod -R 777 文件/目录 来增加或者减少文件目录的权限
# chown -R user:group 文件或目录 修改文件或这么目录的拥有者
# chgrp -R 组名 文件/或者目录 修改文件和目录的组
# sudo groupadd 组名 添加组
# cat etc/group 确认组
# sudo groupdel 组名 删除组
# sudo groups 用户名 看看用户加入的所有组
# sudo gpasswd -a user group 添加用户到指定组
# sudo gpasswd -d user group 移除组里面的指定用户
# sudo useradd -m -g 所属组 用户名 创建用户 -m 会创建用户的家目录
# sudo passwd 用户名 给新用户设置密码
# cat /etc/passwd 储存的是用户信息
# sudo userdel 用户名 删除用户
# sudo usermod -g 主组 用户名 修改用户的主组
# sudo usermod -G 用户名 添加用户的附加组
# sudo usermod -s /bin/bash 用户名 添加用户的shell窗口类型
# sudo userdel -rf 强制删除用户和用户的家目录
# find 文件路径 -name 文件 在制定目录下查找指定文件
# ln -s 源文件(绝对路径) 阮连接路径 建立一个文件的快捷方式
# ln 源文件 硬链接地质 建立一个文件名的复制
# tar -vcf *.tar 创建一个tar包
# tar -xcf 包名 解开一个tar包
# tar -zvcf *.tar.gz 创建一gzip压缩的tar包
# tar -jcvf *.tar.zb2
# tar -jzvf
# vim
# 末尾模式
# x 保存退出
# q 退出
# w 保存
# ! 强制执行
# 编辑模式
# i 光标移动到字符前面
# I 光标移动行首
# a 移动到字符后面
# A 光标移动到行未
# o 光标下面创建一个空行
# O 光标在上面创建一个空行
# dd 删除光标所在行
# yy 复制
# p 粘贴
# vim file +: 打开文件并且光标在第一行
# vim file + 打开文件并且光标在末行
# vim file +/.. 打开文件光标移动到查找到字符的行
# vim -o file1 file2 file3 分屏显示多个文件
# gg 光标跳到文件首部
# G 光标跳到文件尾部尾
# 0 光标移动到行首
# $ 光标移动到行第一个非空白字符
# ^ 光标移动到行的最后一个空白字符
# w 向后面移动
# b 向前
# e 向后移动一个单词的距离
# shift + >> 向
# /sdf\c 查找字符 \c是不分大小写
# * 可以快速查找光标的单词
# /%s/查找字符/替换字符/gc 全部查找替换 c确认
########################## git #################
# git init 初始化仓库
# git status 查看状态
# git add * 跟踪文件和提交文件到暂存
# git rm -r 删除本地文件
# git rm -r --cached 删除工作区 不删除文件
# git checkout -- file 撤销工作区的改动
# git reset HEAD file 撤销缓存区
# git commit -a -m '1.0.0' 创建版本库
# git commit --amend -m 重新提交
# git reset --hard HEAD~1 回退到第一个版本
# git reset --hard tab或者hash
# git deff HEAD HADE^ -- 文件 对比这个版本和上一版本的区别
# git tag -a -m '辅助标签'
# git tag '轻量级标签'
# git tag 查看标签
# git tag -l 匹配查看标签
# git log 查看提交历史
# git log --pretty=online 每个显示一行
# git log --graph 显示合并路线
# git reflog 查看操作记录
# git branch 查看分支
# git branch -a 查看远程分支
# git branch -v 查看此分支
# git branch -d 删除分支
# git branch -vv 查看分支和提交历史
# git branch --delete 删除远程分支
# git checkout -b 创建并切换到分支
# git checkout -d
# git merge 合并分支
# git merge --no-ff -m 强制自动合并
# git remote -v 查看远程仓库
# git remote add url 添加远程仓库
# git fetch 拉取远程仓库
# git push 推送本地仓库
# git remote rename 修改远程仓库名字
# git pull 拉取远程并合并
#
| [
"1798317135"
] | 1798317135 |
1af74922d9f3e1042e0410f063163cfe07720b64 | 30dd6d0f4214d74e4d32be42a2c8a53e90bacc81 | /1. Elementary Data Structures/Stacks/LinkedStack.py | 0759617c47155fecece3d3b8176680011072e1fe | [
"MIT"
] | permissive | palashsharma891/Algorithms-in-Python | 0167b753dce243000ac5a902419432884f45c03e | 2f35be0af93046f2eabdb28eb4ed697fe47293cd | refs/heads/master | 2023-07-05T12:02:25.231331 | 2021-08-17T04:58:40 | 2021-08-17T04:58:40 | 267,847,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 10:54:10 2020
@author: Palash
"""
class Node(object):
    """A single element of a singly linked list."""

    def __init__(self, data):
        """Store the payload and start with no successor node."""
        self.data = data
        self.next = None


class LinkedStack(object):
    """Represents the Stack ADT, implemented using a linked list."""

    def __init__(self):
        """Create an empty stack."""
        self.head = None   # top-most node, or None when empty
        self.size = 0      # number of elements currently stored

    def __len__(self):
        """Return the number of elements in the stack."""
        return self.size

    def is_empty(self):
        """Return True if the stack is empty, False otherwise."""
        return self.size == 0

    def push(self, data):
        """Add an element on top of the stack."""
        new_node = Node(data)
        new_node.next = self.head
        self.head = new_node
        self.size += 1

    def top(self):
        """Return (without removing) the element last pushed.

        Raises:
            ValueError: if the stack is empty.
        """
        if self.is_empty():
            raise ValueError('Stack is empty')
        return self.head.data

    def pop(self):
        """Remove and return the data of the top element.

        Bug fixes versus the original version:
        * ``self.size`` is now decremented, so ``len()`` and
          ``is_empty()`` stay consistent after pops.
        * The stored data is returned (matching ``top`` and this
          method's documented contract) instead of the internal Node.

        Raises:
            ValueError: if the stack is empty.
        """
        if self.is_empty():
            raise ValueError('Stack is empty')
        popped = self.head
        self.head = self.head.next  # unlink the current head node
        self.size -= 1
        return popped.data

    def printStack(self):
        """Return a list of the elements from top to bottom.

        (Despite the name, this does not print; it returns the list.)
        """
        temp = self.head
        stack = []
        while temp:
            stack += [temp.data]
            temp = temp.next
        return stack
if __name__ == '__main__':
    # Quick manual demo of the stack operations.
    stack_obj = LinkedStack()
    stack_obj.push(5)
    stack_obj.push(234)
    print(stack_obj.is_empty())    # False
    print(stack_obj.printStack())  # [234, 5] (top to bottom)
    print(stack_obj.top())         # 234
    stack_obj.push(546)
    stack_obj.push(7)
    print(stack_obj.printStack())  # [7, 546, 234, 5]
    stack_obj.pop()
    print(stack_obj.printStack())  # [546, 234, 5]
| [
"noreply@github.com"
] | palashsharma891.noreply@github.com |
edd46f3a03f275eeb414e48277f29814d3c9793d | 2c5b98b0119e1c16de4537ce3ac04997f048bca2 | /build/lib/src/ttsRogue/redditThief.py | 3b6d223601aa36a7a88701124d7fe0fe3112490b | [
"MIT"
] | permissive | AlexBacho/ezyt | 08a60d12865eadb8733f1bf2752b9e98011c891a | 5d9526d7901270ed5d97adf73ed0df9b21bf0387 | refs/heads/master | 2023-04-19T19:33:32.657825 | 2021-05-23T16:14:20 | 2021-05-23T16:14:20 | 289,915,144 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,972 | py | import praw
import subprocess
import logging
from pathlib import Path
from .errors import ProcessingError
from .utils import get_rendered_template, run_subprocess
"""
class Submission(SubmissionListingMixin, UserContentMixin, FullnameMixin, RedditBase):
A class for submissions to reddit.
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
necessarily complete.
=========================== ===============================================
Attribute Description
=========================== ===============================================
``author`` Provides an instance of :class:`.Redditor`.
``clicked`` Whether or not the submission has been clicked
by the client.
``comments`` Provides an instance of
:class:`.CommentForest`.
``created_utc`` Time the submission was created, represented in
`Unix Time`_.
``distinguished`` Whether or not the submission is distinguished.
``edited`` Whether or not the submission has been edited.
``id`` ID of the submission.
``is_original_content`` Whether or not the submission has been set
as original content.
``is_self`` Whether or not the submission is a selfpost
(text-only).
``link_flair_template_id`` The link flair's ID, or None if not flaired.
``link_flair_text`` The link flair's text content, or None if not
flaired.
``locked`` Whether or not the submission has been locked.
``name`` Fullname of the submission.
``num_comments`` The number of comments on the submission.
``over_18`` Whether or not the submission has been marked
as NSFW.
``permalink`` A permalink for the submission.
``poll_data`` A :class:`.PollData` object representing the
data of this submission, if it is a poll
submission.
``score`` The number of upvotes for the submission.
``selftext`` The submissions' selftext - an empty string if
a link post.
``spoiler`` Whether or not the submission has been marked
as a spoiler.
``stickied`` Whether or not the submission is stickied.
``subreddit`` Provides an instance of :class:`.Subreddit`.
``title`` The title of the submission.
``upvote_ratio`` The percentage of upvotes from all votes on the
submission.
``url`` The URL the submission links to, or the
permalink if a selfpost.
=========================== ===============================================
"""
REDDIT_TEMPLATE_FILEPATH = "resources/redditTemplate"
class RedditThief:
    """Scrapes reddit submissions via PRAW and renders them as assets."""

    def __init__(self, cfg):
        """Build the PRAW client from the app configuration.

        Args:
            cfg: configuration object exposing ``cfg.reddit.*`` settings
                (client credentials, user agent, dedup file path).
        """
        self.cfg = cfg
        self.reddit = praw.Reddit(
            client_id=cfg.reddit.client_id,
            # BUG FIX: the secret was previously set to ``client_id``,
            # which makes authentication fail.
            client_secret=cfg.reddit.client_secret,
            user_agent=cfg.reddit.user_agent,
        )
        self.already_scraped_file = cfg.reddit.already_scraped_file

    def process(self, url):
        # TODO: direct-URL processing not implemented yet.
        pass

    def auto_scrape(self, subreddit):
        """Scrape the first not-yet-seen, non-stickied hot submission."""
        already_scraped = self._get_already_scraped()
        for submission in self.reddit.subreddit(subreddit).hot():
            if submission.id not in already_scraped and not submission.stickied:
                # BUG FIX: record the id (the helper existed but was never
                # called), so subsequent runs skip this submission.
                self._write_submission_into_scraped_file(submission)
                return self.scrape(submission)

    def scrape(self, submission):
        return SubmissionData(self.cfg, submission).get_rendered_comments()

    def _get_already_scraped(self):
        """Return the set of submission ids scraped on previous runs.

        BUG FIX: lines are stripped; previously the trailing newline kept
        on each line made every membership test fail, so nothing was ever
        skipped. A set also makes the lookups O(1).
        """
        with open(self.already_scraped_file) as f:
            return {line.strip() for line in f if line.strip()}

    def _write_submission_into_scraped_file(self, submission):
        # Append-mode write; one id per line.
        with open(self.already_scraped_file, "a+") as f:
            f.write(f"{submission.id}\n")
class SubmissionData:
    """Renders a reddit submission (OP text plus top-level comments) into
    text images and TTS audio under a per-submission working directory.

    ``get_rendered_comments`` returns an iterable of tuples
    ``(praw_object, image_path, tts_path)``; index 0 is the submission
    itself (the OP), the rest are its comments.
    """

    def __init__(self, cfg, submission):
        self.cfg = cfg
        self.html_to_img_bin_path = cfg.reddit.html_to_img_bin_path
        self.working_dir = f"{cfg.common.working_dir_root}/reddit/{submission.id}"
        self.submission = submission

    def _parts(self):
        """Return the OP submission followed by its comments."""
        return [self.submission] + list(self.submission.comments)

    def get_rendered_comments(self, render_text=True, generate_tts=True):
        Path(self.working_dir).mkdir(exist_ok=True, parents=True)
        # BUG FIX: the original concatenated ``submission.selftext`` (a str)
        # with ``submission.comments`` (not a str), which raises TypeError.
        # Use the same OP-first ordering the rendered lists use.
        original_comments = self._parts()
        rendered_text = self.get_rendered_text_from_submission() if render_text else []
        generated_tts = self.get_generated_tts_from_submission() if generate_tts else []
        return zip(original_comments, rendered_text, generated_tts)

    def get_rendered_text_from_submission(self):
        # BUG FIX: the part counter now increments (previously every item
        # was rendered to the same "<dir>/0" path), and the OP is rendered
        # from the submission object instead of ``self`` (a SubmissionData).
        images = []
        for part, comment in enumerate(self._parts()):
            images.append(self._get_rendered_text_from_comment(comment, part))
        return images

    def _get_rendered_text_from_comment(self, comment, part):
        """Render one comment's template to an image; return the image path."""
        rendered_template = self._get_rendered_template_from_comment(comment)
        args = [
            self.html_to_img_bin_path,
            rendered_template,
            f"{self.working_dir}/{part}",
        ]
        stdout = run_subprocess(args)
        if not stdout:
            logging.error(f"Rendering of comment: {comment.id} failed.")
        return f"{self.working_dir}/{part}"

    def _get_rendered_template_from_comment(self, comment):
        """Write the filled-in reddit template for one comment to disk."""
        args = {
            "author": comment.author,
            "created_utc": comment.created_utc,
            "score": comment.score,
            # Comments keep their text in ``body``; submissions in
            # ``selftext`` — support both so the OP renders too.
            "body": getattr(comment, "body", None) or getattr(comment, "selftext", ""),
        }
        filepath = f"{self.working_dir}/{comment.id}.txt"
        with open(filepath, "w+") as f:
            f.write(get_rendered_template(REDDIT_TEMPLATE_FILEPATH, args))
        return filepath

    def get_generated_tts_from_submission(self):
        # Mirror the rendered-text ordering: OP first, then comments.
        tts = []
        for comment in self._parts():
            tts.append(self._get_generated_tts_from_text(comment))
        return tts

    def _get_generated_tts_from_text(self, text):
        # TODO: TTS generation not implemented yet.
        pass
| [
"alex.sani.bacho@gmail.com"
] | alex.sani.bacho@gmail.com |
db2d51e68f75fd54dbe2bf01c8c4abef6b98d7a0 | b06552e9fd296b951e4e9de33ba2ac1d2a59ef0f | /googlecal.py | b7c9b753be34048918f60d6d5c69690a406c98f8 | [] | no_license | adotcruz/Epsilon | de473b0e80ad01b9eb475504b17c036a0e2bf540 | e4a4433001465481909e27f477774ab92403c3b0 | refs/heads/master | 2021-03-19T16:50:04.764124 | 2016-10-23T14:09:14 | 2016-10-23T14:09:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | import sys
import time
import json
from flask import Flask, jsonify, abort, request
#from pprint import pprint
# Sample busy intervals for the __main__ demo below: (start, end) pairs in
# epoch seconds (all in late October 2016).
example = [(1477090800, 1477242000), (1477234800, 1477238400), \
           (1477263600, 1477263600), (1477265400, 1477270800), \
           (1477270800, 1477274400), (1477270800, 1477274400), \
           (1477279800, 1477279800), (1477335600, 1477340100), \
           (1477346400, 1477350900), (1477355400, 1477359000)]
def main(eventslist, neweventlen, deadline):
    '''
    Find the earliest free half-hour-aligned slot for a new event.

    Parameters:
        eventslist: list of (start, end) tuples for existing events between
            now and the deadline, in epoch seconds.
        neweventlen: the duration of the new event, in seconds.
        deadline: the due date/time, in epoch seconds.

    Returns:
        A flask JSON response carrying the start and end times (epoch
        seconds) of the new event, or None when no free slot exists
        before the deadline.
    '''
    # Half-hour time slots, in seconds.
    slot_size = 1800
    # Round the current time UP to the next slot boundary.
    # BUG FIX: the original assigned only the remainder
    # (slot_size - now % slot_size) instead of adding it to "now".
    now = int(time.time())
    current_time = now + (slot_size - now % slot_size)
    # Number of slots the new event occupies. Integer division keeps the
    # count an int on Python 3 as well.
    time_slots_needed = neweventlen // slot_size
    # Mark every slot covered by an existing event as unavailable,
    # widening each event outward to slot boundaries.
    unavail_time_slots = set()
    for start, end in eventslist:
        if start % slot_size != 0:
            start -= start % slot_size
        if end % slot_size != 0:
            end += slot_size - (end % slot_size)
        unavail_time_slots |= set(range(start, end, slot_size))
    # Collect the free slot start times between now and the deadline.
    # (The original's py2 debug prints and the call to the never-imported
    # pprint were removed — the latter raised NameError.)
    avail_time_slots = [slot for slot in range(current_time, deadline, slot_size)
                        if slot not in unavail_time_slots]
    # TODO: Prioritize the time frame 10am - 10pm
    # BUG FIX: the loop variable used to shadow the ``time`` module, and
    # "end_time" was set to the event's duration rather than its end time.
    for slot in avail_time_slots:
        return jsonify({"start_time": slot,
                        "end_time": slot + time_slots_needed * slot_size})
    return None
if __name__ == "__main__":
    # Demo: find a slot for a two-hour event among the sample busy
    # intervals defined in ``example`` above.
    deadline = 1477418400 # Oct 25, 2016, 2pm
    duration = 7200 # Two hours
    main(example, duration, deadline)
'''
app = Flask(__name__)
@app.route('/generate', methods=['GET'])
def generateSchedule():
if not request.json:
abort(400)
events = request.args.get('eventinfo')
duration = request.args.get('duration')
deadline = request.args.
#deadline = 1477418400 # Oct 25, 2016, 2pm
#duration = 7200 # Two hours
return main(events, duration, deadline)
'''
| [
"juliejiang_12@hotmail.com"
] | juliejiang_12@hotmail.com |
163c9ae06a55743d7c7773963e8c5aea8849a4b6 | c79f5a4ba78d990e38b3e2dd1eb371687a204679 | /media.py | a376a6b481b8bbb7fce1ad03ae0050d731849be1 | [] | no_license | AlexDiru/movie-website-full-stack-nanodegree-project-1 | e128d1ece504882fd266a1bb9051f861bc0505e6 | 79ccbde7810adfa846de16b05adec83f1bc9e239 | refs/heads/master | 2021-08-11T09:45:31.705301 | 2017-11-13T14:31:30 | 2017-11-13T14:31:30 | 110,356,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | import webbrowser
class Movie():
    """A movie with its title, storyline, poster image and trailer.

    Attributes:
        title (str): Title of the movie
        storyline (str): Storyline of the movie
        poster_image_url (str): Image URL of the movie poster
        trailer_youtube_url (str): Trailer URL of the movie on YouTube

    Future work:
        May be refactored to Video if TV Shows are implemented
        (Movie and TV Show would both derive from Video).
    """

    def __init__(self, title, storyline, poster_image_url, trailer_url):
        """Initialise the movie; raise ValueError for any empty field.

        Args:
            title (str): Title of the movie
            storyline (str): Storyline of the movie
            poster_image_url (str): Image URL of the movie poster
            trailer_url (str): Trailer URL of the movie
        """
        # Validate every field (in the same order as before) so the raised
        # message names the first empty one.
        checks = ((title, "Movie title is empty"),
                  (storyline, "Storyline is empty"),
                  (poster_image_url, "Poster Image URL is empty"),
                  (trailer_url, "Trailer URL is empty"))
        for value, message in checks:
            if value == "":
                raise ValueError(message)

        self.title = title
        self.storyline = storyline
        self.poster_image_url = poster_image_url
        self.trailer_youtube_url = trailer_url
| [
"alexspedding_oni@hotmail.com"
] | alexspedding_oni@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.