blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8e480f97fb50625c014ac1023c841fd6e98be142 | ae29eab0e53decfa14787d496cb3d041f158337b | /misc/process_hchs.py | 1982cbaadc8c8988a857741de9e78c8f87cfcfcf | [
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | CyclotronResearchCentre/HypnosPy | c76f5cb98c4c004932d5ed32cb526f04317a65ac | 57d1feec32baf02023bb6413bdf2ad9a5a0c6bac | refs/heads/master | 2023-01-13T22:08:00.067865 | 2020-11-23T21:25:33 | 2020-11-23T21:25:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | from glob import glob
from hypnospy import Wearable
from hypnospy.data import ActiwatchSleepData
from hypnospy.analysis import SleepWakeAnalysis
from hypnospy.analysis import TimeSeriesProcessing
from hypnospy.analysis import PhysicalActivity
from hypnospy import Experiment
if __name__ == "__main__":
    # Configure an Experiment
    exp = Experiment()
    file_path = "./data/small_collection_hchs/*"
    # Iterates over a set of files in a directory.
    # Unfortunately, we have to do it manually with RawProcessing because we are modifying the annotations
    for file in glob(file_path):
        pp = ActiwatchSleepData(file, col_for_datetime="time", col_for_pid="pid")
        w = Wearable(pp) # Creates a wearable from a pp object
        exp.add_wearable(w)
    tsp = TimeSeriesProcessing(exp)
    # Fill gaps with a small negative marker -- presumably to distinguish
    # missing epochs from genuine zero activity; TODO confirm semantics.
    tsp.fill_no_activity(-0.0001)
    tsp.detect_non_wear(strategy="choi")
    # Require at least 5 consecutive days of data per subject.
    tsp.check_consecutive_days(5)
    print("Valid days:", tsp.get_valid_days())
    print("Invalid days:", tsp.get_invalid_days())
    # Sleep boundaries come from dataset annotations; search starts at 18h.
    tsp.detect_sleep_boundaries(strategy="annotation", annotation_hour_to_start_search=18)
    tsp.invalidate_day_if_no_sleep()
    print("Valid days:", tsp.get_valid_days())
    # Any activity level is accepted, but at most 180 non-wear minutes/day.
    tsp.check_valid_days(min_activity_threshold=0, max_non_wear_minutes_per_day=180)
    print("Valid days:", tsp.get_valid_days())
    print("Invalid days:", tsp.get_invalid_days())
    tsp.drop_invalid_days()
    # TODO: PA bouts? How to?
    # Physical-activity cutpoints (lpa/mvpa/vpa) -- units look like activity
    # counts per epoch; TODO confirm against ActiwatchSleepData docs.
    pa = PhysicalActivity(exp, lpa=0, mvpa=399, vpa=1404)
    pa.generate_pa_columns()
    mvpa_bouts = pa.get_mvpas(length_in_minutes=1, decomposite_bouts=False)
    lpa_bouts = pa.get_lpas(length_in_minutes=1, decomposite_bouts=False)
    print("DONE")
| [
"joaopalotti@gmail.com"
] | joaopalotti@gmail.com |
eec72bfca2db98b4a4a5ce142b1aa3708785ac73 | fa870f2b28912508aae3894b56c8dbc39134be5d | /curse_of_dim01.py | ecb0bee0cc4dbab6f59236a4e2e6933067cf8ad8 | [] | no_license | DrSdl/RiskX | 53d43e3a28299d98d95a8171bd9ff00a17b35165 | 5fabd5ecb99d6a97161729815222fcc018961708 | refs/heads/master | 2021-01-19T23:57:09.971838 | 2018-01-29T12:08:19 | 2018-01-29T12:08:19 | 89,055,265 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,455 | py | import math
# demonstration of the curse of dimensionality
# 4 disks inside a box
# Where are the disks after 100 time steps?
from decimal import *
# NOTE(review): Decimal precision is configured here but Decimal is never
# used below -- this looks like leftover/dead setup; confirm before removing.
getcontext().prec = 18
# calculate time to collision with wall --------------------------------------
def wall_time(pos_a, vel_a, sigma):
    """Time until a disk of radius *sigma* at coordinate *pos_a*, moving with
    velocity *vel_a* along one axis, touches a wall of the unit box.

    Returns ``inf`` when the disk is not moving along this axis.
    """
    if vel_a > 0.0:
        return (1.0 - sigma - pos_a) / vel_a
    if vel_a < 0.0:
        return (pos_a - sigma) / abs(vel_a)
    return float('inf')
# calculate time to collsion with particle -----------------------------------
def pair_time(pos_a, vel_a, pos_b, vel_b, sigma):
    """Time until two disks of radius *sigma* collide, or ``inf`` if they
    never do (moving apart, or the discriminant is non-positive).
    """
    dx = pos_b[0] - pos_a[0]
    dy = pos_b[1] - pos_a[1]
    dvx = vel_b[0] - vel_a[0]
    dvy = vel_b[1] - vel_a[1]
    dist_sq = dx * dx + dy * dy
    speed_sq = dvx * dvx + dvy * dvy
    # Projection of the relative velocity onto the separation vector;
    # negative means the disks are approaching each other.
    scal = dvx * dx + dvy * dy
    upsilon = scal * scal - speed_sq * (dist_sq - 4.0 * sigma ** 2)
    if upsilon > 0.0 and scal < 0.0:
        return -(scal + math.sqrt(upsilon)) / speed_sq
    return float('inf')
# init positions and velocities for 4 particles ------------------------------
pos = [[0.250000, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]]
vel = [[0.21, 0.12], [0.71, 0.18], [-0.23, -0.79], [0.78, 0.1177]]
singles = [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)] #(disk,direction) pairs
pairs = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)] #how many distinct pairs?
# disk radius, start time, number of events ----------------------------------
sigma = 0.15
t = 0.0
n_events = 100
# Big event loop -------------------------------------------------------------
# Event-driven simulation: jump directly from one collision to the next
# instead of integrating with a fixed time step.
for event in range(n_events):
    # calculate time to next wall collision ----------------------------------
    wall_times = [wall_time(pos[k][l], vel[k][l], sigma) for k, l in singles]
    # calculate time to next particle - to - particle collision --------------
    pair_times = [pair_time(pos[k], vel[k], pos[l], vel[l], sigma) for k, l in pairs]
    next_event = min(wall_times + pair_times)
    t += next_event
    # position of next event
    # Advance all disks ballistically up to the moment of the next event.
    for k, l in singles: pos[k][l] += vel[k][l] * next_event
    # check if wall collition
    if min(wall_times) < min(pair_times):
        collision_disk, direction = singles[wall_times.index(next_event)]
        vel[collision_disk][direction] *= -1.0 # do a reflection on wall collision
    else:
        # do a pair collision
        a, b = pairs[pair_times.index(next_event)]
        del_x = [pos[b][0] - pos[a][0], pos[b][1] - pos[a][1]]
        abs_x = math.sqrt(del_x[0] ** 2 + del_x[1] ** 2)
        e_perp = [c / abs_x for c in del_x] #unit direction vector
        del_v = [vel[b][0] - vel[a][0], vel[b][1] - vel[a][1]] #delta velocity vector
        scal = del_v[0] * e_perp[0] + del_v[1] * e_perp[1] # projection of velocity on unit collision direction
        # Elastic collision of equal masses: exchange the velocity components
        # along the line joining the disk centres.
        for k in range(2):
            vel[a][k] += e_perp[k] * scal #reflection of perpendicular velocity direction
            vel[b][k] -= e_perp[k] * scal
    #print 'event', event
    #print 'time', t
    #print 'wall', wall_times
    #print 'pair', pair_times
    #print 'pos', pos
    #print 'vel', vel
    print('event', event)
    print('time', t)
    print('pos', pos)
#event 100
#time 6.36010393101
#pos [[0.7217884445958171, 0.85], [0.8375690373531577, 0.546793225810059], [0.316978840352578, 0.42753404775012427], [0.16624374157804678, 0.8010609870874347]]
#event 99
#time 5.24927442637
#pos [[0.4705864318188132, 0.402615284052826], [0.821924632254859, 0.20872855244333718], [0.15, 0.5623774002752693], [0.8157385959995493, 0.8466113698125248]]
#vel [[0.11057788296618187, -0.08725963925497054], [-0.3558922495473892, 0.5968325889562937], [0.49600019321793853, 0.5547307524894192], [-0.7791504032172888, 0.480338832871403]]
#event 1
#time 0.128205128205
#wall [2.857138095238095, 5.0, 0.1408450704225352, 3.3333333333333335, 0.43478260869565216, 0.7594936708860759, 0.12820512820512817, 0.8496176720475784]
#pair [inf, 0.2410756230423861, inf, inf, inf, inf]
#pos [[0.2769240769230769, 0.2653846153846154], [0.841025641025641, 0.27307692307692305], [0.2205128205128205, 0.6487179487179487], [0.85, 0.7650897435897436]]
#vel [[0.21, 0.12], [0.71, 0.18], [-0.23, -0.79], [-0.78, 0.1177]]
#event 2
#time 0.140845070423
#wall [2.7289329670329674, 4.871794871794871, 0.012639942217406962, 3.205128205128205, 0.30657748049052397, 0.6312885426809477, 0.8974358974358974, 0.7214125438424503]
#pair [inf, 0.11287049483725801, inf, inf, inf, inf]
#pos [[0.2795784647887324, 0.2669014084507042], [0.85, 0.2753521126760563], [0.21760563380281692, 0.6387323943661972], [0.8401408450704225, 0.7665774647887323]]
#vel [[0.21, 0.12], [-0.71, 0.18], [-0.23, -0.79], [-0.78, 0.1177]]
#event 3
#time 0.241075623042
#wall [2.71629302481556, 4.859154929577465, 0.9859154929577465, 3.1924882629107985, 0.293937538273117, 0.6186486004635408, 0.8847959552184903, 0.7087726016250436]
#pair [0.2951787093240077, 0.10023055261985098, inf, inf, inf, inf]
#pos [[0.3006268808389011, 0.2789290747650863], [0.7788363076399057, 0.2933936121476295], [0.19455260670025118, 0.5595502577965149], [0.7619610140269387, 0.7783746008320888]]
#vel [[0.4559657292465651, -0.5307062573545285], [-0.71, 0.18], [-0.4759657292465651, -0.13929374264547156], [-0.78, 0.1177]]
#event 4
#time 0.334680275568
#wall [1.204856163354381, 0.2429386746027335, 0.8856849403378955, 3.0922577102909474, 0.09360465252566859, 2.940191354028704, 0.7845654025986394, 0.608542049005193]
#pair [0.18551835949217615, inf, inf, inf, inf, inf]
#pos [[0.3433073944886389, 0.22925249995221764], [0.712377004346681, 0.31024244960224984], [0.15, 0.5465117154171857], [0.6889493850569173, 0.7893918684343599]]
#vel [[0.4559657292465651, -0.5307062573545285], [-0.71, 0.18], [0.4759657292465651, -0.13929374264547156], [-0.78, 0.1177]]
#event5
#time 0.426593982535
#wall [1.1112515108287124, 0.1493340220770649, 0.7920802878122268, 2.9986530577652784, 1.4706941214193556, 2.846586701503036, 0.6909607500729709, 0.5149373964795246]
#pair [0.09191370696650769, inf, inf, 0.2571838526872258, inf, inf]
#pos [[0.3852168949133777, 0.1804733205284415], [0.6471182724004606, 0.32678691685622124], [0.19374777456406891, 0.5337087111734017], [0.6172566936230413, 0.8002101117443179]]
#vel [[-0.13005909563403395, -0.8580943776016878], [-0.12397517511940093, 0.5073881202471593], [0.4759657292465651, -0.13929374264547156], [-0.78, 0.1177]]
| [
"noreply@github.com"
] | DrSdl.noreply@github.com |
class Operation(object):
    """
    Base class for all operations that can be performed.

    Inherit from :class:`Operation`, overload its methods, and call the
    superclass initialiser to create a new operation. See the section on
    extending Augmentor with custom operations at :ref:`extendingaugmentor`.
    """
    def __init__(self, probability):
        """
        All operations must at least have a :attr:`probability` which is
        initialised when creating the operation's object.

        :param probability: Controls the probability that the operation is
         performed when it is invoked in the pipeline.
        :type probability: Float
        """
        self.probability = probability

    def __str__(self):
        """
        String representation of the operation, used by
        :func:`Pipeline.status` to display the current pipeline's
        operations in a human readable way.

        :return: The class name of the operation. Can be overridden if
         required, for example as is done in the :class:`Rotate` class.
        """
        return self.__class__.__name__

    def perform_operation(self, images):
        """
        Perform the operation on the passed images. Each operation must
        accept a list of PIL.Image objects and return a new list of
        transformed PIL.Image objects.

        :param images: The image(s) to transform.
        :type images: List containing PIL.Image object(s).
        :return: The transformed image(s) as a list of object(s) of type
         PIL.Image.
        """
        # NotImplementedError is the idiomatic "abstract method" exception;
        # it subclasses RuntimeError, so callers catching the original
        # RuntimeError keep working.
        raise NotImplementedError("Illegal call to base class.")
| [
"balazs.frey@gmail.com"
] | balazs.frey@gmail.com |
4045d144a83b1c65582baa5d98f4ceece2698cd4 | 498a2d08c19eaf36945468e11fad1be97d62135b | /yaml_lsp/main.py | 125cdb4ed5c7ac43fcdd5ddc1769dfca7aed8329 | [
"BSD-3-Clause"
] | permissive | martinRenou/yaml-lsp | 94f4dc1744b5e8a4763983725cf482a5ab3f1207 | 79186d50289d172d2dc5a8420f1dc2cad1046ce7 | refs/heads/master | 2023-08-25T08:40:39.172933 | 2021-04-08T14:37:04 | 2021-04-08T14:37:04 | 417,399,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import pathlib
import shutil
import subprocess
import sys
# Locate the Node.js executable on PATH ("node" on Unix, "node.exe"/"node.cmd"
# on Windows).
NODE_LOCATION = (
    shutil.which("node") or
    shutil.which("node.exe") or
    shutil.which("node.cmd")
)
# NOTE(review): if Node is not installed, NODE_LOCATION is None and
# pathlib.Path(None) raises TypeError here -- consider a clearer error message.
NODE = str(pathlib.Path(NODE_LOCATION).resolve())
# Absolute path to the bundled yaml-language-server entry script.
PATH_TO_BIN_JS = str(
    (
        pathlib.Path(__file__).parent /
        'node_modules' / 'yaml-language-server' /
        'bin' / 'yaml-language-server'
    ).resolve()
)
def main():
    """Run the Node-based yaml-language-server over stdio, forwarding any
    extra command-line arguments, and exit with the server's status code.
    """
    cmd = [NODE, PATH_TO_BIN_JS, '--stdio'] + sys.argv[1:]
    proc = subprocess.Popen(cmd, stdin=sys.stdin, stdout=sys.stdout)
    sys.exit(proc.wait())
def load(app):
    """Return the language-server spec dictionary consumed by jupyter-lsp.

    :param app: the Jupyter server application (unused).
    """
    spec = {
        "version": 2,
        "argv": ["yaml-lsp"],
        "languages": ["yaml"],
        "mime_types": ["text/x-yaml", "text/yaml"],
    }
    return {"yaml-language-server": spec}
# Script entry point: run the language server directly.
if __name__ == "__main__":
    main()
| [
"martin.renou@gmail.com"
] | martin.renou@gmail.com |
c49252ea5d6bd92a682ec6f7740d83a4a4e3b31d | e24a3601449f5aaf235cb2a7445146d622da1c87 | /test_ws/build/testing/catkin_generated/pkg.develspace.context.pc.py | db3f35c8fc060dced59d9343cf005f876e2137c9 | [] | no_license | udooer-old/ROS | 1edb919524535baf010f62b3dd6e499e859b8d70 | a19833431919d5995fc67dfd4a288b25919f5bfe | refs/heads/master | 2022-11-08T06:48:35.324381 | 2020-06-30T09:42:02 | 2020-06-30T09:42:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by the catkin build system from
# catkin/cmake/template/pkg.context.pc.in -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/yong/ROS/test_ws/devel/include".split(';') if "/home/yong/ROS/test_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rospy;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "testing"
PROJECT_SPACE_DIR = "/home/yong/ROS/test_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"hunghsuyong114shane@gmail.com"
] | hunghsuyong114shane@gmail.com |
34df80d44954fbb824a9dad7091e6ee2e6eb9a0a | ac235a23f22be0d6f1818bb53902177f9969813a | /tests/datastreams/test_processor.py | d8b3879b0af52c14c14035455326ecafe89c7cd8 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DataDog/dd-trace-py | f09d6d48c4c69aea68f999fc8a458ade5c6150cf | 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | refs/heads/1.x | 2023-09-01T20:25:26.746324 | 2023-09-01T18:54:37 | 2023-09-01T18:54:37 | 61,572,326 | 461 | 426 | NOASSERTION | 2023-09-14T20:38:57 | 2016-06-20T18:52:23 | Python | UTF-8 | Python | false | false | 3,013 | py | import time
from ddtrace.internal.datastreams.processor import ConsumerPartitionKey
from ddtrace.internal.datastreams.processor import DataStreamsProcessor
from ddtrace.internal.datastreams.processor import PartitionKey
def test_data_streams_processor():
    # Three checkpoints on the same produce edge (hash 1 -> 2) and one on a
    # consume edge (2 -> 4) must aggregate into two pathway-stats entries
    # within the same 10-second stats bucket.
    processor = DataStreamsProcessor("http://localhost:8126")
    now = time.time()
    processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 1)
    processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 2)
    processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 4)
    processor.on_checkpoint_creation(2, 4, ["direction:in", "topic:topicA", "type:kafka"], now, 1, 2)

    now_ns = int(now * 1e9)
    # Buckets are keyed on 10-second (1e10 ns) boundaries.
    bucket_time_ns = int(now_ns - (now_ns % 1e10))
    aggr_key_1 = (",".join(["direction:out", "topic:topicA", "type:kafka"]), 1, 2)
    aggr_key_2 = (",".join(["direction:in", "topic:topicA", "type:kafka"]), 2, 4)
    assert processor._buckets[bucket_time_ns].pathway_stats[aggr_key_1].full_pathway_latency.count == 3
    assert processor._buckets[bucket_time_ns].pathway_stats[aggr_key_2].full_pathway_latency.count == 1
    # Latency quantiles come from a DDSketch-style summary, so compare the
    # max (quantile 1) against the largest recorded latency within the
    # sketch's relative-accuracy bound.
    assert (
        abs(processor._buckets[bucket_time_ns].pathway_stats[aggr_key_1].full_pathway_latency.get_quantile_value(1) - 4)
        <= 4 * 0.008
    )  # relative accuracy of 0.00775
    assert (
        abs(processor._buckets[bucket_time_ns].pathway_stats[aggr_key_2].full_pathway_latency.get_quantile_value(1) - 2)
        <= 2 * 0.008
    )  # relative accuracy of 0.00775
def test_data_streams_loop_protection():
    # Setting the same checkpoint tags twice in a row must not loop back on
    # itself: the second "out" checkpoint should hash from the earlier
    # consume checkpoint, not from the identical previous "out" checkpoint.
    processor = DataStreamsProcessor("http://localhost:8126")
    ctx = processor.set_checkpoint(["direction:in", "topic:topicA", "type:kafka"])
    parent_hash = ctx.hash
    processor.set_checkpoint(["direction:out", "topic:topicB", "type:kafka"])
    # the application sends data downstream to two different places.
    # Use the consume checkpoint as the parent
    child_hash = processor.set_checkpoint(["direction:out", "topic:topicB", "type:kafka"]).hash
    expected_child_hash = ctx._compute_hash(["direction:out", "topic:topicB", "type:kafka"], parent_hash)
    assert child_hash == expected_child_hash
def test_kafka_offset_monitoring():
    # Only the *latest* produce and commit offsets per (topic, partition)
    # and (group, topic, partition) should be retained in the bucket.
    processor = DataStreamsProcessor("http://localhost:8126")
    now = time.time()
    processor.track_kafka_commit("group1", "topic1", 1, 10, now)
    processor.track_kafka_commit("group1", "topic1", 1, 14, now)
    processor.track_kafka_produce("topic1", 1, 34, now)
    processor.track_kafka_produce("topic1", 2, 10, now)

    now_ns = int(now * 1e9)
    # Buckets are keyed on 10-second (1e10 ns) boundaries.
    bucket_time_ns = int(now_ns - (now_ns % 1e10))
    assert processor._buckets[bucket_time_ns].latest_produce_offsets[PartitionKey("topic1", 1)] == 34
    assert processor._buckets[bucket_time_ns].latest_produce_offsets[PartitionKey("topic1", 2)] == 10
    assert processor._buckets[bucket_time_ns].latest_commit_offsets[ConsumerPartitionKey("group1", "topic1", 1)] == 14
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
14e9ef9109c8780d59c7728c401441c038b76745 | 85da80afe360c0576d8b75a881308cc24e3dbcae | /Shapes.py | a05ef89fe80a1af329ad94e653a5e0508ecdb0c4 | [] | no_license | nasir-001/Core-Python-Exercise | 9334293cb86cac47adeaa4e3fd4fe809b9b8ea90 | 9cfa8de31568508b1fa9611a3509e9d3b28863ce | refs/heads/main | 2023-04-19T09:53:36.274170 | 2021-05-03T17:24:42 | 2021-05-03T17:24:42 | 364,007,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | # Authur: Nasir Lawal
# Date: 28-July-2019
# Description: Function that calculate and returns the Area and the Volume of Shapes
def main():
    """Prompt the user for dimensions and report measurements of each shape."""
    Circle()
    Spheres()
    Cubic()
    Square()
def Circle(radius=None):
    """Print the area and circumference of a circle.

    Bug fix: the original printed 2*pi*r**2 as the "area" and pi*r**2 as the
    "radius"; the correct formulas are area = pi*r^2 and circumference = 2*pi*r.

    :param radius: circle radius; when omitted, the user is prompted on stdin
        (keeps the original no-argument call style working).
    """
    if radius is None:
        radius = float(input("Enter the radius of a circle: "))
    if radius:
        area = 3.141592653589793 * radius ** 2
        circumference = 2 * 3.141592653589793 * radius
        print("The area of this circle is: " + str(area))
        print("The circumference of this circle is: " + str(circumference) + "\n")
def Spheres(radius=None):
    """Print the surface area and volume of a sphere.

    :param radius: sphere radius; when omitted, the user is prompted on stdin
        (keeps the original no-argument call style working).
    """
    if radius is None:
        radius = float(input("Enter the radius of the Sphere: "))
    if radius:
        area = 4 * 3.141592653589793 * radius ** 2
        volume = 4 / 3 * 3.141592653589793 * radius ** 3
        print("The area of this sphere is: " + str(area))
        print("The volume of this sphere is: " + str(volume) + "\n")
def Cubic(side=None):
    """Print the surface area and volume of a cube.

    :param side: length of one edge; when omitted, the user is prompted on
        stdin (keeps the original no-argument call style working).
    """
    if side is None:
        side = float(input("Enter the value of one side of the cube: "))
    if side:
        area = 6 * side ** 2  # six faces of side**2 each
        volume = side ** 3
        print("The area of this cube is: " + str(area))
        print("The volume of this cube is: " + str(volume) + "\n")
def Square(side=None):
    """Print the area and perimeter of a square.

    Bug fix: the original printed ``side ** 3`` as the square's "volume" --
    a square is a 2-D figure and has none; report the perimeter instead.

    :param side: length of one side; when omitted, the user is prompted on
        stdin (keeps the original no-argument call style working).
    """
    if side is None:
        side = float(input("Enter one side of the square: "))
    if side:
        area = side ** 2
        perimeter = 4 * side
        print("The area of this square is: " + str(area))
        print("The perimeter of this square is: " + str(perimeter))
if __name__ == "__main__":
main() | [
"nasirlawal001@gmail.com"
] | nasirlawal001@gmail.com |
0f5fa11f99db977ba1acd735bd842d08e381d229 | 93d8da3722f2a862e56ee3cf125dbf18013051f0 | /src/data-mgt/python/devices-tranformation/utils.py | 192b400820208916006993819ea535b86a80b81c | [] | no_license | michaelgobz/AirQo-api | 78775bed6d86b4be8280187f36f9810797609de7 | 0a701af735401e60a4b937d9565c3e66366855a7 | refs/heads/master | 2023-07-14T18:55:19.439582 | 2021-08-28T19:28:45 | 2021-08-28T19:28:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from datetime import datetime
import pandas as pd
def str_to_date(str):
    """
    Parse a UTC timestamp string into a datetime, accepting the format with
    fractional seconds ('%Y-%m-%dT%H:%M:%S.%fZ') and falling back to the
    variant without them ('%Y-%m-%dT%H:%M:%SZ').
    """
    formats = ('%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ')
    for fmt in formats[:-1]:
        try:
            return datetime.strptime(str, fmt)
        except ValueError:
            pass
    # Last format is tried outside the try so a parse failure propagates
    # the same ValueError the caller would have seen before.
    return datetime.strptime(str, formats[-1])
def date_to_str(date):
    """
    Format a datetime as a UTC timestamp string ('%Y-%m-%dT%H:%M:%S.%fZ').
    """
    return date.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def is_valid_double(value):
    """
    Return True if *value* can be converted to a float, False otherwise.

    Bug fix: the original ``except ValueError or TypeError`` evaluates the
    boolean expression first, i.e. ``except ValueError`` only, so inputs
    like ``None`` escaped as an uncaught TypeError. Multiple exception
    types must be caught as a tuple.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
def handle_api_error(api_request):
    """
    Log debugging details for a failed HTTP response and raise an Exception
    carrying the best available error message.

    :param api_request: a requests.Response-like object -- TODO confirm type.
    :raises Exception: always; with the API's own error message when the
        body is JSON of the shape {"error": {"message": ...}}, otherwise
        with the HTTP status code.
    """
    # NOTE(review): the local name `json` shadows the stdlib json module
    # inside this function -- consider renaming.
    json = None
    try:
        json = api_request.json()
        print(api_request.request.url)
        print(api_request.request.body)
    finally:
        # `finally` (not `except`) so the raise happens whether or not
        # .json() parsing itself failed.
        if json and 'error' in json and 'message' in json['error']:
            print(json)
            raise Exception(json['error']['message'])
        else:
            print(api_request.content)
            raise Exception('API request failed with status code %s' % api_request.status_code)
def array_to_csv(data):
    """Write *data* (a list of records) to ``output.csv``, without the index."""
    pd.DataFrame(data).to_csv(path_or_buf="output.csv", index=False)
def array_to_json(data):
    """Write *data* (a list of records) to ``output.json`` as a JSON array."""
    pd.DataFrame(data).to_json(path_or_buf="output.json", orient="records")
| [
"nsimbenoah@gmail.com"
] | nsimbenoah@gmail.com |
d3addb20a63dc39ad81e216789912e8dfab3d166 | 3da4e93a082d1b1cf5b08a833f3ce2ddc5ff2440 | /Python/sql.py | c729db7a8b4c5e01b7ce0b16869253b19336208c | [] | no_license | BrazierF/Hackathon_Shan17 | aded8667fcd8ef3d15a6577c2edcfa1f663b1af9 | 1d738d801f55f0d88339624637a990f0969371fc | refs/heads/master | 2021-01-11T15:02:21.330243 | 2017-01-29T18:58:43 | 2017-01-29T18:58:43 | 80,286,005 | 0 | 0 | null | 2017-01-29T09:21:16 | 2017-01-28T13:31:00 | Python | WINDOWS-1252 | Python | false | false | 7,800 | py | # -*- coding: iso-8859-1 -*-
"""
Created on Sat Jan 28 14:10:59 2017
@author: franck
"""
import mysql.connector, datetime,os,googlemaps
from travel_generator import *
import pickle
from datetime import datetime
#gmaps = googlemaps.Client(key='AIzaSyC_ETnxWmysf3X-ymcuLCUYwZVGgiCinWk')
# Geocoding an address
#geocode_result = gmaps.geocode('1600 Amphitheatre Parkway, Mountain View, CA')
#print geocode_result
#print geocode_result[0]['formatted_address']
# Look up an address with reverse geocoding
#reverse_geocode_result = gmaps.reverse_geocode((40.714224, -73.961452))
# Request directions via public transit
#now = datetime.now()
#directions_result = gmaps.directions("Sydney Town Hall",
# "Parramatta, NSW",
# mode="transit",
# departure_time=now)
def recuperer():
    """Load every row of the `lieux` table (ordered by id) and return a list
    of Activity objects built from them.

    Relies on a local MySQL instance with hard-coded credentials.
    """
    cnx = mysql.connector.connect(user='Hackathon', password='Python2.7',
                                  host='127.0.0.1',
                                  database='hackathon2017')
    cursor = cnx.cursor()
    # query = ("(SELECT * FROM lieux "
    #          "WHERE type='Patrimoine cult hist' AND lat IS NOT NULL LIMIT 2) UNION "
    #          "(SELECT * FROM lieux "
    #          "WHERE type='Evenements WE' AND lat IS NOT NULL LIMIT 2) UNION "
    #          "(SELECT * FROM lieux "
    #          "WHERE type='resto' AND lat IS NOT NULL LIMIT 2) UNION "
    #          "(SELECT * FROM lieux "
    #          "WHERE type='Parcs' AND lat IS NOT NULL LIMIT 2) ")
    query = ("SELECT * FROM lieux ORDER BY id ASC")
    # hire_start = datetime.date(1999, 1, 1)
    #hire_end = datetime.date(1999, 12, 31)
    cursor.execute(query)
    res=[]
    #row = dict(zip(cursor.column_names, cursor.fetchone()))
    row = cursor.fetchone()
    while row is not None:
        #print(row)
        #row = dict(zip(cursor.column_names, cursor.fetchone()))
        # Columns 2/4/5 are presumably (nom, lat, lon) -- TODO confirm schema.
        act = Activity(row[2],row[4],row[5])
        act.set_base_columns(row)
        res.append(act)
        #act.afficher()
        row = cursor.fetchone()

    #for item in cursor:
    #    print item

    #print cursor.fetchall()
    cursor.close()
    cnx.close()
    return res
def parser(filename,cursor):
    # Stub: intended to parse *filename* and insert rows via *cursor*;
    # never implemented (see `ajouter` below, which does the real parsing).
    return
def ajouter_item(cursor,item):
    """Insert one `lieux` row from the *item* dict via *cursor*.

    When the dict has no 'lat_v'/'lon_v' keys, the lat/lon columns fall back
    to their SQL DEFAULT values.
    """
    #print item
    if 'lat_v' not in item.keys() or 'lon_v' not in item.keys():
        add_item = ("INSERT INTO lieux "
                    "(type,nom,adresse,lat,lon,description,vecteur,extra) "
                    "VALUES (%(type_v)s, %(nom_v)s,%(adresse_v)s,DEFAULT,DEFAULT,%(description_v)s, %(vecteur_v)s, %(extra_v)s)")
    else:
        add_item = ("INSERT INTO lieux "
                    "(type,nom,adresse,lat,lon,description,vecteur,extra) "
                    "VALUES (%(type_v)s, %(nom_v)s,%(adresse_v)s,%(lat_v)s,%(lon_v)s,%(description_v)s, %(vecteur_v)s, %(extra_v)s)")

    # Insert salary information
    # Parameterized query: the connector escapes the %(...)s placeholders.
    cursor.execute(add_item, item)
def const_item(type_lieu, tableau):
    """Build a `lieux` row dict from one CSV record and geocode it.

    The CSV column holding each field depends on the record type, so the
    per-type column indices are kept in a lookup table instead of the
    original four near-identical if/elif branches (which also contained an
    unreachable ``return item`` after ``return trouver_adresse_gps(item)``
    and a dead debug loop -- both removed, behavior unchanged).

    :param type_lieu: record type, e.g. 'Parcs', 'resto', 'Evenements WE'.
    :param tableau: list of CSV fields for one record.
    :return: the geocoded item dict, or None for unknown types (same as the
        original's implicit fall-through).
    """
    # type -> (index of nom, index of adresse, index of description)
    field_map = {
        'Parcs': (0, 1, -1),
        'Patrimoine cult hist': (0, 1, -1),
        'resto': (0, 2, 1),
        'Evenements WE': (1, 2, -1),
    }
    if type_lieu not in field_map:
        return None
    i_nom, i_adresse, i_desc = field_map[type_lieu]
    item = {
        'type_v': type_lieu,
        'nom_v': tableau[i_nom],
        'adresse_v': tableau[i_adresse],
        #'lat_v': 'DEFAULT' ,
        #'lon_v': 'DEFAULT',
        'description_v': tableau[i_desc],
        'vecteur_v': 'NULL',
        'extra_v': " ; ".join(x for x in tableau),
    }
    return trouver_adresse_gps(item)
def trouver_adresse_gps(item):
    """Fill in the missing half of (adresse, lat/lon) on *item* via Google Maps.

    If the address string contains DMS coordinates (with a degree sign),
    they are parsed to decimal degrees; otherwise the gmaps client geocodes
    the address or reverse-geocodes the coordinates.
    """
    adresse = item['adresse_v']
    if adresse.find('°') != -1:
        #print adresse
        # Parse "DDd MMm SSs N, DDd MMm SSs E"-style coordinates: keep the
        # numeric tokens (last char stripped) and convert to decimal degrees.
        coords = adresse.split(',')
        lat = [int(s[:-1]) for s in coords[0].split()[:-1] if s[:-1].isdigit()]
        lat = lat[0] + lat[1]/60.0 + lat[2]/60.0/60.0
        lon = [int(s[:-1]) for s in coords[1].split()[:-1] if s[:-1].isdigit()]
        lon = lon[0] + lon[1]/60.0 + lon[2]/60.0/60.0
        item['lat_v']=lat
        item['lon_v']=lon
        item['adresse_v']=''
    #print item
    if len(item['adresse_v']) == 0:
        # NOTE(review): `'lat_v' and 'lon_v' in item.keys()` only tests
        # 'lon_v' membership ('lat_v' is a truthy string) -- probable bug;
        # intended: `'lat_v' in item and 'lon_v' in item`.
        if 'lat_v' and 'lon_v' in item.keys():
            result = gmaps.reverse_geocode((item['lat_v'], item['lon_v']))
            #print resultat[0]
            item['adresse_v']=result[0]['formatted_address']
            pass
        else:
            pass
    elif 'lat_v' not in item.keys() or 'lon_v' not in item.keys():
        # Address known but no coordinates: forward-geocode it.
        res = gmaps.geocode(item['adresse_v'])
        item['lat_v']=res[0]['geometry']['location']['lat']
        item['lon_v']=res[0]['geometry']['location']['lng']
        item['adresse_v']=res[0]['formatted_address']
        pass
    return item
def ajouter(filename):
    """Parse a semicolon-separated CSV file and insert each record into the
    `lieux` table; the record type is taken from the file name (without
    the '.csv' extension). Does nothing if the file does not exist.
    """
    if(not os.path.isfile(filename) ):
        return
    else:
        cnx = mysql.connector.connect(user='Hackathon', password='Python2.7',
                                  host='127.0.0.1',
                                  database='hackathon2017')
        type_lieu = filename.split('.csv')[0]
        cursor = cnx.cursor()
        k = open(filename, 'r')
        # Skip the CSV header line.
        k.readline()
        for line in k.readlines():
            ajouter_item(cursor,const_item(type_lieu,line.split(';')))
        k.close()
        cursor.close()
        cursor = cnx.cursor()
        # Make sure data is committed to the database
        cnx.commit()

        cursor.close()
        cnx.close()
def recupererlo():
    """Fetch (id, type, nom, tags, score) rows from `lieux`, pickle them to
    'toto.pkl' and return them as a list of tuples.
    """
    cnx = mysql.connector.connect(user='Hackathon', password='Python2.7',
                                  host='127.0.0.1',
                                  database='hackathon2017')
    cursor = cnx.cursor()
    query = ("SELECT id,type,nom,tags,score FROM lieux ")
    # hire_start = datetime.date(1999, 1, 1)
    #hire_end = datetime.date(1999, 12, 31)
    cursor.execute(query)
    res=[]
    #row = dict(zip(cursor.column_names, cursor.fetchone()))
    row = cursor.fetchone()
    while row is not None:
        print(row)
        #row = dict(zip(cursor.column_names, cursor.fetchone()))
        # act = Activity(row[2],row[4],row[5])
        # act.set_base_columns(row)
        res.append(row)
        # act.afficher()
        row = cursor.fetchone()
    # Persist the rows so later runs can work offline from the pickle.
    with open('toto.pkl','w') as f:
        pickle.dump(res,f)
    #for item in cursor:
    #    print item

    #print cursor.fetchall()
    cursor.close()
    cnx.close()
    return res
#recupererlo()
#ajouter('Patrimoine cult hist.csv')
#ajouter('Evenements WE.csv')
#ajouter('resto.csv')
#ajouter('Parcs.csv')
| [
"franck.brazier@telecom-paristech.fr"
] | franck.brazier@telecom-paristech.fr |
78ceabd58d710fb9dcbeaf09df30bf57c5e453ea | a18da1d9cebfb504b84cdc2e2d3a0760e8b0a616 | /pnl.py | 1dc631b3ec4fe40e4380263cb3434ba961023c6f | [
"MIT"
] | permissive | philippe-ostiguy/PyBacktesting | 609a216906799d9be01f922e68d03d3921a959b7 | 1046e52899461003ba7e563445d7acfe1b459189 | refs/heads/master | 2023-03-13T06:19:38.022822 | 2021-03-01T17:59:37 | 2021-03-01T17:59:37 | 287,831,173 | 101 | 43 | null | null | null | null | UTF-8 | Python | false | false | 4,886 | py | #!/usr/local/bin/env python3.7
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# The MIT License (MIT)
# Copyright (c) 2020 Philippe Ostiguy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
"""Module to assess the trading strategy performance"""
import trading_rules as tr
import numpy as np
import math
from date_manip import DateManip
class PnL(tr.RSquareTr):
def __init__(self):
super().__init__()
def pnl_(self):
"""Function that calculate the different metrics to evalute the trading strategy performance"""
super().__call__()
self.diff_ = ((self.end_date - self.start_date).days / 365) #diff in term of year with decimal
self.pnl_dict[self.range_date_] = self.range_date()
self.pnl_dict[self.ann_return_] = self.ann_return()
self.pnl_dict[self.ann_vol_] = self.ann_vol()
self.pnl_dict[self.sharpe_ratio_] = self.sharpe_ratio()
self.pnl_dict[self.max_draw_] = self.max_draw()
self.pnl_dict[self.pour_win_] = self.pour_win()
self.pnl_dict[self.nb_trades_] = self.nb_trades()
#Possible to have some trades but not real trades (0 return) when largest_extension is 0
if (self.pnl_dict[self.nb_trades_] != None):
if (self.pnl_dict[self.nb_trades_] > 0):
if self.pnl_dict[self.sharpe_ratio_] is None or math.isnan(self.pnl_dict[self.sharpe_ratio_]):
self.pnl_dict = {}
def annualized_(func):
"""Decorator to return annualized value"""
def wrap_diff(self):
return ((1+func(self))**(1/self.diff_)-1)
return wrap_diff
@annualized_
def ann_return(self):
"""Calculate the annualized return"""
return_ = 0
for index_ in self.trades_track.index:
return_ = (1+return_)*(1+self.trades_track.loc[index_,self.trade_return]) - 1
return return_
def ann_vol(self):
"""Calculate annualized vol
"""
vol_ = self.trades_track[self.trade_return].std()
if not np.isnan(vol_):
return (vol_ * math.sqrt(1/self.diff_))
else :
return None
def sharpe_ratio(self):
"""Sharpe ratio
Not using the risk-free rate has it doesn't change the final result. We could trade on margin and just
totally distort the return. Also, depending on the time intervals, the return are larger or smaller
(expected higher volatility on daily than hourly basis).
"""
if not bool(self.pnl_dict):
return None
if self.pnl_dict[self.ann_vol_] == None:
return None
elif ((self.pnl_dict[self.ann_vol_] == 0) | np.isnan(self.pnl_dict[self.ann_vol_])):
return None
else :
return (self.pnl_dict[self.ann_return_] /self.pnl_dict[self.ann_vol_])
def max_draw(self):
"""Return lowest return value """
return self.trades_track[self.trade_return].min()
def nb_trades(self):
"""Return the number of trades"""
return self.trades_track.shape[0]
def range_date(self):
    """Human-readable "start to end" backtest date range.

    Both endpoints are rendered through ``DateManip`` with the instance's
    Timestamp format string (``self.end_format_``, e.g. "%Y-%m-%d").
    """
    formatted_start = DateManip(self.start_date).end_format(self.end_format_)
    formatted_end = DateManip(self.end_date).end_format(self.end_format_)
    return "{} to {}".format(formatted_start, formatted_end)
def pour_win(self):
    """Fraction of trades with a non-negative return.

    Returns ``0`` when there are no trades (avoids a zero division).
    """
    # Row count of the trade frame (same value nb_trades() returns), inlined
    # so the guard and the ratio read off the same local.
    total = self.trades_track.shape[0]
    if total == 0:
        return 0
    winners = self.trades_track[self.trades_track[self.trade_return] >= 0].shape[0]
    return winners / total
"ostiguyphilippe@gmail.com"
] | ostiguyphilippe@gmail.com |
d03f061add4ecd353ce20b8d62e7bccb48c412e4 | 84bec9ff537ef85625799a5b5959d116b33919e6 | /ch02-variables+simple_data_types/famous_quote_2.py | fa40a8c721ddd28bd6b14c20bdaa62398a6f06bd | [] | no_license | nate-hunter/python-cc | ee5bd8a8675c5abcabcf79709f927d933fdd8d45 | 0f3aacc0875068bd1b8b8ea17020a6af60765822 | refs/heads/main | 2023-08-30T03:45:21.386464 | 2021-11-15T21:36:56 | 2021-11-15T21:36:56 | 426,832,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # 2.6: 11/10/21
# Chapter 2 exercise (famous quote): store a long quote and its author in
# variables, then print both with a single f-string.
quote = "Let me tell you something you already know. The world ain't all sunshine and rainbows. It's a very mean and nasty place, and I don't care how tough you are, it will beat you to your knees and keep you there permanently if you let it. You, me, or nobody is gonna hit as hard as life. But it ain't about how hard you hit. It's about how hard you can get hit and keep moving forward; how much you can take and keep moving forward. That's how winning is done! Now, if you know what you're worth, then go out and get what you're worth. But you gotta be willing to take the hits, and not pointing fingers saying you ain't where you wanna be because of him, or her, or anybody. Cowards do that and that ain't you. You're better than that! I'm always gonna love you, no matter what. No matter what happens. You're my son and you're my blood. You're the best thing in my life. But until you start believing in yourself, you ain't gonna have a life."
author = "Rocky Balboa"
# Single-quoted f-string, so apostrophes inside are backslash-escaped; the
# \n\n\t sequence sets the quoted text off on its own indented line.
print(f'\nWhen life gets tough, and you think it\'s unfair, or you want to quit something, remember what {author} said, \n\n\t"{quote}"')
| [
"nate.indemand@gmail.com"
] | nate.indemand@gmail.com |
783f2da41ced0cebd04afafd2e2bc7328262ec2f | a9cade24ff3c7b1497f689b3cd8fd59b3fbbb71b | /backend.py | 912ff1d156ed3dce2dd1ac2689e512a7f330095f | [] | no_license | saman-azhar/multilingual-sentiment-predictor | 60f141d1933d26f06fdfbb5478714bef4c3610ae | 42c58307c554ae4cae198d3178c7e19167fc8c42 | refs/heads/main | 2023-06-19T04:21:29.879088 | 2021-07-18T11:24:36 | 2021-07-18T11:24:36 | 387,154,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,769 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# importing libraries
import pandas as pd
from textblob import TextBlob
import re
from nltk.stem.wordnet import WordNetLemmatizer
import preprocessor as p
import pickle
import numpy as np
# Load the pre-trained sentiment classifiers from disk.
def _load_model(path):
    """Deserialize one pickled model, closing the file handle afterwards.

    The original code used ``pickle.load(open(path, 'rb'))``, which leaked
    five open file handles; ``with`` guarantees each file is closed.
    NOTE(review): pickle executes arbitrary code on load — only ever load
    trusted, locally produced model files.
    """
    with open(path, 'rb') as model_file:
        return pickle.load(model_file)


filename1 = 'models/tweets_model(MNB).pkl'  # Multinomial Naive Bayes
filename2 = 'models/tweets_model(LR).pkl'   # Logistic Regression
filename3 = 'models/tweets_model(SVC).pkl'  # Support Vector Classifier
filename4 = 'models/tweets_model(SGD).pkl'  # Stochastic Gradient Descent
filename5 = 'models/tweets_model(CNB).pkl'  # Complement Naive Bayes
loaded_model_MNB = _load_model(filename1)
loaded_model_LR = _load_model(filename2)
loaded_model_SVC = _load_model(filename3)
loaded_model_SGD = _load_model(filename4)
loaded_model_CNB = _load_model(filename5)
# Stopword lists for the three supported input languages: English, Roman Urdu,
# and Urdu script. The Roman Urdu list also folds in common first names that
# are treated as noise tokens.
eng_stopwords = ["a", "about", "above", "after", "again", "against", "ain", "all", "am", "an", "and", "any", "are", "aren", "aren't", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "can", "couldn", "couldn't", "d", "did", "didn", "didn't", "do", "does", "doesn", "doesn't", "doing", "don", "don't", "down", "during", "each", "few", "for", "from", "further", "had", "hadn", "hadn't", "has", "hasn", "hasn't", "have", "haven", "haven't", "having", "he", "her", "here", "hers", "herself", "him", "himself", "his", "how", "i", "if", "in", "into", "is", "isn", "isn't", "it", "it's", "its", "itself", "just", "ll", "m", "ma", "me", "mightn", "mightn't", "more", "most", "mustn", "mustn't", "my", "myself", "needn", "needn't", "no", "nor", "not", "now", "o", "of", "off", "on", "once", "only", "or", "other", "our", "ours", "ourselves", "out", "over", "own", "re", "s", "same", "shan", "shan't", "she", "she's", "should", "should've", "shouldn", "shouldn't", "so", "some", "such", "t", "than", "that", "that'll", "the", "their", "theirs", "them", "themselves", "then", "there", "these", "they", "this", "those", "through", "to", "too", "under", "until", "up", "ve", "very", "was", "wasn", "wasn't", "we", "were", "weren", "weren't", "what", "when", "where", "which", "while", "who", "whom", "why", "will", "with", "won", "won't", "wouldn", "wouldn't", "y", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves", "could", "he'd", "he'll", "he's", "here's", "how's", "i'd", "i'll", "i'm", "i've", "let's", "ought", "she'd", "she'll", "that's", "there's", "they'd", "they'll", "they're", "they've", "we'd", "we'll", "we're", "we've", "what's", "when's", "where's", "who's", "why's", "would", "able", "abst", "accordance", "according", "accordingly", "across", "act", "actually", "added", "adj", "affected", "affecting", "affects", "afterwards", "ah", "almost", "alone", "along", "already", "also", "although", "always", 
"among", "amongst", "announce", "another", "anybody", "anyhow", "anymore", "anyone", "anything", "anyway", "anyways", "anywhere", "apparently", "approximately", "arent", "arise", "around", "aside", "ask", "asking", "auth", "available", "away", "awfully", "b", "back", "became", "become", "becomes", "becoming", "beforehand", "begin", "beginning", "beginnings", "begins", "behind", "believe", "beside", "besides", "beyond", "biol", "brief", "briefly", "c", "ca", "came", "cannot", "can't", "cause", "causes", "certain", "certainly", "co", "com", "come", "comes", "contain", "containing", "contains", "couldnt", "date", "different", "done", "downwards", "due", "e", "ed", "edu", "effect", "eg", "eight", "eighty", "either", "else", "elsewhere", "end", "ending", "enough", "especially", "et", "etc", "even", "ever", "every", "everybody", "everyone", "everything", "everywhere", "ex", "except", "f", "far", "ff", "fifth", "first", "five", "fix", "followed", "following", "follows", "former", "formerly", "forth", "found", "four", "furthermore", "g", "gave", "get", "gets", "getting", "give", "given", "gives", "giving", "go", "goes", "gone", "got", "gotten", "h", "happens", "hardly", "hed", "hence", "hereafter", "hereby", "herein", "heres", "hereupon", "hes", "hi", "hid", "hither", "home", "howbeit", "however", "hundred", "id", "ie", "im", "immediate", "immediately", "importance", "important", "inc", "indeed", "index", "information", "instead", "invention", "inward", "itd", "it'll", "j", "k", "keep", "keeps", "kept", "kg", "km", "know", "known", "knows", "l", "largely", "last", "lately", "later", "latter", "latterly", "least", "less", "lest", "let", "lets", "like", "liked", "likely", "line", "little", "'ll", "look", "looking", "looks", "ltd", "made", "mainly", "make", "makes", "many", "may", "maybe", "mean", "means", "meantime", "meanwhile", "merely", "mg", "might", "million", "miss", "ml", "moreover", "mostly", "mr", "mrs", "much", "mug", "must", "n", "na", "name", "namely", "nay", 
"nd", "near", "nearly", "necessarily", "necessary", "need", "needs", "neither", "never", "nevertheless", "new", "next", "nine", "ninety", "nobody", "non", "none", "nonetheless", "noone", "normally", "nos", "noted", "nothing", "nowhere", "obtain", "obtained", "obviously", "often", "oh", "ok", "okay", "old", "omitted", "one", "ones", "onto", "ord", "others", "otherwise", "outside", "overall", "owing", "p", "page", "pages", "part", "particular", "particularly", "past", "per", "perhaps", "placed", "please", "plus", "poorly", "possible", "possibly", "potentially", "pp", "predominantly", "present", "previously", "primarily", "probably", "promptly", "proud", "provides", "put", "q", "que", "quickly", "quite", "qv", "r", "ran", "rather", "rd", "readily", "really", "recent", "recently", "ref", "refs", "regarding", "regardless", "regards", "related", "relatively", "research", "respectively", "resulted", "resulting", "results", "right", "run", "said", "saw", "say", "saying", "says", "sec", "section", "see", "seeing", "seem", "seemed", "seeming", "seems", "seen", "self", "selves", "sent", "seven", "several", "shall", "shed", "shes", "show", "showed", "shown", "showns", "shows", "significant", "significantly", "similar", "similarly", "since", "six", "slightly", "somebody", "somehow", "someone", "somethan", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry", "specifically", "specified", "specify", "specifying", "still", "stop", "strongly", "sub", "substantially", "successfully", "sufficiently", "suggest", "sup", "sure", "take", "taken", "taking", "tell", "tends", "th", "thank", "thanks", "thanx", "thats", "that've", "thence", "thereafter", "thereby", "thered", "therefore", "therein", "there'll", "thereof", "therere", "theres", "thereto", "thereupon", "there've", "theyd", "theyre", "think", "thou", "though", "thoughh", "thousand", "throug", "throughout", "thru", "thus", "til", "tip", "together", "took", "toward", "towards", "tried", "tries", "truly", 
"try", "trying", "ts", "twice", "two", "u", "un", "unfortunately", "unless", "unlike", "unlikely", "unto", "upon", "ups", "us", "use", "used", "useful", "usefully", "usefulness", "uses", "using", "usually", "v", "value", "various", "'ve", "via", "viz", "vol", "vols", "vs", "w", "want", "wants", "wasnt", "way", "wed", "welcome", "went", "werent", "whatever", "what'll", "whats", "whence", "whenever",
"whereafter", "whereas", "whereby", "wherein", "wheres", "whereupon", "wherever", "whether", "whim", "whither", "whod", "whoever", "whole", "who'll", "whomever", "whos", "whose", "widely", "willing", "wish", "within", "without", "wont", "words", "world", "wouldnt", "www", "x", "yes", "yet", "youd", "youre", "z", "zero", "a's", "ain't", "allow", "allows", "apart", "appear", "appreciate", "appropriate", "associated", "best", "better", "c'mon", "c's", "cant", "changes", "clearly", "concerning", "consequently", "consider", "considering", "corresponding", "course", "currently", "definitely", "described", "despite", "entirely", "exactly", "example", "going", "greetings", "hello", "help", "hopefully", "ignored", "inasmuch", "indicate", "indicated", "indicates", "inner", "insofar", "it'd", "keep", "keeps", "novel", "presumably", "reasonably", "second", "secondly", "sensible", "serious", "seriously", "sure", "t's", "third", "thorough", "thoroughly", "three", "well", "wonder", "a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also", "although", "always", "am", "among", "amongst", "amoungst", "amount", "an", "and", "another", "any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are", "around", "as", "at", "back", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven", "else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", 
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", 
"C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "co", "op", "research-articl", "pagecount", "cit", "ibid", "les", "le", "au", "que", "est", "pas", "vol", "el", "los", "pp", "u201d", "well-b", "http", "volumtype", "par", "0o", "0s", "3a", "3b", "3d", "6b", "6o", "a1", "a2", "a3", "a4", "ab", "ac", "ad", "ae", "af", "ag", "aj", "al", "an", "ao", "ap", "ar", "av", "aw", "ax", "ay", "az", "b1", "b2", "b3", "ba", "bc", "bd", "be", "bi", "bj", "bk", "bl", "bn", "bp", "br", "bs", "bt", "bu", "bx", "c1", "c2", "c3", "cc", "cd", "ce", "cf", "cg", "ch", "ci", "cj", "cl", "cm", "cn", "cp", "cq", "cr", "cs", "ct", "cu", "cv", "cx", "cy", "cz", "d2", "da", "dc", "dd", "de", "df", "di", "dj", "dk", "dl", "do", "dp", "dr", "ds", "dt", "du", "dx", "dy", "e2", "e3", "ea", "ec", "ed", "ee", "ef", "ei", "ej", "el", "em", "en", "eo", "ep", "eq", "er", "es", "et", "eu", "ev", "ex", "ey", "f2", "fa", "fc", "ff", "fi", "fj", "fl", "fn", "fo", "fr", "fs", "ft", "fu", "fy", "ga", "ge", "gi", "gj", "gl", "go", "gr", "gs", "gy", "h2", "h3", "hh", "hi", "hj", "ho", "hr", "hs", "hu", "hy", "i", "i2", "i3", "i4", "i6", "i7", "i8", "ia", "ib", "ic", "ie", "ig", "ih", "ii", "ij", "il", "in", "io", "ip", "iq", "ir", "iv", "ix", "iy", "iz", "jj", "jr", "js", "jt", "ju", "ke", "kg", "kj", "km", "ko", "l2", "la", "lb", "lc", "lf", "lj", "ln", "lo", "lr", "ls", "lt", "m2", "ml", "mn", "mo", "ms", "mt", "mu", "n2", "nc", "nd", "ne", "ng", "ni", "nj", "nl", "nn", "nr", "ns", "nt", "ny", "oa", "ob", "oc", "od", "of", "og", "oi", "oj", "ol", "om", "on", "oo", "oq", "or", "os", "ot", "ou", "ow", "ox", "oz", "p1", "p2", "p3", "pc", "pd", "pe", "pf", "ph", "pi", "pj", "pk", "pl", "pm", "pn", "po", "pq", "pr", "ps", "pt", "pu", "py", "qj", "qu", "r2", "ra", "rc", "rd", "rf", "rh", "ri", "rj", "rl", "rm", "rn", "ro", "rq", "rr", "rs", "rt", "ru", "rv", "ry", "s2", "sa", "sc", "sd", "se", "sf", "si", "sj", "sl", "sm", "sn", "sp", "sq", 
"sr", "ss", "st", "sy", "sz", "t1", "t2", "t3", "tb", "tc", "td", "te", "tf", "th", "ti", "tj", "tl", "tm", "tn", "tp", "tq", "tr", "ts", "tt", "tv", "tx", "ue", "ui", "uj", "uk", "um", "un", "uo", "ur", "ut", "va", "wa", "vd", "wi", "vj", "vo", "wo", "vq", "vt", "vu", "x1", "x2", "x3", "xf", "xi", "xj", "xk", "xl", "xn", "xo", "xs", "xt", "xv", "xx", "y2", "yj", "yl", "yr", "ys", "yt", "zi", "zz"]
rom_urdu_stopwords = ['ai', 'ayi', 'hy', 'hai', 'main', 'ki', 'tha', 'koi', 'ko', 'sy', 'woh', 'bhi', 'aur', 'wo', 'yeh', 'rha', 'hota', 'ho', 'ga', 'ka', 'le', 'lye', 'kr', 'kar', 'lye', 'liye', 'hotay', 'waisay', 'gya', 'gaya', 'kch', 'ab', 'thy', 'thay', 'houn', 'hain', 'han', 'to', 'is', 'hi', 'jo', 'kya', 'thi', 'se', 'pe', 'phr', 'wala', 'waisay', 'us', 'na', 'ny', 'hun', 'rha', 'raha', 'ja', 'rahay', 'abi', 'uski', 'ne', 'haan', 'acha', 'nai', 'sent', 'photo', 'you', 'kafi', 'gai', 'rhy', 'kuch', 'jata', 'aye', 'ya', 'dono', 'hoa', 'aese', 'de', 'wohi', 'jati', 'jb', 'krta', 'lg', 'rahi', 'hui''karna', 'krna', 'gi', 'hova', 'yehi', 'jana', 'jye', 'chal', 'mil', 'tu', 'hum', 'par', 'hay', 'kis', 'sb', 'gy', 'dain', 'krny', 'tou', 'Mahnoor', 'Ali', 'Noor', 'MUHAMMAD', 'Mishael', 'MOHAMMAD', 'mariam', 'Tariq', 'Aisha', 'Sunny', 'faiza', 'waqas', 'Anam', 'Farooq', 'karen', 'Zahid', 'Ayesha', 'usman', 'rameen', 'Bilal', 'Unsa', 'hamza', 'Neha', 'Yasir', 'Rabia', 'adnan', 'Rida', 'Hammad', 'zainnah', 'Hassan', 'Ameria', 'saif', 'sarah', 'Saad', 'Asmi', 'Amir', 'Mizha', 'SAMEER', 'ruby', 'Babar', 'Areeba', 'JAVED', 'Zainab', 'Kashif', 'Momna', 'Ibrahim',
'Rue', 'idrees khan', 'Eraj', 'fazal', 'sara', 'Subhan', 'Krishma', 'moheem', 'Alayna', 'imran', 'USMAAN', 'Shehzad', 'kainat', 'Tahir', 'imama', 'irfan', 'Sana', 'umair', 'Ujalaa', 'naeem', 'nazy', 'KHAN', 'Niya', 'Hadier', 'Afifa', 'Shahid', 'murium', 'Asad', 'Zoha', 'Abdul', 'Lintah', 'aqasha', 'sonia', 'Rabeel', 'Zakia', 'James', 'Aanya', 'Bilal', 'Hanif', 'komal', 'SAR', 'hajra', 'farhan', 'dashti', 'Ifrah', 'Usama', 'Lintah', 'Xain', 'Leah', 'talat', 'yaseen', 'tooba', 'zee', 'Asma', 'Kabir', 'Hussain', 'Kheezran', 'chand', 'sana', 'riasat', 'Zian', 'Saima', 'Talha', 'mahrukh', 'physics', 'haniya', 'Faraz', 'mariyam', 'Beaconite', 'umara', 'jahanzaib', 'Zuny', 'Ali', 'Kazmi', 'sajida', 'Ejaz', 'Zia', 'moiz', 'ahmed', 'ALY', 'Owais', 'ATIF', 'Talal', 'sheryar', 'Ihtisham', 'Sufian', 'HASSAAN', 'IFTEE', 'mitho', 'Chaudhary', 'dad', 'Ghulam', 'Qadir', 'jamshed', 'saleem', 'sharif', 'Hansraj', 'rai', 'Shan', 'Aatif', 'Wishal', 'Maqbool', 'Ahmed', 'a haq', 'ansari', 'waseem', 'wakeel', 'khan', 'Jarrar', 'Faizan', 'daniyal', 'Wasay', 'Danial', 'noman', 'Mazhar', 'Ali', 'RAZA', 'Qais', 'Ranjhoo', 'Rauf', 'Shah', 'Aamir', 'Saleem', 'Fahid', 'Ash']
urdu_stopwords = ["ایم "," لگ رہا تھا "," بظاہر "," لگتا ہے "," دیکھا "," خود "," خود "," بھیجا "," سات "," متعدد "," گے "," شیڈ " , "شو", "دکھایا", "دکھایا گیا", "شو", "شو", "نمایاں", "نمایاں", "مماثل", "اسی طرح", "چونکہ", "چھ", "قدرے", " کوئی "," کسی طرح "," کسی "," کچھ "," کچھ "," کبھی "," کبھی "," کچھ "," کہیں "," جلد "," معذرت "," خاص ", "وضاحت", "پھر بھی", "رک", "مضبوطی سے", "ذیلی", "خاطر خواہ", "کامیابی", "کافی", "تجویز", "مدد", "یقینی" , "لیا" , "لے" , "بتائیں" , "ٹینڈز" , "ویں" , "تھینکس" , "تھینکس" , "تھینکس" , "تھات" , "جس نے" , "وہیں" , " اس کے بعد "," اس کے ساتھ "," تھیریڈ "," لہذا "," اس "," وہاں "," اس "," وہاں "," وہاں "," وہاں "," وہاں " , "ہزار", "اس طرح", " ٹپ "," ایک ساتھ "," لیا "," کی طرف "," کی طرف "," آزمایا "," کوشش "," واقعی "," کوشش "," کوشش "," ای "," دو "," یو "," ان "," بدقسمتی سے "," جب تک "," اس کے برعکس "," غیر امکان "," سے "," پر "," اپ "," ہم "," استعمال " , "استعمال شدہ", "مفید", "مفید", "افادیت", "استعمال", "استعمال", "عام طور پر", "قدر", "مختلف"," کے ذریعے ", "مثلا" ," والیوم "," جلد "," بمقابلہ "," ڈبلیو "," مطلوب "," مطلوب "," ضائع "," راستہ "," شادی "," استقبال "," گئے "," نہیں تھے " ," جو بھی "," کیا کریں گے "," کیا "," کہاں سے "," جب بھی "," جہاں "," جبکہ "," جہاں "," جہاں "," پہیے "," جہاں "," چاہے "," وہم "," وہاں "," کس طرح "," جو بھی "," سارا "," کون کرے گا "," کون "," کون "," جس کا "," وسیع پیمانے پر "," تیار "," خواہش "," کے اندر "," بغیر "," آوارہ "," الفاظ "," دنیا "," نہیں "," ہاں "," ابھی "," یو ڈی ", "آپ" , "زیڈ" , "صفر" , "ایک" , "نہیں" , "اجازت" , "اجازت" , "الگ" , "نمودار" , "تعریف" , "مناسب" , "وابستہ" , "بہترین" , "بہتر" , "کیمون" , "سی" , "کینٹ" , "تبدیلیاں" , "واضح طور پر" , "کے بارے میں" , "نتیجہ" , "غور" , "غور" , "مطابقت پذیر" , "کورس" , "فی الحال" , "یقینی طور پر" , "بیان" , "باوجود" , "مکمل"," بالکل "," مثال "," جا رہے ہیں "," مبارکبادیں "," ہیلو "," مدد "," امید ہے "," نظر انداز "," انسمچ "," اشارہ "," اشارہ "," اشارہ " , "اندرونی" , "انسفر" , "یہ" , "رکھیں" , "رکھتا ہے" , "ناول" , "شاید" , "معقول" 
, "دوسرا" , "ثانوی" , "سمجھدار" , "سنجیدہ" , "سنجیدگی سے", "یقینی", "تیسرا", "مکمل", "اچھی طرح", "تین", "اچھی طرح سے", "حیرت", "کے بارے میں", "اوپر", " اوپر "," پار "," بعد "," بعد "," پھر "," کے خلاف "," سب "," تقریبا "," تنہا "," ساتھ "," پہلے ہی "," بھی "," اگرچہ " , "ہمیشہ" , "ہوں" , "آپس میں" , "درمیان" , "امونگینگ" , "رقم" , "ان" , "اور" , "دوسرا" , "کوئی" , "کسی بھی طرح" , "کسی" , " کچھ بھی "," بہرحال "," کہیں بھی "," ہیں "," آس پاس "," جیسے "," پیچھے "," بن "," بنے "," کیونکہ "," بن "," بنے " , "بن رہا", "رہا", "پہلے", "پہلے", "پیچھے", "ہونا", "نیچے", "ساتھ", "علاوہ", "کے درمیان", "پرے", "بل", " دونوں "," نیچے "," لیکن "," بہ "," کال "," کر سکتے ہیں "," نہیں کر سکتے "," کھچڑی "," کو "," کون "," کر سکے "," رونے " , "ڈی" , "وضاحت" , "تفصیل" , "کرو" , "ہو گیا "," نیچے "," واجب "," دوران "," ہر "," مثال کے طور پر "," آٹھ "," یا تو "," گیارہ "," اور "," کہیں اور "," خالی "," کافی ", "وغیرہ" , "بھی" , "کبھی" , "ہر" , "سب" , "سب کچھ" , "ہر جگہ" , "سوائے" , "چند" , "پندرہ" , "فیٹ" , "پُر" , "ڈھونڈیں "," آگ "," پہلے "," پانچ "," کے لئے "," سابقہ "," پہلے "," چالیس "," پایا "," چار "," سے "," سامنے "," بھرا ", "مزید" , "حاصل" , "دینا" , "جانا" , "تھا" , "ہے" , "ہنس" , "ہے" , "وہ" , "لہذا" , "اس" , "یہاں" , "اس کے بعد "," اس کے ذریعہ "," یہاں "," یہاں "," اس کا "," خود "," خود "," خود "," اس "," کیسے "," تاہم "," سو "," یعنی ", "اگر" , "ان" , "انک" , "در حقیقت" , "دلچسپی" , "میں" , "ہے" , "یہ" , "اس" , "خود" , "رکھیں" , "آخری" , "مؤخر الذکر" ," بعد میں "," کم سے کم "," کم "," لمیٹڈ "," بنا "," بہت سے "," ہوسکتا ہے "," میں "," اس دوران "," شاید "," چکی "," میرا ", "زیادہ" , "مزید" , "زیادہ تر" , "زیادہ تر" , "چال" , "زیادہ" , "لازمی طور پر" , "میرا" , "خود" , "نام" , "نام" , "نہ ہی" , "کبھی نہیں"," اس کے باوجود "," اگلا "," نو "," نہیں "," کوئی نہیں "," نون "," اور "," نہیں "," کچھ نہیں "," اب "," کہیں نہیں ", "آف" , "آف" , "اکثر" , "آن" , "ایک بار" , "ایک" , "صرف" , "پر" , "یا" , "دوسرے" , "دوسرے" , "بصورت دیگر" , "ہمارے" , "ہمارے" , "خود" ," آؤٹ "," اوور "," خود "," 
پارٹ "," فی "," شاید "," پلیز "," ڈال "," بلکہ "," دوبارہ "," اسی "," دیکھیں ", "لگتا ہے" , "لگتا ہے" , "بظاہر" , "لگتا ہے" , "سنجیدہ" , "متعدد" , "وہ" , "چاہئے" , "شو" , "سائیڈ" , "چونکہ" , "مخلص" , "چھ "," ساٹھ "," تو "," کچھ "," کسی طرح "," کسی "," کچھ "," کبھی "," کبھی "," کہیں "," پھر بھی "," ایسے "," نظام ", "لے" , "دس" , "سے" , "وہ" ," ان "," انہیں "," خود "," پھر "," وہاں "," وہاں "," اس کے بعد "," اس کے بعد "," لہذا "," اس میں "," اس کے بعد "," یہ "," وہ "," موٹوی "," پتلا "," تیسرا "," یہ "," وہ "," اگرچہ "," تین ", "کے ذریعے" , "بھر" , "تھرو" , "اس طرح" , "سے" , "ایک ساتھ" , "بھی" , "ٹاپ" , "طرف" , "کی طرف" , "بارہ" , "بیس" , "دو "," ان "," تحت "," جب تک "," اوپر "," پر "," ہم "," بہت "," کے ذریعے "," تھا "," ہم "," خیریت سے "," تھے ", "کیا" , "جو بھی" , "جب" , "کہاں سے" , "جب بھی" , "جہاں" , "جہاں" , "جب" , "جہاں" , "جہاں","ایک" , "کے بارے میں" , "اوپر" , "بعد" , "پھر" , "کے خلاف" , "آئین" , "سب" , "ام" , "ایک" , "اور" , "کوئی" , "نہیں ہیں" , "جیسے" , "ہو" , "کیونکہ" , "رہے" , "پہلے" , "ہونے" , "نیچے" , "کے درمیان" , " دونوں "," لیکن "," بہ "," کر سکتے ہیں "," قابل "," نہیں کر سکتے "," د "," کیا "," کیا "," نہیں "," نہیں "," کرتا ہے " , "نہیں" , "نہیں" , "کر رہا ہے" , "ڈان" , "نہیں" , "نیچے" , "دوران" , "ہر" , "کچھ" , "کے لئے" , "سے" , " مزید "," تھا "," ہینڈ "," نہیں "," تھا "," ہنس "," نہیں "," ہے "," ہیون "," نہیں "," وہ "," اس "," یہاں "," اس "," خود "," اسے "," خود "," اس "," کیسے "," میں "," اگر "," میں "," میں ", "میں" , "طاقتور" , "شاید" , "زیادہ" , "سب سے زیادہ" , "مستن" , "نہیں" , "میرا" , "خود" , "محتاج" , "ضرورت نہیں" , "نہیں" , "نہ" , "نہیں" , "اب" , "او" , "آف" , "آف" , "آن" , "ایک بار" , "صرف" , "یا" , "دوسرے" , "ہمارے" , " ہماری "," خود "," آؤٹ "," اوور "," اپنی "," دوبارہ "," ایس "," وہی "," شان "," شانت "," وہ "," وہ ", "چاہئے" , "چاہئے" , "نہیں" , "تو" , "کچھ" , "ایسے" , "ٹی" , "سے" , "وہ" , "وہ" ,"گے "," ان "," ان "," ان "," وہ "," خود "," تب "," وہاں ", "یہ" , "وہ" , "یہ" , "وہ" , "کے ذریعے" , "سے" , "بھی" , "تحت" , "جب تک" , "اپ" , 
"وی" , "بہت" , "تھے" ," تھا "," نہیں "," ہم "," تھے "," تھے "," نہیں تھے "," کیا "," جب "," کہاں "," کون "," جبکہ ", "کون" , "کسے" , "کیوں" , "کرے گا" , "ساتھ" , "جیتا" , "نہیں کرے گا" , "نہیں" , "نہیں" , "ی" , "آپ" , "آپ" , "آپ" , "آپ" , "آپ" , "آپ" , "آپ" , "خود" , "خود" , "ہو" , "وہ" , "وہ" "ایل ایل" , "وہ" , "یہ ہے" , "کیسا ہے" , "میں ہوں" , "میں ہوں گا" , "میں ہوں" , "میں ہوں" , "آئیے" , "چاہئے" , "وہ" , "وہ" , "وہ" , "وہاں" , "وہ" , "وہ" , "وہ" , "وہ" , "ہم" , "ہم ہوں گے" , "ہم" "ایل ایل" , "ہم" , "ہم" , "کیا" , "کب" , "کہاں ہیں" , "کون ہے", "کیوں" , "کیوں" , "قابل" , "مطابق" ," مطابق "," اس کے مطابق "," اس پار "," ایکٹ "," اصل میں "," شامل "," صفت "," متاثر "," متاثر "," اثر انداز "," بعد میں "," آہ ", "تقریبا" , "تنہا" , "ساتھ" , "پہلے ہی" , "بھی" , "اگرچہ" , "ہمیشہ" , "آپس میں" , "درمیان" , " "," دوسرا "," کسی کو بھی "," کسی بھی طرح "," اب "," کسی کو بھی "," کچھ بھی "," بہرحال "," ویسے بھی "," کہیں بھی "," بظاہر "," تقریبا ","کا اعلان کریں۔ ", "اٹھ", "ارد گرد", "ایک طرف", "پوچھنا", "پوچھ", "دستیاب", "دور", "خوفناک", "پیچھے", "بن گیا", " بن "," بنتا ہے "," بنتا "," پہلے "," شروع "," شروع "," شروعات "," شروع "," پیچھے "," یقین "," ساتھ "," علاوہ "," سے آگے " , "بائول" , "مختصر" , "مختصر طور پر" , "سی" , "سی اے" , "آیا" , "نہیں کر سکتے" , "نہیں" , "وجہ" , "وجوہات" , "کچھ" , "یقینی طور پر" , "کو" , "کام" , "آئے" , "آتا ہے" , "مشتمل" , "مشتمل" , "مشتمل" , "کانٹ" , "تاریخ" , "مختلف" , "کیا ہوا" , "نیچے کی طرف" ," ای "," ای ڈی "," ایدو "," اثر "," مثال کے طور پر "," اسی "," اسی "," یا تو "," کسی اور "," کہیں اور "," اختتام "," اختتامی " , "کافی" , "خاص طور پر" , "ات" , "وغیرہ" , "یہاں تک" , "کبھی" , "ہر" , "ہر ایک" , "سب" , "سب کچھ" , "ہر جگہ" , "سابق" , " سوائے "," ایف "," بعید "," ایف ایف "," پانچواں "," پہلے "," پانچ "," فکس "," فالوڈ "," فالونگ "," فالس "," سابق "," پہلے " , "آگے" , "ملا" , "فو آپ "," اس کے علاوہ "," جی "," دیا "," حاصل "," ملتا ہے "," حاصل "," دے "," دیا "," دیتا "," دے "," جاتا "," جاتا ہے " , "گیا", "ملا", "حاصل", "ہ", "ہوتا ہے", "مشکل 
سے", "ہیڈ", "لہذا", "یہاں", "یہاں", "یہاں", " اس کے بعد "," ہیک "," ہائے "," چھپا "," یہاں "," گھر "," بہرحال "," تاہم "," سو "," شناخت "," یعنی "," آئی ایم "," فوری " , "فورا" ," اہمیت "," اہم "," انک "," واقعی "," اشاریہ "," معلومات "," بجائے "," ایجاد "," اندرونی "," آئی ٹی ڈی "," یہ " , "جے" , "کے" , "رکھیں" , "رکھتا ہے" , "رکھا" , "کلو" , "کلومیٹر" , "جانتے" , "جانا جاتا" , "جانتا" , "ایل" , "بڑے پیمانے پر" , " آخری "," حال ہی میں "," بعد میں "," مؤخر الذکر "," بعد میں "," کم سے کم "," کم "," ایسا نہیں "," لات "," اجازت "," پسند "," پسند "," امکان " , "لائن", "چھوٹا"," دیکھو "," دیکھ "," لگ رہا ہے "," لمیٹڈ "," بنا "," بنیادی طور پر "," بنا "," بناتا ہے "," بہت " "شاید" , "ہوسکتا ہے" , "مطلب" , "مطلب" , "اس دوران" , "اس دوران" , "محض" , "ملیگرام" , "شاید" , "ملین" , "مس" , "ملی" , "مزید یہ "," زیادہ تر "," مسٹر "," مسز "," زیادہ "," مگ "," ضرور "," این "," نا "," نام "," یعنی "," نہیں "," این ڈی ", "قریب" , "قریب" , " لازمی طور پر "," ضروری "," ضرورت "," ضروریات "," نہ "," کبھی نہیں "," بہرحال "," نیا "," اگلا "," نو "," نوے "," کوئی نہیں "," غیر " , "کچھ نہیں" , "بہرحال" , "نون" , "عام طور پر" , "نمبر" , "مشہور" , "کچھ نہیں" , "کہیں نہیں" , "حاصل" , "حاصل" , "ظاہر" , "اکثر" , " اوہ "," اوکے "," اوکے "," پرانا "," چھوٹا ہوا "," ایک "," ایک "," پر "," آرڈر "," دوسرے "," بصورت دیگر "," باہر "," مجموعی طور پر " , "واجب" , "پی" , "صفحہ" , "صفحات" , "حصہ" , "خاص" , "خاص طور پر" , "ماضی" , "فی" , "شاید" , "رکھے ہوئے" , "براہ کرم" , " جمع "," ناقص "," ممکن "," ممکنہ طور پر "," ممکنہ طور پر "," پی پی "," بنیادی طور پر "," حال "," پہلے "," بنیادی طور پر "," شاید "," فوری طور پر "," فخر " , "فراہم کرتا ہے", "ڈال", "کیو", "کوئ", "جلدی", "کافی", "کیو", "ر", "میں "," جہاں "," جہاں "," چاہے "," کون "," جبکہ "," جہاں "," کون "," جو "," پوری "," کس "," کس "," کیوں " , "ساتھ", "کے اندر", "بغیر", "آپ", "آپ", "آپ", "خود", "خود" , "این" , "او" , "پی" , "ق" , "ر" , "ایس" , "ٹی" , "یو" , "وی" , "ڈبلیو" , "ایکس" , "ی" ," زیڈ "," شریک "," آپ ","ریسرچ آرٹیکل "," پیجکاؤنٹ "," سائٹ "," 
آئبید "," لیس "," لی "," او "," کوئ "," لاس "," اب "," اشتہار " , "عی" , "اے ایف" , "اگ" , "اج" , "ال" , "آن" , "او او" , "اپ" , "اے آر" , "اے وی" , "او" , "کلہاڑی" , "بی این" , "بی پی" , "بی آر" , "بی ایس" , "بی ٹی" , "بی او" , "بی ایکس" , "سی " , "سی " , "سی" , "سی سی" , "سی ڈی" , " سی ای "," سییف "," سی جی "," چ "," سی آئی ", "سی زیڈ" , "ڈی " , "دا" , "ڈی سی" , "ڈی ڈی" , "ڈی" , "ڈی ایف" , "دی" , "ڈی جے" , "ڈی کے" , "ڈی ایل" , "ڈو" , " ڈی پی "," ڈرا "," ڈی ایس "," ڈی ٹی "," ڈو "," ڈی ایکس "," ڈائی "," ای "," ای "," ای اے "," ای سی "," ای ڈی "," ای ای " , "ای ایف" , "ای آئی" , "ایج" , "ایل" , "ایم" , "این" , "ای او" , "ایپی" , "ایق" , "ایر" , "ایس" , "ات" ," سابق "," فا ", "فو" ," ہائے "," ہو " , "آئک" , "یعنی" ," آئی جی "," آئی ایل "," ان "," آئی او "," آئی پی "," آئی کی "," آئی آر "," آئی وی "," آئیکس "," آئی آئی "," آئی ایس او "," جے جے " , "جونیئر" , "جے ایس" , "جے ٹی" , "جو" , "کی" , "کلو" , "کے جے" , "کلومیٹر" , "کو" , "ایل " , "لا" , "ایل بی" ," ایل ایف "," ایل جے "," ایل این "," لو "," ایل آر "," ایل ایس "," ایل ٹی "," ایم "," ایم ایل "," ایم این "," مو "," ایم ایس " , "ایم ٹی" , "ایم یو" , "این " , "این سی" , "این ڈی" , "نی" , "این جی" , "نی" , "این جے" , "این ایل" , "این این" , "این آر" , " این ایس "," این ٹی "," نی "," او اے "," اوب "," او سی "," اوڈ "," آف "," اوگ "," او آئی "," اوج "," او ایل "," اوم " , "آن" , "او" , "اوق" , "یا" , "اوس" , "اوٹ" , "او" , "اوہ" , "بیل" , "اوز" , "پی " , "پی " , "پی " , "پی سی" , "پی ڈی" , "پی اے" , "پی ایف" ," پی ایچ "," پی آئی "," پی جے "," پی کے "," پی ایل "," پی ایم "," پی این "," پو "," پی کیو "," پی آر "," پی ایس "," پی ٹی ", "پو" , "پیی" , "کی جے جے" , "ق" , "آر " , "را" , "آر سی" , "آر ڈی" , "آر ایف" , "آر ایچ" , "ر" , "آر جے" , "آر ایل "," آر ایم "," آر این "," آر او "," آر کیو "," آر آر "," آر ایس "," آر ٹی "," رو "," آر وی "," آری "," ایس ٹو "," سا ", "ایس سی" , "ایس ڈی" , "ایس ای" , "ایس ایف" , "سیی" , "ایس جے" , "ایس ایل" , "ایس ایم" , "ایس این" , "ایس پی" , "اسکیور" , "ایس آر" , "ایس ایس" ," ایس ٹی "," سی 
ای "," ایس زیڈ "," ٹی "," ٹی "," ٹی "," ٹی بی "," ٹی سی "," ٹی ڈی "," ٹی "," ٹی ایف "," ویں ", "ٹائی" , "ٹی جے" , "ٹی ایل" , "ٹی ایم" , "ٹی این" , "ٹی پی" , "ٹیکی" , "ٹی آر" , "ٹی ایس" , "ٹی ٹی" , "ٹی وی" , "ٹی ایکس" , "یو "," یوئی "," اوج "," یوکے "," ام "," ان "," یو او "," آپ "," یوٹ "," وا "," وا "," وی ڈی "," وائی ", "وی جے" , "وو" , "وو" , "وی کیو" , "وی ٹی" , "وو" , "ایکس " , "ایکس " , "ایکس " , "ایکس ایف" , "ایکس آئی" , "ایکس جے" , "ایکس کے" , "یٹ" , "زی" , "زیڈز","رن", "بلکہ", "آر ڈی", "آسانی سے", " واقعی "," حال ہی میں "," حال ہی میں "," ریف "," حوالہ جات "," متعلقہ "," قطع نظر "," احترام "," متعلقہ "," نسبتا, "," تحقیق "," بالترتیب "," نتیجہ " , "نتیجہ", "نتائج", "حق", "رن", "کہا", "دیکھا", "کہنا", "کہنا", "کہتا ہے", "سیکنڈ", "سیکشن", "دیکھیں"," دیکھیں"]
class MethodsForText(object):
def text_processing_english(self, text):
    """Clean, tokenize, stopword-filter, and lemmatize an English tweet.

    Pipeline: tweet-preprocessor clean (URLs/mentions/etc.), TextBlob
    tokenization, removal of 'user' placeholders / non-alphabetic tokens /
    English stopwords, then verb-mode lemmatization of each token.

    Parameters
    ----------
    text : str
        Raw tweet text.

    Returns
    -------
    list[str]
        The lemmatized, filtered tokens.

    NOTE(review): the original loop assigned instead of appending, so it
    returned only the LAST word's lemma (a plain string) and discarded the
    accumulator list.  If the pickled models were fitted against that buggy
    output, retrain them before relying on this fix.
    """
    # Strip URLs, mentions, hashtags, emojis, etc.
    text = p.clean(text)

    def form_sentence(text):
        # Re-join TextBlob's word tokens, which drops punctuation.
        text_blob = TextBlob(text)
        return ' '.join(text_blob.words)

    new_text = form_sentence(text)

    def no_user_alpha(text):
        # Drop 'user' placeholders, tokens containing digits/symbols,
        # and English stopwords (case-insensitive).
        text_list = [ele for ele in text.split() if ele != 'user']
        clean_tokens = [t for t in text_list if re.match(r'[^\W\d]*$', t)]
        clean_s = ' '.join(clean_tokens)
        return [word for word in clean_s.split() if word.lower() not in eng_stopwords]

    no_punc_text = no_user_alpha(new_text)

    def normalization(text_list):
        # BUG FIX: append each lemma instead of overwriting the list.
        lem = WordNetLemmatizer()
        normalized_text = []
        for word in text_list:
            normalized_text.append(lem.lemmatize(word, 'v'))
        return normalized_text

    return normalization(no_punc_text)
def text_processing_roman_urdu(self, text):
text = p.clean(text)
# Generating the list of words in the tweet (hastags and other punctuations removed)
def form_sentence(text):
text_blob = TextBlob(text)
return ' '.join(text_blob.words)
new_text = form_sentence(text)
# Removing stopwords and words with unusual symbols
def no_user_alpha(text):
text_list = [ele for ele in text.split() if ele != 'user']
clean_tokens = [t for t in text_list if re.match(r'[^\W\d]*$', t)]
clean_s = ' '.join(clean_tokens)
clean_mess = [word for word in clean_s.split(
) if word.lower() not in rom_urdu_stopwords]
return clean_mess
no_punc_text = no_user_alpha(new_text)
# Normalizing the words in tweets
def normalization(text_list):
lem = WordNetLemmatizer()
normalized_text = []
for word in text_list:
normalized_text = lem.lemmatize(word, 'v')
return normalized_text
return normalization(no_punc_text)
def text_processing_urdu(self, text):
text = re.sub(r"\d+", " ", text)
# English punctuations
text = re.sub(r"""[!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~]+""", " ", text)
# Urdu punctuations
text = re.sub(r"[:؛؟’‘٭ء،۔]+", " ", text)
# Arabic numbers
text = re.sub(r"[٠١٢٣٤٥٦٧٨٩]+", " ", text)
text = re.sub(r"[^\w\s]", " ", text)
# Remove English characters and numbers.
text = re.sub(r"[a-zA-z0-9]+", " ", text)
# remove multiple spaces.
text = re.sub(r" +", " ", text)
text = text.split(" ")
# some stupid empty tokens should be removed.
text = [t.strip() for t in text if t.strip()]
return text
def prediction(self, preprocessed_text):
result_MNB = loaded_model_MNB.predict(preprocessed_text)
result_LR = loaded_model_LR.predict(preprocessed_text)
result_SVC = loaded_model_SVC.predict(preprocessed_text)
result_SGD = loaded_model_SGD.predict(preprocessed_text)
result_CNB = loaded_model_CNB.predict(preprocessed_text)
sum = result_MNB + result_LR + result_SVC + result_SGD + result_CNB
pos = ['Positive']
neg = ['Negative']
preprocessed_text = ' '.join([str(elem) for elem in preprocessed_text])
result_MNB = ' '.join([str(elem) for elem in result_MNB])
result_CNB = ' '.join([str(elem) for elem in result_CNB])
result_LR = ' '.join([str(elem) for elem in result_LR])
result_SGD = ' '.join([str(elem) for elem in result_SGD])
result_SVC = ' '.join([str(elem) for elem in result_SVC])
pos = ' '.join([str(elem) for elem in pos])
neg = ' '.join([str(elem) for elem in neg])
results_pos = pd.DataFrame(np.array([[preprocessed_text, result_MNB, result_CNB, result_LR, result_SGD, result_SVC,pos]]), columns=['preprocessed_text', 'result_MNB', 'result_CNB', 'result_LR', 'result_SGD', 'result_SVC', 'prediction'])
results_neg = pd.DataFrame(np.array([[preprocessed_text, result_MNB, result_CNB, result_LR, result_SGD, result_SVC,neg]]), columns=['preprocessed_text', 'result_MNB', 'result_CNB', 'result_LR', 'result_SGD', 'result_SVC', 'prediction'])
if(sum >= 3):
return results_neg
else:
return results_pos
def predict_sentiment(self, text, language):
if(language == 'en'):
text = self.text_processing_english(text)
preprocessed_text = ''.join(text.lower())
elif(language == 'ur'):
preprocessed_text = self.text_processing_urdu(text)
preprocessed_text = ''.join(text)
elif(language == 'in'):
text = self.text_processing_roman_urdu(text.lower())
preprocessed_text = ''.join(text)
else:
print(
'please choose one of these languages: "english: en", "urdu: ur" or "roman urdu: in"')
return self.prediction([preprocessed_text]) | [
"63509198+saman-azhar@users.noreply.github.com"
] | 63509198+saman-azhar@users.noreply.github.com |
2dc90c883f00cf9997bb9ba40caef6b2309ed6af | f86c1aef6940af525be3353742c6d22d76cd007d | /venv/bin/pip3 | bd31b853a2a5efc250f25637bcacd5c377d0e332 | [] | no_license | Nuvarion/project1 | 433631320cb49eee1578037e45b4f032804daf17 | 972e913e8218accb1a61ce8781629fdec10fe6ec | refs/heads/master | 2020-03-20T18:20:19.184952 | 2018-07-01T16:44:24 | 2018-07-01T16:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | #!/home/aleksandr/PycharmProjects/project1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: drop a trailing "-script.py(w)" / ".exe" wrapper suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    entry_point = load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')
    sys.exit(entry_point())
| [
"vinegret123456@gmail.com"
] | vinegret123456@gmail.com | |
05fda59cded94be9bbb895eadab8a3e36b1c5fd2 | c557fdbe268dcd2107fc94a9a99573398f12634b | /pflog-backend-flask/auth.py | 8dcec3900ec58d3d0d9e948f6b0a9697204b8dfc | [] | no_license | emilkloeden/pflog-backend-flask | 9bf2521a2c1227652d0be8017391febfbfa679f7 | 16bfe4decb7de498372ccf9e6c4ca7b625933525 | refs/heads/main | 2023-04-10T10:42:13.412079 | 2021-04-09T07:07:17 | 2021-04-09T07:07:17 | 355,080,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from flask_httpauth import HTTPBasicAuth
from werkzeug.security import check_password_hash
from app import app, db
from models import User
# auth = HTTPBasicAuth(app, db, user_model=User)
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
    """Look up *username* and check *password* against its stored hash.

    Returns the user object on success, False otherwise.
    """
    user = User.select().where(User.username == username).first()
    if not user:
        return False
    if username != user.username:
        return False
    if check_password_hash(user.password, password):
        return user
    return False
@auth.get_user_roles
def get_user_roles(user):
    """Collect the role names linked to *user* through its user_roles rows."""
    roles = []
    for link in user.user_roles:
        roles.append(link.role.role)
    return roles
"emilkloeden@gmail.com"
] | emilkloeden@gmail.com |
a11414020e389e004fa7ba41d64bb7afc662c6ec | 33cc37817d93dd784be2398c904c9b4cacf84c52 | /Week5_Object_Oriented_Programming/Practice_Problems/currencies.py | 1e40cb96d3c767ece8d29f152b283a21e682a68a | [] | no_license | M1c17/ICS_and_Programming_Using_Python | 305e53561af27067998cb767ee5d566dfc02d33d | ee5127a272fbf19289a6a97cbe9b2ada2f7785ca | refs/heads/master | 2020-07-02T00:04:28.574491 | 2019-08-09T00:16:11 | 2019-08-09T00:16:11 | 201,354,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 12:09:10 2019
@author: MASTER
"""
'''
The class "Ccy" can be used to define money values in various currencies.
A Ccy instance has the string attributes 'unit' (e.g. 'CHF', 'CAD' od 'EUR'
and the 'value' as a float.
A currency object consists of a value and the corresponding unit.
'''
class Ccy:
    """A money value tied to a currency unit.

    Conversion rates are expressed relative to EUR; plain numbers mixed
    into arithmetic are interpreted as Euro amounts.
    """

    currencies = {'CHF': 1.0821202355817312,
                  'CAD': 1.488609845538393,
                  'GBP': 0.8916546282920325,
                  'JPY': 114.38826536281809,
                  'EUR': 1.0,
                  'USD': 1.11123458162018}

    def __init__(self, value, unit='EUR'):
        self.value = value
        self.unit = unit

    def __str__(self):
        return "{0:5.2f}".format(self.value) + " " + self.unit

    def _amount_in_own_unit(self, other):
        """Express *other* (a Ccy, or a number taken as EUR) in self.unit."""
        rates = Ccy.currencies
        if type(other) == int or type(other) == float:
            return other * rates[self.unit]
        return other.value / rates[other.unit] * rates[self.unit]

    def changeTo(self, new_unit):
        """In-place conversion of this object to *new_unit*."""
        rates = Ccy.currencies
        self.value = self.value / rates[self.unit] * rates[new_unit]
        self.unit = new_unit

    def __add__(self, other):
        """Return self + other, expressed in self's unit."""
        return Ccy(self._amount_in_own_unit(other) + self.value, self.unit)

    def __iadd__(self, other):
        """In-place version of __add__."""
        self.value += self._amount_in_own_unit(other)
        return self

    def __radd__(self, other):
        """Right-hand addition (e.g. 5 + ccy, or sum()); result is in EUR."""
        res = self + other
        if self.unit != "EUR":
            res.changeTo("EUR")
        return res

    def __mul__(self, other):
        """Scalar multiplication only; Ccy * Ccy is undefined."""
        if type(other) == int or type(other) == float:
            return Ccy(self.value * other, self.unit)
        raise TypeError("unsupported operand type(s) for *: 'Ccy' and " + type(other).__name__)

    # scalar multiplication commutes, so reuse __mul__ for the right-hand form
    __rmul__ = __mul__

    def __imul__(self, other):
        """In-place scalar multiplication."""
        if type(other) == int or type(other) == float:
            self.value *= other
            return self
        raise TypeError("unsupported operand type(s) for *: 'Ccy' and " + type(other).__name__)
# Demo: mixed-currency arithmetic.
x = Ccy(10,"USD")
y = Ccy(11)
z = Ccy(12.34, "JPY")
# Plain numbers are Euro amounts; the leading 7.8 triggers __radd__, so the
# running total ends up expressed in EUR.
z = 7.8 + x + y + 255 + z
print(z)
lst = [Ccy(10,"USD"), Ccy(11), Ccy(12.34, "JPY"), Ccy(12.34, "CAD")]
# sum() starts from 0, which again goes through __radd__ and yields EUR.
z = sum(lst)
print(z) | [
"pictor117@gmail.com"
] | pictor117@gmail.com |
330371ccb7a6a8e57e43fbf0e09926713293afb2 | fc94bf87abdb6b6b711e769d71afa988b6f22862 | /battleships/main.py | a5b0ef17193b003477fd5641c3627f5c8b492d76 | [] | no_license | oskarmampe/PythonScripts | 1b18d3dbaea75267ee9bccd4b92afc533ad4d59d | f47098ff7121667e89e71baacf7a86bb90192f9a | refs/heads/master | 2022-08-19T20:36:00.064118 | 2019-01-15T19:47:34 | 2019-01-15T19:47:34 | 165,910,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | from random import randint
board = []
# Build a 5x5 grid of "O" (open-water) cells.
for x in range(5):
    board.append(["O"] * 5)
def print_board(board):
    """Print the grid, one row per line, cells separated by spaces."""
    for row in board:
        print(" ".join(row))
# Show the empty board before the game starts.
print_board(board)
def random_row(board):
    """Return a random valid row index for *board*."""
    last_row = len(board) - 1
    return randint(0, last_row)
def random_col(board):
    """Return a random valid column index for *board*."""
    last_col = len(board[0]) - 1
    return randint(0, last_col)
# Hide the ship at a random cell of the board.
ship_row = random_row(board)
ship_col = random_col(board)
# Debug output: reveals the ship's location on the console.
print(ship_row)
print(ship_col)
# Everything from here on should go in your for loop!
# Be sure to indent four spaces!
def guess(row, col):
    """Resolve one guess at (row, col).

    Bug fixes: the old body ignored its parameters and read the
    module-level guess_row/guess_col instead, and it marked the board even
    for out-of-ocean guesses (raising IndexError for rows/cols > 4).

    Reads the module-level board, ship_row and ship_col.
    Returns 0 on a hit, None otherwise.
    """
    if row == ship_row and col == ship_col:
        print("Congratulations! You sunk my battleship!")
        return 0
    if row < 0 or row >= len(board) or col < 0 or col >= len(board[0]):
        print("Oops, that's not even in the ocean.")
    elif board[row][col] == "X":
        print("You guessed that one already.")
    else:
        print("You missed my battleship!")
        # only a valid, fresh miss gets recorded on the board
        board[row][col] = "X"
    # Print (turn + 1) here!
    print_board(board)
# Main game loop: the player gets four guesses.
# NOTE: this file is Python 2 ("print turn + 1" and raw_input below).
for turn in range(4):
    print turn + 1
    guess_row = int(raw_input("Guess Row: "))
    guess_col = int(raw_input("Guess Col: "))
    guess(guess_row, guess_col)
| [
"oskarmampe@hotmail.co.uk"
] | oskarmampe@hotmail.co.uk |
2e0df616e3d19ed3760d45049f817553c425871e | 4ffac980a662ce537b033667e2347355816972a3 | /observables/migrations/0030_auto_20180209_1426.py | a760e283cda101ae0bf1ca4db1705ed56cffa308 | [] | no_license | wahello/tim | 4d5a80ae7b966749ee86bd7bb43f1f67d9808e0d | 21c1f5ddbc6c1fecfd5c3684350c3ee23c93d5a8 | refs/heads/master | 2020-04-17T11:38:19.547786 | 2018-02-15T22:55:12 | 2018-02-15T22:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-09 14:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: restrict ObservableType.type_class to the
    four known choices (IP, String, Email, File)."""
    dependencies = [
        ('observables', '0029_auto_20180209_1238'),
    ]
    operations = [
        migrations.AlterField(
            model_name='observabletype',
            name='type_class',
            field=models.CharField(choices=[(b'ip_type', b'IP'), (b'string_type', b'String'), (b'email_type', b'Email'), (b'file_type', b'File')], default=None, max_length=25),
        ),
    ]
| [
"gerd@cert.europa.eu"
] | gerd@cert.europa.eu |
eef97e03ddd74ef3f1576a69e3e8d0bd0bde60a1 | 1a352b1e5059d02a6c29e9463a37267267e9a1a8 | /app/recipes/validations.py | f4abeed044222fd0f7004d92619d058f0446fba6 | [
"MIT"
] | permissive | desire-geogecko/newyummy_api | 3a86686c955faa8b5295d7656906f52a7d5eb31f | 3c3a56140ade917048050af8cf4262e341c8b65f | refs/heads/master | 2020-03-19T16:11:02.526620 | 2018-04-09T09:07:06 | 2018-04-09T09:07:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | from flask import request, jsonify
from app.categories.validations import is_valid, has_numbers, authentication
def valid_recipe_title(title):
    """Validate a recipe title.

    Returns a dict holding the error message of the first failed check,
    or None when the title passes every validation.
    """
    if title == "None":
        return {"message": "Nothing is provided"}
    if isinstance(title, int):
        return {"message": "Recipe title should not be an integer"}
    # Remaining checks, evaluated in order; the first failure wins.
    failures = (
        (is_valid, 'Recipe title should not have special characters'),
        (has_numbers, 'Recipe title should not have numbers'),
        (lambda t: not t or t.isspace(), 'Recipe title is mostly required'),
    )
    for check, message in failures:
        if check(title):
            return {'message': message}
"hadijahkyampeire@HADIJAHs-MacBook-Pro.local"
] | hadijahkyampeire@HADIJAHs-MacBook-Pro.local |
1e5e76fae083171751770f86865273fe723b8aa0 | 4acd48fd26b40891e4f58b33bb86be95d935c018 | /utils/datasets/CommentsDatasetv3.py | 82ee0bd3897bad454ceff69e798baad56ad8a353 | [] | no_license | ezosa/topic-aware-moderation | 84052ef8b766348c4cfe25859801920f7d9f89d0 | 7721aab0625a49d0307a065783aae120cb140bf4 | refs/heads/main | 2023-08-21T09:00:18.839930 | 2021-10-20T06:23:28 | 2021-10-20T06:23:28 | 397,535,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | from torch.utils.data import Dataset
import pandas as pd
class CommentsDatasetv3(Dataset):
    """Dataset over a CSV of comments with precomputed topic embeddings.

    Each item is a dict with the original CSV columns plus:
    - 'topic_embedding': list of floats (sparse_embedding wins if present)
    - 'text': token-id list padded/truncated to a fixed length
    - 'text_len': the fixed length used
    """

    def __init__(self, csv_file, vocab, delimiter=' '):
        self.data = pd.read_csv(csv_file)
        self.delim = delimiter
        self.vocab = vocab

    def __len__(self):
        return len(self.data.index)

    def __getitem__(self, idx, max_text_len=200):
        sample = dict(self.data.iloc[idx])
        # Dense embedding is whitespace-separated floats.
        sample['topic_embedding'] = [float(v) for v in sample['topic_embedding'].split()]
        # A sparse embedding column, when present, overrides the dense one.
        if 'sparse_embedding' in sample:
            sample['topic_embedding'] = [float(v)
                                         for v in sample['sparse_embedding'].split(self.delim)]
        oov = self.vocab['OOV']
        tokens = [self.vocab.get(w, oov) for w in sample['content'].lower().split()]
        # Truncate long comments, pad short ones with the OOV id.
        if len(tokens) > max_text_len:
            tokens = tokens[:max_text_len]
        else:
            tokens += [oov] * (max_text_len - len(tokens))
        sample['text'] = tokens
        sample['text_len'] = max_text_len
        return sample
| [
"elaine.zosa@helsinki.fi"
] | elaine.zosa@helsinki.fi |
f2fae076dfdc837fb05d3315138eee9210c62a1f | 8523acc83d183e37196542080ca39f9c38d1b26a | /vkrunner/make-features.py | ebbd1ade3eca439285537bd398718399fcf4531b | [
"MIT",
"HPND-sell-variant"
] | permissive | Igalia/vkrunner | f169d067366df78123dd17a32fc74577bfb3033a | 4ecc150eb01a617299a59263bb5331ee0f726d30 | refs/heads/master | 2023-04-27T20:41:46.743987 | 2023-04-24T04:37:34 | 2023-04-24T09:37:40 | 124,641,067 | 44 | 11 | NOASSERTION | 2023-04-24T09:37:42 | 2018-03-10T09:29:27 | C | UTF-8 | Python | false | false | 8,055 | py | #!/usr/bin/env python
# Copyright (C) 2019 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
# This script is used to generate vr-feature.c from vulkan.h. It
# is not run automatically as part of the build process but if need be
# it can be used to update the file as follows:
#
# ./make-features.py < /usr/include/vulkan/vulkan.h > vr-features.c
import re
import sys
from mako.template import Template
EXTENSIONS = [
"KHR_16BIT_STORAGE",
"KHR_8BIT_STORAGE",
{
"name": "EXT_ASTC_DECODE_MODE",
"struct": "VkPhysicalDeviceASTCDecodeFeaturesEXT",
"struct_type": "ASTC_DECODE_FEATURES_EXT"
},
"EXT_BLEND_OPERATION_ADVANCED",
{
"name": "EXT_BUFFER_DEVICE_ADDRESS",
"struct_type": "BUFFER_ADDRESS_FEATURES_EXT"
},
"NV_COMPUTE_SHADER_DERIVATIVES",
"EXT_CONDITIONAL_RENDERING",
"NV_CORNER_SAMPLED_IMAGE",
"EXT_DESCRIPTOR_INDEXING",
{
"name": "NV_SCISSOR_EXCLUSIVE",
"struct_type": "EXCLUSIVE_SCISSOR_FEATURES_NV"
},
{
"name": "KHR_SHADER_FLOAT16_INT8",
"struct_type": "FLOAT16_INT8_FEATURES_KHR"
},
"EXT_FRAGMENT_DENSITY_MAP",
"NV_FRAGMENT_SHADER_BARYCENTRIC",
"EXT_INLINE_UNIFORM_BLOCK",
"EXT_MEMORY_PRIORITY",
"NV_MESH_SHADER",
"KHR_MULTIVIEW",
"NV_REPRESENTATIVE_FRAGMENT_TEST",
"KHR_SAMPLER_YCBCR_CONVERSION",
"EXT_SCALAR_BLOCK_LAYOUT",
"KHR_SHADER_ATOMIC_INT64",
"NV_SHADER_IMAGE_FOOTPRINT",
"NV_SHADING_RATE_IMAGE",
"EXT_TRANSFORM_FEEDBACK",
{
"name": "KHR_VARIABLE_POINTERS",
"struct_type": "VARIABLE_POINTER_FEATURES_KHR"
},
{
"name": "EXT_VERTEX_ATTRIBUTE_DIVISOR",
"version": 3
},
"KHR_VULKAN_MEMORY_MODEL",
]
TEMPLATE="""\
/* Automatically generated by make-features.py */
#include "config.h"
#include <stddef.h>
#include "vr-feature.h"
#include "vr-vk.h"
% for e in extensions:
% if e.name:
#ifdef ${e.ext_name}
% if e.version:
#if ${e.version_var} >= ${e.version}
% endif
#define have_${e.name}
% endif
${e.storage}const struct vr_feature_offset
${e.var_name}[] = {
% for f in e.features:
{
.name = "${f}",
.offset = offsetof(${e.struct}, ${f})
},
% endfor
{ .name = NULL }
};
% if e.name:
% if e.version:
#else /* ${e.version_var} > ${e.version} */
#warning "The Vulkan headers are too old for ${e.ext_name}"
#endif
% endif
#else /* ${e.ext_name} */
#warning "The vulkan headers are missing ${e.ext_name}"
#endif
% endif
% endfor
const struct vr_feature_extension
vr_feature_extensions[] = {
% for e in extensions:
% if e.name:
#ifdef have_${e.name}
% endif
{
.name = ${e.ext_name},
.struct_size = sizeof(${e.struct}),
.struct_type = ${e.struct_type},
.offsets = ${e.var_name}
},
% if e.name:
#endif
% endif
% endfor
{ .struct_size = 0 }
};
"""
class Extension:
    """Holds the C identifiers generated for one Vulkan feature extension.

    A ``name`` of None stands for the base VkPhysicalDeviceFeatures block,
    which has no extension macro and uses a fixed, non-static array name.
    """

    def __init__(self, name, struct, struct_type, features, version=None):
        self.name = name
        self.struct = struct
        self.struct_type = struct_type
        self.features = features
        self.version = version
        if name is None:
            self.ext_name = "NULL"
            self.var_name = "vr_feature_base_offsets"
            self.storage = ""
            return
        self.ext_name = 'VK_{}_EXTENSION_NAME'.format(name)
        self.var_name = "offsets_{}".format(name)
        self.storage = "static "
        # version_var is only defined when a minimum spec version is required
        if version is not None:
            self.version_var = 'VK_{}_SPEC_VERSION'.format(name)
def capitalize_part(part):
    """Capitalize *part* while leaving a leading run of digits unchanged."""
    match = re.match(r'([0-9]*)(.*)', part)
    digits, rest = match.groups()
    return digits + rest.capitalize()
def extension_to_struct_name(ext):
    """Map an extension name such as 'KHR_16BIT_STORAGE' to its features
    struct name, e.g. 'VkPhysicalDevice16BitStorageFeaturesKHR'."""
    vendor, *rest = ext.split("_")
    middle = "".join(capitalize_part(part) for part in rest)
    return "VkPhysicalDevice" + middle + "Features" + vendor
def struct_to_regexp(struct):
    """Build a MULTILINE regexp matching the typedef line that opens
    *struct*, with the trailing vendor suffix made optional."""
    base, vendor = re.match(r'(.*?)([A-Z]+)$', struct).groups()
    pattern = (r'^typedef\s+struct\s+'
               + re.escape(base)
               + r'(?:' + re.escape(vendor) + ')?\s+'
               + '{\s*$')
    return re.compile(pattern, flags=re.MULTILINE)
def get_struct_features(header, struct):
    """Yield the names of the VkBool32 members of *struct* found in the
    Vulkan header text *header*.

    VkStructureType and pNext members are skipped; any other member kind
    aborts with an exception so the generated table stays trustworthy.
    """
    struct_re = struct_to_regexp(struct)
    md = struct_re.search(header)
    if md is None:
        raise Exception("Couldn't find extension {} in vulkan header".format(
            struct))
    # Slice from just past the opening line to the closing '}' of the struct.
    header_tail = header[md.end() + 1 :]
    header_end = re.search(r'^}', header_tail, flags=re.MULTILINE).start()
    members = header_tail[:header_end]
    for line in members.splitlines():
        md = re.match(r'\s*VkStructureType\s+[A-Za-z]+\s*;\s*$', line)
        if md:
            continue
        md = re.match(r'\s*void\s*\*\s+pNext\s*;\s*$', line)
        if md:
            continue
        md = re.match(r'\s*VkBool32\s+([a-zA-Z][a-zA-Z0-9_]*)\s*;\s*$', line)
        if not md:
            raise Exception("Unknown member in struct: " + line)
        yield md.group(1)
def main():
    """Read vulkan.h from stdin and print the generated vr-feature.c.

    Each EXTENSIONS entry may be a plain name or a dict; missing
    struct_type / struct values are derived from the name by convention.
    """
    header = sys.stdin.read()
    extensions = []
    for ext in EXTENSIONS:
        if not isinstance(ext, dict):
            ext = { "name": ext }
        name = ext["name"]
        try:
            struct_type = ext["struct_type"]
        except KeyError:
            # e.g. KHR_16BIT_STORAGE -> 16BIT_STORAGE_FEATURES_KHR
            parts = name.split("_")
            struct_type = ("_".join(parts[1:]) + "_" +
                           "FEATURES_" +
                           parts[0])
        try:
            struct = ext["struct"]
        except KeyError:
            # e.g. 16BIT_STORAGE_FEATURES_KHR -> VkPhysicalDevice16BitStorageFeaturesKHR
            parts = struct_type.split("_")
            struct = ("VkPhysicalDevice" +
                      "".join(capitalize_part(part)
                              for part in parts[:-1]) +
                      parts[-1])
        version = ext.get("version", None)
        struct_type_enum = "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_" + struct_type
        features = list(get_struct_features(header, struct))
        extension = Extension(name, struct, struct_type_enum, features, version)
        extensions.append(extension)
    # The core feature struct is emitted last, with no extension macro.
    base_features = get_struct_features(header, "VkPhysicalDeviceFeaturesKHR")
    extensions.append(Extension(None,
                                "VkPhysicalDeviceFeatures",
                                "0",
                                list(base_features)))
    # Validate that all of the feature names are unique
    feature_names = set()
    for ext in extensions:
        for feature in ext.features:
            if feature in feature_names:
                raise Exception("Feature {} is not unique".format(feature))
            feature_names.add(feature)
    template = Template(TEMPLATE)
    print(template.render(extensions = extensions))
if __name__ == '__main__':
    main()
| [
"nroberts@igalia.com"
] | nroberts@igalia.com |
44b1da96c889e9052f581c8a599ab49bc1951113 | a42e28ce6ffa8c511dabf515660b8734a76570b1 | /web/migrations/0002_auto_20190114_0830.py | d1b90f1206720e7feedeb9c20f19a897f184314e | [] | no_license | javad-hajiani/Exchanger | cc068241b21eb6abeed114f7d9d6a9ccc88b74c9 | 39dd8c52dd8c5739e64f93f2b6befd9f620b7b24 | refs/heads/master | 2022-12-10T20:13:56.014432 | 2019-02-15T13:55:50 | 2019-02-15T13:55:50 | 165,355,411 | 0 | 0 | null | 2022-12-08T01:32:08 | 2019-01-12T06:32:07 | CSS | UTF-8 | Python | false | false | 891 | py | # Generated by Django 2.1.5 on 2019-01-14 08:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drop UserProfile.is_activate, add a
    profile picture field and loosen is_verified/phone_number."""
    dependencies = [
        ('web', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='is_activate',
        ),
        migrations.AddField(
            model_name='userprofile',
            name='picture',
            field=models.ImageField(blank=True, default='default.jpg', upload_to='profile_images'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='is_verified',
            field=models.BooleanField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='phone_number',
            field=models.CharField(blank=True, max_length=15),
        ),
    ]
| [
"j.hajiani@karinaco.com"
] | j.hajiani@karinaco.com |
f9c126902d927e7a260fb705bce0e1c27552cc30 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/309/usersdata/284/72935/submittedfiles/atm.py | bdf46aa78ba6d32e46748200a85c384c0a0db6f1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CODIGO AQUI
v=int(input('digite o valor a ser sacado: '))
a=20
b=10
c=5
d=2
e=1
f=(v%a)
g=(f%10)
h=(g%5)
i=(h%2)
if v//a!=0:
print(v//a)
print(f//10)
print(g//5)
print(h//2)
print(i//1)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
3982c0daba0afe8214652111972eab15bda4919a | bdf96b55d7e18043d8b4330665036cdd4e84b02e | /_pyxel/pyxel_examples/02_jump_game.py | 54e15cff234be4c457dc3be6a904686ba3b53109 | [] | no_license | Yoshi-tmd/ichigojam_sourcecode | ea51d7fab16c137147cc3e67e2b41fa98d551820 | 654dcac437cc43ad1f8d61d638b6ec2bf53e402e | refs/heads/master | 2021-08-24T13:56:22.206551 | 2021-04-14T06:37:28 | 2021-04-14T06:37:28 | 125,824,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,402 | py | from random import randint
import pyxel
class App:
    """Pyxel Jump: steer the player left/right, bounce on moving floors,
    collect fruit for points and restart after falling off the screen."""
    def __init__(self):
        pyxel.init(160, 120, caption="Pyxel Jump")
        pyxel.load("assets/jump_game.pyxel")
        self.score = 0
        self.player_x = 72
        self.player_y = -16
        self.player_vy = 0
        self.player_is_alive = True
        # parallax cloud layers: lists of (x, y) sprite anchors
        self.far_cloud = [(-10, 75), (40, 65), (90, 60)]
        self.near_cloud = [(10, 25), (70, 35), (120, 15)]
        # floors are (x, y, is_active); fruit are (x, y, kind, is_active)
        self.floor = [(i * 60, randint(8, 104), True) for i in range(4)]
        self.fruit = [(i * 60, randint(0, 104), randint(0, 2), True) for i in range(4)]
        pyxel.playm(0, loop=True)
        pyxel.run(self.update, self.draw)
    def update(self):
        """Advance the game state by one frame."""
        if pyxel.btnp(pyxel.KEY_Q):
            pyxel.quit()
        self.update_player()
        for i, v in enumerate(self.floor):
            self.floor[i] = self.update_floor(*v)
        for i, v in enumerate(self.fruit):
            self.fruit[i] = self.update_fruit(*v)
    def update_player(self):
        """Handle input, gravity, and falling off the bottom of the screen."""
        if pyxel.btn(pyxel.KEY_LEFT) or pyxel.btn(pyxel.GAMEPAD_1_LEFT):
            self.player_x = max(self.player_x - 2, 0)
        if pyxel.btn(pyxel.KEY_RIGHT) or pyxel.btn(pyxel.GAMEPAD_1_RIGHT):
            self.player_x = min(self.player_x + 2, pyxel.width - 16)
        self.player_y += self.player_vy
        # gravity: velocity grows by 1 per frame, capped at 8
        self.player_vy = min(self.player_vy + 1, 8)
        if self.player_y > pyxel.height:
            if self.player_is_alive:
                self.player_is_alive = False
                pyxel.play(3, 5)
            # far below the screen: reset score and respawn the player
            if self.player_y > 600:
                self.score = 0
                self.player_x = 72
                self.player_y = -16
                self.player_vy = 0
                self.player_is_alive = True
    def update_floor(self, x, y, is_active):
        """Scroll one floor; bounce the player landing on it, recycle it
        once it scrolls off the left edge."""
        if is_active:
            # player's 16x16 box overlaps the 40x8 floor while falling
            if (
                self.player_x + 16 >= x
                and self.player_x <= x + 40
                and self.player_y + 16 >= y
                and self.player_y <= y + 8
                and self.player_vy > 0
            ):
                is_active = False
                self.score += 10
                self.player_vy = -12
                pyxel.play(3, 3)
        else:
            # a used floor sinks until it is recycled
            y += 6
        x -= 4
        if x < -40:
            x += 240
            y = randint(8, 104)
            is_active = True
        return (x, y, is_active)
    def update_fruit(self, x, y, kind, is_active):
        """Scroll one fruit; award points on pickup, recycle it off-screen."""
        if is_active and abs(x - self.player_x) < 12 and abs(y - self.player_y) < 12:
            is_active = False
            self.score += (kind + 1) * 100
            self.player_vy = min(self.player_vy, -8)
            pyxel.play(3, 4)
        x -= 2
        if x < -40:
            x += 240
            y = randint(0, 104)
            kind = randint(0, 2)
            is_active = True
        return (x, y, kind, is_active)
    def draw(self):
        """Render background layers, floors, fruit, player and score."""
        pyxel.cls(12)
        # draw sky
        pyxel.blt(0, 88, 0, 0, 88, 160, 32)
        # draw mountain
        pyxel.blt(0, 88, 0, 0, 64, 160, 24, 12)
        # draw forest
        offset = pyxel.frame_count % 160
        for i in range(2):
            pyxel.blt(i * 160 - offset, 104, 0, 0, 48, 160, 16, 12)
        # draw clouds
        offset = (pyxel.frame_count // 16) % 160
        for i in range(2):
            for x, y in self.far_cloud:
                pyxel.blt(x + i * 160 - offset, y, 0, 64, 32, 32, 8, 12)
        offset = (pyxel.frame_count // 8) % 160
        for i in range(2):
            for x, y in self.near_cloud:
                pyxel.blt(x + i * 160 - offset, y, 0, 0, 32, 56, 8, 12)
        # draw floors
        for x, y, is_active in self.floor:
            pyxel.blt(x, y, 0, 0, 16, 40, 8, 12)
        # draw fruits
        for x, y, kind, is_active in self.fruit:
            if is_active:
                pyxel.blt(x, y, 0, 32 + kind * 16, 0, 16, 16, 12)
        # draw player
        pyxel.blt(
            self.player_x,
            self.player_y,
            0,
            16 if self.player_vy > 0 else 0,
            0,
            16,
            16,
            12,
        )
        # draw score
        s = "SCORE {:>4}".format(self.score)
        pyxel.text(5, 4, s, 1)
        pyxel.text(4, 4, s, 7)
# Instantiate and start the game (pyxel.run is invoked inside __init__).
App()
| [
"tomoda@na-s.jp"
] | tomoda@na-s.jp |
abc48bd8502c8b8ef276ed7b449017c72c610d31 | 690b130c49fa01885a2658e97442e3b8ef5c9def | /Main.py | ee63452f207b2565ccee8e2cd9526d0b5c22b389 | [] | no_license | RomanAntonov98/VkTCGBot | 84e1a76773baef4ccbcfe1ff973b68e4bc77db95 | b44e039ed8ce4ff4c7d6686a510e3bebb810b857 | refs/heads/main | 2023-06-05T02:56:32.649297 | 2021-06-29T14:35:26 | 2021-06-29T14:35:26 | 381,386,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,700 | py | from vk_api.longpoll import VkLongPoll, VkEventType
import vk_api
import altmessageoperator
# Configuration: a user session is authenticated first, then replaced by a
# group-token session that actually sends the messages.
login, password = "Login", "Password" # login and password (placeholders)
vk_session = vk_api.VkApi(login=login, password=password, app_id=2685278)
vk_session.auth(token_only=True)
token = "Token" # token of the group that will send the messages
vk_session = vk_api.VkApi(token=token)
session_api = vk_session.get_api()
longpoll = VkLongPoll(vk_session)
# Main loop: poll VK long-poll events forever. Private messages are answered
# directly; chat messages only when prefixed with the bot's name. Long
# attachment strings are split across two messages.
while True:
    try:
        for event in longpoll.listen():
            if event.type == VkEventType.MESSAGE_NEW and not event.from_me:
                incmsg = event.text
                if event.from_user and not event.from_me:
                    chat_id = 0
                    text, attachment = altmessageoperator.alt_read_msg(incmsg, event.user_id)
                    attachment_2 = "null"
                    if attachment == "null":
                        vk_session.method('messages.send', {'user_id': event.user_id, 'message': text, 'random_id': 0})
                    else:
                        if len(attachment) > 260: # uses the string length to judge the number of attached images
                            attachment_2 = attachment[260:]
                            attachment = attachment[:260]
                        vk_session.method('messages.send', {'user_id': event.user_id, 'message': text, 'random_id': 0,
                                                            'attachment': attachment})
                        if attachment_2 != "null": # sends the remaining images in a second message
                            vk_session.method('messages.send',
                                              {'user_id': event.user_id, 'message': "Остальные карты:",
                                               'random_id': 0, 'attachment': attachment_2})
                elif event.from_chat and incmsg[:5] == "баку " and not event.from_me: # reacts only when the bot is addressed
                    try:
                        text, attachment = altmessageoperator.alt_read_msg(incmsg[5:], event.user_id)
                        attachment_2 = "null"
                        if attachment == "null":
                            vk_session.method('messages.send',
                                              {'chat_id': event.chat_id, 'message': text, 'random_id': 0})
                        else:
                            if len(attachment) > 260:
                                attachment_2 = attachment[260:]
                                attachment = attachment[:260]
                            vk_session.method('messages.send',
                                              {'chat_id': event.chat_id, 'message': text, 'random_id': 0,
                                               'attachment': attachment})
                            if attachment_2 != "null":
                                vk_session.method('messages.send',
                                                  {'chat_id': event.chat_id, 'message': "Остальные карты:",
                                                   'random_id': 0, 'attachment': attachment_2})
                    # NOTE(review): bare except maps any failure to the
                    # "command not recognized" reply — it also swallows
                    # KeyboardInterrupt; confirm this is intentional.
                    except:
                        vk_session.method('messages.send',
                                          {'chat_id': event.chat_id, 'message': 'Команда не распознана',
                                           'random_id': 0})
    except Exception as error:
        print(error)
| [
"noreply@github.com"
] | RomanAntonov98.noreply@github.com |
1f96f72f233d70286289b429157d02f586e49a0c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/134/usersdata/228/53506/submittedfiles/escadarolante.py | b21aed3136bcf8b81ed533496221440ebca0f3d7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # -*- coding: utf-8 -*-
def rolagem(lista):
    """Return the total time (seconds) the escalator stays switched on.

    *lista* holds the passengers' arrival times in increasing order. Each
    person rides for 10 s; while somebody is still riding, a new arrival
    keeps the escalator running, so each gap between consecutive arrivals
    contributes at most 10 s.

    Bug fixes: the old code reset ``tempo`` on every loop iteration, raised
    NameError for lists with fewer than three elements, and the second loop
    contained the typo ``len(lista-1)`` (a TypeError had it ever run).
    """
    tempo = 0
    for i in range(0, len(lista) - 1, 1):
        intervalo = lista[i + 1] - lista[i]
        if intervalo < 10:
            tempo = tempo + intervalo
        else:
            tempo = tempo + 10
    # the last passenger always adds one full 10 s ride
    return tempo + 10
# Read the number of passengers, then each arrival time, and print the
# total time the escalator stays on.
n=int(input('digite um valor:'))
lista=[]
for i in range(0,n,1):
    tn=int(input('digite um tempo de passagem:'))
    lista.append(tn)
print (rolagem(lista))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
71396f1bb124ce97821fd58da35598330e459569 | 67b872b2552974c156e547e18541e82b7efee621 | /address.py | 94fe479e5ab078f66b19c13d587b842bb1debe4a | [] | no_license | alekjedrosz/electives-recommendation-system | 6d5e280c0cf3516b8e8c120f955aaebc3c251576 | 9dc4143a8028b70ced0c69c50f6f50c2e3efc560 | refs/heads/master | 2021-02-24T02:59:24.254036 | 2020-03-06T12:53:27 | 2020-03-06T12:53:27 | 245,417,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | from sqlalchemy import Column, Integer, String
from utils import Base
class Address(Base):
    """Represents a postal address.
    :param country: String representing a country.
    :param city: String representing a city.
    :param address_line: String representing the street name, street number, house number or similar.
    :param postal_code: Postal/zip code passed in as a string.
    """
    __tablename__ = 'address'
    # surrogate primary key
    id = Column(Integer, primary_key=True)
    country = Column(String(100))
    city = Column(String(100))
    address_line = Column(String(500))
    postal_code = Column(String(20))
    def __init__(self, *, country, city, address_line, postal_code):
        # keyword-only constructor: every component must be named by callers
        self.country = country
        self.city = city
        self.address_line = address_line
        self.postal_code = postal_code
    def __str__(self):
        # single-line, most-specific-first rendering of the address
        return f'{self.address_line}, {self.postal_code}, {self.city}, {self.country}'
| [
"jedrosz.alek@gmail.com"
] | jedrosz.alek@gmail.com |
c66ed6c09ba514f7af942d339d530959192d21f9 | 33e57df595d499289ad3a193fdc04213c495134e | /my_blog_project/urls.py | cc436ff62d8a2cd28a4c7eb011981472e73f24c9 | [] | no_license | Jawahar-007/My-personal-Blog | c1c0050814e01d8fd27d66ad5c508268cb2d2d1c | ac698bdbc7e1622e7e2e96b7d649f082f8fb8754 | refs/heads/main | 2023-02-04T09:48:40.591594 | 2020-12-21T09:05:18 | 2020-12-21T09:05:18 | 323,268,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | """my_blog_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls import url
from blogapp import views
# URL routing table: the admin site plus the blog app's own urlconf.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin interface
    path('',include('blogapp.urls')),  # everything else is delegated to blogapp
]
| [
"ahnashwin1305@gmail.com"
] | ahnashwin1305@gmail.com |
07ee526ae4cc62f861473e517b4b79b7aba4d6be | 8dc64db8a0d7ddb8778c8eae2dac9075b9a90e2b | /env/Lib/site-packages/pylint/checkers/variables.py | c7fd6318483362dc3ead1935ccf35244b261e505 | [
"MIT"
] | permissive | theXtroyer1221/Cloud-buffer | c3992d1b543a1f11fde180f6f7d988d28b8f9684 | 37eabdd78c15172ea980b59d1aff65d8628cb845 | refs/heads/master | 2022-11-22T22:37:10.453923 | 2022-02-25T01:15:57 | 2022-02-25T01:15:57 | 240,901,269 | 1 | 1 | MIT | 2022-09-04T14:48:02 | 2020-02-16T14:00:32 | HTML | UTF-8 | Python | false | false | 81,382 | py | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Grant Welch <gwelch925+github@gmail.com>
# Copyright (c) 2017-2018, 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017-2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2017 Dan Garrette <dhgarrette@gmail.com>
# Copyright (c) 2018-2019 Jim Robertson <jrobertson98atx@gmail.com>
# Copyright (c) 2018 Mike Miller <mtmiller@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 Drew <drewrisinger@users.noreply.github.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.guinta@protonmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Marianna Polatoglou <mpolatoglou@bloomberg.net>
# Copyright (c) 2018 mar-chi-pan <mar.polatoglou@gmail.com>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2019 Djailla <bastien.vallet@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 Andrew Simmons <anjsimmo@gmail.com>
# Copyright (c) 2020 Andrew Simmons <a.simmons@deakin.edu.au>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 Ashley Whetter <ashleyw@activestate.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""variables checkers for Python code
"""
import collections
import copy
import itertools
import os
import re
from functools import lru_cache
import astroid
from astroid import decorators, modutils, objects
from astroid.context import InferenceContext
from pylint.checkers import BaseChecker, utils
from pylint.checkers.utils import is_postponed_evaluation_enabled
from pylint.interfaces import HIGH, INFERENCE, INFERENCE_FAILURE, IAstroidChecker
from pylint.utils import get_global_option
# Matches dunder names such as ``__init__`` (two leading/trailing underscores).
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
# Module name used to recognise ``from __future__ import ...`` statements.
FUTURE = "__future__"
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile("_.*|^ignored_|^unused_")
# In Python 3.7 abc has a Python implementation which is preferred
# by astroid. Unfortunately this also messes up our explicit checks
# for `abc`
METACLASS_NAME_TRANSFORMS = {"_py_abc": "abc"}
# Test expressions that guard blocks executed only for static type checking.
TYPING_TYPE_CHECKS_GUARDS = frozenset({"typing.TYPE_CHECKING", "TYPE_CHECKING"})
BUILTIN_RANGE = "builtins.range"
TYPING_MODULE = "typing"
# Public names exported by the ``typing`` module.
TYPING_NAMES = frozenset(
    {
        "Any",
        "Callable",
        "ClassVar",
        "Generic",
        "Optional",
        "Tuple",
        "Type",
        "TypeVar",
        "Union",
        "AbstractSet",
        "ByteString",
        "Container",
        "ContextManager",
        "Hashable",
        "ItemsView",
        "Iterable",
        "Iterator",
        "KeysView",
        "Mapping",
        "MappingView",
        "MutableMapping",
        "MutableSequence",
        "MutableSet",
        "Sequence",
        "Sized",
        "ValuesView",
        "Awaitable",
        "AsyncIterator",
        "AsyncIterable",
        "Coroutine",
        "Collection",
        "AsyncGenerator",
        "AsyncContextManager",
        "Reversible",
        "SupportsAbs",
        "SupportsBytes",
        "SupportsComplex",
        "SupportsFloat",
        "SupportsInt",
        "SupportsRound",
        "Counter",
        "Deque",
        "Dict",
        "DefaultDict",
        "List",
        "Set",
        "FrozenSet",
        "NamedTuple",
        "Generator",
        "AnyStr",
        "Text",
        "Pattern",
        "BinaryIO",
    }
)
def _is_from_future_import(stmt, name):
    """Tell whether *name* is a ``__future__`` import in *stmt*'s module.

    Returns True when such an import is found; otherwise None (also when
    the imported module cannot be built).
    """
    try:
        imported = stmt.do_import_module(stmt.modname)
    except astroid.AstroidBuildingException:
        return None
    for candidate in imported.locals.get(name, []):
        is_future_import = (
            isinstance(candidate, astroid.ImportFrom) and candidate.modname == FUTURE
        )
        if is_future_import:
            return True
    return None
def in_for_else_branch(parent, stmt):
    """Return True when *stmt* sits in the ``else`` branch of the For node *parent*."""
    if not isinstance(parent, astroid.For):
        return False
    for else_stmt in parent.orelse:
        if else_stmt == stmt or else_stmt.parent_of(stmt):
            return True
    return False
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
    """Return the ancestor method of *klass* overridden by *name*, or None."""
    try:
        ancestor = next(klass.local_attr_ancestors(name))
    except (StopIteration, KeyError):
        # No ancestor of the class defines this name.
        return None
    try:
        candidate = ancestor[name]
    except KeyError:
        # The ancestor declares <name> but it is missing from its local
        # dictionary; this can happen with astroid built from living objects.
        return None
    return candidate if isinstance(candidate, astroid.FunctionDef) else None
def _get_unpacking_extra_info(node, inferred):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ""
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = " %s" % inferred.as_string()
elif inferred.lineno:
more = " defined at line %s" % inferred.lineno
elif inferred.lineno:
more = f" defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(node, frame, defframe):
    """Detect that the given frames share a global scope.

    Two frames share a global scope when neither of them (nor any of
    their parent scopes, up to the root scope) is hidden under a
    function scope.  In this case, depending on something defined later
    will not work, because it is still undefined.
    Example:
        class A:
            # B has the same global scope as `C`, leading to a NameError.
            class B(C): ...
        class C: ...
    """
    # Parent scopes of the use site (frame) and of the definition (defframe).
    def_scope = scope = None
    if frame and frame.parent:
        scope = frame.parent.scope()
    if defframe and defframe.parent:
        def_scope = defframe.parent.scope()
    if isinstance(frame, astroid.FunctionDef):
        # If the parent of the current node is a
        # function, then it can be under its scope
        # (defined in, which doesn't concern us) or
        # the `->` part of annotations. The same goes
        # for annotations of function arguments, they'll have
        # their parent the Arguments node.
        if not isinstance(node.parent, (astroid.FunctionDef, astroid.Arguments)):
            return False
    elif any(
        not isinstance(f, (astroid.ClassDef, astroid.Module)) for f in (frame, defframe)
    ):
        # Not interested in other frames, since they are already
        # not in a global scope.
        return False
    break_scopes = []
    for current_scope in (scope, def_scope):
        # Look for parent scopes. If there is anything different
        # than a module or a class scope, then the frames don't
        # share a global scope.
        parent_scope = current_scope
        while parent_scope:
            if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
                break_scopes.append(parent_scope)
                break
            if parent_scope.parent:
                parent_scope = parent_scope.parent.scope()
            else:
                break
    if break_scopes and len(set(break_scopes)) != 1:
        # Store different scopes than expected.
        # If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) shares the same scope,
        # and we could apply our lineno analysis over them.
        # For instance, this works when they are inside a function, the node
        # that uses a definition and the definition itself.
        return False
    # At this point, we are certain that frame and defframe shares a scope
    # and the definition of the first depends on the second.
    return frame.lineno < defframe.lineno
def _infer_name_module(node, name):
    """Run inference on *node* with *name* set as the lookup target."""
    inference_context = InferenceContext()
    inference_context.lookupname = name
    return node.infer(inference_context, asname=False)
def _fix_dot_imports(not_consumed):
    """Try to fix imports with multiple dots, by returning a dictionary
    with the import names expanded. The function unflattens root imports,
    like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
    and 'xml.sax' respectively.
    """
    names = {}
    for name, stmts in not_consumed.items():
        # Names rebound through augmented assignment are real uses, skip them.
        if any(
            isinstance(stmt, astroid.AssignName)
            and isinstance(stmt.assign_type(), astroid.AugAssign)
            for stmt in stmts
        ):
            continue
        for stmt in stmts:
            if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
                continue
            for imports in stmt.names:
                second_name = None
                import_module_name = imports[0]
                if import_module_name == "*":
                    # In case of wildcard imports,
                    # pick the name from inside the imported module.
                    second_name = name
                else:
                    name_matches_dotted_import = False
                    if (
                        import_module_name.startswith(name)
                        and import_module_name.find(".") > -1
                    ):
                        name_matches_dotted_import = True
                    if name_matches_dotted_import or name in imports:
                        # Most likely something like 'xml.etree',
                        # which will appear in the .locals as 'xml'.
                        # Only pick the name if it wasn't consumed.
                        second_name = import_module_name
                if second_name and second_name not in names:
                    names[second_name] = stmt
    # Deterministic output: sort expanded names by their statement's line.
    return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
    """Detect imports in *frame* that bind *name*.

    Such imports can be considered assignments.  Returns True when a
    matching import is found, otherwise None.
    """
    import_nodes = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
    for import_node in import_nodes:
        for import_name, import_alias in import_node.names:
            # An aliased import binds only the alias, not the original name.
            bound_name = import_alias if import_alias else import_name
            if bound_name and bound_name == name:
                return True
    return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
    """Check whether *name_node* has a matching assignment in the same scope."""
    assignments = name_node.scope().nodes_of_class(astroid.AssignName)
    for assignment in assignments:
        if assignment.name == name_node.name:
            return True
    return False
def _is_type_checking_import(node):
    """Return True if *node* is guarded by an ``if TYPE_CHECKING:`` test."""
    parent = node.parent
    if isinstance(parent, astroid.If):
        return parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
    return False
def _has_locals_call_after_node(stmt, scope):
    """Return True if *scope* calls the builtin ``locals()`` on a line after *stmt*."""
    # Nested definitions and imports cannot contain a relevant locals() call.
    skip_nodes = (
        astroid.FunctionDef,
        astroid.ClassDef,
        astroid.Import,
        astroid.ImportFrom,
    )
    for call in scope.nodes_of_class(astroid.Call, skip_klass=skip_nodes):
        inferred = utils.safe_infer(call.func)
        if not utils.is_builtin_object(inferred):
            continue
        if getattr(inferred, "name", None) != "locals":
            continue
        if stmt.lineno < call.lineno:
            return True
    return False
# Message table for VariablesChecker, keyed by pylint message id:
# id -> (message template, symbolic name, description[, extra options]).
MSGS = {
    "E0601": (
        "Using variable %r before assignment",
        "used-before-assignment",
        "Used when a local variable is accessed before its assignment.",
    ),
    "E0602": (
        "Undefined variable %r",
        "undefined-variable",
        "Used when an undefined variable is accessed.",
    ),
    "E0603": (
        "Undefined variable name %r in __all__",
        "undefined-all-variable",
        "Used when an undefined variable name is referenced in __all__.",
    ),
    "E0604": (
        "Invalid object %r in __all__, must contain only strings",
        "invalid-all-object",
        "Used when an invalid (non-string) object occurs in __all__.",
    ),
    "E0611": (
        "No name %r in module %r",
        "no-name-in-module",
        "Used when a name cannot be found in a module.",
    ),
    "W0601": (
        "Global variable %r undefined at the module level",
        "global-variable-undefined",
        'Used when a variable is defined through the "global" statement '
        "but the variable is not defined in the module scope.",
    ),
    "W0602": (
        "Using global for %r but no assignment is done",
        "global-variable-not-assigned",
        'Used when a variable is defined through the "global" statement '
        "but no assignment to this variable is done.",
    ),
    "W0603": (
        "Using the global statement",  # W0121
        "global-statement",
        'Used when you use the "global" statement to update a global '
        "variable. Pylint just try to discourage this "
        "usage. That doesn't mean you cannot use it !",
    ),
    "W0604": (
        "Using the global statement at the module level",  # W0103
        "global-at-module-level",
        'Used when you use the "global" statement at the module level '
        "since it has no effect",
    ),
    "W0611": (
        "Unused %s",
        "unused-import",
        "Used when an imported module or variable is not used.",
    ),
    "W0612": (
        "Unused variable %r",
        "unused-variable",
        "Used when a variable is defined but not used.",
    ),
    "W0613": (
        "Unused argument %r",
        "unused-argument",
        "Used when a function or method argument is not used.",
    ),
    "W0614": (
        "Unused import %s from wildcard import",
        "unused-wildcard-import",
        "Used when an imported module or variable is not used from a "
        "`'from X import *'` style import.",
    ),
    "W0621": (
        "Redefining name %r from outer scope (line %s)",
        "redefined-outer-name",
        "Used when a variable's name hides a name defined in the outer scope.",
    ),
    "W0622": (
        "Redefining built-in %r",
        "redefined-builtin",
        "Used when a variable or function override a built-in.",
    ),
    "W0623": (
        "Redefining name %r from %s in exception handler",
        "redefine-in-handler",
        "Used when an exception handler assigns the exception to an existing name",
    ),
    "W0631": (
        "Using possibly undefined loop variable %r",
        "undefined-loop-variable",
        "Used when a loop variable (i.e. defined by a for loop or "
        "a list comprehension or a generator expression) is used outside "
        "the loop.",
    ),
    "W0632": (
        "Possible unbalanced tuple unpacking with "
        "sequence%s: "
        "left side has %d label(s), right side has %d value(s)",
        "unbalanced-tuple-unpacking",
        "Used when there is an unbalanced tuple unpacking in assignment",
        {"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
    ),
    "E0633": (
        "Attempting to unpack a non-sequence%s",
        "unpacking-non-sequence",
        "Used when something which is not "
        "a sequence is used in an unpack assignment",
        {"old_names": [("W0633", "old-unpacking-non-sequence")]},
    ),
    "W0640": (
        "Cell variable %s defined in loop",
        "cell-var-from-loop",
        "A variable used in a closure is defined in a loop. "
        "This will result in all closures using the same value for "
        "the closed-over variable.",
    ),
    "W0641": (
        "Possibly unused variable %r",
        "possibly-unused-variable",
        "Used when a variable is defined but might not be used. "
        "The possibility comes from the fact that locals() might be used, "
        "which could consume or not the said variable",
    ),
    "W0642": (
        "Invalid assignment to %s in method",
        "self-cls-assignment",
        "Invalid assignment to self or cls in instance or class method "
        "respectively.",
    ),
}
# Per-scope bookkeeping record: names still to be consumed, names already
# consumed, and the kind of scope ("module", "class", "function", ...).
ScopeConsumer = collections.namedtuple(
    "ScopeConsumer", "to_consume consumed scope_type"
)
class NamesConsumer:
    """
    A simple class to handle consumed, to consume and scope type info of node locals
    """
    def __init__(self, node, scope_type):
        # Start with a copy of the node's locals: everything is "to consume".
        self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
        self.node = node
    def __repr__(self):
        to_consumes = [f"{k}->{v}" for k, v in self._atomic.to_consume.items()]
        consumed = [f"{k}->{v}" for k, v in self._atomic.consumed.items()]
        to_consumes = ", ".join(to_consumes)
        consumed = ", ".join(consumed)
        return f"""
to_consume : {to_consumes}
consumed : {consumed}
scope_type : {self._atomic.scope_type}
"""
    def __iter__(self):
        return iter(self._atomic)
    @property
    def to_consume(self):
        # Names defined in this scope that have not been used yet.
        return self._atomic.to_consume
    @property
    def consumed(self):
        # Names from this scope that have already been used.
        return self._atomic.consumed
    @property
    def scope_type(self):
        # One of "module", "class", "function", "lambda", "comprehension".
        return self._atomic.scope_type
    def mark_as_consumed(self, name, new_node):
        """
        Mark the name as consumed and delete it from
        the to_consume dictionary
        """
        self.consumed[name] = new_node
        del self.to_consume[name]
    def get_next_to_consume(self, node):
        # Get the definition of `node` from this scope
        name = node.name
        parent_node = node.parent
        found_node = self.to_consume.get(name)
        # A name used on the right-hand side of the very assignment that
        # defines it is not yet defined at that point.
        if (
            found_node
            and isinstance(parent_node, astroid.Assign)
            and parent_node == found_node[0].parent
        ):
            # NOTE(review): assumes targets[0] is an AssignName — a tuple
            # target has no .name attribute; confirm upstream guarantees this.
            lhs = found_node[0].parent.targets[0]
            if lhs.name == name:  # this name is defined in this very statement
                found_node = None
        # Likewise, a for-loop's iterable cannot refer to the loop target
        # the very same statement is about to bind.
        if (
            found_node
            and isinstance(parent_node, astroid.For)
            and parent_node.iter == node
            and parent_node.target in found_node
        ):
            found_node = None
        return found_node
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
    """checks for
    * unused variables / imports
    * undefined variables
    * redefinition of variable from builtins or from an outer scope
    * use of variable before assignment
    * __all__ consistency
    * self/cls assignment
    """
    __implements__ = IAstroidChecker
    name = "variables"
    msgs = MSGS
    priority = -1
    # Checker options, exposed through self.config.<option_name_with_underscores>.
    options = (
        (
            "init-import",
            {
                "default": 0,
                "type": "yn",
                "metavar": "<y_or_n>",
                "help": "Tells whether we should check for unused import in "
                "__init__ files.",
            },
        ),
        (
            "dummy-variables-rgx",
            {
                "default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "A regular expression matching the name of dummy "
                "variables (i.e. expected to not be used).",
            },
        ),
        (
            "additional-builtins",
            {
                "default": (),
                "type": "csv",
                "metavar": "<comma separated list>",
                "help": "List of additional names supposed to be defined in "
                "builtins. Remember that you should avoid defining new builtins "
                "when possible.",
            },
        ),
        (
            "callbacks",
            {
                "default": ("cb_", "_cb"),
                "type": "csv",
                "metavar": "<callbacks>",
                "help": "List of strings which can identify a callback "
                "function by name. A callback name must start or "
                "end with one of those strings.",
            },
        ),
        (
            "redefining-builtins-modules",
            {
                "default": (
                    "six.moves",
                    "past.builtins",
                    "future.builtins",
                    "builtins",
                    "io",
                ),
                "type": "csv",
                "metavar": "<comma separated list>",
                "help": "List of qualified module names which can have objects "
                "that can redefine builtins.",
            },
        ),
        (
            "ignored-argument-names",
            {
                "default": IGNORED_ARGUMENT_NAMES,
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "Argument names that match this expression will be "
                "ignored. Default to name with leading underscore.",
            },
        ),
        (
            "allow-global-unused-variables",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y_or_n>",
                "help": "Tells whether unused global variables should be treated as a violation.",
            },
        ),
    )
    def __init__(self, linter=None):
        """Initialize per-run state; all attributes are reset while visiting."""
        BaseChecker.__init__(self, linter)
        self._to_consume = (
            None  # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
        )
        self._checking_mod_attr = None
        # Stack of (For node, [loop variable names]) for nested loops.
        self._loop_variables = []
        # Names referenced only inside type annotations / type comments.
        self._type_annotation_names = []
        # True when `from __future__ import annotations` is active.
        self._postponed_evaluation_enabled = False
@utils.check_messages("redefined-outer-name")
def visit_for(self, node):
assigned_to = [
var.name for var in node.target.nodes_of_class(astroid.AssignName)
]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if variable in outer_variables and not in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-outer-name",
args=(variable, outer_for.fromlineno),
node=node,
)
break
self._loop_variables.append((node, assigned_to))
    @utils.check_messages("redefined-outer-name")
    def leave_for(self, node):
        """Pop this loop's variables and record its type-annotation names."""
        self._loop_variables.pop()
        self._store_type_annotation_names(node)
def visit_module(self, node):
"""visit module : update consumption analysis variable
checks globals doesn't overrides builtins
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
for name, stmts in node.locals.items():
if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
    @utils.check_messages(
        "unused-import",
        "unused-wildcard-import",
        "redefined-builtin",
        "undefined-all-variable",
        "invalid-all-object",
        "unused-variable",
    )
    def leave_module(self, node):
        """leave module: check globals"""
        # The module scope must be the only one left on the stack.
        assert len(self._to_consume) == 1
        self._check_metaclasses(node)
        not_consumed = self._to_consume.pop().to_consume
        # attempt to check for __all__ if defined
        if "__all__" in node.locals:
            self._check_all(node, not_consumed)
        # check for unused globals
        self._check_globals(not_consumed)
        # don't check unused imports in __init__ files
        if not self.config.init_import and node.package:
            return
        self._check_imports(not_consumed)
    # The visit_/leave_ pairs below only push/pop a NamesConsumer for the
    # corresponding scope; unused-locals checks happen elsewhere (or not at
    # all for class/lambda/comprehension scopes).
    def visit_classdef(self, node):
        """visit class: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "class"))
    def leave_classdef(self, _):
        """leave class: update consumption analysis variable"""
        # do not check for not used locals here (no sense)
        self._to_consume.pop()
    def visit_lambda(self, node):
        """visit lambda: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "lambda"))
    def leave_lambda(self, _):
        """leave lambda: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()
    def visit_generatorexp(self, node):
        """visit genexpr: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))
    def leave_generatorexp(self, _):
        """leave genexpr: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()
    def visit_dictcomp(self, node):
        """visit dictcomp: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))
    def leave_dictcomp(self, _):
        """leave dictcomp: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()
    def visit_setcomp(self, node):
        """visit setcomp: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))
    def leave_setcomp(self, _):
        """leave setcomp: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()
    def visit_functiondef(self, node):
        """visit function: update consumption analysis variable and check locals"""
        self._to_consume.append(NamesConsumer(node, "function"))
        if not (
            self.linter.is_message_enabled("redefined-outer-name")
            or self.linter.is_message_enabled("redefined-builtin")
        ):
            return
        globs = node.root().globals
        for name, stmt in node.items():
            if utils.is_inside_except(stmt):
                continue
            if name in globs and not isinstance(stmt, astroid.Global):
                definition = globs[name][0]
                if (
                    isinstance(definition, astroid.ImportFrom)
                    and definition.modname == FUTURE
                ):
                    # It is a __future__ directive, not a symbol.
                    continue
                # Do not take in account redefined names for the purpose
                # of type checking.:
                if any(
                    isinstance(definition.parent, astroid.If)
                    and definition.parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
                    for definition in globs[name]
                ):
                    continue
                line = definition.fromlineno
                if not self._is_name_ignored(stmt, name):
                    self.add_message(
                        "redefined-outer-name", args=(name, line), node=stmt
                    )
            elif utils.is_builtin(name) and not self._should_ignore_redefined_builtin(
                stmt
            ):
                # do not print Redefining builtin for additional builtins
                self.add_message("redefined-builtin", args=name, node=stmt)
    def leave_functiondef(self, node):
        """leave function: check function's locals are consumed"""
        self._check_metaclasses(node)
        # Record names used only in type comments so they count as used.
        if node.type_comment_returns:
            self._store_type_annotation_node(node.type_comment_returns)
        if node.type_comment_args:
            for argument_annotation in node.type_comment_args:
                self._store_type_annotation_node(argument_annotation)
        not_consumed = self._to_consume.pop().to_consume
        if not (
            self.linter.is_message_enabled("unused-variable")
            or self.linter.is_message_enabled("possibly-unused-variable")
            or self.linter.is_message_enabled("unused-argument")
        ):
            return
        # Don't check arguments of function which are only raising an exception.
        if utils.is_error(node):
            return
        # Don't check arguments of abstract methods or within an interface.
        is_method = node.is_method()
        if is_method and node.is_abstract():
            return
        global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
        nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
        for name, stmts in not_consumed.items():
            self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
    # Async functions get exactly the same scope handling.
    visit_asyncfunctiondef = visit_functiondef
    leave_asyncfunctiondef = leave_functiondef
    @utils.check_messages(
        "global-variable-undefined",
        "global-variable-not-assigned",
        "global-statement",
        "global-at-module-level",
        "redefined-builtin",
    )
    def visit_global(self, node):
        """Check that names declared ``global`` exist in the module scope."""
        frame = node.frame()
        if isinstance(frame, astroid.Module):
            # `global` at module level is a no-op.
            self.add_message("global-at-module-level", node=node)
            return
        module = frame.root()
        default_message = True
        locals_ = node.scope().locals
        for name in node.names:
            try:
                assign_nodes = module.getattr(name)
            except astroid.NotFoundError:
                # unassigned global, skip
                assign_nodes = []
            not_defined_locally_by_import = not any(
                isinstance(local, astroid.node_classes.Import)
                for local in locals_.get(name, ())
            )
            if not assign_nodes and not_defined_locally_by_import:
                self.add_message("global-variable-not-assigned", args=name, node=node)
                default_message = False
                continue
            for anode in assign_nodes:
                if (
                    isinstance(anode, astroid.AssignName)
                    and anode.name in module.special_attributes
                ):
                    self.add_message("redefined-builtin", args=name, node=node)
                    break
                if anode.frame() is module:
                    # module level assignment
                    break
            else:
                # for/else: no module-level assignment was found above.
                if not_defined_locally_by_import:
                    # global undefined at the module scope
                    self.add_message("global-variable-undefined", args=name, node=node)
                    default_message = False
        if default_message:
            self.add_message("global-statement", node=node)
    def visit_assignname(self, node):
        """Augmented assignment reads the name first, so treat it as a use."""
        if isinstance(node.assign_type(), astroid.AugAssign):
            self.visit_name(node)
    def visit_delname(self, node):
        """``del name`` requires the name to be defined, so treat it as a use."""
        self.visit_name(node)
    def visit_name(self, node):
        """Check that a name is defined in the current scope.

        Walks the stack of open consumption scopes (``self._to_consume``)
        from innermost to outermost, looking for a definition of ``node.name``
        and emitting undefined-variable / used-before-assignment as needed.
        """
        stmt = node.statement()
        if stmt.fromlineno is None:
            # name node from an astroid built from live code, skip
            assert not stmt.root().file.endswith(".py")
            return
        name = node.name
        frame = stmt.scope()
        start_index = len(self._to_consume) - 1
        undefined_variable_is_enabled = self.linter.is_message_enabled(
            "undefined-variable"
        )
        used_before_assignment_is_enabled = self.linter.is_message_enabled(
            "used-before-assignment"
        )
        # iterates through parent scopes, from the inner to the outer
        base_scope_type = self._to_consume[start_index].scope_type
        # pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
        for i in range(start_index, -1, -1):
            current_consumer = self._to_consume[i]
            # The list of base classes in the class definition is not part
            # of the class body.
            # If the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents to access this scope instead of
            # the globals one in function members when there are some common
            # names.
            if current_consumer.scope_type == "class" and (
                utils.is_ancestor_name(current_consumer.node, node)
                or (i != start_index and self._ignore_class_scope(node))
            ):
                continue
            # if the name node is used as a function default argument's value or as
            # a decorator, then start from the parent frame of the function instead
            # of the function frame - and thus open an inner class scope
            if (
                current_consumer.scope_type == "function"
                and self._defined_in_function_definition(node, current_consumer.node)
            ):
                # ignore function scope if is an annotation/default/decorator, as not in the body
                continue
            if current_consumer.scope_type == "lambda" and utils.is_default_argument(
                node, current_consumer.node
            ):
                continue
            # the name has already been consumed, only check it's not a loop
            # variable used outside the loop
            # avoid the case where there are homonyms inside function scope and
            # comprehension current scope (avoid bug #1731)
            if name in current_consumer.consumed and not (
                current_consumer.scope_type == "comprehension"
                and self._has_homonym_in_upper_function_scope(node, i)
            ):
                defnode = utils.assign_parent(current_consumer.consumed[name][0])
                self._check_late_binding_closure(node, defnode)
                self._loopvar_name(node, name)
                break
            found_node = current_consumer.get_next_to_consume(node)
            if found_node is None:
                # Not defined in this scope; try the next outer one.
                continue
            # checks for use before assignment
            defnode = utils.assign_parent(current_consumer.to_consume[name][0])
            if (
                undefined_variable_is_enabled or used_before_assignment_is_enabled
            ) and defnode is not None:
                self._check_late_binding_closure(node, defnode)
                defstmt = defnode.statement()
                defframe = defstmt.frame()
                # The class reuses itself in the class scope.
                recursive_klass = (
                    frame is defframe
                    and defframe.parent_of(node)
                    and isinstance(defframe, astroid.ClassDef)
                    and node.name == defframe.name
                )
                if (
                    recursive_klass
                    and utils.is_inside_lambda(node)
                    and (
                        not utils.is_default_argument(node)
                        or node.scope().parent.scope() is not defframe
                    )
                ):
                    # Self-referential class references are fine in lambda's --
                    # As long as they are not part of the default argument directly
                    # under the scope of the parent self-referring class.
                    # Example of valid default argument:
                    # class MyName3:
                    #     myattr = 1
                    #     mylambda3 = lambda: lambda a=MyName3: a
                    # Example of invalid default argument:
                    # class MyName4:
                    #     myattr = 1
                    #     mylambda4 = lambda a=MyName4: lambda: a
                    # If the above conditional is True,
                    # there is no possibility of undefined-variable
                    # Also do not consume class name
                    # (since consuming blocks subsequent checks)
                    # -- quit
                    break
                (
                    maybee0601,
                    annotation_return,
                    use_outer_definition,
                ) = self._is_variable_violation(
                    node,
                    name,
                    defnode,
                    stmt,
                    defstmt,
                    frame,
                    defframe,
                    base_scope_type,
                    recursive_klass,
                )
                if use_outer_definition:
                    continue
                if (
                    maybee0601
                    and not utils.is_defined_before(node)
                    and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
                ):
                    # Used and defined in the same place, e.g `x += 1` and `del x`
                    defined_by_stmt = defstmt is stmt and isinstance(
                        node, (astroid.DelName, astroid.AssignName)
                    )
                    if (
                        recursive_klass
                        or defined_by_stmt
                        or annotation_return
                        or isinstance(defstmt, astroid.Delete)
                    ):
                        if not utils.node_ignores_exception(node, NameError):
                            # Handle postponed evaluation of annotations
                            if not (
                                self._postponed_evaluation_enabled
                                and isinstance(
                                    stmt,
                                    (
                                        astroid.AnnAssign,
                                        astroid.FunctionDef,
                                        astroid.Arguments,
                                    ),
                                )
                                and name in node.root().locals
                            ):
                                self.add_message(
                                    "undefined-variable", args=name, node=node
                                )
                    elif base_scope_type != "lambda":
                        # E0601 may *not* occurs in lambda scope.
                        # Handle postponed evaluation of annotations
                        if not (
                            self._postponed_evaluation_enabled
                            and isinstance(
                                stmt, (astroid.AnnAssign, astroid.FunctionDef)
                            )
                        ):
                            self.add_message(
                                "used-before-assignment", args=name, node=node
                            )
                    elif base_scope_type == "lambda":
                        # E0601 can occur in class-level scope in lambdas, as in
                        # the following example:
                        # class A:
                        #     x = lambda attr: f + attr
                        #     f = 42
                        if isinstance(frame, astroid.ClassDef) and name in frame.locals:
                            if isinstance(node.parent, astroid.Arguments):
                                if stmt.fromlineno <= defstmt.fromlineno:
                                    # Doing the following is fine:
                                    # class A:
                                    #     x = 42
                                    #     y = lambda attr=x: attr
                                    self.add_message(
                                        "used-before-assignment", args=name, node=node
                                    )
                            else:
                                self.add_message(
                                    "undefined-variable", args=name, node=node
                                )
                        elif current_consumer.scope_type == "lambda":
                            self.add_message("undefined-variable", node=node, args=name)
            current_consumer.mark_as_consumed(name, found_node)
            # check it's not a loop variable used outside the loop
            self._loopvar_name(node, name)
            break
        else:
            # we have not found the name, if it isn't a builtin, that's an
            # undefined name !
            if undefined_variable_is_enabled and not (
                name in astroid.Module.scope_attrs
                or utils.is_builtin(name)
                or name in self.config.additional_builtins
                or (
                    name == "__class__"
                    and isinstance(frame, astroid.FunctionDef)
                    and frame.is_method()
                )
            ):
                if not utils.node_ignores_exception(node, NameError):
                    self.add_message("undefined-variable", args=name, node=node)
@utils.check_messages("no-name-in-module")
def visit_import(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
for name, _ in node.names:
parts = name.split(".")
try:
module = next(_infer_name_module(node, parts[0]))
except astroid.ResolveError:
continue
if not isinstance(module, astroid.Module):
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages("no-name-in-module")
def visit_importfrom(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split("."))
@utils.check_messages(
"unbalanced-tuple-unpacking", "unpacking-non-sequence", "self-cls-assignment"
)
def visit_assign(self, node):
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences as well as in case self/cls
get assigned.
"""
self._check_self_cls_assign(node)
if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
return
targets = node.targets[0].itered()
try:
inferred = utils.safe_infer(node.value)
if inferred is not None:
self._check_unpacking(inferred, node, targets)
except astroid.InferenceError:
return
# listcomp have now also their scope
def visit_listcomp(self, node):
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_listcomp(self, _):
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def leave_assign(self, node):
self._store_type_annotation_names(node)
def leave_with(self, node):
self._store_type_annotation_names(node)
def visit_arguments(self, node):
for annotation in node.type_comment_args:
self._store_type_annotation_node(annotation)
# Relying on other checker's options, which might not have been initialized yet.
@decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, "analyse-fallback-blocks", default=False)
@decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, "ignored-modules", default=[])
@decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, "allow-global-unused-variables", default=True)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default_or_decorator = False
if isinstance(frame, astroid.FunctionDef) and node.statement() is frame:
in_annotation_or_default_or_decorator = (
(
node in frame.args.annotations
or node in frame.args.posonlyargs_annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation
)
or frame.args.parent_of(node)
or (frame.decorators and frame.decorators.parent_of(node))
or (
frame.returns
and (node is frame.returns or frame.returns.parent_of(node))
)
)
return in_annotation_or_default_or_decorator
@staticmethod
def _in_lambda_or_comprehension_body(
node: astroid.node_classes.NodeNG, frame: astroid.node_classes.NodeNG
) -> bool:
"""return True if node within a lambda/comprehension body (or similar) and thus should not have access to class attributes in frame"""
child = node
parent = node.parent
while parent is not None:
if parent is frame:
return False
if isinstance(parent, astroid.Lambda) and child is not parent.args:
# Body of lambda should not have access to class attributes.
return True
if (
isinstance(parent, astroid.node_classes.Comprehension)
and child is not parent.iter
):
# Only iter of list/set/dict/generator comprehension should have access.
return True
if isinstance(parent, astroid.scoped_nodes.ComprehensionScope) and not (
parent.generators and child is parent.generators[0]
):
# Body of list/set/dict/generator comprehension should not have access to class attributes.
# Furthermore, only the first generator (if multiple) in comprehension should have access.
return True
child = parent
parent = parent.parent
return False
    @staticmethod
    def _is_variable_violation(
        node,
        name,
        defnode,
        stmt,
        defstmt,
        frame,
        defframe,
        base_scope_type,
        recursive_klass,
    ):
        """Decide whether using *name* at *node* may be a scoping violation.

        Returns a ``(maybee0601, annotation_return, use_outer_definition)``
        tuple: whether a used-before-assignment/undefined-variable message may
        apply, whether the use is a function return annotation reusing the
        enclosing class name, and whether the outer definition should be
        consulted instead.
        """
        # pylint: disable=too-many-nested-blocks
        # node: Node to check for violation
        # name: name of node to check violation for
        # frame: Scope of statement of node
        # base_scope_type: local scope type
        maybee0601 = True
        annotation_return = False
        use_outer_definition = False
        if frame is not defframe:
            maybee0601 = _detect_global_scope(node, frame, defframe)
        elif defframe.parent is None:
            # we are at the module level, check the name is not
            # defined in builtins
            if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
                maybee0601 = False
        else:
            # we are in a local scope, check the name is not
            # defined in global or builtin scope
            # skip this lookup if name is assigned later in function scope/lambda
            # Note: the node.frame() is not the same as the `frame` argument which is
            # equivalent to frame.statement().scope()
            forbid_lookup = (
                isinstance(frame, astroid.FunctionDef)
                or isinstance(node.frame(), astroid.Lambda)
            ) and _assigned_locally(node)
            if not forbid_lookup and defframe.root().lookup(name)[1]:
                maybee0601 = False
                use_outer_definition = stmt == defstmt and not isinstance(
                    defnode, astroid.node_classes.Comprehension
                )
            # check if we have a nonlocal
            elif name in defframe.locals:
                maybee0601 = not any(
                    isinstance(child, astroid.Nonlocal) and name in child.names
                    for child in defframe.get_children()
                )
        if (
            base_scope_type == "lambda"
            and isinstance(frame, astroid.ClassDef)
            and name in frame.locals
        ):
            # This rule verifies that if the definition node of the
            # checked name is an Arguments node and if the name
            # is used a default value in the arguments defaults
            # and the actual definition of the variable label
            # is happening before the Arguments definition.
            #
            # bar = None
            # foo = lambda bar=bar: bar
            #
            # In this case, maybee0601 should be False, otherwise
            # it should be True.
            maybee0601 = not (
                isinstance(defnode, astroid.Arguments)
                and node in defnode.defaults
                and frame.locals[name][0].fromlineno < defstmt.fromlineno
            )
        elif isinstance(defframe, astroid.ClassDef) and isinstance(
            frame, astroid.FunctionDef
        ):
            # Special rule for function return annotations,
            # which uses the same name as the class where
            # the function lives.
            if node is frame.returns and defframe.parent_of(frame.returns):
                maybee0601 = annotation_return = True
            if (
                maybee0601
                and defframe.name in defframe.locals
                and defframe.locals[name][0].lineno < frame.lineno
            ):
                # Detect class assignments with the same
                # name as the class. In this case, no warning
                # should be raised.
                maybee0601 = False
            if isinstance(node.parent, astroid.Arguments):
                maybee0601 = stmt.fromlineno <= defstmt.fromlineno
        elif recursive_klass:
            maybee0601 = True
        else:
            maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
            if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
                if (
                    isinstance(defframe, astroid.FunctionDef)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and stmt is not defstmt
                ):
                    # Single statement function, with the statement on the
                    # same line as the function definition
                    maybee0601 = False
                elif (
                    isinstance(defstmt, astroid.Assign)
                    and isinstance(defstmt.value, astroid.IfExp)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and stmt is defstmt
                ):
                    # Single statement if, with assignment expression on same
                    # line as assignment
                    # x = b if (b := True) else False
                    maybee0601 = False
                elif (
                    isinstance(  # pylint: disable=too-many-boolean-expressions
                        defnode, astroid.NamedExpr
                    )
                    and frame is defframe
                    and defframe.parent_of(stmt)
                    and stmt is defstmt
                    and (
                        (
                            defnode.lineno == node.lineno
                            and defnode.col_offset < node.col_offset
                        )
                        or (defnode.lineno < node.lineno)
                    )
                ):
                    # Expressions, with assignment expressions
                    # Use only after assignment
                    # b = (c := 2) and c
                    maybee0601 = False
        # Look for type checking definitions inside a type checking guard.
        if isinstance(defstmt, (astroid.Import, astroid.ImportFrom)):
            defstmt_parent = defstmt.parent
            if (
                isinstance(defstmt_parent, astroid.If)
                and defstmt_parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
            ):
                # Exempt those definitions that are used inside the type checking
                # guard or that are defined in both type checking guard branches.
                used_in_branch = defstmt_parent.parent_of(node)
                defined_in_or_else = False
                for definition in defstmt_parent.orelse:
                    if isinstance(definition, astroid.Assign):
                        defined_in_or_else = any(
                            target.name == name for target in definition.targets
                        )
                        if defined_in_or_else:
                            break
                if not used_in_branch and not defined_in_or_else:
                    maybee0601 = True
        return maybee0601, annotation_return, use_outer_definition
    def _ignore_class_scope(self, node):
        """
        Return True if the node is in a local class scope, as an assignment.

        :param node: Node considered
        :type node: astroid.Node
        :return: True if the node is in a local class scope, as an assignment. False otherwise.
        :rtype: bool
        """
        # Detect if we are in a local class scope, as an assignment.
        # For example, the following is fair game.
        #
        # class A:
        #    b = 1
        #    c = lambda b=b: b * b
        #
        # class B:
        #    tp = 1
        #    def func(self, arg: tp):
        #        ...
        # class C:
        #    tp = 2
        #    def func(self, arg=tp):
        #        ...
        # class C:
        #    class Tp:
        #        pass
        #    class D(Tp):
        #        ...
        name = node.name
        frame = node.statement().scope()
        in_annotation_or_default_or_decorator = self._defined_in_function_definition(
            node, frame
        )
        in_ancestor_list = utils.is_ancestor_name(frame, node)
        # Names used in annotations/defaults/decorators or in a base-class list
        # resolve in the scope *enclosing* the definition, not in it.
        if in_annotation_or_default_or_decorator or in_ancestor_list:
            frame_locals = frame.parent.scope().locals
        else:
            frame_locals = frame.locals
        return not (
            (
                isinstance(frame, astroid.ClassDef)
                or in_annotation_or_default_or_decorator
            )
            and not self._in_lambda_or_comprehension_body(node, frame)
            and name in frame_locals
        )
    def _loopvar_name(self, node, name):
        """Emit undefined-loop-variable when *name* is a loop variable used
        outside its loop and the loop may not execute at all."""
        # filter variables according to node's scope
        if not self.linter.is_message_enabled("undefined-loop-variable"):
            return
        astmts = [stmt for stmt in node.lookup(name)[1] if hasattr(stmt, "assign_type")]
        # If this variable usage exists inside a function definition
        # that exists in the same loop,
        # the usage is safe because the function will not be defined either if
        # the variable is not defined.
        scope = node.scope()
        if isinstance(scope, astroid.FunctionDef) and any(
            asmt.statement().parent_of(scope) for asmt in astmts
        ):
            return
        # filter variables according their respective scope test is_statement
        # and parent to avoid #74747. This is not a total fix, which would
        # introduce a mechanism similar to special attribute lookup in
        # modules. Also, in order to get correct inference in this case, the
        # scope lookup rules would need to be changed to return the initial
        # assignment (which does not exist in code per se) as well as any later
        # modifications.
        if (
            not astmts
            or (astmts[0].is_statement or astmts[0].parent)
            and astmts[0].statement().parent_of(node)
        ):
            _astmts = []
        else:
            _astmts = astmts[:1]
        # Drop assignments shadowed by an earlier enclosing one, except when
        # the later one lives in a for/else branch.
        for i, stmt in enumerate(astmts[1:]):
            if astmts[i].statement().parent_of(stmt) and not in_for_else_branch(
                astmts[i].statement(), stmt
            ):
                continue
            _astmts.append(stmt)
        astmts = _astmts
        if len(astmts) != 1:
            return
        assign = astmts[0].assign_type()
        if not (
            isinstance(
                assign, (astroid.For, astroid.Comprehension, astroid.GeneratorExp)
            )
            and assign.statement() is not node.statement()
        ):
            return
        # For functions we can do more by inferring the length of the itered object
        if not isinstance(assign, astroid.For):
            self.add_message("undefined-loop-variable", args=name, node=node)
            return
        try:
            inferred = next(assign.iter.infer())
        except astroid.InferenceError:
            self.add_message("undefined-loop-variable", args=name, node=node)
        else:
            if (
                isinstance(inferred, astroid.Instance)
                and inferred.qname() == BUILTIN_RANGE
            ):
                # Consider range() objects safe, even if they might not yield any results.
                return
            # Consider sequences.
            sequences = (
                astroid.List,
                astroid.Tuple,
                astroid.Dict,
                astroid.Set,
                objects.FrozenSet,
            )
            if not isinstance(inferred, sequences):
                self.add_message("undefined-loop-variable", args=name, node=node)
                return
            elements = getattr(inferred, "elts", getattr(inferred, "items", []))
            if not elements:
                # Iterating over a literal empty sequence: the body never runs.
                self.add_message("undefined-loop-variable", args=name, node=node)
    def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
        """Report *name* (defined by *stmt* inside *node*'s scope) as unused,
        choosing between unused-argument, unused-import, unused-variable and
        possibly-unused-variable."""
        # pylint: disable=too-many-branches
        # Ignore some special names specified by user configuration.
        if self._is_name_ignored(stmt, name):
            return
        # Ignore names that were added dynamically to the Function scope
        if (
            isinstance(node, astroid.FunctionDef)
            and name == "__class__"
            and len(node.locals["__class__"]) == 1
            and isinstance(node.locals["__class__"][0], astroid.ClassDef)
        ):
            return
        # Ignore names imported by the global statement.
        if isinstance(stmt, (astroid.Global, astroid.Import, astroid.ImportFrom)):
            # Detect imports, assigned to global statements.
            if global_names and _import_name_is_global(stmt, global_names):
                return
        argnames = list(
            itertools.chain(node.argnames(), [arg.name for arg in node.args.kwonlyargs])
        )
        # Care about functions with unknown argument (builtins)
        if name in argnames:
            self._check_unused_arguments(name, node, stmt, argnames)
        else:
            if stmt.parent and isinstance(
                stmt.parent, (astroid.Assign, astroid.AnnAssign)
            ):
                if name in nonlocal_names:
                    return
            qname = asname = None
            if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
                # Need the complete name, which we don't have in .locals.
                if len(stmt.names) > 1:
                    import_names = next(
                        (names for names in stmt.names if name in names), None
                    )
                else:
                    import_names = stmt.names[0]
                if import_names:
                    qname, asname = import_names
                    name = asname or qname
            if _has_locals_call_after_node(stmt, node.scope()):
                # A later locals() call may read the name dynamically.
                message_name = "possibly-unused-variable"
            else:
                if isinstance(stmt, astroid.Import):
                    if asname is not None:
                        msg = f"{qname} imported as {asname}"
                    else:
                        msg = "import %s" % name
                    self.add_message("unused-import", args=msg, node=stmt)
                    return
                if isinstance(stmt, astroid.ImportFrom):
                    if asname is not None:
                        msg = f"{qname} imported from {stmt.modname} as {asname}"
                    else:
                        msg = f"{name} imported from {stmt.modname}"
                    self.add_message("unused-import", args=msg, node=stmt)
                    return
                message_name = "unused-variable"
            # Don't check function stubs created only for type information
            if utils.is_overload_stub(node):
                return
            self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (
isinstance(stmt, astroid.AssignName)
and isinstance(stmt.parent, astroid.Arguments)
or isinstance(stmt, astroid.Arguments)
):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
    def _check_unused_arguments(self, name, node, stmt, argnames):
        """Report the unused argument *name* of function *node*, skipping the
        well-known cases (self/cls, overrides, callbacks, stubs, protocols)."""
        is_method = node.is_method()
        klass = node.parent.frame()
        if is_method and isinstance(klass, astroid.ClassDef):
            # Confidence drops when the class bases could not be resolved.
            confidence = (
                INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
            )
        else:
            confidence = HIGH
        if is_method:
            # Don't warn for the first argument of a (non static) method
            if node.type != "staticmethod" and name == argnames[0]:
                return
            # Don't warn for argument of an overridden method
            overridden = overridden_method(klass, node.name)
            if overridden is not None and name in overridden.argnames():
                return
            if node.name in utils.PYMETHODS and node.name not in (
                "__init__",
                "__new__",
            ):
                return
        # Don't check callback arguments
        if any(
            node.name.startswith(cb) or node.name.endswith(cb)
            for cb in self.config.callbacks
        ):
            return
        # Don't check arguments of singledispatch.register function.
        if utils.is_registered_in_singledispatch_function(node):
            return
        # Don't check function stubs created only for type information
        if utils.is_overload_stub(node):
            return
        # Don't check protocol classes
        if utils.is_protocol_class(klass):
            return
        self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
    def _check_late_binding_closure(self, node, assignment_node):
        """Emit cell-var-from-loop when a closure captures a loop variable,
        which is late-bound and will see the loop's final value."""
        if not self.linter.is_message_enabled("cell-var-from-loop"):
            return

        def _is_direct_lambda_call():
            # The lambda is called on the spot, so the capture is evaluated now.
            return (
                isinstance(node_scope.parent, astroid.Call)
                and node_scope.parent.func is node_scope
            )

        node_scope = node.scope()
        if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
            return
        if isinstance(node.parent, astroid.Arguments):
            return
        if isinstance(assignment_node, astroid.Comprehension):
            if assignment_node.parent.parent_of(node.scope()):
                self.add_message("cell-var-from-loop", node=node, args=node.name)
        else:
            assign_scope = assignment_node.scope()
            maybe_for = assignment_node
            # Walk up from the assignment looking for an enclosing For loop,
            # stopping (via break) once we leave the assignment's own scope.
            while maybe_for and not isinstance(maybe_for, astroid.For):
                if maybe_for is assign_scope:
                    break
                maybe_for = maybe_for.parent
            else:
                # while/else: the walk ended without break, i.e. we either
                # found a For node or ran out of parents.
                if (
                    maybe_for
                    and maybe_for.parent_of(node_scope)
                    and not _is_direct_lambda_call()
                    and not isinstance(node_scope.statement(), astroid.Return)
                ):
                    self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, astroid.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
def _has_homonym_in_upper_function_scope(self, node, index):
"""
Return True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:param node: node to check for
:type node: astroid.Node
:param index: index of the current consumer inside self._to_consume
:type index: int
:return: True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:rtype: bool
"""
for _consumer in self._to_consume[index - 1 :: -1]:
if _consumer.scope_type == "function" and node.name in _consumer.to_consume:
return True
return False
def _store_type_annotation_node(self, type_annotation):
"""Given a type annotation, store all the name nodes it refers to"""
if isinstance(type_annotation, astroid.Name):
self._type_annotation_names.append(type_annotation.name)
return
if not isinstance(type_annotation, astroid.Subscript):
return
if (
isinstance(type_annotation.value, astroid.Attribute)
and isinstance(type_annotation.value.expr, astroid.Name)
and type_annotation.value.expr.name == TYPING_MODULE
):
self._type_annotation_names.append(TYPING_MODULE)
return
self._type_annotation_names.extend(
annotation.name
for annotation in type_annotation.nodes_of_class(astroid.Name)
)
def _store_type_annotation_names(self, node):
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
    def _check_self_cls_assign(self, node):
        """Check that self/cls don't get assigned"""
        assign_names = {
            target.name
            for target in node.targets
            if isinstance(target, astroid.AssignName)
        }
        scope = node.scope()
        # If the target was declared nonlocal, the binding belongs to the
        # enclosing function, so inspect that scope instead.
        nonlocals_with_same_name = any(
            child
            for child in scope.body
            if isinstance(child, astroid.Nonlocal) and assign_names & set(child.names)
        )
        if nonlocals_with_same_name:
            scope = node.scope().parent.scope()
        # Only instance/class methods have a self/cls first argument.
        if not (
            isinstance(scope, astroid.scoped_nodes.FunctionDef)
            and scope.is_method()
            and "builtins.staticmethod" not in scope.decoratornames()
        ):
            return
        argument_names = scope.argnames()
        if not argument_names:
            return
        self_cls_name = argument_names[0]
        target_assign_names = (
            target.name
            for target in node.targets
            if isinstance(target, astroid.node_classes.AssignName)
        )
        if self_cls_name in target_assign_names:
            self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
    def _check_unpacking(self, inferred, node, targets):
        """Check for unbalanced tuple unpacking
        and unpacking non sequences.

        *inferred* is the inferred value of the assignment's RHS, *targets*
        the list of LHS target nodes.
        """
        if utils.is_inside_abstract_class(node):
            return
        if utils.is_comprehension(node):
            return
        if inferred is astroid.Uninferable:
            return
        if (
            isinstance(inferred.parent, astroid.Arguments)
            and isinstance(node.value, astroid.Name)
            and node.value.name == inferred.parent.vararg
        ):
            # Variable-length argument, we can't determine the length.
            return
        if isinstance(inferred, (astroid.Tuple, astroid.List)):
            # attempt to check unpacking is properly balanced
            values = inferred.itered()
            if len(targets) != len(values):
                # Check if we have starred nodes.
                if any(isinstance(target, astroid.Starred) for target in targets):
                    # A starred target absorbs any surplus; lengths may differ.
                    return
                self.add_message(
                    "unbalanced-tuple-unpacking",
                    node=node,
                    args=(
                        _get_unpacking_extra_info(node, inferred),
                        len(targets),
                        len(values),
                    ),
                )
        # attempt to check unpacking may be possible (ie RHS is iterable)
        elif not utils.is_iterable(inferred):
            self.add_message(
                "unpacking-non-sequence",
                node=node,
                args=(_get_unpacking_extra_info(node, inferred),),
            )
    def _check_module_attrs(self, node, module, module_names):
        """check that module_names (list of string) are accessible through the
        given module
        if the latest access name corresponds to a module, return it
        """
        while module_names:
            name = module_names.pop(0)
            if name == "__dict__":
                # Anything can live under __dict__; stop resolving.
                module = None
                break
            try:
                module = next(module.getattr(name)[0].infer())
                if module is astroid.Uninferable:
                    return None
            except astroid.NotFoundError:
                if module.name in self._ignored_modules:
                    return None
                self.add_message(
                    "no-name-in-module", args=(name, module.name), node=node
                )
                return None
            except astroid.InferenceError:
                return None
        if module_names:
            # Leftover components after bailing out on __dict__.
            modname = module.name if module else "__dict__"
            self.add_message(
                "no-name-in-module", node=node, args=(".".join(module_names), modname)
            )
            return None
        if isinstance(module, astroid.Module):
            return module
        return None
    def _check_all(self, node, not_consumed):
        """Validate the module's ``__all__``: each element must be a string
        naming something defined in (or importable from) the module.

        Names listed in ``__all__`` are removed from *not_consumed* so they
        are not later reported as unused.
        """
        assigned = next(node.igetattr("__all__"))
        if assigned is astroid.Uninferable:
            return
        for elt in getattr(assigned, "elts", ()):
            try:
                elt_name = next(elt.infer())
            except astroid.InferenceError:
                continue
            if elt_name is astroid.Uninferable:
                continue
            if not elt_name.parent:
                continue
            if not isinstance(elt_name, astroid.Const) or not isinstance(
                elt_name.value, str
            ):
                self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
                continue
            elt_name = elt_name.value
            # If elt is in not_consumed, remove it from not_consumed
            if elt_name in not_consumed:
                del not_consumed[elt_name]
                continue
            if elt_name not in node.locals:
                if not node.package:
                    self.add_message(
                        "undefined-all-variable", args=(elt_name,), node=elt
                    )
                else:
                    # In a package __init__, the name may refer to a submodule.
                    basename = os.path.splitext(node.file)[0]
                    if os.path.basename(basename) == "__init__":
                        name = node.name + "." + elt_name
                        try:
                            modutils.file_from_modpath(name.split("."))
                        except ImportError:
                            self.add_message(
                                "undefined-all-variable", args=(elt_name,), node=elt
                            )
                        except SyntaxError:
                            # don't yield a syntax-error warning,
                            # because it will be later yielded
                            # when the file will be checked
                            pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, nodes in not_consumed.items():
for node in nodes:
self.add_message("unused-variable", args=(name,), node=node)
    def _check_imports(self, not_consumed):
        """Report module-level imports that were never used, distinguishing
        plain imports, from-imports and wildcard imports.

        Called at module exit; also discards the consumption stack.
        """
        local_names = _fix_dot_imports(not_consumed)
        checked = set()
        for name, stmt in local_names:
            for imports in stmt.names:
                real_name = imported_name = imports[0]
                if imported_name == "*":
                    real_name = name
                as_name = imports[1]
                if real_name in checked:
                    continue
                if name not in (real_name, as_name):
                    continue
                checked.add(real_name)
                # Names referenced only in type comments still count as used.
                is_type_annotation_import = (
                    imported_name in self._type_annotation_names
                    or as_name in self._type_annotation_names
                )
                if isinstance(stmt, astroid.Import) or (
                    isinstance(stmt, astroid.ImportFrom) and not stmt.modname
                ):
                    if isinstance(stmt, astroid.ImportFrom) and SPECIAL_OBJ.search(
                        imported_name
                    ):
                        # Filter special objects (__doc__, __all__) etc.,
                        # because they can be imported for exporting.
                        continue
                    if is_type_annotation_import:
                        # Most likely a typing import if it wasn't used so far.
                        continue
                    if as_name == "_":
                        continue
                    if as_name is None:
                        msg = "import %s" % imported_name
                    else:
                        msg = f"{imported_name} imported as {as_name}"
                    if not _is_type_checking_import(stmt):
                        self.add_message("unused-import", args=msg, node=stmt)
                elif isinstance(stmt, astroid.ImportFrom) and stmt.modname != FUTURE:
                    if SPECIAL_OBJ.search(imported_name):
                        # Filter special objects (__doc__, __all__) etc.,
                        # because they can be imported for exporting.
                        continue
                    if _is_from_future_import(stmt, name):
                        # Check if the name is in fact loaded from a
                        # __future__ import in another module.
                        continue
                    if is_type_annotation_import:
                        # Most likely a typing import if it wasn't used so far.
                        continue
                    if imported_name == "*":
                        self.add_message("unused-wildcard-import", args=name, node=stmt)
                    else:
                        if as_name is None:
                            msg = f"{imported_name} imported from {stmt.modname}"
                        else:
                            fields = (imported_name, stmt.modname, as_name)
                            msg = "%s imported from %s as %s" % fields
                        if not _is_type_checking_import(stmt):
                            self.add_message("unused-import", args=msg, node=stmt)
        # Module analysis is done; drop the consumption stack.
        del self._to_consume
def _check_metaclasses(self, node):
""" Update consumption analysis for metaclasses. """
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, astroid.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
    def _check_classdef_metaclasses(self, klass, parent_node):
        """Check the explicit metaclass of *klass* and mark names it consumes.

        Returns a list of ``(scope_locals, consumed_key)`` pairs for names the
        metaclass declaration uses, so the caller can exclude them from
        unused-name reporting.  Emits ``undefined-variable`` when the metaclass
        name cannot be resolved anywhere.
        """
        if not klass._metaclass:
            # Skip if this class doesn't use explicitly a metaclass, but inherits it from ancestors
            return []
        consumed = []  # [(scope_locals, consumed_key)]
        metaclass = klass.metaclass()
        name = None
        # Extract the textual name of the metaclass expression: either a bare
        # Name node, the leftmost Name of a dotted Attribute chain, or the
        # root module name of the resolved metaclass.
        if isinstance(klass._metaclass, astroid.Name):
            name = klass._metaclass.name
        elif isinstance(klass._metaclass, astroid.Attribute) and klass._metaclass.expr:
            attr = klass._metaclass.expr
            while not isinstance(attr, astroid.Name):
                attr = attr.expr
            name = attr.name
        elif metaclass:
            name = metaclass.root().name
        found = None
        name = METACLASS_NAME_TRANSFORMS.get(name, name)
        if name:
            # check enclosing scopes starting from most local
            for scope_locals, _, _ in self._to_consume[::-1]:
                found = scope_locals.get(name)
                if found:
                    consumed.append((scope_locals, name))
                    break
        if found is None and not metaclass:
            # The name was not consumable and astroid could not resolve a
            # metaclass either; re-derive the name (without the transform
            # table this time) and report it if it is truly undefined.
            name = None
            if isinstance(klass._metaclass, astroid.Name):
                name = klass._metaclass.name
            elif (
                isinstance(klass._metaclass, astroid.Attribute)
                and klass._metaclass.expr
            ):
                name = klass._metaclass.expr.name
            if name is not None:
                if not (
                    name in astroid.Module.scope_attrs
                    or utils.is_builtin(name)
                    or name in self.config.additional_builtins
                    or name in parent_node.locals
                ):
                    self.add_message("undefined-variable", node=klass, args=(name,))
        return consumed
def register(linter):
    """Required plugin entry point: auto-register this checker.

    Called by pylint's plugin loader with the active *linter*; installs a
    ``VariablesChecker`` instance on it.
    """
    linter.register_checker(VariablesChecker(linter))
| [
"jaddou2005@gmail.com"
] | jaddou2005@gmail.com |
ad564b5e28eab19b52659de7ba1d8a85b5bd38e4 | f9d83106dc1e259a18b4fd923bd7641adec6f669 | /PatientReport/migrations/0023_auto_20150921_1549.py | e4404e44fcffbcc667daee501be00fdf5e26d985 | [] | no_license | Navid777/FJSharif | a0f2b6df5f16685a26e8d967e000eb1d22e251a1 | ae620d314ca71639fb3ff287b6c84dbc69d63379 | refs/heads/master | 2016-09-06T04:17:52.851926 | 2015-10-17T12:27:51 | 2015-10-17T12:27:51 | 37,974,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,473 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the PatientReport app.

    Adds description/PDF/upload fields and loosens several existing fields.
    NOTE(review): ``TextField(default=0)`` stores the integer 0 as the
    one-off default for existing rows (auto-generated answer) — presumably
    intended to be an empty string; confirm before reusing this pattern.
    """
    dependencies = [
        ('PatientReport', '0022_auto_20150901_1210'),
    ]
    operations = [
        migrations.AddField(
            model_name='guide',
            name='description',
            field=models.TextField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='preplanning',
            name='description',
            field=models.TextField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='pdf',
            field=models.FileField(null=True, upload_to=b'Report_PDFs/', blank=True),
        ),
        migrations.AddField(
            model_name='uploadreport',
            name='guide_stls',
            field=models.FileField(default=0, upload_to=b'temp', verbose_name=b"Guides' STL Files( a Zip file)"),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='uploadreport',
            name='landmarks',
            field=models.FileField(default=0, upload_to=b'temp', verbose_name=b'Report Landmarks'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='uploadreport',
            name='pre_stls',
            field=models.FileField(default=0, upload_to=b'temp', verbose_name=b"Preplannings' STL Files( a Zip file)"),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='uploadreport',
            name='report',
            field=models.FileField(default=0, upload_to=b'temp', verbose_name=b'Report( a PDF report file)'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='stlfile',
            name='pre_planning',
            field=models.ForeignKey(blank=True, to='PatientReport.PrePlanning', null=True),
        ),
        migrations.AlterField(
            model_name='uploadreport',
            name='attributes',
            field=models.FileField(upload_to=b'temp', verbose_name=b'Report Attributes( an Excel file)'),
        ),
        migrations.AlterField(
            model_name='uploadreport',
            name='zip_pictures',
            field=models.FileField(upload_to=b'temp', verbose_name=b'Report Pictures( a Zip file)'),
        ),
    ]
| [
"navid.azami@gmail.com"
] | navid.azami@gmail.com |
5ffc2b2a3766186bc0500851ad45cc0947381116 | 66cdae14afa8118fe38c57d1636bc0362b9cc2ae | /graphs/plots.py | b25d1668c60dacdd93978df15129def565525695 | [
"Apache-2.0"
] | permissive | yafraorg/pythonsamples | 792c202d1ab05b77e6517c2758364b2c2c43d378 | 8f61292c89b497d4067d2fb1873e5281877901f3 | refs/heads/master | 2022-12-15T16:21:04.279910 | 2020-04-20T20:58:49 | 2020-04-20T20:58:49 | 218,777,440 | 0 | 0 | Apache-2.0 | 2022-11-22T05:51:40 | 2019-10-31T13:51:52 | Python | UTF-8 | Python | false | false | 335 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Demo script: plot one period of a shifted sine wave, save it to
# test.png in the working directory, then open an interactive window.
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
       title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
plt.show()  # blocks until the plot window is closed (GUI backends)
"martin.webernissle@pax.ch"
] | martin.webernissle@pax.ch |
9158cb280b7ba01895aec8462f9b58ca055a8e76 | f755534a615eb50c3fb0e09fb1596dcd7a6c55ae | /api/app.py | 69ae1d4e0d613b651627a51a9763bf81f317cc63 | [] | no_license | tomeinstein/workout_logger | 5a81eae7d8084fe7f09e87c6a9a658c4bb055958 | 5e140ed169a9a76969fd99ca0947dddb28c26bcc | refs/heads/master | 2022-12-24T17:07:15.770882 | 2020-09-23T16:29:24 | 2020-09-23T16:29:24 | 298,024,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from flask import Flask
from flask_restful import Api
from api.resources.user import User, Users
from api.resources.db_management import close_db
# Wire up the REST API: a single-user resource (GET/... by id) and a
# collection resource, with the DB connection closed on app-context teardown.
app = Flask(__name__)
api = Api(app)
app.teardown_appcontext(close_db)
api.add_resource(User, '/user', '/user/<int:id>')
api.add_resource(Users, '/users')
app.run()  # starts the Flask development server (blocking)
| [
"tomeinstein18@gmail.com"
] | tomeinstein18@gmail.com |
0763a12895c3bcf6b3d6084d55754a7614c7f63f | e6cfa09c4f6467b4ff147c87c7fb25a1c51b172a | /Kegiatan 2 (9).py | abefd5d087bf7a1d25d60de044b4262329b7dedb | [] | no_license | ayyub127/Praktikum-Algopro | ed11939496952a0042cd2ea095e3f7cf2918586e | 3de586fbadc246969e344be0099eea61c400e357 | refs/heads/master | 2020-04-05T17:24:43.299192 | 2018-12-24T07:32:17 | 2018-12-24T07:32:17 | 157,059,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | latihan = open("L200183127", "w")
# Write the student-record fields to the text file opened above
# (the ``latihan`` file handle is created earlier in this script).
latihan.write("L200184092 \n")
latihan.write("11/07/1999 \n")
latihan.write("Muhammad Ayyub Nasrullah \n")
latihan.write("Dubai")
latihan.close()
import shelve
# Read the four lines back from the text file.
data = open("L200183127", "r")
NIM = data.readline()
TL = data.readline()
Nama = data.readline()
Kota = data.readline()
# BUG FIX: the original had ``data.close`` (bare attribute access, a no-op)
# which silently left the file handle open; actually call the method.
data.close()
# Persist the record in a shelve database under the key "databaru".
data = shelve.open("Ayyub")
data["databaru"] = [Nama, Kota, TL, NIM]
data.close()
# Re-open the shelf and print the stored fields back out.
data = shelve.open("Ayyub")
print(data["databaru"][0])
print(data["databaru"][1])
print(data["databaru"][2])
print(data["databaru"][3])
data.close()  # release the shelf handle when done
| [
"noreply@github.com"
] | ayyub127.noreply@github.com |
de9aa85ba68b22232f620903b947ceec4e31f5b5 | 782744e5fc4f341a330d0372faa4a854102825bc | /archive/ver_sub_pyscf | 62253a1d06f980fb559c808af4e1b965d22bd57e | [] | no_license | bbusemeyer/misc | 242ea8780b7aa780ab26cc7560da9e7cccb15d73 | 7c4426684a03d264ff2925e40d3f20b1907803bc | refs/heads/master | 2022-09-30T19:30:49.593545 | 2022-07-29T22:09:16 | 2022-07-29T22:09:16 | 45,492,046 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | #!/usr/bin/env python3
# Submit a Python (PySCF) script to a PBS/Torque queue: build a qsub
# batch file from command-line options, write it to qsub.in, and submit it.
import sys
import subprocess as sub
import argparse
import os
import mython as my
dft='(=%(default)s)'
parser=argparse.ArgumentParser(sys.argv[0].split('/')[-1])
parser.add_argument('inpfn',type=str,
    help='Python script to be submitted.')
parser.add_argument('-np',default=8,type=int,
    help='Number of processors per node.'+dft)
parser.add_argument('-nn',default=1,type=int,
    help='Number of nodes.'+dft)
parser.add_argument('-t',dest='time',default='500:00:00',type=str,
    help='Time string.'+dft)
parser.add_argument('-q',dest='queue',default='batch',type=str,
    help='Queue.'+dft)
args=parser.parse_args()
assert args.nn==1, "Only tested on one node for now."
# Lines of the generated PBS batch script; PYTHONPATH is propagated so the
# job sees the same module search path as this submitting interpreter.
qsub_lines=[
    "#!/bin/bash",
    "#PBS -l nodes=%d:ppn=%d"%(args.nn,args.np),
    "#PBS -l walltime=%s"%args.time,
    "#PBS -N %s"%args.inpfn,
    "#PBS -e $PBS_JOBID.err",
    "#PBS -o $PBS_JOBID.out",
    "#PBS -q %s"%args.queue,
    "",
    "# To add certain modules that you do not have added via ~/.modules",
    ". /opt/modules/default/init/bash",
    "",
    "export PYTHONPATH=%s"%(':'.join(sys.path)),
    "cd $PBS_O_WORKDIR",
    "export OMP_NUM_THREADS=%d"%(args.nn*args.np),
    "mpirun -n {nt} /usr/bin/python {inp} > {inp}.stdout"\
        .format(nt=args.nn*args.np,inp=args.inpfn),
    ]
with open('qsub.in','w') as outf:
  outf.write('\n'.join(qsub_lines))
# Submit and echo the job id returned by qsub.
print(sub.check_output("qsub qsub.in",shell=True).decode())
| [
"bbusemeyer@gmail.com"
] | bbusemeyer@gmail.com | |
cb90734c9366c0f8e64f2c414d8152f1f60034bd | e2eb2a89b7d96a26bed379c572612412e9d6f258 | /chap2/PreCG1.py | e941d9946894b6757d615025ecc129844e2b4487 | [] | no_license | chdlkl/DigitalComputation | 14ca61cc06de225f8492103bc670fb81613cf1fd | 31560bbf6ec6bd40b984c1814bc26badff39d91c | refs/heads/master | 2018-09-06T01:41:07.477414 | 2018-06-04T14:29:43 | 2018-06-04T14:29:43 | 117,341,939 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 18 10:57:51 2018
@author: luk
"""
"""
On ill-conditioned systems the conjugate gradient (CG) method can perform
even worse than Gaussian elimination with partial pivoting.  Preconditioning
mitigates this by transforming the problem into a better-conditioned system
before applying CG.  This script uses the Jacobi preconditioner M = D,
where D is the diagonal matrix of A.
"""
import numpy as np
n = 3
# Symmetric positive-definite test system A x = b (exact solution 1, 2, 3).
a = np.array( [[4., -2., 2. ],[-2., 2., -4.],[2., -4., 11.]], dtype = np.float64 )
b = np.array( [6., -10., 27.], dtype = np.float64 )
alpha = 0.0
beta = 0.0
M = np.zeros_like( a, dtype = np.float64 )
x = np.zeros( (n,), dtype = np.float64 )
d = np.zeros_like( x, dtype = np.float64 )
r = np.zeros_like( x, dtype = np.float64 )
z = np.zeros_like( x, dtype = np.float64 )
rtmp = np.zeros_like( x, dtype = np.float64 )
ztmp = np.zeros_like( x, dtype = np.float64 )
# Store inv(M) directly: for the Jacobi preconditioner inv(M) = inv(diag(A)).
for i in range(n):
    M[i,i] = 1.0 / a[i,i]
r = b - np.matmul( a,x ) # r0 = b - Ax0
z = np.matmul( M,r ) # z0 = Inv(M)*r0
d = z # d0 = z0
for i in range(n):
    # BUG FIX: the original test was ``if all(r < 0.0): quit()``, which is
    # not a convergence criterion (it fires when every residual component
    # happens to be negative) and kills the interpreter before any output.
    # Stop once the residual is numerically zero; iterating past
    # convergence would divide by a ~0 denominator below.
    if np.linalg.norm(r) < 1.0e-12:
        break
    rtmp = r
    ztmp = z
    # alpha_k = (r_k' z_k) / (d_k' A d_k)
    tmp = np.matmul( np.transpose(rtmp),ztmp ) / np.matmul( np.matmul( np.transpose(d),a ), d )
    alpha = tmp
    x = x + alpha * d
    r = r - alpha * np.matmul( a,d )
    z = np.matmul( M,r )
    # beta_k = (r_{k+1}' z_{k+1}) / (r_k' z_k)
    tmp = np.matmul( np.transpose(r),z ) / np.matmul( np.transpose(rtmp),ztmp )
    beta = tmp
    d = z + beta * d
print( " the x of PreCG1 is :" )
for i in range(n):
    print( x[i] )
# Reference solution via the (explicitly formed) inverse, for comparison.
print( "the x of python is :" )
a = np.array( [[4., -2., 2. ],[-2., 2., -4.],[2., -4., 11.]], dtype = np.float64 )
b = np.array( [6., -10., 27.], dtype = np.float64 )
a = np.linalg.inv(a) # invert matrix a
x = np.matmul(a,b)
for i in range(len(x)):
    print( x[i] )
print( " please input Enter and stop!" )
input() | [
"chdlkl@163.com"
] | chdlkl@163.com |
244a752350c1c602db0969fbf42a67fd9c3c43f5 | 68bce7a4fb818fd124ad8135ee00af7d863cf30a | /groups/models.py | e39cdaa4f2d5c780557c098629a41f096e26b76e | [] | no_license | wolf637/simplesocial | 4eefba7c82ad4a80b055343077a3f04ce3693220 | 0acc746f18ab57bc556ecbe2304b80e971fa1556 | refs/heads/master | 2020-03-27T18:48:21.639419 | 2018-09-02T03:43:41 | 2018-09-02T03:43:41 | 146,944,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | from django.db import models
from django.utils.text import slugify
import misaka
from django.urls import reverse
from django.contrib.auth import get_user_model
User = get_user_model()
from django import template
register = template.Library()
class Group(models.Model):
    """A discussion group users can join.

    ``slug`` and ``description_html`` are derived fields, recomputed from
    ``name`` and ``description`` on every save.
    """
    name = models.CharField(max_length=255, unique=True)
    slug = models.SlugField(allow_unicode=True, unique=True)
    description = models.TextField(blank=True, default='')
    # Rendered markdown of ``description``; maintained by save(), not editable.
    description_html = models.TextField(editable=False, default='', blank=True)
    members = models.ManyToManyField(User, through='GroupMember')
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Keep the derived fields in sync with the editable ones.
        self.slug = slugify(self.name)
        self.description_html = misaka.html(self.description)
        super().save(*args, **kwargs)
    def get_absolute_url(self):
        return reverse('groups:single', kwargs={'slug': self.slug})
    class Meta:
        ordering = ['name']
class GroupMember(models.Model):
    """Through-model linking a ``User`` to a ``Group`` (a membership)."""
    # BUG FIX: ``on_delete`` was the string 'SET_NULL', which is not a
    # callable — Django requires one of the ``models.*`` deletion handlers
    # and crashes at delete time otherwise.  ``models.SET_NULL`` (the
    # evident intent) additionally requires ``null=True`` on the field.
    group = models.ForeignKey(Group, related_name='memberships',
                              null=True, on_delete=models.SET_NULL)
    user = models.ForeignKey(User, related_name='user_groups',
                             null=True, on_delete=models.SET_NULL)
    def __str__(self):
        return self.user.username
    class Meta:
        # A user may belong to a given group at most once.
        unique_together = ('group', 'user')
"alexey.volkov@live.com"
] | alexey.volkov@live.com |
fd4dbe8cda242e9be984917356956e2ce2ea0fb8 | a016c7a98f42adaff0c6df9eabccfc16ced31766 | /Global software management project/Colombia.py | 0bf72586eb825cc76e31adc49d0666932fb8973a | [] | no_license | zhoumengxiong/AutoOutputReport | b22bb094a28f498e20f9a07f8fe691b2029894bc | 94159de7e6b80bcd724f63068446b7181524e789 | refs/heads/master | 2021-09-22T17:02:32.347520 | 2018-09-12T10:41:08 | 2018-09-12T10:41:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,490 | py | # -*- coding: utf-8 -*-
import os
import smtplib
import datetime
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from email.mime.image import MIMEImage
def addimg(src, imgid):
    """Load the image file at *src* and wrap it in an attachable MIME part.

    Args:
        src: path to an image file on disk.
        imgid: value for the ``Content-ID`` header, used to reference the
            image from an HTML body via ``<img src="cid:...">``.

    Returns:
        email.mime.image.MIMEImage: the image part, ready to attach.
    """
    # ``with`` guarantees the handle is closed even if MIMEImage raises
    # (the original leaked the handle on that path).
    with open(src, 'rb') as fp:
        msgImage = MIMEImage(fp.read())
    msgImage.add_header('Content-ID', imgid)
    return msgImage
def getNowYearWeek():
    """Return the ISO calendar triple (year, week, weekday) for right now."""
    return datetime.datetime.now().isocalendar()
# Handle non-ASCII (e.g. Chinese) display names in addresses.
def _format_addr(s):
    """Return *s* ('Name <addr>') with the name RFC 2047-encoded as UTF-8."""
    display_name, email_addr = parseaddr(s)
    encoded_name = Header(display_name, 'utf-8').encode()
    return formataddr((encoded_name, email_addr))
def send_email(filepath_in):
    """Build and send the weekly Colombia software-checklist email.

    Attaches the inline banner image plus every file found directly inside
    *filepath_in*; if that directory does not exist, sends a plain
    "no file..." body instead.  Errors during SMTP delivery are swallowed
    and reported via a console message only.

    NOTE(review): the SMTP password is hard-coded below — move it to an
    environment variable or secret store.
    """
    # Sender / recipient configuration.
    from_addr = 'mengxiong.zhou@carlcare.com'
    smtp_server = 'smtp.partner.outlook.cn'
    password = 'TRA+2017'  # third-party app password for the mailbox, not the login password
    to_reciver = ['shanshan.meng@carlcare.com', 'repair.co@carlcare.com',
                  'mengxiong.zhou@carlcare.com']  # To: recipients; can be any mailbox
    cc_reciver = ['barry.gao@carlcare.com', 'mingxiong.hao@carlcare.com', 'fuping.zhu@carlcare.com',
                  'miao.ji@carlcare.com', 'bo.wang@carlcare.com', 'dony.xiang@carlcare.com']
    receiver = to_reciver + cc_reciver
    # Subject uses the previous ISO week number.
    wk = int(getNowYearWeek()[1]) - 1
    content = '''
    <body style="font-family:segoe ui;color:DarkSlateGray;font-size:15px;">
    <p><b>Hi all,<br />Update latest version software check list FYR.</b></p>
    <hr />
    <p><b><i>Newly added or updated items are marked with red.</i></b></font><br /><img src="cid:Colombia">
    <br /><strong>If any question, pls let me know!<strong></p>
    <p><strong>Best regards<br /><font color="#0071BB">Carlcare Service</font> <br /><font color="#399340"><i>Yes!We care</i></font><br />
    Dream Zhou | Carlcare HQ technical support</strong></p>
    </body>
    '''
    subject = 'Colombia latest version software check list_week' + str(wk)
    msg = MIMEMultipart('related')
    msg['From'] = _format_addr('Carlcare HQ Technical Support <%s>' % from_addr)  # displayed sender
    # msg['To'] = _format_addr('管理员 <%s>' % to_addr)  # single displayed recipient
    msg['To'] = ','.join(to_reciver)
    msg['Cc'] = ','.join(cc_reciver)
    msg['Subject'] = Header(subject, 'utf-8')
    # Directory of attachments passed in by the caller.
    # filepath = r'F:\Transsion\New market\latest version software checklist by country'
    filepath = filepath_in
    r = os.path.exists(filepath)
    if r is False:
        msg.attach(MIMEText('no file...', 'plain', 'utf-8'))
    else:
        # The email body is an HTML MIMEText part.
        msg.attach(MIMEText(content, 'html', 'utf-8'))
        msg.attach(addimg(r"F:\Python在路上\PythonI_project\SW checklist images\Colombia.jpg", "Colombia"))
        # Walk the directory and attach every file found in it.
        pathdir = os.listdir(filepath)
        for alldir in pathdir:
            child = os.path.join(filepath, alldir)
            # print(child.decode('gbk'))  # .decode('gbk') worked around Chinese console mojibake
            # Each attachment is a MIMEBase part read from the local file.
            with open(child, 'rb') as f:
                # Set the attachment MIME type and filename (spreadsheet here):
                mime = MIMEBase('file', 'xls', filename=alldir)
                # Add the required headers:
                mime.add_header('Content-Disposition', 'attachment', filename=alldir)
                mime.add_header('Content-ID', '<0>')
                mime.add_header('X-Attachment-Id', '0')
                # Read in the attachment content:
                mime.set_payload(f.read())
                # Base64-encode it:
                encoders.encode_base64(mime)
                # Attach it to the MIMEMultipart message:
                msg.attach(mime)
    try:
        server = smtplib.SMTP(smtp_server, 587)
        server.starttls()
        server.set_debuglevel(1)  # prints each step of the SMTP exchange
        server.login(from_addr, password)
        # print to_addrs
        server.sendmail(from_addr, receiver, msg.as_string())
        server.quit()
    except:
        # NOTE(review): bare except hides the real failure; at minimum log it.
        print('Send failed!')
if __name__ == '__main__':
    # Entry point: send the weekly checklist using the default network share path.
    send_email(r'F:\Transsion\New market\latest version software checklist by country')
| [
"zhoumengxiong@outlook.com"
] | zhoumengxiong@outlook.com |
813f0bc567bbb91bd66bafdc6b7465362a232c33 | 6a0f9a54baf2d7d0864c1a515097bec6ff60b77a | /account_routing/account_routing.py | c837cb01d219a5fad00d3f18a90e3ad12c8d7448 | [] | no_license | bala4901/openerp-custom-addons | e422e86bbaffcbebb17363abf674eb952fb0e370 | ffa0c032ff635f06433e461a99f5fde0ac3dd990 | refs/heads/master | 2021-01-15T19:04:37.871649 | 2015-03-06T18:52:30 | 2015-03-06T18:52:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,535 | py | from openerp import models, fields, api, _
class account_analytic_account(models.Model):
    """Extend analytic accounts with routing links and a display color.

    ``display_color`` falls back to the nearest ancestor's ``color`` when
    the account has none of its own.
    """
    _inherit = "account.analytic.account"
    account_routing_subrouting_ids = fields.One2many('account.routing.subrouting', 'account_analytic_id', 'Routing Subroutes')
    color = fields.Selection([('black','Black'),('gray','Gray'),('maroon','Maroon'),('red','Red'),('purple','Purple'),('green','Green'),('olive','Olive'),('navy','Navy'),('teal','Teal'),],
        string='Color')
    # Computed, not stored: resolved each read by _computed_color below.
    display_color = fields.Selection([('black','Black'),('gray','Gray'),('maroon','Maroon'),('red','Red'),('purple','Purple'),('green','Green'),('olive','Olive'),('navy','Navy'),('teal','Teal'),],
        string='Display Color', store=False, compute='_computed_color')
    @api.one
    def _computed_color(self):
        # set row color for tree view
        self.display_color = 'black'
        if self.color:
            self.display_color = self.color
        else:
            # Inherit the first color found walking up the parent chain.
            parent = self.parent_id
            while parent:
                if parent.color:
                    self.display_color = parent.color
                    break
                parent = parent.parent_id
class account_routing(models.Model):
    """Top-level routing category grouping account-type routing lines."""
    _name = 'account.routing'
    _description = 'Account Routing'
    _order = "name"
    name = fields.Char('Task Category', size=128, required=True)
    routing_lines = fields.One2many('account.routing.line', 'routing_id', 'Account Type Routes', ondelete='cascade')
    section_ids = fields.Many2many('account.routing.section','account_routing_section_rel', 'routing_id', 'section_id', string="Applies to sections")
    @api.multi
    def _get_account_types(self):
        # Ids of the account types used by this routing's lines.
        return [routing_line.account_type_id.id for routing_line in self.routing_lines]
class account_routing_line(models.Model):
    """One account-type route ("task type") within a routing category."""
    _name = 'account.routing.line'
    _description = 'Task Type'
    _order = "account_type_id"
    name = fields.Char(string='Task Type', related='account_type_id.name')
    routing_id = fields.Many2one('account.routing', 'Task Category', required=True, ondelete='cascade')
    account_type_id = fields.Many2one('account.account.type', 'Account Type', required=True, select=True, ondelete='cascade')
    subrouting_ids = fields.One2many('account.routing.subrouting', 'routing_line_id', 'Analytic Routes', ondelete='cascade')
    section_ids = fields.Many2many('account.routing.section','account_routing_line_section_rel', 'routing_line_id', 'section_id', string="Applies to sections")
    @api.one
    @api.constrains('routing_id', 'account_type_id')
    def _one_timekeeping_per_routing(self):
        # At most one line per routing may belong to the Timekeeping section.
        tk_section = self.env.ref('imsar_timekeeping.ar_section_timekeeping').id
        if tk_section in self.section_ids.ids:
            for line in self.routing_id.routing_lines:
                if self.id != line.id and tk_section in line.section_ids.ids:
                    # NOTE(review): ``Warning`` here is the Python builtin, not
                    # openerp.exceptions.Warning — confirm this is intended.
                    raise Warning(_("You may only designate one routing line for Timekeeping."))
    _sql_constraints = [
        ('routing_account_type_uniq', 'unique (routing_id,account_type_id)', 'Only one account type allowed per account routing!')
    ]
class account_routing_subrouting(models.Model):
    """Maps a routing line + analytic account to a real G/L account.

    create/write/unlink are overridden to mirror each subroute onto the
    analytic account's children (rows created that way carry
    ``from_parent=True`` so they can be cleaned up automatically).
    """
    _name = 'account.routing.subrouting'
    _description = 'Account Subrouting'
    _order = "account_analytic_id"
    name = fields.Char(string='Task Identifier', related='account_analytic_id.name', store=True)
    fullname = fields.Char(string="Full Name", compute='_fullname', readonly=True,)
    routing_id = fields.Many2one('account.routing', related='routing_line_id.routing_id', store=True, readonly=True)
    routing_line_id = fields.Many2one('account.routing.line', 'Task Type', required=True)
    account_type_id = fields.Many2one('account.account.type', related='routing_line_id.account_type_id', readonly=True)
    account_analytic_id = fields.Many2one('account.analytic.account', 'Analytic Account', required=True, select=True)
    account_id = fields.Many2one('account.account', 'Real Account', required=True, select=True)
    from_parent = fields.Boolean('Added by parent', readonly=True, default=False)
    type = fields.Selection(related='account_analytic_id.type', readonly=True)
    display_color = fields.Selection(related='account_analytic_id.display_color', readonly=True)
    # the following are only for Quickbooks integration, and should be removed once Quickbooks is no longer used
    old_task_code = fields.Char('Old Task Code')
    qb_company_job = fields.Char('QB Company Job')
    qb_service_item = fields.Char('QB Service Item')
    qb_payroll_item_st = fields.Char('QB ST Payroll Item')
    qb_payroll_item_ot = fields.Char('QB OT Payroll Item')
    @api.one
    def _fullname(self):
        # "Category / Task Type / Task Identifier"
        self.fullname = self.routing_id.name + ' / ' + self.routing_line_id.name + ' / ' + self.name
    @api.model
    def create(self, vals):
        # Reuse an existing subroute for the same (line, analytic) pair
        # instead of violating the SQL uniqueness constraint below.
        existing_subroute = self.search([('routing_line_id','=',vals.get('routing_line_id')),('account_analytic_id','=',vals.get('account_analytic_id'))])
        if not existing_subroute:
            subroute = super(account_routing_subrouting, self).create(vals)
        else:
            subroute = existing_subroute
        # Mirror the subroute onto child analytic accounts, marked from_parent.
        account_analytic_id = self.env['account.analytic.account'].browse(vals.get('account_analytic_id'))
        if len(account_analytic_id.child_ids) > 0:
            for analytic in account_analytic_id.child_ids:
                if analytic.type in ('normal', 'contract'):
                    vals['account_analytic_id'] = analytic.id
                    vals['from_parent'] = True
                    self.env['account.routing.subrouting'].create(vals)
        return subroute
    @api.multi
    def unlink(self):
        # Also remove any child-analytic subroutes this record created.
        if len(self.account_analytic_id.child_ids) > 0:
            for subroute in self.search([('routing_line_id','=',self.routing_line_id.id),('account_analytic_id','in',self.account_analytic_id.child_ids.ids)]):
                if subroute.from_parent:
                    subroute.unlink()
        super(account_routing_subrouting, self).unlink()
    @api.multi
    def write(self, vals):
        # Well this is just stupid. If you try to delete some records in a write, for some reason it chains the write
        # to the records that got deleted and tries to call write on them. I have no idea what's going on. But if you
        # leave out the delete calls, it works as normal. This check is to see if the system is trying to call write
        # on an already deleted record.
        if not self.search([('id','=',self.id)]):
            return True
        # if the analytic didn't change, do the write and end here
        account_analytic_id = self.env['account.analytic.account'].browse(vals.get('account_analytic_id'))
        if not account_analytic_id:
            return super(account_routing_subrouting, self).write(vals)
        # if we're changing analytics, first delete any children of the existing subroute
        if len(self.account_analytic_id.child_ids) > 0:
            for subroute in self.search([('routing_line_id','=',self.routing_line_id.id),('account_analytic_id','in',self.account_analytic_id.child_ids.ids)]):
                if subroute and subroute.from_parent:
                    subroute.unlink()
        # now create subroutes for any children
        if len(account_analytic_id.child_ids) > 0:
            childvals = {
                'routing_line_id': self.routing_line_id.id,
                'account_id': vals.get('account_id') or self.account_id.id,
                'from_parent': True,
            }
            for child_id in account_analytic_id.child_ids.ids:
                childvals['account_analytic_id'] = child_id
                self.env['account.routing.subrouting'].create(childvals)
        return super(account_routing_subrouting, self).write(vals)
    _sql_constraints = [
        ('routing_line_analytic_uniq', 'unique (routing_line_id,account_analytic_id)', 'Only one analytic allowed per account routing line!')
    ]
class account_routing_section(models.Model):
    """A section (application area) that routings/lines can apply to."""
    _name = 'account.routing.section'
    _description = 'Sections (or apps) the routes/lines apply to'
    name = fields.Char('Section', size=64, required=True)
class account_account_type(models.Model):
    """Extend account types with an opt-in flag for special routing rules."""
    _inherit = "account.account.type"
    allow_routing = fields.Boolean('Allow routing', default=False, help="Allows you to set special account routing rules via this account type")
class account_invoice_line(models.Model):
    """Extend invoice lines with the routing category/type/task-code chain.

    The onchange handlers cascade: picking a category clears the type and
    task code; picking a task code fills in the real and analytic accounts.
    """
    _inherit = "account.invoice.line"
    routing_id = fields.Many2one('account.routing', 'Category', required=True,)
    routing_line_id = fields.Many2one('account.routing.line', 'Billing Type', required=True,)
    routing_subrouting_id = fields.Many2one('account.routing.subrouting', 'Task Code', required=True,)
    @api.onchange('routing_id')
    def onchange_routing_id(self):
        # Reset the dependent selections when the category changes.
        self.routing_line_id = ''
        self.routing_subrouting_id = ''
    @api.onchange('routing_line_id')
    def onchange_routing_line_id(self):
        self.routing_subrouting_id = ''
    @api.onchange('routing_subrouting_id')
    def onchange_analytic_id(self):
        # The chosen task code dictates both the G/L and analytic accounts.
        self.account_id = self.routing_subrouting_id.account_id
        self.account_analytic_id = self.routing_subrouting_id.account_analytic_id
    def product_id_change(self, *args, **kwargs):
        # Drop the account suggested by the product so the routing-derived
        # account (set by the onchange above) is not overwritten.
        res = super(account_invoice_line, self).product_id_change(*args, **kwargs)
        if 'account_id' in res['value']:
            del res['value']['account_id']
        return res
| [
"ben.olsen@imsar.com"
] | ben.olsen@imsar.com |
97f9717024df32c598a14e0f724cbdbe3bf03874 | 08d316151302f7ba4ae841c15b7adfe4e348ddf1 | /reviewboard/hostingsvcs/tests/test_sourceforge.py | f51901fb883aee02393c4d7033117c7503f6470d | [
"MIT"
] | permissive | LloydFinch/reviewboard | aa8cd21fac359d49b3dfc5a68c42b857c0c04bd8 | 563c1e8d4dfd860f372281dc0f380a0809f6ae15 | refs/heads/master | 2020-08-10T20:02:32.204351 | 2019-10-02T20:46:08 | 2019-10-02T20:46:08 | 214,411,166 | 2 | 0 | MIT | 2019-10-11T10:44:55 | 2019-10-11T10:44:54 | null | UTF-8 | Python | false | false | 2,727 | py | """Unit tests for the SourceForge hosting service."""
from __future__ import unicode_literals
from reviewboard.hostingsvcs.testing import HostingServiceTestCase
class SourceForgeTests(HostingServiceTestCase):
    """Unit tests for the SourceForge hosting service."""
    # Hosting-service id under test; consumed by HostingServiceTestCase.
    service_name = 'sourceforge'
    def test_service_support(self):
        """Testing SourceForge service support capabilities"""
        self.assertTrue(self.service_class.supports_bug_trackers)
        self.assertTrue(self.service_class.supports_repositories)
    # Each test below checks the clone URL (path) and mirror URL derived
    # from the project name for one SCM tool.
    def test_get_repository_fields_with_bazaar(self):
        """Testing SourceForge.get_repository_fields for Bazaar"""
        self.assertEqual(
            self.get_repository_fields(
                'Bazaar',
                fields={
                    'sourceforge_project_name': 'myproj',
                }
            ),
            {
                'path': 'bzr://myproj.bzr.sourceforge.net/bzrroot/myproj',
                'mirror_path': ('bzr+ssh://myproj.bzr.sourceforge.net/bzrroot/'
                                'myproj'),
            })
    def test_get_repository_fields_with_cvs(self):
        """Testing SourceForge.get_repository_fields for CVS"""
        self.assertEqual(
            self.get_repository_fields(
                'CVS',
                fields={
                    'sourceforge_project_name': 'myproj',
                }
            ),
            {
                'path': (':pserver:anonymous@myproj.cvs.sourceforge.net:'
                         '/cvsroot/myproj'),
                'mirror_path': 'myproj.cvs.sourceforge.net/cvsroot/myproj',
            })
    def test_get_repository_fields_with_mercurial(self):
        """Testing SourceForge.get_repository_fields for Mercurial"""
        self.assertEqual(
            self.get_repository_fields(
                'Mercurial',
                fields={
                    'sourceforge_project_name': 'myproj',
                }
            ),
            {
                'path': 'http://myproj.hg.sourceforge.net:8000/hgroot/myproj',
                'mirror_path': 'ssh://myproj.hg.sourceforge.net/hgroot/myproj',
            })
    def test_get_repository_fields_with_svn(self):
        """Testing SourceForge.get_repository_fields for Subversion"""
        self.assertEqual(
            self.get_repository_fields(
                'Subversion',
                fields={
                    'sourceforge_project_name': 'myproj',
                }
            ),
            {
                'path': 'http://myproj.svn.sourceforge.net/svnroot/myproj',
                'mirror_path': ('https://myproj.svn.sourceforge.net/svnroot/'
                                'myproj'),
            })
"christian@beanbaginc.com"
] | christian@beanbaginc.com |
a68d4bb55951fec4e946c2e50822aaa85b5b4eaf | 41f87b4859c8689800699d5d4e47840f34a6fd50 | /virtual/bin/django-admin.py | 0d347a8c5880dc7ffef3e5659f2cff1523ee7117 | [
"MIT"
] | permissive | FGacheru/Awwards | bbd8f6af258f5d912d87c60a6a0768df329edc4f | 341124ad40f073a3ba3f4c712359a274f5cfa07f | refs/heads/master | 2023-02-23T04:58:55.764949 | 2021-01-25T21:43:38 | 2021-01-25T21:43:38 | 331,965,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | #!/home/frank/Desktop/core-projects/django/awwards/virtual/bin/python
# When the django-admin.py deprecation ends, remove this script.
# Compatibility shim left in the virtualenv's bin/: warns on Django 3.x,
# and fails with a clear message once Django 4.0 (which removed the
# deprecation warning class) is installed.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0: the script itself is obsolete; tell the user to delete it.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"francisgacheru2001@gmail.com"
] | francisgacheru2001@gmail.com |
d7c4bc7341c8259a4b06d2373db1df7c3d374bec | b42c2ae9a74ac1b7a9f4a5c7803f289e11e86d3c | /webform.py | a8e5d26b91e35055543586bc3938ecd76a7cc667 | [
"CC0-1.0"
] | permissive | drumpfhouse/whSignupFormProtestSubmitter | 5e59e9af142a4005214213f8a522c304aa06eaf2 | ccfb7cc3805fe2798dfc0ccef0bcb2b893c0bf11 | refs/heads/master | 2021-01-09T06:16:08.637316 | 2017-03-06T01:39:38 | 2017-03-06T01:39:38 | 80,947,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | """
Defines a Webform class which defines the location (targetURL) and dictionary of fields and values which will
be submitted.
Previously this was implemented as a function as seen below.
def frontPageForm():
targetURL = 'https://forms.whitehouse.gov/webform/email-signup?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov'
dataPayload = {
"submitted[email_address]": _buildFakeEmailAddress(),
"submitted[zip_code]": _buildZipCode(),
"form_id": "webform_client_form_111",
"form_build_id": "form-43X7sWhYGJ1EdVKeroNYk0M2Wnv7I-Bp4qrOtulPg6A"
}
return {'targetURL': targetURL, 'dataPayload': dataPayload}
"""
class Webform:
    """A web-form submission target: a URL plus the form fields to POST.

    Attributes:
        targetURL: URL the form will be submitted to.
        dataPayload: dict mapping form field names to submitted values.
    """
    # Class-level defaults kept for backward compatibility with code that
    # reads Webform.targetURL / Webform.dataPayload directly.
    targetURL = ''
    dataPayload = {}
    def __init__(self, targetURL=None, dataPayload=None):
        # BUG FIX: the original signature used a mutable default argument
        # (``dataPayload={}``); that single dict was shared by every instance
        # constructed without an explicit payload, so mutations leaked
        # between forms.  Use None as the sentinel and build a fresh dict.
        self.targetURL = targetURL
        self.dataPayload = {} if dataPayload is None else dataPayload
    def getData(self):
        """Return the form content as {'targetURL': ..., 'dataPayload': ...}."""
        return {'targetURL': self.targetURL, 'dataPayload': self.dataPayload}
"drumpfhouse@gmail.com"
] | drumpfhouse@gmail.com |
c5e59098c7a8999f2f06dfab844226ad5c47b7af | eebf0990720038a1da22aab3a52fdf75f1370f70 | /venv/bin/jirashell | 201f470df3165b867b389ddcc664c1b1fc15a95c | [] | no_license | haiyongsong1921/GFT2 | 1769c243ed3370ac2e01670adb1132d21bb8616f | 77bb42f1938aec478e9dc79f2bda225472e200ff | refs/heads/master | 2021-04-06T11:43:39.086225 | 2018-04-02T13:17:17 | 2018-04-02T13:17:17 | 125,121,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | #!/Users/sunhaiyang/Documents/GitRepo/PythonProj/warrior_program_backend/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jira.jirashell import main
if __name__ == '__main__':
    # Console-script shim: strip the setuptools wrapper suffix
    # ("-script.py", "-script.pyw" or ".exe") from argv[0] so the jira CLI
    # reports a clean program name, then delegate to jira.jirashell.main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"sunhy339@126.com"
] | sunhy339@126.com | |
3c69a69453081add6b6a63bf5a8c6e8cf120e7e1 | 8a9688d6da19846093b5c1ef5404f72104253df0 | /node_manager_fkie/src/node_manager_fkie/launch_server_handler.py | 96337ad9d79e5e225023ffe6887e9a4567d64901 | [
"BSD-3-Clause"
] | permissive | acschaefer/multimaster_fkie | 0ab10be93cfac0338f99ea361f08fa234c8b7190 | 49558d5f141da8a863d9f6b0e347c96b8622e541 | refs/heads/master | 2020-04-04T12:41:21.533232 | 2018-10-31T09:32:46 | 2018-10-31T09:32:46 | 154,203,623 | 0 | 0 | BSD-3-Clause | 2018-10-22T19:28:47 | 2018-10-22T19:28:46 | null | UTF-8 | Python | false | false | 6,644 | py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QObject, Signal
import random
import socket
import threading
import time
import xmlrpclib
import rospy
class LaunchServerHandler(QObject):
    '''
    A class to retrieve the state of launch servers. To retrieve the state a new
    thread will be created.
    '''

    launch_server_signal = Signal(str, int, list)
    '''
    @ivar: launch_server_signal is a signal (serveruri, pid, nodes), which is emitted, if a info from
    launch server was successful retrieved.
    '''

    error_signal = Signal(str, str)
    '''
    @ivar: error_signal is a signal (serveruri, error message), which is emitted,
    if an error while retrieving a launch server info was occurred.
    '''

    def __init__(self):
        QObject.__init__(self)
        # serveruri -> currently running LaunchServerUpdateThread
        self.__updateThreads = {}
        # serveruri -> delay (seconds); at most one queued follow-up per server
        self.__requestedUpdates = {}
        # Guards both dictionaries. Reentrant because thread-completion
        # callbacks re-enter __handle_requests/__create_update_thread.
        self._lock = threading.RLock()

    def stop(self):
        # Disconnect the Qt signals of all running worker threads so no
        # further callbacks reach this handler (Python 2: print statement,
        # dict.iteritems).
        if len(self.__updateThreads) > 0:
            print "  Shutdown launch update threads..."
            self.__requestedUpdates.clear()
            with self._lock:
                for _, thread in self.__updateThreads.iteritems():
                    thread.launch_server_signal.disconnect()
                    thread.error_signal.disconnect()
            print "  Launch update threads are off!"

    def updateLaunchServerInfo(self, serveruri, delayed_exec=0.0):
        '''
        This method starts a thread to get the informations about the launch server by
        the given RCP uri of the launch server. If all informations are
        retrieved, a C{launch_server_signal} of this class will be emitted. If for given
        serveruri a thread is already running, it will be inserted to the requested
        updates. For the same serveruri only one requested update can be stored.
        On update error the requested update will be ignored.
        This method is thread safe.
        @param serveruri: the URI of the remote launch server
        @type serveruri: C{str}
        @param delayed_exec: Delay the execution of the request for given seconds.
        @type delayed_exec: C{float}
        '''
        with self._lock:
            try:
                if serveruri in self.__updateThreads:
                    # A thread is already running; remember (at most) one
                    # follow-up request, overwriting any earlier one.
                    self.__requestedUpdates[serveruri] = delayed_exec
                else:
                    self.__create_update_thread(serveruri, delayed_exec)
            except:
                # NOTE(review): bare except silently swallows thread-creation
                # failures; deliberate best-effort, kept as-is.
                pass

    def _on_launch_server_info(self, serveruri, pid, nodes):
        # Worker succeeded: forward the result, then service any queued update.
        self.launch_server_signal.emit(serveruri, pid, nodes)
        self.__handle_requests(serveruri)

    def _on_error(self, serveruri, error):
        # Worker failed: forward the error, then service any queued update.
        self.error_signal.emit(serveruri, error)
        self.__handle_requests(serveruri)

    def __handle_requests(self, serveruri):
        # Drop the finished worker thread and, if a follow-up update was
        # queued for this server, start a new thread for it. KeyError simply
        # means "no request pending" and ends the chain.
        with self._lock:
            try:
                thread = self.__updateThreads.pop(serveruri)
                del thread
                delayed_exec = self.__requestedUpdates.pop(serveruri)
                self.__create_update_thread(serveruri, delayed_exec)
            except KeyError:
                pass
            except:
                import traceback
                print traceback.format_exc(2)

    def __create_update_thread(self, serveruri, delayed_exec):
        # Spawn a worker and wire its signals back into this handler.
        # Caller must hold self._lock.
        upthread = LaunchServerUpdateThread(serveruri, delayed_exec)
        self.__updateThreads[serveruri] = upthread
        upthread.launch_server_signal.connect(self._on_launch_server_info)
        upthread.error_signal.connect(self._on_error)
        upthread.start()
class LaunchServerUpdateThread(QObject, threading.Thread):
    '''
    A thread to retrieve the list of pid and nodes from launch server and publish
    it by sending a QT signal.
    '''

    # (serveruri, pid, node name list) on success
    launch_server_signal = Signal(str, int, list)
    # (serveruri, error message) on failure
    error_signal = Signal(str, str)

    def __init__(self, launch_serveruri, delayed_exec=0.0, parent=None):
        QObject.__init__(self)
        threading.Thread.__init__(self)
        self._launch_serveruri = launch_serveruri
        # Extra seconds to wait before contacting the server.
        self._delayed_exec = delayed_exec
        self.setDaemon(True)

    def run(self):
        '''
        Query pid and node names from the launch server via XML-RPC and emit
        launch_server_signal on success or error_signal on failure.
        '''
        try:
            # Random jitter on top of the requested delay avoids hammering
            # the server when many updates are requested at once.
            delay = self._delayed_exec + 0.5 + random.random()
            time.sleep(delay)
            socket.setdefaulttimeout(25)
            server = xmlrpclib.ServerProxy(self._launch_serveruri)
            _, _, pid = server.get_pid()  # _:=code, msg
            _, _, nodes = server.get_node_names()  # _:=code, msg
            self.launch_server_signal.emit(self._launch_serveruri, pid, nodes)
        except:
            import traceback
            # print traceback.print_exc()
            # Report only the last line of the traceback (the error itself).
            formatted_lines = traceback.format_exc(1).splitlines()
            rospy.logwarn("Connection to launch server @ %s failed:\n\t%s", str(self._launch_serveruri), formatted_lines[-1])
            # 'print "request failed", self._monitoruri
            self.error_signal.emit(self._launch_serveruri, formatted_lines[-1])
        finally:
            # Restore the process-wide socket timeout changed above.
            if socket is not None:
                socket.setdefaulttimeout(None)
| [
"Alexander.Tiderko@fkie.fraunhofer.de"
] | Alexander.Tiderko@fkie.fraunhofer.de |
ea99fdbb982ad112eb16ccc721ba53b6e60578be | ba6e161312ee7fc0d7e28e7ad5e5133867cf2b38 | /analysis/plot_results_biobot.py | cf5a8cc4d24af4f23d32b1d8b70b1525e4e2a531 | [
"MIT"
] | permissive | m0khalifa/wastewater_analysis | b6f21d6cc89e772ed88716791392f8a5df721d66 | 61dd29df6a620db05838597e2fe12d8a2f8cca65 | refs/heads/main | 2023-08-16T19:42:15.340010 | 2021-09-16T18:57:27 | 2021-09-16T18:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,222 | py | #!/usr/bin/env python3
import sys
import os
import argparse
import matplotlib.pyplot as plt
import pandas as pd
import math
import numpy as np
import scipy.stats
import h5py
import json
import seaborn as sns
# Fixed color per variant-of-concern lineage (RGBA tuples or a matplotlib
# color name), so every plot in this script uses consistent colors.
colors = {
    'B.1.1.7': (0.4980392156862745, 0.788235294117647, 0.4980392156862745, 1.0),
    'B.1.351': (0.9921568627450981, 0.7529411764705882, 0.5254901960784314, 1.0),
    'B.1.427': (0.2196078431372549, 0.4235294117647059, 0.6901960784313725, 1.0),
    'B.1.429': (0.7490196078431373, 0.3568627450980392, 0.09019607843137253, 1.0),
    'P.1': (0.4, 0.4, 0.4, 1.0),
    'B.1.427/B.1.429': (0.2196078431372549, 0.4235294117647059, 0.6901960784313725, 1.0),
    'B.1.526': 'gold'}
def main():
    """Merge wastewater VOC abundance predictions with sample metadata and
    (optionally) GISAID frequencies, then write bar plots, a
    population-vs-accuracy scatter plot, and a CSV of the filtered
    predictions to --outdir.
    """
    parser = argparse.ArgumentParser(description="Plot biobot results for a given VOC.")
    parser.add_argument('predictions', type=str, nargs='+', help="prediction files")
    parser.add_argument('--run_info', type=str, nargs='+', default=[], help="run info files")
    parser.add_argument('--confidence', type=float, default=95, help="confidence interval width")
    parser.add_argument('--metadata', type=str, required=True, help="metadata for samples")
    parser.add_argument('--metadata_ref', type=str, required=True, help="metadata for references")
    parser.add_argument('--gisaid_freqs', type=str, help="VOC frequencies per sampling date observed in GISAID")
    parser.add_argument('--voc', type=str, required=True, help="VOC to plot")
    parser.add_argument('--min_ab', default=0, type=float, help="set predictions <= min_ab to 0")
    parser.add_argument('--min_aln', type=int, default=0, help="remove samples with < min_aln aligned reads")
    parser.add_argument('--max_ct', type=float, default=34, help="remove samples with CT > max_ct")
    parser.add_argument('--min_pop', type=int, default=0, help="remove samples with population < min_pop")
    parser.add_argument('--quick', action='store_true', help="skip confidence interval computation for any predictions <= min_ab")
    parser.add_argument('--fig_width', type=float, help="figure width")
    parser.add_argument('--fig_height', default=4, type=float, help="figure height")
    parser.add_argument('--sep_states', action="store_true", help="separate different states by vertical lines in plot")
    parser.add_argument('--add_population', action="store_true", help="add population size on second y-axis")
    parser.add_argument('--outdir', default='.')
    parser.add_argument('--outprefix', default="plot_bootstrap", help="add prefix to output figure names")
    parser.add_argument('--output_format', dest='output_format', default='png', help="comma-separated list of desired output formats")
    # NOTE(review): the option string '-v,--verbose' is one literal flag, not
    # the usual two ('-v', '--verbose') — confirm intended.
    parser.add_argument('-v,--verbose', dest='verbose', action='store_true')
    args = parser.parse_args()
    if args.min_aln > 0 and not args.run_info:
        print("ERROR: can't filter for minimal number of aligned reads without run_info files")
        sys.exit(1)
    # read sample metadata
    df = pd.read_csv(args.metadata, sep=',', header=0, dtype=str, parse_dates=["Date"])
    voc_list = args.voc.split(',')
    for variant in voc_list:
        # "." marks samples with no (passing) prediction for this variant
        df[variant] = "."
    # read run info per sample
    df["n_reads"] = 0
    df["n_aligned"] = 0
    df["n_unique"] = 0
    for filename in args.run_info:
        # assumes that predictions are in directory named after sample
        sample_id = filename.split('/')[-2]
        with open(filename, 'r') as f:
            run_info = json.load(f)
            df.loc[df["ID"] == sample_id, "n_processed"] = run_info["n_processed"]
            df.loc[df["ID"] == sample_id, "n_aligned"] = run_info["n_pseudoaligned"]
            df.loc[df["ID"] == sample_id, "n_unique"] = run_info["n_unique"]
    # read prediction files per sample; keep a frequency only if the sample
    # passes the CT, alignment-count and population filters
    for filename in args.predictions:
        # assumes that predictions are in directory named after sample
        sample_id = filename.split('/')[-2]
        with open(filename, 'r') as f:
            freq_dict = {voc : 0 for voc in voc_list}
            for line in f:
                if line[0] == "#":
                    # header
                    continue
                [variant, tmp, freq, adj_freq] = line.strip().split('\t')
                if variant in voc_list:
                    freq_dict[variant] = float(freq)
            for voc, freq in freq_dict.items():
                row = df.loc[df["ID"] == sample_id]
                if float(row["Cq"]) <= args.max_ct:
                    if row["n_aligned"].item() >= args.min_aln:
                        if float(row["population"]) >= args.min_pop:
                            df.loc[df["ID"] == sample_id, voc] = freq
    # When several VOCs are given, collapse them into one joint column named
    # "A/B/..." whose value is the sum of the individual frequencies.
    if len(voc_list) > 1:
        joint_voc_list = voc_list
        joint_voc_name = '/'.join(joint_voc_list)
        df[joint_voc_name] = df[joint_voc_list].sum(axis=1)
        voc_list = [x for x in voc_list if x not in joint_voc_list]
        voc_list.append(joint_voc_name)
        colors[joint_voc_name] = colors[joint_voc_list[0]]
    voc_list = sorted(voc_list)
    colorlist = [colors[voc] for voc in voc_list]
    # plot composition per sample, sorted by state
    plot_df = df.loc[df[voc_list[0]] != "."]
    plot_df = plot_df.sort_values(by=["State", "Sampling_location_(uploaded_from_processed_data)", "Date"])
    states = plot_df["State"].unique()
    if args.sep_states:
        xticklabels = ["{0}, {1}".format(row["State"],
                                         str(row["Date"]).split(" ")[0])
                       for index, row in plot_df.iterrows()]
        xtickinfo = ["{0}, {1}, {2}".format(row["State"],
                                            str(row["Date"]).split(" ")[0],
                                            row["Sampling_location_(uploaded_from_processed_data)"])
                     for index, row in plot_df.iterrows()]
    else:
        xticklabels = ["{1}".format(row["State"], str(row["Date"]).split(" ")[0])
                       for index, row in plot_df.iterrows()]
    # fig, ax = plt.subplots(figsize=(5, 20))
    voc = voc_list[0]
    metadata_ref = pd.read_csv(args.metadata_ref, sep='\t', header=0, dtype=str)
    # NOTE(review): seqnames is computed but never used below — candidate for
    # removal (or leftover from an earlier analysis step).
    seqnames = list(metadata_ref.loc[metadata_ref["pangolin_lineage"] == voc,
                                     "strain"])
    if args.gisaid_freqs:
        gisaid_df = pd.read_csv(args.gisaid_freqs, sep='\t', header=0, parse_dates=["Date"])
        voc_freqs = []
        for index, row in plot_df.iterrows():
            freqs = gisaid_df.loc[(gisaid_df["State"] == row["State"]) &
                                  (gisaid_df["Date"] == row["Date"])][voc]
            freq = freqs.max()
            # all GISAID rows for one state/date must agree on the frequency
            assert freq == freqs.min()
            voc_freqs.append(float(freq))
        gisaid_col = "gisaid-{}".format(voc)
        plot_df[gisaid_col] = voc_freqs
    # plot predictions with error bars representing confidence intervals
    plot_df = plot_df.reset_index(drop=True)
    plt.rcParams.update({'font.size': 14,
                         'legend.fontsize': 12,
                         'legend.title_fontsize': 12})  # increase font size
    plt.figure()
    # NOTE(review): the gisaid-* column is used below even when --gisaid_freqs
    # was not given, which would raise KeyError — the option is effectively
    # required; confirm.
    gisaid_col = "gisaid-{}".format(voc)
    ax = plot_df.plot(x="State",
                      y=[voc, gisaid_col],
                      kind="bar",
                      # color=colors[voc],
                      legend=True,
                      capsize=2)
    ax.legend(["Wastewater", "GISAID"])
    ax.set_xticks(range(len(xticklabels)))
    ax.set_xticklabels(xticklabels, fontsize=10, rotation=45, ha="right")
    # plt.xlabel("Sample location (state) and date")
    plt.xlabel("")
    plt.ylabel("Abundance (%)")
    plt.ylim(0, 32)
    # add vertical lines to separate locations
    if args.sep_states:
        xticklocs, labels = plt.xticks()
        prev_location = ""
        prev_state = ""
        for i, label in enumerate(xtickinfo):
            [state, date, location] = label.split(",")
            if prev_state != "" and state != prev_state:
                # solid line at a state boundary
                line_loc = (xticklocs[i] + xticklocs[i-1]) / 2
                plt.axvline(x=line_loc, color='k', alpha=0.5, lw=0.5)
            elif prev_location != "" and location != prev_location:
                # dashed line at a sampling-location boundary within a state
                line_loc = (xticklocs[i] + xticklocs[i-1]) / 2
                plt.axvline(x=line_loc, color='k', alpha=0.5, lw=0.5, ls=(0, (5, 10)))
            prev_location = location
            prev_state = state
    # add population size per sample
    if args.add_population:
        ax2 = ax.twinx()
        ax2.spines['right'].set_position(('axes', 1.0))
        plot_df["population"] = plot_df["population"].astype("float")
        # print(plot_df)
        plot_df.plot(y="population", ax=ax2, x_compat=True, color="k", style="+", legend=False)
        ax2.set_ylabel("Population size per sample (+)")
        ax2.set_yscale("log")
    if args.fig_width:
        plt.gcf().set_size_inches(args.fig_width, args.fig_height)
    plt.title(voc, fontsize=16)
    ax.yaxis.grid(alpha=0.2)
    # ax.legend(ncol=len(voc_list), loc='upper right')
    plt.tight_layout()
    # save the same figure three times with different y-axis limits
    for format in args.output_format.split(','):
        outfile = "{}/{}_{}.{}".format(args.outdir, args.outprefix, voc, format)
        plt.savefig(outfile)
    ax.set_ylim(0, 105)
    for format in args.output_format.split(','):
        outfile = "{}/{}_{}_ylim100.{}".format(args.outdir, args.outprefix, voc, format)
        plt.savefig(outfile)
    ax.set_ylim(0, 1.05)
    for format in args.output_format.split(','):
        outfile = "{}/{}_{}_ylim1.{}".format(args.outdir, args.outprefix, voc, format)
        plt.savefig(outfile)
    # write predictions to file
    outfile = "{}/predictions_{}_m{}_a{}.csv".format(args.outdir, voc,
                                                     args.min_ab, args.min_aln)
    plot_df.to_csv(outfile)
    # plot population versus accuracy
    plot_df["population"] = plot_df["population"].astype("float")
    plot_df["diff"] = abs(plot_df[voc] - plot_df[gisaid_col])
    selection = plot_df.loc[plot_df[gisaid_col] > 1]
    plt.figure()
    plt.rcParams.update({'font.size': 12})  # increase font size
    selection.plot.scatter(x="population", y="diff")
    plt.tight_layout()
    plt.savefig(args.outdir + "/population_vs_accuracy_{}.png".format(voc))
if __name__ == "__main__":
sys.exit(main())
| [
"jasmijn_baaijens@hotmail.com"
] | jasmijn_baaijens@hotmail.com |
b2d53ffd5c0d66ff8ad6d790ce50cfb7e2ec15cd | 04a97dbb2ab510ec64a944dd5aaf834271788a32 | /django2/mysite/city_summary_stats.py | 465ddd22ae68c7689bedb5da94be9e8d77ced5a7 | [] | no_license | emilyding/ACE-cs122project | 98d2b67aa8151b2649c4a980d6daf80fb9817898 | 5941d8de85770f3e1b05e0bdc5a4b1cd6f1d35f5 | refs/heads/master | 2021-01-11T17:19:25.243093 | 2017-03-14T21:25:18 | 2017-03-14T21:25:18 | 79,745,285 | 0 | 0 | null | 2017-03-10T20:16:16 | 2017-01-22T20:59:03 | HTML | UTF-8 | Python | false | false | 6,330 | py | # Summary City Data
'''
Takes as an input a dictionary with the name of a city, and returns interesting
summary statistics. Note that using the adjusted database will lead to errors
identifying universally hated/acclaimed restaurants (as ratings of 1 or 5 will
be adjusted slightly upwards or downwards)
Usage: Call get_summary_info with the city of interest.
Example Call: get_summary_info({'city': 'Los Angeles'})
'''
import sqlite3
import csv
# Maps cities to median min meters between starbucks
# (values are pre-formatted 2-decimal strings, keyed by lowercase city name).
# Dictionary readout produced by build_starbucks_dictionary.py
starbucks_mapper = {'albuquerque': '154.15', 'arlington': '83.33', 'atlanta': '352.59',
    'austin': '123.41', 'baltimore': '86.41', 'boston': '98.32', 'buffalo': '162.93',
    'charlotte': '251.00', 'chicago': '138.73', 'cleveland': '149.90', 'colorado springs': '221.52',
    'columbus': '385.16', 'dallas': '517.69', 'denver': '282.46', 'detroit': '486.73',
    'el paso': '241.77', 'fort worth': '239.43', 'fresno': '96.81', 'honolulu': '33.39',
    'houston': '393.32', 'indianapolis': '406.86', 'jacksonville': '184.75', 'kansas city': '978.47',
    'las vegas': '395.43', 'long beach': '112.44', 'los angeles': '187.45', 'louisville': '213.46',
    'memphis': '219.27', 'mesa': '411.07', 'miami': '142.43', 'milwaukee': '146.95',
    'minneapolis': '317.86', 'nashville': '173.47', 'new orleans': '103.72', 'new york': '105.39',
    'oakland': '97.87', 'oklahoma city': '213.86', 'omaha': '228.06', 'philadelphia': '106.38',
    'phoenix': '531.17', 'pittsburgh': '272.22', 'portland': '193.92', 'raleigh': '564.58',
    'sacramento': '84.44', 'san antonio': '363.24', 'san diego': '110.48', 'san francisco': '67.07',
    'san jose': '89.94', 'seattle': '134.22', 'st louis': '635.64', 'st paul': '125.64',
    'tampa': '324.66', 'tucson': '135.19', 'tulsa': '327.75', 'virginia beach': '140.52',
    'washington dc': '106.63'}
def build_starbucks_dictionary(filepath='starbucks_index.csv'):
    '''
    Given a filepath, constructs a dictionary mapping each city to the
    median minimum distance to another Starbucks. Used in get_summary_info.

    Inputs:
        - filepath: The location of the Starbucks distance csv

    Returns:
        - Dictionary mapping cities to the median min starbucks distance
          (formatted as a two-decimal string)
    '''
    distances = {}
    with open(filepath) as csv_file:
        for record in csv.reader(csv_file):
            # The header row carries the literal column title in column 2.
            if record[2] == "Median Distance":
                continue
            # Round the distance to two decimals while building the mapping.
            distances[record[1]] = "{0:.2f}".format(float(record[2]))
    return distances
def get_summary_info(city=None, database="yelp_raw.db"):
    '''
    Takes in a city dictionary and database and returns summary statistics.

    Inputs:
        - city: City of interest. Format is {'city': 'Chicago'}.
          Defaults to Los Angeles when omitted.
        - database: Location of unadjusted database

    Returns:
        - A list of (label, value) tuples displaying summary information
    '''
    # The original signature used a mutable default dict AND mutated it in
    # place (lowercasing city["city"]), which both leaked state across calls
    # and clobbered the caller's dictionary. Default here, normalize locally.
    if city is None:
        city = {'city': 'Los Angeles'}
    city_name = city["city"].lower()
    lookup = {"city": city_name}

    # Find necessary information
    total_restaurants = find_total_restaurants(lookup, database)
    starbucks_index = starbucks_mapper[city_name]
    most_reviewed = find_most_reviewed_restaurant(lookup, database)
    most_acclaimed = find_consensus_restaurant(lookup, database, rating=5)
    most_hated = find_consensus_restaurant(lookup, database, rating=1)

    # Construct Result List
    result_list = [
        ("Total Restaurants in City:", total_restaurants),
        ("Starbucks Distance Index:",
         "{} Meters".format(starbucks_index)),
        ("Most Reviewed Restaurant:",
         "{}, {} Reviews".format(most_reviewed[0], most_reviewed[1])),
        ("Most Reviewed 5-Star Restaurant:",
         "{}, {} Reviews".format(most_acclaimed[0], most_acclaimed[1])),
        ("Most Reviewed 1-Star Restaurant:",
         "{}, {} Reviews".format(most_hated[0], most_hated[1])),
    ]
    return result_list
def find_total_restaurants(city, database):
    '''
    Finds total number of restaurants in a city (case-insensitive match).

    Inputs:
        - city: City of interest. Format is {'city': 'Chicago'}
        - database: Location of unadjusted database

    Returns:
        - Integer count of restaurants in the city
    '''
    connection = sqlite3.connect(database)
    try:
        c = connection.cursor()
        search_string = '''SELECT COUNT(*)
                        FROM restaurant
                        WHERE city = ?
                        COLLATE NOCASE
                        '''
        result = c.execute(search_string, [city["city"]]).fetchone()
        connection.commit()
        return result[0]
    finally:
        # Always release the connection, even if the query raises (the
        # original leaked the connection on error).
        connection.close()
def find_most_reviewed_restaurant(city, database):
    '''
    Finds the most reviewed restaurant and its review count.

    Inputs:
        - city: City of interest. Format is {'city': 'Chicago'}
        - database: Location of unadjusted database

    Returns:
        - (name, reviews) tuple for the most reviewed restaurant; ties break
          to the first row in database order, matching the original's stable
          reverse sort.

    Raises:
        - IndexError if the city has no restaurants (as the original did via
          indexing an empty list).
    '''
    connection = sqlite3.connect(database)
    try:
        c = connection.cursor()
        search_string = '''SELECT name, reviews
                        FROM restaurant
                        WHERE city = ?
                        COLLATE NOCASE
                        '''
        results = c.execute(search_string, [city["city"]]).fetchall()
        connection.commit()
    finally:
        # Always release the connection, even if the query raises (the
        # original leaked the connection on error).
        connection.close()
    if not results:
        raise IndexError("no restaurants found for city %r" % city["city"])
    # Single O(n) pass instead of a full sort; like sorted(reverse=True),
    # max() returns the first-encountered row among tied review counts.
    return max(results, key=lambda row: row[1])
def find_consensus_restaurant(city, database, rating):
    '''
    Finds the most reviewed restaurant at a given rating level.

    Inputs:
        - city: City of interest. Format is {'city': 'Chicago'}
        - database: Location of unadjusted database
        - rating: exact rating to filter on (e.g. 5 or 1)

    Returns:
        - (name, reviews, rating) tuple for the most reviewed restaurant at
          that rating; ties break to the first row in database order,
          matching the original's stable reverse sort.

    Raises:
        - IndexError if no restaurant in the city has that rating (as the
          original did via indexing an empty list).
    '''
    connection = sqlite3.connect(database)
    try:
        c = connection.cursor()
        search_string = '''SELECT name, reviews, rating
                        FROM restaurant
                        WHERE city = ?
                        COLLATE NOCASE
                        AND rating = ?;
                        '''
        results = c.execute(search_string, [city["city"], rating]).fetchall()
        connection.commit()
    finally:
        # Always release the connection, even if the query raises (the
        # original leaked the connection on error).
        connection.close()
    if not results:
        raise IndexError("no restaurants rated %r found for city %r"
                         % (rating, city["city"]))
    # Single O(n) pass instead of a full sort; like sorted(reverse=True),
    # max() returns the first-encountered row among tied review counts.
    return max(results, key=lambda row: row[1])
| [
"amr@cs.uchicago.edu"
] | amr@cs.uchicago.edu |
fc0a4373d757b3d7e96f4ec9c972e047cbfbd442 | c6be9ff92bb69504ec21292f28982a7b8aaf5bc4 | /pinky/test/helper.py | bdc9aee028f1d3adf3458c314ecd60c583cfe0c1 | [
"MIT"
] | permissive | dr4ke616/pinky | fa93fc9add40bd8896284a4c42a6a92338ca7e06 | 35c165f5a1d410be467621f3152df1dbf458622a | refs/heads/master | 2022-11-21T21:42:32.149255 | 2015-12-01T22:50:28 | 2015-12-01T22:50:28 | 40,613,393 | 1 | 0 | MIT | 2022-11-04T19:09:10 | 2015-08-12T17:04:48 | Python | UTF-8 | Python | false | false | 1,428 | py | import json
from mock import patch
from twisted.trial import unittest
from txzmq import ZmqEndpoint, ZmqFactory
class MockJSONSerializer(object):
    """Test-only serializer that round-trips values through JSON.

    Mirrors the real serializer interface: ``dump`` encodes, ``load``
    decodes, and both pass ``None`` through unchanged.
    """

    @classmethod
    def dump(cls, content):
        """Encode *content* as a JSON string; None stays None."""
        return None if content is None else json.dumps(content)

    @classmethod
    def load(cls, content):
        """Decode a JSON string; None stays None."""
        return None if content is None else json.loads(content)
class MockBaseServer(object):
    """Stand-in for pinky's base ZMQ server class, used by the test suite.

    Exposes the same attribute layout as the real base server (serializer,
    debug flag, factory, endpoint list) without any network activity.
    """

    __serializer__ = MockJSONSerializer
    _debug = False

    def __init__(self, factory, endpoint, *args, **kwargs):
        # Record the factory and wrap the single endpoint in a list, the
        # same shape the real server keeps.
        self.factory = factory
        self.endpoints = [endpoint]

    def shutdown(self):
        """No-op: the mock holds no resources to release."""
        pass

    @classmethod
    def create(cls, address, *args, **kwargs):
        """Build a mock server bound to *address*, like the real factory."""
        bind_endpoint = ZmqEndpoint('bind', address)
        return cls(ZmqFactory(), bind_endpoint, *args, **kwargs)
class BaseTestServer(unittest.TestCase):
    """Base TestCase that swaps a server class onto MockBaseServer.

    Subclasses set ``server`` to the class under test; during setUp its
    ``__bases__`` tuple is patched so the server runs against the mock
    instead of the real ZMQ-backed base class.
    """

    # Subclasses must override this with the server class under test.
    server = None

    def __init__(self, *args, **kwargs):
        # Patches are prepared here but only started in setUp.
        self.patchs = [
            patch.object(self.server, '__bases__', (MockBaseServer, ))
        ]
        super(BaseTestServer, self).__init__(*args, **kwargs)

    def add_patch(self, patch):
        # Register an extra patch to be started/stopped with the built-in one.
        # NOTE(review): the parameter shadows the imported ``patch`` helper
        # inside this method; kept as-is.
        self.patchs.append(patch)

    def setUp(self):
        # Activate all registered patches before each test.
        [p.start() for p in self.patchs]

    def tearDown(self):
        # Best-effort teardown: a patch may already have been stopped.
        try:
            [p.stop() for p in self.patchs]
        except:
            pass
| [
"adamdrakeford@gmail.com"
] | adamdrakeford@gmail.com |
044f1d8bb3d333af471355e336d8bdaea44454ab | 3b4527c8dab176fd26ac75f50f70a00971124db2 | /multiarm.py | 8f48cda671b4a651f8ef51cd5af8ce1ac29492d0 | [] | no_license | kevinduh/hyperband-sim | 324be88d1b8714598b8cde960f9cb49f0524fcc1 | 4ed4af506f7570da983fe19dffd585921259d0c0 | refs/heads/master | 2020-04-27T16:06:22.639027 | 2019-03-08T05:24:33 | 2019-03-08T05:24:33 | 174,472,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,050 | py | #!/usr/bin/env python
"""
Model Selection as Multi-Arm Bandit Experiment: This class encapsulates
a set of arms/models and provides convenience functions for summarizing
the playback of the training experiments. Note that if the number of real
models prepared in the datadir is less than the number of arms requested,
this class will re-sample and duplicate some arms.
"""
from arm import *
import os
import random
from copy import copy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class MultiArm():
    """Encapsulates a set of arms/models for a multi-arm bandit simulation.

    Reads pre-computed 'metrics' files under *datadir*, wraps each in an
    Arm(), tracks the oracle (best final reward), and hands out arms via
    sample_arm(); when more arms are requested than exist, earlier arms are
    duplicated with their progress reset.
    """

    def __init__(self, datadir, randomize=False, file_limit=10000):
        """Read raw metrics files and create 'arms' for simulation
        :param datadir: root directory to search for metrics files
        :param randomize: if true, shuffle order of metrics files for randomized experiments
        :param file_limit: limit on number of metrics files to read in simulation

        Sets self.arms (list of Arm), self.oracle_arm / self.oracle_reward
        (index and final reward of the best model), and self.arm_generator.
        """
        # 1. Gather metrics files
        metrics_files = [os.path.join(dp,f) for dp,dn,fn in os.walk(datadir) for f in fn if f == 'metrics']
        self.num_arms_used = min(len(metrics_files), file_limit)
        msg = "Found %d metrics files in %s" % (len(metrics_files), datadir)
        #msg += "; Using %d models/arms in simulation:" % (self.num_arms_used)
        print(msg)
        # 2. Create Arm() object for each metrics file
        if randomize:
            random.shuffle(metrics_files)
        self.arms = [Arm(m) for m in metrics_files[:self.num_arms_used]]
        # 3. Print statistics
        self.oracle_reward = 0.0
        self.oracle_arm = 0
        #print("\n==== Details of each arm/model ====")
        for k in range(len(self.arms)):
            # rename arm with integer id for easy identification
            self.arms[k].name = str(k) + ":" + self.arms[k].name
            #print(self.arms[k])
            if self.oracle_reward < self.arms[k].final_reward():
                self.oracle_reward = self.arms[k].final_reward()
                self.oracle_arm = k
        print("oracle arm=%d, oracle reward=%f\n" %(self.oracle_arm,
                                                    self.oracle_reward))
        # 4. Create generator that cycles through arms forever, duplicating
        # (and resetting) arms once the real ones are exhausted.
        def arm_generator():
            # track duplicate resampling of same arm, d[arm name]->[list of id]
            # NOTE(review): defaultdict must be supplied by `from arm import *`
            # — confirm arm.py imports it.
            self.duplicates = defaultdict(list)
            i = -1
            while True:
                i += 1
                if i >= self.num_arms_used:
                    a = copy(self.arms[i%self.num_arms_used])
                    a.current_step = 0
                    self.arms.append(a)
                    self.duplicates[a.name].append(i)
                else:
                    a = self.arms[i%self.num_arms_used]
                yield a
        self.arm_generator = arm_generator()
        # End __init__

    def sample_arm(self):
        """Returns an arm (simulates training a model)
        """
        # Fix: the original called self.arm_generator.next(), which is
        # Python-2-only; the next() builtin works on both Python 2 and 3.
        return next(self.arm_generator)

    def __str__(self):
        """Prints snapshot of current Multi-Arm Bandit experiment status
        :return: string
        """
        s = "=== Multi-Arm experiment status ===\n"
        s += "id current_step max_step current_reward final_reward name\n"
        for ii, a in enumerate(self.arms):
            s += "%d %d %d %f %f %s\n" %(ii, a.current_step, a.max_step,
                                         a.current_reward(),
                                         a.final_reward(), a.name)
        d = self.stats()
        s += "chosen_arm=%d final_reward=%f " %(d['best_arm'], d['best_final_reward'])
        s += "oracle=%f regret=%f resource=%d " %(self.oracle_reward,
                                                  d['regret'],
                                                  d['resource'])
        s += "num_arms_examined=%d\n" % d['num_arms_examined']
        return s

    def stats(self):
        """Get current Multi-Arm Bandit experiment statistics
        :return: dictionary with best_arm, regret, resource, etc.
        """
        stats_dict = {}
        stats_dict['best_arm'] = 0
        stats_dict['best_current_reward'] = 0.0
        stats_dict['resource'] = 0
        stats_dict['num_arms_examined'] = 0
        for ii, a in enumerate(self.arms):
            # total steps spent across all arms so far
            stats_dict['resource'] += a.current_step
            if a.current_reward() > stats_dict['best_current_reward']:
                stats_dict['best_arm'] = ii
                stats_dict['best_current_reward'] = a.current_reward()
            if a.current_step > 0:
                stats_dict['num_arms_examined'] += 1
        stats_dict['best_final_reward'] = self.arms[stats_dict['best_arm']].final_reward()
        stats_dict['regret'] = self.oracle_reward - stats_dict['best_final_reward']
        return stats_dict

    def plot(self, max_step, filename="tmp.png"):
        """Plot the Multi-Arm Bandit simulation
        The full curve is plotted for each arm/model
        Note curves are smoothed monotonically-increasing version of the real BLEU curve
        Current step where the training stops is indicated by a dot
        :param max_step: max steps to draw in figure
        :param filename: output filename for plot
        """
        for a in self.arms:
            c = a.get_curve()
            m = min(len(c), max_step+1)
            plt.plot(range(m),c[:m])
            # dot marks where this arm's training was stopped
            plt.scatter(a.current_step, a.current_reward())
        d = self.stats()
        title = "#arms=%d\n" %(len(self.arms))
        title += "chosen_arm=%d\n" %(d['best_arm'])
        title += "current_reward=%f\n" %(d['best_current_reward'])
        title += "final_reward=%f\n" %(d['best_final_reward'])
        title += "regret=%f resource=%d" %(d['regret'], d['resource'])
        plt.legend(loc='lower right', title=title)
        plt.ylabel('BLEU (validation set)')
        plt.xlabel('steps')
        #plt.show()
        plt.savefig(filename)
| [
"kevinduh@cs.jhu.edu"
] | kevinduh@cs.jhu.edu |
6ec85a252515562cf031dc71db071109f9739d2f | e0bf5fd720c8b1539c8b4e6f48394a9d91adbe89 | /src/rangeproofs/rangeproof_prover.py | 2956466dd6a9245a3a863af528893f2d5bc9a42c | [] | no_license | yylluu/python-bulletproofs | 54636448891475b3199edd75d3c8d81ebf3bd29b | 967e1a54928b0130624fba6eeb3a19b42362b83d | refs/heads/master | 2022-01-23T14:26:18.581056 | 2019-07-23T12:10:07 | 2019-07-23T12:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,960 | py | from typing import List
from ..utils.utils import Point, ModP, inner_product, mod_hash
from ..utils.transcript import Transcript
from ..utils.commitments import vector_commitment, commitment
from .rangeproof_verifier import Proof
from ..innerproduct.inner_product_prover import NIProver
from ..pippenger import PipSECP256k1
class NIRangeProver:
    """Non-interactive Bulletproofs range prover.

    Proves that the value committed with blinding factor *gamma* lies in
    [0, 2**n): commitments A, S, T1, T2 are produced, the challenges are
    derived via a Fiat-Shamir transcript, and the l/r vectors are compressed
    with a non-interactive inner-product argument.
    """

    def __init__(
        self,
        v: ModP,
        n: int,
        g: Point,
        h: Point,
        gs: List[Point],
        hs: List[Point],
        gamma: ModP,
        u: Point,
        group,
        seed: bytes = b"",
    ):
        # v: committed value; n: bit length of the range
        self.v = v
        self.n = n
        # g, h: commitment bases; gs, hs: generator vectors of length n
        self.g = g
        self.h = h
        self.gs = gs
        self.hs = hs
        # gamma: blinding factor of the value commitment
        self.gamma = gamma
        self.u = u
        self.group = group
        # Fiat-Shamir transcript seeded deterministically
        self.transcript = Transcript(seed)

    def prove(self):
        """Run the full protocol and return the resulting Proof."""
        v = self.v
        n = self.n
        gs = self.gs
        hs = self.hs
        h = self.h
        # aL: little-endian bit decomposition of v; aR = aL - 1 (mod q)
        aL = list(map(int, reversed(bin(v.x)[2:].zfill(n))))[:n]
        aR = [
            (x - 1) % self.group.q for x in aL
        ]  # TODO implement inverse of elliptic curve point to compute -1 * g instead of multiplying by p-1
        # alpha, rho, sL, sR: blinding values derived deterministically from
        # the transcript digest (distinct hash inputs per value)
        alpha = mod_hash(b"alpha" + self.transcript.digest, self.group.q)
        A = vector_commitment(gs, hs, aL, aR) + alpha * h
        sL = [
            mod_hash(str(i).encode() + self.transcript.digest, self.group.q)
            for i in range(n)
        ]
        sR = [
            mod_hash(str(i).encode() + self.transcript.digest, self.group.q)
            for i in range(n, 2 * n)
        ]
        rho = mod_hash(str(2 * n).encode() + self.transcript.digest, self.group.q)
        S = vector_commitment(gs, hs, sL, sR) + rho * h
        # Commit A, S to the transcript, then draw challenges y and z
        self.transcript.add_list_points([A, S])
        y = self.transcript.get_modp(self.group.q)
        self.transcript.add_number(y)
        z = self.transcript.get_modp(self.group.q)
        self.transcript.add_number(z)
        t1, t2 = self._get_polynomial_coeffs(aL, aR, sL, sR, y, z)
        tau1 = mod_hash(b"tau1" + self.transcript.digest, self.group.q)
        tau2 = mod_hash(b"tau2" + self.transcript.digest, self.group.q)
        T1 = commitment(self.g, h, t1, tau1)
        T2 = commitment(self.g, h, t2, tau2)
        # Commit T1, T2, then draw the evaluation challenge x
        self.transcript.add_list_points([T1, T2])
        x = self.transcript.get_modp(self.group.q)
        self.transcript.add_number(x)
        taux, mu, t_hat, ls, rs = self._final_compute(
            aL, aR, sL, sR, y, z, x, tau1, tau2, alpha, rho
        )
        # return Proof(taux, mu, t_hat, ls, rs, T1, T2, A, S), x,y,z
        # hsp: generators hs[i] scaled by y^-i, matching the verifier's basis
        hsp = [(y.inv() ** i) * hs[i] for i in range(n)]
        P = (
            A
            + x * S
            + PipSECP256k1.multiexp(
                gs + hsp,
                [-z for _ in range(n)]
                + [(z * (y ** i)) + ((z ** 2) * (2 ** i)) for i in range(n)],
            )
        )
        # Compress (ls, rs) with a non-interactive inner-product argument
        InnerProv = NIProver(gs, hsp, self.u, P + (-mu) * h, t_hat, ls, rs, self.group)
        innerProof = InnerProv.prove()
        return Proof(taux, mu, t_hat, T1, T2, A, S, innerProof, self.transcript.digest)

    def _get_polynomial_coeffs(self, aL, aR, sL, sR, y, z):
        # Coefficients t1, t2 of t(X) = <l(X), r(X)> (the constant term t0
        # is not needed by the prover here).
        t1 = inner_product(
            sL, [(y ** i) * (aR[i] + z) + (z ** 2) * (2 ** i) for i in range(self.n)]
        ) + inner_product(
            [aL[i] - z for i in range(self.n)],
            [(y ** i) * sR[i] for i in range(self.n)],
        )
        t2 = inner_product(sL, [(y ** i) * sR[i] for i in range(self.n)])
        return t1, t2

    def _final_compute(self, aL, aR, sL, sR, y, z, x, tau1, tau2, alpha, rho):
        # Evaluate l(x), r(x), t_hat = <l(x), r(x)>, and the two blinding
        # openings taux (for t_hat) and mu (for A + x*S).
        ls = [aL[i] - z + sL[i] * x for i in range(self.n)]
        rs = [
            (y ** i) * (aR[i] + z + sR[i] * x) + (z ** 2) * (2 ** i)
            for i in range(self.n)
        ]
        t_hat = inner_product(ls, rs)
        taux = tau2 * (x ** 2) + tau1 * x + (z ** 2) * self.gamma
        mu = alpha + rho * x
        return taux, mu, t_hat, ls, rs
| [
"williamborgeaud@gmail.com"
] | williamborgeaud@gmail.com |
c81784d9994ad9a1102b29e220192926722aa2a5 | 0236d8d787b2342d37fd02b53ca54961142f468c | /user/models.py | b56d2e2f79bd45a251a0f9894187e41004f2aefc | [
"BSD-3-Clause",
"MIT"
] | permissive | Spico197/TongfuRemotePrinter | b9a1095c85f675b14fe5c600e86c4d7bfa2475cc | c4edfab55f75c877efb9ec80d81bf205ca4e3da3 | refs/heads/master | 2022-11-04T22:18:46.386391 | 2020-06-25T04:41:16 | 2020-06-25T04:41:16 | 273,408,322 | 2 | 1 | NOASSERTION | 2020-06-25T04:23:52 | 2020-06-19T05:04:50 | Python | UTF-8 | Python | false | false | 2,118 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, password, **extra_fields):
"""
Create and save a user with the given username, email, and password.
"""
if not username:
raise ValueError('The given stu_id must be set')
user = self.model(username=username, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, password, **extra_fields)
def create_superuser(self, username, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, password, **extra_fields)
class LabUser(AbstractUser):
stu_id = models.CharField(max_length=20, null=False, blank=False)
name = models.CharField(max_length=200, default='李大嘴', null=True, blank=True)
memo = models.TextField(default='同福客栈掌勺', null=True, blank=True)
REQUIRED_FIELDS = ['stu_id']
objects = UserManager()
class Meta(AbstractUser.Meta):
verbose_name = '用户信息'
verbose_name_plural = verbose_name
class UserPrintRecord(models.Model):
user = models.ForeignKey(LabUser, on_delete=models.SET_NULL, null=True)
add_time = models.DateTimeField(auto_now_add=True)
file_path = models.CharField(max_length=1000)
class Meta:
verbose_name = '用户打印记录'
verbose_name_plural = verbose_name
| [
"tzhu1997@outlook.com"
] | tzhu1997@outlook.com |
1068b7713ec020de5734f28062543b8079b9df6d | d62fbff86f8d4f332e843843bba7d07e2361554f | /Examples/tests/example_2_unittest.py | a4ecd8c88c26e34fb9560b454a023f409b1c0266 | [] | no_license | haisfo/python-environments | e031850fa4e8778eea7c618d1eec74e723e615f1 | 73fb4dbe56f1ebbfba71d440ba3c953556688bf9 | refs/heads/master | 2022-12-27T19:51:13.046530 | 2020-10-16T00:11:28 | 2020-10-16T00:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | import unittest
from rock_paper_scissors_buggy import determine_winner, game_over, YOU, COMP
class TestWordGame(unittest.TestCase):
def test_determine_winner(self):
self.assertEqual(determine_winner('r', 'r'), None)
self.assertEqual(determine_winner('r', 'p'), COMP)
self.assertEqual(determine_winner('r', 's'), YOU)
self.assertEqual(determine_winner('p', 'r'), YOU)
self.assertEqual(determine_winner('p', 'p'), None)
self.assertEqual(determine_winner('p', 's'), COMP)
self.assertEqual(determine_winner('s', 'r'), COMP)
self.assertEqual(determine_winner('s', 'p'), YOU)
self.assertEqual(determine_winner('s', 's'), None)
def test_game_over(self):
self.assertEqual(game_over(3, [0, 0]), None)
self.assertEqual(game_over(3, [1, 1]), None)
self.assertEqual(game_over(3, [2, 1]), YOU)
self.assertEqual(game_over(3, [1, 2]), COMP)
self.assertEqual(game_over(5, [2, 2]), None)
self.assertEqual(game_over(5, [3, 0]), YOU)
self.assertEqual(game_over(5, [1, 3]), COMP)
if __name__ == '__main__':
unittest.main()
| [
"ariannedee@gmail.com"
] | ariannedee@gmail.com |
87000c88ad989e720e8303d6f29d9912748b2d30 | 254d38ad3d455b94170e4ef17045d6c10daee986 | /doc_src/code_sample/case1_admin.py | f34ae1be6a6e05c2cdd7ae730c848ce4f701da26 | [
"Apache-2.0"
] | permissive | giorgil/django-articleappkit | 4d2108b6bb32e3089c61c5b81c55d7d6febf5acb | d301f2d511a65461eedbcc301955dafecba189ca | refs/heads/master | 2019-07-06T06:43:47.256809 | 2017-10-19T18:10:40 | 2017-10-19T18:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from django.contrib import admin
from articleappkit.admin import (ArticleBaseAdmin, ARTICLE_BASE_FIELDSET,
SINGLE_AUTHOR_FIELDSET, KEY_IMAGE_FIELDSET,
PUBLISHING_FIELDSET)
from .models import Story
class StoryAdmin(ArticleBaseAdmin):
fieldsets = (
ARTICLE_BASE_FIELDSET,
SINGLE_AUTHOR_FIELDSET,
KEY_IMAGE_FIELDSET,
PUBLISHING_FIELDSET,
)
admin.site.register(Story, StoryAdmin) | [
"coreyoordt@gmail.com"
] | coreyoordt@gmail.com |
5cef80e72d9261fb5dc197cdbae7f87f1e8d37ad | 76f9eb030efbd98c04bee514e09e32312f608fe3 | /led_poop2.py | 3f0678fc58a5ec859576a8b4895c58e4786bc062 | [
"Apache-2.0"
] | permissive | kimtaehoho/osscap2020 | e0e418f66702645ab8c32d4a0c8f3eed142c517d | 7980ab742a1a90fb4405eeabe941504a0b859d20 | refs/heads/main | 2023-01-19T09:28:11.802955 | 2020-12-03T04:25:07 | 2020-12-03T04:25:07 | 303,358,286 | 0 | 1 | Apache-2.0 | 2020-11-27T12:25:10 | 2020-10-12T10:36:10 | Python | UTF-8 | Python | false | false | 13,439 | py | from matrix import * # matrix.py의 모든 함수들 가져오기
import time
import random
import pygame
import pygame as pg
import sys
import LED_display as LMD
import threading
def LED_init():
thread=threading.Thread(target=LMD.main, args=())
thread.setDaemon(True)
thread.start()
return
pg.init()
screen = pg.display.set_mode((1, 1))
def draw_matrix(m):
array = m.get_array() # matrix의 객체로 부터 array라는 리스트 생
for y in range(m.get_dy()-4): # y는 matrix의 각 행을 말함.
for x in range(4, m.get_dx()-4):
if array[y][x] == 0:
LMD.set_pixel(x, y, 0)
elif array[y][x] == 1:
LMD.set_pixel(x, y, 1)
elif array[y][x] == 2:
LMD.set_pixel(x, y, 2)
elif array[y][x] == 3:
LMD.set_pixel(x, y, 3)
elif array[y][x] == 4:
LMD.set_pixel(x, y, 4)
elif array[y][x] == 5:
LMD.set_pixel(x, y, 5)
elif array[y][x] == 6:
LMD.set_pixel(x, y, 6)
elif array[y][x] == 7:
LMD.set_pixel(x, y, 7)
print()
#색 다르게 설정해야함.
GameOver = [
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 ,7, 7, 7],
[0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7],
[1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 ,7 ,7 ,7],
[1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 ,7 ,7 ,7],
[1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,7 ,7 ,7 ,7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 ,7 ,7 ,7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1 ,7 ,7 ,7 ,7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1 ,7 ,7 ,7 ,7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0 ,7 ,7 ,7 ,7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1 ,7 ,7 ,7 ,7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],]
rand = random.randint(1, 5)
def set_block(rand):
if rand == 1:
arrayBlk = [[1]] #enemy
elif rand == 2:
arrayBlk = [[1, 1]]
elif rand == 3:
arrayBlk = [[1, 1, 1]]
elif rand == 4:
arrayBlk = [[4, 4], [4, 4]] # character
return arrayBlk
Rand=random.randint(1,5)
def set_color(Rand):
if Rand == 1:
array2Blk = [[3, 3], [3, 3]]
if Rand == 2:
array2Blk = [[2, 2], [2, 2]]
elif Rand == 3:
array2Blk = [[5, 5], [5, 5]]
elif Rand == 4:
array2Blk = [[6, 6], [6, 6]]
return array2Blk
iScreenDy = 16 # 높이를 15칸으로 정의
iScreenDx = 32 # 폭을 10칸으로 정의
iScreenDw = 4
character_top = 6 # 초록색으로 지정해야함. , 나오는 도형의 좌측상단의 좌표y=0
character_left = 28 # 똥이 나오는 x좌표
item_top = random.randrange(4, 19) # 파란색으로 지정해야함. item 초기 y값
item_left = 4
enemy_top = random.randrange(4, 19) # 빨간색으로 지정해야함. enemy 초기 y값
enemy_left = 0
enemy2_top = random.randrange(4, 19) # 빨간색으로 지정해야함. enemy 초기 y값
enemy2_left = 1
enemy3_top = random.randrange(4, 19) # 빨간색으로 지정해야함. enemy 초기 y값
enemy3_left = 2
gameover=False
arrayScreen = [
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 0
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 2
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 4
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 6
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 8
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 10
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 12
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7], # 14
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
]
item_speed = 1
enemy_speed = 1
enemy2_speed = 1
enemy3_speed = 1
score=0
rand = random.randrange(1, 4)
enemyBlk = Matrix(set_block(rand))
rand = random.randrange(1, 4)
enemy2Blk = Matrix(set_block(rand))
rand = random.randrange(1, 4)
enemy3Blk = Matrix(set_block(rand))
Rand = random.randrange(1, 5)
itemBlk = Matrix(set_color(Rand))
print("아이템을 6(=30점)개 먹을때 마다 속도가 빨라집니다!")
time.sleep(2)
LED_init()
item_plus = 0
enemy_plus = 0
enemy2_plus = 0
enemy3_plus = 0
#item_speed = 1
#enemy_speed = 1
start_time = time.time()
while True:
iScreen = Matrix(arrayScreen)
# iscreen(입력스크린)
oScreen = Matrix(iScreen)
# oscreen(출력스크린)
charBlk = Matrix(set_block(4))
chartempBlk = iScreen.clip(character_top, character_left, character_top + charBlk.get_dy(),
character_left + charBlk.get_dx())
chartempBlk = chartempBlk + charBlk
oScreen.paste(chartempBlk, character_top, character_left)
itemtempBlk = iScreen.clip(item_top, item_left, item_top + itemBlk.get_dy(), item_left + itemBlk.get_dx())
itemtempBlk = itemtempBlk + itemBlk
oScreen.paste(itemtempBlk, item_top, item_left)
enemytempBlk = iScreen.clip(enemy_top, enemy_left, enemy_top + enemyBlk.get_dy(), enemy_left + enemyBlk.get_dx())
enemytempBlk = enemytempBlk + enemyBlk
oScreen.paste(enemytempBlk, enemy_top, enemy_left)
draw_matrix(oScreen);print()
enemy2tempBlk = iScreen.clip(enemy2_top, enemy2_left, enemy2_top + enemy2Blk.get_dy(), enemy2_left + enemy2Blk.get_dx())
enemy2tempBlk = enemy2tempBlk + enemy2Blk
oScreen.paste(enemy2tempBlk, enemy2_top, enemy2_left)
draw_matrix(oScreen);print()
enemy3tempBlk = iScreen.clip(enemy3_top, enemy3_left, enemy3_top + enemy3Blk.get_dy(), enemy3_left + enemy3Blk.get_dx())
enemy3tempBlk = enemy3tempBlk + enemy3Blk
oScreen.paste(enemy3tempBlk, enemy3_top, enemy3_left)
draw_matrix(oScreen);print()
item_plus = score//30
enemy_plus = score//30
enemy2_plus = score//30
enemy3_plus = score//30
#아이템과 똥이 떨어지는 것을 표현하기 위함.
item_left += 1 + item_plus
enemy_left += 1 + enemy_plus
enemy2_left += 1 + enemy2_plus
enemy3_left += 1 + enemy3_plus
gameover=False
#스코어가 30점씩 늘어날때 마다 스피드가 빨라지게 하는 코드 작성해야함.
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
pg.quit()
sys.exit()
if event.type == pg.KEYDOWN:
if event.key == pg.K_LEFT:
if character_top < 18:
character_top += 1
elif event.key == pg.K_RIGHT:
if character_top > 4:
character_top -= 1
if enemytempBlk.anyGreaterThan(7):
enemy_left = 2
enemy_top = random.randrange(4, 19)
rand=random.randrange(1,4)
enemyBlk = Matrix(set_block(rand))
if enemy2tempBlk.anyGreaterThan(7):
enemy2_left = 3
enemy2_top = random.randrange(4, 19)
rand=random.randrange(1,4)
enemy2Blk = Matrix(set_block(rand))
if enemy3tempBlk.anyGreaterThan(7):
enemy3_left = 4
enemy3_top = random.randrange(4, 19)
rand=random.randrange(1,4)
enemy3Blk = Matrix(set_block(rand))
if itemtempBlk.anyGreaterThan(7):
item_left = 1
item_top = random.randrange(4, 19) # 파란색으로 지정해야함
while(enemy_top-1 <= item_top <= enemy_top+1):
item_top = random.randrange(4,19)
if item_plus < 4:
if item_left == 25 :
if character_top-2<=item_top<=character_top+1:
score+=5
Rand = random.randrange(1, 5)
itemBlk = Matrix(set_color(Rand))
while (enemy_top - 1 <= item_top <= enemy_top + 2):
item_left = 1
item_top = random.randrange(4, 19)
elif item_plus >= 4:
if item_left >= 25 :
if character_top-2<=item_top<=character_top+1:
score+=5
Rand = random.randrange(1, 5)
itemBlk = Matrix(set_color(Rand))
while (enemy_top - 1 <= item_top <= enemy_top + 2):
item_left = 1
item_top = random.randrange(4, 19)
if 27 < enemy_left :
if character_top <= enemy_top <= character_top + 1:
gameover=True
break
if 27 < enemy2_left :
if character_top <= enemy2_top <= character_top + 1:
gameover=True
break
if 27 < enemy3_left :
if character_top <= enemy3_top <= character_top + 1:
gameover=True
break
print(score)
time.sleep(0.1)
if gameover == True:
gameoScreen = Matrix(GameOver)
draw_matrix(gameoScreen);print()
print(score)
time.sleep(2)
| [
"noreply@github.com"
] | kimtaehoho.noreply@github.com |
9fbb1f254e2534f3160cc786d9b8586bce20608e | 9536a77dc5a6c5554e15209f2e2b47d0b1f89f04 | /gs1/api/serializers.py | bc4b1a7a592d0573aec705a3fb9771c446c7a90d | [] | no_license | arpit3q/DRFCode | 52ac7393f3bb6048c45657dffbe10e1db4f4b8e2 | d7b20a156d1ad4f5f5f281802d30da22d0763af4 | refs/heads/main | 2023-04-05T11:38:55.334912 | 2021-04-17T11:31:29 | 2021-04-17T11:31:29 | 358,861,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from rest_framework import serializers
class StudentSerializer(serializers.Serializer):
name = serializers.CharField(max_length=100)
roll = serializers.IntegerField()
city = serializers.CharField(max_length=100)
| [
"arpit3qq@gmail.com"
] | arpit3qq@gmail.com |
9594fb30b6b05afecf3b7962db4780dc72aa6172 | 7e30ab277ceb19eee074956e5792fc97c1ca2448 | /python-sdk/test/test_vehicle_stats_fuel_percent.py | fda9ecdfae4b4dcd03a8ed712cbd3340d5034fbd | [] | no_license | samsarahq/samsara-sdks | 7e9ef1f5b3958b2ac113d150d3360249a0cecff1 | e43cdc0b302b0a8f467ecb10c4210f994ab3674a | refs/heads/master | 2022-12-20T20:40:24.040207 | 2022-12-16T21:45:43 | 2022-12-16T21:45:43 | 224,282,335 | 0 | 1 | null | 2022-12-16T21:45:44 | 2019-11-26T20:41:12 | Python | UTF-8 | Python | false | false | 1,638 | py | # coding: utf-8
"""
Samsara API
Integrate your data with the Samsara API, customize the Samsara experience, and join a community of developers building with Samsara. # noqa: E501
The version of the OpenAPI document: 2019-12-12
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import samsara
from samsara.models.vehicle_stats_fuel_percent import VehicleStatsFuelPercent # noqa: E501
from samsara.rest import ApiException
class TestVehicleStatsFuelPercent(unittest.TestCase):
"""VehicleStatsFuelPercent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test VehicleStatsFuelPercent
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = samsara.models.vehicle_stats_fuel_percent.VehicleStatsFuelPercent() # noqa: E501
if include_optional :
return VehicleStatsFuelPercent(
time = '2020-01-27T07:06:25Z',
value = 54
)
else :
return VehicleStatsFuelPercent(
time = '2020-01-27T07:06:25Z',
value = 54,
)
def testVehicleStatsFuelPercent(self):
"""Test VehicleStatsFuelPercent"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"tyler.freckmann@samsara.com"
] | tyler.freckmann@samsara.com |
9472e05180e836aba9108404c926e7ac9797e95d | 9abc5ae8f67ebde2c31278197566a4403664bab0 | /classes.py | 080cd082acc3dc0bd4d12a4fbd534cea9e95d93a | [] | no_license | rjalfa/CFMemory | 5cae4b65308948b64080f310b996360c99665451 | 824e5029d68a1b86c00779e4fde49a1ee5a5d188 | refs/heads/master | 2021-06-29T21:49:00.980905 | 2017-09-18T16:36:04 | 2017-09-18T16:36:04 | 103,962,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | # Class for User
class User:
def __init__(self, index, user_id,age,gender,occupation,zip_code):
self.index = index
self.user_id = user_id
self.age = age
self.gender = gender
self.occupation = occupation
self.zip_code = zip_code
# Class for Movie
class Movie:
def __init__(self,index, movie_id,title,release_date, video_release_date,imdb_url,genre_vector):
self.index = index
self.movie_id = movie_id
self.title = title
self.release_date = release_date
self.video_release_date = video_release_date
self.imdb_url = imdb_url
self.genre_vector = genre_vector
# Class for association
class Rating:
def __init__(self, user_id, movie_id, rating, timestamp):
self.user_id = user_id
self.movie_id = movie_id
self.rating = rating
self.timestamp = timestamp
# Parse user data , returns list of User objects
def parse_users(filename):
users = []
with open(filename, encoding='iso-8859-1') as f:
for row in f.readlines():
row = row.strip('\n')
r = row.split('|')
users.append(User(len(users),int(r[0]),int(r[1]),r[2],r[3],r[4]))
return users
# Parse movie data , returns list of movie objects
def parse_movies(filename):
movies = []
with open(filename, encoding='iso-8859-1') as f:
for row in f.readlines():
row = row.strip('\n')
r = row.split('|')
movies.append(Movie(len(movies),int(r[0]),r[1],r[2],r[3],r[4],list(map(int,r[5:]))))
return movies
# Parse rating data , returns list of rating objects
def parse_ratings(filename):
ratings = []
with open(filename, encoding='iso-8859-1') as f:
for row in f.readlines():
row = row.strip('\n')
r = row.split('\t')
ratings.append(Rating(int(r[0]),int(r[1]),int(r[2]), int(r[3])))
return ratings
def user_item_matrix(users, movies, ratings):
d_users = {}
d_movies = {}
for user in users:
d_users[user.user_id] = user
for movie in movies:
d_movies[movie.movie_id] = movie
matrix = [[0 for i in range(len(movies))] for i in range(len(users))]
for rating in ratings:
matrix[d_users[rating.user_id].index][d_movies[rating.movie_id].index] = rating.rating
return matrix
def user_category_matrix(users, movies, ratings, categories):
d_users = {}
d_movies = {}
for user in users:
d_users[user.user_id] = user
for movie in movies:
d_movies[movie.movie_id] = movie
matrix = [[[0,0] for i in range(len(categories))] for i in range(len(users))]
for rating in ratings:
user_index = d_users[rating.user_id].index
for i in range(len(d_movies[rating.movie_id].genre_vector)):
if d_movies[rating.movie_id].genre_vector[i] == 1:
if rating.rating >= 3:
matrix[user_index][i][0] += 1
else:
matrix[user_index][i][1] += 1
return matrix
def item_category_matrix(movies):
return list(map(lambda x: x.genre_vector, movies)) | [
"rounaqwl66@gmail.com"
] | rounaqwl66@gmail.com |
e6c6dba5e9ed13f50e988e9357b3ace741e15349 | 00015dbf08177c33bb520e6d06ce6edb51a1fc72 | /hw/T07(web server)/T27.25/server.py | 280e050f18acf1624839534580c70dfd5acec98b | [] | no_license | avmepy/applied_programming | 6ab9c6c19a583318694a04ea505bae3101da9e6b | bd27a9c4b8d43d50cb596edda0c09bfd70ef8f71 | refs/heads/master | 2020-07-16T23:21:28.114859 | 2019-11-30T11:43:14 | 2019-11-30T11:43:14 | 205,889,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
# author: Valentyn Kofanov
import cgi
from copy import deepcopy
PORT = 8000
TRAVEL_PAGE = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<title>ticket price</title>
<body>
<h3>ticket price</h3>
<form method=POST action="">
<p>Enter name: </p>
<input type=text name=name value="">
<p>Enter byear: </p>
<input type=text name=byear value="">
<p>Enter departure: </p>
<input type=text name=departure value="">
<p>Enter destination: </p>
<input type=text name=destination value="">
<input type=submit value="get price">
</form>
<p>
{}
</p>
</body>
</html>
"""
ADD_PAGE = ''
HTML_PAGE = deepcopy(TRAVEL_PAGE)
class Person:
def __init__(self, name=None, byear=None):
self.name = name
self.byear = byear
def input(self):
self.name = input("enter name: ")
self.byear = input("enter byear: ")
def print(self):
print(self.name, self.byear, end=" ")
class Passenger(Person):
def __init__(self, name=None, byear=None, departure=None, destination=None):
Person.__init__(self, name=name, byear=byear)
self.departure = departure
self.destination = destination
def input(self):
Person.input(self)
self.departure = input("enter departure: ")
self.destination = input("enter destination: ")
def print(self):
Person.print(self)
print(self.departure, self.destination, end=" ")
def get_price(self, filename="travel.txt"):
with open(filename, 'r') as fin:
travel_list = [i.split() for i in fin]
price = -1
for route in travel_list:
if ((route[0] == self.departure and route[1] == self.destination) or
(route[1] == self.departure and route[0] == self.destination)):
price = 2.5 * float(route[2])
break
return price
def application(environ, start_response, filename="travel.txt"):
if environ.get('PATH_INFO', '').lstrip('/') == '':
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
result = ''
HTML_PAGE = TRAVEL_PAGE
if 'departure' in form and 'destination' in form and 'name' in form and 'byear' in form:
name = str(form['name'].value)
byear = str(form['byear'].value)
departure = str(form['departure'].value)
destination = str(form['destination'].value)
p = Passenger(name=name, byear=byear, departure=departure, destination=destination)
result = f'{name}{byear} , departure: {departure}, destination {destination}, ticket price = {p.get_price()}'
HTML_PAGE = deepcopy(TRAVEL_PAGE)
elif 'depar' in form and 'destin' in form and 'price' in form:
depar = str(form['depar'].value)
destin = str(form['destin'].value)
price = str(form['price'].value)
with open(filename, 'a') as f:
f.write(f'{depar} {destin} {price}')
HTML_PAGE = ADD_PAGE
body = HTML_PAGE.format(result)
start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])
else:
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
body = 'Сторінку не знайдено'
return [bytes(body, encoding='utf-8')]
if __name__ == '__main__':
from wsgiref.simple_server import make_server
print('=== Local WSGI webserver ===')
print(f'http://localhost:{PORT}')
httpd = make_server('localhost', PORT, application)
httpd.serve_forever()
| [
"valentyn.kofanov@knu.ua"
] | valentyn.kofanov@knu.ua |
8d9bbcf792afe8a014df437737fec5c542ae3093 | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/contrib/eager/python/checkpointable_utils.py | 4a060f0c75f66abe18e471a6b7e2ccdaaba062c3 | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,665 | py | """Utilities for working with Checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.contrib.eager.proto import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as core_checkpointable
from tensorflow.python.training import checkpointable_utils as core_checkpointable_utils
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
# Key where the object graph proto is saved in a TensorBundle
_OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
"""Add a variable to a Checkpointable with no scope influence."""
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
def _escape_local_name(name):
  """Escape a local dependency name for use in a checkpoint key."""
  # Local names may legitimately contain "/" for compatibility (e.g. names
  # passed to Layer.add_variable), but "/" is also the separator between
  # edges in a checkpoint key. Double up the escape character first so the
  # encoding stays unambiguous, then encode forward slashes.
  escaped = name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR * 2)
  return escaped.replace("/", _ESCAPE_CHAR + "S")
def _object_prefix_from_path(path_to_root):
  """Join escaped local names along a dependency path into a key prefix."""
  parts = [_escape_local_name(checkpointable.name)
           for checkpointable in path_to_root]
  return "/".join(parts)
def _slot_variable_naming_for_optimizer(optimizer_path):
  """Make a function for naming slot variables in an optimizer."""
  # Slot variables are named:
  #
  #   <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
  #
  # where <variable name> is exactly the checkpoint name used for the original
  # variable, including the path from the checkpoint root and the local name
  # in the object which owns it. Note that we only save slot variables if the
  # variable it's slotting for is also being saved.
  infix = "/{}/{}/".format(_OPTIMIZER_SLOTS_NAME, optimizer_path)

  def _name_slot_variable(variable_path, slot_name):
    """With an optimizer specified, name a slot variable."""
    return "".join((variable_path, infix, _escape_local_name(slot_name)))

  return _name_slot_variable
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
  """Gather and name slot variables.

  Scans `checkpointable_objects` for optimizers and assigns checkpoint names
  to their slot variables. A slot variable is only named when the variable it
  slots for is itself present in `checkpointable_objects`.

  Mutates its arguments in place: discovered slot variables are appended to
  `checkpointable_objects`, and entries for them are added to `node_ids` and
  `object_names`.

  Args:
    checkpointable_objects: BFS-sorted list of `Checkpointable` objects
      (extended in place).
    node_ids: Mapping from object to node id (extended in place).
    object_names: Mapping from object to checkpoint name prefix (extended in
      place).

  Returns:
    A dictionary mapping each optimizer to a list of `SlotVariableReference`
    protos for its slot variables.
  """
  non_slot_objects = list(checkpointable_objects)
  slot_variables = {}
  for checkpointable in non_slot_objects:
    if isinstance(checkpointable, optimizer_lib.Optimizer):
      naming_scheme = _slot_variable_naming_for_optimizer(
          optimizer_path=object_names[checkpointable])
      slot_names = checkpointable.get_slot_names()
      for slot_name in slot_names:
        for original_variable_node_id, original_variable in enumerate(
            non_slot_objects):
          try:
            slot_variable = checkpointable.get_slot(
                original_variable, slot_name)
          except AttributeError:
            # Not every tracked object is a variable with a slot; skip it.
            slot_variable = None
          if slot_variable is None:
            continue
          slot_variable._maybe_initialize_checkpointable()  # pylint: disable=protected-access
          if slot_variable._checkpoint_dependencies:  # pylint: disable=protected-access
            # TODO(allenl): Gather dependencies of slot variables.
            raise NotImplementedError(
                "Currently only variables with no dependencies can be saved as "
                "slot variables. File a feature request if this limitation "
                "bothers you.")
          if slot_variable in node_ids:
            raise NotImplementedError(
                "A slot variable was re-used as a dependency of a "
                "Checkpointable object. This is not currently allowed. File a "
                "feature request if this limitation bothers you.")
          checkpoint_name = naming_scheme(
              variable_path=object_names[original_variable],
              slot_name=slot_name)
          object_names[slot_variable] = checkpoint_name
          # Slot variables get ids after all non-slot objects.
          slot_variable_node_id = len(checkpointable_objects)
          node_ids[slot_variable] = slot_variable_node_id
          checkpointable_objects.append(slot_variable)
          slot_variable_proto = (
              checkpointable_object_graph_pb2.CheckpointableObjectGraph
              .Object.SlotVariableReference(
                  slot_name=slot_name,
                  original_variable_node_id=original_variable_node_id,
                  slot_variable_node_id=slot_variable_node_id))
          slot_variables.setdefault(checkpointable, []).append(
              slot_variable_proto)
  return slot_variables
def _serialize_checkpointables(
    checkpointable_objects, node_ids, object_names, slot_variables):
  """Name non-slot `Checkpointable`s and add them to `object_graph_proto`.

  Args:
    checkpointable_objects: BFS-sorted list of objects; position in the list
      must match `node_ids`.
    node_ids: Mapping from object to node id.
    object_names: Mapping from object to checkpoint name prefix.
    slot_variables: Mapping from optimizer to `SlotVariableReference` protos,
      as returned by `_serialize_slot_variables`.

  Returns:
    A tuple of (named_saveables, object_graph_proto): a dict mapping
    checkpoint keys to saveables, and the serialized object graph.
  """
  object_graph_proto = (
      checkpointable_object_graph_pb2.CheckpointableObjectGraph())
  named_saveables = {}
  for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
    # Node order in the proto must match the ids assigned during traversal.
    assert node_ids[checkpointable] == checkpoint_id
    object_proto = object_graph_proto.nodes.add()
    object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
    object_name = object_names[checkpointable]
    for name, saveable in (
        checkpointable._gather_saveables_for_checkpoint().items()):  # pylint: disable=protected-access
      attribute = object_proto.attributes.add()
      attribute.name = name
      attribute.checkpoint_key = "%s/%s/%s" % (
          object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
      # Figure out the name-based Saver's name for this variable.
      saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
          [saveable], convert_variable_to_tensor=False)
      attribute.full_name, = saver_dict.keys()
      named_saveables[attribute.checkpoint_key] = saveable
    for child in checkpointable._checkpoint_dependencies:  # pylint: disable=protected-access
      child_proto = object_proto.children.add()
      child_proto.node_id = node_ids[child.ref]
      child_proto.local_name = child.name
  return named_saveables, object_graph_proto
def _serialize_object_graph(root_checkpointable):
  """Determine checkpoint keys for variables and build a serialized graph.

  Non-slot variables are keyed based on a shortest path from the root saveable
  to the object which owns the variable (i.e. the one which called
  `Checkpointable._add_variable` to create it).

  Slot variables are keyed based on a shortest path to the variable being
  slotted for, a shortest path to their optimizer, and the slot name.

  Args:
    root_checkpointable: A `Checkpointable` object whose variables (including
      the variables of dependencies, recursively) should be saved.

  Returns:
    A tuple of (named_variables, object_graph_proto):
      named_variables: A dictionary mapping names to variable objects.
      object_graph_proto: A CheckpointableObjectGraph protocol buffer
        containing the serialized object graph and variable references.

  Raises:
    ValueError: If there are invalid characters in an optimizer's slot names.
  """
  visit_order, paths = _breadth_first_checkpointable_traversal(
      root_checkpointable)
  object_names = {}
  for checkpointable, path in paths.items():
    object_names[checkpointable] = _object_prefix_from_path(path)
  node_ids = {}
  for node_id, checkpointable in enumerate(visit_order):
    node_ids[checkpointable] = node_id
  # Naming slot variables extends visit_order/node_ids/object_names in place.
  slot_variables = _serialize_slot_variables(
      checkpointable_objects=visit_order,
      node_ids=node_ids,
      object_names=object_names)
  return _serialize_checkpointables(
      checkpointable_objects=visit_order,
      node_ids=node_ids,
      object_names=object_names,
      slot_variables=slot_variables)
def gather_initializers(root_checkpointable):
  """Traverse the object graph and find initialization ops.

  Looks for `Checkpointable` objects which are dependencies of
  `root_checkpointable` and which have an `initializer` property. Includes
  initializers for slot variables only if the variable they are slotting for
  and the optimizer are dependencies of `root_checkpointable` (i.e. if they
  would be saved with a checkpoint).

  Args:
    root_checkpointable: A `Checkpointable` object to gather initializers for.

  Returns:
    A list of initialization ops.
  """
  # TODO(allenl): Extract out gathering logic so the naming logic doesn't have
  # to run.
  visit_order, paths = _breadth_first_checkpointable_traversal(
      root_checkpointable)
  prefixes = {obj: _object_prefix_from_path(path)
              for obj, path in paths.items()}
  ids = {node: index for index, node in enumerate(visit_order)}
  # Called for its side effect: slot variables discovered here are appended
  # to `visit_order`, so their initializers are collected below as well.
  _serialize_slot_variables(
      checkpointable_objects=visit_order,
      node_ids=ids,
      object_names=prefixes)
  initializers = []
  for checkpointable in visit_order:
    initializer = getattr(checkpointable, "initializer", None)
    if initializer is not None:
      initializers.append(initializer)
  return initializers
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
  """A saveable which writes a tensor to the checkpoint but restores nothing.

  Used for the serialized object graph proto, which is written on save but
  has no corresponding Python object to restore into.
  """

  def __init__(self, tensor, name):
    spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
    super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)

  def restore(self, restored_tensors, restored_shapes):
    # Intentionally a no-op: there is nothing to assign on restore.
    return control_flow_ops.no_op()
class _LoadStatus(object):
  """Abstract base for load status callbacks.

  Concrete implementations in this file: `CheckpointLoadStatus`,
  `InitializationOnlyStatus`, and `NameBasedSaverStatus`.
  """

  @abc.abstractmethod
  def assert_consumed(self):
    """Raises an exception unless a non-trivial restoration has completed."""
    pass

  @abc.abstractmethod
  def run_restore_ops(self, session=None):
    """Runs restore ops from the checkpoint. Requires a valid checkpoint."""
    pass

  @abc.abstractmethod
  def initialize_or_restore(self, session=None):
    """Runs restore ops from the checkpoint, or initializes variables."""
    pass
class CheckpointLoadStatus(_LoadStatus):
  """Checks the status of checkpoint loading and manages restore ops.

  Returned from `Saver.restore`. Since `restore` may defer the loading of values
  in the checkpoint which don't yet have corresponding Python objects,
  `CheckpointLoadStatus` provides a callback to verify that checkpoint loading
  is complete (`assert_consumed`).

  When graph building, `restore` does not run restore ops itself since their
  creation may be deferred. The `run_restore_ops` method must be called once all
  Python objects with values to restore have been created and added to the
  dependency graph (this does not necessarily have to be the whole checkpoint;
  calling `run_restore_ops` while `assert_consumed` fails is supported and will
  partially restore the checkpoint).

  See `Saver.restore` for usage examples.
  """

  def __init__(self, checkpoint, feed_dict):
    # `checkpoint` is the internal _Checkpoint bookkeeping object;
    # `feed_dict` supplies the file-prefix placeholder feed when graph
    # building (None when executing eagerly).
    self._checkpoint = checkpoint
    self._feed_dict = feed_dict

  def assert_consumed(self):
    """Asserts that all objects in the checkpoint have been created/matched.

    Returns:
      `self` for chaining.

    Raises:
      AssertionError: If there are any Python objects in the dependency graph
        which have not been restored from this checkpoint or a later `restore`,
        or if there are any checkpointed values which have not been matched to
        Python objects.
    """
    for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
      checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
      if checkpointable is None:
        raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
      if checkpointable._update_uid < self._checkpoint.restore_uid:  # pylint: disable=protected-access
        raise AssertionError(
            "Object not assigned a value from checkpoint: %s" % (node,))
    if self._checkpoint.slot_restorations:
      # Sanity check; this collection should be clear if everything has been
      # restored.
      raise AssertionError("Unresolved slot restorations: %s" % (
          self._checkpoint.slot_restorations,))
    if self._checkpoint.unused_attributes:
      raise AssertionError(
          ("Unused attributes in these objects (the attributes exist in the "
           "checkpoint but not in the objects): %s") % (
               self._checkpoint.unused_attributes.items(),))
    return self

  def run_restore_ops(self, session=None):
    """Run operations to restore objects in the dependency graph."""
    if context.executing_eagerly():
      return  # Run eagerly
    if session is None:
      session = ops.get_default_session()
    session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)

  def initialize_or_restore(self, session=None):
    """Alias for `run_restore_ops`.

    This method has a sibling in `InitializationOnlyStatus` which instead
    initializes variables. That type is returned if no checkpoint is specified
    in `Saver.restore`.

    Args:
      session: The session to run restore ops in. If `None`, uses the default
        session.
    """
    self.run_restore_ops(session=session)
class InitializationOnlyStatus(_LoadStatus):
  """Returned from `Saver.restore` when no checkpoint has been specified.

  Objects of this type have the same `assert_consumed` method as
  `CheckpointLoadStatus`, but it always fails. However,
  `initialize_or_restore` works on objects of both types, and will
  initialize variables in `InitializationOnlyStatus` objects or restore them
  otherwise.
  """

  def __init__(self, root_checkpointable):
    self._root_checkpointable = root_checkpointable

  def assert_consumed(self):
    """Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
    raise AssertionError(
        "No checkpoint specified (save_path=None); nothing is being restored.")

  def run_restore_ops(self, session=None):
    """For consistency with `CheckpointLoadStatus`.

    Use `initialize_or_restore` for initializing if no checkpoint was passed
    to `Saver.restore` and restoring otherwise.

    Args:
      session: Not used.
    """
    raise AssertionError(
        "No checkpoint specified, so no restore ops are available "
        "(save_path=None to Saver.restore).")

  def initialize_or_restore(self, session=None):
    """Runs initialization ops for variables.

    Only objects which would be saved by `Saver.save` will be initialized. See
    `gather_initializers` for details.

    This method does nothing when executing eagerly (initializers get run
    eagerly).

    Args:
      session: The session to run initialization ops in. If `None`, uses the
        default session.
    """
    if context.executing_eagerly():
      return  # run eagerly
    if session is None:
      session = ops.get_default_session()
    session.run(gather_initializers(self._root_checkpointable))
# Deprecation notice attached to NameBasedSaverStatus.run_restore_ops, shown
# when an object-based restore falls back to a name-based checkpoint.
_DEPRECATED_RESTORE_INSTRUCTIONS = (
    "Restoring a name-based tf.train.Saver checkpoint using the object-based "
    "restore API. This mode uses global names to match variables, and so is "
    "somewhat fragile. It also adds new restore ops to the graph each time it "
    "is called. Prefer re-encoding training checkpoints in the object-based "
    "format: run save() on the object-based saver (the same one this message "
    "is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
  """Status for loading a name-based training checkpoint."""

  def __init__(self, object_saver, save_path):
    # `object_saver` is the CheckpointableSaver which fell back to name-based
    # loading; `save_path` is the checkpoint prefix passed to its restore().
    self._object_saver = object_saver
    self._save_path = save_path

  def assert_consumed(self):
    """Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
    raise AssertionError(
        "Restoring a name-based checkpoint. No load status is available.")

  @deprecation.deprecated(
      date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
  def run_restore_ops(self, session=None):
    """Load the name-based training checkpoint using a new `tf.train.Saver`."""
    if session is None and not context.executing_eagerly():
      session = ops.get_default_session()
    with ops.device("/cpu:0"):
      saver_lib.Saver(self._object_saver._global_variable_names()).restore(  # pylint: disable=protected-access
          sess=session, save_path=self._save_path)

  def initialize_or_restore(self, session=None):
    """Alias for `run_restore_ops`."""
    self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
  """Pretends to be a session, inserts extra feeds on run()."""

  def __init__(self, session, feed_additions):
    self._wrapped_session = session
    self._feed_additions = feed_additions

  def run(self, fetches, feed_dict=None, **kwargs):
    # Merge the caller's feeds with the extra feeds, without mutating the
    # caller's dictionary.
    merged_feeds = dict(feed_dict) if feed_dict is not None else {}
    merged_feeds.update(self._feed_additions)
    return self._wrapped_session.run(
        fetches=fetches, feed_dict=merged_feeds, **kwargs)
class CheckpointableSaver(object):
  """Saves and restores a `Checkpointable` object and its dependencies.

  See `Checkpointable` for details of dependency management. `Saver` wraps
  `tf.train.Saver` for saving, including extra information about the graph of
  dependencies between Python objects. When restoring, it uses this information
  about the save-time dependency graph to more robustly match objects with their
  checkpointed values. When executing eagerly, it supports restoring variables
  on object creation (see `Saver.restore`).

  Values in a checkpoint are mapped to `Checkpointable` Python objects
  (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
  checkpoint was written. To avoid breaking existing checkpoints when modifying
  a class, dependency names (the names of attributes to which `Checkpointable`
  objects are assigned) may not change. These names are local to objects, in
  contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and
  so allow additional program transformations.
  """

  def __init__(self, root_checkpointable):
    """Configure saving.

    Args:
      root_checkpointable: The root of the object graph to save/restore. This
        object and all of its dependencies are saved in the checkpoint. When
        restoring, objects are matched and restored starting from this root.
    """
    # Allow passing in a weak reference to avoid reference cycles when
    # `Checkpointable` objects save themselves.
    self._root_checkpointable_ref = root_checkpointable
    if not context.executing_eagerly():
      with ops.device("/cpu:0"):
        self._file_prefix_placeholder = constant_op.constant("model")
    else:
      self._file_prefix_placeholder = None
    # Op caching for save
    self._object_graph_feed_tensor = None
    self._last_save_object_graph = None
    self._last_save_saver = None
    # Op caching for restore
    self._object_graph_restore_tensor = None
    self._last_restore_object_graph = None
    self._last_restore_checkpoint = None

  @property
  def _root_checkpointable(self):
    # Dereference the weakref (if one was passed in); the referent must still
    # be alive whenever this saver is used.
    if isinstance(self._root_checkpointable_ref, weakref.ref):
      derefed = self._root_checkpointable_ref()
      assert derefed is not None
      return derefed
    else:
      return self._root_checkpointable_ref

  def save(self, file_prefix, checkpoint_number=None, session=None):
    """Save a training checkpoint.

    The saved checkpoint includes variables created by this object and any
    Checkpointable objects it depends on at the time `Saver.save()` is called.

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix). Names are generated based on this
        prefix and `checkpoint_number`, if provided.
      checkpoint_number: An integer variable or Tensor, used to number
        checkpoints. Typically this value is saved along with other variables in
        training checkpoints, which will happen automatically if it was created
        by `root_checkpointable` or one of its dependencies (via
        `Checkpointable._add_variable`).
      session: The session to evaluate variables in. Ignored when executing
        eagerly. If not provided when graph building, the default session is
        used.

    Returns:
      The full path to the checkpoint.
    """
    named_variables, graph_proto = _serialize_object_graph(
        self._root_checkpointable)
    in_graph_mode = not context.executing_eagerly()
    if in_graph_mode:
      if session is None:
        session = ops.get_default_session()
      if self._object_graph_feed_tensor is None:
        with ops.device("/cpu:0"):
          self._object_graph_feed_tensor = constant_op.constant(
              "", dtype=dtypes.string)
      # When graph building, the serialized object graph is fed rather than
      # embedded, so the same graph can be reused across saves.
      object_graph_tensor = self._object_graph_feed_tensor
      feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
    else:
      session = None
      with ops.device("/cpu:0"):
        object_graph_tensor = constant_op.constant(
            graph_proto.SerializeToString(), dtype=dtypes.string)
      feed_additions = None
    assert _OBJECT_GRAPH_PROTO_KEY not in named_variables
    # The object graph is saved alongside the variables under a reserved key.
    named_variables[_OBJECT_GRAPH_PROTO_KEY] = _NoRestoreSaveable(
        tensor=object_graph_tensor,
        name=_OBJECT_GRAPH_PROTO_KEY)
    if not in_graph_mode or self._last_save_object_graph != graph_proto:
      if self._last_save_object_graph is not None and in_graph_mode:
        raise NotImplementedError(
            "Using a single Saver to save a mutated object graph is not "
            "currently supported when graph building. Use a different Saver "
            "when the object graph changes (save ops will be duplicated), or "
            "file a feature request if this limitation bothers you.")
      saver = saver_lib.Saver(var_list=named_variables)
      if in_graph_mode:
        # Cache the saver so repeated saves don't duplicate save ops.
        self._last_save_saver = saver
        self._last_save_object_graph = graph_proto
    else:
      saver = self._last_save_saver
    with ops.device("/cpu:0"):
      save_path = saver.save(
          sess=_SessionWithFeedDictAdditions(
              session=session, feed_additions=feed_additions),
          save_path=file_prefix,
          write_meta_graph=False,
          global_step=checkpoint_number)
    return save_path

  def _global_variable_names(self):
    """Generate a `tf.train.Saver`-style `var_list` using `variable.name`s."""
    named_saveables, graph_proto = _serialize_object_graph(
        self._root_checkpointable)
    saver_names = {}
    for object_proto in graph_proto.nodes:
      for attribute_proto in object_proto.attributes:
        # Map the name-based Saver's full name back to the saveable.
        saver_names[attribute_proto.full_name] = named_saveables[
            attribute_proto.checkpoint_key]
    return saver_names

  def restore(self, save_path, session=None):
    """Restore a training checkpoint.

    Restores `root_checkpointable` and any objects that it tracks
    (transitive). Either assigns values immediately if variables to restore have
    been created already, or defers restoration until the variables are
    created. Dependencies added to the `root_checkpointable` passed to the
    constructor after this call will be matched if they have a corresponding
    object in the checkpoint.

    When building a graph, restorations are added to the graph but not run. A
    session is required to retrieve checkpoint metadata.

    To disallow deferred loading, assert immediately that all checkpointed
    variables have been matched to variable objects:

    ```python
    saver = Saver(root)
    saver.restore(path).assert_consumed()
    ```

    An exception will be raised unless every object was matched and its
    variables already exist.

    When graph building, `assert_consumed()` indicates that all of the restore
    ops which will be created for this checkpoint have been created. They can be
    run via the `run_restore_ops()` function of the status object:

    ```python
    saver.restore(path).assert_consumed().run_restore_ops()
    ```

    If the checkpoint has not been consumed completely, then the list of restore
    ops will grow as more objects are added to the dependency graph.

    Name-based `tf.train.Saver` checkpoints can be loaded using this
    method. There is no deferred loading, and names are used to match
    variables. No restore ops are created/run until `run_restore_ops()` or
    `initialize_or_restore()` are called on the returned status object, even
    when executing eagerly. Re-encode name-based checkpoints using this
    object-based `Saver.save` as soon as possible.

    Args:
      save_path: The path to the checkpoint, as returned by `save` or
        `tf.train.latest_checkpoint`. If None (as when there is no latest
        checkpoint for `tf.train.latest_checkpoint` to return), returns an
        object which may run initializers for objects in the dependency
        graph. If the checkpoint was written by the name-based `tf.train.Saver`,
        names are used to match variables.
      session: The session to retrieve metadata with. Ignored when executing
        eagerly. If not provided when graph building, the default session is
        used.

    Returns:
      A load status object, which can be used to make assertions about the
      status of checkpoint restoration and run initialization/restore ops
      (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
      `save_path` is `None`).

      If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
      object is returned which runs restore ops from a name-based saver.
    """
    if save_path is None:
      return InitializationOnlyStatus(self._root_checkpointable)
    in_graph_mode = not context.executing_eagerly()
    if in_graph_mode:
      if session is None:
        session = ops.get_default_session()
      file_prefix_tensor = self._file_prefix_placeholder
      file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
    else:
      session = None
      with ops.device("/cpu:0"):
        file_prefix_tensor = constant_op.constant(save_path)
      file_prefix_feed_dict = None
    try:
      # Read back the object graph proto written under the reserved key.
      if not in_graph_mode or self._object_graph_restore_tensor is None:
        with ops.device("/cpu:0"):
          object_graph_string, = io_ops.restore_v2(
              prefix=file_prefix_tensor,
              tensor_names=[_OBJECT_GRAPH_PROTO_KEY],
              shape_and_slices=[""],
              dtypes=[dtypes.string],
              name="object_graph_proto_read")
        if in_graph_mode:
          self._object_graph_restore_tensor = object_graph_string
      if in_graph_mode:
        object_graph_string = session.run(
            self._object_graph_restore_tensor,
            feed_dict=file_prefix_feed_dict)
      else:
        object_graph_string = object_graph_string.numpy()
    except errors_impl.NotFoundError:
      # The object graph proto does not exist in this checkpoint. Try again with
      # name-based saving.
      return NameBasedSaverStatus(self, save_path)
    object_graph_proto = (
        checkpointable_object_graph_pb2.CheckpointableObjectGraph())
    object_graph_proto.ParseFromString(object_graph_string)
    if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
      # Reuse cached restore bookkeeping when the graph hasn't changed.
      checkpoint = self._last_restore_checkpoint
    else:
      if in_graph_mode:
        dtype_map = None
      else:
        reader = pywrap_tensorflow.NewCheckpointReader(save_path)
        dtype_map = reader.get_variable_to_dtype_map()
      checkpoint = core_checkpointable_utils._Checkpoint(  # pylint: disable=protected-access
          object_graph_proto=object_graph_proto,
          save_path=file_prefix_tensor,
          dtype_map=dtype_map)
      if in_graph_mode:
        if self._last_restore_object_graph is not None:
          raise NotImplementedError(
              "Using a single Saver to restore different object graphs is not "
              "currently supported when graph building. Use a different Saver "
              "for each object graph (restore ops will be duplicated), or "
              "file a feature request if this limitation bothers you.")
        self._last_restore_checkpoint = checkpoint
        self._last_restore_object_graph = object_graph_proto
    # Begin (possibly deferred) restoration from the root, node id 0.
    core_checkpointable._CheckpointPosition(  # pylint: disable=protected-access
        checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
    load_status = CheckpointLoadStatus(
        checkpoint, feed_dict=file_prefix_feed_dict)
    return load_status
class Checkpoint(core_checkpointable.Checkpointable):
  """A utility class which groups `Checkpointable` objects.

  Accepts arbitrary keyword arguments to its constructor and saves those values
  with a checkpoint. Maintains a `save_counter` for numbering checkpoints.

  Example usage:

  ```python
  import tensorflow as tf
  import tensorflow.contrib.eager as tfe
  import os

  checkpoint_directory = "/tmp/training_checkpoints"
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")

  root = tfe.Checkpoint(optimizer=optimizer, model=model)
  root.restore(tf.train.latest_checkpoint(checkpoint_directory))
  for _ in range(num_training_steps):
    optimizer.minimize( ... )
    root.save(file_prefix=checkpoint_prefix)
  ```

  For more manual control over saving, use `tfe.CheckpointableSaver` directly.

  Attributes:
    save_counter: Incremented when `save()` is called. Used to number
      checkpoints.
  """

  def __init__(self, **kwargs):
    """Group objects into a training checkpoint.

    Args:
      **kwargs: Keyword arguments are set as attributes of this object, and are
        saved with the checkpoint. Attribute values must derive from
        `CheckpointableBase`.

    Raises:
      ValueError: If objects in `kwargs` are not Checkpointable.
    """
    super(Checkpoint, self).__init__()
    # Sort by keyword so attribute/dependency creation order is deterministic.
    for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
      if not isinstance(v, core_checkpointable.CheckpointableBase):
        raise ValueError(
            ("`Checkpoint` was expecting an object derived from "
             "`CheckpointableBase`, got %s.") % (v,))
      setattr(self, k, v)
    self._save_counter = None  # Created lazily for restore-on-create.
    # Weak reference avoids a cycle: self -> saver -> self.
    self._saver = CheckpointableSaver(weakref.ref(self))

  def _maybe_create_save_counter(self):
    """Create a save counter if it does not yet exist."""
    if self._save_counter is None:
      # Initialized to 0 and incremented before saving.
      with ops.device("/cpu:0"):
        self._save_counter = add_variable(
            self, name="save_counter", initializer=0, dtype=dtypes.int64)

  @property
  def save_counter(self):
    """An integer variable which starts at zero and is incremented on save.

    Used to number checkpoints.

    Returns:
      The save counter variable.
    """
    self._maybe_create_save_counter()
    return self._save_counter

  def save(self, file_prefix, session=None):
    """Save a checkpoint. Wraps `tfe.CheckpointableSaver.save`."""
    in_graph_mode = not context.executing_eagerly()
    if in_graph_mode:
      if session is None:
        session = ops.get_default_session()
      if self._save_counter is None:
        # When graph building, if this is a new save counter variable then it
        # needs to be initialized before assign_add. This is only an issue if
        # restore() has not been called first.
        session.run(self.save_counter.initializer)
    with ops.colocate_with(self.save_counter):
      assign_op = self.save_counter.assign_add(1)
    if in_graph_mode:
      session.run(assign_op)
    return self._saver.save(
        file_prefix=file_prefix,
        checkpoint_number=self.save_counter,
        session=session)

  def restore(self, save_path):
    """Restore a checkpoint. Wraps `tfe.CheckpointableSaver.restore`."""
    status = self._saver.restore(save_path=save_path)
    # Create the save counter now so it gets initialized with other variables
    # when graph building. Creating it earlier would lead to double
    # initialization when executing eagerly.
    self._maybe_create_save_counter()
    return status
| [
"pes.carceller@gmail.com"
] | pes.carceller@gmail.com |
4865ba692e18f4be9514c5c8b3d1459ef7c2017b | eba1e37dd58ce0f6378bf8b357d56992e2e5d10e | /burger/decorators.py | 772e01a73e798470599725df65ac917c16c738ab | [] | no_license | josejonatasoliveira/america-burger | dfc658129916da4005e53a498aa166bcf5d9651c | dae4e54dc820b4fb96bf84f9b2a9d61b20a8c047 | refs/heads/master | 2023-08-02T13:28:43.689058 | 2021-09-24T22:03:31 | 2021-09-24T22:03:31 | 410,112,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,516 | py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import base64
import hmac
import json
import logging
from functools import wraps

from django.conf import settings
from django.contrib import auth
from django.contrib.auth import authenticate, login
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.utils.decorators import classonlymethod
logger = logging.getLogger(__name__)
def view_or_basicauth(view, request, test_func, realm="", *args, **kwargs):
    """
    This is a helper function used by both 'logged_in_or_basicauth' and
    'has_perm_or_basicauth' that does the nitty of determining if they
    are already logged in or if they have provided proper http-authorization
    and returning the view if all goes well, otherwise responding with a 401.
    """
    if test_func(request.user):
        # Already logged in, just return the view.
        return view(request, *args, **kwargs)

    # They are not logged in. See if they provided login credentials.
    # (Local name deliberately avoids shadowing the imported
    # `django.contrib.auth` module.)
    if 'HTTP_AUTHORIZATION' in request.META:
        http_auth = request.META['HTTP_AUTHORIZATION'].split()
        # NOTE: We are only supporting basic authentication for now.
        if len(http_auth) == 2 and http_auth[0].lower() == "basic":
            try:
                uname, passwd = base64.b64decode(
                    http_auth[1]).decode('utf-8').split(':', 1)
            except (ValueError, UnicodeDecodeError):
                # The header is untrusted input: malformed base64, undecodable
                # bytes, or a payload without a ":" separator should yield a
                # 401 below rather than an unhandled 500.
                uname = passwd = None
            if uname is not None:
                user = authenticate(username=uname, password=passwd)
                if user is not None and user.is_active:
                    login(request, user)
                    request.user = user
                    if test_func(request.user):
                        return view(request, *args, **kwargs)

    # Either they did not provide an authorization header or
    # something in the authorization attempt failed. Send a 401
    # back to them to ask them to authenticate.
    response = HttpResponse()
    response.status_code = 401
    response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
    return response
def view_decorator(fdec, subclass=False):
    """
    Change a function decorator into a view decorator.

    Wraps a class-based view's ``as_view`` classmethod so that the view
    callable it returns is passed through ``fdec``. With ``subclass=True`` a
    freshly created subclass is decorated instead of mutating ``cls`` in
    place.

    https://github.com/lqc/django/tree/cbvdecoration_ticket14512
    """
    def decorator(cls):
        # Only class-based views expose as_view().
        if not hasattr(cls, "as_view"):
            raise TypeError(
                "You should only decorate subclasses of View, not mixins.")
        if subclass:
            # Decorate a new subclass so the original class is left untouched.
            cls = type("%sWithDecorator(%s)" %
                       (cls.__name__, fdec.__name__), (cls,), {})
        # Unwrap the classonlymethod to get the underlying function.
        original = cls.as_view.__func__
        @wraps(original)
        def as_view(current, **initkwargs):
            # Apply the function decorator to the callable produced by the
            # original as_view().
            return fdec(original(current, **initkwargs))
        cls.as_view = classonlymethod(as_view)
        return cls
    return decorator
def view_or_apiauth(view, request, test_func, *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
'has_perm_or_basicauth' that does the nitty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(auth.get_user(request)) or not settings.OAUTH2_API_KEY:
# Already logged in, just return the view.
#
return view(request, *args, **kwargs)
# They are not logged in. See if they provided login credentials
#
if 'HTTP_AUTHORIZATION' in request.META:
_auth = request.META['HTTP_AUTHORIZATION'].split()
if len(_auth) == 2:
# NOTE: We are only support basic authentication for now.
#
if _auth[0].lower() == "apikey":
auth_api_key = _auth[1]
if auth_api_key and auth_api_key == settings.OAUTH2_API_KEY:
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
#
response = HttpResponse()
response.status_code = 401
return response
def has_perm_or_basicauth(perm, realm=""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission.
Use:
@logged_in_or_basicauth('asforums.view_forumcollection')
def your_view:
...
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
def superuser_only(function):
"""
Limit view to superusers only.
Usage:
--------------------------------------------------------------------------
@superuser_only
def my_view(request):
...
--------------------------------------------------------------------------
or in urls:
--------------------------------------------------------------------------
urlpatterns = patterns('',
(r'^foobar/(.*)', is_staff(my_view)),
)
--------------------------------------------------------------------------
"""
def _inner(request, *args, **kwargs):
if not auth.get_user(request).is_superuser and not auth.get_user(request).is_staff:
raise PermissionDenied
return function(request, *args, **kwargs)
return _inner
def logged_in_or_basicauth(realm=""):
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated,
realm, *args, **kwargs)
return wrapper
return view_decorator
def logged_in_or_apiauth():
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_apiauth(func, request,
lambda u: u.is_authenticated,
*args, **kwargs)
return wrapper
return view_decorator
def superuser_or_apiauth():
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_apiauth(func, request,
lambda u: u.is_superuser,
*args, **kwargs)
return wrapper
return view_decorator
def dump_func_name(func):
def echo_func(*func_args, **func_kwargs):
logger.debug('Start func: {}'.format(func.__name__))
return func(*func_args, **func_kwargs)
return echo_func
| [
"jose.jonatas@triasoftware.com.br"
] | jose.jonatas@triasoftware.com.br |
770193a8d0b5fc2d222eca16b274aae71635efe3 | 38138a7861c19d0c06dd0b0b05dc716426196230 | /mJ_MotionDetectionAndTracking.py | 5a350c566b32161d6ddcefb5da3f90e94e4f0746 | [] | no_license | Palak-137/weather-app | 635c5109e35f92856d115476670e6a712411e4d0 | 69c970adf48340958a2a4316e46eb66bbbf8ebb3 | refs/heads/master | 2022-03-01T12:22:08.425084 | 2019-11-01T08:44:13 | 2019-11-01T08:44:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | import cv2
import numpy as np
cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture('vtest.avi')
ret, frame1 = cap.read()
ret, frame2 = cap.read()
#diff = cv2.absdiff(frame1, frame2)
#gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
#blur = cv2.GaussianBlur(gray, (5,5), 0)
#_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
#dilated = cv2.dilate(thresh, None, iterations=3)
#cv2.imshow('1',frame1)
#cv2.imshow('2',frame2)
#cv2.imshow('diff',diff)
#cv2.imshow('gray',gray)
#cv2.imshow('blur',blur)
#cv2.imshow('thresh',thresh)
#cv2.imshow('dilated',dilated)
print(frame1.shape)
while cap.isOpened():
diff = cv2.absdiff(frame1, frame2)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) <300:
continue
cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 3)
#cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
cv2.imshow("feed", frame1)
frame1 = frame2
ret, frame2 = cap.read()
if cv2.waitKey(40) == 27:
break
cv2.waitKey(0)
cv2.destroyAllWindows()
cap.release()
out.release()
| [
"noreply@github.com"
] | Palak-137.noreply@github.com |
4a033dd3423f1a45be0b664a4d039ecfa1645715 | 2e98c8b32df4b48e51ef2d234901aaddce6944da | /src/xycrypto/ciphers/Cryptography/_base.py | 2ac4afc03a75f4497bc82271f2d74137fad67e25 | [
"MIT"
] | permissive | xymy/xycrypto | 7f34bc0aa9d8cde8e0d8cff2ccbae51128c1b079 | 1c509a2ab3e78e1f7a642ff31b393796d70bf587 | refs/heads/master | 2021-02-27T02:18:04.935213 | 2020-04-19T07:16:41 | 2020-04-19T07:16:41 | 245,569,896 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,434 | py | import abc
from xycrypto.ciphers import base, utils
from . import _lib
@base.Cipher.register
class Cipher(metaclass=abc.ABCMeta):
"""Abstract base class for cipher."""
@property
@abc.abstractmethod
def _algorithm(self):
"""The algorithm of cipher."""
@property
@abc.abstractmethod
def name(self):
"""The name of cipher."""
@property
def key_size(self):
"""The key size in bytes of cipher."""
# pylint: disable=no-member
return self._cipher.algorithm.key_size // 8
@abc.abstractmethod
def encryptor(self):
"""Return the encryptor context."""
@abc.abstractmethod
def decryptor(self):
"""Return the decryptor context."""
def encrypt(self, data):
"""Encrypt data and return encrypted data."""
encryptor = self.encryptor()
temp = encryptor.update(data)
return temp + encryptor.finalize()
def decrypt(self, data):
"""Decrypt data and return decrypted data."""
decryptor = self.decryptor()
temp = decryptor.update(data)
return temp + decryptor.finalize()
@base.StreamCipher.register
class StreamCipher(Cipher):
"""Abstract base class for stream cipher."""
def __init__(self, key):
self._cipher = _lib.Cipher(self._algorithm(key), None, _lib.backend)
def encryptor(self):
return self._cipher.encryptor()
def decryptor(self):
return self._cipher.decryptor()
@base.BlockCipher.register
class BlockCipher(Cipher):
"""Abstract base class for block cipher."""
@property
@abc.abstractmethod
def block_size(self):
"""The block size in bytes of cipher."""
@property
def mode_name(self):
"""The mode name of cipher."""
return self._cipher.mode.name
def __init__(self, key, mode, **kwargs):
mode = _lib.create_mode(mode, **kwargs)
# For ECB and CBC modes, the default padding is PKCS7.
# For other modes, padding will not be added automatically.
# However, user can force padding by providing the padding argument.
if mode.name in {'ECB', 'CBC'}:
padding = kwargs.pop('padding', 'PKCS7')
else:
padding = kwargs.pop('padding', None)
self._cipher = _lib.Cipher(self._algorithm(key), mode, _lib.backend)
self._padding = utils.determine_padding(padding, self.block_size)
def encryptor(self):
return utils.determine_encryptor(self._cipher, self._padding)
def decryptor(self):
return utils.determine_decryptor(self._cipher, self._padding)
@base.BlockCipherECB.register
class BlockCipherECB(BlockCipher):
"""Abstract base class for block cipher in ECB mode."""
mode_name = 'ECB'
def __init__(self, key, *, padding='PKCS7'):
self._cipher = _lib.Cipher(self._algorithm(key), _lib.ECB(), _lib.backend)
self._padding = utils.determine_padding(padding, self.block_size)
@base.BlockCipherCBC.register
class BlockCipherCBC(BlockCipher):
"""Abstract base class for block cipher in CBC mode."""
mode_name = 'CBC'
def __init__(self, key, *, iv, padding='PKCS7'):
self._cipher = _lib.Cipher(self._algorithm(key), _lib.CBC(iv), _lib.backend)
self._padding = utils.determine_padding(padding, self.block_size)
@base.BlockCipherCFB.register
class BlockCipherCFB(BlockCipher):
"""Abstract base class for block cipher in CFB mode."""
mode_name = 'CFB'
def __init__(self, key, *, iv, padding=None):
self._cipher = _lib.Cipher(self._algorithm(key), _lib.CFB(iv), _lib.backend)
self._padding = utils.determine_padding(padding, self.block_size)
@base.BlockCipherOFB.register
class BlockCipherOFB(BlockCipher):
"""Abstract base class for block cipher in OFB mode."""
mode_name = 'OFB'
def __init__(self, key, *, iv, padding=None):
self._cipher = _lib.Cipher(self._algorithm(key), _lib.OFB(iv), _lib.backend)
self._padding = utils.determine_padding(padding, self.block_size)
@base.BlockCipherCTR.register
class BlockCipherCTR(BlockCipher):
"""Abstract base class for block cipher in CTR mode."""
mode_name = 'CTR'
def __init__(self, key, *, nonce, padding=None):
self._cipher = _lib.Cipher(self._algorithm(key), _lib.CTR(nonce), _lib.backend)
self._padding = utils.determine_padding(padding, self.block_size)
| [
"thyfan@163.com"
] | thyfan@163.com |
17a50a710128d01e325b178b1838f8dcb0030320 | 2dafcf1b78159914caf00e56d6d362a0428e16f9 | /sprites.py | 2cf1e93da756259ad8e6bb4f774c22050f5f4cb1 | [
"BSD-3-Clause"
] | permissive | andreyvydra/soul_in_the_pyramid | 8464be77a77bf8f5c975867b81462bc89e759895 | c044b205e61d75ecc1b0b15aeea34d06f94e4ee6 | refs/heads/master | 2023-03-27T03:21:38.405503 | 2021-03-25T11:27:01 | 2021-03-25T11:27:01 | 351,407,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,542 | py | from math import cos, sin, radians, atan2, pi
import pygame
from settings import *
from random import choice, randint
pygame.init()
class Sprite(pygame.sprite.Sprite):
hurt_sound = pygame.mixer.Sound('data/sounds/hurt.wav')
def __init__(self, img, x, y, *groups):
super().__init__(*groups)
self.image = img.copy()
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class Player(Sprite):
stand_image = pygame.image.load('data/img/mc_stand.png')
move_right_image = pygame.image.load('data/img/mc_move_right.png')
move_left_image = pygame.image.load('data/img/mc_move_left.png')
stand_images = []
for i in range(0, 27, 9):
sub_image = stand_image.subsurface((i, 0), (9, 18))
stand_images.append(pygame.transform.scale2x(sub_image))
move_right_images = []
for i in range(0, 27, 9):
sub_image = move_right_image.subsurface((i, 0), (9, 18))
move_right_images.append(pygame.transform.scale2x(sub_image))
move_left_images = []
for i in range(0, 27, 9):
sub_image = move_left_image.subsurface((i, 0), (9, 18))
move_left_images.append(pygame.transform.scale2x(sub_image))
jump_sound = pygame.mixer.Sound('data/sounds/jump.wav')
def __init__(self, x, y, *groups, blocks=None, health=5):
super().__init__(Player.stand_images[0], x, y, *groups)
self.speed = PLAYER_SPEED
self.hp = health
self.in_air_timer = 0
self.all_sprites = groups[0]
self.falling_speed = 0
self.number_of_image = 0
if blocks is not None:
self.collide_list = blocks
else:
self.collide_list = groups[0]
self.call_down_to_the_next_image = CALL_DOWN_TO_THE_NEXT_IMAGE_PLAYER
self.is_standing = True
self.is_moving_right = False
self.is_moving_left = False
def set_image(self):
if self.is_standing:
if self.call_down_to_the_next_image < 0:
self.number_of_image = (self.number_of_image + 1) % len(Player.stand_images)
self.image = self.stand_images[self.number_of_image]
self.call_down_to_the_next_image = CALL_DOWN_TO_THE_NEXT_IMAGE_PLAYER
elif self.is_moving_right:
if self.call_down_to_the_next_image < 0:
self.number_of_image = (self.number_of_image + 1) % len(Player.move_right_images)
self.image = self.move_right_images[self.number_of_image]
self.call_down_to_the_next_image = CALL_DOWN_TO_THE_NEXT_IMAGE_PLAYER
elif self.is_moving_left:
if self.call_down_to_the_next_image < 0:
self.number_of_image = (self.number_of_image + 1) % len(Player.move_left_images)
self.image = self.move_left_images[self.number_of_image]
self.call_down_to_the_next_image = CALL_DOWN_TO_THE_NEXT_IMAGE_PLAYER
def update(self, *args, **kwargs) -> None:
collision_types = {'top': False, 'bottom': False, 'right': False, 'left': False}
keys = pygame.key.get_pressed()
move_set = [0, 0]
delta = self.speed / FPS
if keys[pygame.K_a] or keys[pygame.K_d]:
if keys[pygame.K_a]:
if not self.is_moving_left:
self.number_of_image = 0
self.call_down_to_the_next_image = 0
self.is_moving_right = False
self.is_moving_left = True
self.is_standing = False
move_set[0] -= delta
if keys[pygame.K_d]:
if not self.is_moving_right:
self.number_of_image = 0
self.call_down_to_the_next_image = 0
self.is_moving_right = True
self.is_moving_left = False
self.is_standing = False
move_set[0] += delta
else:
if not self.is_standing:
self.number_of_image = 0
self.call_down_to_the_next_image = 0
self.is_moving_right = False
self.is_moving_left = False
self.is_standing = True
self.call_down_to_the_next_image -= 1000 / FPS
self.set_image()
self.falling_speed += 0.25
if self.falling_speed > FALLING_SPEED_MAX:
self.falling_speed = FALLING_SPEED_MAX
move_set[1] += self.falling_speed
if move_set[0] != 0:
self.move_x(move_set[0])
for tile in self.get_collision_list():
if move_set[0] > 0:
self.rect.right = tile.left
collision_types['right'] = True
elif move_set[0] < 0:
self.rect.left = tile.right
collision_types['left'] = True
if move_set[1] != 0:
self.move_y(move_set[1])
for tile in self.get_collision_list():
if move_set[1] > 0:
self.rect.bottom = tile.top
collision_types['bottom'] = True
elif move_set[1] < 0:
self.rect.top = tile.bottom
collision_types['top'] = True
if collision_types['bottom']:
self.falling_speed = 0
self.in_air_timer = 0
else:
self.in_air_timer += 1
if collision_types['top']:
self.falling_speed = 0
def jump(self):
if self.in_air_timer < IN_AIR_TIMER:
Player.jump_sound.play()
self.falling_speed -= JUMP_SPEED
def move_x(self, val):
self.rect.x += val
def move_y(self, val):
self.rect.y += val
def get_collision_list(self):
collisions = []
for r in [spr.rect for spr in self.collide_list]:
if self.rect.colliderect(r):
collisions.append(r)
return collisions
class Wall(Sprite):
images = [
pygame.transform.scale2x(pygame.image.load('data/img/wall_2.png')),
pygame.transform.scale2x(pygame.image.load('data/img/wall_1.png')),
pygame.transform.scale2x(pygame.image.load('data/img/wall_3.png')),
pygame.transform.scale2x(pygame.image.load('data/img/angle.png')),
pygame.transform.scale2x(pygame.image.load('data/img/send.png'))
]
images = {
'1': [
images[0].copy(),
images[1].copy(),
images[2].copy()
],
'2': [
pygame.transform.rotate(images[0].copy(), 90),
pygame.transform.rotate(images[1].copy(), 90),
pygame.transform.rotate(images[2].copy(), 90),
],
'3': [
pygame.transform.rotate(images[0].copy(), -90),
pygame.transform.rotate(images[1].copy(), -90),
pygame.transform.rotate(images[2].copy(), -90),
],
'4': [
pygame.transform.rotate(images[0].copy(), 180),
pygame.transform.rotate(images[1].copy(), 180),
pygame.transform.rotate(images[2].copy(), 180),
],
'5': images[3].copy(),
'6': pygame.transform.rotate(images[3].copy(), 90),
'7': pygame.transform.rotate(images[3].copy(), -90),
'8': pygame.transform.rotate(images[3].copy(), 180),
'9': images[4]
}
def __init__(self, x, y, *groups, key='1', is_angle=False, is_send=False):
image = choice(Wall.images[key]) if not is_angle else Wall.images[key]
if is_send:
image = Wall.images['9']
super(Wall, self).__init__(image, x, y, *groups)
class Mob(Sprite):
stand_image = pygame.image.load('data/img/mob_stand.png')
stand_images = []
shot = pygame.mixer.Sound('data/sounds/enemy_shot.wav')
for i in range(0, 18, 9):
sub_image = stand_image.subsurface((i, 0), (9, 18))
stand_images.append(pygame.transform.scale2x(sub_image))
def __init__(self, x, y, *groups, blocks=None, bullets=None, health=3):
super().__init__(Mob.stand_images[0], x, y, *groups)
self.call_down_for_the_bullet = randint(1500, 3000)
self.in_air_timer = 0
self.all_sprites = groups[0]
self.hp = health
self.max_hp = health
self.falling_speed = 0
self.number_of_image = 0
self.bullets = bullets
if blocks is not None:
self.collide_list = blocks
else:
self.collide_list = groups[0]
self.player = [i for i in self.all_sprites if isinstance(i, Player)][0]
self.call_down_to_the_next_image = CALL_DOWN_TO_THE_NEXT_IMAGE_MOB
self.is_standing = True
self.is_moving_right = False
self.is_moving_left = False
for tile in self.get_collision_list():
self.rect.bottom = tile.top
def update(self, *args, **kwargs) -> None:
if self.call_down_for_the_bullet < 0:
if self.on_the_line():
Mob.shot.play()
rot = 180 / pi * atan2(- 1 * (self.rect.y - self.player.rect.centery),
(self.rect.centerx - self.player.rect.centerx)) - 180
Bullet(self.rect.centerx, self.rect.y, rot, self.bullets,
self.all_sprites, enemies=[self.player],
blocks=self.collide_list, is_enemy=True)
self.call_down_for_the_bullet = 1000
self.call_down_for_the_bullet -= 1000 / FPS
if self.hp <= 0:
self.kill()
self.call_down_to_the_next_image -= 1000 / FPS
self.set_image()
def get_collision_list(self):
collisions = []
for r in [spr.rect for spr in self.collide_list]:
if self.rect.colliderect(r):
collisions.append(r)
return collisions
def on_the_line(self):
dest_x = self.player.rect.centerx
dest_y = self.player.rect.centery
rect = pygame.rect.Rect(self.rect.centerx, self.rect.centery, 10, 10)
list_rects = [i.rect for i in self.collide_list]
flag = False
for i in range(min(rect.centery, dest_y), max(dest_y, rect.centery) + 10, 20):
for j in range(min(rect.centerx, dest_x), max(dest_x, rect.centerx) + 10, 20):
rect.centery = i
rect.centerx = j
if rect.collidelist(list_rects) != -1:
flag = True
return not flag
def set_image(self):
if self.is_standing:
if self.call_down_to_the_next_image < 0:
self.number_of_image = (self.number_of_image + 1) % len(Mob.stand_images)
self.image = self.stand_images[self.number_of_image]
self.call_down_to_the_next_image = CALL_DOWN_TO_THE_NEXT_IMAGE_MOB
class Bullet(Sprite):
image = pygame.image.load('data/img/bullet.png')
image_for_enemy = pygame.image.load('data/img/enemy_bullet.png')
sound_shot_wall = pygame.mixer.Sound('data/sounds/wall_shot.wav')
def __init__(self, x, y, rot, *groups, enemies=None, blocks=None, is_enemy=False):
if not is_enemy:
image = pygame.transform.rotate(Bullet.image.copy(), rot)
else:
image = pygame.transform.rotate(Bullet.image_for_enemy.copy(), rot)
super().__init__(image, x, y, *groups)
self.all_sprites = groups[1]
self.enemies = enemies
self.collide_list = blocks
self.rot = rot
self.delta_x = cos(radians(self.rot)) * BULLET_SPEED
self.delta_y = - sin(radians(self.rot)) * BULLET_SPEED
def update(self, *args, **kwargs) -> None:
self.create_particles()
self.rect.x += self.delta_x
self.rect.y += self.delta_y
for block in self.collide_list:
if self.rect.colliderect(block.rect):
Bullet.sound_shot_wall.play()
self.kill()
for enemy in self.enemies:
if self.rect.colliderect(enemy.rect):
Sprite.hurt_sound.play()
enemy.hp -= 1
self.kill()
def create_particles(self):
ParticleBullet(self.image.copy(), self.rect.x,
self.rect.y, self.rot, self.all_sprites,
blocks=self.collide_list, enemies=self.enemies)
class ParticleBullet(Sprite):
def __init__(self, image, x, y, rot, *groups, blocks=None, enemies=None):
super().__init__(image, x, y, *groups)
self.enemies = enemies
self.rot = rot
self.rad = 7
self.blocks = blocks
self.delta_x = cos(radians(self.rot)) * (BULLET_SPEED - 3)
self.delta_y = - sin(radians(self.rot)) * (BULLET_SPEED - 3)
def update(self, *args, **kwargs) -> None:
self.rect.x += self.delta_x
self.rect.y += self.delta_y
if self.delta_x > 0:
self.delta_x -= 1
else:
self.delta_x += 1
for block in self.blocks:
if self.rect.colliderect(block.rect):
self.kill()
for enemy in self.enemies:
if self.rect.colliderect(enemy.rect):
self.kill()
self.rad -= 0.5
if self.rad == 0:
self.kill()
self.image = pygame.transform.scale(self.image, (round(self.rad * 2), round(self.rad * 2)))
self.rect = self.image.get_rect(center=self.rect.center)
class Rune(Sprite):
image = pygame.transform.scale2x(pygame.image.load('data/img/rune.png'))
pick_up = pygame.mixer.Sound('data/sounds/pick_up.wav')
def __init__(self, x, y, *groups):
super().__init__(Rune.image, x, y, *groups)
self.player = [i.rect for i in groups[1] if isinstance(i, Player)][0]
self.speed_up_down = 10
self.is_downing = False
self.delta = 0
self.calldown = 100
def update(self, *args, **kwargs) -> None:
if self.player.colliderect(self.rect):
Rune.pick_up.play()
self.kill()
self.calldown -= 1000 / FPS
if self.calldown <= 0:
if not self.is_downing:
self.rect.y -= 2
self.delta -= 2
if self.delta < -self.speed_up_down:
self.is_downing = True
self.delta = 0
else:
self.rect.y += 2
self.delta += 2
if self.delta > self.speed_up_down:
self.is_downing = False
self.delta = 0
self.calldown = 100
| [
"55046615+shmonder-glitch@users.noreply.github.com"
] | 55046615+shmonder-glitch@users.noreply.github.com |
8f51ba00c95343b9bb716afd8882bf94bf4931e4 | 0c85cba348e9abace4f16dfb70531c70175dac68 | /cloudroast/blockstorage/volumes_api/integration/compute/fixtures.py | caefc012dfe9f485b122f7e89da62af0983f99b6 | [
"Apache-2.0"
] | permissive | RULCSoft/cloudroast | 31157e228d1fa265f981ec82150255d4b7876af2 | 30f0e64672676c3f90b4a582fe90fac6621475b3 | refs/heads/master | 2020-04-04T12:20:59.388355 | 2018-11-02T21:32:27 | 2018-11-02T21:32:27 | 155,923,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,788 | py | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.common.tools.datagen import random_string
from cloudcafe.compute.composites import ComputeIntegrationComposite
from cloudroast.blockstorage.volumes_api.fixtures import VolumesTestFixture
class ComputeIntegrationTestFixture(VolumesTestFixture):
@classmethod
def setUpClass(cls):
super(ComputeIntegrationTestFixture, cls).setUpClass()
cls.compute = ComputeIntegrationComposite()
cls.servers = cls.compute.servers
cls.flavors = cls.compute.flavors
cls.images = cls.compute.images
cls.volume_attachments = cls.compute.volume_attachments
@classmethod
def random_server_name(cls):
return random_string(prefix="Server_", size=10)
@classmethod
def new_server(
cls, name=None, image=None, flavor=None, add_cleanup=True):
name = name or cls.random_server_name()
image = image or cls.images.config.primary_image
flavor = flavor or cls.flavors.config.primary_flavor
resp = cls.servers.behaviors.create_active_server(
name, image_ref=image, flavor_ref=flavor)
if add_cleanup:
cls.addClassCleanup(
cls.servers.client.delete_server, resp.entity.id)
return resp.entity
@classmethod
def attach_volume_and_get_device_info(
cls, server_connection, server_id, volume_id):
original_details = server_connection.get_all_disk_details()
attachment = \
cls.volume_attachments.behaviors.attach_volume_to_server(
server_id, volume_id)
assert attachment, "Could not attach volume {0} to server {1}".format(
volume_id, server_id)
new_details = server_connection.get_all_disk_details()
volume_details = [d for d in new_details if d not in original_details]
cls.fixture_log.debug(volume_details)
assert len(volume_details) == 1, (
"Could not uniquely identity the attached volume via the OS.")
setattr(attachment, 'os_disk_details', volume_details)
os_disk_device_name = \
volume_details[0].get('Number') or "/dev/{0}".format(
volume_details[0].get('name'))
assert os_disk_device_name, (
"Could not get a unique device name from the OS")
setattr(attachment, 'os_disk_device_name', os_disk_device_name)
return attachment
@classmethod
def format_attached_volume(
cls, server_connection, device_name, fstype=None):
resp = None
if device_name.startswith('/dev'):
resp = server_connection.format_disk(device_name, fstype or 'ext3')
else:
resp = server_connection.format_disk(device_name, fstype or 'ntfs')
assert resp is not None, (
"An error occured while trying to format the attached volume")
return resp
@classmethod
def mount_attached_volume(
cls, server_connection, device_name, mount_point=None):
mount_point = mount_point or server_connection.generate_mountpoint()
if device_name.startswith('/dev'):
server_connection.create_directory(mount_point)
return server_connection.mount_disk(
source_path=device_name, destination_path=mount_point)
@classmethod
def unmount_attached_volume(cls, server_connection, device_name):
return server_connection.unmount_disk(device_name)
@classmethod
def _add_directory_prefix(cls, file_directory_string):
if not file_directory_string.startswith('/'):
if len(file_directory_string) == 1:
file_directory_string = file_directory_string + ":\\"
return file_directory_string
@classmethod
def get_remote_file_md5_hash(
cls, server_connection, file_directory, file_name):
file_directory = cls._add_directory_prefix(file_directory)
return server_connection.get_md5sum_for_remote_file(
file_directory, file_name)
@classmethod
def create_remote_file(
cls, server_connection, file_directory, file_name,
file_content=None):
file_content = file_content or "content"
file_directory = cls._add_directory_prefix(file_directory)
return server_connection.create_file(
file_name, file_content, file_directory)
@classmethod
def _get_remote_client(cls, client_type):
client = None
if client_type == 'windows':
from cloudcafe.compute.common.clients.remote_instance.windows.\
windows_client import WindowsClient
client = WindowsClient
if client_type == 'linux':
from cloudcafe.compute.common.clients.remote_instance.linux.\
linux_client import LinuxClient
client = LinuxClient
if not client:
raise Exception(
"Unrecognized client type: {0}".format(client_type))
return client
@classmethod
def _connect(
cls, remote_client, ip_address=None, username=None,
connection_timeout=None, key=None, password=None):
kwargs = {
'ip_address': ip_address,
'username': username,
'connection_timeout': connection_timeout}
# Key always takes precendence over password if both are provided
auth_strategy = "key" if key else "password"
kwargs[auth_strategy] = key or password
_client = remote_client(**kwargs)
return _client
@classmethod
def connect_to_server(
cls, ip_address, username='root', password=None, key=None,
connection_timeout=None, client_type='linux'):
"""Returns a client for communication with the server"""
remote_client = cls._get_remote_client(client_type)
return cls._connect(
remote_client, ip_address=ip_address, username=username,
connection_timout=connection_timeout, key=key,
password=password)
@classmethod
def get_image_os_type(cls, image_id):
# TODO: make this method handle the various versions of the images
# api and image model. This might mean making an images auto composite.
image = cls.images.client.get_image(image_id).entity
return image.metadata.get('os_type', '').lower()
@classmethod
def connect_to_instance(
cls, server_instance_model, key=None, connection_timeout=None,
os_type=None):
"""Special helper method that pulls all neccessary values from a
compute server model, and returns a client for communication with
that server
"""
_usernames = {'windows': 'administrator', 'linux': 'root'}
ip_address = None
if hasattr(server_instance_model, 'accessIPv4'):
ip_address = server_instance_model.accessIPv4
else:
ip_address = server_instance_model.addresses.public.ipv4
if os_type is None:
os_type = cls.get_image_os_type(server_instance_model.image.id)
username = _usernames.get(os_type)
password = server_instance_model.admin_pass
connection_timeout = \
connection_timeout or cls.servers.config.connection_timeout
remote_client = cls._get_remote_client(os_type)
return cls._connect(
remote_client, ip_address=ip_address, username=username,
connection_timeout=connection_timeout, key=key,
password=password)
    @classmethod
    def setup_server_and_attached_volume_with_data(
            cls, server=None, volume=None):
        """
        Builds a new server using configured defaults
        Attaches, formats and mounts a new volume
        Writes data to the volume
        Saves the md5sum of the written data as a class attribute
        Syncs the filesystem write cache.

        Sets the following class attributes for later test steps:
        test_server, server_conn, volume_mount_point, test_volume,
        test_attachment, written_data, written_filename,
        original_md5hash.
        """
        # Build new server using configured defaults
        cls.test_server = server or cls.new_server()
        # Set remote instance client up
        cls.server_conn = cls.connect_to_instance(cls.test_server)
        cls.volume_mount_point = cls.server_conn.generate_mountpoint()
        cls.test_volume = volume or cls.new_volume()
        # Attach Volume
        cls.test_attachment = cls.attach_volume_and_get_device_info(
            cls.server_conn, cls.test_server.id, cls.test_volume.id_)
        # Format Volume
        cls.format_attached_volume(
            cls.server_conn, cls.test_attachment.os_disk_device_name)
        # Mount Volume
        cls.mount_attached_volume(
            cls.server_conn, cls.test_attachment.os_disk_device_name,
            mount_point=cls.volume_mount_point)
        # Write data to volume
        cls.written_data = "a" * 1024
        cls.written_filename = "qe_test_datafile"
        resp = cls.create_remote_file(
            cls.server_conn, cls.volume_mount_point, cls.written_filename,
            file_content=cls.written_data)
        assert resp is not None, (
            "Could not verify writability of attached volume")
        # Save written file md5sum so tests can verify data integrity later
        cls.original_md5hash = cls.get_remote_file_md5_hash(
            cls.server_conn, cls.volume_mount_point, cls.written_filename)
        assert cls.original_md5hash is not None, (
            "Unable to hash file on mounted volume")
        # Make the fs writes cached data to disk before unmount.
        cls.server_conn.filesystem_sync()
@classmethod
def unmount_and_detach_test_volume(cls):
cls.unmount_attached_volume(
cls.server_conn, cls.test_attachment.os_disk_device_name)
cls.volume_attachments.behaviors.delete_volume_attachment(
cls.test_attachment.id_, cls.test_server.id)
def calculate_volume_size_for_image(self, image):
"""Get size from image object if possible, or use configured value
TODO: Move this into a behavior
"""
size = getattr(image, 'min_disk', None)
# Log missing sizes
if not size:
msg = (
"Image {image_id} did not report a meaningful disks size. "
"Falling back to configured min_volume_size_from_image".format(
image_id=image.id))
self.fixture_log.warning(msg)
# If size is 0 or not reported (None), fall back to configured
# value for min_volume_size_from_image
return max(size, self.volumes.config.min_volume_from_image_size)
def _compare_volume_image_metadata(self, image, volume, key_list=None):
key_list = key_list or []
comparable_keys = [
key for key in image.metadata.keys() if key in key_list]
error_messages = []
for key in comparable_keys:
if key not in volume.volume_image_metadata:
error_messages.append(
"Metadata key '{0}' from image {1} not found in volume"
"{2} volume-image-metadata".format(
key, image.id, volume.id_))
elif volume.volume_image_metadata[key] != image.metadata[key]:
error_messages.append(
"Metadata keypair '{0}: {1}' from image {2} did not "
"match the keypair '{3}: {4}' in the "
"volume-image-metadata of volume {5}".format(
key, image.metadata[key], image.id,
key, volume.volume_image_metadata[key], volume.id_))
return error_messages
def assertImageMetadataWasCopiedToVolume(
self, image, volume, key_list=None, msg=None):
errors = self._compare_volume_image_metadata(image, volume, key_list)
if errors:
self.fail(self._formatMessage(msg, "\n".join(errors)))
    def assertMinDiskSizeIsSet(self, image, msg=None):
        """Fail the test unless ``image`` reports a positive min_disk."""
        # TODO: This should probably be an images behavior method that I
        # wrap here.
        if getattr(image, 'min_disk', 0) <= 0:
            stdmsg = (
                "\nImage {0} '{1}' does not have a min_disk size set, or "
                "has a min_disk size of 0".format(image.id, image.name))
            self.fail(self._formatMessage(msg, stdmsg))
def check_if_minimum_disk_size_is_set(self, image):
"""Check the image info to make sure the min_disk attribute
is set"""
try:
self.assertMinDiskSizeIsSet(image)
except AssertionError:
return False
return True
    def make_server_snapshot(self, server, add_cleanup=True):
        """Create a snapshot image of ``server`` and return its model.

        Creates the image with a randomized name, waits for the
        snapshotting progression, then locates the new image by name in
        the image list, optionally registers cleanup, waits for it to
        become ACTIVE, and finally re-fetches and returns the image model.
        """
        server_snapshot_name = random_string(
            prefix="cbs_qe_image_of_{0}_".format(server.name), size=10)
        create_img_resp = self.servers.client.create_image(
            server.id, name=server_snapshot_name)
        assert create_img_resp.ok, (
            "Create-Server-Image call failed with a {0}".format(
                create_img_resp.status_code))
        self.images.behaviors.verify_server_snapshotting_progression(server.id)
        # Poll for list of all snapshots and find the one that belongs to our
        # server.
        list_imgs_resp = self.images.client.list_images()
        assert list_imgs_resp.ok, (
            "list-images call failed with a {0}".format(
                list_imgs_resp.status_code))
        # NOTE(review): the .format() call below is a no-op (the message
        # has no placeholders) -- harmless, left as-is.
        assert list_imgs_resp.entity is not None, (
            "Unable to deserialize list-images response".format(
                list_imgs_resp.status_code))
        image_list = list_imgs_resp.entity
        # The create call does not return the image model, so match the
        # randomized name against the listing.
        server_snapshot = None
        for img in image_list:
            if img.name == server_snapshot_name:
                server_snapshot = img
                break
        assert server_snapshot is not None, "Could not locate image by name."
        if add_cleanup is True:
            self.addCleanup(
                self.images.client.delete_image, server_snapshot.id)
        # Wait for the image to become active just in case
        self.images.behaviors.wait_for_image_status(
            server_snapshot.id, 'ACTIVE', 10, 600)
        # get the model for the snapshot in question
        resp = self.images.client.get_image(server_snapshot.id)
        assert resp.ok, ("Could not get updated snapshot info after create")
        assert resp.entity is not None, (
            "Could not deserialize snapshot infor response")
        return resp.entity
    def create_bootable_volume_from_server_snapshot(
            self, image, flavor, volume_type):
        """Boot a server, snapshot it, and build a bootable volume from
        that snapshot.  Returns the verified volume model.
        """
        # Create a server from the given image and flavor
        server = self.new_server(
            name=None, image=image.id, flavor=flavor.id, add_cleanup=False)
        self.addCleanup(self.servers.client.delete_server, server.id)
        # Make a snapshot of the server via the images api
        server_snapshot = self.make_server_snapshot(server)
        # Create a bootable volume from the server snapshot
        return self.create_volume_from_image_test(volume_type, server_snapshot)
def create_volume_from_image_test(
self, volume_type, image, add_cleanup=True):
size = self.calculate_volume_size_for_image(image)
volume = self.volumes.behaviors.create_available_volume(
size, volume_type.id_, image_ref=image.id,
timeout=self.volumes.config.volume_create_max_timeout)
if add_cleanup:
try:
self.addCleanup(
self.volumes.behaviors.delete_volume_confirmed, volume.id_)
except:
raise Exception(
"Could not create a volume in setup for "
"create_volume_from_image test")
self.assertEquals(
str(size), str(volume.size),
"Expected volume size {0} did not match actual observed volume"
" size {1}".format(size, volume.size))
# TODO: Break this out into it's own assertion with progress verifer
# to give the bootable flag time to populate.
self.assertEquals(
'true', volume.bootable, "Volume built from image was not marked "
"as bootable")
self.assertImageMetadataWasCopiedToVolume(image, volume)
return volume
    def create_bootable_volume_from_third_snapshot_of_server_test(
            self, image, flavor, volume_type):
        """Snapshot the same server three times and build a bootable
        volume from the third snapshot (regression-style scenario).
        """
        # Create a server from the given image and flavor
        server = self.new_server(
            name=None, image=image.id, flavor=flavor.id, add_cleanup=False)
        self.addCleanup(self.servers.client.delete_server, server.id)
        # Make a snapshot of the server via the images api
        self.make_server_snapshot(server)
        # The server must return to ACTIVE between snapshots.
        self.servers.behaviors.wait_for_server_status(
            server.id, 'ACTIVE', timeout=300)
        self.make_server_snapshot(server)
        self.servers.behaviors.wait_for_server_status(
            server.id, 'ACTIVE', timeout=300)
        server_snapshot_3 = self.make_server_snapshot(server)
        self.servers.behaviors.wait_for_server_status(
            server.id, 'ACTIVE', timeout=300)
        # Create a bootable volume from the server snapshot
        self.create_volume_from_image_test(volume_type, server_snapshot_3)
| [
"jose.idar@rackspace.com"
] | jose.idar@rackspace.com |
5d95d610ebbca94067bde0d1705a24306610257b | 5a4436884af5341ce855c0e84866b972a0f61c05 | /day2/functions/basics/2.py | 0c74a24791d53e68bd3d08f432369d8dbfa42c55 | [] | no_license | sreejithev/pythoncodes | 74a420c4f025b893e27f17ba85632a4a096f17fd | 70df14871a9687916d1c4ada76c055607f13e8ce | refs/heads/master | 2021-01-21T20:59:47.056167 | 2017-06-19T09:43:17 | 2017-06-19T09:43:17 | 92,292,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | def sqr(x):
return x*x
print sqr(10,20)
| [
"sreejithevwyd@gmail.com"
] | sreejithevwyd@gmail.com |
856dc99df1b2a415589cdc169f574672bd782c91 | 2616952e9dcf7a996c691e5410551d89ec735943 | /Python Basic for ML and DL Book3/Ensemble methods Ensemble Error diagram.py | 2ec889de7b8cb72089d30d213d0d2065c1cbc6fa | [] | no_license | BaoBao0406/Machine-Learning | 5c9f00c19422e7fead74d4f441fcc43556b62b78 | c3e1c03301b41220c58a1bbda8f872638dc24104 | refs/heads/master | 2021-07-12T10:25:28.791579 | 2020-08-24T00:17:43 | 2020-08-24T00:17:43 | 197,107,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from scipy.misc import comb
import math
def ensemble_error(n_classifier, error):
    """Probability that a majority vote of ``n_classifier`` independent
    base classifiers is wrong, given a base error rate ``error``.

    Ties (possible for even ensemble sizes) are counted as ensemble
    errors, matching the usual derivation.
    """
    # Smallest number of wrong votes that defeats the ensemble.
    k_start = int(math.ceil(n_classifier / 2.))
    # math.comb is exact and replaces scipy.misc.comb, which has been
    # removed from modern SciPy releases.
    probs = [math.comb(n_classifier, k) * error**k * (1 - error)**(n_classifier - k)
             for k in range(k_start, n_classifier + 1)]
    return sum(probs)
ensemble_error(n_classifier=11, error=0.25)
import numpy as np
import matplotlib.pyplot as plt
# np.arange is the correct API here -- np.range does not exist and the
# original line raised AttributeError before anything was plotted.
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=error)
              for error in error_range]
# Ensemble error vs. base error, with the identity line for reference.
plt.plot(error_range, ens_errors, label='Ensemble error', linewidth=2)
plt.plot(error_range, error_range, linestyle='--', label='Base error', linewidth=2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc='upper left')
plt.grid(alpha=0.5)
plt.show()
| [
"46430379+BaoBao0406@users.noreply.github.com"
] | 46430379+BaoBao0406@users.noreply.github.com |
639a60840ad7e8b452e12cb388e417b5a2b16264 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/12143431.py | e56ad1dfbd9078ce9947efcd957b4a0ee3f23907 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/12143431.py generated: Fri, 27 Mar 2015 16:10:07
#
# Event Type: 12143431
#
# ASCII decay Descriptor: [B+ -> K+ (J/psi(1S) -> mu+ mu- {,gamma} {,gamma}) (eta -> gamma gamma)]cc
#
# NOTE: auto-generated DecFiles option file -- regenerate rather than
# hand-edit.  Configures signal generation for event type 12143431.
from Configurables import Generation
Generation().EventType = 12143431
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# User decay file describing the forced signal decay chain.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_JpsietaK,mm,gg=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# Signal PID: B+ and its charge conjugate.
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
# Momentum spectrum sampled in (pt, eta) from the reference histogram.
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12143431
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
b4c9478bc20a6ac8ebf48c57abd9d5fc5295a79b | 731812a3927ab974a5804a753d929f865cf5344c | /ansys/mapdl/core/inline_functions/selection_queries.py | 9022f4691fd2a265dfb522101476610255736715 | [
"MIT"
] | permissive | JourneyG/pymapdl | a03b5e4bc29c182ee9bdffd866587b7170a8d540 | 23fdc008c151c0546504e4ef8257a64f5f169100 | refs/heads/main | 2023-06-25T07:14:11.873065 | 2021-07-20T10:21:11 | 2021-07-20T10:21:11 | 390,166,634 | 1 | 0 | MIT | 2021-07-28T00:42:37 | 2021-07-28T00:42:36 | null | UTF-8 | Python | false | false | 17,732 | py | from .core import _ParameterParsing, SelectionStatus
class _SelectionStatusQueries(_ParameterParsing):
    """Inline-function queries for the selection status of model entities.

    Every method wraps one of the APDL inline functions ``NSEL``,
    ``KSEL``, ``LSEL``, ``ASEL``, ``ESEL`` and ``VSEL`` and returns a
    ``SelectionStatus`` whose value is:

    * ``1``  - SELECTED
    * ``0``  - UNDEFINED (the entity does not exist)
    * ``-1`` - UNSELECTED
    """

    # Bound to a live Mapdl instance by the class that mixes this in.
    _mapdl = None

    def _entity_selection_status(self, inline_function, number) -> SelectionStatus:
        """Run ``inline_function(number)`` in MAPDL and wrap the result.

        The query assigns its result to the APDL parameter ``_``; the
        integer is parsed from the response and converted to a
        ``SelectionStatus``.
        """
        response = self._mapdl.run(f'_={inline_function}({number})')
        integer = self._parse_parameter_integer_response(response)
        return SelectionStatus(integer)

    def nsel(self, n: int) -> SelectionStatus:
        """Return the selection status of a node.

        Parameters
        ----------
        n : int
            Node number

        Returns
        -------
        mapdl.ansys.core.inline_functions.SelectionStatus
            ``SELECTED`` (1), ``UNSELECTED`` (-1), or ``UNDEFINED`` (0)
            when the node does not exist.
        """
        return self._entity_selection_status('NSEL', n)

    def ksel(self, k: int) -> SelectionStatus:
        """Return the selection status of a keypoint.

        Parameters
        ----------
        k : int
            Keypoint number

        Returns
        -------
        mapdl.ansys.core.inline_functions.SelectionStatus
            ``SELECTED`` (1), ``UNSELECTED`` (-1), or ``UNDEFINED`` (0)
            when the keypoint does not exist.
        """
        return self._entity_selection_status('KSEL', k)

    def lsel(self, n: int) -> SelectionStatus:
        """Return the selection status of a line.

        Parameters
        ----------
        n : int
            Line number

        Returns
        -------
        mapdl.ansys.core.inline_functions.SelectionStatus
            ``SELECTED`` (1), ``UNSELECTED`` (-1), or ``UNDEFINED`` (0)
            when the line does not exist.
        """
        return self._entity_selection_status('LSEL', n)

    def asel(self, a: int) -> SelectionStatus:
        """Return the selection status of an area.

        Parameters
        ----------
        a : int
            Area number

        Returns
        -------
        mapdl.ansys.core.inline_functions.SelectionStatus
            ``SELECTED`` (1), ``UNSELECTED`` (-1), or ``UNDEFINED`` (0)
            when the area does not exist.
        """
        return self._entity_selection_status('ASEL', a)

    def esel(self, e: int) -> SelectionStatus:
        """Return the selection status of an element.

        Parameters
        ----------
        e : int
            Element number

        Returns
        -------
        mapdl.ansys.core.inline_functions.SelectionStatus
            ``SELECTED`` (1), ``UNSELECTED`` (-1), or ``UNDEFINED`` (0)
            when the element does not exist.
        """
        return self._entity_selection_status('ESEL', e)

    def vsel(self, v: int) -> SelectionStatus:
        """Return the selection status of a volume.

        Parameters
        ----------
        v : int
            Volume number

        Returns
        -------
        mapdl.ansys.core.inline_functions.SelectionStatus
            ``SELECTED`` (1), ``UNSELECTED`` (-1), or ``UNDEFINED`` (0)
            when the volume does not exist.
        """
        return self._entity_selection_status('VSEL', v)
class _NextSelectedEntityQueries(_ParameterParsing):
    """Inline-function queries returning the next selected entity.

    Every method wraps one of the APDL inline functions ``NDNEXT``,
    ``KPNEXT``, ``ELNEXT``, ``LSNEXT``, ``ARNEXT`` and ``VLNEXT``.  Each
    returns the next-highest selected entity number after the supplied
    one, or ``0`` when no higher selected entity exists (or the supplied
    number is not in the current selection).
    """

    # Bound to a live Mapdl instance by the class that mixes this in.
    _mapdl = None

    def _next_selected_entity(self, inline_function, number) -> int:
        """Run ``inline_function(number)`` in MAPDL and parse the integer
        it assigns to the APDL parameter ``_``.
        """
        response = self._mapdl.run(f'_={inline_function}({number})')
        return self._parse_parameter_integer_response(response)

    def ndnext(self, n: int) -> int:
        """Return the next selected node with a number greater than ``n``.

        Parameters
        ----------
        n : int
            Node number

        Returns
        -------
        int
            Next-highest selected node number, or 0 when none exists.
        """
        return self._next_selected_entity('NDNEXT', n)

    def kpnext(self, k: int) -> int:
        """Return the next selected keypoint with a number greater than ``k``.

        Parameters
        ----------
        k : int
            Keypoint number

        Returns
        -------
        int
            Next-highest selected keypoint number, or 0 when none exists.
        """
        return self._next_selected_entity('KPNEXT', k)

    def elnext(self, e: int) -> int:
        """Return the next selected element with a number greater than ``e``.

        Parameters
        ----------
        e : int
            Element number

        Returns
        -------
        int
            Next-highest selected element number, or 0 when none exists.
        """
        return self._next_selected_entity('ELNEXT', e)

    def lsnext(self, n: int) -> int:
        """Return the next selected line with a number greater than ``n``.

        Parameters
        ----------
        n : int
            Line number

        Returns
        -------
        int
            Next-highest selected line number, or 0 when none exists.
        """
        return self._next_selected_entity('LSNEXT', n)

    def arnext(self, a: int) -> int:
        """Return the next selected area with a number greater than ``a``.

        Parameters
        ----------
        a : int
            Area number

        Returns
        -------
        int
            Next-highest selected area number, or 0 when none exists.
        """
        return self._next_selected_entity('ARNEXT', a)

    def vlnext(self, v: int) -> int:
        """Return the next selected volume with a number greater than ``v``.

        Parameters
        ----------
        v : int
            Volume number

        Returns
        -------
        int
            Next-highest selected volume number, or 0 when none exists.
        """
        return self._next_selected_entity('VLNEXT', v)
| [
"noreply@github.com"
] | JourneyG.noreply@github.com |
2184dcaa420425fb5ac05048a9db52672b34737d | 498a16afbe1194785020f3e60ec30c16ce386df1 | /Binary Search.py | 7ed6bcadcad5f7c84c6b7b669a2254d561210292 | [
"Unlicense"
] | permissive | 8059542577066/Heap-Sort-Merge-Sort-and-Binary-Search | f58147d3f4194f1deddcea717af0aac96c218f56 | 6ba1863179d079a71a9a931e28cc4b9ac557aa2a | refs/heads/master | 2021-06-23T16:50:55.453690 | 2017-08-30T04:11:55 | 2017-08-30T04:11:55 | 101,836,603 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import random
import bisect
def find(target, array, e1, e2):
    """Recursive binary search for ``target`` in sorted ``array[e1..e2]``.

    Returns the index of ``target``, or -1 when it is not present.
    """
    # Floor division keeps the midpoint an int; plain "/" would produce
    # a float index (and a TypeError) under Python 3.
    m1 = (e1 + e2) // 2
    m2 = m1 + 1
    if target == array[m1]:
        return m1
    elif e1 == e2:
        # Range exhausted without a match.
        return -1
    elif target < array[m1]:
        return find(target, array, e1, m1)
    else:
        return find(target, array, m2, e2)
def main():
    # Interactive Python 2 driver: builds a sorted list of random 64-bit
    # integers and cross-checks find() against stdlib bisect.
    print "Binary Search"
    count = int(raw_input("Number of Items: "))
    numbers = []
    for i in xrange(count):
        numbers.append(random.getrandbits(64))
    numbers.sort()
    index = int(raw_input("Index of Item to Search: "))
    if index >= 0 and index < len(numbers):
        target = numbers[index]
    else:
        # Out-of-range index: search for a sentinel that is not in the list.
        target = -999
    print "\nfind(): " + str(find(target, numbers, 0, len(numbers) - 1))
    print "bisect.bisect() - 1: " + str(bisect.bisect(numbers, target) - 1)
    # Keep the console window open until the user presses Enter.
    raw_input()
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | 8059542577066.noreply@github.com |
9a2f7c955b4447e2d8cba5c050dfb293a9e90afb | 7bbcc4638c780641f6477b08d6dbed3d3d28ea0d | /rangers.py | c89f0d219ac71f3fc63e23eaf98ac825499c0059 | [] | no_license | Sanojdon/ansible_listen | afa0d50ad5c5eb74804a7c199fa25347039d79fd | e45eac3231940e51169be33a2f7e1128ebc2fb34 | refs/heads/master | 2020-08-24T05:31:13.883727 | 2019-10-22T09:03:52 | 2019-10-22T09:03:52 | 216,769,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
    # Serve the landing page from templates/index.html.
    return render_template("index.html")
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive debugger -- do not
    # run this way in production.
    app.run(debug=True)
| [
"sanoj.mv.93@gmail.com"
] | sanoj.mv.93@gmail.com |
905e3a8be40c25915d9fb2dba278584f297734fe | f8515ff67d48440af1c9031eed2839b08cb4d785 | /accounts/models.py | e0033fdffd58f090f3ee0922f076a2faa9560337 | [] | no_license | ashutoshverma27/website | 15d67e00e842a58ab92996389be46b2098b5224d | 147699705d70287acad55e47b54a0bed589ca9fe | refs/heads/master | 2022-01-08T11:43:31.381136 | 2019-07-19T16:38:52 | 2019-07-19T16:38:52 | 194,356,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from django.db import models
class Account(models.Model):
    # Simple user-account record.
    # NOTE(review): password is stored as a plain CharField; presumably
    # this should use Django's hashed auth framework instead -- confirm.
    fullname=models.CharField(max_length=32)
    username=models.CharField(max_length=100)
    date=models.DateTimeField()
    password=models.CharField(max_length=32)
class Cart(models.Model):
    # One row per product (pid) placed in a user's cart.
    username=models.CharField(max_length=100)
    pid=models.IntegerField()
| [
"ashutoshverma2799@gmail.com"
] | ashutoshverma2799@gmail.com |
a9a48cd3bd4d3e32d592e5426bb83daf4f748d1c | 1d0b3dae8553cb0f774ef45fb4e29bdb6c8ab88e | /first.py | 2f41cfc8d019f88231d9b8331f2c5d091e988304 | [] | no_license | Vladimyr23/Python-Exercises | b4f1fdf079ab607d0b8b7a89d3bec871b0331e50 | dfca1c6adfd4f9ba5652c4eca384f57ec1480343 | refs/heads/master | 2020-08-02T20:17:20.110013 | 2019-09-28T12:07:55 | 2019-09-28T12:07:55 | 211,494,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Vlad
#
# Created: 23/11/2015
# Copyright: (c) Vlad 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
#import modules
import sys
import pygame
# initiate pygame
pygame.init()
#set size and position of screen (640x480 window, 32-bit colour)
screen=pygame.display.set_mode ((640,480),0,32)
#fill the screen background with white
screen.fill ((255,255,255))
# Rect (used below) comes from this star import.
from pygame.locals import *
#House body (black rectangle)
pygame.draw.rect(screen,(0,0,0),Rect(50,150,450,250))
pygame.display.update()
#House roof -- not drawn yet
#pygame.draw.triangle
#Tree trunk
pygame.draw.rect(screen,(200,200,0),Rect(530,340,50,100))
pygame.display.update()
#Windows (three dark-blue squares)
pygame.draw.rect(screen,(0,0,100),Rect(80,180,50,50))
pygame.display.update()
pygame.draw.rect(screen,(0,0,100),Rect(210,180,50,50))
pygame.display.update()
pygame.draw.rect(screen,(0,0,100),Rect(420,180,50,50))
pygame.display.update()
#Door
pygame.draw.rect(screen,(100,100,0),Rect(320,280,70,120))
pygame.display.update()
# Tree foliage (green circle above the trunk)
pygame.draw.circle(screen,(0,255,0),(555,300),60)
pygame.display.update()
#event loop: exit cleanly when the user closes the window
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.display.quit()
sys.exit() | [
"noreply@github.com"
] | Vladimyr23.noreply@github.com |
1eea293fd7e98fc481cc56458353e7c279bd1593 | 754c10812d740ce84b39cf1e4d752e012100347e | /car_graphical_auto_control.py | 73b0ceba098c8da16e471efc92710dac745d8adc | [] | no_license | blackbriar07/autonomous-car-path-planning | f477706781dcc20da3cf80f0876ceae8bda21a3f | c2e4bb7285c79ae25eb35359d77e43f84ed3a5be | refs/heads/master | 2022-09-20T17:57:01.853550 | 2020-06-02T16:32:34 | 2020-06-02T16:32:34 | 261,088,729 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,095 | py | # makin grids for the car to pass
import pygame
import math
import numpy as np
import time
# Define some colors (RGB tuples)
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 200, 0)
RED = ( 200, 0, 0)
bright_green = (0,255,0)
bright_red = (255,0,0)
# This sets the width and height of each grid location
# This sets the margin between each cell
margin = 0
# Create a 2 dimensional array. A two dimensional
# array is simply a list of lists.
grid = []
grids_startend = []
# [row, col, tag] template; NOTE(review): array2 is not referenced in
# this section -- presumably consumed later in the file, confirm.
array2 = [0,0,"b"]
for row in range(10):
    # Add an empty array that will hold each cell
    # in this row
    grid.append([])
    for column in range(10):
        grid[row].append(0) # Append a cell
# Second 10x10 zero grid, used for start/end cell bookkeeping.
for row in range(10):
    # Add an empty array that will hold each cell
    # in this row
    grids_startend.append([])
    for column in range(10):
        grids_startend[row].append(0)
# Initialize pygame
pygame.init()
# Set the height and width of the screen
num_rect_hor = 10
num_rect_ver = 10
screen_width = 800
screen_height = 550 #480
width = 48 # 60% of the whole screen width will be covered with the grids
height = 48
size = [screen_width, screen_height]
screen = pygame.display.set_mode(size)
thickness_rect = 1
# Per-cell pixel origins, stored as [y, x] pairs per row.
position_vector = []
row_inc = 0
col_inc = 0
for pos_row in range(10):
    array_make =[]
    for pos_column in range(10):
        array_make.append([row_inc,col_inc])
        col_inc += width
    position_vector.append(array_make)
    row_inc += width
    col_inc = 0
#print("grid_vector" + str(len(position_vector)))
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
block_grid =[]
# Load and set up graphics.
#background_image = pygame.image.load("saturn_family1.jpg").convert()
player_image = pygame.image.load("playerShip1_orange.png").convert()
# Black pixels in the sprite become transparent.
player_image.set_colorkey(BLACK)
image = pygame.transform.scale(player_image, (int(width-10),int(height-10)))
#image1 = pygame.transform.rotate(image, 0)
# grid_rect mirrors position_vector but stores [x, y] pixel origins.
pos_rect = []
grid_rect = []
i_num = 0
for i in range(num_rect_hor):
    j_num = 0
    pos_rect = []
    for j in range(num_rect_ver):
        pos_rect.append([j_num,i_num])
        j_num += 48
    grid_rect.append(pos_rect)
    i_num += 48
def text_objects(text, font):
    """Render *text* in black with *font*; return (surface, bounding rect)."""
    rendered = font.render(text, True, BLACK)
    bounds = rendered.get_rect()
    return rendered, bounds
def button(msg,x,y,w,h,ic,ac, action = None):
    """Draw a clickable button labelled ``msg`` at (x, y) with size (w, h).

    ``ic``/``ac`` are the inactive/active (hovered) colours.  When the
    mouse hovers the button and the left button is pressed, the named
    ``action`` is dispatched.
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    # Hovered: draw with the active colour and handle clicks.
    if x+w > mouse[0] > x and y+h >mouse[1] > y:
        pygame.draw.rect(screen,ac,(x,y,w,h))
        if click[0] == 1 and action != None:
            # NOTE(review): start_game/start_end_screen are not defined in
            # this section -- presumably defined later in the file, confirm.
            if action == "play":
                start_game()
            if action == "start_end":
                start_end_screen()
    else:
        pygame.draw.rect(screen,ic,(x,y,w,h))
    # Centre the label text inside the button rectangle.
    smallText = pygame.font.Font("freesansbold.ttf",20)
    textSurf, textRect = text_objects(msg,smallText)
    textRect.center = ((x + (w/2)), (y + (h/2)))
    screen.blit(textSurf, textRect)
def car(image1,x,y):
    # Blit the (already rotated) car sprite onto the global screen at (x, y).
    screen.blit(image1,(x,y))
def car_movement_automatic(event,image1,x_car,y_car,x_carchange,y_carchange,car_move,direction_side,position,prev_position):
    """Place the car at the scripted *position* and orient its sprite.

    *position* is (row, col, heading) where the heading codes are
    6 = North, 7 = East, 8 = South, 9 = West; any other code leaves the
    car where it was.  Returns (x_car, y_car, image1, direction_side).

    The event/x_carchange/y_carchange/car_move/prev_position parameters are
    currently unused; they are kept so the signature stays interchangeable
    with car_movement_manual at the call site in start_game().  A 30-line
    block of dead, stringified code (an older velocity-based implementation)
    was removed along with the locals only it referenced.
    """
    if position[2] == 6:
        x_car = width * position[1]
        y_car = width * position[0]
        image1 = pygame.transform.rotate(image,0)
        direction_side = "N"
    if position[2] == 7:
        x_car = width * position[1]
        y_car = width * position[0]
        image1 = pygame.transform.rotate(image,-90)
        direction_side = "E"
    if position[2] == 8:
        x_car = width * position[1]
        y_car = width * position[0]
        image1 = pygame.transform.rotate(image,180)
        direction_side = "S"
    if position[2] == 9:
        x_car = width * position[1]
        y_car = width * position[0]
        image1 = pygame.transform.rotate(image,90)
        direction_side = "W"
    return x_car , y_car , image1, direction_side
def car_movement_manual(event,image1,x_car,y_car,x_carchange,y_carchange,car_move,direction_side):
    """Update the car from one keyboard *event* and return the new state.

    Arrow keys set a velocity of one cell (car_move pixels) and rotate the
    sprite; any KEYUP stops the car.  Moves into obstacle cells (block_grid)
    or off the 10x10 board are cancelled.  Returns the tuple
    (x_car, y_car, image1, direction_side).
    """
    # Remember the pre-input sprite/heading so a blocked move can be undone.
    prev_image = image1
    prev_direction = direction_side
    if event.type == pygame.KEYDOWN:
        if event.key == pygame.K_LEFT:
            image1 = pygame.transform.rotate(image,90)
            x_carchange = -car_move
            direction_side = "W"
        elif event.key == pygame.K_RIGHT:
            image1 = pygame.transform.rotate(image, -90)
            x_carchange = car_move
            direction_side = "E"
        elif event.key == pygame.K_UP:
            image1 = pygame.transform.rotate(image, 0)
            y_carchange = -car_move
            direction_side = "N"
        elif event.key == pygame.K_DOWN:
            image1 = pygame.transform.rotate(image, 180)
            y_carchange = car_move
            direction_side = "S"
    if event.type == pygame.KEYUP:
        #if event.key == pygame.K_LEFT or pygame.K_RIGHT or pygame.K_UP or pygame.K_DOWN:
        x_carchange = 0
        y_carchange = 0
    # Cancel the move when the destination pixel lands in an obstacle cell.
    if block_cancel == True:
        for i in block_grid:
            if grid_rect[i[0]][i[1]][0] <= x_car + x_carchange < grid_rect[i[0]][i[1]][0] + width and grid_rect[i[0]][i[1]][1] <= y_car + y_carchange < grid_rect[i[0]][i[1]][1] + height:
                x_carchange = 0
                y_carchange = 0
                image1 = prev_image
                direction_side = prev_direction
    x_car += x_carchange
    y_car += y_carchange
    # Undo moves that would leave the 10x10 board.
    if x_car < 0 or x_car >= width*10 or y_car < 0 or y_car >= height*10 :
        x_car -= x_carchange
        y_car -= y_carchange
    return x_car , y_car , image1, direction_side
def grids():
    """Draw the 10x10 board: a white outline on every cell, plus a solid
    red fill on cells marked 1 in the global ``grid`` (obstacles)."""
    for r in range(num_rect_ver):
        for c in range(num_rect_hor):
            cell_x = (margin + width) * c + margin
            cell_y = (margin + height) * r + margin
            pygame.draw.rect(screen, WHITE,
                             [cell_x, cell_y, width, height], thickness_rect)
            if grid[r][c] == 1:
                pygame.draw.rect(screen, RED,
                                 [cell_x, cell_y, width, height])
def grids_start_end():
    """Draw the 10x10 board: a white outline on every cell, plus a solid
    green fill on cells marked 1 in ``grids_startend`` (start/end points)."""
    for r in range(num_rect_ver):
        for c in range(num_rect_hor):
            cell_x = (margin + width) * c + margin
            cell_y = (margin + height) * r + margin
            pygame.draw.rect(screen, WHITE,
                             [cell_x, cell_y, width, height], thickness_rect)
            if grids_startend[r][c] == 1:
                pygame.draw.rect(screen, GREEN,
                                 [cell_x, cell_y, width, height])
def start_end_screen():
    """Event loop where the user clicks cells to mark start/end points.

    Clicked cells are recorded (pixel positions in the global
    start_end_array, cell flags in grids_startend) until the user presses
    the "start game" button or closes the window.
    """
    start_end_exit = True
    global start_end_array
    start_end_array = []
    while start_end_exit:
        screen.fill(BLACK)
        pygame.display.set_caption("Select start end points")
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                start_end_exit = False
                pygame.quit()
                quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                # Only react to clicks inside the board area.
                if 0 <= pos[0] <= num_rect_hor*width and 0 <= pos[1] <= num_rect_ver*height:
                    start_end_array.append(pos)
                    #print("Hello")
                    # Change the x/y screen coordinates to grid coordinates
                    column = pos[0] // (width + margin)
                    row = pos[1] // (height + margin)
                    #print(column,row)
                    # Set that location to zero
                    grids_startend[row][column] = 1
                    #print(grids_startend)
                    #block_cancel = True
                    #block_grid.append([row,column])
                    #print("Click ", pos, "Grid coordinates: ", row, column)
        grids_start_end()
        grids()
        button("start game",550,50,150,50,GREEN,bright_green,"play")
        pygame.display.update()
        clock.tick(50)
def start_game():
    """Main game loop: replay the scripted path once, then hand control
    to the keyboard (car_movement_manual).

    Each state change is printed as "(row, col, heading)".
    """
    x_car = 0
    y_car = 0
    x_carchange = 0
    y_carchange = 0
    car_move = width  # one grid cell per move
    direction_side = "stationary"
    image1 = pygame.transform.rotate(image,0)
    array2 = [0,0,"b"]  # last printed state; sentinel so the first state prints
    car_move_automatic = True
    movement_vector = np.array([[0,0,8],[1,0,8],[1,1,7],[1,2,7],[2,2,8]]) # set your path
    b =5
    gameExit = False
    print(start_end_array)
    while not gameExit:
        screen.fill(BLACK)
        for event in pygame.event.get():
            # NOTE(review): QUIT calls pygame.quit() without setting gameExit,
            # so the loop keeps running against a torn-down display -- verify.
            if event.type == pygame.QUIT:
                pygame.quit()
            # declaring car movement to be manual or automatic. The reiforcement learning would be taking it in automatic mode
            if car_move_automatic == True :
                #print(" the loop is working")
                prev_position = [0,0]
                # Replay every scripted (row, col, heading) step, one frame each.
                for position in movement_vector:
                    x_car,y_car,image1,direction_side = car_movement_automatic(event,image1,x_car,y_car,x_carchange,y_carchange,car_move,direction_side,position,prev_position)
                    prev_position = position
                    grids()
                    grids_start_end()
                    car(image1,x_car,y_car)
                    array1 = [x_car // (width + margin) , y_car // (height + margin) , direction_side ]
                    if array1 != array2 :
                        print("state : (" + str(y_car // (height + margin)) + "," + str(x_car // (width + margin)) + "," + direction_side + ")" )
                        array2 = array1
                    button("game started",550,50,150,50,GREEN,bright_green,"haha")
                    pygame.display.update()
                    time.sleep(0.9)
                    clock.tick(50)
                car_move_automatic = False
            else:
                # Manual phase: one keyboard event drives one move.
                x_car,y_car,image1,direction_side = car_movement_manual(event,image1,x_car,y_car,x_carchange,y_carchange,car_move,direction_side)
                grids()
                grids_start_end()
                car(image1,x_car,y_car)
                array1 = [x_car // (width + margin) , y_car // (height + margin) , direction_side ]
                if array1 != array2 :
                    print("state : (" + str(y_car // (height + margin)) + "," + str(x_car // (width + margin)) + "," + direction_side + ")" )
                    array2 = array1
        button("game started",550,50,150,50,GREEN,bright_green,"haha")
        pygame.display.update()
        clock.tick(50)
def game_intro():
    """Intro event loop: the user clicks cells to place obstacles.

    Clicked cells are flagged in ``grid`` and appended to ``block_grid``
    (which enables obstacle collision via block_cancel).  The "Start_End"
    button advances to start_end_screen().
    """
    intro =True
    global block_cancel
    block_cancel = False
    while intro:
        screen.fill(BLACK)
        pygame.display.set_caption("Array Backed Grid")
        for event in pygame.event.get():
            if event.type == pygame.QUIT: #if event.type == pygame.QUIT:
                intro = False
                pygame.quit()
                quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # User clicks the mouse. Get the position
                pos = pygame.mouse.get_pos()
                # Only react to clicks inside the board area.
                if 0 <= pos[0] <= num_rect_hor*width and 0 <= pos[1] <= num_rect_ver*height:
                    # Change the x/y screen coordinates to grid coordinates
                    column = pos[0] // (width + margin)
                    row = pos[1] // (height + margin)
                    # Set that location to zero
                    grid[row][column] = 1
                    block_cancel = True
                    block_grid.append([row,column])
                    print("Click ", pos, "Grid coordinates: ", row, column)
        grids()
        #button("start game",550,50,150,50,GREEN,bright_green,"play")
        button("Start_End",550,110,200,50,RED,bright_red,"start_end")
        pygame.display.update()
        clock.tick(50)
# Entry point: show the obstacle-placement screen first; clean up on return.
game_intro()
pygame.quit()
# acknowledgement : sentdex harrison kinsley | [
"noreply@github.com"
] | blackbriar07.noreply@github.com |
6f89678c830463744f76b8bff98c9b6a3ddc4d8b | 58029d606fdbac2362100cfa340620459568cf30 | /mobile.py | 0adfa14d3807a60c3236f9398562140f7cdee869 | [
"CC0-1.0"
] | permissive | cadencework/gibberish | efb09ab3aa7b3e72fff6601429e001ed80b59180 | 9e105e07840ff06a6949f4ea44a7b1d5b133cbfd | refs/heads/master | 2021-01-26T06:38:35.090115 | 2020-07-01T23:49:24 | 2020-07-01T23:49:24 | 243,350,267 | 0 | 0 | CC0-1.0 | 2020-06-19T14:49:54 | 2020-02-26T19:41:45 | Python | UTF-8 | Python | false | false | 914 | py | import csv
# need to define cmp function in Python 3
def cmp(a, b):
    """Python-2-style three-way comparison: -1 if a < b, 1 if a > b, else 0."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def under_attack(col, queens):
    """Return True if a queen placed on the next row, column *col*, would be
    attacked by any queen in *queens* (a list of (row, col) pairs, one per
    previous row, in row order)."""
    left = col
    right = col
    for _row, qcol in reversed(queens):
        # Walking backwards one row at a time widens the diagonals by one.
        left -= 1
        right += 1
        if qcol == left or qcol == col or qcol == right:
            return True
    return False
def solve(n):
    """Return every safe placement of queens on the first *n* rows, as lists
    of (row, col) pairs.  Column candidates come from the module-level
    BOARD_SIZE constant (defined elsewhere in this file)."""
    if n == 0:
        return [[]]
    partial_solutions = solve(n - 1)
    return [placement + [(n, col + 1)]
            for col in range(BOARD_SIZE)
            for placement in partial_solutions
            if not under_attack(col + 1, placement)]
# read stocks data, print status messages
# Expects a headerless CSV with columns: ticker, name, price, change, pct.
with open('stocks.csv', 'r') as stocksFile:
    stocks = csv.reader(stocksFile)
    status_labels = {-1: 'down', 0: 'unchanged', 1: 'up'}
    for ticker, name, price, change, pct in stocks:
        # cmp() maps the signed change to -1/0/1 to select a label.
        status = status_labels[cmp(float(change), 0.0)]
        print ('%s is %s (%.2f)' % (name, status, float(pct)))
| [
"noreply@github.com"
] | cadencework.noreply@github.com |
cfb0dc113f70f8b60b6ff42fbd453b6d32ca1d85 | 2db531ae939dcce8b682d54bd7f8944608a4edb3 | /11.py | 1dedf01d61de28d9ce91d19a510594ecbb424569 | [] | no_license | 1654218052/learnpython | 4a17b68222725696e8ae995fe0c743087906b3c2 | fa886562b0fdc3e4d08733eec34bd909437a8f65 | refs/heads/master | 2021-07-15T11:46:52.563836 | 2020-10-23T09:48:15 | 2020-10-23T09:48:15 | 221,367,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Print a greeting for each name in the list.
L = ['Bart', 'Lisa', 'Adam']
for x in L:
    print('hello,', x, '!')
"linjing.guo@langtaojin.com"
] | linjing.guo@langtaojin.com |
ff5b9def75d33737ce601692e7d143f66e2a3130 | e7f7361657f96ee8312c2889e19e17fe38242a95 | /hash-generate.py | 19216aabf799709534a26fce04f43ab871bd2c72 | [
"Apache-2.0"
] | permissive | zhangmuu/Set-Associative-Hash-fqCodel | 6c00ea2f792e06ad5e0ce69ce4e52a37640fc6a6 | fca5159c8a28ddbb38e59c18f582c915629b06d3 | refs/heads/master | 2022-04-08T18:37:30.366493 | 2020-03-06T03:01:44 | 2020-03-06T03:01:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # ns-3 uses murmur3 as the Hash function in packet scheduling
# This returns a 32-bit unsigned integer
import random
# number of values to generate
num_val = 8000
# generate integers in the range [0, 2^32 - 1]
max_val = 2**32 - 1
min_val = 0
f = open('hashvalues.txt', 'w')
for _ in range(num_val):
f.write(str(random.randint(min_val, max_val)) + "\n")
print('Done') | [
"deepakkavoor99@gmail.com"
] | deepakkavoor99@gmail.com |
f1765a32853c4659c60f2b0311f645a606b7137d | a840368964b6fa6ecadaa0ddfab4f6daa9d70a3c | /bin/rotator | 8360a22217852448030737b7ff322a78109eb4c0 | [] | no_license | dlecocq/rotator | 102450ceda6877f3437f01076f1a8e16132b148a | c01a0bce8029c73109fd60523c71934e73347a2d | refs/heads/master | 2020-05-04T22:18:02.071871 | 2013-10-28T16:53:54 | 2013-10-28T16:53:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | #! /usr/bin/env python
from rotator import Watched, Signaled, Rotated
def watch(args):
    '''Run the watched handler: stream stdin into args.path via
    rotator.Watched, buffered when --buffered was given.'''
    # 'w+' truncates any existing file; 'a' appends.
    mode = 'w+' if args.truncate else 'a'
    Watched(args.path, mode).run(args.buffered)
def signal(args):
    '''Run the signaled handler: stream stdin into args.path via
    rotator.Signaled, passing through the signal name from --signal.'''
    # 'w+' truncates any existing file; 'a' appends.
    mode = 'w+' if args.truncate else 'a'
    Signaled(args.path, mode, args.signal).run(args.buffered)
def rotate(args):
    '''Run the rotated handler: stream stdin into args.path via
    rotator.Rotated with the --size / --count rotation limits.'''
    # 'w+' truncates any existing file; 'a' appends.
    mode = 'w+' if args.truncate else 'a'
    Rotated(args.path, mode, args.size, args.count).run(args.buffered)
def add_arguments(parser):
    """Attach the positional path and the flags shared by every subcommand
    to *parser*."""
    parser.add_argument('path', help='The path to write out to')
    for flag, text in (('--buffered', 'Use buffered reads from stdin'),
                       ('--truncate', 'Truncate the log file')):
        parser.add_argument(flag, dest=flag.lstrip('-'), default=False,
            action='store_true', help=text)
# Now run the parser
# Each subcommand shares the common arguments and dispatches to its handler
# via set_defaults(func=...).  This runs at import time (script entry point).
import argparse
parser = argparse.ArgumentParser(description='Write to a file, rotate logs')
subparsers = parser.add_subparsers(title='subcommands')
# Our watched subparser
watched = subparsers.add_parser('watched')
add_arguments(watched)
watched.set_defaults(func=watch)
# Our signal subparser
signaled = subparsers.add_parser('signaled')
add_arguments(signaled)
signaled.set_defaults(func=signal)
signaled.add_argument('--signal', dest='signal', default='HUP',
    help='What signal to trap for rotate the file')
# Rotated subparser
rotated = subparsers.add_parser('rotated')
add_arguments(rotated)
rotated.set_defaults(func=rotate)
rotated.add_argument('--size', dest='size', default='100 MB', type=str,
    help='What size to rotate at (in bytes or with KB, MB or GB)')
rotated.add_argument('--count', dest='count', default=5, type=int,
    help='How many backups to keep around')
# Parse the command line and invoke the selected subcommand handler.
args = parser.parse_args()
args.func(args)
| [
"dan@seomoz.org"
] | dan@seomoz.org | |
1b94f99e70996a49390e0809ab6f034062c6dc92 | eb8a7abdbd575b39b763a19f3388ac14b0c2671f | /backend/processAPI/serializers.py | b54ee910b9e07771005ed1415ea65a637dabd346 | [] | no_license | dssudake/AudMIX | a1f97f084962788b0b5a0aeca8f442582286cbbd | bf496e86544a3c652c8a0f3acaaf89e71b6dcdfc | refs/heads/master | 2023-06-04T11:32:28.045239 | 2021-07-03T12:05:32 | 2021-07-03T12:05:32 | 295,716,312 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from rest_framework import serializers
from processAPI.models import AudioFile
class AudioFileSerializer(serializers.ModelSerializer):
    """DRF serializer for AudioFile.

    Only ``name`` and ``audio`` remain writable; id/timestamps and every
    derived-audio field are read-only (presumably populated by the
    processing pipeline -- verify against the views).
    """
    class Meta:
        model = AudioFile
        fields = ['id', 'modified_at', 'created_at',
                  'name', 'audio', 'processed_audio', 'denoised_audio', 'vocals_audio', 'music_audio']
        read_only_fields = ['id', 'created_at',
                            'modified_at', 'processed_audio', 'denoised_audio', 'vocals_audio', 'music_audio']
| [
"darshansudake555@gmail.com"
] | darshansudake555@gmail.com |
4de573eb34483281ccd1d495eb0356983844329c | 6cccac017896b5522d7a550ec4a290fbfb614737 | /DB/twitterMongo.py | 32b48e2c0b031bcb7276db25cc06b55f50e61cfd | [] | no_license | BixuanLu/TwitterTracker | 2e4e8c66997344356a01958cdf5fa0023780a6c5 | 1c2ab15c39d6533a2d099f6a3a0d63900abde7ab | refs/heads/master | 2020-04-06T04:35:06.179069 | 2015-04-17T14:26:53 | 2015-04-17T14:26:53 | 33,646,169 | 0 | 1 | null | 2015-04-09T03:24:22 | 2015-04-09T03:24:22 | null | UTF-8 | Python | false | false | 1,272 | py | __author__ = 'moqri'
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy
import json
import sys
import pymongo
#Variables that contains the user credentials to access Twitter API
# NOTE(review): the credential values were stripped from this file; the bare
# assignments below are a SyntaxError as-is.  Fill them in (or load them from
# environment variables) before running.
access_token =
access_token_secret =
consumer_key =
consumer_secret =
# Build the OAuth handler and an authenticated API client from them.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
class CustomStreamListener(tweepy.StreamListener):
    """Tweepy stream listener that inserts every raw tweet into MongoDB
    (database 'test', collection 't2') and echoes it to stdout.

    NOTE(review): this module uses Python 2 print statements.
    """
    def __init__(self, api):
        self.api = api
        # NOTE(review): super() is called with tweepy.StreamListener as the
        # first argument, which skips StreamListener's own __init__ in the
        # MRO -- probably meant super(CustomStreamListener, self).
        super(tweepy.StreamListener, self).__init__()
        self.db = pymongo.MongoClient().test
    def on_data(self, tweet):
        # Persist the raw JSON payload first, then decode again for display.
        self.db.t2.insert(json.loads(tweet))
        decoded = json.loads(tweet)
        try:
            print '@%s: %s' % (decoded['user']['screen_name'], decoded['text'].encode('ascii', 'ignore'))
            print ''
            return True
        # Bare except keeps the stream alive even if a tweet lacks the
        # expected fields; the tweet is already stored at this point.
        except:
            return True
    def on_error(self, status_code):
        return True # Don't kill the stream
    def on_timeout(self):
        return True # Don't kill the stream
# Open the streaming connection and filter the live stream for "Ebola".
sapi = tweepy.streaming.Stream(auth, CustomStreamListener(api))
sapi.filter(track=['Ebola'])
"mahdi.moqri@gmail.com"
] | mahdi.moqri@gmail.com |
daa80a98d910d089e94614c257d30306d7cc9fb6 | 68132437547e6697b401705115004f11c6679164 | /migrations/versions/45689cf7c965_datetime.py | 45f86d93d0454677c07ed5211e2a5630717c441c | [] | no_license | Gelion91/web_movie | fe863fcd80a898ba8485938a323ee1905d683f5d | bb641b8ad251a172afcc695b13c55d313e2aad33 | refs/heads/master | 2022-12-11T05:29:21.190786 | 2021-02-05T18:03:40 | 2021-02-05T18:03:40 | 244,685,292 | 3 | 0 | null | 2022-09-16T18:21:54 | 2020-03-03T16:24:48 | Python | UTF-8 | Python | false | false | 650 | py | """datetime
Revision ID: 45689cf7c965
Revises: c1be0e510f5f
Create Date: 2020-06-14 13:14:33.060249
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '45689cf7c965'       # this migration's id
down_revision = 'c1be0e510f5f'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a nullable 'published' DateTime column to the 'film' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('film', sa.Column('published', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the 'published' column from the 'film' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('film', 'published')
    # ### end Alembic commands ###
| [
"gelion91@mail.ru"
] | gelion91@mail.ru |
6c40f9589568cbbfc1a2ca04f5556ed4ae271221 | 25c6b7b0b42fd6ac7fb794830696132f24b1cc1d | /opencv/train.py | 3c26220f4839db9d1131ca7cbc74f35ab8ca068b | [
"MIT"
] | permissive | hxnguyen5/Face_Recognition_2 | 28cd9981d73d9a7871e22747f7a81318c37bb138 | 3d364cdfc72d5a42afdb88f23995e6a62b7e98e3 | refs/heads/master | 2021-07-13T19:11:07.781072 | 2017-10-17T12:35:59 | 2017-10-17T12:35:59 | 106,682,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,583 | py | """Raspberry Pi Face Recognition Treasure Box
Face Recognition Training Script
Copyright 2013 Tony DiCola
Run this script to train the face recognition system with positive and negative
training images. The face recognition model is based on the eigen faces
algorithm implemented in OpenCV. You can find more details on the algorithm
and face recognition here:
http://docs.opencv.org/modules/contrib/doc/facerec/facerec_tutorial.html
"""
import fnmatch
import os
import cv2
import numpy as np
import config
import face
# Output image files summarizing the trained eigenface model (see __main__).
MEAN_FILE = 'mean.png'
POSITIVE_EIGENFACE_FILE = 'positive_eigenface.png'
NEGATIVE_EIGENFACE_FILE = 'negative_eigenface.png'
def walk_files(directory, match='*'):
    """Generator function to iterate through all files in a directory
    recursively whose basename matches the glob pattern *match*,
    yielding full paths.
    """
    for root, _dirs, names in os.walk(directory):
        for name in fnmatch.filter(names, match):
            yield os.path.join(root, name)
def prepare_image(filename):
    """Read an image as grayscale and resize it to the appropriate size for
    training the face recognition model.
    """
    # face.resize is the project helper that scales to the model's input size.
    return face.resize(cv2.imread(filename, cv2.IMREAD_GRAYSCALE))
def normalize(X, low, high, dtype=None):
    """Normalizes a given array in X to a value between low and high.

    A constant input (max == min) maps every element to *low* instead of
    dividing by zero as the original did.  Returns a new array; *dtype*
    selects the output dtype (default: float).

    Adapted from python OpenCV face recognition example at:
    https://github.com/Itseez/opencv/blob/2.4/samples/python2/facerec_demo.py
    """
    X = np.asarray(X)
    minX, maxX = np.min(X), np.max(X)
    if maxX == minX:
        # Degenerate (constant) input: avoid divide-by-zero; everything
        # collapses to the lower bound of the target range.
        X = np.zeros_like(X, dtype=float)
    else:
        # normalize to [0...1].
        X = (X - float(minX)) / float(maxX - minX)
    # scale to [low...high].
    X = X * (high - low) + low
    if dtype is None:
        return np.asarray(X)
    return np.asarray(X, dtype=dtype)
if __name__ == '__main__':
    print ("Reading training images...")
    faces = []
    labels = []
    pos_count = 0
    neg_count = 0
    # Read all positive images
    # Filenames are expected to look like "<Name>_*.pgm"; the prefix before
    # the first underscore selects the person's label.
    for filename in walk_files(config.POSITIVE_DIR, '*.pgm'):
        path = os.path.basename(filename)
        path = path.split("_")[0]
        # print path
        if(path == "Billy"):
            faces.append(prepare_image(filename))
            labels.append(config.BILLY_LABEL)
            pos_count += 1
        if(path == "Mommy"):
            faces.append(prepare_image(filename))
            labels.append(config.MOMMY_LABEL)
            pos_count += 1
        if(path == "Cody"):
            faces.append(prepare_image(filename))
            labels.append(config.CODY_LABEL)
            pos_count += 1
        if(path == "Casey"):
            faces.append(prepare_image(filename))
            labels.append(config.CASEY_LABEL)
            pos_count += 1
##        if(path == "lourdes"):
##            faces.append(prepare_image(filename))
##            labels.append(config.LOURDES_LABEL)
##            pos_count += 1
#        if(path == "nadine"):
#            faces.append(prepare_image(filename))
#            labels.append(config.NADINE_LABEL)
#            pos_count += 1
#        if(path == "anamaria"):
#            faces.append(prepare_image(filename))
#            labels.append(config.ANAMARIA_LABEL)
#            pos_count += 1
    # Read all negative images
    for filename in walk_files(config.NEGATIVE_DIR, '*.pgm'):
        faces.append(prepare_image(filename))
        labels.append(config.NEGATIVE_LABEL)
        neg_count += 1
    print ('Read', pos_count, 'positive images and', neg_count, 'negative images.')
    # Train model
    print ('Training model...')
    model = cv2.face.createEigenFaceRecognizer()
##    model = cv2.createEigenFaceRecognizer()if use OpenCV 2.7
    model.train(np.asarray(faces), np.asarray(labels))
    # Save model results
    model.save(config.TRAINING_FILE)
    print ('Training data saved to', config.TRAINING_FILE)
    # Save mean and eignface images which summarize the face recognition model.
    mean = model.getMean().reshape(faces[0].shape)
##    mean = model.getMat("mean").reshape(faces[0].shape)if use OpenCV 2.7
    cv2.imwrite(MEAN_FILE, normalize(mean, 0, 255, dtype=np.uint8))
    eigenvectors = model.getEigenVectors()
##    eigenvectors = model.getMat("eigenvectors")if use OpenCV 2.7
    # First two eigenvectors, reshaped back to image dimensions for viewing.
    pos_eigenvector = eigenvectors[:,0].reshape(faces[0].shape)
    cv2.imwrite(POSITIVE_EIGENFACE_FILE, normalize(pos_eigenvector, 0, 255, dtype=np.uint8))
    neg_eigenvector = eigenvectors[:,1].reshape(faces[0].shape)
    cv2.imwrite(NEGATIVE_EIGENFACE_FILE, normalize(neg_eigenvector, 0, 255, dtype=np.uint8))
"hxnguyen5@hotmail.com"
] | hxnguyen5@hotmail.com |
24c926e2e0d0f2f86b212f62e5437858fcfe7afd | 8e1b2491d4afc40262b2fefa597ed01af13f529e | /mysite/lab/forms.py | 9dc2e77637cec5890d4bd998a04af2a4e7b1c5d6 | [] | no_license | WB-Solutions/lab | cd952a55bdf3121f08a7cefadf4bf53a04558f71 | 21d20498f4b985ed521214ae1e17aa7b8feb8b2f | refs/heads/master | 2016-08-06T22:09:23.546461 | 2015-08-18T02:26:48 | 2015-08-18T02:26:48 | 22,614,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | from django import forms
from django.core.validators import MinLengthValidator
from django.contrib.auth.forms import UserCreationForm
from .models import *
class UserEditForm(forms.ModelForm):
    """ModelForm letting a user edit their own first/last name."""
    def __init__(self, *args, **kwargs):
        # TODO: this doesn't seem to work. Need to get to the bottom of it.
        #self.base_fields["display_name"].min_length = 2
        #self.base_fields["display_name"].validators.append(MinLengthValidator)
        #print self.base_fields['display_name'].validators
        # NOTE(review): super(forms.ModelForm, self) skips ModelForm's own
        # __init__ in the MRO -- probably meant super(UserEditForm, self).
        super(forms.ModelForm, self).__init__(*args, **kwargs)
    class Meta:
        model = User
        fields = ('first_name', 'last_name') # 'display_name'
class UserAdminForm(forms.ModelForm):
    """Admin change form for User.

    NOTE(review): Meta declares no fields/exclude; Django >= 1.8 raises
    ImproperlyConfigured for that -- confirm the Django version in use.
    """
    class Meta:
        model = User
    def is_valid(self):
        # Hook kept as a debugging seam for inspecting self.errors.
        #log.info(force_text(self.errors))
        return super(UserAdminForm, self).is_valid()
class UserCreateAdminForm(UserCreationForm):
    """Admin add form for User that creates accounts by email only:
    the inherited username field is made optional and its cleaning
    neutralized so it never blocks validation."""
    username = forms.CharField(required=False) # ignored / hidden, just to NOT validate as mandatory during save.
    class Meta:
        model = User
        fields = ('email',)
    def clean_username(self):
        # Deliberately return None: username is unused in this form.
        pass
| [
"carlosgalindo@gmail.com"
] | carlosgalindo@gmail.com |
4c507eda6df4eec5409d9ba0f7c5c58dbe4adc2c | a8637de7c6e38c95cd19b46b45b3e00c42ae8140 | /recruitments/forms.py | b4d18a84d6fec828e87868c360f345c9f5ccb4dd | [] | no_license | nishant57/edc | 9c0d3d363882c44bc08dc4da47024e5e83731077 | 5ab9f6dc5d474b5071c7f027cd287c32a9d43501 | refs/heads/master | 2021-01-12T19:31:49.655509 | 2013-02-17T10:00:36 | 2013-02-17T10:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | from django.db import models
from django.forms import ModelForm, Textarea
from django import forms
from constants import *
from recruitments.models import Candidate, Setup
from ckeditor.widgets import CKEditorWidget
from django.core.exceptions import ObjectDoesNotExist
class CandidateForm(forms.ModelForm):
class Meta:
model = Candidate
exclude = ('hash_value', 'blocked', 'setup', 'slot')
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get('email')
try:
s = Setup.objects.get(date_recruitment_ends__gt=datetime.now(), date_recruitment_starts__lt=datetime.now())
Candidate.objects.get(email=email, setup=s)
raise forms.ValidationError('This Email ID has already applied for this recruitment process')
except ObjectDoesNotExist:
pass
return cleaned_data
'''
salutation = forms.CharField(max_length=10,required=True,choices=SALUTATION_CHOICES)
name = forms.CharField(max_length=50,required = True,label='Your name')
email = forms.EmailField(max_length=50,required=True,label='Email Address')
branch = forms.CharField(max_length=50,required=True,choices=BRANCH_CHOICES)
phone = forms.CharField(max_length=15, required=True)
why_edc = forms.TextField(max_length=500,required=True)
other_groups = forms.TextField(max_length=100)
interests = forms.
'''
| [
"axitkhurana@gmail.com"
] | axitkhurana@gmail.com |
a0d9755196bbd5f94ca3bbfa9bdf55549e7221f0 | 63becc7c429c1545f882b857a7a83413b4b9e2e9 | /generate_index.py | 60b09c74d8a97b1a1dc02d3d12b5e0b29153e0fa | [] | no_license | 2100992/pyweb-m3-hw | f43005cc36cdef779701e7a765558ad6c9c960e8 | 4f18083dee5b72c97be63f03370c8f012bdf02a5 | refs/heads/master | 2020-05-19T05:38:43.199068 | 2019-05-07T12:19:24 | 2019-05-07T12:19:24 | 184,854,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | # coding: utf-8
from horoscope import generate_prophecies
from datetime import datetime as dt
def generate_head(title):
    """Return the HTML <head> element with a charset meta tag and *title*.

    Fixes the original template, which emitted a second opening ``<head>``
    tag instead of the closing ``</head>``.
    """
    head = f'''<head>
    <meta charset = "utf-8">
    <title>{title}</title>
    </head>'''
    return head
def generate_body(header, paragraphs, footer):
    """Assemble the <body> element: an <h1> *header*, one <p> per entry of
    *paragraphs*, then the *footer* markup verbatim."""
    parts = [f'<h1>{header}</h1>']
    parts.extend(f'    <p>{text}</p>' for text in paragraphs)
    inner = '\n'.join(parts)
    return f'<body>\n    {inner}\n    {footer}\n    </body>'
def generate_page(head, body):
    """Wrap pre-rendered *head* and *body* markup in an <html> element."""
    return '\n    '.join(['<html>', head, body, '</html>'])
def generate_footer(link, description):
    """Return the footer markup: a horizontal rule followed by an anchor
    to *link* whose text is *description*."""
    return '\n    '.join(['<hr/>', f'<a href="{link}">', description, '</a>'])
def save_page(title, header, paragraphs, footer, output='index.html'):
    """Render a complete HTML page and write it to *output* (UTF-8).

    *title* goes into <head>, *header* becomes the <h1>, *paragraphs* the
    <p> elements, and *footer* is inserted verbatim before </body>.
    """
    page = generate_page(
        head=generate_head(title),
        body=generate_body(header=header, paragraphs=paragraphs, footer=footer),
    )
    # Context manager guarantees the handle is closed even if writing fails
    # (the original's open()/close() pair leaked on error).
    with open(output, 'w', encoding='utf8') as fp:
        print(page, file=fp)
def main():
    """Build today's horoscope page (index.html): 3 generated prophecies of
    4 sentences each, dated header, and a footer link to the about page."""
    today = dt.now().date()
    save_page(
        title = 'Гороскоп на сегодня',
        header = 'Что день ' + str(today) + ' готовит',
        paragraphs = generate_prophecies(total_num=3, num_sentences=4),
        footer = generate_footer('about.html', 'О реализации'),
    )
# Runs at import time: this module is the script entry point.
main()
"2100992@gmail.com"
] | 2100992@gmail.com |
f894d84cf9acdc100224f2e0a31dfbbf34cef837 | aef325e409650c01c5795c9bba100bb956d6bd2c | /config.py | 1caf14b6555e9a359df5544ab6f4f010a4b95c68 | [
"MIT"
] | permissive | AmimoG/Blogging-website- | f25b4d839466b80bcc6f75c143723842271e93d5 | b7fbd9717aee337f1b9be9a6819b4f451b52be3e | refs/heads/master | 2022-12-19T00:25:00.768935 | 2020-09-30T07:19:12 | 2020-09-30T07:19:12 | 298,582,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | import os
class Config:
    """
    This is the class which will contain the general configurations
    (shared by the dev/test/prod subclasses below).
    """
    # SECURITY(review): falls back to a hard-coded secret when SECRET_KEY is
    # absent from the environment -- do not rely on this in production.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'pluto1'
    # Gmail SMTP settings; credentials come from the environment.
    MAIL_SERVER = "smtp.gmail.com"
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
    # Destination directory for photo uploads (relative to the project root).
    UPLOADED_PHOTOS_DEST = "app/static/photos"
class DevConfig(Config):
    """
    This is the class which will contain the development configurations
    """
    # SECURITY(review): database credentials are hard-coded in the URI.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:pluto1@localhost/blog'
    DEBUG = True
class ProdConfig(Config):
    """
    This is the class which will contain the production configurations
    """
    # Production DB URI is injected via the DATABASE_URL environment variable.
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
    """
    This is the class which will contain the test configurations
    """
    # NOTE(review): points at the same database as DevConfig -- tests will
    # touch dev data; consider a dedicated test database.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:pluto1@localhost/blog'
# Lookup table mapping an environment name to its configuration class.
config_options = {
    "development": DevConfig,
    "test": TestConfig,
    "production": ProdConfig
}
| [
"gilbertmatete6@gmail.com"
] | gilbertmatete6@gmail.com |
c62663faa729ad99eb3f2454501b39fcf2a2c9fd | caaf28c68d37811af440eb6ddd2b575fcae8f5bf | /tests/unit/projects/__init__.py | a4a9d32c6cec5536d82b3c6424ae01d67f3022b3 | [
"MIT"
] | permissive | cjolowicz/cutty | d01d4b5e3f96ebafef4b677fcc336d55b04bd229 | c6b26377153d60d5da825002e03f9a28467378a9 | refs/heads/main | 2023-03-07T01:25:18.721524 | 2022-01-27T14:28:28 | 2022-01-27T14:28:28 | 282,443,616 | 4 | 1 | MIT | 2023-03-06T14:59:29 | 2020-07-25T12:53:33 | Python | UTF-8 | Python | false | false | 37 | py | """Unit tests for cutty.projects."""
| [
"noreply@github.com"
] | cjolowicz.noreply@github.com |
f093b7b3e85ed9d663cb03666fdc60b3785c7dd6 | 240aa5c04062908053226ac28fc46c71d2a43ff4 | /imesupport/subclass.py | a9114e561bac7ec8497856362e628f6e6729aa46 | [] | no_license | tkmusic1976/IMESupport | 67a24253323aa245fa3e6911c356df48c4784cd5 | 1787839ca2fcc0488e03b40b7bdf48889c5729ec | refs/heads/master | 2021-01-16T20:01:22.787003 | 2012-11-07T13:46:14 | 2012-11-08T03:55:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | # -*- coding: utf-8 -*-
import ctypes
from ctypes.wintypes import HWND, UINT, WPARAM, LPARAM
# ctypes signature of a WNDPROC: LRESULT (HWND, UINT, WPARAM, LPARAM).
prototype = ctypes.WINFUNCTYPE(ctypes.c_long, HWND, UINT, WPARAM, LPARAM)
# Get/SetWindowLong index for a window's procedure pointer.
GWL_WNDPROC = (-4)
subclass_map = {} # {HWND: {'orig': ORIGINAL_WINPROC, 'callback': CALLBACK}}
def proc_func(hwnd, msg, wParam, lParam):
    """Shared window procedure installed on every subclassed window.

    Gives the registered callback first refusal of the message; a callback
    that returns None (or raises) falls through to the original wndproc.
    """
    try:
        if hwnd in subclass_map:
            ret = subclass_map[hwnd]['callback'](hwnd, msg, wParam, lParam)
            if ret is not None:
                return ret
    # Swallow everything: an exception escaping a window procedure would
    # break message dispatch for the whole window.
    except:
        pass
    # NOTE(review): raises KeyError if hwnd is absent from subclass_map;
    # presumably term() restores the original wndproc before removing the
    # entry, so this proc should no longer be called then -- verify.
    return ctypes.windll.user32.CallWindowProcW(
        subclass_map[hwnd]['orig'], hwnd, msg, wParam, lParam)
# Keep a module-level reference to the ctypes thunk so it is not garbage
# collected while installed as a window procedure.
proc_obj = prototype(proc_func)
def setup(hwnd, callback):
    """Subclass *hwnd* so *callback* sees its messages (safe to call again).

    The first call swaps in the shared proc_func and remembers the original
    wndproc; subsequent calls just replace the stored callback.
    """
    if hwnd not in subclass_map:
        proc = ctypes.windll.user32.GetWindowLongW(hwnd, GWL_WNDPROC)
        if proc != proc_obj:
            ctypes.windll.user32.SetWindowLongW(hwnd, GWL_WNDPROC, proc_obj)
            subclass_map[hwnd] = {'orig': proc, 'callback': callback}
        else:
            # Our proc is installed but the map lost the entry: inconsistent.
            assert False # Unexpected
    else:
        subclass_map[hwnd]['callback'] = callback
def term(hwnd):
    """Undo setup(): restore *hwnd*'s original wndproc and drop the mapping.
    No-op for windows that were never subclassed."""
    if hwnd in subclass_map:
        proc = ctypes.windll.user32.GetWindowLongW(hwnd, GWL_WNDPROC)
        if proc == proc_obj:
            ctypes.windll.user32.SetWindowLongW(hwnd, GWL_WNDPROC, subclass_map[hwnd]['orig'])
        else:
            # Someone else re-subclassed after us: restoring would corrupt
            # their chain.
            assert False # Unexpected
        del subclass_map[hwnd]
def test():
    """Manual smoke test: opens a window (pywin32 required) and subclasses
    it so key-down events are intercepted by test_callback.

    NOTE(review): CreateWindow below uses Python-2-only tuple-parameter
    syntax, so this function only parses under Python 2.
    """
    # Required pywin32
    import win32gui
    import win32con
    import time
    def on_create(hwnd):
        def test_callback(hwnd, msg, wParam, lParam):
            # Swallow WM_KEYDOWN (return 0); everything else falls through.
            if msg == win32con.WM_KEYDOWN:
                print('Subclased OnKeyDown')
                return 0
            return None
        # Calling setup() repeatedly exercises the idempotent re-register path.
        setup(hwnd, test_callback)
        print('after setup', subclass_map)
        setup(hwnd, test_callback)
        print('after setup', subclass_map)
        setup(hwnd, test_callback)
        print('after setup', subclass_map)
    # Original: http://kb.worldviz.com/articles/791
    def OnKeyDown(hwnd, msg, wp, lp):
        print('Original OnKeyDown')
    def OnClose(hwnd, msg, wparam, lparam):
        """Destroy window when it is closed by user"""
        win32gui.DestroyWindow(hwnd)
    def OnDestroy(hwnd, msg, wparam, lparam):
        """Quit application when window is destroyed"""
        win32gui.PostQuitMessage(0)
    #Define message map for window
    wndproc = {
        win32con.WM_KEYDOWN: OnKeyDown,
        win32con.WM_CLOSE: OnClose,
        win32con.WM_DESTROY: OnDestroy
    }
    def CreateWindow(title, message_map, (l, t, r, b)):
        """Create a window with defined title, message map, and rectangle"""
        wc = win32gui.WNDCLASS()
        wc.lpszClassName = 'test_win32gui_1'
        wc.style = win32con.CS_GLOBALCLASS | win32con.CS_VREDRAW | win32con.CS_HREDRAW
        wc.hbrBackground = win32con.COLOR_WINDOW + 1
        wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
        wc.lpfnWndProc = message_map
        win32gui.RegisterClass(wc)
        hwnd = win32gui.CreateWindow(wc.lpszClassName,
            title,
            win32con.WS_CAPTION | win32con.WS_VISIBLE | win32con.WS_SYSMENU,
            l, t, r, b, 0, 0, 0, None)
        on_create(hwnd)
        # Pump messages until the window is destroyed.
        while win32gui.PumpWaitingMessages() == 0:
            time.sleep(0.01)
        win32gui.UnregisterClass(wc.lpszClassName, None)
    #Display sample window
    CreateWindow('Pywin32 sample', wndproc, (100, 100, 500, 200))
if __name__ == '__main__':
    test()
| [
"chikatoike@gmail.com"
] | chikatoike@gmail.com |
ea026000b6292aaf81ca75b7bc134d1a849290bd | ee561aa019a80f621007f82bdb21fe6ed8b6278f | /build/turtlebot3-melodic-devel/turtlebot3_navigation/catkin_generated/pkg.installspace.context.pc.py | 2314ec5e0e068618e2ccde34da5357db5a46d171 | [] | no_license | allanwhledu/agv_edu_prj | 4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2 | 643a8a96ca7027529332f25208350de78c07e33d | refs/heads/master | 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values filled in by catkin's pkg.context.pc.in template (see the generated
# header above); empty strings mean "nothing configured" for this package.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # no exported include dirs
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # no catkin run dependencies
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # no exported libraries
PROJECT_NAME = "turtlebot3_navigation"
PROJECT_SPACE_DIR = "/home/sjtuwhl/ROBOTLAB_WS/install"
PROJECT_VERSION = "1.2.2"
| [
"bitwanghaili@gmail.com"
] | bitwanghaili@gmail.com |
6c8f5a37ab42913c4c7e4bf02a25deb471728273 | d655fd5a39c8434f7e25ec5d7ba24b0277ec3281 | /manager/views.py | a3102abf96c4ec7fa0a5637f6dc7e610a7d846f1 | [] | no_license | kan5/dormitoryDjango | 17a599ea337145e8596ec40b359555d68b590218 | df496f1e1eea840748cb93cf1f5198b021980a52 | refs/heads/main | 2023-04-06T09:31:41.713389 | 2021-03-29T10:15:04 | 2021-03-29T10:15:04 | 352,426,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,794 | py | from django.forms import model_to_dict
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import user_passes_test
from django.utils.decorators import method_decorator
from student.models import *
from .forms import StudentForm, RoomsForm
from django.views.generic.edit import UpdateView, CreateView, DeleteView
from django.contrib.auth.models import User, Group
def is_manager(user):
    """Return True when *user* belongs to the ``manager`` auth group."""
    manager_groups = user.groups.filter(name='manager')
    return manager_groups.exists()
def transliterate(name):
    """Transliterate a Cyrillic string into an ASCII-safe identifier.

    Cyrillic letters are mapped to Latin approximations, spaces become
    underscores and listed punctuation is dropped; characters without a
    mapping pass through unchanged.

    (Mapping credit from the original Russian note: LarsKort, 16/07/2011,
    1:05 GMT-4 -- extend both cases of a letter if you add symbols.)
    """
    # Replacement table: every key is a single character.
    slovar = {'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ё': 'yo',
              'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n',
              'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h',
              'ц': 'c', 'ч': 'ch', 'ш': 'sh', 'щ': 'sch', 'ъ': '', 'ы': 'y', 'ь': '', 'э': 'e',
              'ю': 'u', 'я': 'ya', 'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ё': 'YO',
              'Ж': 'ZH', 'З': 'Z', 'И': 'I', 'Й': 'I', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N',
              'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'H',
              'Ц': 'C', 'Ч': 'CH', 'Ш': 'SH', 'Щ': 'SCH', 'Ъ': '', 'Ы': 'y', 'Ь': '', 'Э': 'E',
              'Ю': 'U', 'Я': 'YA', ',': '', '?': '', ' ': '_', '~': '', '!': '', '@': '', '#': '',
              '$': '', '%': '', '^': '', '&': '', '*': '', '(': '', ')': '', '-': '', '=': '', '+': '',
              ':': '', ';': '', '<': '', '>': '', '\'': '', '"': '', '\\': '', '/': '', '№': '',
              '[': '', ']': '', '{': '', '}': '', 'ґ': '', 'ї': '', 'є': '', 'Ґ': 'g', 'Ї': 'i',
              'Є': 'e', '—': ''}
    # One C-level pass instead of ~100 sequential str.replace() scans.  All
    # replacement values are plain ASCII and never appear as keys themselves,
    # so simultaneous translation is exactly equivalent to the old loop.
    return name.translate(str.maketrans(slovar))
# Create your views here.
@user_passes_test(is_manager)
def editor(request):
    """Render the manager editor page listing every Student."""
    all_students = Student.objects.all()
    context = {"students": all_students}
    return render(request, 'manager/editor.html', context)
@user_passes_test(is_manager)
def new_student(request):
    """Create a Student together with a Django auth account.

    On a valid POST: builds a username from the transliterated first letters
    of surname and patronymic plus the full first name, de-duplicates it with
    a numeric suffix, creates the user with a random password, adds it to the
    ``student`` group, links it to the Student record and shows the generated
    credentials.  On an invalid POST the *bound* form is re-rendered so the
    validation errors stay visible.
    """
    error = ''
    if request.method == 'POST':
        form = StudentForm(request.POST)
        if form.is_valid():
            inst = form.instance
            username = transliterate(f'{inst.surname[0].lower()}{inst.patronymic[0].lower()}{inst.name.lower()}')
            password = User.objects.make_random_password()
            # De-duplicate by appending an incrementing suffix to the *base*
            # name.  (The old code did `username[:-1] + str(c)`, which mangled
            # the name and broke once the counter reached two digits.)
            base_username = username
            c = 1
            while User.objects.filter(username=username).exists():
                username = f'{base_username}{c}'
                c += 1
            user = User.objects.create_user(username=username, password=password)
            user.save()
            my_group = Group.objects.get(name='student')
            my_group.user_set.add(user)
            my_group.save()
            inst.user = user
            form.save()
            return render(request, 'manager/new_student_success.html', {"form": form,
                                                                        "username": username,
                                                                        "password": password})
        # Invalid submission: keep the bound form.  (The old code replaced it
        # with an empty StudentForm(), discarding the field errors, and also
        # carried an unreachable assignment after the `return` above.)
        error = 'Ошибка заполнения!'
    else:
        form = StudentForm()
    data = {
        "form": form,
        "error": error,
    }
    return render(request, 'manager/new_student.html', data)
@user_passes_test(is_manager)
def rooms(request):
    """Render the room overview for managers, ordered by room number."""
    ordered_rooms = Rooms.objects.order_by('number')
    return render(request, 'manager/rooms.html', {"rooms": ordered_rooms})
class StudentView(UpdateView):
    """Manager-only edit form for a single Student."""

    form_class = StudentForm
    model = Student
    success_url = "/manager/editor"
    template_name = "manager/form.html"

    @method_decorator(user_passes_test(is_manager))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP verb behind the manager-group check.
        return super().dispatch(*args, **kwargs)
class StudentDelete(DeleteView):
    """Manager-only confirmation page that deletes a student's User record."""

    model = User
    success_url = '/manager/editor'
    template_name = "manager/student_delete.html"

    @method_decorator(user_passes_test(is_manager))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP verb behind the manager-group check.
        return super().dispatch(*args, **kwargs)
class RoomsView(UpdateView):
    """Manager-only edit form for a single Rooms record."""

    form_class = RoomsForm
    model = Rooms
    success_url = "/manager/rooms"
    template_name = "manager/room_form.html"

    @method_decorator(user_passes_test(is_manager))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP verb behind the manager-group check.
        return super().dispatch(*args, **kwargs)
class RoomsCreate(CreateView):
    """Manager-only creation form for a new Rooms record."""

    form_class = RoomsForm
    model = Rooms
    success_url = '/manager/rooms'
    template_name = "manager/room_add.html"

    @method_decorator(user_passes_test(is_manager))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP verb behind the manager-group check.
        return super().dispatch(*args, **kwargs)
class RoomsDelete(DeleteView):
    """Manager-only confirmation page that deletes a Rooms record."""

    model = Rooms
    success_url = '/manager/rooms'
    template_name = "manager/room_delete.html"

    @method_decorator(user_passes_test(is_manager))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP verb behind the manager-group check.
        return super().dispatch(*args, **kwargs)
| [
"aviator.skid@gmail.com"
] | aviator.skid@gmail.com |
e7118c70a59d6852b6cf2d259be9dde29f0060d1 | 52116128116b228960be86287fbfe5d9059b881b | /virtual/bin/django-admin.py | b93b9c850b00820f402184a17816f6527b5ef266 | [
"MIT"
] | permissive | najma-amin/Instagram | 82d376d66563f02a76af91eaf4a64f448c500615 | 1433e583e65f07df0ab990232b64f7ff59e1517b | refs/heads/master | 2021-09-10T17:17:09.134813 | 2020-03-11T10:05:11 | 2020-03-11T10:05:11 | 245,109,511 | 0 | 0 | MIT | 2021-09-08T01:44:20 | 2020-03-05T08:37:05 | Python | UTF-8 | Python | false | false | 173 | py | #!/home/user/Desktop/Python/Django/Instagram/virtual/bin/python3.6
# Virtualenv entry point: delegate the command line to Django's management
# utility (equivalent to running `django-admin`).
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
| [
"najmaamin10@gmai.com"
] | najmaamin10@gmai.com |
5c71c5dde17a0ffc69632c02cde0ad96d0ca59d5 | 018f1b53befcb178376a62cd8f20c32b723ef0cb | /Неделя 4/Проверка числа на простоту.py | 928936a5da4f646ddcde8c38aaec26202fdb214e | [] | no_license | KiyamovSB/-Python | d8a1b014bb846645e79e4a3aa628158a8aa9fe43 | e0126c10077bb9b17c239de89ae12c7216a6e401 | refs/heads/master | 2022-12-09T17:03:13.580572 | 2020-09-26T20:23:47 | 2020-09-26T20:23:47 | 298,882,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | def IsPrime(n):
i = 2
prime = False
while i <= n ** 0.5:
if n % i == 0:
return prime
i += 1
return not(prime)
# Read one integer and report whether it is prime.
n = int(input())
# IsPrime()'s trial-division loop never runs for n < 4, so it returns True
# for 0, 1 and negatives -- but those are not prime.  Guard them explicitly.
# IsPrime(2) is already True, so the old special case `n == 2 or` is covered.
if n >= 2 and IsPrime(n):
    print('YES')
else:
    print('NO')
| [
"noreply@github.com"
] | KiyamovSB.noreply@github.com |
3892f819ab9827f25746acaf5e7ddb23394850ca | f259ca399ab33b5c2e66ae07921711ea5917ac9e | /pytorch/sphereface.py | d6413d43827cf63587f8d89889c96b608aa81521 | [] | no_license | jizhuoran/HyperTea_Maker | 9a7930e1d6af995c8fdb9a15354eea5fc29f0806 | 2c3f8dfcb699495093165cd986eebedfb17a2433 | refs/heads/master | 2020-04-22T19:32:39.385611 | 2019-04-14T15:12:06 | 2019-04-14T15:12:48 | 170,610,900 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # -*- coding: utf-8 -*-
import torch
from sphere20a import sphere20a as Model
from hypertea_generator.hypertea_generator import HyperteaGenerator
model = Model().train()
precision = 'float'
genetator = HyperteaGenerator(model, torch.ones((1, 3, 112, 96), dtype = torch.float), precision)
output = genetator.get_net_output()
inference_code = f'''
void inference( std::vector<{precision}> &data_from_user, std::vector<{precision}> &data_to_user) {{
auto x = DeviceTensor(data_from_user);
x = relu1_1(conv1_1(x))
x = x + relu1_3(conv1_3(relu1_2(conv1_2(x))))
x = relu2_1(conv2_1(x))
x = x + relu2_3(conv2_3(relu2_2(conv2_2(x))))
x = x + relu2_5(conv2_5(relu2_4(conv2_4(x))))
x = relu3_1(conv3_1(x))
x = x + relu3_3(conv3_3(relu3_2(conv3_2(x))))
x = x + relu3_5(conv3_5(relu3_4(conv3_4(x))))
x = x + relu3_7(conv3_7(relu3_6(conv3_6(x))))
x = x + relu3_9(conv3_9(relu3_8(conv3_8(x))))
x = relu4_1(conv4_1(x))
x = x + relu4_3(conv4_3(relu4_2(conv4_2(x))))
x = fc5(x)
x = fc6(x)
x.copy_to_ptr((void*)data_to_user.data());
}}
'''
print(genetator.network_defination(inference_code, 'work_space/new_net'))
| [
"jizr@connect.hku.hk"
] | jizr@connect.hku.hk |
290db7d14991e2b0d9918c41828a8cb1e04f61f1 | 276f99b46fbb67800d240379b0669af4f29c9801 | /lgsmun/dashboards/consumers.py | 937754887988d9418ade5ec0319be173c1e0f6c7 | [
"MIT"
] | permissive | s-malik03/lgsmun | 13ebdbcc323e090a8f455daae05bfc2d466cafff | f673b07166b4518a06301d14c07084519b2a1863 | refs/heads/master | 2023-05-26T15:43:22.204496 | 2021-06-11T10:29:58 | 2021-06-11T10:29:58 | 327,375,483 | 0 | 0 | MIT | 2021-05-24T13:52:45 | 2021-01-06T16:56:38 | HTML | UTF-8 | Python | false | false | 7,543 | py | from channels.generic.websocket import AsyncWebsocketConsumer
import json
from .models import Attendance, CommitteeControl, Notifications, GSL, RSL, Timer, Messages, FloorMods
from asgiref.sync import sync_to_async
import time
from django.db.models import Q
@sync_to_async
def essentialinfo(Committee, Country):
    """Assemble the delegate-dashboard state for one committee as a JSON string.

    Gathers the delegate's inbox, the present-country placard list, speaker
    lists (GSL/RSL), notifications, floor mods and timer/control fields.
    HTML fragments are built server-side and rendered verbatim by the client.
    Each optional section is wrapped in its own try/except so one empty table
    never blanks the whole payload.
    """
    inbox_text = ''
    rsl = ''
    gsl = ''
    country_list = ''  # renamed from `list`, which shadowed the builtin
    try:
        # Messages either sent or received by this delegation.
        inbox = Messages.objects.filter(Q(committee=Committee), Q(recipient=Country) | Q(sender=Country))
        for i in inbox:
            inbox_text = inbox_text + '(' + i.sender + ' to ' + i.recipient + ')' + i.message + '<br>'
    except Exception:
        pass
    try:
        att = Attendance.objects.filter(committee=Committee).exclude(status="Absent").order_by('country')
        for a in att:
            plcrd = a.placard
            if plcrd == "Placard Raised":
                plcrd = ' <span class="dot"></span>'
            # NOTE(review): when the placard is *not* raised the raw placard
            # value is appended as-is -- confirm that is intended (the dais
            # variant blanks it instead).
            country_list = country_list + '<div class="btn">' + a.country + plcrd + '</div>'
    except Exception:
        pass
    c = CommitteeControl.objects.get(committee=Committee)
    t = Timer.objects.get(committee=Committee)
    try:
        g = GSL.objects.filter(committee=Committee).order_by('date')
        r = RSL.objects.filter(committee=Committee).order_by('date')
        for r_ in r:
            rsl = rsl + '<div class="btn">' + r_.country + '</div>'
        for g_ in g:
            gsl = gsl + '<div class="btn">' + g_.country + '</div>'
    except Exception:
        pass
    nlist = ''
    try:
        n = Notifications.objects.filter(committee=Committee).order_by('-date')
        for n_ in n:
            nlist = nlist + '(' + n_.date.strftime("%H:%M:%S") + ')' + n_.country + ':' + n_.message + '<br>'
    except Exception:
        pass
    modlist = ''
    mnum = 1
    try:
        m = FloorMods.objects.filter(committee=Committee).order_by('date')
        for mod in m:
            modlist = modlist + str(mnum) + '. ' + mod.mod + '<br>'
            mnum = mnum + 1
    except Exception:
        pass
    payload = {  # renamed from `dict`, which shadowed the builtin
        'countrylist': country_list,
        'current_topic': c.topic,
        'speaking_mode': c.speaking_mode,
        'current_mod': c.current_mod,
        'notifications': nlist,
        'gsl': gsl,
        'rsl': rsl,
        'timer_status': t.status,
        'timer_duration': t.duration,
        'total_time': t.total_time,
        'inbox': inbox_text,
        'mods': modlist,
        'zoom_link': c.zoom_link,
        'drive_link': c.drive_link,
        'iteration': c.iteration
    }
    return json.dumps(payload)
@sync_to_async
def essentialinfo_dais(Committee, Country):
    """Assemble the dais-dashboard state for one committee as a JSON string.

    Near-duplicate of essentialinfo(); the only differences are the
    attendance listing (sorted placards first, showing status/recognized)
    and the placard markup.  NOTE(review): consider factoring the shared
    sections into a helper.
    """
    inbox_text = ''
    rsl = ''
    gsl = ''
    # NOTE(review): `list` and `dict` below shadow the builtins.
    list = ''
    try:
        # Messages either sent or received by this delegation.
        inbox = Messages.objects.filter(Q(committee=Committee), Q(recipient=Country) | Q(sender=Country))
        for i in inbox:
            inbox_text = inbox_text + '(' + i.sender + ' to ' + i.recipient + ')' + i.message + '<br>'
    except:
        pass
    try:
        # Raised placards sort first (the second order_by replaces the first).
        att = Attendance.objects.filter(committee=Committee).exclude(status="Absent").order_by('country').order_by(
            '-placard')
        for a in att:
            plcrd = a.placard
            if plcrd == "Placard Raised":
                plcrd = '<span class="dot"></span>'
            else:
                plcrd = ''
            list = list + '<div class="btn">' + a.country + ' | ' + a.status + ' | Recognized: ' + str(
                a.recognized) + ' | ' + plcrd + '</div>\n'
    except:
        pass
    c = CommitteeControl.objects.get(committee=Committee)
    t = Timer.objects.get(committee=Committee)
    try:
        g = GSL.objects.filter(committee=Committee).order_by('date')
        r = RSL.objects.filter(committee=Committee).order_by('date')
        for r_ in r:
            rsl = rsl + '<div class="btn">' + r_.country + '</div>'
        for g_ in g:
            gsl = gsl + '<div class="btn">' + g_.country + '</div>'
    except:
        pass
    nlist = ''
    try:
        n = Notifications.objects.filter(committee=Committee).order_by('-date')
        for n_ in n:
            nlist = nlist + '(' + n_.date.strftime("%H:%M:%S") + ')' + n_.country + ':' + n_.message + '<br>'
    except Exception as e:
        pass
    modlist = ''
    mnum = 1
    try:
        m = FloorMods.objects.filter(committee=Committee).order_by('date')
        for mod in m:
            modlist = modlist + str(mnum) + '. ' + mod.mod + '<br>'
            mnum = mnum + 1
    except:
        pass
    dict = {
        'countrylist': list,
        'current_topic': c.topic,
        'speaking_mode': c.speaking_mode,
        'current_mod': c.current_mod,
        'notifications': nlist,
        'gsl': gsl,
        'rsl': rsl,
        'timer_status': t.status,
        'timer_duration': t.duration,
        'total_time': t.total_time,
        'inbox': inbox_text,
        'mods': modlist,
        'zoom_link': c.zoom_link,
        'drive_link': c.drive_link,
        'iteration': c.iteration
    }
    return json.dumps(dict)
@sync_to_async
def check_iteration(committee, iteration):
    """Return True when the stored committee iteration differs from *iteration*.

    A difference means the client's view is stale and needs a fresh payload.
    """
    stored_iteration = CommitteeControl.objects.get(committee=committee).iteration
    return stored_iteration != iteration
@sync_to_async
def two_cent_time(committee, total_time, speaker_time):
    """Nudge the stored timer toward client-reported readings.

    A stored value is only lowered to the client's number when the drift is
    small (1-4 units); larger gaps are treated as stale/bogus reports and
    ignored.  Always returns 0.
    """
    clock = Timer.objects.get(committee=committee)
    drift_total = int(clock.total_time) - int(total_time)
    drift_speaker = int(clock.duration) - int(speaker_time)
    if drift_total in range(1, 5):
        clock.total_time = total_time
    if drift_speaker in range(1, 5):
        clock.duration = speaker_time
    clock.save()
    return 0
@sync_to_async
def goabsent(committee, country):
    """Mark *country* absent and bump the committee iteration.

    Bumping the iteration forces every connected dashboard to refresh.
    Always returns 0.
    """
    attendance = Attendance.objects.get(country=country, committee=committee)
    control = CommitteeControl.objects.get(committee=committee)
    attendance.status = 'Absent'
    control.iteration += 1
    attendance.save()
    control.save()
    return 0
class Delegate(AsyncWebsocketConsumer):
    """Websocket endpoint polled by delegate dashboards.

    Each client message reports the client's view iteration plus its local
    timer readings; the consumer replies with the full committee state when
    the server-side iteration has advanced, or "NULL" otherwise.  On
    disconnect the delegate is marked absent.
    """

    async def connect(self):
        await self.accept()

    async def receive(self, text_data):
        json_data = json.loads(text_data)
        iteration = int(json_data['iteration'])
        committee = json_data['committee']
        country = json_data['country']
        # Remembered for disconnect(), which marks this delegate absent.
        self.country = country
        self.committee = committee
        total_time = json_data['total_time']
        speaker_time = json_data['speaker_time']
        await two_cent_time(committee, total_time, speaker_time)
        if await check_iteration(committee, iteration):
            einfo = await essentialinfo(committee, country)
            await self.send(einfo)
        else:
            # FIX: send() is a coroutine; the original called it without
            # `await`, so the "NULL" keep-alive reply was never delivered.
            await self.send("NULL")

    async def disconnect(self, code):
        await goabsent(self.committee, self.country)
class Dais(AsyncWebsocketConsumer):
    """Websocket endpoint polled by the dais (chair) dashboard.

    Same protocol as Delegate, but nothing is marked absent on disconnect.
    """

    async def connect(self):
        await self.accept()

    async def receive(self, text_data):
        json_data = json.loads(text_data)
        iteration = int(json_data['iteration'])
        committee = json_data['committee']
        country = json_data['country']
        total_time = json_data['total_time']
        speaker_time = json_data['speaker_time']
        await two_cent_time(committee, total_time, speaker_time)
        if await check_iteration(committee, iteration):
            # NOTE(review): this uses essentialinfo(), not
            # essentialinfo_dais() -- confirm that is intentional.
            einfo = await essentialinfo(committee, country)
            await self.send(einfo)
        else:
            # FIX: send() is a coroutine; the original called it without
            # `await`, so the "NULL" keep-alive reply was never delivered.
            await self.send("NULL")
| [
"safiy.malik@gmail.com"
] | safiy.malik@gmail.com |
a3309f48dc0f1a5cf170079337921110045939e1 | e823bc36af457f229f6879d6e6a3ef6247c129aa | /virtualenv/Lib/site-packages/twisted/conch/test/test_knownhosts.py | fd2cec16c516734dfc889451660e4fb395e172db | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | William-An/DFB_Final | e772fa979c41f2f83a4bf657cde499456215fb3b | 49a9244c98116574676992ebecd1d9435e1d5b1e | refs/heads/master | 2022-11-07T15:47:36.189057 | 2017-07-22T01:01:37 | 2017-07-22T01:01:43 | 97,426,562 | 1 | 1 | MIT | 2022-10-15T02:45:57 | 2017-07-17T02:21:42 | Python | UTF-8 | Python | false | false | 49,361 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.client.knownhosts}.
"""
from __future__ import absolute_import, division
import os
from binascii import Error as BinasciiError, b2a_base64, a2b_base64
from twisted.python.reflect import requireModule
if requireModule('cryptography') and requireModule('pyasn1'):
from twisted.conch.ssh.keys import Key, BadKeyError
from twisted.conch.client.knownhosts import \
PlainEntry, HashedEntry, KnownHostsFile, UnparsedEntry, ConsoleUI
from twisted.conch.client import default
from twisted.conch.test import keydata
else:
skip = "cryptography and PyASN1 required for twisted.conch.knownhosts."
from zope.interface.verify import verifyObject
from twisted.python.filepath import FilePath
from twisted.python.compat import networkString
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.conch.interfaces import IKnownHostEntry
from twisted.conch.error import HostKeyChanged, UserRejectedKey, InvalidEntry
from twisted.test.testutils import ComparisonTestsMixin
sampleEncodedKey = (
b'AAAAB3NzaC1yc2EAAAABIwAAAQEAsV0VMRbGmzhqxxayLRHmvnFvtyNqgbNKV46dU1bVFB+3y'
b'tNvue4Riqv/SVkPRNwMb7eWH29SviXaBxUhYyzKkDoNUq3rTNnH1Vnif6d6X4JCrUb5d3W+Dm'
b'YClyJrZ5HgD/hUpdSkTRqdbQ2TrvSAxRacj+vHHT4F4dm1bJSewm3B2D8HVOoi/CbVh3dsIiC'
b'dp8VltdZx4qYVfYe2LwVINCbAa3d3tj9ma7RVfw3OH2Mfb+toLd1N5tBQFb7oqTt2nC6I/6Bd'
b'4JwPUld+IEitw/suElq/AIJVQXXujeyiZlea90HE65U2mF1ytr17HTAIT2ySokJWyuBANGACk'
b'6iIaw==')
otherSampleEncodedKey = (
b'AAAAB3NzaC1yc2EAAAABIwAAAIEAwaeCZd3UCuPXhX39+/p9qO028jTF76DMVd9mPvYVDVXuf'
b'WckKZauF7+0b7qm+ChT7kan6BzRVo4++gCVNfAlMzLysSt3ylmOR48tFpAfygg9UCX3DjHz0E'
b'lOOUKh3iifc9aUShD0OPaK3pR5JJ8jfiBfzSYWt/hDi/iZ4igsSs8=')
thirdSampleEncodedKey = (
b'AAAAB3NzaC1yc2EAAAABIwAAAQEAl/TQakPkePlnwCBRPitIVUTg6Z8VzN1en+DGkyo/evkmLw'
b'7o4NWR5qbysk9A9jXW332nxnEuAnbcCam9SHe1su1liVfyIK0+3bdn0YRB0sXIbNEtMs2LtCho'
b'/aV3cXPS+Cf1yut3wvIpaRnAzXxuKPCTXQ7/y0IXa8TwkRBH58OJa3RqfQ/NsSp5SAfdsrHyH2'
b'aitiVKm2jfbTKzSEqOQG/zq4J9GXTkq61gZugory/Tvl5/yPgSnOR6C9jVOMHf27ZPoRtyj9SY'
b'343Hd2QHiIE0KPZJEgCynKeWoKz8v6eTSK8n4rBnaqWdp8MnGZK1WGy05MguXbyCDuTC8AmJXQ'
b'==')
ecdsaSampleEncodedKey = (
b'AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIFwh3/zBANyPPIE60'
b'SMMfdKMYo3OvfvzGLZphzuKrzSt0q4uF+/iYqtYiHhryAwU/fDWlUQ9kck9f+IlpsNtY4=')
# Decoded binary forms of the base64 key blobs defined above.
sampleKey = a2b_base64(sampleEncodedKey)
otherSampleKey = a2b_base64(otherSampleEncodedKey)
thirdSampleKey = a2b_base64(thirdSampleEncodedKey)
ecdsaSampleKey = a2b_base64(ecdsaSampleEncodedKey)

# Pre-baked known_hosts lines in each on-disk format exercised by the tests:
# plain hostname, plain host-plus-IP, and hashed-hostname entries.
samplePlaintextLine = (
    b"www.twistedmatrix.com ssh-rsa " + sampleEncodedKey + b"\n")

otherSamplePlaintextLine = (
    b"divmod.com ssh-rsa " + otherSampleEncodedKey + b"\n")

sampleHostIPLine = (
    b"www.twistedmatrix.com,198.49.126.131 ssh-rsa " + sampleEncodedKey + b"\n")

sampleHashedLine = (
    b"|1|gJbSEPBG9ZSBoZpHNtZBD1bHKBA=|bQv+0Xa0dByrwkA1EB0E7Xop/Fo= ssh-rsa " +
    sampleEncodedKey + b"\n")
class EntryTestsMixin:
    """
    Tests for implementations of L{IKnownHostEntry}. Subclasses must set the
    'entry' attribute to a provider of that interface, the implementation of
    that interface under test.

    @ivar entry: a provider of L{IKnownHostEntry} with a hostname of
    www.twistedmatrix.com and an RSA key of sampleKey.
    """
    # Shared fixture contract: each subclass's setUp() creates self.entry.

    def test_providesInterface(self):
        """
        The given entry should provide IKnownHostEntry.
        """
        verifyObject(IKnownHostEntry, self.entry)

    def test_fromString(self):
        """
        Constructing a plain text entry from an unhashed known_hosts entry will
        result in an L{IKnownHostEntry} provider with 'keyString', 'hostname',
        and 'keyType' attributes. While outside the interface in question,
        these attributes are held in common by L{PlainEntry} and L{HashedEntry}
        implementations; other implementations should override this method in
        subclasses.
        """
        entry = self.entry
        self.assertEqual(entry.publicKey, Key.fromString(sampleKey))
        self.assertEqual(entry.keyType, b"ssh-rsa")

    def test_matchesKey(self):
        """
        L{IKnownHostEntry.matchesKey} checks to see if an entry matches a given
        SSH key.
        """
        # sampleKey is the fixture entry's key; otherSampleKey must not match.
        twistedmatrixDotCom = Key.fromString(sampleKey)
        divmodDotCom = Key.fromString(otherSampleKey)
        self.assertEqual(
            True,
            self.entry.matchesKey(twistedmatrixDotCom))
        self.assertEqual(
            False,
            self.entry.matchesKey(divmodDotCom))

    def test_matchesHost(self):
        """
        L{IKnownHostEntry.matchesHost} checks to see if an entry matches a
        given hostname.
        """
        self.assertTrue(self.entry.matchesHost(b"www.twistedmatrix.com"))
        self.assertFalse(self.entry.matchesHost(b"www.divmod.com"))
class PlainEntryTests(EntryTestsMixin, TestCase):
    """
    Test cases for L{PlainEntry}.
    """
    # Subclasses override these to exercise comment-bearing variants.
    plaintextLine = samplePlaintextLine
    hostIPLine = sampleHostIPLine

    def setUp(self):
        """
        Set 'entry' to a sample plain-text entry with sampleKey as its key.
        """
        self.entry = PlainEntry.fromString(self.plaintextLine)

    def test_matchesHostIP(self):
        """
        A "hostname,ip" formatted line will match both the host and the IP.
        """
        self.entry = PlainEntry.fromString(self.hostIPLine)
        self.assertTrue(self.entry.matchesHost(b"198.49.126.131"))
        # Re-run the mixin's hostname assertions against the host,ip entry.
        self.test_matchesHost()

    def test_toString(self):
        """
        L{PlainEntry.toString} generates the serialized OpenSSL format string
        for the entry, sans newline.
        """
        self.assertEqual(self.entry.toString(), self.plaintextLine.rstrip(b"\n"))
        multiHostEntry = PlainEntry.fromString(self.hostIPLine)
        self.assertEqual(multiHostEntry.toString(),
                         self.hostIPLine.rstrip(b"\n"))
class PlainTextWithCommentTests(PlainEntryTests):
    """
    Re-run the full L{PlainEntry} suite against lines that carry trailing
    free-text comments after the key material.
    """
    # Drop the trailing newline, append the comment text, restore the newline.
    plaintextLine = samplePlaintextLine.rstrip(b"\n") + b" plain text comment.\n"
    hostIPLine = sampleHostIPLine.rstrip(b"\n") + b" text following host/IP line\n"
class HashedEntryTests(EntryTestsMixin, ComparisonTestsMixin, TestCase):
    """
    Tests for L{HashedEntry}.

    This suite doesn't include any tests for host/IP pairs because hashed
    entries store IP addresses the same way as hostnames and does not support
    comma-separated lists. (If you hash the IP and host together you can't
    tell if you've got the key already for one or the other.)
    """
    # Subclasses override this to exercise comment-bearing variants.
    hashedLine = sampleHashedLine

    def setUp(self):
        """
        Set 'entry' to a sample hashed entry for twistedmatrix.com with
        sampleKey as its key.
        """
        self.entry = HashedEntry.fromString(self.hashedLine)

    def test_toString(self):
        """
        L{HashedEntry.toString} generates the serialized OpenSSL format string
        for the entry, sans the newline.
        """
        self.assertEqual(self.entry.toString(), self.hashedLine.rstrip(b"\n"))

    def test_equality(self):
        """
        Two L{HashedEntry} instances compare equal if and only if they represent
        the same host and key in exactly the same way: the host salt, host hash,
        public key type, public key, and comment fields must all be equal.
        """
        hostSalt = b"gJbSEPBG9ZSBoZpHNtZBD1bHKBA"
        hostHash = b"bQv+0Xa0dByrwkA1EB0E7Xop/Fo"
        publicKey = Key.fromString(sampleKey)
        keyType = networkString(publicKey.type())
        comment = b"hello, world"
        entry = HashedEntry(
            hostSalt, hostHash, keyType, publicKey, comment)
        duplicate = HashedEntry(
            hostSalt, hostHash, keyType, publicKey, comment)
        # assertNormalEqualityImplementation(a, b, c) checks a == b and a != c
        # with reflected variants -- presumably provided by
        # ComparisonTestsMixin (twisted.test.testutils); confirm there.
        # Vary the host salt
        self.assertNormalEqualityImplementation(
            entry, duplicate,
            HashedEntry(
                hostSalt[::-1], hostHash, keyType, publicKey,
                comment))
        # Vary the host hash
        self.assertNormalEqualityImplementation(
            entry, duplicate,
            HashedEntry(
                hostSalt, hostHash[::-1], keyType, publicKey,
                comment))
        # Vary the key type
        self.assertNormalEqualityImplementation(
            entry, duplicate,
            HashedEntry(
                hostSalt, hostHash, keyType[::-1], publicKey,
                comment))
        # Vary the key
        self.assertNormalEqualityImplementation(
            entry, duplicate,
            HashedEntry(
                hostSalt, hostHash, keyType,
                Key.fromString(otherSampleKey), comment))
        # Vary the comment
        self.assertNormalEqualityImplementation(
            entry, duplicate,
            HashedEntry(
                hostSalt, hostHash, keyType, publicKey,
                comment[::-1]))
class HashedEntryWithCommentTests(HashedEntryTests):
    """
    Test cases for L{HashedEntry} when parsed from a line with a comment.
    """
    # (Docstring fixed: it previously said L{PlainEntry}, copy-pasted from
    # PlainTextWithCommentTests, but this subclass exercises HashedEntry.)
    hashedLine = sampleHashedLine[:-1] + b" plain text comment.\n"
class UnparsedEntryTests(TestCase, EntryTestsMixin):
    """
    Tests for L{UnparsedEntry}
    """
    # Overrides the mixin's matchesHost/matchesKey/fromString expectations:
    # an unparsed entry matches nothing and only preserves its raw text.
    def setUp(self):
        """
        Set up the 'entry' to be an unparsed entry for some random text.
        """
        self.entry = UnparsedEntry(b" This is a bogus entry. \n")

    def test_fromString(self):
        """
        Creating an L{UnparsedEntry} should simply record the string it was
        passed.
        """
        self.assertEqual(b" This is a bogus entry. \n",
                         self.entry._string)

    def test_matchesHost(self):
        """
        An unparsed entry can't match any hosts.
        """
        self.assertFalse(self.entry.matchesHost(b"www.twistedmatrix.com"))

    def test_matchesKey(self):
        """
        An unparsed entry can't match any keys.
        """
        self.assertFalse(self.entry.matchesKey(Key.fromString(sampleKey)))

    def test_toString(self):
        """
        L{UnparsedEntry.toString} returns its input string, sans trailing
        newline.
        """
        self.assertEqual(b" This is a bogus entry. ", self.entry.toString())
class ParseErrorTests(TestCase):
    """
    L{HashedEntry.fromString} and L{PlainEntry.fromString} can raise a variety
    of errors depending on misformattings of certain strings. These tests make
    sure those errors are caught. Since many of the ways that this can go
    wrong are in the lower-level APIs being invoked by the parsing logic,
    several of these are integration tests with the C{base64} and
    L{twisted.conch.ssh.keys} modules.
    """
    # The three helpers below are parameterized over the entry class so the
    # same malformed-input checks run against both PlainEntry and HashedEntry.

    def invalidEntryTest(self, cls):
        """
        If there are fewer than three elements, C{fromString} should raise
        L{InvalidEntry}.
        """
        self.assertRaises(InvalidEntry, cls.fromString, b"invalid")

    def notBase64Test(self, cls):
        """
        If the key is not base64, C{fromString} should raise L{BinasciiError}.
        """
        self.assertRaises(BinasciiError, cls.fromString, b"x x x")

    def badKeyTest(self, cls, prefix):
        """
        If the key portion of the entry is valid base64, but is not actually an
        SSH key, C{fromString} should raise L{BadKeyError}.
        """
        self.assertRaises(BadKeyError, cls.fromString, b' '.join(
            [prefix, b"ssh-rsa", b2a_base64(
                b"Hey, this isn't an SSH key!").strip()]))

    def test_invalidPlainEntry(self):
        """
        If there are fewer than three whitespace-separated elements in an
        entry, L{PlainEntry.fromString} should raise L{InvalidEntry}.
        """
        self.invalidEntryTest(PlainEntry)

    def test_invalidHashedEntry(self):
        """
        If there are fewer than three whitespace-separated elements in an
        entry, or the hostname salt/hash portion has more than two elements,
        L{HashedEntry.fromString} should raise L{InvalidEntry}.
        """
        self.invalidEntryTest(HashedEntry)
        a, b, c = sampleHashedLine.split()
        self.assertRaises(InvalidEntry, HashedEntry.fromString, b' '.join(
            [a + b"||", b, c]))

    def test_plainNotBase64(self):
        """
        If the key portion of a plain entry is not decodable as base64,
        C{fromString} should raise L{BinasciiError}.
        """
        self.notBase64Test(PlainEntry)

    def test_hashedNotBase64(self):
        """
        If the key, host salt, or host hash portion of a hashed entry is not
        encoded, it will raise L{BinasciiError}.
        """
        self.notBase64Test(HashedEntry)
        a, b, c = sampleHashedLine.split()
        # Salt not valid base64.
        self.assertRaises(
            BinasciiError, HashedEntry.fromString,
            b' '.join([b"|1|x|" + b2a_base64(b"stuff").strip(), b, c]))
        # Host hash not valid base64.
        self.assertRaises(
            BinasciiError, HashedEntry.fromString,
            b' '.join(
                [HashedEntry.MAGIC + b2a_base64(b"stuff").strip() + b"|x",
                 b, c]))
        # Neither salt nor hash valid base64.
        self.assertRaises(
            BinasciiError, HashedEntry.fromString,
            b' '.join([b"|1|x|x", b, c]))

    def test_hashedBadKey(self):
        """
        If the key portion of the entry is valid base64, but is not actually an
        SSH key, C{HashedEntry.fromString} should raise L{BadKeyError}.
        """
        a, b, c = sampleHashedLine.split()
        self.badKeyTest(HashedEntry, a)

    def test_plainBadKey(self):
        """
        If the key portion of the entry is valid base64, but is not actually an
        SSH key, C{PlainEntry.fromString} should raise L{BadKeyError}.
        """
        self.badKeyTest(PlainEntry, b"hostname")
class KnownHostsDatabaseTests(TestCase):
"""
Tests for L{KnownHostsFile}.
"""
def pathWithContent(self, content):
"""
Return a FilePath with the given initial content.
"""
fp = FilePath(self.mktemp())
fp.setContent(content)
return fp
def loadSampleHostsFile(self, content=(
sampleHashedLine + otherSamplePlaintextLine +
b"\n# That was a blank line.\n"
b"This is just unparseable.\n"
b"|1|This also unparseable.\n")):
"""
Return a sample hosts file, with keys for www.twistedmatrix.com and
divmod.com present.
"""
return KnownHostsFile.fromPath(self.pathWithContent(content))
def test_readOnlySavePath(self):
"""
L{KnownHostsFile.savePath} is read-only; if an assignment is made to
it, L{AttributeError} is raised and the value is unchanged.
"""
path = FilePath(self.mktemp())
new = FilePath(self.mktemp())
hostsFile = KnownHostsFile(path)
self.assertRaises(AttributeError, setattr, hostsFile, "savePath", new)
self.assertEqual(path, hostsFile.savePath)
def test_defaultInitializerIgnoresExisting(self):
"""
The default initializer for L{KnownHostsFile} disregards any existing
contents in the save path.
"""
hostsFile = KnownHostsFile(self.pathWithContent(sampleHashedLine))
self.assertEqual([], list(hostsFile.iterentries()))
def test_defaultInitializerClobbersExisting(self):
"""
After using the default initializer for L{KnownHostsFile}, the first use
of L{KnownHostsFile.save} overwrites any existing contents in the save
path.
"""
path = self.pathWithContent(sampleHashedLine)
hostsFile = KnownHostsFile(path)
entry = hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
hostsFile.save()
# Check KnownHostsFile to see what it thinks the state is
self.assertEqual([entry], list(hostsFile.iterentries()))
# And also directly check the underlying file itself
self.assertEqual(entry.toString() + b"\n", path.getContent())
def test_saveResetsClobberState(self):
"""
After L{KnownHostsFile.save} is used once with an instance initialized
by the default initializer, contents of the save path are respected and
preserved.
"""
hostsFile = KnownHostsFile(self.pathWithContent(sampleHashedLine))
preSave = hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
hostsFile.save()
postSave = hostsFile.addHostKey(
b"another.example.com", Key.fromString(thirdSampleKey))
hostsFile.save()
self.assertEqual([preSave, postSave], list(hostsFile.iterentries()))
def test_loadFromPath(self):
"""
Loading a L{KnownHostsFile} from a path with six entries in it will
result in a L{KnownHostsFile} object with six L{IKnownHostEntry}
providers in it.
"""
hostsFile = self.loadSampleHostsFile()
self.assertEqual(6, len(list(hostsFile.iterentries())))
def test_iterentriesUnsaved(self):
"""
If the save path for a L{KnownHostsFile} does not exist,
L{KnownHostsFile.iterentries} still returns added but unsaved entries.
"""
hostsFile = KnownHostsFile(FilePath(self.mktemp()))
hostsFile.addHostKey(b"www.example.com", Key.fromString(sampleKey))
self.assertEqual(1, len(list(hostsFile.iterentries())))
def test_verifyHashedEntry(self):
"""
Loading a L{KnownHostsFile} from a path containing a single valid
L{HashedEntry} entry will result in a L{KnownHostsFile} object
with one L{IKnownHostEntry} provider.
"""
hostsFile = self.loadSampleHostsFile((sampleHashedLine))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], HashedEntry)
self.assertTrue(entries[0].matchesHost(b"www.twistedmatrix.com"))
self.assertEqual(1, len(entries))
def test_verifyPlainEntry(self):
"""
Loading a L{KnownHostsFile} from a path containing a single valid
L{PlainEntry} entry will result in a L{KnownHostsFile} object
with one L{IKnownHostEntry} provider.
"""
hostsFile = self.loadSampleHostsFile((otherSamplePlaintextLine))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], PlainEntry)
self.assertTrue(entries[0].matchesHost(b"divmod.com"))
self.assertEqual(1, len(entries))
def test_verifyUnparsedEntry(self):
"""
Loading a L{KnownHostsFile} from a path that only contains '\n' will
result in a L{KnownHostsFile} object containing a L{UnparsedEntry}
object.
"""
hostsFile = self.loadSampleHostsFile((b"\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"")
self.assertEqual(1, len(entries))
def test_verifyUnparsedComment(self):
"""
Loading a L{KnownHostsFile} from a path that contains a comment will
result in a L{KnownHostsFile} object containing a L{UnparsedEntry}
object.
"""
hostsFile = self.loadSampleHostsFile((b"# That was a blank line.\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"# That was a blank line.")
def test_verifyUnparsableLine(self):
"""
Loading a L{KnownHostsFile} from a path that contains an unparseable
line will be represented as an L{UnparsedEntry} instance.
"""
hostsFile = self.loadSampleHostsFile((b"This is just unparseable.\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"This is just unparseable.")
self.assertEqual(1, len(entries))
def test_verifyUnparsableEncryptionMarker(self):
"""
Loading a L{KnownHostsFile} from a path containing an unparseable line
that starts with an encryption marker will be represented as an
L{UnparsedEntry} instance.
"""
hostsFile = self.loadSampleHostsFile((b"|1|This is unparseable.\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"|1|This is unparseable.")
self.assertEqual(1, len(entries))
def test_loadNonExistent(self):
"""
Loading a L{KnownHostsFile} from a path that does not exist should
result in an empty L{KnownHostsFile} that will save back to that path.
"""
pn = self.mktemp()
knownHostsFile = KnownHostsFile.fromPath(FilePath(pn))
entries = list(knownHostsFile.iterentries())
self.assertEqual([], entries)
self.assertFalse(FilePath(pn).exists())
knownHostsFile.save()
self.assertTrue(FilePath(pn).exists())
    def test_loadNonExistentParent(self):
        """
        Loading a L{KnownHostsFile} from a path whose parent directory does not
        exist should result in an empty L{KnownHostsFile} that will save back
        to that path, creating its parent directory(ies) in the process.
        """
        thePath = FilePath(self.mktemp())
        # Note the mixed str/bytes child segments; FilePath accepts either.
        knownHostsPath = thePath.child("foo").child(b"known_hosts")
        knownHostsFile = KnownHostsFile.fromPath(knownHostsPath)
        # save() is expected to create the missing "foo" directory itself.
        knownHostsFile.save()
        # Drop FilePath's cached stat of the (formerly absent) path; the
        # False argument suppresses the error if it still does not exist.
        knownHostsPath.restat(False)
        self.assertTrue(knownHostsPath.exists())
    def test_savingAddsEntry(self):
        """
        L{KnownHostsFile.save} will write out a new file with any entries
        that have been added.
        """
        path = self.pathWithContent(sampleHashedLine +
                                    otherSamplePlaintextLine)
        knownHostsFile = KnownHostsFile.fromPath(path)
        newEntry = knownHostsFile.addHostKey(b"some.example.com",
                                             Key.fromString(thirdSampleKey))
        # New entries are hashed, so reconstruct byte-for-byte the line that
        # save() should append: MAGIC + salt + "|" + hash + " ssh-rsa " + key.
        expectedContent = (
            sampleHashedLine +
            otherSamplePlaintextLine + HashedEntry.MAGIC +
            b2a_base64(newEntry._hostSalt).strip() + b"|" +
            b2a_base64(newEntry._hostHash).strip() + b" ssh-rsa " +
            thirdSampleEncodedKey + b"\n")
        # Sanity check, let's make sure the base64 API being used for the test
        # isn't inserting spurious newlines.
        self.assertEqual(3, expectedContent.count(b"\n"))
        knownHostsFile.save()
        self.assertEqual(expectedContent, path.getContent())
def test_savingAvoidsDuplication(self):
"""
L{KnownHostsFile.save} only writes new entries to the save path, not
entries which were added and already written by a previous call to
C{save}.
"""
path = FilePath(self.mktemp())
knownHosts = KnownHostsFile(path)
entry = knownHosts.addHostKey(
b"some.example.com", Key.fromString(sampleKey))
knownHosts.save()
knownHosts.save()
knownHosts = KnownHostsFile.fromPath(path)
self.assertEqual([entry], list(knownHosts.iterentries()))
    def test_savingsPreservesExisting(self):
        """
        L{KnownHostsFile.save} will not overwrite existing entries in its save
        path, even if they were only added after the L{KnownHostsFile} instance
        was initialized.
        """
        # Start off with one host/key pair in the file
        path = self.pathWithContent(sampleHashedLine)
        knownHosts = KnownHostsFile.fromPath(path)
        # After initializing the KnownHostsFile instance, add a second host/key
        # pair to the file directly - without the instance's help or knowledge.
        with path.open("a") as hostsFileObj:
            hostsFileObj.write(otherSamplePlaintextLine)
        # Add a third host/key pair using the KnownHostsFile instance
        key = Key.fromString(thirdSampleKey)
        knownHosts.addHostKey(b"brandnew.example.com", key)
        # save() must merge with the on-disk contents, not clobber them.
        knownHosts.save()
        # Check that all three host/key pairs are present.
        knownHosts = KnownHostsFile.fromPath(path)
        self.assertEqual([True, True, True], [
            knownHosts.hasHostKey(
                b"www.twistedmatrix.com", Key.fromString(sampleKey)),
            knownHosts.hasHostKey(
                b"divmod.com", Key.fromString(otherSampleKey)),
            knownHosts.hasHostKey(b"brandnew.example.com", key)])
def test_hasPresentKey(self):
"""
L{KnownHostsFile.hasHostKey} returns C{True} when a key for the given
hostname is present and matches the expected key.
"""
hostsFile = self.loadSampleHostsFile()
self.assertTrue(hostsFile.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(sampleKey)))
def test_notPresentKey(self):
"""
L{KnownHostsFile.hasHostKey} returns C{False} when a key for the given
hostname is not present.
"""
hostsFile = self.loadSampleHostsFile()
self.assertFalse(hostsFile.hasHostKey(
b"non-existent.example.com", Key.fromString(sampleKey)))
self.assertTrue(hostsFile.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(sampleKey)))
self.assertFalse(hostsFile.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(ecdsaSampleKey)))
def test_hasLaterAddedKey(self):
"""
L{KnownHostsFile.hasHostKey} returns C{True} when a key for the given
hostname is present in the file, even if it is only added to the file
after the L{KnownHostsFile} instance is initialized.
"""
key = Key.fromString(sampleKey)
entry = PlainEntry([b"brandnew.example.com"], key.sshType(), key, b"")
hostsFile = self.loadSampleHostsFile()
with hostsFile.savePath.open("a") as hostsFileObj:
hostsFileObj.write(entry.toString() + b"\n")
self.assertEqual(
True, hostsFile.hasHostKey(b"brandnew.example.com", key))
def test_savedEntryHasKeyMismatch(self):
"""
L{KnownHostsFile.hasHostKey} raises L{HostKeyChanged} if the host key is
present in the underlying file, but different from the expected one.
The resulting exception should have an C{offendingEntry} indicating the
given entry.
"""
hostsFile = self.loadSampleHostsFile()
entries = list(hostsFile.iterentries())
exception = self.assertRaises(
HostKeyChanged, hostsFile.hasHostKey,
b"www.twistedmatrix.com", Key.fromString(otherSampleKey))
self.assertEqual(exception.offendingEntry, entries[0])
self.assertEqual(exception.lineno, 1)
self.assertEqual(exception.path, hostsFile.savePath)
    def test_savedEntryAfterAddHasKeyMismatch(self):
        """
        Even after a new entry has been added in memory but not yet saved, the
        L{HostKeyChanged} exception raised by L{KnownHostsFile.hasHostKey} has a
        C{lineno} attribute which indicates the 1-based line number of the
        offending entry in the underlying file when the given host key does not
        match the expected host key.
        """
        hostsFile = self.loadSampleHostsFile()
        # An unsaved, unrelated in-memory entry must not shift the reported
        # line number of the on-disk entry that actually conflicts.
        hostsFile.addHostKey(
            b"www.example.com", Key.fromString(otherSampleKey))
        exception = self.assertRaises(
            HostKeyChanged, hostsFile.hasHostKey,
            b"www.twistedmatrix.com", Key.fromString(otherSampleKey))
        self.assertEqual(exception.lineno, 1)
        self.assertEqual(exception.path, hostsFile.savePath)
def test_unsavedEntryHasKeyMismatch(self):
"""
L{KnownHostsFile.hasHostKey} raises L{HostKeyChanged} if the host key is
present in memory (but not yet saved), but different from the expected
one. The resulting exception has a C{offendingEntry} indicating the
given entry, but no filename or line number information (reflecting the
fact that the entry exists only in memory).
"""
hostsFile = KnownHostsFile(FilePath(self.mktemp()))
entry = hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
exception = self.assertRaises(
HostKeyChanged, hostsFile.hasHostKey,
b"www.example.com", Key.fromString(thirdSampleKey))
self.assertEqual(exception.offendingEntry, entry)
self.assertIsNone(exception.lineno)
self.assertIsNone(exception.path)
    def test_addHostKey(self):
        """
        L{KnownHostsFile.addHostKey} adds a new L{HashedEntry} to the host
        file, and returns it.
        """
        hostsFile = self.loadSampleHostsFile()
        aKey = Key.fromString(thirdSampleKey)
        self.assertEqual(False,
                         hostsFile.hasHostKey(b"somewhere.example.com", aKey))
        newEntry = hostsFile.addHostKey(b"somewhere.example.com", aKey)
        # The code in OpenSSH requires host salts to be 20 characters long.
        # This is the required length of a SHA-1 HMAC hash, so it's just a
        # sanity check.
        self.assertEqual(20, len(newEntry._hostSalt))
        self.assertEqual(True,
                         newEntry.matchesHost(b"somewhere.example.com"))
        self.assertEqual(newEntry.keyType, b"ssh-rsa")
        self.assertEqual(aKey, newEntry.publicKey)
        # The addition is visible through the file object immediately, before
        # any save().
        self.assertEqual(True,
                         hostsFile.hasHostKey(b"somewhere.example.com", aKey))
def test_randomSalts(self):
"""
L{KnownHostsFile.addHostKey} generates a random salt for each new key,
so subsequent salts will be different.
"""
hostsFile = self.loadSampleHostsFile()
aKey = Key.fromString(thirdSampleKey)
self.assertNotEqual(
hostsFile.addHostKey(b"somewhere.example.com", aKey)._hostSalt,
hostsFile.addHostKey(b"somewhere-else.example.com", aKey)._hostSalt)
def test_verifyValidKey(self):
"""
Verifying a valid key should return a L{Deferred} which fires with
True.
"""
hostsFile = self.loadSampleHostsFile()
hostsFile.addHostKey(b"1.2.3.4", Key.fromString(sampleKey))
ui = FakeUI()
d = hostsFile.verifyHostKey(ui, b"www.twistedmatrix.com", b"1.2.3.4",
Key.fromString(sampleKey))
l = []
d.addCallback(l.append)
self.assertEqual(l, [True])
def test_verifyInvalidKey(self):
"""
Verifying an invalid key should return a L{Deferred} which fires with a
L{HostKeyChanged} failure.
"""
hostsFile = self.loadSampleHostsFile()
wrongKey = Key.fromString(thirdSampleKey)
ui = FakeUI()
hostsFile.addHostKey(b"1.2.3.4", Key.fromString(sampleKey))
d = hostsFile.verifyHostKey(
ui, b"www.twistedmatrix.com", b"1.2.3.4", wrongKey)
return self.assertFailure(d, HostKeyChanged)
    def verifyNonPresentKey(self):
        """
        Set up a test to verify a key that isn't present.  Return a 3-tuple of
        the UI, a list set up to collect the result of the verifyHostKey call,
        and the sample L{KnownHostsFile} being used.

        This utility method avoids returning a L{Deferred}, and records results
        in the returned list instead, because the events which get generated
        here are pre-recorded in the 'ui' object.  If the L{Deferred} in
        question does not fire, then it will fail quickly with an empty list.
        """
        hostsFile = self.loadSampleHostsFile()
        absentKey = Key.fromString(thirdSampleKey)
        ui = FakeUI()
        l = []
        d = hostsFile.verifyHostKey(
            ui, b"sample-host.example.com", b"4.3.2.1", absentKey)
        # Collect success or failure; the prompt has not been answered yet,
        # so nothing should have arrived.
        d.addBoth(l.append)
        self.assertEqual([], l)
        # The user must be shown the host, its IP, and the key's SHA256
        # fingerprint before being asked to confirm.
        self.assertEqual(
            ui.promptText,
            b"The authenticity of host 'sample-host.example.com (4.3.2.1)' "
            b"can't be established.\n"
            b"RSA key fingerprint is "
            b"SHA256:mS7mDBGhewdzJkaKRkx+wMjUdZb/GzvgcdoYjX5Js9I=.\n"
            b"Are you sure you want to continue connecting (yes/no)? ")
        return ui, l, hostsFile
def test_verifyNonPresentKey_Yes(self):
"""
Verifying a key where neither the hostname nor the IP are present
should result in the UI being prompted with a message explaining as
much. If the UI says yes, the Deferred should fire with True.
"""
ui, l, knownHostsFile = self.verifyNonPresentKey()
ui.promptDeferred.callback(True)
self.assertEqual([True], l)
reloaded = KnownHostsFile.fromPath(knownHostsFile.savePath)
self.assertEqual(
True,
reloaded.hasHostKey(b"4.3.2.1", Key.fromString(thirdSampleKey)))
self.assertEqual(
True,
reloaded.hasHostKey(b"sample-host.example.com",
Key.fromString(thirdSampleKey)))
def test_verifyNonPresentKey_No(self):
"""
Verifying a key where neither the hostname nor the IP are present
should result in the UI being prompted with a message explaining as
much. If the UI says no, the Deferred should fail with
UserRejectedKey.
"""
ui, l, knownHostsFile = self.verifyNonPresentKey()
ui.promptDeferred.callback(False)
l[0].trap(UserRejectedKey)
    def test_verifyNonPresentECKey(self):
        """
        Verifying an ECDSA key that is not present prompts the user with the
        key's SHA256 fingerprint, labelled with the "ECDSA" key type.
        """
        # Build an ECDSA key directly from its nistp256 components.
        ecObj = Key._fromECComponents(
            x=keydata.ECDatanistp256['x'],
            y=keydata.ECDatanistp256['y'],
            privateValue=keydata.ECDatanistp256['privateValue'],
            curve=keydata.ECDatanistp256['curve']
        )
        hostsFile = self.loadSampleHostsFile()
        ui = FakeUI()
        l = []
        d = hostsFile.verifyHostKey(
            ui, b"sample-host.example.com", b"4.3.2.1", ecObj)
        d.addBoth(l.append)
        # The prompt has not been answered, so no result has arrived yet.
        self.assertEqual([], l)
        self.assertEqual(
            ui.promptText,
            b"The authenticity of host 'sample-host.example.com (4.3.2.1)' "
            b"can't be established.\n"
            b"ECDSA key fingerprint is "
            b"SHA256:fJnSpgCcYoYYsaBbnWj1YBghGh/QTDgfe4w4U5M5tEo=.\n"
            b"Are you sure you want to continue connecting (yes/no)? ")
def test_verifyHostIPMismatch(self):
"""
Verifying a key where the host is present (and correct), but the IP is
present and different, should result the deferred firing in a
HostKeyChanged failure.
"""
hostsFile = self.loadSampleHostsFile()
wrongKey = Key.fromString(thirdSampleKey)
ui = FakeUI()
d = hostsFile.verifyHostKey(
ui, b"www.twistedmatrix.com", b"4.3.2.1", wrongKey)
return self.assertFailure(d, HostKeyChanged)
    def test_verifyKeyForHostAndIP(self):
        """
        Verifying a key where the hostname is present but the IP is not should
        result in the key being added for the IP and the user being warned
        about the change.
        """
        ui = FakeUI()
        hostsFile = self.loadSampleHostsFile()
        expectedKey = Key.fromString(sampleKey)
        hostsFile.verifyHostKey(
            ui, b"www.twistedmatrix.com", b"5.4.3.2", expectedKey)
        # The new IP must have been persisted to disk, not merely cached.
        self.assertEqual(
            True, KnownHostsFile.fromPath(hostsFile.savePath).hasHostKey(
                b"5.4.3.2", expectedKey))
        # And the user is informed of the automatic addition.
        self.assertEqual(
            ["Warning: Permanently added the RSA host key for IP address "
             "'5.4.3.2' to the list of known hosts."],
            ui.userWarnings)
def test_getHostKeyAlgorithms(self):
"""
For a given host, get the host key algorithms for that
host in the known_hosts file.
"""
hostsFile = self.loadSampleHostsFile()
hostsFile.addHostKey(
b"www.twistedmatrix.com", Key.fromString(otherSampleKey))
hostsFile.addHostKey(
b"www.twistedmatrix.com", Key.fromString(ecdsaSampleKey))
hostsFile.save()
options = {}
options['known-hosts'] = hostsFile.savePath.path
algorithms = default.getHostKeyAlgorithms(
b"www.twistedmatrix.com", options)
expectedAlgorithms = [b'ssh-rsa', b'ecdsa-sha2-nistp256']
self.assertEqual(algorithms, expectedAlgorithms)
class FakeFile(object):
    """
    A minimal in-memory stand-in for a file object, sufficient for
    L{ConsoleUI.prompt}.

    @ivar inlines: lines queued up to be returned by C{readline}.
    @ivar outchunks: every chunk passed to C{write}, in order.
    @ivar closed: whether C{close} has been called.
    """
    def __init__(self):
        self.closed = False
        self.inlines = []
        self.outchunks = []
    def readline(self):
        """
        Pop and return the next queued input line.
        """
        return self.inlines.pop(0)
    def write(self, chunk):
        """
        Record C{chunk}, refusing to accept writes once closed.
        """
        if self.closed:
            raise IOError("the file was closed")
        self.outchunks.append(chunk)
    def close(self):
        """
        Mark this fake file as closed so further writes raise.
        """
        self.closed = True
class ConsoleUITests(TestCase):
    """
    Test cases for L{ConsoleUI}.
    """
    def setUp(self):
        """
        Create a L{ConsoleUI} pointed at a L{FakeFile}.
        """
        # ConsoleUI takes an *opener* callable, not a file object: it opens
        # the console for each interaction and closes it afterwards.
        self.fakeFile = FakeFile()
        self.ui = ConsoleUI(self.openFile)
    def openFile(self):
        """
        Return the current fake file.
        """
        return self.fakeFile
    def newFile(self, lines):
        """
        Create a new fake file (the next file that self.ui will open) with the
        given list of lines to be returned from readline().
        """
        self.fakeFile = FakeFile()
        self.fakeFile.inlines = lines
    def test_promptYes(self):
        """
        L{ConsoleUI.prompt} writes a message to the console, then reads a line.
        If that line is 'yes', then it returns a L{Deferred} that fires with
        True.
        """
        # Capitalization and a trailing newline must all be accepted.
        for okYes in [b'yes', b'Yes', b'yes\n']:
            self.newFile([okYes])
            l = []
            self.ui.prompt("Hello, world!").addCallback(l.append)
            self.assertEqual(["Hello, world!"], self.fakeFile.outchunks)
            self.assertEqual([True], l)
            # Every interaction must close the console it opened.
            self.assertTrue(self.fakeFile.closed)
    def test_promptNo(self):
        """
        L{ConsoleUI.prompt} writes a message to the console, then reads a line.
        If that line is 'no', then it returns a L{Deferred} that fires with
        False.
        """
        for okNo in [b'no', b'No', b'no\n']:
            self.newFile([okNo])
            l = []
            self.ui.prompt("Goodbye, world!").addCallback(l.append)
            self.assertEqual(["Goodbye, world!"], self.fakeFile.outchunks)
            self.assertEqual([False], l)
            self.assertTrue(self.fakeFile.closed)
    def test_promptRepeatedly(self):
        """
        L{ConsoleUI.prompt} writes a message to the console, then reads a line.
        If that line is neither 'yes' nor 'no', then it says "Please enter
        'yes' or 'no'" until it gets a 'yes' or a 'no', at which point it
        returns a Deferred that answers either True or False.
        """
        # Three garbage answers, then a 'yes': expect three re-prompts.
        self.newFile([b'what', b'uh', b'okay', b'yes'])
        l = []
        self.ui.prompt(b"Please say something useful.").addCallback(l.append)
        self.assertEqual([True], l)
        self.assertEqual(self.fakeFile.outchunks,
                         [b"Please say something useful."] +
                         [b"Please type 'yes' or 'no': "] * 3)
        self.assertTrue(self.fakeFile.closed)
        # Same again, ending in a 'no'.
        self.newFile([b'blah', b'stuff', b'feh', b'no'])
        l = []
        self.ui.prompt(b"Please say something negative.").addCallback(l.append)
        self.assertEqual([False], l)
        self.assertEqual(self.fakeFile.outchunks,
                         [b"Please say something negative."] +
                         [b"Please type 'yes' or 'no': "] * 3)
        self.assertTrue(self.fakeFile.closed)
    def test_promptOpenFailed(self):
        """
        If the C{opener} passed to L{ConsoleUI} raises an exception, that
        exception will fail the L{Deferred} returned from L{ConsoleUI.prompt}.
        """
        def raiseIt():
            raise IOError()
        ui = ConsoleUI(raiseIt)
        d = ui.prompt("This is a test.")
        return self.assertFailure(d, IOError)
    def test_warn(self):
        """
        L{ConsoleUI.warn} should output a message to the console object.
        """
        self.ui.warn("Test message.")
        self.assertEqual(["Test message."], self.fakeFile.outchunks)
        self.assertTrue(self.fakeFile.closed)
    def test_warnOpenFailed(self):
        """
        L{ConsoleUI.warn} should log a traceback if the output can't be opened.
        """
        def raiseIt():
            1 / 0
        ui = ConsoleUI(raiseIt)
        ui.warn("This message never makes it.")
        # warn() must swallow the opener's failure and send it to the log;
        # flushLoggedErrors both asserts on and cleans up the logged error.
        self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)
class FakeUI(object):
    """
    An in-memory UI double satisfying the interface expected by
    L{KnownHostsFile.verifyHostKey}.

    @ivar userWarnings: every message passed to L{warn}, in order.
    @ivar promptDeferred: the L{Deferred} returned by the most recent
        L{prompt} call, or L{None} if never prompted.
    @ivar promptText: the message given to the most recent L{prompt} call,
        or L{None} if never prompted.
    """
    def __init__(self):
        self.userWarnings = []
        self.promptDeferred = None
        self.promptText = None
    def prompt(self, text):
        """
        Record the prompt text and hand back a fresh, unfired L{Deferred}
        that the test can fire to simulate the user's answer.
        """
        self.promptText = text
        self.promptDeferred = Deferred()
        return self.promptDeferred
    def warn(self, text):
        """
        Record a non-interactive warning for later inspection.
        """
        self.userWarnings.append(text)
class FakeObject(object):
    """
    An empty attribute bag standing in for richer objects such as
    L{SSHClientTransport} and L{SSHClientFactory}; tests hang whatever
    attributes they need directly off instances of it.
    """
class DefaultAPITests(TestCase):
    """
    The API in L{twisted.conch.client.default.verifyHostKey} is the integration
    point between the code in the rest of conch and L{KnownHostsFile}.
    """
    def patchedOpen(self, fname, mode):
        """
        The patched version of 'open'; this returns a L{FakeFile} that the
        instantiated L{ConsoleUI} can use.
        """
        # verifyHostKey talks to the user over the controlling terminal.
        self.assertEqual(fname, "/dev/tty")
        self.assertEqual(mode, "r+b")
        return self.fakeFile
    def setUp(self):
        """
        Patch 'open' in verifyHostKey.
        """
        self.fakeFile = FakeFile()
        self.patch(default, "_open", self.patchedOpen)
        self.hostsOption = self.mktemp()
        self.hashedEntries = {}
        # Seed the known_hosts file with hashed entries for one hostname and
        # one IP address, both using sampleKey.
        knownHostsFile = KnownHostsFile(FilePath(self.hostsOption))
        for host in (b"exists.example.com", b"4.3.2.1"):
            entry = knownHostsFile.addHostKey(host, Key.fromString(sampleKey))
            self.hashedEntries[host] = entry
        knownHostsFile.save()
        # Minimal transport/factory stand-ins; verifyHostKey only reads the
        # factory's options mapping from the transport.
        self.fakeTransport = FakeObject()
        self.fakeTransport.factory = FakeObject()
        self.options = self.fakeTransport.factory.options = {
            'host': b"exists.example.com",
            'known-hosts': self.hostsOption
            }
    def test_verifyOKKey(self):
        """
        L{default.verifyHostKey} should return a L{Deferred} which fires with
        C{1} when passed a host, IP, and key which already match the
        known_hosts file it is supposed to check.
        """
        l = []
        default.verifyHostKey(self.fakeTransport, b"4.3.2.1", sampleKey,
                              b"I don't care.").addCallback(l.append)
        self.assertEqual([1], l)
    def replaceHome(self, tempHome):
        """
        Replace the HOME environment variable until the end of the current
        test, with the given new home-directory, so that L{os.path.expanduser}
        will yield controllable, predictable results.

        @param tempHome: the pathname to replace the HOME variable with.

        @type tempHome: L{str}
        """
        oldHome = os.environ.get('HOME')
        def cleanupHome():
            # Restore (or remove) HOME exactly as it was before the test.
            if oldHome is None:
                del os.environ['HOME']
            else:
                os.environ['HOME'] = oldHome
        self.addCleanup(cleanupHome)
        os.environ['HOME'] = tempHome
    def test_noKnownHostsOption(self):
        """
        L{default.verifyHostKey} should find your known_hosts file in
        ~/.ssh/known_hosts if you don't specify one explicitly on the command
        line.
        """
        l = []
        tmpdir = self.mktemp()
        oldHostsOption = self.hostsOption
        # Relocate the seeded known_hosts into the default ~/.ssh location
        # under a fake HOME directory.
        hostsNonOption = FilePath(tmpdir).child(".ssh").child("known_hosts")
        hostsNonOption.parent().makedirs()
        FilePath(oldHostsOption).moveTo(hostsNonOption)
        self.replaceHome(tmpdir)
        self.options['known-hosts'] = None
        default.verifyHostKey(self.fakeTransport, b"4.3.2.1", sampleKey,
                              b"I don't care.").addCallback(l.append)
        self.assertEqual([1], l)
    def test_verifyHostButNotIP(self):
        """
        L{default.verifyHostKey} should return a L{Deferred} which fires with
        C{1} when passed a host which matches with an IP is not present in its
        known_hosts file, and should also warn the user that it has added the
        IP address.
        """
        l = []
        default.verifyHostKey(self.fakeTransport, b"8.7.6.5", sampleKey,
                              b"Fingerprint not required.").addCallback(l.append)
        # The warning is written through the patched /dev/tty console.
        self.assertEqual(
            ["Warning: Permanently added the RSA host key for IP address "
             "'8.7.6.5' to the list of known hosts."],
            self.fakeFile.outchunks)
        self.assertEqual([1], l)
        # The IP's key must also have been persisted to the file.
        knownHostsFile = KnownHostsFile.fromPath(FilePath(self.hostsOption))
        self.assertTrue(knownHostsFile.hasHostKey(b"8.7.6.5",
                                                  Key.fromString(sampleKey)))
    def test_verifyQuestion(self):
        """
        L{default.verifyHostKey} should return a L{Deferred} which fails with
        L{UserRejectedKey} when passed an unknown host that the user refuses
        to acknowledge.
        """
        self.fakeTransport.factory.options['host'] = b'fake.example.com'
        # Queue the user's "no" answer on the fake console.
        self.fakeFile.inlines.append(b"no")
        d = default.verifyHostKey(
            self.fakeTransport, b"9.8.7.6", otherSampleKey,
            b"No fingerprint!")
        self.assertEqual(
            [b"The authenticity of host 'fake.example.com (9.8.7.6)' "
             b"can't be established.\n"
             b"RSA key fingerprint is "
             b"SHA256:vD0YydsNIUYJa7yLZl3tIL8h0vZvQ8G+HPG7JLmQV0s=.\n"
             b"Are you sure you want to continue connecting (yes/no)? "],
            self.fakeFile.outchunks)
        return self.assertFailure(d, UserRejectedKey)
    def test_verifyBadKey(self):
        """
        L{default.verifyHostKey} should return a L{Deferred} which fails with
        L{HostKeyChanged} if the host key is incorrect.
        """
        d = default.verifyHostKey(
            self.fakeTransport, b"4.3.2.1", otherSampleKey,
            "Again, not required.")
        return self.assertFailure(d, HostKeyChanged)
    def test_inKnownHosts(self):
        """
        L{default.isInKnownHosts} should return C{1} when a host with a key
        is in the known hosts file.
        """
        # Use the hashed host token exactly as it appears on disk.
        host = self.hashedEntries[b"4.3.2.1"].toString().split()[0]
        r = default.isInKnownHosts(
            host, Key.fromString(sampleKey).blob(),
            {"known-hosts": FilePath(self.hostsOption).path})
        self.assertEqual(1, r)
    def test_notInKnownHosts(self):
        """
        L{default.isInKnownHosts} should return C{0} when a host with a key
        is not in the known hosts file.
        """
        r = default.isInKnownHosts(
            "not.there", b"irrelevant",
            {"known-hosts": FilePath(self.hostsOption).path})
        self.assertEqual(0, r)
    def test_inKnownHostsKeyChanged(self):
        """
        L{default.isInKnownHosts} should return C{2} when a host with a key
        other than the given one is in the known hosts file.
        """
        host = self.hashedEntries[b"4.3.2.1"].toString().split()[0]
        r = default.isInKnownHosts(
            host, Key.fromString(otherSampleKey).blob(),
            {"known-hosts": FilePath(self.hostsOption).path})
        self.assertEqual(2, r)
| [
"China_Aisa@live.com"
] | China_Aisa@live.com |
cb34ecf455869089c28b4136af48cd0efcd38d7c | 1a0f235eac43fd2aee565d4558026ce807ef64f3 | /hieu.py | 32cea649adbb3da3ccb8110b93451152018bd1a4 | [] | no_license | hiltoncybrigde/newlaravel | ddefae603788d88b020025c0f9ec91847794afbc | 95c656022250777a5ff55dd5fb8ce98342fe0c8d | refs/heads/master | 2022-10-05T07:28:09.647553 | 2020-06-09T03:18:20 | 2020-06-09T03:18:20 | 270,887,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | import subprocess
import os

# Tiny interactive helper: either creates a folder under a sibling project
# directory, or stages/commits/pushes a project with git, driven by free-form
# prompts.
# NOTE(review): each command string runs in its own shell via subprocess, so a
# bare "cd /var/www" has no effect on later calls; every command therefore has
# to spell out its own paths. Confirm the intended working directory and
# consider os.chdir() or Popen(..., cwd=...) instead.
you = input("what you want bruh??? ")
robot_do = "cd /var/www"
if "make folder" in you:
    subprocess.Popen(robot_do, shell=True, stdout=subprocess.PIPE).stdout.read()
    robot_brain = "nice \"First commit\""
    print(robot_brain)
    project_name = input("what yours project name bruh??? ")
    path = '../' + project_name
    if os.path.exists(path):
        # Existing project: create the new folder inside it.
        folder_name = input("what do you want yours folder name bruh??? ")
        robot_do_3 = "mkdir ../" + project_name + "/" + folder_name
    else:
        # No such project: create the folder alongside this script instead.
        folder_name = input("what do you want yours new folder name bruh??? ")
        robot_do_3 = "mkdir ../" + folder_name
    subprocess.Popen(robot_do_3, shell=True, stdout=subprocess.PIPE).stdout.read()
    robot_brain = "done yours new folder is " + folder_name
    print(robot_brain)
elif "git" in you:
    subprocess.Popen(robot_do, shell=True, stdout=subprocess.PIPE).stdout.read()
    robot_brain = "nice"
    print(robot_brain)
    project_name = input("what yours project name bruh??? ")
    path = '../' + project_name
    if os.path.exists(path):
        git_already = input("git yet?[y/n] ")
        if git_already == "n":
            # BUG FIX: the commit and push commands were concatenated with no
            # newline between them, so the shell received one garbled command
            # ('git commit -m "irst commit"git push ...'); the commit message
            # was also missing its leading "F".
            robot_do_3 = (
                "cd ../" + project_name + "\n"
                "git add .\n"
                "git commit -m \"First commit\"\n"
                "git push -u origin master\n"
            )
            subprocess.Popen(robot_do_3, shell=True, stdout=subprocess.PIPE).stdout.read()
            robot_brain = "done"
            print(robot_brain)
elif you == "":
    robot_brain = "bye bro"
    # BUG FIX: the farewell was assigned but never shown to the user.
    print(robot_brain)
"hilton@asia.cybridge.jp"
] | hilton@asia.cybridge.jp |
48fd33e7df5a1383c24df241c2e1d5375fe9846b | bca58bb1294ea3ec9ea4ebb11b06f6e5d34ce7a3 | /backend/missingink/textreader.py | fc48476a23f3c6637be5604865947f137fa9ef10 | [] | no_license | PythonTutor-us/missing-ink | 67ab440269a199468beb61d32932ae3da3ef5e72 | d08615e5a9f8dfb899b8388462c550282ca6990a | refs/heads/master | 2020-04-07T01:11:12.441772 | 2018-11-17T18:22:05 | 2018-11-17T18:22:05 | 157,933,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,805 | py | if False:
    # NOTE(review): dead code -- this entire suite is guarded by "if False:"
    # above and never executes. It is an earlier word-counting prototype kept
    # for reference; consider deleting it.
    input = "This is my test input string. This is my test input string. This is another test input string to work on."
    wordlist = []
    wordlist = input.split(" ")
    print(wordlist)
    # countlist is seeded with the *string* "1"; countlist[index] += 1 would
    # raise TypeError for index 0 if this ever ran.
    countlist = ["1"]
    wordindexlist = ["abcd"]
    # range(0, len-1) stops before the final index, so the last word would
    # never be counted (off-by-one left as-is in this dead code).
    for i in range(0,len(wordlist)-1):
        valuei = wordlist[i]
        try:
            index = wordindexlist.index(valuei)
        except ValueError as e:
            index = -1
        if index == -1:
            countlist.append(1)
            wordindexlist.append(valuei)
        else:
            countlist[index] += 1
    print(countlist)
from collections import defaultdict
from spellchecker import SpellChecker
import nltk
import json

# Tokenize a deliberately misspelled sample sentence, count word frequencies,
# spell-correct the tokens (including re-joining words that were split in
# two), and classify each word's part of speech. Results are printed; the
# json.dumps calls at the end exercise serialisation.
# NOTE(review): "input" shadows the builtin of the same name from here on.
input = "Thiss is my test inpaut string. This is myy test input string. This is another amaz ing test inp ut string to work onn.".lower()
input_tokenized = nltk.word_tokenize(input)
#print(input_tokenized)
# word -> number of occurrences
word_count = defaultdict(lambda: 0)
#for index, element in enumerate(input_tokenized):
#    print(index, element)
for i in range(0,len(input_tokenized)):
    word_count[input_tokenized[i]] += 1
#rint(word_count)
# Invert word_count: occurrence count -> list of words seen that many times.
word_map = {}
for key, value in word_count.items():
    if value in word_map:
        word_map[value].append(key)
    else:
        word_map[value] = [key]
#print(word_map)
#print(len(word_count))
#print(sum(word_count.values()))
#print(word_count["this"])
#print(word_map[1])
# Spell Checking: will create a new list of words "input_corrected" that is identical to input_tokenized, but corrected
input_corrected = input_tokenized.copy()
spell = SpellChecker()
# token index -> misspelled token, for every token the checker doesn't know.
corrections_map = {}
for i in range(len(input_corrected)):
    if len(spell.unknown([input_corrected[i]])) != 0:
        corrections_map[i] = next(iter(spell.unknown([input_corrected[i]])))
#print(corrections_map)
#Below will merge 2 words if they spell a word together
merge_map = {}
for key, value in corrections_map.items():
    if key != len(input_corrected)-1:
        if len(spell.unknown([value + input_corrected[key+1]])) == 0:
            merge_map[key] = value + input_corrected[key + 1]
    if key != 0:
        if len(spell.unknown([input_corrected[key-1] + value])) == 0:
            merge_map[key-1] = input_corrected[key-1] + value
for key in merge_map:
    # Collapse the pair into the left slot and blank the right one; the
    # blanks are stripped out below. Both halves no longer need correcting.
    input_corrected[key] = input_corrected[key] + input_corrected[key + 1]
    input_corrected[key+1] = ""
    if key in corrections_map:
        corrections_map.pop(key)
    if key+1 in corrections_map:
        corrections_map.pop(key+1)
#print(corrections_map)
for key, value in corrections_map.items():
    corrections_map[key] = spell.correction(value)
for key, value in corrections_map.items():
    input_corrected[key] = corrections_map[key]
# Strip the "" placeholders left behind by the merge step.
i=0
while i < len(input_corrected):
    if input_corrected[i] == "":
        input_corrected.pop(i)
    else:
        # BUG FIX: the index was previously advanced even after a pop, which
        # skipped the element that shifted into position i and could leave
        # consecutive blanks in the list. Only advance when nothing was
        # removed.
        i += 1
#print(input_corrected)
#text = 'This is a table. We should table this offer. The table is in the center.'
#text = nltk.word_tokenize(text)
grammer_input = nltk.pos_tag(input_corrected)
grammer_map = {}
grammer_map2 = {}
# Penn Treebank tag -> coarse category; tags absent from this table are
# simply dropped from grammer_map2.
grammer_map_help = {"NN":"Noun","NNS":"Noun","NNP":"Noun","NNPS":"Noun","PRP":"Noun","JJ":"Adjective","JJR":"Adjective","JJS":"Adjective","VBD":"Verb","VBG":"Verb","VBN":"Verb"}
for i in range(len(grammer_input)):
    grammer_map[grammer_input[i][0]] = grammer_input[i][1]
for key, value in grammer_map.items():
    if value in grammer_map_help:
        grammer_map2[key] = grammer_map_help[value]
#print(grammer_map2)
#if value in word_map:
#    appearance_map[value].append(key)
#else:
#    appearance_map[value] = [key]
#result = [i for i in result if i[0].lower() == 'table']
print(word_count)
print(word_map)
print(corrections_map)
print(input_corrected)
print(grammer_map2)
# NOTE(review): json.dumps returns a string; these return values are
# discarded. Capture or print them if the serialised output is wanted.
json.dumps(word_count)
json.dumps(word_map)
json.dumps(corrections_map)
json.dumps(input_corrected)
json.dumps(grammer_map2)
'''
misspelled = spell.unknown(['something', 'is', 'hapenning', 'here'])
for word in misspelled:
# Get the one `most likely` answer
print(spell.correction(word))
# Get a list of `likely` options
print(spell.candidates(word))
'''
'''
for key, value in wordmap.items():
if value in word_map:
appearance_map[value].append(key)
else:
appearance_map[value] = [key]
'''
'''
word_map = {}
appearance_map = {}
input_tokenized.sort()
unique_element = set(input_tokenized)
print(unique_element)
for index, element in enumerate(input_tokenized):
if element in unique_element:
if element in appearance_map.keys():
appearance_map[element] += 1
else:
appearance_map[element] = 1
#print(appearance_map)
for key, value in appearance_map.items():
if value in word_map:
word_map[value].append(key)
else:
word_map[value] = [key]
print(word_map)
'''
#for key in wordmap:
# print (key, wordmap[key])
'''
wordcountlist = []
maxcount = 0
for key in wordmap:
maxcount = max(wordmap[key],maxcount)
for i in range(maxcount+1):
wordcountlist.append([])
for key in wordmap:
wordcountlist[wordmap[key]].append(key)
for i in range(maxcount+1):
wordcountlist[i].sort()
'''
# wordcountmap = {}
# maxcount = 0
# for key in wordmap:
# maxcount = max(wordmap[key],maxcount)
# for i in range(maxcount+1):
# wordcountmap[i] = []
# for key in wordmap:
# wordcountmap[wordmap[key]].append(key)
#
# for key in range(maxcount+1):
# wordcountmap[key].sort()
#
#
#
#
#
# print(wordcountmap)
'''
wordlist = []
i=0
input += " "
while i < len(input)-1:
if input[i:i+1] != " ":
j=1
while input[i+j:i+j+1] != " ":
j+=1
wordlist.append(input[i:i+j])
i += j
else:
i +=1
'''
#wordlist = []
#wordlist = input.split(" ")
| [
"jonahmerrell@gmail.com"
] | jonahmerrell@gmail.com |
82e75749c330f7681d206b65485e46c59d36fd2e | 0088ff708d0e32d24cefe82c958f39ecd31c4871 | /Ngo_website/run.py | 735fef98610ed15cbd12bbca693944aa68b2611d | [] | no_license | Nemilshah1999/NGO | 1c93c44f143cdde76cf6b55c3b4a63a8125d4906 | e1a55b2af96414ab3d493432720a7c9ed795c534 | refs/heads/master | 2020-03-29T18:44:14.365092 | 2019-03-04T14:19:54 | 2019-03-04T14:19:54 | 150,228,777 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | from simple import app
# Start the development server only when the module is executed directly,
# not when `app` is imported by a WSGI server.
if __name__=='__main__':
    app.debug=True  # interactive debugger/auto-reload; development only
    app.run()
| [
"noreply@github.com"
] | Nemilshah1999.noreply@github.com |
c777bda236d45c8da3df02608b87dfb8bf4feb11 | d48f49361680f3bc9db7cd2c2fa87730a19241a6 | /World/wsgi.py | 528abf609adca58a612db623a32e1565df893abd | [] | no_license | rocklikereeju00/Avijit-s-Site.github.io | e24d7c1a682a4a4ab39003802ff2a0597ca3b613 | feda24c7f3b1e96112129e0458b5ee6dc8e5c9bc | refs/heads/main | 2023-08-01T11:38:21.578267 | 2021-09-09T14:32:34 | 2021-09-09T14:32:34 | 398,778,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for World project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'World.settings')
application = get_wsgi_application()
| [
"77102472+rocklikereeju00@users.noreply.github.com"
] | 77102472+rocklikereeju00@users.noreply.github.com |
4fdd0aff70c6e5d48b238a75b5e44117d63fa8bf | 45ffac84a30fb00d8f350b6a0c8e547c755ff762 | /Fundamentals/XOs.py | 6221b200bb33225fe9b0ee076c9420f2f0d714cc | [] | no_license | All-I-Do-Is-Wynn/Python-Codes | 46324dab9e65edab2476dbae3ee8cbfb46528bed | f00810dc87c07e032352ce4f5fec341a179c3d2a | refs/heads/master | 2022-12-23T01:44:18.957728 | 2022-12-18T06:40:25 | 2022-12-18T06:40:25 | 160,409,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # Check to see if a string has the same amount of 'x's and 'o's.
# The method must return a boolean and be case insensitive.
# The string can contain any char.
def xo(s):
countx = 0
counto = 0
if not s:
return True
for char in s:
if char == 'x' or char == 'X':
countx += 1
elif char == 'o' or char == 'O':
counto += 1
if countx == counto:
return True
else:
return False | [
"36213318+All-I-Do-Is-Wynn@users.noreply.github.com"
] | 36213318+All-I-Do-Is-Wynn@users.noreply.github.com |
e9dfb0e3bcc9bd274fa48b51fe6060bd14ae10b0 | a6281073aaddf903d13d903e01ef8f6597e0c366 | /RPWR/lookup/urls.py | 5e482fdb24bd35e8a3820306e06da0aa9fab213c | [] | no_license | pronob1010/D152-Recipe-provider-with-Redis | 9c92be028bef4260a26b876084fde6aa51662ea6 | 970b5f98da7e5e35de9fe8b9642d64e89daff809 | refs/heads/main | 2023-06-23T18:21:42.697646 | 2021-07-25T11:57:01 | 2021-07-25T11:57:01 | 389,307,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django.urls import path
from . views import *
urlpatterns = [
path('', index, name="index"),
path('details/<int:pk>', details, name="details" )
]
| [
"pronobmozumder.info@gmail.com"
] | pronobmozumder.info@gmail.com |
051bdc1380d45c157e13f653fd4f88437038a031 | 97c08a5e9fbc49bdd43ae4943d9d788a6fa99c6a | /fuseq/collection.py | 4d3469617d2735c859a7dd18cb85391252acdef9 | [
"MIT"
] | permissive | kinkalow/fuseq | aecfd5a9e32fe127bdb71ee0ec341c8cb4d42fbd | 020c359881acb7971a350da16f1b687760dfa78e | refs/heads/main | 2023-06-03T14:49:28.596818 | 2021-06-23T06:59:05 | 2021-06-23T08:18:19 | 353,524,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,099 | py | import csv
import glob
import multiprocessing
import os
from fuseq.timer import Timer
from fuseq.base import Base
class Collection(Base):
    """Collect breakpoint-spanning reads from STAR output as Blat input.

    The scan is split over several generated bash scripts (one per worker)
    that search each sample's STAR ``*.junction``/``*.sam`` files for reads
    supporting the breakpoints listed in the fusion file.  The scripts run
    either as a UGE array job (``params.on_shirokane``) or via local
    multiprocessing; their outputs are then concatenated and per-breakpoint
    read counts are attached to the breakpoint records.
    """

    def __init__(self, params):
        """Derive input/output paths from *params*; touches no files yet."""
        super().__init__()
        self.params = params
        # Sibling "input" directory next to the work dir; holds symlinks to
        # the fusion file and the STAR output directory.
        self.input_dir = f'{os.path.dirname(params.work_dir)}/input'
        self.mf_path = f'{self.input_dir}/fusion.txt'
        self.star_dir = f'{self.input_dir}/{os.path.basename(params.inputs["star_dir"])}'
        # Base name of the collection output file (mapping defined on Base).
        self.out_file = self.files['coll']

    def __create_symlinks(self):
        """Populate the input dir with links to the fusion file and STAR dir.

        When ``params.mf_lines`` is given, a trimmed copy of the fusion file
        containing only those 1-based line numbers is written instead of a
        symlink (assumes ``mf_lines`` is sorted ascending -- TODO confirm).
        """
        # Create input directory
        os.makedirs(self.input_dir, exist_ok=True)
        # Remove files (lexists: also catches stale/broken symlinks)
        if os.path.lexists(self.mf_path):
            os.remove(self.mf_path)
        if os.path.lexists(self.star_dir):
            os.remove(self.star_dir)
        # Create star symbolic file
        os.symlink(self.params.inputs['star_dir'], self.star_dir)
        # Create fusion symbolic file or new file
        if not self.params.mf_lines:
            os.symlink(self.params.inputs['mf_path'], self.mf_path)
        else:
            # Extract specified lines from a fusion file and write them
            idx = 0
            with open(self.params.inputs['mf_path'], 'r') as fr:
                with open(self.mf_path, 'w') as fw:
                    for i, row in enumerate(fr, start=1):
                        if i == self.params.mf_lines[idx]:
                            fw.write(row)
                            idx += 1
                            if idx > len(self.params.mf_lines) - 1:
                                break

    def __get_breakinfo(self):
        """Parse the fusion file into a list of breakpoint dicts.

        Rows whose chromosome columns are not in '0'..'22', 'X', 'Y' are
        skipped (note ``range(23)`` yields '0'..'22' -- presumably matching
        the fusion caller's chromosome naming; TODO confirm).  ``linenr`` is
        the 1-based line number within the fusion file (line 1 is the
        header).
        """
        chrs = list(map(lambda x: str(x), range(23))) + list('XY')
        breakinfo = []
        with open(self.mf_path, 'r') as f_mf:
            # Skip header line
            f_mf.readline()
            # Main lines
            linenr = 1
            for row_mf in f_mf:
                # Extract chrs and bps from merge_fusionfusion file
                linenr += 1
                row_mf = row_mf.rstrip('\n').split('\t')
                if row_mf[1] not in chrs or row_mf[4] not in chrs:
                    continue
                [sample, chr1, bp1, strand1, chr2, bp2, strand2] = row_mf[0:7]
                [gene1, junc1, gene2, junc2] = row_mf[8:12]
                breakinfo.append({
                    'linenr': linenr, 'sample': sample,
                    'chr1': chr1, 'bp1': bp1, 'strand1': strand1, 'gene1': gene1, 'junc1': junc1,
                    'chr2': chr2, 'bp2': bp2, 'strand2': strand2, 'gene2': gene2, 'junc2': junc2})
        return breakinfo

    def __create_script(self, breakinfo):
        """Write one bash collection script per worker and return their
        output paths.

        Breakpoints are divided as evenly as possible over at most
        ``params.num_coll_parallels`` scripts.  Each script scans the
        sample's STAR junction file for read names matching the breakpoint
        pair (each breakpoint shifted by one according to its strand), pulls
        the corresponding sequences out of the sibling ``.sam``, and appends
        FASTA records named ``>LINENR-COUNT_READNAME`` to its output file.
        The script for output path P is written to ``P.sh``.
        """
        # Commands for filtering (empty string disables the filter)
        readname_filt_cmd = \
            f'[ "$readname" != \'{self.params.readname_filt}\' ] && continue' \
            if self.params.readname_filt else ''
        seq_filt_cmd = \
            f'[ "$seq" != \'{self.params.seq_filt}\' ] && continue' \
            if self.params.seq_filt else ''
        # Commands
        cmd_head = '#!/bin/bash\n\nset -eu\n\n'
        cmd_main = '''\
chr1='{chr1}'
chr2='{chr2}'
bp1='{bp1}'
bp2='{bp2}'
jun_path='{jun_path}'
sam_path="${{jun_path%\\.*}}.sam"
out_path='{out_path}'
touch "$out_path"
cnt='0'
for readname in $(cat "$jun_path" | awk '{{ \\
if ( ($1 == "'$chr1'" && $2 == "'$bp1'" && $4 == "'$chr2'" && $5 == "'$bp2'") || \\
($1 == "'$chr2'" && $2 == "'$bp2'" && $4 == "'$chr1'" && $5 == "'$bp1'") \\
) print $10 }}'); do
{readname_filt_cmd}
seqs=$(grep "^$readname" "$sam_path" | awk '{{ if ($7 != "=" && $9 == 0 && $15 != "XS:A:+") print $10 }}')
[ -z "$seqs" ] && continue
for seq in $seqs; do
{seq_filt_cmd}
cnt=$((cnt+1))
printf ">{linenr}-${{cnt}}_$readname\\n$seq\\n" >> "$out_path"
done
done
\n
'''
        line_cnt = len(breakinfo)
        n_parallels = min(line_cnt, self.params.num_coll_parallels)
        width = len(str(n_parallels))
        # Determine the number of lines each process
        # (`heads` ends up holding half-open slice boundaries into breakinfo;
        # the first n_plus1 workers receive one extra breakpoint each)
        lines_each_proc = line_cnt // n_parallels
        n_plus1 = line_cnt - lines_each_proc * n_parallels
        if n_plus1 == 0:
            heads = [i * lines_each_proc for i in range(n_parallels + 1)]
        else:
            plus1lines_each_proc = lines_each_proc + 1
            total_plus1lines = plus1lines_each_proc * n_plus1
            n_plus0 = n_parallels - n_plus1
            heads = \
                [i * plus1lines_each_proc for i in range(n_plus1)] + \
                [total_plus1lines + i * lines_each_proc for i in range(n_plus0 + 1)]
        jun_dic = {}  # sample name -> path of its STAR *.junction file (cached)
        out_paths = []
        for i, (head, tail) in enumerate(zip(heads, heads[1:])):
            # Worker outputs are numbered with a fixed-width suffix so the
            # UGE array job can reconstruct the name from SGE_TASK_ID.
            out_path = f'{self.params.swork_dir}/{self.out_file}{str(i+1).zfill(width)}'
            script_path = f'{out_path}.sh'
            out_paths.append(out_path)
            with open(script_path, 'w') as f:
                # NOTE(review): cmd_head contains no '{out_path}' placeholder,
                # so this .format() is a no-op -- presumably a leftover.
                f.write(cmd_head.format(out_path=out_path))
                for d in breakinfo[head:tail]:
                    # Relies on dict insertion order from __get_breakinfo.
                    [linenr, sample, chr1, bp1, strand1, _, _,
                     chr2, bp2, strand2, _, _] = d.values()
                    if sample not in jun_dic:
                        jun_dic[sample] = glob.glob(f'{self.star_dir}/{sample}/*.junction')[0]
                    jun_path = jun_dic[sample]
                    # Shift each breakpoint by one toward the read direction.
                    bp1_arng = str(int(bp1) + 1) if strand1 == '+' else str(int(bp1) - 1)
                    bp2_arng = str(int(bp2) + 1) if strand2 == '+' else str(int(bp2) - 1)
                    cmd = cmd_main.format(linenr=linenr, chr1=chr1, bp1=bp1_arng, chr2=chr2, bp2=bp2_arng,
                                          jun_path=jun_path, out_path=out_path,
                                          readname_filt_cmd=readname_filt_cmd, seq_filt_cmd=seq_filt_cmd)
                    f.write(cmd)
            os.chmod(script_path, 0o0755)
        return out_paths

    def __task(self, i_proc, out_path, errs, rcs):
        """Worker: run one collection script, recording its stderr and
        return code in the shared dicts under index *i_proc*."""
        cmd = f'bash {out_path}.sh'
        _, err, rc = self._run_cmd(cmd, 'collection', ignore_err=True)
        errs[i_proc] = err
        rcs[i_proc] = rc

    def __collect(self, breakinfo):
        """Collect data for Blat input.

        Generates the per-worker scripts and runs them: on Shirokane as a
        UGE array job, otherwise as local multiprocessing workers.  In the
        local path, exits the process with status 1 if any script fails.
        Returns the list of per-worker output paths.
        """
        # Create scripts
        out_paths = self.__create_script(breakinfo)
        n_parallels = len(out_paths)
        # Run scripts
        if self.params.on_shirokane:
            width = len(str(n_parallels))
            # Strip the numeric suffix to recover the common path prefix.
            coll_path = out_paths[0][:-width]
            cmd = '''\
#!/usr/local/bin/nosh
#$ -S /usr/local/bin/nosh
#$ -cwd
#$ -l s_vmem=4G,mem_req=4G
#$ -e {coll_path}.log
#$ -o {coll_path}.log
id=$(printf "%0{width}d" ${{SGE_TASK_ID}})
bash {coll_path}${{id}}.sh
'''.format(coll_path=coll_path, width=width)
            script_path = f'{coll_path}.sh'
            self._run_cmd_on_uge(cmd, script_path, n_parallels, 'collection_uge')
        else:
            # Manager dicts are shared across worker processes.
            manager = multiprocessing.Manager()
            errs = manager.dict()
            rcs = manager.dict()
            jobs = []
            for i in range(n_parallels):
                p = multiprocessing.Process(
                    target=self.__task,
                    args=(i, out_paths[i], errs, rcs))
                jobs.append(p)
                p.start()
            for job in jobs:
                job.join()
            # Check return codes
            has_err = False
            for i in range(n_parallels):
                if rcs[i] != 0:
                    print('[Error] Return code is not 0 at collection script')
                    print(f'err: {errs[i]}, rc: {rcs[i]}')
                    has_err = True
            if has_err:
                exit(1)
        return out_paths

    def __add_count_to(self, breakinfo):
        """Attach a 'cnt' key (number of collected reads) to each
        breakinfo dict.

        Parses the concatenated FASTA output whose headers look like
        ``>LINENR-COUNT_READNAME``; the largest COUNT seen for a fusion
        line is that line's read count (0 if the line produced no reads).
        NOTE(review): cnts is indexed with ``linenr - 2``, which assumes
        breakinfo[i] corresponds to fusion-file line i+2, i.e. that no rows
        were skipped by __get_breakinfo -- confirm.
        """
        coll_path = f'{self.params.work_dir}/{self.out_file}'
        cnts = [0] * len(breakinfo)
        with open(coll_path, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            prev_cnt = 0
            tgt_linenr = 2  # first line is header line
            for row in reader:
                sp = row[0].split('_')[0].split('-')  # row[0]=2-1_READNAME => sp=[2,1]
                cur_linenr = int(sp[0][1:])  # sp[0][0] = '>'
                cur_cnt = int(sp[1])
                if cur_linenr != tgt_linenr:
                    cnts[tgt_linenr - 2] = prev_cnt
                    tgt_linenr = cur_linenr
                prev_cnt = cur_cnt
                next(reader)  # sequence data
            cnts[tgt_linenr - 2] = prev_cnt
        for i, cnt in enumerate(cnts):
            breakinfo[i]['cnt'] = cnt

    def __concat(self, inp_paths):
        """Concatenate the per-worker outputs into ``../<out_file>``
        (the single collection file in the work dir)."""
        inp_files = ' '.join([os.path.basename(path) for path in inp_paths])
        cmd = '''\
#!/bin/bash
set -eu
cd {swork_dir}
cat {inp_files} > ../{out_file}
'''.format(swork_dir=self.params.swork_dir, inp_files=inp_files, out_file=self.out_file)
        self._run_cmd(cmd, 'cat_coll_files')

    @Timer('collection')
    def run(self):
        """Run the whole collection pipeline; return breakinfo dicts with
        their 'cnt' read counts attached."""
        self.__create_symlinks()
        breakinfo = self.__get_breakinfo()
        coll_out_paths = self.__collect(breakinfo)
        self.__concat(coll_out_paths)
        self.__add_count_to(breakinfo)
        return breakinfo
| [
"kinkalow90@gmail.com"
] | kinkalow90@gmail.com |
0d45c01ddfaa1d5ccdef1a57b1f65d272f41a670 | d4604dbbed7eecc47662e0690f13a2047b3a283f | /models/test.py | 697a84bef5b5e05a7c3dd7d154ea04177007ebcd | [] | no_license | iamswann/Vocabulary | 7757f89789e9484b123b7e13c3225423cf17d996 | 1338e8bcbfb69b80bbc8415e1f6394d0b9872d57 | refs/heads/main | 2023-03-31T04:17:24.843866 | 2021-04-09T06:04:55 | 2021-04-09T06:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class test(models.Model):
    """Odoo model: a vocabulary test holding quiz questions for one word.

    Field labels are Vietnamese user-facing strings and are left as-is;
    English glosses are given in the comments below.
    """
    _name = 'vocabulary.test'
    _description = 'Model kiểm tra'
    # Test name ('Tên' = "Name").
    name = fields.Char('Tên')
    # Quiz questions belonging to this test ('Câu hỏi' = "Question").
    quiz = fields.One2many('vocabulary.quiz', string='Câu hỏi', inverse_name='test_id')
    # Vocabulary word this test drills ('Từ vựng' = "Vocabulary").
    word_id = fields.Many2one('vocabulary.words', string='Từ vựng')
    # Computed number of questions ('Số câu hỏi' = "Number of questions").
    quiz_count = fields.Integer(compute='_count_quiz', string='Số câu hỏi')

    @api.depends('quiz')
    def _count_quiz(self):
        """Compute quiz_count as the number of linked quiz records."""
        for record in self:
            record.quiz_count = len(record.quiz)
"ngquan569@gmail.com"
] | ngquan569@gmail.com |
d5b6070866f6f4dc00662100e340c931bfb8608c | f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7 | /htdocs/submissions/d5b6070866f6f4dc00662100e340c931bfb8608c.py | a1b97dd53dd2bbaee95485fe137f502923b7d1af | [] | no_license | pycontest/pycontest.github.io | ed365ebafc5be5d610ff9d97001240289de697ad | 606015cad16170014c41e335b1f69dc86250fb24 | refs/heads/master | 2021-01-10T04:47:46.713713 | 2016-02-01T11:03:46 | 2016-02-01T11:03:46 | 50,828,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | seven_seg=lambda x:"\n".join("".join(' |'[b/4&1]+' _'[b&2]+' |'[b&1]for b in[i>>3*int(e)for e in x])for i in[306775170,1060861645,524130191])+'\n' | [
"info@pycontest.net"
] | info@pycontest.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.