blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d245c164d82570270462eb60a5a767d451d575f | 2bea6f65b6532589b05e64c4d87875f2521e3d08 | /Solver.py | be5231b21b1545281358020eb98e03d98fb2adbd | [] | no_license | JochemBruijninckx/supply-chain-analytics-2 | 21a64d47cbf4ed9531cb6a7010b45848547f45b0 | e3dd8c0c821f3debccba0afcd4f7e0b1d154f6c7 | refs/heads/master | 2023-08-09T03:34:37.971660 | 2021-03-28T14:00:20 | 2021-03-28T14:00:20 | 339,403,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,747 | py | import math
import copy
import time
from Model import Model
def solve(problem, settings=None, bounds=None):
    """Build the exact MIP model for ``problem``, solve it, and load the result.

    The solver output is read back into ``problem`` via ``read_solution`` and
    the (mutated) problem object is returned.
    """
    instance = problem.instance_name
    mip = Model(problem, settings, bounds=bounds)
    mip.write(instance)
    mip.solve(instance)
    # Pull the solver output back into the problem object.
    problem.read_solution(instance)
    return problem
def heuristic(problem, settings, create_initial_solution=True):
    """Four-step matheuristic for the network design problem.

    Step 1: solve (or load) a fully relaxed model as a starting solution.
    Step 2: try dropping *all* links below a decreasing capacity threshold.
    Step 3: greedily drop individual links while improvements are found.
    Step 4: re-solve with integer truck counts to obtain a feasible solution.

    Returns the deep copy of the original problem with the final feasible
    solution loaded into it; per-step wall-clock times are printed at the end.
    """
    time_used = []
    # Step 1 - Create or load initial solution.
    # --------------------------------------------------------------------------------------
    start_time = time.time()
    print()
    if create_initial_solution:
        print('Step 1 | Creating initial solution')
        print('-' * 70)
        # Create relaxed version of the model and solve it
        relaxed_model = Model(problem, {
            'all_links_open': True,
            'non_integer_trucks': True,
            'linear_backlog_approx': False,
            'perfect_delivery': True
        }, surpress_logs=settings['step_1']['surpress_gurobi'])
        relaxed_model.write(problem.instance_name + '_relaxed')
        relaxed_model.solve(problem.instance_name + '_relaxed', {
            'gap': settings['step_1']['epsilon']
        })
    else:
        print('Step 1 | Loading initial solution')
        print('-' * 70)
    # Load the solution into our problem object
    problem.read_solution(problem.instance_name + '_relaxed')
    problem.display()
    end_time = time.time()
    time_used.append(end_time - start_time)
    # Step 2 - Mass link dropping (all low capacity links are removed if improvement found)
    # --------------------------------------------------------------------------------------
    start_time = time.time()
    current_objective = problem.compute_objective()
    # Keep an untouched copy: step 2 restarts from it per threshold, and step 4
    # loads the final solution back into it.
    original_problem = copy.deepcopy(problem)
    # Try mass link dropping
    print()
    print('Step 2 | Mass link dropping (current objective', str(round(current_objective, 2)) + ')')
    print('-' * 70)
    start_capacity = settings['step_2']['start_capacity']
    capacity_step = settings['step_2']['capacity_step']
    current_capacity = start_capacity
    while current_capacity >= 0:
        step = round((start_capacity - current_capacity) / capacity_step)
        # Create alternative problem in which all low-capacity links are dropped
        alternative_problem = copy.deepcopy(original_problem)
        drop_links(alternative_problem, current_capacity)
        # Set lower bounds on the capacity of all remaining links equal to their current value
        v_bounds = get_v_bounds(alternative_problem, method='exact')
        # Construct and solve the alternative model
        alternative_model = Model(alternative_problem, {
            'non_integer_trucks': True,
            'linear_backlog_approx': True
        }, {'v': v_bounds}, surpress_logs=True, parameters=settings['model_parameters'])
        alternative_objective = alternative_model.solve(problem.instance_name + '_alternative', {
            'bound': current_objective
        })
        # If the solution to the alternative model is an improvement, use it as new starting point (skip to Step 3)
        if alternative_objective < current_objective:
            print('(' + str(step + 1) + '/' + str(round(start_capacity / capacity_step) + 1) + ')',
                  '| Found improvement by dropping all links with capacity <', current_capacity)
            current_objective = alternative_objective
            problem = alternative_problem
            problem.read_solution(problem.instance_name + '_alternative')
            print('New objective |', round(current_objective, 2))
            break
        else:
            print('(' + str(step + 1) + '/' + str(round(start_capacity / capacity_step) + 1) + ')',
                  '| Rejected dropping all links with capacity <', current_capacity)
        current_capacity -= capacity_step
    end_time = time.time()
    time_used.append(end_time - start_time)
    problem.display()
    # Step 3 - Dropping individual links
    # --------------------------------------------------------------------------------------
    start_time = time.time()
    found_improvement = True
    iteration = 0
    print()
    print('Step 3 | Dropping individual links (current objective', str(round(current_objective, 2)) + ')')
    print('-' * 70)
    # Links whose removal was rejected; skipped until topology near them changes.
    rejected_links = set()
    while found_improvement:
        iteration += 1
        found_improvement = False
        print('Iteration', iteration, '|')
        # Initialize best link/problem for this iteration
        best_dropped_link = None
        start_objective = current_objective
        alternative_problem = copy.deepcopy(problem)
        # Candidates sorted by cost per unit shipped, most expensive first.
        sorted_links = get_utilization_costs(alternative_problem)
        for (link_index, dropped_link) in enumerate(sorted_links):
            rejection_reason = ''
            if dropped_link in rejected_links:
                rejection_reason = '(Does not need to be reevaluated)'
                alternative_objective = math.inf
            else:
                # Construct a v_bounds object that will limit our allowed choices of capacity
                v_bounds = get_v_bounds(alternative_problem, method='exact')
                v_bounds[dropped_link] = {'lb': 0, 'ub': 0}
                alternative_links = get_alternative_links(alternative_problem, dropped_link[1], dropped_link)
                # If this link is our only link to a customer, reject dropping it by default
                if alternative_links == [] and dropped_link[1] in alternative_problem.C:
                    rejection_reason = '(Only route to customer)'
                    alternative_objective = math.inf
                else:
                    # Alternative routes may absorb the dropped flow, so free
                    # their capacity upper bounds.
                    for alternative_link in alternative_links:
                        v_bounds[alternative_link].pop('ub')
                    # Construct alternative model using the previously constructed v_bounds and solve it
                    alternative_model = Model(alternative_problem, {
                        'non_integer_trucks': True,
                        'linear_backlog_approx': True
                    }, {'v': v_bounds}, surpress_logs=True, parameters=settings['model_parameters'])
                    alternative_objective = alternative_model.solve(problem.instance_name + '_alternative', {
                        'bound': start_objective
                    })
            # Check if the alternative capacity procurement leads to an objective improvement
            if alternative_objective < start_objective:
                # Dropping this link is an improvement compared to last iteration
                found_improvement = True
                if alternative_objective < current_objective:
                    # Dropping this link is the best improvement so far
                    current_objective = alternative_objective
                    best_dropped_link = dropped_link
                    problem.read_solution(problem.instance_name + '_alternative')
                    # If we are going to check the full list, simply note that this is the best so far
                    if settings['step_3']['check_full_list']:
                        print('(' + str(link_index + 1) + '/' + str(len(sorted_links)) + ')',
                              '| Current best improvement by dropping link', dropped_link,
                              round(alternative_objective, 2))
                    # If we run a greedy approach, immediately break to end this iteration
                    else:
                        print('(' + str(link_index + 1) + '/' + str(len(sorted_links)) + ')',
                              '| Found improvement by dropping link', dropped_link)
                        break
                else:
                    # Dropping this link is an improvement, but not the best one in this iteration
                    print('(' + str(link_index + 1) + '/' + str(len(sorted_links)) + ')',
                          '| Found improvement by dropping link', dropped_link, round(alternative_objective, 2))
            else:
                # Dropping this link is not an improvement compared to last iteration, it is therefore rejected.
                print('(' + str(link_index + 1) + '/' + str(len(sorted_links)) + ')',
                      '| Rejected dropping link', dropped_link, rejection_reason)
                # Store the rejected link
                rejected_links.add(dropped_link)
        if best_dropped_link is not None:
            print('Dropped link |', best_dropped_link)
            print('New objective |', round(current_objective, 2))
            print('-' * 70)
            # Drop selected link from problem
            drop_link(problem, best_dropped_link)
            # Remove links from rejected set that we now want to re-evaluate
            connected_links = set()
            connected_links = connected_links.union(get_connected_links(problem, best_dropped_link[0])[1])
            connected_links = connected_links.union(get_connected_links(problem, best_dropped_link[1])[1])
            rejected_links = rejected_links - connected_links
    end_time = time.time()
    time_used.append(end_time - start_time)
    problem.display()
    # Step 4 - Converting to integer solution
    # --------------------------------------------------------------------------------------
    start_time = time.time()
    print()
    print('Step 4 | Converting to integer solution, finalizing operational decisions')
    print('-' * 70)
    # Construct bounds to be used in reduced problem
    bounds = {
        'v': get_v_bounds(problem, method='integer_round_up')
    }
    # Create reduced, non-relaxed model
    reduced_model = Model(problem, {
        'linear_backlog_approx': True
    }, bounds=bounds, surpress_logs=settings['step_4']['surpress_gurobi'], parameters=settings['model_parameters'])
    reduced_model.solve(problem.instance_name, {
        'gap': settings['step_4']['epsilon']
    })
    # Load the feasible solution into our problem object
    original_problem.read_solution(problem.instance_name)
    end_time = time.time()
    time_used.append(end_time - start_time)
    # Log used time
    print('Time overview:')
    print('-' * 70)
    for i, t in enumerate(time_used):
        print('Time for step', i + 1, '| Time spent:', str(round(t, 2)) + 's')
    print('-' * 70)
    print('Total time |', str(round(sum(time_used), 2)) + 's')
    print('-' * 70)
    return original_problem
def drop_link(problem, link):
    """Remove ``link`` from the problem's link set and every derived index set."""
    origin, destination = link[0], link[1]
    problem.links.remove(link)
    # Strip the link from the per-period and per-period/per-product index sets.
    for t in problem.T:
        problem.link_time.remove((origin, destination, t))
        for p in problem.P:
            problem.link_product_time.remove((origin, destination, p, t))
def drop_links(problem, maximum_capacity=0.0):
    """Drop every link whose solved capacity is at most ``maximum_capacity``.

    Returns the list of links that were removed.
    """
    capacities = problem.solution['v']
    dropped = [link for link in problem.links if capacities[link] <= maximum_capacity]
    # Purge the dropped links from the link set and both derived index sets.
    for (i, j) in dropped:
        problem.links.remove((i, j))
        for t in problem.T:
            problem.link_time.remove((i, j, t))
            for p in problem.P:
                problem.link_product_time.remove((i, j, p, t))
    return dropped
def get_v_bounds(problem, method='integer'):
    """Build per-link capacity bounds from the current solution.

    Supported methods:
      * ``all_zero``           -- fix every link capacity to zero
      * ``exact``              -- fix capacities at their (possibly fractional) solved values
      * ``integer``            -- bracket each capacity between its floor and ceiling
      * ``exact_lower_bounds`` -- only lower-bound capacities at their solved values
      * ``integer_round_up``   -- fix capacities at their solved values rounded up
    An unrecognised method yields an empty bounds dict.
    """
    v_bounds = {}
    for link in problem.links:
        if method == 'all_zero':
            v_bounds[link] = {'lb': 0, 'ub': 0}
        elif method == 'exact':
            v = problem.solution['v'][link]
            v_bounds[link] = {'lb': v, 'ub': v}
        elif method == 'integer':
            v = problem.solution['v'][link]
            v_bounds[link] = {'lb': math.floor(v), 'ub': math.ceil(v)}
        elif method == 'exact_lower_bounds':
            v_bounds[link] = {'lb': problem.solution['v'][link]}
        elif method == 'integer_round_up':
            rounded = math.ceil(problem.solution['v'][link])
            v_bounds[link] = {'lb': rounded, 'ub': rounded}
    return v_bounds
def get_utilization_costs(problem):
    """Rank open links by cost per unit of shipped volume (most expensive first).

    Links absent from the solution or with zero solved capacity are skipped; a
    link that carries no flow at all gets an infinite cost so it sorts first.
    """
    costs = {}
    for link in problem.links:
        if link not in problem.solution['v']:
            continue
        capacity = problem.solution['v'][link]
        if capacity <= 0:
            continue
        total_cost = problem.opening_cost[link] + problem.capacity_cost[link] * capacity
        shipped = sum(problem.product_volume[p] * problem.solution['x'][link[0], link[1], p, str(t)]
                      for p in problem.P for t in problem.T)
        costs[link] = math.inf if shipped == 0 else total_cost / shipped
    # sorted() is stable, so ties keep their original link order.
    return dict(sorted(costs.items(), key=lambda item: item[1], reverse=True))
def get_alternative_links(problem, destination, dropped_link, alternative_links=None):
    """Collect links (other than ``dropped_link``) that can still feed ``destination``.

    Recursively walks upstream: every supplier/depot with an open,
    positive-capacity link into ``destination`` is recorded, and the inbound
    links of each newly found source are explored in turn.  Returns the
    accumulated list of usable links.
    """
    found = [] if alternative_links is None else alternative_links
    fresh = []
    for source in problem.S_and_D:
        candidate = (source, destination)
        if candidate in found:
            continue
        if candidate in problem.links and problem.solution['v'][candidate] > 0 \
                and candidate != dropped_link:
            fresh.append(candidate)
    found += fresh
    # Explore the sources of the newly found links one level further upstream.
    for (source, _) in fresh:
        found = get_alternative_links(problem, source, dropped_link, found)
    return found
def get_connected_links(problem, start_node, connected_nodes=None, connected_links=None):
    """Walk the network from ``start_node`` along links present in the solution.

    Returns ``(nodes, links)``: the set of reachable nodes and the set of
    solution links (in either direction) used to reach them.  The search only
    recurses through depot nodes.
    """
    connected_links = set() if connected_links is None else connected_links
    connected_nodes = set() if connected_nodes is None else connected_nodes
    connected_nodes.add(start_node)
    for node in problem.S + problem.D + problem.C:
        if node in connected_nodes or node == start_node:
            continue
        # A connection may exist in either direction; record whichever appears.
        for link in ((start_node, node), (node, start_node)):
            if link not in problem.solution['v']:
                continue
            connected_nodes.add(node)
            connected_links.add(link)
            # Only depots forward flow, so only they are expanded further.
            if node in problem.D:
                extra_nodes, extra_links = get_connected_links(problem, node, connected_nodes, connected_links)
                connected_nodes = connected_nodes.union(extra_nodes)
                connected_links = connected_links.union(extra_links)
    return connected_nodes, connected_links
| [
"jochembruijninckx@gmail.com"
] | jochembruijninckx@gmail.com |
55b461faccac5bf670cf7329c54c33c214927f2a | fa08e70e023946c1e818017beb46d695e0553c49 | /utils/dataset.py | 7f84b3198c21081c7d19ac3b394abeac7316f4fa | [] | no_license | rhythmcao/slu-dual-learning | f9d5b2403a358aed1d494134252eef764037fe92 | daee32d9cc03610bb2e2ed06e8259d36e923cfcc | refs/heads/master | 2023-04-11T03:57:10.799918 | 2021-04-01T06:27:47 | 2021-04-01T06:27:47 | 352,605,195 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | #coding=utf8
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.constants import DATAPATH
from utils.example import *
import numpy as np
def read_dataset(dataset='atis', choice='train'):
    """Load the SLU dataset split as a list of ``Example`` objects.

    Each non-empty file line has the form ``word:BIO word:BIO ... <=> intent``;
    words and their BIO slot tags are split on the *last* colon, empty words
    become the placeholder ``'_'``.
    """
    assert choice in ['train', 'valid', 'test']
    assert dataset in ['atis', 'snips']
    filepath = DATAPATH(dataset, choice)

    def read_dataset_from_filepath(filepath):
        # Parse one annotated file into Example objects.
        dataset = []
        with open(filepath, 'r') as infile:
            for line in infile:
                line = line.strip()
                if line == '': continue
                sentence, intents = line.split(' <=> ')
                # Keep only tokens containing ':' and split each into its parts.
                chunks = map(lambda item: item.split(':'), filter(lambda item: ':' in item, sentence.split(' ')))
                # Rejoin everything before the last ':' as the word (words may
                # themselves contain colons); the last part is the BIO tag.
                # NOTE(review): zip(*...) raises if a sentence has no ':' token
                # at all -- presumably that never happens in these files.
                words, bios = zip(*map(lambda item: (':'.join(item[:-1]), item[-1]), chunks))
                words = map(lambda item: '_' if item == '' else item, words)
                dataset.append(Example(words, bios, intents))
        return dataset
    return read_dataset_from_filepath(filepath)
def split_dataset(dataset, split_ratio=1.0):
    """Deterministically split ``dataset`` into two disjoint parts.

    A fixed seed (999) drives the shuffle, so repeated calls produce the same
    partition; the global numpy RNG state is saved and restored so callers are
    unaffected.  ``len(first)`` equals ``int(len(dataset) * split_ratio)``.
    """
    assert 0. <= split_ratio <= 1.0
    order = np.arange(len(dataset))
    # Shuffle under the fixed seed without disturbing the caller's RNG state.
    saved_state = np.random.get_state()
    np.random.seed(999)
    np.random.shuffle(order)
    np.random.set_state(saved_state)
    cut = int(len(dataset) * split_ratio)
    first = [dataset[i] for i in order[:cut]]
    second = [dataset[i] for i in order[cut:]]
    return first, second
"ruishengcao@gmail.com"
] | ruishengcao@gmail.com |
23397448d1fe6599e575a43d4155512a93975142 | d9f6894acb9bc7f86e218fdec9f55d131889f4c3 | /env/bin/gunicorn_paster | d6d38ef658a6cd5087a558e75a0a1ab97881df4e | [] | no_license | Marckhz/easycredit | 9f2fbc678c14a2fb6b2f972a6041b5aa6bf90a3b | bc67ad796ee7d3b5c1a93e0eaa4a907211ad9644 | refs/heads/master | 2020-03-26T20:39:14.767302 | 2018-08-20T00:35:49 | 2018-08-20T00:35:49 | 145,337,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/home/marco/ConCredito/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix setuptools appends on some
    # platforms so argv[0] shows the bare command name, then hand off to
    # gunicorn's Paste-app runner, propagating its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"marcohdes94i@gmail.com"
] | marcohdes94i@gmail.com | |
6a07e41eadd5843c9f9ea52799678337cdcbf8b2 | 891ee5786d43c148674a1a691b0f8203f8d1b007 | /day02/solution.py | ca9f8b09f3336af1c4de57bf10c6b4f3be7b3273 | [] | no_license | bwdvolde/advent-of-code-2020 | 537d665d7bac766c75978a8faca337f2978fbc0d | afca0cd30d4c289e64d48fdd5263073f47a0b882 | refs/heads/main | 2023-02-07T07:03:22.117138 | 2020-12-25T08:55:25 | 2020-12-25T08:55:25 | 316,995,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import re
from read_file.read_file import read_file
class Entry:
    """One parsed password-policy line: two bounds, a letter, and the password."""

    def __init__(self, lower_bound, upper_bound, letter, password):
        # Bounds arrive as strings from the regex capture; normalise once here.
        self.lower_bound = int(lower_bound)
        self.upper_bound = int(upper_bound)
        self.letter = letter
        self.password = password
def is_valid_part_1(entry):
    """Old policy: the letter must occur between lower_bound and upper_bound times."""
    occurrences = entry.password.count(entry.letter)
    return entry.lower_bound <= occurrences <= entry.upper_bound
def is_valid_part_2(entry):
    """New policy: exactly one of the two 1-based positions holds the letter (XOR)."""
    at_first = entry.password[entry.lower_bound - 1] == entry.letter
    at_second = entry.password[entry.upper_bound - 1] == entry.letter
    return at_first != at_second
if __name__ == '__main__':
    # Each input line looks like "1-3 a: abcde"; blank lines are skipped.
    lines = read_file("input.txt")
    entries = []
    for line in lines:
        if line == "":
            continue
        match = re.match(r"(.*)-(.*) (.): (.*)", line)
        entries.append(Entry(*match.groups()))
    # bools count as 0/1, so sum() tallies the valid entries directly.
    part_1_answer = sum(is_valid_part_1(entry) for entry in entries)
    print(f"Part 1: {part_1_answer}")
    part_2_answer = sum(is_valid_part_2(entry) for entry in entries)
    print(f"Part 2: {part_2_answer}")
| [
"devolder.billie96@gmail.com"
] | devolder.billie96@gmail.com |
d0877718d33c3a193b412968aad59094e9d5eef6 | 4e6113103b89e55a0b44e8a6a7a638d80a75c8cf | /build/lib/riot_api/api/post/tournament_v4.py | 53af310cde18320b7721a8d2763fe2c6d696ed07 | [
"MIT"
] | permissive | Alex-Weatherhead/riot_api | 236eca3586ab4f3ca74cb61f46a6e9b5207c0dd6 | 2d589f57cd46e0f7c54de29245078c730acd710f | refs/heads/master | 2020-05-01T17:08:34.087151 | 2019-04-03T01:53:22 | 2019-04-03T01:53:22 | 177,592,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | from . import endpoints
from ...api import _request_executor
def codes (service_platform,
           api_key,
           tournament_id,
           spectator_type,
           team_size,
           pick_type,
           map_type,
           allowed_summoner_ids=None,
           count=None,
           metadata=None):
    """ Create tournament codes for the given tournament.
    References:
        https://developer.riotgames.com/regional-endpoints.html
        https://developer.riotgames.com/api-methods/#tournament-v4/POST_createTournamentCode
    Arguments:
        service_platform (str): The service platform that the request should be issued to.
        api_key (str): The client's api key.
        tournament_id (int): The tournament ID obtained during tournament creation.
        spectator_type (str): The spectator type. Supported values include "NONE", "LOBBYONLY", and "ALL".
        team_size (int): The team size of the game. Supported values are 1-5.
        pick_type (str): The pick type. Supported values include "BLIND_PICK", "DRAFT_MODE", "ALL_RANDOM", and "TOURNAMENT_DRAFT".
        map_type (str): The map type. Supported values include "SUMMONERS_RIFT", "TWISTED_TREELINE", and "HOWLING_ABYSS".
        allowed_summoner_ids (list): Optional list of encrypted summonerIds eligible to join the lobby. (default None)
        count (int): The number of codes to create (maximum 1000). (default None)
        metadata (str): Used to denote any custom information about the game. (default None)
    Returns:
        dict: the details of the response to the issued http request.
    """
    header_parameters = {
        "X-Riot-Token": api_key
    }
    query_parameters = {
        "count": count,
        "tournamentId": tournament_id
    }
    body_parameters = {
        "spectatorType": spectator_type,
        "teamSize": team_size,
        "pickType": pick_type,
        "allowedSummonerIds": allowed_summoner_ids,
        "mapType": map_type,
        "metadata": metadata
    }
    url = endpoints.v4["host"]["endpoint"].format(service_platform)
    path = endpoints.v4["tournament"]["codes"]["endpoint"]
    return _request_executor.post("".join([url, path]),
                                  header_parameters=header_parameters,
                                  query_parameters=query_parameters,
                                  body_parameters=body_parameters)
def providers (service_platform,
               api_key,
               region,
               url):
    """ Creates a tournament provider and returns its ID.
    References:
        https://developer.riotgames.com/regional-endpoints.html
        https://developer.riotgames.com/api-methods/#tournament-v4/POST_registerProviderData
    Arguments:
        service_platform (str): The service platform that the request should be issued to.
        api_key (str): The client's api key.
        region (str): The region in which the provider will be running tournaments. Supported values include "BR", "EUNE", "EUW", "JP", "LAN", "LAS", "NA", "OCE", "PBE", "RU", and "TR".
        url (str): The provider's callback URL to which tournament game results in this region should be posted.
    Returns:
        dict: the details of the response to the issued http request.
    """
    header_parameters = {
        "X-Riot-Token": api_key
    }
    body_parameters = {
        "region": region,
        "url": url
    }
    # Use a distinct name for the request URL so the `url` callback parameter
    # is not shadowed (the original rebound `url` after building the body).
    base_url = endpoints.v4["host"]["endpoint"].format(service_platform)
    # NOTE(review): this path resolves the tournament *codes* endpoint, but per
    # the Riot API provider registration posts to /lol/tournament/v4/providers
    # -- confirm the intended key in the local `endpoints` module.
    path = endpoints.v4["tournament"]["codes"]["endpoint"]
    return _request_executor.post("".join([base_url, path]),
                                  header_parameters=header_parameters,
                                  body_parameters=body_parameters)
def tournaments (service_platform,
                 api_key,
                 provider_id,
                 name=None):
    """ Creates a tournament and returns its ID.
    References:
        https://developer.riotgames.com/regional-endpoints.html
        https://developer.riotgames.com/api-methods/#tournament-v4/POST_registerTournament
    Arguments:
        service_platform (str): The service platform that the request should be issued to.
        api_key (str): The client's api key.
        provider_id (int): The provider ID to specify the regional registered provider data to associate this tournament.
        name (str): Optional name of the tournament. (default None)
    Returns:
        dict: the details of the response to the issued http request.
    """
    header_parameters = {
        "X-Riot-Token": api_key
    }
    # NOTE(review): the Riot tournament registration body documents the field
    # as "providerId" (camelCase), not "provider_id" -- confirm before use.
    body_parameters = {
        "name": name,
        "provider_id": provider_id
    }
    url = endpoints.v4["host"]["endpoint"].format(service_platform)
    # NOTE(review): this resolves the tournament-*stub* "codes" endpoint, yet
    # this module is tournament-v4 and the method registers a tournament
    # (/lol/tournament/v4/tournaments) -- looks copy-pasted; confirm the key.
    path = endpoints.v4["tournament-stub"]["codes"]["endpoint"]
    return _request_executor.post("".join([url, path]),
                                  header_parameters=header_parameters,
                                  body_parameters=body_parameters)
"22604743+Alex-Weatherhead@users.noreply.github.com"
] | 22604743+Alex-Weatherhead@users.noreply.github.com |
9b9fe3aff8048b6c6c4093f1e775cfafe5aeee20 | b9222036fb8693c1c201e7de134e6ac9fdd78521 | /object_size.py | 2f4d51352b9259d75af5530a91cadc9ed027f2da | [] | no_license | RenYates/TissueScanning | e6d8f5be3705c18cb0fff92195806a0bf3716f7e | eb48577715d5647515b3b9ef201465bf4c8cf110 | refs/heads/master | 2021-06-29T00:45:19.241342 | 2020-11-09T23:03:09 | 2020-11-09T23:03:09 | 187,892,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,281 | py | # import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import imutils
import cv2
def midpoint(ptA, ptB):
    """Return the (x, y) midpoint between two 2-D points."""
    mid_x = (ptA[0] + ptB[0]) / 2
    mid_y = (ptA[1] + ptB[1]) / 2
    return (mid_x, mid_y)
def object_size(image, cnts, width):
    """Derive the pixels-per-unit scale from the leftmost sizeable contour.

    Arguments:
        image: unused; kept for interface compatibility with earlier revisions
            that drew measurement overlays on the image.
        cnts: contours detected in the image.
        width: real-world width of the reference object, which is assumed to be
            the leftmost contour.  (Unit depends on the caller -- earlier
            comments mention both inches and mm; confirm at the call site.)

    Returns:
        The pixels-per-unit ratio, or None if no contour is large enough.
    """
    # Sort contours left-to-right so the reference object is visited first.
    (cnts, _) = contours.sort_contours(cnts)
    pixelsPerMetric = None
    for c in cnts:
        # Ignore specks that are too small to be real objects.
        if cv2.contourArea(c) < 100:
            continue
        # Fit a rotated bounding box and order its corners
        # top-left, top-right, bottom-right, bottom-left.
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype='int')
        box = perspective.order_points(box)
        (tl, tr, br, bl) = box
        # Horizontal extent: distance between the midpoints of the two
        # vertical edges of the box.
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        # The first qualifying contour calibrates the scale; later contours
        # contributed nothing in the original code, so stop here.
        pixelsPerMetric = dB / width
        break
    return pixelsPerMetric
#cv2.destroyAllWindows() | [
"15ly1@queensu.ca"
] | 15ly1@queensu.ca |
38261d5119fefbf26f954891b562e86666dcca04 | 13c80bc1ca61cb8c6b8cf4e14e736def5a8e9157 | /dawanda-exporter.py | 1178ad2d47b16c1912ae08ad0a460450ba428750 | [
"MIT"
] | permissive | youngage/dawanda-exporter | 6fb5092d707f07b4b9be35c8c40b4c3129c1e317 | e838fbaf7e94d2fe17248ce2b0515cd70ba0f7d6 | refs/heads/master | 2020-03-26T01:47:13.583337 | 2018-08-11T12:37:24 | 2018-08-11T12:37:24 | 144,382,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,777 | py | #!/usr/bin/env python3
import argparse
from getpass import getpass
import json
import logging
import sys
from tempfile import mkstemp
from time import sleep, strftime
import traceback
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
try:
from bs4 import BeautifulSoup
except ImportError:
print('MISSING BeautifulSoup LIBRARY, TRYING TO INSTALL')
from pip._internal import main as pip_main
pip_main(['install', 'bs4', 'lxml'])
from bs4 import BeautifulSoup
try:
import requests
except ImportError:
print('MISSING requests LIBRARY, TRYING TO INSTALL')
from pip._internal import main as pip_main
pip_main(['install', 'requests'])
import requests
try:
import colorama
except ImportError:
print('MISSING colorama LIBRARY, TRYING TO INSTALL')
from pip._internal import main as pip_main
pip_main(['install', 'colorama'])
import colorama
finally:
colorama.init(autoreset=True)
DAWANDA_BASEURL = 'https://de.dawanda.com'
def iterate_urls(session, urls, handler, results):
    """Crawl ``urls`` (LIFO order), feeding each page's HTML to ``handler``.

    ``handler(text, results)`` parses one page into ``results`` and may return
    additional URLs to enqueue.  Relative URLs ("/...") are resolved against
    DAWANDA_BASEURL, each URL is processed at most once, and a failing handler
    dumps the traceback plus page HTML to a temp file instead of aborting the
    crawl.  URLs answering with a non-200 status are skipped (and not marked
    as seen).
    """
    assert isinstance(urls, list)
    seen_urls = set()  # set instead of list: O(1) duplicate checks
    while len(urls) > 0:
        url = urls.pop()  # LIFO: newly discovered pages are crawled first
        if url.startswith('/'):
            url = DAWANDA_BASEURL + url
        # silently ignore duplicate URLs
        if url in seen_urls:
            continue
        print('\033[K ', url[len(DAWANDA_BASEURL):], ' ... ', end='\r', flush=True, sep='')
        req = session.get(url)
        if req.status_code != 200:
            print('Got error', req.status_code, 'loading', url)
            continue
        seen_urls.add(url)
        try:
            more_urls = handler(req.text, results) or []
            urls.extend(more_urls)
        except Exception as err:
            # Preserve the evidence for debugging rather than crashing the run.
            datafilefd, datafilename = mkstemp(prefix='dawanda-', suffix='-page.txt', text=True)
            with open(datafilefd, 'w') as datafile:
                print(traceback.format_exc(), file=datafile)
                print(req.text, file=datafile)
            print('Error parsing URL ({err}), saved debug data to {f}'.format(err=err, f=datafilename))
def parse_product_list(text, products):
    """Parse one seller product-list page into the ``products`` dict.

    Each table row yields an entry keyed by product id with id, sku, title and
    price; returns the "next page" pagination URLs (empty list when the page
    has no product table or no further pages).
    """
    req_page = BeautifulSoup(text, 'lxml')
    prod_table = req_page.find('table', id='product_table')
    if prod_table is None:
        return []
    prod_table_body = prod_table.find('tbody')
    for row in prod_table_body.find_all('tr'):
        cells = row.find_all('td')
        # Hidden checkbox input in the first cell carries the product id.
        prod_id = cells[0].find('input')['value']
        prod_title = cells[2].find('a')
        # Unlinked products have no <a>; fall back to the cell's first text.
        prod_title = prod_title.string if prod_title is not None else list(cells[2].stripped_strings)[0]
        prod_sku = cells[2].select_one('div.product-sku')
        prod_sku = prod_sku.string if prod_sku else None
        prod_price = row.select_one('td span.money')
        if prod_id in products:
            print('WARNING: product {id} encountered twice'.format(id=prod_id))
        products[prod_id] = {
            'id': prod_id,
            'sku': prod_sku,
            'title': prod_title,
            'price': {'amount': float(prod_price.select_one('span.amount').string), 'unit': prod_price.select_one('abbr.unit').string}
        }
    return [link['href'] for link in req_page.select('div.pagination > a.next_page')]
def parse_product(text):
    """Extract the embedded JSON product blob from a product edit page."""
    page = BeautifulSoup(text, 'lxml')
    data_tag = page.select_one('script.product_data')
    return json.loads(data_tag.string)
def get_product_list(session):
    """Fetch the seller's products in every lifecycle state, keyed by product id."""
    products = {}
    # Same state order as before; iterate_urls pops from the end of the list.
    states = ('draft', 'paused', 'past', 'active')
    urls = [DAWANDA_BASEURL + '/seller/products?product_search[state]=' + state
            for state in states]
    iterate_urls(session, urls, parse_product_list, products)
    return products
def get_product_details(session, product_id):
    """Load the edit page of one product and return its embedded JSON data."""
    edit_url = '{base}/seller/products/{id}/edit'.format(base=DAWANDA_BASEURL, id=product_id)
    response = session.get(edit_url)
    response.raise_for_status()
    return parse_product(response.text)
def parse_ratings(text, ratings):
    """Parse one feedback page into ``ratings``; return pagination URLs, if any."""
    page = BeautifulSoup(text, 'lxml')
    table = page.find('table', id='feedback')
    assert table is not None
    # The first <tr> is the header row, so start from the second one.
    for row in table.find_all('tr')[1:]:
        cells = row.find_all('td')
        ratings.append({
            'stars': len(cells[0].find_all('img')),  # one <img> per star
            'text': list(cells[1].stripped_strings),
            'author': cells[2].string,
            'date': cells[3].string,
        })
    # Hand the "next page" link(s) back to the crawler.
    return [link['href'] for link in page.select('div.pagination > a.next_page')]
def get_ratings(session, username):
    """Collect every feedback/rating entry left for the given user."""
    ratings = []
    start_url = DAWANDA_BASEURL + '/user/feedback/' + username
    iterate_urls(session, [start_url], parse_ratings, ratings)
    return ratings
def main():
    """Entry point: log in to Dawanda, scrape profile/ratings/products/images
    into a timestamped ZIP archive, then log out."""
    # NOTE(review): `data` is never used anywhere in this function.
    data = {}
    session = requests.Session()
    parser = argparse.ArgumentParser(description='Dawanda Data Extractor')
    parser.add_argument('--exit-timeout', type=int, default=5, help='wait given number of seconds before exiting (default: %(default)ds)')
    parser.add_argument('--session', help='Dawanda-Session ID to use, don\'t ask for credentials or log in at all')
    parser.add_argument('--output', '-o', default=None, help='ZIP file returning all data, defaults to "dawanda_YYYY-MM-DD_HH-MM_SS.zip"')
    parser.add_argument('--debug', action='store_true', help='show HTTP requests and replies')
    parser.add_argument('--skip-products', action='store_true', help='do not fetch the products (nor their images)')
    parser.add_argument('--skip-images', action='store_true', help='do not fetch product images')
    parser.add_argument('--skip-ratings', action='store_true', help='do not fetch ratings')
    args = parser.parse_args()
    logging.basicConfig()
    if args.debug:
        # Turn on wire-level logging for http.client and urllib3.
        from http import client as http_client
        http_client.HTTPConnection.debuglevel = 1
        logging.getLogger().setLevel(logging.DEBUG)
        requests_log = logging.getLogger("requests.packages.urllib3")
        requests_log.setLevel(logging.DEBUG)
        requests_log.propagate = True
    if args.session is not None:
        # Reuse an existing session cookie instead of logging in.
        session.cookies.set('_dawanda_session', args.session, domain='.dawanda.com', path='/')
    else:
        # we need the user to log in
        dw_user = input('DaWanda user: ')
        dw_password = getpass('DaWanda password (not shown): ')
        login_req = session.post(DAWANDA_BASEURL + '/core/sessions', data={'user[email_or_username]': dw_user, 'user[password]': dw_password, 'user[remember_me]': 'true'})
        # The login endpoint answers 201 Created on success.
        if login_req.status_code != 201:
            print('LOGIN FAILED.', file=sys.stderr)
            sleep(args.exit_timeout)
            sys.exit(1)
    output_filename = args.output or strftime('dawanda_%Y-%m-%d_%H-%M-%S.zip')
    print('[*] output:', output_filename)
    output = ZipFile(output_filename, 'w')
    print('[*] fetching profile ... ', end='')
    profile = session.get(DAWANDA_BASEURL + '/current_user/profile').json()
    output.writestr('profile.json', json.dumps(profile, indent=2), compress_type=ZIP_DEFLATED)
    if not profile.get('logged_in', False):
        print('NOT LOGGED IN')
        output.close()
        sleep(args.exit_timeout)
        sys.exit(1)
    print(profile.get('username'))
    # '\033[K' is the ANSI "erase to end of line" sequence, used together
    # with end='\r' below to draw in-place progress lines.
    if not args.skip_ratings:
        print('\033[K[*] fetching ratings')
        ratings = get_ratings(session, profile['username'])
        output.writestr('ratings.json', json.dumps(ratings, indent=2), compress_type=ZIP_DEFLATED)
        print('\033[K got', len(ratings))
    if not args.skip_products:
        print('\033[K[*] fetching products')
        products = get_product_list(session)
        output.writestr('productlist.json', json.dumps(products, indent=2), compress_type=ZIP_DEFLATED)
        idx = 0
        total = len(products)
        print('\033[K got', total)
        for prod_id, product in products.items():
            print('\033[K fetching details {idx}/{count}: {id}'.format(idx=idx+1, count=total, id=prod_id), end='\r', flush=True)
            details = get_product_details(session, prod_id)
            product.update(details)
            idx += 1
        output.writestr('products.json', json.dumps(products, indent=2), compress_type=ZIP_DEFLATED)
        if not args.skip_images:
            idx = 0
            total = sum(len(prod.get('product_images_attributes', [])) for prod in products.values())
            for product_id, product in products.items():
                for img in product.get('product_images_attributes', []):
                    img_id = img.get('id') or img.get('guid')
                    print('\033[K... fetching images {idx}/{count}: {prod_id} / {img_id}'.format(idx=idx+1, count=total, prod_id=product_id, img_id=img_id), end='\r', flush=True)
                    img_data = session.get(img.get('url'))
                    # NOTE(review): ZipFile.open(..., 'w') requires Python 3.6+.
                    with output.open('product_images/' + str(img_id) + '.' + img.get('extension', 'dat').lower(), 'w') as f:
                        f.write(img_data.content)
                    idx += 1
    output.close()
    if args.session is None:
        # Only log out sessions we created ourselves.
        print('\033[K[*] Logging out.')
        session.get(DAWANDA_BASEURL + '/account/logout')
    print('\033[K[+] done', end='')
    if not args.skip_products:
        print(' [', len(products), ' products]', sep='', end='')
    if not args.skip_ratings:
        print(' [', len(ratings), ' ratings]', sep='', end='')
    print()
    sleep(args.exit_timeout)
if __name__ == '__main__':
    main()
| [
"helge.jung@youngage.eu"
] | helge.jung@youngage.eu |
31721c0fd2e68f0c38ec1c14db4ce512bc605cdb | 0627c9e2d7dc9ec452134f18be3e2f92b9a1a40e | /zmian_na_04.01.2021/IO_project/IO_project/settings.py | 7de6d7c624542db80195ae59451b9fe6dc97a731 | [] | no_license | IOSebastianiSzymon/IO | 576f0818960393a736984e0014519ea2b796f5a9 | bc707435f6e0244e8f2fdc70ade2e11521abf48b | refs/heads/main | 2023-02-11T20:02:34.500260 | 2021-01-04T13:09:33 | 2021-01-04T13:09:33 | 303,470,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,479 | py | """
Django settings for IO_project project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'fg(fqoxy^bxq)lxpio_yziz*0tdieyx+3u@rsw3vzy$u=l7si='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Extra locations searched by the static-file finders in addition to each
# app's own static/ directory.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'users.apps.UsersConfig',
    'frontend.apps.FrontendConfig',
    'crispy_forms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'IO_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'IO_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Use the custom user model declared in the `users` app instead of
# django.contrib.auth's default User.
AUTH_USER_MODEL = 'users.Account'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Where to send users after login/logout (site root in both cases).
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
| [
"noreply@github.com"
] | IOSebastianiSzymon.noreply@github.com |
2bc186d49fd3741a5945895a8313e016d372f690 | d10724d15f2888c5d2de8abb340995aa2a2074b9 | /examples/python/src/07fizzbuzz/main.py | a7bacd972b2efa6b474803bcf2f437439b106265 | [
"MIT"
] | permissive | podhmo/prestring | 5849e7f7de3626e8a1f48740190d98cd55bd3721 | 8a3499377d1b1b2b180809b31bd7536de5c3ec4d | refs/heads/master | 2021-07-16T06:35:10.555681 | 2021-03-28T05:35:37 | 2021-03-28T05:35:37 | 31,548,112 | 10 | 1 | MIT | 2021-03-28T05:27:35 | 2015-03-02T15:53:34 | Python | UTF-8 | Python | false | false | 292 | py | def fizzbuzz(n: int) -> str:
if n % 3 == 0 and n % 5 == 0:
return "fizzbuzz"
elif n % 3 == 0:
return "fizz"
elif n % 5 == 0:
return "buzz"
else:
return str(n)
if __name__ == '__main__':
    # Demo: print the fizzbuzz sequence for 1..20 as one comma-separated line.
    print(", ".join(fizzbuzz(i) for i in range(1, 21)))
| [
"noreply@github.com"
] | podhmo.noreply@github.com |
694a55ffe10f4262a60d4c2029e30a6b57a22ff9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/499.py | b15fef3836eebfaaa053844940f5b5fa956d25de | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | for _ in xrange(input()):
print "Case #%d:" % (_+1),
n = raw_input()
l = len(n)
nn = map(int, n)
    def dfs(c, less, st):
        # Recursively build the largest number <= n whose digits are
        # non-decreasing, one digit per position (Python 2 code).
        #   c    -- current digit position in n
        #   less -- 1 if the prefix built so far is already strictly below
        #           n's prefix (then any trailing digits are allowed), else 0
        #   st   -- digits chosen so far, as a string
        if c == l:
            return int(st)
        if less:
            # Prefix already smaller than n: a 9 is always the best filler.
            v = dfs(c+1, 1, st + '9')
        else:
            v = 0
        if c == l-1 or nn[c] <= nn[c+1]:
            # Keep n's own digit when the next digit does not break the
            # non-decreasing order (or we are at the last position).
            v = max(v, dfs(c+1, 0, st + n[c]))
        if c == 0 or nn[c-1] <= nn[c]-1:
            # Decrement the current digit (still >= the previous digit);
            # the prefix is now strictly smaller, so 9s may follow.
            v = max(v, dfs(c+1, 1, st + str(nn[c]-1)))
        return v
print dfs(0, 0, "")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8d3542cd2af74501b3466b835a0066195a42106b | 8e84abd6d1c943e5a530e2d86fb295f6c84f89b2 | /Q1_&_Statistical_aproac.py | 766883cb3e68dd78093599ba0053a274a72d6480 | [] | no_license | SusuHao/Text-Mining | f34cb6c5b8f13b7fceb29efac16c0bd152b548f6 | 828a2e198d1dad19e08c1ab463d27b5733e820fe | refs/heads/master | 2021-01-11T20:18:31.464632 | 2017-01-16T05:53:56 | 2017-01-16T05:53:56 | 79,086,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,677 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
import os
import unicodedata
import string
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.tag import pos_tag
from nltk.tokenize import sent_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import stem
from gensim import corpora, models, similarities
import re
os.chdir('C:\\Users\\8-)\\OneDrive\\Dokumente\\Visual Studio 2015\\Projects\\TM-Workshop\\TM-Workshop')
def df_to_list(data_df, training=True):
    """Flatten a DataFrame into parallel text/label lists.

    Training frames are (label, title, summary): y is the label column and
    each X entry is "title summary".  For prediction frames y is the dummy
    string '0' and X joins all four columns (or is just the second column
    when the frame has only two).
    """
    rows = [values.tolist() for _, values in data_df.iterrows()]
    if training == True:
        y = [row[0] for row in rows]
        X = [row[1] + ' ' + row[2] for row in rows]
    else:
        y = '0'
        if data_df.shape[1] > 2:
            X = [str(row[0]) + ' ' + row[1] + ' ' + row[2] + ' ' + row[3] for row in rows]
        else:
            X = [row[1] for row in rows]
    return X, y
def get_top_keywords(clf, name, X_train, y_train, categories, feature_names, num):
    """Fit *clf* and collect its *num* highest-weighted features per category.

    Returns (description, DataFrame) where each DataFrame column holds the
    top features for one category, ordered from lowest to highest weight.
    """
    print('=' * 80)
    print(clf)
    print(name)
    clf.fit(X_train, y_train)
    print("Dimensionality: %d" % clf.coef_.shape[1])
    top = pd.DataFrame()
    for idx, category in enumerate(categories):
        # argsort is ascending, so the last `num` indices are the largest weights.
        ranked = np.argsort(clf.coef_[idx])
        top[category] = feature_names[ranked[-num:]].tolist()
    descr = str(clf).split('(')[0] + ': ' + name
    return (descr, top)
def benchmark(clf, name, X_train, y_train, X_test, y_test, y_pred_df, categories):
    """Fit *clf*, evaluate it on the test split and record its predictions.

    Prints a classification report, confusion matrix and accuracy, stores the
    predicted labels in *y_pred_df* (mutated in place) under a
    "<ClassName>: <name>" column, and returns (description, accuracy).
    """
    print('=' * 80)
    print(clf)
    print(name)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # GridSearchCV instances expose the winning hyper-parameters.
    if hasattr(clf, 'best_params_'):
        print(clf.best_params_)
    print("Classification report:")
    print(metrics.classification_report(y_test, y_pred, target_names=categories))
    print()
    print("Confusion matrix:")
    print(metrics.confusion_matrix(y_test, y_pred))
    print()
    score = metrics.accuracy_score(y_test, y_pred)
    print("Accuracy: %0.3f" % score)
    print()
    print()
    # E.g. "LinearSVC: L1" -- the repr before the first '(' is the class name.
    clf_descr = str(clf).split('(')[0] + ': ' + name
    y_pred_df[clf_descr] = y_pred
    return clf_descr, score
def preprocess_data(X, lemma=False):
    """Normalize a list of raw texts for vectorization.

    Each text is ASCII-folded, stripped of punctuation and digits, lowercased
    and has English stopwords removed; with lemma=True each remaining token is
    additionally WordNet-lemmatized.  Returns the cleaned texts as a new list.

    NOTE(review): this uses Python 2 semantics -- `string.maketrans` plus the
    two-argument bytes `translate(table, deletechars)` form does not exist on
    Python 3 `str`.
    """
    X_Preproc = []
    for text in X:
        ntext=unicodedata.normalize('NFKD', text).encode('ascii','ignore')
        # Delete every punctuation character (py2 translate with deletechars).
        text_nopunc=ntext.translate(string.maketrans("",""), string.punctuation)
        text_nopunc = re.sub(r'\d+', '', text_nopunc)
        text_lower=text_nopunc.lower()
        # NOTE(review): the stopword list is re-read for every text; it could
        # be hoisted out of the loop.
        stop = stopwords.words('english')
        text_nostop=" ".join(filter(lambda word: word not in stop, text_lower.split()))
        if lemma:
            tokens = word_tokenize(text_nostop)
            wnl = WordNetLemmatizer()
            text_nostop=" ".join([wnl.lemmatize(t) for t in tokens])
            #stemmer = stem.porter.PorterStemmer()
            #stemmer = stem.lancaster.LancasterStemmer()
            #stemmer = stem.snowball.EnglishStemmer()
            #text_stem=" ".join([stemmer.stem(t) for t in tokens])
        X_Preproc.append(text_nostop)
    return X_Preproc
def select_best(X_train, X_test, y_train, feature_names, select_chi2=False):
    """Optionally score features with chi2, remapping the feature names.

    With select_chi2=False everything is returned unchanged; k='all' keeps
    every feature, so this mainly re-derives the supported feature names.
    """
    if not select_chi2:
        return X_train, X_test, feature_names
    selector = SelectKBest(chi2, k='all')
    X_train = selector.fit_transform(X_train, y_train)
    X_test = selector.transform(X_test)
    feature_names = [feature_names[i] for i in selector.get_support(indices=True)]
    return X_train, X_test, feature_names
def make_plot(results):
    """Draw a horizontal bar chart of (classifier-name, score) tuples.

    NOTE: sorts *results* in place by score, so the caller's list is mutated.
    """
    indices = np.arange(len(results))
    results.sort(key=lambda x: x[1])
    # Transpose [(name, score), ...] into [names, scores].
    results2 = [[x[i] for x in results] for i in range(2)]
    clf_names, score = results2
    plt.figure(figsize=(12, 8))
    plt.title("Score")
    plt.barh(indices, score, .2, label="score", color='navy')
    plt.yticks(())
    plt.legend(loc='best')
    plt.subplots_adjust(left=.25)
    plt.subplots_adjust(top=.95)
    plt.subplots_adjust(bottom=.05)
    # Write each classifier name into the left margin next to its bar.
    for i, c in zip(indices, clf_names):
        plt.text(-.3, i, c)
    plt.show()
def tokenize_only(text):
    """ASCII-fold, strip punctuation/digits, lowercase and tokenize *text*.

    NOTE(review): Python 2 only -- relies on `string.maketrans` and the
    two-argument bytes `translate(table, deletechars)` form.
    """
    ntext = unicodedata.normalize('NFKD', text).encode('ascii','ignore')
    text_nopunc = ntext.translate(string.maketrans("",""), string.punctuation)
    text_nopunc = re.sub(r'\d+', '', text_nopunc)
    text_lower = text_nopunc.lower()
    tokens = word_tokenize(text_lower)
    return tokens
def tokenize_and_lem(text):
    """Tokenize *text* and reduce every token to its WordNet lemma."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token) for token in tokenize_only(text)]
#strip any proper nouns (NNP) or plural proper nouns (NNPS) from a text
def filter_out_nouns_POS(text):
    """Strip proper nouns (NNP/NNPS POS tags) from *text*, sentence by sentence.

    Bug fix: the per-sentence word buffer is now reset for every sentence.
    Previously `word_no_nouns` accumulated across the whole text, so each
    sentence was emitted together with all words of the preceding sentences,
    duplicating most of the output.
    """
    filtered_sentences = []
    for sentence in sent_tokenize(text):
        # Keep every token whose tag is not a (plural) proper noun.
        kept = [word for word, pos in pos_tag(word_tokenize(sentence))
                if pos != 'NNP' and pos != 'NNPS']
        filtered_sentences.append(" ".join(kept))
    return " ".join(filtered_sentences)
def filter_only_nouns_POS(text):
    """Keep only nouns (NNP/NNPS/NN/NNS POS tags) from *text*.

    Bug fix: the word buffer is now reset for every sentence.  Previously it
    accumulated across the whole text, so each sentence was emitted together
    with all nouns of the preceding sentences, duplicating most of the output.
    """
    noun_sentences = []
    for sentence in sent_tokenize(text):
        # Keep proper and common nouns, singular or plural.
        kept = [word for word, pos in pos_tag(word_tokenize(sentence))
                if pos in ('NNP', 'NNPS', 'NN', 'NNS')]
        noun_sentences.append(" ".join(kept))
    return " ".join(noun_sentences)
#################################################################################################################
################################################ Classifiers ####################################################
#################################################################################################################
## Data is highly skewed (imbalanced) and very small
# Candidate classifiers as (display-name, estimator) pairs; grid-searched
# estimators tune their hyper-parameters with 10-fold cross-validation.
estimators_all=[
    ("L1", LinearSVC(dual=False, loss='squared_hinge', penalty="l1")), #Loss functions reduce feature space
    ("L2", LinearSVC(dual=False, loss='squared_hinge', penalty="l2")), #Loss functions reduce feature space
    ("NearestCentroid (Rocchio classifier)", NearestCentroid()),
    ("SVM", GridSearchCV(SVC(kernel='rbf', probability=True), cv=10, param_grid={'gamma': [1e-1, 1e-2, 1e-3], 'C': [10, 100]})),
    ("Elastic-Net penalty", SGDClassifier(n_iter=50, penalty="elasticnet")), #Loss functions reduce feature space
    ("Naive Bayes Based (BernoulliNB)", GridSearchCV(BernoulliNB(), cv=10, param_grid={'alpha': [1, 1e-1, 1e-2, 1e-3]})),
    ("Naive Bayes Based (MultinomialNB)", GridSearchCV(MultinomialNB(), cv=10, param_grid={'alpha': [1, 1e-1, 1e-2, 1e-3]})),
    ("kNN", GridSearchCV(KNeighborsClassifier(), cv=10, param_grid={'n_neighbors': range(5,9)}))]
# First five estimators feed the VotingClassifier ensemble below.
estimators_ensemble = estimators_all[:5]
# Linear models with per-class coefficients, used to extract top keywords.
estimators_imp_words=[
    ("Elastic-Net penalty", SGDClassifier(n_iter=50, penalty="elasticnet")),
    ("Naive Bayes Based (BernoulliNB)", BernoulliNB(alpha=0.1)),
    ("Naive Bayes Based (MultinomialNB)", MultinomialNB(alpha=0.1))]
#lr_gs = GridSearchCV(Pipeline([('clf', LogisticRegression())]), lr_parameters)
#lr_gs.best_estimator_.named_steps['clf'].coef_
#################################################################################################################
################################################ Evaluation #####################################################
#################################################################################################################
data_df = pd.read_excel(r'MsiaAccidentCases.xlsx')
data_df.columns = ['Cause', 'TitleCase', 'SummaryCase']
data_df[data_df.Cause == u'Others'] = u'Other'
categories = data_df['Cause'].unique()
categories = [x.encode('UTF8') for x in list(categories)]
print("%d categories" % len(categories))
X, y = df_to_list(data_df, training=True)
X_Preproc = preprocess_data(X, lemma=False)
X_train,X_test,y_train,y_test = train_test_split(X_Preproc, y, test_size=0.15, random_state=40)
print "Training Samples Size: " + str(len(y_train))
print "Test Samples Size: " + str(len(y_test))
# Create Tf-idf Representation
#vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=1.0, min_df=1, ngram_range=(1, 2), max_features=None, stop_words='english')
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=1.0, min_df=1, ngram_range=(1, 1), max_features=None, stop_words='english')
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
feature_names = vectorizer.get_feature_names()
#X_train, X_test, feature_names = select_best(X_train, X_test, y_train, feature_names, select_chi2 = True) # Reduce Feature Space
feature_names = np.asarray(feature_names)
y_pred_df = pd.DataFrame()
results = []
for name, clf in estimators_all:
results.append(benchmark(clf, name, X_train, y_train, X_test, y_test, y_pred_df, categories))
clf = VotingClassifier(estimators=estimators_ensemble, voting='hard', weights=[1,1,1,1,2])
results.append(benchmark(clf, 'Ensemble hard', X_train, y_train, X_test, y_test, y_pred_df, categories))
make_plot(results)
classifier_summary = pd.DataFrame(sorted(results, key=lambda x: x[1], reverse=True))
classifier_summary.to_csv('Classifier_summary.csv')
# predict class probabilities for all classifiers which give probabilities
#probas = [c.fit(X_train, y_train).predict_proba(X_train) for name,c in estimators[-4:]]
# get class probabilities for the first sample in the dataset
#categories_test = categories[:]
#del categories_test[8] # No sample of this category in test dataset
#class_proba = pd.DataFrame()
#for i, cat in enumerate(categories_test):
# class_proba[cat] = [pr[0, i] for pr in probas]
#################################################################################################################
################################################ Train final ####################################################
#################################################################################################################
data_train_df = pd.read_excel(r'MsiaAccidentCases.xlsx')
data_train_df.columns = ['Cause', 'TitleCase', 'SummaryCase']
data_train_df[data_train_df.Cause == u'Others'] = u'Other'
categories = data_train_df['Cause'].unique()
categories = [x.encode('UTF8') for x in list(categories)]
print("%d categories" % len(categories))
data_pred_df = pd.read_excel(r'osha.xlsx', header=None)
data_pred_df.columns = ['Number', 'TitleCase', 'SummaryCase', 'FirstDiagnose', 'Hospitalized']
del data_pred_df['Number']
X_train_, y_train = df_to_list(data_train_df, training=True)
X_train = preprocess_data(X_train_, lemma=False)
X_pred_, y_dummy = df_to_list(data_pred_df, training=False)
X_pred = preprocess_data(X_pred_, lemma=False)
print "Training Samples Size: " + str(len(y_train))
# Create Tf-idf Representation
#vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=1.0, min_df=1, ngram_range=(1, 2), max_features=None, stop_words='english')
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=1.0, min_df=1, ngram_range=(1, 1), max_features=None, stop_words='english')
X_train = vectorizer.fit_transform(X_train)
X_pred = vectorizer.transform(X_pred)
feature_names = vectorizer.get_feature_names()
feature_names = np.asarray(feature_names)
impWords = []
for name, clf in estimators_imp_words:
impWords.append(get_top_keywords(clf, name, X_train, y_train, categories, feature_names, 10))
impWordsAll_df = pd.DataFrame()
for name, df in impWords:
impWordsAll_df = pd.concat([impWordsAll_df, df], axis=0)
for cat in categories:
impWordsAll_df[cat][impWordsAll_df[cat].duplicated()]=np.NaN
print impWordsAll_df
impWordsAll_df.to_csv('ImportantWords_byClassifer.csv')
clf = VotingClassifier(estimators=estimators_ensemble, voting='hard', weights=[1,1,1,1,2])
clf.fit(X_train, y_train)
y_pred = clf.predict(X_pred)
pred_df = pd.DataFrame()
pred_df['y'] = y_pred
pred_df['X'] = X_pred_
summary = pred_df['y'].value_counts() / len(pred_df) * 100
###############################################################################################
cat = 'Collapse of object'
num_clusters = 4
###############################################################################################
## LDA
data_filtered_df = pred_df[pred_df['y'] == cat]
text = df_to_list(data_filtered_df, training=False)[0] # Focus only on one category
text_filtered = [filter_only_nouns_POS(doc) for doc in text]
#text_filtered = [filter_out_nouns_POS(doc) for doc in text]
#text_filtered = text # not filtered
tokenized_text = [tokenize_and_lem(text) for text in text_filtered]
stopwordsEng = stopwords.words('english')
texts = [[word for word in text if word not in stopwordsEng] for text in tokenized_text]
dictionary = corpora.Dictionary(texts)
# remove extremes (similar to the min/max df step used when creating the tf-idf matrix)
dictionary.filter_extremes(no_below=1, no_above=0.9)
# convert the dictionary to a bag of words corpus for reference
corpus = [dictionary.doc2bow(text) for text in texts]
lda = models.LdaModel(corpus, num_topics=num_clusters, id2word=dictionary, update_every=5, chunksize=10000, passes=100)
topics_matrix = lda.top_topics(corpus)
importantWords_df = pd.DataFrame()
for i, topic in enumerate(topics_matrix):
for words in topic[:-1]:
importantWords_df[i] = words
#print importantWords_df #Number possible reason - num_topics=3
result_lda_df = pd.DataFrame()
result_lda_df = pd.concat([result_lda_df, importantWords_df], axis=0)
result_lda_df
result_lda_df.to_csv('Result_lda.csv')
################################################################################################################
## kMeans
text = df_to_list(pred_df[pred_df['y'] == cat], training=False)[0]
# Focus only on non nouns (can be changed to focus only on nouns)
#text_filtered = [filter_out_nouns_POS(doc) for doc in text[0]]
tfidf_vectorizer_km = TfidfVectorizer(max_df=0.9, min_df=0.10, max_features=200000, stop_words='english', use_idf=True, tokenizer=tokenize_and_lem, ngram_range=(1,3))
X_tfidf = tfidf_vectorizer_km.fit_transform(text)
km = KMeans(n_clusters=num_clusters, random_state=23)
km.fit(X_tfidf)
clusters = np.array(km.labels_.tolist())
silhouette_score(X_tfidf, clusters, metric='euclidean', sample_size=None, random_state=None) #-1 = bad; 1 = good
svd = TruncatedSVD(2)
lsa = make_pipeline(svd, Normalizer(copy=False))
X_svd = lsa.fit_transform(X_tfidf)
km_svd = KMeans(n_clusters=num_clusters, random_state=42)
km_svd.fit(X_svd)
clusters_svd = np.array(km_svd.labels_.tolist())
silhouette_score(X_svd, clusters_svd, metric='euclidean', sample_size=None, random_state=None)
accidents = { 'cluster': clusters, 'category': pred_df[pred_df['y'] == cat].y.tolist(), 'summary': pred_df[pred_df['y'] == cat].X.tolist() }
frame = pd.DataFrame(accidents, index = [clusters_svd] , columns = ['cluster', 'category', 'summary'])
# Check the number of members of each cluster
print frame['cluster'].value_counts()
print frame['category'].value_counts()
# Check the average rank (1 .. 100) of movies in each cluster
grouped = frame.groupby('category')
grouped['cluster'].sum() / grouped['cluster'].count()
print("Top terms per cluster:")
totalvocab_lemmed = []
totalvocab_tokenized = []
for doc in text:
allwords_lemmed = tokenize_and_lem(doc)
totalvocab_lemmed.extend(allwords_lemmed)
allwords_tokenized = tokenize_only(doc)
totalvocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_lemmed)
print ('there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame')
terms = tfidf_vectorizer_km.get_feature_names()
order_centroids = svd.inverse_transform(km_svd.cluster_centers_).argsort()[:, ::-1]
result_kmeans = pd.DataFrame()
for i in range(num_clusters):
print("Cluster %d words:" %i)
words = []
for ind in order_centroids[i, :12]:
word = vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore')
words.append(word)
print(' %s' % word)
print('')
result_kmeans[i] = words
result_kmeans.index.name=cat
result_kmeans.to_csv('Result_kmeans.csv')
################################################################################################################
## Tf-idf - Most Important Words based on Td-idf
corpus = df_to_list(pred_df[pred_df['y'] == cat], training=False)
#text_filtered = [filter_out_nouns_POS(doc) for doc in text[0]]
tfv = TfidfVectorizer(max_df=0.8, min_df=1, max_features=200000, stop_words='english', tokenizer=tokenize_only, ngram_range=(1,1))
X_tfidf = tfv.fit_transform(corpus[0])
feature_names = tfv.get_feature_names()
len(feature_names)
#feature_names[50:70]
dense = X_tfidf.todense()
hi_Tfidf = pd.DataFrame(columns=['document', 'phrase', 'score'])
for i in range(len(dense)):
doc_tfidf = dense[i].tolist()[0] #doc i
#len(doc_tfidf)
doc_tfidf_non0 = [pair for pair in zip(range(0, len(doc_tfidf)), doc_tfidf) if pair[1] > 0] #doc i words used
#len(doc_tfidf_non0) #number used unique words
doc_tfidf_non0_sorted = sorted(doc_tfidf_non0, key=lambda t: t[1] * -1)
for phrase, score in [(feature_names[word_id], score) for (word_id, score) in doc_tfidf_non0_sorted][:5]:
hi_Tfidf.loc[hi_Tfidf.shape[0]] = [i, phrase, score]
hi_Tfidf = hi_Tfidf.sort(['score'], ascending=[False])
hi_Tfidf[hi_Tfidf['phrase'].duplicated()]=np.NaN
hi_Tfidf.dropna()
hi_Tfidf.to_csv('Result_Tfidf.csv') | [
"noreply@github.com"
] | SusuHao.noreply@github.com |
dd9b6c4854567505f701afee829db2ed9f4ed619 | a4029532841b6a3b930cfba18c9a872db8680f1b | /hello.py | 7b9ee8766288b039d0c9d0d33a0b6f6ef9aebb6e | [] | no_license | leowf/visual_code | b7f870b65406d86be3bf7e51d00f2820b8244ae2 | 995abff96e76672b4260397a897ab7ab2d32928f | refs/heads/master | 2022-12-15T03:32:09.837149 | 2020-09-14T07:33:41 | 2020-09-14T07:33:41 | 295,301,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | print("hello flask!") | [
"leowf01@gmail.com"
] | leowf01@gmail.com |
45b7193b9e36e0ceb7d6cdceeb758a380ea8adb4 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=75/sched.py | 580c5a476d4a3ce082bab13eb8366ff4f2034cf6 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | -X FMLP -Q 0 -L 2 84 250
-X FMLP -Q 0 -L 2 79 400
-X FMLP -Q 0 -L 2 66 300
-X FMLP -Q 1 -L 1 54 250
-X FMLP -Q 1 -L 1 50 250
-X FMLP -Q 1 -L 1 49 400
-X FMLP -Q 2 -L 1 41 300
-X FMLP -Q 2 -L 1 37 125
-X FMLP -Q 3 -L 1 35 250
-X FMLP -Q 3 -L 1 31 300
30 125
28 125
26 300
25 100
21 100
19 125
15 175
11 100
10 100
7 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
2adfa7d968a07dd30d191878d89081daf3f7949b | c7e028d71b5dd72eb18b72c6733e7e98a969ade6 | /src/demos/datastructures/fifo.py | 74444fc181f799a0428cb21e7b27d0e754254573 | [
"MIT"
] | permissive | antoniosarosi/algoritmia | da075a7ac29cc09cbb31e46b82ae0b0ea8ee992f | 22b7d61e34f54a3dee03bf9e3de7bb4dd7daa31b | refs/heads/master | 2023-01-24T06:09:37.616107 | 2020-11-19T16:34:09 | 2020-11-19T16:34:09 | 314,302,653 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | #coding: latin1
#< full
from algoritmia.datastructures.queues import Fifo
dfltFifo = Fifo([0, 1])
listBasedFifo = Fifo([0, 1], createList=lambda data: list(data))
for i in range(2, 6):
listBasedFifo.push(i)
dfltFifo.push(i)
while len(listBasedFifo) > 0:
print(dfltFifo.pop(), listBasedFifo.pop(), end=" : ")
#> full | [
"amarzal@localhost"
] | amarzal@localhost |
604e1cf9670914ce04a204ab023bee3c7df91028 | 55569f12da5bd2cfa3c38829a509a5a66c7ae9c8 | /cws.py | 32cb4b27380c6ad0a0101ca0a7e06116301bd4d9 | [
"MIT"
] | permissive | fhltang/chews | 92da2253a2a4dbab49839ce4314766dc90595164 | 87f774a9ad1e034b4b387c910a0f29f5dad01403 | refs/heads/master | 2022-12-21T03:56:42.799961 | 2022-05-23T23:17:07 | 2022-05-23T23:17:07 | 101,605,982 | 0 | 0 | MIT | 2022-12-09T06:31:42 | 2017-08-28T05:10:42 | Python | UTF-8 | Python | false | false | 9,012 | py | # Code for managing life cycle of Cloud Workstations.
import enum
import hashlib
import time
import libcloud
class Error(Exception):
    """Base class for all errors raised by this module."""
class StateError(Error):
    """Raised when a workstation is in the wrong state for an operation."""
class Volume(object):
    """A named disk belonging to one Cloud Workstation."""

    # Mixed into the hash input so volume names cannot collide with other
    # salted name families.
    _SALT = 'volume'

    def __init__(self, context, cws_name, volume_name):
        self._context = context
        self._cws = context.get_cws(cws_name)
        self._volume = context.get_volume(cws_name, volume_name)

    def unique_name(self):
        """Return '<cws>-<volume>-<6 hex chars of salted SHA1>'."""
        base = '%s-%s' % (self._cws.name, self._volume.name)
        digest = hashlib.sha1((base + self._SALT).encode()).hexdigest()
        return '%s-%s' % (base, digest[:6])

    def snapshot_name_prefix(self):
        """Prefix shared by every snapshot of this volume."""
        return self.unique_name() + '-'
# Cloud Workstation states
class CwsState(enum.Enum):
    """Lifecycle states of a Cloud Workstation (computed by Cws.state())."""
    NOT_EXIST = 0   # no volumes and no snapshots exist yet
    DESSICATED = 1  # every volume's newest snapshot shares the same timestamp
    OFF = 2         # the compute node exists and is stopped
    ON = 3          # the compute node exists and is running
    RECOVERABLE_ERROR = 4 # A non-standard state which can be recovered
    UNRECOVERABLE_ERROR = 5 # A non-standard state which requires operator intervention
class Cws(object):
    def __init__(self, context, cws_name):
        """Look up the workstation config and cache its cloud inventory."""
        self._context = context
        self._cws = self._context.get_cws(cws_name)
        # Names of all volumes known to the driver (filled by _populate).
        self._all_volumes = set()
        # List of lists of snapshot names. i-th index is a list of
        # snapshots for the i-th volume in self._cws.volumes .
        self._snapshots = []
        # List of snapshot names. i-th index is the newest snapshot
        # (or None) for the i-th volume in self._cws.volumes .
        self._newest_snapshots = []
        # How many configured volumes / snapshotted volumes actually exist.
        self._volume_count = 0
        self._snapshot_count = 0
        self._populate()
    def _populate(self):
        """Refresh the cached volume/snapshot inventory from the cloud driver."""
        driver = self._context.driver()
        # Names of every volume visible to the driver (not just this CWS's).
        self._all_volumes = set(v.name for v in driver.list_volumes())
        all_snapshots = [s.name for s in driver.ex_list_snapshots()]
        self._snapshots = []
        self._newest_snapshots = [None] * len(self._cws.volumes)
        self._volume_count = 0
        self._snapshot_count = 0
        for i, vc in enumerate(self._cws.volumes):
            v = Volume(self._context, self._cws.name, vc.name)
            if v.unique_name() in self._all_volumes:
                self._volume_count += 1
            snapshots = []
            for s in all_snapshots:
                if s.startswith(v.snapshot_name_prefix()):
                    snapshots.append(s)
                    # "Newest" is by lexicographic name order; snapshot names
                    # carry a timestamp suffix, assumed to sort chronologically
                    # -- TODO confirm the timestamp format is sortable.
                    if self._newest_snapshots[i] is None or s > self._newest_snapshots[i]:
                        self._newest_snapshots[i] = s
            snapshots.sort()
            self._snapshots.append(snapshots)
            if self._newest_snapshots[i] is not None:
                self._snapshot_count += 1
    def unique_name(self):
        """Return the workstation's configured name (used as the node name)."""
        return self._cws.name
def volumes(self):
return [Volume(self._context, self._cws.name, v.name) for v in self._cws.volumes]
    def snapshot_names(self):
        """Return the cached per-volume snapshot name lists (aliased, not copied)."""
        return self._snapshots
def state(self):
# Check if state is NOT_EXIST.
# First determine if any volumes exist.
driver = self._context.driver()
if self._volume_count == 0 and self._snapshot_count == 0:
return CwsState.NOT_EXIST
if self._volume_count == len(self._cws.volumes):
# As a hack, use the GCE extension to get a Node by name.
# libcloud has no native way to do this.
try:
node = driver.ex_get_node(self.unique_name())
except libcloud.common.google.ResourceNotFoundError:#
# This is actually a recoverable error since we could
# just recreate the node. For now, we just pretend it
# is an unrecoverable error.
return CwsState.UNRECOVERABLE_ERROR
# libcloud documentation suggests node.state should be of
# type libcloud.compute.types.NodeState but I am getting
# str
if node.state == 'running':
return CwsState.ON
elif node.state == 'stopped':
return CwsState.OFF
# State is considered to be DESSICATED only if each volume has
# at least one snapshot and the latest snapshot for each
# volume has the same timestamp (in the name).
if self._snapshot_count == len(self._cws.volumes):
timestamps = set()
for i, vc in enumerate(self._cws.volumes):
v = Volume(self._context, self._cws.name, vc.name)
snapshot_name = self._newest_snapshots[i]
timestamps.add(snapshot_name[len(v.snapshot_name_prefix()):])
if len(timestamps) == 1:
return CwsState.DESSICATED
return CwsState.UNRECOVERABLE_ERROR
def _create_node_and_attach_volumes(self):
driver = self._context.driver()
boot_volume = Volume(self._context, self._cws.name, self._cws.volumes[0].name)
node = driver.create_node(
self._cws.name, self._cws.node.size, None,
location=self._cws.location, use_existing_disk=True,
ex_boot_disk=boot_volume.unique_name(), ex_disk_auto_delete=False)
for i, vc in enumerate(self._cws.volumes):
if i == 0:
continue
v = Volume(self._context, self._cws.name, vc.name)
driver.attach_volume(node, driver.ex_get_volume(v.unique_name()), ex_boot=i==0)
def create(self):
if self.state() != CwsState.NOT_EXIST:
raise StateError('Cloud Workstation must be in state NOT_EXIST in order to Create.')
driver = self._context.driver()
for i, vc in enumerate(self._cws.volumes):
image_family = None
if i == 0:
image_family = self._cws.image_family
v = Volume(self._context, self._cws.name, vc.name)
driver.create_volume(
vc.size, v.unique_name(), location=self._cws.location,
ex_image_family=image_family, use_existing=False, ex_disk_type=vc.volume_type)
self._create_node_and_attach_volumes()
def stop(self):
if self.state() != CwsState.ON:
raise StateError('Cloud workstation must be in state ON in order to Stop.')
driver = self._context.driver()
node = driver.ex_get_node(self.unique_name())
# Annoyingly, libcloud has no native way to stop an instance.
# We must use the GCE extension.
driver.ex_stop_node(node)
def dessicate(self):
if self.state() != CwsState.OFF:
raise StateError('Cloud workstation must be in state OFF in order to Dessicate.')
driver = self._context.driver()
node = driver.ex_get_node(self.unique_name())
driver.destroy_node(node)
volumes = self._context.get_volumes(self._cws.name)
timestamp = '%.10d' % int(time.time())
for volume in volumes:
vol = Volume(self._context, self._cws.name, volume.name)
v = driver.ex_get_volume(vol.unique_name())
snapshot_name = '%s%s' % (vol.snapshot_name_prefix(), timestamp)
driver.create_volume_snapshot(v, snapshot_name)
driver.destroy_volume(v)
def rehydrate(self):
state = self.state()
if state != CwsState.DESSICATED:
raise StateError('Cloud workstation must be in state DESSICATED to Rehydrate. State is %s' % state)
driver = self._context.driver()
for i, vc in enumerate(self._cws.volumes):
snapshot = self._newest_snapshots[i]
v = Volume(self._context, self._cws.name, vc.name)
driver.create_volume(
None, v.unique_name(), location=self._cws.location,
snapshot=snapshot, use_existing=False,
ex_disk_type=vc.volume_type)
self._create_node_and_attach_volumes()
def powerup(self):
state = self.state()
if state != CwsState.OFF:
raise StateError('Cloud workstation must be in state OFF to Powerup. State is %s' % state)
driver = self._context.driver()
node = driver.ex_get_node(self.unique_name())
driver.ex_start_node(node)
def powerdown(self):
state = self.state()
if state != CwsState.ON:
raise StateError('Cloud workstation must be in state ON to Powerdown. State is %s' % state)
driver = self._context.driver()
node = driver.ex_get_node(self.unique_name())
driver.ex_stop_node(node)
def tidy_snapshots(self):
if self.state() == CwsState.NOT_EXIST:
raise StateError('Cloud workstation cannot be in state NO_EXIST for TidySnapshots.')
driver = self._context.driver()
for i, vc in enumerate(self._cws.volumes):
snapshots = self._snapshots[i]
for snapshot in snapshots[:-vc.max_snapshots]:
s = driver.ex_get_snapshot(snapshot)
driver.destroy_volume_snapshot(s)
| [
"francis.tang@gmail.com"
] | francis.tang@gmail.com |
0f71214394ea80e1e5ca7752eaf879a8f93f5a0c | af36fdba6ef43a9d4d61b201d7181a30b659a2e9 | /pra10/wikipedia_API.py | d7ef2fc974f73dcb03a694f232eeaab9969e0ca7 | [] | no_license | woochul-hyun-uni/programming1_pra | df1de139fefbdcfafad34844a326233ad96d7483 | cfb9d4efc25d07e8db9854f287e34fac07558894 | refs/heads/master | 2023-02-24T00:06:08.731485 | 2017-10-05T15:23:36 | 2017-10-05T15:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import wikipedia
def main():
    """Repeatedly prompt for a Wikipedia page title and print its summary.

    An empty input ends the loop. Ambiguous titles print the candidate
    options; missing pages print a notice instead of crashing.
    """
    choice = input("Enter a page title or search phrase: ")
    while choice != "":
        try:
            page = wikipedia.page(choice)
            print(page.title)
            print(page.summary)
            print(page.url)
        except wikipedia.exceptions.DisambiguationError as e:
            # Ambiguous title: show the candidate page titles.
            print(e.options)
        except wikipedia.exceptions.PageError:
            # Bug fix: a non-existent page previously raised an unhandled
            # PageError and killed the whole loop.
            print("No Wikipedia page found for: " + choice)
        choice = input("Enter a page title or search phrase: ")
main()
| [
"woochul.hyun@my.jcu.edu.au"
] | woochul.hyun@my.jcu.edu.au |
b64d842a5f0f64d7ae91f197a6e0a98a5a0be31d | f7a474af31989a7492411b9e18ba76d3c1527029 | /Day-18/DjangoForm/views.py | b64cc455fe609dd5123bb340d8d00f2c33eeac6a | [] | no_license | chikkalarameshsaikumar/Django-TOT | 01fa4190ca7d2c23e3e0d74e704037babd5b3217 | fb91bb6b2db306b1379f2c00f8d5d27e9b5821f2 | refs/heads/main | 2023-02-05T00:13:03.310573 | 2020-12-25T11:10:03 | 2020-12-25T11:10:03 | 339,008,757 | 0 | 1 | null | 2021-02-15T08:18:18 | 2021-02-15T08:18:18 | null | UTF-8 | Python | false | false | 1,545 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse
# Create your views here.
# from DjangoForm.forms import DynamicHtmlFormGen, RegisterForm
from .models import Register
from .forms import Reg
def registerForm(request):
    """Render the registration form; on a valid POST, save the record.

    Bug fix: the original called ``f.save()`` without ``is_valid()``, which
    raises on invalid input; now invalid data re-renders the bound form so
    validation errors are displayed.
    """
    # NOTE(review): RegisterForm's import is commented out at the top of this
    # module — confirm it is re-enabled, otherwise this raises NameError.
    if request.method == 'POST':
        f = RegisterForm(request.POST)
        if f.is_valid():
            f.save()
            return HttpResponse("record inserted successfully...")
    else:
        f = RegisterForm()
    return render(request, 'DjangoForm/registerForm.html', {"f": f})
def fetchAll(request):
    """Render the full list of Register records."""
    records = Register.objects.all()
    context = {'data': records}
    return render(request, 'DjangoForm/fetchAll.html', context)
def dynamicHtmlFormGen(request):
    """Render an automatically generated HTML form."""
    # NOTE(review): DynamicHtmlFormGen's import is commented out at the top
    # of this module — confirm it is available at runtime.
    form = DynamicHtmlFormGen()
    return render(request, 'DjangoForm/dynamicHtmlFormGen.html', {'form': form})
def home(request):
    """Render the static landing page."""
    return render(request,'DjangoForm/home.html')
def rgform(request):
    """Show the Reg form; create a Register record on a valid POST.

    Bug fix: an invalid POST previously fell through to a fresh, unbound
    form, silently discarding the validation errors; the bound form is now
    re-rendered so the user can see what went wrong.
    """
    if request.method == "POST":
        form = Reg(request.POST)
        if form.is_valid():
            form.save()
            return redirect("/")
        return render(request, 'DjangoForm/register.html', {'tg': form})
    form = Reg()
    return render(request, 'DjangoForm/register.html', {'tg': form})
def fetchall(request):
    """Render every Register record in the fetch template."""
    records = Register.objects.all()
    return render(request, 'DjangoForm/fetch.html', {'y': records})
def upd(request, id):
    """Edit the Register record with primary key ``id``.

    Bug fix: an invalid POST previously replaced the bound form with a
    pristine copy of the record, hiding the validation errors; the bound
    form is now re-rendered instead.
    """
    # (Parameter name `id` shadows the builtin, but it is part of the URLconf
    # interface and must stay.)
    record = Register.objects.get(id=id)
    if request.method == "POST":
        form = Reg(request.POST, instance=record)
        if form.is_valid():
            form.save()
            return redirect('/ft')
        return render(request, 'DjangoForm/update.html', {'t': form})
    form = Reg(instance=record)
    return render(request, 'DjangoForm/update.html', {'t': form})
"rravikumar34@gmail.com"
] | rravikumar34@gmail.com |
72d60e449d5f5339a2cc810ed032d685439af130 | b483c961320d2840497643aac7e4b4eaab70eb7a | /app/bookmark/tests/test_bookmark_details_api.py | 0bc27dd99320a6f80d6d0235a707cd9c3c76358f | [
"MIT"
] | permissive | Shahroz16/bookmark-app-api | 12e359d40c953ec7edd4bbc169e7ac3101a75d13 | 7451ab894a4b6de461b3a62577b914f6f2712542 | refs/heads/master | 2020-04-12T15:18:40.462963 | 2019-03-22T11:00:08 | 2019-03-22T11:00:08 | 162,576,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import BookmarkDetail
from bookmark.serializers import BookmarkDetailSerializer
BOOKMARK_DETAILS_URL = reverse('bookmark:detail')
class PublicBookmarkDetailsApiTests(TestCase):
    """Behaviour of the bookmark-details endpoint for anonymous clients."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """The endpoint must reject unauthenticated requests."""
        response = self.client.get(BOOKMARK_DETAILS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateBookmarkDetailsApiTests(TestCase):
    """Behaviour of the bookmark-details endpoint for an authenticated user."""

    def setUp(self):
        self.client = APIClient()
        # NOTE(review): `.object` (not `.objects`) — presumably the custom
        # users.models.User model names its manager `object`; confirm.
        self.user = get_user_model().object.create_user(
            'test@test.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_bookmark_details(self):
        """Listing returns all of the user's details, ordered by name desc."""
        BookmarkDetail.objects.create(user=self.user, name='Salt')
        BookmarkDetail.objects.create(user=self.user, name='Ploo')

        res = self.client.get(BOOKMARK_DETAILS_URL)

        bookmarkDetails = BookmarkDetail.objects.all().order_by('-name')
        serializer = BookmarkDetailSerializer(bookmarkDetails, many=True)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(res.data, serializer.data)

    def test_bookmark_details_limited_to_user(self):
        """Only the authenticated user's details are returned."""
        user2 = get_user_model().object.create_user(
            'test2@test.com',
            'testt22')
        # Bug fix: the original referenced the never-assigned attribute
        # `self.user2` (AttributeError) and attributed the expected record
        # to the *other* user. The other user owns 'Salt'; the record we
        # expect back belongs to the authenticated user.
        BookmarkDetail.objects.create(user=user2, name='Salt')
        detail = BookmarkDetail.objects.create(user=self.user, name='JOIL')

        res = self.client.get(BOOKMARK_DETAILS_URL)

        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], detail.name)

    def test_create_bookmark_detail(self):
        """POSTing a name creates a detail owned by the requester."""
        payload = {'name': 'Coco'}
        self.client.post(BOOKMARK_DETAILS_URL, payload)

        exists = BookmarkDetail.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
"shrozali@gmail.com"
] | shrozali@gmail.com |
45e3d003adda4530fe51d38d0b9dfa66e4d681a6 | df6a182f6be1fd5ad6393cf5200d0d6d26dd992a | /core/views/genres.py | 3cec6457f453710dad1ce81c5e4c6d0f1cdab76e | [] | no_license | LizaMetla/liblessonDjango | 560dcd3d866cfe1003c21df80397dd60aa10b7f8 | 7e4acc082399b25ad8614adf338ed9441571554d | refs/heads/master | 2023-06-20T21:27:54.789874 | 2021-07-03T15:45:12 | 2021-07-03T15:45:12 | 376,153,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import TemplateView
from core.forms import GenreForm
from mixins.auth import AbsLoginMixin
class GenreCreateView(AbsLoginMixin, TemplateView):
    """Login-protected page for creating a Genre via GenreForm."""

    template_name = 'core/genre-create.html'

    def get_context_data(self, **kwargs):
        context = super(GenreCreateView, self).get_context_data(**kwargs)
        # Bind POST data / uploaded files when present so the same form
        # object serves both the initial GET and a submitted POST.
        context['form'] = GenreForm(self.request.POST or None, self.request.FILES or None)
        return context

    def post(self, request, *args, **kwargs):
        """Save a valid form and redirect home; re-render on errors."""
        context = self.get_context_data(**kwargs)
        if context['form'].is_valid():
            # The saved instance was bound to an unused local before; the
            # return value is not needed.
            context['form'].save()
            return redirect(reverse('index'))
        # Invalid: fall through to re-rendering with the bound form's errors.
        return render(request, self.template_name, context)
"liza.metla@gmail.com"
] | liza.metla@gmail.com |
c666864202b8a020bd1fbf62891737b1d82993b4 | f2481cf8e8802219e99b8d7057f93f44c0d0ba2c | /rango/migrations/0005_userprofile.py | 099137a9c807b9fd6ccdc958515c05513ad976ec | [] | no_license | sibsones/tango_with_django_project | 011c7aaf9e7b234dc419eff4563d97feb8222012 | fd2c34edc43f668a9289fd2ac3b572abfb7e3f19 | refs/heads/master | 2020-04-19T04:50:53.985613 | 2019-02-07T16:44:47 | 2019-02-07T16:44:47 | 165,224,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-02-07 10:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the UserProfile model — a one-to-one
    extension of the auth user with an optional website URL and picture."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('rango', '0004_auto_20190131_1656'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website', models.URLField(blank=True)),
                ('picture', models.ImageField(blank=True, upload_to='profile_images')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
"2260702s@student.gla.ac.uk"
] | 2260702s@student.gla.ac.uk |
3278c70f6d0adc70a0bdbf0573b625a2f594e0a4 | ababd7fdd615419880f55c2f0938e91c46643923 | /CULTIVO/SUELO.py | eec11ee9b4e12122e4455044ae71930cc8e05632 | [] | no_license | sativa/Tikon | aa7faaf2e377a91b6dafbdf961c1c93d45ddbd04 | 0f7c5821d971bb5ca46f4f398d19c4a7c89badc9 | refs/heads/master | 2021-01-21T18:18:00.912488 | 2016-08-07T02:50:05 | 2016-08-07T02:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | from NuevoCoso import Coso
# Esta clase representa los suelos de una parcela.
class Suelo(Coso):
ext = '.suel'
def __init__(símismo, nombre, directorio):
# El diccionario de los datos para cada suelo
dic = dict(Nombre=nombre, Textura_suelo="", Profundidad_suelo=(), Color="", Albedo=(), Límite_evap=(),
Drenaje=(), Factor_mineral=(), Factor_fotosyn=(), Método_pH="", Metodo_potasio="",
Profund_muestras=[], Fósforo_extract=[], Fósforo_total=[], Fósforo_orgán=[], CaCO3=[],
Aluminio=[], Hierro=[], Manganesio=[], Satur_base=[], Isoterm_P_a=[], Isoterm_P_b=[],
Potas_intercamb=[], Magnesio=[], Sodio=[], Solfor=[], Conduct_eléc=[],
Calcio_intercamb=[], Límite_bajo=[], Límite_alto=[], Límite_alto_sat=[],
Factor_crec_raíz=[], Cond_hidró_sat=[], Densidad_suelo=[], Carbono_orgán=[],
Fracción_argi=[], Fracción_lim=[], Fracción_rocas=[], Nitró_total=[], pH_agua=[],
pH_tamp=[], Poten_intercamb_cat=[])
# Esta variable se initializa como Coso
super().__init__(nombre=nombre, ext='su', dic=dic, directorio=directorio)
| [
"julien.malard@mail.mcgill.ca"
] | julien.malard@mail.mcgill.ca |
b10d5b121f3e73cea133b65794eafed83928cf88 | 2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df | /contributions/Ruud/Python/Data Structures/2016-10-07.py | 6919122f960fe5c13bd2f44b54f466ad6257c302 | [] | no_license | 0x8801/commit | 18f25a9449f162ee92945b42b93700e12fd4fd77 | e7692808585bc7e9726f61f7f6baf43dc83e28ac | refs/heads/master | 2021-10-13T08:04:48.200662 | 2016-12-20T01:59:47 | 2016-12-20T01:59:47 | 76,935,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | Following PEP 8 styling guideline.
Working with `set`s
How to count
A thread-safe `Queue`
Special `queue` methods | [
"ruud@arfie.nl"
] | ruud@arfie.nl |
fc34da7d0a63f931eb43704be15efd3f638678f9 | 650b3dd4cc74f32db78f7d99cef9907aec78a222 | /dialogs/tools/fDepreciation_data.py | 832cfd0bbed68f7aae6e702a9f8b189942aee073 | [] | no_license | mech4/PKTrx | 29b871ab587434e7c208175c248f48d9b6c80a17 | cf01bc5be8837d632974786d2419c58b94a0381d | refs/heads/master | 2020-03-29T19:55:07.331831 | 2012-09-18T20:22:52 | 2012-09-18T20:22:52 | 6,289,691 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import sys
import com.ihsan.foundation.pobjecthelper as phelper
def FormSetDataEx(uideflist,params):
config = uideflist.config
uipData = uideflist.uipData.Dataset.AddRecord()
app = config.AppObject
res = app.rexecscript('accounting','appinterface/AccountingDay.GetLastCloseDate',app.CreateValues())
rec = res.FirstRecord
if rec.Is_Err : raise '',rec.Err_Message
LastCloseDate = int(rec.LastCloseDate)
uipData.LastCloseDate = LastCloseDate
uipData.ProcessDate = LastCloseDate + 1
| [
"wisnu27@gmail.com"
] | wisnu27@gmail.com |
f537c996a4274187fd0bb92ebd24bd534cedd400 | 0e1b1f5e2893070ebdcb5eb15b07b89b0f31f471 | /submodules/ont_fast5_api/setup.py | 1763434727ef2ef7df46a7d1a9feb850b9fa9999 | [
"MPL-2.0",
"MIT"
] | permissive | sheffield-bioinformatics-core/STRique | 1a4a3e59e0ac66174ed5c9a4498d6d8bed40b54d | fd2df916847727b3484b2bbad839814043d7dbea | refs/heads/master | 2022-12-27T22:28:31.893074 | 2020-09-29T14:31:45 | 2020-09-29T14:31:45 | 296,618,760 | 0 | 0 | MIT | 2020-09-18T12:45:30 | 2020-09-18T12:45:29 | null | UTF-8 | Python | false | false | 2,341 | py | import os
import re
from setuptools import setup, find_packages
__pkg_name__ = 'ont_fast5_api'
def get_version():
    """Extract the package's ``__version__`` string from its __init__.py.

    Raises:
        RuntimeError: if no version assignment is found in the file.
    """
    init_file = os.path.join(__pkg_name__, '__init__.py')
    with open(init_file, 'r') as init_fh:
        contents = init_fh.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string in '{}'".format(init_file))
    return match.group(1)
# The README doubles as the long description shown on PyPI.
with open('README.rst') as readme:
    documentation = readme.read()

installation_requirements = []
# Setting IGNORE_INCLUDES in the environment skips declaring runtime
# dependencies (e.g. when a build image pre-installs them).
if 'IGNORE_INCLUDES' not in os.environ:
    installation_requirements = ['h5py>=2.2.1', 'numpy>=1.8.1',
                                 'six>=1.9', 'progressbar33>=2.3.1']

setup(name=__pkg_name__.replace("_", "-"),
      author='Oxford Nanopore Technologies, Limited',
      description='Oxford Nanopore Technologies fast5 API software',
      long_description=documentation,
      version=get_version(),
      url='https://github.com/nanoporetech/{}'.format(__pkg_name__),
      install_requires=installation_requirements,
      license='MPL 2.0',
      packages=find_packages(),
      # Console entry points for the single/multi fast5 conversion tools.
      entry_points={'console_scripts': [
          "multi_to_single_fast5={}.conversion_tools.multi_to_single_fast5:main".format(__pkg_name__),
          "single_to_multi_fast5={}.conversion_tools.single_to_multi_fast5:main".format(__pkg_name__)
      ]},
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
          'Natural Language :: English',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'Operating System :: MacOS',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
      ],
      keywords='fast5 nanopore')
| [
"matthew.parker@sheffield.ac.uk"
] | matthew.parker@sheffield.ac.uk |
bcfed8ec3f9751d32f44af9a5318aa9d368658da | b1a75194b53305243ff87b92f3c19c7ccbfe7fae | /conftest.py | 80decbf07c7cd9a2ca2aa7289eba63a81aed4160 | [] | no_license | SNadezhdaI/stepik-auto-tests-course | a15dc1718df38667dcad30afbcf8405bf8698ac7 | 015cd37c4cf863fcb08187b20bedeb6687efbb3f | refs/heads/main | 2023-09-05T00:21:35.563137 | 2021-10-19T20:15:22 | 2021-10-19T20:15:22 | 412,529,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import pytest
from selenium import webdriver
def pytest_addoption(parser):
    """Register the --browser_name command-line option with pytest."""
    parser.addoption(
        '--browser_name',
        action='store',
        default="chrome",
        help="Choose browser: chrome or firefox",
    )
@pytest.fixture(scope="function")
def browser(request):
    """Launch the browser chosen via --browser_name; quit it at teardown.

    Fix: removed the dead `browser = None` pre-initialization — every path
    either assigns a driver or raises before the yield.
    """
    browser_name = request.config.getoption("browser_name")
    if browser_name == "chrome":
        print("\nstart chrome browser for test..")
        driver = webdriver.Chrome()
    elif browser_name == "firefox":
        print("\nstart firefox browser for test..")
        driver = webdriver.Firefox()
    else:
        raise pytest.UsageError("--browser_name should be chrome or firefox")
    yield driver
    # Teardown runs after the test body finishes (pass or fail).
    print("\nquit browser..")
    driver.quit()
"79163793388@yandex.ru"
] | 79163793388@yandex.ru |
e7591c29d28eb94dede0687778c05ae5ebba9be1 | b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6 | /verp/hr/doctype/department/department.py | 78df3a770042e793040a9911f00f7b77bfe97d92 | [] | no_license | vsadminpk18/verpfinalversion | 7148a64fe6134e2a6371470aceb1b57cc4b5a559 | 93d164b370ad9ca0dd5cda0053082dc3abbd20da | refs/heads/master | 2023-07-13T04:11:59.211046 | 2021-08-27T06:26:48 | 2021-08-27T06:26:48 | 400,410,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of
from verp.utilities.transaction_base import delete_events
from frappe.model.document import Document
class Department(NestedSet):
    """HR Department doctype, stored as a nested-set tree keyed on
    `parent_department`."""
    nsm_parent_field = 'parent_department'

    def autoname(self):
        """Name non-root departments "<name> - <company abbr>"; the root
        keeps its plain name."""
        root = get_root_of("Department")
        if root and self.department_name != root:
            self.name = get_abbreviated_name(self.department_name, self.company)
        else:
            self.name = self.department_name

    def validate(self):
        """Default the parent to the tree root when none was given."""
        if not self.parent_department:
            root = get_root_of("Department")
            if root:
                self.parent_department = root

    def before_rename(self, old, new, merge=False):
        # renaming consistency with abbreviation: re-append the company
        # abbreviation if the new name does not already contain it.
        if not frappe.get_cached_value('Company', self.company, 'abbr') in new:
            new = get_abbreviated_name(new, self.company)
        return new

    def on_update(self):
        # Skip the expensive nested-set rebuild when a bulk operation has
        # set the ignore flag.
        if not frappe.local.flags.ignore_update_nsm:
            super(Department, self).on_update()

    def on_trash(self):
        """Also remove calendar events linked to this department."""
        super(Department, self).on_trash()
        delete_events(self.doctype, self.name)
def on_doctype_update():
    """Index the nested-set columns for faster tree queries."""
    frappe.db.add_index("Department", ["lft", "rgt"])
def get_abbreviated_name(name, company):
    """Return "<name> - <company abbreviation>" for the given company."""
    abbr = frappe.get_cached_value('Company', company, 'abbr')
    new_name = '{0} - {1}'.format(name, abbr)
    return new_name
@frappe.whitelist()
def get_children(doctype, parent=None, company=None, is_root=False):
    """Return child nodes for the Desk tree view.

    When `parent` equals `company` the tree root itself is returned;
    otherwise the children of `parent` (optionally restricted to `company`).
    `is_root` is accepted for the tree-view API signature but unused here.
    """
    condition = ''
    var_dict = {
        "name": get_root_of("Department"),
        "parent": parent,
        "company": company,
    }
    if company == parent:
        condition = "name=%(name)s"
    elif company:
        condition = "parent_department=%(parent)s and company=%(company)s"
    else:
        condition = "parent_department = %(parent)s"
    # Only the WHERE fragment is formatted in; all user-supplied values go
    # through the parameter dict.
    return frappe.db.sql("""
        select
            name as value,
            is_group as expandable
        from `tab{doctype}`
        where
            {condition}
        order by name""".format(doctype=doctype, condition=condition), var_dict, as_dict=1)
@frappe.whitelist()
def add_node():
    """Create a Department from the tree-view's form_dict arguments."""
    from frappe.desk.treeview import make_tree_args
    args = frappe.form_dict
    args = make_tree_args(**args)
    # A node added directly under the company is a root: no parent.
    if args.parent_department == args.company:
        args.parent_department = None
    frappe.get_doc(args).insert()
| [
"admin@vespersolutions.tech"
] | admin@vespersolutions.tech |
212ae839fc4995842e57d2a227c3fc5d77dc51fb | 8a58b02b1dfc97bf56a5fd94732316c032e24a70 | /api/tests.py | d76ab163735695925faa78e7a7a3345bf8ab58bb | [] | no_license | momentum-team-2/example--django-recipebook | ab04d4957268ed8251e84d8a09cfc60a138c9d9f | 4a4e17c396fcc9f4c648cea494c4ae6d5dc5e570 | refs/heads/main | 2022-11-28T13:40:13.301591 | 2020-08-05T14:09:55 | 2020-08-05T14:09:55 | 279,464,956 | 0 | 0 | null | 2023-09-04T18:58:14 | 2020-07-14T02:50:58 | Python | UTF-8 | Python | false | false | 762 | py | from django.test import TestCase
from rest_framework.test import APIClient
from users.models import User
from rest_framework.authtoken.models import Token
# Create your tests here.
class RecipesAPITestCase(TestCase):
    """API-level tests for recipe creation."""

    def test_user_is_added_to_recipe_on_creation(self):
        """A recipe POSTed with a token is attributed to that token's user."""
        user = User.objects.create(username="test")
        token = Token.objects.filter(user=user).first()

        api_client = APIClient()
        api_client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
        payload = {"title": "Test Recipe", "ingredients": [], "steps": []}
        response = api_client.post("/api/recipes/", payload, format="json")

        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data["user"], user.username)
"clinton@dreisbach.us"
] | clinton@dreisbach.us |
718559c2ac4ab854f51d624b912324dcf7fe2be7 | 20b76d0a9a2d31ec929ffcdb082931201b58361f | /homework/2020-09-20/2020-09-20-杨婷婷.py | 46f6234d029e6c75645919444ff24e57147ec43e | [] | no_license | yangtingting123456/interfaceiframe | 3a6ff3f386cb98dcf7849ea3ab52a8ce93c6d306 | 12fc9ec2366f220a5cb1ce51c3a6a9ad7316316e | refs/heads/master | 2023-01-02T00:13:53.878122 | 2020-10-26T06:08:16 | 2020-10-26T06:08:16 | 306,569,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # 1、charles 的 三种过滤方式操作截图做成文档
#见charles四种过滤数据文档
# 2、charles 抓取 论坛 注册 、发帖数据,截图抓到了请求即可
# 3、requests 编写脚本 实现获取access_token、增加标签接口、实现查询标签接口、实现删除标签接口
# 用的公司项目做的,登录(获取token,密码md5加密)-获取用户列表-用户更新,详情,-退出等;
# 4、requests 模拟 https://www.qq.com的请求,用re模块截取出
# <meta name="description" content="(.+?)" />中的content内容
# import requests
# import re
#
# response = requests.get(url='https://www.qq.com' )
# body = response.content.decode('gbk')
# # print(body)
# content = re.findall(' <meta name="description" content="(.+?)" /> ',body)
# print(content)
import re
import requests

# Fetch the Tencent QQ front page; the page is GBK-encoded.
response = requests.get(url='https://www.qq.com')
body = response.content.decode('gbk')
# print(body)
# Extract the content attribute of the <meta name="description"> tag.
con = re.findall(' name="description" content="(.+?)"',body)
print( con )
"3048903923@qq.com"
] | 3048903923@qq.com |
63bf7b2d11cc8e21ef72573a2d0886feb4b3e5c1 | 9fef12768592678e9d6a38852558dcfdf40671c0 | /realtors/migrations/0003_auto_20210304_1535.py | 88991c47246cd561f9f346895378a335db46a74a | [] | no_license | mnjayswal84/btre_project | 6366bc600f30ac91110621920cfa377103416dae | 7a416fe80d25c05adcf5f147a30b77a29a1ee00b | refs/heads/master | 2023-03-21T12:45:22.285686 | 2021-03-04T11:13:54 | 2021-03-04T11:13:54 | 333,825,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | # Generated by Django 3.1.5 on 2021-03-04 10:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: dated photo upload path and a one-to-one
    user link (related_name='profile') on the Realtor model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('realtors', '0002_auto_20210203_1202'),
    ]

    operations = [
        migrations.AlterField(
            model_name='realtor',
            name='photo',
            field=models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/'),
        ),
        migrations.AlterField(
            model_name='realtor',
            name='user_id',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"mihir.j@simformsolutions.com"
] | mihir.j@simformsolutions.com |
87963d9f79ea0bacba2f103ea07f3d7b5fa68329 | 811b3ad8cf06ad6c196a3c3d0437027dc12bb65a | /test/first_project/first_app/views.py | 4b4ab683ae5e6d2265ebf4922822bbef7539a310 | [
"MIT"
] | permissive | imsks/Web-Dev-with-Django | b9de3ceb904452e8a63eb3d15943dea9aa4a9cce | dec87795d18c24a5492758202c29a463bbba105a | refs/heads/master | 2020-05-26T01:43:46.164923 | 2019-08-11T04:14:57 | 2019-08-11T04:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.shortcuts import render
from first_app.forms import NewUserForm
# Create your views here.
def index(request):
    """Render the application's landing page."""
    return render(request, 'first_app/index.html')
def users(request):
    """Display the new-user form; create the user on a valid POST.

    Fix: invalid submissions previously printed only a bare "Error",
    hiding the actual validation problems; the form errors are now
    printed alongside it (the bound form is still re-rendered so the
    template can show them too).
    """
    form = NewUserForm()
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            # Hand the user back to the index page after registration.
            return index(request)
        else:
            print("Error", form.errors)
    return render(request, 'first_app/users.html', {'form': form})
"sachinkshuklaoo7@gmail.com"
] | sachinkshuklaoo7@gmail.com |
d8782e2c28f1dd51ec2e5bc836156abd30f05050 | d8080248b1cd1419fa92f4d72fbe24b1197f91d1 | /getcode.py | 118bdfb7072df7555e7f0c3cc68eb0ec4dd533d2 | [] | no_license | wblyy/Get_Verification_code | 23fcb981b8fd2058156c6fba7746b2b7935d6258 | c760c2e6a0e7199411c38b5498f045bc0d07ecc5 | refs/heads/master | 2020-05-18T17:20:22.280325 | 2014-12-04T09:33:43 | 2014-12-04T09:33:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | #coding=utf-8
import requests
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
post_verifycode_url = "http://54.223.153.21:9000/"
def post_verifycode(filepath, code=None):
    """Upload a captcha image for recognition and return the raw response.

    Args:
        filepath: path to the captcha image file.
        code: recognition type, 'en' (latin) or 'cn' (chinese).

    Fixes: `except Exception, e` is Python-3-invalid syntax (and the bare
    re-raise made the try/except pointless); the image file handle was
    never closed; the default code=None died with a bare KeyError.
    """
    codetype = {
        "en": "1004",
        "cn": "2004",
    }
    if code not in codetype:
        raise ValueError("code must be one of %s" % sorted(codetype))
    with open(filepath, 'rb') as image_fh:
        r = requests.post(post_verifycode_url + "baike_code?type=" + codetype[code],
                          files={'code.png': image_fh}, timeout=60)
    return r.content
# Login credentials / form fields posted to uuwise.com.
data={
    'userName':"sonidigg",
    'passWord':"sonidigg",
    'verifycode':"e0LD",
    'loginStatus':"userLogin",
    'ischecked':"0"
}
s = requests.session()
# Fetch a captcha image first — this also establishes session cookies.
login=s.get('http://www.uuwise.com/Common/Captcha.aspx?t=1417684633627')
s.post(url='http://www.uuwise.com/Common/AjaxLogin.aspx',data=data)
# Page 5 of the recognition history for 2014-12-03.
r=s.get('http://www.uuwise.com/User/History.aspx?Action=&d=2014-12-03&page=5')
r.encoding='utf-8'
# NOTE(review): Python 2 print statement — this module is Python 2 code.
print r.text
if __name__ == "__main__":
| [
"wblyy0911@gmail.com"
] | wblyy0911@gmail.com |
3326bb1fb5cbac4d64d7c676e5314aa82ee4732b | 90596c0ea387748e300aa01753231ab703e9393f | /events/urls.py | c3d0824eb33ddf598f0fb59095d69978df492ef8 | [] | no_license | brunoasilv/StreetEvent | 7931dc537e1bca6d263b2e278bcf112ac9618b8e | e1f1b6f3dcee0a6d14a137d89e6cdcc316d3d56c | refs/heads/master | 2022-11-26T02:28:42.359818 | 2020-07-31T14:48:03 | 2020-07-31T14:48:03 | 284,059,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from django.urls import include, path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('createOccurrence/', views.createOccurrence, name='createOccurrence'),
path('createOccurrence/register', views.registerOccurrence, name='registerOccurrence'),
] | [
"brunos@ua.pt"
] | brunos@ua.pt |
cfd74d1c0517e174874f8f15b351bd7482fe723f | 8c2844290d933a0560ee6b50143b7bb4d0d93f1b | /midgard/parsers/bernese_compar_out.py | 527ada7ff0238e472573f4b1458f6c1db6f0593a | [
"MIT"
] | permissive | kartverket/midgard | 33ec346829b988b364ee43ca8392f3960a6f5b9a | 31939afee943273b23fa0a5ef193cfecfa68d6c0 | refs/heads/master | 2023-07-20T01:32:37.045520 | 2023-06-06T10:25:52 | 2023-06-06T10:25:52 | 131,258,554 | 18 | 10 | MIT | 2023-05-22T10:35:21 | 2018-04-27T07:00:45 | Python | UTF-8 | Python | false | false | 21,943 | py | """A parser for reading coordinate comparison in Bernese OUT format
Example:
--------
from midgard import parsers
p = parsers.parse_file(parser_name='bernese_compar_out', file_path='COMP211670.OUT')
data = p.as_dict()
Description:
------------
Reads coordinate comparison data from files in OUT format
"""
# Standard library imports
from datetime import datetime
import itertools
from typing import Any, Dict, Iterable, Tuple
# Midgard imports
from midgard.data import dataset
from midgard.dev import log
from midgard.dev import plugins
from midgard.math.unit import Unit
from midgard.parsers import ChainParser, ParserDef
@plugins.register
class BerneseComparOutParser(ChainParser):
"""A parser for reading coordinate comparison in Bernese OUT format
The parsed data are saved in variable **data** as a dictionay with 4-digit station name as key. The station
related data are saved in a dictionary with following keys:
| Key | Type |Description |
|-----------------------|-------------|----------------------------------------------------------------------|
| coord_comp_east | List[float] | List with daily station coordinate comparison results for East |
| | | component in [m] |
| coord_comp_north | List[float] | List with daily station coordinate comparison results for North |
| | | component in [m] |
| coord_comp_up | List[float] | List with daily station coordinate comparison results for Up |
| | | component in [m] |
| coord_comp_rms_east | float | List with daily station coordinate comparison results for East |
| | | component in [m] |
| coord_comp_rms_north | float | List with daily station coordinate comparison results for North |
| | | component in [m] |
| coord_comp_rms_up | float | List with daily station coordinate comparison results for Up |
| | | component in [m] |
| pos_mean_x | float | X-coordinate of mean station coordinate position in [m] |
| pos_mean_x_rms1 | float | RMS1 of X-coordinate of mean station coordinate position in [m] |
| pos_mean_x_rms2 | float | RMS2 of X-coordinate of mean station coordinate position in [m] |
| pos_mean_y | float | Y-coordinate of mean station coordinate position in [m] |
| pos_mean_y_rms1 | float | RMS1 of Y-coordinate of mean station coordinate position in [m] |
| pos_mean_y_rms2 | float | RMS2 of Y-coordinate of mean station coordinate position in [m] |
| pos_mean_z | float | Z-coordinate of mean station coordinate position in [m] |
| pos_mean_z_rms1 | float | RMS1 of Z-coordinate of mean station coordinate position in [m] |
| pos_mean_z_rms2 | float | RMS2 of Z-coordinate of mean station coordinate position in [m] |
and **meta**-data:
| Key | Description |
|----------------------|--------------------------------------------------------------------------------------|
| num_coord_files | Number of coordinate files used for analysis |
| time | Date of analysis session |
| \__data_path__ | File path |
| \__parser_name__ | Parser name |
"""
def __init__(self, *args: Tuple[Any], **kwargs: Dict[Any, Any]):
    """Initialize the parser.

    Args:
        args: Parameters without keyword.
        kwargs: Keyword arguments.
    """
    super().__init__(*args, **kwargs)
    self.fields = list()  # Save field names, which are read. Needed by generating of dataset.
def setup_parser(self) -> Iterable[ParserDef]:
    """Set up information needed for the parser

    This should return a dictionary with all parameters needed by np.genfromtxt to do the actual parsing.

    Returns:
        Dict: Parameters needed by np.genfromtxt to parse the input file.
    """
    # Skip lines until 'Verification of fiducial stations' section
    skip_lines_parser = ParserDef(
        end_marker=lambda line, _ln, _n: (
            "LIST OF COORDINATE FILES" in line  # Start of num_coord_files_parser
            or "COMPARISON OF COORDINATES" in line  # Start of coord_comparison_parser
        ),
        label= lambda line, _ln: line,
        parser_def = {},
    )

    # Parse the session date from the file header, e.g.:
    #
    #----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
    #
    # ================================================================================
    # Bernese GNSS Software, Version 5.2
    # --------------------------------------------------------------------------------
    # Program : COMPAR
    # Purpose : Coordinate comparison
    # --------------------------------------------------------------------------------
    # Campaign : ${P}/PGS
    # Default session: 1690 year 2021
    # Date : 19-Jun-2021 13:13:07
    # User name : satref
    # ================================================================================
    #
    time_parser = ParserDef(
        end_marker=lambda line, _ln, _n: line.strip().startswith("User name"),
        label=lambda line, _ln: line.strip().startswith("Default session"),
        parser_def={
            True: {
                "parser": self._parse_time,
                "fields": {
                    "time": (0, None),
                },
            },
        },
    )

    # Parse the number of coordinate files, e.g.:
    #
    #----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
    #
    # -------------------------------------------------------------------------------------------
    # File Coordinate files
    # -------------------------------------------------------------------------------------------
    # 1 ${P}/PGS/STA/F1_211600.CRD
    # 2 ${P}/PGS/STA/F1_211610.CRD
    # 3 ${P}/PGS/STA/F1_211620.CRD
    # 4 ${P}/PGS/STA/F1_211630.CRD
    # 5 ${P}/PGS/STA/F1_211640.CRD
    # 6 ${P}/PGS/STA/F1_211650.CRD
    # 7 ${P}/PGS/STA/F1_211660.CRD
    # -------------------------------------------------------------------------------------------
    #
    #
    # -------------------------------------------------------------------------------------------
    # LIST OF COORDINATE FILES
    # -------------------------------------------------------------------------------------------
    #
    # NUMBER OF COORDINATE FILES: 7
    num_coord_files_parser = ParserDef(
        end_marker=lambda line, _ln, _n: line.startswith("1"),
        label=lambda line, _ln: line.strip().startswith("NUMBER OF COORDINATE FILES"),
        parser_def={
            True: {
                "parser": self._parse_num_coord_files,
                "strip": "",
                "fields": {
                    "num_coord_files": (0, None),
                },
            },
        },
    )

    # Parse per-station daily N/E/U comparison rows, e.g.:
    #
    #----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
    #
    # COMPARISON OF COORDINATES (IN NORTH, EAST, AND HEIGHT COMPONENT)
    # EPOCH FOR COMPARISON: AS IN COORDINATE FI
    # RMS: UNWEIGHTED RMS OF THE ESTIMATION OF ONE COORDINATE COMPONENT IN MM
    # ---------------------------------------------------------------------------------
    #
    # NUM STATION #FIL C RMS 1 2 3 4 5 6 7
    # ---------------------------------------------------------------------------------
    # 2 AASC 7 N 1.21 -1.85 -0.41 0.90 -1.27 0.67 1.39 0.56
    # E 0.98 1.50 -1.02 0.47 0.51 -0.78 -1.11 0.42
    # U 3.09 0.54 5.33 -0.23 -3.92 0.64 -3.41 1.04
    #
    # 3 ADAC 7 N 1.76 -1.80 -1.47 0.88 3.25 0.60 -1.24 -0.23
    # E 0.82 0.02 -0.20 0.65 -0.84 1.47 -0.37 -0.74
    # U 9.21 -1.14 5.65 17.49 -0.76 -9.54 -3.61 -8.09
    #
    # 72 ENON 5 N 5.03 -7.11 -1.71 -0.84 5.30 4.37
    # E 1.85 0.78 0.75 2.13 -2.61 -1.06
    # U 6.34 8.82 2.17 1.37 -6.60 -5.76
    #
    # 33 BIRK 1 N 0.00 0.00
    # E 0.00 0.00
    # U 0.00 0.00
    coord_comparison_parser = ParserDef(
        end_marker=lambda line, _ln, _n: line.startswith("1"),
        label=lambda line, _ln: (
            len(line) > 27  # Length of line has to be larger than 27 characters
            and line[27] in ["N", "E", "U"]  # Coordinate flag ('N', 'E' or 'U')
            and line[31].isnumeric()  # RMS
        ),
        parser_def={
            True: {
                "parser": self._parse_coord_comparison,
                "strip": "",
                "fields": {
                    "station": (6, 10),
                    "num_files": (11, 25),
                    "flag_coord": (26, 28),
                    "rms": (29, 35),
                    "values": (36, None),
                },
            },
        },
    )

    # Parse mean geocentric X/Y/Z coordinate rows, e.g.:
    #
    #----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2----
    #
    # MEAN VALUES OF GEOCENTRIC X,Y,Z - COORDINATES
    # RMS1: RMS OF UNWEIGHTED AVERAGE OF EACH COORDINATE COMPONENT
    # RMS2: FORMAL ACCURACY OF EACH COORDINATE COMPONENT FROM COMBINED SOLUTION USING EQUAL WEIGHTS
    # ----------------------------------------------------------------------------------------------------------------------------
    #
    # NUM STATION #FIL FLG X (M) RMS1 RMS2 Y (M) RMS1 RMS2 Z (M) RMS1 RMS2
    # ----------------------------------------------------------------------------------------------------------------------------
    #
    # 2 AASC 7 M 3172870.21703 0.00072 0.00144 604208.68041 0.00041 0.00144 5481574.63290 0.00101 0.00144
    # 3 ADAC 7 M 1916240.20525 0.00114 0.00144 963577.13113 0.00066 0.00144 5986596.69558 0.00330 0.00144
    coord_mean_parser = ParserDef(
        end_marker=lambda line, _ln, _n: line.startswith(">>> CPU"),
        label=lambda line, _ln: (
            len(line) > 100  # Length of line has to be larger than 100 characters
            and line[0:4].strip().isnumeric()  # 1st column is a number
            and line[6:10].isalnum()  # Station name
        ),
        parser_def={
            True: {
                "parser": self._parse_line,
                "fields": {
                    "station": (6, 10),
                    "num_files": (12, 26),
                    "flag": (27, 29),
                    "pos_mean_x": (30, 46),
                    "pos_mean_x_rms1": (47, 54),
                    "pos_mean_x_rms2": (55, 62),
                    "pos_mean_y": (62, 77),
                    "pos_mean_y_rms1": (78, 85),
                    "pos_mean_y_rms2": (86, 93),
                    "pos_mean_z": (94, 108),
                    "pos_mean_z_rms1": (109, 116),
                    "pos_mean_z_rms2": (117, 124),
                },
            },
        },
    )

    # NOTE: skip_lines_parser appears twice on purpose: it fast-forwards to
    # the next section both before and after the coordinate-file listing.
    return itertools.chain([
        time_parser,
        skip_lines_parser,
        num_coord_files_parser,
        skip_lines_parser,
        coord_comparison_parser,
        coord_mean_parser,
    ])
#
# PARSERS
#
def _parse_coord_comparison(self, line: Dict[str, str], cache: Dict[str, Any]) -> None:
    """Parse station coordinate comparison table

    The station name only appears on the 'N' row of each three-row
    (N/E/U) group, so it is remembered in the parser cache for the
    following 'E' and 'U' rows.
    """
    if line["station"].strip().lower():
        cache["station"] = line["station"].strip().lower()
    station = cache["station"]
    self.data.setdefault(station, dict())

    coord_def = {
        "N": "north",
        "E": "east",
        "U": "up",
    }
    coord_key = coord_def[line['flag_coord'].strip()]

    # RMS column is given in millimeters; store it in meters.
    self.data[station][f"coord_comp_rms_{coord_key}"] = float(line["rms"]) * Unit.millimeter2meter
    if not f"coord_comp_rms_{coord_key}" in self.fields:
        self.fields.append(f"coord_comp_rms_{coord_key}")

    # Parse values line
    #----+----1----+----2----+----3----+----4----+-----
    #   1.21 -1.85 -0.41  0.90 -1.27  0.67  1.39  0.56
    #   5.03 -7.11 -1.71 -0.84  5.30  4.37
    #   0.00  0.00
    if not "num_coord_files" in self.meta:
        log.warn("Number of coordinate files are unknown. Daily comparison values can not be read.")
        return

    # Each daily value occupies a fixed-width 6-character column; pad the
    # line so missing trailing days become blank (NaN) entries.
    len_values = self.meta["num_coord_files"] * 6  # length of line depends on number of files
    line_values = line["values"].ljust(len_values)
    values = [line_values[i:i+6] for i in range(0, len_values, 6)]

    for idx, value in enumerate(values):
        # '******' marks an overflowed field; treat it as infinity.
        value = float("inf") if value.strip() == "******" else value.strip()
        if value:
            values[idx] = float(value) * Unit.millimeter2meter
        else:
            values[idx] = float('nan')

    self.data[station][f"coord_comp_{coord_key}"] = values
    if not f"coord_comp_{coord_key}" in self.fields:
        self.fields.append(f"coord_comp_{coord_key}")
def _parse_time(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
"""Parse date of analysis session
"""
# Example to parse for getting date:
#
# Default session: 1690 year 2021
#
session, year = line["time"].split(":")[1].split("year")
self.meta["time"] = datetime.strptime(session.strip()[:-1] + year.strip(), "%j%Y")
def _parse_line(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
"""Parse line
"""
station = line["station"].lower()
self.data.setdefault(station, dict())
skip_fields = ["flag", "num_files", "station"]
for key, value in line.items():
if key in skip_fields:
continue
if not key in self.fields:
self.fields.append(key)
self.data[station][key] = float(value)
def _parse_num_coord_files(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
"""Parse number of coordinate files
"""
# Example to parse for getting number of coordinate files:
#
# NUMBER OF COORDINATE FILES: 7
#
self.meta["num_coord_files"] = int(line["num_coord_files"].split(":")[1])
#
# GET DATASET
#
def as_dataset(self) -> "Dataset":
    """Return the parsed data as a Dataset

    Returns:
        Midgard Dataset where station coordinates and belonging information are stored with following fields:

       | Field                   | Type          | Description                                                       |
       |-------------------------|---------------|-------------------------------------------------------------------|
       | coord_comp_east_day<x>  | numpy.ndarray | Station coordinate comparison results for East component in [m]   |
       |                         |               | for day X (X=[1|2|...|7])                                         |
       | coord_comp_north_day<x> | numpy.ndarray | Station coordinate comparison results for North component in [m]  |
       |                         |               | for day X (X=[1|2|...|7])                                         |
       | coord_comp_up_day<x>    | numpy.ndarray | Station coordinate comparison results for Up component in [m]     |
       |                         |               | for day X (X=[1|2|...|7])                                         |
       | coord_comp_rms_east     | numpy.ndarray | List with daily station coordinate comparison results for East    |
       |                         |               | component in [m]                                                  |
       | coord_comp_rms_north    | numpy.ndarray | List with daily station coordinate comparison results for North   |
       |                         |               | component in [m]                                                  |
       | coord_comp_rms_up       | numpy.ndarray | List with daily station coordinate comparison results for Up      |
       |                         |               | component in [m]                                                  |
       | pos_mean_x              | numpy.ndarray | X-coordinate of mean station coordinate position in [m]           |
       | pos_mean_x_rms1         | numpy.ndarray | RMS1 of X-coordinate of mean station coordinate position in [m]   |
       | pos_mean_x_rms2         | numpy.ndarray | RMS2 of X-coordinate of mean station coordinate position in [m]   |
       | pos_mean_y              | numpy.ndarray | Y-coordinate of mean station coordinate position in [m]           |
       | pos_mean_y_rms1         | numpy.ndarray | RMS1 of Y-coordinate of mean station coordinate position in [m]   |
       | pos_mean_y_rms2         | numpy.ndarray | RMS2 of Y-coordinate of mean station coordinate position in [m]   |
       | pos_mean_z              | numpy.ndarray | Z-coordinate of mean station coordinate position in [m]           |
       | pos_mean_z_rms1         | numpy.ndarray | RMS1 of Z-coordinate of mean station coordinate position in [m]   |
       | pos_mean_z_rms2         | numpy.ndarray | RMS2 of Z-coordinate of mean station coordinate position in [m]   |
       | station                 | numpy.ndarray | Station names                                                     |
       | time                    | TimeTable     | Date of analysis session                                          |

        and following Dataset `meta` data:

       | Entry               | Type  | Description                                                                    |
       |---------------------|-------|--------------------------------------------------------------------------------|
       | num_coord_files     | int   | Number of coordinate files used for analysis                                   |
       | \__data_path__      | str   | File path                                                                      |
    """
    data = dict()

    # Generate dataset with one observation per station.
    dset = dataset.Dataset(num_obs=len(self.data.keys()))
    dset.meta = self.meta.copy()

    # Remove unnecessary fields in meta
    for key in ["__parser_name__"]:
        del dset.meta[key]

    # Prepare data for adding to dataset
    for sta in sorted(self.data.keys()):
        for field in self.fields:
            # The daily comparison lists are split into one scalar field
            # per day so that the dataset stays rectangular.
            if field in ["coord_comp_east", "coord_comp_north", "coord_comp_up"]:
                for idx in range(0, self.meta["num_coord_files"]):
                    if field in self.data[sta]:
                        data.setdefault(f"{field}_day{idx+1}", list()).append(self.data[sta][field][idx])
                    else:
                        data.setdefault(f"{field}_day{idx+1}", list()).append(float('nan'))
                continue

            if field in self.data[sta]:
                data.setdefault(field, list()).append(self.data[sta][field])
            else:
                # Field does not exist for station 'sta', therefore it is initialized with NaN.
                data.setdefault(field, list()).append(float('nan'))

    # Add fields to dataset
    dset.add_text("station", val=sorted(self.data.keys()))

    for field in data:
        dset.add_float(field, val=data[field], unit="meter")

    # All observations share the single session date parsed from the header.
    dset.add_time(
        "time",
        val=[dset.meta["time"] for ii in range(0, dset.num_obs)],
        scale="utc",
        fmt="datetime",
    )

    return dset
| [
"michael.daehnn@kartverket.no"
] | michael.daehnn@kartverket.no |
8d6dee6211d3b8e0bd8f42cb2ce3ca58cf345e87 | 54bc239124576563c1f0c72e381fb2a4fcaa6a9e | /Adafruit_AD8495_Guide/AD8495_Temperature.py | 4546df8dcb61aa12248110733193b2823c7e335d | [
"MIT"
] | permissive | jonsampson/Adafruit_Learning_System_Guides | 79359154e26e710b088e0c1cbc9969a26a938a25 | b941d8209cec42e3dce5f5e6b533584e3e99ac73 | refs/heads/master | 2020-07-29T17:43:53.439741 | 2019-10-14T01:53:01 | 2019-10-14T01:53:01 | 209,904,940 | 3 | 1 | MIT | 2019-09-21T01:04:35 | 2019-09-21T01:04:34 | null | UTF-8 | Python | false | false | 283 | py | import time
import analogio
import board
ad8495 = analogio.AnalogIn(board.A1)
def get_voltage(pin):
    """Convert a 16-bit ADC reading from ``pin`` into volts (3.3 V reference)."""
    reference_volts = 3.3
    adc_full_scale = 65536
    return pin.value * reference_volts / adc_full_scale
while True:
    # AD8495 output: Vout = 1.25 V + 5 mV/degC * T, so invert to get
    # the temperature in degrees Celsius.
    temperature = (get_voltage(ad8495) - 1.25) / 0.005
    print(temperature)
    print(get_voltage(ad8495))  # also print the raw voltage for debugging
    time.sleep(0.5)  # poll twice per second
| [
"kattni@adafruit.com"
] | kattni@adafruit.com |
a9a475960b93e3db4b1ea17a2d7950f0ea9ce5b7 | 388e6813bd12e42ca39c46d7382249bd7d833bca | /base/094/solver.py | 30087abab74ebaf275778b2c8b55624b4d933b66 | [] | no_license | jardellx/arcade | c592693a49986690ee9758d2e6ff1a90f2d3c0fb | e7ae255d2111c8c32486a4271ac0dcec2702522f | refs/heads/master | 2023-08-14T19:46:51.652562 | 2021-10-06T11:54:46 | 2021-10-06T11:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | def eh_vogal(letra):
vogais = "aeiouAEIOU"
if letra in vogais:
return True
return False
texto = input()
saida = ""
for x in texto:
if x == " ":
saida += " "
elif eh_vogal(x):
saida += "v"
else:
saida += "c"
print (saida) | [
"andreinamendes63@alu.ufc.br"
] | andreinamendes63@alu.ufc.br |
6d8e0b06af1fe6c38ccb5fa6ae767ea297b9a966 | 7c5bdcd556a8454481553611a07b7c63e3a3bb1c | /lambda/TopicRuleIotEventsFunction/TopicRuleIotEventsFunction.py | 81ded0d619031f56fbf5544f0792ed028f339047 | [] | no_license | sangeethsajeev/capstone_project | 080aa6aa50741522eba0d6ba0eebda16d92e2ce1 | fe05108ebcae285a4f44f620130dd12c52a3baa0 | refs/heads/master | 2022-12-27T05:24:26.905815 | 2020-10-12T17:09:05 | 2020-10-12T17:09:05 | 294,651,121 | 1 | 0 | null | 2020-10-12T15:03:25 | 2020-09-11T09:25:31 | Python | UTF-8 | Python | false | false | 1,520 | py | import boto3
import cfnresponse
iotClient = boto3.client('iot')
def createTopicRule(ruleName, topicSql, topicDescription, inputName, roleArn):
response = iotClient.create_topic_rule(
ruleName=ruleName,
topicRulePayload={
'sql': topicSql,
'description': topicDescription,
'actions': [
{
'iotEvents': {
'inputName': inputName,
'roleArn': roleArn
}
},
],
'ruleDisabled': False
}
)
def deleteTopicRule(ruleName):
    """Delete the IoT topic rule with the given name.

    The API response carries no useful payload, so it is not kept (the
    original bound it to an unused local variable).
    """
    iotClient.delete_topic_rule(ruleName=ruleName)
def lambda_handler(event, context):
    """CloudFormation custom-resource handler for an IoT->IoT Events topic rule.

    Deletes the rule on 'Delete' requests; (re)creates it otherwise.
    Always reports SUCCESS/FAILED back via cfnresponse so the stack
    operation does not hang waiting for a reply.
    """
    ruleName = event['ResourceProperties']['RuleName']
    topicSql = event['ResourceProperties']['TopicSql']
    topicDescription = event['ResourceProperties']['TopicDescription']
    inputName = event['ResourceProperties']['InputName']
    roleArn = event['ResourceProperties']['RoleArn']
    if event['RequestType'] == 'Delete':
        try:
            deleteTopicRule(ruleName)
            cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
        except Exception as ex:
            # Log and report failure instead of raising, so CloudFormation
            # receives a response either way.
            print(ex)
            cfnresponse.send(event, context, cfnresponse.FAILED, {})
    else:
        # Create and Update are both handled by (re)creating the rule; the
        # rule name doubles as the physical resource id.
        physicalResourceId = ruleName
        try:
            createTopicRule(ruleName, topicSql, topicDescription, inputName, roleArn)
            response_data = {}
            cfnresponse.send(event, context, cfnresponse.SUCCESS, {}, physicalResourceId)
        except Exception as ex:
            print(ex)
            cfnresponse.send(event, context, cfnresponse.FAILED, {}, physicalResourceId)
"asrece.86@gmail.com"
] | asrece.86@gmail.com |
fcd289284e4efed97c2a82f8d042206f5de93157 | 96e53e30bd01d232f09ae030f34257573b32369c | /frames.py | 958503d74e7f7307dea8ecfe1231f43c48c9b3f2 | [] | no_license | Aster-Iris/menatbot | 6b53be673fadad4aacc90c044c34c515120de200 | 7d1b5c1000351d473b8e279a11dc45465bcf5ded | refs/heads/master | 2023-04-13T10:31:25.948331 | 2023-01-20T12:30:44 | 2023-01-20T12:30:44 | 219,141,774 | 0 | 1 | null | 2023-02-16T04:39:03 | 2019-11-02T11:04:32 | Python | UTF-8 | Python | false | false | 15,070 | py | import re
import os
import json
import discord
from fuzzywuzzy import process
from collections import OrderedDict
from itertools import chain
dir_path = os.path.dirname(os.path.realpath(__file__))
class Frames:
def __init__(self):
    """Load frame data and precompute the regexes/mappings used for matching."""
    # '-v' prefix requests verbose (embed) output.
    self.info_regex = r'^-v'
    # Captures: (1) character name, (2) optional vt1/vt2, (3) move name.
    self.regex = r'(^\S*)\s*(vt1|vt2)?\s+(.+)'
    # Minimum fuzzy-match scores accepted for character and move names.
    self.char_ratio_thresh = 65
    self.move_ratio_thresh = 65
    # Short-form prefixes expanded to the wording used in the data dump.
    self.short_mapping = {
        'cr': 'crouch ',
        'st': 'stand ',
        'jp': 'jump ',
        'c': 'crouch ',
        's': 'stand ',
        'j': 'jump '
    }
    # Regex to capture input that starts in the form "cr.", "cr ", "c."
    # and "c " for cr, st and jp.
    self.short_regex = r'((^cr|^c)(\s|\.))|((^st|^s)(\s|\.))|((^jp|^j)(\s|\.))'
    self.output_format = ('%s - (%s - %s) - [Startup]: %s [Active]: %s [Recovery]: %s '
                          '[On Hit]: %s [On Block]: %s')
    self.stats_format = '%s - [%s] - %s'
    self.knockdown_format = ' [KD Adv]: %s [Quick Rise Adv]: %s [Back Rise Adv]: %s '
    # Maps the user-supplied trigger number to the key in the data dump.
    self.vt_mappings = {'1': 'vtOne', '2': 'vtTwo'}
    # Extra per-move fields shown only in verbose output.
    self.custom_fields = [
        'vtc2DashOnHit', 'runstopOB', 'vtc1OnHit', 'vtc2OnHit',
        'ocOnBlock', 'ssOnHit', 'vscoH', 'vtc1OnBlockD',
        'vtc1GapOnBlock', 'LKorMKDashOH', 'vscoB', 'LKorMKDashOB',
        'ssOnBlock', 'vtcOnBlock', 'lmaoB', 'VSKGapBlock',
        'vtcOnHitD', 'lmaoH', 'vt1dashOB', 'vtc2OnBlock',
        'vtc1OnBlockB', 'vtcOnBlockD', 'vtc1OnBlock', 'hopsOnBlock',
        'VSKGapHit', 'vtc1OnHitB', 'ocOnHit', 'vtc1OnHitF',
        'rollcOnBlock', 'transfOH', 'exDashOB', 'VSPGapHit', 'lkDashOH',
        'vtc1GapOnHit', 'vtc1OnBlockF', 'transfOB', 'lkDashOB',
        'vtcOnHit', 'exDashOH', 'mkDashOB', 'runstopOH', 'vt1dashOH',
        'rollcOnHit', 'vtc1OnHitD', 'hopsOnHit', 'vtcOnHitF',
        'vtcOnBlockB', 'vtcOnHitB', 'vtc2GapOnBlock', 'vtcOnBlockF',
        'vtc2DashOnBlock', 'VSPGapBlock', 'mkDashOH',
        'KnifeReloadOH', 'KnifeReloadOB', 'BeanBallOH', 'BeanBallOB'
    ]
    self.custom_fields.sort()
    # Groups of related character stats returned together for a query.
    self.stats_mapping = {
        'dash': ('bDash', 'fDash', 'bDashDist', 'fDashDist'),
        'walk': ('bWalk', 'fWalk'),
        'jump': ('bJump', 'fJump', 'nJump', 'bJumpDist', 'fJumpDist'),
        'throw': ('throwHurt', 'throwRange')
    }
    self.frame_data = self.get_data()
    self.add_reverse_mapping()
def get_data(self):
    '''
    Grab all the saved moves in a json format.

    Reads frames.json from the directory this module lives in and
    returns the decoded dict.
    '''
    with open(f"{dir_path}/frames.json", "r") as f:
        frame_data = json.load(f)
    return frame_data
def match_move(self, char, move, vt, data):
    '''
    Main helper function that handles matching the move.
    Uses the reverse mapping of the common name, input command
    and short form converter to increase the chances of a better
    match.

    Returns False when either the character or the move cannot be
    matched above the configured fuzzy thresholds; otherwise a
    (character, move, move_data) tuple.
    '''
    # First find the char they want.
    char_match, char_ratio = process.extractOne(char,
                                                data.keys())
    if char_ratio < self.char_ratio_thresh:
        return False

    # They might have supplied the move name in shortened format
    # so convert it to how the frame data dump expects.
    result = re.search(self.short_regex, move)
    if result:
        matched = result.group(0)
        # Slice to the second last char because the matched move might
        # be 'cr. 'or 'cr ' but the mapping only contains cr.
        move = re.sub(
            self.short_regex, self.short_mapping[matched[:-1]], move
        )

    # Use the reverse mapping to determine which move they
    # were looking for.
    moves = data[char_match]['reverse_mapping']
    move_match, move_ratio = process.extractOne(move, moves.keys())
    if move_ratio < self.move_ratio_thresh:
        return False
    move = data[char_match]['reverse_mapping'][move_match]

    # Check if the matched name was a char stat or a move.
    # (Stats are stored as (value, 'char_stat') tuples in the mapping.)
    if 'char_stat' in move:
        return char_match, move_match, move
    else:
        # Find the move they want.
        if vt:
            # The move might not have any difference in vtrigger
            # so just return the normal version.
            try:
                move_data = data[char_match]['moves'][self.vt_mappings[vt]][move]
            except KeyError:
                move_data = data[char_match]['moves']['normal'][move]
        else:
            try:
                move_data = data[char_match]['moves']['normal'][move]
            # Might be a vtrigger only move.
            except KeyError:
                try:
                    move_data = data[char_match]['moves']['vtOne'][move]
                except KeyError:
                    move_data = data[char_match]['moves']['vtTwo'][move]
    return char_match, move, move_data
def format_stats_output(self, char, move, move_data, data, searched_move):
    """Format a character-stat query result.

    If the searched term closely matches one of the stat groups in
    self.stats_mapping (dash/walk/jump/throw), all related stats are
    shown together; otherwise the single matched stat is formatted.
    """
    match, ratio = process.extractOne(
        searched_move, self.stats_mapping.keys()
    )
    if ratio > 85:
        related_fields = {}
        for field in self.stats_mapping[match]:
            try:
                related_fields[field] = data[char]['stats'][field]
            except KeyError:
                # Not every character has every stat; skip missing ones.
                pass
        output = ''.join(
            [' [%s] - %s' % (key, value)
             for key, value in related_fields.items()]
        )
        output = '%s -' % char + output
    else:
        # move_data is a (value, 'char_stat') tuple; index 0 is the value.
        output = self.stats_format % (char, move, move_data[0])
    return output
def escape_chars(self, value):
    """Escape Discord markdown control characters in ``value``.

    Prefixes ``*`` with a backslash so Discord does not render it as
    bold/italic markup. Non-string values (ints, None, ...) have no
    ``replace`` method and are returned unchanged.

    Fix: the replacement used the invalid escape sequence backslash-star
    in a plain string literal, which only works because Python leaves
    unknown escapes intact and raises a SyntaxWarning on modern
    interpreters; a raw string makes the intent explicit.
    """
    try:
        return value.replace('*', r'\*')
    except AttributeError:
        return value
def format_output(self, char, move, vt, move_data, data, searched_move):
    '''
    Formats the msg to a nicely spaced string for
    presentation.

    Character stats are delegated to format_stats_output; real moves are
    rendered with the standard frame-data template, extended with
    knockdown fields when the move causes a knockdown.
    '''
    if 'char_stat' in move_data:
        output = self.format_stats_output(
            char, move, move_data, data, searched_move
        )
    else:
        cmds = [
            'plnCmd', 'startup', 'active', 'recovery', 'onHit',
            'onBlock'
        ]
        msg_format = self.output_format
        # Have to parse knockdown advantage frames if it causes one.
        if 'kd' in move_data and move_data['onHit'] == 'KD':
            msg_format = self.output_format + self.knockdown_format
            cmds.extend(['kd', 'kdr', 'kdrb'])
        moves = [char, move]
        # Missing fields render as '-' so the template always fills.
        moves.extend(
            self.escape_chars(move_data.get(cmd, '-')) for cmd in cmds
        )
        output = msg_format % tuple(moves)
    return output
def format_embeded_message(self, char, move, vt, data):
    """Build a Discord embed for the verbose view of a move.

    Adds the core frame-data fields, the knockdown fields when present,
    and any extraInfo notes as the embed footer.
    """
    em = discord.Embed(
        title='%s' % char,
        description='%s - %s' % (move, data['plnCmd']),
        colour=0x3998C6
    )
    fields = ['startup', 'active', 'recovery', 'onHit', 'onBlock']
    for field in ['kd', 'kdr', 'kdrb']:
        if field in data:
            fields.append(field)
    field_mapping = {
        'startup': 'Startup', 'active': 'Active',
        'recovery': 'Recovery', 'onHit': 'On Hit',
        'onBlock': 'On Block', 'kd': 'Knockdown Adv',
        'kdr': 'Quick Rise Adv', 'kdrb': 'Back Roll Adv'
    }
    for field in fields:
        if field in data:
            em.add_field(
                name=field_mapping[field], value=self.escape_chars(data[field])
            )
    if 'extraInfo' in data:
        # Maybe they messed up the encoding so attemtpt to handle it.
        # (extraInfo may arrive as a JSON string instead of a list.)
        if type(data['extraInfo']) == str:
            data['extraInfo'] = json.loads(data['extraInfo'])
        em.set_footer(text=', '.join(data['extraInfo']))
    return em
def add_custom_fields(self, data, text_output, embed_output):
    """Append the move's custom fields to both the text and embed outputs.

    Returns the (text_output, embed_output) pair with the extra fields
    appended; embed_output is mutated in place as well.
    """
    # Use an ordered dict here because we want to display stats in
    # the order we defined them.
    custom_fields = OrderedDict()
    for field in self.custom_fields:
        if field in data:
            custom_fields[field] = self.escape_chars(data[field])
    text_output = text_output + (
        ''.join(
            [' [%s]: %s' % (key, value)
             for key, value in custom_fields.items()]
        )
    )
    if 'extraInfo' in data:
        # extraInfo may arrive as a JSON string instead of a list.
        if type(data['extraInfo']) == str:
            data['extraInfo'] = json.loads(data['extraInfo'])
        info = ' ```%s``` ' % ', '.join(data['extraInfo'])
        text_output = text_output + info
    for field, value in custom_fields.items():
        embed_output.add_field(name=field, value=value)
    return text_output, embed_output
def get_frames(self, msg, user, *args, **kwargs):
    '''
    Main method thats called for the frame data function.
    Currently works only for SFV data thanks to Pauls nicely
    formatted data <3.

    Returns a single-element list containing either the formatted
    frame-data string, an error message, or (in verbose mode) a
    (text, embed) tuple.
    '''
    # Check if they want verbose output.
    verbose = False
    info_result = re.search(self.info_regex, msg)
    if info_result:
        verbose = True
        msg = re.sub(self.info_regex, '', msg).strip()
    result = re.search(self.regex, msg)
    if not result:
        return [("You've passed me an incorrect format %s. "
                 "The correct format is !frames character_name "
                 "[vt1/vt2] move_name") % user]
    char_name = result.group(1)
    move_name = result.group(3)
    if result.group(2):
        # If either of the vtriggers matched, then we will
        # pass the number of the matched one.
        vtrigger = result.group(2)[-1]
    else:
        vtrigger = False

    frame_data = self.get_data()
    if not frame_data:
        return ['Got an error when trying to get frame data :(.']

    matched_value = self.match_move(char_name, move_name,
                                    vtrigger, self.frame_data)
    if not matched_value:
        return [("Don't waste my time %s. %s with %s is not a valid "
                 "character/move combination for SFV.") % (user,
                                                           char_name,
                                                           move_name)]
    else:
        char, move, data = matched_value
        text_output = self.format_output(
            char, move, vtrigger, data, frame_data, move_name
        )
        # Character stats have no verbose/embed representation.
        if verbose and 'char_stat' not in data:
            embed_output = self.format_embeded_message(
                char, move, vtrigger, data
            )
            return [self.add_custom_fields(data, text_output, embed_output)]
        else:
            return [text_output]
def add_reverse_mapping(self):
    '''
    Create a reverse mapping between common names,
    move command and the actual name of the moves.
    Increases the time on the first queury but the result
    is cached for subsequent ones.

    Mutates self.frame_data in place: lower-cases all move keys and
    attaches a per-character 'reverse_mapping' dict that maps common
    names, plain commands, numpad notation, official names and stats
    back to canonical move names.
    '''
    common_name_dict = {}
    numpad_dict = {}
    commands_dict = {}
    v_triggers = ['vtTwo', 'vtOne']
    data = self.frame_data
    for char in data.keys():
        char_moves = {}
        # Its possible that the vtrigger moves even with the
        # same name are lowercased. To avoid duplication, we
        # enforce that all the moves are lower cased.
        moves = list(data[char]['moves']['normal'].keys())
        for m in moves:
            v = data[char]['moves']['normal'][m]
            char_moves[m.lower()] = v
            data[char]['moves']['normal'].pop(m)
            data[char]['moves']['normal'][m.lower()] = v
        vt_moves = {}
        for v_trigger in v_triggers:
            v_moves = list(data[char]['moves'][v_trigger].keys())
            for vt_move in v_moves:
                v = data[char]['moves'][v_trigger][vt_move]
                vt_moves[vt_move.lower()] = v
                data[char]['moves'][v_trigger][vt_move.lower()] = v
                data[char]['moves'][v_trigger].pop(vt_move)
        # Moves that exist only in a v-trigger state.
        vt_only_moves = set(vt_moves) - set(char_moves)
        for move in chain(char_moves.keys(), vt_only_moves):
            if move == 'undefined':
                continue
            # Add the common name of the move to the dict.
            try:
                common_name = char_moves[move]['cmnCmd']
                common_name_dict[common_name] = move
            # Some moves dont have common name so just pass.
            except KeyError:
                pass
            try:
                command = char_moves[move]['plnCmd']
            except KeyError:
                command = vt_moves[move]['plnCmd']
            # Add the numpad notation
            try:
                numpad_dict[str(char_moves[move]['numCmd'])] = move
            except KeyError:
                pass
            # Wierd edge case where a vt only move has the
            # same plain command. In this case don't overwrite
            # the already existing normal command. Depends on
            # the iteration order being normal moves -> vt moves.
            if command in commands_dict:
                continue
            commands_dict[command] = move
        common_name_dict.update(commands_dict)
        common_name_dict.update(numpad_dict)
        data[char]['reverse_mapping'] = common_name_dict
        # Also add a set of keys/values with official name
        offical_names = dict(zip(char_moves.keys(), char_moves.keys()))
        data[char]['reverse_mapping'].update(offical_names)
        # Update the reverse mapping with vtrigger only moves.
        data[char]['reverse_mapping'].update(
            dict(zip(vt_only_moves, vt_only_moves))
        )
        # Add the stats of the char to the mapping as well. The extra value
        # 'char_stat' is added to later determine if the matched move is a
        # stat or not.
        stats_mapping = {stat: (value, 'char_stat')
                         for stat, value in data[char]['stats'].items()}
        data[char]['reverse_mapping'].update(stats_mapping)
        # Reset the accumulators for the next character.
        common_name_dict = {}
        commands_dict = {}
        numpad_dict = {}
    self.frame_data = data
"nerekmichal@gmail.com"
] | nerekmichal@gmail.com |
18b214c4cf04361391d182819b1fe59372391e3d | 7e133f923cfce156a523d868eefcc6ca2a1fa4ab | /1 Sem/Python/Британские учёные.py | 7fd3b10fe573f3bcb9e7193caf51276e6d59acca | [] | no_license | krimmkr/MIPT-Programming | 78fb9853dbfab2ed6aef423e0bb4fb2d4c9e2500 | b2f6d37a031f0d47980870f52e7dff592cf15ae2 | refs/heads/master | 2020-03-18T15:07:19.177035 | 2018-05-28T19:36:17 | 2018-05-28T19:36:17 | 134,888,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from random import shuffle
text = input().split(' ')
for i in text:
temp = list(i)
shuffle(temp[1:-1])
print(text, sep=' ') | [
"noreply@github.com"
] | krimmkr.noreply@github.com |
f2e13fa61c33372dc14195d71ce1621f73418304 | a8fc4173c729a48c7d20b150fd067d4d8a1f3846 | /app1/models.py | 4cdb4e4b7277e9b3ae72434a996dd16a87360cf2 | [] | no_license | su6838354/ocp | 313dbfd1fd43bd0d51fcd0a220f2481a489d4cc8 | 1dc98bc7e87f642e2f6d8274419ddaf846b21931 | refs/heads/master | 2020-04-05T09:55:47.311188 | 2017-08-16T14:15:22 | 2017-08-16T14:15:22 | 81,633,748 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,301 | py | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class _User(models.Model):
    """Account/credential table mirrored into the Django ORM.

    Rows are constructed from dict-like records via :meth:`build`;
    presumably exported from an external user service — TODO confirm.
    """
    objectId = models.CharField('主键', max_length=30, primary_key=True)
    salt = models.CharField(max_length=100, default='', null=True)
    email = models.CharField(max_length=100, default='', null=True)
    sessionToken = models.CharField(max_length=100, default='', null=True)
    password = models.CharField(max_length=200, default='', null=True)
    username = models.CharField(max_length=30, default='', null=True)
    emailVerified = models.BooleanField(default=False)
    mobilePhoneNumber = models.CharField(max_length=30, default='', null=True)
    authData = models.CharField(max_length=100, default='', null=True)
    userRole = models.CharField(max_length=20, default='', null=True)
    mobilePhoneVerified = models.BooleanField(default=False)
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)

    @staticmethod
    def build(_user):
        """Build a ``_User`` instance from a dict-like record (not saved)."""
        _user = _User(objectId=_user.get('objectId'),
                      salt=_user.get('salt', ''),
                      email=_user.get('email', ''),
                      sessionToken=_user.get('sessionToken', ''),
                      password=_user.get('password', ''),
                      username=_user.get('username', ''),
                      emailVerified=_user.get('emailVerified', False),
                      mobilePhoneNumber=_user.get('mobilePhoneNumber', ''),
                      authData=_user.get('authData', ''),
                      userRole=_user.get('userRole', ''),
                      mobilePhoneVerified=_user.get('mobilePhoneVerified', False),
                      createdAt=_user.get('createdAt'),
                      updatedAt=_user.get('updatedAt')
                      )
        return _user
"""用户表"""
class Users(models.Model):
    """User profile table, keyed by ``pid`` (the id of the matching ``_User`` row)."""
    objectId = models.CharField('主键id', max_length=30, unique=True)
    address = models.CharField('地址', max_length=300, default='', null=True)
    group = models.ForeignKey('Admins', related_name='user_group', null=True)
    realname = models.CharField('真实姓名', max_length=50, default='', null=True)
    sex = models.CharField('性别', max_length=10, null=True)
    idcard = models.CharField('身份证号', max_length=30, default='', null=True)
    username = models.CharField('用户名', max_length=100, default='', null=True)
    checkin = models.CharField('bool,签到日期用,隔开', max_length=300, null=True)
    checkin2016 = models.CharField('bool,2016签到日期用,隔开', max_length=300, null=True)
    pid = models.CharField('主键对应到_user中', max_length=30, primary_key=True)
    political = models.CharField('党组织身份', max_length=50, default='', null=True)
    isShow = models.CharField(max_length=10, default='', null=True)
    mobile = models.CharField('手机号码', max_length=30, default='', null=True)
    location = models.ForeignKey('Admins', related_name='user_location', null=True)
    flagNumber = models.CharField('flagNumber', max_length=20, default='', null=True)
    birth = models.DateTimeField('生日,年月日', null=True)
    job = models.CharField('职业', max_length=300, default='', null=True)
    createdAt = models.DateTimeField('创建时间', null=True)
    updatedAt = models.DateTimeField('更新时间', null=True)
    isDelete = models.IntegerField(default=0)

    @staticmethod
    def build(user):
        """Build a ``Users`` instance from a dict-like record (not saved).

        Fix: the original mapping silently dropped ``checkin2016`` even
        though the model declares that column; it is now carried over.
        """
        user = Users(objectId=user.get('objectId'),
                     address=user.get('address'),
                     group=user.get('group'),
                     realname=user.get('realname'),
                     sex=user.get('sex'),
                     idcard=user.get('idcard'),
                     username=user.get('username'),
                     checkin=user.get('checkin'),
                     checkin2016=user.get('checkin2016'),
                     pid=user.get('pid'),
                     political=user.get('political'),
                     isShow=user.get('isShow'),
                     mobile=user.get('mobile'),
                     location=user.get('location'),
                     flagNumber=user.get('flagNumber'),
                     birth=user.get('birth'),
                     job=user.get('job'),
                     createdAt=user.get('createdAt'),
                     updatedAt=user.get('updatedAt'),
                     isDelete=user.get('isDelete', 0)
                     )
        return user
"""单位或者社区表"""
class Admins(models.Model):
    """Organization/community table ('单位或者社区'), keyed by ``pid``.

    ``type`` distinguishes the two kinds of rows (单位 vs 社区);
    ``parentId`` links a row to its parent organization.
    """
    objectId = models.CharField('id', max_length=30, unique=True)
    address = models.CharField('地址', max_length=300, default='', null=True)
    person = models.CharField('管理员', max_length=30, default='', null=True)
    pwd = models.CharField('密码', max_length=20, default='', null=True)
    name = models.CharField('单位组织名称', max_length=200, default='', null=True)
    username = models.CharField('用户名', max_length=100, default='', null=True)
    type = models.CharField('类别,单位or社区', max_length=30, default='', null=True)
    tel = models.CharField('电话号码', max_length=30, default='', null=True)
    pid = models.CharField('主键id', max_length=30, primary_key=True)
    isShow = models.CharField(max_length=5, default='', null=True)
    mobile = models.CharField(max_length=30, default='', null=True)
    flagNumber = models.CharField(max_length=100, default='', null=True)
    group_type = models.IntegerField(default=0)
    parentId = models.CharField(max_length=30, default='')
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)
    isDelete = models.IntegerField(default=0)

    @staticmethod
    def build(admins):
        """Build an ``Admins`` instance from a dict-like record (not saved)."""
        return Admins(objectId=admins.get('objectId'),
                      address=admins.get('address'),
                      person=admins.get('person'),
                      pwd=admins.get('pwd'),
                      name=admins.get('name'),
                      username=admins.get('username'),
                      type=admins.get('type'),
                      tel=admins.get('tel'),
                      pid=admins.get('pid'),
                      isShow=admins.get('isShow'),
                      mobile=admins.get('mobile'),
                      flagNumber=admins.get('flagNumber', ''),
                      group_type=admins.get('group_type', 0),
                      parentId=admins.get('parentId', ''),
                      createdAt=admins.get('createdAt'),
                      updatedAt=admins.get('updatedAt'),
                      isDelete=admins.get('isDelete', 0)
                      )
"""活动表"""
class Activities(models.Model):
    """Activity/event record (the "活动表"), owned by an Admins organization."""
    objectId = models.CharField('主键id', max_length=30, primary_key=True)
    limit = models.CharField(max_length=30, null=True)
    admin = models.ForeignKey(Admins, null=True)
    place = models.CharField(max_length=300, null=True)
    content = models.CharField(max_length=300, null=True)
    title = models.CharField(max_length=100, null=True)
    begin = models.DateTimeField(null=True)
    # NOTE(review): isDelete/isShow are CharFields here but IntegerFields
    # on sibling models — inconsistent typing to confirm with callers.
    isDelete = models.CharField(max_length=10, null=True)
    isShow = models.CharField(max_length=10, null=True)
    joinnum = models.IntegerField(null=True)
    end = models.DateTimeField(null=True)
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)
    # Moderation state; defaults to approved ('pass').
    status = models.CharField(max_length=100, null=True, default='pass')
    @staticmethod
    def build(activities):
        """Construct an Activities instance from a dict-like payload."""
        return Activities(objectId=activities.get('objectId'),
                          limit=activities.get('limit'),
                          admin=activities.get('admin'),
                          place=activities.get('place'),
                          content=activities.get('content'),
                          title=activities.get('title'),
                          begin=activities.get('begin'),
                          isDelete=activities.get('isDelete'),
                          isShow=activities.get('isShow'),
                          joinnum=activities.get('joinnum'),
                          end=activities.get('end'),
                          createdAt=activities.get('createdAt'),
                          updatedAt=activities.get('updatedAt'),
                          status=activities.get('status')
                          )
"""活动参加信息表"""
class ActRegistration(models.Model):
    """Activity sign-up record (the "活动参加信息表") linking a user to an activity.

    NOTE(review): the related_name values look swapped relative to the
    field names (userLocationArr -> 'actR_user_group', userGroupArr ->
    'actR_user_location'); confirm against reverse-lookup call sites
    before renaming.
    """
    objectId = models.CharField(max_length=30, primary_key=True)
    admin = models.ForeignKey(Admins, related_name='actR_admin_group', null=True)
    userLocationArr = models.ForeignKey(Admins, related_name='actR_user_group', null=True)
    activity = models.ForeignKey(Activities, related_name='actR_user_group', null=True)
    isInner = models.BooleanField(default=True)
    userGroupArr = models.ForeignKey(Admins, related_name='actR_user_location', null=True)
    user = models.ForeignKey(Users, null=True)
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)
    @staticmethod
    def build(act_registration):
        """Construct an ActRegistration instance from a dict-like payload."""
        act_registration = ActRegistration(objectId=act_registration.get('objectId'),
                                           admin=act_registration.get('admin'),
                                           userLocationArr=act_registration.get('userLocationArr'),
                                           activity=act_registration.get('activity'),
                                           isInner=act_registration.get('isInner'),
                                           userGroupArr=act_registration.get('userGroupArr'),
                                           user=act_registration.get('user'),
                                           createdAt=act_registration.get('createdAt'),
                                           updatedAt=act_registration.get('updatedAt')
                                           )
        return act_registration
class ActJoinLog(models.Model):
    """Record of a user actually attending an activity, with scoring fields."""
    objectId = models.CharField(max_length=30, primary_key=True)
    admin = models.ForeignKey(Admins, related_name='actJL_admin_group', null=True)
    userLocationArr = models.ForeignKey(Admins, related_name='actJL_user_location', null=True)
    extra = models.IntegerField(null=True)
    star = models.IntegerField(null=True)
    mark = models.IntegerField(null=True)
    activity = models.ForeignKey(Activities, null=True)
    isInner = models.BooleanField(default=False)
    userGroupArr = models.ForeignKey(Admins, related_name='actJL_user_group', null=True)
    user = models.ForeignKey(Users, null=True)
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)
    @staticmethod
    def build(act_join_log):
        """Construct an ActJoinLog instance from a dict-like payload."""
        act_join_log = ActJoinLog(objectId=act_join_log.get('objectId'),
                                  admin=act_join_log.get('admin'),
                                  userLocationArr=act_join_log.get('userLocationArr'),
                                  extra=act_join_log.get('extra'),
                                  star=act_join_log.get('star'),
                                  mark=act_join_log.get('mark'),
                                  activity=act_join_log.get('activity'),
                                  isInner=act_join_log.get('isInner'),
                                  userGroupArr=act_join_log.get('userGroupArr'),
                                  user=act_join_log.get('user'),
                                  createdAt=act_join_log.get('createdAt'),
                                  updatedAt=act_join_log.get('updatedAt')
                                  )
        return act_join_log
class Tag(models.Model):
    """Free-form tag text, linked to activities via Activity2Tag."""
    id = models.AutoField(primary_key=True)
    txt = models.CharField(max_length=300, null=True)
    # Soft-delete flag: 0 = active.
    isDelete = models.IntegerField(default=0)
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)
class Activity2Tag(models.Model):
    """Many-to-many join table between Activities and Tag.

    NOTE(review): uses plain CharField/IntegerField ids rather than
    ForeignKeys, so no referential integrity is enforced by the DB.
    """
    id = models.AutoField(primary_key=True)
    activity_id = models.CharField(max_length=30)
    tag_id = models.IntegerField()
    createdAt = models.DateTimeField(null=True)
    updatedAt = models.DateTimeField(null=True)
| [
"suyuan1573@gmail.com"
] | suyuan1573@gmail.com |
9e450b6b92e3daf14bb83e04fca230d46a8d2849 | 2c3a9f50c7a55aff1119909190f8f4472283afae | /test.py | d9e45930699a691b199d74589d8143fe56427122 | [] | no_license | sunder3344/python_script | 0c609667985dbaae7e6123e9fa782d7ced972462 | ea1c1d12c3f505a4b30d1089598205639cd9503c | refs/heads/master | 2020-07-03T02:30:51.975529 | 2019-01-11T04:11:16 | 2019-01-11T04:11:16 | 74,204,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #coding=utf-8
def log(func):
    """Decorator that prints the callee's name before each call.

    Improvement over the original: functools.wraps preserves the wrapped
    function's metadata (__name__, __doc__, ...), which was previously
    replaced by the wrapper's.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kw):
        print('===call %s():' % func.__name__)
        return func(*args, **kw)
    return wrapper
def logWithParam(text):
    """Parameterized decorator: prints `text` plus the callee's name per call.

    functools.wraps is applied so decorated functions keep their metadata.
    """
    from functools import wraps

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kw):
            print('%s ===call %s():' % (text, func.__name__))
            return func(*args, **kw)
        return wrapper
    return decorator
def logWithParamOrNot(text = 'test'):
    """Like logWithParam but `text` is optional (defaults to 'test').

    functools.wraps is applied so decorated functions keep their metadata.
    """
    from functools import wraps

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kw):
            print('%s ===call %s():' % (text, func.__name__))
            return func(*args, **kw)
        return wrapper
    return decorator
def logWithPrint(func):
    """Decorator that brackets each call with begin/end messages.

    Fixes two defects of the original: the wrapped function's return value
    was discarded (the wrapper always returned None), and the function's
    metadata was lost (no functools.wraps).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kw):
        print('begin call %s():' % func.__name__)
        result = func(*args, **kw)
        print('end call %s():' % func.__name__)
        return result
    return wrapper
@logWithPrint
def now():
    """Print a fixed date string; the decorator adds begin/end call lines."""
    print('2016-05-17')
if __name__ == '__main__':
    # Demo entry point: exercises the logWithPrint-decorated function.
    now()
"sunder3344@sina.com"
] | sunder3344@sina.com |
1d8e65a59c8fb5f1110b2ca01d7ca08764315240 | 854afc6c576b3466db59d7b098d08d071c29139d | /main/migrations/0003_product_image.py | 656566c1511ee525231e12628fc3838615f63c60 | [] | no_license | Peterc1712/CLCI-A3-django.ecommerce.app | c5bb7f40f11d47202b0d6e420dadaa5b67eb932d | 978fba586aa77639e1444fb3f67a3e1257714ef6 | refs/heads/master | 2023-09-01T22:08:34.618231 | 2021-11-02T14:17:27 | 2021-11-02T14:17:27 | 423,859,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Generated by Django 3.2.6 on 2021-10-16 03:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional `image` field to main.Product."""
    dependencies = [
        ('main', '0002_customer_order_orderitem_product_shippingaddress'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='image',
            # Optional image; empty upload_to stores files at MEDIA_ROOT.
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
| [
"peter.chung@student.uts.edu.au"
] | peter.chung@student.uts.edu.au |
fe859ca1c52becc338725ff15bd6adef5fbb361c | f6894df72d1b4958c50c4ae598ae092af2a5a32f | /Docker_Control_panel/docker.py | 5459c96fc621c7d5bb00d35988382f1697f8beac | [] | no_license | Zahidsqldba07/Python-Flask-applications | d19135091ef7e384193856f5a830ac8ffe6c0df1 | 2d0cf7810a0ad8baf8c4d656785b009fab77c764 | refs/heads/main | 2023-05-31T13:18:34.285304 | 2021-06-26T14:34:44 | 2021-06-26T14:34:44 | 521,623,318 | 1 | 0 | null | 2022-08-05T12:06:13 | 2022-08-05T12:06:13 | null | UTF-8 | Python | false | false | 221 | py | #!/usr/bin/python3
# CGI endpoint: runs the command supplied in the "x" query parameter with
# sudo and returns its output as text/html.
# SECURITY(review): this is remote command execution by design — the raw,
# unvalidated user string is passed to a shell (subprocess.getoutput) with
# sudo, and CORS is wide open ("*"). Never expose this outside a trusted,
# access-controlled network; at minimum whitelist the allowed commands.
import cgi
import subprocess
print("Access-Control-Allow-Origin: *")
print("content-type:text/html")
print()
fs=cgi.FieldStorage()
cmd=fs.getvalue('x')
op=subprocess.getoutput("sudo "+ cmd)
print(op)
"venkateshpensalwar@gmail.com"
] | venkateshpensalwar@gmail.com |
178826d2adfe700318ad4b33e99180716f786e78 | 546bbc52c76c19913d57b592572697179833f599 | /baseline/uer/utils/data.py | b0ec96b0e98d952607bf131bcfe2830296b61eb9 | [] | no_license | sheller2010/AutoIE2 | d3a64f83f42e4f48f778027aa72acb4c6efe82f4 | 41eb7e7625fde7a532c2c592880bbe672cfaf7dc | refs/heads/main | 2023-06-16T17:25:57.474246 | 2021-07-19T09:37:31 | 2021-07-19T09:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,439 | py | import os
import random
import pickle
import torch
from multiprocessing import Pool
from uer.utils.constants import *
from uer.utils.tokenizers import *
from uer.utils.misc import count_lines
from uer.utils.seed import set_seed
def mask_seq(src, tokenizer, whole_word_masking, span_masking, span_geo_prob, span_max_length):
    """Apply BERT-style MLM masking to a list of token ids.

    Returns (src, tgt_mlm): the (possibly re-tokenized and re-padded)
    sequence with ~15% of non-pad tokens corrupted, and a position-sorted
    list of (position, original_token_id) pairs for the selected tokens.
    Each selection is replaced by [MASK] with p=0.8, by a random
    non-special token with p=0.1, and left unchanged with p=0.1.
    Exactly one of whole-word / span / single-token masking is applied,
    chosen by the boolean flags.
    """
    vocab = tokenizer.vocab
    # Locate the last non-pad token so padding is never masked.
    for i in range(len(src) - 1, -1, -1):
        if src[i] != PAD_ID:
            break
    src_no_pad = src[:i + 1]
    tokens_index, src_no_pad = create_index(src_no_pad, tokenizer, whole_word_masking, span_masking, span_geo_prob, span_max_length)
    # create_index may re-tokenize (whole-word path), so re-pad back to the
    # original fixed length.
    if len(src_no_pad) < len(src):
        src = src_no_pad + (len(src) - len(src_no_pad)) * [PAD_ID]
    else:
        src = src_no_pad
    random.shuffle(tokens_index)
    num_to_predict = max(1, int(round(len(src_no_pad) * 0.15)))
    tgt_mlm = []
    for index_set in tokens_index:
        if len(tgt_mlm) >= num_to_predict:
            break
        if whole_word_masking:
            i = index_set[0]
            mask_len = index_set[1]
            # Skip a word whose full length would exceed the 15% budget.
            if len(tgt_mlm) + mask_len > num_to_predict:
                continue
            for j in range(mask_len):
                token = src[i + j]
                tgt_mlm.append((i + j, token))
                # 80/10/10 corruption is drawn per sub-token here.
                prob = random.random()
                if prob < 0.8:
                    src[i + j] = vocab.get(MASK_TOKEN)
                elif prob < 0.9:
                    # Resample until a non-special token id is drawn.
                    while True:
                        rdi = random.randint(1, len(vocab) - 1)
                        if rdi not in [vocab.get(CLS_TOKEN), vocab.get(SEP_TOKEN), vocab.get(MASK_TOKEN), PAD_ID]:
                            break
                    src[i + j] = rdi
        elif span_masking:
            i = index_set[0]
            span_len = index_set[1]
            if len(tgt_mlm) + span_len > num_to_predict:
                continue
            for j in range(span_len):
                token = src[i + j]
                tgt_mlm.append((i + j, token))
            # Unlike the whole-word path, one draw decides the whole span.
            prob = random.random()
            if prob < 0.8:
                for j in range(span_len):
                    src[i + j] = vocab.get(MASK_TOKEN)
            elif prob < 0.9:
                for j in range(span_len):
                    while True:
                        rdi = random.randint(1, len(vocab) - 1)
                        if rdi not in [vocab.get(CLS_TOKEN), vocab.get(SEP_TOKEN), vocab.get(MASK_TOKEN), PAD_ID]:
                            break
                    src[i + j] = rdi
        else:
            # Plain single-token masking.
            i = index_set[0]
            token = src[i]
            tgt_mlm.append((i, token))
            prob = random.random()
            if prob < 0.8:
                src[i] = vocab.get(MASK_TOKEN)
            elif prob < 0.9:
                while True:
                    rdi = random.randint(1, len(vocab) - 1)
                    if rdi not in [vocab.get(CLS_TOKEN), vocab.get(SEP_TOKEN), vocab.get(MASK_TOKEN), PAD_ID]:
                        break
                src[i] = rdi
    tgt_mlm = sorted(tgt_mlm, key=lambda x: x[0])
    return src, tgt_mlm
def create_index(src, tokenizer, whole_word_masking, span_masking, span_geo_prob, span_max_length):
    """Build the list of maskable positions for mask_seq.

    Returns (tokens_index, src). For whole-word masking the sequence is
    decoded, re-segmented with jieba, and re-tokenized, so the returned
    src may differ from the input; each index entry is [start, word_len].
    For span masking entries are [start, span_len] with span_len drawn
    from get_span_len; otherwise entries are single positions [i].
    Special tokens (CLS/SEP/PAD) are never indexed.
    """
    tokens_index = []
    span_end_position = -1
    vocab = tokenizer.vocab
    if whole_word_masking:
        src_wwm = []
        src_length = len(src)
        has_cls, has_sep = False, False
        # Strip special tokens before decoding so they survive re-tokenization.
        if src[0] == vocab.get(CLS_TOKEN):
            src = src[1:]
            has_cls = True
        if src[-1] == vocab.get(SEP_TOKEN):
            src = src[:-1]
            has_sep = True
        sentence = "".join(tokenizer.convert_ids_to_tokens(src)).replace('[UNK]', '').replace('##', '')
        # NOTE: third-party dependency imported lazily; only needed on this path.
        import jieba
        wordlist = jieba.cut(sentence)
        if has_cls:
            src_wwm += [vocab.get(CLS_TOKEN)]
        for word in wordlist:
            position = len(src_wwm)
            src_wwm += tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word))
            # Words whose sub-tokens would spill past the original length are
            # kept in src but not marked maskable.
            if len(src_wwm) < src_length:
                tokens_index.append([position, len(src_wwm)-position])
        if has_sep:
            src_wwm += [vocab.get(SEP_TOKEN)]
        # Re-tokenization can change length; clamp back to the original.
        if len(src_wwm) > src_length:
            src = src_wwm[:src_length]
        else:
            src = src_wwm
    else:
        for (i, token) in enumerate(src):
            if token == vocab.get(CLS_TOKEN) or token == vocab.get(SEP_TOKEN) or token == PAD_ID:
                continue
            if not span_masking:
                tokens_index.append([i])
            else:
                # Skip positions already covered by the previous span.
                if i < span_end_position:
                    continue
                span_len = get_span_len(span_max_length, span_geo_prob)
                span_end_position = i + span_len
                if span_end_position > len(src):
                    span_len = len(src) - i
                tokens_index.append([i, span_len])
    return tokens_index, src
def get_span_len(max_span_len, p):
    """Sample a span length in [1, max_span_len] from a truncated geometric law.

    P(len = k) is proportional to p * (1 - p) ** (k - 1); a cumulative
    table is built once and inverted with a single uniform draw.
    """
    cumulative = [0.0]
    mass = p
    for _ in range(1, max_span_len + 1):
        cumulative.append(cumulative[-1] + mass)
        mass *= (1 - p)
    draw = cumulative[-1] * random.random()
    # Invert the CDF: find the bucket the draw falls into.
    for k in range(1, len(cumulative)):
        if cumulative[k - 1] <= draw < cumulative[k]:
            return k
def merge_dataset(dataset_path, workers_num):
    """Concatenate the per-worker temporary datasets into `dataset_path`.

    Each worker i wrote its pickled instances to "dataset-tmp-<i>.pt";
    this appends them in worker order and removes the temporary files.
    """
    # Copy in 1 MiB chunks. The original used `2 ^ 20`, which is XOR (= 22),
    # so files were copied 22 bytes at a time — correct but extremely slow.
    chunk_size = 1 << 20
    with open(dataset_path, "wb") as dataset_writer:
        for i in range(workers_num):
            tmp_path = "dataset-tmp-" + str(i) + ".pt"
            # `with` guarantees the handles are closed even on error.
            with open(tmp_path, "rb") as tmp_dataset_reader:
                while True:
                    tmp_data = tmp_dataset_reader.read(chunk_size)
                    if not tmp_data:
                        break
                    dataset_writer.write(tmp_data)
            os.remove(tmp_path)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
    """Trim tokens_a/tokens_b in place until their combined length fits.

    Each step removes one token — from a random end — of whichever
    sequence is currently longer (ties trim tokens_b).
    """
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        # Drop from a random end so truncation bias is symmetric.
        if random.random() < 0.5:
            del longer[0]
        else:
            longer.pop()
class Dataset(object):
    """Base class for corpus -> pickled-instance dataset builders.

    Subclasses implement worker(), which reads a line range of the corpus
    and pickles training instances to "dataset-tmp-<proc_id>.pt"; the
    per-worker files are then merged into args.dataset_path.
    """
    def __init__(self, args, vocab, tokenizer):
        self.vocab = vocab
        self.tokenizer = tokenizer
        self.corpus_path = args.corpus_path
        self.dataset_path = args.dataset_path
        self.seq_length = args.seq_length
        self.seed = args.seed
        self.dynamic_masking = args.dynamic_masking
        self.whole_word_masking = args.whole_word_masking
        self.span_masking = args.span_masking
        self.span_geo_prob = args.span_geo_prob
        self.span_max_length = args.span_max_length
        self.docs_buffer_size = args.docs_buffer_size
        self.dup_factor = args.dup_factor
    def build_and_save(self, workers_num):
        """
        Build dataset from the given corpus.
        Start workers_num processes and each process deals with a part of data.
        """
        lines_num = count_lines(self.corpus_path)
        print("Starting %d workers for building datasets ... " % workers_num)
        assert (workers_num >= 1)
        if workers_num == 1:
            self.worker(0, 0, lines_num)
        else:
            # Split the corpus into contiguous line ranges, one per process.
            pool = Pool(workers_num)
            for i in range(workers_num):
                start = i * lines_num // workers_num
                end = (i + 1) * lines_num // workers_num
                pool.apply_async(func=self.worker, args=[i, start, end])
            pool.close()
            pool.join()
        # Merge datasets.
        merge_dataset(self.dataset_path, workers_num)
    def worker(self, proc_id, start, end):
        # Subclass hook: build instances for corpus lines [start, end).
        raise NotImplementedError()
class DataLoader(object):
    """Base class for streaming pickled instances back out in batches.

    Instances are read round-robin across processes: this loader keeps
    only every proc_num-th instance (offset proc_id). The file is re-read
    from the start on EOF, so iteration is endless.
    """
    def __init__(self, args, dataset_path, batch_size, proc_id, proc_num, shuffle=False):
        self.tokenizer = args.tokenizer
        self.batch_size = batch_size
        self.instances_buffer_size = args.instances_buffer_size
        self.proc_id = proc_id
        self.proc_num = proc_num
        self.shuffle = shuffle
        self.dataset_reader = open(dataset_path, "rb")
        self.read_count = 0
        self.start = 0
        self.end = 0
        self.buffer = []
        self.vocab = args.vocab
        self.whole_word_masking = args.whole_word_masking
        self.span_masking = args.span_masking
        self.span_geo_prob = args.span_geo_prob
        self.span_max_length = args.span_max_length
    def _fill_buf(self):
        """Refill the in-memory instance buffer (shuffling it if requested)."""
        try:
            self.buffer = []
            while True:
                instance = pickle.load(self.dataset_reader)
                self.read_count += 1
                # Round-robin sharding across data-loader processes.
                if (self.read_count - 1) % self.proc_num == self.proc_id:
                    self.buffer.append(instance)
                    if len(self.buffer) >= self.instances_buffer_size:
                        break
        except EOFError:
            # Reach file end.
            self.dataset_reader.seek(0)
        if self.shuffle:
            random.shuffle(self.buffer)
        self.start = 0
        self.end = len(self.buffer)
    def _empty(self):
        # True when the current buffer has been fully consumed.
        return self.start >= self.end
    def __del__(self):
        self.dataset_reader.close()
class BertDataset(Dataset):
    """
    Construct dataset for MLM and NSP tasks from the given corpus.
    Each document consists of multiple sentences,
    and each sentence occupies a single line.
    Documents in corpus must be separated by empty lines.
    """
    def __init__(self, args, vocab, tokenizer):
        super(BertDataset, self).__init__(args, vocab, tokenizer)
        self.short_seq_prob = args.short_seq_prob
    def worker(self, proc_id, start, end):
        """Read corpus lines [start, end) and pickle BERT instances."""
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        docs_buffer = []
        document = []
        pos = 0
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        with open(self.corpus_path, mode="r", encoding="utf-8") as f:
            while pos < start:
                f.readline()
                pos += 1
            while True:
                line = f.readline()
                pos += 1
                if pos >= end:
                    # Flush any buffered documents before finishing.
                    if len(docs_buffer) > 0:
                        instances = self.build_instances(docs_buffer)
                        for instance in instances:
                            pickle.dump(instance, dataset_writer)
                    break
                if not line.strip():
                    # Blank line = document boundary.
                    if len(document) >= 1:
                        docs_buffer.append(document)
                        document = []
                    if len(docs_buffer) == self.docs_buffer_size:
                        # Build instances from documents.
                        instances = self.build_instances(docs_buffer)
                        # Save instances.
                        for instance in instances:
                            pickle.dump(instance, dataset_writer)
                        # Clear buffer.
                        docs_buffer = []
                    continue
                sentence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(line))
                if len(sentence) > 0:
                    document.append(sentence)
        dataset_writer.close()
    def build_instances(self, all_documents):
        """Build instances from every document, dup_factor times over."""
        instances = []
        for _ in range(self.dup_factor):
            for doc_index in range(len(all_documents)):
                instances.extend(self.create_ins_from_doc(all_documents, doc_index))
        return instances
    def create_ins_from_doc(self, all_documents, document_index):
        """Create MLM+NSP instances from one document.

        Sentences are greedily packed into chunks near target_seq_length;
        each chunk is split at a random point into segment A and B, and
        with p=0.5 segment B is swapped for text from a random other
        document (is_random_next = 1).
        """
        document = all_documents[document_index]
        # Reserve room for [CLS] and two [SEP] tokens.
        max_num_tokens = self.seq_length - 3
        target_seq_length = max_num_tokens
        if random.random() < self.short_seq_prob:
            target_seq_length = random.randint(2, max_num_tokens)
        instances = []
        current_chunk = []
        current_length = 0
        i = 0
        while i < len(document):
            segment = document[i]
            current_chunk.append(segment)
            current_length += len(segment)
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    a_end = 1
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)
                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])
                    tokens_b = []
                    is_random_next = 0
                    if len(current_chunk) == 1 or random.random() < 0.5:
                        # Negative NSP sample: take B from a random document.
                        is_random_next = 1
                        target_b_length = target_seq_length - len(tokens_a)
                        for _ in range(10):
                            random_document_index = random.randint(0, len(all_documents) - 1)
                            if random_document_index != document_index:
                                break
                        random_document = all_documents[random_document_index]
                        random_start = random.randint(0, len(random_document) - 1)
                        for j in range(random_start, len(random_document)):
                            tokens_b.extend(random_document[j])
                            if len(tokens_b) >= target_b_length:
                                break
                        # Unused tail of the chunk is replayed on the next pass.
                        num_unused_segments = len(current_chunk) - a_end
                        i -= num_unused_segments
                    else:
                        is_random_next = 0
                        for j in range(a_end, len(current_chunk)):
                            tokens_b.extend(current_chunk[j])
                    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
                    src = []
                    src.append(self.vocab.get(CLS_TOKEN))
                    src.extend(tokens_a)
                    src.append(self.vocab.get(SEP_TOKEN))
                    seg_pos = [len(src)]
                    src.extend(tokens_b)
                    src.append(self.vocab.get(SEP_TOKEN))
                    seg_pos.append(len(src))
                    while len(src) != self.seq_length:
                        src.append(PAD_ID)
                    # Static masking is baked in here; dynamic masking is
                    # deferred to the data loader.
                    if not self.dynamic_masking:
                        src, tgt_mlm = mask_seq(src, self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
                        instance = (src, tgt_mlm, is_random_next, seg_pos)
                    else:
                        instance = (src, is_random_next, seg_pos)
                    instances.append(instance)
                current_chunk = []
                current_length = 0
            i += 1
        return instances
class BertDataLoader(DataLoader):
    """Yields (src, tgt_mlm, is_next, seg) batches for MLM + NSP training.

    Handles both instance layouts: 4-tuples with pre-masked targets
    (static masking) and 3-tuples masked on the fly (dynamic masking).
    """
    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                instances = self.buffer[self.start:]
            else:
                instances = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size
            src = []
            tgt_mlm = []
            is_next = []
            seg = []
            masked_words_num = 0
            for ins in instances:
                if len(ins) == 4:
                    # Static masking: (src, tgt_mlm, is_random_next, seg_pos).
                    src.append(ins[0])
                    masked_words_num += len(ins[1])
                    # Dense MLM target: 0 everywhere except masked positions.
                    tgt_mlm.append([0] * len(ins[0]))
                    for mask in ins[1]:
                        tgt_mlm[-1][mask[0]] = mask[1]
                    is_next.append(ins[2])
                    seg.append([1] * ins[3][0] + [2] * (ins[3][1] - ins[3][0]) + [PAD_ID] * (len(ins[0]) - ins[3][1]))
                else:
                    # Dynamic masking: mask now, per epoch.
                    src_single, tgt_mlm_single = mask_seq(ins[0], self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
                    masked_words_num += len(tgt_mlm_single)
                    src.append(src_single)
                    tgt_mlm.append([0] * len(ins[0]))
                    for mask in tgt_mlm_single:
                        tgt_mlm[-1][mask[0]] = mask[1]
                    is_next.append(ins[1])
                    seg.append([1] * ins[2][0] + [2] * (ins[2][1] - ins[2][0]) + [PAD_ID] * (len(ins[0]) - ins[2][1]))
            # Skip batches with nothing to predict.
            if masked_words_num == 0:
                continue
            yield torch.LongTensor(src), \
                torch.LongTensor(tgt_mlm), \
                torch.LongTensor(is_next), \
                torch.LongTensor(seg)
class MlmDataset(Dataset):
    """MLM-only dataset (RoBERTa style): one text per line, no NSP.

    With full_sentences enabled, consecutive lines are concatenated into
    one long token stream and cut into fixed seq_length instances;
    otherwise each line is chunked independently.
    """
    def __init__(self, args, vocab, tokenizer):
        super(MlmDataset, self).__init__(args, vocab, tokenizer)
        self.full_sentences = args.full_sentences
    def worker(self, proc_id, start, end):
        """Read corpus lines [start, end) dup_factor times and pickle instances."""
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        docs_buffer = []
        for _ in range(self.dup_factor):
            pos = 0
            with open(self.corpus_path, mode="r", encoding="utf-8") as f:
                while pos < start:
                    f.readline()
                    pos += 1
                while True:
                    line = f.readline()
                    pos += 1
                    document = [self.vocab.get(CLS_TOKEN)] + self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(line)) + [self.vocab.get(SEP_TOKEN)]
                    if self.full_sentences:
                        if len(document) > 0:
                            docs_buffer.append(document)
                        if len(docs_buffer) == self.docs_buffer_size:
                            # Build instances from documents.
                            all_documents = self.concatenate_docs(docs_buffer)
                            instances = self.build_instances(all_documents)
                            # Save instances.
                            for instance in instances:
                                pickle.dump(instance, dataset_writer)
                            # Clear buffer.
                            docs_buffer = []
                        if pos >= end:
                            if len(docs_buffer) > 0:
                                all_documents = self.concatenate_docs(docs_buffer)
                                instances = self.build_instances(all_documents)
                                # Save instances.
                                for instance in instances:
                                    pickle.dump(instance, dataset_writer)
                            break
                    else:
                        if len(document) > 0:
                            instances = self.build_instances(document)
                            # Save instances.
                            for instance in instances:
                                pickle.dump(instance, dataset_writer)
                        if pos >= end:
                            break
        dataset_writer.close()
    def concatenate_docs(self, docs_buffer):
        """Flatten the buffered documents into one continuous id stream."""
        all_documents = []
        for i in range(len(docs_buffer)):
            all_documents += docs_buffer[i]
        return all_documents
    def build_instances(self, all_documents):
        """Cut the id stream into seq_length chunks; pad the final remainder.

        Static masking applies mask_seq now; dynamic masking stores the
        raw chunk for the data loader to mask per epoch.
        """
        instances = []
        instances_num = len(all_documents) // self.seq_length
        for i in range(instances_num):
            src = all_documents[i * self.seq_length: (i + 1) * self.seq_length]
            seg_pos = [len(src)]
            if not self.dynamic_masking:
                src, tgt = mask_seq(src, self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
                instance = (src, tgt, seg_pos)
            else:
                instance = (src, seg_pos)
            instances.append(instance)
        src = all_documents[instances_num * self.seq_length:]
        seg_pos = [len(src)]
        while len(src) != self.seq_length:
            src.append(PAD_ID)
        if not self.dynamic_masking:
            src, tgt = mask_seq(src, self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
            instance = (src, tgt, seg_pos)
        else:
            instance = (src, seg_pos)
        instances.append(instance)
        return instances
class MlmDataLoader(DataLoader):
    """Yields (src, tgt, seg) batches for MLM-only training.

    Handles both instance layouts: 3-tuples with pre-masked targets
    (static masking) and 2-tuples masked on the fly (dynamic masking).
    """
    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                instances = self.buffer[self.start:]
            else:
                instances = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size
            src = []
            tgt = []
            seg = []
            masked_words_num = 0
            for ins in instances:
                if len(ins) == 3:
                    # Static masking: (src, tgt, seg_pos).
                    src.append(ins[0])
                    masked_words_num += len(ins[1])
                    tgt.append([0] * len(ins[0]))
                    for mask in ins[1]:
                        tgt[-1][mask[0]] = mask[1]
                    seg.append([1] * ins[2][0] + [PAD_ID] * (len(ins[0]) - ins[2][0]))
                else:
                    # Dynamic masking: mask now, per epoch.
                    src_single, tgt_single = mask_seq(ins[0], self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
                    masked_words_num += len(tgt_single)
                    src.append(src_single)
                    tgt.append([0] * len(ins[0]))
                    for mask in tgt_single:
                        tgt[-1][mask[0]] = mask[1]
                    seg.append([1] * ins[1][0] + [PAD_ID] * (len(ins[0]) - ins[1][0]))
            # Skip batches with nothing to predict.
            if masked_words_num == 0:
                continue
            yield torch.LongTensor(src), \
                torch.LongTensor(tgt), \
                torch.LongTensor(seg)
class AlbertDataset(Dataset):
    """
    Construct dataset for MLM and SOP tasks from the given corpus.
    Each document consists of multiple sentences,
    and each sentence occupies a single line.
    Documents in corpus must be separated by empty lines.
    """
    def __init__(self, args, vocab, tokenizer):
        super(AlbertDataset, self).__init__(args, vocab, tokenizer)
        self.short_seq_prob = args.short_seq_prob
    def worker(self, proc_id, start, end):
        """Read corpus lines [start, end) dup_factor times and pickle instances."""
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        document = []
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        for _ in range(self.dup_factor):
            pos = 0
            with open(self.corpus_path, mode="r", encoding="utf-8") as f:
                while pos < start:
                    f.readline()
                    pos += 1
                while True:
                    line = f.readline()
                    pos += 1
                    if not line.strip():
                        # Blank line = document boundary: emit and reset.
                        if len(document) >= 1:
                            instances = self.build_instances(document)
                            for instance in instances:
                                pickle.dump(instance, dataset_writer)
                        document = []
                    sentence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(line))
                    if len(sentence) > 0:
                        document.append(sentence)
                    # NOTE(review): other workers stop at `pos >= end`; the
                    # off-by-one here (end - 1) looks inconsistent — confirm.
                    if pos >= end - 1:
                        if len(document) >= 1:
                            instances = self.build_instances(document)
                            for instance in instances:
                                pickle.dump(instance, dataset_writer)
                        break
        dataset_writer.close()
    def build_instances(self, document):
        """Build all instances for a single document."""
        instances = []
        instances.extend(self.create_ins_from_doc(document))
        return instances
    def create_ins_from_doc(self, document):
        """Create MLM + sentence-order-prediction instances from one document.

        Like BERT's packing, but the negative label comes from swapping
        segments A and B (is_wrong_order = 1) instead of sampling B from
        another document.
        """
        # Reserve room for [CLS] and two [SEP] tokens.
        max_num_tokens = self.seq_length - 3
        target_seq_length = max_num_tokens
        if random.random() < self.short_seq_prob:
            target_seq_length = random.randint(2, max_num_tokens)
        instances = []
        current_chunk = []
        current_length = 0
        i = 0
        while i < len(document):
            segment = document[i]
            current_chunk.append(segment)
            current_length += len(segment)
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    a_end = 1
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)
                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])
                    tokens_b = []
                    is_wrong_order = 0
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                    if random.random() < 0.5:
                        # SOP negative: swap the two segments.
                        is_wrong_order = 1
                        tmp = tokens_a
                        tokens_a = tokens_b
                        tokens_b = tmp
                    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
                    src = []
                    src.append(self.vocab.get(CLS_TOKEN))
                    src.extend(tokens_a)
                    src.append(self.vocab.get(SEP_TOKEN))
                    seg_pos = [len(src)]
                    src.extend(tokens_b)
                    src.append(self.vocab.get(SEP_TOKEN))
                    seg_pos.append(len(src))
                    while len(src) != self.seq_length:
                        src.append(PAD_ID)
                    # Static masking bakes targets in; dynamic defers to loader.
                    if not self.dynamic_masking:
                        src, tgt_mlm = mask_seq(src, self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
                        instance = (src, tgt_mlm, is_wrong_order, seg_pos)
                    else:
                        instance = (src, is_wrong_order, seg_pos)
                    instances.append(instance)
                current_chunk = []
                current_length = 0
            i += 1
        return instances
class AlbertDataLoader(BertDataLoader):
    """Batches ALBERT instances.

    AlbertDataset emits (src, tgt_mlm, sentence-order label, seg_pos)
    tuples with the same layout BertDataLoader already consumes, so the
    inherited implementation is used unchanged.
    """
class LmDataset(Dataset):
    """Causal language-model dataset: one text per line.

    Each line is wrapped in [CLS] ... [SEP] and cut into chunks of
    seq_length + 1 ids (one extra token so the loader can shift input
    and target by one position).
    """
    def worker(self, proc_id, start, end):
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        pos = 0
        with open(self.corpus_path, mode="r", encoding="utf-8") as f:
            while pos < start:
                f.readline()
                pos += 1
            while True:
                line = f.readline()
                pos += 1
                document = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(line))
                document = [self.vocab.get(CLS_TOKEN)] + document + [self.vocab.get(SEP_TOKEN)]
                instances_num = len(document) // (self.seq_length + 1)
                for i in range(instances_num):
                    src = document[i * (self.seq_length + 1): (i + 1) * (self.seq_length + 1)]
                    seg_pos = self.seq_length
                    pickle.dump((src, seg_pos), dataset_writer)
                # Pad the remainder; seg_pos records its true length.
                src = document[instances_num * (self.seq_length + 1):]
                if len(src) > 0:
                    seg_pos = len(src)
                    while len(src) != self.seq_length + 1:
                        src.append(PAD_ID)
                    pickle.dump((src, seg_pos), dataset_writer)
                if pos >= end:
                    break
        dataset_writer.close()
class LmDataLoader(DataLoader):
    """Yields (src, tgt, seg) batches for causal LM training.

    Each stored instance holds seq_length + 1 ids; input drops the last
    token and the target drops the first (next-token prediction).
    """
    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                instances = self.buffer[self.start:]
            else:
                instances = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size
            src = []
            tgt = []
            seg = []
            for ins in instances:
                src.append(ins[0][:-1])
                tgt.append(ins[0][1:])
                # ins[1] is the unpadded length; pad positions get seg 0.
                if ins[1] == len(ins[0]):
                    seg.append([1] * (ins[1] - 1))
                else:
                    seg.append([1] * ins[1] + [PAD_ID] * (len(ins[0]) - 1 - ins[1]))
            yield torch.LongTensor(src), \
                torch.LongTensor(tgt), \
                torch.LongTensor(seg)
class BilmDataset(Dataset):
    """Bidirectional LM (ELMo-style) dataset: one text per line.

    Each seq_length chunk is stored with a forward target (shifted left,
    ending in [SEP]) and a backward target (shifted right, starting with
    [CLS]).
    """
    def worker(self, proc_id, start, end):
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        pos = 0
        with open(self.corpus_path, mode="r", encoding="utf-8") as f:
            while pos < start:
                f.readline()
                pos += 1
            while True:
                line = f.readline()
                pos += 1
                document = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(line))
                instances_num = len(document) // self.seq_length
                for i in range(instances_num):
                    src = document[i * self.seq_length: (i + 1) * self.seq_length]
                    tgt_forward = src[1:] + [self.vocab.get(SEP_TOKEN)]
                    tgt_backward = [self.vocab.get(CLS_TOKEN)] + src[:-1]
                    seg = [1] * len(src)
                    pickle.dump((src, tgt_forward, tgt_backward, seg), dataset_writer)
                # Pad the remainder (skipped when empty).
                src = document[instances_num * self.seq_length:]
                if len(src) < 1:
                    continue
                tgt_forward = src[1:] + [self.vocab.get(SEP_TOKEN)]
                tgt_backward = [self.vocab.get(CLS_TOKEN)] + src[:-1]
                seg = [1] * len(src)
                while len(src) != self.seq_length:
                    src.append(PAD_ID)
                    tgt_forward.append(PAD_ID)
                    tgt_backward.append(PAD_ID)
                    seg.append(PAD_ID)
                pickle.dump((src, tgt_forward, tgt_backward, seg), dataset_writer)
                # NOTE(review): other workers stop at `pos >= end`; the
                # `end - 1` here looks inconsistent — confirm intended range.
                if pos >= end - 1:
                    break
        dataset_writer.close()
class BilmDataLoader(DataLoader):
    """Yields (src, tgt_forward, tgt_backward, seg) batches for bi-LM training."""

    def __iter__(self):
        while True:
            # Refill the instance buffer whenever it runs dry.
            while self._empty():
                self._fill_buf()
            # Slicing clamps at the buffer end, covering the final short batch.
            batch = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size
            # Each instance is (src, tgt_forward, tgt_backward, seg);
            # transpose the batch into per-field columns.
            src, tgt_forward, tgt_backward, seg = (list(column) for column in zip(*batch))
            yield torch.LongTensor(src), \
                torch.LongTensor(tgt_forward), \
                torch.LongTensor(tgt_backward), \
                torch.LongTensor(seg)
class Seq2seqDataset(Dataset):
    """Parallel-corpus dataset: each line is "source<TAB>target".

    Source and target may use different tokenizers/vocabularies; both
    sides are wrapped in [CLS] ... [SEP], truncated, and padded to fixed
    lengths (target keeps one extra token for the decoder shift).
    """
    def __init__(self, args, vocab, tokenizer):
        super(Seq2seqDataset, self).__init__(args, vocab, tokenizer)
        self.tgt_seq_length = args.tgt_seq_length
        self.src_vocab, self.src_tokenizer = vocab, tokenizer
        self.tgt_tokenizer = args.tgt_tokenizer
        self.tgt_vocab = self.tgt_tokenizer.vocab
    def worker(self, proc_id, start, end):
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        pos = 0
        with open(self.corpus_path, mode="r", encoding="utf-8") as f:
            while pos < start:
                f.readline()
                pos += 1
            while True:
                line = f.readline()
                pos += 1
                # Skip malformed lines without exactly one tab separator.
                if len(line.strip().split("\t")) != 2:
                    if pos >= end:
                        break
                    continue
                document_src, document_tgt = line.strip().split("\t")
                src = self.src_tokenizer.convert_tokens_to_ids(self.src_tokenizer.tokenize(document_src))
                tgt = self.tgt_tokenizer.convert_tokens_to_ids(self.tgt_tokenizer.tokenize(document_tgt))
                src = [self.src_vocab.get(CLS_TOKEN)] + src + [self.src_vocab.get(SEP_TOKEN)]
                tgt = [self.tgt_vocab.get(CLS_TOKEN)] + tgt + [self.tgt_vocab.get(SEP_TOKEN)]
                seg = [1] * len(src)
                # Target keeps tgt_seq_length + 1 ids for the one-token shift.
                src, tgt, seg = src[:self.seq_length], tgt[:self.tgt_seq_length + 1], seg[:self.seq_length]
                while len(src) != self.seq_length:
                    src.append(PAD_ID)
                    seg.append(PAD_ID)
                while len(tgt) != self.tgt_seq_length + 1:
                    tgt.append(PAD_ID)
                pickle.dump((src, tgt, seg), dataset_writer)
                if pos >= end:
                    break
        dataset_writer.close()
class Seq2seqDataLoader(DataLoader):
    """Batches seq2seq instances; decoder input/output are the target shifted by one token."""

    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                batch = self.buffer[self.start:]
            else:
                batch = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size

            src, tgt_in, tgt_out, seg = [], [], [], []
            for instance in batch:
                src.append(instance[0])
                tgt_in.append(instance[1][:-1])   # decoder input: drop last token
                tgt_out.append(instance[1][1:])   # decoder target: drop first token
                seg.append(instance[2])

            yield (torch.LongTensor(src),
                   torch.LongTensor(tgt_in),
                   torch.LongTensor(tgt_out),
                   torch.LongTensor(seg))
class T5Dataset(MlmDataset):
    '''
    T5 reuses the MlmDataset record format unchanged; the T5-specific
    sentinel-token handling happens at batch time in T5DataLoader.
    '''
    pass
class T5DataLoader(DataLoader):
    """Turns masked-LM instances into T5 span-corruption batches.

    Each run of consecutive [MASK] tokens in the source is collapsed into a
    single sentinel token with an incrementing id, and the masked-out tokens
    become the decoder target after their sentinel.
    """
    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                instances = self.buffer[self.start:]
            else:
                instances = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size
            src = []
            tgt_in = []
            tgt_out = []
            seg = []
            # Longest decoder sequence in this batch; used for padding below.
            tgt_seq_length = 0
            for _, ins in enumerate(instances):
                # 3-element instances were masked at preprocessing time
                # (static masking); otherwise mask dynamically now.
                if len(ins) == 3:
                    src_single = ins[0]
                    tgt_single = ins[1]
                    seg.append([1] * ins[2][0] + [PAD_ID] * (len(ins[0]) - ins[2][0]))
                else:
                    src_single, tgt_single = mask_seq(ins[0], self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
                    seg.append([1] * ins[1][0] + [PAD_ID] * (len(ins[0]) - ins[1][0]))
                MASK_ID = self.vocab.get(MASK_TOKEN)
                SENTINEL_ID = self.vocab.get(SENTINEL_TOKEN)
                # Force every masked position to the MASK id (mask_seq may keep
                # original/random tokens at some positions).
                for src_index, _ in tgt_single:
                    if src_single[src_index] != MASK_ID:
                        src_single[src_index] = MASK_ID
                tgt_in_single = [self.vocab.get(CLS_TOKEN)]
                mask_index = 0
                src_with_sentinel = []
                for token_id in src_single:
                    if token_id == MASK_ID:
                        # Consecutive masks share one sentinel: only emit a new
                        # sentinel when the previous output token is not the
                        # sentinel just issued.
                        if len(src_with_sentinel) > 0 and src_with_sentinel[-1] == (SENTINEL_ID - 1):
                            pass
                        else:
                            src_with_sentinel.append(SENTINEL_ID)
                            tgt_in_single.append(SENTINEL_ID)
                            # Advance the sentinel id, capped at the vocab end.
                            if SENTINEL_ID < len(self.vocab) - 1:
                                SENTINEL_ID += 1
                        # The original (masked-out) token becomes decoder input.
                        tgt_in_single.append(tgt_single[mask_index][1])
                        mask_index += 1
                    else:
                        src_with_sentinel.append(token_id)
                # Terminate the decoder sequence with a final sentinel + SEP.
                tgt_in_single.append(SENTINEL_ID)
                tgt_in_single.append(self.vocab.get(SEP_TOKEN))
                # Collapsing spans shortened src; pad back to original length.
                while len(src_with_sentinel) < len(src_single):
                    src_with_sentinel.append(PAD_ID)
                if len(tgt_in_single) > tgt_seq_length:
                    tgt_seq_length = len(tgt_in_single)
                src.append(src_with_sentinel)
                tgt_in.append(tgt_in_single)
                # Decoder target is the input shifted left by one.
                tgt_out.append(tgt_in[-1][1:] + [PAD_ID])
            # Right-pad all decoder sequences to the batch maximum.
            for i in range(len(tgt_in)):
                while len(tgt_in[i]) != tgt_seq_length:
                    tgt_in[i].append(PAD_ID)
                    tgt_out[i].append(PAD_ID)
            yield torch.LongTensor(src), \
                torch.LongTensor(tgt_in), \
                torch.LongTensor(tgt_out), \
                torch.LongTensor(seg)
class ClsDataset(Dataset):
    """Builds (src, label, seg) records from a tab-separated classification corpus.

    Supported line formats:
      "label<TAB>text"              -> single sentence, seg all 1
      "label<TAB>text_a<TAB>text_b" -> sentence pair, seg 1 for a / 2 for b
    Any other line is skipped.
    """
    def worker(self, proc_id, start, end):
        """Process corpus lines [start, end) and pickle instances to a temp file."""
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        f_write = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        pos = 0
        with open(self.corpus_path, mode="r", encoding="utf-8") as f:
            while pos < start:
                line = f.readline()
                pos += 1
            while True:
                line = f.readline()
                pos += 1
                line = line.strip().split('\t')
                if len(line) == 2:
                    label = int(line[0])
                    text = " ".join(line[1:])
                    src = [self.vocab.get(t) for t in self.tokenizer.tokenize(text)]
                    src = [self.vocab.get(CLS_TOKEN)] + src
                    tgt = label
                    seg = [1] * len(src)
                    if len(src) >= self.seq_length:
                        src = src[:self.seq_length]
                        seg = seg[:self.seq_length]
                    else:
                        while len(src) != self.seq_length:
                            src.append(PAD_ID)
                            seg.append(PAD_ID)
                    pickle.dump((src, tgt, seg), f_write)
                elif len(line) == 3:  # For sentence pair input.
                    label = int(line[0])
                    text_a, text_b = line[1], line[2]
                    src_a = [self.vocab.get(t) for t in self.tokenizer.tokenize(text_a)]
                    src_a = [self.vocab.get(CLS_TOKEN)] + src_a + [self.vocab.get(SEP_TOKEN)]
                    src_b = [self.vocab.get(t) for t in self.tokenizer.tokenize(text_b)]
                    src_b = src_b + [self.vocab.get(SEP_TOKEN)]
                    src = src_a + src_b
                    # BUG FIX: tgt was never assigned in this branch, so the
                    # previous line's label (or a NameError on the first pair
                    # line) was pickled instead of this line's label.
                    tgt = label
                    seg = [1] * len(src_a) + [2] * len(src_b)
                    if len(src) >= self.seq_length:
                        src = src[:self.seq_length]
                        seg = seg[:self.seq_length]
                    else:
                        while len(src) != self.seq_length:
                            src.append(PAD_ID)
                            seg.append(PAD_ID)
                    pickle.dump((src, tgt, seg), f_write)
                else:
                    pass
                if pos >= end - 1:
                    break
        f_write.close()
class ClsDataLoader(DataLoader):
    """Batches (src, label, seg) triples for classification pretraining."""

    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                batch = self.buffer[self.start:]
            else:
                batch = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size

            src, tgt, seg = [], [], []
            for instance in batch:
                src.append(instance[0])
                tgt.append(instance[1])
                seg.append(instance[2])

            yield (torch.LongTensor(src),
                   torch.LongTensor(tgt),
                   torch.LongTensor(seg))
class PrefixlmDataset(Dataset):
    """Builds prefix-LM records from a tab-separated "source<TAB>target" corpus.

    src becomes [CLS] source [SEP] target [SEP]; tgt is zero over the prefix
    and contains the target tokens to predict; seg_pos stores the prefix and
    total lengths for segment construction at batch time.
    """
    def worker(self, proc_id, start, end):
        """Process corpus lines [start, end) and pickle (src, tgt, seg_pos) tuples."""
        print("Worker %d is building dataset ... " % proc_id)
        set_seed(self.seed)
        dataset_writer = open("dataset-tmp-" + str(proc_id) + ".pt", "wb")
        pos = 0
        with open(self.corpus_path, mode="r", encoding="utf-8") as f:
            while pos < start:
                f.readline()
                pos += 1
            while True:
                line = f.readline()
                pos += 1
                # Skip malformed lines.
                if len(line.strip().split("\t")) != 2:
                    if pos >= end:
                        break
                    continue
                document_src, document_tgt = line.strip().split("\t")
                src = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(document_src))
                tgt = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(document_tgt))
                src = [self.vocab.get(CLS_TOKEN)] + src + [self.vocab.get(SEP_TOKEN)]
                tgt = tgt + [self.vocab.get(SEP_TOKEN)]
                # seg_pos[0] = prefix length (source part incl. CLS/SEP).
                seg_pos = [len(src)]
                # Drop instances whose prefix alone already fills the sequence.
                if seg_pos[0] >= self.seq_length:
                    continue
                src = src + tgt
                # No loss over the prefix (zeros); predict target shifted by one.
                tgt = [0] * seg_pos[0] + tgt[1:] + [PAD_ID]
                seg_pos.append(len(src))
                src, tgt = src[:self.seq_length], tgt[:self.seq_length]
                while len(src) != self.seq_length:
                    src.append(PAD_ID)
                    tgt.append(PAD_ID)
                if seg_pos[1] > self.seq_length:
                    seg_pos[1] = self.seq_length
                pickle.dump((src, tgt, seg_pos), dataset_writer)
                if pos >= end:
                    break
        dataset_writer.close()
class PrefixlmDataLoader(DataLoader):
    """Batches prefix-LM instances; seg is 1 over the prefix, 2 over the
    generated part, and PAD elsewhere."""

    def __iter__(self):
        while True:
            while self._empty():
                self._fill_buf()
            if self.start + self.batch_size >= self.end:
                batch = self.buffer[self.start:]
            else:
                batch = self.buffer[self.start: self.start + self.batch_size]
            self.start += self.batch_size

            src, tgt, seg = [], [], []
            for instance in batch:
                src.append(instance[0])
                tgt.append(instance[1])
                prefix_len, total_len = instance[2][0], instance[2][1]
                seg.append([1] * prefix_len
                           + [2] * (total_len - prefix_len)
                           + [PAD_ID] * (len(instance[0]) - total_len))

            yield (torch.LongTensor(src),
                   torch.LongTensor(tgt),
                   torch.LongTensor(seg))
| [
"noreply@github.com"
] | sheller2010.noreply@github.com |
ffd4c2e8f9b2b32a63248f015518bc3a2832f8dd | 2dbdf395b691d26861909bc47a858bd76de1c5e5 | /main.py | 28d4bae908a4faffc19bdf53edb522399bfab01d | [] | no_license | hkhare42/placebo_app | a867e7d429a670935511106ec6dd3b1ced911551 | 96e6f3f87173f342254ba09690f4d28bb8c3c496 | refs/heads/master | 2020-06-21T04:15:34.199361 | 2016-12-08T14:43:06 | 2016-12-08T14:43:06 | 74,806,629 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,076 | py | from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, SlideTransition
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Color, Rectangle
import codecs
import requests
import random
from kivy.utils import get_color_from_hex
# from kivy.config import Config
# Config.set('graphics', 'width', '1080')
# Config.set('graphics', 'height', '1920')
class MultiScreens(ScreenManager):
    """ Screen management class containing most of app. """
    # Picked from http://www.alexandrafranzen.com/2012/02/25/50-ways-to-say-youre-awesome/
    # SECURITY NOTE(review): eval() executes arbitrary code from messages.txt;
    # ast.literal_eval or JSON would be safer if the file can be user-supplied.
    message_dict = eval(codecs.open('messages.txt', 'r',
                        encoding='utf-8', errors='ignore').read())
    # Currently displayed message key, re-drawn after each response.
    ranstr = random.choice(list(message_dict.keys()))
    def send_resp(self, response):
        # Record the user's YES/NO answer on the backend, then return home
        # with a random slide transition and pick the next message.
        ind = 1 if response == 'YES' else 0
        requests.post('http://ec2-35-154-66-204.ap-south-1.compute.amazonaws.com:8080/entry',
                      data= {'user_id': self.ids.ti.text,
                             'msg_id': self.ranstr, 'response': ind})
        self.current = 'home'
        self.transition = SlideTransition(
            direction=random.choice(['left','right','up','down'])
        )
        self.ranstr = random.choice(list(self.message_dict.keys()))
        self.ids.cl.set_canvas()
    def set_stage(self):
        # Reveal the message panel and slide the intro layout off-screen.
        self.ids.pm.disabled = False
        self.ids.pm.opacity = 1
        self.ids.al2.pos_hint = {'x': 0, 'top': 1.2}
class ColLayout(FloatLayout):
    """Layout whose background is repainted with a random translucent colour."""

    def set_canvas(self):
        # Detach the children, repaint the background, then re-attach them
        # in their original order.
        kept = self.children[:]
        self.clear_widgets()
        self.canvas.clear()
        with self.canvas.before:
            Color(rgba=[random.random() for _ in range(3)] + [0.3])
            Rectangle(pos=self.pos, size=self.size)
        for child in kept:
            self.add_widget(child)
class PlaceboApp(App):
    """ Main app class. """
    def build(self):
        # Root widget: the screen manager holding all app screens.
        return MultiScreens()
    def open_settings(*args):
        # Deliberately a no-op: disables Kivy's default F1 settings panel.
        pass
if __name__ == '__main__':
    # NOTE(review): Window is imported here rather than at the top —
    # presumably to avoid creating the window on module import; confirm.
    from kivy.core.window import Window
    # White background for the whole app window.
    Window.clearcolor = get_color_from_hex('#FFFFFF')
PlaceboApp().run() | [
"khareharshit@gmail.com"
] | khareharshit@gmail.com |
86fd160d79dd80ef7cc6e2c71d7e79b9a6feb620 | 531d496fc882f2888e56b3e9518de8c1f5cc198e | /Web Application/trivia_quiz/views.py | 548830e310699841fcf40cc7f601cb1e224d4452 | [] | no_license | satista/Data_Science | 79d3a3e9be82c442e3f359db8202008c9dd384b1 | 1b52dea56fdffeee0d5ba1d157e5e39fa663ea6d | refs/heads/main | 2023-03-23T23:23:45.629765 | 2021-03-16T17:29:01 | 2021-03-16T17:29:01 | 302,077,469 | 0 | 0 | null | 2020-10-29T21:20:28 | 2020-10-07T15:20:44 | Jupyter Notebook | UTF-8 | Python | false | false | 377 | py | from django.shortcuts import render
from .models import trivia_quiz
# Create your views here.
#CRUD - create retrieve update delete
#List all the trivia quiz
def trivia_quiz_list_view(request):
    """Render quizzes/index.html with every trivia_quiz row in the context."""
    trivia_quiz_obj = trivia_quiz.objects.all()
    content = {
        "trivia_quiz_obj": trivia_quiz_obj
    }
return render(request, "quizzes/index.html", context=content) | [
"47756835+satista@users.noreply.github.com"
] | 47756835+satista@users.noreply.github.com |
77e8fbc9bed47d65344b5cc9003be7ee08f8b7f6 | d42eeb160cb06d5238fe83aa4dfa030ca71a4087 | /growth_v01.py | 5d21f62532d6a73fffbea73c00f1db18775b549b | [
"MIT"
] | permissive | diegopmayer/crescimento-populacional-brasileiro | 28f0b75e5ca45bf761d98be7cb8ee5b9f5086612 | 3964ec5368ce6015ee76e0b9221a72c6baec6e3b | refs/heads/master | 2020-05-30T10:00:57.346653 | 2019-06-01T19:26:39 | 2019-06-01T19:26:39 | 189,663,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import matplotlib.pyplot as plt
# Chart labels
title_name = 'Crescimento da População Brasileira 1980-2016'
x_name = 'Ano'
y_name = 'População x 100.000.000'

# Data: Brazilian population per year, 1980-2016.
ano = [year for year in range(1980, 2016+1)]
pop = [119011052, 121154159, 123774229, 126403352, 129025577, 131639272,
       134228492, 136780739, 139280140, 141714953, 144090756, 146825475,
       148684120, 151556521, 153726463, 155822296, 157070163, 159636297,
       161790182, 163947436, 169799170, 172385776, 174632932, 176876251,
       179108134, 184184074, 186770613, 189335191, 189612814, 191481045,
       190755799, 192379287, 193976530, 201062789, 202799518, 204482459,
       206114067
       ]

# Plot the data first so that legend() has a labelled artist to display.
plt.bar(ano, pop, color='#e3eee6')
plt.plot(ano, pop, linestyle='dashed', color='k', label='População')

plt.title(title_name)
plt.xlabel(x_name)
plt.ylabel(y_name)
# BUG FIX: plt.legend() used to be called before anything was plotted and no
# artist carried a label, producing an empty legend / "no handles" warning.
plt.legend()
# Display the chart
plt.show() | [
"dpmayer@hotmail.com"
] | dpmayer@hotmail.com |
6279bb7dc4eab7bf1d3db696e27c99d8bfccb52d | 8469f426b47222d8f0c82c5f05131e61ea7bf623 | /uri1001/uri1038.py | e457d15a1db5b4f4c419457e1dd215415c1aa5cf | [] | no_license | jamil2gomes/uri-python | 493b07448fc9ddc9e86ae30808c0cd7465444281 | db3c9ae4dac93c4c03c040ee46a74a6c5987bc11 | refs/heads/master | 2020-08-23T07:18:13.146535 | 2019-10-26T01:59:46 | 2019-10-26T01:59:46 | 216,568,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | split = input().split(" ")
# URI 1038: unit price for each menu item code.
PRICES = {1: 4.0, 2: 4.5, 3: 5.0, 4: 2.0, 5: 1.5}

item = int(split[0])
qnt = int(split[1])
# Unknown item codes cost 0, matching the original if/elif chain's default.
total = qnt * PRICES.get(item, 0)
print("Total: R$ {:0.2f}".format(total))
| [
"jamil.lannister23@gmail.com"
] | jamil.lannister23@gmail.com |
db985281b42e7256f86e97b45e00e71da8cd0b1d | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/psu/instpol.py | 82e180acf3f6c666ea7d10c6bd65a11d575327f0 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 6,877 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class InstPol(Mo):
    """
    The power redundancy policy is for all power supply units on the fabric nodes (leaves and spines) that are consuming the power supply policy through their respective selector profile policy.
    """
    # NOTE: auto-generated model metadata (see file header: "do not modify!").
    meta = ClassMeta("cobra.model.psu.InstPol")
    meta.moClassName = "psuInstPol"
    meta.rnFormat = "psuInstP-%(name)s"
    meta.category = MoCategory.REGULAR
    meta.label = "Power Supply Redundancy Policy"
    meta.writeAccessMask = 0x20000000001
    meta.readAccessMask = 0x800ae700000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.psu.RtPsuInstPolCons")
    meta.childClasses.add("cobra.model.psu.RtResPsuInstPol")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.psu.RtPsuInstPol")
    meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtPsuInstPol", "rtfabricPsuInstPol-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtPsuInstPolCons", "rtpsuInstPolCons"))
    meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtResPsuInstPol", "rtresPsuInstPol"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.fabric.Inst")
    meta.superClasses.add("cobra.model.fabric.ProtoPol")
    meta.superClasses.add("cobra.model.fabric.ProtoInstPol")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.fabric.UtilInstPol")
    meta.rnPrefixes = [
        ('psuInstP-', True),
    ]
    # Managed-object property definitions.
    prop = PropMeta("str", "adminRdnM", "adminRdnM", 765, PropCategory.REGULAR)
    prop.label = "Admin Redundancy Mode"
    prop.isConfig = True
    prop.isAdmin = True
    prop.defaultValue = 3
    prop.defaultValueStr = "comb"
    prop._addConstant("comb", "combined", 3)
    prop._addConstant("insrc-rdn", "input-source-redundancy", 6)
    prop._addConstant("n-rdn", "non-redundant", 4)
    prop._addConstant("not-supp", "not-supported", 1)
    prop._addConstant("ps-rdn", "n+1-redundancy", 5)
    prop._addConstant("rdn", "n+n-redundancy", 2)
    prop._addConstant("sinin-rdn", "single-input-redundancy", 7)
    prop._addConstant("unknown", "unknown", 0)
    meta.props.add("adminRdnM", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "name", "name", 7080, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    meta.namingProps.append(getattr(meta.props, "name"))
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Policy"
    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        """Create the MO under parentMoOrDn; `name` is the naming property."""
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
78494f279a863c7e8050471c823747064c1d61ed | efea4efb891c3f91f2d165778832eff7cf0264a5 | /linearSVCtest.py | bc8e76d514f34c7b65f04da4d59f18a7fd8188ed | [] | no_license | Sapphirine/TwitterBasedMovieRecommendationSystem | 759a860a56fbb29da9fbd2cc79fa0231f6f927ca | 78170e39ffb28c969b40963bcd23583a88887421 | refs/heads/master | 2021-01-10T15:34:00.039266 | 2015-12-23T16:52:34 | 2015-12-23T16:52:34 | 48,466,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | ##Author Xing Lan
##TPC: Big data analysis
##Linear SVC classification test
# Loads a pre-trained LinearSVC + vectorizer from pickle files and reports
# accuracy on positive/negative test sets.
# NOTE(review): this script uses Python 2 print statements; the pickle files
# are opened in text mode, which works on Python 2 but must be 'rb' on Python 3.
#from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.svm import LinearSVC
import os
import numpy as np
import pickle
#import time
#import nltk
##set the working dictionary
os.chdir("/Users/lanxing/Desktop/TPC big data/final project/linearSVM")
##read testing data (one whitespace-normalised document per line)
testdatapos = []
testdataneg = []
text_file = open("test_positive.txt", "r")
lines = text_file.readlines()
for line in lines:
    testdatapos.append(' '.join(line.split()))
text_file.close()
text_file = open("test_negative.txt", "r")
lines = text_file.readlines()
for line in lines:
    testdataneg.append(' '.join(line.split()))
text_file.close()
##apply the svc model
x_test_pos = np.array([''.join(el) for el in testdatapos[0:len(testdatapos)]])
x_test_neg = np.array([''.join(el) for el in testdataneg[0:len(testdataneg)]])
clf = open('linearSVCclassifier.pickle')
vec = open('vectorizer.pickle')
classifier=pickle.load(clf)
vectorizer=pickle.load(vec)
clf.close()
vec.close()
# Predictions are -1/+1; the formulas below convert the mean into a percentage.
y_pred_pos=classifier.predict(vectorizer.transform(x_test_pos)).astype(int)
y_pred_neg=classifier.predict(vectorizer.transform(x_test_neg)).astype(int)
print 'Positive correct rate %.2f%%' %(50.0+50.0*y_pred_pos.sum()/y_pred_pos.shape[0])
print 'Negative correct rate %.2f%%' %(50.0-50.0*y_pred_neg.sum()/y_pred_neg.shape[0])
| [
"yao.yang.cksp@gmail.com"
] | yao.yang.cksp@gmail.com |
af8dd99afea7c75e39cb4277511cca74253830a3 | f3d13fd10dc1c2a0086e85a01db99173a49470b6 | /tourmarks/migrations/0002_auto_20180512_1755.py | 602c6d52d7c0585ec21e5675d558cb664b172a68 | [] | no_license | gh720/test_proj1 | 0c3c864c3e92cdb6dc60d108719b6ca9221e7ead | df28a8e3a903d9db0aea0c442f34f755a7fe76c9 | refs/heads/jwt_auth | 2022-12-12T18:33:16.478209 | 2018-05-14T09:51:24 | 2018-05-14T09:51:24 | 133,282,157 | 0 | 0 | null | 2022-12-08T02:05:25 | 2018-05-13T23:25:26 | Python | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.0.5 on 2018-05-12 12:55
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Visit's fields location_id -> location and user_id -> user.

    NOTE(review): presumably these are ForeignKey fields, for which Django adds
    the "_id" column suffix itself — confirm against the model definitions.
    """
    dependencies = [
        ('tourmarks', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='visit',
            old_name='location_id',
            new_name='location',
        ),
        migrations.RenameField(
            model_name='visit',
            old_name='user_id',
            new_name='user',
        ),
    ]
| [
"name@name.com"
] | name@name.com |
ea44472cf613f7a505cdbd709dcbf6b69628ed94 | 35d42fa466f6457c83f9e89b6e87e050c0189bf2 | /news/urls.py | 8cd7b45c9fe79d8f08621a003fef854c096236ef | [] | no_license | Burence1/The-Moringa-Tribune | 4c0473f50f84f0f6563369b805d7b00bf8aa43ec | b035a082580eb1e8841e504c87f56392f85ae43e | refs/heads/main | 2023-05-12T11:17:26.898628 | 2021-05-27T13:48:13 | 2021-05-27T13:48:13 | 365,954,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path,re_path
from . import views
# URL routes for the news app.
urlpatterns = [
    path('', views.news_today, name='newsToday'),
    # Raw strings avoid invalid-escape-sequence warnings in regex patterns.
    re_path(r'archives/(\d{4}-\d{2}-\d{2})/', views.past_days_news, name='pastNews'),
    path('search/', views.search_results, name='search_results'),
    # BUG FIX: path() treats its route literally, so 'article/(\d+)' never
    # matched a real article id — a regex route needs re_path().
    re_path(r'article/(\d+)', views.article, name='article'),
    path('new-article', views.new_article, name='new-article'),
    path('ajax/newsletter/', views.newsletter, name='newsletter'),
    path('api/merch/merch-id/<int:pk>/', views.MerchDescription.as_view())
]
if settings.DEBUG:
    # Serve user-uploaded media files directly (development only).
    urlpatterns+= static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| [
"burensomondi@gmail.com"
] | burensomondi@gmail.com |
ec44f9d859a52ea23febe2b58e708b0a57791bac | 63104fedde095c400dcf92fd90c07ef51ead5ea9 | /chet_nechet.py | cff0248475a89819d047e6d0a2ea4c2622354307 | [] | no_license | Artarin/Python-Trashbox | f5295e776a5aa548e5dee92531231b0069300417 | c7c20ea2bb0c78fc02b10c10496cee5daf54b419 | refs/heads/master | 2023-01-03T04:36:12.051489 | 2020-10-30T16:15:30 | 2020-10-30T16:15:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | #Заданы две клетки шахматной доски. Если они покрашены в один цвет,
#то выведите слово YES, а если в разные цвета — то NO. Программа получает
# на вход четыре числа от 1 до 8 каждое, задающие номер столбца и номер
#строки сначала для первой клетки, потом для второй клетки.
# Two chessboard squares are given as (column, row) pairs, each coordinate
# 1..8, read one number per line.  A square is black when column and row have
# the same parity, i.e. its colour is determined by (column + row) % 2 — so
# the two squares share a colour exactly when those parities are equal.
a = int(input())
b = int(input())
c = int(input())
d = int(input())

if (a + b) % 2 == (c + d) % 2:
    print('YES')
else:
    print('NO')
| [
"oskarualds@mail.ru"
] | oskarualds@mail.ru |
b7e4e280e4c4ea18117163135448ed4e9f3b14b8 | 19be48da7eb090f31fd88b1cef9c8ef3a6aaa0eb | /funcion23.py | c746bc134e246f9f9e9ecf9b80faae8d064e47c1 | [] | no_license | smith-sanchez/t09_Carrion_Villavicencio | 376608d60dd175d872f2622b38ff220b6160ff9a | 4cbb0e0694b35fd7135748bc7ef13db7c7374390 | refs/heads/master | 2020-11-27T05:08:14.629793 | 2019-12-20T18:36:00 | 2019-12-20T18:36:00 | 229,316,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # funcion 23
# Chef salary: total pay = days worked * pay per day (via libreria.salario).
import libreria
import os
# NOTE(review): os.sys is an undocumented alias of the sys module; plain
# `import sys` + sys.argv would be the conventional spelling.
dia=int(os.sys.argv[1])            # CLI arg 1: number of days worked
precio_dia=float(os.sys.argv[2])   # CLI arg 2: pay per day
salario_total=libreria.salario(dia,precio_dia)
print(" el salario es:",salario_total) | [
"noreply@github.com"
] | smith-sanchez.noreply@github.com |
d92fef518f89eec2329a096bce5710368a2371ee | 072e0706d4751d9b2788a16d8e273baeceffde2e | /02_DS/week2_priority_queues_and_disjoint_sets/3_merging_tables/merging_tables.py | db7e90825832869354017aa5f603fd00d21f3f73 | [] | no_license | AndreiZn/Algorithms_and_DS | 74ee046568eee8be10a7c8eee8feaaa7c3afcfe4 | 3a139ca68d056cb2d0fa489f8a56e084910e1055 | refs/heads/master | 2021-09-08T03:42:09.847221 | 2021-09-06T21:17:57 | 2021-09-06T21:17:57 | 202,354,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | # python3
import os
class Database:
def __init__(self, row_counts):
self.row_counts = list(row_counts)
self.max_row_count = max(self.row_counts)
n_tables = len(row_counts)
self.ranks = [1] * n_tables
self.parents = list(range(n_tables))
def union(self, i, j):
i_id = self.get_parent(i)
j_id = self.get_parent(j)
if i_id == j_id:
return
if self.ranks[i_id] > self.ranks[j_id]:
self.parents[j_id] = i_id
self.row_counts[i_id] += self.row_counts[j_id]
if self.row_counts[i_id] > self.max_row_count:
self.max_row_count = self.row_counts[i_id]
else:
self.parents[i_id] = j_id
self.row_counts[j_id] += self.row_counts[i_id]
if self.row_counts[j_id] > self.max_row_count:
self.max_row_count = self.row_counts[j_id]
if self.ranks[i_id] == self.ranks[j_id]:
self.ranks[j_id] += 1
def merge(self, src, dst):
src_parent = self.get_parent(src)
dst_parent = self.get_parent(dst)
if src_parent == dst_parent:
return False
# merge two components
# use union by rank heuristic
self.union(src, dst)
# update max_row_count with the new maximum table size
#num_rows_merged = self.row_counts[src] + self.row_counts[dst]
#self.row_counts[src] = num_rows_merged
#self.row_counts[dst] = num_rows_merged
#a = list(self.row_counts)
#self.max_idx = a.index(self.max_row_count)
return True
def get_parent(self, table):
if table != self.parents[table]:
self.parents[table] = self.get_parent(self.parents[table])
return self.parents[table]
def main():
    """Read merge queries from stdin; after each query print the largest table size."""
    n_tables, n_queries = map(int, input().split())
    counts = list(map(int, input().split()))
    assert len(counts) == n_tables
    db = Database(counts)
    for _ in range(n_queries):
        dst, src = map(int, input().split())
        db.merge(dst - 1, src - 1)
        print(db.max_row_count)


if __name__ == "__main__":
    main()
| [
"anznobishchev@gmail.com"
] | anznobishchev@gmail.com |
1a20eabfbc7f573e3b0fa38c7ad36bd5c8bfb1cc | 21f62294ecf92126f07d396aa5825631f1c4c845 | /flocking.pyde | e3dcbc336b7e0a96a4dcd3c62c3f1429514b05ff | [
"MIT"
] | permissive | cody-berry/flocking | 50dc6657d84f8c15dbf52b86871ce4560b4382c4 | bd2cbda878e2993e59b3d6dcfd5fa7d3b54cab41 | refs/heads/main | 2023-07-18T03:57:11.178746 | 2021-09-07T12:01:39 | 2021-09-07T12:01:39 | 401,462,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | pyde | # Cody
# August 30, 2021
# Flocking simulation
# Coding Challenge #124 —— Daniel Shiffman
# Boid rules:
# Separation: Steering to avoid crowding other boids
# Alignment: Steer in the average direction of nearby boids
# Cohesion: Steer towards nearby boids
# Obstacle Avoidance: Get away from obstacles
#
# v0.01 - Create the Boid class
# v0.02 - Alignment
# v0.03 - Cohesion
# v0.04 - Separation
# v0.05 - Seek
# v0.06 - Seek velocity
# v0.0 - Hack bot
# v0.1 - 3D
# v0.1 - Adjustible obstacles
# v0.1 - Target
# v0.1 - Auto-obstacle
# v0.1 - Auto-target
# v0. - Obstacle Avoidance <- where does this go? I need to see when I create
# my Obstacle Path repository.
from Boid import *
from Quadtree import *
from Rectangle import *
from Point import *
def setup():
    """Create 150 boids and index each one in the quadtree."""
    global boids, qt, points
    colorMode(HSB, 360, 100, 100, 100)
    size(640, 360)
    boids = []
    points = []
    boundary = Rectangle(0, 0, width, height)
    qt = Quadtree(boundary, 4)
    frameRate(600)
    for i in range(150):
        x = random(width)
        y = random(height)
        b = Boid(x, y)
        p = Point(x, y, b)
        # BUG FIX: a *second* random Boid used to be appended here, so the
        # quadtree indexed boids that were never stored in `boids` (draw()
        # rebuilds the tree, but the initial frame was inconsistent).
        boids.append(b)
        qt.insert(p)
def draw():
    """Per-frame update: rebuild the quadtree, flock every boid, then render."""
    global boids, qt, points
    # Positions change every frame, so the quadtree is rebuilt from scratch.
    qt = Quadtree(Rectangle(0, 0, width, height), 4)
    background(210, 80, 32)
    fill(0, 0, 100)
    # If we do just alignment, if the force is too strong, since the boids
    # depend on all of the other boids, some of the depended ones updated and
    # others not updated, resulting in the boids just going in circles.
    for i in range(len(boids)):
        b = boids[i]
        p = Point(b.pos.x, b.pos.y, b)
        qt.insert(p)
    qt.show()
    mouse = PVector(mouseX, mouseY)
    fill(90, 100, 100, 50)
    for boid in boids:
        # now that we have a quadtree, we can find all of the points in the
        # quadtree
        perception_radius = 15
        # Query the square window around the boid instead of scanning all boids.
        qt_query = qt.query(Rectangle(boid.pos.x - perception_radius,
                                      boid.pos.y - perception_radius,
                                      perception_radius*2,
                                      perception_radius*2))
        queried_boids = []
        for p in qt_query:
            queried_boids.append(p.data)
        boid.flock(queried_boids)
        # boid.acc.add(boid.seek(mouse))
        boid.update()
        boid.edges()
        # boid.acc.add(PVector.random2D().mult(random(0.1, 0.3)))
    # Second pass: draw after all boids moved, so rendering uses final positions.
    for boid in boids:
        fill(0, 0, 100)
        boid.show()
    # On-screen FPS readout in a translucent rounded box.
    s = "FPS: {}".format(frameRate)
    fill(0, 0, 100, 30)
    stroke(0, 0, 100)
    rect(40, 55, textWidth(s)+20, -32, 5)
    textSize(24)
    fill(0, 0, 100)
    text(s, 50, 50)
"cody.tian@icloud.com"
] | cody.tian@icloud.com |
4c3cf47ce7068940c3eacbc788ec7144396160d0 | 2fada6a77030fb32b1ead048a4c59f7a47c4b839 | /1jgg_mp/30_score.py | 2c20560915f56a35d7af9ccbf36bb2f285dab135 | [] | no_license | pgbarletta/facu | 77ba96f344ea651fb0576e46a1865128f03a3fec | 53a5061bbb313ea498a7e54eb1d05ee55ed5dcbd | refs/heads/master | 2021-01-21T20:59:33.355424 | 2017-07-13T13:52:21 | 2017-07-13T13:52:21 | 92,293,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | from modeller import *
from modeller.scripts import complete_pdb
log.verbose() # request verbose output
env = environ()
env.libs.topology.read(file='$(LIB)/top_heav.lib') # read topology
env.libs.parameters.read(file='$(LIB)/par.lib') # read parameters
# read model file
mdl = complete_pdb(env, 'mp.B99990030.pdb')
# Assess with DOPE:
s = selection(mdl) # all atom selection
s.assess_dope(output='ENERGY_PROFILE NO_REPORT', file='profile_30',
normalize_profile=True, smoothing_window=15)
| [
"pbarletta@gmail.com"
] | pbarletta@gmail.com |
60f8367ccb07dc2bd4bfcc6e255bcfa7bb1e4b19 | 8af70681695c8727724f3f47ca9f1e1d9a2d3d2a | /Travail/Python/great_heuristique_v5_feh.py | 401ad7d1b7a6246fd5c83619d654d3a1750a2959 | [] | no_license | LixonClem/Stage | f6cf32a9f6c4b050049ede2be6e80a6a5f2f451b | 71f8e6810d1322ac3a3d00970fd5214aaaf35f88 | refs/heads/master | 2020-03-18T19:11:53.137075 | 2018-07-26T14:16:50 | 2018-07-26T14:16:50 | 135,140,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,146 | py |
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pyplot as py
import random as rd
import math as m
from lxml import etree
import os.path
global Capacity
global instance_test
global lam
global Error
global KNN
global relocation
global mu
global nu
global execute
Capacity = 100
KNN = 30
relocation = 3
Error = (0, (0, 0), ([[0], 0], [[0], 0]))
#######################
# File input / output #
#######################
def read(file): # give the path of the file
    """Parse a VRP instance in XML format (lxml required).

    Returns (inst, demand) where inst is a list of (x, y) node
    coordinates (depot first) and demand is indexed by node id;
    demand[0] == 0 is the depot's placeholder demand.
    """
    x = []
    y = []
    demand = [0]
    tree = etree.parse("" + file)
    # NOTE: loop variables `abs` and `ord` shadow Python builtins.
    for abs in tree.xpath("/instance/network/nodes/node/cx"):
        x.append(int(float(abs.text)))
    for ord in tree.xpath("/instance/network/nodes/node/cy"):
        y.append(int(float(ord.text)))
    inst = [(x[i], y[i]) for i in range(len(x))]
    for dem in tree.xpath("/instance/requests/request/quantity"):
        demand.append(int(float(dem.text)))
    return inst, demand
def writef(namefile, text):
    """Append *text* plus a trailing newline to file *namefile*.

    Append mode ('a') already creates the file when it does not exist,
    so the original os.path.isfile branch (which duplicated the write
    in 'w' mode) was redundant; a context manager also guarantees the
    handle is closed even if the write raises.
    """
    with open(namefile, 'a') as f:
        f.write(text + '\n')
#######################
# Plotting / display functions #
#######################
def print_instance(inst):
    """Plot the instance nodes: depot (inst[0]) in blue, customers in red."""
    dep = inst[0]
    cust = inst[1:]
    py.plot(dep[0], dep[1], color='blue', marker='o')
    for i in cust:
        py.plot(i[0], i[1], color='red', marker='o')
def print_route(route, inst, c):
    """Plot one route as a closed polyline (returns to route[0]).

    *c* is a route counter kept for labelling (label call is commented out).
    """
    x = []
    y = []
    for i in range(len(route)):
        x.append(inst[route[i]][0])
        y.append(inst[route[i]][1])
    # close the tour by coming back to the first node
    x.append(inst[route[0]][0])
    y.append(inst[route[0]][1])
    py.plot(x, y) # , label="route " + str(c))
def print_routes(routes, inst):
    """Draw every route of the solution on the current matplotlib figure."""
    for index, route in enumerate(routes, start=1):
        print_route(route, inst, index)
def print_edges(edges, inst,col):
    """Plot each (i, j) edge of *edges* as a segment of colour *col*."""
    for e in edges:
        x = [inst[e[0]][0], inst[e[1]][0]]
        y = [inst[e[0]][1], inst[e[1]][1]]
        py.plot(x, y, color=col)
def print_current_sol(routes, inst):
    """Plot the instance nodes, then overlay the routes of the solution."""
    print_instance(inst)
    print_routes(routes, inst)
####################
# Shared helper functions #
####################
# compute the demand of the route
def route_demand(route, demand):
    """Total demand of the customers visited by *route*.

    *demand* is indexed by node id (demand[0] is the depot's 0), so the
    depot node contributes nothing. Replaces the manual accumulator loop
    with the builtin sum over a generator.
    """
    return sum(demand[i] for i in route)
def verification(sol, demand):
    """Return True iff every route of solution *sol* respects the global
    vehicle Capacity; idiomatic all() replaces the manual early-return loop."""
    return all(route_demand(r, demand) <= Capacity for r in sol)
# Compute the cost of a solution
def distance(p1, p2):
    """Euclidean distance between two (x, y) points.

    math.hypot computes sqrt(dx*dx + dy*dy) with better protection
    against intermediate overflow/underflow than the manual formula.
    """
    return m.hypot(p2[0] - p1[0], p2[1] - p1[1])
def cost_sol(routes, inst):
    """Total travelled distance of a solution.

    Each route is costed edge by edge, then closed by adding the edge
    from its last node back to its first (routes are stored open).
    """
    c = 0
    for r in routes:
        for i in range(len(r)-1):
            a = inst[r[i]]
            b = inst[r[i+1]]
            c += distance(a, b)
        # closing edge back to the start of the route
        c += distance(inst[r[len(r)-1]], inst[r[0]])
    return c
return c
# Compute the kNN for each node
def voisins(k, inst):
    """Compute the k nearest neighbours of every node.

    Returns a list v where v[i] is the list of the k node indices
    closest to node i, ordered by increasing distance.
    """
    v = []
    for i in range(len(inst)):
        vi = []
        couples = []
        for j in range(len(inst)):
            if i != j:
                vi.append([distance(inst[i], inst[j]), j])
        # sort by distance (first element of each [dist, j] pair)
        vi.sort()
        for l in vi:
            couples.append(l[1])
        v.append(couples[:k])
    return v
def find_route(i, routes):
    """Return the first route of *routes* that visits customer *i*
    (None when no route contains it)."""
    for route in routes:
        if i in route:
            return route
    return None
def copy_sol(sol):
    """Return an independent two-level copy of a solution (list of routes).

    The original round-tripped every route through numpy (np.copy then
    list), which silently converted the builtin int customer ids into
    np.int64 scalars and allocated a throwaway array per route. A plain
    per-route copy is equivalent for the algorithm (only the inner lists
    are ever mutated) and keeps builtin ints.
    """
    return [route.copy() for route in sol]
def fixed_alea(edges):
    """Randomly pick n//10 edges to be 'fixed' (protected from moves).

    Mutates *edges* in place: each chosen edge is removed from it and
    appended to the returned list. The upper bound n-i-1 shrinks with
    the list so randint always indexes a valid position.
    """
    fe = []
    n = len(edges)
    for i in range(n//10):
        alea = rd.randint(0,n-i-1)
        choice = edges[alea]
        edges.remove(choice)
        fe.append(choice)
    return fe
def fixed_0(edges):
    """Fix the depot-incident edges found in the first half of *edges*.

    Every edge among the first len(edges)//2 that touches node 0 (the
    depot) is removed from *edges* in place and collected in the
    returned list.

    Bug fix: the original removed edges[i] while still advancing the
    index i over the shrinking list, which skipped the element that
    slid into position i (adjacent depot edges were missed) and could
    raise IndexError when many edges were removed. Iterating over a
    snapshot of the first half avoids both problems.
    """
    fe = []
    n = len(edges)
    for e in edges[:n // 2]:
        if 0 in e:
            fe.append(e)
            edges.remove(e)
    return fe
def adjacents(pi, fe):
    """Nodes linked to *pi* by a fixed edge of *fe*, without duplicates,
    in order of first appearance."""
    neighbours = []
    for u, w in fe:
        if u == pi and w not in neighbours:
            neighbours.append(w)
        elif w == pi and u not in neighbours:
            neighbours.append(u)
    return neighbours
#####################
# Implementation of the Clarke-Wright savings heuristic #
#####################
# Code for a basic CW heuristic; gives an initial solution for the problem.
def init_routes(inst, demand):
    """Trivial starting solution: one out-and-back route [0, j, 0] per
    customer j (*demand* is unused, kept for interface symmetry)."""
    return [[0, customer, 0] for customer in range(1, len(inst))]
def mean_demand(demand):
    """Average demand per customer.

    The depot entry demand[0] == 0 is excluded from the count, hence
    the division by n - 1. The manual accumulator loop is replaced by
    the builtin sum.
    """
    return sum(demand) / (len(demand) - 1)
def compute_savings(inst, demand, lam, mu, nu):
    """Build the parametrised Clarke-Wright savings matrix.

    savings[i][j] (only the upper triangle i < j is filled) scores the
    merge of customers i+1 and j+1 — the +1 offset exists because row 0
    of the matrix corresponds to customer 1, the depot being node 0.
    lam weights the inter-customer distance, mu the asymmetry of the
    depot distances, nu the combined demand relative to the mean demand.
    """
    savings = [[0 for j in range(len(inst)-1)] for i in range(len(inst)-1)]
    d_bar = mean_demand(demand)
    for i in range(len(inst)-1):
        for j in range(i+1,len(inst)-1):
            if (i == j):
                savings[i][j] = 0
            else:
                savings[i][j] = distance(inst[i+1], inst[0]) + distance(inst[j+1], inst[0])- lam*distance(inst[i+1], inst[j+1])+ mu*abs(distance(inst[i+1], inst[0]) -distance(inst[j+1], inst[0]))+ (nu*(demand[i+1] + demand[j+1])/d_bar)
    return savings
def max_savings(n, savings):
    """Best remaining merge candidate.

    Scans the upper triangle of *savings* and returns (i, j, s) with the
    1-based customer pair (i, j) holding the largest saving s; the
    sentinel (-1, 0, 0) is returned when the matrix is empty.
    """
    best_i, best_j, best_s = -1, 0, 0
    for a in range(n):
        row = savings[a]
        for b in range(a + 1, n):
            if best_i < 0 or row[b] > best_s:
                best_i, best_j, best_s = a + 1, b + 1, row[b]
    return (best_i, best_j, best_s)
def can_merge(i, r1, j, r2, demand):
    """Check whether routes r1 and r2 can be merged through customers i, j.

    Returns 1 when i is the first customer of r1 and j the last of r2
    (merge r2 + r1), 2 for the symmetric case (merge r1 + r2), and -1
    when merging is impossible (same route, wrong endpoints, or the
    combined demand would exceed Capacity). Routes still carry their
    trailing depot 0, hence the len(r)-2 index for the last customer.
    """
    if r1 == r2:
        return -1
    elif (r1[1] == i and r2[len(r2)-2] == j and route_demand(r1, demand)+route_demand(r2, demand) <= Capacity):
        return 1
    elif (r1[len(r1)-2] == i and r2[1] == j and route_demand(r1, demand)+route_demand(r2, demand) <= Capacity):
        return 2
    else:
        return -1
def merge_routes(cand, routes, savings, inst, demand):
    """Try to merge the routes of the candidate pair cand = (i, j, s).

    On success the two routes are replaced in *routes* by their
    concatenation (dropping the redundant depot node at the junction).
    The savings entry for (i, j) is zeroed in both triangles whether or
    not the merge happened, so the pair is never reconsidered.
    """
    i, j = cand[0], cand[1]
    r1, r2 = find_route(i, routes), find_route(j, routes)
    mrge = can_merge(i, r1, j, r2, demand)
    new_road = []
    if mrge > 0:
        routes.remove(r1)
        routes.remove(r2)
        if mrge == 1:
            # i heads r1 and j ends r2: append r1 (minus its leading 0... 
            # actually minus r1's trailing 0 and r2's leading 0) after r2
            r1.pop()
            r2.remove(0)
            new_road = r1 + r2
        else:
            r2.pop()
            r1.remove(0)
            new_road = r2 + r1
        routes.append(new_road)
    # mark the pair as processed in both triangles of the matrix
    savings[i-1][j-1] = 0
    savings[j-1][i-1] = 0
def ClarkeWright(inst, demand, lam, mu, nu):
    """Parametrised Clarke-Wright savings construction.

    Starts from one out-and-back route per customer and greedily merges
    the pair with the best positive saving until none remains. Routes
    are returned open (the trailing depot 0 is popped at the end).
    """
    routes = init_routes(inst, demand)
    savings = compute_savings(inst, demand, lam, mu, nu)
    (i, j, s) = max_savings(len(inst)-1, savings)
    while s > 0:
        merge_routes((i, j, s), routes, savings, inst, demand)
        (i, j, s) = max_savings(len(inst)-1, savings)
    # drop the trailing depot of every route: solution routes are open
    for i in range(len(routes)):
        routes[i].pop()
    return routes
##################
# Cross - Exchange #
##################
# Code for the cross-exchange operator. Apply the operator for a certain edge.
# Return the nearest route of the edge given
def another_routeCE(edge, voisins, routes, demand,fe):
    """Select the partner route for a cross-exchange around *edge* = (a, b).

    Scans the nearest neighbours of a for a customer i on a different
    route such that neither a nor the predecessor of i is locked by a
    fixed edge of *fe*, and the capacity of both routes still holds
    after swapping b and i. Returns ((r1, r2), i), or ((r1, r1), -1)
    when no admissible partner exists.
    """
    (a, b) = edge
    r1 = find_route(a, routes)
    adja = adjacents(a,fe)
    for i in voisins[a]:
        r2 = find_route(i, routes)
        adjpi = adjacents(r2[r2.index(i)-1],fe)
        # we verify that the future demand on the route won't exceed his capacity
        if r2 != r1 and i != 0 and len(adjpi)==0 and len(adja) == 0 and route_demand(r1, demand)-demand[b]+demand[i] <= Capacity and route_demand(r2, demand)-demand[i]+demand[b] <= Capacity:
            return ((r1, r2), i)
    # error case, we haven't found a second route, so no modifications
    return ((r1, r1), -1)
# Apply the cross-exchange operator
def cross_exchange(edge, voisins, routes, inst, demand,fe):
    """Cross-exchange move around *edge* = (a, b).

    First relocates a next to its neighbour v on a second route, then
    enumerates every additional swap of one customer of r1 against one
    of r2, keeping those that leave the whole solution feasible and do
    not touch fixed edges. One feasible swap is finally picked at
    random and applied; the untouched *routes* is returned when no
    partner route or no feasible swap exists.
    """
    copy_routes = copy_sol(routes)
    (a, b) = edge
    feasible = []
    # compute the two routes considered, and the NN of the point we remove (a). v is a point
    (r1, r2), v = another_routeCE(edge, voisins, copy_routes, demand,fe)
    if v < 0:
        return routes
    copy_routes.remove(r1)
    copy_routes.remove(r2)
    routesBis = copy_sol(copy_routes)
    # copy of the current solution
    current_cand = copy_sol([r1.copy(), r2.copy()])
    i_v = current_cand[1].index(v)
    i_a = current_cand[0].index(a)
    # swap a with the predecessor of v (or with v itself when v is the
    # first customer of its route)
    if i_v != 1:
        current_cand[0][i_a], current_cand[1][i_v -
                                              1] = current_cand[1][i_v-1], a
    else:
        current_cand[0][i_a], current_cand[1][i_v] = current_cand[1][i_v], a
    current_current_cand = copy_sol(current_cand)
    for j in range(len(r2)-1):
        # NOTE(review): the mixed and/or below relies on precedence —
        # (i_v != 1 and j != i_v-2) or (j != 0) — which makes the first
        # clause redundant whenever j != 0; possibly intended as an
        # `and` between the two groups. Left as written.
        if (i_v != 1 and j != i_v-2) or (j!=0):
            for i in range(len(r1)-1):
                if i != i_a-1:
                    p1 = current_current_cand[0][i+1]
                    p2 = current_current_cand[1][j+1]
                    current_current_cand[0][i+1], current_current_cand[1][j + 1] = p2, p1
                    routesBis = routesBis + current_current_cand
                    adj1 = adjacents(p1,fe)
                    adj2 = adjacents(p2,fe)
                    if verification(routesBis,demand) and len(adj1)==0 and len(adj2)==0:
                        feasible.append((i+1,j+1))
                    # reset the working copies before trying the next pair
                    current_current_cand = copy_sol(current_cand)
                    routesBis = copy_sol(copy_routes)
    if len(feasible)==0:
        return routes
    # apply one feasible swap chosen uniformly at random
    pivot = feasible[rd.randint(0,len(feasible)-1)]
    p1 = current_cand[0][pivot[0]]
    p2 = current_cand[1][pivot[1]]
    current_cand[0][pivot[0]], current_cand[1][pivot[1]] = p2, p1
    routes = copy_routes + current_cand
    return routes
##################
# Ejection - Chain #
##################
def reject(route, routes, voisins, inst, demand):
    """Dissolve a singleton route [0, c] by inserting its only customer
    after one of its nearest neighbours on another route with spare
    capacity. Mutates *routes* in place; returns it unchanged when no
    host route can absorb the customer.
    """
    point = route[1]
    for i in voisins[point]:
        r = find_route(i, routes)
        if r != route and route_demand(r, demand)+demand[point] <= Capacity:
            routes.remove(route)
            r.insert(r.index(i)+1, point)
            return routes
    return routes
# Compute the saving of the new edge
def saving(i, ri, j, rj, inst):
    """Cost saving of moving customer ri[i] after position j of route rj.

    Gain of removing ri[i] from its route (two edges removed, one
    shortcut added) plus the cost of splicing it between rj[j] and
    rj[j+1]. Both routes temporarily get a trailing depot 0 appended so
    that the i+1 / j+1 indices stay valid at the route ends; the
    append/pop pair restores them before returning.
    """
    ri.append(0)
    rj.append(0)
    s = distance(inst[ri[i]], inst[ri[i+1]])
    s += distance(inst[ri[i]], inst[ri[i-1]])
    s -= distance(inst[ri[i+1]], inst[ri[i-1]])
    s += distance(inst[rj[j]], inst[rj[j+1]])
    s -= distance(inst[ri[i]], inst[rj[j]])
    s -= distance(inst[ri[i]], inst[rj[j+1]])
    ri.pop()
    rj.pop()
    return s
# Code for ejection-chain operator. Apply the operator for a certain edge.
def another_routeEC(a, voisins, routes, demand, inst,fe):
    """Partner route for relocating customer *a* in the ejection chain.

    Looks through a's nearest neighbours for one on a different route
    with room for a's demand, provided a is not locked by a fixed edge
    of *fe*. Returns ((r1, r2), i) on success, ((r1, r1), -1) otherwise.
    """
    r1 = find_route(a, routes)
    adja = adjacents(a,fe)
    for i in voisins[a]:
        r2 = find_route(i, routes)
        if r2 != r1 and i != 0 and len(adja)==0 and route_demand(r2, demand)+demand[a] <= Capacity:
            return ((r1, r2), i)
    return (r1, r1), -1
# evalue a possible next edge.
def best_point(edge, routes, inst):
    """Of the two endpoints of *edge*, pick the one whose removal from
    its route saves the most distance (the depot node 0 is never picked).
    """
    (a, b) = edge
    if a == 0:
        return b
    elif b == 0:
        return a
    else:
        r = find_route(a, routes)
        # predecessors of a and b inside their common route
        a0 = r[r.index(a)-1]
        b1 = r[r.index(b)-1]
        if distance(inst[a0], inst[a])+distance(inst[a], inst[b1]) > distance(inst[a0], inst[b])+distance(inst[b], inst[b1]):
            return a
        else:
            return b
def eval_cand(point, voisins, routes, inst, demand,fe):
    """Score the relocation of *point* onto its best partner route.

    Returns (saving, (i, i_v), (r1, r2)) where i is point's index in its
    route r1 and i_v the insertion anchor's index in r2, or the module
    sentinel Error when no admissible partner route exists.
    """
    (r1, r2), v = another_routeEC(point, voisins, routes, demand, inst,fe)
    if v < 0:
        return Error
    i_v, i = r2.index(v), r1.index(point)
    return (saving(i, r1, i_v, r2, inst), (i, i_v), (r1, r2))
# return the best relocation for each point p in the route.
# Return the point to relocate and his neighbour considered.
def best_cand(route, np, voisins, routes, inst, demand,fe):
    """Best relocation candidate among the customers of *route*.

    *np* is the customer that was just relocated into the route and must
    not be moved again. NOTE: the parameter name `np` shadows the numpy
    alias inside this function (numpy is not used here, so it is safe).
    Candidates are ranked by their eval_cand tuple; the sort compares
    savings first, so S[-1] is the highest-saving candidate.
    """
    S = []
    for p in route:
        i = route.index(p)
        if p != np:
            cp = best_point((route[i-1], p), routes, inst)
            S.append(eval_cand(cp, voisins, routes, inst, demand, fe))
    S.sort()
    return S[-1]
def ejection_chain(l, point, voisins, routes, inst, demand,fe):
    """Ejection chain of length *l* starting from customer *point*.

    Relocates *point* onto a partner route, then repeatedly relocates
    the best candidate from the receiving route, up to l moves total.
    *routes* is mutated in place; the chain stops early (returning the
    partially modified solution) if no admissible move exists.
    """
    S = 0 # global cost modification of the current solution
    s, I, R = eval_cand(point, voisins, routes, inst, demand,fe)
    if (s, I, R) == Error:
        return routes
    S += s
    relocated_cust = R[0][I[0]]
    # update the routes
    R[1].insert(I[1]+1, relocated_cust)
    R[0].remove(relocated_cust)
    for k in range(l-1):
        curr_route = R[1]
        s, I, R = best_cand(curr_route, relocated_cust,
                            voisins, routes, inst, demand,fe)
        if (s, I, R) == Error:
            return routes
        S += s
        relocated_cust = R[0][I[0]]
        R[1].insert(I[1]+1, relocated_cust)
        R[0].remove(relocated_cust)
    """
    if S < 0: # If the final result is worse than the initial then we don't apply changes
        return initial_routes
    """
    return routes
#########################
# Lin-Kernighan Heuristic #
#########################
# Code for LK, take only one route in argument
def decross_route(route, inst):
    """Try moving the first customer of *route* to the end of the tour.

    A trailing depot 0 is appended temporarily so route[-2] is the last
    customer. If moving route[1] to the back shortens the closed tour
    (d > 0), a modified copy is returned; otherwise the route is
    restored (pop of the temporary 0) and returned unchanged.
    """
    route.append(0)
    d = (distance(inst[route[2]], inst[route[1]])+distance(inst[route[0]], inst[route[-2]]) -
         distance(inst[route[0]], inst[route[2]]) - distance(inst[route[-2]], inst[route[1]]))
    if d > 0:
        cand = route.copy()
        cand.remove(route[1])
        cand.insert(-1, route[1])
        cand.pop()
        return cand
    else:
        route.pop()
        return route
def DeuxOpt(route, inst):
    """One improving 2-opt-style move on a single (closed) route.

    Finds the pair of edges (i, i+1), (j, j+1) whose endpoints, once
    exchanged, give the biggest positive gain, and returns a copy of
    the route with route[i+1] and route[j] swapped. NOTE(review): a
    textbook 2-opt would reverse the whole segment between i+1 and j;
    this variant only swaps the two nodes — intentional simplification
    to confirm. The 2e-10 floor filters out floating-point noise.
    Returns the route object itself when no improving pair is found.
    """
    l = len(route)-1
    best_tuple = (0, 0)
    best = 2e-10
    for i in range(l-1):
        pi = inst[route[i]]
        spi = inst[route[i+1]]
        for j in range(i+2, l-1):
            pj = inst[route[j]]
            spj = inst[route[j+1]]
            d = (distance(pi, spi) + distance(pj, spj)) - \
                distance(pi, pj)-distance(spi, spj)
            if d > best:
                best_tuple = (i, j)
                best = d
    if best_tuple[0] != best_tuple[1]:
        cand = route.copy()
        cand[best_tuple[0]+1], cand[best_tuple[1]
                                    ] = cand[best_tuple[1]], cand[best_tuple[0]+1]
        return cand
    else:
        return route
# Successive 2-opt iterations. Not sufficient on its own for long routes,
# but good enough on small route fragments (considering the nearest
# neighbours of the area around the edge to eliminate).
# i and j delimit the part of the route to optimise.
def LK(route, inst):
    """Iterate DeuxOpt on *route* until a fixed point is reached.

    The trailing depot 0 appended here lets DeuxOpt treat the route as
    closed; at convergence DeuxOpt returns the route object itself, so
    route.pop() also strips the 0 from the returned next_cand.
    """
    route.append(0)
    next_cand = DeuxOpt(route, inst)
    while route != next_cand:
        route = next_cand.copy()
        next_cand = DeuxOpt(route, inst)
    route.pop()
    return next_cand
#############
# Heuristic #
#############
def gravity_center(route, inst):
    """Centroid (x, y) of the nodes visited by *route*."""
    xs = [inst[node][0] for node in route]
    ys = [inst[node][1] for node in route]
    count = len(route)
    return (sum(xs) / count, sum(ys) / count)
def width(i, j, G):
    """Width of edge (i, j) relative to the depot-to-centroid axis of G.

    theta is the angle of the centroid direction (depot assumed at
    (0, 0)); each endpoint is mapped through a sin/cos scaling and the
    absolute difference of the endpoint-to-projection distances is
    returned. NOTE(review): this is an approximation of a perpendicular
    projection, not an exact one — confirm against the intended
    penalisation scheme.
    """
    theta = m.acos(G[1]/distance(G, (0, 0)))
    proj_i = (i[0]*m.sin(theta), i[1]*m.cos(theta))
    proj_j = (j[0]*m.sin(theta), j[1]*m.cos(theta))
    return abs(distance(i, proj_i)-distance(j, proj_j))
def cost(i, j, p):
    """Length of edge (i, j) inflated by 20% per unit of penalty *p*."""
    penalty_factor = 1 + 0.2 * p
    return distance(i, j) * penalty_factor
def depth(i, j):
    """Depth of an edge: the larger depot distance of its two endpoints
    (the depot is assumed to sit at the origin (0, 0))."""
    depot = (0, 0)
    return max(distance(i, depot), distance(j, depot))
def max_depth(inst):
    """Largest origin-to-node distance over all nodes of the instance
    (0 for an empty instance)."""
    return max((distance(p, (0, 0)) for p in inst), default=0)
def penalization_function(lw, lc, ld, max_d):
    """Build an edge-badness function b(i, j, G, p).

    lw, lc, ld weight the width, cost and depth components; max_d
    normalises the depth. The 1/(1 + p) factor dampens edges that have
    already been penalised p times (guided-local-search style).
    """
    return lambda i, j, G, p: ((lw * width(i, j, G) + lc * cost(i, j, p))*(depth(i, j)/max_d)**(ld/2))/(1 + p)
def bad_edge(b, p, routes, inst,fixed):
    """Worst (most penalisable) edge of the current solution.

    Scores every consecutive customer pair of every route with the
    badness function *b* (fed the route centroid G and the current
    penalty count p[pi][pj]), skipping depot-incident and fixed edges.
    Returns [score, (pi, pj)]; the initial [0, (0, 0)] is kept when
    every edge scores 0 or is excluded.
    """
    cand = [0, (0, 0)]
    for r in routes:
        G = gravity_center(r, inst)
        for i in range(len(r)-1):
            pi = r[i]
            pj = r[i+1]
            b_ij = b(inst[pi], inst[pj], G, p[pi][pj])
            if b_ij > cand[0] and pi != 0 and pj != 0 and (pi,pj) not in fixed and (pj,pi) not in fixed:
                cand[0] = b_ij
                cand[1] = (pi, pj)
    return cand
def apply_heuristic(inst, demand, lam, mu, nu, l,max_d,v):
    """Guided local search for the CVRP.

    Builds a Clarke-Wright start, then repeatedly penalises the worst
    edge and perturbs the solution with ejection chains (length *l*),
    cross-exchange and 2-opt (LK), cycling through several edge
    penalisation functions. Returns (initial_solution, best_solution).
    *v* is the precomputed k-nearest-neighbour table.
    """
    # Initial solution
    # `record` is a hard-coded reference solution (instance A-n37-k06)
    # used only to compute common edges below.
    record = [[0, 7, 25, 35, 16], [0, 27, 32, 15, 30, 13], [0, 24, 29, 36, 6, 14], [0, 4, 10, 11, 12, 22, 23, 28, 2, 33], [0, 20, 8, 5, 3, 1, 34, 17], [0, 18, 31, 19, 9, 21, 26]]
    initial_solution = ClarkeWright(inst, demand, lam, mu, nu)
    for i in range(len(initial_solution)):
        initial_solution[i] = decross_route(initial_solution[i].copy(), inst)
        initial_solution[i] = LK(initial_solution[i].copy(), inst)
    routes2 = copy_sol(initial_solution)
    routes = copy_sol(initial_solution)
    # compute global variables
    B = [penalization_function(1, 0, 0, max_d), penalization_function(1, 1, 0, max_d), penalization_function(
        1, 0, 1, max_d), penalization_function(1, 1, 1, max_d), penalization_function(0, 1, 0, max_d), penalization_function(0, 1, 1, max_d)]
    b_i = 0
    b = B[b_i]
    # p[i][j]: how many times edge (i, j) has been penalised
    p = [[0 for j in range(len(inst))] for i in range(len(inst))]
    N = 0  # laps without improvement
    gs = 0  # laps for last improvement
    c_init = cost_sol(routes, inst)
    time = 0
    e,ei,ef = common_edges(initial_solution,record)
    # lock a random 10% of the initial edges for the whole search
    fixed_edges = fixed_alea(all_edges(initial_solution))
    # find the worst edge
    while time < 1500:
        worst = bad_edge(b, p, routes, inst,fixed_edges)[1]
        p[worst[0]][worst[1]] += 1
        p[worst[1]][worst[0]] += 1
        # apply ejection-chain
        cp = best_point(worst, routes, inst)
        routes = ejection_chain(l, cp, v, routes, inst, demand,fixed_edges)
        for i in range(len(routes)):
            routes[i] = LK(routes[i], inst)
        # apply cross-exchange
        routes = cross_exchange(worst, v, routes, inst, demand,fixed_edges)
        # apply LK
        for i in range(len(routes)):
            routes[i] = LK(routes[i], inst)
        c_final = cost_sol(routes, inst)
        if c_final < c_init:
            routes2 = copy_sol(routes)  # new optimum
            #fixed_edges = fixed_alea(all_edges(routes))
            gs = 0
            N = 0
            c_init = cost_sol(routes2, inst)
            time = 0
        if gs > 10:
            # return to the last global solution, for gs iterations
            routes = copy_sol(routes2)
            #fixed_edges = fixed_alea(all_edges(routes2))
            gs = 0
        if N > 100:
            # stagnation: switch to the next penalisation function and
            # reset the penalty counters (cycling back after the last one)
            b_i += 1
            if b_i < len(B):
                b = B[b_i]
                p = [[0 for j in range(len(inst))]
                     for i in range(len(inst))]
                N = 0
            else:
                b_i = 0
                b = B[b_i]
                p = [[0 for j in range(len(inst))]
                     for i in range(len(inst))]
                N = 0
            # clean up singleton routes and re-optimise the incumbent
            for i in (routes2):
                if len(i) == 2:
                    routes2 = reject(i, routes2, v, inst, demand)
            for i in range(len(routes2)):
                routes2[i] = decross_route(routes2[i].copy(), inst)
                routes2[i] = LK(routes2[i], inst)
            routes = copy_sol(routes2)
        gs += 1
        N += 1
        time += 1
    # final clean-up: absorb singleton routes, then local re-optimisation
    for i in (routes2):
        if len(i) == 2:
            routes2 = reject(i, routes2, v, inst, demand)
    for i in range(len(routes2)):
        routes2[i] = decross_route(routes2[i].copy(), inst)
        routes2[i] = LK(routes2[i], inst)
    # safety net: fall back to the initial solution if capacity was broken
    if not verification(routes2,demand):
        routes2 = initial_solution
    return initial_solution, routes2
###########
# Solutions #
###########
def are_equal(edge1, edge2):
    """True when both edges join the same pair of nodes, in either
    orientation."""
    if edge1 == edge2:
        return True
    return edge1[1] == edge2[0] and edge1[0] == edge2[1]
def all_edges(sol):
    """Every directed edge of the solution, route by route, each route
    closed by the edge from its last node back to its first."""
    edges = []
    for route in sol:
        edges.extend(zip(route, route[1:]))
        edges.append((route[-1], route[0]))
    return edges
def common_edges(sol1, sol2):
    """Partition edges of two solutions by membership.

    Returns (E, E_init, E_final): E holds the edges shared by both
    solutions (orientation-insensitive, no duplicates), E_init the
    edges only in sol1, E_final the edges only in sol2.
    """
    E1 = all_edges(sol1)
    E2 = all_edges(sol2)
    E = []
    E_init = []
    E_final = []
    for i in E1:
        for j in E2:
            if are_equal(i, j) and (i[0], i[1]) not in E and (i[1], i[0]) not in E:
                E.append(i)
    for i in E1:
        if i not in E and (i[1],i[0]) not in E:
            E_init.append(i)
    for j in E2:
        if j not in E and (j[1],j[0]) not in E:
            E_final.append(j)
    return E,E_init,E_final
def rank_costs(E, inst):
    """Edges of E ordered by increasing Euclidean length."""
    weighted = sorted((distance(inst[e[0]], inst[e[1]]), e) for e in E)
    return [edge for _, edge in weighted]
def rank_depth(E, inst):
    """Edges of E ordered by increasing depth, normalised by the
    instance's maximum depot distance."""
    dmax = max_depth(inst)
    weighted = sorted((depth(inst[e[0]], inst[e[1]]) / dmax, e) for e in E)
    return [edge for _, edge in weighted]
def rank_width(E, sol, inst):
    """Edges of E ordered by increasing width relative to the centroid
    of the route that carries each edge's first endpoint."""
    weighted = []
    for e in E:
        route = find_route(e[0], sol)
        centroid = gravity_center(route, inst)
        weighted.append((width(inst[e[0]], inst[e[1]], centroid), e))
    weighted.sort()
    return [edge for _, edge in weighted]
def rank_edges(sol, inst):
    """Return (edge count, edges ranked by cost, by depth, by width)
    for the whole solution."""
    edges = all_edges(sol)
    return (len(edges),
            rank_costs(edges, inst),
            rank_depth(edges, inst),
            rank_width(edges, sol, inst))
def give_rank(e, rank):
    """1-based position of edge *e* in the ordered list *rank*
    (orientation-insensitive); None when the edge is absent."""
    for position, candidate in enumerate(rank, start=1):
        if are_equal(e, candidate):
            return position
    return None
def all_ranks(cE, sol, inst):
    """Rank every edge of cE against the three edge orderings of *sol*.

    Returns (n, r, r_mean): n is the total edge count, r the sorted list
    of per-edge sorted rank triples [cost, depth, width], r_mean the
    (unsorted) per-edge mean of the three ranks.
    """
    n, rc, rd, rw = rank_edges(sol, inst)
    r = []
    r_mean = []
    for e in cE:
        g1 = give_rank(e, rc)
        g2 = give_rank(e, rd)
        g3 = give_rank(e, rw)
        r_mean.append((g1+g2+g3)/3)
        g = [g1, g2, g3]
        g.sort()
        r.append(g)
    r.sort()
    return n, r, r_mean
def analyse(n, ranks):
    """Histogram of how rank triples fall under/over a threshold ladder.

    For each rank triple r (three rank values) and each low threshold
    t in (n/3, 15, 10, 5), slot 2k counts triples with ANY value < t
    and slot 2k+1 those with ALL values < t; slots 8+2k / 9+2k do the
    same for the symmetric high thresholds n - t. Returns the list of
    16 counters.
    """
    thresholds = [n / 3, 15, 10, 5]
    a = [0] * 16
    for r in ranks:
        trio = (r[0], r[1], r[2])
        for k, t in enumerate(thresholds):
            if any(x < t for x in trio):
                a[2 * k] += 1
            if all(x < t for x in trio):
                a[2 * k + 1] += 1
            if any(x > n - t for x in trio):
                a[8 + 2 * k] += 1
            if all(x > n - t for x in trio):
                a[8 + 2 * k + 1] += 1
    return a
# Tests #
##########
A_n32_k05 = read("Instances/A-n32-k05.xml")
# sol_A3205 = [[[0, 30, 16, 1, 12], 100], [[0, 14, 24], 82], [[0, 20, 5, 25, 10, 29, 15, 22, 9, 18, 8, 28, 4, 11], 82], [[0, 7, 13, 26], 47], [[0, 27, 6, 23, 3, 2, 17, 19, 31, 21], 99]]
init_A3205 = [[0, 18, 22, 9, 11, 4, 28, 8], [0, 29, 15, 10, 25, 5, 20], [
0, 21, 31, 19, 17, 13, 7, 26], [0, 27, 23, 2, 3, 6, 14, 24], [0, 12, 1, 16, 30]]
sol_A3205 = [[0, 21, 31, 19, 17, 13, 7, 26], [0, 28, 11, 4, 23, 2, 3, 6], [
0, 20, 5, 25, 10, 29, 15, 22, 9, 8, 18], [0, 27, 24, 14], [0, 12, 1, 16, 30]]
##########
A_n33_k05 = read("Instances/A-n33-k05.xml")
# sol_A3305 = [[[0, 22, 15, 16, 3, 9, 17], 94], [[0, 23, 11, 6, 24, 2], 82], [[0, 28, 18, 19, 14, 21, 1, 31, 29], 98], [[0, 20, 32, 13, 8, 7, 26, 4], 78], [[0, 10, 30, 25, 27, 5, 12], 94]]
init_A3305 = [[0, 10, 30, 25, 27, 5, 12], [0, 4, 26, 7, 8, 13, 32, 20], [
0, 29, 3, 9, 17, 16, 15], [0, 28, 18, 31, 1, 21, 14, 19, 11], [0, 2, 24, 6, 23, 22]]
sol_A3305 = [[0, 20, 32, 13, 8, 7, 26, 4, 22], [0, 10, 30, 25, 27, 5, 12], [
0, 11, 19, 14, 21, 1, 31, 18, 28], [0, 2, 24, 6, 23], [0, 15, 17, 9, 3, 16, 29]]
##########
A_n33_k06 = read("Instances/A-n33-k06.xml")
init_A3306 = [[0, 4, 8, 3, 9, 15, 20, 2, 5], [0, 11, 29, 6, 7, 19], [0, 13, 1, 18, 17], [
0, 21, 12, 10], [0, 31, 23, 24, 26, 22, 14], [0, 32, 25, 16, 30, 27, 28]]
sol_A3306 = [[0, 4, 8, 3, 9, 15, 20, 2, 5], [0, 17, 11, 29, 19, 7], [0, 21, 12, 10], [
0, 32, 25, 16, 30, 27, 28], [0, 31, 23, 24, 26, 22], [0, 13, 6, 18, 1, 14]]
##########
A_n34_k05 = read("Instances/A-n34-k05.xml")
init_A3405 = [[0, 8, 11, 23, 27, 1, 29], [0, 7, 15, 19, 17, 25, 28, 32, 31], [
0, 21, 3, 12, 9, 22, 16, 2, 33], [0, 4, 26, 30, 24, 5], [0, 14, 6, 13, 10, 20, 18]]
sol_A3405 = [[0, 5, 30, 24, 29, 6, 7], [0, 27, 1, 23, 11, 8, 15, 14], [
0, 19, 17, 25, 31, 28, 13, 10], [0, 26, 4, 33, 16, 2, 18], [0, 21, 32, 3, 12, 9, 22, 20]]
##########
A_n36_k05 = read("Instances/A-n36-k05.xml")
init_A3605 = [[0, 9, 23, 2, 35, 8, 34, 14], [0, 21, 18, 33, 29, 30, 17, 13, 32, 22, 1], [
0, 12, 31, 19, 4, 3, 6, 28, 15], [0, 26, 7, 10], [0, 20, 5, 25, 27, 24, 11, 16]]
sol_A3605 = [[0, 28, 14, 34, 23, 2, 35, 8, 15], [0, 1, 22, 32, 13, 17, 30, 29, 33, 18, 21], [
0, 12, 31, 19, 4, 3, 6, 9], [0, 10, 7, 26], [0, 20, 5, 25, 27, 24, 11, 16]]
##########
A_n37_k05 = read("Instances/A-n37-k05.xml")
init_A3705 = [[0, 30, 25, 35, 18, 26, 31, 28, 32, 29], [0, 17, 14, 23, 20, 19, 2, 12, 1], [
0, 22, 13, 10, 6, 5, 33, 4, 7], [0, 21, 16], [0, 3, 24, 9, 11, 27, 8, 34, 36, 15]]
sol_A3705 = [[0, 22, 13, 10, 6, 5, 33, 4, 7], [0, 21, 16], [0, 1, 12, 2, 19, 20, 23, 14, 17], [
0, 3, 24, 9, 11, 27, 8, 25, 35, 18, 26, 15], [0, 34, 36, 29, 32, 28, 31, 30]]
##########
A_n37_k06 = read("Instances/A-n37-k06.xml")
init_A3706 = [[0, 4], [0, 5, 3], [0, 6], [0, 7], [0, 8], [0, 9], [0, 10], [0, 11], [0, 12], [0, 13], [0, 14], [0, 16], [0, 18], [0, 20], [0, 21], [
0, 22], [0, 23], [0, 24], [0, 26], [0, 27], [0, 29], [0, 32], [0, 33], [0, 36], [0, 25, 35], [0, 19, 31], [0, 15, 30], [0, 2, 28], [0, 17, 34], [0, 1]]
# sol_A3706 = [[[0, 29, 36, 14], 65], [[0, 24, 16, 7], 47], [[0, 27, 32, 15, 30, 13], 89], [[0, 25, 35], 81], [[0, 26, 21, 9, 1, 3, 5, 8], 96], [[0, 10, 11, 12, 22, 23, 28, 2, 33, 20], 97], [[0, 18, 4, 17, 34, 19, 31, 6], 95]]
# sol_A3706 = [[0, 7, 25, 35, 16], [0, 13, 30, 15, 32, 27], [0, 10, 11, 12, 22, 23, 28, 2, 33], [0, 24, 29, 36, 6, 14], [0, 4, 26, 19, 31, 34, 17, 18], [0, 20, 8, 5, 3, 1, 9, 21]]
sol_A3706 = [[0, 7, 25, 35, 16], [0, 27, 32, 15, 30, 13], [0, 20, 33, 2, 28, 23, 22, 12, 11, 10], [
0, 14, 6, 36, 29, 24], [0, 31, 19, 9, 21, 26, 4], [0, 18, 17, 34, 1, 3, 5, 8]]
##########
A_n38_k05 = read("Instances/A-n38-k05.xml")
init_A3805 = [[0, 2], [0, 9], [0, 14], [0, 15], [0, 24], [0, 4, 16, 25], [0, 12, 1, 3, 26], [0, 7, 22, 27, 11, 5], [
0, 31, 37, 28], [0, 8, 23, 35, 33], [0, 18, 6, 34, 29, 19], [0, 10, 30, 21], [0, 17, 36, 13], [0, 20, 32]]
sol_A3805 = [[0, 18, 19, 34, 29, 30, 10], [0, 28, 31, 37, 11, 27, 22, 5], [0, 7, 20, 32,
15, 13, 36, 17, 2, 24], [0, 9, 8, 23, 35, 33, 14], [0, 6, 25, 16, 4, 1, 3, 12, 26, 21]]
##########
A_n39_k05 = read("Instances/A-n39-k05.xml")
init_A3905 = [[0, 2, 22, 3, 7, 16, 32, 10, 20], [0, 38, 15, 5, 29, 23, 1, 31, 12], [
0, 13, 28, 6, 26, 17, 11, 8, 9], [0, 24, 35, 37, 34, 27, 36, 30, 21], [0, 4, 18, 33, 25, 19, 14]]
sol_A3905 = [[0, 2, 22, 3, 7, 16, 32, 10], [0, 38, 15, 5, 29, 20, 23, 1, 31, 12], [
0, 14, 19, 25, 33, 18, 9, 4], [0, 6, 36, 27, 28, 13, 30, 21], [0, 17, 24, 35, 37, 34, 26, 11, 8]]
##########
A_n39_k06 = read("Instances/A-n39-k06.xml")
init_A3906 = [[0, 3], [0, 5], [0, 11], [0, 13], [0, 15], [0, 20], [0, 24], [0, 26], [0, 30], [0, 27, 16, 10], [
0, 2, 33, 19, 4, 7, 8], [0, 12, 38], [0, 9, 28, 29], [0, 32, 34, 22, 18], [0, 21, 23, 17, 36, 1, 6], [0, 37, 31, 35, 25, 14]]
sol_A3906 = [[0, 15, 30, 13], [0, 24, 3, 38, 12, 9, 28, 29], [0, 7, 8, 4, 16, 10, 27, 18], [
0, 5, 26, 11], [0, 37, 31, 14, 35, 25, 33, 19, 2], [0, 6, 1, 36, 17, 23, 21, 22, 34, 32, 20]]
##########
A_n65_k09 = read("Instances/A-n65-k09.xml")
# Clarke-Wright shape parameters and experiment setup for the selected
# instance (A-n37-k06); `execute` is the number of heuristic restarts.
lam = 1.0
mu = 0.2
nu = 0.6
execute = 20
t = "A-n37-k06"
instance, demand = A_n37_k06
initiale = init_A3706
solution = sol_A3706
# precomputed globals shared by all runs: deepest node and kNN table
max_d = max_depth(instance)
v = voisins(KNN, instance)
# print(route_demand([0, 22, 13, 10, 6, 5, 33, 4, 7],demand)) # 3705
# print(route_demand([0, 21, 31, 19, 17, 13, 7, 26],demand)) # 3205
# print(route_demand([0, 10, 30, 25, 27, 5, 12],demand)) # 3305
"""
record = [[0, 16, 35, 25, 7], [0, 4, 18, 14, 36, 29, 24], [0, 31, 33, 5, 3, 1, 8, 6], [0, 27, 32, 15, 21, 34, 17], [0, 13, 30, 10, 26, 20], [0, 11, 12, 22, 23, 28, 2, 9, 19]]
for r in record:
print(route_demand(r, demand))
print_current_sol(record,instance)
py.show()
"""
"""
init, reso = apply_heuristic(instance, demand, lam, mu,nu, relocation,max_d,v)
print(cost_sol(init,instance),cost_sol(reso,instance))
"""
# Run the heuristic `execute` times, recording the cost of every run
# and keeping the cheapest solution found.
costs = []
best = []
c_best = 100000
for i in range(execute):
    init, reso = apply_heuristic(instance, demand, lam, mu,nu, relocation,max_d,v)
    c_sol = cost_sol(reso,instance)
    print(i,c_sol)
    costs.append(round(c_sol,3))
    if c_sol < c_best:
        best = reso
        c_best = c_sol
"""
namefile = "resultats/Heuristic_results/Values/"+t+"/results.txt"
"""
# Summary: per-run costs, their mean, the best cost and solution.
print(costs)
mean = 0
for c in costs:
    mean += c
print(mean/len(costs))
print(min(costs))
print(best)
"""
writef(namefile,'\n')
writef(namefile,'#################')
writef(namefile,'lambda = '+ str(lam))
writef(namefile,'mu = ' + str(mu))
writef(namefile,'nu = ' + str(nu))
writef(namefile,'execute = ' + str(execute))
writef(namefile,'')
writef(namefile,str(costs))
writef(namefile,'')
writef(namefile,'mean = ' + str(round(mean/len(costs),3)))
writef(namefile,'min = ' + str(min(costs)))
writef(namefile,'')
writef(namefile,str(best))
"""
"""
def total_execution(min_lam,max_lam,min_mu,max_mu,min_nu,max_nu,execute):
for li in range(int(10*min_lam),int(10*max_lam)):
for mi in range(int(10*min_mu),int(10*max_mu)):
for ni in range(int(10*min_nu),int(10*max_nu)):
c_best = 100000
lam = 0.1*li
mu = 0.1 * mi
nu = 0.1*ni
print(lam,mu,nu)
costs = []
best = []
for i in range(execute):
init, reso = apply_heuristic(instance, demand, lam, mu,nu, relocation,max_d,v)
c_sol = cost_sol(reso,instance)
print(i,c_sol)
costs.append(round(c_sol,3))
if c_sol < c_best:
best = reso
c_best = c_sol
namefile = "resultats/Heuristic_results/Values/"+t+"/stochastic_results.txt"
mean = 0
for c in costs:
mean += c
print(mean/len(costs))
print(min(costs))
writef(namefile,'\n')
writef(namefile,'#################')
writef(namefile,'lambda = '+ str(lam))
writef(namefile,'mu = ' + str(mu))
writef(namefile,'nu = ' + str(nu))
writef(namefile,'execute = ' + str(execute))
writef(namefile,'')
writef(namefile,str(costs))
writef(namefile,'')
writef(namefile,'mean = ' + str(round(mean/len(costs),3)))
writef(namefile,'min = ' + str(min(costs)))
writef(namefile,'gap = ' + str(round((1-(949/min(costs)))*100,3)))
writef(namefile,'')
writef(namefile,str(best))
total_execution(0.0,0.1,1.3,1.4,1.7,1.8,30)
"""
"""
sol_para = []
for li in range(1,21):
for mj in range(21):
for nk in range(21):
lam = 0.1*li
mu = 0.1*mj
nu = 0.1*nk
print("")
print(lam,mu,nu)
init,reso = apply_heuristic(instance,demand,lam,mu,nu,relocation,max_d,v)
sol_para.append(((lam,mu,nu),(cost_sol(init,instance),cost_sol(reso,instance))))
print(cost_sol(init,instance),cost_sol(reso,instance))
print(sol_para)
"""
"""
print_current_sol(initiale,instance)
py.title("Solution initiale " + t)
py.savefig("resultats/Heuristic_results/litterature_instances/"+t+"/initiale_"+t+".png")
py.close()
print_current_sol(solution,instance)
py.title("Solution obtenue pour " + t)
py.savefig("resultats/Heuristic_results/litterature_instances/"+t+"/solution_"+t+".png")
py.close()
E = common_edges(initiale,solution)
print_instance(instance)
print_edges(E,instance)
py.title("Arêtes communes pour " + t)
py.savefig("resultats/Heuristic_results/litterature_instances/"+t+"/commonEdges_"+t+".png")
py.close()
"""
"""
Eref = all_edges(initiale)
E = common_edges(initiale,solution)
n,rei,r_mean = all_ranks(E,initiale,instance)
nref,reiref,r_meanref = all_ranks(Eref,initiale,instance)
print(n)
r_mean.sort()
r_meanref.sort()
# print(reiref)
# print(rei)
print(r_meanref)
print(reiref)
print(r_mean)
print(rei)
"""
"""
instanceA = np.array(instance)
tri = Delaunay(instanceA)
print_instance(instance)
py.triplot(instanceA[:,0], instanceA[:,1], tri.simplices)
py.show()
"""
| [
"clement.legrand-lixon@ens-rennes.fr"
] | clement.legrand-lixon@ens-rennes.fr |
49131845d1781dc4fcddbc8a2237f30f37bab6e9 | e476bfa232b733e17a0ee69c5bbdd66c2dab589b | /R.Briggs.Python_for_kids/correct_number.py | 3621093be5abc84af3ad2ff5ac04b6ed5dbb60b0 | [] | no_license | I-hel-l/python | 36deb9ed6e00743b991d43153b5b47c2559766e7 | fcc69c364554d131bb85e80b4bbdae1812f63596 | refs/heads/master | 2022-12-23T06:51:28.353136 | 2020-10-05T06:39:44 | 2020-10-05T06:39:44 | 258,753,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py |
# Amount to validate against the two accepted ranges.
money = 8000

in_low_range = 100 <= money <= 500
in_high_range = 1000 <= money <= 5000
if in_low_range or in_high_range:
    print('The condition is met!')
else:
    print('Condition not met!')
| [
"id13061992@gmail.com"
] | id13061992@gmail.com |
3cdf8011b618b07498f42f587746389db19ab840 | e7964338707afba0228866a33f954a974fcc693b | /code/linreg/boston3d_loss.py | 93704fc901be370ade12eb00fcf6b4701c31b2e4 | [
"MIT"
] | permissive | anawatbk/msds621 | f96346ddc4fd47d7b9c3a40e2632da7a39aaf2e0 | 869a309e235359119f30477c7a57763e222197e5 | refs/heads/master | 2023-03-25T10:20:02.072200 | 2021-03-10T09:39:33 | 2021-03-10T09:39:33 | 333,196,889 | 0 | 0 | MIT | 2021-03-10T09:39:34 | 2021-01-26T19:41:04 | Jupyter Notebook | UTF-8 | Python | false | false | 2,523 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # required even though not ref'd!
from matplotlib import rcParams
import matplotlib as mpl
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \
load_breast_cancer, load_diabetes
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
import glob
import os
from PIL import Image as PIL_Image
# STOPPED WORK IN PROGRESS
def loss(B, X, y):
    """Mean-squared-error loss of the line with coefficients B.

    B : [y-intercept, slope]; X must already carry the ones column so
    that X @ B gives the predictions.

    Bug fix: the original computed np.mean(residuals) ** 2 — the square
    of the mean residual — instead of the mean of the squared
    residuals, which is the least-squares loss the surface plot is
    meant to show.
    """
    residuals = y - np.dot(X, np.array(B))
    return np.mean(residuals ** 2)
def get_surface(X, y, loss, b0_range, b1_range):
    """Evaluate *loss* over the grid b0_range x b1_range.

    X : (n, k) array of feature values — a ones column is prepended to
        model the intercept (note: callers must pass a 2-D array for
        np.hstack to succeed).
    y : length-n target vector.
    loss : callable(B, X=..., y=...) -> float with B = [b0, b1].
    Returns L with L[i][j] = loss([b0_range[i], b1_range[j]]).

    Cleanup: the original allocated an unused ones column B0 and an
    unused meshgrid (only its shape was read); both are removed.
    """
    n = len(X)
    X = np.hstack([np.ones(shape=(n, 1)), X])  # add ones column
    L = np.zeros((len(b0_range), len(b1_range)))
    for i in range(len(b0_range)):
        for j in range(len(b1_range)):
            L[i][j] = loss([b0_range[i], b1_range[j]], X=X, y=y)
    return L
def plot3d(L, b0_range, b1_range, ax, elev=50, azim=145):
    """Render the loss surface L over the (beta0, beta1) grid on *ax*.

    *ax* must be a 3-D axes; elev/azim set the initial camera angle.
    """
    rcParams["font.size"] = 10
    ax.view_init(elev, azim)
    b0_range_mesh, b1_range_mesh = np.meshgrid(b0_range, b1_range, indexing='ij')
    surface = ax.plot_surface(b0_range_mesh, b1_range_mesh, L, alpha=0.7, cmap='coolwarm')
    # plt.title("""$loss(\\beta) = \sum_{i=1}^{N}(y^{{(i)}} - (\\beta_0 + \\beta_1 x^{{(i)}}))^2$""", fontsize=12)
    ax.set_xlabel('$\\beta_0$', fontsize=14)
    ax.set_ylabel('$\\beta_1$', fontsize=14)
    # integer tick labels on the loss axis
    ax.zaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:.0f}'))
# Fit OLS on the Boston housing data, then plot the loss surface around
# a (b0, b1) grid for the single feature LSTAT.
boston = load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
print(df.head(3))
X = df.drop('MEDV', axis=1)
y = df['MEDV']
lm = LinearRegression()
lm.fit(X, y)
true_b0 = lm.intercept_
coeff = lm.coef_
print(f"True beta = {true_b0:.2f}, {coeff}")
b0_range = np.arange(-3030, -2900, .1)  # y intercept
b1_range = np.arange(105, 120, .05)  # slope
# NOTE(review): X['LSTAT'] is a 1-D Series; get_surface hstacks it with a
# 2-D ones column — confirm this runs (file is marked work in progress).
L = get_surface(X['LSTAT'], y, loss, b0_range=b0_range, b1_range=b1_range)
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111, projection='3d')
plot3d(L, b0_range=b0_range, b1_range=b1_range, ax=ax, elev=25, azim=110)
#Theax.plot([true_b0], [true_b1], marker='x', markersize=10, color='black')
plt.show()
| [
"parrt@cs.usfca.edu"
] | parrt@cs.usfca.edu |
22378801d4b6576b4b1e94a7ff806f4eda02baae | e4bc18c121d9a4883b70f4c4b18d33b97fa49548 | /OES/oes/index/views.py | a6f2d041c4722b90cfd86accf1f8c7864c1e4153 | [] | no_license | sunjinsong/online-education-platform | ba10c50d29cd24f3ba1e2760368d37ae662f40a2 | ce653ccef4cb5c8386efabc427eee4256cd79d5f | refs/heads/master | 2022-12-22T12:34:16.098721 | 2020-09-24T12:20:41 | 2020-09-24T12:20:41 | 294,900,073 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from django.shortcuts import render
from .models import SlideShowTop,SlideShowBottom
# Create your views here.
from courses.models import Course
from organizations.models import Organization
def index(request):
    """Home page: top/bottom slideshows plus featured courses and organisations."""
    slides=SlideShowTop.objects.all()
    slides_bottom = SlideShowBottom.objects.all()
    course_list = Course.objects.all()[:6]       # first 6 courses for the front page
    org_list = Organization.objects.all()[:15]   # first 15 organisations
    context={
        'slides':slides,
        'slides_bottom':slides_bottom,
        'course_list':course_list,
        'org_list':org_list,
        'flag':'index',  # presumably marks the active nav tab in the template — TODO confirm
    }
    return render(request,'index.html',context=context) | [
"673074094@qq.com"
] | 673074094@qq.com |
fa65816e2cfb758c378945e5151479007fa07174 | 02b5012356592f2f2b1e75a884aa715049cb71c0 | /env/lib/python3.7/stat.py | f0b28a1ccbbab165353351e42a45beafd2cb41e7 | [] | no_license | wsiqing/KSquare | d7b5ecdbd4e5d846e4d76225477452836c58f3bd | f7e959010493d0dceb30b4dc4f9d6839f0b436c0 | refs/heads/master | 2020-04-07T19:31:49.762406 | 2018-11-22T03:48:28 | 2018-11-22T03:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | /Users/yukimoto/anaconda3/lib/python3.7/stat.py | [
"shtomcom@gmail.com"
] | shtomcom@gmail.com |
d3d140c8f717f45b6a828980ac963c2f3e3a1098 | bd5611b95d9bfa80a3fba331fbaa7fcee561b633 | /main.py | 54de849d04e06ecdfa08f51152fd286a3fc91592 | [] | no_license | brazer/SteemNotificationServer | d6dc8048c5cceb96d0a9a67da39c1e80570af820 | ef184a777c2e634be846c4a18ea3a758206fafc1 | refs/heads/master | 2020-03-29T10:27:22.426572 | 2018-09-21T19:43:51 | 2018-09-21T19:43:51 | 149,806,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | import threading
from steem.blockchain import Blockchain
from model import Vote, Comment, Follow
def parse_next_block(block):
    """Walk every operation in *block* and route it to a type-specific handler.

    Operations whose type is not vote / custom_json / comment are ignored.
    """
    for transaction in block['transactions']:
        for operation in transaction['operations']:
            op_type = operation[0]
            if op_type == "vote":
                process_vote(operation)
            elif op_type == "custom_json":
                process_custom_json(operation)
            elif op_type == "comment":
                process_comment(operation)
            # any other operation type is deliberately skipped
# Example payload:
# ['vote', {'voter': 'onelovecuration', 'author': 'enjoyinglife', 'permlink': 'ixtf4g8l', 'weight': 2000}]
def process_vote(operation):
    # NOTE(review): stub — the Vote is built and its author set, but it is never
    # persisted or returned; confirm the intended step (e.g. vote.save()).
    vote = Vote()
    vote.author = operation[1]["author"]
# Example payload:
# ['custom_json', {'required_auths': [], 'required_posting_auths': ['frebie'], 'id': 'follow',
#  'json': '["follow",{"follower":"frebie","following":"jabbir","what":["blog"]}]'}]
def process_custom_json(operation):
    """Handle a custom_json operation; only 'follow' payloads are recognised."""
    op_id = operation[1]["id"]
    if op_id != "follow":
        print(operation)
        return
    follow = Follow()  # todo: populate follower/following from the json payload
# Example payload:
# ['comment', {'parent_author': 'changelly', 'parent_permlink': 'getting-listed-on-changelly-why-and-how',
# 'author': 'coinratecap', 'permlink': 're-changelly-getting-listed-on-changelly-why-and-how-20180913t203330725z',
# 'title': '', 'body': 'Nice one Changelly', 'json_metadata': '{"tags":["cryptocurrency"],"app":"steemit/0.1"}'}]
def process_comment(operation):
    # NOTE(review): stub — only the author is copied onto a Comment that is
    # never persisted; confirm intended behaviour.
    comment = Comment()
    comment.author = operation[1]['author']
current_id = "" # todo: save in file -- block num of the most recently seen head
def run():
    """Poll the chain head once and re-arm via a 1-second threading.Timer.

    Each previously unseen head block is parsed on its own worker thread so
    parsing never delays the next poll.
    """
    blockchain = Blockchain()
    id = blockchain.get_current_block_num()  # NOTE: shadows the builtin `id`
    global current_id
    if id != current_id :
        block = blockchain.get_current_block()
        thread = threading.Thread(target=parse_next_block, args=(block,))
        print("Start new thread", thread.name)
        thread.start()
        current_id = id
    threading.Timer(1, run).start()  # schedule next poll; timer is never cancelled
if __name__ == '__main__':
    run()
| [
"salanevich.anatol@gmail.com"
] | salanevich.anatol@gmail.com |
5c97af41a1774f86f8de44b3008cd498ea9860f9 | e3fe268f5f0c9dc7bbc4df24796d4d347607c384 | /migrations/0003_fix_tweet_id.py | 5ae17aac3997275d3ed6f2a0f8f4d035e45f69e8 | [] | no_license | eyeseast/beijing_air | 70b7f065eba1ee7152369047f0380b51dcad0299 | c2fbaaf30beabeac1b9aae069cd415f4127d3d19 | refs/heads/master | 2021-01-19T15:03:13.965034 | 2010-02-13T15:50:55 | 2010-02-13T15:50:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py |
from south.db import db
from django.db import models
from beijing_air.models import *
class Migration:
    """South schema migration: add an auto `id` PK to SmogUpdate and change
    `tweet_id` from an integer primary key to a 20-char CharField."""
    def forwards(self, orm):
        # Adding field 'SmogUpdate.id'
        db.add_column('beijing_air_smogupdate', 'id', orm['beijing_air.smogupdate:id'])
        # Changing field 'SmogUpdate.tweet_id'
        # (to signature: django.db.models.fields.CharField(max_length=20))
        db.alter_column('beijing_air_smogupdate', 'tweet_id', orm['beijing_air.smogupdate:tweet_id'])
    def backwards(self, orm):
        # Deleting field 'SmogUpdate.id'
        db.delete_column('beijing_air_smogupdate', 'id')
        # Changing field 'SmogUpdate.tweet_id'
        # (to signature: django.db.models.fields.IntegerField(primary_key=True))
        db.alter_column('beijing_air_smogupdate', 'tweet_id', orm['beijing_air.smogupdate:tweet_id'])
    # Frozen ORM snapshot South uses to resolve the orm[...] accessors above.
    models = {
        'beijing_air.aqidefinition': {
            'color': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_aqi': ('django.db.models.fields.IntegerField', [], {}),
            'min_aqi': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'beijing_air.smogupdate': {
            'aqi': ('django.db.models.fields.IntegerField', [], {}),
            'concentration': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '3'}),
            'definition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updates'", 'to': "orm['beijing_air.AqiDefinition']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
            'tweet_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'tweet_timestamp': ('django.db.models.fields.DateTimeField', [], {})
        }
    }
| [
"chrisamico@web48.webfaction.com"
] | chrisamico@web48.webfaction.com |
dfa5c23cb31d92a13364ba4353601996d1aece31 | a1b192cb09a50219b0b16f9910318ba20d71e2e4 | /seller/migrations/0001_initial.py | 53fdca4ec26b2ae1e85ca9d79e8215afdc84c2e9 | [] | no_license | sakshipatne29/ShoppingApp | 20d2397625e09ac21f7e0e336378d4acd71d0ba8 | abb52adb0ac7e77769f9c7b04e0ebb68eb23bb07 | refs/heads/master | 2022-12-04T19:07:37.824836 | 2020-08-15T06:44:17 | 2020-08-15T06:44:17 | 287,693,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | # Generated by Django 2.1.7 on 2020-01-07 09:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the seller app: Category and Product tables."""
    initial = True
    dependencies = [
        # Needs ShoppingApp.UserProfile for the Product.added_by FK below.
        ('ShoppingApp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('catname', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('des', models.CharField(max_length=100)),
                ('qty', models.IntegerField()),
                ('pro_image', models.ImageField(null=True, upload_to='productimage')),
                ('added_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='ShoppingApp.UserProfile')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='seller.Category')),
            ],
        ),
    ]
| [
"sakshipatne206@gmail.com"
] | sakshipatne206@gmail.com |
4e210b9c9ccca1c22a9822913c5a925758b57bd1 | d0ad533a311ed8603c9a271f7bee7f1f36b48323 | /loan4u/settings.py | 63440d30f75ce61b4da1ec689c738c0dcefa6c9b | [] | no_license | christie-joseph/loan2 | 1b1a9a09189a27329918da82388d845284f34426 | cd337d7fe887b1c40881af689b2a4161a25f0718 | refs/heads/master | 2022-01-14T08:56:55.604587 | 2018-11-23T17:22:26 | 2018-11-23T17:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,695 | py | """
Django settings for loan4u project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR=os.path.join(BASE_DIR,"templates")
STATIC_DIR=os.path.join(BASE_DIR,"static")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '!r7h^tb9od-xn2gp!^)&k0ra8vawx9dwppar2iq3gcr5i8lqqk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'loan_app'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'loan4u.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'loan4u.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
# NOTE(review): Argon2/BCrypt hashers require the third-party argon2-cffi /
# bcrypt packages to be installed; otherwise Django raises at startup.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[STATIC_DIR,]
| [
"noreply@github.com"
] | christie-joseph.noreply@github.com |
41c9d295e2d724898ffac2ed8acb1e9bb08fba51 | a4041998897913c09f13c8ff6bd9452acd2cc4e0 | /test_project/someapp/views.py | a45bab2a6f316ba3ea7cecbd863cfdc6ec424b58 | [] | no_license | dromanow/django_simple | 73a25d80df976a16314d09ac5f8a0e842f871732 | 22cdbfc38e4cc2e11939e0f458e8d6068f34fa9f | refs/heads/master | 2023-07-18T06:02:37.655660 | 2021-09-09T16:02:22 | 2021-09-09T16:02:22 | 404,030,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | from django.shortcuts import render
from django.views.generic.list import ListView
from .models import SomeModel
class SomeModelView(ListView):
    """List view over SomeModel with related rows fetched eagerly."""
    model = SomeModel
    template_name = 'someapp/index.html'

    def get_queryset(self):
        # Eager-load the FK relation `other` (SQL join) and the multi-valued
        # relation `other1` (separate query) to avoid per-row lookups.
        queryset = SomeModel.objects.select_related('other')
        return queryset.prefetch_related('other1').all()

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx['user'] = {'name': 'Denis', 'age': '40'}
        return ctx
def index(request):
    """Function-based twin of SomeModelView: hard-coded user + all SomeModel rows."""
    # NOTE(review): `site` is unused; kept because request.site may trigger a
    # sites-framework lookup — confirm before removing.
    site = request.site
    return render(request, 'someapp/index.html', context={
        'user': {'name': 'Denis', 'age': '40'},
        'object_list': SomeModel.objects.all()
        # 'object_list': SomeModel.on_site.all()
    })
| [
"denis.romanow@gmail.com"
] | denis.romanow@gmail.com |
bfb4f12275d4630557cbb7716232b552fb2bc121 | ba1e90ae6ea9f8f74d9b542e159825341c717712 | /2014/w33.py | e5aa36b9425bc3b95b355755a29e3a5445ba785d | [] | no_license | sailesh2/CompetitiveCode | b384687a7caa8980ab9b9c9deef2488b0bfe9cd9 | 5671dac08216f4ce75d5992e6af8208fa2324d12 | refs/heads/master | 2021-06-24T22:39:11.396049 | 2020-11-27T05:22:17 | 2020-11-27T05:22:17 | 161,877,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | n=input()
ar=[0]*1000001
i=0
while i<n:
k=input()
x=raw_input().split(' ')
j=0
while j<k:
ar[int(x[j])]=1
j=j+1
i=i+1
i=1
while i<=1000001:
if ar[i]==1:
print i,
i=i+1
| [
"sailesh.ku.upadhyaya@gmail.com"
] | sailesh.ku.upadhyaya@gmail.com |
0907f798af431c2d6c383ca74dbf7bc8e1850daa | 2f3a2318c2b2b891c0e5b22d31f56e63adf94025 | /proximity.py | 2257d869f9c3d5ba75c1ce98c35278c4e5791941 | [] | no_license | piano-man/smart_lighting | 4edcef3fd6e8476f469ed2420bdcb486a0b4d514 | f467a96c9a988d5403dce43352bcfdcc72d8f848 | refs/heads/master | 2021-01-02T08:30:22.225529 | 2018-03-27T22:27:24 | 2018-03-27T22:27:24 | 99,013,741 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN) #Read output from PIR motion sensor
#GPIO.setup(3, GPIO.OUT) #LED output pin
while True:
i=GPIO.input(11)
if i==0: #When output from motion sensor is LOW
print "No intruders",i
# GPIO.output(3, 0) #Turn OFF LED
time.sleep(0.5)
elif i==1: #When output from motion sensor is HIGH
print "Intruder detected",i
#GPIO.output(3, 1) #Turn ON LED
time.sleep(0.1)
| [
"icm2015003@iiita.ac.in"
] | icm2015003@iiita.ac.in |
f171dcf7166219415a77dffee8ae4e950c5df132 | d9c976c1812ab5c9606042b8138527a2345d34af | /songuyento.py | 7582693fab1c21b4a0562ca407c47c44be720153 | [] | no_license | hoana2007/PythonByEx | 757361a748db7b394b9e44afcdd08b110e166e38 | ec5a9901a86fad98b96e97c94ed3ba88d408abee | refs/heads/master | 2020-12-15T13:13:18.407226 | 2020-01-20T13:54:46 | 2020-01-20T13:54:46 | 235,113,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | def range_prime(n):
numbers = [True] * n
numbers[0] = False
numbers[1] = False
for i in range(2, int(n**(1/2))):
k = i * 2 # Đánh dấu bội số của i
while k < n:
numbers[k] = False
k = k + i
primes = [i for i in range(2, n) if numbers[i]]
return primes
n = 19
print(range_prime(n))  # -> [2, 3, 5, 7, 11, 13, 17]
"hoana2007@outlook.com"
] | hoana2007@outlook.com |
7e6448f2793bf033036579c65c34ebcdbf85e254 | a3ee53ce0e544c7b432f6e92f677edb53ddf6ba1 | /virtualplotter.py | ac69c2ee6514e845caa14551ba0ceddd240db0a5 | [] | no_license | derikvanschaik/quick-and-simple-beginner-projects | 40fb0dba4a8e7532dd418433cd1b2e0796f78aeb | 459b21e1bf964ed536c587d5dfb6bb5a7c2f545f | refs/heads/main | 2023-07-23T12:00:59.731131 | 2021-09-05T20:05:59 | 2021-09-05T20:05:59 | 377,302,572 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | import PySimpleGUI as sg
BG = 'white'  # shared background colour; also used to hide the keyboard input

def update_cur_pos_label(cur_pos, label, label_pos, canvas):
    """Replace the old position caption and return the new text figure id."""
    canvas.delete_figure(label)
    caption = f"Point current position: {cur_pos}"
    return canvas.draw_text(caption, label_pos)
def main():
    """Event loop: move a point along the canvas diagonal with the j/k keys."""
    sg.theme('Material1')
    size = (400,400)
    bott_left, top_right = (0, 0), (400,400)
    canvas = sg.Graph(size, bott_left, top_right, enable_events = True, key='canvas')
    # If we use the return keyboard events we cannot simply hold down a key and have a new event occur
    # so this is a hack to make holding down a key trigger events:
    keyboard_input = sg.Input('', key='keyboard_input', enable_events = True, background_color = BG, text_color=BG)
    layout = [[canvas],[keyboard_input]]
    window = sg.Window('', layout, background_color=BG).finalize()
    keyboard_input.Widget.config(insertbackground=BG) #makes the cursor color the background color, now this is invisible
    window['canvas'].draw_line(bott_left, top_right)
    cur_pos = (5,5)
    point = canvas.draw_point(cur_pos, size=20, color='red')
    label_pos = (150,350)
    point_label = canvas.draw_text(f'Point current position: {cur_pos}', label_pos, font='digital')
    UP_KEY = 'j'
    DOWN_KEY = 'k'
    while True:
        event, values = window.read()
        if event == sg.WIN_CLOSED:
            break
        if event == 'keyboard_input':
            key_pressed = values['keyboard_input'][-1]#last char pressed
            shift = 0
            if key_pressed == UP_KEY and cur_pos < top_right:
                shift = 1
            elif key_pressed == DOWN_KEY and cur_pos > bott_left:
                shift = -1
            # NOTE: the lambda parameter `point` shadows the outer figure id
            # `point`; harmless here but confusing.
            cur_pos = tuple(map(lambda point: point + shift ,cur_pos) ) #shifts both points by one (going up the line)
            canvas.move_figure(point, shift, shift)
            point_label = update_cur_pos_label(cur_pos, point_label, label_pos, canvas) #updates the label
    window.close()
if __name__ == '__main__':
    main()
"derikvanschaik@gmail.com"
] | derikvanschaik@gmail.com |
6f3e3f2f7ca6c0342aca5a9638fc8eab77130d69 | 3258bc4854d71ccc523cbfd072a8734ccb66c2c5 | /lecture_code/OURKITCHEN/ourkitchen/settings.py | 4a8b7c33a980eea4eb156f64b78a5fdc169d8095 | [] | no_license | minwooDB/open_api | 2f3b18e696f674df48e74242c58a4d21be67261c | b46a568a7cc633a2093c51024d76efe377d447c0 | refs/heads/master | 2022-12-22T23:56:36.952572 | 2019-11-14T07:16:33 | 2019-11-14T07:16:33 | 218,244,235 | 0 | 0 | null | 2022-11-22T04:49:15 | 2019-10-29T08:58:32 | Python | UTF-8 | Python | false | false | 3,177 | py | """
Django settings for ourkitchen project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control — move to the environment
# before deploying.
SECRET_KEY = 'x264^atp3o(*uoissisbfh2f4v8hj01h5p5lf3ssp1c-&r^c)l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'account',
    'commercial_analysis',
    'kitchen_reservation',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ourkitchen.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ourkitchen.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
# NOTE(review): USE_TZ=False stores naive local (Asia/Seoul) datetimes.
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"alsdn114560@gmail.com"
] | alsdn114560@gmail.com |
67558cce12fa4dac149bcb1e868f56821bca6d40 | 5e45a4d8cec4ccfe3d51687175bb9d3546485cd8 | /완전탐색/모의고사.py | b0150e19af3e469ba0a872eb94f08cdd4ff77be9 | [] | no_license | unnullian/AlgorithmPractice | 5fea2ffb9d580af242cfd017c76be77ed4dde99e | 278400279578870ede831239663ed773f115ddc3 | refs/heads/main | 2023-06-10T12:49:01.550131 | 2021-07-02T01:31:45 | 2021-07-02T01:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | def solution(answers):
answer = []
hash_map = {k:0 for k in range(1, 4)}
pattern1 = [1,2,3,4,5]
pattern2 = [2,1,2,3,2,4,2,5]
pattern3 = [3,3,1,1,2,2,4,4,5,5]
st1 = []
st2 = []
st3 = []
for i in range(len(answers)) :
st1.append(pattern1[i%len(pattern1)])
st2.append(pattern2[i%len(pattern2)])
st3.append(pattern3[i%len(pattern3)])
if st1[i] == answers[i] :
hash_map[1] += 1
if st2[i] == answers[i] :
hash_map[2] += 1
if st3[i] == answers[i] :
hash_map[3] += 1
max_value = max(hash_map.values())
for key, value in hash_map.items() :
if value == max_value :
answer.append(key)
return answer | [
"unknown90420@naver.com"
] | unknown90420@naver.com |
2008b3dbe90ff6f13aa32832741f8f39400092f3 | 395a71b505a21b6843f5d48e653b8ef4223a61b7 | /app/venv/bin/blueprint-create | d4336d103660f8f833d8bfd39859540bc02cc39e | [] | no_license | kwl3434/PEviewer-Packer-Protector-PEinfo-website | 55a5600644668390819902e26eebdfb03da0805d | 7e18c8146f52e423865f82583396538e81530ad9 | refs/heads/master | 2023-05-27T21:14:27.837789 | 2021-04-29T07:43:00 | 2021-04-29T07:43:00 | 176,113,366 | 4 | 7 | null | 2023-05-01T20:32:30 | 2019-03-17T14:32:01 | Python | UTF-8 | Python | false | false | 2,957 | #!/PEviewer-Packer-Protector-PEinfo-website/app/venv/bin/python2
import errno
import logging
import optparse
import sys
import blueprint.cli
from blueprint import context_managers
import blueprint.git
# CLI: snapshot the current system as a blueprint named <name>, optionally
# subtracting another blueprint and/or rendering generated code.
parser = optparse.OptionParser('Usage: %prog [-d <subtrahend>] [-P|-C|-S|-R|...] '
                               '[-m <message>] [-r] [-q] <name>')
parser.add_option('-d', '--diff',
                  dest='subtrahend',
                  default=None,
                  help='blueprint to subtract from the generated blueprint')
# The -P/-C/-S/-R/--cfn flags are mutually exclusive in effect: each stores a
# different constant into options.generate.
parser.add_option('-P', '--puppet',
                  dest='generate',
                  action='store_const',
                  const='puppet',
                  help='generate a Puppet module')
parser.add_option('-C', '--chef',
                  dest='generate',
                  action='store_const',
                  const='chef',
                  help='generate a Chef cookbook')
parser.add_option('-S', '--sh',
                  dest='generate',
                  action='store_const',
                  const='sh',
                  help='generate POSIX shell code')
parser.add_option('-R', '--rules',
                  dest='generate',
                  action='store_const',
                  const='blueprint_rules',
                  help='generate Blueprint rules')
parser.add_option('--cfn',
                  dest='generate',
                  action='store_const',
                  const='cfn',
                  help='generate an AWS CloudFormation template')
parser.add_option('-m', '--message',
                  dest='message',
                  default=None,
                  help='commit message')
parser.add_option('-r', '--relaxed',
                  dest='relaxed',
                  default=False,
                  action='store_true',
                  help='relax version constraints in generated code')
parser.add_option('-q', '--quiet',
                  dest='quiet',
                  default=False,
                  action='store_true',
                  help='operate quietly')
options, args = parser.parse_args()
if options.quiet:
    logging.root.setLevel(logging.CRITICAL)
# Exactly one positional argument (the blueprint name) is required.
if 1 != len(args):
    parser.print_usage()
    sys.exit(1)
# Blueprints are stored as Git commits, so an author identity is mandatory.
if not blueprint.git.configured():
    logging.error('please give Git your name and email address so commits have an author')
    logging.error('')
    logging.error('  git config --global user.email "you@example.com"')
    logging.error('  git config --global user.name "Your Name"')
    logging.error('')
    sys.exit(1)
b = blueprint.cli.create(options, args)
try:
    if options.generate is not None:
        try:
            filename = getattr(b, options.generate)(options.relaxed).dumpf()
        except OSError as e:
            if errno.EEXIST == e.errno:
                logging.error('{0} already exists'.format(args[0]))
                sys.exit(1)
        # NOTE(review): a non-EEXIST OSError is swallowed above, leaving
        # `filename` unbound here — the print then raises NameError; confirm.
        if not options.quiet:
            print(filename)
except IOError:
    pass
"32947371+kwl3434@users.noreply.github.com"
] | 32947371+kwl3434@users.noreply.github.com | |
8ab19fa5fd2feeb2f90170490301f9dafdc9b5cb | b16c3d8e3bae87428f414312f4ffcbb9b39573fc | /.venv/bin/easy_install-3.7 | bc71d2a00d65a44e164bfb1f8f94a0223b89a102 | [] | no_license | zhengzhongjin/ComputationalGenomics | 190a2512bb208535d19656be7752db9f438f5e68 | a0fe18436ed2310dca310fa03442a4eccf7b23b1 | refs/heads/master | 2021-06-23T23:32:57.431088 | 2019-12-15T11:13:09 | 2019-12-15T11:13:09 | 220,879,131 | 1 | 1 | null | 2021-04-26T19:45:45 | 2019-11-11T01:45:40 | C++ | UTF-8 | Python | false | false | 285 | 7 | #!/users/zzjin/course_project/ComputationalGenomics/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix Windows launchers append so
    # setuptools sees the plain command name, then run easy_install.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"zzjin@gradx.cs.jhu.edu"
] | zzjin@gradx.cs.jhu.edu |
7d6ab9147f7e2b8536e088e2f9369d2f7f13d547 | 4a36849188747a1e3cc4b052eb6bc3a21e3e53bb | /POJ/3061.Subsequence/3061.Subsequence.py | e877888939ef6ca21888b36bf9aeb5ccaf105122 | [] | no_license | koking0/Algorithm | 88f69a26f424d1b60a8440c09dd51c8563a86309 | 2828811ae2f905865b4f391672693375c124c185 | refs/heads/master | 2022-07-06T17:10:07.440930 | 2022-06-24T14:59:40 | 2022-06-24T14:59:40 | 216,952,717 | 35 | 48 | null | 2020-07-21T02:46:26 | 2019-10-23T02:41:09 | Java | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (fixed: "utf-H" is not a valid encoding name — as a line-2 coding
# declaration it made CPython refuse to run the file)
# @Time : 2020/1/28 16:27
# @File : 3061.Subsequence.py
# ----------------------------------------------
# >>> Author : Alex
# >>> Github : https://github.com/koking0
# ----------------------------------------------
import sys
# POJ 3061: for each test case, print the minimal length of a contiguous
# subsequence whose sum is >= target (0 if no such subsequence exists).
while True:
    try:
        length, target = map(int, input().split())
        sequence = list(map(int, input().split()))
        left, sum_num, ans = 0, 0, sys.maxsize
        for right in range(length):
            sum_num += sequence[right]
            # Fixed: the original shrank only while sum > target, so a window
            # summing to exactly `target` was never counted (e.g. S=15 over
            # [5, 10] printed 0 instead of 2).  The task asks for sum >= S.
            while sum_num >= target:
                ans = min(right - left + 1, ans)
                sum_num -= sequence[left]
                left += 1
        print(ans if ans != sys.maxsize else 0)
    except EOFError:
        break
| [
"alex18812649207@gmail.com"
] | alex18812649207@gmail.com |
a976c9d14e1dee06b2ff83170340b7db50d36e35 | f0cdda3cf2817bcf991a14cf46e38c353e6872a6 | /src/epuck2_gazebo/scripts/epuck2_control_codes/epuck_pid_controller.py | 83a881db1ca46ec151e0f02e6df04aef77f70ca8 | [] | no_license | vinits5/gym-vinit | efc1b5312674840333eea4fb3912aa579c295f5f | 3ebd79ee94a51c12a6b64fe743ebc742f8d5e63d | refs/heads/master | 2020-03-22T00:55:19.272167 | 2018-06-30T19:00:12 | 2018-06-30T19:00:12 | 138,631,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | #! /usr/bin/python
import rospy
import math
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import matplotlib.pyplot as plt
import numpy as np
import tf
from tf.transformations import euler_from_quaternion
from std_srvs.srv import Empty
import time
# Module-level publisher; NOTE(review): epuck.__init__ creates an identical
# self.velocity_publisher, but motion() publishes through this module-level one.
velocity_publisher = rospy.Publisher('epuck2/cmd_vel', Twist, queue_size=10)
class epuck():
    """PID-style go-to-goal controller for an e-puck robot in Gazebo (ROS)."""
    def __init__(self):
        rospy.init_node('epuck_controller', anonymous=True)
        self.velocity_publisher = rospy.Publisher('epuck2/cmd_vel', Twist, queue_size=10)
        self.pose_subscriber = rospy.Subscriber('/epuck2/odom_diffdrive', Odometry, self.callback)
        self.rate = rospy.Rate(10)
    def callback(self,data):
        # Odometry callback: caches position and Euler orientation.
        # NOTE(review): self.x / self.y only exist after the first message —
        # motion() raises AttributeError if called before any odometry arrives.
        self.x = data.pose.pose.position.x
        self.y = data.pose.pose.position.y
        q0 = data.pose.pose.orientation.x
        q1 = data.pose.pose.orientation.y
        q2 = data.pose.pose.orientation.z
        q3 = data.pose.pose.orientation.w
        quaternion = (q0,q1,q2,q3)
        self.euler = euler_from_quaternion(quaternion)
    def orientation(self,angle):
        # Convert a heading in radians to a compass-style angle in degrees.
        # NOTE(review): not called anywhere in this file.
        angle = angle*(180.0/math.pi)
        if angle >= -90:
            angle = 90 - angle
        else:
            angle = - angle - 270
        return angle
    def motion(self,xg,yg):
        """Drive toward goal (xg, yg); returns the (x, y) path actually taken."""
        loop = True  # NOTE(review): unused
        #PID Parameters
        Kp = 1 #Proportional constant
        Ki = 0.075 #Integral constant
        Kd = 0 #Differential constant (zero, so the D term below is inert)
        E = 0 #Difference of errors
        I = 0 #Sum of all errors
        ai = 0 #Previous orientation of robot
        ei = 0 #Previous error in orientation of robot
        goal = True #True if goal not reached & False if reached
        #Path points:
        path_x = []
        path_y = []
        #PID loop
        while goal:
            yi = self.y #Current y position
            xi = self.x #Current x position
            path_x.append(xi)
            path_y.append(yi)
            #Error Calculations
            ad = math.atan2(yg-yi,xg-xi) #Angle from curent position to Goal
            e = ad - ai #Error in current and previous orientations
            e = math.atan2(math.sin(e),math.cos(e)) #Error converted in range -90 to 90
            #PID control
            E = e - ei #Difference of previous and current error
            I = I + e #Sum of all erros
            w = Kp*e + Ki*I + Kd*E #Calculation of angular velocity
            #Command Velocities to robot
            vel = Twist() #Velocity object
            if e >= 0: #Check for left or right turn
                w = -w #For left: -w & for right: w
            vel.angular.z = w
            vel.linear.x = 0.05
            velocity_publisher.publish(vel)
            #Loop running at 10Hz frequency.
            self.rate.sleep()
            #New positions
            yn = self.y #New y position
            xn = self.x #New x position
            ai = math.atan2(yn-yi,xn-xi) #New orientation from goal
            ai = math.atan2(math.sin(ai),math.cos(ai)) #New orientation in range -90 to 90
            #Check the goal condition
            if ((xn-xg)*(xn-xg)+(yn-yg)*(yn-yg)-0.01*0.05)<0:
                print('Goal Reached!')
                vel.angular.z = 0
                vel.linear.x = 0
                velocity_publisher.publish(vel)
                goal = False
        return(path_x,path_y)
    def circular_motion(self):
        """Visit waypoints on a semicircle (both halves) via motion().

        Returns (path_X, path_Y): one path list per waypoint leg.
        """
        path_X = []
        path_Y = []
        y = [0,0.2,0.4,0.6,0.8,1.0]
        x2 = []
        for i in y:
            # Circle of radius 0.5 centred at (0, 0.5): x^2 = 0.25 - (y-0.5)^2
            x3 = 0.25-(i-0.5)*(i-0.5)
            x2.append(x3)
        x = [math.sqrt(i) for i in x2]
        xf = []
        yf = []
        [xf.append(i) for i in x]
        [yf.append(i) for i in y]
        y.reverse()
        [yf.append(i) for i in y]
        x.reverse()
        [xf.append(-i) for i in x]
        for i in range(len(xf)):
            path_x,path_y = self.motion(xf[i],yf[i])
            path_X.append(path_x)
            path_Y.append(path_y)
        return (path_X,path_Y)
if __name__ == '__main__':
    try:
        X = epuck()
        #xg = input('Enter xg: ')
        #yg = input('Enter yg: ')
        #path_x,path_y = X.motion(xg,yg)
        # Blocking prompt gives Gazebo/odometry time to come up before driving.
        x = input('Enter anything to start: ')
        #reset_world = rospy.ServiceProxy('/gazebo/reset_world',Empty)
        path_X,path_Y = X.circular_motion()
        # Flatten the per-leg path lists and plot the full trajectory.
        xx = []
        yy = []
        for i in path_X:
            for j in i:
                xx.append(j)
        for i in path_Y:
            for j in i:
                yy.append(j)
        plt.plot(xx,yy)
        plt.show()
        #reset_world()
    except rospy.ROSInterruptException:
        pass
| [
"vinitsarode5@gmail.com"
] | vinitsarode5@gmail.com |
2be6e03963662472280160148c47c1ae27b413e0 | 248c5865e790ddb8fc098062b08cba9b00a3cdb6 | /jsonMod.py | 84e2d5b282a4047d49d0cd1ad17cafc8ccfeacdb | [] | no_license | yourant/jg5 | d71a1e7e633617eab5a7d41e071cacfc0b8190c8 | c05a24cc28e749db56584afaa273169dce907cfb | refs/heads/main | 2023-03-13T08:14:38.742638 | 2021-02-23T06:54:47 | 2021-02-23T06:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,784 | py | import requests
import json
import re
# Wanna console endpoint serving localised message bundles (anonymous access).
url = 'http://54.222.221.139:8088/wanna-console/wanna/message/anon/get'
param = {'webSiteNo': '01', 'code': 'M1236', 'locale': 'en_US'}  # sample/default query
all_lan = ['en', 'fr', 'de', 'es', 'pt', 'sv', 'da', 'nb', 'is', 'fi']  # supported language codes
all_plat_code = ['M1236', 'M1284', 'M1243', 'M1316']  # known message-bundle codes
class JsonMod():
    """Fetches Wanna i18n JSON bundles and reads/writes them via slash paths."""
    # NOTE(review): class-level mutable dicts are shared across all instances;
    # __init__ mutates wmap_reverse/lmap in place rather than rebinding them.
    wmap_reverse = {}
    web_code_lan = {}
    lmap = {}
    def __init__(self):
        # read web_code_lan, wmap and lmap into memory
        try:
            self.web_code_lan = self.read_json_file('web_code_lan.txt')
        except:  # NOTE(review): bare except hides real errors; any failure falls back to {}
            self.web_code_lan = {}
        # wmap.txt: space-separated rows; maps column 0 -> column 2.
        with open('wmap.txt', 'r', encoding='UTF-8') as f:
            line = f.readline()
            while line:
                line = line.replace('\n', '')
                s = line.split(' ')
                self.wmap_reverse[s[0]] = s[2]
                line = f.readline()
        # lmap.txt: underscore-separated locales; maps language -> full locale.
        with open('lmap.txt', 'r', encoding='UTF-8') as f:
            line = f.readline()
            while line:
                line = line.replace('\n', '')
                s = line.split('_')
                self.lmap[s[0]] = line
                line = f.readline()
def end(self):
self.write_json_file('web_code_lan.txt', self.web_code_lan)
def read_json_file(self, o):
"""读取一个json文本
:arg
o {str} -- 文件路径
:returns
str -- json对象的字符串
"""
with open(o, 'r', encoding='UTF-8') as f:
s = f.read()
return json.loads(s)
def write_json_file(self, o, s):
"""将一个json对象写入指定文件路径下的文本
:arg
o {str} -- 文件路径
s {dict} -- json对象
"""
with open(o, 'w', encoding='UTF-8') as f:
js = json.dumps(s)
f.write(js)
def get_ori_json(self, p):
"""
该函数通过p参数,获取wanna里特定网站编号、编码类型、语言(locale)的‘json文本’(如果本地result_开头的文件夹下存在该‘json文本’,则读取本地的)
p参数是字典,示例:{'webSiteNo': '01', 'code': 'M1236', 'locale': 'en_US'}
"""
lan = p['locale'].split('_')[0]
code = p['code']
j = None
res = requests.get(url=url, params=p)
j = json.loads(res.text)
assert j['code'] == 200, '不存在该语言或平台:' + str(p)
j = json.loads(j['result'])
assert j
return j
def read_jsonpath(self, path, d):
"""该函数按给定的json路径读取json字符串d的部分内容
:param path: {str}json路径,例:"__Default_Country__/__New_Customer__/pc/modules"
:param d: {str}要读取的json字符串
:return: {str}/{dict}
"""
path = path.replace('//', '')
paths = path.split('/')
temp_d = d
if path == '':
return temp_d
for p in paths:
n = None
m = re.search('\[(.*)\]', p)
if m:
n = eval(m.group(1))
p = p.replace(m.group(0), '')
if n is not None:
temp_d = temp_d[p][n]
else:
temp_d = temp_d[p]
print(temp_d)
return temp_d
def read_jsonpath_from_Wanna(self, path, params):
"""该函数按给定的json路径读取一个wanna上指定的json文件的部分内容
:param path: {str}json路径,例:"__Default_Country__/__New_Customer__/pc/modules"
:param params: {dict}访问wanna的参数,例:"{'webSiteNo': '01', 'code': 'M1236', 'locale': 'en_US'}"
:return: {str}/{dict}
"""
path = path.replace('//', '')
paths = path.split('/')
d = self.get_ori_json(params)
temp_d = d
for p in paths:
n = None
m = re.search('\[(.*)\]', p)
if m:
n = eval(m.group(1))
p = p.replace(m.group(0), '')
if n is not None:
temp_d = temp_d[p][n]
else:
temp_d = temp_d[p]
print(temp_d)
return temp_d
def read_jsonpath_from_localfile(self, path, filepath):
# if path is None:
pass
def write_jsonpath_to_localfile(self, path, value, jsonText, writeFilepath):
if path is None or value is None:
self.write_json_file(writeFilepath, jsonText)
return
path = path.replace('//', '')
paths = path.split('/')
d = jsonText
temp_d = d
lastKey = paths[len(paths) - 1]
for i in range(len(paths) - 1):
n = None
m = re.search('\[(.*)\]', paths[i])
if m:
n = eval(m.group(1))
paths[i] = paths[i].replace(m.group(0), '')
if n is not None:
temp_d = temp_d[paths[i]][n]
else:
temp_d = temp_d[paths[i]]
n = None
m = re.search('\[(.*)\]', lastKey)
if m:
n = eval(m.group(1))
lastKey = lastKey.replace(m.group(0), '')
if n is not None:
temp_d[lastKey][n] = value
else:
temp_d[lastKey] = value
self.write_json_file(writeFilepath, d)
def get_web_status(self, websiteno):
"""检测网站的状态,如拥有几种编码,每种编码下有几种语言。
检测的结果存入self.web_code_lan
"""
self.web = self.wmap_reverse[websiteno]
if self.web in self.web_code_lan:
# self.log_dict_format(self.web_code_lan[self.web])
pass
else:
self.web_code_lan[self.web] = {}
global param
p = param
p['webSiteNo']=websiteno
for code in all_plat_code:
# p['webSiteNo']=self.wmap[self.web]
p['code'] = code
for lan in all_lan:
p['locale'] = self.lmap[lan]
res = json.loads(requests.get(url=url, params=p).text)
if lan == 'en':
if res['code'] == 200:
self.web_code_lan[self.web][code] = []
self.web_code_lan[self.web][code].append(lan)
else:
break
else:
if res['code'] == 200:
self.web_code_lan[self.web][code].append(lan)
#
if p['locale'] == 'pt_BR' and p['code'] == 'M1316':
print(res)
#
# self.log_dict_format(self.web_code_lan[self.web])
| [
"1532611298@qq.com"
] | 1532611298@qq.com |
23a50a2b4da06d596b1c7971e5d4afb4285e8767 | e2265493d2c94c32834541b5b3c1a937c1d25450 | /venv/Lib/site-packages/social_core/tests/backends/test_strava.py | 741ea88517da49b606172d96c53d4645d74b4743 | [] | no_license | BekturMuratov/todoist_api | a209665eb6154c47c501c296cfcecac29d2e2fb5 | 35df8cb4a867e4ba123b0f30a4d7426792ba6ace | refs/heads/master | 2023-08-13T02:35:20.379169 | 2021-10-04T15:27:30 | 2021-10-04T15:27:30 | 413,477,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | import json
from .oauth import OAuth2Test
class StravaOAuthTest(OAuth2Test):
backend_path = 'social_core.backends.strava.StravaOAuth'
user_data_url = 'https://www.strava.com/api/v3/athlete'
expected_username = 'marianne_v'
access_token_body = json.dumps({
"token_type": "Bearer",
"expires_at": 1572805000,
"expires_in": 227615,
"refresh_token": "f51defab4632d27255dd0d106504dfd7568fd1df6",
"access_token": "83ebeabdec09f6670863766f792ead24d61fe3f9",
"athlete": {
"id": 1234567890987654321,
"username": "marianne_v",
"resource_state": 2,
"firstname": "Marianne",
"lastname": "V.",
"city": "Francisco",
"state": "California",
"country": "United States",
"sex": "F",
"premium": "true",
"summit": "true",
"created_at": "2017-11-14T02:30:05Z",
"updated_at": "2018-02-06T19:32:20Z",
"badge_type_id": 4,
"profile_medium": "https://xxxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/medium.jpg",
"profile": "https://xxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/large.jpg",
"friend": "null",
"follower": "null"
}
})
user_data_body = json.dumps({
"id": 1234567890987654321,
"username": "marianne_v",
"resource_state": 3,
"firstname": "Marianne",
"lastname": "V.",
"city": "San Francisco",
"state": "CA",
"country": "US",
"sex": "F",
"premium": "true",
"created_at": "2017-11-14T02:30:05Z",
"updated_at": "2018-02-06T19:32:20Z",
"badge_type_id": 4,
"profile_medium": "https://xxxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/medium.jpg",
"profile": "https://xxxxx.cloudfront.net/pictures/athletes/123456789/123456789/2/large.jpg",
"friend": "null",
"follower": "null",
"follower_count": 5,
"friend_count": 5,
"mutual_friend_count": 0,
"athlete_type": 1,
"date_preference": "%m/%d/%Y",
"measurement_preference": "feet",
"clubs": [],
"ftp": "null",
"weight": 0,
"bikes": [],
"shoes": []
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| [
"Muratov225@gmail.com"
] | Muratov225@gmail.com |
dcae8117729ff8761a27195b4513d6be32743ee9 | 0dc758bc71a2ce345d1d64b0662664a13e31515c | /basic/list2.py | 716b2904914c40aca3878ba06507c92a00921f78 | [
"Apache-2.0"
] | permissive | vasyl-shumskyi/google-python-class | b4748e8a77965a5fa147047f862b41d9fa7dc912 | d4c98bf39b2af0c7f828050b3442ce6324272aaf | refs/heads/master | 2021-01-22T09:26:54.122995 | 2017-04-05T07:28:47 | 2017-04-05T07:28:47 | 82,465,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,494 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
#def remove_adjacent(nums):
# # +++your code here+++
# new_list = []
# y = 0
# for x in nums:
# if nums[x-1] != nums[x]:
# new_list.append(nums[y])
# y=x
# if nums and nums[-1] != new_list[-1]: new_list.append(nums[-1])
# return new_list
#def remove_adjacent(nums):
# +++your code here+++
## for n in nums:
# i=1
# for n in nums:
# print 'n=',n, 'len(nums)',len(nums)
# if nums[i-1]==nums[i] and i<len(nums):
# print 'len=', len(nums),'i=',i
# nums.pop(i)
# else:
# i=i+1
# return nums
#def remove_adjacent(nums):
# # +++your code here+++
# y=0
# for x in range(len(nums)-2):
# if nums[x-1] == nums[x]:
# y=x
# nums.pop(y)
# else: y = x-1
# return nums
#def remove_adjacent(nums):
# +++your code here+++
# x = 1
# print nums
# while x < len(nums):
# if nums[x-1] == nums[x]:
# nums.pop(x)
# print '==', nums
# else:
# x=x+1
# print '!=', nums
# return nums
########################## LAST (working both / while and for)
def remove_adjacent(nums):
# +++your code here+++
x = 1
while x < len(nums):
if nums[x-1] == nums[x]: nums.pop(x)
else: x += 1
return nums
def remove_adjacent(nums):
# +++your code here+++
## for n in nums:
backup = nums[:]
i=1
for n in nums:
if i < len(backup) and backup[i-1] == backup[i]: backup.pop(i)
else: i=i+1
return backup
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
# +++your code here+++
new_list = []
print '\n', list1, list2
for x in range(len(list1)):
for y in range(len(list2)):
if list1[x] >= list2[y]:
new_list.insert(x, list2[y])
list2.pop(y)
else: new_list.append(list2[y])
print '\n', list1, list2
return new_list
def linear_merge(list1, list2):
x = 1
if list1 > list2:
big = list1
small = list2
y = len(list1)
else:
big = list2
small = list1
y = len(list2)
print '\n', list1, list2
while x < y:
if big[x] >= small[0]:
big.insert(x,small[0])
small.pop(0)
x += 1
y = len(list1)
else: x += 1
print list1, list2, '\n'
return big
def linear_merge(list1, list2):
x = 0
cp_list1 = list1[:]
sum_list = list1 + list2
while x < len(cp_list1):
print x, list1, list2
if list1[x] < list2[0]:
list1.insert(x,list2[0])
list2.pop(0)
x = 0
x+=1
return list1
def linear_merge(list1, list2):
list1_cp = list1[:]
list2_cp = list2[:]
L1 = range(len(list1))
L2 = range(len(list2))
print list1, list2
for x in L1:
for y in L2:
print 'x', x, 'y', y
if list2 and list2[y] <= list1[x]:
list1.insert(x,list2[y])
# list2.pop(y)
# x = 0
print list1, list2
return list1
def linear_merge(list1, list2):
# +++your code here+++
x = 0
y = 0
while x < len(list1):
while y < len(list2):
if list2 and list2[y] <= list1[x]:
list1.insert(x,list2[y])
list2.pop(y)
print list1,list2
# if nums[x-1] == nums[x]: nums.pop(x)
else:
x += 1
y += 1
return list1
########################## LAST (working)
def linear_merge(list1, list2):
x = 0
while list2:
if list1[x] > list2[0]:
list1.insert(x,list2[0])
list2.pop(0)
else:
if x == len(list1)-1:
list1.append(list2[0])
list2.pop(0)
else: x += 1
# print x, list1,list2
return list1
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| [
"vasyl.shumskyi@gmail.com"
] | vasyl.shumskyi@gmail.com |
0564832fefe25917c2968f933f065792cce19e13 | 948da9e511b31ff37552388649abe80ea5d38f19 | /blackhole.py | 9bb19a5e4decb4c87e84db280a10aa7db00c84ea | [] | no_license | juanurzua94/Inheritance-and-Animation | e20297ec22536faf95a6743bf22e654c73512d23 | 507140465d947c081a409b519b78fb1ee83e2941 | refs/heads/master | 2020-03-21T17:00:29.181973 | 2018-06-27T00:26:30 | 2018-06-27T00:26:30 | 138,807,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Black_Hole is derived from Simulton: i.e., it updates by finding and removing
# any Prey whose center is contained within its radius
# (returning a set of all eaten simultons), and
# displays as a black circle with a radius of 10
# (width/height 20).
# Calling get_dimension for the width/height (for
# containment and displaying) will facilitate
# inheritance in Pulsator and Hunter
from simulton import Simulton
from prey import Prey
class Black_Hole(Simulton):
radius = 10
def __init__(self, x, y):
Simulton.__init__(self, x, y, Black_Hole.radius * 2, Black_Hole.radius * 2 )
def display(self, canvas):
coordinate = self.get_location()
canvas.create_oval(coordinate[0]-self.get_dimension()[0], coordinate[1]-self.get_dimension()[1],
coordinate[0]+self.get_dimension()[0], coordinate[1]+self.get_dimension()[1],
fill='Black')
def update(self,model):
eaten = set()
for p in set(model.simultons):
if isinstance(p, Prey) and self.contains(p):
eaten.add(p)
model.remove(p)
return eaten
def contains(self, prey):
return Simulton.contains(self, prey.get_location())
| [
"jpurzua@uci.edu"
] | jpurzua@uci.edu |
774d66cb1470234449465f0188cd76c1b7dd3b9f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_gigabits.py | 5b528902501ccb6eb2a2803116afd4524cf7a3d7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._gigabit import _GIGABIT
#calss header
class _GIGABITS(_GIGABIT, ):
def __init__(self,):
_GIGABIT.__init__(self)
self.name = "GIGABITS"
self.specie = 'nouns'
self.basic = "gigabit"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
109e9e3110831891b1598a8c71aceb1aea69f13d | aaddccb9fcbfb568bb10ccc2ae644998fcff03a2 | /Tensorflow Projects/yolo_tensorflow/train.py | 6ebbb03ec7023fe3add36face62b6cc106056183 | [
"MIT"
] | permissive | clhne/Ubuntu14.04_LC | 9948087a3b9e49e3eeda2ab1de2832afee8894e2 | c5a3e1662aca562cb31ae48e224e2e86509c5c90 | refs/heads/master | 2022-11-04T22:07:38.270149 | 2018-03-09T02:41:13 | 2018-03-09T02:41:13 | 124,469,553 | 3 | 1 | null | 2022-10-31T20:11:40 | 2018-03-09T01:24:57 | C++ | UTF-8 | Python | false | false | 6,217 | py | import tensorflow as tf
import datetime
import os
import argparse
import yolo.config as cfg
from yolo.yolo_net import YOLONet
from utils.timer import Timer
from utils.pascal_voc import pascal_voc
class Solver(object):
def __init__(self, net, data):
self.net = net
self.data = data
self.weights_file = cfg.WEIGHTS_FILE
self.max_iter = cfg.MAX_ITER
self.initial_learning_rate = cfg.LEARNING_RATE
self.decay_steps = cfg.DECAY_STEPS
self.decay_rate = cfg.DECAY_RATE
self.staircase = cfg.STAIRCASE
self.summary_iter = cfg.SUMMARY_ITER
self.save_iter = cfg.SAVE_ITER
self.output_dir = os.path.join(
cfg.OUTPUT_DIR, datetime.datetime.now().strftime('%Y_%m_%d_%H_%M'))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.save_cfg()
self.variable_to_restore = tf.global_variables()
self.restorer = tf.train.Saver(self.variable_to_restore, max_to_keep=None)
self.saver = tf.train.Saver(self.variable_to_restore, max_to_keep=None)
self.ckpt_file = os.path.join(self.output_dir, 'save.ckpt')
self.summary_op = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(self.output_dir, flush_secs=60)
self.global_step = tf.get_variable(
'global_step', [], initializer=tf.constant_initializer(0), trainable=False)
self.learning_rate = tf.train.exponential_decay(
self.initial_learning_rate, self.global_step, self.decay_steps,
self.decay_rate, self.staircase, name='learning_rate')
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate).minimize(
self.net.total_loss, global_step=self.global_step)
self.ema = tf.train.ExponentialMovingAverage(decay=0.9999)
self.averages_op = self.ema.apply(tf.trainable_variables())
with tf.control_dependencies([self.optimizer]):
self.train_op = tf.group(self.averages_op)
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options)
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
if self.weights_file is not None:
print('Restoring weights from: ' + self.weights_file)
self.restorer.restore(self.sess, self.weights_file)
self.writer.add_graph(self.sess.graph)
def train(self):
train_timer = Timer()
load_timer = Timer()
for step in xrange(1, self.max_iter + 1):
load_timer.tic()
images, labels = self.data.get()
load_timer.toc()
feed_dict = {self.net.images: images, self.net.labels: labels}
if step % self.summary_iter == 0:
if step % (self.summary_iter * 10) == 0:
train_timer.tic()
summary_str, loss, _ = self.sess.run(
[self.summary_op, self.net.total_loss, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
log_str = ('{} Epoch: {}, Step: {}, Learning rate: {},'
' Loss: {:5.3f}\nSpeed: {:.3f}s/iter,'
' Load: {:.3f}s/iter, Remain: {}').format(
datetime.datetime.now().strftime('%m/%d %H:%M:%S'),
self.data.epoch,
int(step),
round(self.learning_rate.eval(session=self.sess), 6),
loss,
train_timer.average_time,
load_timer.average_time,
train_timer.remain(step, self.max_iter))
print(log_str)
else:
train_timer.tic()
summary_str, _ = self.sess.run(
[self.summary_op, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
self.writer.add_summary(summary_str, step)
else:
train_timer.tic()
self.sess.run(self.train_op, feed_dict=feed_dict)
train_timer.toc()
if step % self.save_iter == 0:
print('{} Saving checkpoint file to: {}'.format(
datetime.datetime.now().strftime('%m/%d %H:%M:%S'),
self.output_dir))
self.saver.save(self.sess, self.ckpt_file,
global_step=self.global_step)
def save_cfg(self):
with open(os.path.join(self.output_dir, 'config.txt'), 'w') as f:
cfg_dict = cfg.__dict__
for key in sorted(cfg_dict.keys()):
if key[0].isupper():
cfg_str = '{}: {}\n'.format(key, cfg_dict[key])
f.write(cfg_str)
def update_config_paths(data_dir, weights_file):
cfg.DATA_PATH = data_dir
cfg.PASCAL_PATH = os.path.join(data_dir, 'pascal_voc')
cfg.CACHE_PATH = os.path.join(cfg.PASCAL_PATH, 'cache')
cfg.OUTPUT_DIR = os.path.join(cfg.PASCAL_PATH, 'output')
cfg.WEIGHTS_DIR = os.path.join(cfg.PASCAL_PATH, 'weights')
cfg.WEIGHTS_FILE = os.path.join(cfg.WEIGHTS_DIR, weights_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
parser.add_argument('--data_dir', default="data", type=str)
parser.add_argument('--threshold', default=0.2, type=float)
parser.add_argument('--iou_threshold', default=0.5, type=float)
parser.add_argument('--gpu', default='', type=str)
args = parser.parse_args()
if args.gpu is not None:
cfg.GPU = args.gpu
if args.data_dir != cfg.DATA_PATH:
update_config_paths(args.data_dir, args.weights)
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
yolo = YOLONet()
pascal = pascal_voc('train')
solver = Solver(yolo, pascal)
print('Start training ...')
solver.train()
print('Done training.')
if __name__ == '__main__':
# python train.py --weights YOLO_small.ckpt --gpu 0
main()
| [
"luchenglin@leadcoretech.com"
] | luchenglin@leadcoretech.com |
9c2aa150b9b7abbab3bc15bcc19cbffd2f73bcfe | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/deqp/src/scripts/khr_util/registry.py | 58dab52c64d273336ed333499b874f040502eb67 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 11,636 | py | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import sys, logging, re
from lxml import etree
from collections import OrderedDict
from functools import wraps, partial
log = logging.getLogger(__name__)
debug = log.debug
info = log.info
warning = log.warning
def warnElem(elem, fmt, *args):
warning('%s:%d, %s %s: ' + fmt, elem.base, elem.sourceline, elem.tag, elem.get('name') or '', *args)
class Object(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Located(Object):
location = None
class Group(Located): pass
class Enum(Located): pass
class Enums(Located):
name = None
comment = None
enums = None
class Type(Located):
location = None
name=None
definition=None
api=None
requires=None
def makeObject(cls, elem, **kwargs):
kwargs.setdefault('name', elem.get('name'))
kwargs.setdefault('comment', elem.get('comment'))
kwargs['location'] = (elem.base, elem.sourceline)
return cls(**kwargs)
def parseEnum(eEnum):
return makeObject(
Enum, eEnum,
value=eEnum.get('value'),
type=eEnum.get('type'),
alias=eEnum.get('alias'))
class Param(Located): pass
class Command(Located):
name=None
declaration=None
type=None
ptype=None
group=None
params=None
alias=None
class Interface(Object): pass
class Index:
def __init__(self, items=[], **kwargs):
self.index = {}
self.items = []
self.__dict__.update(kwargs)
self.update(items)
def append(self, item):
keys = self.getkeys(item)
for key in keys:
self[key] = item
self.items.append(item)
def update(self, items):
for item in items:
self.append(item)
def __iter__(self):
return iter(self.items)
def nextkey(self, key):
raise KeyError
def getkeys(self, item):
return []
def __contains__(self, key):
return key in self.index
def __setitem__(self, key, item):
if key in self.index:
self.duplicateKey(key, item)
else:
self.index[key] = item
def duplicateKey(self, key, item):
warning("Duplicate %s: %r", type(item).__name__.lower(), key)
def __getitem__(self, key):
try:
while True:
try:
return self.index[key]
except KeyError:
pass
key = self.nextkey(key)
except KeyError:
item = self.missingKey(key)
self.append(item)
return item
def missingKey(self, key):
raise KeyError(key)
def __len__(self):
return len(self.items)
class ElemNameIndex(Index):
def getkeys(self, item):
return [item.get('name')]
def duplicateKey(self, key, item):
warnElem(item, "Duplicate key: %s", key)
class CommandIndex(Index):
def getkeys(self, item):
return [item.findtext('proto/name'), item.findtext('alias')]
class NameApiIndex(Index):
def getkeys(self, item):
return [(item.get('name'), item.get('api'))]
def nextkey(self, key):
if len(key) == 2 and key[1] is not None:
return key[0], None
raise KeyError
def duplicateKey(self, key, item):
warnElem(item, "Duplicate key: %s", key)
class TypeIndex(NameApiIndex):
def getkeys(self, item):
return [(item.get('name') or item.findtext('name'), item.get('api'))]
class EnumIndex(NameApiIndex):
def getkeys(self, item):
name, api, alias = (item.get(attrib) for attrib in ['name', 'api', 'alias'])
return [(name, api)] + ([(alias, api)] if alias is not None else [])
def duplicateKey(self, (name, api), item):
if name == item.get('alias'):
warnElem(item, "Alias already present: %s", name)
else:
warnElem(item, "Already present")
class Registry:
def __init__(self, eRegistry):
self.types = TypeIndex(eRegistry.findall('types/type'))
self.groups = ElemNameIndex(eRegistry.findall('groups/group'))
self.enums = EnumIndex(eRegistry.findall('enums/enum'))
for eEnum in self.enums:
groupName = eEnum.get('group')
if groupName is not None:
self.groups[groupName] = eEnum
self.commands = CommandIndex(eRegistry.findall('commands/command'))
self.features = ElemNameIndex(eRegistry.findall('feature'))
self.apis = {}
for eFeature in self.features:
self.apis.setdefault(eFeature.get('api'), []).append(eFeature)
for apiFeatures in self.apis.itervalues():
apiFeatures.sort(key=lambda eFeature: eFeature.get('number'))
self.extensions = ElemNameIndex(eRegistry.findall('extensions/extension'))
self.element = eRegistry
def getFeatures(self, api, checkVersion=None):
return [eFeature for eFeature in self.apis[api]
if checkVersion is None or checkVersion(eFeature.get('number'))]
class NameIndex(Index):
createMissing = None
kind = "item"
def getkeys(self, item):
return [item.name]
def missingKey(self, key):
if self.createMissing:
warning("Reference to implicit %s: %r", self.kind, key)
return self.createMissing(name=key)
else:
raise KeyError
def matchApi(api1, api2):
return api1 is None or api2 is None or api1 == api2
class Interface(Object):
pass
def extractAlias(eCommand):
aliases = eCommand.xpath('alias/@name')
return aliases[0] if aliases else None
def getExtensionName(eExtension):
return eExtension.get('name')
def extensionSupports(eExtension, api, profile=None):
if api == 'gl' and profile == 'core':
needSupport = 'glcore'
else:
needSupport = api
supporteds = eExtension.get('supported').split('|')
return needSupport in supporteds
class InterfaceSpec(Object):
def __init__(self):
self.enums = set()
self.types = set()
self.commands = set()
def addComponent(self, eComponent):
if eComponent.tag == 'require':
def modify(items, item): items.add(item)
else:
assert eComponent.tag == 'remove'
def modify(items, item):
try:
items.remove(item)
except KeyError:
warning("Tried to remove absent item: %s", item)
for typeName in eComponent.xpath('type/@name'):
modify(self.types, typeName)
for enumName in eComponent.xpath('enum/@name'):
modify(self.enums, enumName)
for commandName in eComponent.xpath('command/@name'):
modify(self.commands, commandName)
def addComponents(self, elem, api, profile=None):
for eComponent in elem.xpath('require|remove'):
cApi = eComponent.get('api')
cProfile = eComponent.get('profile')
if (matchApi(api, eComponent.get('api')) and
matchApi(profile, eComponent.get('profile'))):
self.addComponent(eComponent)
def addFeature(self, eFeature, api=None, profile=None, force=False):
info('Feature %s', eFeature.get('name'))
if not matchApi(api, eFeature.get('api')):
if not force: return
warnElem(eFeature, 'API %s is not supported', api)
self.addComponents(eFeature, api, profile)
def addExtension(self, eExtension, api=None, profile=None, force=False):
if not extensionSupports(eExtension, api, profile):
if not force: return
warnElem(eExtension, '%s is not supported in API %s' % (getExtensionName(eExtension), api))
self.addComponents(eExtension, api, profile)
def createInterface(registry, spec, api=None):
def parseType(eType):
# todo: apientry
#requires = eType.get('requires')
#if requires is not None:
# types[requires]
return makeObject(
Type, eType,
name=eType.get('name') or eType.findtext('name'),
definition=''.join(eType.xpath('.//text()')),
api=eType.get('api'),
requires=eType.get('requires'))
def createType(name):
info('Add type %s', name)
try:
return parseType(registry.types[name, api])
except KeyError:
return Type(name=name)
def createEnum(enumName):
info('Add enum %s', enumName)
return parseEnum(registry.enums[enumName, api])
def extractPtype(elem):
ePtype = elem.find('ptype')
if ePtype is None:
return None
return types[ePtype.text]
def extractGroup(elem):
groupName = elem.get('group')
if groupName is None:
return None
return groups[groupName]
def parseParam(eParam):
return makeObject(
Param, eParam,
name=eParam.get('name') or eParam.findtext('name'),
declaration=''.join(eParam.xpath('.//text()')).strip(),
type=''.join(eParam.xpath('(.|ptype)/text()')).strip(),
ptype=extractPtype(eParam),
group=extractGroup(eParam))
def createCommand(commandName):
info('Add command %s', commandName)
eCmd = registry.commands[commandName]
eProto = eCmd.find('proto')
return makeObject(
Command, eCmd,
name=eCmd.findtext('proto/name'),
declaration=''.join(eProto.xpath('.//text()')).strip(),
type=''.join(eProto.xpath('(.|ptype)/text()')).strip(),
ptype=extractPtype(eProto),
group=extractGroup(eProto),
alias=extractAlias(eCmd),
params=NameIndex(map(parseParam, eCmd.findall('param'))))
def createGroup(name):
info('Add group %s', name)
try:
eGroup = registry.groups[name]
except KeyError:
return Group(name=name)
return makeObject(
Group, eGroup,
# Missing enums are often from exotic extensions. Don't create dummy entries,
# just filter them out.
enums=NameIndex(enums[name] for name in eGroup.xpath('enum/@name')
if name in enums))
def sortedIndex(items):
return NameIndex(sorted(items, key=lambda item: item.location))
groups = NameIndex(createMissing=createGroup, kind="group")
types = NameIndex(map(createType, spec.types),
createMissing=createType, kind="type")
enums = NameIndex(map(createEnum, spec.enums),
createMissing=Enum, kind="enum")
commands = NameIndex(map(createCommand, spec.commands),
createMissing=Command, kind="command")
# This is a mess because the registry contains alias chains whose
# midpoints might not be included in the interface even though
# endpoints are.
for command in commands:
alias = command.alias
aliasCommand = None
while alias is not None:
aliasCommand = registry.commands[alias]
alias = extractAlias(aliasCommand)
command.alias = None
if aliasCommand is not None:
name = aliasCommand.findtext('proto/name')
if name in commands:
command.alias = commands[name]
return Interface(
types=sortedIndex(types),
enums=sortedIndex(enums),
groups=sortedIndex(groups),
commands=sortedIndex(commands))
def spec(registry, api, version=None, profile=None, extensionNames=[], protects=[], force=False):
available = set(protects)
spec = InterfaceSpec()
if version is None or version is False:
def check(v): return False
elif version is True:
def check(v): return True
else:
def check(v): return v <= version
for eFeature in registry.getFeatures(api, check):
spec.addFeature(eFeature, api, profile, force)
for extName in extensionNames:
eExtension = registry.extensions[extName]
protect = eExtension.get('protect')
if protect is not None and protect not in available:
warnElem(eExtension, "Unavailable dependency %s", protect)
if not force:
continue
spec.addExtension(eExtension, api, profile, force)
available.add(extName)
return spec
def interface(registry, api, **kwargs):
s = spec(registry, api, **kwargs)
return createInterface(registry, s, api)
def parse(path):
return Registry(etree.parse(path))
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com |
11cb7c1066e417c8a9457d88bc2ca91dafccd076 | cda05670b5952f890018e54f133373912b77a4e9 | /Hackerrank/Python/Loops.py | e91a4a9e13bc4e3fd69c9027cb3a18cbaf14f6d2 | [] | no_license | rishichawla/CompetitiveProgramming | 30bd59a7a496da7b8bbac3f8aef5b52b2a580504 | 6596938395d48dddd160294dcf1f73d064541d09 | refs/heads/master | 2020-03-31T03:43:48.832372 | 2019-10-21T10:35:08 | 2019-10-21T10:35:08 | 151,875,473 | 0 | 1 | null | 2019-10-09T14:18:41 | 2018-10-06T19:40:24 | C++ | UTF-8 | Python | false | false | 175 | py | # https://www.hackerrank.com/challenges/python-loops/problem
if __name__ == '__main__':
n = int(input())
l = [i**2 for i in range(n)]
for i in l:
print(i)
| [
"rishi.chawla14@gmail.com"
] | rishi.chawla14@gmail.com |
2d32a2dd27389430754868b22aa0dee710fde449 | 5b6a98d871dc04623f96e5e37e4f6f2efbe1bdda | /Other/Training/Useless/auto_encoding01raw.py | 940cb35c945713ed54bff483421bdfdce230a64f | [] | no_license | bolatroniks/AlphaEvolution | 5ab9bbe698b26ae0b43041aa69a33a3a1bc76ec9 | 3873e042a1d442fed05206e9608f5a1447e95b9b | refs/heads/master | 2022-12-12T04:33:08.148714 | 2019-11-27T19:56:07 | 2019-11-27T19:56:07 | 185,647,627 | 0 | 1 | null | 2022-12-08T06:35:25 | 2019-05-08T17:03:27 | Python | UTF-8 | Python | false | false | 4,777 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 21:35:27 2017
@author: renato
"""
import numpy
import Pycluster
from sklearn.cluster import KMeans
import numpy as np
points = numpy.vstack([numpy.random.multivariate_normal(mean,
0.03 * numpy.diag([1,1]),
20)
for mean in [(1, 1), (2, 4), (3, 2)]])
points.shape
kmeans = KMeans(n_clusters=2, random_state=0).fit(points)
kmeans
kmeans.labels_
kmeans.labels_.shape
plt.plot(kmeans.labels_)
from matplotlib import pyplot as plt
plt.plot(kmeans.labels_)
kmeans.predict([[0, 0], [4, 4]])
kmeans.cluster_centers_
from Trading.Dataset.Dataset import Dataset
ds = Dataset(ccy_pair='USD_ZAR', from_time=2000, to_time=2010)
ds.initOnlineConfig ()
ds.loadCandles ()
ds.loadFeatures ()
ds.f_df.shape
X = np.zeros ((2067, 4))
X[:,0] = (ds.f_df.Close [1,:] / ds.f_df.Close[0:-1] - 1) / ds.f_df.hist_vol_1y_close[0,:]
X[:,0] = (ds.f_df.Close [1,:] / ds.f_df.Close[0:-1] - 1) / ds.f_df.hist_vol_1y_close[0:]
ds.f_df.hist_vol_1y_close[0:].shape
X[:,0] = (ds.f_df.Close [1,:] / ds.f_df.Close[0:-1] - 1) / ds.f_df.hist_vol_1y_close[0:-1]
X[:,0] = (ds.f_df.Close [1:] / ds.f_df.Close[0:-1] - 1) / ds.f_df.hist_vol_1y_close[0:-1]
ds.f_df.Close [1:].shape
ds.f_df.Close[0:-1].shape
ds.f_df.hist_vol_1y_close[0:-1].shape
X[:,0].shape
X[:,0] = (ds.f_df.Close [1:] / ds.f_df.Close[0:-1] - 1) / ds.f_df.hist_vol_1y_close[0:-1]
X[:,0] = (ds.f_df.Close [1:] / ds.f_df.Close[0:-1] - 1)
X[:,0] = (ds.f_df.Close [1:])
X[:,0] = ds.f_df.Close [1:] / ds.f_df.Close[0:-1] - 1
X[:,0] = ds.f_df.Close [1:] / ds.f_df.Close[0:-1]
ds.f_df.Close[0:-1].shape
X[:,0] = ds.f_df.Close [1:]
ds.f_df.Close [1:] / ds.f_df.Close[0:-1]
(ds.f_df.Close [1:] / ds.f_df.Close[0:-1]).shape
ds.f_df.Close [1:].shape
ds.f_df.Close [1:] / ds.f_df.Close [1:]
ds.f_df.Close [1:] / ds.f_df.Close [1:].shape
(ds.f_df.Close [1:] / ds.f_df.Close [1:]).shape
(ds.f_df.Close [1:] / ds.f_df.Close [0:-1]).shape
(ds.f_df.Close [1:] / ds.f_df.Close [0:-2]).shape
(ds.f_df.Close [1:] / ds.f_df.Close [0:-10]).shape
a = ds.f_df.Close [1:]
b = ds.f_df.Close [0:-1]
a.shape
b.shape
(a/b).shape
(np.array(a)/np.array(b)).shape
X[:,0] = np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close) + 1.0
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) + 1.0
plt.plot(X[:,0])
plt.plot(ds.f_df.Close)
plt.plot(ds.f_df.hist_vol_1y_close)
ds.computeFeatures ()
plt.plot(ds.f_df.hist_vol_1y_close)
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) + 1.0
X = np.zeros ((len(ds.f_df), 4))
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) + 1.0
X = np.zeros ((len(ds.f_df-1), 4))
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) + 1.0
X.shape
len(ds.f_df)
X = np.zeros ((len(ds.f_df-1), 4))
X.shape
X = np.zeros ((len(ds.f_df)-1, 4))
X.shape
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) + 1.0
plt.plot(X[:,0])
plt.plot(ds.f_df.hist_vol_1y_close)
plt.plot( (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) )
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) * 0.01 + 1.0
plt.plot(X[:,0])
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) * 0.1 + 1.0
plt.plot(X[:,0])
X[:,0] = (np.array(ds.f_df.Close [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) * 0.1
plt.plot(X[:,0])
X[:,1] = (np.array(ds.f_df.Open [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) * 0.1
X[:,2] = (np.array(ds.f_df.High [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) * 0.1
X[:,3] = (np.array(ds.f_df.Low [1:]) / np.array(ds.f_df.Close[0:-1]) - 1.0) / np.array(ds.f_df.hist_vol_1y_close[0:-1]) * 0.1
kmeans = KMeans(n_clusters=256, random_state=0).fit(X)
plt.hist(kmeans.labels_, bins=256)
a = kmeans.labels_(kmeans.labels_ == 150)
type(kmeans.labels_)
a = kmeans.labels_[kmeans.labels_ == 150]
a.shape
a
idx = kmeans.labels_ == 150
idx
b = X[idx, :]
b
X[kmeans.labels_ == 151, :]
X[kmeans.labels_ == 152, :]
X[kmeans.labels_ == 153, :]
X[kmeans.labels_ == 154, :]
X[kmeans.labels_ == 255, :] | [
"renato.o.barros@gmail.com"
] | renato.o.barros@gmail.com |
ea2f3fd552459d85a170b03d4f5e904f7c191349 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s750895351.py | 7378de763ab9a50cfa785771268623df0b68e5e7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import sys
input = sys.stdin.buffer.readline
from collections import defaultdict
def main():
H,W,N = map(int,input().split())
d = defaultdict(int)
for i in range(N):
a,b = map(int,input().split())
a -= 1
b -= 1
for x in range(3):
for y in range(3):
na,nb = a-x,b-y
if (0 <= na < H-2 and 0 <= nb < W-2):
d[na*W+nb] += 1
d = list(d.values())
ans = (H-2)*(W-2)-len(d)
print(ans)
for i in range(9):
i += 1
print(d.count(i))
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
db8cf8813ddff05f44e5040fb86e0b586ca7cef5 | 968b8341b2327b84a1405a4742cba482e556b3f3 | /Configuration/python/histogramDefinitions.py | 7278c15fdd5098bf0c2f9962704a80fd53ff4ae5 | [] | no_license | tmrhombus/OSUDisplacedHiggs | 130652d5bc5540b5eed5b51036b429d2ffb8e058 | 1366e79119cee20a97b91c4c33251922adc058fb | refs/heads/master | 2021-01-01T16:57:21.364029 | 2017-08-04T14:01:56 | 2017-08-04T14:01:56 | 97,959,263 | 0 | 1 | null | 2017-07-21T14:57:03 | 2017-07-21T14:57:03 | null | UTF-8 | Python | false | false | 42,998 | py | import FWCore.ParameterSet.Config as cms
# import definitions of d0/dz
from OSUDisplacedHiggs.Configuration.objectDefinitions import *
###############################################
##### Set up the histograms to be plotted #####
###############################################
#ElectronD0Histograms = cms.PSet(
# inputCollection = cms.vstring("electrons","beamspots"),
# histograms = cms.VPSet (
###################################################################
# track d0 error histogram
# cms.PSet (
# name = cms.string("electronTrackD0Error"),
# title = cms.string("Electron track #sigma(d_{0});electron track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("10000*electron.gsfTrack.d0Error"),
# ),
###################################################################
# d0 histograms
# cms.PSet (
# name = cms.string("electronD0_100um"),
# title = cms.string("Electron d_{0};electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -100, 100),
# inputVariables = cms.vstring(electronD0_um),
# ),
# cms.PSet (
# name = cms.string("electronD0_200um"),
# title = cms.string("Electron d_{0};electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -200, 200),
# inputVariables = cms.vstring(electronD0_um),
# ),
# cms.PSet (
# name = cms.string("electronD0_500um"),
# title = cms.string("Electron d_{0};electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring(electronD0_um),
# ),
# cms.PSet (
# name = cms.string("electronD0_5mm"),
# title = cms.string("Electron d_{0};electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -5000, 5000),
# inputVariables = cms.vstring(electronD0_um),
# ),
# cms.PSet (
# name = cms.string("electronD0_5cm"),
# title = cms.string("Electron d_{0};electron d_{0} [cm]"),
# binsX = cms.untracked.vdouble(100, -5, 5),
# inputVariables = cms.vstring(electronD0_cm),
# ),
# cms.PSet (
# name = cms.string("electronD0_10cm"),
# title = cms.string("Electron d_{0};electron d_{0} [cm]"),
# binsX = cms.untracked.vdouble(100, -10, 10),
# inputVariables = cms.vstring(electronD0_cm),
# ),
# cms.PSet (
# name = cms.string("electronD0_20cm"),
# title = cms.string("Electron d_{0};electron d_{0} [cm]"),
# binsX = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring(electronD0_cm),
# ),
###################################################################
# abs(d0) histograms
# cms.PSet (
# name = cms.string("electronAbsD0_100um"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 100),
# inputVariables = cms.vstring(electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_200um"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 200),
# inputVariables = cms.vstring(electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_500um"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring(electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_5mm"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 5000),
# inputVariables = cms.vstring(electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_5cm"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [cm]"),
# binsX = cms.untracked.vdouble(100, 0, 5),
# inputVariables = cms.vstring(electronAbsD0_cm),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_10cm"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [cm]"),
# binsX = cms.untracked.vdouble(100, 0, 10),
# inputVariables = cms.vstring(electronAbsD0_cm),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_20cm"),
# title = cms.string("Electron |d_{0}|;electron |d_{0}| [cm]"),
# binsX = cms.untracked.vdouble(100, 0, 20),
# inputVariables = cms.vstring(electronAbsD0_cm),
# ),
###################################################################
# sig(d0) histograms
# cms.PSet (
# name = cms.string("electronD0Sig_5"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -5, 5),
# inputVariables = cms.vstring(electronD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronD0Sig_10"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -10, 10),
# inputVariables = cms.vstring(electronD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronD0Sig_20"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring(electronD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronD0Sig_50"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -50, 50),
# inputVariables = cms.vstring(electronD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronD0Sig_100"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -100, 100),
# inputVariables = cms.vstring(electronD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronD0Sig_200"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -200, 200),
# inputVariables = cms.vstring(electronD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronD0Sig_500"),
# title = cms.string("Electron d_{0}/#sigma(d_{0});electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring(electronD0Sig),
# ),
#
###################################################################
# abs(sig(d0)) histograms
# cms.PSet (
# name = cms.string("electronAbsD0Sig_5"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 5),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_10"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 10),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_20"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 20),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_50"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 50),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_100"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 100),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_200"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 200),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_500"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring(electronAbsD0Sig),
# ),
#
###################################################################
# 2D abs(d0) vs. abs(sig(d0))
# cms.PSet (
# name = cms.string("electronAbsD0_500um_vs_ElectronAbsD0Sig_50"),
# title = cms.string("Electron |d_{0}| vs. Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|;electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 50),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring(electronAbsD0Sig, electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_5mm_vs_ElectronAbsD0Sig_500"),
# title = cms.string("Electron |d_{0}| vs. Electron |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|;electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# binsY = cms.untracked.vdouble(100, 0, 5000),
# inputVariables = cms.vstring(electronAbsD0Sig, electronAbsD0_um),
# ),
###################################################################
# 2D abs(d0) vs. d0 error
# cms.PSet (
# name = cms.string("electronAbsD0_500um_vs_ElectronTrackD0Error_500"),
# title = cms.string("Electron |d_{0}| vs. Electron #sigma(d_{0});electron #sigma(d_{0}) [#mum];electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("10000*electron.gsfTrack.d0Error", electronAbsD0_um),
# ),
#
###################################################################
###################################################################
# 2D d0 vs. pt
# cms.PSet (
# name = cms.string("electronD0_vs_electronPt"),
# title = cms.string("Electron d_{0} vs. Electron p_{T};electron p_{T} [GeV];electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 200),
# binsY = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring("electron.pt", electronD0_um),
# ),
# ###################################################################
# 2D sig(d0) vs. pt
# cms.PSet (
# name = cms.string("electronD0Sig_vs_electronPt"),
# title = cms.string("Electron d_{0}/#sigma(d_{0}) vs. Electron p_{T};electron p_{T} [GeV];electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, 0, 200),
# binsY = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring("electron.pt", electronD0Sig),
# ),
###################################################################
# 2D track d0 error vs. pt
# cms.PSet (
# name = cms.string("electronTrackD0Error_vs_electronPt"),
# title = cms.string("Electron track #sigma(d_{0}) vs. Electron p_{T};electron p_{T} [GeV];electron track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("electron.pt", "10000*electron.gsfTrack.d0Error"),
# ),
###################################################################
# 2D d0 vs. eta
# cms.PSet (
# name = cms.string("electronD0_vs_electronEta"),
# title = cms.string("Electron d_{0} vs. Electron #eta;electron #eta;electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -3, 3),
# binsY = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring("electron.eta", electronD0_um),
# ),
###################################################################
# 2D sig(d0) vs. eta
# cms.PSet (
# name = cms.string("electronD0Sig_vs_electronEta"),
# title = cms.string("Electron d_{0}/#sigma(d_{0}) vs. Electron #eta;electron #eta;electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -3, 3),
# binsY = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring("electron.eta", electronD0Sig),
# ),
###################################################################
# 2D track d0 error vs. eta
# cms.PSet (
# name = cms.string("electronTrackD0Error_vs_electronEta"),
# title = cms.string("Electron track #sigma(d_{0}) vs. Electron #eta;electron #eta;electron track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, -3, 3),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("electron.eta", "10000*electron.gsfTrack.d0Error"),
# ),
###################################################################
# 2D d0 vs. phi
# cms.PSet (
# name = cms.string("electronD0_vs_electronPhi"),
# title = cms.string("Electron d_{0} vs. Electron #phi;electron #phi;electron d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -3.14, 3.14),
# binsY = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring("electron.phi", electronD0_um),
# ),
###################################################################
# 2D sig(d0) vs. phi
# cms.PSet (
# name = cms.string("electronD0Sig_vs_electronPhi"),
# title = cms.string("Electron d_{0}/#sigma(d_{0}) vs. Electron #phi;electron #phi;electron d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -3.14, 3.14),
# binsY = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring("electron.phi", electronD0Sig),
# ),
###################################################################
# 2D track d0 error vs. phi
# cms.PSet (
# name = cms.string("electronTrackD0Error_vs_electronPhi"),
# title = cms.string("Electron track #sigma(d_{0}) vs. Electron #phi;electron #phi;electron track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, -3.14, 3.14),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("electron.phi", "10000*electron.gsfTrack.d0Error"),
# ),
#
# )
#)
MuonD0Histograms = cms.PSet(
inputCollection = cms.vstring("muons","beamspots"),
histograms = cms.VPSet (
###################################################################
# track d0 error histogram
cms.PSet (
name = cms.string("muonTrackD0Error"),
title = cms.string("Muon track #sigma(d_{0});muon track #sigma(d_{0}) [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 500),
inputVariables = cms.vstring("10000*muon.innerTrack.d0Error"),
),
###################################################################
# d0 histograms
cms.PSet (
name = cms.string("muonD0_100um"),
title = cms.string("Muon d_{0};muon d_{0} [#mum]"),
binsX = cms.untracked.vdouble(100, -100, 100),
inputVariables = cms.vstring(muonD0_um),
),
cms.PSet (
name = cms.string("muonD0_200um"),
title = cms.string("Muon d_{0};muon d_{0} [#mum]"),
binsX = cms.untracked.vdouble(100, -200, 200),
inputVariables = cms.vstring(muonD0_um),
),
cms.PSet (
name = cms.string("muonD0_500um"),
title = cms.string("Muon d_{0};muon d_{0} [#mum]"),
binsX = cms.untracked.vdouble(100, -500, 500),
inputVariables = cms.vstring(muonD0_um),
),
cms.PSet (
name = cms.string("muonD0_5mm"),
title = cms.string("Muon d_{0};muon d_{0} [#mum]"),
binsX = cms.untracked.vdouble(100, -5000, 5000),
inputVariables = cms.vstring(muonD0_um),
),
cms.PSet (
name = cms.string("muonD0_5cm"),
title = cms.string("Muon d_{0};muon d_{0} [cm]"),
binsX = cms.untracked.vdouble(100, -5, 5),
inputVariables = cms.vstring(muonD0_cm),
),
cms.PSet (
name = cms.string("muonD0_10cm"),
title = cms.string("Muon d_{0};muon d_{0} [cm]"),
binsX = cms.untracked.vdouble(100, -10, 10),
inputVariables = cms.vstring(muonD0_cm),
),
cms.PSet (
name = cms.string("muonD0_20cm"),
title = cms.string("Muon d_{0};muon d_{0} [cm]"),
binsX = cms.untracked.vdouble(100, -20, 20),
inputVariables = cms.vstring(muonD0_cm),
),
###################################################################
# abs(d0) histograms
cms.PSet (
name = cms.string("muonAbsD0_100um"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 100),
inputVariables = cms.vstring(muonAbsD0_um),
),
cms.PSet (
name = cms.string("muonAbsD0_200um"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 200),
inputVariables = cms.vstring(muonAbsD0_um),
),
cms.PSet (
name = cms.string("muonAbsD0_500um"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 500),
inputVariables = cms.vstring(muonAbsD0_um),
),
cms.PSet (
name = cms.string("muonAbsD0_5mm"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 5000),
inputVariables = cms.vstring(muonAbsD0_um),
),
cms.PSet (
name = cms.string("muonAbsD0_5cm"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [cm]"),
binsX = cms.untracked.vdouble(100, 0, 5),
inputVariables = cms.vstring(muonAbsD0_cm),
),
cms.PSet (
name = cms.string("muonAbsD0_10cm"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [cm]"),
binsX = cms.untracked.vdouble(100, 0, 10),
inputVariables = cms.vstring(muonAbsD0_cm),
),
cms.PSet (
name = cms.string("muonAbsD0_20cm"),
title = cms.string("Muon |d_{0}|;muon |d_{0}| [cm]"),
binsX = cms.untracked.vdouble(100, 0, 20),
inputVariables = cms.vstring(muonAbsD0_cm),
),
###################################################################
# sig(d0) histograms
cms.PSet (
name = cms.string("muonD0Sig_5"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -5, 5),
inputVariables = cms.vstring(muonD0Sig),
),
cms.PSet (
name = cms.string("muonD0Sig_10"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -10, 10),
inputVariables = cms.vstring(muonD0Sig),
),
cms.PSet (
name = cms.string("muonD0Sig_20"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -20, 20),
inputVariables = cms.vstring(muonD0Sig),
),
cms.PSet (
name = cms.string("muonD0Sig_50"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -50, 50),
inputVariables = cms.vstring(muonD0Sig),
),
cms.PSet (
name = cms.string("muonD0Sig_100"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -100, 100),
inputVariables = cms.vstring(muonD0Sig),
),
cms.PSet (
name = cms.string("muonD0Sig_200"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -200, 200),
inputVariables = cms.vstring(muonD0Sig),
),
cms.PSet (
name = cms.string("muonD0Sig_500"),
title = cms.string("Muon d_{0}/#sigma(d_{0});muon d_{0}/#sigma(d_{0})"),
binsX = cms.untracked.vdouble(100, -500, 500),
inputVariables = cms.vstring(muonD0Sig),
),
###################################################################
# abs(sig(d0)) histograms
cms.PSet (
name = cms.string("muonAbsD0Sig_5"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 5),
inputVariables = cms.vstring(muonAbsD0Sig),
),
cms.PSet (
name = cms.string("muonAbsD0Sig_10"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 10),
inputVariables = cms.vstring(muonAbsD0Sig),
),
cms.PSet (
name = cms.string("muonAbsD0Sig_20"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 20),
inputVariables = cms.vstring(muonAbsD0Sig),
),
cms.PSet (
name = cms.string("muonAbsD0Sig_50"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 50),
inputVariables = cms.vstring(muonAbsD0Sig),
),
cms.PSet (
name = cms.string("muonAbsD0Sig_100"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 100),
inputVariables = cms.vstring(muonAbsD0Sig),
),
cms.PSet (
name = cms.string("muonAbsD0Sig_200"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 200),
inputVariables = cms.vstring(muonAbsD0Sig),
),
cms.PSet (
name = cms.string("muonAbsD0Sig_500"),
title = cms.string("Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|"),
binsX = cms.untracked.vdouble(100, 0, 500),
inputVariables = cms.vstring(muonAbsD0Sig),
),
###################################################################
# 2D abs(d0) vs. abs(sig(d0))
cms.PSet (
name = cms.string("muonAbsD0_500um_vs_MuonAbsD0Sig_50"),
title = cms.string("Muon |d_{0}| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 50),
binsY = cms.untracked.vdouble(100, 0, 500),
inputVariables = cms.vstring(muonAbsD0Sig, muonAbsD0_um),
),
cms.PSet (
name = cms.string("muonAbsD0_5mm_vs_MuonAbsD0Sig_500"),
title = cms.string("Muon |d_{0}| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 500),
binsY = cms.untracked.vdouble(100, 0, 5000),
inputVariables = cms.vstring(muonAbsD0Sig, muonAbsD0_um),
),
###################################################################
# 2D abs(d0) vs. d0 error
cms.PSet (
name = cms.string("muonAbsD0_500um_vs_MuonTrackD0Error_500"),
title = cms.string("Muon |d_{0}| vs. Muon #sigma(d_{0});muon #sigma(d_{0}) [#mum];muon |d_{0}| [#mum]"),
binsX = cms.untracked.vdouble(100, 0, 500),
binsY = cms.untracked.vdouble(100, 0, 500),
inputVariables = cms.vstring("10000*muon.innerTrack.d0Error", muonAbsD0_um),
),
###################################################################
# 2D d0 vs. pt
# cms.PSet (
# name = cms.string("muonD0_vs_muonPt"),
# title = cms.string("Muon d_{0} vs. Muon p_{T};muon p_{T} [GeV];muon d_{0} [#mum];"),
# binsX = cms.untracked.vdouble(100, 0, 200),
# binsY = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring("muon.pt", muonD0_um),
# ),
###################################################################
# 2D sig(d0) vs. pt
# cms.PSet (
# name = cms.string("muonD0Sig_vs_muonPt"),
# title = cms.string("Muon d_{0}/#sigma(d_{0}) vs. Muon p_{T};muon p_{T} [GeV];muon d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, 0, 200),
# binsY = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring("muon.pt", muonD0Sig),
# ),
###################################################################
# 2D track d0 error vs. pt
# cms.PSet (
# name = cms.string("muonTrackD0Error_vs_muonPt"),
# title = cms.string("Muon track #sigma(d_{0}) vs. Muon p_{T};muon p_{T} [GeV];muon track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 500),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("muon.pt", "10000*muon.innerTrack.d0Error"),
# ),
###################################################################
# 2D d0 vs. eta
# cms.PSet (
# name = cms.string("muonD0_vs_muonEta"),
# title = cms.string("Muon d_{0} vs. Muon #eta;muon #eta;muon d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -3, 3),
# binsY = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring("muon.eta", muonD0_um),
# ),
###################################################################
# 2D sig(d0) vs. eta
# cms.PSet (
# name = cms.string("muonD0Sig_vs_muonEta"),
# title = cms.string("Muon d_{0}/#sigma(d_{0}) vs. Muon #eta;muon #eta;muon d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -3, 3),
# binsY = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring("muon.eta", muonD0Sig),
# ),
###################################################################
# 2D track d0 error vs. eta
# cms.PSet (
# name = cms.string("muonTrackD0Error_vs_muonEta"),
# title = cms.string("Muon track #sigma(d_{0}) vs. Muon #eta;muon #eta;muon track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, -3, 3),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("muon.eta", "10000*muon.innerTrack.d0Error"),
# ),
# ###################################################################
# 2D d0 vs. phi
# cms.PSet (
# name = cms.string("muonD0_vs_muonPhi"),
# title = cms.string("Muon d_{0} vs. Muon #phi;muon #phi;muon d_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, -3.14, 3.14),
# binsY = cms.untracked.vdouble(100, -500, 500),
# inputVariables = cms.vstring("muon.phi", muonD0_um),
# ),
###################################################################
# 2D sig(d0) vs. phi
# cms.PSet (
# name = cms.string("muonD0Sig_vs_muonPhi"),
# title = cms.string("Muon d_{0}/#sigma(d_{0}) vs. Muon #phi;muon #phi;muon d_{0}/#sigma(d_{0})"),
# binsX = cms.untracked.vdouble(100, -3.14, 3.14),
# binsY = cms.untracked.vdouble(100, -20, 20),
# inputVariables = cms.vstring("muon.phi", muonD0Sig),
# ),
###################################################################
# 2D track d0 error vs. phi
# cms.PSet (
# name = cms.string("muonTrackD0Error_vs_muonPhi"),
# title = cms.string("Muon track #sigma(d_{0}) vs. Muon #phi;muon #phi;muon track #sigma(d_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, -3.14, 3.14),
# binsY = cms.untracked.vdouble(100, 0, 500),
# inputVariables = cms.vstring("muon.phi", "10000*muon.innerTrack.d0Error"),
# ),
)
)
#ElectronMuonD0Histograms = cms.PSet(
# inputCollection = cms.vstring("electrons","muons","beamspots"),
# histograms = cms.VPSet (
###################################################################
#
# cms.PSet (
# name = cms.string("electronAbsD0_vs_muonAbsD0_100um"),
# title = cms.string("Electron |d_{0}| vs. Muon |d_{0}|;muon |d_{0}| [#mum];electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100,0,100),
# binsY = cms.untracked.vdouble(100,0,100),
# inputVariables = cms.vstring(muonAbsD0_um, electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_vs_muonAbsD0_200um"),
# title = cms.string("Electron |d_{0}| vs. Muon |d_{0}|;muon |d_{0}| [#mum];electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100,0,200),
# binsY = cms.untracked.vdouble(100,0,200),
# inputVariables = cms.vstring(muonAbsD0_um, electronAbsD0_um),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_vs_muonAbsD0_500um"),
# title = cms.string("Electron |d_{0}| vs. Muon |d_{0}|;muon |d_{0}| [#mum];electron |d_{0}| [#mum]"),
# binsX = cms.untracked.vdouble(100,0,500),
# binsY = cms.untracked.vdouble(100,0,500),
# inputVariables = cms.vstring(muonAbsD0_um, electronAbsD0_um),
# ),
# This plot will be used for limit-setting, make it with 100um bins !!!
# cms.PSet (
# name = cms.string("electronAbsD0_vs_muonAbsD0_10cm"),
# title = cms.string("Electron |d_{0}| vs. Muon |d_{0}|;muon |d_{0}| [cm];electron |d_{0}| [cm]"),
# binsX = cms.untracked.vdouble(1000,0,10),
# binsY = cms.untracked.vdouble(1000,0,10),
# inputVariables = cms.vstring(muonAbsD0_cm, electronAbsD0_cm),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0_vs_muonAbsD0_20cm"),
# title = cms.string("Electron |d_{0}| vs. Muon |d_{0}|;muon |d_{0}| [cm];electron |d_{0}| [cm]"),
# binsX = cms.untracked.vdouble(100,0,20),
# binsY = cms.untracked.vdouble(100,0,20),
# inputVariables = cms.vstring(muonAbsD0_cm, electronAbsD0_cm),
# ),
#
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_5"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,5),
# binsY = cms.untracked.vdouble(100,0,5),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_10"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,10),
# binsY = cms.untracked.vdouble(100,0,10),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_20"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,20),
# binsY = cms.untracked.vdouble(100,0,20),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_50"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,50),
# binsY = cms.untracked.vdouble(100,0,50),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_100"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,100),
# binsY = cms.untracked.vdouble(100,0,100),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_200"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,200),
# binsY = cms.untracked.vdouble(100,0,200),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# cms.PSet (
# name = cms.string("electronAbsD0Sig_vs_muonAbsD0Sig_500"),
# title = cms.string("Electron |d_{0}/#sigma(d_{0})| vs. Muon |d_{0}/#sigma(d_{0})|;muon |d_{0}/#sigma(d_{0})|;electron |d_{0}/#sigma(d_{0})|"),
# binsX = cms.untracked.vdouble(100,0,500),
# binsY = cms.untracked.vdouble(100,0,500),
# inputVariables = cms.vstring(muonAbsD0Sig, electronAbsD0Sig),
# ),
# )
#)
#
#BeamspotHistograms = cms.PSet(
# inputCollection = cms.vstring("beamspots"),
# histograms = cms.VPSet (
#
###################################################################
#
# cms.PSet (
# name = cms.string("beamspotV0"),
# title = cms.string("Beamspot v_{0};beamspot v_{0} [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 5000),
# inputVariables = cms.vstring("10000*hypot(beamspot.x0, beamspot.y0)"),
# ),
# cms.PSet (
# name = cms.string("beamspotV0Error"),
# title = cms.string("Beamspot #sigma(v_{0});beamspot #sigma(v_{0}) [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 1),
# inputVariables = cms.vstring("10000*hypot(beamspot.x0Error, beamspot.y0Error)"),
# ),
# cms.PSet (
# name = cms.string("beamspotVz"),
# title = cms.string("Beamspot v_{z};beamspot v_{z} [#mum]"),
# binsX = cms.untracked.vdouble(100, 0, 50000),
# inputVariables = cms.vstring("10000*beamspot.z0"),
# ),
# )
#)
# Per-jet histograms (CMS FWCore "Plotter" PSet format): each entry books one
# histogram from an expression evaluated on every object of the input collection.
jetHistograms = cms.PSet(
    inputCollection = cms.vstring("jets"),
    histograms = cms.VPSet (
        # Displaced-jet discriminant; NOTE(review): the [-1, 1] range suggests
        # -1 is used as an "undefined" sentinel -- confirm with the producer.
        cms.PSet (
            name = cms.string("alphaMax"),
            title = cms.string("alphaMax;alphaMax"),
            binsX = cms.untracked.vdouble(100, -1, 1),
            inputVariables = cms.vstring("alphamax"),
        ),
        # Median log10 of the track impact-parameter significance per jet.
        cms.PSet (
            name = cms.string("medianlog10ipsig"),
            title = cms.string("medianlog10ipsig;medianlog10ipsig"),
            binsX = cms.untracked.vdouble(100, -5, 5),
            inputVariables = cms.vstring("medianlog10ipsig"),
        ),
    )
)
# Per-event histograms (CMS FWCore "Plotter" PSet format).  Most entries
# monitor per-event weights / scale factors applied downstream; the ctau
# entries monitor the generated stop lifetimes used for lifetime reweighting.
eventHistograms = cms.PSet(
    inputCollection = cms.vstring("eventvariables"),
    histograms = cms.VPSet (
        # Pile-up monitoring: reconstructed and true primary-vertex counts.
        cms.PSet (
            name = cms.string("numPV"),
            title = cms.string("Number of Primary Vertex; #PV"),
            binsX = cms.untracked.vdouble(75, 0, 75),
            inputVariables = cms.vstring("numPV"),
        ),
        cms.PSet (
            name = cms.string("numTruePV"),
            title = cms.string("Number of True PVs; #True PVs"),
            binsX = cms.untracked.vdouble(75, 0, 75),
            inputVariables = cms.vstring("numTruePV"),
        ),
        # Plotted on a log10 axis to cover several orders of magnitude.
        cms.PSet (
            name = cms.string("puScalingFactor"),
            title = cms.string("PU Scaling Factor; log(PU Scaling Factor)"),
            binsX = cms.untracked.vdouble(200, -4, 4),
            inputVariables = cms.vstring("log10(puScalingFactor)"),
        ),
        cms.PSet (
            name = cms.string("sumJetPt"),
            title = cms.string("Sum of Jet Transverse Momentum; #Sigma p_{T}_{jet}"),
            binsX = cms.untracked.vdouble(100, 0, 500),
            inputVariables = cms.vstring("sumJetPt"),
        ),
        cms.PSet (
            name = cms.string("passTrigger"),
            title = cms.string("Pass Trigger; Trigger Flag"),
            binsX = cms.untracked.vdouble(4, -2, 2),
            inputVariables = cms.vstring("passTrigger"),
        ),
        cms.PSet (
            name = cms.string("triggerScalingFactor"),
            title = cms.string("Trigger Scaling Factor; Trigger Scaling Factor"),
            binsX = cms.untracked.vdouble(10, 0, 1),
            inputVariables = cms.vstring("triggerScalingFactor"),
        ),
        # Lepton efficiency scale factors (2016 data-taking conditions).
        cms.PSet (
            name = cms.string("electronReco2016"),
            title = cms.string("Electron Reco SF; Electron Reco SF"),
            binsX = cms.untracked.vdouble(100, 0.5, 1.5),
            inputVariables = cms.vstring("electronReco2016"),
        ),
        cms.PSet (
            name = cms.string("electronID2016Tight"),
            title = cms.string("Electron ID SF; Electron ID SF"),
            binsX = cms.untracked.vdouble(100, 0.5, 1.5),
            inputVariables = cms.vstring("electronID2016Tight"),
        ),
        cms.PSet (
            name = cms.string("muonReco2016"),
            title = cms.string("Muon Reco SF; Muon Reco SF"),
            binsX = cms.untracked.vdouble(100, 0.5, 1.5),
            inputVariables = cms.vstring("muonReco2016"),
        ),
        cms.PSet (
            name = cms.string("muonID2016Tight"),
            title = cms.string("Muon ID SF; Muon ID SF"),
            binsX = cms.untracked.vdouble(100, 0.5, 1.5),
            inputVariables = cms.vstring("muonID2016Tight"),
        ),
        cms.PSet (
            name = cms.string("muonIso2016Tight"),
            title = cms.string("Muon Iso SF; Muon Iso SF"),
            binsX = cms.untracked.vdouble(100, 0.5, 1.5),
            inputVariables = cms.vstring("muonIso2016Tight"),
        ),
        # Log10 axis, same reasoning as puScalingFactor above.
        cms.PSet (
            name = cms.string("lifetimeWeight"),
            title = cms.string("Lifetime Scaling Factor; Lifetime Scaling Factor"),
            binsX = cms.untracked.vdouble(200, -4, 4),
            inputVariables = cms.vstring("log10(lifetimeWeight)"),
        ),
        # Generated c#tau of the two stops, 100 um range enabled by default.
        cms.PSet (
            name = cms.string("ctauStop0_100um"),
            title = cms.string("Stop 0 c#tau;c#tau [cm]"),
            binsX = cms.untracked.vdouble(100, 0, 0.01),
            inputVariables = cms.vstring("cTau_1000006_0"),
        ),
        cms.PSet (
            name = cms.string("ctauStop1_100um"),
            title = cms.string("Stop 1 c#tau;c#tau [cm]"),
            binsX = cms.untracked.vdouble(100, 0, 0.01),
            inputVariables = cms.vstring("cTau_1000006_1"),
        ),
        # Alternative c#tau ranges (1 mm .. 100 cm) kept for reference, disabled:
        # cms.PSet (
        #     name = cms.string("ctauStop0_1mm"),
        #     title = cms.string("Stop 0 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 0.1),
        #     inputVariables = cms.vstring("cTau_1000006_0"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop1_1mm"),
        #     title = cms.string("Stop 1 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 0.1),
        #     inputVariables = cms.vstring("cTau_1000006_1"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop0_1cm"),
        #     title = cms.string("Stop 0 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 1),
        #     inputVariables = cms.vstring("cTau_1000006_0"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop1_1cm"),
        #     title = cms.string("Stop 1 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 1),
        #     inputVariables = cms.vstring("cTau_1000006_1"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop0_10cm"),
        #     title = cms.string("Stop 0 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 10),
        #     inputVariables = cms.vstring("cTau_1000006_0"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop1_10cm"),
        #     title = cms.string("Stop 1 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 10),
        #     inputVariables = cms.vstring("cTau_1000006_1"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop0_100cm"),
        #     title = cms.string("Stop 0 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 100),
        #     inputVariables = cms.vstring("cTau_1000006_0"),
        # ),
        # cms.PSet (
        #     name = cms.string("ctauStop1_100cm"),
        #     title = cms.string("Stop 1 c#tau;c#tau [cm]"),
        #     binsX = cms.untracked.vdouble(100, 0, 100),
        #     inputVariables = cms.vstring("cTau_1000006_1"),
        # ),
    )
)
| [
"nunezornelas.1@osu.edu"
] | nunezornelas.1@osu.edu |
3357a70b2ecd4f3212b41073723613285ecef369 | fd540b341a290a37c3a3a7e8fbffda93bfed2864 | /cloudops/devops/cijenkins/apps.py | 5fe331e54a5b8fb5ea9eb285f051e1e8d35867ab | [] | no_license | bopopescu/devops | 69d88b8209d744d4e722b482d1a7b1a5e95b0850 | 6a03805fc805f7604273e92f19f7fdea953451c2 | refs/heads/master | 2022-11-28T17:58:42.923890 | 2018-11-16T09:49:07 | 2018-11-16T09:49:07 | 281,549,091 | 0 | 0 | null | 2020-07-22T02:01:07 | 2020-07-22T02:01:06 | null | UTF-8 | Python | false | false | 98 | py | from django.apps import AppConfig
class CijenkinsConfig(AppConfig):
    """Django application configuration for the ``cijenkins`` app."""
    # Dotted module path of the application this config belongs to.
    name = 'cijenkins'
| [
"83098148@qq.com"
] | 83098148@qq.com |
714c38f965dd80a8f4f4b2f7f6cbe55f29e53778 | 04c5dd05a4571fce72f537f93eb2a58913a17cfe | /pypelco/setup.py | 569a84361cbc0e2ebc273496bde0ba64f27406df | [] | no_license | ska-sa/pypelco | 72402ead78149caf599d506898d91f87082f66df | 40b21936a2d78f9505e55effb1e6f915bf17c2e5 | refs/heads/master | 2021-01-01T06:50:07.697089 | 2014-07-30T08:28:44 | 2014-07-30T08:28:44 | 22,337,426 | 1 | 0 | null | 2014-07-30T08:28:46 | 2014-07-28T09:29:02 | Python | UTF-8 | Python | false | false | 178 | py | from distutils.core import setup
# Minimal distutils packaging metadata for the ``pypelco`` package.
setup(
    name='pypelco',
    version='0.1dev',
    packages=['pypelco'],
    license='MIT',
    # README.rst is read eagerly when this script runs (file handle is left
    # to the garbage collector -- acceptable for a short-lived setup script).
    long_description=open('README.rst').read(),
)
| [
"martin.slabber@gmail.com"
] | martin.slabber@gmail.com |
10c05665316e6ebc86e866f4e4e2e896452a2d8a | 8cd268c0b26a08cbf1f5f5c5b328f3295c75937d | /src/contact/views.py | 6320b86e4b87a1f08e241f2050874f9a540cee9b | [
"MIT"
] | permissive | rsm23/trade-motors | 3197d2dcdc60da780c91c4d84c8148db84f3f0f4 | 3336585b36cb8945e73ca38ef620a86fc7de0c27 | refs/heads/master | 2022-03-29T04:17:28.941317 | 2017-10-02T20:40:09 | 2017-10-02T20:40:09 | 105,381,566 | 0 | 0 | MIT | 2020-01-21T14:13:52 | 2017-09-30T15:59:53 | Python | UTF-8 | Python | false | false | 2,340 | py | from django.shortcuts import render_to_response, RequestContext
from vehicles.context_processor import global_context_processor
from .forms import ContactForm
from dynamic_preferences import global_preferences_registry
# import settings so send_mail can have access to email settings
from django.conf import settings
from django.core.mail import send_mail
from django.contrib import messages
def contact_page(request):
    """Render the contact page and e-mail any submitted enquiry.

    GET: renders an empty :class:`ContactForm`.
    POST: validates the form; on success the enquiry is e-mailed to the
    configured address, a success flash message is queued and a fresh form
    is rendered.  On validation failure the bound form (with errors) is
    re-rendered.

    NOTE(review): the template context is built from ``locals()``, so every
    local variable in this function (form, data, sender_*, ...) is exposed
    to the template -- renaming locals here can silently break templates.
    """
    form = ContactForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            data = form.cleaned_data
            sender_name = data['name']
            sender_email = data['email']
            sender_phone = data['phone']
            sender_message = data['query']
            email_subject = data['topic']
            # Plain-text e-mail body assembled from the submitted fields.
            message_to_send = (
                "Name: {0}\n"
                "Email: {1}\n"
                "Phone: {2}\n"
                "Enquiry:\n{3}").format(
                sender_name,
                sender_email,
                sender_phone,
                sender_message
            )
            # Instantiate a manager for the dynamic global preferences.
            global_preferences = global_preferences_registry.manager()
            # The recipient address is configurable at runtime via preferences.
            send_to_email = global_preferences.get(
                'general__default_email', None)
            # Fall back to the statically configured default when the
            # preference is unset (None or empty string).
            if not send_to_email:
                send_to_email = settings.DEFAULT_EMAIL_ADDRESS
            # send_mail(subject, message, from, to, fail_silently):
            # delivery failures are swallowed so the user still sees success.
            from_email = settings.SERVER_EMAIL \
                or 'info@globaltrademotors.com'
            send_mail(
                email_subject,
                message_to_send,
                from_email, [send_to_email],
                fail_silently=True
            )
            messages.success(
                request,
                "Thank you getting in touch! We'll get back to you shortly."
            )
            # Replace the bound form with a fresh one so the page is cleared.
            form = ContactForm()
    return render_to_response(
        "contact_page.html", locals(),
        context_instance=RequestContext(
            request, processors=[global_context_processor]
        )
    )
| [
"haroon@sitture.com"
] | haroon@sitture.com |
6c39c671da8ea030b974588fc017b2bac50a4db6 | feeeab5dc580786a35dbddcb99ddab85bc893668 | /managers/cc_help.py | 208cebfbd552ce1485297a7e2ef7c4c00e44949c | [] | no_license | idelfrides/POC_test_creditCard_type | 54dd3c5de02547802074e2acf50295463e92f17d | 10792ac8f3393a6e3d621d24a43eb794ec241a02 | refs/heads/master | 2020-08-08T01:58:54.997806 | 2019-10-08T14:43:23 | 2019-10-08T14:43:23 | 213,668,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py |
# from .help import get_digits
import re
from .help import get_digits
# Card-type codes: each brand gets a small integer constant; 0 means
# "unrecognised / generic".
CC_TYPE_GENERIC = 0
CC_TYPE_VISA = 1
CC_TYPE_AMEX = 2
CC_TYPE_DINERS = 3
CC_TYPE_DISCOVER = 4
CC_TYPE_MASTERCARD = 5
CC_TYPE_ELO = 6
CC_TYPE_JCB = 7
CC_TYPE_MIR = 8
CC_TYPE_UNIONPAY = 9

# Detection table: (code, {'title', 'regex'}) pairs matched IN ORDER against
# the card number's leading digits.  The order is load-bearing: Elo must be
# tested first because some of its BIN prefixes (431274, 451416) start with
# '4' and would otherwise be swallowed by Visa's broad ^4 pattern.
CC_TYPES = (
    (CC_TYPE_ELO, {
        'title': 'Elo',
        'regex': re.compile(r'^(?:431274|451416|5067|5090|627780|636297)')
    }),
    (CC_TYPE_VISA, {
        'title': 'Visa',
        'regex': re.compile(r'^4')
    }),
    (CC_TYPE_AMEX, {
        'title': 'American Express',
        'regex': re.compile(r'^3[47]')
    }),
    (CC_TYPE_DINERS, {
        'title': 'Diners Club',
        'regex': re.compile(r'^3(?:0[0-5]|095|[689])')
    }),
    (CC_TYPE_DISCOVER, {
        'title': 'Discover Card',
        'regex': re.compile(r'^6(?:011|4[4-9]|5)')
    }),
    (CC_TYPE_JCB, {
        'title': 'JCB',
        'regex': re.compile(r'^35(?:2[89]|[3-8])')
    }),
    (CC_TYPE_MIR, {
        'title': 'MIR',
        'regex': re.compile(r'^220[0-4]')
    }),
    (CC_TYPE_UNIONPAY, {
        'title': 'UnionPay',
        'regex': re.compile(r'^62')
    }),
    (CC_TYPE_MASTERCARD, {
        'title': 'MasterCard',
        'regex': re.compile(r'^(?:5[1-5]|222[1-9]|22[3-9]|2[3-6]|27[01]|2720)')
    }),
)

# Human-readable (code, label) pairs, e.g. for a Django ``choices`` argument.
CC_TYPE_CHOICES = (
    (CC_TYPE_GENERIC, 'Generic'),
    (CC_TYPE_VISA, 'Visa'),
    (CC_TYPE_AMEX, 'American Express'),
    (CC_TYPE_DINERS, 'Diners Club'),
    (CC_TYPE_DISCOVER, 'Discover Card'),
    (CC_TYPE_MASTERCARD, 'MasterCard'),
    (CC_TYPE_ELO, 'Elo'),
    (CC_TYPE_JCB, 'JCB'),
    (CC_TYPE_MIR, 'MIR'),
    (CC_TYPE_UNIONPAY, 'UnionPay'),
)
def get_type(number):
    """Map a card number onto one of the ``CC_TYPE_*`` codes.

    The number is first reduced to its digits, then tested against each
    brand's prefix pattern in ``CC_TYPES`` order; the first match wins,
    and ``CC_TYPE_GENERIC`` is returned when nothing matches.

    :type number: str
    :rtype: int
    """
    digits = get_digits(number)
    for code, spec in CC_TYPES:
        if spec['regex'].match(digits):
            return code
    return CC_TYPE_GENERIC
| [
"idelfridesjorgepapai@gmail.com"
] | idelfridesjorgepapai@gmail.com |
b603e746dc5f758e8ad5e6b8160c2676e856d555 | 6fbd56a12f8675c8ee6dd9ad23101a9c02d34387 | /setup.py | 9ee9310affb4d9f8071f556091f427c1ae42963a | [
"MIT"
] | permissive | matthiasdebernardini/topology | aa666940786dfdbc1fe1f732b73365d1eb596893 | 5cb7cb1e9a602874e7a325f95e50dfe110ca8efb | refs/heads/main | 2023-02-14T18:54:40.751005 | 2021-01-05T09:29:01 | 2021-01-05T09:29:01 | 328,508,598 | 0 | 0 | MIT | 2021-01-11T00:26:57 | 2021-01-11T00:26:56 | null | UTF-8 | Python | false | false | 846 | py | from setuptools import setup
import io
# PyPI long description is taken verbatim from the org-mode README.
with io.open('README.org', encoding='utf-8') as f:
    long_description = f.read()
# Install requirements: one per line in requirements.txt, blank lines skipped.
with io.open('requirements.txt', encoding='utf-8') as f:
    requirements = [r for r in f.read().split('\n') if len(r)]
setup(name='lntopo',
      version='0.1dev',
      description='Tools to work with lnresearch/topology datasets',
      long_description=long_description,
      long_description_content_type='text/x-org',
      url='http://github.com/lnresearch/topology',
      author='Christian Decker',
      author_email='decker.christian@gmail.com',
      license='MIT',
      packages=[],
      package_data={},
      scripts=[],
      zip_safe=True,
      # Console entry point: `lntopo-cli` dispatches to the click CLI group.
      entry_points = {
          'console_scripts': [
              'lntopo-cli = cli.__main__:cli',
          ],
      },
      install_requires=requirements
      )
| [
"decker.christian@gmail.com"
] | decker.christian@gmail.com |
31cb950512503b022a03ac1e83ea42c8f4d63f78 | bcd9f237407954a015e79dad6c1efd5da39b78e9 | /tools/div_free_base.py | 0b1ef82017bc12fcbc5bfd81d5ba603840e67d95 | [
"MIT"
] | permissive | royvelich/hamiltonian-interpolation | d317cff4277381b91877aa8eb25a3394715680f1 | d18c2f401feffc672998c5fa1d50c1de03dba902 | refs/heads/master | 2023-08-28T13:02:48.865232 | 2021-10-14T15:15:26 | 2021-10-14T15:15:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | import torch
import math
import numpy as np
from torch.nn import Parameter
import torch.sparse
from shape_utils import *
from param import device, device_cpu
import quaternion as quat
from base_tools import *
def compute_eigenvectors_3d(vert, k):
    """Evaluate a trigonometric divergence-free basis at the given vertices.

    For every vertex, the k^3 vector-valued sine/cosine modes over the three
    axes are built and combined through ``hat_matrix`` into 3 fields per
    mode, i.e. 3*k^3 basis vectors overall.

    :param vert: (n, 3) vertex positions; assumes coordinates in [0, 1]^3 so
        the sine factors vanish on the cube boundary -- TODO confirm.
    :param k: number of frequencies per axis.
    :return: (n, 3*k^3, 3) tensor of basis-field values.

    NOTE(review): ``hat_matrix`` is not defined in this module; presumably it
    comes from one of the star imports (shape_utils / base_tools) -- confirm.
    """
    # Frequencies 1..k, shaped (1, 1, k) for broadcasting against (n, 3, 1).
    kv = torch.arange(1, k+1, device=device, dtype=torch.float32).unsqueeze(0).unsqueeze(1)
    vert = vert.unsqueeze(2) * kv * math.pi
    vert_sin = torch.sin(vert)
    # Cosine factors carry the frequency weight kv (derivative-like term).
    vert_cos = torch.cos(vert) * kv
    # Per-axis factors placed on separate broadcast axes so that the outer
    # product below enumerates all (kx, ky, kz) combinations.
    sin_x = vert_sin[:, 0, :].unsqueeze(2).unsqueeze(3)
    sin_y = vert_sin[:, 1, :].unsqueeze(1).unsqueeze(3)
    sin_z = vert_sin[:, 2, :].unsqueeze(1).unsqueeze(2)
    cos_x = vert_cos[:, 0, :].unsqueeze(2).unsqueeze(3)
    cos_y = vert_cos[:, 1, :].unsqueeze(1).unsqueeze(3)
    cos_z = vert_cos[:, 2, :].unsqueeze(1).unsqueeze(2)
    # Three scalar components: cosine along one axis, sine along the others.
    phi = torch.cat(((cos_x * sin_y * sin_z).unsqueeze(1),
                     (sin_x * cos_y * sin_z).unsqueeze(1),
                     (sin_x * sin_y * cos_z).unsqueeze(1)), 1)
    # Mode normalisation 1/sqrt(k_i^2 + k_j^2), replicated for each component.
    scale_fac = torch.sqrt(kv.unsqueeze(2) ** 2 + kv.unsqueeze(3) ** 2) ** (-1)
    scale_fac = scale_fac.transpose(1, 3).unsqueeze(4)
    scale_fac = torch.cat((scale_fac.unsqueeze(1).repeat_interleave(k, 1),
                           scale_fac.unsqueeze(2).repeat_interleave(k, 2),
                           scale_fac.unsqueeze(3).repeat_interleave(k, 3)), 5)
    # Mix the three components through hat_matrix; the resulting fields are
    # the divergence-free combinations of the raw modes.
    phi = phi.transpose(1, 4).unsqueeze(5).unsqueeze(6)
    phi = torch.sum(hat_matrix.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) * phi, 4)
    phi = phi * scale_fac
    # Flatten the (kx, ky, kz, field) axes into one basis dimension.
    phi = phi.transpose(1, 4).reshape(vert.shape[0], 3, -1).transpose(1, 2)
    return phi
def tensor_prod_velocity(phi, a):
    """Evaluate the field spanned by basis ``phi`` with coefficients ``a``.

    ``phi`` has shape (n, m, 3) and ``a`` shape (m,); the result is the
    (n, 3) per-vertex velocity (squeezed, so an n == 1 batch collapses to
    shape (3,), matching the original bmm-based implementation).
    """
    return torch.einsum('nmd,m->nd', phi, a).squeeze()
def div_free_trans(velo_t, vert_t, k):
    """Project per-vertex velocities onto the divergence-free basis.

    Solves the damped least-squares system
    (1e-3*I + sum_d Phi_d^T Phi_d) xi = Phi^T v for the basis coefficients,
    then re-evaluates the fitted (divergence-free) velocity field.

    :param velo_t: (n, 3) velocities at the vertices.
    :param vert_t: (n, 3) vertex positions.
    :param k: frequencies per axis; the basis has ``3 * k**3`` elements.
    :return: tuple (projected velocities, coefficient tensor ``xi_d``).

    NOTE(review): ``my_eye`` comes from a star import (base_tools?) --
    confirm.  ``torch.solve`` was removed in PyTorch 1.13; on modern torch
    this line must become ``xi_d = torch.linalg.solve(M, xi_d)``.
    """
    n_feat = 3 * k ** 3
    phi = compute_eigenvectors_3d(vert_t, k)
    # Gram matrix with Tikhonov damping (1e-3) for numerical stability.
    M = my_eye(n_feat) * 1e-3
    for d in range(3):
        M = M + torch.mm(phi[..., d].transpose(0, 1), phi[..., d])
    M = M.unsqueeze(0)
    phi = phi.permute([2, 0, 1])
    # Right-hand side Phi^T v, batched over the 3 spatial components.
    xi_d = torch.bmm(phi.transpose(1, 2), velo_t.unsqueeze(2).permute([1, 0, 2]))
    xi_d, _ = torch.solve(xi_d, M)
    # Evaluate the fitted field back at the vertices.
    velo_t = torch.bmm(phi, xi_d)
    velo_t = velo_t.permute([1, 2, 0]).squeeze()
    return velo_t, xi_d
def apply_field(vert_t, xi_d, k):
    """Evaluate the divergence-free field with coefficients ``xi_d`` at the
    vertex positions ``vert_t`` (inverse of the fit in ``div_free_trans``)."""
    basis = compute_eigenvectors_3d(vert_t, k).permute([2, 0, 1])
    velocities = torch.bmm(basis, xi_d).permute([1, 2, 0])
    return velocities.squeeze()
class Rigid(torch.nn.Module):
    """Learnable rigid-body motion (rotation + translation) of a point set.

    The rotation is stored as an unnormalised quaternion (renormalised on
    every forward pass) and applied about the point (0.5, 0.5, 0.5), i.e.
    the shapes are assumed to live in the unit cube [0, 1]^3 -- TODO confirm.
    """
    def __init__(self):
        super().__init__()
        # Translation starts at zero; rotation at the identity quaternion.
        self.translation = Parameter(torch.zeros([3], dtype=torch.float32, device=device), requires_grad=True)
        self.rotation = Parameter(torch.as_tensor([1, 0, 0, 0], dtype=torch.float32, device=device), requires_grad=True)
    def forward(self, vert):
        # Rotate about the cube centre, then translate and shift back.
        vert = quat.qrot((self.rotation / (self.rotation.norm())).repeat(vert.shape[0], 1), vert - 0.5)
        vert = vert + self.translation.unsqueeze(0) + 0.5
        return vert
    def detach_(self):
        # Freeze both parameters in place (stops further gradient updates).
        self.translation.requires_grad_(False)
        self.rotation.requires_grad_(False)
    def detach(self):
        """Freeze the parameters and return ``self`` (chainable variant)."""
        self.detach_()
        return self
if __name__ == "__main__":
    # Smoke-test entry point: the module is otherwise meant to be imported.
    print("main of div_free_base.py")
| [
"marvin.eisenberger@in.tum.de"
] | marvin.eisenberger@in.tum.de |
bc9d45300161229d609bb0f7aa32108c35c8b740 | f361bff53d4f572cb84f6c00581ee9f5a9b1ef77 | /todo_project/todo_app/admin.py | 8741c318586c79c849857178baacbda72f582ad5 | [] | no_license | AjaY-A-J/todo_app | cae1b42645e4bb6603f32a0b89e06acde82a0f9e | 2e2ff222661c8985108d8b89b7354fdcb7886c59 | refs/heads/master | 2023-06-11T06:36:32.786600 | 2021-07-03T07:26:51 | 2021-07-03T07:26:51 | 382,551,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from todo_app.models import Task
# Expose the Task model through the Django admin site.
admin.site.register(Task)
"ajayj11021998@gmail.com"
] | ajayj11021998@gmail.com |
97d4161cbd5fcee8969175a8304d2910d0f49588 | 3d954220e07c76a081238549715108916f41d9ec | /Saved/Project_prev.py | fd9b2d961257ff2817643cf1d3739cc0b6627ee8 | [] | no_license | Hotchmoney/NeuralNets-Crime | 3c530dceb2d61ab435fcaf8726f5edbbaad5e095 | 793f786fa8b6904dd860e632f899a668b81c56c1 | refs/heads/master | 2021-05-01T14:31:52.319130 | 2018-02-11T05:18:09 | 2018-02-11T05:18:09 | 121,087,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# Fix the RNG seed so the run is reproducible.
numpy.random.seed(7)
dataframe = numpy.loadtxt("Kingston_Police_Formatted.csv",delimiter=",")
# 67/33 chronological train/test split (no shuffling: time-series data).
train_size = int(len(dataframe)*0.67)
test_size = int(len(dataframe)-train_size)
train, test=dataframe[0:train_size,:], dataframe[train_size:len(dataframe),:]
print(len(train),len(test))
def create_database(dataset, look_back=1):
    """Turn a (time, features) series into supervised-learning pairs.

    Each sample ``X[i]`` is the window ``dataset[i : i + look_back]`` and the
    matching target ``Y[i]`` is the row immediately after the window,
    ``dataset[i + look_back]``.

    Fix: the original loop ran ``range(len(dataset) - look_back - 1)`` and
    therefore silently dropped the last usable window; ``len(dataset) -
    look_back`` uses every sample.

    :param dataset: 2-D numpy array of shape (n_steps, n_features).
    :param look_back: window length in time steps.
    :return: tuple (X, Y) with X of shape (n, look_back, n_features) and
        Y of shape (n, n_features), where n = len(dataset) - look_back.
    """
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i:(i + look_back), :])
        dataY.append(dataset[i + look_back, :])
    return numpy.array(dataX), numpy.array(dataY)
look_back = 1
trainX, trainY = create_database(train,look_back)
testX, testY = create_database(test,look_back)
# Reshape to (samples, time steps, features) as expected by Keras' LSTM;
# the dataset has 9 feature columns.
trainX = numpy.reshape(trainX,(trainX.shape[0],1,9))
testX = numpy.reshape(testX,(testX.shape[0],1,9))
print(trainX[0])
# Model definition/training below is intentionally disabled; the trained
# network is persisted to my_model.h5 and can be reloaded with load_model.
'''
model=Sequential()
model.add(LSTM(16,input_dim=look_back*4+5))
model.add(Dense(9))
model.compile(loss='mean_squared_error',optimizer='adam')
model.fit(trainX, trainY, nb_epoch=100,batch_size=10, verbose=2)
model.save('my_model.h5')
'''
| [
"hotchkissrnd@gmail.com"
] | hotchkissrnd@gmail.com |
feb5a545428069471023e52d8880ae8b2fad2321 | 4cd8330ec6380adc485a902fd72ecc0b4dffac11 | /autodot/conf/settings.py | 9aeb74910ada14d699fe80329dfdc1dd1f00584a | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"JSON"
] | permissive | spurfly/django-autodot | 9b04ac561bc39a64199de3c79c9c8fcc5a31f78f | 3be875812a11436594671b28138c518639c719df | refs/heads/master | 2021-01-18T15:15:08.506831 | 2011-05-05T11:48:03 | 2011-05-05T11:48:03 | 1,575,265 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,521 | py | from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
MEDIA_URL = getattr(settings, 'COMPRESS_URL', settings.MEDIA_URL)
if not MEDIA_URL.endswith('/'):
raise ImproperlyConfigured(
'The MEDIA_URL and COMPRESS_URL settings must have a trailing slash.')
MEDIA_ROOT = getattr(settings, 'COMPRESS_ROOT', settings.MEDIA_ROOT)
OUTPUT_DIR = getattr(settings, 'COMPRESS_OUTPUT_DIR', 'CACHE')
STORAGE = getattr(settings, 'COMPRESS_STORAGE', 'compressor.storage.CompressorFileStorage')
# rebuilds the cache every 30 days if nothing has changed.
REBUILD_TIMEOUT = getattr(settings, 'COMPRESS_REBUILD_TIMEOUT', 2592000) # 30 days
# the upper bound on how long any compression should take to be generated
# (used against dog piling, should be a lot smaller than REBUILD_TIMEOUT
MINT_DELAY = getattr(settings, 'COMPRESS_MINT_DELAY', 30) # 30 seconds
# check for file changes only after a delay (in seconds, disabled by default)
MTIME_DELAY = getattr(settings, 'COMPRESS_MTIME_DELAY', None)
# Allows changing verbosity from the settings.
VERBOSE = getattr(settings, "COMPRESS_VERBOSE", False)
# the cache backend to use
CACHE_BACKEND = getattr(settings, 'COMPRESS_CACHE_BACKEND', None)
if CACHE_BACKEND is None:
# If we are on Django 1.3 AND using the new CACHES setting...
if getattr(settings, "CACHES", None):
CACHE_BACKEND = "default"
else:
# fallback for people still using the old CACHE_BACKEND setting
CACHE_BACKEND = settings.CACHE_BACKEND
| [
"Jameson.Quinn+pyjamas@gmail.com"
] | Jameson.Quinn+pyjamas@gmail.com |
2158e8067cd9d63acebc081e566af22f4a3499f8 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/network/v20191101/connection_monitor.py | bfaf57c8f178117cd1274495f9a585d412064692 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,417 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ConnectionMonitorArgs', 'ConnectionMonitor']
@pulumi.input_type
class ConnectionMonitorArgs:
def __init__(__self__, *,
network_watcher_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input['ConnectionMonitorDestinationArgs']] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
notes: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]]] = None,
source: Optional[pulumi.Input['ConnectionMonitorSourceArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
test_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]]] = None,
test_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]]] = None):
"""
The set of arguments for constructing a ConnectionMonitor resource.
:param pulumi.Input[str] network_watcher_name: The name of the Network Watcher resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing Network Watcher.
:param pulumi.Input[bool] auto_start: Determines if the connection monitor will start automatically once created.
:param pulumi.Input[str] connection_monitor_name: The name of the connection monitor.
:param pulumi.Input['ConnectionMonitorDestinationArgs'] destination: Describes the destination of connection monitor.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]] endpoints: List of connection monitor endpoints.
:param pulumi.Input[str] location: Connection monitor location.
:param pulumi.Input[int] monitoring_interval_in_seconds: Monitoring interval in seconds.
:param pulumi.Input[str] notes: Optional notes to be associated with the connection monitor.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]] outputs: List of connection monitor outputs.
:param pulumi.Input['ConnectionMonitorSourceArgs'] source: Describes the source of connection monitor.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Connection monitor tags.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]] test_configurations: List of connection monitor test configurations.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]] test_groups: List of connection monitor test groups.
"""
pulumi.set(__self__, "network_watcher_name", network_watcher_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if auto_start is None:
auto_start = True
if auto_start is not None:
pulumi.set(__self__, "auto_start", auto_start)
if connection_monitor_name is not None:
pulumi.set(__self__, "connection_monitor_name", connection_monitor_name)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if location is not None:
pulumi.set(__self__, "location", location)
if monitoring_interval_in_seconds is None:
monitoring_interval_in_seconds = 60
if monitoring_interval_in_seconds is not None:
pulumi.set(__self__, "monitoring_interval_in_seconds", monitoring_interval_in_seconds)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if outputs is not None:
pulumi.set(__self__, "outputs", outputs)
if source is not None:
pulumi.set(__self__, "source", source)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if test_configurations is not None:
pulumi.set(__self__, "test_configurations", test_configurations)
if test_groups is not None:
pulumi.set(__self__, "test_groups", test_groups)
@property
@pulumi.getter(name="networkWatcherName")
def network_watcher_name(self) -> pulumi.Input[str]:
"""
The name of the Network Watcher resource.
"""
return pulumi.get(self, "network_watcher_name")
@network_watcher_name.setter
def network_watcher_name(self, value: pulumi.Input[str]):
pulumi.set(self, "network_watcher_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group containing Network Watcher.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="autoStart")
def auto_start(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if the connection monitor will start automatically once created.
"""
return pulumi.get(self, "auto_start")
@auto_start.setter
def auto_start(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_start", value)
@property
@pulumi.getter(name="connectionMonitorName")
def connection_monitor_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the connection monitor.
"""
return pulumi.get(self, "connection_monitor_name")
@connection_monitor_name.setter
def connection_monitor_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_monitor_name", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input['ConnectionMonitorDestinationArgs']]:
"""
Describes the destination of connection monitor.
"""
return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input['ConnectionMonitorDestinationArgs']]):
pulumi.set(self, "destination", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]]]:
"""
List of connection monitor endpoints.
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Connection monitor location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="monitoringIntervalInSeconds")
def monitoring_interval_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Monitoring interval in seconds.
"""
return pulumi.get(self, "monitoring_interval_in_seconds")
@monitoring_interval_in_seconds.setter
def monitoring_interval_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "monitoring_interval_in_seconds", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
Optional notes to be associated with the connection monitor.
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter
def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]]]:
"""
List of connection monitor outputs.
"""
return pulumi.get(self, "outputs")
@outputs.setter
def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]]]):
pulumi.set(self, "outputs", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['ConnectionMonitorSourceArgs']]:
"""
Describes the source of connection monitor.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['ConnectionMonitorSourceArgs']]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Connection monitor tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="testConfigurations")
def test_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]]]:
"""
List of connection monitor test configurations.
"""
return pulumi.get(self, "test_configurations")
@test_configurations.setter
def test_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]]]):
pulumi.set(self, "test_configurations", value)
@property
@pulumi.getter(name="testGroups")
def test_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]]]:
"""
List of connection monitor test groups.
"""
return pulumi.get(self, "test_groups")
@test_groups.setter
def test_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]]]):
pulumi.set(self, "test_groups", value)
class ConnectionMonitor(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorEndpointArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorOutputArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
test_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestConfigurationArgs']]]]] = None,
test_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestGroupArgs']]]]] = None,
__props__=None):
"""
Information about the connection monitor.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_start: Determines if the connection monitor will start automatically once created.
:param pulumi.Input[str] connection_monitor_name: The name of the connection monitor.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']] destination: Describes the destination of connection monitor.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorEndpointArgs']]]] endpoints: List of connection monitor endpoints.
:param pulumi.Input[str] location: Connection monitor location.
:param pulumi.Input[int] monitoring_interval_in_seconds: Monitoring interval in seconds.
:param pulumi.Input[str] network_watcher_name: The name of the Network Watcher resource.
:param pulumi.Input[str] notes: Optional notes to be associated with the connection monitor.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorOutputArgs']]]] outputs: List of connection monitor outputs.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing Network Watcher.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']] source: Describes the source of connection monitor.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Connection monitor tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestConfigurationArgs']]]] test_configurations: List of connection monitor test configurations.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestGroupArgs']]]] test_groups: List of connection monitor test groups.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ConnectionMonitorArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Information about the connection monitor.
:param str resource_name: The name of the resource.
:param ConnectionMonitorArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ConnectionMonitorArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorEndpointArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorOutputArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
test_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestConfigurationArgs']]]]] = None,
test_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestGroupArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ConnectionMonitorArgs.__new__(ConnectionMonitorArgs)
if auto_start is None:
auto_start = True
__props__.__dict__["auto_start"] = auto_start
__props__.__dict__["connection_monitor_name"] = connection_monitor_name
__props__.__dict__["destination"] = destination
__props__.__dict__["endpoints"] = endpoints
__props__.__dict__["location"] = location
if monitoring_interval_in_seconds is None:
monitoring_interval_in_seconds = 60
__props__.__dict__["monitoring_interval_in_seconds"] = monitoring_interval_in_seconds
if network_watcher_name is None and not opts.urn:
raise TypeError("Missing required property 'network_watcher_name'")
__props__.__dict__["network_watcher_name"] = network_watcher_name
__props__.__dict__["notes"] = notes
__props__.__dict__["outputs"] = outputs
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["source"] = source
__props__.__dict__["tags"] = tags
__props__.__dict__["test_configurations"] = test_configurations
__props__.__dict__["test_groups"] = test_groups
__props__.__dict__["connection_monitor_type"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["start_time"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20191101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20171001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20171101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20181001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20181101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20181201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ConnectionMonitor"), 
pulumi.Alias(type_="azure-native:network/v20190401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190901:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20191201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200301:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200501:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20201101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20210201:ConnectionMonitor"), 
pulumi.Alias(type_="azure-nextgen:network/v20210201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20210301:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20210301:ConnectionMonitor")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ConnectionMonitor, __self__).__init__(
'azure-native:network/v20191101:ConnectionMonitor',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ConnectionMonitor':
"""
Get an existing ConnectionMonitor resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ConnectionMonitorArgs.__new__(ConnectionMonitorArgs)
__props__.__dict__["auto_start"] = None
__props__.__dict__["connection_monitor_type"] = None
__props__.__dict__["destination"] = None
__props__.__dict__["endpoints"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["monitoring_interval_in_seconds"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["notes"] = None
__props__.__dict__["outputs"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source"] = None
__props__.__dict__["start_time"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["test_configurations"] = None
__props__.__dict__["test_groups"] = None
__props__.__dict__["type"] = None
return ConnectionMonitor(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoStart")
def auto_start(self) -> pulumi.Output[Optional[bool]]:
"""
Determines if the connection monitor will start automatically once created.
"""
return pulumi.get(self, "auto_start")
@property
@pulumi.getter(name="connectionMonitorType")
def connection_monitor_type(self) -> pulumi.Output[str]:
"""
Type of connection monitor.
"""
return pulumi.get(self, "connection_monitor_type")
@property
@pulumi.getter
def destination(self) -> pulumi.Output[Optional['outputs.ConnectionMonitorDestinationResponse']]:
"""
Describes the destination of connection monitor.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter
def endpoints(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorEndpointResponse']]]:
"""
List of connection monitor endpoints.
"""
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Connection monitor location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="monitoringIntervalInSeconds")
def monitoring_interval_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Monitoring interval in seconds.
"""
return pulumi.get(self, "monitoring_interval_in_seconds")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
"""
The monitoring status of the connection monitor.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the connection monitor.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
"""
Optional notes to be associated with the connection monitor.
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def outputs(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorOutputResponse']]]:
"""
List of connection monitor outputs.
"""
return pulumi.get(self, "outputs")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the connection monitor.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional['outputs.ConnectionMonitorSourceResponse']]:
"""
Describes the source of connection monitor.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[str]:
"""
The date and time when the connection monitor was started.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Connection monitor tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="testConfigurations")
def test_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorTestConfigurationResponse']]]:
"""
List of connection monitor test configurations.
"""
return pulumi.get(self, "test_configurations")
@property
@pulumi.getter(name="testGroups")
def test_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorTestGroupResponse']]]:
"""
List of connection monitor test groups.
"""
return pulumi.get(self, "test_groups")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Connection monitor type.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
a507d124f2b368eb5c8a2917c26182464ee82d80 | 0f460d915073394419edae4fd474b8cd0f3fbb20 | /ee/clickhouse/views/element.py | b446df5e51a83798bb68df723686a4a031eb048f | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | adamb70/posthog | a74a209e404ea1a4971a693b4e245f20b9c26685 | 54ae8f0e70092f86b4aefbd93b56680dbd28b1c5 | refs/heads/master | 2023-01-24T10:59:32.608200 | 2020-11-18T13:42:26 | 2020-11-18T13:42:26 | 311,673,788 | 0 | 0 | NOASSERTION | 2020-11-10T13:53:18 | 2020-11-10T13:53:18 | null | UTF-8 | Python | false | false | 2,479 | py | from rest_framework import authentication, request, response, serializers, viewsets
from rest_framework.decorators import action
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.element import chain_to_elements
from ee.clickhouse.models.property import parse_prop_clauses
from ee.clickhouse.queries.util import parse_timestamps
from ee.clickhouse.sql.element import GET_ELEMENTS, GET_VALUES
from posthog.api.element import ElementSerializer, ElementViewSet
from posthog.models.filter import Filter
class ClickhouseElement(ElementViewSet):
@action(methods=["GET"], detail=False)
def stats(self, request: request.Request) -> response.Response:
filter = Filter(request=request)
date_from, date_to = parse_timestamps(filter)
prop_filters, prop_filter_params = parse_prop_clauses(filter.properties, request.user.team.pk)
result = sync_execute(
GET_ELEMENTS.format(date_from=date_from, date_to=date_to, query=prop_filters),
{"team_id": request.user.team.id, **prop_filter_params},
)
return response.Response(
[
{
"count": elements[1],
"hash": None,
"elements": [ElementSerializer(element).data for element in chain_to_elements(elements[0])],
}
for elements in result
]
)
@action(methods=["GET"], detail=False)
def values(self, request: request.Request) -> response.Response:
key = request.GET.get("key")
value = request.GET.get("value")
select_regex = '[:|"]{}="(.*?)"'.format(key)
# Make sure key exists, otherwise could lead to sql injection lower down
if key not in self.serializer_class.Meta.fields:
return response.Response([])
if key == "tag_name":
select_regex = "^([-_a-zA-Z0-9]*?)[\.|:]"
filter_regex = select_regex
if value:
filter_regex = "^([-_a-zA-Z0-9]*?{}[-_a-zA-Z0-9]*?)[\.|:]".format(value)
else:
if value:
filter_regex = '[:|"]{}=".*?{}.*?"'.format(key, value)
else:
filter_regex = select_regex
result = sync_execute(
GET_VALUES.format(), {"team_id": request.user.team.id, "regex": select_regex, "filter_regex": filter_regex}
)
return response.Response([{"name": value[0]} for value in result])
| [
"noreply@github.com"
] | adamb70.noreply@github.com |
65dee06c04a9d05d64c166c0b7e5e83898d94e9c | a78e2e6aeea1968297b2c221906edc2aa661c820 | /example/pop_op/server.py | dc1024cee5c959c07cc132d70c56a904c11e2301 | [] | no_license | its-dirg/proof-of-possession | 09e475e43fe42734e8c4962f302341661be76e3a | da8b69ec9cfabab48178105ce89773094f01dd29 | refs/heads/master | 2020-04-01T22:00:22.119397 | 2015-09-29T12:27:49 | 2015-09-29T12:27:49 | 36,801,142 | 0 | 1 | null | 2015-08-21T10:49:00 | 2015-06-03T12:07:55 | JavaScript | UTF-8 | Python | false | false | 6,663 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,import-error,no-name-in-module
import json
import ssl
import sys
import logging
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.multi_auth import AuthnIndexedEndpointWrapper
from oic.utils.authn.user import UsernamePasswordMako
from oic.utils import shelve_wrapper
from oic.utils.authn.client import verify_client
from oic.utils.authz import AuthzHandling
from oic.utils.keyio import keyjar_init
from oic.utils.userinfo import UserInfo
from oic.utils.userinfo.aa_info import AaUserInfo
from mako.lookup import TemplateLookup
from jwkest import as_unicode
from example.pop_op import APP
from flask.ext.oidc.op.op_blueprint import OIDCOPBlueprint, make_auth_verify
from pop.PoPProvider import PoPProvider
__author__ = 'rohe0002'
LOGGER = logging.getLogger("")
LOGFILE_NAME = 'oc.log'
HDLR = logging.FileHandler(LOGFILE_NAME)
BASE_FORMATTER = logging.Formatter(
"%(asctime)s %(name)s:%(levelname)s %(message)s")
CPC = ('%(asctime)s %(name)s:%(levelname)s '
'[%(client)s,%(path)s,%(cid)s] %(message)s')
CPC_FORMATTER = logging.Formatter(CPC)
HDLR.setFormatter(BASE_FORMATTER)
LOGGER.addHandler(HDLR)
LOGGER.setLevel(logging.DEBUG)
URLMAP = {}
NAME = "pyoic"
OAS = None
PASSWD = {
"diana": "krall",
"babs": "howes",
"upper": "crust"
}
# ----------------------------------------------------------------------------
ROOT = './'
LOOKUP = TemplateLookup(directories=[ROOT + 'templates', ROOT + 'htdocs'],
module_directory=ROOT + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
# ----------------------------------------------------------------------------
def create_authn_broker(config, lookup, password):
ac = AuthnBroker()
end_points = config.AUTHENTICATION["UserPassword"]["END_POINTS"]
full_end_point_paths = ["%s%s" % (config.issuer, ep) for ep in end_points]
username_password_authn = UsernamePasswordMako(
None, "login.mako", lookup, password, "%sauthorization" % config.issuer,
None, full_end_point_paths)
for authkey, value in config.AUTHENTICATION.items():
authn = None
if "UserPassword" == authkey:
PASSWORD_END_POINT_INDEX = 0
end_point = config.AUTHENTICATION[authkey]["END_POINTS"][
PASSWORD_END_POINT_INDEX]
authn = AuthnIndexedEndpointWrapper(username_password_authn,
PASSWORD_END_POINT_INDEX)
APP.add_url_rule("/{}".format(end_point), end_point, make_auth_verify(authn.verify),
methods=['GET', 'POST'])
if "JavascriptLogin" == authkey:
pass
if "SAML" == authkey:
pass
if "SamlPass" == authkey:
pass
if "JavascriptPass" == authkey:
pass
if authn is not None:
ac.add(config.AUTHENTICATION[authkey]["ACR"], authn,
config.AUTHENTICATION[authkey]["WEIGHT"],
"")
return ac
if __name__ == '__main__':
import argparse
import importlib
from oic.utils.sdb import SessionDB
PARSER = argparse.ArgumentParser()
PARSER.add_argument('-v', dest='verbose', action='store_true')
PARSER.add_argument('-d', dest='debug', action='store_true')
PARSER.add_argument('-p', dest='port', default=80, type=int)
PARSER.add_argument('-k', dest='insecure', action='store_true')
PARSER.add_argument(
'-c', dest='capabilities',
help="A file containing a JSON representation of the capabilities")
PARSER.add_argument('-b', dest='baseurl', help="base url of the OP")
PARSER.add_argument(dest="config")
ARGS = PARSER.parse_args()
# Client data base
CDB = shelve_wrapper.open("client_db")
sys.path.insert(0, ".")
CONFIG = importlib.import_module(ARGS.config)
if ARGS.baseurl:
CONFIG.baseurl = ARGS.baseurl
CONFIG.issuer = CONFIG.issuer.format(base=CONFIG.baseurl, port=ARGS.port)
CONFIG.SERVICE_URL = CONFIG.SERVICE_URL.format(issuer=CONFIG.issuer)
# dealing with authorization
AUTHZ = AuthzHandling()
KWARGS = {
"template_lookup": LOOKUP,
"template": {"form_post": "form_response.mako"},
}
# Should I care about verifying the certificates used by other entities
if ARGS.insecure:
KWARGS["verify_ssl"] = False
else:
KWARGS["verify_ssl"] = True
if ARGS.capabilities:
KWARGS["capabilities"] = json.loads(open(ARGS.capabilities).read())
else:
pass
AC = create_authn_broker(CONFIG, LOOKUP, PASSWD)
OAS = PoPProvider(CONFIG.issuer, SessionDB(CONFIG.baseurl), CDB, AC, None,
AUTHZ, verify_client, CONFIG.SYM_KEY, **KWARGS)
OAS.baseurl = CONFIG.issuer
if CONFIG.USERINFO == "SIMPLE":
# User info is a simple dictionary in this case statically defined in
# the configuration file
OAS.userinfo = UserInfo(CONFIG.USERDB)
elif CONFIG.USERINFO == "SAML":
OAS.userinfo = UserInfo(CONFIG.SAML)
elif CONFIG.USERINFO == "AA":
OAS.userinfo = AaUserInfo(CONFIG.SP_CONFIG, CONFIG.issuer, CONFIG.SAML)
else:
raise Exception("Unsupported userinfo source")
try:
OAS.cookie_ttl = CONFIG.COOKIETTL
except AttributeError:
pass
try:
OAS.cookie_name = CONFIG.COOKIENAME
except AttributeError:
pass
if ARGS.debug:
OAS.debug = True
try:
JWKS = keyjar_init(OAS, CONFIG.keys, kid_template="op%d")
except Exception as err:
LOGGER.error("Key setup failed: %s" % err)
OAS.key_setup("static", sig={"format": "jwk", "alg": "rsa"})
else:
NEW_NAME = "static/jwks.json"
f = open(NEW_NAME, "w")
for key in JWKS["keys"]:
for k in key.keys():
key[k] = as_unicode(key[k])
f.write(json.dumps(JWKS))
f.close()
OAS.jwks_uri.append("%s%s" % (OAS.baseurl, NEW_NAME))
for b in OAS.keyjar[""]:
LOGGER.info("OC3 server keys: %s" % b)
OP_BLUEPRINT = OIDCOPBlueprint(OAS)
APP.register_blueprint(OP_BLUEPRINT)
HTTPS = ""
CONTEXT = None
if CONFIG.SERVICE_URL.startswith("https"):
HTTPS = "using HTTPS"
CONTEXT = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
CONTEXT.load_cert_chain(CONFIG.SERVER_CERT, CONFIG.SERVER_KEY)
print("OC server starting listening on port:%s %s" % (ARGS.port, HTTPS))
APP.run(ssl_context=CONTEXT, host='0.0.0.0', port=ARGS.port, debug=True)
| [
"mathias.hedstrom.mh@gmail.com"
] | mathias.hedstrom.mh@gmail.com |
01c6775be2db39c9556c8c55c3d1e8c38e94c04b | 77721977db9f9095b46459dbd5000cfd87a1f67f | /tests/app/migrations/0001_initial.py | 5c32401e9dfdf9fcd2fc6da97f7a9183c9c383d8 | [
"MIT"
] | permissive | saruba/django-backoffice-extensions | f4a1e82b4684621b61dd16b00a1c688c389a7ac6 | 107cf92d4152587bfc471fc742baf7c3b5e616a5 | refs/heads/master | 2022-11-29T10:25:56.120566 | 2020-08-19T07:40:30 | 2020-08-19T08:01:26 | 280,137,144 | 1 | 0 | MIT | 2020-07-16T11:33:40 | 2020-07-16T11:33:39 | null | UTF-8 | Python | false | false | 1,441 | py | # Generated by Django 3.0.7 on 2020-06-29 13:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Stuff",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(
choices=[
("idle", "Idle"),
("active", "Active"),
("error", "Error"),
],
default="idle",
max_length=32,
),
),
(
"owner",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ["id"],},
),
]
| [
"hey@marcosgabarda.com"
] | hey@marcosgabarda.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.