blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce4db0d1eefa29d48921b8c480811378e92db97a | b943d3c32cac2b4d9ab85753c0a611688fba82ad | /resume_parser/parser_app/views.py | 3379793d2e341273319f0dea8815914b786cd1c5 | [
"MIT"
] | permissive | ashokraman/ResumeParser | 787e0d5fdc560c35630c1a78411e28725812a737 | 2238b7f3ea955f04cf5ccda619a15f62fcf066e3 | refs/heads/master | 2020-06-20T13:16:49.115304 | 2019-07-04T05:38:26 | 2019-07-04T05:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,190 | py | from django.shortcuts import render, redirect
from resume_parser import resume_parser
from .models import UserDetails, Competencies, MeasurableResults, Resume, ResumeDetails, UploadResumeModelForm
from django.contrib.auth.models import User
from django.contrib import messages
from django.conf import settings
from django.db import IntegrityError
from django.http import HttpResponse, FileResponse, Http404, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .serializers import UserDetailsSerializer, CompetenciesSerializer, MeasurableResultsSerializer, ResumeSerializer, ResumeDetailsSerializer
import os
import requests
def homepage(request):
    """Handle the resume-upload landing page.

    GET: render the empty upload form.
    POST: wipe all previously stored data for the hard-coded demo user
    (id=1), save and parse every uploaded resume, persist the extracted
    entities (user details, competencies, measurable results, resume
    metadata) and render the results page.

    NOTE(review): everything is tied to User id=1, so the app is
    effectively single-user; `data` used after the loop refers to the
    *last* parsed resume only.
    """
    if request.method == 'POST':
        # Start from a clean slate: drop everything stored for the demo user.
        user = User.objects.get(id=1)
        UserDetails.objects.filter(user=user).delete()
        Competencies.objects.filter(user=user).delete()
        MeasurableResults.objects.filter(user=user).delete()
        Resume.objects.filter(user=user).delete()
        ResumeDetails.objects.filter(resume__user=user).delete()
        file_form = UploadResumeModelForm(request.POST, request.FILES)
        files = request.FILES.getlist('resume')
        if file_form.is_valid():
            for file in files:
                try:
                    user = User.objects.get(id=1)
                    # saving the file
                    resume = Resume(user=user, resume=file)
                    resume.save()
                    # extracting resume entities from the stored file on disk
                    parser = resume_parser.ResumeParser(os.path.join(settings.MEDIA_ROOT, resume.resume.name))
                    data = parser.get_extracted_data()
                    # User Details extracted from the parsed resume
                    user_details = UserDetails()
                    user_details.user = user
                    user_details.name = data.get('name')
                    user_details.email = data.get('email')
                    user_details.mobile_number = data.get('mobile_number')
                    user_details.skills = ', '.join(data.get('skills'))
                    user_details.years_of_exp = data.get('total_experience')
                    user_details.save()
                    # One Competencies row per extracted competency.
                    for comp in data.get('competencies'):
                        competencies = Competencies()
                        competencies.user = user
                        competencies.competency = comp
                        competencies.save()
                    # One MeasurableResults row per extracted result.
                    for mr in data.get('measurable_results'):
                        measurable_results = MeasurableResults()
                        measurable_results.user = user
                        measurable_results.measurable_result = mr
                        measurable_results.save()
                    # Resume Details (per-file metadata)
                    resume_details = ResumeDetails()
                    resume_details.resume = resume
                    resume_details.page_nos = data.get('no_of_pages')
                    resume_details.save()
                except IntegrityError:
                    # NOTE(review): messages.warning's 3rd positional arg is
                    # extra_tags, so file.name is never shown in the message.
                    messages.warning(request, 'Duplicate resume found:', file.name)
                    return redirect('homepage')
            resumes = Resume.objects.filter(user=User.objects.get(id=1))
            user_detail = UserDetails.objects.get(user=user)
            messages.success(request, 'Resumes uploaded!')
            # Scores below come from `data`, i.e. the last resume parsed in
            # the loop above; raises NameError if `files` was empty — TODO confirm.
            overall_score = 0
            competencies = data.get('competencies')
            measurable_results = data.get('measurable_results')
            if competencies and measurable_results:
                overall_score = competencies.get('score') + measurable_results.get('score')
            if competencies:
                context = {
                    'resumes': resumes,
                    'competencies': competencies,
                    'measurable_results': measurable_results,
                    'no_of_pages': data.get('no_of_pages'),
                    'total_experience': data.get('total_experience'),
                    'user_details': user_detail,
                    'overall_score': overall_score
                }
            else:
                # No competencies extracted: render with empty lists instead.
                context = {
                    'resumes': resumes,
                    'competencies': [],
                    'measurable_results': [],
                    'no_of_pages': data.get('no_of_pages'),
                    'total_experience': data.get('total_experience'),
                    'user_details': user_detail,
                    'overall_score': overall_score
                }
            return render(request, 'base.html', context)
    else:
        form = UploadResumeModelForm()
        return render(request, 'base.html', {'form': form})
def get_education(education):
    '''
    Helper function to display the education in human readable format.

    Takes an iterable of (institution, year) pairs and returns a single
    comma-separated string like "BSc (2019), MSc (2021)".
    '''
    return ', '.join('{} ({})'.format(entry[0], entry[1]) for entry in education)
@csrf_exempt
def user_detail(request, pk):
    """
    Return every stored parsing result for user `pk` as one JSON payload
    (GET only).

    NOTE(review): User.objects.get / Resume.objects.get can also raise
    DoesNotExist (or MultipleObjectsReturned), which is not caught below.
    Non-GET requests fall through and return None (HTTP 500 in Django).
    """
    try:
        user = User.objects.get(pk=pk)
        user_details = UserDetails.objects.get(user=user)
        comp = Competencies.objects.filter(user=user)
        mr = MeasurableResults.objects.filter(user=user)
        resume = Resume.objects.get(user=user)
        resume_details = ResumeDetails.objects.filter(resume=resume)
    except UserDetails.DoesNotExist:
        return HttpResponse(status=404)
    except Competencies.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        comp_serializer = CompetenciesSerializer(comp, many=True)
        mr_serializer = MeasurableResultsSerializer(mr, many=True)
        resume_serializer = ResumeSerializer(resume)
        resume_details_serializer = ResumeDetailsSerializer(resume_details, many=True)
        user_details_serializer = UserDetailsSerializer(user_details)
        # Assemble one flat JSON object from the individual serializers.
        data = {}
        data['competencies'] = comp_serializer.data
        data['measurable_results'] = mr_serializer.data
        data['resume'] = resume_serializer.data
        data['resume_details'] = resume_details_serializer.data
        data['user_details'] = user_details_serializer.data
        return JsonResponse(data)
@csrf_exempt
def job_recommendation(request):
    """Return job listings from the ZipRecruiter API as JSON (POST only).

    NOTE(review): job_title and job_location are read from the form but
    never used — the query is hard-coded to Python / Santa Monica.
    SECURITY(review): the API key is embedded in source control; it should
    be moved to settings / an environment variable and rotated.
    Non-POST requests fall through and return None (HTTP 500 in Django).
    """
    if request.method == 'POST':
        job_title = request.POST.get('job_title')
        job_location = request.POST.get('job_location')
        data = requests.get('https://api.ziprecruiter.com/jobs/v1?search=Python&location=Santa%20Monica&api_key=mqpqz4ev44nfu3n9brazrrix27yzipzm').json()
        return JsonResponse(data) | [
"omkarpathak27@gmail.com"
] | omkarpathak27@gmail.com |
f8b20135a0371d89e32f4b88100180d8d03aeb95 | 22a3051f110d7ddf7d4470aec6e3b6b9ec38769d | /Math problems/DataAnalysis.py | 16bcdccccf418b25172ab33fc9cb06d7f9051e0e | [] | no_license | bilalib/Math-problems | b47993f4093090c1434e75dd83b58b8020c8fee2 | 020499e861358f171cd2f2924d642d62a14ba45f | refs/heads/master | 2020-06-01T21:26:35.409965 | 2019-06-23T00:12:53 | 2019-06-23T00:12:53 | 190,932,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,553 | py | from Settings import *
from pylab import *
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import json
from Problem import Problem
from datetime import datetime
# The amount of days the Per-Question bar graph should reflect. 0 => All days
LAST_N_DAYS = 3
def analyze(history_file_name, min_date):
    """Build a 2x2 matplotlib figure of practice statistics.

    Reads the JSON problem history written by class Problem from
    history_file_name, keeps only problems dated on/after min_date
    (a "MM/DD/YYYY" string), and plots four panels: per-day attempt
    totals, per-problem correct ratio, time spent per attempt number,
    and distance-from-answer vs solution.

    Fixes:
    - np.int / np.float aliases (deprecated in NumPy 1.20, removed in
      1.24) replaced with the builtins int / float.
    - Date format "%d/%M/%Y" parsed the middle field as *minutes*, so
      dates compared by day-of-month before month; corrected to
      "%m/%d/%Y", matching call-site inputs like "06/16/2019".

    Returns the matplotlib Figure.
    """
    # Opens json file that class Problem saves to
    with open(history_file_name) as history_file:
        history = json.load(history_file)
    # Store per-day measurements
    dates = [min_date]
    day_correct = [0]
    day_incorrect = [0]
    day_total = [0]
    # Store per-question measurements
    pblm_total = np.zeros(Problem.num_problems, dtype=int)
    pblm_correct = np.zeros(Problem.num_problems, dtype=int)
    # Store per-attempt measurements (up to 3 attempts per problem)
    attempt_times = np.zeros(3, dtype=float)
    attempt_counts = np.zeros(3, dtype=int)
    attempt_date_counts = np.zeros(3, dtype=int)
    attempt_date_times = np.zeros((1, 3))
    # Stores per-answer distance measurements
    solutions = list()
    attempt_dists = list()
    result_colors = list()
    # Gets all required values for all charts
    for problem in (problem for problem
                    in history if datetime.strptime(problem["date"], "%m/%d/%Y")
                    >= datetime.strptime(min_date, "%m/%d/%Y")):
        curr_date = problem["date"]
        if curr_date != dates[-1]:
            # Adds new date for per-day data
            dates.append(curr_date)
            day_correct.append(0)
            day_incorrect.append(0)
            day_total.append(0)
            # Close out the previous day's per-attempt averages; the
            # zero->one clamp avoids division by zero for attempt numbers
            # that were never reached that day.
            attempt_date_counts[attempt_date_counts == 0] = 1
            attempt_date_times[-1] /= attempt_date_counts * 60
            attempt_date_times = np.vstack([attempt_date_times, np.zeros(3)])
            attempt_date_counts = np.zeros(3, dtype=int)
        idx = problem["index"]
        solution = problem["solution"]
        # Iterates through each attempt, getting all values for all charts
        for i, attempt in enumerate(problem["attempts"]):
            result = attempt["result"]
            # Gets per-day result totals
            if result == "correct":
                day_correct[-1] += 1
            else:
                day_incorrect[-1] += 1
            day_total[-1] += 1
            # Gets per-question result totals
            if result == "correct":
                pblm_correct[idx] += 1
            pblm_total[idx] += 1
            # Gets per-attempt-number time totals
            sec = attempt["seconds"]
            attempt_times[i] += sec
            attempt_counts[i] += 1
            attempt_date_counts[i] += 1
            attempt_date_times[-1][i] += sec
            # Gets per-answer distance measurements
            solutions.append(solution)
            float_input = attempt["float input"]
            attempt_dists.append(abs(solution - float_input))
            if result == "correct":
                result_colors.append("none")
            elif abs(float_input - solution) < ERROR_MARGIN:
                result_colors.append("gold")
            else:
                result_colors.append("r")
    # Averages the final day's per-attempt measurements (seconds -> minutes)
    attempt_date_counts[attempt_date_counts == 0] = 1
    attempt_date_times[-1] /= attempt_date_counts * 60
    # Plotting the data
    fig, axes = plt.subplots(2, 2, figsize=(4,3))
    fig.subplots_adjust(right=3, top=3)
    # Per-day plot
    # Reformats the dates: strip one leading zero and drop the year part.
    for i, date in enumerate(dates):
        if date[0] == "0":
            date = date.replace("0", "", 1)
        dates[i] = date.rpartition("/")[0]
    axes[0][0].plot(dates, day_correct, color = "green", marker="o")
    axes[0][0].plot(dates, day_incorrect, color = "red", marker="o")
    axes[0][0].plot(dates, day_total, marker="o")
    axes[0][0].legend(["correct", "incorrect", "total"])
    axes[0][0].set_xlabel("Dates")
    axes[0][0].set_ylabel("Total attempts")
    axes[0][0].set_title("Per-day attempt totals")
    # Per-question bar graph
    problem_indexes = range(Problem.num_problems)
    pblm_total[pblm_total == 0] = 1  # avoid 0/0 for unattempted problems
    avg_correct = pblm_correct / pblm_total
    # NOTE(review): LAST_N_DAYS actually slices the last N *problems*
    # here, not days — confirm intent.
    last_n = max(-LAST_N_DAYS, -len(problem_indexes))
    axes[0][1].bar(problem_indexes[last_n:], avg_correct[last_n:])
    axes[0][1].set_xticks(problem_indexes)
    axes[0][1].set_xlabel("Problem index")
    axes[0][1].set_ylabel("Correct / incorrect ratio")
    axes[0][1].set_title("Per-problem scores in last "
                         + str(LAST_N_DAYS) + " days")
    # Provides the total problem attempts at top
    for i, height in enumerate(avg_correct):
        axes[0][1].text(i - 0.13, height + .01, str(pblm_total[i]))
    # Per-attempt-number bar graphs
    # Top graph compares attempts side by side
    attempt_numbers = np.arange(1, 4, dtype=int)
    attempt_counts[attempt_counts == 0] = 1
    avg_attempt_times = attempt_times / attempt_counts / 60
    axes[1][0].axis("off")
    top = inset_axes(axes[1][0], "100%", "60%", "upper right")
    bot = inset_axes(axes[1][0], "100%", "17%", "lower left")
    # Drop the leading all-zero row left over when the first day closed out.
    if np.all(attempt_date_times[0] == 0):
        attempt_date_times = np.delete(attempt_date_times, (0), 0)
    top.stackplot(dates, attempt_date_times.transpose(), colors=sns.color_palette("Greens", 3))
    top.legend(attempt_numbers, loc="upper left")
    top.set_title("Average time spent per attempt per day")
    top.set_xlabel("Dates")
    top.set_ylabel("Time spent (min)")
    bot.barh(attempt_numbers, avg_attempt_times, color=sns.color_palette("Greens", 3))
    bot.set_yticks(attempt_numbers)
    for i, height in enumerate(avg_attempt_times):
        # Provides at top of bar number of times student got to attempt
        bot.text(height + .02, attempt_numbers[i] - .15, str(attempt_counts[i]))
    bot.set_title("Average across all days of above graph")
    bot.set_xlabel("Time spent (min)")
    bot.set_ylabel("Attempt")
    bot.xaxis.set_major_locator(MaxNLocator(integer=True))
    # Per-answer distance scatterplot
    axes[1][1].scatter(solutions, attempt_dists, c=result_colors, s=5)
    axes[1][1].set_ylim(0, 80)
    axes[1][1].set_xlabel("Solution")
    axes[1][1].set_title("Distance from answer vs solution")
    # Moves y-axis to center
    axes[1][1].spines['left'].set_position('zero')
    axes[1][1].spines['right'].set_color('none')
    return fig
analyze("history_json.txt", "06/16/2019").savefig("figure", bbox_inches='tight') | [
"bilalib@umich.edu"
] | bilalib@umich.edu |
36930f21ba8d41407c35a4beec90cabd1b4db5d3 | bc62a832051f5981b648115c3aa61dc8855c7698 | /ExampleCodesFromClass/Week8/Class2/Example1_TokenizerInterface.py | c5e1cd7b2b3577e0464ec3671a933fdd15519a66 | [] | no_license | stelukutla/LING516 | cc49749916b5fa0f304f15d80a3130dfbf3b8060 | 45628c975be04ed620a6e7ba58acb183a655680f | refs/heads/master | 2020-04-07T01:11:22.005198 | 2018-11-03T23:48:17 | 2018-11-03T23:48:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | from bottle import get, post, request, route, template, run
import re
import string
def do_tokenize(someString):
    """Split someString into tokens, one per line.

    Every punctuation mark is padded with spaces so it becomes its own
    token, then every run of whitespace is collapsed to a single newline.

    Fixes: the original called str.replace once per punctuation
    *occurrence*, re-padding marks that were already padded (quadratic on
    punctuation-heavy input), and printed a debug line to stdout. A single
    regex substitution pads each mark exactly once; the returned string is
    unchanged because whitespace runs are collapsed afterwards anyway.
    """
    pattern = "[" + string.punctuation + "]"
    # \g<0> re-inserts the matched punctuation mark between the two spaces.
    padded = re.sub(pattern, r" \g<0> ", someString)
    return re.sub(r"\s+", "\n", padded)
@route('/')
@get('/tokenize')
def ask_tokenize():
    """Serve the tokenizer input form (GET / and GET /tokenize)."""
    # The textarea posts its content as form field 'taname' to POST /tokenize.
    return '''
        <form action="/tokenize" method="post" id="formid">
        <textarea form ="formid" name="taname" id="taid" cols="35" wrap="soft"></textarea>
        <input value="Tokenize" type="submit" />
        </form>
    '''
@post('/tokenize')
def post_tokenize():
    """Handle the submitted form: tokenize the text and echo it back."""
    someString = request.forms.get('taname')
    tokenized_string = do_tokenize(someString)
    # Show the result in a read-only textarea (one token per line).
    return "<b>Your tokenized string is:</b> <br />" + \
           "<textarea rows=\"20\" cols=\"40\" wrap=\"soft\" readonly>" \
           + tokenized_string \
           + "</textarea>"
# Start bottle's built-in development server (blocks; default host/port).
run()
| [
"vbsowmya@gmail.com"
] | vbsowmya@gmail.com |
05423c174b31b915d1aa2e5c7e66eff20ca99cb2 | 735f4a6eb4e9c72dc664926ff8b42d02da9067f2 | /batch_four/session-3/simple_file_creation.py | e0e87709ebb33e6df3d688ad5803ce85956fee80 | [] | no_license | sopanshewale/python-datascience | 943b689d4264ad06f19c8039745ba6625d556282 | 0014b48d2397e16536731e1ee91e5e36f31e1ed9 | refs/heads/master | 2021-01-11T20:24:58.567677 | 2018-06-09T07:07:10 | 2018-06-09T07:07:10 | 79,097,836 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/python3
# Write three demo strings into simple_data.txt. Note write() does not
# append newlines, so the file holds one concatenated line. Using `with`
# guarantees the file is flushed and closed even if a write fails.
with open('simple_data.txt', 'w') as f:
    f.write("Hello to writing data into file")
    f.write("Line ------2")
    f.write("Line ------3")
| [
"sopan.shewale@gmail.com"
] | sopan.shewale@gmail.com |
48df31f511a2c0cdd28adb617140b4b8e1fcdf10 | b2b6cc7b5b1c2e95fddb492d9fc3508e66e6b5a2 | /Project_final_ver/random_player/pathfinding.py | c592912a35fc62e4700bf8c39ee5ab1f23ae6de7 | [
"MIT"
] | permissive | vivianjia123/RoPaSci-360 | 331f80892e3930488403bed317b3bd49fde83915 | 1c93b3d6766053a0e0d5cd59254c2c7eeb50546f | refs/heads/main | 2023-07-19T13:22:44.188778 | 2021-09-02T13:27:10 | 2021-09-02T13:27:10 | 402,426,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,691 | py | """
COMP30024 Artificial Intelligence, Semester 1, 2021
Project Part B: Playing the Game
Team Name: Admin
Team Member: Yifeng Pan (955797) & Ziqi Jia (693241)
This module contain functions to defines core game structure and actions.
"""
import math
import random
def boarder_check(position):
    """Return True iff position lies on the radius-4 hexagonal board.

    The board is the set of axial coordinates (r, q) satisfying
    |r| <= 4, |q| <= 4 and |r + q| <= 4 — the closed form of the
    per-row range table used by the original implementation.
    """
    r = position[0]
    q = position[1]
    return abs(r) <= 4 and abs(q) <= 4 and abs(r + q) <= 4
def vertical_movement(token_position, direction):
    """Step one hex up (+r) or down (-r); returns None for any other direction."""
    r, q = token_position[0], token_position[1]
    if direction == "up":
        return (r + 1, q)
    if direction == "down":
        return (r - 1, q)
def horizontal_movement(token_position, direction):
    """Step one hex left (-q) or right (+q); returns None for any other direction."""
    r, q = token_position[0], token_position[1]
    if direction == "left":
        return (r, q - 1)
    if direction == "right":
        return (r, q + 1)
def diagonal_movement(token_position, direction):
    """Step diagonally: "up" is (+r, -q), "down" is (-r, +q); None otherwise."""
    r, q = token_position[0], token_position[1]
    if direction == "up":
        return (r + 1, q - 1)
    if direction == "down":
        return (r - 1, q + 1)
def calculate_distance(start, goal):
    """Metric between two axial hex coordinates; each of the six
    neighbouring hexes is exactly distance 1.0 away."""
    dr = start[0] - goal[0]
    dq = start[1] - goal[1]
    return math.sqrt(dr**2 + dq**2 + dr * dq)
def gen_next_all_potential_moves(token_position):
    """Return the (up to six) on-board hexes adjacent to token_position,
    in the fixed order up, down, left, right, diag-up, diag-down."""
    candidates = [
        vertical_movement(token_position, "up"),
        vertical_movement(token_position, "down"),
        horizontal_movement(token_position, "left"),
        horizontal_movement(token_position, "right"),
        diagonal_movement(token_position, "up"),
        diagonal_movement(token_position, "down"),
    ]
    # Keep only destinations that fall inside the board boundary.
    return [move for move in candidates if boarder_check(move)]
def get_token_adjacency(token_position, my_token_array):
    """Return the positions of friendly tokens exactly one hex away
    from token_position (candidates for a swing move)."""
    return [entry[1] for entry in my_token_array
            if calculate_distance(token_position, entry[1]) == 1.0]
def check_hex_occupancy(destination_hex, my_token_list, my_token_array):
    """Return True when destination_hex is not occupied by any friendly token.

    Fixes: the original built a copy of the array with my_token_list
    removed but then iterated the *full* array anyway (and raised a latent
    ValueError when my_token_list was absent), and carried a no-op
    ``if ...: pass`` branch. Both are dropped; the net behaviour — reject
    a destination occupied by ANY friendly token — is preserved.
    NOTE(review): the dead code suggests moving onto a same-type friendly
    token may originally have been intended to be legal; confirm against
    the game rules before tightening this.
    """
    for token in my_token_array:
        if token[1] == destination_hex:
            return False
    return True
def gen_all_potential_swing_moves(token_position, adjacent_token_list):
    """Return every hex reachable by swinging around an adjacent friendly token.

    A swing destination is any neighbour of an adjacent token, excluding the
    moving token's own hex. Fix: the original removed entries from the list
    *while iterating it*, which skips elements and could leave duplicate
    occurrences of token_position in the result; the filtering is now done
    with a comprehension so every occurrence is removed.
    """
    potential_swing_movements = []
    if len(adjacent_token_list) == 0:
        return potential_swing_movements
    for adjacent_token in adjacent_token_list:
        potential_swing_movements.extend(gen_next_all_potential_moves(adjacent_token))
    return [move for move in potential_swing_movements if move != token_position]
def gen_sorted_dist_for_possible_moves(possible_move_list, goal):
    """Return the candidate moves ordered by increasing distance to goal.

    The sort is stable, so moves at equal distance keep their input order.
    """
    return sorted(possible_move_list,
                  key=lambda move: calculate_distance(move, goal))
def gen_closest_goal(token_position, goal_list):
    """Return the goal in goal_list nearest to token_position
    (None for an empty list).

    Bug fix: the original compared every goal against the distance to
    goal_list[0] and never updated closest_dist, so it returned the *last*
    goal closer than the first one rather than the true closest. The
    running minimum is now updated alongside the chosen goal.
    """
    if not goal_list:
        return
    closest = goal_list[0]
    closest_dist = calculate_distance(token_position, goal_list[0])
    for goal in goal_list[1:]:
        distance = calculate_distance(token_position, goal)
        if distance < closest_dist:
            closest = goal
            closest_dist = distance
    return closest
def explore_next_point(destination, explored_pos_lst):
    """Record destination as visited, ignoring hexes already recorded."""
    already_seen = destination in explored_pos_lst
    if not already_seen:
        explored_pos_lst.append(destination)
def pop_move_history_stack(move_history, branch_pos, origin):
    """Rewind the DFS move-history stack after a branch has been explored.

    If branch_pos is an intermediate hex, pop everything above (and
    including) its earliest occurrence, then push it back once so it sits
    on top. If branch_pos is the search origin, pop back down to the
    single bottom entry. An empty stack is left untouched.
    """
    if len(move_history) == 0:
        pass
    else:
        if branch_pos != origin:
            # Pop until no copy of branch_pos remains on the stack.
            while branch_pos in move_history:
                if len(move_history) != 0:
                    move_history.pop()
                else:
                    break
            # Re-push a single occurrence as the new top of stack.
            move_history.append(branch_pos)
        elif branch_pos == origin:
            # Back at the origin: keep only the bottom entry.
            while len(move_history) != 1:
                move_history.pop()
def gen_possible_moves(token_position, token_type, opponent_token_array, adjacent_token_list, my_token_array, explored_pos_lst, show_text=False):
    """Return every legal destination for the token at token_position.

    Combines the six slide moves with swing moves around genuinely
    adjacent friendly tokens, then filters out: hexes already explored,
    hexes occupied by friendly tokens, and hexes holding the opponent
    token type that defeats token_type. When show_text is True, debug
    traces are printed to stdout (part of the observable behaviour).
    """
    potential_movements = gen_next_all_potential_moves(token_position)
    truly_adjacent = adjacent_token_list.copy()
    if show_text == True:
        print("**Originally has move", potential_movements)
        print("**Iterating over adjacency list", truly_adjacent)
    # Keep only tokens that are still exactly one hex away (the cached
    # adjacency list may be stale after earlier moves).
    if len(adjacent_token_list) != 0:
        for adjacent_pos in adjacent_token_list:
            if calculate_distance(token_position, adjacent_pos) != 1.0:
                truly_adjacent.remove(adjacent_pos)
    # Merge in swing destinations not already reachable by sliding.
    if len(truly_adjacent) != 0:
        potential_swing_moves = gen_all_potential_swing_moves(token_position, truly_adjacent)
        in_swing = set(potential_swing_moves)
        in_move = set(potential_movements)
        in_swing_but_not_in_move = in_swing - in_move
        potential_movements = potential_movements + list(in_swing_but_not_in_move)
        if show_text == True:
            print("**Current potential moves", potential_movements)
    # NOTE(review): this removes from potential_movements while iterating
    # it, which can skip elements — confirm duplicates cannot occur here.
    for move in potential_movements:
        if move in truly_adjacent:
            for my_token in my_token_array:
                if (move == my_token[1]) and (move in potential_movements):
                    if show_text == True:
                        print("**2:Removed adjacent node for not possible overlapping", move)
                    potential_movements.remove(move)
    if show_text == True:
        print("**Here are all the potential moves:",potential_movements)
    # Drop already-explored hexes and friendly-occupied hexes.
    filtered_move = potential_movements.copy()
    for move in potential_movements:
        if move in explored_pos_lst:
            filtered_move.remove(move)
        else:
            for my_token in my_token_array:
                if not check_hex_occupancy(move, my_token,my_token_array):
                    if show_text == True:
                        print("**Validated as false")
                        print("**3:Removed adjacent node for not possible overlapping", move)
                    filtered_move.remove(move)
                    if show_text == True:
                        print("** Now the filtered moves are", filtered_move)
                    break
                else:
                    if show_text == True:
                        print("**Validated as True")
    # Drop hexes holding the opponent type that defeats this token
    # (r beats s, s beats p, p beats r).
    for token in opponent_token_array:
        for move in potential_movements:
            if (token[0] == 'r') and (token_type == 's'):
                if (token[1] == move) and (move in filtered_move):
                    filtered_move.remove(move)
            elif (token[0] == 's') and (token_type == 'p'):
                if (token[1] == move) and (move in filtered_move):
                    filtered_move.remove(move)
            elif (token[0] == 'p') and (token_type == 'r'):
                if (token[1] == move) and (move in filtered_move):
                    filtered_move.remove(move)
    if show_text == True:
        print("The final possible moves are",filtered_move)
    return filtered_move
def gen_move_failure(token_position,token_type, opponent_token_array, adjacent_token_list, my_token_array, explored_pos_lst):
    """Fallback move generator used when gen_possible_moves yields nothing.

    Same slide+swing construction but with looser filtering (e.g. no
    friendly-occupancy rejection beyond the flagged case below), so the
    search can squeeze out of dead ends. Returns a list of moves, or
    False when even the relaxed rules leave no option.
    """
    potential_movements = gen_next_all_potential_moves(token_position)
    truly_adjacent = adjacent_token_list.copy()
    # Keep only tokens that are still exactly one hex away.
    if len(adjacent_token_list) != 0:
        for adjacent_pos in adjacent_token_list:
            if calculate_distance(token_position, adjacent_pos) != 1.0:
                truly_adjacent.remove(adjacent_pos)
    # Merge in swing destinations not already reachable by sliding.
    if len(truly_adjacent) != 0:
        potential_swing_moves = gen_all_potential_swing_moves(token_position, truly_adjacent)
        in_swing = set(potential_swing_moves)
        in_move = set(potential_movements)
        in_swing_but_not_in_move = in_swing - in_move
        potential_movements = potential_movements + list(in_swing_but_not_in_move)
    # NOTE(review): my_token[3] appears to be a per-token flag (threat
    # state?) — confirm its meaning; also this mutates the list while
    # iterating it.
    for move in potential_movements:
        if move in truly_adjacent:
            for my_token in my_token_array:
                if move == my_token[1] and my_token[3]==False:
                    potential_movements.remove(move)
    # Drop already-explored hexes.
    filtered_move = potential_movements.copy()
    for move in potential_movements:
        if move in explored_pos_lst:
            filtered_move.remove(move)
    # Drop hexes holding the opponent type that defeats this token.
    for token in opponent_token_array:
        for move in potential_movements:
            if (token[0] == 'r') and (token_type == 's'):
                if (token[1] == move) and (move in filtered_move):
                    filtered_move.remove(move)
            elif (token[0] == 's') and (token_type == 'p'):
                if (token[1] == move) and (move in filtered_move):
                    filtered_move.remove(move)
            elif (token[0] == 'p') and (token_type == 'r'):
                if (token[1] == move) and (move in filtered_move):
                    filtered_move.remove(move)
    if not filtered_move:
        return False
    else:
        return filtered_move
def recursive_DFS_path_finding(token_position, goal, token_type, opponent_token_array, prev_position, adjacent_token_list, my_token_array,explored_pos_lst,hist_stack, path_found, origin):
    """Greedy depth-first search from token_position towards goal.

    Moves are tried in order of increasing distance to the goal. On
    success the accumulated hist_stack (plus the goal) is copied into
    path_found and True is returned; exhausted branches return None.
    hist_stack / explored_pos_lst / path_found are mutated in place.
    """
    raw_possible_move_list = gen_possible_moves(token_position, token_type, opponent_token_array, adjacent_token_list, my_token_array, explored_pos_lst)
    possible_move_list = gen_sorted_dist_for_possible_moves(raw_possible_move_list, goal)
    if not possible_move_list:
        # Dead end: retry with the relaxed fallback move generator.
        reserve_option = gen_move_failure(token_position, token_type, opponent_token_array, adjacent_token_list, my_token_array, explored_pos_lst)
        if reserve_option:
            raw_possible_move_list = reserve_option
            possible_move_list = gen_sorted_dist_for_possible_moves(raw_possible_move_list, goal)
        else:
            return
    # First pass: mark candidates explored and stop if the goal is reachable.
    for next_move in possible_move_list:
        if next_move not in explored_pos_lst:
            explore_next_point(next_move, explored_pos_lst)
        if next_move == goal:
            hist_stack.append(goal)
            for move in hist_stack:
                path_found.append(move)
            return True
    # Second pass: recurse into each branch, rewinding hist_stack after it.
    for next_move_1 in possible_move_list:
        hist_stack.append(next_move_1)
        prev_position = token_position
        recursive_DFS_path_finding(next_move_1, goal, token_type, opponent_token_array, prev_position, adjacent_token_list, my_token_array,explored_pos_lst,hist_stack, path_found, origin)
        pop_move_history_stack(hist_stack, prev_position, origin)
def offense_route_opt(my_token_list, goal, prev_pos, my_token_array, opponent_token_array, adjacent_token_list):
    """Plan a route for token my_token_list ([type, position, ...]) to goal.

    Thin wrapper around recursive_DFS_path_finding: seeds the explored
    list with the token's own hex and returns the hist_stack the search
    filled in (empty list when no path was found; None when goal is falsy).
    """
    explored_pos_lst = [my_token_list[1]]
    if not goal:
        return
    # `temp` receives the raw path (path_found); hist_stack is the
    # working stack that ends up holding the chosen route.
    temp = [my_token_list[1]]
    hist_stack = []
    recursive_DFS_path_finding(my_token_list[1], goal, my_token_list[0], opponent_token_array, prev_pos, adjacent_token_list, my_token_array ,explored_pos_lst, temp, hist_stack, my_token_list[1])
    return hist_stack
def check_destination_covered(destination, my_token_list, my_token_array):
    """Return True when destination is "covered": a friendly token of the
    type that defends my_token_list's type (r covered by s, s by p, p by r)
    sits exactly one hex away from destination.

    Consistency fix: the original triplicated the type mapping across three
    near-identical branches; it now reuses the existing protective_type
    helper. Unknown token types still yield False (no token matches a None
    cover type).
    """
    cover_type = protective_type(my_token_list[0])
    # Positions of friendly tokens able to defend this token type.
    potential_cover_lst = [tok[1] for tok in my_token_array if tok[0] == cover_type]
    for cover in potential_cover_lst:
        if calculate_distance(destination, cover) == 1:
            return True
    return False
def defense_opt(my_token_list, opponent_token_array, adjacent_token_list, my_token_array): # threat_list is the list of all threats while threat_token_list is the info of a single threat
    """Pick the best defensive destination for a threatened token.

    Scores every escape option (including staying put) by how long the
    threatening tokens would need to reach it, prefers options that are
    "covered" by a defending friendly token, and otherwise considers
    retreating towards cover via run_for_cover.

    NOTE(review): my_token_list[3] is assumed to hold the positions of
    threatening opponent tokens — confirm the token-record layout.
    """
    explored_pos_lst = explored_pos_lst = [my_token_list[1]]
    token_pos = my_token_list[1]
    token_type = my_token_list[0]
    threat_list = my_token_list[3]
    escape_opts = gen_possible_moves(token_pos, token_type, opponent_token_array, adjacent_token_list, my_token_array, explored_pos_lst, show_text=False)
    escape_opts.append(token_pos) #include staying still
    dist_from_escape_opt = [[]for i in range(len(escape_opts))]
    escape_opts_is_covered = [[]for i in range(len(escape_opts))]
    covered_move_lst = []
    covered_move_dist = []
    safest_move = escape_opts[-1]
    longest_move_cost = 0
    shortest_move_cost = 10000
    # For each escape option, measure how long each threat needs to reach it.
    for escape_move in escape_opts:
        for threat in threat_list:
            for opponent in opponent_token_array:
                if threat == opponent[1]:
                    threat_token_list = opponent
                    break
            adjacency_list = get_token_adjacency(threat, opponent_token_array)
            offense_route = offense_route_opt(threat_token_list, escape_move, threat, opponent_token_array, my_token_array, adjacency_list)
            if (len(offense_route) < shortest_move_cost):
                shortest_move_cost = len(offense_route)
        # NOTE(review): this stores the route length of the *last* threat,
        # not the tracked minimum (shortest_move_cost is unused) — confirm.
        dist_from_escape_opt[escape_opts.index(escape_move)] = len(offense_route)
        shortest_move_cost = 10000
    # Flag which escape options are covered by a defending friendly token.
    for escape_move in escape_opts:
        if check_destination_covered(escape_move, my_token_list, my_token_array):
            escape_opts_is_covered[escape_opts.index(escape_move)] = True
        else:
            escape_opts_is_covered[escape_opts.index(escape_move)] = False
    if not any (escape_opts_is_covered):
        # Nothing is covered: either retreat towards cover, or take the
        # option that costs the threats the most moves to reach.
        if not run_for_cover(my_token_list, opponent_token_array, adjacent_token_list, my_token_array):
            best_escape_move = escape_opts[dist_from_escape_opt.index(max(dist_from_escape_opt))]
        else:
            potential_move = run_for_cover(my_token_list, opponent_token_array, adjacent_token_list, my_token_array)
            # Reject the retreat if it moves towards the nearest threat or
            # lands right next to it.
            if (calculate_distance(potential_move, gen_closest_goal(potential_move, threat_list)) < calculate_distance(token_pos, gen_closest_goal(token_pos, threat_list))) or (calculate_distance(potential_move, gen_closest_goal(potential_move, threat_list)) == 1):
                best_escape_move = escape_opts[dist_from_escape_opt.index(max(dist_from_escape_opt))]
            else:
                best_escape_move = run_for_cover(my_token_list, opponent_token_array, adjacent_token_list, my_token_array)
    else:
        # Among covered options, take the one the threats reach slowest.
        for move in escape_opts:
            if escape_opts_is_covered[escape_opts.index(move)] == True:
                covered_move_lst.append(move)
                covered_move_dist.append(dist_from_escape_opt[escape_opts.index(move)])
        best_escape_move = covered_move_lst[covered_move_dist.index(max(covered_move_dist))]
    return best_escape_move
def run_for_cover(my_token_list, opponent_token_array, adjacent_token_list, my_token_array):
    """Retreat towards the closest friendly token that can defend this one.

    Finds the nearest "cover" token (the type that beats this token's
    predator), picks the nearest free hex adjacent to it, and returns the
    first step of a route there. Returns False when no cover token exists.

    NOTE(review): if every hex around the chosen cover is occupied,
    closest_contact_point is never bound (NameError); and escape_route[1]
    raises IndexError when the route has fewer than two entries — confirm
    these cases cannot arise in play.
    """
    token_type = my_token_list[0]
    token_position = my_token_list[1]
    potential_cover_lst = []
    dist_to_cover = []
    # Collect positions of friendly tokens of the defending type
    # (r covered by s, s by p, p by r).
    if token_type == "r":
        for my_token in my_token_array:
            if my_token[0] == "s":
                cover_type = my_token[0]
                potential_cover_lst.append(my_token[1])
    elif token_type == "s":
        for my_token in my_token_array:
            if my_token[0] == "p":
                cover_type = my_token[0]
                potential_cover_lst.append(my_token[1])
    elif token_type == "p":
        for my_token in my_token_array:
            if my_token[0] == "r":
                cover_type = my_token[0]
                potential_cover_lst.append(my_token[1])
    if not potential_cover_lst:
        return False
    else:
        # Nearest cover token by board distance.
        for cover in potential_cover_lst:
            dist_to_cover.append(calculate_distance(token_position, cover))
        closest_cover = potential_cover_lst[dist_to_cover.index(min(dist_to_cover))]
        # Adjacent hexes of the cover token that are not blocked by a
        # friendly token of another type or by any opponent token.
        potential_contacts = gen_next_all_potential_moves(closest_cover)
        available_contacts = potential_contacts.copy()
        for contact in potential_contacts:
            for token in my_token_array:
                if token[1] == contact:
                    if token[0] == token_type:
                        pass
                    else:
                        if contact in available_contacts:
                            available_contacts.remove(contact)
            for token in opponent_token_array:
                if token[1] == contact:
                    if contact in available_contacts:
                        available_contacts.remove(contact)
        # Choose the reachable contact hex closest to the fleeing token.
        closest_contact_dist = 10000
        for contact in available_contacts:
            if calculate_distance(contact, token_position) < closest_contact_dist:
                closest_contact_dist = calculate_distance(contact, token_position)
                closest_contact_point = contact
        escape_route = offense_route_opt(my_token_list, closest_contact_point, token_position, opponent_token_array, my_token_array, adjacent_token_list)
        # Return the first step along the route (index 0 is the start hex).
        return escape_route[1]
def counter_type(token_type):
    """Return the token type that defeats token_type
    (p beats r, s beats p, r beats s); None for unknown types."""
    beats = {"r": "p", "p": "s", "s": "r"}
    return beats.get(token_type)
def protective_type(token_type):
    """Return the friendly token type that can defend token_type, i.e.
    the type that beats token_type's predator; None for unknown types."""
    defender = {"r": "s", "p": "r", "s": "p"}
    return defender.get(token_type)
def throw_action(throw_range, player, throws_left, opponent_token_array, my_token_array):
    '''
    Generate a list of player's throw actions.
    :param throw_range: a range of throw
    :param player: the current player ("upper" throws from the r=4 edge,
        anything else from the r=-4 edge)
    :param throws_left: the number of throws the player left
    :param opponent_token_array: the opponent tokens list of [type, (r, q)]
    :param my_token_array: the player's tokens list of [type, (r, q)]
    :return: False when no throws remain; a single (type, (r, q)) tuple when a
        random throw is chosen; otherwise a list of three option lists:
        [immediate kills, kills possible next round, protective throws]
    '''
    # Rows reachable by a throw grow with throw_range from the player's edge;
    # axial coordinates run -4..4 on both axes, pruned by boarder_check below.
    if player == "upper":
        allowed_r_coord = [i for i in range(4 - throw_range, 5)]
    else:
        allowed_r_coord = [i for i in range(-4 , -3 + throw_range)]
    allowed_q_coordinate = [i for i in range(-4 , 5)]
    raw_coordinates = []
    for r in allowed_r_coord:
        for q in allowed_q_coordinate:
            raw_coordinates.append((r,q))
    allowed_coordinates = raw_coordinates.copy()
    for coordinate in raw_coordinates:
        if not boarder_check(coordinate):
            allowed_coordinates.remove(coordinate)
    #print("allowed coordinates are", allowed_coordinates)
    #print("**Allowed throw r range is:", allowed_r_coord)
    #print("**Allowed throw r coordinates are:", allowed_coordinates)
    if throws_left <= 0: # No throws left
        return False
    else:
        if (len(my_token_array) == 0) and (len(opponent_token_array) == 0): # No friendly or hostile tokens on board yet
            type_choice = ["r","s","p"]
            return (random.choice(type_choice), random.choice(allowed_coordinates))
        elif (len(my_token_array) != 0) and (len(opponent_token_array) != 0): # Exists some friendly and hostile tokens
            throw_choices = [[],[],[]]
            for enemy in opponent_token_array:
                counter_token_type= counter_type(enemy[0])
                if enemy[1] in allowed_coordinates:
                    throw_choices[0].append((counter_token_type, enemy[1])) # Add immediate kill throw option for consideration
                else:
                    # Enemy is out of throw range: consider landing next to it
                    # on a hex not blocked by a losing friendly/enemy type.
                    potential_throw_contacts = gen_next_all_potential_moves(enemy[1])
                    available_throw_contacts = potential_throw_contacts.copy()
                    for throw_dest in potential_throw_contacts:
                        for token in my_token_array:
                            if token[1] == throw_dest:
                                if token[0] == counter_token_type:
                                    pass
                                else:
                                    available_throw_contacts.remove(throw_dest)
                        for token in opponent_token_array:
                            if token[1] == throw_dest:
                                if token[0] == counter_token_type:
                                    pass
                                elif (token[0] == counter_type(counter_token_type)) and (throw_dest in available_throw_contacts):
                                    available_throw_contacts.remove(throw_dest)
                    for throw_opt in available_throw_contacts:
                        if throw_opt in allowed_coordinates:
                            throw_choices[1].append((counter_token_type, throw_opt)) # Add kill in next round throw option for consideration
            # Count surviving friendly types to decide on a protective throw.
            num_of_s = 0
            num_of_p = 0
            num_of_r = 0
            for friendly in my_token_array:
                if friendly[0] == "r":
                    num_of_r += 1
                elif friendly[0] == "s":
                    num_of_s += 1
                elif friendly[0] == "p":
                    num_of_p += 1
            type_choice = []
            if num_of_s < 1:
                type_choice.append("s")
            if num_of_p < 1:
                type_choice.append("p")
            if num_of_r < 1:
                type_choice.append("r")
            if type_choice: # Perform a protective throw if number of tokens < than 1p 1r 1s
                #print("** Protective throw")
                for choice in type_choice:
                    for my_token in my_token_array:
                        if choice == protective_type(my_token[0]):
                            counter_token_type = choice
                            temp = gen_next_all_potential_moves(my_token[1])
                            thow_pos = temp.copy()
                            for throw in temp:
                                if not check_hex_occupancy(throw, my_token, my_token_array):
                                    thow_pos.remove(throw)
                                    break
                            #print("** Protective throw opt are:", thow_pos)
                            for throw_opt in thow_pos:
                                if throw_opt in allowed_coordinates:
                                    throw_choices[2].append((counter_token_type, throw_opt))
        elif (len(my_token_array) == 0) and (len(opponent_token_array) != 0):
            throw_choices = [[],[],[]]
            for enemy in opponent_token_array:
                counter_token_type= counter_type(enemy[0])
                if enemy[1] in allowed_coordinates:
                    throw_choices[0].append((counter_token_type, enemy[1])) # Add immediate kill throw option for consideration
                else:
                    potential_throw_contacts = gen_next_all_potential_moves(enemy[1])
                    available_throw_contacts = potential_throw_contacts.copy()
                    for throw_dest in potential_throw_contacts:
                        for token in my_token_array:
                            if token[1] == throw_dest:
                                if token[0] == counter_token_type:
                                    pass
                                else:
                                    available_throw_contacts.remove(throw_dest)
                        for token in opponent_token_array:
                            if token[1] == throw_dest:
                                if token[0] == counter_token_type:
                                    pass
                                elif token[0] == counter_type(counter_token_type):
                                    available_throw_contacts.remove(throw_dest)
                    for throw_opt in available_throw_contacts:
                        if throw_opt in allowed_coordinates:
                            throw_choices[1].append((counter_token_type, throw_opt)) # Add kill in next round throw option for consideration
        # NOTE(review): if my_token_array is non-empty while the opponent has
        # no tokens, throw_choices is never assigned and the line below raises
        # NameError — confirm that case is unreachable from the caller.
        if (not throw_choices[0]) and (not throw_choices[1]):
            type_choice = ["r","s","p"]
            return (random.choice(type_choice), random.choice(allowed_coordinates))
        #print("**Throw choices are:", throw_choices)
        return throw_choices
| [
"noreply@github.com"
] | noreply@github.com |
08614e6d097655c7c676a0336d9f847227e88e3d | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /validators/funnel/_marker.py | 475ac8c006ae087f0522dd87148fdf5d681678a6 | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,407 | py | import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``marker`` property of ``funnel`` traces.

    Delegates validation/coercion to the ``Marker`` graph object class;
    ``data_docs`` below is a runtime help string describing each
    sub-property and must not be edited casually.
    """

    def __init__(self, plotly_name="marker", parent_name="funnel", **kwargs):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`new_plotly.graph_objects.funnel.marker.Colo
                rBar` instance or dict with compatible
                properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            line
                :class:`new_plotly.graph_objects.funnel.marker.Line
                ` instance or dict with compatible properties
            opacity
                Sets the opacity of the bars.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for opacity .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
            """,
            ),
            **kwargs
        )
| [
"wwwidonja@gmail.com"
] | wwwidonja@gmail.com |
ab2f5517b77290ef79e8d5d6fcfa1dbbde88463a | 5a0d31d01df4744a831068e33714756dcd3aca95 | /lol.py | 76efc25f7b46511049bbb3d095a073c32b21d672 | [] | no_license | MathieuDuponchelle/Kerious-Resource-Editor | 39ff04d326e64f28c34b1cf96dec566a24c0b3ec | a336c6d6baca0ab90cce8ee54c2565cff4fda8bb | refs/heads/master | 2020-12-24T15:40:25.581900 | 2012-10-10T15:11:08 | 2012-10-10T15:11:08 | 5,225,734 | 2 | 1 | null | 2012-08-17T15:59:26 | 2012-07-29T21:20:56 | Python | UTF-8 | Python | false | false | 9,128 | py | #!/usr/bin/env python
#
# signal.py
#
# Copyright (c) 2006, Richard Boulton <richard@tartarus.org>
# Copyright (C) 2012 Thibault Saunier <thibaul.saunier@collabora.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
"""
Helper classes to handle signals
"""
from random import randint
class SignalGroup:
    """
    A group of signals, which can be disconnected easily.
    Used to make it easy to keep signals attached to the current project.
    """
    def __init__(self):
        # Maps a user-chosen signal id -> (emitting object, handler id).
        self.signal_handler_ids = {}

    def connect(self, object, signal, sid, callback, *args):
        """Connect a signal.
        _ `object` is the object which defines the signal.
        _ `signal` is the name of the signal to connect to.
        _ `sid` is a unique (within this SignalGroup) identifier for the
          signal to connect to. If this is None, the value of `signal` will
          be used instead.
        _ `callback` is the callable to call on the signal.
        _ `args` are any extra arguments to pass to the callback.
        If there is already a connected signal in the group with the specified
        unique identifier, this signal will first be disconnected.
        """
        if sid is None:
            sid = signal
        if sid in self.signal_handler_ids:
            old_object, handler_id = self.signal_handler_ids[sid]
            old_object.disconnect(handler_id)
            del self.signal_handler_ids[sid]
        handler_id = object.connect(signal, callback, *args)
        # BUG FIX: was keyed on the builtin `id`, not the `sid` parameter,
        # so entries could never be looked up again by their identifier.
        self.signal_handler_ids[sid] = (object, handler_id)

    def disconnect(self, sid):
        """Disconnect the signal with the specified unique identifier.
        If there is no such signal, this returns without having any effect.
        """
        # BUG FIX: was testing/popping the builtin `id` instead of `sid`.
        if sid in self.signal_handler_ids:
            old_object, handler_id = self.signal_handler_ids.pop(sid)
            old_object.disconnect(handler_id)

    def disconnectAll(self):
        """Disconnect all signals in the group.
        """
        # list(...) so the dict is not mutated while iterating; .values()
        # (not py2-only itervalues) keeps this py2/py3 compatible.
        for old_object, handler_id in list(self.signal_handler_ids.values()):
            old_object.disconnect(handler_id)
        self.signal_handler_ids = {}

    def disconnectForObject(self, obj):
        """
        Disconnects all signals in the group connected on the given object
        """
        assert obj is not None
        objids = [sid for sid in self.signal_handler_ids.keys() if self.signal_handler_ids[sid][0] == obj]
        for sid in objids:
            # BUG FIX: was popping the builtin `id` instead of `sid`.
            old_object, handler_id = self.signal_handler_ids.pop(sid)
            old_object.disconnect(handler_id)
class Signallable(object):
    """
    Signallable interface
    @cvar __signals__: The signals the class can emit as a dictionary of
       - Key : signal name
       - Value : List of arguments (can be None)
    @type __signals__: Dictionary of L{str} : List of L{str}
    """

    class SignalGroup:
        # internal bookkeeping object, lazily attached to a Signallable
        # instance as `_signal_group` on first connect()
        def __init__(self, signallable):
            self.siglist = signallable.get_signals()
            # self.ids is a dictionary of
            # key: signal id (int)
            # value: tuple of:
            # (callback (callable),
            #  args (list),
            #  kwargs (dictionary))
            self.ids = {}
            self.callback_ids = {}
            # self.handlers is a dictionary of callback ids per
            # signals.
            self.handlers = {}
            for signame in self.siglist.keys():
                self.handlers[signame] = []

        def connect(self, signame, cb, args, kwargs):
            """ connect """
            # get a unique id
            if not signame in self.handlers.keys():
                raise Exception("Signal %s is not one of %s" % (signame,
                        ",\n\t".join(self.handlers.keys())))
            if not callable(cb):
                raise Exception("Provided callable '%r' is not callable" % cb)
            # NOTE(review): randint's bounds are inclusive, so this range is
            # one wider than 64 bits; harmless, only uniqueness matters here.
            uuid = randint(0, 2 ** 64)
            while uuid in self.ids:
                uuid = randint(0, 2 ** 64)
            self.ids[uuid] = (cb, args, kwargs)
            self.callback_ids.setdefault(cb, []).append(uuid)
            self.handlers[signame].append(uuid)
            return uuid

        def disconnect(self, sigid):
            """ disconnect """
            try:
                cb = self.ids[sigid][0]
                del self.ids[sigid]
            except KeyError:
                raise Exception("unknown signal id")
            # remove the id from whichever signal's handler list holds it
            # (itervalues: this module is Python 2)
            for lists in self.handlers.itervalues():
                try:
                    lists.remove(sigid)
                except ValueError:
                    continue
            self.callback_ids.get(cb, []).remove(sigid)

        def disconnect_by_function(self, function):
            # drop every connection registered for this callback
            try:
                sig_ids = self.callback_ids[function]
            except KeyError:
                raise Exception("function is not a known callback")
            for sigid in list(sig_ids):
                self.disconnect(sigid)
            del self.callback_ids[function]

        def emit(self, signame, *args, **kwargs):
            """ emit """
            # emits the signal,
            # will concatenate the given args/kwargs with
            # the ones supplied in .connect()
            res = None
            # Create a copy because if the handler being executed disconnects,
            # the next handler will not be called.
            signame_handlers = list(self.handlers[signame])
            for sigid in signame_handlers:
                if sigid not in self.handlers[signame]:
                    # The handler has been disconnected in the meantime!
                    continue
                # cb: callable
                cb, orar, kwar = self.ids[sigid]
                ar = args[:] + orar
                kw = kwargs.copy()
                kw.update(kwar)
                res = cb(*ar, **kw)
            return res

    # key : name (string)
    # value : signature (list of any strings)
    __signals__ = {}

    def emit(self, signame, *args, **kwargs):
        """
        Emit the given signal.
        The provided kwargs should contain *at-least* the arguments declared
        in the signal declaration.
        The object emitting the signal will be provided as the first
        argument of the callback
        @return: The first non-None return value given by the callbacks if they
        provide any non-None return value.
        """
        if not hasattr(self, "_signal_group"):
            # if there's no SignalGroup, that means nothing is
            # connected
            return None
        return self._signal_group.emit(signame, self,
                                       *args, **kwargs)

    def connect(self, signame, cb, *args, **kwargs):
        """
        Connect a callback (with optional arguments) to the given
        signal.
        * signame : the name of the signal
        * cb : the callback (needs to be a callable)
        * args/kwargs : (optional) arguments
        """
        if not hasattr(self, "_signal_group"):
            self._signal_group = self.SignalGroup(self)
        return self._signal_group.connect(signame,
                                          cb, args, kwargs)

    def disconnect(self, sigid):
        """
        Disconnect signal using given signal id
        """
        if not hasattr(self, "_signal_group"):
            raise Exception("This class doesn't have any signals !")
        self._signal_group.disconnect(sigid)

    def disconnect_by_function(self, function):
        """
        Disconnect signal using given callback function
        """
        if not hasattr(self, "_signal_group"):
            raise Exception("This class doesn't have any signals !")
        self._signal_group.disconnect_by_function(function)

    disconnect_by_func = disconnect_by_function

    @classmethod
    def get_signals(cls):
        """ Get the full list of signals implemented by this class """
        # walk the MRO so subclasses inherit (and may extend) __signals__
        sigs = {}
        for cla in cls.mro():
            if "__signals__" in cla.__dict__:
                sigs.update(cla.__signals__)
            if cla == Signallable:
                break
        return sigs
# Manual smoke test: running this module directly just prints a marker
# (Python 2 print statement — this module is py2-only).
if __name__ == "__main__":
    print "lol"
| [
"mathieu.duponchelle@epitech.eu"
] | mathieu.duponchelle@epitech.eu |
4a6a8081f0a61267663b27c945c0579c07a15037 | 585bac463cb1919ac697391ff130bbced73d6307 | /325_MaximumSizeSubarraySumEqualsk /solution.py | 6417978633d4d8ceb321da2c951a69503bfe6ecb | [] | no_license | llgeek/leetcode | ce236cf3d3e3084933a7a4a5e8c7766f7f407285 | 4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c | refs/heads/master | 2021-01-22T23:44:13.318127 | 2020-03-11T00:59:05 | 2020-03-11T00:59:05 | 85,667,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | class Solution:
def maxSubArrayLen(self, nums, k):
maxlen = 0
presum = dict()
accsum = 0
for idx, num in enumerate(nums):
accsum += num
# if num == k:
# maxlen = max(maxlen, 1)
if accsum == k:
maxlen = idx+1
if accsum - k in presum:
maxlen = max(maxlen, idx-presum[accsum-k])
if accsum not in presum:
presum[accsum] = idx
return maxlen
if __name__ == '__main__':
    # Quick manual check: longest subarray of [-1, 0, 0, 0, 2] summing to 0
    # is [0, 0, 0], so this should print 3.
    nums = [-1, 0, 0, 0, 2]
    k = 0
    print(Solution().maxSubArrayLen(nums, k))
"linlinchen@Linlins-MacBook-Pro.local"
] | linlinchen@Linlins-MacBook-Pro.local |
c1b2c1b26af07056fd80e5dca67a5001815af546 | 093ac6d8f8398536b98455cc5db4ac3cbeb1a96d | /debugger-tools/gdb-loader.py | 541f98f65fc969b9adecaecc26b2424d0a1abe96 | [] | no_license | roswell/clasp | 721316c5605f716f16eb036beb7dbb16974d26af | 8ba34fc54a34d3a5f14af01ede1ba53e99da56e9 | refs/heads/main | 2021-06-08T14:59:50.260028 | 2021-05-07T05:31:33 | 2021-05-07T05:31:33 | 91,808,485 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | import os
import importlib
#
# The wrapper module
#
# Module handles populated lazily by maybeReloadModules() so the helpers can
# be re-imported (picking up edits) without restarting gdb.
inspector_mod = None
debugger_mod = None
# Make this extension's own directory importable (clasp_inspect, backends.gdb).
dir = os.path.dirname(os.path.expanduser(__file__))
print( "\n\n\nLoading clasp gdb python extension from directory = %s" % dir)
sys.path.insert(0,dir)
def maybeReloadModules():
    """(Re)load the clasp helper modules.

    The first call imports them; subsequent calls reload, so edits to the
    helpers are picked up without restarting gdb.
    """
    global inspector_mod, debugger_mod
    if inspector_mod is None:
        inspector_mod = importlib.import_module("clasp_inspect")
    else:
        importlib.reload(inspector_mod)
    if debugger_mod is None:
        debugger_mod = importlib.import_module("backends.gdb")
    else:
        importlib.reload(debugger_mod)
class LispPrint(gdb.Command):
    """gdb command ``lprint <address>``: print a lisp object in compact form."""

    def __init__(self):
        super(LispPrint, self).__init__("lprint", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        global inspector_mod, debugger_mod
        maybeReloadModules()
        inspector_mod.do_lisp_print(debugger_mod, arg)
class LispHead(gdb.Command):
    """gdb command ``lhead <address>``: dump the client object's header."""

    def __init__(self):
        super(LispHead, self).__init__("lhead", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        global inspector_mod, debugger_mod
        maybeReloadModules()
        inspector_mod.do_lisp_head(debugger_mod, arg)
class LispInspect(gdb.Command):
    """gdb command ``linspect <address>``: inspect all fields of a lisp object."""

    def __init__(self):
        super(LispInspect, self).__init__("linspect", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        global inspector_mod, debugger_mod
        maybeReloadModules()
        inspector_mod.do_lisp_inspect(debugger_mod, arg)
class LispTest(gdb.Command):
    """gdb command ``ltest <address>``: exercise the module-reloading path."""

    def __init__(self):
        super(LispTest, self).__init__("ltest", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        global inspector_mod, debugger_mod
        maybeReloadModules()
        inspector_mod.do_lisp_test(debugger_mod, arg)
# Instantiating each gdb.Command subclass registers the command with gdb;
# the prints below are the user-visible usage banner.
LispInspect()
LispPrint()
LispHead()
LispTest()
print("lprint <address> - print lisp object in compact form")
print("linspect <address> - inspect lisp object - all fields")
print("lhead <address> - dump the clients header")
print("ltest <address> - test module reloading")
print("python-interactive <expr> - (or pi) interactive Python session\n")
"meister@temple.edu"
] | meister@temple.edu |
5f5c946119f93d0807da026d011f603897bc22be | 2e74c7339c63385172629eaa84680a85a4731ee9 | /functions/adding_machine/adding_machine/summarizers.py | e2c7e2177df872c5c6880129cd2080aec0c8204f | [] | no_license | zhusui/ihme-modeling | 04545182d0359adacd22984cb11c584c86e889c2 | dfd2fe2a23bd4a0799b49881cb9785f5c0512db3 | refs/heads/master | 2021-01-20T12:30:52.254363 | 2016-10-11T00:33:36 | 2016-10-11T00:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,857 | py | from __future__ import division
import pandas as pd
import agg_engine as ae
import os
import super_gopher
from functools32 import lru_cache
try:
from hierarchies import dbtrees
except:
from hierarchies.hierarchies import dbtrees
import numpy as np
from scipy import stats
from multiprocessing import Pool
import itertools
from db import EpiDB
# Absolute location of this module; this_path is its containing directory.
this_file = os.path.abspath(__file__)
this_path = os.path.dirname(this_file)
@lru_cache()
def get_age_weights():
    """Return GBD round 3 age-standardization weights (memoized after first call)."""
    query = """
    SELECT age_group_id, age_group_weight_value
    FROM shared.age_group_weight
    WHERE gbd_round_id = 3"""
    database = EpiDB('epi')
    engine = database.get_engine(database.dsn_name)
    return pd.read_sql(query, engine)
@lru_cache()
def get_age_spans():
    """Return each age_group_id with its start/end years (memoized after first call)."""
    query = """
    SELECT age_group_id, age_group_years_start, age_group_years_end
    FROM shared.age_group"""
    database = EpiDB('epi')
    engine = database.get_engine(database.dsn_name)
    return pd.read_sql(query, engine)
def get_pop(filters=None):
    """Pull best-version populations (years 1980-2015) from the mortality DB.

    :param filters: optional dict mapping a column name to a scalar or
        sequence of values; each entry is appended as an ``AND col IN (...)``
        clause.  NOTE(review): values are interpolated directly into the SQL
        string, so this must only be called with trusted, internally
        generated filters.
    :return: DataFrame with age_group_id, year_id, location_id, sex_id,
        pop_scaled.
    """
    # BUG FIX: the default was a shared mutable dict ({}); use None instead.
    if filters is None:
        filters = {}
    query = """
    SELECT o.age_group_id, year_id, o.location_id, o.sex_id, pop_scaled
    FROM mortality.output o
    LEFT JOIN mortality.output_version ov using (output_version_id)
    LEFT JOIN shared.age_group a using (age_group_id)
    LEFT JOIN shared.location l using (location_id)
    LEFT JOIN shared.sex s using (sex_id)
    WHERE ov.is_best = 1
    AND year_id >= 1980 AND year_id <= 2015"""
    # .items() instead of py2-only .iteritems(); identical iteration behavior.
    for k, v in filters.items():
        v = np.atleast_1d(v)
        v = [str(i) for i in v]
        query = query + " AND {k} IN ({vlist})".format(
            k=k, vlist=",".join(v))
    db = EpiDB('cod')
    eng = db.get_engine(db.dsn_name)
    pop = pd.read_sql(query, eng)
    return pop
def combine_sexes_indf(df):
    """Collapse male/female rows into a both-sex aggregate (sex_id 3).

    Population-weighted sum of the draw columns via ae.aggregate, using the
    module-level ``pop`` table (set by launch_summaries / summ_lvl_meas —
    callers must populate it first).
    """
    draw_cols = list(df.filter(like='draw').columns)
    # NOTE(review): index_cols built from a set, so its order is
    # nondeterministic; downstream selects columns by name so this is benign.
    index_cols = list(set(df.columns) - set(draw_cols))
    index_cols.remove('sex_id')
    csdf = df.merge(
        pop,
        on=['location_id', 'year_id', 'age_group_id', 'sex_id'])
    # assert len(csdf) == len(df), "Uh oh, some pops are missing..."
    csdf = ae.aggregate(
        csdf[index_cols+draw_cols+['pop_scaled']],
        draw_cols,
        index_cols,
        'wtd_sum',
        weight_col='pop_scaled')
    csdf['sex_id'] = 3
    return csdf
def combine_ages(df, gbd_compare_ags=False):
    """Build aggregate-age rows missing from ``df``.

    Always computes all-ages (22) and age-standardized (27); with
    ``gbd_compare_ags`` also the GBD-compare groups (1, 23-26).  Group 27 is
    weighted by the module-level age weights ``aw``; every other group is a
    population-weighted sum over the detailed ages whose span (from ``ags``)
    falls inside the group's (start, end) years.  Relies on module globals
    ``pop``, ``aw`` and ``ags`` being populated by the caller.
    """
    # age_group_id -> (years_start, years_end) span it covers
    age_groups = {
        22: (0, 200),
        27: (0, 200)}
    if gbd_compare_ags:
        age_groups.update({
            1: (0, 5),
            23: (5, 15),
            24: (15, 50),
            25: (50, 70),
            26: (70, 200)})
    index_cols = ['location_id', 'year_id', 'measure_id', 'sex_id']
    if 'cause_id' in df.columns:
        index_cols.append('cause_id')
    if 'sequela_id' in df.columns:
        index_cols.append('sequela_id')
    if 'rei_id' in df.columns:
        index_cols.append('rei_id')
    draw_cols = list(df.filter(like='draw').columns)
    results = []
    for age_group_id, span in age_groups.items():
        if age_group_id in df.age_group_id.unique():
            continue
        # Get aggregate age cases
        if age_group_id != 27:
            wc = 'pop_scaled'
            aadf = df.merge(ags)
            aadf = aadf[
                (span[0] <= aadf.age_group_years_start) &
                (span[1] >= aadf.age_group_years_end)]
            aadf.drop(
                ['age_group_years_start', 'age_group_years_end'],
                axis=1,
                inplace=True)
            len_in = len(aadf)
            aadf = aadf.merge(
                pop,
                on=['location_id', 'year_id', 'age_group_id', 'sex_id'],
                how='left')
            assert len(aadf) == len_in, "Uh oh, some pops are missing..."
        else:
            # age-standardized: weight by the GBD standard population weights
            wc = 'age_group_weight_value'
            aadf = df.merge(aw, on='age_group_id', how='left')
            assert len(aadf) == len(df), "Uh oh, some weights are missing..."
        aadf = ae.aggregate(
            aadf[index_cols+draw_cols+[wc]],
            draw_cols,
            index_cols,
            'wtd_sum',
            weight_col=wc)
        aadf['age_group_id'] = age_group_id
        results.append(aadf)
    results = pd.concat(results)
    return results
def get_estimates(df):
    """Collapse draw columns into mean/median/95% UI summary columns.

    Keeps every non-draw column (in the frame's own order) and appends
    'mean', 'median', 'lower' (2.5th percentile) and 'upper' (97.5th
    percentile), computed row-wise across the ``draw*`` columns.
    """
    summdf = df.copy()
    summdf['mean'] = summdf.filter(like='draw').mean(axis=1)
    summdf['median'] = np.median(
        summdf.filter(like='draw').values,
        axis=1)
    # scoreatpercentile uses linear interpolation (same as np.percentile)
    summdf['lower'] = stats.scoreatpercentile(
        summdf.filter(like='draw').values,
        per=2.5,
        axis=1)
    summdf['upper'] = stats.scoreatpercentile(
        summdf.filter(like='draw').values,
        per=97.5,
        axis=1)
    # BUG FIX: the kept columns were previously built from a set, which made
    # the output column order nondeterministic; preserve the frame's order.
    draw_cols = set(summdf.filter(like='draw').columns)
    keep = [c for c in summdf.columns if c not in draw_cols]
    return summdf[keep]
def pct_change(df, start_year, end_year, change_type='pct_change',
               index_cols=None):
    """ Compute pct change: either arc or regular pct_change (rate or num).
    For pct_change in rates or arc pass in a df in rate space.
    Otherwise, pass in a df in count space."""
    # set up the incoming df to be passed into the math part
    # NOTE(review): assumes exactly 1000 draws named draw_0..draw_999.
    draw_cols = list(df.filter(like='draw').columns)
    if not index_cols:
        index_cols = list(set(df.columns) - set(draw_cols + ['year_id']))
    df_s = df[df.year_id == start_year]
    df_e = df[df.year_id == end_year]
    # NOTE(review): inplace drop on a filtered slice triggers pandas'
    # SettingWithCopyWarning but does not touch the caller's frame.
    df_s.drop('year_id', axis=1, inplace=True)
    df_e.drop('year_id', axis=1, inplace=True)
    # wide merge: start-year and end-year draws side by side per index row
    df_s = df_s.merge(
        df_e,
        on=index_cols,
        suffixes=(str(start_year), str(end_year)))
    sdraws = ['draw_%s%s' % (d, start_year) for d in range(1000)]
    edraws = ['draw_%s%s' % (d, end_year) for d in range(1000)]
    # do the math
    if change_type == 'pct_change':
        cdraws = ((df_s[edraws].values - df_s[sdraws].values) /
                  df_s[sdraws].values)
        emean = df_s[edraws].values.mean(axis=1)
        smean = df_s[sdraws].values.mean(axis=1)
        cmean = (emean - smean) / smean
        # when any start year values are 0, we get division by zero = NaN/inf
        cdraws[np.isnan(cdraws)] = 0
        cdraws[np.isinf(cdraws)] = 0
        cmean[np.isnan(cmean)] = 0
        cmean[np.isinf(cmean)] = 0
    elif change_type == 'arc':
        # annualized rate of change: log-ratio divided by the year gap.
        # can't take a log of 0, so replace 0 with a miniscule number
        adraws = sdraws + edraws
        if (df_s[adraws].values == 0).any():
            df_s[adraws] = df_s[adraws].replace(0, 1e-9)
        gap = end_year - start_year
        cdraws = np.log(df_s[edraws].values / df_s[sdraws].values) / gap
        emean = df_s[edraws].values.mean(axis=1)
        smean = df_s[sdraws].values.mean(axis=1)
        # NOTE(review): unlike the pct_change branch, NaN/inf are not zeroed
        # here — confirm that is intentional.
        cmean = np.log(emean / smean) / gap
    else:
        raise ValueError("change_type must be 'pct_change' or 'arc'")
    # put the dataframes back together
    cdraws = pd.DataFrame(cdraws, index=df_s.index, columns=draw_cols)
    cdraws = cdraws.join(df_s[index_cols])
    cmean = pd.DataFrame(cmean, index=df_s.index, columns=['pct_change_means'])
    cdraws = cdraws.join(cmean)
    cdraws['year_start_id'] = start_year
    cdraws['year_end_id'] = end_year
    cdraws = cdraws[
        index_cols +
        ['year_start_id', 'year_end_id', 'pct_change_means'] +
        draw_cols]
    # output
    return cdraws
def transform_metric(df, to_id, from_id):
    """Given a df, it's current metric_id (from_id)
    and it's desired metric_id (to_id), transform metric space!

    Currently only supports number (1) <-> rate (3), dividing or multiplying
    the draw columns by best-model population pulled via get_pop().
    """
    to_id = int(to_id)
    from_id = int(from_id)
    # TODO: Expand this for the other metrics too.
    # Right not just doing number and rate for the get_pct_change shared fn.
    valid_to = [1, 3]
    assert to_id in valid_to, "Pass either 1 or 3 for the 'to_id' arg"
    valid_from = [1, 3]
    assert from_id in valid_from, "Pass either 1 or 3 for the 'from_id' arg"
    merge_cols = ['location_id', 'year_id', 'age_group_id', 'sex_id']
    # NOTE(review): Index.is_integer is a method, so this attribute reference
    # is always truthy and the reset_index below can never run — confirm
    # whether `df.index.is_integer()` was intended.
    if not df.index.is_integer:
        df.reset_index(inplace=True)
    for col in merge_cols:
        assert col in df.columns, "Df must contain %s" % col
    # find years and sexes in the df
    years = df.year_id.unique()
    sexes = df.sex_id.unique()
    ages = df.age_group_id.unique()
    locations = df.location_id.unique()
    # get populations for those years and sexes
    pop = get_pop({'year_id': years, 'sex_id': sexes,
                   'age_group_id': ages, 'location_id': locations})
    # transform
    draw_cols = list(df.filter(like='draw').columns)
    new_df = df.merge(pop, on=merge_cols, how='inner')
    if (to_id == 3 and from_id == 1):
        for i in draw_cols:
            new_df['%s' % i] = new_df['%s' % i] / new_df['pop_scaled']
    elif (to_id == 1 and from_id == 3):
        for i in draw_cols:
            new_df['%s' % i] = new_df['%s' % i] * new_df['pop_scaled']
    else:
        raise ValueError("'to_id' and 'from_id' must be two unique numbers")
    # put the dfs back together
    if 'metric_id' in new_df.columns:
        # NOTE(review): the axis kwarg on Series.replace only exists in old
        # pandas releases — verify against the pinned pandas version.
        new_df['metric_id'].replace(from_id, to_id, axis=1, inplace=True)
    else:
        new_df['metric_id'] = to_id
    new_df.drop('pop_scaled', axis=1, inplace=True)
    return new_df
def summarize_location(
        location_id,
        drawdir,
        sg=None,
        years=[1990, 1995, 2000, 2005, 2010, 2015],
        change_intervals=None,
        combine_sexes=False,
        force_age=False,
        draw_filters={},
        calc_counts=False,
        gbd_compare_ags=False):
    """Summarize one location's draws across the requested years.

    Reads draws through ``sg`` (a SuperGopher; a default spec is built when
    None), optionally aggregates sexes and age groups, computes cause
    fractions against all-cause (cause_id 294), optionally converts rates to
    counts, and returns (summary_df, change_summary_df).  Relies on the
    module-level ``pop`` table when combining sexes or calculating counts.
    NOTE(review): the mutable defaults (years list, draw_filters dict) are
    shared across calls — safe only while callers never mutate them.
    """
    drawcols = ['draw_%s' % i for i in range(1000)]
    if sg is None:
        spec = super_gopher.known_specs[2]
        sg = super_gopher.SuperGopher(spec, drawdir)
    if change_intervals:
        change_years = [i for i in itertools.chain(*change_intervals)]
    else:
        change_years = []
    change_df = []
    summary = []
    for y in years:
        df = sg.content(
            location_id=location_id, year_id=y, sex_id=[1, 2],
            **draw_filters)
        if force_age:
            # keep only the detailed GBD age groups (2-21)
            df = df[df.age_group_id.isin(range(2, 22))]
        if combine_sexes:
            df = df[df.sex_id != 3]
            cs = combine_sexes_indf(df)
            df = df.append(cs)
        df = df.append(combine_ages(df, gbd_compare_ags))
        df['metric_id'] = 3
        if ('cause_id' in df.columns) and ('rei_id' not in df.columns):
            # cause fractions: divide each cause's draws by all-cause (294)
            denom = df.ix[df.cause_id == 294].drop('cause_id', axis=1)
            if len(denom) > 0:
                mcols = list(set(denom.columns)-set(drawcols))
                pctdf = df.merge(denom, on=mcols, suffixes=('_num', '_dnm'))
                num = pctdf.filter(like="_num").values
                dnm = pctdf.filter(like="_dnm").values
                pctdf = pctdf.reset_index(drop=True)
                pctdf = pctdf.join(pd.DataFrame(
                    data=num/dnm, index=pctdf.index, columns=drawcols))
                pctdf = pctdf[mcols+['cause_id']+drawcols]
                pctdf['metric_id'] = 2
                df = pd.concat([df, pctdf])
        if calc_counts:
            # rate (3) * population -> count (1)
            popdf = df[df.metric_id == 3].merge(pop)
            popdf['metric_id'] = 1
            popdf.ix[:, drawcols] = (
                popdf[drawcols].values.T * popdf.pop_scaled.values).T
            popdf.drop('pop_scaled', axis=1, inplace=True)
            summary.append(get_estimates(popdf))
        summary.append(get_estimates(df))
        if y in change_years:
            change_df.append(df)
            if calc_counts:
                change_df.append(popdf)
    summary = pd.concat(summary)
    if change_intervals is not None:
        change_df = pd.concat(change_df)
        changesumms = []
        for ci in change_intervals:
            changedf = pct_change(change_df, ci[0], ci[1])
            changesumms.append(get_estimates(changedf))
        changesumms = pd.concat(changesumms)
        changesumms['median'] = changesumms['pct_change_means']
    else:
        changesumms = pd.DataFrame()
    return summary, changesumms
def slw(args):
    # Pool-friendly wrapper around summarize_location: Pool.map passes a
    # single argument, so ``args`` packs ((positional args...), {kwargs}).
    # Exceptions are deliberately swallowed (printed, None returned) so one
    # bad location does not kill the worker pool; callers filter the Nones.
    # (Python 2 except/print syntax — this module is py2-only.)
    try:
        s, cs = summarize_location(*args[0], **args[1])
        return s, cs
    except Exception, e:
        print args
        print e
        return None
def launch_summaries(
        model_version_id,
        env='dev',
        years=[1990, 1995, 2000, 2005, 2010, 2015],
        file_pattern='all_draws.h5',
        h5_tablename='draws'):
    """Summarize every location of an epi model version and write CSVs.

    Populates the module globals (pop, aw, ags) used by the per-location
    helpers, fans summarize_location out over a 10-process pool, then writes
    model_estimate_final.csv and (when present) change_summaries.csv into the
    model version's summaries directory.
    """
    global pop, aw, ags
    pop = get_pop()
    aw = get_age_weights()
    ags = get_age_spans()
    drawdir = '/ihme/epi/panda_cascade/%s/%s/full/draws' % (
        env, model_version_id)
    outdir = '/ihme/epi/panda_cascade/%s/%s/full/summaries' % (
        env, model_version_id)
    try:
        os.makedirs(outdir)
        os.chmod(outdir, 0o775)
        os.chmod(os.path.join(outdir, '..'), 0o775)
        os.chmod(os.path.join(outdir, '..', '..'), 0o775)
    except:
        # NOTE(review): bare except presumably guards "directory already
        # exists" but also hides permission errors — confirm acceptable.
        pass
    lt = dbtrees.loctree(None, location_set_id=35)
    locs = [l.id for l in lt.nodes]
    sg = super_gopher.SuperGopher({
        'file_pattern': file_pattern,
        'h5_tablename': h5_tablename},
        drawdir)
    pool = Pool(10)
    res = pool.map(slw, [(
        (l, drawdir, sg, years), {}) for l in locs])
    pool.close()
    pool.join()
    # slw returns None on failure; keep only successful (summary, change) pairs
    res = [r for r in res if isinstance(r, tuple)]
    res = zip(*res)
    summ = pd.concat([r for r in res[0] if r is not None])
    summ = summ[[
        'location_id', 'year_id', 'age_group_id', 'sex_id',
        'measure_id', 'mean', 'lower', 'upper']]
    summfile = "%s/model_estimate_final.csv" % outdir
    summ.to_csv(summfile, index=False)
    os.chmod(summfile, 0o775)
    csumm = pd.concat(res[1])
    if len(csumm) > 0:
        csumm = csumm[[
            'location_id', 'year_start', 'year_end', 'age_group_id', 'sex_id',
            'measure_id', 'median', 'lower', 'upper']]
        csummfile = "%s/change_summaries.csv" % outdir
        csumm.to_csv(csummfile, index=False)
        os.chmod(csummfile, 0o775)
def summ_lvl_meas(args):
    """Summarize one (drawdir, location, measure) combination.

    :param args: ``(drawdir, outdir, location_id, measure_id)`` tuple,
        packed this way so the function can be driven by Pool.map
    Writes ``{measure_id}_{location_id}_single_year.csv`` and, when change
    summaries exist, ``{measure_id}_{location_id}_multi_year.csv`` into
    ``outdir``.  All exceptions are caught and printed so that one bad
    combination does not kill the worker pool.
    """
    drawdir, outdir, location_id, measure_id = args
    try:
        os.makedirs(outdir)
        os.chmod(outdir, 0o775)
        os.chmod(os.path.join(outdir, '..'), 0o775)
        os.chmod(os.path.join(outdir, '..', '..'), 0o775)
    except OSError:
        # outdir already exists / chmod not permitted; safe to continue
        pass
    try:
        sg = super_gopher.SuperGopher({
            'file_pattern': '{measure_id}_{location_id}_{year_id}_{sex_id}.h5',
            'h5_tablename': 'draws'},
            drawdir)
        print('Combining summaries %s %s...' % (drawdir, measure_id))
        summ, csumm = summarize_location(
            location_id,
            drawdir,
            sg,
            change_intervals=[(2005, 2015), (1990, 2015), (1990, 2005)],
            combine_sexes=True,
            force_age=True,
            calc_counts=True,
            draw_filters={'measure_id': measure_id},
            gbd_compare_ags=True)
        # column selection / sort keys depend on which draw type lives in
        # drawdir (cause vs sequela vs rei outputs carry different id columns)
        if 'cause' in drawdir:
            summ = summ[[
                'location_id', 'year_id', 'age_group_id', 'sex_id',
                'measure_id', 'metric_id', 'cause_id', 'mean', 'lower',
                'upper']]
            summ = summ.sort_values([
                'measure_id', 'year_id', 'location_id', 'sex_id',
                'age_group_id', 'cause_id', 'metric_id'])
        elif 'sequela' in drawdir:
            summ = summ[[
                'location_id', 'year_id', 'age_group_id', 'sex_id',
                'measure_id', 'metric_id', 'sequela_id', 'mean', 'lower',
                'upper']]
            summ = summ.sort_values([
                'measure_id', 'year_id', 'location_id', 'sex_id',
                'age_group_id', 'sequela_id', 'metric_id'])
        elif 'rei' in drawdir:
            summ = summ[[
                'location_id', 'year_id', 'age_group_id', 'sex_id',
                'measure_id', 'metric_id', 'rei_id', 'cause_id', 'mean',
                'lower', 'upper']]
            summ = summ.sort_values([
                'measure_id', 'year_id', 'location_id', 'sex_id',
                'age_group_id', 'rei_id', 'cause_id', 'metric_id'])
        summfile = "%s/%s_%s_single_year.csv" % (
            outdir, measure_id, location_id)
        print('Writing to file...')
        summ = summ[summ['mean'].notnull()]
        summ.to_csv(summfile, index=False)
        os.chmod(summfile, 0o775)
        if len(csumm) > 0:
            if 'cause' in drawdir:
                csumm = csumm[[
                    'location_id', 'year_start_id', 'year_end_id',
                    'age_group_id', 'sex_id', 'measure_id', 'cause_id',
                    'metric_id', 'median', 'lower', 'upper']]
                csumm = csumm.sort_values([
                    'measure_id', 'year_start_id', 'year_end_id',
                    'location_id', 'sex_id', 'age_group_id', 'cause_id',
                    'metric_id'])
            elif 'sequela' in drawdir:
                csumm = csumm[[
                    'location_id', 'year_start_id', 'year_end_id',
                    'age_group_id', 'sex_id', 'measure_id', 'sequela_id',
                    'metric_id', 'median', 'lower', 'upper']]
                csumm = csumm.sort_values([
                    'measure_id', 'year_start_id', 'year_end_id',
                    'location_id', 'sex_id', 'age_group_id', 'sequela_id',
                    'metric_id'])
            elif 'rei' in drawdir:
                csumm = csumm[[
                    'location_id', 'year_start_id', 'year_end_id',
                    'age_group_id', 'sex_id', 'measure_id', 'rei_id',
                    'cause_id', 'metric_id', 'median', 'lower', 'upper']]
                csumm = csumm.sort_values([
                    'measure_id', 'year_start_id', 'year_end_id',
                    'location_id', 'sex_id', 'age_group_id', 'rei_id',
                    'cause_id', 'metric_id'])
            csummfile = "%s/%s_%s_multi_year.csv" % (
                outdir, measure_id, location_id)
            # pct-change medians/bounds can be NaN or +/-inf; drop those rows
            csumm = csumm[
                (csumm['median'].notnull()) & np.isfinite(csumm['median']) &
                (csumm['lower'].notnull()) & np.isfinite(csumm['lower']) &
                (csumm['upper'].notnull()) & np.isfinite(csumm['upper'])]
            csumm.to_csv(csummfile, index=False)
            os.chmod(csummfile, 0o775)
    except Exception as e:
        # best-effort worker: report and swallow so the pool keeps running
        print(e)
def launch_summaries_como(draw_out_dirmap, location_id):
    """Summarize COMO draws for one location across draw/output dir pairs.

    :param draw_out_dirmap: mapping {draw_dir: output_dir}; every pair is
        summarized for measures 3, 5, 6, 22, 23 and 24
    :param location_id: location to summarize
    """
    # pop/aw/ags are read by summarize_location through module globals
    global pop, aw, ags
    pop = get_pop({'location_id': location_id})
    aw = get_age_weights()
    ags = get_age_spans()
    # .items() works on both Python 2 and 3 (iteritems() is py2-only)
    arglist = [(d, o, location_id, measure_id)
               for d, o in draw_out_dirmap.items()
               for measure_id in [3, 5, 6, 22, 23, 24]]
    pool = Pool(len(draw_out_dirmap)*3)
    pool.map(summ_lvl_meas, arglist, chunksize=1)
    pool.close()
    pool.join()
| [
"nsidles@uw.edu"
] | nsidles@uw.edu |
67269e55398033362ab23e10f0576fc5aeae98ab | 2e1b5bd2d33f0beb965be77f1de2ae035c491125 | /chapter4/qt04_drag.py | f30b52e75694194da80bf8c948af65dfb20391a1 | [] | no_license | mandeling/PyQt5-1 | 1cf6778e767e5746640aa0458434751a226a2383 | 9334786e70b2657e0f94b6dad4714f2aa239d0cd | refs/heads/master | 2020-05-07T19:08:40.072960 | 2019-04-11T10:44:48 | 2019-04-11T10:44:48 | 180,799,901 | 1 | 0 | null | 2019-04-11T13:37:55 | 2019-04-11T13:37:55 | null | UTF-8 | Python | false | false | 887 | py | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class Combo(QComboBox):
    """A drop-enabled combo box: plain-text drags become new items."""

    def __init__(self, title, parent):
        # NOTE(review): `title` is accepted but never used; kept for
        # signature compatibility with existing callers.
        super(Combo, self).__init__(parent)
        self.setAcceptDrops(True)

    def dragEnterEvent(self, event):
        print(event)
        # only text payloads may be dropped here
        if not event.mimeData().hasText():
            event.ignore()
        else:
            event.accept()

    def dropEvent(self, event):
        # append the dragged text as a new entry
        self.addItem(event.mimeData().text())
class Example(QWidget):
    """Demo window: a draggable line edit next to a drop-enabled combo box."""

    def __init__(self):
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        form = QFormLayout()
        form.addRow(QLabel('请把左边的文本拖曳到右边的下拉菜单中'))
        line_edit = QLineEdit()
        line_edit.setDragEnabled(True)
        combo = Combo('Button', self)
        form.addRow(line_edit, combo)
        self.setLayout(form)
        self.setWindowTitle('简单的拖曳例子')
if __name__ == '__main__':
    # Script entry point: build the Qt application and show the demo window.
    application = QApplication(sys.argv)
    window = Example()
    window.show()
    sys.exit(application.exec())
| [
"sqw123az@sina.com"
] | sqw123az@sina.com |
1bb398a7369351058579f2038fcb6b751f225a55 | f6b3d08c885d6265a90b8323e9633cd1eae9d15d | /fog05/web_api.py | e0106d9fc772dc61ee4643cf731c81a9ccbcca32 | [
"Apache-2.0"
] | permissive | kartben/fog05 | ed3c3834c574c1e9d13aea8129fd251cea7d63a2 | b2f6557772c9feaf9d6a9ad5997b5c2206527c91 | refs/heads/master | 2020-03-18T21:21:27.633610 | 2018-05-23T09:25:50 | 2018-05-23T09:25:50 | 135,276,221 | 0 | 0 | null | 2018-05-29T09:54:48 | 2018-05-29T09:54:47 | null | UTF-8 | Python | false | false | 34,780 | py | from jsonschema import validate, ValidationError
from fog05 import Schemas
from dstore import Store
from enum import Enum
import re
import uuid
import json
import fnmatch
import time
import urllib3
import requests
class RESTStore(object):
    """Thin HTTP client for one tree (actual or desired) of the fog05 store.

    Every method builds an endpoint of the form
    ``http://<host>:<port>/<verb>/<uri>`` and returns the JSON-decoded body.
    """

    def __init__(self, root, host, port):
        self.root = root
        self.host = host
        self.port = port

    def get(self, uri):
        """GET ``uri`` and return the decoded JSON response."""
        url = "http://{}:{}/get/{}".format(self.host, self.port, uri)
        return json.loads(requests.get(url).text)

    def resolve(self, uri):
        """Alias of :meth:`get`."""
        return self.get(uri)

    def put(self, uri, value):
        """PUT ``value`` at ``uri`` and return the decoded JSON response."""
        url = "http://{}:{}/put/{}".format(self.host, self.port, uri)
        return json.loads(requests.put(url, data={'value': value}).text)

    def dput(self, uri, value=None):
        """Delta-put.

        When ``value`` is None it is rebuilt from the ``k=v&k2.x=v2``
        fragment that follows the last '#' in ``uri``.
        """
        if value is None:
            value = self.args2dict(uri.split('#')[-1])
        url = "http://{}:{}/dput/{}".format(self.host, self.port, uri)
        return json.loads(requests.patch(url, data={'value': value}).text)

    def getAll(self, uri):
        """Alias of :meth:`get` (the server side resolves wildcards)."""
        return self.get(uri)

    def resolveAll(self, uri):
        """Alias of :meth:`get` (the server side resolves wildcards)."""
        return self.get(uri)

    def remove(self, uri):
        """DELETE ``uri`` and return the decoded JSON response."""
        url = "http://{}:{}/remove/{}".format(self.host, self.port, uri)
        return json.loads(requests.delete(url).text)

    def dot2dict(self, dot_notation, value=None):
        """Expand ``'a.b.c'`` plus ``value`` into ``{'a': {'b': {'c': value}}}``.

        ``value`` must not be None in practice: the innermost level is only
        created when a value is supplied.
        """
        keys = dot_notation.split('.')
        total = len(keys)
        nested = []
        for position in range(total, 0, -1):
            if position == total and value is not None:
                nested.append({keys[position - 1]: value})
            else:
                nested.append({keys[position - 1]: nested[-1]})
        return nested[-1]

    def args2dict(self, values):
        """Parse ``'a=1&b.c=2'`` into ``{'a': '1', 'b': {'c': '2'}}``."""
        parsed = {}
        for pair in values.split('&'):
            key = pair.split('=')[0]
            val = pair.split('=')[-1]
            if '.' not in key:
                parsed.update({key: val})
            else:
                parsed.update(self.dot2dict(key, val))
        return parsed
class FOSRESTStore(object):
    """Helper pairing an 'actual' and a 'desired' RESTStore.

    The actual tree is rooted at ``aroot`` and the desired tree at
    ``droot``; both talk to the same host/port.
    """

    def __init__(self, host, port, aroot, droot):
        self.aroot = aroot
        self.droot = droot
        self.actual = RESTStore(aroot, host, port)
        self.desired = RESTStore(droot, host, port)

    def close(self):
        '''
        Close the store (no-op for the REST transport)

        :return: None
        '''
        return None
class WebAPI(object):
'''
This class allow the interaction with fog05 using simple Python3 API
Need the distributed store
'''
    def __init__(self, host, port, sysid=0, store_id="python-api-rest"):
        """
        :param host: hostname of the fog05 REST store service
        :param port: port of the fog05 REST store service
        :param sysid: system/tenant identifier used to build the store roots
        :param store_id: NOTE(review): currently unused in this constructor —
            confirm whether it was meant to be forwarded to the store
        """
        self.a_root = 'afos://{}'.format(sysid)
        self.d_root = 'dfos://{}'.format(sysid)
        # one shared actual/desired store pair feeds every sub-API below
        self.store = FOSRESTStore(host, port, self.a_root, self.d_root)
        self.manifest = self.Manifest(self.store)
        self.node = self.Node(self.store)
        self.plugin = self.Plugin(self.store)
        self.network = self.Network(self.store)
        self.entity = self.Entity(self.store)
        self.image = self.Image(self.store)
        self.flavor = self.Flavor(self.store)
    class Manifest(object):
        '''
        This class encapsulates API for manifests (the JSON descriptors of
        entities, networks, images, flavors and plugins).
        '''
        def __init__(self, store=None):
            # The store is kept for symmetry with the sibling sub-APIs;
            # check() itself only performs jsonschema validation.
            if store is None:
                raise RuntimeError('store cannot be none in API!')
            self.store = store
        def check(self, manifest, manifest_type):
            '''
            Check whether a manifest is written correctly by validating it
            against the jsonschema matching its type.

            :param manifest: a dictionary representing the JSON manifest
            :param manifest_type: the manifest type from API.Manifest.Type
            :return: boolean
            '''
            if manifest_type == self.Type.ENTITY:
                # validate the type-specific payload under 'entity_data'
                t = manifest.get('type')
                try:
                    if t == 'vm':
                        validate(manifest.get('entity_data'), Schemas.vm_schema)
                    elif t == 'container':
                        validate(manifest.get('entity_data'), Schemas.container_schema)
                    elif t == 'native':
                        validate(manifest.get('entity_data'), Schemas.native_schema)
                    elif t == 'ros2':
                        validate(manifest.get('entity_data'), Schemas.ros2_schema)
                    elif t == 'usvc':
                        # microservices are not supported yet
                        return False
                    else:
                        # unknown entity type
                        return False
                except ValidationError as ve:
                    return False
            if manifest_type == self.Type.NETWORK:
                try:
                    validate(manifest, Schemas.network_schema)
                except ValidationError as ve:
                    return False
            # NOTE(review): this repeats the ENTITY comparison above — here the
            # whole manifest is checked against the generic entity schema, on
            # top of the entity_data check. Possibly one of the two branches
            # was meant for a different Type member; confirm against Schemas.
            if manifest_type == self.Type.ENTITY:
                try:
                    validate(manifest, Schemas.entity_schema)
                except ValidationError as ve:
                    return False
            return True
        class Type(Enum):
            '''
            Manifest types (note: value 2 is unassigned)
            '''
            ENTITY = 0
            IMAGE = 1
            FLAVOR = 3
            NETWORK = 4
            PLUGIN = 5
class Node(object):
'''
This class encapsulates the command for Node interaction
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def list(self):
'''
Get all nodes in the current system/tenant
:return: list of tuples (uuid, hostname)
'''
nodes = []
uri = '{}/*'.format(self.store.aroot)
infos = self.store.actual.resolveAll(uri)
for i in infos:
if len(i[0].split('/')) == 4:
node_info = json.loads(i[1])
nodes.append((node_info.get('uuid'), node_info.get('name')))
return nodes
def info(self, node_uuid):
"""
Provide all information about a specific node
:param node_uuid: the uuid of the node you want info
:return: a dictionary with all information about the node
"""
if node_uuid is None:
return None
uri = '{}/{}'.format(self.store.aroot, node_uuid)
infos = self.store.actual.resolve(uri)
if infos is None:
return None
return json.loads(infos)
def plugins(self, node_uuid):
'''
Get the list of plugin installed on the specified node
:param node_uuid: the uuid of the node you want info
:return: a list of the plugins installed in the node with detailed informations
'''
uri = '{}/{}/plugins'.format(self.store.aroot, node_uuid)
response = self.store.actual.get(uri)
if response is not None:
return json.loads(response).get('plugins')
else:
return None
def search(self, search_dict):
'''
Will search for a node that match information provided in the parameter
:param search_dict: dictionary contains all information to match
:return: a list of node matching the dictionary
'''
pass
class Plugin(object):
'''
This class encapsulates the commands for Plugin interaction
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
Add a plugin to a node or to all node in the system/tenant
:param manifest: the dictionary representing the plugin manifes
:param node_uuid: optional the node in which add the plugin
:return: boolean
'''
manifest.update({'status':'add'})
plugins = {"plugins": [manifest]}
plugins = json.dumps(plugins).replace(' ', '')
if node_uuid is None:
uri = '{}/*/plugins'.format(self.store.droot)
else:
uri = '{}/{}/plugins'.format(self.store.droot, node_uuid)
res = self.store.desired.dput(uri, plugins)
if res:
return True
else:
return False
def remove(self, plugin_uuid, node_uuid=None):
'''
Will remove a plugin for a node or all nodes
:param plugin_uuid: the plugin you want to remove
:param node_uuid: optional the node that will remove the plugin
:return: boolean
'''
pass
def list(self, node_uuid=None):
'''
Same as API.Node.Plugins but can work for all node un the system, return a dictionary with key node uuid and value the plugin list
:param node_uuid: can be none
:return: dictionary {node_uuid, plugin list }
'''
if node_uuid is not None:
uri = '{}/{}/plugins'.format(self.store.aroot, node_uuid)
response = self.store.actual.get(uri)
if response is not None:
return {node_uuid:json.loads(response).get('plugins')}
else:
return None
plugins = {}
uri = '{}/*/plugins'.format(self.store.aroot)
response = self.store.actual.resolveAll(uri)
for i in response:
id = i[0].split('/')[2]
pl = json.loads(i[1]).get('plugins')
plugins.update({id: pl})
return plugins
def search(self, search_dict, node_uuid=None):
'''
Will search for a plugin matching the dictionary in a single node or in all nodes
:param search_dict: dictionary contains all information to match
:param node_uuid: optional node uuid in which search
:return: a dictionary with {node_uuid, plugin uuid list} with matches
'''
pass
class Network(object):
'''
This class encapsulates the command for Network element interaction
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
Add a network element to a node o to all nodes
:param manifest: dictionary representing the manifest of that network element
:param node_uuid: optional the node uuid in which add the network element
:return: boolean
'''
manifest.update({'status': 'add'})
json_data = json.dumps(manifest).replace(' ', '')
if node_uuid is not None:
uri = '{}/{}/network/*/networks/{}'.format(self.store.droot, node_uuid, manifest.get('uuid'))
else:
uri = '{}/*/network/*/networks/{}'.format(self.store.droot, manifest.get('uuid'))
res = self.store.desired.put(uri, json_data)
if res:
return True
else:
return False
def remove(self, net_uuid, node_uuid=None):
'''
Remove a network element form one or all nodes
:param net_uuid: uuid of the network you want to remove
:param node_uuid: optional node from which remove the network element
:return: boolean
'''
if node_uuid is not None:
uri = '{}/{}/network/*/networks/{}'.format(self.store.droot, node_uuid, net_uuid)
else:
uri = '{}/*/network/*/networks/{}'.format(self.store.droot, net_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def list(self, node_uuid=None):
'''
List all network element available in the system/teneant or in a specified node
:param node_uuid: optional node uuid
:return: dictionary {node uuid: network element list}
'''
if node_uuid is not None:
n_list = []
uri = '{}/{}/network/*/networks/'.format(self.store.aroot, node_uuid)
response = self.store.actual.resolveAll(uri)
for i in response:
n_list.append(json.loads(i[1]))
return {node_uuid: n_list}
nets = {}
uri = '{}/*/network/*/networks/'.format(self.store.aroot)
response = self.store.actual.resolveAll(uri)
for i in response:
id = i[0].split('/')[2]
net = json.loads(i[1])
net_list = nets.get(id, None)
if net_list is None:
net_list = []
net_list.append(net)
nets.update({id: net_list})
return nets
def search(self, search_dict, node_uuid=None):
'''
Will search for a network element matching the dictionary in a single node or in all nodes
:param search_dict: dictionary contains all information to match
:param node_uuid: optional node uuid in which search
:return: a dictionary with {node_uuid, network element uuid list} with matches
'''
pass
class Entity(object):
'''
This class encapsulates the api for interaction with entities
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def __search_plugin_by_name(self, name, node_uuid):
uri = '{}/{}/plugins'.format(self.store.aroot, node_uuid)
all_plugins = self.store.actual.get(uri)
if all_plugins is None or all_plugins == '':
print('Cannot get plugin')
return None
all_plugins = json.loads(all_plugins).get('plugins')
search = [x for x in all_plugins if name.upper() in x.get('name').upper()]
if len(search) == 0:
return None
else:
print("handler {}".format(search))
return search[0]
def __get_entity_handler_by_uuid(self, node_uuid, entity_uuid):
uri = '{}/{}/runtime/*/entity/{}'.format(self.store.aroot, node_uuid, entity_uuid)
all = self.store.actual.resolveAll(uri)
for i in all:
k = i[0]
if fnmatch.fnmatch(k, uri):
# print('MATCH {0}'.format(k))
# print('Extracting uuid...')
regex = uri.replace('/', '\/')
regex = regex.replace('*', '(.*)')
reobj = re.compile(regex)
mobj = reobj.match(k)
uuid = mobj.group(1)
# print('UUID {0}'.format(uuid))
print("handler {}".format(uuid))
return uuid
def __get_entity_handler_by_type(self, node_uuid, t):
handler = None
handler = self.__search_plugin_by_name(t, node_uuid)
if handler is None:
print('type not yet supported')
print("handler {}".format(handler))
return handler
def __wait_atomic_entity_state_change(self, node_uuid, handler_uuid, entity_uuid, state):
while True:
time.sleep(1)
uri = '{}/{}/runtime/{}/entity/{}'.format(self.store.aroot, node_uuid, handler_uuid, entity_uuid)
data = self.store.actual.get(uri)
if data is not None:
entity_info = json.loads(data)
if entity_info is not None and entity_info.get('status') == state:
return
def __wait_atomic_entity_instance_state_change(self, node_uuid, handler_uuid, entity_uuid, instance_uuid, state):
while True:
time.sleep(1)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.aroot, node_uuid, handler_uuid, entity_uuid, instance_uuid)
data = self.store.actual.get(uri)
if data is not None:
entity_info = json.loads(data)
if entity_info is not None and entity_info.get('status') == state:
return
def add(self, manifest, node_uuid=None, wait=False):
'''
define, configure and run an entity all in one shot
:param manifest: manifest rapresenting the entity
:param node_uuid: optional uuid of the node in which the entity will be added
:param wait: flag for wait that everything is started before returing
:return: the instance uuid
'''
pass
def remove(self, entity_uuid, node_uuid=None, wait=False):
'''
stop, clean and undefine entity all in one shot
:param entity_uuid:
:param node_uuid:
:param wait:
:return: the instance uuid
'''
pass
def define(self, manifest, node_uuid, wait=False):
'''
Defines an atomic entity in a node, this method will check the manifest before sending the definition to the node
:param manifest: dictionary representing the atomic entity manifest
:param node_uuid: destination node uuid
:param wait: if wait that the definition is complete before returning
:return: boolean
'''
manifest.update({'status': 'define'})
handler = None
t = manifest.get('type')
try:
if t in ['kvm', 'xen']:
handler = self.__search_plugin_by_name(t, node_uuid)
validate(manifest.get('entity_data'), Schemas.vm_schema)
elif t in ['container', 'lxd']:
handler = self.__search_plugin_by_name(t, node_uuid)
validate(manifest.get('entity_data'), Schemas.container_schema)
elif t == 'native':
handler = self.__search_plugin_by_name('native', node_uuid)
validate(manifest.get('entity_data'), Schemas.native_schema)
elif t == 'ros2':
handler = self.__search_plugin_by_name('ros2', node_uuid)
validate(manifest.get('entity_data'), Schemas.ros2_schema)
elif t == 'usvc':
print('microservice not yet')
else:
print('type not recognized')
if handler is None:
return False
except ValidationError as ve:
print("Error in manifest {}".format(ve))
return False
entity_uuid = manifest.get('uuid')
entity_definition = manifest
json_data = json.dumps(entity_definition).replace(' ', '')
uri = '{}/{}/runtime/{}/entity/{}'.format(self.store.droot, node_uuid, handler.get('uuid'), entity_uuid)
res = self.store.desired.put(uri, json_data)
if res:
if wait:
self.__wait_atomic_entity_state_change(node_uuid,handler.get('uuid'), entity_uuid, 'defined')
return True
else:
return False
def undefine(self, entity_uuid, node_uuid, wait=False):
'''
This method undefine an atomic entity in a node
:param entity_uuid: atomic entity you want to undefine
:param node_uuid: destination node
:param wait: if wait before returning that the entity is undefined
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}'.format(self.store.droot, node_uuid, handler, entity_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def configure(self, entity_uuid, node_uuid, instance_uuid=None, wait=False):
'''
Configure an atomic entity, creation of the instance
:param entity_uuid: entity you want to configure
:param node_uuid: destination node
:param instance_uuid: optional if preset will use that uuid for the atomic entity instance otherwise will generate a new one
:param wait: optional wait before returning
:return: intstance uuid or none in case of error
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
if instance_uuid is None:
instance_uuid = '{}'.format(uuid.uuid4())
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=configure'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'configured')
return instance_uuid
else:
return None
def clean(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
Clean an atomic entity instance, this will destroy the instance
:param entity_uuid: entity for which you want to clean an instance
:param node_uuid: destionation node
:param instance_uuid: instance you want to clean
:param wait: optional wait before returning
:return: boolean
'''
handler = yield from self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.aroot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def run(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
Starting and atomic entity instance
:param entity_uuid: entity for which you want to run the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to start
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=run'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'run')
return True
else:
return False
def stop(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
Shutting down an atomic entity instance
:param entity_uuid: entity for which you want to shutdown the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to shutdown
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=stop'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'stop')
return True
else:
return False
def pause(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
Pause the exectution of an atomic entity instance
:param entity_uuid: entity for which you want to pause the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to pause
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=pause'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'pause')
return True
else:
return False
def resume(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
resume the exectution of an atomic entity instance
:param entity_uuid: entity for which you want to resume the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to resume
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=resume'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'run')
return True
else:
return False
def migrate(self, entity_uuid, instance_uuid, node_uuid, destination_node_uuid, wait=False):
'''
Live migrate an atomic entity instance between two nodes
The migration is issued when this command is sended, there is a little overhead for the copy of the base image and the disk image
:param entity_uuid: ntity for which you want to migrate the instance
:param instance_uuid: instance you want to migrate
:param node_uuid: source node for the instance
:param destination_node_uuid: destination node for the instance
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.aroot, node_uuid, handler, entity_uuid, instance_uuid)
entity_info = self.store.actual.get(uri)
if entity_info is None:
return False
entity_info = json.loads(entity_info)
entity_info_src = entity_info.copy()
entity_info_dst = entity_info.copy()
entity_info_src.update({"status": "taking_off"})
entity_info_src.update({"dst": destination_node_uuid})
entity_info_dst.update({"status": "landing"})
entity_info_dst.update({"dst": destination_node_uuid})
destination_handler = self.__get_entity_handler_by_type(destination_node_uuid, entity_info_dst.get('type'))
if destination_handler is None:
return False
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.droot, destination_node_uuid, destination_handler.get('uuid'), entity_uuid, instance_uuid)
res = self.store.desired.put(uri, json.dumps(entity_info_dst).replace(' ', ''))
if res:
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res_dest = yield from self.store.desired.dput(uri, json.dumps(entity_info_src).replace(' ', ''))
if res_dest:
if wait:
self.__wait_atomic_entity_instance_state_change(destination_node_uuid, destination_handler.get('uuid'), entity_uuid, instance_uuid, 'run')
return True
else:
print("Error on destination node")
return False
else:
print("Error on source node")
return False
def search(self, search_dict, node_uuid=None):
pass
class Image(object):
'''
This class encapsulates the action on images
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
Adding an image to a node or to all nodes
:param manifest: dictionary representing the manifest for the image
:param node_uuid: optional node in which add the image
:return: boolean
'''
manifest.update({'status': 'add'})
json_data = json.dumps(manifest).replace(' ', '')
if node_uuid is None:
uri = '{}/*/runtime/*/image/{}'.format(self.store.droot, manifest.get('uuid'))
else:
uri = '{}/{}/runtime/*/image/{}'.format(self.store.droot, node_uuid, manifest.get('uuid'))
res = self.store.desired.put(uri, json_data)
if res:
return True
else:
return False
def remove(self, image_uuid, node_uuid=None):
'''
remove an image for a node or all nodes
:param image_uuid: image you want to remove
:param node_uuid: optional node from which remove the image
:return: boolean
'''
if node_uuid is None:
uri = '{}/*/runtime/*/image/{}'.format(self.store.droot, image_uuid)
else:
uri = '{}/{}/runtime/*/image/{}'.format(self.store.droot, node_uuid, image_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def search(self, search_dict, node_uuid=None):
pass
class Flavor(object):
'''
This class encapsulates the action on flavors
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
Add a computing flavor to a node or all nodes
:param manifest: dictionary representing the manifest for the flavor
:param node_uuid: optional node in which add the flavor
:return: boolean
'''
manifest.update({'status': 'add'})
json_data = json.dumps(manifest).replace(' ', '')
if node_uuid is None:
uri = '{}/*/runtime/*/flavor/{}'.format(self.store.droot, manifest.get('uuid'))
else:
uri = '{}/{}/runtime/*/flavor/{}'.format(self.store.droot, node_uuid, manifest.get('uuid'))
res = self.store.desired.put(uri, json_data)
if res:
return True
else:
return False
def remove(self, flavor_uuid, node_uuid=None):
'''
Remove a flavor from all nodes or a specified node
:param flavor_uuid: flavor to remove
:param node_uuid: optional node from which remove the flavor
:return: boolean
'''
if node_uuid is None:
uri = '{}/*/runtime/*/flavor/{}'.format(self.store.droot, flavor_uuid)
else:
uri = '{}/{}/runtime/*/flavor/{}'.format(self.store.droot, node_uuid, flavor_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def search(self, search_dict, node_uuid=None):
pass
def list(self, node_uuid=None):
'''
List all network element available in the system/teneant or in a specified node
:param node_uuid: optional node uuid
:return: dictionary {node uuid: network element list}
'''
if node_uuid is not None:
f_list = []
uri = '{}/{}/runtime/*/flavor/'.format(self.store.aroot, node_uuid)
response = self.store.actual.resolveAll(uri)
for i in response:
f_list.append(json.loads(i[1]))
return {node_uuid: f_list}
flavs = {}
uri = '{}/*/runtime/*/flavor/'.format(self.store.aroot)
response = self.store.actual.resolveAll(uri)
for i in response:
id = i[0].split('/')[2]
net = json.loads(i[1])
flavs_list = flavs.get(id, None)
if flavs_list is None:
flavs_list = []
flavs_list.append(net)
flavs.update({id: flavs_list})
return flavs
'''
Methods
- manifest
-check
- node
- list
- info
- plugins
- search
- plugin
- add
- remove
- info
- list
- search
- network
- add
- remove
- list
- search
- entity
- add
- remove
- define
- undefine
- configure
- clean
- run
- stop
- pause
- resume
- migrate
- search
- images
- add
- remove
- search
- flavor
- add
- remove
- search
'''
| [
"gabriele.baldoni@gmail.com"
] | gabriele.baldoni@gmail.com |
c8de9587cfa328523bcd043b6d6c474b118c7a35 | cd8f75fab354c46b25f483f7d439e1f59275c585 | /Tareas/T04/t04.py | ed4def3d5ee0bcacdc8d16bbefb1516713e29f58 | [] | no_license | lechodiman/iic2233-2017-1 | 34efb8406f0cd4e6dae3ff088ab18f52cfd2693f | 4eeb63cb83712927117acaa353772de0eafea9f2 | refs/heads/master | 2022-12-12T15:19:51.597453 | 2018-02-11T00:55:38 | 2018-02-11T00:55:38 | 121,068,729 | 1 | 0 | null | 2022-12-08T00:02:08 | 2018-02-11T00:53:03 | Python | UTF-8 | Python | false | false | 36,901 | py | from random import randrange, choice, random, expovariate, uniform, triangular
from my_random import weighted_choice
import csv
from bernoulli_event import bernoulli
import matplotlib.pyplot as plt
class AdvancedProgramming:
    '''
    Class to control the course. It controls all the events in a semester.
    Pending events live in ``events_list`` as tuples ``(name, day[, payload])``
    and are processed chronologically by :meth:`run`.
    '''
    # Per-week difficulty of the contents; index == week number (0-11).
    dificulty = [2, 2, 3, 5, 7, 10, 7, 9, 1, 6, 6, 5]

    def __init__(self, percentage_progress_tarea_mail, month_party, month_football):
        self.integrantes_filename = 'integrantes.csv'
        self.sections = {}
        self.coordinator = None
        self.teacher_assistants = []
        self.task_assistants = []
        self.percentaje_progress_tarea_mail = percentage_progress_tarea_mail
        self.month_party = month_party
        self.month_football = month_football
        self.events_list = []
        self.corte_agua_days = []
        self.football_days = []
        self.tareas_publication_days = []
        self.controles_days = []
        self.harder_tarea = False
        self.fechas_tareas = []
        self.fechas_publicacion_notas_act = []
        self.fechas_ayudantias = []
        self.fechas_catedras = []

    @property
    def controles_weeks(self):
        return [int(day / 7) for day in self.controles_days]

    @property
    def corte_agua_weeks(self):
        return [int(day / 7) for day in self.corte_agua_days]

    @property
    def all_students(self):
        everyone = []
        for section in self.sections.values():
            everyone += section.students
        return everyone

    @property
    def active_students(self):
        '''Returns a list with the current active students '''
        return [student for student in self.all_students if student.active]

    @property
    def dataframe_proms(self):
        '''Returns a dictionary with all the average scores per evaluation.

        Weeks with no grades get the string 'NaN' as a placeholder.
        '''
        df = {'MATERIA': [i for i in range(12)]}
        df['PROM CONTROLES'] = []
        df['PROM ACT'] = []
        df['PROM TAREAS'] = []
        for week in range(0, 12):
            notas_controles = [student.notas_controles[week] for student in self.active_students if week in student.notas_controles]
            notas_actividades = [student.notas_act[week] for student in self.active_students if week in student.notas_act]
            notas_tareas = [student.notas_tareas[week] for student in self.active_students if week in student.notas_tareas]
            try:
                control_avg = sum(notas_controles) / len(notas_controles)
            except ZeroDivisionError:
                control_avg = 'NaN'
            try:
                act_avg = sum(notas_actividades) / len(notas_actividades)
            except ZeroDivisionError:
                act_avg = 'NaN'
            try:
                tareas_avg = sum(notas_tareas) / len(notas_tareas)
            except ZeroDivisionError:
                tareas_avg = 'NaN'
            df['PROM CONTROLES'].append(control_avg)
            df['PROM ACT'].append(act_avg)
            df['PROM TAREAS'].append(tareas_avg)
        return df

    @property
    def list_tuples_prom(self):
        '''Returns a list of tuples with : (materia_n, promedio_n) '''
        # Compute the frame once: the property rebuilds it on every access,
        # so the original's repeated self.dataframe_proms lookups were O(n^2).
        df = self.dataframe_proms
        list_tuples = []
        for row in df['MATERIA']:
            Sum = 0
            n = 0
            if df['PROM CONTROLES'][row] != 'NaN':
                Sum += df['PROM CONTROLES'][row]
                n += 1
            if df['PROM ACT'][row] != 'NaN':
                Sum += df['PROM ACT'][row]
                n += 1
            if df['PROM TAREAS'][row] != 'NaN':
                Sum += df['PROM TAREAS'][row]
                n += 1
            # Guard: a subject with no grades at all averages to 0 instead of
            # raising ZeroDivisionError.
            avg = Sum / n if n else 0
            list_tuples.append((row, avg))
        # Bug fix: the original never returned, so simulate_exam crashed on
        # sorted(None, ...).
        return list_tuples

    def add_person(self, person, section_number):
        '''Adds a person depending on their rol '''
        if section_number == '':
            # Staff without a section: coordinator and the two assistant pools.
            if isinstance(person, Coordinator):
                self.coordinator = person
            elif isinstance(person, TeacherAssistant):
                if person not in self.teacher_assistants:
                    self.teacher_assistants.append(person)
            elif isinstance(person, TaskAssistant):
                if person not in self.task_assistants:
                    self.task_assistants.append(person)
        else:
            # Sections are created lazily on first member.
            if section_number in self.sections:
                section = self.sections[section_number]
            else:
                section = Section(section_number)
                self.sections[section_number] = section
            if isinstance(person, Student):
                section.add_student(person)
            elif isinstance(person, Proffessor):
                section.add_proffesor(person)

    def simulate_meeting_day(self, time_day):
        '''It simulates a meeting day with a proffesor '''
        print('[{}] Dia de atencion de profesores'.format(time_day))
        time_week = int(time_day / 7)
        for section in self.sections.values():
            profe = section.proffesor
            for student in section.students:
                student.visit_proffesor(time_day, profe)
            # A water cut reduces how many students a professor can attend.
            if time_week in self.corte_agua_weeks:
                capacity = 6
            else:
                capacity = 10
            profe.atender_students(time_day, capacity)
        self.events_list.append(('meeting day', time_day + 7))

    def simulate_corte_agua(self, time_day):
        '''Simulates a corte de agua. Adds the date to a list, so the teacher's capacity is limited '''
        self.events_list.append(('corte agua', int(time_day + expovariate(1 / 21))))
        time_week = int(time_day / 7)
        # At most one water cut per week counts.
        if len(self.corte_agua_weeks) != 0:
            if time_week == self.corte_agua_weeks[-1]:
                return
        print('[{}] Hubo corte de agua'.format(time_day))
        self.corte_agua_days.append(time_day)

    def simulate_party(self, time_day):
        '''Simulates party '''
        self.events_list.append(('party', int(time_day + expovariate(1 / 30))))
        went_to_this_party = []
        for i in range(min(len(self.active_students), 50)):
            # Bug fix: the original called go_to_party *before* enforcing
            # uniqueness, so repeated picks were registered as partying while
            # the replacement (unique) student was counted but never partied.
            s = choice(self.active_students)
            while s in went_to_this_party:
                s = choice(self.active_students)
            s.go_to_party(time_day)
            went_to_this_party.append(s)
        print('[{}] Hubo una fiesta. Fueron {} alumnos'.format(time_day, min(len(self.active_students), 50)))

    def simulate_football(self, time_day):
        '''Simulates a football event. Changes the harder tarea atribute to true '''
        self.events_list.append(('football', int(time_day + expovariate(1 / 70))))
        n_students = round(0.8 * len(self.active_students))
        for i in range(n_students):
            student = choice(self.active_students)
            student.watch_football(time_day)
        self.harder_tarea = True
        self.football_days.append(time_day)
        print('[{}] Hubo una partido de football. Fueron {} alumnos'.format(time_day, n_students))

    def simulate_actividad(self, time_day):
        '''Simulates an activity and add the publication event to the list '''
        time_week = int(time_day / 7)
        exigencia = 7 + randrange(1, 6) / AdvancedProgramming.dificulty[time_week]
        notas_act = {}
        for student in self.active_students:
            nota = student.rendir_evaluacion(time_day, 'actividad', exigencia)
            notas_act[student.id] = nota
        self.events_list.append(('entrega notas actividad', time_day + 14, notas_act))
        print('[{}] Hubo una actividad. Fueron {} alumnos'.format(time_day, len(self.active_students)))

    def simulate_control(self, time_day):
        '''Simulates a control and gets the grades. These grades will be published in their corresponding time '''
        time_week = int(time_day / 7)
        # No two controles on consecutive weeks, and at most 6 per semester.
        if len(self.controles_weeks) != 0:
            if time_week == self.controles_weeks[-1] + 1:
                return
        if len(self.controles_days) > 5:
            return
        self.controles_days.append(time_day)
        exigencia = 7 + randrange(1, 6) / AdvancedProgramming.dificulty[time_week]
        notas_controles = {}
        for student in self.active_students:
            nota = student.rendir_evaluacion(time_day, 'control', exigencia)
            notas_controles[student.id] = nota
        self.events_list.append(('entrega notas control', time_day + 14, notas_controles))
        print('[{}] Hubo una control. Fueron {} alumnos'.format(time_day, len(self.active_students)))

    def simulate_exam(self, time_day):
        '''Simulates the exam and adds the grades publication event to the events list '''
        materias_ordenadas = [tup[0] for tup in sorted(self.list_tuples_prom, key=lambda x: x[1])]
        # The 6 weakest subjects plus the 2 strongest. The original slice
        # [:-2] selected *all but* the last two (duplicating the weakest six),
        # which was clearly unintended.
        materias_to_evaluate = materias_ordenadas[0: 6] + materias_ordenadas[-2:]
        exigencias = [7 + uniform(1, 5) / AdvancedProgramming.dificulty[i] for i in materias_to_evaluate]
        notas_exam = {}
        for student in self.active_students:
            nota = student.rendir_examen(time_day, materias_to_evaluate, exigencias)
            notas_exam[student.id] = nota
        # Bug fix: list.append takes a single argument; the original passed
        # three and raised TypeError.
        self.events_list.append(('entrega notas examen', time_day + 14, notas_exam))
        print('[{}] Hubo un examen. Fueron {} alumnos'.format(time_day, len(self.active_students)))

    def simulate_realizar_tarea(self, time_day, exigencia):
        '''Simulates the submit of a tarea. It does not include the sending mails process '''
        if len(self.fechas_tareas) >= 5:
            return
        notas_tareas = {}
        fecha_publicacion = self.fechas_tareas[-1]
        for student in self.active_students:
            nota = student.rendir_evaluacion(time_day, 'tarea', exigencia, fecha_publicacion)
            notas_tareas[student.id] = nota
        self.events_list.append(('entrega notas tareas', time_day + 14, notas_tareas))
        print('[{}] Los alumnos subieron su tarea {}. Fueron {} alumnos'.format(time_day, len(self.fechas_tareas) - 1, len(self.active_students)))

    def publicar_notas(self, time_day, notas, eval_name):
        '''It sets the grades on the active students.

        ``notas`` maps student id -> grade; ``eval_name`` selects which grade
        book the values are written into.
        '''
        time_week = int(time_day / 7)
        for i, nota in notas.items():
            student = [s for s in self.all_students if s.id == i].pop()
            if eval_name == 'actividad':
                student.notas_act[time_week] = nota
                student.update_confidence(time_day, n_a=nota)
            elif eval_name == 'control':
                student.notas_controles[time_week] = nota
                student.update_confidence(time_day, n_c=nota)
            elif eval_name == 'examen':
                # Bug fix: Student defines `notas_examen`; writing to
                # `notas_exam` raised AttributeError.
                student.notas_examen[time_week] = nota
            elif eval_name == 'tarea':
                student.notas_tareas[time_week - 2] = nota
                student.update_confidence(time_day, n_t=nota)
        if eval_name == 'actividad':
            self.fechas_publicacion_notas_act.append(time_day)
            # After the 4th actividad's grades, students may drop the course.
            if len(self.fechas_publicacion_notas_act) == 4:
                self.simulate_bota_ramos(time_day)
        print('[{}] Se publicaron notas de {}. Fueron {} alumnos'.format(time_day, eval_name, len(self.active_students)))

    def simulate_publicacion_tarea(self, time_day):
        '''Simulates the meeting to set the exigencia and then it publishes the tarea. '''
        if len(self.fechas_tareas) >= 5:
            return
        time_week = int(time_day / 7)
        exigencia = 7 + randrange(1, 6) / AdvancedProgramming.dificulty[time_week]
        if self.harder_tarea:
            # NOTE(review): squaring (not doubling) the exigencia looks harsh
            # but matches the original behavior — confirm against the spec.
            exigencia *= exigencia
            self.harder_tarea = False
        self.fechas_tareas.append(time_day)
        self.events_list.append(('publicacion tarea', time_day + 14))
        self.events_list.append(('realizar tarea', time_day + 14, exigencia))
        print('[{}] Se publica la tarea {}. Fueron {} alumnos'.format(time_day, len(self.fechas_tareas), len(self.active_students)))

    def simulate_catedra(self, time_day):
        '''Simulate a catedra. First, updates the programming level, then simulates a control, then the tips and finally the actividad '''
        if len(self.fechas_catedras) >= 12:
            return
        self.update_programming_level(time_day)
        self.fechas_catedras.append(time_day)
        time_week = int(time_day / 7)
        if time_week <= 11:
            self.events_list.append(('catedra', time_day + 7))
        # 50% chance of a pop-quiz control this catedra.
        if bool(bernoulli(0.5)):
            self.simulate_control(time_day)
        for student in self.active_students:
            if bool(bernoulli(0.5)):
                student.listen_tip(time_day)
        # Around 600 questions are answered per catedra, distributed randomly.
        i = 0
        while i <= 600:
            student = choice(self.active_students)
            n_questions = round(triangular(1, 10, 3))
            i += n_questions
            student.ask_questions(time_day, n_questions)
        self.simulate_actividad(time_day)
        print('[{}] Hubo una catedra. Fueron {} alumnos'.format(time_day, len(self.active_students)))

    def simulate_ayudantia(self, time_day):
        '''Checks if the ayudante is pro in a subject. If it is, then gives tip to everyone. '''
        if len(self.fechas_ayudantias) >= 12:
            return
        self.fechas_ayudantias.append(time_day)
        time_week = int(time_day / 7)
        ayudantes_today = [choice(self.teacher_assistants) for i in range(2)]
        ayu_1 = ayudantes_today[0]
        ayu_2 = ayudantes_today[1]
        # Sections 1 and 3 share the first assistant; section 2 gets the second.
        if time_week in ayu_1.skilled_subjects:
            for student in self.sections['1'].students + self.sections['3'].students:
                student.listen_ayudantia(time_day)
        if time_week in ayu_2.skilled_subjects:
            for student in self.sections['2'].students:
                student.listen_ayudantia(time_day)
        self.events_list.append(('ayudantia', time_day + 7))
        print('[{}] Hubo una ayudantia. Fueron {} alumnos'.format(time_day, len(self.active_students)))

    def simulate_bota_ramos(self, time_day):
        '''Simulates the bota de ramos event. But the s value was changed. '''
        n = 0
        for student in self.active_students:
            # Weighted score: mostly confidence, a bit of grades.
            s = student.confidence * 0.8 + student.promedio * 0.2
            if s < 2:
                student.active = False
                n += 1
        print('[{}] Hubo una bota de ramos. Botaron {} alumnos'.format(time_day, n))

    def update_programming_level(self, time_day):
        '''Updates programming level of every student '''
        for student in self.active_students:
            student.update_programming_level(time_day)

    def run(self):
        '''Run the simuation. It starts with some base events. '''
        time_day = -1
        self.events_list.append(('ayudantia', time_day + 5))
        self.events_list.append(('meeting day', time_day + 6))
        self.events_list.append(('catedra', time_day + 7))
        self.events_list.append(('publicacion tarea', time_day + 14))
        self.events_list.append(('football', int(time_day + expovariate(1 / 70))))
        self.events_list.append(('party', int(time_day + expovariate(1 / 30))))
        self.events_list.append(('corte agua', int(time_day + expovariate(1 / 21))))
        self.events_list.sort(key=lambda x: x[1])
        while len(self.events_list) != 0:
            event_tuple = self.events_list[0]
            self.events_list = self.events_list[1:]
            event = event_tuple[0]
            time_day = event_tuple[1]
            if event == 'catedra':
                self.simulate_catedra(time_day)
            elif event == 'ayudantia':
                self.simulate_ayudantia(time_day)
            elif event == 'meeting day':
                self.simulate_meeting_day(time_day)
            elif event == 'football':
                self.simulate_football(time_day)
            elif event == 'party':
                self.simulate_party(time_day)
            elif event == 'corte agua':
                self.simulate_corte_agua(time_day)
            elif event == 'entrega notas actividad':
                notas = event_tuple[2]
                self.publicar_notas(time_day, notas, 'actividad')
            elif event == 'entrega notas control':
                notas = event_tuple[2]
                self.publicar_notas(time_day, notas, 'control')
            elif event == 'entrega notas examen':
                notas = event_tuple[2]
                self.publicar_notas(time_day, notas, 'examen')
                print('FIN SIMULACION')
                break
            elif event == 'entrega notas tareas':
                notas = event_tuple[2]
                self.publicar_notas(time_day, notas, 'tarea')
            elif event == 'realizar tarea':
                exigencia = event_tuple[2]
                self.simulate_realizar_tarea(time_day, exigencia)
            elif event == 'publicacion tarea':
                self.simulate_publicacion_tarea(time_day)
            # Schedule the exam once all pending grade deliveries are out and
            # the teaching weeks are over.
            pending = [i[0] for i in self.events_list]
            if 'entrega notas control' not in pending and\
                    'entrega notas actividad' not in pending and\
                    'entrega notas tareas' not in pending and\
                    'entrega notas examen' not in pending and\
                    time_day > 80:
                # Bug fix: the original did not check for a pending exam
                # delivery, so it kept scheduling new exams every iteration.
                self.simulate_exam(time_day + 5)
            self.events_list.sort(key=lambda x: x[1])
class Section:
    '''One course section: its roster of students plus a single professor.'''

    def __init__(self, section_number):
        self.section_number = section_number
        self.proffesor = None
        self.students = []

    def add_student(self, student):
        '''Register a student; duplicates are silently ignored.'''
        if student in self.students:
            return
        self.students.append(student)

    def add_proffesor(self, proffesor):
        '''Assign the professor only while the slot is still vacant.'''
        if self.proffesor is None:
            self.proffesor = proffesor
class Person:
    '''Base class for everyone in the course: just a printable name.'''

    def __init__(self, name):
        self.name = name

    def __str__(self):
        '''Printing a person shows their name.'''
        return self.name
class Coordinator(Person):
    '''Course coordinator (Mavrakis); no extra state beyond the name.'''

    def __init__(self, name):
        super().__init__(name)
class Proffessor(Person):
    '''Professor in charge of a section.

    ``cola`` is the queue of students waiting to be attended on meeting days.
    '''

    def __init__(self, name):
        super().__init__(name)
        self.cola = []

    def atender_students(self, time_day, capacity):
        '''Attend up to ``capacity`` randomly chosen queued students.

        Each attended student gets ``time_day`` recorded; anyone left in the
        queue afterwards is dropped (the queue does not carry over).
        '''
        atendidos = min(len(self.cola), capacity)
        for _ in range(atendidos):
            lucky = choice(self.cola)
            self.cola.remove(lucky)
            lucky.meeting_days.append(time_day)
        self.cola.clear()
class TeacherAssistant(Person):
    '''Ayudante de docencia.

    ``skilled_subjects`` holds 3 randomly drawn week indices (0-11) the
    assistant is especially good at; repeats are possible.
    '''

    def __init__(self, name):
        super().__init__(name)
        self.skilled_subjects = [randrange(0, 12) for _ in range(3)]
class TaskAssistant(Person):
    '''Ayudante de tareas; no behavior beyond the base class.'''

    def __init__(self, name):
        super().__init__(name)
class Student(Person):
    '''Class to simulate a student.

    Grades, studied hours and programming level are tracked per week (0-11).
    ``nota_esperada`` maps a grade range to, per week, the studied-hours range
    that produces it; a 1-tuple means "this many hours or more".
    '''
    nota_esperada = {(1.1, 3.9): [(0, 2), (0, 3), (0, 1), (0, 2), (0, 3), (0, 4), (0, 3), (0, 2), (0, 1), (0, 4), (0, 2), (0, 2)],
                     (4.0, 5.9): [(3, 4), (4, 6), (2, 4), (3, 5), (4, 7), (5, 7), (4, 6), (3, 5), (2, 4), (5, 7), (3, 5), (3, 7)],
                     (6.0, 6.9): [(5, 6), (7, 7), (5, 6), (6, 7), (8, 8), (8, 9), (7, 8), (6, 7), (5, 6), (8, 9), (6, 7), (8, 8)],
                     (7.0, 7.0): [(7, ), (8, ), (7, ), (8, ), (9, ), (10, ), (9, ), (8, ), (7, ), (10, ), (8, ), (9, )]}
    # Per-week difficulty (same table as AdvancedProgramming.dificulty).
    dificulty = [2, 2, 3, 5, 7, 10, 7, 9, 1, 6, 6, 5]

    def id_():
        '''Infinite unique-id generator; instantiated once at class creation.'''
        i = 0
        while True:
            yield i
            i += 1
    get_id = id_()

    def __init__(self, name, prob_40_credits, prob_50_credits, prob_55_credits,
                 prob_60_credits, prob_visit_proffesor, initial_level_confidence_inf, initial_level_confidence_sup):
        super().__init__(name)
        # Bug fix: the original drew the working confidence from a hard-coded
        # randrange(2, 13), discarding the configured bounds (which happen to
        # be 2 and 12 at the current call site, so behavior is preserved
        # there). Draw from the parameters instead.
        self.confidence = randrange(initial_level_confidence_inf, initial_level_confidence_sup + 1)
        self.initial_confidence = self.confidence
        choices = [(40, prob_40_credits), (50, prob_50_credits), (55, prob_55_credits),
                   (60, prob_60_credits)]
        self.total_credits = weighted_choice(choices)
        # Weekly hours available for the course depend on the credit load.
        if self.total_credits == 40:
            self.horas_totales_semanas = {i: randrange(10, 26) for i in range(0, 12)}
        elif self.total_credits == 50:
            self.horas_totales_semanas = {i: randrange(10, 16) for i in range(0, 12)}
        elif self.total_credits == 55:
            self.horas_totales_semanas = {i: randrange(5, 16) for i in range(0, 12)}
        elif self.total_credits == 60:
            self.horas_totales_semanas = {i: randrange(10, 11) for i in range(0, 12)}
        self.horas_estudiadas = {i: 0 for i in range(0, 12)}
        self.horas_tareas = {i: 0 for i in range(0, 12)}
        self.manejo_contenidos = {i: 0 for i in range(0, 12)}
        self.personality = choice(['efficient', 'artistic', 'theoretical'])
        self.programming_levels_dict = {i: 0 for i in range(0, 12)}
        self.prob_visit_proffesor = prob_visit_proffesor
        self.notas_act = dict()
        self.notas_examen = dict()
        self.notas_controles = dict()
        self.notas_tareas = dict()
        self.initial_programmation_lvl = randrange(2, 11)
        self.catedra_help_days = []
        self.tips_days = []
        self.ayudantia_tips_days = []
        self.party_days = []
        self.meeting_days = []
        self.football_days = []
        self.active = True
        self.id = next(Student.get_id)

    @property
    def promedio(self):
        '''Returns the average score to the current date (1.0 if no grades).'''
        act_avg = sum(v for k, v in self.notas_act.items()) / len(self.notas_act) if len(self.notas_act) != 0 else None
        examen_avg = list(self.notas_examen.values()).pop() if len(self.notas_examen) != 0 else None
        controles_avg = sum(v for k, v in self.notas_controles.items()) / len(self.notas_controles) if len(self.notas_controles) != 0 else None
        tareas_avg = sum(v for k, v in self.notas_tareas.items()) / len(self.notas_tareas) if len(self.notas_tareas) != 0 else None
        Sum = 0
        n = 0
        # `is not None` instead of truthiness; equivalent here because grades
        # are clamped to at least 1, but explicit is safer.
        if act_avg is not None:
            Sum += act_avg
            n += 1
        if examen_avg is not None:
            Sum += examen_avg
            n += 1
        if controles_avg is not None:
            Sum += controles_avg
            n += 1
        if tareas_avg is not None:
            Sum += tareas_avg
            n += 1
        return Sum / n if n != 0 else 1.0

    @property
    def tips_weeks(self):
        '''returns a list with the dates but in weeks '''
        return [int(tips_day / 7) for tips_day in self.tips_days]

    @property
    def party_weeks(self):
        return [int(party_day / 7) for party_day in self.party_days]

    @property
    def catedra_help_weeks(self):
        return [int(day / 7) for day in self.catedra_help_days]

    @property
    def ayudantia_tips_weeks(self):
        return [int(day / 7) for day in self.ayudantia_tips_days]

    @property
    def hangover_days(self):
        '''returns a list with days with hangover, ie, 2 days after party '''
        hang = []
        for day in self.party_days:
            hang.append(day + 1)
            hang.append(day + 2)
        return hang

    @property
    def meeting_weeks(self):
        return [int(meeting_day / 7) for meeting_day in self.meeting_days]

    @staticmethod
    def _clamp_week(time_day):
        '''Map a day to its week, clamped to the last teaching week (11).

        Post-semester days (the exam is after week 11) would otherwise raise
        KeyError/IndexError on the per-week tables.
        '''
        return min(int(time_day / 7), 11)

    def update_programming_level(self, time_day):
        '''This gets updated every catedra '''
        time_week = self._clamp_week(time_day)
        v = 0.08 if time_week in self.meeting_weeks else 0
        w = 0.015 if time_week in self.party_weeks else 0
        previous = self.initial_programmation_lvl if time_week == 0 else self.programming_levels_dict[time_week - 1]
        # The original wrote (1 - + w - v), which evaluates to (1 - w - v);
        # kept numerically identical, just written clearly.
        self.programming_levels_dict[time_week] = 1.05 * (1 - w - v) * previous

    def listen_tip(self, time_day):
        '''Add the day when the student listened a tip to a list '''
        self.tips_days.append(time_day)

    def visit_proffesor(self, time_day, proffesor):
        '''Adds the day when the student went to visit the teacher to a list '''
        if self.promedio <= 5.0:
            proffesor.cola.append(self)
        else:
            # Good students still drop by with 20% probability.
            if bool(bernoulli(0.2)):
                proffesor.cola.append(self)

    def go_to_party(self, time_day):
        self.party_days.append(time_day)

    def update_confidence(self, time_day, n_a=False, n_t=False, n_c=False):
        '''Every time an evaluation is submitted, this gets updated. Updates the confindence'''
        # Truthiness is safe: real grades are always >= 1.
        x = 1 if n_a else 0
        y = 1 if n_t else 0
        z = 1 if n_c else 0
        scores_confidence = 0
        # The evaluation took place 14 days before its grades were published.
        if bool(x):
            n_a_esperada = self.get_nota_esperada(time_day - 14)
            scores_confidence += 3 * (n_a - n_a_esperada)
        if bool(y):
            n_t_esperada = self.get_nota_esperada(time_day - 14)
            scores_confidence += 5 * (n_t - n_t_esperada)
        if bool(z):
            n_c_esperada = self.get_nota_esperada(time_day - 14)
            scores_confidence += 1 * (n_c - n_c_esperada)
        self.confidence += scores_confidence

    def get_nota_esperada(self, time_day):
        '''Returns the spected score acording to the table '''
        horas_estudiadas = int(self.get_horas_estudiadas(time_day))
        week = self._clamp_week(time_day)
        for rango_notas, horas in Student.nota_esperada.items():
            rango_horas = horas[week]
            lower = rango_horas[0]
            # Bug fix: the (7.0, 7.0) row holds 1-tuples like (7,); indexing
            # [1] on them raised IndexError (and integer hour counts above
            # every range fell through returning None). A 1-tuple means
            # "this many hours or more".
            upper = rango_horas[1] if len(rango_horas) > 1 else float('inf')
            if lower <= horas_estudiadas <= upper:
                return round(uniform(rango_notas[0], rango_notas[1]), 2)

    def get_horas_estudiadas(self, time_day):
        '''Calculates and returns the amount of hours, depending on the events. '''
        self.horas_estudiadas = {i: 0 for i in range(0, 12)}
        for i in range(time_day):
            time_week = self._clamp_week(i)
            # 30% of the weekly budget goes to studying, spread over the week.
            horas_por_dia = 0.3 * self.horas_totales_semanas[time_week] / 7
            # No studying with a hangover or on football days.
            if i not in self.hangover_days and i not in self.football_days:
                self.horas_estudiadas[time_week] += horas_por_dia
        return self.horas_estudiadas[self._clamp_week(time_day)]

    def get_horas_tareas(self, time_day, publication_date):
        '''Calculates and returns the amount of hours dedicated to a tarea. '''
        horas = 0
        for i in range(publication_date, time_day):
            time_week = self._clamp_week(i)
            # The remaining 70% of the weekly budget goes to the tarea.
            horas_por_dia = 0.7 * self.horas_totales_semanas[time_week] / 7
            if i not in self.hangover_days and i not in self.football_days:
                horas += horas_por_dia
        return horas

    def watch_football(self, time_day):
        '''Adds the date to a list, so it can be used to update other atributes '''
        self.football_days.append(time_day)

    def get_manejo_contenidos(self, time_day):
        '''Calculates and returns the contents skills for the current week. '''
        self.get_horas_estudiadas(time_day)
        # NOTE(review): the original looped ``for i in range(time_day)`` but
        # used ``time_day`` (not ``i``) inside, recomputing the same week's
        # value time_day times; computing it once is equivalent and O(1).
        time_week = self._clamp_week(time_day)
        x = 1.0
        if time_week in self.tips_weeks:
            x *= 1.1
        if time_week in self.ayudantia_tips_weeks:
            x *= 1.1
        if time_week in self.catedra_help_weeks:
            n = self.catedra_help_weeks.count(time_week)
            x *= 1.0 + 0.01 * n
        self.manejo_contenidos[time_week] = (self.horas_estudiadas[time_week] / Student.dificulty[time_week]) * x
        return self.manejo_contenidos[time_week]

    def rendir_evaluacion(self, time_day, eval_name, exigencia, publication_date=False):
        '''It calculate the grade in every evaluation. It does not support exams '''
        time_week = self._clamp_week(time_day)
        if eval_name == 'actividad':
            pep_8 = 0.7 * self.get_manejo_contenidos(time_day) + \
                0.2 * self.programming_levels_dict[time_week] + \
                0.1 * self.confidence
            functionality = 0.3 * self.get_manejo_contenidos(time_day) + \
                0.7 * self.programming_levels_dict[time_week] + \
                0.1 * self.confidence
            contents = 0.7 * self.get_manejo_contenidos(time_day) + \
                0.2 * self.programming_levels_dict[time_week] + \
                0.1 * self.confidence
            total = 0.4 * functionality + 0.4 * contents + 0.2 * pep_8
            nota = max(total * 7 / exigencia, 1)
            # Each personality shines on certain weeks (+1, capped at 7).
            if self.personality == 'efficient':
                if time_week == 4 or time_week == 7:
                    nota = min(nota + 1, 7)
            elif self.personality == 'artistic':
                if time_week == 8 or time_week == 11:
                    nota = min(nota + 1, 7)
            elif self.personality == 'theoretical':
                if time_week == 5:
                    nota = min(nota + 1, 7)
            return nota
        elif eval_name == 'control':
            functionality = 0.3 * self.get_manejo_contenidos(time_day) + \
                0.2 * self.programming_levels_dict[time_week] + \
                0.5 * self.confidence
            contents = 0.7 * self.get_manejo_contenidos(time_day) + \
                0.05 * self.programming_levels_dict[time_week] + \
                0.25 * self.confidence
            total = 0.3 * functionality + 0.7 * contents
            nota = max(total * 7 / exigencia, 1)
            return nota
        elif eval_name == 'tarea':
            pep_8 = 0.5 * self.get_horas_tareas(time_day, publication_date) + 0.5 * self.programming_levels_dict[time_week]
            contents = 0.7 * self.get_manejo_contenidos(time_day) + 0.1 * self.programming_levels_dict[time_week] + 0.2 * self.get_horas_tareas(time_day, publication_date)
            functionality = 0.5 * self.get_manejo_contenidos(time_day) + 0.1 * self.programming_levels_dict[time_week] + 0.4 * self.get_horas_tareas(time_day, publication_date)
            if self.personality == 'efficient':
                pep_8 *= 1.1
                contents *= 1.1
                functionality *= 1.1
            elif self.personality == 'artistic':
                pep_8 *= 1.2
            elif self.personality == 'theoretical':
                pep_8 *= 0.9
                contents *= 0.9
                functionality *= 0.9
            total = 0.4 * functionality + 0.4 * contents + 0.2 * pep_8
            nota = max(total * 7 / exigencia, 1)
            return nota

    def rendir_examen(self, time_day, materias, exigencias):
        '''Calculates the grade in the case of exam. Returns the value '''
        # The exam happens after week 11; clamp so the per-week tables
        # (keyed 0-11) do not raise KeyError.
        time_week = self._clamp_week(time_day)
        notas_preguntas = []
        for materia, exigencia in zip(materias, exigencias):
            contents = 0.5 * self.manejo_contenidos[time_week] + 0.1 * self.programming_levels_dict[time_week] + 0.4 * self.confidence
            functionality = 0.3 * self.manejo_contenidos[time_week] + 0.2 * self.programming_levels_dict[time_week] + 0.5 * self.confidence
            total_pregunta = 0.3 * functionality + 0.7 * contents
            nota_pregunta = max(total_pregunta * 7 / exigencia, 1)
            notas_preguntas.append(nota_pregunta)
        nota_final = sum(notas_preguntas) / len(notas_preguntas)
        if self.personality == 'theoretical':
            nota_final = min(nota_final + 1, 7)
        return nota_final

    def ask_questions(self, time_day, n_questions):
        '''Simulates the questions asked to an assistant. Adds to a list so it can be used to update other attributes '''
        for i in range(n_questions):
            self.catedra_help_days.append(time_day)

    def listen_ayudantia(self, time_day):
        '''Used when an assistant gives a super ayudantia. Adds date to a list '''
        self.ayudantia_tips_days.append(time_day)
class Simulation:
    '''Class to control all the instances of AdvancedProgramming. It can show the final statistics '''

    def __init__(self, prob_40_credits, prob_50_credits, prob_55_credits, prob_60_credits,
                 prob_visit_proffesor, prob_atraso_mavrakis, percentaje_progress_tarea_mail, month_party,
                 month_football, initial_level_confidence_inf, initial_level_confidence_sup):
        self.prob_40_credits = prob_40_credits
        self.prob_50_credits = prob_50_credits
        self.prob_55_credits = prob_55_credits
        self.prob_60_credits = prob_60_credits
        self.prob_visit_proffesor = prob_visit_proffesor
        self.prob_atraso_mavrakis = prob_atraso_mavrakis
        self.percentaje_progress_tarea_mail = percentaje_progress_tarea_mail
        self.month_football = month_football
        self.month_party = month_party
        self.initial_level_confidence_inf = initial_level_confidence_inf
        self.initial_level_confidence_sup = initial_level_confidence_sup
        self.escenarios_filename = 'escenarios.csv'

    def load(self):
        '''Loads all the csv files '''
        self.IIC = AdvancedProgramming(self.percentaje_progress_tarea_mail, self.month_party, self.month_football)
        # Consistency: use the filename the course instance already declares.
        with open(self.IIC.integrantes_filename, 'r', encoding='utf-8') as f:
            csv_reader = csv.reader(f, delimiter=',')
            header = next(csv_reader)
            for row in csv_reader:
                name = row[0]
                rol = row[1]
                section_number = row[2]
                if rol == 'Profesor':
                    person = Proffessor(name)
                elif rol == 'Coordinación':
                    person = Coordinator(name)
                elif rol == 'Docencia':
                    person = TeacherAssistant(name)
                elif rol == 'Tareas':
                    person = TaskAssistant(name)
                elif rol == 'Alumno':
                    person = Student(name, self.prob_40_credits, self.prob_50_credits,
                                     self.prob_55_credits, self.prob_60_credits, self.prob_visit_proffesor,
                                     self.initial_level_confidence_inf, self.initial_level_confidence_sup)
                else:
                    # Bug fix: an unknown rol used to leave `person` bound to
                    # the previous row's object (or unbound on the first row).
                    continue
                self.IIC.add_person(person, section_number)

    def get_global_statistics(self):
        '''Print the number of dropouts and the average start/end confidence.'''
        total = len(self.IIC.all_students)
        activos = len(self.IIC.active_students)
        botaron = total - activos
        # Guard against an empty or fully-deserted course (the original
        # raised ZeroDivisionError in that case).
        if total == 0 or activos == 0:
            avg_confidence = 0
        else:
            avg_confidence = (sum([alumno.initial_confidence for alumno in self.IIC.all_students]) / total +
                              sum([alumno.confidence for alumno in self.IIC.active_students]) / activos) / 2
        print('[1] Cantidad total de alumnos que botaron el ramo: {}'.format(botaron))
        print('[2] Promedio de confianza al inicio y al final del ramo: {}'.format(avg_confidence))

    def get_personal_statistics(self):
        '''Ask for a student name and print/plot that student's statistics.'''
        loop = True
        while loop:
            user_name = input('Ingrese el nombre completo del alumno').title()
            try:
                alumno = [i for i in self.IIC.all_students if i.name == user_name].pop()
            except IndexError:
                loop = True
            else:
                loop = False
        avg_prog_lvl = sum([v for k, v in alumno.programming_levels_dict.items()]) / len(alumno.programming_levels_dict)
        # Bug fix: the original format strings had no '{}' placeholder, so
        # the values were never printed.
        print('Nivel programacion promedio: {}'.format(avg_prog_lvl))
        print('Confianza final: {}'.format(alumno.confidence))
        x = [i for i in range(12)]
        y = [v for k, v in alumno.manejo_contenidos.items()]
        plt.plot(x, y)
        plt.title('Manejo contenidos vs semanas')
        plt.xlabel('Semanas')
        plt.ylabel('Manejo contenidos')
        plt.show()
        print('Notas actividades: ')
        for k, v in alumno.notas_act.items():
            print(k, v)
        print('Notas tareas: ')
        for k, v in alumno.notas_tareas.items():
            print(k, v)
        print('Notas controles: ')
        for k, v in alumno.notas_controles.items():
            print(k, v)
        print('Nota examen: ')
        for k, v in alumno.notas_examen.items():
            print(k, v)

    def get_graphs(self):
        '''Plot the per-week averages of controles, tareas and actividades.'''
        x = self.IIC.dataframe_proms['MATERIA']
        y_1 = self.IIC.dataframe_proms['PROM CONTROLES']
        y_2 = self.IIC.dataframe_proms['PROM TAREAS']
        y_3 = self.IIC.dataframe_proms['PROM ACT']
        plt.plot(x, y_1, label='PROM CONTROLES')
        plt.plot(x, y_2, label='PROM TAREAS')
        plt.plot(x, y_3, label='PROM ACT')
        # Bug fix: the labels were never shown and the figure never displayed.
        plt.legend()
        plt.show()
# --- interactive entry point: banner, scenario setup and simulation load ---
print("""*******************************************************
*** ***
*** Bienvenido a Avanzacion Programada ***
*** ***
*******************************************************""")
print("------------------------------------------------------")
print("Bienvenido a Avanzacion Programada, aca podras simular el curso \n\
de la dimension de Mavrakis")
# Scenario parameters: credit-load probabilities, visit/atraso probabilities,
# tarea-mail progress fraction, party and football rates, confidence bounds.
s = Simulation(0.1, 0.7, 0.15, 0.05, 0.2, 0.1, 0.5, 1 / 30, 1 / 70, 2, 12)
s.load()
# Handle to the loaded course instance for interactive exploration.
curso = s.IIC
| [
"lechodiman@uc.cl"
] | lechodiman@uc.cl |
7755fbe30daf087ea1b362b26e78416cadd75210 | 04e6bcdbcb8d0e3a40bd62792e70fca10641c8c7 | /src/sentry/models/integrationfeature.py | 8f8e15b118cfe531d8b697fe18e6a1381e69bc01 | [
"BSD-2-Clause"
] | permissive | andrzej-tests-1/sentry-app | f711b1a05c8a7d28e8d8e023b8feffc72dc7202e | fd920bb0e6a4956f57f3dfe4301768e1c4b0d4d8 | refs/heads/master | 2020-05-30T16:50:56.477815 | 2019-06-02T15:58:55 | 2019-06-02T15:58:55 | 189,854,339 | 0 | 0 | BSD-3-Clause | 2019-06-02T20:30:53 | 2019-06-02T14:09:39 | Python | UTF-8 | Python | false | false | 2,338 | py | from __future__ import absolute_import
from django.db import models
from django.utils import timezone
from sentry.db.models import BoundedPositiveIntegerField, FlexibleForeignKey, Model
class Feature(object):
    """Integer constants for the features a Sentry App can declare.

    ``as_choices`` is the single source of truth for the slug strings;
    ``as_str`` is derived from it so the two can never drift apart.
    """
    API = 0
    ISSUE_LINK = 1
    STACKTRACE_LINK = 2
    EVENT_HOOKS = 3

    @classmethod
    def as_choices(cls):
        # (value, slug) pairs, in Django model-field `choices` format.
        return (
            (cls.API, 'integrations-api'),
            (cls.ISSUE_LINK, 'integrations-issue-link'),
            (cls.STACKTRACE_LINK, 'integrations-stacktrace-link'),
            (cls.EVENT_HOOKS, 'integrations-event-hooks'),
        )

    @classmethod
    def as_str(cls, feature):
        # Derived from as_choices so each slug lives in exactly one place.
        # Like the original if/elif chain, unknown values yield None.
        return dict(cls.as_choices()).get(feature)

    @classmethod
    def description(cls, feature):
        if feature == cls.API:
            return "This integration can utilize the Sentry API (with the permissions granted) to pull data or update resources in Sentry!"
        elif feature == cls.ISSUE_LINK:
            return "This integration can allow your organization to create or link Sentry issues to another service!"
        elif feature == cls.STACKTRACE_LINK:
            return "This integration allows your organization to open a line in Sentry's stack trace in another service!"
        elif feature == cls.EVENT_HOOKS:
            return "This integration allows your organization to forward events to another service!"
class IntegrationFeature(Model):
    """Links a SentryApp to one declared Feature, with optional custom text."""
    # Not part of Sentry's "core" model export/import set.
    __core__ = False
    sentry_app = FlexibleForeignKey('sentry.SentryApp')
    # Optional author-supplied override for the canned Feature.description().
    user_description = models.TextField(null=True)
    feature = BoundedPositiveIntegerField(
        default=0,
        choices=Feature.as_choices(),
    )
    date_added = models.DateTimeField(default=timezone.now)

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_integrationfeature'

    def feature_str(self):
        # Slug form of the integer `feature` value, e.g. 'integrations-api'.
        return Feature.as_str(self.feature)

    @property
    def description(self):
        # Prefer the app author's own wording when one was supplied.
        if self.user_description:
            return self.user_description
        else:
            return Feature.description(self.feature)
| [
"noreply@github.com"
] | noreply@github.com |
ab0e131dbda35a73953eb849d62f9e43b2ed13f0 | 507667654ae3b93b1b19588b114631683f5a1e1b | /Python_Programming/Lab7/MakeAmericaTweetAgain/env/Scripts/getTweets.py | bbe15aa19c9b2fca524ae7a270c0f69a6725f169 | [] | no_license | ndchoate/UC-Fall-2016 | dba54ee14101bf410ec2a39b1252eb520cff10d6 | a99ec47dbb872390629ce28927713cdbc5bffcd0 | refs/heads/master | 2021-01-18T19:53:13.382445 | 2016-11-24T21:30:09 | 2016-11-24T21:30:09 | 69,291,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | from sys import argv
import time
import json
from api import getAPI
REQUEST_DELAY = 5  # seconds to sleep between Twitter API calls (rate-limit courtesy)
MAX_REQUESTS = 5  # pages of user_timeline to fetch per run
def main():
    """Fetch up to MAX_REQUESTS pages of a user's tweets and dump the texts.

    The Twitter handle comes from argv[1]; results are written (best-effort,
    even on failure) to a JSON file named '<handle>Tweets'.
    """
    # Bug fix: both names were only bound inside the try block, so an early
    # failure (e.g. missing argv[1]) made the finally block raise NameError.
    arg = 'unknown'
    tweetResults = []
    try:
        arg = argv[1]
        api = getAPI()
        tweetIndex = api.user_timeline(screen_name=arg, count=1)[0].id
        time.sleep(REQUEST_DELAY)
        for request in range(MAX_REQUESTS):
            # NOTE(review): Twitter's max_id is inclusive, so each page
            # re-fetches the previous page's last tweet — consider
            # max_id=tweetIndex - 1 to avoid duplicates.
            tweets = api.user_timeline(screen_name=arg, include_retweets=False, max_id=tweetIndex)
            for tweet in tweets:
                tweetResults.append(tweet.text)
                tweetIndex = tweet.id
            time.sleep(REQUEST_DELAY)
    except IndexError:
        print("Program Missing Arg. Twitter Handle")
    except Exception as e:
        print("Program Failure. Error: {}".format(e))
    finally:
        # Best-effort dump of whatever was collected, even after an error.
        with open('{}Tweets'.format(arg), 'w') as saveFile:
            json.dump(tweetResults, saveFile)


if __name__ == '__main__':
    main()
"ndchoate@gmail.com"
] | ndchoate@gmail.com |
e215ba31603bbe6754adacf620abbefadcf26cc9 | 8dba02fc002912c569c410f6a106cec835b2bf4d | /blogsrc/articles/migrations/0008_auto_20201214_1249.py | cd8405eb82abdea601e8296051543cbdfc841afd | [
"MIT"
] | permissive | kemalayhan/personal-blog | f2e5a8caf92b69191bc129628d12a0f3ae2981f6 | 4f6a33144d02c921b68f021b5571798385830e74 | refs/heads/main | 2023-02-27T14:37:49.855511 | 2021-01-30T21:00:14 | 2021-01-30T21:00:14 | 320,511,378 | 1 | 1 | MIT | 2021-01-07T20:25:57 | 2020-12-11T08:21:53 | CSS | UTF-8 | Python | false | false | 341 | py | # Generated by Django 3.1.4 on 2020-12-14 09:49
from django.db import migrations
class Migration(migrations.Migration):
    """Set default ordering on the Article model: newest articles first."""
    dependencies = [
        ('articles', '0007_tag_slug'),
    ]
    operations = [
        # '-created' orders by creation timestamp, descending.
        migrations.AlterModelOptions(
            name='article',
            options={'ordering': ['-created']},
        ),
    ]
| [
"kemalayhan013@gmail.com"
] | kemalayhan013@gmail.com |
bd6f55e0c0d9e665f54c9263747e6c31ef9509e6 | 26c54c424dbe79fcd1962bddcbbb218d090eb6fc | /extras1roteiro/credito.py | 48e7a1139be475ae08f430c6bb8ef9f9c3cd210d | [] | no_license | Anap123/python-repo | dbfa4cad586ba5b3bffdc26e70c6170048e59260 | 2532a7cebe88b9987109a93fb5e6c486a5152b22 | refs/heads/master | 2020-08-17T12:52:49.437324 | 2019-05-21T20:55:57 | 2019-05-21T20:55:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | sal = (int(input()))
comp = int(input())
# The printed amount never goes below zero: once `comp` exceeds 30% of the
# salary, the result is clamped to 0.
pm = max(sal * 0.3 - comp, 0)
print("%.2f" % pm)
| [
"arthur.mts@gmail.com"
] | arthur.mts@gmail.com |
d065c578a4c2e8291e5137cdf169e777db14c922 | 8b2d5faac1484195335db8729fca7db8994fdab2 | /15_rolling apply and maping functions.py | f7c11c5761503b59a3d74f3504dd1960efdb2499 | [] | no_license | bututoubaobei/python_pandas | 10776899fe6fb2033a16e287b30ef3bbd194592e | d789b5e409991c9a242e6a05b0e6682aa2a8c6e9 | refs/heads/master | 2021-11-24T12:19:17.023468 | 2018-02-08T18:03:05 | 2018-02-08T18:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import quandl
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
from statistics import mean
# Matplotlib theme used for any plots produced by this script.
style.use('fivethirtyeight')
# SECURITY NOTE(review): a Quandl API key is committed here in plain text;
# it should be rotated and loaded from the environment instead.
api_key="h6wdhv7vRv8o7FyNaWTj"
def create_labels(cur_hpi,fut_hpi):
    """Binary label: 1 when the future HPI exceeds the current HPI, else 0."""
    return 1 if fut_hpi > cur_hpi else 0
# we can do moving average in pandas
def moving_average(values):
    """Arithmetic mean of *values*; used as the rolling-window aggregator."""
    return mean(values)
# Load the pre-built housing price index DataFrame from disk.
housing_data=pd.read_pickle('HPI.pickle')
# to see the percent changes
housing_data=housing_data.pct_change()
# handle the "na" and "-inf"
housing_data.replace([np.inf,-np.inf],np.nan, inplace=True)
# Next period's US HPI shifted onto the current row so it can be labeled.
housing_data['US_HPI_future']=housing_data['United States'].shift(-1)
housing_data.dropna(inplace=True)
# print(housing_data[['US_HPI_future','United States']].head())
# Classification target: 1 when the HPI rises next period, else 0.
housing_data['label']=list(map(create_labels,housing_data['United States'],housing_data['US_HPI_future']))
print(housing_data.head())
# NOTE(review): pd.rolling_apply was removed in pandas 0.23+; on modern
# pandas this is housing_data['M30'].rolling(10).apply(moving_average).
housing_data['ma_apply_example']=pd.rolling_apply(housing_data['M30'],10,moving_average)
# the last five datas
print(housing_data.tail())
| [
"qiumingming7@gmail.com"
] | qiumingming7@gmail.com |
7b7827a383449551327598c28dc91bad8daa96ff | 5ee26a8f7414c25a30e11d1014c0899df9bfb731 | /Adapter/Python/Solider.py | f82d203e8a23842e153b28f1da5b95573a24400a | [] | no_license | JalalMirzayev/DesignPatterns | fd88890c24706cee11f3ac7830cca5a2961191b8 | a179794740c83b8992d7544843e7449ff10f573c | refs/heads/master | 2021-05-24T13:41:15.612138 | 2020-04-06T20:44:42 | 2020-04-06T20:44:42 | 253,587,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from adapter.EnemyAttacker import EnemyAttacker
import random
class Solider(EnemyAttacker):
    """Concrete attacker that reports its actions on stdout.

    Damage points and movement steps are drawn uniformly from 0..10.
    """

    def make_damage(self):
        damage = random.randint(0, 10)
        print(f"Solider inflicts {damage} damage points.")

    def make_move(self):
        steps = random.randint(0, 10)
        print(f"Solider moves {steps} steps forward.")

    def set_name(self, name):
        print(f"The solider is named {name}.")
| [
"noreply@github.com"
] | noreply@github.com |
214bde21462492a67176cebc14801baccff32864 | ffaac8893bedff9c911a032c3f06f9a963fe9b01 | /Instances/content_Instance.py | ec81d00bb388c3781c61e5de6592a4ca1964a32b | [] | no_license | Planet-KIM/planet_python_WebScraping | 0300dd8f022c9d5abae3e58ac3fbd6629b8acb18 | 76281b9cf06ae17bc4a8f47ca04191ef099b2629 | refs/heads/master | 2021-03-21T16:38:02.779291 | 2020-04-27T12:29:44 | 2020-04-27T12:29:44 | 247,312,251 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | import sys
import io
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
def changeUtf8():
    """Rewrap stdout and stderr so console output is always UTF-8 encoded."""
    for stream_name in ("stdout", "stderr"):
        raw = getattr(sys, stream_name).detach()
        setattr(sys, stream_name, io.TextIOWrapper(raw, encoding='utf-8'))
changeUtf8()
class Content:
    """Base record for one scraped article: source URL, title, and body text."""

    def __init__(self, url, title, body):
        self.url = url
        self.title = title
        self.body = body

    def print(self):
        """Write the record to stdout; override to customize the output."""
        for label, value in (("URL", self.url), ("TITLE", self.title), ("BODY", self.body)):
            print("{} : {}".format(label, value))
class Website():
    """Describes how to scrape one site: its name, root URL, and the
    selectors used to find an article's title and body."""

    def __init__(self, name, url, titleTag, bodyTag):
        self.name, self.url = name, url
        self.titleTag, self.bodyTag = titleTag, bodyTag
def getPage(url):
    """Fetch *url* over HTTP and return it parsed as a BeautifulSoup tree."""
    req = requests.get(url)
    return BeautifulSoup(req.text, 'html.parser')
def scrapeNYTimes(url):
    """Scrape a New York Times article page into a Content record."""
    page = getPage(url)
    title = page.find('h1').text
    paragraphs = page.select('div.StoryBodyCompanionColumn div p')
    body = '\n'.join(p.text for p in paragraphs)
    return Content(url, title, body)
def scrapeBrookings(url):
    """Scrape a Brookings blog post into a Content record."""
    bs = getPage(url)
    title = bs.find('h1').text
    # Bug fix: the attrs filter must be a dict; the original passed the SET
    # literal {'class', 'post-body'}, which is not a class-name filter.
    body = bs.find('div', {'class': 'post-body'}).text
    return Content(url, title, body)
# Demo run: scrape one Brookings post and one NYTimes article and dump each
# to stdout (requires network access).
url = '''https://www.brookings.edu/blog/future-development/2018/01/26/delivering-inclusive-urban-access-3-uncomfortable-truths/'''
content = scrapeBrookings(url)
print('Title : {}'.format(content.title))
print('URL : {}\n'.format(content.url))
print(content.body)
url = '''https://www.nytimes.com/2018/01/25/opinion/sunday/silicon-valley-immortality.html'''
content = scrapeNYTimes(url)
print('Title : {}'.format(content.title))
print('URL: {}\n'.format(content.url))
print(content.body)
| [
"55446103+KIM-DO-WON@users.noreply.github.com"
] | 55446103+KIM-DO-WON@users.noreply.github.com |
9c3f0d5c4c2ed881f3c67606e658c371f5c2d090 | 977713cb1a1cd7ad3a5b3e5b121162ad793968e5 | /settings.py | 04838b8e9558cb273ee92d49028c5197cc4bb640 | [
"MIT"
] | permissive | ServiceLearningB/ServiceLearning | 77c3632ea054f6751e2d95a5d97f5e4a4db97e9d | 739b3073ab6d401d5f075ac82197437ba15b97e5 | refs/heads/master | 2021-01-10T15:36:36.047828 | 2016-03-31T21:17:52 | 2016-03-31T21:17:52 | 55,176,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,574 | py | """
Django settings for Project project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mr%9u&_#g!%d)1*2irhoc2nry@8%(3993n%8jqefo6pjn=thm9'
# NOTE(review): the key above is committed to version control; rotate it and
# load it from the environment before any production deployment.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/domains this site may serve; must be populated when DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'submit_reports',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "static", "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
#TEMPLATE_DEBUG = True
if DEBUG:
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, "static", "static-only")
MEDIA_ROOT = os.path.join(BASE_DIR, "static", "media")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static", "static"),
)
| [
"roconnorc@gmail.com"
] | roconnorc@gmail.com |
bd67f78d612f4ed481565ee3e50e68afd658b000 | a32347b238effbe6e6bdb9ec04de54f1d91ef95a | /recommender/recommender/web/routes.py | ff8062fa954d217251aba5ea5b5047cefca7bbef | [
"MIT"
] | permissive | ScJa/projectr | 7f4e4ebcb2b38b6ed25e49929564d0855f0b69ac | 91298713edeab2c93932d1b372c58564458f2118 | refs/heads/master | 2021-01-09T20:34:52.939109 | 2016-07-17T11:57:56 | 2016-07-17T11:57:56 | 63,527,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,469 | py | from flask import Blueprint, json
from recommender.core import Recommender
from recommender.database.providers import DatabaseUserDataProvider, DatabaseProjectDataProvider, DatabaseSkillDataProvider
from recommender.util import log
index = Blueprint("index", __name__)
logger = log.getLogger("routes")
def recommend(positionIds, projectId, n=10):
    """Compute up to *n* candidate users for every position id given.

    Returns a dict mapping position id -> {rank: candidate dict}.  Any
    exception is logged and whatever was computed so far is returned.
    """
    recommendations = {}
    try:
        engine = Recommender(DatabaseUserDataProvider(),
                             DatabaseProjectDataProvider(),
                             DatabaseSkillDataProvider())
        engine.prepare(projectId)
        for positionId in positionIds:
            ranked = {}
            for rank, (user, score) in enumerate(engine.recommend(positionId, n=n)):
                ranked[rank] = {'id': user.user.id, 'email': user.user.email,
                                'firstName': user.user.firstName,
                                'lastName': user.user.lastName, 'score': score}
            recommendations[positionId] = ranked
    except Exception as e:
        logger.exception(e)
    return recommendations
@index.route("/recommend/position/<int:positionId>")
def recommendPosition(positionId):
    """
    Endpoint for getting recommendations for a single position.
    :param positionId: The database ID of the position.
    :return: JSON list
    """
    # Lazy %-style logger args: the message is only formatted when the
    # record is actually emitted.
    logger.info("Got request for position %s", positionId)
    projectId = DatabaseProjectDataProvider().getProjectForPositionId(positionId).id
    data = recommend([positionId], projectId)[positionId]
    logger.info("Returning: %s", data)
    return json.dumps(data)
@index.route("/recommend/position/<int:positionId>/<int:n>")
def recommendPositionN(positionId, n):
    """
    Endpoint for getting N recommendations for a single position.
    :param positionId: The database ID of the position.
    :param n: The number of recommendations to return.
    :return: JSON list
    """
    # Lazy %-style logger args instead of eager string interpolation.
    logger.info("Got request for position %s", positionId)
    projectId = DatabaseProjectDataProvider().getProjectForPositionId(positionId).id
    data = recommend([positionId], projectId, n=n)[positionId]
    logger.info("Returning: %s", data)
    return json.dumps(data)
@index.route("/recommend/project/<int:projectId>")
def recommendAll(projectId):
    """
    Endpoint for getting recommendations for all open positions of a project.
    :param projectId: The database ID of the project.
    :return: JSON list
    """
    # Lazy %-style logger args instead of eager string interpolation.
    logger.info("Got request for project %s", projectId)
    positionIds = [position.id for position in DatabaseProjectDataProvider().getOpenProjectPositions(projectId)]
    data = recommend(positionIds, projectId)
    logger.info("Returning: %s", data)
    return json.dumps(data)
@index.route("/")
def hello():
return """<html><body><table style="width: 100%; border: solid 1px;">
<h2 style="color: #00897b;">Projectr Recommender</h2>
<tr>
<td>Recommendations for a single position:</td>
<td>/recommend/position/int:positionId</td>
</tr>
<tr>
<td>N recommendations for a single position:</td>
<td>/recommend/position/int:positionId/int:n</td>
</tr>
<tr>
<td>Recommendations for all project positions:</td>
<td>/recommend/project/int:projectId</td>
</tr>
</table></body></html>""" | [
"jakob.schneidr.ga@gmail.com"
] | jakob.schneidr.ga@gmail.com |
31c28ed7776b4678c3ed73a4c1eedb7b626cc4fa | 030225b2ed6eba7671bb337dcf37f9928aaa8d21 | /test.py | 79b8793fcc2237a1f74e69774e562f00cb954d5d | [] | no_license | hyznlp/python | e76ccc604f654db64eb49eadd016ff67c9f67606 | f5aeecc18edcab254cf8e4f806511a67d9276452 | refs/heads/master | 2022-10-30T12:43:32.375725 | 2020-06-15T05:45:35 | 2020-06-15T05:45:35 | 271,713,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | class Node():
    def __init__(self, item):
        """A binary-tree node holding *item*, with both children unset."""
        self.item = item
        # Children are attached later by SortTree.addNode.
        self.left = None
        self.right = None
class SortTree():
    """Unbalanced binary search tree: items larger than a node go right,
    everything else (including duplicates) goes left."""

    def __init__(self):
        self.root = None

    def addNode(self, item):
        """Insert *item* by walking from the root to the first free slot."""
        node = Node(item)
        if self.root is None:
            self.root = node
            return
        cur = self.root
        while cur is not None:
            if cur.item < item:
                if cur.right is None:
                    cur.right = node
                    return
                cur = cur.right
            else:
                if cur.left is None:
                    cur.left = node
                    return
                cur = cur.left

    def middle(self, root):
        """In-order traversal: print the subtree under *root* in sorted order."""
        if root is None:
            return
        self.middle(root.left)
        print(root.item)
        self.middle(root.right)
# Demo: insert nine shuffled values, then print them back in sorted order
# via the in-order traversal call that follows.
sort1 = SortTree()
alist = [3,8,5,4,1,9,7,2,6]
for i in alist:
    sort1.addNode(i)
sort1.middle(sort1.root) | [
"dan@huodans-MacBook-Air.local"
] | dan@huodans-MacBook-Air.local |
b2c6b469d5b851da3ef72e607d6b2a3165fca6be | 133db51055e034962b376e832d1f97bb6cc5e468 | /blockchain.py | 523370cb25049b0156d81aaf9f61f3ec2ba0ec39 | [] | no_license | sgr0691/blockchain-exercise | ca701d112aff03a5beaf3b35b18450cfa4ed5404 | 5f243fde9f17238363a3d9b7f4df8e3412626873 | refs/heads/master | 2021-04-12T12:11:50.323871 | 2018-03-26T19:11:59 | 2018-03-26T19:11:59 | 126,773,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,454 | py | import hashlib
import json
from time import time
from urllib.parse import urlparse
from uuid import uuid4
import requests
from flask import Flask, jsonify, request
class Blockchain:
    """A minimal proof-of-work blockchain with a pending-transaction pool
    and a longest-valid-chain consensus rule over a set of peer nodes."""

    def __init__(self):
        self.current_transactions = []
        self.chain = []
        self.nodes = set()

        # Create the genesis block
        self.new_block(previous_hash='1', proof=100)

    def register_node(self, address):
        """
        Add a new node to the list of nodes

        :param address: Address of node. Eg. 'http://192.168.0.5:5000'
        """

        parsed_url = urlparse(address)
        if parsed_url.netloc:
            self.nodes.add(parsed_url.netloc)
        elif parsed_url.path:
            # Accepts an URL without scheme like '192.168.0.5:5000'.
            self.nodes.add(parsed_url.path)
        else:
            raise ValueError('Invalid URL')

    def valid_chain(self, chain):
        """
        Determine if a given blockchain is valid

        :param chain: A blockchain
        :return: True if valid, False if not
        """

        last_block = chain[0]
        current_index = 1

        while current_index < len(chain):
            block = chain[current_index]
            print(f'{last_block}')
            print(f'{block}')
            print("\n-----------\n")
            # Both the hash link and the proof must be checked against the
            # hash of the immediately preceding block.
            last_block_hash = self.hash(last_block)

            # Check that the hash of the block is correct
            if block['previous_hash'] != last_block_hash:
                return False

            # Check that the Proof of Work is correct.
            # Bug fix: this previously validated against
            # last_block['previous_hash'] (the hash of the block *before*
            # last_block), while proof_of_work() mines against
            # hash(last_block), so every honestly mined chain failed here.
            if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):
                return False

            last_block = block
            current_index += 1

        return True

    def resolve_conflicts(self):
        """
        This is our consensus algorithm, it resolves conflicts
        by replacing our chain with the longest one in the network.

        :return: True if our chain was replaced, False if not
        """

        neighbours = self.nodes
        new_chain = None

        # We're only looking for chains longer than ours
        max_length = len(self.chain)

        # Grab and verify the chains from all the nodes in our network
        for node in neighbours:
            response = requests.get(f'http://{node}/chain')

            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']

                # Check if the length is longer and the chain is valid
                if length > max_length and self.valid_chain(chain):
                    max_length = length
                    new_chain = chain

        # Replace our chain if we discovered a new, valid chain longer than ours
        if new_chain:
            self.chain = new_chain
            return True

        return False

    def new_block(self, proof, previous_hash):
        """
        Create a new Block in the Blockchain

        :param proof: The proof given by the Proof of Work algorithm
        :param previous_hash: Hash of previous Block
        :return: New Block
        """

        block = {
            'index': len(self.chain) + 1,
            'timestamp': time(),
            'transactions': self.current_transactions,
            'proof': proof,
            'previous_hash': previous_hash or self.hash(self.chain[-1]),
        }

        # Reset the current list of transactions
        self.current_transactions = []

        self.chain.append(block)
        return block

    def new_transaction(self, sender, recipient, amount):
        """
        Creates a new transaction to go into the next mined Block

        :param sender: Address of the Sender
        :param recipient: Address of the Recipient
        :param amount: Amount
        :return: The index of the Block that will hold this transaction
        """
        self.current_transactions.append({
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
        })

        return self.last_block['index'] + 1

    @property
    def last_block(self):
        # Most recently forged block.
        return self.chain[-1]

    @staticmethod
    def hash(block):
        """
        Creates a SHA-256 hash of a Block

        :param block: Block
        """

        # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
        block_string = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(block_string).hexdigest()

    def proof_of_work(self, last_block):
        """
        Simple Proof of Work Algorithm:

         - Find a number p' such that hash(pp') contains leading 4 zeroes
         - Where p is the previous proof, and p' is the new proof

        :param last_block: <dict> last Block
        :return: <int>
        """

        last_proof = last_block['proof']
        last_hash = self.hash(last_block)

        proof = 0
        while self.valid_proof(last_proof, proof, last_hash) is False:
            proof += 1

        return proof

    @staticmethod
    def valid_proof(last_proof, proof, last_hash):
        """
        Validates the Proof

        :param last_proof: <int> Previous Proof
        :param proof: <int> Current Proof
        :param last_hash: <str> The hash of the Previous Block
        :return: <bool> True if correct, False if not.
        """

        guess = f'{last_proof}{proof}{last_hash}'.encode()
        guess_hash = hashlib.sha256(guess).hexdigest()
        return guess_hash[:4] == "0000"
# Instantiate the Node
app = Flask(__name__)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
    """Mine one block: solve the proof of work, pay the mining reward to
    this node, and append the forged block to the chain."""
    # We run the proof of work algorithm to get the next proof...
    last_block = blockchain.last_block
    proof = blockchain.proof_of_work(last_block)
    # We must receive a reward for finding the proof.
    # The sender is "0" to signify that this node has mined a new coin.
    blockchain.new_transaction(
        sender="0",
        recipient=node_identifier,
        amount=1,
    )
    # Forge the new Block by adding it to the chain
    previous_hash = blockchain.hash(last_block)
    block = blockchain.new_block(proof, previous_hash)
    response = {
        'message': "New Block Forged",
        'index': block['index'],
        'transactions': block['transactions'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash'],
    }
    return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
    """Queue a JSON transaction {sender, recipient, amount} for the next
    mined block; responds 201 on success, 400 when fields are missing."""
    values = request.get_json()
    # Check that the required fields are in the POST'ed data
    required = ['sender', 'recipient', 'amount']
    if not all(k in values for k in required):
        return 'Missing values', 400
    # Create a new Transaction
    index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])
    response = {'message': f'Transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
    """Return this node's entire chain together with its length."""
    response = {
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
    }
    return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_nodes():
    """Register a JSON list of peer node addresses with this node."""
    values = request.get_json()
    nodes = values.get('nodes')
    if nodes is None:
        return "Error: Please supply a valid list of nodes", 400
    for node in nodes:
        blockchain.register_node(node)
    response = {
        'message': 'New nodes have been added',
        'total_nodes': list(blockchain.nodes),
    }
    return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
    """Run the consensus algorithm: adopt the longest valid peer chain and
    report whether our chain was replaced."""
    replaced = blockchain.resolve_conflicts()
    if replaced:
        response = {
            'message': 'Our chain was replaced',
            'new_chain': blockchain.chain
        }
    else:
        response = {
            'message': 'Our chain is authoritative',
            'chain': blockchain.chain
        }
    return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app.run(host='0.0.0.0', port=port) | [
"sgr0691@gmail.com"
] | sgr0691@gmail.com |
7535fc5d441d5f35fcc565e962a1adef5798d705 | de4193a9497d188c885d97aadd726c25bac60a33 | /CHAPTER8/exercise/13.py | db402a196803aaba80c821b6e151620697e1e899 | [] | no_license | seungbinpark/PoseEstimation | 58880bf679ebffc401479040b9446bece6519032 | 09a8d0d4daae04e0667a9da600bab38b9c22febf | refs/heads/main | 2023-02-26T14:39:37.952302 | 2021-02-03T13:34:45 | 2021-02-03T13:34:45 | 327,631,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import numpy as np, cv2, math
from Common.interpolation import rotate_pt
"""def calc_angle(pts):
d1 = np.subtract(pts[1], pts[0])
d2 = np.subtract(pts[2], pts[0])
angle1 = cv2.fastAtan2(float(d1[1]), float(d1[0]))
angle2 = cv2.fastAtan2(float(d2[1]), float(d2[0]))
return (angle2 - angle1)"""
def draw_point(x, y):
pts.append([x, y])
print("좌표:", len(pts), [x,y])
cv2.circle(tmp, (x,y), 2, 255, 2)
cv2.imshow("image", tmp)
def onMouse(event, x, y, flags, param):
global tmp, pts
if(event == cv2.EVENT_LBUTTONDOWN and len(pts) == 0): draw_point(x, y)
if(event == cv2.EVENT_LBUTTONUP and len(pts) == 1): draw_point(x, y)
if len(pts) == 2:
print("기울기: %3.2F" % ((pts[1][1]-pts[0][1])/(pts[1][0]-pts[0][0])))
pts=[]
cv2.line(image, pts[0], pts[1], 0, 3, cv2.LINE_A)
image = cv2.imread("images/image.jpg", cv2.IMREAD_GRAYSCALE)
if image is None: raise Exception("영상파일 읽기 오류")
tmp = np.copy(image)
pts = []
cv2.imshow("image", image)
cv2.setMouseCallback("image", onMouse, 0)
cv2.waitKey(0) | [
"noreply@github.com"
] | noreply@github.com |
b21be155127dfa573d83e56b21b60b4f6aec47e8 | 103a5d2a2bc5b97edaecc2f41d050546daed9a6c | /data/flightVolume/merge_data.py | 586f3530ba1f0c8c86206acb8cbbfb11877da8bf | [] | no_license | saadiyahhPrivate/FP-The-Impact-of-COVID-19 | 94a8a94bc3ca22eeb902f39962c5d0e0f99655d0 | 89716a44108ee371dc01bf51c3a1407d13d8554f | refs/heads/master | 2023-03-06T16:26:25.213701 | 2021-02-17T05:19:13 | 2021-02-17T05:19:13 | 264,024,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import pandas as pd
import geopandas as gpd
import json
commercial = pd.read_csv('number-of-commercial-flights.csv')
commercial = commercial.rename(columns={"Number of flights": "commercialFlights", "7-day moving average": "7DayMovingAvg_Commercial"})
allFlights = pd.read_csv('total-number-of-flights.csv')
allFlights = allFlights.rename(columns={"Number of flights": "totalFlights", "7-day moving average": "7DayMovingAvg_Total"})
# merging the two files
mergedData = commercial.merge(allFlights, how='inner', left_on='DateTime', right_on='DateTime')
# calculated field
mergedData['NonCommercialFlights'] = mergedData.apply(lambda row: row.totalFlights - row.commercialFlights, axis = 1)
# mergedData.to_json(r'final_flight_data.json', orient='records', lines=True)
mergedData.to_json(r'final_flight_data.json', orient='records')
commercial = pd.read_csv('number-of-commercial-flights.csv')
allFlights = pd.read_csv('total-number-of-flights.csv')
commercial['type'] = 'commercial flights only'
allFlights['type'] = 'all flights'
concatted = pd.concat([commercial, allFlights], sort=True)
concatted.to_json(r'final_flight_data_concatted.json', orient='records')
sorted = concatted.sort_values(by='DateTime')
sorted.to_json(r'final_flight_data_concatted_sorted.json', orient='records')
| [
"saadiyah@mit.edu"
] | saadiyah@mit.edu |
1d6bcb58dc1aec8b0217d5471a49dfeea89d522d | 97507a2e349f6aee37bf851ccf3184893c97c621 | /Python/2019_03_20_Problem_64_ Knight_Tours.py | 0c1e5143c75bb836d3a4b73d8f5bf52081595d76 | [] | no_license | BaoCaiH/Daily_Coding_Problem | 935d581f3ac9cb5b72e871191c4d5f93413ab294 | 97eae3ee806756f4d646d600f434b1e68164ad34 | refs/heads/master | 2020-04-17T04:04:45.630222 | 2020-03-10T11:46:49 | 2020-03-10T11:46:49 | 166,213,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | #!/usr/bin/env python
# coding: utf-8
# ## 2019 March 20th
#
# Problem: A knight's tour is a sequence of moves by a knight on a chessboard such that all squares are visited once.
#
# Given N, write a function to return the number of knight's tours on an N by N chessboard.
# In[ ]:
def possible_moves(N, board, start):
tours = 0
moves = []
x = start[0]
y = start[1]
if x >= 2:
if y >= 1:
if board[x - 2][y - 1] != 1:
moves.append((x - 2, y - 1))
if y <= N - 2:
if board[x - 2][y + 1] != 1:
moves.append((x - 2, y + 1))
if x <= N - 3:
if y >= 1:
if board[x + 2][y - 1] != 1:
moves.append((x + 2, y - 1))
if y <= N - 2:
if board[x + 2][y + 1] != 1:
moves.append((x + 2, y + 1))
if y >= 2:
if x >= 1:
if board[x - 1][y - 2] != 1:
moves.append((x - 1, y - 2))
if x <= N - 2:
if board[x + 1][y - 2] != 1:
moves.append((x + 1, y - 2))
if y <= N - 3:
if x >= 1:
if board[x - 1][y + 2] != 1:
moves.append((x - 1, y + 2))
if x <= N - 2:
if board[x + 1][y + 2] != 1:
moves.append((x + 1, y + 2))
return moves
def knight_moves(N, board, lst):
if len(lst) == N * N:
return 1
tours = 0
moves = possible_moves(N, board, lst[-1])
if not moves:
return 0
for i, j in moves:
lst.append((i, j))
board[i][j] = 1
tours += knight_moves(N, board, lst)
board[i][j] = 0
lst.pop()
return tours
def knight_tours(N):
tours = 0
for i in range(N):
for j in range(N):
board = [[0 for _ in range(N)] for _ in range(N)]
board[i][j] = 1
tours += knight_moves(N, board, [(i, j)])
return tours
# In[11]:
run = input("It's going to take a long time to run, do you want to continue (N/y)")
if run != 'y':
run = "N"
if run == 'y':
knight_tours(5)
| [
"caihongbao280996@gmail.com"
] | caihongbao280996@gmail.com |
a8b38035f99e59895f47f6e1228d4b88ad66a723 | 3a168e9a045a967917e27ac1d86ae67cb8e0ea2a | /sampleDecorator.py | edffb154b8e07e49685cf25b0617f4a1188b686b | [] | no_license | Defixer/sample_python_decorator | 7e419cd481456ab855f227fce6919faccd6df2b7 | c4020871a56890a418678f46193d818bae1eb1d5 | refs/heads/master | 2020-03-31T07:09:48.680950 | 2018-10-08T02:55:13 | 2018-10-08T02:55:13 | 152,010,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py |
def talk(func):
def shout(wordy):
print("HOY" + func(wordy))
return shout
@talk
def your_name(wordy):
word = " {}".format(wordy).upper() + "!"
return word
your_name("kups") | [
"jp.mendoza017@gmail.com"
] | jp.mendoza017@gmail.com |
efecd6e8598ad283a82bc7fe6aab0b6dec4ceea3 | 5c333d9afed7ecf1feba34c41764184b70f725ea | /scripts/test.py | 22d789f0c247add83cb748c9a559e96f2bcd14b5 | [] | no_license | NMGRL/pychrondata | 4e3573f929b6a465fa959bfe5b5bdfe734514b8c | 0d805ca6b7e5377f253d80ad93749b1d4253cb50 | refs/heads/master | 2020-12-24T16:35:39.308745 | 2016-03-09T18:37:47 | 2016-03-09T18:37:47 | 15,424,677 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | #!Extraction
def main():
'''
start at 0 zoom
focus
take picture
increment zoom
take picture
start at 100 zoom
focus
take picture....
'''
for i in range(10):
info('info {}'.format(i))
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
3cb3590aa14685e5f32a7a6934622cf1bec1de7b | 3861bb54eb7967b20265d71bd7bf7f69dfc83277 | /TicTacToe/game_TicTacToe.py | d643e618d7033518db5e591f71e39e6dff598e37 | [] | no_license | dariusstroe/Python_MiniProjects | 1ccb0045e9c6805804ac68e16ce29b9f8d1318e8 | 55c8b674cd47b9c7fc231673b81ab4916575a611 | refs/heads/main | 2023-03-05T10:31:10.250110 | 2021-02-18T12:38:41 | 2021-02-18T12:38:41 | 340,039,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,107 | py | import math
import time
from player_TicTacToe import HumanPlayer, RandomComputerPlayer,GeniusComputerPlayer
class TicTacToe():
    """3x3 tic-tac-toe board stored as a flat list of 9 cells ('X', 'O',
    or ' ').  current_winner holds the winning letter once a player wins."""

    def __init__(self):
        self.board = self.make_board()
        self.current_winner = None

    @staticmethod
    def make_board():
        """Return a fresh board: nine blank cells."""
        return [' '] * 9

    def print_board(self):
        """Pretty-print the current board, one row per line."""
        for start in range(0, 9, 3):
            cells = self.board[start:start + 3]
            print('| ' + ' | '.join(cells) + ' |')

    @staticmethod
    def print_board_nums():
        """Print the square numbering (0..8) players use to pick a move."""
        for start in range(0, 9, 3):
            labels = [str(start + offset) for offset in range(3)]
            print('| ' + ' | '.join(labels) + ' |')

    def make_move(self, square, letter):
        """Place *letter* on *square* if free; update current_winner.

        Returns True when the move was made, False for an occupied square.
        """
        if self.board[square] != ' ':
            return False
        self.board[square] = letter
        if self.winner(square, letter):
            self.current_winner = letter
        return True

    def winner(self, square, letter):
        """True if the move just played at *square* completes a line."""
        # Row containing the square.
        row_start = (square // 3) * 3
        if all(cell == letter for cell in self.board[row_start:row_start + 3]):
            return True
        # Column containing the square.
        col = square % 3
        if all(self.board[col + 3 * r] == letter for r in range(3)):
            return True
        # Diagonals only pass through even-numbered squares.
        if square % 2 == 0:
            if all(self.board[i] == letter for i in (0, 4, 8)):
                return True
            if all(self.board[i] == letter for i in (2, 4, 6)):
                return True
        return False

    def empty_squares(self):
        """True while at least one square is still free."""
        return ' ' in self.board

    def num_empty_squares(self):
        return self.board.count(' ')

    def available_moves(self):
        """Indices of all free squares."""
        return [i for i, cell in enumerate(self.board) if cell == ' ']
def play(game, x_player, o_player, print_game=True):
    """Run one game of tic-tac-toe on `game` between two player objects.

    Both players must expose get_move(game) -> square index. Returns the
    winning letter ('X' or 'O'), or falls through to None on a tie.
    """
    if print_game:
        game.print_board_nums()
    letter = 'X'  # X always moves first
    while game.empty_squares():
        # Ask whichever player owns the current letter for a square.
        if letter == 'O':
            square = o_player.get_move(game)
        else:
            square = x_player.get_move(game)
        if game.make_move(square, letter):
            if print_game:
                print(letter + ' makes a move to square {}'.format(square))
                game.print_board()
                print('')
            if game.current_winner:
                if print_game:
                    print(letter + ' wins!')
                return letter  # ends the loop and exits the game
            letter = 'O' if letter == 'X' else 'X'  # switches player
        # Small pause so the console output is readable between moves.
        time.sleep(.8)
    if print_game:
        print('It\'s a tie!')
if __name__ == '__main__':
    # Interactive game: human plays X against the "genius" computer player
    # (imported from player_TicTacToe) playing O.
    x_player =HumanPlayer('X')
    o_player = GeniusComputerPlayer('O')
    t = TicTacToe()
    play(t, x_player, o_player, print_game=True)
"noreply@github.com"
] | noreply@github.com |
adb78f4021e80526d4619b8d432b810cf041161e | f1be1c3dd57de4534817ff0e75ee117f346c7cf7 | /python/Lexer.py | 519cb7b5eec58ee08b5fe8d485078748e15ffa88 | [] | no_license | rahutchinson/LED-Javascript | fd8f5355c80034a043a723edaba1991ba913c5d4 | 200289f6f9043a8b37a0b15976d8580b98f0f91d | refs/heads/master | 2021-03-30T17:34:17.953634 | 2018-05-14T21:21:58 | 2018-05-14T21:21:58 | 76,993,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,733 | py | '''
Finite state machine tokenizer
(c) Nelson Rushton, Texas Tech CS
March 2017
'''
# whiteChar(c) iff c is a whitespace character.
def whiteChar(c):
    """Return True when `c` is a whitespace character the lexer skips.

    Note: form-feed ('\f') is deliberately not in this set.
    """
    return c in " \r\n\t\v"
# lex(s)
def lex(s):
    """Tokenize string `s` into a list of tokens.

    Whitespace is skipped and //-comments are discarded up to the next
    carriage return or newline; everything else is consumed greedily by
    munch() (maximal-munch tokenization).
    """
    i = 0
    tokens = [ ]
    # invariants:0 <= i <= len(s), tokens = lex(s[:i]),
    while i < len(s):
        if whiteChar(s[i]):
            i = i+1
        elif i < len(s)-1 and s[i:i+2] == "//":
            # skip the comment until the next return or new line character
            i = i+2
            while i < len(s) and s[i] not in "\r\n":
                i = i+1
        else:
            # process the longest possible token
            tok = munch(s,i)
            tokens.append(tok)
            i = i + len(tok)
    return tokens
# If 0<= i < len(s) and s[i:] begins with a token, then munch(s,i)
# is longest token that is a prefix of s[i:]
def munch(s,i):
    """Return the longest token that is a prefix of s[i:].

    Drives the newState() finite-state machine one character at a time and
    stops at end-of-string or when the next character would lead to 'err'.
    """
    A,j = 'em',i
    # invariants: i <= j <= len(s), A is the state that describes s[i:j]
    while True:
        if j == len(s): break # end of string
        A = newState(A,s[j])
        # A is now the state that *would* result if we process
        # one more character.
        if A == 'err': break
        # A is not 'err', so good with one more character
        j = j+1
    return s[i:j]
'''
A *state* is a string . States *describe* strings as given below:
1. 'em' describes str iff str is empty
2. 'id' describes str iff str is an identifier
3. 'num' describes str iff is a numeral
4. 'err' describes str iff str is not a prefix of any token
'''
# If state A describes a string and c is a character, than newState(A,c)
# describes the string A+c.
def isSpecial(c):
    """Return True when `c` is one of the lexer's special punctuation characters.

    The set contains both backslash and forward slash. The original literal
    spelled them as "\/", an invalid escape sequence (a SyntaxWarning /
    future error in modern Python); a raw string keeps the exact same two
    characters without relying on the undefined escape.
    """
    return c in r"<>=*+-^|,~.{}()[]\/&:;$"
def newState(A,c):
    """Transition function of the tokenizer's finite-state machine.

    Given state `A` describing the string read so far and the next
    character `c`, return the state describing that string plus `c`.
    Any unlisted (state, character) pair falls through to 'err', which
    munch() treats as "stop: the token ends before this character".
    """
    if A=='em':
        # Empty string: the first character decides the token class.
        if c.isalpha() or c == '`': return 'id'
        elif c.isdigit(): return 'num'
        elif c == '"': return 'str'
        # elif c == "(": return 'left_paren'
        elif c == ".": return 'period'
        elif isSpecial(c):
            # Some operators can be extended by further characters
            # (e.g. '<' -> '<=' -> '<=>'); the rest are single-char tokens.
            if c == '<': return '<_extendable'
            elif c == '>': return '>_extendable'
            elif c == '=': return '=_extendable'
            elif c == ':': return ':_extendable'
            else: return 'non-extendable'
    elif A=='id':
        # Identifiers continue with letters, digits or underscores.
        if (c.isalpha() or c.isdigit() or c == '_'): return 'id'
    elif A == 'num':
        if c.isdigit(): return 'num'
        elif c == "(": return 'left_paren'
        elif c == ".": return 'period'
    elif A == 'str':
        # Inside a string literal until the closing double quote.
        if c == '"': return 'end_str'
        else: return 'str'
    elif A == 'left_paren':
        # '(' after a numeral/period starts a repeating-decimal group "(dd..)".
        if c.isdigit(): return 'repeat_dig'
    elif A == 'period':
        if c.isdigit(): return 'decimal'
        if c == '(': return 'left_paren'
        if c == '.': return '.._end'
    elif A == 'decimal':
        if c.isdigit(): return 'decimal'
        if c == '(': return 'left_paren'
    elif A == 'repeat_dig':
        if c.isdigit(): return 'repeat_dig'
        elif c == '.': return 'repeat_1.'
    elif A == 'repeat_1.':
        if c == '.': return 'repeat_2.'
    elif A == 'repeat_2.':
        if c == ')': return 'end_repeat'
    elif A == '<_extendable':
        if c == '=': return '<=_extendable'
    elif A == '>_extendable':
        if c == '=': return 'non-extendable'
    elif A == '<=_extendable':
        if c == '>': return 'non-extendable'
    elif A == ':_extendable':
        if c == '=': return 'non-extendable'
    return 'err'
def open_file_as_string(filename):
    """Read the whole text file `filename` and return its contents as one string."""
    with open(filename, "r") as file:
        s = file.read()
    return s
def preprocess_codeblocks(file):
    """Extract the /$ ... $/ delimited code blocks from `file` and join them.

    Newlines in the collected text are replaced by spaces.
    NOTE(review): the slice buffer[1:-2] drops the final character of each
    block's content along with the '$' delimiters — preserved verbatim from
    the original; confirm whether [1:-1] was intended.
    """
    inside = False
    pieces = []
    buffer = ''
    prev = None
    for ch in file:
        if prev:
            # "/$" opens a block, "$/" closes it.
            if prev == '/' and ch == '$':
                inside = True
            if prev == '$' and ch == '/':
                inside = False
                pieces.append(buffer[1:-2])
                buffer = ''
        if inside:
            buffer += ch
        prev = ch
    return ''.join(pieces).replace('\n', ' ')
def preprocess_definitions(token_array):
    """Split a flat token stream into per-definition token lists.

    A definition is recognized by an "iff" or ":=" token preceded either by
    a bare identifier or by an identifier followed by a parenthesized
    argument list. Tokens are buffered while a parenthesized group may still
    turn out to be a definition head. Buckets that end up containing neither
    "iff" nor ":=" are pruned at the end.
    """
    list_of_definitions = []
    tokens_in_buffer = []
    current_def = []
    last_token = ' '
    paren_open = False
    # Sentinel ' ' appended so the final buffered tokens get flushed.
    for token in token_array + [' ']:
        if last_token[0].isalpha() and last_token != 'iff' and token == "(":
            # Possible start of "name( ... )" — buffer until we know whether
            # it is a definition head.
            paren_open = True
            tokens_in_buffer += [token]
        elif paren_open and last_token == ")" and token in ["iff",":="]:
            # "name(...) iff/:=" — the buffered tokens start a new definition.
            paren_open = False
            list_of_definitions += [current_def]
            current_def = tokens_in_buffer
            tokens_in_buffer = []
        elif last_token[0].isalpha() and token in ["iff",":="]:
            # "name iff/:=" — bare-identifier definition head.
            paren_open = False
            list_of_definitions += [current_def]
            current_def = [last_token]
            tokens_in_buffer = []
        elif paren_open and last_token == ")" and token not in ["iff",":="]:
            # Parenthesized group was not a definition head: keep it in the
            # current definition body.
            current_def += tokens_in_buffer
            tokens_in_buffer = [token]
            paren_open = False
        elif paren_open:
            tokens_in_buffer += [token]
        elif token[0].isalpha() and not paren_open:
            # Hold the identifier back; it may begin the next definition head.
            current_def += [last_token]
            tokens_in_buffer = [token]
        else:
            current_def += [last_token]
            tokens_in_buffer = []
        last_token = token
    # Flush whatever is still buffered (ignoring the sentinel).
    if tokens_in_buffer != [] and tokens_in_buffer != [' ']:
        current_def += tokens_in_buffer
    if current_def != []:
        list_of_definitions += [current_def]
    # Drop buckets that contain no definition operator.
    # NOTE(review): removing items from a list while iterating over it can
    # skip the element following each removal — confirm this is acceptable.
    for defi in list_of_definitions:
        if ":=" in defi or "iff" in defi:
            pass
        else:
            list_of_definitions.remove(defi)
    return list_of_definitions
| [
"beauregardmiller@gmail.com"
] | beauregardmiller@gmail.com |
8c2611ad5852420460e9005177ed6e1296572354 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/a01.py | d912fcfad8c297d61001f2e370b33ba78e5a3e2d | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'a01':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
640fb00e3598e3cae1b911e36f126913a0a24f0c | 7bf6897af68b2001dbacc43fe0afd83effb16f47 | /DataStorage.py | b15bed57473576c71fbb88de26e580035c047364 | [] | no_license | senjay/WechatAnalyseAndRobot | 62cfba0cd6bcc493c94fa2f9ef5a8af49a0ac54a | 64c01ed0b6f7a106e0b94048927b2775f9f451bd | refs/heads/master | 2022-11-03T18:28:40.464458 | 2019-06-21T16:46:00 | 2019-06-21T16:46:00 | 193,124,214 | 1 | 1 | null | 2022-11-01T10:06:41 | 2019-06-21T15:48:13 | Python | UTF-8 | Python | false | false | 3,067 | py | import wxpy
import openpyxl
import os
class DataStorage:
    """Collects WeChat friend data via a wxpy bot and saves it to disk.

    Progress messages (Chinese UI strings) are reported through `qlist`, a
    queue-like object supporting put()/join(); each put is followed by a
    join() so the consumer acknowledges every message.
    """

    def __init__(self,bot,qlist) -> None:
        super().__init__()
        self.msgtype='获取好友信息'  # message-type tag prepended to every queue update
        self.qlist=qlist
        self.bot = bot
        self.friend_all = self.bot.friends()
        # friends()[0] is the logged-in account itself; use its nickname
        # to name the per-user output directory.
        self.loginName = self.friend_all[0].raw.get('NickName')
        self.createDir()

    def findFriendList(self):
        """Return a list of per-friend attribute rows (skipping self at index 0).

        Each row: [UserName, NickName, RemarkName, Sex, Province, City,
        Signature, HeadImgUrl, IsHuman, HeadImgTags]; the last two are the
        placeholder '空' ("empty") to be filled in later.
        """
        msg='正在获取好友信息\n\n'+'*' * 30 + '\n'
        self.qlist.put([self.msgtype,msg])
        self.qlist.join()
        lis=[]
        for a_friend in self.friend_all[1:]:
            UserName= a_friend.raw.get('UserName',None)
            NickName = a_friend.raw.get('NickName',None)
            RemarkName=a_friend.raw.get('RemarkName',None)
            Sex = a_friend.raw.get('Sex',None)
            # Map the numeric sex code to a display string (1=male, 2=female,
            # 0=unset); unknown codes become None.
            Sex ={1:"男",2:"女",0:"未设置"}.get(a_friend.raw.get('Sex',None),None)
            City = a_friend.raw.get('City',None)
            Province = a_friend.raw.get('Province',None)
            Signature = a_friend.raw.get('Signature',None)
            #HeadImgUrl = a_friend.raw.get('HeadImgUrl',None)
            HeadImgUrl=self.saveHeadImg(UserName)
            list_0=[UserName,NickName,RemarkName,Sex,Province,City,Signature,HeadImgUrl,'空','空']# leave IsHuman / HeadImgTags blank for now
            lis.append(list_0)
        msg='好友信息获取完毕\n\n'+'*' * 30 + '\n'
        self.qlist.put([self.msgtype,msg])
        self.qlist.join()
        return lis

    def saveHeadImg(self,UserName):
        """Download one friend's avatar to <headImg>/<UserName>.jpg.

        Write failures are printed and swallowed; the absolute target path is
        returned either way.
        """
        img = self.bot.core.get_head_img(userName=UserName)
        filename = UserName + ".jpg"
        path=os.path.join(self.userheadimg,filename)
        try:
            with open(path, 'wb') as f:
                f.write(img)
        except Exception as e:
            print(repr(e))
        return os.path.abspath(path)

    def createDir(self):
        """Create userdata/<loginName>/headImg output directories if missing."""
        if os.path.exists('userdata') != True:
            os.mkdir('userdata')
        self.userdir=os.path.join('userdata',self.loginName)
        self.userheadimg = os.path.join('userdata', self.loginName, 'headImg')
        if os.path.exists(self.userdir) != True:
            os.mkdir(self.userdir)
        if os.path.exists(self.userheadimg) != True:
            os.mkdir(self.userheadimg)
        self.qlist.put([self.msgtype, '正在创建文件夹\n\n'+'*' * 30 + '\n'+'路径为:'+os.path.abspath(self.userdir)+'\n\n'+'*' * 30 + '\n'])
        self.qlist.join()

    def saveExcel(self,lis):
        """Write the friend rows from findFriendList() to <userdir>/friend.xlsx."""
        wb=openpyxl.Workbook()
        sheet=wb.worksheets[0]
        row = ['UserName','NickName', 'RemarkName', 'Sex','Province','City','Signature','HeadImgUrl','IsHuman','HeadImgTags']
        sheet.append(row)
        for item in lis:
            sheet.append(item)
        savepath=os.path.join(self.userdir,'friend.xlsx')
        wb.save(savepath)
        self.qlist.put([self.msgtype, '好友信息保存完毕\n\n'+'*' * 30 + '\n'])
        self.qlist.join()
self.qlist.join()
def getFriendData(bot,qlist):
    """Convenience entry point: collect all friend records for `bot` and save them to friend.xlsx."""
    datastorage = DataStorage(bot, qlist)
    lis = datastorage.findFriendList()
    datastorage.saveExcel(lis)
| [
"760832791@qq.com"
] | 760832791@qq.com |
10e0194e87911711b252133a359ff84c1fcb4710 | 6b1f5f6597d51125e0cfae59300ea75153e3e498 | /Test/msbaopoSpider.py | 07d4afc70a7ff3c28c5f76b1bd22402fcbfd74d8 | [] | no_license | zhangzongbo/spider | 3eba26e212703cdd1ddbf760c1ffa623f03f08ad | 8dfd98b1e2e2f5a4401c4f682dda2016939f2d0c | refs/heads/master | 2020-03-25T14:12:25.996603 | 2019-08-13T07:52:19 | 2019-08-13T07:52:19 | 143,855,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,256 | py | import requests
import urllib.parse
import time
from bs4 import BeautifulSoup
def getHtml():
    """Brute-force numeric usernames (1000..99999) with a fixed password
    against the remote login endpoint, logging hits to a text file.

    SECURITY NOTE(review): this is a credential brute-forcing script against
    a third-party service — flagged for review rather than modified.
    """
    # NOTE(review): BASE_URL is assigned but never used in this function.
    BASE_URL = 'http://ms31.haixing8.cn/login.php?d=login.log&cid=0&stateid=1&u=21367898&s={}' # 21758788
    login_url = 'http://ms.haixing8.cn/commreg/channel.php?d=login.startover&spid=&clienttype=WAP2'
    # Headers copied from a captured browser request (see the curl transcript
    # at the bottom of this file).
    headers = {
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'Origin': 'http://92msjs.com',
        'Upgrade-Insecure-Requests': '1',
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'http://92msjs.com/commreg/channel.php?d=login.start&clienttype=WAP2',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,nl;q=0.7,zh-TW;q=0.6',
        # 'Cookie': 'ms_user_name=13300000000; clienttype=WAP2; PHPSESSID=me979ih369ropj3c3in7fpaa06'
    }
    # username : 3-13 digits or letters
    # password : 4-20 digits or letters
    mapList = list(range(1000, 100000))
    for i in mapList:
        # 'submit' carries the URL-encoded Chinese "确定" (confirm) button text.
        payload = {'username': i,
                   'password': '123456', 'submit': r'%E7%A1%AE%E5%AE%9A'}
        # allow_redirects=False so the Location header can be classified.
        html = requests.post(login_url, headers=headers,
                             data=payload, allow_redirects=False)
        result = valid(html.headers['Location'])
        mark = "id:{},login {}".format(i, result)
        print(mark)
        if result == "SUCCESS":
            with open('msjs-username-password.txt', 'a', encoding='utf-8') as f:
                f.write(mark + '\n')
def valid(location_url):
    """Classify a post-login redirect URL.

    'regselectsvr' in the URL means the login succeeded, 'err_msg' means it
    failed; anything else is unrecognized.
    """
    if 'regselectsvr' in location_url:
        return 'SUCCESS'
    if 'err_msg' in location_url:
        return 'FAILURE'
    return 'NONE!'
if __name__ == "__main__":
    # Script entry point: run the brute-force loop (see getHtml's security note).
    getHtml()
# loca = 'http://ms.haixing8.cn/commreg/channel.php?d=login.regselectsvr&u=21367898&gmsid=6953c9692492f0404e25bf90b016be85&clienttype=WAP2'
# loca = 'http://ms.haixing8.cn/commreg/channel.php?d=login.start&spid=&clienttype=WAP2&show_err=1023&err_msg=%E6%82%A8%E8%BE%93%E5%85%A5%E7%9A%84%E5%AF%86%E7%A0%81%E9%94%99%E8%AF%AF%EF%BC%8C%E8%AF%B7%E9%87%8D%E6%96%B0%E8%BE%93%E5%85%A5%EF%BC%884-20%E4%B8%AA%E6%95%B0%E5%AD%97%E6%88%96%E5%AD%97%E7%AC%A6%EF%BC%89'
# valid(loca)
''' CURL
curl -Ls -w %{url_effective} -o /dev/null 'http://ms.haixing8.cn/commreg/channel.php?d=login.startover&spid=&clienttype=WAP2'
-H 'Connection: keep-alive'
-H 'Pragma: no-cache'
-H 'Cache-Control: no-cache'
-H 'Origin: http://92msjs.com'
-H 'Upgrade-Insecure-Requests: 1'
-H 'Content-Type: application/x-www-form-urlencoded'
-H 'User-Agent: Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36'
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
-H 'Referer: http://92msjs.com/commreg/channel.php?d=login.start&clienttype=WAP2'
-H 'Accept-Encoding: gzip, deflate'
-H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,nl;q=0.7,zh-TW;q=0.6'
-H 'Cookie: ms_user_name=13300000000; clienttype=WAP2; PHPSESSID=me979ih369ropj3c3in7fpaa06'
--data 'username=13300000000&password=000000&submit=%E7%A1%AE%E5%AE%9A'
--compressed
http://ms.haixing8.cn/commreg/channel.php?d=login.regselectsvr&u=21367898&gmsid=0f96b261ec3376d5a0746e11ddd7ae80&clienttype=WAP2%
'''
'''example
def yunsite():
'url'
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Connection': 'keep-alive',
'Host': 'pan.baidu.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
url = 'https://pan.baidu.com/s/1c0rjnbi'
html = requests.get(url, headers=headers, allow_redirects=False)
return html.headers['Location']
'''
| [
"zhangzongbo1994@gmail.com"
] | zhangzongbo1994@gmail.com |
a32d53da6229348480d5344cd6c35bd4746c2a57 | f21fd87d0dd288f4d905003c0ea67607f6f67d71 | /components/wifi_provisioning/python/security/security1.py | ac56b80135ba3403a688fae87f5673c7cef868cd | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | makserge/esp-va-sdk | e67d08690f1d6b4dcbc9a115d57a22b3e2cea423 | 2279bbb6a1f7a5df80330b211dda17c030edcc4d | refs/heads/master | 2020-04-17T09:01:18.622639 | 2019-01-18T17:44:55 | 2019-01-18T17:44:55 | 166,440,330 | 0 | 0 | NOASSERTION | 2019-01-18T16:40:34 | 2019-01-18T16:40:34 | null | UTF-8 | Python | false | false | 5,331 | py | # Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from security import *
import proto_python
import curve25519
import Crypto.Cipher.AES
import Crypto.Util.Counter
import Crypto.Hash.SHA256
import session_pb2
class security_state:
    """Progress markers for the Security1 handshake state machine."""
    REQUEST1 = 0            # nothing exchanged yet; next: setup0_request()
    RESPONSE1_REQUEST2 = 1  # setup0 sent; next: parse its response, send setup1_request()
    RESPONSE2 = 2           # setup1 sent; next: parse its response
    FINISHED = 3            # handshake complete
class Security1(Security):
    """Security scheme 1: Curve25519 key exchange + AES-CTR session cipher.

    Python 2 code: byte strings use .encode('hex')/.decode('hex') and long().
    The shared secret is optionally XOR-folded with SHA-256(pop), where `pop`
    is the proof-of-possession string.
    """

    def __init__(self, pop, verbose):
        # Register security1_session as this Security object's handshake driver.
        self.session_state = security_state.REQUEST1
        self.pop = pop
        self.verbose = verbose
        Security.__init__(self, self.security1_session)

    def security1_session(self, response_data):
        """Advance the handshake one step; return the next request payload or None."""
        if (self.session_state == security_state.REQUEST1):
            self.session_state = security_state.RESPONSE1_REQUEST2
            return self.setup0_request()
        if (self.session_state == security_state.RESPONSE1_REQUEST2):
            self.session_state = security_state.RESPONSE2
            self.setup0_response(response_data)
            return self.setup1_request()
        if (self.session_state == security_state.RESPONSE2):
            self.session_state = security_state.FINISHED
            self.setup1_response(response_data)
            return None
        else:
            print "Unexpected state"
            return None

    def __generate_key(self):
        """Generate a fresh Curve25519 client key pair."""
        self.client_private_key = curve25519.genkey()
        self.client_public_key = curve25519.public(self.client_private_key)

    def _xor_two_str(self, a, b):
        """XOR two byte strings, cycling the shorter one; returns raw bytes."""
        ret = ''
        for i in range(max(len(a), len(b))):
            num = hex(ord(a[i%len(a)]) ^ ord(b[i%(len(b))]))[2:]
            # Left-pad each XORed byte to two hex digits.
            if len(num) == 0:
                num = '00'
            if len(num) == 1:
                num = '0'+ num
            ret = ret + num
        return ret.decode('hex')

    def _print_verbose(self, data):
        """Print a debug line when verbose mode is on."""
        if (self.verbose):
            print "++++ " + data + " ++++"

    def setup0_request(self):
        """Build handshake step 0: serialized SessionData carrying our public key."""
        setup_req = session_pb2.SessionData()
        setup_req.sec_ver = session_pb2.SecScheme1
        self.__generate_key()
        setup_req.sec1.sc0.client_pubkey = self.client_public_key
        self._print_verbose("client_public_key:\t" + setup_req.sec1.sc0.client_pubkey.encode('hex'))
        return setup_req.SerializeToString()

    def setup0_response(self, response_data):
        """Parse step-0 response (hex-encoded protobuf); derive the session cipher.

        Computes the Curve25519 shared key, optionally XORs it with
        SHA-256(pop), then creates an AES-CTR cipher with the device's random
        value as the initial counter, and pre-computes client_verify.
        Exits the process on a security-scheme mismatch.
        """
        setup_resp = proto_python.session_pb2.SessionData()
        setup_resp.ParseFromString(response_data.decode('hex'))
        self._print_verbose("Security version:\t" + str(setup_resp.sec_ver))
        if setup_resp.sec_ver != session_pb2.SecScheme1:
            print "Incorrect sec scheme"
            exit(1)
        self._print_verbose("device_pubkey:\t"+setup_resp.sec1.sr0.device_pubkey.encode('hex'))
        self._print_verbose("device_random:\t"+setup_resp.sec1.sr0.device_random.encode('hex'))
        sharedK = curve25519.shared(self.client_private_key, setup_resp.sec1.sr0.device_pubkey)
        self._print_verbose("Shared Key:\t" + sharedK.encode('hex'))
        if len(self.pop) > 0:
            # Fold the proof-of-possession into the shared key.
            h = Crypto.Hash.SHA256.new()
            h.update(self.pop)
            digest = h.digest()
            sharedK = self._xor_two_str(sharedK, digest)
            self._print_verbose("New Shared Key xored with pop:\t" + sharedK.encode('hex'))
        # 128-bit CTR counter seeded with the device-provided random value.
        ctr = Crypto.Util.Counter.new(128, initial_value=long(setup_resp.sec1.sr0.device_random.encode('hex'), 16))
        self._print_verbose("IV " + hex(long(setup_resp.sec1.sr0.device_random.encode('hex'), 16)))
        self.cipher = Crypto.Cipher.AES.new(sharedK, Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Encrypted device pubkey proves to the device that we derived the same key.
        self.client_verify = self.cipher.encrypt(setup_resp.sec1.sr0.device_pubkey)
        self._print_verbose("Client Verify:\t" + self.client_verify.encode('hex'))

    def setup1_request(self):
        """Build handshake step 1: serialized SessionData carrying client_verify."""
        setup_req = proto_python.session_pb2.SessionData()
        setup_req.sec_ver = session_pb2.SecScheme1
        setup_req.sec1.msg = proto_python.sec1_pb2.Session_Command1
        setup_req.sec1.sc1.client_verify_data = self.client_verify
        return setup_req.SerializeToString()

    def setup1_response(self, response_data):
        """Parse step-1 response and print the device's verification data.

        Returns -1 on an unsupported security scheme; otherwise returns None.
        """
        setup_resp = proto_python.session_pb2.SessionData()
        setup_resp.ParseFromString(response_data.decode('hex'))
        if setup_resp.sec_ver == session_pb2.SecScheme1:
            self._print_verbose("Device verify:\t"+setup_resp.sec1.sr1.device_verify_data.encode('hex'))
            enc_client_pubkey = self.cipher.encrypt(setup_resp.sec1.sr1.device_verify_data)
            self._print_verbose("Enc client pubkey:\t "+enc_client_pubkey.encode('hex'))
        else:
            print ("Unsupported security protocol")
            return -1

    def encrypt_data(self, data):
        """Encrypt `data` with the established AES-CTR session cipher."""
        return self.cipher.encrypt(data)

    def decrypt_data(self, data):
        """Decrypt `data` with the established AES-CTR session cipher."""
        return self.cipher.decrypt(data)
| [
"amit@espressif.com"
] | amit@espressif.com |
8e1527afd907d991eb35c32c39b6233d4569f100 | c5545d337921d1f15f569f687fb2104b6fd0ede0 | /eshopper/migrations/0005_auto__add_wishlist.py | 3b7868c7f17e592abf11c254ef3eaeef138f5d42 | [] | no_license | darshith0000/Eshopper | 32aae0b24b568b2179bd96e0e07ccde2a632a534 | 7a29154e78d4a59ac7d34bca9baed7efc4d026ae | refs/heads/master | 2021-03-12T23:12:50.926936 | 2015-02-16T14:55:06 | 2015-02-16T14:55:06 | 30,871,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,910 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the eshopper_wishlist table for model 'Wishlist'."""
        # Adding model 'Wishlist'
        db.create_table(u'eshopper_wishlist', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('product_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shop.Product'])),
        ))
        db.send_create_signal(u'eshopper', ['Wishlist'])
    def backwards(self, orm):
        """Reverse the migration: drop the eshopper_wishlist table."""
        # Deleting model 'Wishlist'
        db.delete_table(u'eshopper_wishlist')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'eshopper.products': {
'Meta': {'object_name': 'Products', '_ormbases': ['shop.Product']},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shop.Product']", 'unique': 'True', 'primary_key': 'True'})
},
u'eshopper.wishlist': {
'Meta': {'object_name': 'Wishlist'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Product']"}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_shop.product_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'})
}
}
complete_apps = ['eshopper'] | [
"darshithb@mindfiresolutions.com"
] | darshithb@mindfiresolutions.com |
1eb2f715857b7860d37606c858a8ac2c834a2f58 | 87fb0ae5563512bf4cfe2754ea92e7f4173f753f | /Chap_08/Ex_181.py | ba2d1b32e7dabdca85b694f6e4635d4a64b0e168 | [] | no_license | effedib/the-python-workbook-2 | 87291f5dd6d369360288761c87dc47df1b201aa7 | 69532770e6bbb50ea507e15f7d717028acc86a40 | refs/heads/main | 2023-08-21T13:43:59.922037 | 2021-10-12T20:36:41 | 2021-10-12T20:36:41 | 325,384,405 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | # Possible Change
# Read both the dollar amount and the number of coins from user and display a message indicating whether or not the
# entered amount can be formed using the number of coins indicated.
def possibleChange(dollars, coins, index=0):
    """Recursively decide whether `dollars` can be formed with exactly `coins` coins.

    Denominations (quarter, dime, nickel, penny) are tried greedily in
    descending order starting at `index`; the greedy choice is undone by
    backtracking when either the money or the coin budget runs out first.
    Prints a trace line per recursive step (preserved from the original).
    """
    denominations = [0.25, 0.10, 0.05, 0.01]
    dollars = round(dollars, 2)
    # Success: money and coin budget exhausted together.
    if dollars == 0 and coins == 0:
        return True
    # Failure: no denominations left to try.
    if index >= len(denominations):
        return False
    print("index '{:.2f}'\t{:.2f} dollars\t{:.2f} coins".format(denominations[index], dollars, coins))
    if dollars == 0 or coins == 0:
        # One quantity ran out before the other: put back one coin of the
        # current denomination and move on to the next denomination.
        return possibleChange(dollars + denominations[index], coins + 1, index + 1)
    if (dollars / coins) in denominations:
        # Remaining amount splits evenly into `coins` identical coins.
        return True
    if dollars >= denominations[index]:
        # Greedily take one coin of the current denomination.
        dollars -= denominations[index]
        coins -= 1
    else:
        # Current denomination too large: try the next smaller one.
        index += 1
    return possibleChange(dollars, coins, index)
def main():
    """Prompt for an amount and a coin count, then report feasibility."""
    total = float(input('Enter the total amount: '))
    coin = int(input('How many coins do you want to use? '))
    for i in range(1, (coin+1)):
        # NOTE(review): loop variable `i` is unused — every iteration tests the
        # same `coin` count, so the loop repeats one result `coin` times; was
        # possibleChange(total, i) intended?
        print("{} coins:\t{}".format(coin, possibleChange(total, coin)))
if __name__ == "__main__":
    main()
| [
"cicciodb@hotmail.it"
] | cicciodb@hotmail.it |
6cc46adae3cba68f1726db300fc0a2849212a8e0 | b43de8e080cf910133b7e468341abea74bb4f3af | /conv_wf2rt.py | 83bb41bb6c4758e246073570393ee4e9a0f225bd | [] | no_license | ivanovdmitri/sFLASH_analysis | 12fa9e10d9443da17420ddf9b5445da1982f7696 | 953c254cd8fb0439f76da56612b14d10fc6af6ee | refs/heads/main | 2023-03-28T08:46:14.086947 | 2021-03-27T22:20:29 | 2021-03-27T22:20:29 | 349,868,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,097 | py | #!/usr/bin/env python
import argparse
from ROOT import TFile, TTree
from array import array
import itertools
import numpy as np
import sys
import os
import re
def read_xlsx_sheet(xlsx_file,sheet_name):
    """Read sheet `sheet_name` from an Excel file via xlrd (Python 2 code).

    Returns (header, values): header is the list of column names from row 0,
    values is a list of {column_name: cell_value} dicts, one per data row.
    Exits the process with status 2 on any error (xlrd missing, sheet
    missing, or empty sheet).
    """
    # one can directly read excel files if
    # python-xlrd package has been installed
    try:
        import xlrd
    except ImportError:
        sys.stderr.write("ERROR: package python-xlrd not intalled ")
        sys.stderr.write("but an .xlsx file \n\'{:s}\' given!\n".\
                             format(xlsx_file))
        sys.stderr.write("Either convert it to .csv format or ")
        sys.stderr.write("install python-xlrd on your system!\n")
        sys.exit(2)
    with xlrd.open_workbook(xlsx_file) as wb:
        try:
            sheet=wb.sheet_by_name(sheet_name)
        except:
            sys.stderr.write("ERROR: no sheet named \'{:s}\' in file {:s}!\n".\
                                 format(sheet_name,xlsx_file))
            sys.exit(2)
        if sheet.nrows < 1:
            sys.stderr.write("ERROR: sheet \'{:s}\' in file {:s} has no rows!\n".\
                                 format(sheet_name,xlsx_file))
            sys.exit(2)
        # Row 0 holds the column names; force them to plain ASCII str.
        header=map(lambda s: str(s.encode("ascii")), sheet.row_values(0))
        values=[]
        for row in range(1,sheet.nrows):
            row_values={}
            for col in range(0,sheet.ncols):
                v=sheet.cell(row,col).value
                # xlrd returns text cells as unicode; normalize to str.
                if type(v) == unicode:
                    v = str(v.encode("ascii"))
                row_values[header[col]] = v
            values.append(row_values)
    return (header, values)
def read_csv_file(csv_file):
    """Read a CSV file (Python 2 code).

    Returns (header, values): header is the first-row column names, values a
    list of {column_name: cell_value} dicts. Exits with status 2 if the csv
    module cannot be imported. The file is opened twice: once for the raw
    header order, once via DictReader for the row dictionaries.
    """
    # one can directly read CSV files if
    # python-csv package has been installed
    try:
        import csv
    except ImportError:
        sys.stderr.write("ERROR: package python-csv not intalled ")
        sys.stderr.write("but a .csv file {:s} given! \nFigure out ".format(csv_file))
        sys.stderr.write("how to install python-csv package on your system !\n")
        sys.exit(2)
    header=[]
    values=[]
    with open(csv_file) as csvfile:
        reader = csv.reader(csvfile)
        header_read = next(reader,None)
        header.extend(header_read)
    with open(csv_file) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            row_values={}
            for name,val in row.iteritems():
                # Normalize unicode cells to plain ASCII str.
                if type(val) == unicode:
                    val = str(val.encode("ascii"))
                row_values[name]=val
            values.append(row_values)
    return (header,values)
class settings_class:
    """One row of the run-settings spreadsheet: the wiring of a scope channel.

    Attribute default types matter: load_settings() coerces each cell with
    type(current_value)(cell), so int fields stay ints and string fields
    stay strings.
    """

    def __init__(self):
        # Variables expected in the run-settings CSV file
        # (e.g. sFLASH_run3_settings.csv exported from sFLASH_run3_settings.xlsx).
        self.SCOPE = 0          # oscilloscope ID
        self.CHANNEL = 0        # channel ID of the oscilloscope
        self.PMT_SERIAL = ""    # PMT serial code, or COIL when the coil is plugged
                                # in to this scope channel
        self.CONNECTED_TO = ""  # brief description of what's plugged in to the channel
        self.REMARKS = ""       # additional remarks about the connection
def load_settings(settings_file):
    """Load the run-settings table from a .csv or .xlsx file (Python 2 code).

    Returns (header, settings) where settings is a list of settings_class
    instances, one per row. Each cell is coerced to the type of the
    corresponding default attribute via type(vars(pmti)[name])(val), so the
    spreadsheet column names must match settings_class attribute names.
    Exits with status 2 on an unrecognized file extension.
    """
    sys.stdout.write("loading settings file {:s} ...\n".format(settings_file))
    if settings_file.endswith(".csv"):
        (header,values) = read_csv_file(settings_file)
    elif settings_file.endswith(".xlsx"):
        # .xlsx files carry the data in a sheet named "settings".
        (header,values) = read_xlsx_sheet(settings_file,"settings")
    else:
        sys.stderr.write("ERROR: file {:s} doesn\'t end with .csv or .xlsx !".format(settings_file));
        sys.exit(2)
    settings = []
    for row in values:
        pmti=settings_class()
        for name,val in row.iteritems():
            # Coerce the cell to the attribute's declared default type.
            vars(pmti)[name] = type(vars(pmti)[name])(val)
        settings.append(pmti)
    sys.stdout.write("settings file {:s} loaded successfully\n".format(settings_file))
    sys.stdout.flush()
    return (header,settings)
class conditions_class:
    """One row of the run-conditions table for a single sub-run.

    Field names must match the header names of the conditions CSV/XLSX sheet;
    load_conditions assigns them by name via vars() and coerces each cell to
    the type of the default value declared here (int or float).
    """
    def __init__(self):
        # Variables that should be present in the CSV conditions file
        # (E.G. sFLASH_run3_conditions.csv is made from sFLASH_run3_conditions.xlsx by using SaveAs (CVS format) of MS Excel or OpenOffice Spreadsheet)
        self.yyyymmdd_start=int(0) # date+time at the sub-run start
        self.hhmmss_start=int(0)
        self.yyyymmdd_end=int(0)   # date+time at the sub-run end
        self.hhmmss_end=int(0)
        self.run_id=int(0)         # ID of the sub-run
        self.run_type=int(0)       # type of the sub-run (beam or UVLED ?)
        self.rl=int(0)             # number of radiation lenghts
        self.nevent=int(0)         # number of events
        self.blind=int(0)          # blind open or closed?
        self.shutter=int(0)        # shutter open or closed?
        self.status=int(0)         # sub-run status, good or bad?
        self.E_GeV=float(0)        # Beam energy in GeV
        self.C_per_Vs=float(0)     # COIL waveform integral to charge converter in Coulombs per (Voltage * Second)
        self.PMT_1_HV=float(0)     # PMT high voltage
        self.PMT_2_HV=float(0)
        self.PMT_3_HV=float(0)
        self.PMT_4_HV=float(0)
        self.PMT_A_HV=float(0)
        self.PMT_B_HV=float(0)
        self.PMT_C_HV=float(0)
        # Parameters to be used in PMT calibration formula:
        # Gain (in NPE_per_Vs) = 1.0 / [ EXP(PMT_X_ALPHA)*(PMT_X_HV-PMT_X_OFFSET)^PMT_X_BETA
        self.PMT_1_OFFSET=float(0) # OFFSET in the PMT calibration formula
        self.PMT_1_ALPHA=float(0)  # ALPHA parameter for the PMT calibration formula
        self.PMT_1_BETA=float(0)   # BETA parameter for the PMT calibration formula
        self.PMT_2_OFFSET=float(0)
        self.PMT_2_ALPHA=float(0)
        self.PMT_2_BETA=float(0)
        self.PMT_3_OFFSET=float(0)
        self.PMT_3_ALPHA=float(0)
        self.PMT_3_BETA=float(0)
        self.PMT_4_OFFSET=float(0)
        self.PMT_4_ALPHA=float(0)
        self.PMT_4_BETA=float(0)
        self.PMT_A_OFFSET=float(0)
        self.PMT_A_ALPHA=float(0)
        self.PMT_A_BETA=float(0)
        self.PMT_B_OFFSET=float(0)
        self.PMT_B_ALPHA=float(0)
        self.PMT_B_BETA=float(0)
        self.PMT_C_OFFSET=float(0)
        self.PMT_C_ALPHA=float(0)
        self.PMT_C_BETA=float(0)
        # Energy tracing factors for active regions up to the blind (TB) and up to the shutter (TS)
        # as calculated by FLUKA and GEANT4 simulations and the corresponding raytracing routines.
        self.PMT_1_TB_FK=float(0)  # Energy tracing factor for the active region until blind calculated by FLUKA simulation, [MeV / pC]
        self.PMT_2_TB_FK=float(0)
        self.PMT_3_TB_FK=float(0)
        self.PMT_4_TB_FK=float(0)
        self.PMT_A_TB_FK=float(0)
        self.PMT_B_TB_FK=float(0)
        self.PMT_C_TB_FK=float(0)
        self.PMT_1_TS_FK=float(0)  # Energy tracing factor for the active region until shutter calculated by FLUKA simulation, [MeV / pC]
        self.PMT_2_TS_FK=float(0)
        self.PMT_3_TS_FK=float(0)
        self.PMT_4_TS_FK=float(0)
        self.PMT_A_TS_FK=float(0)
        self.PMT_B_TS_FK=float(0)
        self.PMT_C_TS_FK=float(0)
        self.PMT_1_TB_G4=float(0)  # Energy tracing factor for the active region until blind calculated by Geant4 simulation, [MeV / pC]
        self.PMT_2_TB_G4=float(0)
        self.PMT_3_TB_G4=float(0)
        self.PMT_4_TB_G4=float(0)
        self.PMT_A_TB_G4=float(0)
        self.PMT_B_TB_G4=float(0)
        self.PMT_C_TB_G4=float(0)
        self.PMT_1_TS_G4=float(0)  # Energy tracing factor for the active region until shutter calculated by Geant4 simulation, [MeV / pC]
        self.PMT_2_TS_G4=float(0)
        self.PMT_3_TS_G4=float(0)
        self.PMT_4_TS_G4=float(0)
        self.PMT_A_TS_G4=float(0)
        self.PMT_B_TS_G4=float(0)
        self.PMT_C_TS_G4=float(0)
def load_conditions(conditions_file):
    """Load the per-sub-run conditions sheet from a .csv or .xlsx file.

    Returns (header, conditions) where header is the list of column names and
    conditions is a dict mapping run_id -> conditions_class instance.  Exits
    the process with status 2 on an unsupported file extension.
    """
    sys.stdout.write("loading conditions file {:s} ...\n".format(conditions_file))
    if conditions_file.endswith(".csv"):
        (header,values) = read_csv_file(conditions_file)
    elif conditions_file.endswith(".xlsx"):
        (header,values) = read_xlsx_sheet(conditions_file,"conditions")
    else:
        sys.stderr.write("ERROR: file {:s} doesn\'t end with .csv or .xlsx !".format(conditions_file));
        sys.exit(2)
    conditions = {}
    for row in values:
        cond=conditions_class()
        # NOTE: Python 2 only -- dict.iteritems() does not exist in Python 3.
        for name,val in row.iteritems():
            # Coerce each cell to the type of the matching default attribute
            # of conditions_class (int or float), assigning by column name.
            vars(cond)[name] = type(vars(cond)[name])(val)
        conditions[cond.run_id] = cond
    sys.stdout.write("conditions file {:s} loaded successfully\n".format(conditions_file))
    sys.stdout.flush()
    return (header,conditions)
class wf_class:
    # One waveform record parsed from a RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv
    # file: a trigger time, a sequence number, and four scope channels each
    # with its own sampling period and sample list.
    def __init__(self):
        self.t0 = 0.0                      # waveform start time
        self.seq = 0                       # waveform sequence number
        self.dt = [0.0, 0.0, 0.0, 0.0]     # sampling period per channel
        self.data = [[], [], [], []]       # samples per channel
    def __str__(self):
        # Summary line: t0, seq, the four sampling periods, and the number
        # of samples in the first channel.
        parts = [self.t0, self.seq, self.dt[0], self.dt[1],
                 self.dt[2], self.dt[3], len(self.data[0])]
        return '{0:.3f} {1:d} {2:.6e} {3:.6e} {4:.6e} {5:.6e} {6:d}'.format(*parts)
def parse_wf_file_name(fname):
    """Extract (run_id, yyyymmdd, hhmmss, scope_id) ints from a waveform
    file path of the form .../RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv.

    Raises Exception if the name does not match that pattern.
    """
    wf_name_re = re.compile(
        r".*Run(?P<run_id>\d{6})"
        r"-(?P<yyyymmdd>\d{8})-(?P<hhmmss>\d{6})-wf-(?P<scope_id>\d).csv")
    m = wf_name_re.match(fname)
    if m is None:
        raise Exception("{:s} is not a RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv type file".format(fname))
    return (int(m.group("run_id")), int(m.group("yyyymmdd")),
            int(m.group("hhmmss")), int(m.group("scope_id")))
def parse_waveform_data_file(infile):
    """Parse one RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv file into wf_class objects.

    The file is a sequence of waveform records, each starting with a
    "t0,seq" header line followed by 4 more header lines and then rows of 4
    comma-separated samples (one column per scope channel).  Returns the list
    of parsed wf_class instances; malformed records are reported on stderr.
    """
    sys.stdout.write("parsing waveform data file {:s} ...\n".format(infile))
    Waveforms=[]
    # NOTE: Python 2 idiom -- map() must return a list here because len() and
    # indexing are used below; under Python 3 this would need list(map(...)).
    lines=map(lambda s: s.strip(), open(infile,"r").readlines())
    wf=wf_class()
    i=0
    while i < len(lines):
        line=lines[i]
        if line == "t0,seq":
            # header of a new waveform. store the previously filled waveform if it
            # has been filled
            if(len(wf.data[0])>0):
                Waveforms.append(wf)
            # check if got an empty header at the end of the file
            if i+5 > len(lines):
                sys.stderr.write("warning: empty waveform (no waveform data) at the end of the file:\n");
                for k in range(i,len(lines)):
                    sys.stderr.write("{:s} line {:d}: {:s}\n".format(infile,k,lines[k]))
                i= len(lines)
                continue
            # store header lines
            header_lines=lines[i:i+5]
            # check if this is an emtpy waveform or not
            wf_empty=False
            for j in range(1,5):
                if (header_lines[j] == "t0,seq"):
                    # Another header begins before any data rows: report the
                    # empty record and resume parsing at the new header.
                    sys.stderr.write("warning: empty waveform (no waveform data):\n");
                    for k in range(0,j+1):
                        sys.stderr.write("{:s} line {:d}: {:s}\n".format(infile,i+k,lines[i+k]))
                    i=i+j
                    wf_empty=True
                    break
            if wf_empty:
                continue
            # make sure that the header isn't corrupted. if it is corrupted,
            # continue reading until a new header is found or the end of the file
            # is reached.
            corrupted_header=False
            if header_lines[4] != "WaveForm Data":
                sys.stderr.write("warning: corrupted header:\n");
                for k in range(0,5):
                    sys.stderr.write("{:s} line {:d}: {:s}\n".format(infile,i+k,lines[i+k]))
                i=i+5
                corrupted_header=True
                # NOTE(review): this break exits the outer while loop entirely,
                # so the corrupted_header recovery block below never runs and
                # parsing stops at the first corrupted header -- looks like it
                # was meant to only skip this record; confirm intended.
                break
            if corrupted_header:
                while (i < len(lines) and lines[i] != "t0,seq"):
                    i=i+1
                continue # be sure to re-iterate all the above code on the new header
            # start a new waveform
            wf=wf_class()
            # fill the header information for the new waveform
            wf.t0=float(header_lines[1].split(',')[0])
            wf.seq=int(header_lines[1].split(',')[1])
            for k in range(0,4):
                wf.dt[k]=float(header_lines[3].split(',')[k])
            wf.data=[[],[],[],[]]
            i=i+5
            continue
        # Regular data row: append one sample to each of the 4 channels.
        l=line.split(',')
        for k in range(0,4):
            wf.data[k].append(float(l[k]))
        i=i+1
        if(i == len(lines)):
            # End of file: store the last (still open) waveform.
            Waveforms.append(wf)
            wf=None
    sys.stdout.write("done parsing waveform data file {:s} \n".format(infile))
    sys.stdout.flush();
    return Waveforms
def convert_waveform_files(header_and_settings,wf0_infile,wf1_infile,header_and_conditions,outfile):
    """Combine one pair of scope waveform CSV files into a single ROOT tree.

    header_and_settings    -- (header, settings) from load_settings, or []
    wf0_infile, wf1_infile -- waveform CSVs of scope 0 and scope 1 for the
                              same sub-run (same run_id)
    header_and_conditions  -- (header, conditions) from load_conditions, or []
    outfile                -- path of the ROOT file to create

    Writes a tree "tsFLASHwf" with per-run settings/conditions branches plus
    per-event waveform branches for the COIL and the 7 PMT channels.
    """
    def make_rt_branch(name,vals_read,n_item_variable_name=""):
        # take the name of the variables, the list of values,
        # and a variable (stored in ROOT tree) that describes
        # the number of item, (if empty, assume the number of entries is 1)
        # and prepare all necessary components of a ROOT - tree branch:
        # (branch name, branch container, branch descriptor)
        # and return this as a list of the three things above
        if(len(vals_read) > 1 and len(n_item_variable_name) == 0):
            sys.stderr.write("ERROR: make_rt_branch: number of values passed greater than 1 but no")
            sys.stderr.write("root tree variable name given that describes the number of items!\n")
            sys.exit(2)
        n_item_descriptor="";
        if len(vals_read) > 1:
            n_item_descriptor = "["+n_item_variable_name+"]";
        if type(vals_read[0]) == str:
            # Strings become a fixed-width, NUL-padded 2D byte array.
            # NOTE: Python 2 idiom -- map() must return lists here.
            vals_adj=map(lambda s : s+"\0", vals_read)
            l_max=max(map(lambda s: len(s), vals_adj))
            vals=map(lambda s : s+(l_max-len(s))*"\0", vals_adj)
            vals_array=np.arange(len(vals)*l_max,dtype='b').reshape((len(vals),l_max),order="C")
            i=0
            for val in vals:
                j=0
                for v in val:
                    vals_array[i][j] = ord(v)
                    j=j+1
                i=i+1
            return (name,vals_array,name+n_item_descriptor+"["+str(l_max)+"]/B")
        elif type(vals_read[0]) == int:
            vals_array=array("i")
            vals_array.extend(vals_read)
            return (name,vals_array,name+n_item_descriptor+"/I")
        elif type(vals_read[0]) == float:
            vals_array=array("d")
            vals_array.extend(vals_read)
            return (name,vals_array,name+n_item_descriptor+"/D")
        else:
            sys.stderr.write("ERROR: unsupported data type!\n")
            sys.exit(2)
    # settings variables that apply to all run_id's of the global run
    settings_rt_branches=[]
    if len(header_and_settings) == 2:
        (header,settings) = header_and_settings
        if(len(settings) > 0):
            # ROOT tree variable that describes the number of condition information entries
            settings_rt_branches.append(make_rt_branch("n_connected",[int(len(settings))]))
            for name in header:
                vals_read=map(lambda s: vars(s)[name], settings)
                settings_rt_branches.append(make_rt_branch(name,vals_read,settings_rt_branches[0][0]))
    # waveforms from both oscilloscopes for the particular sub-run,
    # labeled by the run_id variable. Must have two waveform files,
    # from the two oscilloscopes, that have the same run_id
    waveform_static_rt_branches=[]
    Waveforms0 = parse_waveform_data_file(wf0_infile)
    Waveforms1 = parse_waveform_data_file(wf1_infile)
    nwf0=len(Waveforms0)
    nwf1=len(Waveforms1)
    nwf=nwf0
    if(nwf0 != nwf1):
        # BUGFIX: the format string used an invalid field "{%d}" which raised
        # KeyError whenever this warning fired; use "{:d}" instead.
        sys.stderr.write("WARNING: number of waveforms in file {:s} {:d} is not the same\n".format(wf0_infile,nwf0));
        sys.stderr.write("as the number of waveforms in file {:s} {:d}!\n".format(wf1_infile,nwf1));
        # BUGFIX: this compared against the undefined name "nw0" (NameError);
        # it must be nwf0 so the shorter file bounds the event loop.
        if nwf1 < nwf0:
            nwf = nwf1
    (run_id,yyyymmdd,hhmmss,scope_id) = parse_wf_file_name(wf0_infile)
    waveform_static_rt_branches.append(make_rt_branch("run_id",[int(run_id)]))
    waveform_static_rt_branches.append(make_rt_branch("yyyymmdd",[int(yyyymmdd)]))
    waveform_static_rt_branches.append(make_rt_branch("hhmmss",[int(hhmmss)]))
    # look at the parsed conditions variables listed for each sub-run
    # pick out a set of conditions that's relevant for the waveforms being parsed,
    # i.e. a set of conditions that correspond to the run_id of the waveforms
    conditions_rt_branches=[]
    if len(header_and_conditions) == 2:
        (header,conditions) = header_and_conditions
        if run_id in conditions.keys():
            relevant_conditions=[conditions[run_id]]
            for name in header:
                vals_read=map(lambda s: vars(s)[name], relevant_conditions)
                conditions_rt_branches.append(make_rt_branch(name,vals_read))
    # Initialize ROOT file and allocate the ROOT tree linked to
    # that file
    f = TFile(outfile,"recreate")
    if f.IsZombie():
        exit(2)
    t = TTree("tsFLASHwf","Tree with SFLASH 2018 Waveform Data")
    # set the branches for the overall run settings, if the run settings data is available
    # as well the condition and simulation branches for the particular sub run,
    # mapped out by the run_id of the waveforms, if the corresponding run conditions data is available
    for rt_branch in itertools.chain(settings_rt_branches,conditions_rt_branches,waveform_static_rt_branches):
        t.Branch(*rt_branch)
    # set the wavform branches for the particular sub run mapped out by the run_id
    # variables of these branches may vary from pulse to pulse
    ntmax=5000
    t00 = array("d",[0.0])
    seq0 = array("i",[0])
    t01 = array("d",[0.0])
    seq1 = array("i",[0])
    nt0 = array("i",[0])
    nt1 = array("i",[0])
    dt_coil = array("d",[0.0])
    dt_pmt_1 = array("d",[0.0])
    dt_pmt_4 = array("d",[0.0])
    dt_pmt_c = array("d",[0.0])
    dt_pmt_2 = array("d",[0.0])
    dt_pmt_3 = array("d",[0.0])
    dt_pmt_a = array("d",[0.0])
    dt_pmt_b = array("d",[0.0])
    wfti0 = array("i",ntmax*[0])
    wfti1 = array("i",ntmax*[0])
    wf_coil = array("d",ntmax*[0.0])
    wf_pmt_1 = array("d",ntmax*[0.0])
    wf_pmt_4 = array("d",ntmax*[0.0])
    wf_pmt_c = array("d",ntmax*[0.0])
    wf_pmt_2 = array("d",ntmax*[0.0])
    wf_pmt_3 = array("d",ntmax*[0.0])
    wf_pmt_a = array("d",ntmax*[0.0])
    wf_pmt_b = array("d",ntmax*[0.0])
    t.Branch("t00",t00,"t00/D")
    t.Branch("seq0",seq0,"seq0/I")
    t.Branch("t01",t01,"t01/D")
    t.Branch("seq1",seq1,"seq1/I")
    t.Branch("dt_coil",dt_coil,"dt_coil/D")
    t.Branch("dt_pmt_1",dt_pmt_1,"dt_pmt_1/D")
    t.Branch("dt_pmt_4",dt_pmt_4,"dt_pmt_4/D")
    t.Branch("dt_pmt_c",dt_pmt_c,"dt_pmt_c/D")
    t.Branch("dt_pmt_2",dt_pmt_2,"dt_pmt_2/D")
    t.Branch("dt_pmt_3",dt_pmt_3,"dt_pmt_3/D")
    t.Branch("dt_pmt_a",dt_pmt_a,"dt_pmt_a/D")
    t.Branch("dt_pmt_b",dt_pmt_b,"dt_pmt_b/D")
    t.Branch("nt0",nt0,"nt0/I")
    t.Branch("nt1",nt1,"nt1/I")
    t.Branch("wfti0",wfti0,"wfti0[nt0]/I")
    t.Branch("wf_coil",wf_coil,"wf_coil[nt0]/D")
    t.Branch("wf_pmt_1",wf_pmt_1,"wf_pmt_1[nt0]/D")
    t.Branch("wf_pmt_4",wf_pmt_4,"wf_pmt_4[nt0]/D")
    t.Branch("wf_pmt_c",wf_pmt_c,"wf_pmt_c[nt0]/D")
    t.Branch("wfti1",wfti1,"wfti1[nt1]/I")
    t.Branch("wf_pmt_2",wf_pmt_2,"wf_pmt_2[nt1]/D")
    t.Branch("wf_pmt_3",wf_pmt_3,"wf_pmt_3[nt1]/D")
    t.Branch("wf_pmt_a",wf_pmt_a,"wf_pmt_a[nt1]/D")
    t.Branch("wf_pmt_b",wf_pmt_b,"wf_pmt_b[nt1]/D")
    # combine the waveforms into event and fill the tree
    for iwf in range(0,nwf):
        # scope 0 carries COIL, PMT 1, PMT 4, PMT C
        wf0 = Waveforms0[iwf]
        t00[0] = wf0.t0
        seq0[0] = wf0.seq
        dt_coil[0] = wf0.dt[0]
        dt_pmt_1[0] = wf0.dt[1]
        dt_pmt_4[0] = wf0.dt[2]
        dt_pmt_c[0] = wf0.dt[3]
        nt0[0] = len(wf0.data[0])
        for i in range(0,nt0[0]):
            wfti0[i] = i
            wf_coil[i] = wf0.data[0][i]
            wf_pmt_1[i] = wf0.data[1][i]
            wf_pmt_4[i] = wf0.data[2][i]
            wf_pmt_c[i] = wf0.data[3][i]
        # scope 1 carries PMT 2, PMT 3, PMT A, PMT B
        wf1 = Waveforms1[iwf]
        t01[0] = wf1.t0
        seq1[0] = wf1.seq
        dt_pmt_2[0] = wf1.dt[0]
        dt_pmt_3[0] = wf1.dt[1]
        dt_pmt_a[0] = wf1.dt[2]
        dt_pmt_b[0] = wf1.dt[3]
        nt1[0] = len(wf1.data[0])
        for i in range(0,nt1[0]):
            wfti1[i] = i
            wf_pmt_2[i] = wf1.data[0][i]
            wf_pmt_3[i] = wf1.data[1][i]
            wf_pmt_a[i] = wf1.data[2][i]
            wf_pmt_b[i] = wf1.data[3][i]
        t.Fill()
    t.Write()
    f.Close()
def main():
    """Command-line driver: pair up scope-0/scope-1 waveform CSVs by sub-run,
    load optional settings/conditions sheets, and convert each good pair into
    a ROOT tree file in the output directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("files",nargs="*",
                        help="pass RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv file names w/o prefixes or switches,"+
                        "and the number of files must be even because there are two oscilloscopes")
    parser.add_argument("-i", action="store", dest="listfile",
                        help=' <string> give an ascii list file with paths to a bunch of RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv files (even number of files)')
    parser.add_argument("--tty", action='store_true',
                        default=False,
                        dest="tty_input",
                        help="pipe RunNNNNNN-YYYYMMDD-HHMMSS-wf-N.csv file names from stdin (even number of files)")
    parser.add_argument("-settings", action='store',dest='settings_file',default="sFLASH_run3_settings.xlsx",
                        help="<string> Give a .csv or .xlsx file with scope/channel/PMT settings for the entire run period"+\
                        " (sFLASH_run3_settings.xlsx)")
    parser.add_argument('-conditions', action='store',dest='conditions_file',default="sFLASH_run3_conditions.xlsx",
                        help="<string> Give a .csv or .xlsx file with conditions for all sub-runs of run3 (sFLASH_run3_conditions.xlsx)")
    parser.add_argument("-o", action="store", dest="outdir", default="./",
                        help="<string >specify the output directory for the root tree files")
    parser.add_argument("-good", action="store_true", dest="good_runs_only", default=False,
                        help="Use this flag to parse only good runs (according to the conditions file) and skip all others\n")
    # No arguments at all: print a banner plus the usage text and quit.
    if (len(sys.argv)==1):
        sys.stdout.write("\n");
        sys.stdout.write("Convert sFLASH RUN3 (November 2018 run) waveform files, settings, and conditions, into root trees, last update October 2019\n")
        sys.stdout.write("DI <dmiivanov@gmail.com>\n")
        parser.print_help()
        sys.stdout.write("\n\n")
        sys.exit(2)
    args = parser.parse_args()
    # Collect input file names from positional arguments and/or the list file.
    infiles_rel=[]
    if args.files != None:
        infiles_rel.extend(args.files)
    if args.listfile != None:
        with open(args.listfile,"r") as f:
            infiles_rel.extend(map(lambda s: s.strip(), f.readlines()))
    if len(infiles_rel) < 1:
        sys.stderr.write("No input files\n")
        sys.exit(2)
    for infile in infiles_rel:
        if not os.path.isfile(infile):
            sys.stderr.write("ERROR: {0:s} file not found\n".format(infile))
            sys.exit(2)
    # NOTE: Python 2 idiom -- map() is treated as a list throughout.
    infiles=map(lambda s: os.path.abspath(s), infiles_rel)
    outdir=str(args.outdir).rstrip('/')
    if not os.path.isdir(outdir):
        sys.stdout.write("ERROR: output directory doesn\'t exist!\n");
        sys.exit(2)
    # run settings
    if os.path.isfile(args.settings_file):
        HeaderAndSettings = load_settings(os.path.abspath(args.settings_file))
    else:
        sys.stderr.write("WARNING: run3 settings file {:s} not found, parsing data\n".format(args.settings_file))
        sys.stderr.write("without knowing which PMT/COIL is connected to which scope!\n")
        HeaderAndSettings = []
    # prepare waveform infile pairs
    infile_pairs_all={}
    infile_pairs_good={}
    for infile in infiles:
        (run_id,yyyymmdd,hhmmss,scope_id) = parse_wf_file_name(infile)
        # Encode (run_id, yyyymmdd, hhmmss) into a single integer key so the
        # two scope files of the same sub-run collide on the same entry.
        ind=run_id*100000000*1000000+yyyymmdd*1000000+hhmmss
        if ind not in infile_pairs_all.keys():
            infile_pairs_all[ind] = {}
        infile_pairs_all[ind][scope_id] = infile
    # Keep only sub-runs that have exactly one file per oscilloscope.
    # NOTE: Python 2 only -- dict.iteritems() does not exist in Python 3.
    for ind,fpair in infile_pairs_all.iteritems():
        if len(fpair) < 2:
            sys.stderr.write("warning: run_id {:d} yyyymmdd {:d} hhmmss {:d}: number of waveform files less than 2!\n".
                             format(ind//100000000//1000000,(ind%(100000000*1000000))//1000000,ind%1000000))
            continue
        if len(fpair) > 2:
            sys.stderr.write("warning: run_id {:d} yyyymmdd {:d} hhmmss {:d}: number of waveform files greater than 2!\n".
                             format(ind//100000000//1000000,(ind%(100000000*1000000))//1000000,ind%1000000))
            continue
        infile_pairs_good[ind] = fpair
    # run conditions
    if os.path.isfile(args.conditions_file):
        HeaderAndConditions = load_conditions(os.path.abspath(args.conditions_file))
    else:
        if args.good_runs_only:
            sys.stderr.write("ERROR: requested parsing only good runs but conditions file\n")
            sys.stderr.write("that determines which runs are good is absent!\n")
            sys.exit(2)
        sys.stderr.write("WARNING: run3 conditions file {:s} not found, parsing data\n".format(args.conditions_file))
        sys.stderr.write("without any conditions and calibration information!\n")
        HeaderAndConditions=[]
    if(len(infile_pairs_good) < 1):
        sys.stderr.write("WARNING: don\'t have any good pairs of waveform files to analyze\n")
    # convert waveform data files to root trees
    for ind,fpair in infile_pairs_good.iteritems():
        wf0_infile=fpair[0]
        wf1_infile=fpair[1]
        # if parsing of only good runs is requested
        if args.good_runs_only:
            # skip the run if it's not in the list of runs
            # for which conditions are available (conditions should be
            # avaialble for all good runs)
            run_id=ind//100000000//1000000
            # NOTE: Python 2 only -- dict.has_key() was removed in Python 3.
            if not HeaderAndConditions[1].has_key(run_id):
                continue
            # skip the run if its status is not 1 (good run)
            if HeaderAndConditions[1][run_id].status != 1:
                continue
        # Output name comes from the scope-0 file with "-0.csv" -> ".root".
        outfile=outdir+"/"+os.path.basename(wf0_infile)
        outfile=outfile.replace("-0.csv",".root")
        convert_waveform_files(HeaderAndSettings,wf0_infile,wf1_infile,HeaderAndConditions,outfile)
    sys.stdout.write("\nDone\n")
if __name__ == "__main__":
main()
| [
"dmiivanov@gmail.com"
] | dmiivanov@gmail.com |
116c66d9f3c1b4f5e2c4991742de3a8413bbff56 | 854b220c25dc886f77c237437c370782a68c8bb2 | /proyectos_de_ley/api/api_responses.py | f94452466a9934b2e9df3b1b8c8aaa98a4e6592c | [
"MIT"
] | permissive | MrBaatezu/proyectos_de_ley | b6bb672b5bcc3c8ca2b6327ee96083466356560d | 56cf6f2f1df6483d2057235132a376b068877407 | refs/heads/master | 2021-01-18T01:10:12.683082 | 2015-10-29T00:44:52 | 2015-10-29T00:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework_csv import renderers
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """
    def __init__(self, data, **kwargs):
        # Force the JSON content type, then hand the rendered body to Django.
        kwargs['content_type'] = 'application/json'
        body = JSONRenderer().render(data)
        super(JSONResponse, self).__init__(body, **kwargs)
class CSVResponse(HttpResponse):
    """
    An HttpResponse that renders its content into CSV.
    """
    def __init__(self, data, **kwargs):
        # Force the CSV content type, then hand the rendered body to Django.
        kwargs['content_type'] = 'text/csv'
        body = CSVRenderer().render(data)
        super(CSVResponse, self).__init__(body, **kwargs)
class CSVRenderer(renderers.CSVRenderer):
    """CSV renderer that only overrides the media type and format name."""
    media_type = 'text/csv'
    format = 'csv'
    def render(self, data, media_type=None, renderer_context=None):
        # Pure pass-through to the parent renderer; kept only to pin the
        # signature -- it adds no behavior of its own.
        return super(CSVRenderer, self).render(data, media_type, renderer_context)
| [
"aniversarioperu1@gmail.com"
] | aniversarioperu1@gmail.com |
6d0d251cece6178b91ef559d667dbfe116dc0430 | d93c58cf12ac5cf264581a7c9f075674f7f07a00 | /src/DjangoDemo/urls.py | a85dff43a38121f09c164180034b9899d2d789aa | [] | no_license | xiayuncheng1991/DjangoDemo | 818bd33ddfbb7c0e846e2f95d2938b536f2d4104 | 1131998759a90c17a8a7d7dcba30c90d5278f92b | refs/heads/master | 2021-01-15T19:28:01.265080 | 2014-03-09T14:46:53 | 2014-03-09T14:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from blog.views import *
# Uncomment the next two lines to enable the admin:
# BUGFIX: admin.autodiscover() was called twice; one call is sufficient.
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'DjangoDemo.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),   # Django admin site
    url(r'^blog/$', archive),                    # blog archive view
)
| [
"1762284198@qq.com"
] | 1762284198@qq.com |
8e156c837f59e202d492217138849ac70f7caacc | cef2e6549f8b1cd0601ec7d2636b6810a1fa297f | /wikipedia.py | 46c5e21c1f9dbec16ce1f317d448cb772063da54 | [] | no_license | gdebenito/scrapyng-wikipedia | f88831a80cac6ef9ae55fdccb29e8304a769cdc0 | 79e8508fa14cf6c8b61979da34e51036c14911c2 | refs/heads/main | 2023-04-16T21:29:54.385971 | 2021-04-25T09:26:39 | 2021-04-25T09:26:39 | 361,383,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | import scrapy
class WikipediaSpider(scrapy.Spider):
    """Scrapy spider that scrapes the infobox table rows of a Wikipedia page,
    yielding one item per row with its label, value text, and link."""
    name = "wikipedia"
    start_urls = [
        'https://es.wikipedia.org/wiki/Parque_nacional_de_Yellowstone',
        # 'https://es.wikipedia.org/wiki/Persona_5',
    ]
    def parse(self, response):
        for row in response.xpath('//table[contains(@class, "infobox")]/tbody/tr'):
            # Parse the row label (e.g. "Localidad"): prefer plain header
            # text, fall back to the text of a link inside the header cell.
            headerText = row.xpath('./th/text()').get()
            headerLink = row.xpath('./th/a/text()').get()
            header = headerText if headerText != None else headerLink
            # Parse the first link in the value cell; relative hrefs are
            # prefixed with the page URL, absolute ones are kept as-is.
            linkRaw = row.xpath('./td//a/@href').get()
            if linkRaw != None and linkRaw.startswith('/'):
                link = (response.url + linkRaw)
            else:
                link = linkRaw
            yield {
                'header': header,
                'content': row.xpath('./td//a/text()').get(),
                'link': link,
            }
| [
"gdebenitocassado@gmail.com"
] | gdebenitocassado@gmail.com |
c33c5d21ce909bc806b78c0dde5a40c39d15fbd5 | 00d7e9321d418a2d9a607fb9376b862119f2bd4e | /utils/pdf_figure_stamper.py | 4628dbee41fd552c97249ac0bbeb5cd6de0b08e4 | [
"MIT"
] | permissive | baluneboy/pims | 92b9b1f64ed658867186e44b92526867696e1923 | 5a07e02588b1b7c8ebf7458b10e81b8ecf84ad13 | refs/heads/master | 2021-11-16T01:55:39.223910 | 2021-08-13T15:19:48 | 2021-08-13T15:19:48 | 33,029,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | #!/usr/bin/env python
import os
from pims.files.pdfs.pdfjam import CpdfAddTextCommand
from pims.files.utils import listdir_filename_pattern
# return list of PDF files matching filename pattern criteria (not having STAMPED in filename)
def get_pdf_files(dirpath, fname_pat):
    """Return PDF files in dirpath matching fname_pat, excluding any whose
    name contains "STAMPED" (output of a previous stamping run)."""
    tmp_list = listdir_filename_pattern(dirpath, fname_pat)
    # filter tmp_list to drop files already stamped by a previous run
    return [ x for x in tmp_list if "STAMPED" not in x ]
if __name__ == "__main__":
    # get list of analysis template plot PDFs
    sensor = '121f02'
    sensor_suffix = '010'
    # Regex for EML analysis plots of this sensor/suffix combination.
    fname_pat = '.*' + sensor + sensor_suffix + '_gvtm_pops_.*_EML_analysis.pdf'
    dirpath = '/home/pims/dev/matlab/programs/special/EML/hb_vib_crew_Vehicle_and_Crew_Activity/plots'
    pdf_files = sorted(get_pdf_files(dirpath, fname_pat))
    c = 0
    # Stamp each PDF with its own filename in the bottom-right area.
    # NOTE: Python 2 print statements below.
    for f in pdf_files:
        c += 1
        print 'page %02d %s' % (c, f)
        # Equivalent shell command for reference:
        #cpdf -prerotate -add-text "${F}" ${F} -color "0.5 0.3 0.4" -font-size 6 -font "Courier" -pos-left "450 5" -o ${F/.pdf/_cpdf_add-text.pdf}
        color = "0.5 0.3 0.4"   # muted red text color (RGB fractions)
        font = "Courier"
        font_size = 6
        pos_left = "450 5"      # x y position from the left edge, in points
        cat = CpdfAddTextCommand(f, color, font, font_size, pos_left)
        cat.run()
| [
"silversnoopy2002@gmail.com"
] | silversnoopy2002@gmail.com |
a650d4a5998d225d3e704c26dccdce127acde442 | 3f742d4ce80f50481df6030304cfa9d7eedce09a | /addBinary.py | 13ad76d9365b98cbee8261e06964afd275a07427 | [] | no_license | guangyi/Algorithm | 12c48206ddb560bd11b7e3f68223ee8cc8f436f0 | 7e747ed1b11a06edc117dd5627ede77409b6a259 | refs/heads/master | 2021-01-10T21:08:15.867142 | 2014-08-26T05:44:44 | 2014-08-26T05:44:44 | 20,144,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # @param a, a string
# @param b, a string
# @return a string
def addBinary(self, a, b):
    """Return the sum of two binary strings a and b as a binary string."""
    width = max(len(a), len(b))
    # Zero-pad both operands to a common width so we can add column by column.
    x = a.zfill(width)
    y = b.zfill(width)
    out_digits = []
    carry = 0
    # Walk from least-significant to most-significant column.
    for pos in range(width - 1, -1, -1):
        total = int(x[pos]) + int(y[pos]) + carry
        out_digits.append(str(total % 2))
        carry = total // 2
    if carry:
        out_digits.append('1')
    # Digits were collected least-significant first; reverse for the result.
    return ''.join(reversed(out_digits))
print Solution().addBinary('111', '1')
print Solution().addBinary('0', '0') | [
"zhouguangyi2009@gmail.com"
] | zhouguangyi2009@gmail.com |
16d51e454824f67b4b41ef3ca55f13c9e221bf28 | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /HackerRank/ajob_subsequence_bis.py | 71a54d19dcac4f7ce8161b46f701309c0454498c | [] | no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #!/usr/bin/env python3
# cf ajob_subsequence.py
# Method 2: using Lucas's theorem
def digits(n):
    """Return the base-P digits of n, least-significant first (empty for 0).

    Reads the module-level prime P set in the __main__ block.
    """
    out = []
    while n:
        out.append(n % P)
        n //= P
    return out
def inv(n):
    # Modular inverse of n modulo the (prime) module-level P, via Fermat's
    # little theorem: n^(P-2) = n^(-1) (mod P).
    return pow(n,P-2,P)
def binom(n,p):
    # C(n,p) mod P using the precomputed factorial table Fact (module-level,
    # valid for arguments < P).  Returns 0 when p is outside [0, n].
    if 0<=p<=n:
        return (Fact[n] * inv((Fact[p]*Fact[n-p])%P)) % P
    return 0
def binom_lucas(n,k):
    """C(n,k) mod prime P via Lucas's theorem: the product over base-P digit
    pairs (ni, ki) of C(ni, ki) mod P.  Requires 0 <= k <= n."""
    assert 0<=k<=n
    Dn = digits(n)
    Dk = digits(k)
    # Pad k's digit list with zeros so both numbers have the same length.
    while len(Dk)<len(Dn):
        Dk.append(0)
    res = 1
    for ni,ki in zip(Dn,Dk):
        res = (res * binom(ni,ki)) % P
    return res
if __name__=='__main__':
    # T test cases; each gives N, K and a prime modulus P.
    T = int(input())
    for _ in range(T):
        N,K,P = map(int,input().split())
        # Precompute factorials mod P for digits 0..P-1 (used by binom()).
        Fact = [1]*P
        for i in range(2,P):
            Fact[i] = (Fact[i-1]*i) % P
        # Answer is C(N+1, K+1) mod P.
        print(binom_lucas(N+1,K+1))
| [
"blg@gmx.com"
] | blg@gmx.com |
8da9c2a940d0fcc58984d08e74744b976b5c49d6 | 120805ddea9478dc2fb2ecdff585e331fbf99916 | /manage.py | fa0bc3052519c3fde9e262e1541a77e6dbb20655 | [] | no_license | alexeydukhovich/yandex_backend_school | f9de8183197d322a652ae0055fe7fdf0661ef0f7 | 4cfc3f122946980b3ca1bc4c3682de186e380183 | refs/heads/master | 2020-07-11T10:16:18.503190 | 2019-08-26T17:48:00 | 2019-08-26T17:48:00 | 204,511,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import create_app
from core.database import db
app = create_app()
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == "__main__":
manager.run() | [
"alexeydukhovich@gmail.com"
] | alexeydukhovich@gmail.com |
3d68f1509239d5a47991a10e89e8b0245d0eda81 | 90553c367927a4a0c613f2b716cb34556af06684 | /src/api.py | 0c0c8a9c59d2e7b87df93a146ed9df8b51a2d959 | [] | no_license | ngkc1996/url-shortener-backend | 1428615d779b8d422f2d928f03ec9286d5a798a7 | 76e2072c9d965f3987753787aeba0fef86acc94d | refs/heads/main | 2023-08-02T09:05:24.062874 | 2021-09-20T09:09:31 | 2021-09-20T09:09:31 | 405,525,360 | 0 | 0 | null | 2021-09-20T09:09:32 | 2021-09-12T02:12:21 | Python | UTF-8 | Python | false | false | 2,453 | py | import os
from http import HTTPStatus
from flask import request, redirect
from flask_restful import Resource
from src.db import add_url, get_document_by_id, get_document_by_url, decrement_document_by_id
from src.check_regex import check_url_valid
BASE_URL = os.environ.get("BASE_URL")
class URLShortener(Resource):
    """POST endpoint that validates a URL and stores it as a short link."""
    def post(self):
        """Create a short URL from JSON body {"url": ..., "num_uses": ...}.

        Returns the shortened URL on success (201), a message on bad input
        (400), or a message on any internal failure (500).
        """
        try:
            data = request.get_json()
            url = data.get("url", None)
            num_uses = data.get("num_uses", 1)   # defaults to a single-use link
            # check validity of url
            if url is None or url == "":
                return {"message": "URL not given."}, HTTPStatus.BAD_REQUEST
            # Normalize scheme-less input before validating.
            if not url.startswith(("http://", "https://")):
                url = "http://" + url
            if not check_url_valid(url):
                return {"message": "URL is not valid."}, HTTPStatus.BAD_REQUEST
            # Deduplication lookup intentionally disabled: every request
            # currently creates a new short id.
            # check if url exists in db
            # is_retrieved, data = get_document_by_url(url)
            # if not is_retrieved:
            #     return data, HTTPStatus.INTERNAL_SERVER_ERROR
            # if data:
            #     return {"url": BASE_URL + data["_id"]}, HTTPStatus.CREATED
            # add url if not exists
            is_retrieved, data = add_url(url, num_uses)
            if not is_retrieved:
                # data carries the error payload when the DB call fails
                return data, HTTPStatus.INTERNAL_SERVER_ERROR
            return {"url": BASE_URL + data["_id"]}, HTTPStatus.CREATED
        except Exception as e:
            # Top-level boundary: surface any unexpected error as a 500.
            return {"message": str(e)}, HTTPStatus.INTERNAL_SERVER_ERROR
class URLRedirect(Resource):
    """GET endpoint that resolves a short id and redirects to the target URL."""
    def get(self, id):
        """Redirect to the URL stored under `id`, consuming one use.

        Returns 308 redirect on success, 400 if the id is unknown or
        exhausted, or 500 on any internal failure.
        """
        try:
            # check if num uses is > 0
            is_retrieved, data = get_document_by_id(id)
            if not is_retrieved:
                return data, HTTPStatus.INTERNAL_SERVER_ERROR
            if not data:
                return {"message": "URL not found."}, HTTPStatus.BAD_REQUEST
            num_uses = data["num_uses"]
            if num_uses == 0:
                return {"message": "There are no remaining uses for this URL."}, HTTPStatus.BAD_REQUEST
            # update num uses and redirect
            is_retrieved, data2 = decrement_document_by_id(id)
            if not is_retrieved:
                return data2, HTTPStatus.INTERNAL_SERVER_ERROR
            url = data["url"]
            return redirect(url, code=HTTPStatus.PERMANENT_REDIRECT)
        except Exception as e:
            # Top-level boundary: surface any unexpected error as a 500.
            return {"message": str(e)}, HTTPStatus.INTERNAL_SERVER_ERROR
| [
"ngkc1996@gmail.com"
] | ngkc1996@gmail.com |
5ffa6b2852b346708c25363f91526d06e20b67ea | e43f21fb134fd5609f910b0decdee30e53023982 | /DrawFigures720/analysisExps.py | b28a470c274f1c2fd61e09382d0095fc4d11d8af | [] | no_license | rubiruchi/TEexp | bafb1d956c6720b08935b2f06d82641af0688ab3 | dbd219016873ae4281194c1ec7e1854f6277d797 | refs/heads/master | 2022-10-16T03:37:44.284910 | 2020-06-11T04:04:50 | 2020-06-11T04:04:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,701 | py | #summarize we want which kind of figures
#1. Z<=1 the performance ratio, 2. Z>1 the throughput ratio
#3. the performance increase for each alg?
#compare the whole throughput between step 1: min(max(le)) with step 2: maximize whole network throughput ?
#4. path utilization CDF
#5. flow entries CCDF
#6. link utilizations CDF for alg comparison, and for step 1, step 2, optimal comparison (like for Custom)
#7. fairness: each s-t satisfied how many demands
#8. robustness: after single link failure, data plane reacting (TODO can satisfy how many rerouting traffic and loss how many)
#after single link failure, control plane reacting: throughput after recomputing TE
#9. computing time
#10. latency (path length or better use gscale testbed to get the real data TODO)
#11. reacting to TM changes? or path updating or?
from readJSON import readJSON, readJSONLeband
from readOptimal import readOptimal, readOptimalLeband
from drawFigures import drawFigure, drawBenefit
from CDF import cdf, ccdf, drawCDFandCCDF
from numpy import *
def analysisZ(demands, pathpre, topo, algdic, comp, figurepath):#compare): #for #1 and #2
    """Split demand scenarios by the optimal MCF scaling factor Z and plot
    per-algorithm results: performance ratio for Z<=1 and throughput ratio
    for 1<Z<2.5.  Returns (Zless1, Zlarge1) dicts keyed by demand.

    NOTE: Python 2 module (print statements below).
    """
    Zless1={}
    Zlarge1={}
    # First pass: classify each demand by the optimal solver's Z.
    for d in demands:
        #Zless1[d]={}
        #Zlarge1[d]={}
        filename=pathpre+topo+"-"+str(d)+"-optimal-mcf.json"
        #Z,leband=readOptimal(filename)#TODO here need to be consistant for each anamethods
        fileOptimal=readOptimal(filename)
        Z=fileOptimal['Z']
        if Z<=1:#fileOptimal['Z']<=1:
            Zless1[d]={}
            Zless1[d]['Optimal']=Z#fileOptimal['Z']
        elif Z<2.5:
            # Demands with Z >= 2.5 are dropped entirely.
            Zlarge1[d]={}
            Zlarge1[d]['Optimal']=Z#fileOptimal['Z']
    print "============"
    # Second pass: for satisfiable demands (Z<=1) record each algorithm's Z.
    for d in Zless1:#demands:
        for alg in algdic:
            #for comp in compare:
            filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
            fileJSON=readJSON(filename)
            Zless1[d][alg]=fileJSON['Z']
            #TODO first get the result for hardnop
    print "============"
    # Third pass: for oversubscribed demands record Z plus the throughput
    # before and after the second optimization step.
    for d in Zlarge1:
        for alg in algdic:
            #for comp in compare:
            filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
            fileJSON=readJSON(filename)
            Zlarge1[d][alg]=[]
            Zlarge1[d][alg].append(fileJSON['Z'])#Save also throughput before second step and after second step
            Zlarge1[d][alg].append(fileJSON['total throuput before the second step'])
            Zlarge1[d][alg].append(fileJSON['total throuput after the second step'])
            #TODO first get the result for hardnop
    print "============"
    print Zless1
    print Zlarge1
    #drawBenefit(Zless1,1,4,'Z','Performance ratio (Zalg/Zopt)','Z-gscale-1-hardnop-Zless1.pdf')
    drawBenefit(Zless1,1,6,'Z','Performance ratio (Zalg/Zopt)','Z-'+topo+'-1-'+comp+'-Zless1all.pdf', figurepath)
    drawBenefit(Zlarge1,0,6,'Z','Throughput ratio (Talg/(sum(TM)/Zopt))','Z-'+topo+'-1-'+comp+'-Zlarge1-TZall.pdf', figurepath)
    #TODO here we need to make clear why we can not call the drawBenefit at the same twice
    return Zless1, Zlarge1#TODO test it
def analysisAlgT(demands, pathpre, topo, alg, figurepath): #for #3
    """Compare one algorithm's two variants ("program" vs "hardnop") over demands.

    For every demand id reads `<pathpre><topo>-<d>-<alg>-program.json` and
    `...-hardnop.json`, collects Z plus the total throughput before/after the
    second optimization step for each variant, and hands the six series to
    drawFigure.

    Parameters:
        demands    -- iterable of demand-matrix ids
        pathpre    -- directory prefix of the result JSON files
        topo       -- topology name used in the file names
        alg        -- algorithm name used in the file names
        figurepath -- output directory for the generated figure
    """
    Zprogram = []           # Z of the "program" variant, one entry per demand
    bandallostep1 = []      # throughput after step 1 (minimize max link utilization)
    bandallostep2 = []      # throughput after step 2 (maximize total throughput)
    Zhardnop = []
    hardbandallostep1 = []
    hardbandallostep2 = []
    for d in demands:
        filename = pathpre+topo+"-"+str(d)+"-"+alg+"-program.json"
        fileJSON = readJSON(filename)
        Zprogram.append(fileJSON['Z'])
        bandallostep1.append(fileJSON['total throuput before the second step'])
        bandallostep2.append(fileJSON['total throuput after the second step'])
        filename = pathpre+topo+"-"+str(d)+"-"+alg+"-hardnop.json"
        # BUG FIX: the hardnop file was built but never read (the readJSON
        # call was commented out), so the "hardnop" series silently
        # duplicated the "program" values.
        fileJSON = readJSON(filename)
        Zhardnop.append(fileJSON['Z'])
        hardbandallostep1.append(fileJSON['total throuput before the second step'])
        hardbandallostep2.append(fileJSON['total throuput after the second step'])
    figname = topo+"-"+alg+"-program-hardnop.pdf"
    drawFigure(Zprogram, bandallostep1, bandallostep2, Zhardnop, hardbandallostep1, hardbandallostep2, figname, figurepath)
def analysisPaths(Zless1, demands, pathpre, topo, algdic, compare, figurepath):
    """Path-usage analysis over the Z<=1 demand group.

    For every algorithm, records for each candidate path the fraction of
    demand matrices (in Zless1) in which the path carries positive traffic
    in the step-2 solution, then draws the CDF of these path-utilization
    fractions.  If the caller passes an empty Zless1, it is rebuilt here
    from the optimal MCF result files.
    """
    #TODO TODO add ? path analysis for Zlarge1?
    if len(Zless1)==0:
        Zless1={}
        Zlarge1={}
        for d in demands:
            filename=pathpre+topo+"-"+str(d)+"-optimal-mcf.json"
            #Z,leband,lest,steband=readOptimal(filename)
            fileOptimal=readOptimal(filename)
            Z=fileOptimal['Z']
            if Z<=1:#fileOptimal['Z']<=1:
                Zless1[d]={}
                Zless1[d]['Optimal']=Z#fileOptimal['Z']
            elif Z<2.5:
                Zlarge1[d]={}
                Zlarge1[d]['Optimal']=Z#fileOptimal['Z']
        print "============"
    ndzles = len(Zless1)
    algPathSet={}   # alg -> { path string : fraction of demands using it }
    stpUtilize={}#This can be initialized only once then record the path utilization for each TM
    # { alg : { stpair : { path1 : %x, path2: %y, ...}}}
    #or { alg : { stpair : { path1 : [%x, di,...] , path2 : [%y, dj,...], ...} ... } ... }
    for d in Zless1:#demands:
        #TODO analysis path utilization and when Z<1 we can some how find the relationship
        #between optimal link using for each s-t pair compared with other algs
        for alg in algdic:
            if alg not in stpUtilize:
                algPathSet[alg]={}
                stpUtilize[alg]={}
            for comp in compare:#here actually includes only one method like ["hardnop"]
                filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
                fileJSON=readJSON(filename)
                Zless1[d][alg]=fileJSON['Z']
                key = "paths after the second step (maximize total throuput)"
                #TODO analysis the path utilization
                #for i in 1:n, j in 1:n @when i != j
                #    demand["$(hosts[i]) $(hosts[j])"] = line[(i-1) * n + j] / 2^20 # to Megabyte
                stpband={}   # (s,t) -> { path : positive band fraction }
                #print key
                #print fileJSON[key]
                pathsband=fileJSON[key]
                #print pathsband
                leband={}    # NOTE(review): built but the accumulation below is commented out -- currently unused
                for st in pathsband:
                    # st is a string like "h1 h2"; split into source/target names
                    sttemp=str(st).split()
                    s=sttemp[0]
                    t=sttemp[1]
                    stpband[(s,t)]={}
                    #print s,t,sttemp
                    #print s,t
                    sv=int(s[1:])#TODO for gscale we can do like this but for Cernet we need to read hosts
                    tv=int(t[1:])
                    if (s,t) not in stpUtilize[alg]:
                        stpUtilize[alg][(s,t)]={}
                    for path in pathsband[st]:
                        #TODO at least for ksp each time the path set is the same
                        if str(path) not in stpUtilize[alg][(s,t)]:
                            algPathSet[alg][str(path)]=0
                            stpUtilize[alg][(s,t)][str(path)]=[0]
                        if pathsband[st][path]>0:
                            # path is actually used for this demand: count it
                            # with weight 1/ndzles so the total is a fraction
                            stpUtilize[alg][(s,t)][str(path)][0]+=1*1.0/ndzles
                            stpUtilize[alg][(s,t)][str(path)].append(d)
                            algPathSet[alg][str(path)]+=1*1.0/ndzles
                            stpband[(s,t)][str(path)]=pathsband[st][path]
                            # path string looks like "->n1->n2->...->"; drop the
                            # empty first/last fields before walking the edges
                            ptemp = str(path).split("->")
                            del ptemp[0]
                            del ptemp[-1]
                            ilen=len(ptemp)-1
                            for i in range(ilen):
                                if (ptemp[i],ptemp[i+1]) not in leband:
                                    leband[(ptemp[i],ptemp[i+1])]=0
                                #TODO cal or get demand for corresponding topo and d (JSON file)
                                #leband[(ptemp[i],ptemp[i+1])]+=pathsband[st][path]*demand49gscale[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#here need to *demand(s,t)
    #print 'Step 2',stpband,leband#TODO not right
    #first get the result for hardnop
    print "============"
    print 'Zless1',Zless1,len(Zless1)
    print 'stpUtilize',stpUtilize
    dictx={}
    dicty={}
    for alg in algdic:
        data=[algPathSet[alg][key] for key in algPathSet[alg]]
        #x,y=ccdf(data)
        x,y=cdf(data)
        dictx[alg]=x
        dicty[alg]=y
    #drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
    drawCDFandCCDF(dictx,dicty,6,1,'Path utilization','Path-'+topo+'-1-Zless1-'+comp+'-cdf.pdf',figurepath)
def analysisFlowEntries(d, pathpre, topo, algdic, comp, figurepath):
dictx={}
dicty={}
for alg in algdic:
#for comp in compare:
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
key="number of path through each node"
#compute the y=p(X>x)
data=[]
temp=fileJSON[key]
for e in temp:
if e=='average':
print e+' '+str(temp[e])
continue
else:
data.append(temp[e])
print data
x,y=ccdf(data)
dictx[alg]=x
dicty[alg]=y
print "============"
#drawCDFandCCDF(dictx,dicty,2,0,'# flow entries','flow-gscale-1-'+comp+'-ccdf.pdf',figurepath)
drawCDFandCCDF(dictx,dicty,6,0,'# flow entries','flow-'+topo+'-1-'+comp+'-ccdf.pdf',figurepath)
def analysisLinksU(d, hostsdemand, pathpre, topo, algdic, comp, figurepath):
    """Link-utilization CDF comparison for one demand id.

    Plots, in one figure: (a) the per-edge load recorded in the algorithm's
    result file, (b) the traffic matrix routed along the step-1 paths (TM),
    (c) the same divided by Z (TM/Z), and (d) the optimal
    minimize-max-link-utilization solution.  `hostsdemand` maps the
    "s t" pair string to its demand volume.  Loads are divided by 1000
    (presumably Mbps -> Gbps -- TODO confirm units).
    """
    dictx={}
    dicty={}
    #demand49gscale=[2.2578562426522303e8... #TODO TODO This can still be used as our demand is not change for gscale, Cernet,
    # but more precise way is to use a function to get he corrsponding demand line
    leband={}
    for alg in algdic:#here is 'Custom' only for this function
        #for comp in compare:
        filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
        fileJSON=readJSON(filename)
        key="number of path, total flow, and link utiliaztion of each edge"
        #print key
        #print fileJSON[key]
        data=[]
        temp=fileJSON[key]
        for e in temp:
            if e=='average':
                #print e+' number of path, total flow, and link utiliaztion of each edge '+str(temp[e])
                print e+' '+str(temp[e])
                continue
            else:
                # temp[e] = [path count, total flow, utilization]; keep flow/1000
                data.append(temp[e][1]*1.0/1000)
                #elist=str(e).split()
                #print elist
                #leband[(elist[0],elist[1])]=temp[e][1]
        # key="number of path through each node"
        # #compute the y=p(X>=x)
        # data=[]
        # temp=fileJSON[key]
        # for e in temp:
        #     if e=='average':
        #         print e+' '+str(temp[e])
        #         continue
        #     else:
        #         data.append(temp[e])
        print data
        #x,y=ccdf(data)
        x,y=cdf(data)
        dictx[alg]=x
        dicty[alg]=y
        print "============"
        #if alg<>"Custom":#TODO TODO here only analysis Custom
        #    continue#TODO here we use our custom (maximum number of edge disjoint paths) to compute the TM/Z
        key='Z'
        Z=fileJSON[key]
        key="paths after the first step (minimize maximum link utilization)"
        #for i in 1:n, j in 1:n @when i != j
        #    demand["$(hosts[i]) $(hosts[j])"] = line[(i-1) * n + j] / 2^20 # to Megabyte
        stpband={}
        #print key
        #print fileJSON[key]
        pathsband=fileJSON[key]
        #print pathsband
        leband={}
        # Re-route the raw TM along the step-1 path split to get per-edge load.
        for st in pathsband:
            sttemp=str(st).split()
            s=sttemp[0]
            t=sttemp[1]
            stpband[(s,t)]={}
            #print s,t,sttemp
            #print s,t
            sv=int(s[1:])
            tv=int(t[1:])
            for path in pathsband[st]:
                if pathsband[st][path]>0:
                    stpband[(s,t)][str(path)]=pathsband[st][path]
                    # "->n1->n2->...->": strip empty first/last fields
                    ptemp = str(path).split("->")
                    del ptemp[0]
                    del ptemp[-1]
                    ilen=len(ptemp)-1
                    for i in range(ilen):
                        if (ptemp[i],ptemp[i+1]) not in leband:
                            leband[(ptemp[i],ptemp[i+1])]=0
                        leband[(ptemp[i],ptemp[i+1])]+=pathsband[st][path]*hostsdemand[str(st)]*1.0/pow(2,20)/1000#demandd[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#demand49gscale[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#here need to *demand(s,t)
        print 'TM/Z',stpband,leband#TODO here for Cernet need to change
        data=[leband[key] for key in leband]
        x,y=cdf(data)
        #dictx['TM (Custom)']=x
        #dicty['TM (Custom)']=y
        dictx['TM ('+alg+')']=x#TODO test this one
        dicty['TM ('+alg+')']=y
        data=[leband[key]*1.0/Z for key in leband]
        x,y=cdf(data)
        #dictx['TM/Z (Custom)']=x
        #dicty['TM/Z (Custom)']=y
        dictx['TM/Z ('+alg+')']=x
        dicty['TM/Z ('+alg+')']=y
    # Optimal step-1 baseline for the same demand id.
    filename=pathpre+topo+"-"+str(d)+"-optimal-mcf.json"
    #leband=readOptimal(filename)#TODO calculate here?
    leband=readOptimalLeband(filename)
    data=[leband[key]*1.0/1000 for key in leband]
    print 'optimal',data
    x,y=cdf(data)
    dictx['Minimize(max(le))']=x
    dicty['Minimize(max(le))']=y
    #dictx['Optimal']=x
    #dicty['Optimal']=y
    #drawCDFandCCDF(dictx,dicty,2,0,'link utilization','le-gscale-1-hardnop-ccdfd50.pdf')
    #drawCDFandCCDF(dictx,dicty,2,1,'link utilization','le-gscale-1-hardnop-cdftest40.pdf')
    #drawCDFandCCDF(dictx,dicty,2,1,'link utilization','le-gscale-1-program-cdftest30.pdf')
    drawCDFandCCDF(dictx,dicty,4,1,'link utilization','le-'+topo+'-1-'+alg+'-'+comp+'-cdfcomp1algTMZstep1-d'+str(d)+'.pdf',figurepath)
def analysisAlgsLinksU(d, pathpre, topo, algdic, comp, figurepath):
dictx={}
dicty={}
for alg in algdic:
#for comp in compare:
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
key="number of path, total flow, and link utiliaztion of each edge"
#print key
#print fileJSON[key]
data=[]
temp=fileJSON[key]
for e in temp:
if e=='average':
#print e+' number of path, total flow, and link utiliaztion of each edge '+str(temp[e])
print e+' '+str(temp[e])
continue
else:
data.append(temp[e][1]*1.0/1000)
print data
#x,y=ccdf(data)
x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
print "============"
#drawCDFandCCDF(dictx,dicty,2,0,'link utilization','le-gscale-1-hardnop-ccdfd50.pdf')
drawCDFandCCDF(dictx,dicty,6,1,'link utilization','le-'+topo+'-1-'+comp+'-cdfalld'+str(d)+'.pdf', figurepath)
def analysisPathlength(Zless1, demands, pathpre, topo, algdic, compare, figurepath):
#TODO here Zless1 actually can stands for a lot of things?
#TODO TODO consider for Z<=1 only or any Z?,
#the difference is that for Z>1 the total throughput of each alg after step 2 may not be the same
#TODO each s-t first get the average path length?, then draw CCDF for each alg (for all the s-t pairs)
if len(Zless1)==0:
Zless1={}
Zlarge1={}
for d in demands:
filename=pathpre+topo+"-"+str(d)+"-optimal-mcf.json"
#Z,leband,lest,steband=readOptimal(filename)
fileOptimal=readOptimal(filename)
Z=fileOptimal['Z']
if Z<=1:#fileOptimal['Z']<=1:
Zless1[d]={}
Zless1[d]['Optimal']=Z#fileOptimal['Z']
elif Z<2.5:
Zlarge1[d]={}
Zlarge1[d]['Optimal']=Z#fileOptimal['Z']
print "============"
ndzles = len(Zless1)
algPathSet={}
stpUtilize={}#This can be initialized only once then record the path utilization for each TM
# { alg : { stpair : { path1 : %x, path2: %y, ...}}}
#or { alg : { stpair : { path1 : [%x, di,...] , path2 : [%y, dj,...], ...} ... } ... }
algstpathlen={}
for d in Zless1:#demands:
#TODO analysis path utilization and when Z<1 we can some how find the relationship
#between optimal link using for each s-t pair compared with other algs
for alg in algdic:
if alg not in stpUtilize:
algPathSet[alg]={}
stpUtilize[alg]={}
algstpathlen[alg]={}
for comp in compare:#here actually includes only one method like ["hardnop"]
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
Zless1[d][alg]=fileJSON['Z']
key = "paths after the second step (maximize total throuput)"
stpband={}
pathsband=fileJSON[key]
leband={}
for st in pathsband:
sttemp=str(st).split()
s=sttemp[0]
t=sttemp[1]
stpband[(s,t)]={}
sv=int(s[1:])#TODO for gscale we can do like this but for Cernet we need to read hosts
tv=int(t[1:])
plentemp=0
pntemp=0
if (s,t) not in stpUtilize[alg]:
stpUtilize[alg][(s,t)]={}
algstpathlen[alg][(s,t)]=[0,0,100,0]
#first average path length, second average path number?, min len, max len?
for path in pathsband[st]:
#TODO at least for ksp each time the path set is the same
if str(path) not in stpUtilize[alg][(s,t)]:
algPathSet[alg][str(path)]=0
stpUtilize[alg][(s,t)][str(path)]=[0]
if pathsband[st][path]>0:
pntemp+=1
stpUtilize[alg][(s,t)][str(path)][0]+=1*1.0/ndzles
stpUtilize[alg][(s,t)][str(path)].append(d)
algPathSet[alg][str(path)]+=1*1.0/ndzles
stpband[(s,t)][str(path)]=pathsband[st][path]
ptemp = str(path).split("->")
plentemp+=len(ptemp)-1
if len(ptemp)-1<algstpathlen[alg][(s,t)][2]:
algstpathlen[alg][(s,t)][2]=len(ptemp)-1
if len(ptemp)-1>algstpathlen[alg][(s,t)][3]:
algstpathlen[alg][(s,t)][3]=len(ptemp)-1
del ptemp[0]
del ptemp[-1]
ilen=len(ptemp)-1
for i in range(ilen):
if (ptemp[i],ptemp[i+1]) not in leband:
leband[(ptemp[i],ptemp[i+1])]=0
#print plentemp,pntemp,algstpathlen[alg][(s,t)][0]
algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]+plentemp*1.0/pntemp/1.0/ndzles
algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]+pntemp/1.0/ndzles
#algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]/1.0/ndzles
#algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]/1.0/ndzles
#TODO cal or get demand for corresponding topo and d (JSON file)
#leband[(ptemp[i],ptemp[i+1])]+=pathsband[st][path]*demand49gscale[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#here need to *demand(s,t)
#print 'Step 2',stpband,leband#TODO not right
#first get the result for hardnop
#print "============"
print 'Zless1',Zless1,len(Zless1)
#print 'stpUtilize',stpUtilize
print 'algstpathlen ',algstpathlen
dictx={}
dicty={}
for alg in algdic:
data=[algstpathlen[alg][key][0] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Average path length of each s-t pair','Pathlength-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][1] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'# used path of each s-t pair','Pathnum-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][3] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the longest used s-t path','Pathmaxlen-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][3] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the shortest used s-t path','Pathmaxlen-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
def analysisPathlength1(Zrelated, lessorlargeorall, demands, pathpre, topo, algdic, compare, figurepath):
#TODO here Zrelated actually can stands for a lot of things?
#TODO TODO consider for Z<=1 only or any Z?,
#the difference is that for Z>1 the total throughput of each alg after step 2 may not be the same
#TODO each s-t first get the average path length?, then draw CCDF for each alg (for all the s-t pairs)
Zinname=''
if len(Zrelated)==0:
Zless1={}
Zlarge1={}
for d in demands:
filename=pathpre+topo+"-"+str(d)+"-optimal-mcf.json"
#Z,leband,lest,steband=readOptimal(filename)
fileOptimal=readOptimal(filename)
Z=fileOptimal['Z']
if Z<=1:#fileOptimal['Z']<=1:
Zless1[d]={}
Zless1[d]['Optimal']=Z#fileOptimal['Z']
elif Z<2.5:
Zlarge1[d]={}
Zlarge1[d]['Optimal']=Z#fileOptimal['Z']
print "============"
if lessorlargeorall==3:
Zrelated=demands
Zinname='all'
elif lessorlargeorall==2:
Zrelated=Zless1
Zinname='Zlarge1'
elif lessorlargeorall==1:
Zrelated=Zlarge1
Zinname='Zless1new'
else:
if lessorlargeorall==3:
Zinname='all'
elif lessorlargeorall==2:
Zinname='Zlarge1'
elif lessorlargeorall==1:
Zinname='Zless1new'
ndzles = len(Zrelated)
algPathSet={}
stpUtilize={}#This can be initialized only once then record the path utilization for each TM
# { alg : { stpair : { path1 : %x, path2: %y, ...}}}
#or { alg : { stpair : { path1 : [%x, di,...] , path2 : [%y, dj,...], ...} ... } ... }
algstpathlen={}
for d in Zrelated:#demands:
#TODO analysis path utilization and when Z<1 we can some how find the relationship
#between optimal link using for each s-t pair compared with other algs
for alg in algdic:
if alg not in stpUtilize:
algPathSet[alg]={}
stpUtilize[alg]={}
algstpathlen[alg]={}
for comp in compare:#here actually includes only one method like ["hardnop"]
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
#Zrelated[d][alg]=fileJSON['Z']
key = "paths after the second step (maximize total throuput)"
stpband={}
pathsband=fileJSON[key]
leband={}
for st in pathsband:
sttemp=str(st).split()
s=sttemp[0]
t=sttemp[1]
stpband[(s,t)]={}
sv=int(s[1:])#TODO for gscale we can do like this but for Cernet we need to read hosts
tv=int(t[1:])
plentemp=0
pntemp=0
if (s,t) not in stpUtilize[alg]:
stpUtilize[alg][(s,t)]={}
algstpathlen[alg][(s,t)]=[0,0,100,0]
#first average path length, second average path number?, min len, max len?
for path in pathsband[st]:
#TODO at least for ksp each time the path set is the same
if str(path) not in stpUtilize[alg][(s,t)]:
algPathSet[alg][str(path)]=0
stpUtilize[alg][(s,t)][str(path)]=[0]
if pathsband[st][path]>0:
pntemp+=1
stpUtilize[alg][(s,t)][str(path)][0]+=1*1.0/ndzles
stpUtilize[alg][(s,t)][str(path)].append(d)
algPathSet[alg][str(path)]+=1*1.0/ndzles
stpband[(s,t)][str(path)]=pathsband[st][path]
ptemp = str(path).split("->")
plentemp+=len(ptemp)-1
if len(ptemp)-1<algstpathlen[alg][(s,t)][2]:
algstpathlen[alg][(s,t)][2]=len(ptemp)-1
if len(ptemp)-1>algstpathlen[alg][(s,t)][3]:
algstpathlen[alg][(s,t)][3]=len(ptemp)-1
del ptemp[0]
del ptemp[-1]
ilen=len(ptemp)-1
for i in range(ilen):
if (ptemp[i],ptemp[i+1]) not in leband:
leband[(ptemp[i],ptemp[i+1])]=0
#print plentemp,pntemp,algstpathlen[alg][(s,t)][0]
algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]+plentemp*1.0/pntemp/1.0/ndzles
algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]+pntemp/1.0/ndzles
#algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]/1.0/ndzles
#algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]/1.0/ndzles
#TODO cal or get demand for corresponding topo and d (JSON file)
#leband[(ptemp[i],ptemp[i+1])]+=pathsband[st][path]*demand49gscale[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#here need to *demand(s,t)
#print 'Step 2',stpband,leband#TODO not right
#first get the result for hardnop
#print "============"
#print 'Zrelated',Zrelated,len(Zrelated)
#print 'stpUtilize',stpUtilize
#print 'algstpathlen ',algstpathlen
dictx={}
dicty={}
for alg in algdic:
data=[algstpathlen[alg][key][0] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Average path length of each s-t pair','Pathlength-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][1] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'# used path of each s-t pair','Pathnum-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][3] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the longest used s-t path','Pathmaxlen-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][2] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the shortest used s-t path','Pathminlen-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
#1. fairness: get the average? satisfied demand ratio (%) for each s-t pair, consider whether include Zopt<=1(will mostly be 1) , Z>1
#2. robustness: as we have shown that each s-t almost all use 1 path only (only one path has weight)
#therefore, we are hard to reroute in data plane use the normalized weight (TODO or just send packet out using equal weight to each healthy tunnel), ? TODO,
#2.1 get the percent that how many single link failure lead to some s-t pair unreachable
#2.2 get the Histogram of P(T>95%), P(T>95%), P(T>95%), P(T>95%), P(T>95%)
#2.3 get the throughput ratio CDF
def fairness(Zrelated, lessorlargeorall, pathpre, topo, algdic, comp, figurepath):
    """CCDF of the satisfied-demand ratio per s-t pair, averaged over Zrelated.

    `lessorlargeorall` only selects the tag embedded in the output file
    name: 3 -> 'all', 2 -> 'Zlarge1', 1 -> 'Zless1'.  For each algorithm
    also collects P(ratio > 0.9/0.95/0.99/0.999/0.9999) in algstdratioH.
    """
    if lessorlargeorall==3:
        Zinname='all'
    elif lessorlargeorall==2:
        Zinname='Zlarge1'
    elif lessorlargeorall==1:
        Zinname='Zless1'
    # NOTE(review): any other value leaves Zinname undefined and raises
    # NameError at the drawCDFandCCDF call below -- confirm callers only
    # pass 1, 2 or 3.
    dictx={}
    dicty={}
    algstdratio={}   # alg -> average satisfied ratio per s-t pair (as fractions)
    algstdratioH={}  # alg -> [P(r>0.9), P(r>0.95), P(r>0.99), P(r>0.999), P(r>0.9999)]
    for alg in algdic:
        #if alg not in algstdratio:
        #    algstdratio[alg]=[]
        alldsumtemp=[]#mat([])
        for d in Zrelated:
            #for comp in compare:
            filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
            fileJSON=readJSON(filename)
            key="number of path and demand ratio of each st pair"
            # "h22 h17": [3.0,46.54156150185264],
            #compute the y=p(X>x)
            data=[]
            temp=fileJSON[key]
            if 'average' in temp:
                del temp['average']
            if len(alldsumtemp)==0:
                # first demand: start a zero row vector, one column per s-t pair
                alldsumtemp=mat([0]*len(temp))
            # sorted(temp) keeps the column order stable across demands
            data=[float(temp[st][1]) for st in sorted(temp)]
            data=mat(data)
            alldsumtemp=alldsumtemp+data
            #print alldsumtemp
        #print alldsumtemp
        # average over demands and convert percent -> fraction
        alldsumtemp=alldsumtemp*1.0/100/len(Zrelated)
        algstdratio[alg]=alldsumtemp.tolist()[0]
        print algstdratio[alg]
        x,y=ccdf(algstdratio[alg])
        dictx[alg]=x
        dicty[alg]=y
        #keylist=["P(T > 90%)","P(T > 95%)","P(T > 99%)","P(T > 99.9%)","P(T > 99.99%)"]
        # Read P(ratio > threshold) off the CCDF at the five thresholds;
        # NOTE(review): the elif chain fills at most one slot per index, so
        # if a single step crosses several thresholds the later slots are
        # filled on the following indices (or stay 0 at the tail).
        percy=[0,0,0,0,0]
        for i in range(len(x)):
            percx=x[i]
            if percy[0]==0 and percx>=0.9:
                if percx==0.9 or i==0:
                    percy[0]=y[i]
                else:
                    percy[0]=y[i-1]
            elif percy[1]==0 and percx>=0.95:
                if percx==0.95 or i==0:
                    percy[1]=y[i]
                else:
                    percy[1]=y[i-1]
            elif percy[2]==0 and percx>=0.99:
                if percx==0.99 or i==0:
                    percy[2]=y[i]
                else:
                    percy[2]=y[i-1]
            elif percy[3]==0 and percx>=0.999:
                if percx==0.999 or i==0:
                    percy[3]=y[i]
                else:
                    percy[3]=y[i-1]
            elif percy[4]==0 and percx>=0.9999:
                if percx==0.9999 or i==0:
                    percy[4]=y[i]
                else:
                    percy[4]=y[i-1]
        #print percy
        #print "============"
        algstdratioH[alg]=percy
    #print algstdratio
    print algstdratioH
    #drawCDFandCCDF(dictx,dicty,2,0,'# flow entries','flow-gscale-1-'+comp+'-ccdf.pdf',figurepath)
    drawCDFandCCDF(dictx,dicty,6,0,'Satisfied demand ratio of each s-t pair','dratio-'+topo+'-1-'+Zinname+'-'+comp+'-ccdf.pdf',figurepath)
    #TODO draw Histogram for "P(T > 95%)","P(T > 90%)","P(T > 99%)","P(T > 99.9%)","P(T > 99.99%)"
def robustness(Zrelated, lessorlargeorall, pathpre, topo, algdic, comp, figurepath):
    """Single-link-failure robustness analysis.

    For each algorithm and demand id reads the "Z, total throuput, and
    throuput ratio at single edge failure" table, averages the pre-computed
    P(T > x%) histogram entries over all demands (algetratioH) and counts,
    per demand, how many single-edge failures disconnect at least one s-t
    pair (algefailunreach); a CCDF of those counts is prepared (the final
    draw calls are currently commented out).

    `lessorlargeorall` selects the file-name tag: 3 -> 'all',
    2 -> 'Zlarge1', 1 -> 'Zless1' (other values leave Zinname undefined).
    """
    #key="Z, total throuput, and throuput ratio at single edge failure"
    if lessorlargeorall==3:
        Zinname='all'
    elif lessorlargeorall==2:
        Zinname='Zlarge1'
    elif lessorlargeorall==1:
        Zinname='Zless1'
    keylist=["P(T > 90%)","P(T > 95%)","P(T > 99%)","P(T > 99.9%)","P(T > 99.99%)"]
    dictx={}
    dicty={}
    #algetratio={}
    algetratioH={}     # alg -> averaged [P(T>90%), ..., P(T>99.99%)]
    algefailunreach={} # alg -> per-demand count of failures causing unreachable pairs
    for alg in algdic:
        #if alg not in algstdratio:
        #    algstdratio[alg]=[]
        alldsumtemp=[]#mat([])
        alldsumH=[]
        dunreachtemp=[]
        for d in Zrelated:
            #for comp in compare:
            filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
            fileJSON=readJSON(filename)
            key="Z, total throuput, and throuput ratio at single edge failure"
            #"s6 s7": [3.1532651852755422, 14672.0837296913, 91.61506879097249],
            #compute the y=p(X>x)
            data=[]
            dataH=[]
            unreachN=0
            temp=fileJSON[key]
            if 'average' in temp:
                del temp['average']
            if len(alldsumH)==0:
                alldsumH=mat([0]*len(keylist))
            # Pull the histogram entries out of the table (and remove them so
            # only per-edge records remain in temp).
            for prob in keylist:
                dataH.append(temp[prob])
                del temp[prob]
            dataH=mat(dataH)
            alldsumH=alldsumH+dataH
            # TODO this can not use mat to add all , as some link down may lead to "some pairs have no path"
            for k in temp:
                if temp[k]=="some pairs have no path":
                    unreachN=unreachN+1
            dunreachtemp.append(unreachN)
            if len(alldsumtemp)==0:
                alldsumtemp=mat([0]*len(temp))
            #data=[float(temp[e][2]) for e in sorted(temp)]
            #data=mat(data)
            #alldsumtemp=alldsumtemp+data
            #print alldsumtemp
        #print alldsumtemp
        alldsumH=alldsumH*1.0/len(Zrelated)#remember it is % is OK
        algetratioH[alg]=alldsumH.tolist()[0]
        #alldsumtemp=alldsumtemp*1.0/100/len(Zrelated)
        #algetratio[alg]=alldsumtemp.tolist()[0]
        #print algetratio[alg]
        algefailunreach[alg]=dunreachtemp
        #x,y=ccdf(algetratio[alg])
        x,y=ccdf(algefailunreach[alg])
        dictx[alg]=x
        dicty[alg]=y
    print "============"
    print algetratioH
    print algefailunreach
    #print algstdratio
    #drawCDFandCCDF(dictx,dicty,2,0,'# flow entries','flow-gscale-1-'+comp+'-ccdf.pdf',figurepath)
    #drawCDFandCCDF(dictx,dicty,6,0,'Satisfied whole throughput ratio','tratio-'+topo+'-1-'+Zinname+'-'+comp+'-ccdf.pdf',figurepath)
    #drawCDFandCCDF(dictx,dicty,6,0,'Percent of unreachable s-t pairs','tratio-'+topo+'-1-'+Zinname+'-'+comp+'-ccdf.pdf',figurepath)
    #drawCDFandCCDF(dictx,dicty,6,0,'# unreachable s-t pairs','tratio-'+topo+'-1-'+Zinname+'-'+comp+'-ccdf.pdf',figurepath)
| [
"czhang226-c@my.cityu.edu.hk"
] | czhang226-c@my.cityu.edu.hk |
df2ff3db7a4108d0c2ebdb1e4027c6e6897ddf3f | 4ddedf2a3829d7cead057da3ed2ffcffc153786e | /6_google_trace/SONIA/testing/feature_encoder/BPNN/cluster/ann/cluster_4.py | f832fe7fc0ffacb1d946f6351bc72a3b4f6f55c4 | [
"MIT"
] | permissive | thieu1995/machine_learning | b7a854ea03f5559a57cb93bce7bb41178596033d | 40595a003815445a7a9fef7e8925f71d19f8fa30 | refs/heads/master | 2023-03-03T10:54:37.020952 | 2019-09-08T11:42:46 | 2019-09-08T11:42:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,838 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 15:49:27 2018
@author: thieunv
Cluster --> Update 10% matrix weight dua tren c_tung_hidden va dist_tung_hidden
Ket qua toi` hon cluster 3 (Toi nhat)
"""
import tensorflow as tf
import numpy as np
from scipy.spatial import distance
from math import exp, sqrt
import copy
from random import randint
from operator import itemgetter
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pandas import read_csv
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn import preprocessing
class Model(object):
def __init__(self, dataset_original, list_idx, epoch, batch_size, sliding, learning_rate, positive_number, stimulation_level):
self.dataset_original = dataset_original
self.train_idx = list_idx[0]
self.test_idx = list_idx[1]
self.epoch = epoch
self.batch_size = batch_size
self.sliding = sliding
self.learning_rate = learning_rate
self.positive_number = positive_number
self.stimulation_level = stimulation_level
self.min_max_scaler = preprocessing.MinMaxScaler()
self.standard_scaler = preprocessing.StandardScaler()
    def preprocessing_data(self):
        """Min-max scale (fitted on the training prefix only) and build sliding-window features.

        Sets: self.dataset_scale (scaled series), self.X_train / self.y_train
        and self.X_test (scaled sliding windows), self.y_test (unscaled
        original target values).
        """
        train_idx, test_idx, dataset_original, sliding = copy.deepcopy(self.train_idx), copy.deepcopy(self.test_idx), copy.deepcopy(self.dataset_original), copy.deepcopy(self.sliding)
        ## Get original dataset
        dataset_split = dataset_original[:test_idx + sliding]
        training_set = dataset_original[0:train_idx+sliding]
        testing_set = dataset_original[train_idx+sliding:test_idx+sliding]
        # Fit the scaler on the training prefix only (no test leakage here).
        training_set_transform = self.min_max_scaler.fit_transform(training_set)
        testing_set_transform = self.min_max_scaler.transform(testing_set)
        dataset_transform = np.concatenate( (training_set_transform, testing_set_transform), axis=0 )
        self.dataset_scale = copy.deepcopy(dataset_transform)
        ## Handle data with sliding: column i holds the series shifted by i steps
        dataset_sliding = dataset_transform[:len(dataset_transform)-sliding]
        for i in range(sliding-1):
            dddd = np.array(dataset_transform[i+1: len(dataset_transform)-sliding+i+1])
            dataset_sliding = np.concatenate((dataset_sliding, dddd), axis=1)
        ## Split data to set train and set test
        self.X_train, self.y_train = dataset_sliding[0:train_idx], dataset_sliding[sliding:train_idx+sliding, 0:1]
        self.X_test = dataset_sliding[train_idx:test_idx-sliding]
        # y_test stays in the original (unscaled) value range
        self.y_test = dataset_split[train_idx+sliding:test_idx]
        print("Processing data done!!!")
    def preprocessing_data_2(self):
        """Variant of preprocessing_data that fits the scaler on the WHOLE split.

        Unlike preprocessing_data, fit_transform runs over train+test together
        (leaks test statistics into the scaler); kept for comparison.  Sets the
        *_2 counterparts: dataset_scale_2, X_train_2, y_train_2, X_test_2,
        y_test_2.
        """
        train_idx, test_idx, dataset_original, sliding = copy.deepcopy(self.train_idx), copy.deepcopy(self.test_idx), copy.deepcopy(self.dataset_original), copy.deepcopy(self.sliding)
        ## Transform all dataset
        dataset_split = dataset_original[:test_idx + sliding]
        dataset_transform = self.min_max_scaler.fit_transform(dataset_split)
        self.dataset_scale_2 = copy.deepcopy(dataset_transform)
        ## Handle data with sliding: column i holds the series shifted by i steps
        dataset_sliding = dataset_transform[:len(dataset_transform)-sliding]
        for i in range(sliding-1):
            dddd = np.array(dataset_transform[i+1: len(dataset_transform)-sliding+i+1])
            dataset_sliding = np.concatenate((dataset_sliding, dddd), axis=1)
        ## Split data to set train and set test
        self.X_train_2, self.y_train_2 = dataset_sliding[0:train_idx], dataset_sliding[sliding:train_idx+sliding, 0:1]
        self.X_test_2 = dataset_sliding[train_idx:test_idx-sliding]
        self.y_test_2 = dataset_split[train_idx+sliding:test_idx]
        print("Processing data done!!!")
    def encoder_features(self):
        """Phase 1: grow hidden units by SOM-style clustering of the training samples.

        Starting from one randomly-picked training vector, each sample is
        assigned to the nearest hidden unit; if the distance is below
        self.stimulation_level a neighbourhood of units is pulled toward the
        sample, otherwise the sample becomes a new hidden unit and the pass
        restarts from the first sample.  Sets self.matrix_Wih (unit weight
        matrix) and self.list_hu_1 ([hit count, weight vector] per unit).
        """
        train_X = copy.deepcopy(self.X_train)
        stimulation_level, positive_number = self.stimulation_level, self.positive_number
        ### Training while simultaneously creating the hidden units (phase 1 - cluster the data)
        # 2. Initialize the first hidden unit from a random training vector
        hu1 = [0, Model.get_random_input_vector(train_X)]   # hidden unit 1 (t1, wH)
        list_hu = [copy.deepcopy(hu1)]                      # list of hidden units
        matrix_Wih = copy.deepcopy(hu1[1]).reshape(1, hu1[1].shape[0])   # 2-D weight matrix
        # training_detail_file_name = full_path + 'SL=' + str(stimulation_level) + '_Slid=' + str(sliding) + '_Epoch=' + str(epoch) + '_BS=' + str(batch_size) + '_LR=' + str(learning_rate) + '_PN=' + str(positive_number) + '_CreateHU.txt'
        m = 0
        while m < len(train_X):
            list_dist_mj = []        # distances dist(m, j) to every hidden unit
            # number of hidden units
            for j in range(0, len(list_hu)):          # j: index of hidden unit j
                dist_sum = 0.0
                for i in range(0, len(train_X[0])):   # i: index of input unit i
                    dist_sum += pow(train_X[m][i] - matrix_Wih[j][i], 2.0)
                list_dist_mj.append([j, sqrt(dist_sum)])
            list_dist_mj_sorted = sorted(list_dist_mj, key=itemgetter(1))   # ascending by distance
            c = list_dist_mj_sorted[0][0]        # c: index of the closest hidden unit
            distmc = list_dist_mj_sorted[0][1]   # distmc: the minimum distance
            if distmc < stimulation_level:
                list_hu[c][0] += 1   # update hit count of unit c
                # pull the closest ~90% of units toward the sample, weighted by distance
                neighbourhood_node = 1 + int( 0.9 * len(list_hu) )
                for i in range(0, neighbourhood_node ):
                    c_temp = list_dist_mj_sorted[i][0]
                    dist_temp = list_dist_mj_sorted[i][1]
                    hic = exp(- (dist_temp * dist_temp) )   # Gaussian neighbourhood factor
                    delta = (positive_number * hic) * (train_X[m] - list_hu[c_temp][1])
                    list_hu[c_temp][1] += delta
                    matrix_Wih[c_temp] += delta
                # continue with the next example
                m += 1
                if m % 100 == 0:
                    print "distmc = {0}".format(distmc)
                    print "m = {0}".format(m)
            else:
                # No unit is close enough: spawn a new hidden unit from this
                # sample, reset all hit counts and restart the pass.
                print "Failed !!!. distmc = {0}".format(distmc)
                list_hu.append([0, copy.deepcopy(train_X[m]) ])
                print "Hidden unit thu: {0} duoc tao ra.".format(len(list_hu))
                matrix_Wih = np.append(matrix_Wih, [copy.deepcopy(train_X[m])], axis = 0)
                for hu in list_hu:
                    hu[0] = 0
                # then go to step 1
                m = 0
        ### +++
        ### +++ Get the last matrix weight
        self.matrix_Wih = copy.deepcopy(matrix_Wih)
        self.list_hu_1 = copy.deepcopy(list_hu)
        print("Encoder features done!!!")
def transform_features(self):
temp1 = []
for i in range(0, len(self.X_train)):
Sih = []
for j in range(0, len(self.matrix_Wih)): # (w11, w21) (w12, w22), (w13, w23)
Sih.append(np.tanh( Model.distance_func(self.matrix_Wih[j], self.X_train[i])))
temp1.append(np.array(Sih))
temp2 = []
for i in range(0, len(self.X_test)):
Sih = []
for j in range(0, len(self.matrix_Wih)): # (w11, w21) (w12, w22), (w13, w23)
Sih.append(np.tanh( Model.distance_func(self.matrix_Wih[j], self.X_test[i])))
temp2.append(np.array(Sih))
self.S_train = np.array(temp1)
self.S_test = np.array(temp2)
print("Transform features done!!!")
    def draw_loss(self):
        """Plot the per-epoch training loss curve (figure 1).

        Assumes self.loss_train was populated by a training step not shown
        in this chunk -- TODO confirm.
        """
        plt.figure(1)
        plt.plot(range(self.epoch), self.loss_train, label="Loss on training per epoch")
        plt.xlabel('Iteration', fontsize=12)
        plt.ylabel('Loss', fontsize=12)
def draw_predict(self):
plt.figure(2)
plt.plot(self.y_test_inverse)
plt.plot(self.y_pred_inverse)
plt.title('Model predict')
plt.ylabel('Real value')
plt.xlabel('Point')
plt.legend(['realY... Test Score RMSE= ' + str(self.score_test_RMSE) , 'predictY... Test Score MAE= '+ str(self.score_test_MAE)], loc='upper right')
def draw_data_train(self):
plt.figure(3)
plt.plot(self.X_train[:, 0], self.X_train[:, 1], 'ro')
plt.title('Train Dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_data_test(self):
plt.figure(4)
plt.plot(self.X_test[:, 0], self.X_test[:, 1], 'ro')
plt.title('Test Dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_center(self):
plt.figure(5)
plt.plot(self.matrix_Wih[:, 0], self.matrix_Wih[:, 1], 'ro')
plt.title('Centers Cluter KMEANS')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_dataset(self):
plt.figure(6)
plt.plot(dataset_original, 'ro')
plt.title('Original dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_scale_dataset(self):
plt.figure(7)
plt.plot(self.dataset_scale, 'ro')
plt.title('Scale dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_scale_dataset_2(self):
plt.figure(8)
plt.plot(self.dataset_scale_2, 'ro')
plt.title('Scale dataset _ 2')
plt.ylabel('Real value')
plt.xlabel('Real value')
def fit(self):
self.preprocessing_data()
self.preprocessing_data_2()
self.encoder_features()
self.transform_features()
self.draw_data_train()
self.draw_data_test()
self.draw_center()
self.draw_dataset()
self.draw_scale_dataset()
self.draw_scale_dataset_2()
    @staticmethod
    def distance_func(a, b):
        """Euclidean distance between vectors a and b (scipy.spatial.distance)."""
        return distance.euclidean(a, b)
@staticmethod
def sigmoid_activation(x):
return 1.0 / (1.0 + exp(-x))
@staticmethod
def get_random_input_vector(train_X):
return copy.deepcopy(train_X[randint(0, len(train_X)-1)])
@staticmethod
def get_batch_data_next(trainX, trainY, index, batch_size):
real_index = index*batch_size
if (len(trainX) % batch_size != 0 and index == (len(trainX)/batch_size +1) ):
return (trainX[real_index:], trainY[real_index:])
elif (real_index == len(trainX)):
return ([], [])
else:
return (trainX[real_index: (real_index+batch_size)], trainY[real_index: (real_index+batch_size)])
## Load data frame
#full_path_name="/mnt/volume/ggcluster/spark-2.1.1-bin-hadoop2.7/thieunv/machine_learning/6_google_trace/data/"
#full_path= "/mnt/volume/ggcluster/spark-2.1.1-bin-hadoop2.7/thieunv/machine_learning/6_google_trace/FLNN/results/notDecompose/data10minutes/univariate/cpu/"
file_name = "Fuzzy_data_sampling_617685_metric_10min_datetime_origin.csv"
full_path_name = "/home/thieunv/university/LabThayMinh/code/6_google_trace/data/"
full_path = "/home/thieunv/university/LabThayMinh/code/6_google_trace/tensorflow/testing/"
# Only column 0 of the Google-trace CSV is loaded (univariate series).
df = read_csv(full_path_name+ file_name, header=None, index_col=False, usecols=[0], engine='python')
dataset_original = df.values
# Hyper-parameter grids; the commented lists are the full search spaces.
stimulation_level = [0.25] #[0.10, 0.2, 0.25, 0.50, 1.0, 1.5, 2.0] # [0.20]
positive_numbers = [0.01] #[0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.20] # [0.1]
learning_rates = [0.25] #[0.005, 0.01, 0.025, 0.05, 0.10, 0.12, 0.15] # [0.2]
sliding_windows = [2] #[ 2, 3, 5] # [3]
epochs = [2800] #[100, 250, 500, 1000, 1500, 2000] # [500]
batch_sizes = [32] #[8, 16, 32, 64, 128] # [16]
# (train_end, test_end) index pairs splitting the series.
list_num = [(2800, 4170)]
pl1 = 1 # Use to draw figure
#pl2 = 1000
so_vong_lap = 0
# Grid search: build and fit one Model per hyper-parameter combination.
# NOTE: Python 2 print statements below -- this file is Python 2 only.
for list_idx in list_num:
    for sliding in sliding_windows:
        for sti_level in stimulation_level:
            for epoch in epochs:
                for batch_size in batch_sizes:
                    for learning_rate in learning_rates:
                        for positive_number in positive_numbers:
                            febpnn = Model(dataset_original, list_idx, epoch, batch_size, sliding, learning_rate, positive_number, sti_level)
                            febpnn.fit()
                            so_vong_lap += 1
                            # Progress heartbeat every 5000 combinations.
                            if so_vong_lap % 5000 == 0:
                                print "Vong lap thu : {0}".format(so_vong_lap)
print "Processing DONE !!!"
| [
"nguyenthieu2102@gmail.com"
] | nguyenthieu2102@gmail.com |
476abbd5b335730c6e16e3a8b6cb4482c8c5263c | 3bec21e0d6a9e6df61fdd45da9f813a00e758fd3 | /Insomnia_Ranking/preprocess.py | 3c17f8d99a4c8732982855dfbc9ba1cb82039491 | [] | no_license | Sungwon-Han/Learning-Sleep-Quality-from-Daily-Logs | e5d1f87018b3dd2ca84a829dea744bf91f0d0c6e | a505d7f32392c9ca57fc79c40a21255fd4ebd4ce | refs/heads/master | 2021-06-28T19:15:39.729479 | 2020-09-23T06:39:43 | 2020-09-23T06:39:43 | 169,042,617 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,947 | py | import pandas as pd
import numpy as np
import pyprind
def max_min_normalization(sleep_activity_nap):
    """Min-max scale every feature column into [0, 1].

    The first three columns (userId, month, date) are identifier columns:
    they are passed through unchanged and re-attached on the left, so the
    returned frame keeps the input's column order.
    """
    feature_cols = list(sleep_activity_nap.columns)[3:]
    id_cols = sleep_activity_nap[["userId", "month", "date"]]
    col_min = sleep_activity_nap.min()[3:]
    col_max = sleep_activity_nap.max()[3:]
    scaled = (sleep_activity_nap[feature_cols] - col_min) / (col_max - col_min)
    return pd.concat([id_cols, scaled], axis=1)
def Dict_user_data(sleep_activity_nap):
    """Collapse the log frame into {userId: ndarray with one row per day}.

    Days are hard-coded: 8 days (23-30) for the first month, 31 days for
    the second, 3 days for the third.  When a user has several records on
    one day, only the record with the latest sleep_end_time is kept; the
    first three id columns (userId, month, date) are dropped from each row.

    NOTE(review): `list(set(...))` yields an arbitrary order, yet month[j]
    is paired with date[j] positionally -- this assumes the month set
    happens to iterate in calendar order; verify against the data.
    NOTE(review): indexing [0, 3:] raises IndexError if a user has no
    record for some day in the hard-coded calendar.
    """
    user_Id = list(set(sleep_activity_nap["userId"]))
    month = list(set(sleep_activity_nap["month"]))
    # Per-month day lists: Oct 23-30, Nov 1-30(+1), Dec 1-3.
    date = [[i+23 for i in range(8)],[j+1 for j in range(31)],[k+1 for k in range(3)]]
    dict_user_data = {}
    for i in pyprind.prog_bar(range(len(user_Id))):
        user_data = []
        for j in range(len(month)):
            for k in range(len(date[j])):
                # All records for this user on this calendar day.
                user_data_date = sleep_activity_nap[(sleep_activity_nap["userId"]==user_Id[i])&(sleep_activity_nap["month"]==month[j])&(sleep_activity_nap["date"]==date[j][k])]
                # Keep the record with the latest sleep_end_time; drop id columns.
                user_data_constrain = np.array(user_data_date[user_data_date["sleep_end_time"]==max(user_data_date["sleep_end_time"])])[0,3:]
                user_data.append(user_data_constrain)
        user_data = np.array(user_data)
        dict_user_data[user_Id[i]] = user_data
    return dict_user_data
def Dict_user_data_2(user_Id, sleep_activity_nap):
    """Collect each user's rows into {userId: ndarray}.

    Every row belonging to the user is kept in frame order; the leading
    userId column is dropped from the resulting array.
    """
    dict_user_data = {}
    for idx in pyprind.prog_bar(range(len(user_Id))):
        uid = user_Id[idx]
        user_rows = sleep_activity_nap[sleep_activity_nap["userId"] == uid]
        dict_user_data[uid] = np.array(user_rows)[:, 1:]
    return dict_user_data
def Dict_user_window_sf(user_Id, dict_user_data, window_size, sleep_efficiency_location):
    """Slide a fixed-size window over each user's day sequence.

    For every user returns two aligned dicts:
      windows -- array of shape (T - window_size, window_size, n_features)
      targets -- the sleep-efficiency value (column index
                 `sleep_efficiency_location`) of the day immediately
                 following each window.
    """
    dict_user_window = {}
    dict_user_sleep_efficency = {}
    for uid in user_Id:
        days = dict_user_data[uid]
        n_windows = days.shape[0] - window_size
        windows = [days[start:start + window_size, :] for start in range(n_windows)]
        targets = [days[start + window_size, sleep_efficiency_location]
                   for start in range(n_windows)]
        dict_user_window[uid] = np.array(windows)
        dict_user_sleep_efficency[uid] = np.array(targets)
    return dict_user_window, dict_user_sleep_efficency
def Dict_user_window_sf_diff(user_Id,dict_user_window,dict_user_sleep_efficency,thr):
dict_user_window_diff,dict_user_sf_diff = {},{}
for i in range(len(user_Id)):
user_window_diff ,user_sf_diff= [],[]
for j in range(dict_user_window[user_Id[i]].shape[0]):
user_sf_diff.append([])
user_window_diff.append([])
for k in range(len(user_Id)):
if user_Id[i] != user_Id[k]:
user_window_diff[j].append(dict_user_window[user_Id[i]][j]-dict_user_window[user_Id[k]][j])
if dict_user_sleep_efficency[user_Id[i]][j]-dict_user_sleep_efficency[user_Id[k]][j] > thr:
user_sf_diff[j].append(0)
else:
user_sf_diff[j].append(1)
user_window_diff = np.array(user_window_diff)
user_sf_diff = np.array(user_sf_diff)
dict_user_window_diff[user_Id[i]] = user_window_diff
dict_user_sf_diff[user_Id[i]] = user_sf_diff
return dict_user_window_diff,dict_user_sf_diff
def Dict_X_Y_seperate(user_Id, dict_user_window_diff, dict_user_sf_diff, train_No):
    """Split each user's pairwise features/labels into train and test,
    taking the first `train_No` windows for training.

    Returns (X_train, Y_train, X_test, Y_test) dicts keyed by userId.
    """
    X_train = {uid: dict_user_window_diff[uid][:train_No, :] for uid in user_Id}
    X_test = {uid: dict_user_window_diff[uid][train_No:, :] for uid in user_Id}
    Y_train = {uid: dict_user_sf_diff[uid][:train_No, :] for uid in user_Id}
    Y_test = {uid: dict_user_sf_diff[uid][train_No:, :] for uid in user_Id}
    return X_train, Y_train, X_test, Y_test
def X_Y_train(user_Id, dict_user_X_train, dict_user_Y_train):
    """Stack every user's training pairs into flat model-ready arrays.

    Returns X of shape (samples, window, features) and Y of shape
    (samples, 1), where samples = n_users * n_windows * n_other_users.
    """
    X_train = np.array([dict_user_X_train[uid] for uid in user_Id])
    Y_train = np.array([dict_user_Y_train[uid] for uid in user_Id])
    n_samples = X_train.shape[0] * X_train.shape[1] * X_train.shape[2]
    X_train = X_train.reshape((n_samples, X_train.shape[3], X_train.shape[4]))
    Y_train = Y_train.reshape((n_samples, 1))
    return X_train, Y_train
| [
"noreply@github.com"
] | noreply@github.com |
33dee721b6270508e69fbb4d7fab0c50ba9838d6 | f375f6edff092bac8e1c7d9628a37d837d8d5206 | /organize/forms.py | 37b795e63d67dd505b28b1f5e2028c6f7ea25e7b | [] | no_license | SumedhaShetty/HeadSpaceCoders | 4d92534c71fbc3b023f5dac57a128cf1003481f8 | 432dffbf1c2e74d7cedaf8532e7725628a83af3a | refs/heads/master | 2023-05-01T18:25:44.478177 | 2020-02-23T06:12:43 | 2020-02-23T06:12:43 | 242,450,880 | 0 | 1 | null | 2023-04-21T20:47:52 | 2020-02-23T03:37:20 | JavaScript | UTF-8 | Python | false | false | 215 | py | from django import forms
from .models import Event
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing an Event: title, content and image."""
    class Meta:
        model = Event
        # Only these Event fields are exposed on the form.
        fields= [
        "title",
        "content",
        "img",
        ]
"sumedhashetty8@gmail.com"
] | sumedhashetty8@gmail.com |
a9cd81676f816e00ef69cf3442787107109adc24 | 7834e7a48399b156401ea62c0c6d2de80ad421f5 | /docs/sphinx/conf.py | 6a6e8b3e6216ce0aa57a8b196300a395552f700e | [
"MIT"
] | permissive | vojnovski/pysparkling | b9758942aba0d068f6c51797c8fb491cf59c3401 | 21b36464371f121dc7963dac09d300e7235f587e | refs/heads/master | 2020-04-08T18:33:55.707209 | 2016-07-27T15:12:59 | 2016-07-27T15:12:59 | 62,555,929 | 0 | 0 | null | 2016-07-04T11:06:18 | 2016-07-04T11:06:18 | null | UTF-8 | Python | false | false | 10,067 | py | # -*- coding: utf-8 -*-
#
# pysparkling documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 7 12:37:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# extract version from pysparkling/__init__.py without importing the package
pysparkling_init_filename = os.path.join(
    os.path.dirname(__file__),
    '..',
    '..',
    'pysparkling',
    '__init__.py',
)
with open(pysparkling_init_filename, 'r') as f:
    try:
        # First line of the form: __version__ = '1.2.3'
        version_line = next(line for line in f if line.startswith('__version__'))
    except StopIteration:
        # Fail with a clear message instead of an opaque IndexError.
        raise RuntimeError('__version__ not found in ' + pysparkling_init_filename)
# Strip "__version__ = " and the surrounding quote characters.
PYSPARKLING_VERSION = version_line.split('=')[1].strip()[1:-1]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysparkling'
copyright = u'2015-2016, a project started by Sven Kreiss'
author = u'Sven Kreiss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = PYSPARKLING_VERSION
# The full version, including alpha/beta/rc tags.
release = PYSPARKLING_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'images/logo-w600.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparklingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pysparkling.tex', u'pysparkling Documentation',
u'Sven Kreiss', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysparkling', u'pysparkling Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pysparkling', u'pysparkling Documentation',
author, 'pysparkling', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"me@svenkreiss.com"
] | me@svenkreiss.com |
0a117f861b3f60e9a1df9e1a1af6745c5cfb7da5 | 55bd8a722a993b5831e7b5556c4c6d60dd388984 | /TRIMbatch/batch.py | e5291769b02d0dfeafabe3677fe51fac532a9800 | [] | no_license | bundseth/TRIFIC | 88a3b18ec47622fb78f320b29a711c24d22d4b7c | 15ffabeeeb083a15b2b1cc606f46805fc3081896 | refs/heads/master | 2021-08-31T08:11:41.635000 | 2017-12-20T18:38:09 | 2017-12-20T18:38:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,832 | py | import os
import pathlib
import subprocess
from . import compoundparse
from . import ionparse
class Batch:
    def __init__(self,saveto,ion,mass,energy,number,angle=0,corr=0,autosave=10000):
        """Initialize a TRIM batch definition for one ion species.

        Args:
            saveto: sub-directory name under ~/TRIFIC/TRIMDATA where .IN
                files and outputs for this experiment are stored.
            ion: chemical symbol, e.g. 'Ga' (must exist in the atom table).
            mass: ion mass in amu (e.g. 80 for 80Ga).
            energy: ion energy in keV.
            number: number of ions to simulate (50 is usually reasonable).
            angle, corr, autosave: TRIM defaults (0, 0, 10000); rarely
                need changing.

        Raises:
            ValueError: unknown symbol, non-integer number/autosave, or a
                non-positive numeric parameter.
        """
        self.saveto = str(saveto)
        self.ion = ion
        self.mass = mass
        self.energy = energy
        self.number = number
        self.angle = angle
        self.corr = corr
        self.autosave = autosave
        self._atoms = ionparse.ionparse()
        self._compounds = compoundparse.compoundparse()
        ###### Check for legitimate inputs ######
        # ion symbol must be valid; if we can't find the atomic number, quit out; this is a quick and dirty search
        self._Z1 = 0
        for atnb, atdata in self._atoms.items():
            if atdata['Symbol'] == self.ion:
                self._Z1 = atnb
        if self._Z1 == 0:
            raise ValueError('Please enter a valid chemical symbol (H - U)')
        # check number and autosave are integers
        if any(isinstance(arg,int) is False for arg in [number,autosave]):
            raise ValueError('Please enter an integer for number and autosave values')
        # check mass, energy, angle and corr are numbers
        # NOTE(review): a failed int() conversion only prints a warning and
        # execution continues with the bad value -- consider raising instead.
        for val in [mass, energy, angle, corr]:
            try:
                val = int(val)
            except ValueError:
                print('Please enter a number for mass, energy, angle and correction')
        # check numerical arguments are all positive
        if any(numarg <= 0 for numarg in [mass,energy,number,autosave]):
            raise ValueError('Only positive values are accepted for ion parameters')
        self._homedir = os.path.expanduser('~')
        self._fnames = [] # stores file names written using data from this object
        # dictionary (not list) so target layers may be defined out of order
        self._layers = {}
    def addTargetLayer(self,lnumber,lname,width=0,unit='Ang',density=0,pressure=0,corr=1,gas=0,compound=True):
        """Add one layer to the simulation target.

        Args:
            lnumber: 1-based position of the layer in the stack; there must
                be a layer 1 and no gaps when makeBatch() is called.
            lname: compound name exactly matching TRIM's compound directory
                (or one hardcoded in compoundparse.py), or a chemical symbol
                when compound=False.
            width: layer thickness in `unit`.
            unit: 'Ang' (default), 'cm' or 'um'; converted to Angstrom.
            density: g/cm3; 0 means use the TRIM default (be wary -- some
                defaults are quite off).
            pressure: Torr; only meaningful for gas layers. A user density
                always takes precedence; ideal-gas scaling from the STP
                default is applied in makeBatch().
            corr: compound (Bragg) correction; TRIM computes this on the
                fly in the GUI -- copy its value here if desired, else 1.
            gas: MANDATORY explicit True/False; the phase is not pulled
                from TRIM's directories automatically.
            compound: True to look `lname` up in the compound directory,
                False to treat it as a single atom.

        Raises:
            ValueError: unknown atom/compound, bad unit, non-boolean gas,
                or a negative numeric parameter.
        """
        ###### Check for legitimate inputs ######
        # Check for valid atom/compound name
        if compound == False:
            goodatom = False
            for atnb, atdata in self._atoms.items():
                if atdata['Symbol'] == lname:
                    goodatom = True
            if goodatom == False:
                raise ValueError('Single atom layer not found')
        if compound == True:
            if lname not in self._compounds.keys():
                raise ValueError('Compound not found. Check name matches in compound directory, or add your own in the parser')
        # Check for valid width
        # NOTE(review): failed conversions below only print, they do not raise.
        try:
            val = int(width)
        except ValueError:
            print('Width must be a number')
        # Check for valid unit
        if unit not in ['Ang','cm','um']:
            raise ValueError('Unit must be Ang, cm or um')
        # Check for valid density
        try:
            val = int(density)
        except ValueError:
            print('Density must be a number in g/cm3')
        if density != 0 or gas == False:
            # density takes precedence over pressure
            pressure = 0
        # Check for valid pressure
        try:
            val = int(pressure)
        except ValueError:
            print('Pressure must be a number in Torr')
        # Check for valid compound correction
        try:
            val = int(corr)
        except ValueError:
            print('Compound correction must be a number')
        # Make sure gas bool is properly defined
        if isinstance(gas,bool) is False:
            raise ValueError('Enter boolean True/False for gas variable')
        # Make sure all numerical values are positive or zero
        if any(numarg < 0 for numarg in [width,density,pressure,corr]):
            raise ValueError('Only positive values accepted for target layer parameters')
        ###### Continue with valid inputs ######
        # Convert width to Angstroms (TRIM's native unit).
        if unit == 'um':
            width *= 10000
        if unit == 'cm':
            width *= 100000000
        self._layers[str(lnumber)] = {
            'Name': lname,
            'Width': width,
            'Density': density,
            'Corr': corr,
            'Gas': gas,
            'Pressure': pressure,
            'Compound': compound
        }
def nextIon(self,ion,mass,energy,number,angle=0,corr=0,autosave=10000):
# Given an existing batch object, changes the ion data and writes another .IN file with the same target info
self.ion = ion
self.mass = mass
self.energy = energy
self.number = number
self.angle = angle
self.corr = corr
self.autosave = autosave
self._Z1 = 0
for atnb, atdata in self._atoms.items():
if atdata['Symbol'] == self.ion:
self._Z1 = atnb
if self._Z1 == 0:
raise ValueError('Please enter a valid chemical symbol (H - U)')
# check number and autosave are integers
if any(isinstance(arg,int) is False for arg in [number,autosave]):
raise ValueError('Please enter an integer for number and autosave values')
# check mass, energy, angle and corr are numbers
for val in [mass, energy, angle, corr]:
try:
val = int(val)
except ValueError:
print('Please enter a number for mass, energy, angle and correction')
# check numerical arguments are all positive
if any(numarg <= 0 for numarg in [mass,energy,number,autosave]):
raise ValueError('Only positive values accepted for ion parameters')
self.makeBatch()
    def makeBatch(self):
        """Write a TRIM batch-mode .IN file for the current ion and layers.

        Validates that layer keys run 1..N with no gaps, resolves each
        layer's atomic makeup and density (applying ideal-gas pressure
        scaling once), then emits the TRIM.IN-format text file as
        <saveto>/IN/<mass><ion><energy>.txt, creating the IN/ and OUT/
        directories if needed.  Lines use CRLF endings as TRIM expects.

        Raises:
            ValueError: if the layer numbering has gaps.
        """
        ###### Check target layers are ok ######
        # make sure that the layering order is sensical ie. layer keys proceed '1' to '# of layers'
        self._nolayers = len(self._layers.keys())
        for i in range(1,self._nolayers+1):
            if str(i) not in self._layers.keys():
                raise ValueError('Missing layers')
        ###### create file to write to ######
        self._fnames.append(str(self.mass)+self.ion+str(self.energy)+'.txt')
        ###### get target parameters ######
        # get atomic makeup of layers
        self._layermakeup = [] # list corresponding to number of atoms in each layer, in order
        for i in range(1,self._nolayers+1):
            if self._layers[str(i)]['Compound'] == False:
                # look up atom in atom dictionary
                for atnb, atdata in self._atoms.items():
                    if atdata['Symbol'] == self._layers[str(i)]['Name']:
                        self._layers[str(i)]['Atom List'] = [[int(atnb), 1.0]]
                        if self._layers[str(i)]['Density'] == 0:
                            if self._layers[str(i)]['Gas'] == True:
                                self._layers[str(i)]['Density'] = atdata['GasDens']
                            else:
                                self._layers[str(i)]['Density'] = atdata['Density']
                self._layermakeup.append(1)
            else:
                # look up compound in compound dictionary
                self._layers[str(i)]['Atom List'] = self._compounds[self._layers[str(i)]['Name']]['Stoich']
                if self._layers[str(i)]['Density'] == 0:
                    self._layers[str(i)]['Density'] = self._compounds[self._layers[str(i)]['Name']]['Density']
                self._layermakeup.append(len(self._layers[str(i)]['Atom List']))
            if self._layers[str(i)]['Pressure'] != 0:
                # ideal-gas scaling of the STP default density (760 Torr)
                self._layers[str(i)]['Density'] *= self._layers[str(i)]['Pressure']/760
                self._layers[str(i)]['Pressure'] = 0 # set back to 0 so density is not scaled for future ions
        self._nolayeratoms = sum(self._layermakeup)
        # compile atomic data for layers
        self._targetatoms = [] # list of dictionaries for each atom, indexed by position in layers
        for i in range(1,self._nolayers+1):
            for j in self._layers[str(i)]['Atom List']:
                self._targetatoms.append( {
                    'Symbol': self._atoms[str(j[0])]['Symbol'],
                    'Z': j[0],
                    'Mass': self._atoms[str(j[0])]['Natural Weight'],
                    'Stoich': j[1],
                    'Disp': self._atoms[str(j[0])]['Disp'],
                    'Latt': self._atoms[str(j[0])]['Latt'],
                    'Surf': self._atoms[str(j[0])]['Surf']
                })
        ###### write .IN file ######
        # write ion data and options as input by user below (some are hardcoded)
        # parameters have been checked during target and ion input methods, so we should end up with a 'good' batch file (can't account for ignorance)
        # create directories if they do not already exist
        savetodir = os.path.join(self._homedir,'TRIFIC','TRIMDATA',self.saveto)
        pathlib.Path(os.path.join(savetodir,'IN')).mkdir(parents=True, exist_ok=True)
        pathlib.Path(os.path.join(savetodir,'OUT')).mkdir(parents=True, exist_ok=True)
        # write to a file given ion information; will overwrite any existing file with the same name (same ion data)
        with open(os.path.join(savetodir,'IN',self._fnames[-1]),'w') as infile:
            print('==> SRIM-2013.00 This file controls TRIM Calculations.', end='\r\n', file=infile)
            print('Ion: Z1 , M1, Energy (keV), Angle,Number,Bragg Corr,AutoSave Number.', end='\r\n', file=infile)
            # NOTE(review): angle, corr and autosave are written as the
            # literals 0, 0 and 10000 here rather than self.angle /
            # self.corr / self.autosave -- confirm this is intentional.
            print('{} {} {} {} {} {} {}'.format(self._Z1, self.mass, self.energy, 0, self.number, 0, 10000), end='\r\n', file=infile)
            print('Cascades(1=No;2=Full;3=Sputt;4-5=Ions;6-7=Neutrons), Random Number Seed, Reminders', end='\r\n', file=infile)
            print('{} {} {}'.format(1, 0, 0), end='\r\n', file=infile)
            print('Diskfiles (0=no,1=yes): Ranges, Backscatt, Transmit, Sputtered, Collisions(1=Ion;2=Ion+Recoils), Special EXYZ.txt file', end='\r\n', file=infile)
            print('{} {} {} {} {} {}'.format(0, 0, 0, 0, 2, 0), end='\r\n', file=infile)
            print('Target material : Number of Elements & Layers', end='\r\n', file=infile)
            print('\"{} ({}) into '.format(self.ion, self.energy), end='', file=infile)
            for i in range(1,self._nolayers+1):
                if i == self._nolayers:
                    print('{}\" '.format(self._layers[str(i)]['Name']), end='', file=infile)
                else:
                    print('{}+'.format(self._layers[str(i)]['Name']), end='', file=infile)
            print('{} {}'.format(self._nolayeratoms, self._nolayers), end='\r\n', file=infile)
            print('PlotType (0-5); Plot Depths: Xmin, Xmax(Ang.) [=0 0 for Viewing Full Target]', end='\r\n', file=infile)
            print('{} {} {}'.format(5, 0, 0), end='\r\n', file=infile)
            print('Target Elements: Z Mass(amu)', end='\r\n', file=infile)
            for i in range(len(self._targetatoms)):
                print('Atom {} = {} = {} {}'.format(i+1, self._targetatoms[i]['Symbol'], self._targetatoms[i]['Z'], self._targetatoms[i]['Mass']), end='\r\n', file=infile)
            # print layer header
            print('Layer Layer Name / Width Density ', end='', file=infile)
            for i in range(len(self._targetatoms)):
                print('{}({}) '.format(self._targetatoms[i]['Symbol'], self._targetatoms[i]['Z']), end='', file=infile)
            print('', end='\r\n', file=infile)
            print('Numb. Description (Ang) (g/cm3) ', end='', file=infile)
            for i in range(len(self._targetatoms)):
                print('Stoich ', end='', file=infile)
            print('', end='\r\n', file=infile)
            # print layer information, this is the clunkiest part
            printedstoich = 0 # track printing of stoichiometry for each atom in each layer
            for i in range(1,self._nolayers+1):
                print(' {} \"{}\" {} {} '.format(i, self._layers[str(i)]['Name'], self._layers[str(i)]['Width'], self._layers[str(i)]['Density']), end='', file=infile)
                # zero columns for atoms that belong to earlier layers
                for j in range(printedstoich):
                    print('{} '.format(0), end='', file=infile)
                # stoichiometry columns for this layer's own atoms
                for j in range(printedstoich,printedstoich+len(self._layers[str(i)]['Atom List'])):
                    print('{} '.format(self._targetatoms[j]['Stoich']), end='', file=infile)
                    printedstoich += 1
                # zero columns for atoms that belong to later layers
                for j in range(self._nolayeratoms-printedstoich):
                    print('{} '.format(0), end='', file=infile)
                print('', end='\r\n', file=infile)
            # print gas details for each layer
            print('0 Target layer phases (0=Solid, 1=Gas)', end='\r\n', file=infile)
            for i in range(1,self._nolayers+1):
                if self._layers[str(i)]['Gas'] == True:
                    print('1 ', end='', file=infile)
                else:
                    print('0 ', end='', file=infile)
            print('', end='\r\n', file=infile)
            # print compound correction for each layer
            print('Target Compound Corrections (Bragg)', end='\r\n', file=infile)
            for i in range(1,self._nolayers+1):
                print('{} '.format(self._layers[str(i)]['Corr']), end='', file=infile)
            print('', end='\r\n', file=infile)
            # print target atom displacement energies
            print('Individual target atom displacement energies (eV)', end='\r\n', file=infile)
            for i in range(self._nolayeratoms):
                print('{} '.format(self._targetatoms[i]['Disp']), end='', file=infile)
            print('', end='\r\n', file=infile)
            # print target atom lattice binding energies
            print('Individual target atom lattice binding energies (eV)', end='\r\n', file=infile)
            for i in range(self._nolayeratoms):
                print('{} '.format(self._targetatoms[i]['Latt']), end='', file=infile)
            print('', end='\r\n', file=infile)
            # print target atom surface binding energies
            print('Individual target atom surface binding energies (eV)', end='\r\n', file=infile)
            for i in range(self._nolayeratoms):
                print('{} '.format(self._targetatoms[i]['Surf']), end='', file=infile)
            print('', end='\r\n', file=infile)
            print('Stopping Power Version (1=2011, 0=2011)', end='\r\n', file=infile)
            print(' 0', end='\r\n', file=infile)
    def batchFiles(self):
        # Accessor for the batch input file names generated for this run.
        # NOTE(review): presumably populated elsewhere in this class when the
        # TRIM.IN files are written -- confirm before relying on it.
        return self._fnames
def Sim(saveto,fs):
    """Run a TRIM simulation for each requested input file.

    For every file name in ``fs`` found under
    ``~/TRIFIC/TRIMDATA/<saveto>/IN``, copy it over the SRIM-2013 install's
    ``TRIM.IN``, run ``TRIM.exe`` under wine, and copy the resulting
    ``COLLISON.txt`` back to the ``OUT`` sub-directory under the same name.

    Args:
        saveto: name of an existing directory under ~/TRIFIC/TRIMDATA
        fs: iterable of input file names to simulate

    Raises:
        ValueError: if ``saveto`` does not exist.
    """
    homedir = os.path.expanduser('~')
    os.chdir(os.path.join(homedir,'.wine','drive_c','Program Files (x86)','SRIM-2013'))
    if saveto not in os.listdir(os.path.join(homedir,'TRIFIC','TRIMDATA')):
        raise ValueError('Given directory not found')
    for f in fs:
        if f not in os.listdir(os.path.join(homedir,'TRIFIC','TRIMDATA',saveto,'IN')):
            print(f,'not found in given directory')
            # BUG FIX: previously execution fell through after the warning and
            # ran the copy/simulation steps with a stale (or, on the first
            # iteration, undefined) file name.  Skip missing files instead.
            continue
        filetosim = f
        tocopy = os.path.join(homedir,'TRIFIC','TRIMDATA',saveto,'IN',filetosim)
        topaste = os.path.join(homedir,'.wine','drive_c','Program Files (x86)','SRIM-2013','TRIM.IN')
        subprocess.call(['cp',tocopy,topaste])
        subprocess.call(['wine','TRIM.exe'])
        copyto = os.path.join(homedir,'.wine','drive_c','Program Files (x86)','SRIM-2013','SRIM Outputs','COLLISON.txt')
        pasteto = os.path.join(homedir,'TRIFIC','TRIMDATA',saveto,'OUT',filetosim)
        subprocess.call(['cp',copyto,pasteto])
def PIDPlot(saveto,fs,Xrange=0,Yrange=0,Xbins=50,Ybins=50):
    # Creates PID plots for already-simulated files: runs ./TRIFICsim once per
    # anode-grid pairing (12, 13, 23) and pipes its CSV output into ./csv2h2.
    # Xbins/Ybins set the histogram binning (50-100 is a reasonable default);
    # Xrange/Yrange force the axis ranges, 0 lets the plotter choose from the
    # data.  All four are validated as integers before being interpolated into
    # the shell command (guards against shell injection via these arguments).
    homedir = os.path.expanduser('~')
    if saveto not in os.listdir(os.path.join(homedir,'TRIFIC','TRIMDATA')):
        raise ValueError('Given directory not found')
    elif any(f not in os.listdir(os.path.join(homedir,'TRIFIC','TRIMDATA',saveto,'OUT')) for f in fs):
        raise ValueError('File not found in given directory')
    elif any(isinstance(kwarg,int) is False for kwarg in [Xbins,Ybins,Xrange,Yrange]):
        raise ValueError('Plotter arguments (bins, ranges) must be integers')
    os.chdir(os.path.join(homedir,'TRIFIC'))
    toplot = []
    for f in fs:
        toplot.append(os.path.join(homedir,'TRIFIC','TRIMDATA',saveto,'OUT',f))
    # one command line per grid pairing; file paths are appended, then the
    # csv2h2 histogramming stage with its options
    tocall12 = './TRIFICsim 12 '
    tocall13 = './TRIFICsim 13 '
    tocall23 = './TRIFICsim 23 '
    for f in toplot:
        tocall12 = tocall12+f+' '
        tocall13 = tocall13+f+' '
        tocall23 = tocall23+f+' '
    tocall12 = tocall12+'| ./csv2h2 -nx '+str(Xbins)+' -ny '+str(Ybins)+' -rx '+str(Xrange)+' -ry '+str(Yrange)+' -gn 12'
    tocall13 = tocall13+'| ./csv2h2 -nx '+str(Xbins)+' -ny '+str(Ybins)+' -rx '+str(Xrange)+' -ry '+str(Yrange)+' -gn 13'
    tocall23 = tocall23+'| ./csv2h2 -nx '+str(Xbins)+' -ny '+str(Ybins)+' -rx '+str(Xrange)+' -ry '+str(Yrange)+' -gn 23'
    # shell=True is required for the pipe; the three plots run concurrently
    subprocess.Popen(tocall12,shell=True)
    subprocess.Popen(tocall13,shell=True)
    subprocess.Popen(tocall23,shell=True)
    # block and then kill histograms if user did not close them properly
    input("Press Enter to quit...")
    subprocess.run("killall csv2h2",shell=True)
def getFiles(saveto):
    """Return the already-simulated file names in an existing directory.

    Convenience helper so previously simulated ions can be handed straight
    to the plotting functions.

    Raises:
        ValueError: if ``saveto`` does not exist under ~/TRIFIC/TRIMDATA.
    """
    base = os.path.join(os.path.expanduser('~'), 'TRIFIC', 'TRIMDATA')
    if saveto in os.listdir(base):
        return os.listdir(os.path.join(base, saveto, 'OUT'))
    raise ValueError('Given directory not found')
| [
"undsethb2@gmail.com"
] | undsethb2@gmail.com |
5405bae6fd3682fa18b2ac967334cf490035517b | 3949aaab457f08908c96db9eaa6105f4c0c7f6c5 | /canny_edge.py | 8910134e5bf7627e9de864665651ccfa761edbb1 | [] | no_license | darshan1504/Computer-Vision-Assignment | a0ab261faeb28e5e5fbf41ca3d2eabb47565b9f1 | 9ac6d84bc32a9e8b467da5e38bb874a88544bc96 | refs/heads/master | 2021-05-15T11:19:46.336600 | 2017-10-25T19:51:00 | 2017-10-25T19:51:00 | 108,318,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,033 | py | # Importing all the libraries
import cv2
import pylab as plt
import numpy as np
img = cv2.imread('image.jpg');
# convert to grayscale using open cv function
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY);
# Plot the original image in the plot area
plt.subplot(2, 2, 1)
plt.imshow(gray_image, cmap='gray') # Plots or shows the image and also sets the image to gray
plt.title('Original Image')
plt.xticks([]), plt.yticks([]) # Get or set the x-limits and y-limits of the current tick locations and labels.
# checking the rows and column length of the image and storing them in the variables
image_rows = img.shape[0]
image_cols = img.shape[1]
# Canny step 1: noise reduction with a 3x3 Gaussian blur
gaussian_blur_image = cv2.GaussianBlur(gray_image, (3, 3), 0)
# Canny step 2: intensity gradients via Sobel derivatives
# Gradient-x using OpenCV sobel
gradient_x = cv2.Sobel(gaussian_blur_image, cv2.CV_16S , 1, 0, ksize=3)
# Gradient-y using OpenCV sobel
gradient_y = cv2.Sobel(gaussian_blur_image, cv2.CV_16S , 0, 1, ksize=3)
# Absolute of gradient-x and gradient-y
abs_grad_x = cv2.convertScaleAbs(gradient_x)
abs_grad_y = cv2.convertScaleAbs(gradient_y)
# Gradient magnitude approximated as a 50/50 weighted sum of |Gx| and |Gy|
dst = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
# Gradient direction, used below by the non-max suppression.
# NOTE(review): np.arctan2 returns RADIANS in [-pi, pi], yet the sector tests
# below compare against DEGREE boundaries (22.5, 67.5, ...).  As written,
# every positive angle lands in sector 0 and every negative one in 135 --
# confirm whether np.degrees(...) (mod 360) was intended here.
grad_dir = np.arctan2(gradient_y, gradient_x)
# Quantize the gradient angle into four sectors (0, 45, 90, 135 degrees)
for x in range(image_rows):
    for y in range(image_cols):
        if (grad_dir[x][y] < 22.5 and grad_dir[x][y] >= 0) or \
                (grad_dir[x][y] >= 157.5 and grad_dir[x][y] < 202.5) or \
                (grad_dir[x][y] >= 337.5 and grad_dir[x][y] <= 360):
            grad_dir[x][y] = 0
        elif (grad_dir[x][y] >= 22.5 and grad_dir[x][y] < 67.5) or \
                (grad_dir[x][y] >= 202.5 and grad_dir[x][y] < 247.5):
            grad_dir[x][y] = 45
        elif (grad_dir[x][y] >= 67.5 and grad_dir[x][y] < 112.5) or \
                (grad_dir[x][y] >= 247.5 and grad_dir[x][y] < 292.5):
            grad_dir[x][y] = 90
        else:
            grad_dir[x][y] = 135
non_max_supression = dst.copy()
# Canny step 3: non-maximum suppression -- zero out any pixel that is not a
# local maximum along its quantized gradient direction (compare the two
# neighbours ahead of and behind the pixel for each sector)
for x in range(1, image_rows - 1):
    for y in range(1, image_cols - 1):
        if grad_dir[x][y] == 0:
            if (dst[x][y] <= dst[x][y + 1]) or \
                (dst[x][y] <= dst[x][y - 1]):
                non_max_supression[x][y] = 0
        elif grad_dir[x][y] == 45:
            if (dst[x][y] <= dst[x - 1][y + 1]) or \
                (dst[x][y] <= dst[x + 1][y - 1]):
                non_max_supression[x][y] = 0
        elif grad_dir[x][y] == 90:
            if (dst[x][y] <= dst[x + 1][y]) or \
                (dst[x][y] <= dst[x - 1][y]):
                non_max_supression[x][y] = 0
        else:
            if (dst[x][y] <= dst[x + 1][y + 1]) or \
                (dst[x][y] <= dst[x - 1][y - 1]):
                non_max_supression[x][y] = 0
# Canny step 4: hysteresis thresholding on the suppressed image.
# Two thresholds are used; pixels above the high threshold are definite
# ("strong") edges, pixels between the two are "weak" candidates.
# Try changing the values of the high threshold and low threshold for
# different outputs; a common rule of thumb is high_threshold = 2 * low_threshold
high_threshold = 45
low_threshold = 25
# boolean mask of pixels above the high threshold -- these seed the final edges
strong_edges = (non_max_supression > high_threshold)
# Strong has value 2, weak has value 1
thresholded_edges = np.array(strong_edges, dtype=np.uint8) + (non_max_supression > low_threshold)
# Tracing edges with hysteresis, Find weak edge pixels near strong edge pixels
final_edges = strong_edges.copy() # Creating copy of strong edges
new_pixels = []
for r in range(1, image_rows - 1):
    for c in range(1, image_cols - 1):
        if thresholded_edges[r, c] != 1:
            continue # Not a weak pixel
        # Promote a weak pixel to an edge if any of its 8 neighbours is strong
        local_patch = thresholded_edges[r - 1:r + 2, c - 1:c + 2]
        patch_max = local_patch.max()
        if patch_max == 2:
            new_pixels.append((r, c))
            final_edges[r, c] = 1
# Iteratively extend edges through chains of connected weak pixels
while len(new_pixels) > 0:
    new_pix = []
    for r, c in new_pixels:
        for dr in range(-1, 2):
            for dc in range(-1, 2):
                if dr == 0 and dc == 0:
                    continue
                r2 = r + dr
                c2 = c + dc
                if thresholded_edges[r2, c2] == 1 and final_edges[r2, c2] == 0:
                    # Copy this weak pixel to final result
                    new_pix.append((r2, c2))
                    final_edges[r2, c2] = 1
    new_pixels = new_pix
cv_canny_edges = cv2.Canny(img,100,200)
plt.subplot(2, 2, 2)
plt.imshow(final_edges, cmap='gray') # Plots or shows the image and also sets the image to gray
plt.title('Finale Edge Image')
plt.xticks([]), plt.yticks([]) # Get or set the x-limits and y-limits of the current tick locations and labels.
plt.subplot(2, 2, 3)
plt.imshow(gray_image, cmap='gray') # Plots or shows the image and also sets the image to gray
plt.title('Original Image')
plt.xticks([]), plt.yticks([]) # Get or set the x-limits and y-limits of the current tick locations and labels.
plt.subplot(2, 2, 4)
plt.imshow(cv_canny_edges, cmap='gray') # Plots or shows the image and also sets the image to gray
plt.title('Finale Edge Image using OpenCV')
plt.xticks([]), plt.yticks([]) # Get or set the x-limits and y-limits of the current tick locations and labels.
plt.show()
"djethwa2810@gmail.com"
] | djethwa2810@gmail.com |
056eb32c8505ccab7a062ed6637a7159eb4ccbe2 | ea92859e68b6f51c5e9b80196e98fbb209b4dc73 | /build/chapter4/catkin_generated/pkg.develspace.context.pc.py | 3e3ba7efddce9124ef4bc85625458ac4d9f103ab | [] | no_license | Guo-ziwei/ROS | 2a36ccd22ebb7084ffd89d55a144565bd8400f59 | 8dbae0d31676cf9a1864d7b9e7f35f83dfa021e0 | refs/heads/master | 2021-07-11T01:15:45.117417 | 2019-03-12T09:28:15 | 2019-03-12T09:28:15 | 153,623,513 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated develspace package context for catkin; the values are
# substituted by CMake from pkg.context.pc.in -- edits here are overwritten.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "chapter4"
PROJECT_SPACE_DIR = "/home/guoziwei/ROS/devel"
PROJECT_VERSION = "0.0.0"
| [
"mechanicbeiyou@gmail.com"
] | mechanicbeiyou@gmail.com |
33516c24ec951e32d2454058cccb932ff632af1d | 9855a6472fa9cd0a0ed75d5d1110eb5450e38c35 | /django_mailbox/runtests.py | f5b0ff3b0c41ddd22e10232d108f622b41e04984 | [] | no_license | JessAtBlocBoxCo/blocbox | efef025333b689e4c9e0fb6a7bfb2237fcdc72a0 | 0966fd0ba096b2107bd6bd05e08c43b4902e6ff2 | refs/heads/master | 2020-04-11T04:30:25.792700 | 2015-09-22T04:41:34 | 2015-09-22T04:41:34 | 23,008,502 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | #!/usr/bin/env python
import sys
from os.path import dirname, abspath
try:
from django import setup
except ImportError:
pass
from django.conf import settings
if not settings.configured:
    # Minimal standalone configuration so the app's test suite can run
    # without a host Django project: a SQLite database plus only the apps
    # django_mailbox needs (auth/contenttypes) and the app under test.
    settings.configure(
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
            },
        },
        INSTALLED_APPS=[
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django_mailbox',
        ]
    )
from django.test.simple import DjangoTestSuiteRunner
def runtests(*test_args):
    """Run the given test labels (default: django_mailbox) and exit with the
    number of failures as the process status."""
    labels = list(test_args) if test_args else ['django_mailbox']
    # make the package importable when run from a checkout
    sys.path.insert(0, dirname(abspath(__file__)))
    try:
        # populate the Django app registry (Django >= 1.7)
        setup()
    except NameError:
        # setup() was never imported: this Django is too old to need it
        pass
    suite_runner = DjangoTestSuiteRunner(
        verbosity=1,
        interactive=False,
        failfast=False
    )
    sys.exit(suite_runner.run_tests(labels))
if __name__ == '__main__':
runtests(*sys.argv[1:])
| [
"jess@blocbox.co"
] | jess@blocbox.co |
30d69ee46b287f605e3b00585551ac99490e0da6 | 28a75dffa5c69658dc5237d0c725f2727fdbad66 | /core/migrations/0003_auto_20161230_0845.py | ee617fbcf84ce5158c579f61762015f6997614d9 | [] | no_license | Scaledesk/pepsi_app | d6e5d53cb08eda1db3b67bbf4391386567662a56 | c83428dd08b55d2f054c3c518e4d1d1b02d6a2c6 | refs/heads/master | 2021-04-29T09:24:08.915514 | 2017-12-21T10:16:30 | 2017-12-21T10:16:30 | 77,633,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-30 08:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration updating Meta options on CourseOneVideoQues.

    Only changes the human-readable verbose names; no database schema change.
    """

    dependencies = [
        ('core', '0002_auto_20161230_0843'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='courseonevideoques',
            options={'verbose_name': 'Course One Video Question', 'verbose_name_plural': 'Course One Video Questions'},
        ),
    ]
| [
"deepak.bartwal@outlook.com"
] | deepak.bartwal@outlook.com |
5801d61a76bb6014cefa063ced6f04c6567f17bd | 3e50738d43130403c6263db59fa89d9bd09277b8 | /backend/ceph_perf_api/ceph_perf_api/urls.py | 6ec507c94071bf648e025441dadd35e477814b2b | [] | no_license | alswell/lctc-ceph-performance-workbench | e52a89225cdd81d32961b828203ddae28877833a | aab4acff8c089a2dd818c8bc3e18b79bdf227601 | refs/heads/master | 2021-01-02T08:13:42.939546 | 2017-10-16T02:06:09 | 2017-10-16T02:06:09 | 98,964,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | """ceph_perf_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'^api/v1/', include('api.v1.urls')),  # versioned API, v1
    url(r'^api/v2/', include('api.v2.urls')),  # versioned API, v2
]
| [
"zhouning2@lenovo.com"
] | zhouning2@lenovo.com |
1c59577d6902a03d8bd0d2ab797126dd295d5699 | c6c470017412d55c79c331ad8663ca8769605c17 | /app/util/base/base_dao.py | 606b3c5a2897d2957d633f300bc76d589af8359c | [] | no_license | 9Echo/gc-goods-allocation | 0d96fabe5ce38f3290ea7a22261ac6bc5fcfff4a | 5fb62820fa697ffc45931c4c19a9b0775feb1fc5 | refs/heads/master | 2023-04-13T12:15:28.479707 | 2021-04-26T11:53:03 | 2021-04-26T11:53:03 | 298,716,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,977 | py | # -*- coding: utf-8 -*-
# @Time : 2019/11/12 14:40
# @Author : Zihao.Liu
import traceback
import pymysql
from pymysql import MySQLError
from app.util.db_pool import db_pool_ods
class BaseDao:
    """Base class wrapping common MySQL operations on the ODS pool.

    Every method borrows a connection from ``db_pool_ods``, guarantees that
    the cursor and connection are released in ``finally`` (even when opening
    the connection itself failed), and re-raises any failure as
    ``pymysql.MySQLError`` after printing the traceback.  Write operations
    roll the transaction back before re-raising.
    """

    def select_one(self, sql, values=None):
        """Execute ``sql`` and return the first row as a dict (or None)."""
        conn = None
        cursor = None
        try:
            conn = db_pool_ods.connection()
            cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
            if values:
                cursor.execute(sql, values)
            else:
                cursor.execute(sql)
            return cursor.fetchone()
        except Exception as e:
            traceback.print_exc()
            # chain the original error instead of discarding it
            raise MySQLError(str(e)) from e
        finally:
            # cursor/conn may still be None if connection() itself failed;
            # the previous unconditional close() masked the real error
            if cursor:
                cursor.close()
            if conn:
                conn.close()

    def select_all(self, sql, values=None):
        """Execute ``sql`` and return all rows as a list of dicts."""
        conn = None
        cursor = None
        try:
            conn = db_pool_ods.connection()
            cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
            if values:
                cursor.execute(sql, values)
            else:
                cursor.execute(sql)
            return cursor.fetchall()
        except Exception as e:
            traceback.print_exc()
            raise MySQLError(str(e)) from e
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()

    def execute(self, sql, values=None):
        """Execute one write statement and commit (rollback on failure)."""
        conn = None
        cursor = None
        try:
            conn = db_pool_ods.connection()
            cursor = conn.cursor()
            if values:
                cursor.execute(sql, values)
            else:
                cursor.execute(sql)
            conn.commit()
        except Exception as e:
            traceback.print_exc()
            if conn:
                conn.rollback()
            raise MySQLError(str(e)) from e
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()

    def executemany(self, sql, values=None):
        """Execute ``sql`` once per row of ``values`` and commit."""
        conn = None
        cursor = None
        try:
            conn = db_pool_ods.connection()
            cursor = conn.cursor()
            if values:
                cursor.executemany(sql, values)
            else:
                # NOTE(review): pymysql's executemany() requires an args
                # sequence, so this branch raises and surfaces as MySQLError
                # (same as the original code) -- confirm whether a plain
                # execute(sql) was intended when no values are given.
                cursor.executemany(sql)
            conn.commit()
        except Exception as e:
            traceback.print_exc()
            if conn:
                conn.rollback()
            raise MySQLError(str(e)) from e
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()

    def execute_many_sql(self, sql_list, values):
        """Run sql_list[0], then executemany(sql_list[1], values), in one
        transaction; commit both or roll both back."""
        conn = None
        cursor = None
        try:
            conn = db_pool_ods.connection()
            cursor = conn.cursor()
            cursor.execute(sql_list[0])
            cursor.executemany(sql_list[1], values)
            conn.commit()
        except Exception as e:
            traceback.print_exc()
            if conn:
                conn.rollback()
            raise MySQLError(str(e)) from e
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()
| [
"1094015147@qq.com"
] | 1094015147@qq.com |
b3c5dba2d752effee260014af048c20f6498d839 | c2e316c3d47f519c28a332f6a9cf8e10c9a1bb6e | /End_to_End_ML/regression_model/config/config.py | 8eeefad8f68ab65afe4ee7362610252c3a7d1f0e | [] | no_license | iRahulPandey/DataScience | 9e84ad99f1b46d24cf723ec78a647b7ff71ee5f7 | c40f2b1abd72327f9d4553c4ad4101c73919faf0 | refs/heads/master | 2023-04-08T00:22:09.285158 | 2021-04-11T13:11:34 | 2021-04-11T13:11:34 | 347,886,245 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # import library
import pathlib as pl
import sys
# path to the root folder (Windows-style relative path to the package)
root_path = r".\regression_model"
# absolute path to the package root
PACKAGE_ROOT = pl.Path(root_path).resolve()
# path to dataset
DATASET_PATH = PACKAGE_ROOT / "datasets"
# NOTE(review): debug print left in -- this runs on every import of config
print(DATASET_PATH)
# path to trained model
TRAINED_MODEL_PATH = PACKAGE_ROOT / "trained_model"
# pipeline name and versioned save-file prefix
PIPELINE_NAME = "linear_regression"
PIPELINE_SAVE_FILE = f"{PIPELINE_NAME}_output_v_"
# data file names
TRAINING_DATA_FILE = "train.csv"
TESTING_DATA_FILE = "test.csv"
# feature and target column names
FEATURES = ["X1", "X2", "X3"]
TARGET = "TARGET"
# uniform distributed features
UNIFORM_DISTRIBUTED_FEATURES = ["X1", "X3"]
# normal distributed feature
NORMAL_DISTRIBUTED_FEATURES = ["X2"]
"rpandey1901@gmail.com"
] | rpandey1901@gmail.com |
4b41a372e6be94215e21b79d21ea497aa8ffa11f | 7372d91b86166621402c52ceb9fbccec4f9b8035 | /display.py | c8c8a125cd5f1c98ada8a233e2f7ad064fe9905c | [] | no_license | bsthowell/ngrams | f4bdbde0ed4eb8db39bbe6b364fd9ce4d303cd2d | 377fdfae1231d0719032440488c0d00f29c8ce52 | refs/heads/master | 2021-01-11T04:16:10.145410 | 2016-10-18T02:43:36 | 2016-10-18T02:43:36 | 71,193,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python
#occ = occurrences
import sys
import matplotlib.pyplot as plt
import numpy as np
import seaborn
OCC_SCALING = 0.01
OCC_SCALING = 1
LINE_LEN = 15
def find_end_points(year, occ, grad):
    """Project the far endpoint of a short direction-field segment.

    Starting at (year, occ), step LINE_LEN units along the direction whose
    slope is *grad*; the vertical component is rescaled by OCC_SCALING.
    Returns the (year, occ) coordinates of the endpoint.
    """
    angle = np.arctan(grad)
    dx = LINE_LEN * np.cos(angle)
    dy = LINE_LEN * np.sin(angle) * OCC_SCALING
    return year + dx, occ + dy
fig = plt.figure()
# Each stdin line is "year occurrence gradient count"; draw a short black
# segment at (year, occ) pointing along the gradient to build a direction
# field of word popularity over time.
for line in sys.stdin:
    year, occ, grad, count = line.strip().split(' ')
    occ = 100 * float(occ)  # rescale occurrences to percent of peak
    year_ep, occ_ep = find_end_points(int(year), occ, float(grad))
    year_pair = [year, year_ep]
    occ_pair = [occ, occ_ep]
    plt.plot(year_pair, occ_pair, 'k')
plt.xlabel('Year')
plt.ylabel('% of Peak Popularity')
plt.title('Empirical Directional Field of Word Popularity over Time')
plt.show()
| [
"bsthowell@gmail.com"
] | bsthowell@gmail.com |
3b98d9957343a21a0180bd5459bf78102ba1e140 | 7a7e2201642a730460dd4d3b0441df3710898787 | /PythonWidget/utils/dict_add_property.py | 8bbc35f19b7e2e8cb75fa7aefa989c3814ba339e | [
"BSD-3-Clause"
] | permissive | xiaodongxiexie/python-widget | 87118cbd75927f2f181fc5c9ff1a0fbd1c12af27 | 58fd929ee57884a73a1d586c7b891c82b9727f93 | refs/heads/master | 2023-04-02T03:13:51.929149 | 2023-03-23T02:17:21 | 2023-03-23T02:17:21 | 89,505,063 | 188 | 55 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | # -*- coding: utf-8 -*-
# @Author: xiaodong
# @Date: 2017-11-21 16:34:12
# @Last Modified by: xiaodong
# @Last Modified time: 2017-11-23 16:24:03
from collections import abc
from keyword import iskeyword
class DictAddProperty:
    """Expose a (possibly nested) mapping through attribute access.

    Keys that collide with Python keywords are stored with a trailing
    underscore (e.g. ``class`` becomes ``class_``).  Nested mappings and
    mutable sequences are wrapped lazily by :meth:`build` on each lookup.
    """

    def __init__(self, mapping):
        safe = {}
        for key, value in mapping.items():
            safe[key + '_' if iskeyword(key) else key] = value
        self.__data = safe

    def __getattr__(self, name):
        # genuine dict attributes (keys, items, ...) win over stored entries
        if hasattr(self.__data, name):
            return getattr(self.__data, name)
        return DictAddProperty.build(self.__data[name])

    @classmethod
    def build(cls, obj):
        """Wrap mappings, recurse into mutable sequences, pass through rest."""
        if isinstance(obj, abc.Mapping):
            return cls(obj)
        if isinstance(obj, abc.MutableSequence):
            return [cls.build(elem) for elem in obj]
        return obj
if __name__ == '__main__':
    # quick demo: nested dict including a key ('class') that is a keyword
    test = {'a': 1, 'b': 2, 'c': 3}
    test2 = {'d': 4, 'e': 5, 'f': 6, 'class': 'CLASS'}
    test['g'] = test2
    t = DictAddProperty(test)
    print (t.a, t.b, t.g.d, t.g.e, t.g.class_)
"1027887088@qq.com"
] | 1027887088@qq.com |
625be6865689feaf51c9b0b8de50380c2cbb2a69 | fc8c4d46dcad7c768a06e8ce31a8aa4ee32c8256 | /week6/heap_sort_05135902補交.py | abeb16f0d4b39f56b55a089af514ea6a3e501a01 | [] | no_license | wangshuti/DSA | 75cfb34af69998a743b99d557b6a72ba96eb1f69 | 7a3380b06ace3c3a68ee93aff19f89799b5dc6b8 | refs/heads/master | 2020-07-30T19:27:55.810783 | 2020-01-10T04:13:26 | 2020-01-10T04:13:26 | 210,332,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import math
class Solution():
    """Heap sort on a 1-indexed max-heap (index 0 is a dummy sentinel)."""

    def adjust(self, heap, k, n):
        """Sift heap[k] down so the subtree rooted at k is a max-heap.

        Args:
            heap: 1-indexed list (heap[0] is an unused sentinel slot).
            k: root index of the subtree to fix.
            n: index of the last valid heap element.
        """
        root = k
        tmp = heap[root]
        child = 2 * root
        while child <= n:
            # pick the larger of the two children
            if child < n and heap[child] < heap[child + 1]:
                child = child + 1
            if tmp > heap[child]:
                break
            else:
                # pull the child up and continue sifting down
                heap[child // 2] = heap[child]
                child = child * 2
        heap[child // 2] = tmp

    def heap_sort(self, heap):
        """Return a new list with the elements of ``heap`` sorted ascending.

        Works on a 1-indexed copy, so (unlike the previous implementation,
        which called insert(0, 0) on it) the caller's list is not mutated.
        """
        work = [0] + list(heap)  # slot 0 is a dummy for 1-based indexing
        n = len(work) - 1
        # build the max-heap bottom-up (fix: call self.adjust instead of
        # instantiating a fresh Solution() for every call)
        for x in range(n // 2, 0, -1):
            self.adjust(work, x, n)
        # repeatedly move the max to the end and re-heapify the prefix
        for y in range(n - 1, 0, -1):
            work[1], work[y + 1] = work[y + 1], work[1]
            self.adjust(work, 1, y)
        return work[1:]
# In[2]:
heap = Solution().heap_sort([3,2,-4,6,4,2,19])
# In[3]: expected output: [-4, 2, 2, 3, 4, 6, 19]
print(heap)
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
56de2963e60e6e11338f4093d9a14ad286f5e4a4 | 5992c932bf01602a0f33710113659e928cb15f93 | /ask/qa/admin.py | cbad346012d084778cf2cd815ca41c01078a5866 | [] | no_license | ArystanK/stepik | 9efe280f3503067123c12455f00020dc994702cc | 52cc2d6fbe956887f02cbceb6254501e9c55e4c4 | refs/heads/master | 2023-03-31T14:57:53.305721 | 2021-03-13T12:40:44 | 2021-03-13T12:40:44 | 347,365,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from django.contrib import admin
from qa.models import Question, Answer
# Register your models here.
# http://www.djangobook.com/en/2.0/chapter06.html
admin.site.register(Question)
admin.site.register(Answer)
| [
"aarystan@outlook.com"
] | aarystan@outlook.com |
cfa26a0c985f3dc65c0bb7b976cb22b6b8e96f4e | b44d0ae229c16a0bd65ea1a664c136b5423a0e8b | /encrypt.py | 09e6110d7c89cbad17a2aad39cbff410cec791a7 | [
"MIT"
] | permissive | aidankirk617/Steganography | dcf78db3f48c0df0313ecbe5b99633056ec0a431 | a49f5d37b5da26e07e42b405008f369e301f41f4 | refs/heads/main | 2023-07-18T02:54:35.550510 | 2023-03-18T18:03:28 | 2023-03-18T18:03:28 | 403,672,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,347 | py | ##############################################################
# Project 4 CS 352 : Functional Programming
# Date: 5/07/21
# Authors: Aidan Kirk, Melanney Orta
# Description:
#
# This file uses the Pillow Image library to encrypt a message
# that the user inputs into an image that can later be decrypted.
##############################################################
def ascii_con(message):  # Pure function
    """Return the 8-bit binary string for every character of *message*.

    Each character is converted to its ordinal value and rendered as a
    zero-padded 8-bit binary string, ready to be embedded bit-by-bit into
    image pixels by the encoder.

    Args:
        message: the text to convert

    Returns:
        list of '0'/'1' strings, one 8-character string per input character
    """
    return ['{:08b}'.format(ord(ch)) for ch in message]
def png_change(jpg, message):
    """Yield modified pixel tuples that hide *message* in the image data.

    Consumes one source pixel per message character, triples its RGB values
    into a 9-value working list, and forces the parity (least significant
    bit) of the first 8 values to match the character's 8 bits: even value
    encodes bit '0', odd value encodes bit '1'.  The 9th value's parity is
    the terminator flag.  Yields three 3-tuples (i.e. three output pixels)
    per character.

    Args:
        jpg: iterable of source pixel tuples (e.g. from Image.getdata())
        message: the text to embed
    """
    convert = ascii_con(message)
    jpg_iter = iter(jpg)
    for i in range(len(convert)):
        bit_len = 8
        section = jpg_iter.__next__()[:3]
        iterate = (lambda sec: sec + sec + sec)(section) # Lambda
        jpg = [val for val in iterate] # List comprehension
        # force each value's parity to the corresponding message bit
        for j in range(bit_len):
            if convert[i][j] == '0':
                if jpg[j] % 2 != 0:
                    jpg[j] -= 1
            elif convert[i][j] == '1':
                if jpg[j] % 2 == 0:
                    if jpg[j] != 0:
                        jpg[j] -= 1
                    else:
                        jpg[j] += 1
        # terminator flag in the 9th value: odd parity is meant to mark the
        # final character, even parity means more characters follow.
        # NOTE(review): when the value is already odd on the last character,
        # the else-branch increments it to even -- looks inconsistent with
        # that intent; confirm against the decoder.
        if i == len(convert) - 1:
            if jpg[-1] % 2 == 0 and jpg[-1] != 0:
                jpg[-1] -= 1
            else:
                jpg[-1] += 1
        else:
            if jpg[-1] % 2 != 0:
                jpg[-1] -= 1
        jpg = tuple(jpg)
        yield jpg[0:3]
        yield jpg[3:6]
        yield jpg[6:9]
def encrypt_jpg(new_image, message):
    """Build a closure that writes the encoded pixels into *new_image*.

    Args:
        new_image: copy of the original image, modified in place when the
            returned closure runs
        message: the message to be encrypted into the image

    Returns:
        nested_encrypt: zero-argument function performing the pixel writes
    """
    width = new_image.size[0]
    (x, y) = (0, 0)
    def nested_encrypt(): # Closure
        """Write each pixel produced by png_change() into the image,
        scanning left-to-right, top-to-bottom from (0, 0)."""
        nonlocal x, y
        for jpg in png_change(new_image.getdata(), message):
            new_image.putpixel((x, y), jpg)
            if x == width - 1:
                # wrap to the start of the next row
                x = 0
                y += 1
            else:
                x += 1
    return nested_encrypt # Return a function definition
def encrypter(img_file, image_opener, message):
    """Encrypt *message* into a copy of *img_file* and save it to disk.

    Args:
        img_file: path of the image file to hide the message in
        image_opener: callable that opens an image file (e.g. PIL.Image.open)
        message: the message to encrypt into the image

    Raises:
        ValueError: if the message is empty.
    """
    image = image_opener(img_file)
    if len(message) == 0:
        raise ValueError('Message is empty')
    # work on a copy so the source image stays untouched
    new_image = image.copy()
    enc = encrypt_jpg(new_image, message)
    enc()
    # This var below can be edited to change the name of the encrypted file
    new_image_name = "secret.png"
    # save format is derived from the extension (e.g. "PNG")
    new_image.save(new_image_name, str(new_image_name.split(".")[1].upper()))
    print("Done")
| [
"noreply@github.com"
] | noreply@github.com |
6e2565371a7c9484f4c5fe11f0bbe409bbbf233c | 229e77f42680d6558b70179e6366de3be17d4c53 | /data_loader.py | 8372c6284839fd78005be09a2c013adbdd50c8ab | [] | no_license | leibovic/mrscutronadotcom | 33e2059fd3dcef71adf72fe457518da09420c4dd | 0267992c122615c9b28b88b8b3fe49b6c9af26aa | refs/heads/master | 2020-05-29T11:57:52.802493 | 2013-11-23T02:04:58 | 2013-11-23T02:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | import model
import csv
from datetime import date
import datetime
import time
import urllib
def load_comments():
comment_id = 1
timestamp = datetime.datetime.now()
user_id = 1
post_pk = 6
content = 'comment'
new_comment = model.Comment(comment_id=comment_id,timestamp=timestamp,user_id=user_id,post_pk=post_pk, content=content)
model.session.add(new_comment)
model.session.commit()
def load_users():
with open("data/users","rb") as f:
reader=csv.reader(f,delimiter='\n')
for row in reader:
data = row[0].split('|')
first_name = data[0]
last_name = data[1]
email = data[2]
password = data[3]
period = data[4]
school_id = data[5]
salt=data[6]
new_user = model.User(first_name=first_name,last_name=last_name,email=email,password=password,period=period,school_id=school_id,salt=salt)
model.session.add(new_user)
model.session.commit()
f.close()
def load_notes():
with open("data/notes","rb") as f:
reader=csv.reader(f,delimiter='\n')
for row in reader:
data = row[0].split('|')
id = data[0]
link = data[1]
link = urllib.quote(link)
ndate = time.strptime(data[2],"%d-%b-%Y")
ndate = date(ndate[0],ndate[1],ndate[2])
desc = data[3]
new_notes = model.Notes(id=id, link=link,created_on=ndate, description=desc)
model.session.add(new_notes)
model.session.commit()
f.close()
def load_posts():
with open("data/posts","rb") as f:
reader=csv.reader(f,delimiter='\n')
for row in reader:
data = row[0].split('|')
post_id = data[0]
ndate = time.strptime(data[1],"%d-%b-%Y")
ndate = date(ndate[0],ndate[1],ndate[2])
content = data[2]
user_id = data[3]
title = data[4]
new_posts = model.Post(post_id=post_id, timestamp=ndate, content=content, user_id=user_id,title=title)
model.session.add(new_posts)
model.session.commit()
f.close()
| [
"katiemthom@Katies-MacBook-Pro.local"
] | katiemthom@Katies-MacBook-Pro.local |
055ec397b411b35fba3e8146761d0addd4610000 | a7dea2e55794c6c7161c37ecef1792aacb77db16 | /ResumenContenido/manage.py | 71d6dbce4cf8d0cd90b9a4146c841251cefc9b4d | [] | no_license | Kyntal/RepasoContenido | 186caaa6a6c1ccbfd2c057251bf9ecfa3b04f0dc | 51af993e140afecd7d062532343af5a11120935c | refs/heads/master | 2020-04-05T12:06:51.324805 | 2018-11-09T13:39:44 | 2018-11-09T13:39:44 | 156,859,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point (generated by startproject).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ResumenContenido.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"clandres1985@gmail.com"
] | clandres1985@gmail.com |
261e6d817f1c6e5c39aa39a4651ecd0813d56ffb | 300a2f9f1d5e74bced0e21b7fa14d8afdfc68f66 | /test7.py | 057f45f97b3bddfc66720653aad7c859d7a03b71 | [] | no_license | makoflexite/stepik---auto-tests-course1 | 108748cc73116573b8dcf8ee7deeb745ef931d99 | db4f605fdb03c0436f9ee83b2d0a5fc59d0a0042 | refs/heads/master | 2021-05-20T02:10:32.995306 | 2020-04-01T10:25:22 | 2020-04-01T10:25:22 | 252,141,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | from selenium import webdriver
import time
import math
def calc(x):
    """Compute the task answer ln(|12 * sin(x)|), with x parsed as an int.

    Returns the result as a string, ready to be typed into the form field.
    """
    value = int(x)
    return str(math.log(abs(12 * math.sin(value))))
try:
    link = "http://suninjuly.github.io/redirect_accept.html"
    browser = webdriver.Chrome()
    browser.get(link)
    # clicking the button opens the task in a new browser window
    button = browser.find_element_by_class_name("btn")
    button.click()
    new_window = browser.window_handles[1]
    browser.switch_to_window(new_window)
    # read the x value from the task text, compute the answer and submit it
    field1 = browser.find_element_by_css_selector("label>span:nth-child(2)")
    x = field1.text
    y = calc(x)
    input2 = browser.find_element_by_id("answer")
    input2.send_keys(y)
    button = browser.find_element_by_class_name("btn")
    button.click()
finally:
    # pause so the result of the script can be checked visually
    time.sleep(10)
    # close the browser after all the interactions
    browser.quit()
| [
"mako@flexite.com"
] | mako@flexite.com |
c3ab52e0c857c71ffaabff7df542b4872c48dbcf | 87f574548a321a668f325bc3d120a45366b0b76b | /studioadmin/views/email_users.py | 7f409efcb24684c5ca97d5f8c036492e52fb13ac | [] | no_license | judy2k/pipsevents | 1d19fb4c07e4a94d285e6b633e6ae013da0d1efd | 88b6ca7bb64b0bbbbc66d85d2fa9e975b1bd3081 | refs/heads/master | 2021-01-14T11:11:26.616532 | 2016-10-07T20:47:39 | 2016-10-07T20:55:13 | 36,600,721 | 0 | 0 | null | 2015-05-31T11:51:14 | 2015-05-31T11:51:14 | null | UTF-8 | Python | false | false | 11,607 | py | import ast
import logging
from math import ceil
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template.response import TemplateResponse
from django.shortcuts import HttpResponseRedirect, render
from django.utils.safestring import mark_safe
from django.core.mail.message import EmailMultiAlternatives
from booking.models import Event, Booking
from booking.email_helpers import send_support_email
from studioadmin.forms import EmailUsersForm, ChooseUsersFormSet, \
UserFilterForm
from studioadmin.views.helpers import staff_required, url_with_querystring
from activitylog.models import ActivityLog
logger = logging.getLogger(__name__)
@login_required
@staff_required
def choose_users_to_email(request,
                          template_name='studioadmin/choose_users_form.html'):
    """Let a staff user pick which users to email, optionally filtered.

    Three request shapes are handled:
    - POST containing the 'filter' prefix: restrict the user formset to
      users with OPEN bookings on the selected events/lessons, and stash
      the selected ids in the session;
    - any other POST: the staff user submitted the checkbox formset —
      collect the ticked user ids into the session and redirect to the
      email-composition view;
    - GET: clear any stale event/lesson session data and show all users.
    """
    userfilterform = UserFilterForm(prefix='filter')

    if 'filter' in request.POST:
        event_ids = request.POST.getlist('filter-events')
        lesson_ids = request.POST.getlist('filter-lessons')

        # [''] means "no selection": drop any previous session choice.
        if event_ids == ['']:
            if request.session.get('events'):
                del request.session['events']
            event_ids = []
        elif '' in event_ids:
            # Mixed selection: strip the empty placeholder entry.
            event_ids.remove('')
        else:
            request.session['events'] = event_ids

        # Same handling for lessons.
        if lesson_ids == ['']:
            if request.session.get('lessons'):
                del request.session['lessons']
            lesson_ids = []
        elif '' in lesson_ids:
            lesson_ids.remove('')
        else:
            request.session['lessons'] = lesson_ids

        if not event_ids and not lesson_ids:
            # No filter left: offer every user.
            usersformset = ChooseUsersFormSet(
                queryset=User.objects.all().order_by('first_name', 'last_name')
            )
        else:
            # Only users with an OPEN booking on a selected event/lesson.
            event_and_lesson_ids = event_ids + lesson_ids
            bookings = Booking.objects.filter(event__id__in=event_and_lesson_ids)
            user_ids = set([booking.user.id for booking in bookings
                            if booking.status == 'OPEN'])
            usersformset = ChooseUsersFormSet(
                queryset=User.objects.filter(id__in=user_ids)
                .order_by('first_name', 'last_name')
            )

        # Re-render the filter form pre-populated with the current choice.
        userfilterform = UserFilterForm(
            prefix='filter',
            initial={'events': event_ids, 'lessons': lesson_ids}
        )

    elif request.method == 'POST':
        userfilterform = UserFilterForm(prefix='filter', data=request.POST)
        usersformset = ChooseUsersFormSet(request.POST)

        if usersformset.is_valid():
            event_ids = request.session.get('events', [])
            lesson_ids = request.session.get('lessons', [])
            users_to_email = []

            for form in usersformset:
                # check checkbox value to determine if that user is to be
                # emailed; add user_id to list
                if form.is_valid():
                    if form.cleaned_data.get('email_user'):
                        users_to_email.append(form.instance.id)

            # The email view reads the selection back from the session.
            request.session['users_to_email'] = users_to_email

            return HttpResponseRedirect(url_with_querystring(
                reverse('studioadmin:email_users_view'), events=event_ids, lessons=lesson_ids)
            )

    else:
        # for a new GET, remove any event/lesson session data
        if request.session.get('events'):
            del request.session['events']
        if request.session.get('lessons'):
            del request.session['lessons']
        usersformset = ChooseUsersFormSet(
            queryset=User.objects.all().order_by('first_name', 'last_name'),
        )

    return TemplateResponse(
        request, template_name, {
            'usersformset': usersformset,
            'userfilterform': userfilterform,
            'sidenav_selection': 'email_users',
        }
    )
@login_required
@staff_required
def email_users_view(request, mailing_list=False,
                     template_name='studioadmin/email_users_form.html'):
    """Compose and send a bulk (or mailing-list) email to selected users.

    Recipients come from the 'subscribed' group when ``mailing_list`` is
    True, otherwise from the user ids stashed in the session by
    ``choose_users_to_email``. Recipients go in BCC, split into batches
    of 99 per message; the sender address is CC'd on the first batch
    only. A 'send_test' POST sends the message to the sender alone.
    """
    if mailing_list:
        subscribed, _ = Group.objects.get_or_create(name='subscribed')
        users_to_email = subscribed.user_set.all()
    else:
        # KeyError here if the session has no 'users_to_email' (i.e. the
        # choose-users step was skipped) — TODO confirm intended.
        users_to_email = User.objects.filter(
            id__in=request.session['users_to_email']
        )

    if request.method == 'POST':
        form = EmailUsersForm(request.POST)
        test_email = request.POST.get('send_test', False)

        if form.is_valid():
            subject = '{}{}'.format(
                form.cleaned_data['subject'],
                ' [TEST EMAIL]' if test_email else ''
            )
            from_address = form.cleaned_data['from_address']
            message = form.cleaned_data['message']
            cc = form.cleaned_data['cc']

            # bcc recipients
            email_addresses = [user.email for user in users_to_email]
            email_count = len(email_addresses)
            number_of_emails = ceil(email_count / 99)

            if test_email:
                # Test mode: single message to the sender only.
                email_lists = [[from_address]]
            else:
                email_lists = [email_addresses]  # will be a list of lists
                # split into multiple emails of 99 bcc plus 1 cc
                if email_count > 99:
                    email_lists = [
                        email_addresses[i : i + 99]
                        for i in range(0, email_count, 99)
                    ]

            host = 'http://{}'.format(request.META.get('HTTP_HOST'))
            try:
                for i, email_list in enumerate(email_lists):
                    ctx = {
                        'subject': subject,
                        'message': message,
                        'number_of_emails': number_of_emails,
                        'email_count': email_count,
                        'is_test': test_email,
                        'mailing_list': mailing_list,
                        'host': host,
                    }
                    # Plain-text body with an HTML alternative part.
                    msg = EmailMultiAlternatives(
                        subject,
                        get_template(
                            'studioadmin/email/email_users.txt').render(
                            ctx
                        ),
                        bcc=email_list,
                        cc=[from_address]
                        if (i == 0 and cc and not test_email) else [],
                        reply_to=[from_address]
                    )
                    msg.attach_alternative(
                        get_template(
                            'studioadmin/email/email_users.html').render(
                            ctx
                        ),
                        "text/html"
                    )
                    msg.send(fail_silently=False)

                    if not test_email:
                        ActivityLog.objects.create(
                            log='{} email with subject "{}" sent to users {} by'
                                ' admin user {}'.format(
                                'Mailing list' if mailing_list else 'Bulk',
                                subject, ', '.join(email_list),
                                request.user.username
                            )
                        )
            except Exception as e:
                # send mail to tech support with Exception
                send_support_email(
                    e, __name__, "Bulk Email to students"
                )
                ActivityLog.objects.create(
                    log="Possible error with sending {} email; "
                        "notification sent to tech support".format(
                        'mailing list' if mailing_list else 'bulk'
                    )
                )
                if not test_email:
                    ActivityLog.objects.create(
                        log='{} email error '
                            '(email subject "{}"), sent by '
                            'by admin user {}'.format(
                            'Mailing list' if mailing_list else 'Bulk',
                            subject, request.user.username
                        )
                    )

            if not test_email:
                messages.success(
                    request,
                    '{} email with subject "{}" has been sent to '
                    'users'.format(
                        'Mailing list' if mailing_list else 'Bulk',
                        subject
                    )
                )
                return HttpResponseRedirect(reverse('studioadmin:users'))
            else:
                messages.success(
                    request, 'Test email has been sent to {} only. Click '
                             '"Send Email" below to send this email to '
                             'users.'.format(
                        from_address
                    )
                )

        # Do this if form not valid OR sending test email
        event_ids = request.session.get('events', [])
        lesson_ids = request.session.get('lessons', [])
        events = Event.objects.filter(id__in=event_ids)
        lessons = Event.objects.filter(id__in=lesson_ids)

        if form.errors:
            totaleventids = event_ids + lesson_ids
            totalevents = Event.objects.filter(id__in=totaleventids)
            messages.error(
                request,
                mark_safe(
                    "Please correct errors in form: {}".format(form.errors)
                )
            )
            # Reset the form with the default subject built from the events.
            form = EmailUsersForm(
                initial={
                    'subject': "; ".join(
                        (str(event) for event in totalevents)
                    )
                }
            )
        if test_email:
            # Keep what the user typed so they can hit "Send Email" next.
            form = EmailUsersForm(request.POST)

    else:
        # GET: event/lesson ids arrive as list literals in the querystring.
        event_ids = ast.literal_eval(request.GET.get('events', '[]'))
        events = Event.objects.filter(id__in=event_ids)
        lesson_ids = ast.literal_eval(request.GET.get('lessons', '[]'))
        lessons = Event.objects.filter(id__in=lesson_ids)

        totaleventids = event_ids + lesson_ids
        totalevents = Event.objects.filter(id__in=totaleventids)
        form = EmailUsersForm(
            initial={
                'subject': "; ".join((str(event) for event in totalevents))
            }
        )

    return TemplateResponse(
        request, template_name, {
            'form': form,
            'users_to_email': users_to_email,
            'sidenav_selection': 'mailing_list'
            if mailing_list else 'email_users',
            'events': events,
            'lessons': lessons,
            'mailing_list': mailing_list
        }
    )
| [
"rebkwok@gmail.com"
] | rebkwok@gmail.com |
82129cfc274273c3eef0e57fffe16503c8fb6a19 | ced9931dbb22a52e67dc381a09318692292d96c4 | /webevent_calendar_ripper.py | 8a261e08ba5a60f01a863b01208fbecc419b888c | [
"MIT"
] | permissive | dpgettings/webevent_calendar_ripper | e4095acedcecf8749db0001b03bffaeb5257ac83 | a74db62749642b47098591517c0d9e27ca0a00ee | refs/heads/master | 2021-01-22T05:19:50.291877 | 2014-01-06T04:18:46 | 2014-01-06T04:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,553 | py | """
To Do:
1. Option to specify filename for output .ics file
2.
"""
import time
import urllib2
from bs4 import BeautifulSoup as BS
import re
from collections import OrderedDict
# ##############################
# Utils
# ##############################
# Module-level constants shared by the converter functions below.

# URL of WebEvent CGI interface
cgi_url = 'http://calendar.ufl.edu/cgi-bin/webevent/webevent.cgi'

# Current UTC Time (frozen at import time — used for DTSTAMP/CREATED).
time_struct = time.gmtime()
utc_mmdd = '{0:02d}{1:02d}'.format(time_struct.tm_mon, time_struct.tm_mday)
utc_hhmmss = '{0:02d}{1:02d}{2:02d}'.format(time_struct.tm_hour, time_struct.tm_min, time_struct.tm_sec)
current_year = str(time_struct.tm_year)
# time in ical string (YYYYMMDDTHHMMSSZ)
ical_time_string = current_year + utc_mmdd +'T'+ utc_hhmmss +'Z'

# Timezone offsets in whole hours (standard and DST), used to detect
# events whose stated time is really "local midnight".
std_tz_hour = int(time.timezone / 3600.)
dst_tz_hour = int(time.altzone / 3600.)

# ##############################################
# UTILS -- Convert vcal event to ical event
# ##############################################
# SubStrings to Kill from each .vcs file string
# (WebEvent emits "KEY: value" with a space after the colon.)
bad_vcal_substring_list = [u'BEGIN: VCALENDAR\nVERSION: 1.0\n', u'\nEND: VCALENDAR\n',
u'BEGIN: VEVENT\n', u'END: VEVENT']

# ical format required keys and defaults; OrderedDict so the output
# VEVENT lines come out in this exact order.
ical_defaults = OrderedDict()
ical_defaults['BEGIN'] = 'VEVENT'
ical_defaults['DTSTART'] = ''
ical_defaults['DTEND'] = ''
ical_defaults['DTSTAMP'] = ical_time_string
ical_defaults['UID'] = ''
ical_defaults['CREATED'] = ical_time_string
ical_defaults['DESCRIPTION'] = ''
ical_defaults['LAST-MODIFIED'] = ical_time_string
ical_defaults['LOCATION'] = ''
ical_defaults['SEQUENCE'] = '0'
ical_defaults['STATUS'] = 'CONFIRMED'
ical_defaults['SUMMARY'] = ''
ical_defaults['TRANSP'] = 'OPAQUE'
ical_defaults['END'] = 'VEVENT'
#
def convert_vcal_to_ical(vcal_string):
    """Convert one WebEvent vCalendar (.vcs) event to an iCalendar VEVENT.

    `vcal_string` is a Unicode string as exported by the WebEvent CGI
    ("KEY: value" lines). Returns a Unicode string of newline-joined
    "KEY:value" lines in the fixed order defined by ``ical_defaults``,
    without BEGIN/END:VCALENDAR wrappers (the caller adds those).
    """
    ical_dict = {}
    ical_string_list = []

    # ---------------------
    # Initial Cleaning
    # ---------------------
    # Remove Bad vcal substrings (calendar/event wrappers)
    for bad_substring in bad_vcal_substring_list:
        vcal_string = vcal_string.replace(bad_substring, '')
    # 'CLASS:' lines — dropped entirely from the output
    class_line_list = re.findall('(CLASS\:.*\n)', vcal_string, re.U)
    for class_line in class_line_list:
        vcal_string = vcal_string.replace(class_line, '')

    # -----------------------------
    # Convert vcal string to dict
    # -----------------------------
    vcal_data_dict = {}
    # Step 1: Parse out Keys ("KEY: " tokens, in order of appearance)
    vcal_keys_raw = re.findall('([A-Z\-]*: )', vcal_string, re.U)
    # Step 2: Split vcal String into Keys and Data
    # --------------------------------------
    # Repeatedly split on the next key; what precedes it is the previous
    # key's value, what follows is the residual still to be split.
    vcal_data_list = []
    # Make First Split (IndexError here if no keys were found — TODO confirm
    # upstream guarantees at least one "KEY: " line)
    vcal_string = vcal_string.split(vcal_keys_raw[0])[-1]
    #
    for raw_key_ind,raw_key in enumerate(vcal_keys_raw[1:]):
        # Split vcal string based on the raw key substring
        vcal_string_split = vcal_string.split(raw_key, 1)
        # Add Extracted Data to List
        vcal_data_list.append(vcal_string_split[0])
        # Keep Residual for next splitting
        vcal_string = vcal_string_split[-1]
    # Add the last residual string (the last piece of extracted line data)
    vcal_data_list.append(vcal_string)

    # Step 3: Process Keys, Add Keys and Data to vcal Data Dictionary
    # -------------------------------------------------------
    for raw_key,vcal_line_data_raw in zip(vcal_keys_raw, vcal_data_list):
        # Process Key (strip the ": " suffix)
        vcal_line_key = raw_key.replace(': ','')
        # Process Data (drop the trailing newline fragment)
        vcal_line_data = vcal_line_data_raw.rsplit('\n', 1)[0]
        # Add to Dictionary
        vcal_data_dict[vcal_line_key] = vcal_line_data

    # ------------------------------
    # Fix the Date of All-Day Events
    # ------------------------------
    if vcal_data_dict['DTSTART'] == vcal_data_dict['DTEND']:
        # *** UGLY HACK ***
        # Check if event time consistent with midnight
        # (if so, change to all-day event)
        # (if not, leave time as-is -- will be added later)
        # *****************
        event_hour_str = vcal_data_dict['DTSTART'].split('T')[-1][0:2]
        event_hour_int = int(event_hour_str)
        if event_hour_int == std_tz_hour or event_hour_int == dst_tz_hour:
            # Event Time is Consistent with Midnight
            # (Make event an All-Day Event)
            # -----------------------------
            # Parse out start date
            start_date_str = vcal_data_dict['DTSTART'].split('T')[0]
            start_date_int = int(start_date_str)
            # Increment to get end date
            # NOTE(review): plain integer +1 breaks at month/year ends
            # (e.g. 20140131 -> 20140132) — confirm acceptable.
            end_date_int = start_date_int + 1
            # Make Final DTSTART and DTEND entries
            ical_dict['DTSTART'] = ';VALUE=DATE:'+ start_date_str
            ical_dict['DTEND'] = ';VALUE=DATE:'+ str(end_date_int)

    # ------------------------------
    # Build ical-Format String
    # ------------------------------
    for ical_key in ical_defaults:
        # Part 1: Get data values from ical and vcal dictionaries
        # -----------------------------------------------------
        # Check if key has already been added to ical_dict
        # (for handling special cases like the start/end dates)
        if ical_key not in ical_dict:
            # Check for Value in vcal Dict
            if ical_key not in vcal_data_dict:
                # No vcal Value for this key -- Add default from ical_default
                ical_dict[ical_key] = ':'+ ical_defaults[ical_key]
            else:
                # There is a vcal value for this key -- Check length of string
                if len(vcal_data_dict[ical_key]) == 0:
                    # If vcal data string is empty -- use default
                    ical_dict[ical_key] = ':'+ ical_defaults[ical_key]
                else:
                    # If vcal string is non-empty -- use vcal string
                    ical_dict[ical_key] = ':'+ vcal_data_dict[ical_key]
        # Part 2: Make Dictionary Entries into Strings, Append to list
        # -----------------------------------------------------------
        ical_string_list.append(ical_key + ical_dict[ical_key])
    # Part 3: Join List with newlines, return
    # ---------------------------------------
    ical_data_string = '\n'.join(ical_string_list)
    return ical_data_string
# ##############################
# Downloading Calendar
# ##############################
def download_calendar(year=current_year, cal_type='academic', debug=False):
    """Download the raw HTML of one year of a WebEvent calendar listing.

    Deals with the details of the calendar CGI interface.

    :param year: calendar year to fetch; anything convertible to int.
    :param cal_type: 'academic' or 'athletic'.
    :param debug: unused, kept for interface compatibility.
    :returns: the calendar page HTML as returned by the server.
    :raises TypeError: if ``year`` is not a number.
    :raises ValueError: if ``cal_type`` is not a known calendar.
    """
    # Map of calendar names to the CGI's internal calendar ids.
    cal_dict = {'academic': 'cal3', 'athletic': 'cal4'}

    # Error Checking
    # ---------------
    # Validate explicitly rather than with ``assert`` (asserts vanish
    # under ``python -O``), and catch only the conversion errors so a
    # KeyboardInterrupt etc. is not swallowed.
    try:
        year = int(year)
    except (TypeError, ValueError):
        raise TypeError('Year must be a number')
    # The original checked ``'academic' in cal_type`` which was backwards
    # (it accepted e.g. 'academical' and then crashed on the dict lookup);
    # test membership in the known-calendar mapping instead.
    if cal_type not in cal_dict:
        raise ValueError("cal_type must be 'academic' or 'athletic'")

    # Construct URL
    # --------------
    cal_url = '{0:s}?cmd=listyear&cal={1:s}&y={2:d}'.format(
        cgi_url, cal_dict[cal_type], year)

    # Get HTML from cal page, closing the handle even on read errors.
    # -------------------------
    cal_socket = urllib2.urlopen(cal_url)
    try:
        cal_page_html = cal_socket.read()
    finally:
        cal_socket.close()
    return cal_page_html
# ##############################
# Parsing HTML
# ##############################
def parse_calendar(cal_page_html=None):
    """Extract WebEvent event IDs from a calendar listing page.

    Deals with details of internal calendar-page HTML formatting.
    Returns a list of six-digit event ID strings, in page order.
    """
    # The caller must supply the HTML; there is no sensible default.
    assert cal_page_html is not None

    soup = BS(cal_page_html)
    event_ids = []

    # Event IDs are embedded in the hrefs of <a> tags sitting inside
    # <div class="listeventtitle"> elements.
    for title_div in soup.find_all('div', class_='listeventtitle'):
        anchor = title_div.find('a')
        if anchor is None:
            # Dummy title divs carry no link; skip them.
            continue
        href = anchor['href']
        # Pull out the "&id=NNNNNN&" fragment, then reduce it to the digits.
        id_fragment = re.findall('(&id=\d{6}&)', href)[0]
        event_ids.append(id_fragment.replace('&', '').split('=')[-1])

    return event_ids
# ##############################
# Downloading Event .vcs Data
# ##############################
def download_event_data(**kwargs):
    """Download the .vcs data for every event on the requested calendar.

    ``kwargs`` are forwarded to ``download_calendar`` (year, cal_type).
    Returns a list of Unicode strings, one per event, decoded as latin-1.
    """
    # -----------------
    # Get List of EventIDs on Desired Calendar
    # -----------------
    # Download Calendar HTML
    cal_page_html = download_calendar(**kwargs)
    # Parse Calendar HTML for list of eventIDs
    event_id_list = parse_calendar(cal_page_html=cal_page_html)
    # -----------------
    # Get Event Data
    # -----------------
    event_data_list = []
    # This is how to get event data from the WebEvent API using eventIDs
    base_url = '{0:s}?cmd=e2vcal'.format(cgi_url)
    # Loop over eventIDs (one HTTP request per event)
    for event_ind,event_id_string in enumerate(event_id_list):
        # # **************************************
        # if event_ind>2: break
        # # **************************************
        # Construct URL of vcs data
        event_data_url = '{0:s}&id={1:s}'.format(base_url, event_id_string)
        # Download Event Data
        event_data_socket = urllib2.urlopen(event_data_url)
        event_data_ascii = event_data_socket.read()
        # Decode into Unicode string
        # (latin-1 never fails, unlike utf-8 — see commented line)
        #event_data = event_data_ascii.decode('utf-8')
        event_data = event_data_ascii.decode('latin-1')
        # Append to list of event data strings
        event_data_list.append(event_data)
    return event_data_list
# ##############################
# Making ical file
# ##############################
def make_ical(**kwargs):
    """Build a complete iCalendar file string for the requested calendar.

    ``kwargs`` are forwarded to ``download_event_data`` (year, cal_type).
    Returns a Unicode string: VCALENDAR header + one VEVENT per calendar
    event + footer.
    """
    # ----------------------
    # Get List of Event Data
    # ----------------------
    # List of Unicode Strings
    event_data_list = download_event_data(**kwargs)

    # =====================================
    # Construct valid(-ish) iCal File
    # =====================================
    cleaned_event_data_list = []
    # ----------------------------
    # Convert Event Data Entries
    # ----------------------------
    # Loop through event data entries
    for event_data_string in event_data_list:
        # Send to conversion function
        # (Unicode in, Unicode out)
        ical_data_string = convert_vcal_to_ical(event_data_string)
        # Append to List of cleaned event entries
        # (Still Unicode)
        cleaned_event_data_list.append(ical_data_string)
    # ----------------------------
    # Add Header and Footer
    # ----------------------------
    # Header (calendar metadata plus the America/New_York VTIMEZONE)
    ical_header_list = ['BEGIN:VCALENDAR', 'VERSION:2.0', 'CALSCALE:GREGORIAN', 'METHOD:PUBLISH',
                        'X-WR-CALNAME:Group Meetings', 'X-WR-TIMEZONE:America/New_York', 'X-WR-CALDESC:',
                        'BEGIN:VTIMEZONE', 'TZID:America/New_York', 'X-LIC-LOCATION:America/New_York',
                        'BEGIN:DAYLIGHT', 'TZOFFSETFROM:-0500', 'TZOFFSETTO:-0400', 'TZNAME:EDT',
                        'DTSTART:19700308T020000', 'RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU', 'END:DAYLIGHT',
                        'BEGIN:STANDARD', 'TZOFFSETFROM:-0400', 'TZOFFSETTO:-0500', 'TZNAME:EST',
                        'DTSTART:19701101T020000', 'RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU', 'END:STANDARD',
                        'END:VTIMEZONE']
    ical_header = '\n'.join(ical_header_list)
    # Footer
    ical_footer = 'END:VCALENDAR\n'
    # Join Into ical file
    # -------------------
    ical_file_string = ''
    ical_file_string += ical_header
    ical_file_string += '\n'
    ical_file_string += '\n'.join(cleaned_event_data_list)
    ical_file_string += '\n'
    ical_file_string += ical_footer
    # ------------
    # Return
    # ------------
    return ical_file_string
# ##############################
# Command-Line Invocation
# ##############################
if __name__ == '__main__':
    # NOTE: this file is Python 2 (``print`` statement below, ``urllib2``
    # above) — run it with a Python 2 interpreter.
    import argparse
    # Command-Line Argument Parsing
    parser = argparse.ArgumentParser(description='Forcibly exporting the University of Florida WebEvent calendar to an iCalendar format file.')
    parser.add_argument('--cal', default='academic', type=str,
                        help="Which calendar to rip. Must be either 'academic' or 'athletic'.",
                        choices=['academic','athletic'], dest='cal_type')
    parser.add_argument('--year', default=2014, type=int,
                        help="Calendar year to rip. Must be convertable to int-type.",
                        dest='year')
    args = parser.parse_args()
    # Call cal-ripper
    ical_file_string = make_ical(year=args.year, cal_type=args.cal_type)
    # Write to File (named "<cal_type>_<year>.ics")
    # ----------------------------
    output_filename = '{0:s}_{1:s}.ics'.format(args.cal_type, str(args.year))
    # Write (encoded to UTF-8 bytes)
    with open(output_filename, 'w') as f:
        f.write(ical_file_string.encode('utf-8'))
    print 'Wrote: '+ output_filename
| [
"daniel.p.gettings@gmail.com"
] | daniel.p.gettings@gmail.com |
5f5183b00d36f0f2487f1167dba4c6665c2b0648 | 58f2ae3c3034a9fc5218a4a9d15e671d24d6f5d8 | /urlybird/breveurl/views.py | 35123906cc207f17d29a2111d33453059b00b2e0 | [] | no_license | sovello/urly-bird | c92101780a9ef495382884202b8fbc4025000624 | e08581bed189854decff35af6670acff1ec9de51 | refs/heads/master | 2021-01-19T19:05:35.226208 | 2015-06-25T14:42:01 | 2015-06-25T14:42:01 | 37,679,859 | 0 | 1 | null | 2015-06-18T19:21:55 | 2015-06-18T19:21:55 | null | UTF-8 | Python | false | false | 4,850 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import View, DetailView, UpdateView, ListView, CreateView
from django.views.generic.base import RedirectView
from django.views.generic.detail import SingleObjectMixin
from django.utils.decorators import method_decorator
from hashids import *
import json
from .models import Bookmark
from click.models import Click
# Create your views here.
class LoginRequiredMixin(object):
    """Mixin restricting a class-based view to authenticated users."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Wrapping dispatch covers every HTTP method on the view.
        return super().dispatch(request, *args, **kwargs)
class IndexView(ListView):
    """Public paginated list of all bookmarks."""

    header = "Hola Muchacho"
    template_name = 'breveurl/index.html'
    model = Bookmark
    paginate_by = 10
    # Template iterates over this name rather than the default object_list.
    context_object_name = 'bookmark_list'
class UserView(LoginRequiredMixin, ListView):
    """Paginated list of the logged-in user's own bookmarks."""

    header = "BreveURL - Home"
    model = User
    template_name = 'breveurl/home.html'
    paginate_by = 10
    context_object_name = 'bookmark_list'

    def get(self, request, *args, **kwargs):
        # Resolve the current user up front; 404 if the account is gone.
        self.object = get_object_or_404(User, pk=self.request.user.id)
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['current_user'] = self.object
        return context

    def get_queryset(self):
        # Only this user's bookmarks feed the paginated list.
        return self.object.bookmarks.all()
class BookMarkMixin(object):
    """Shared form handling for bookmark create/update views.

    Subclasses are expected to override ``success_msg`` (the concrete
    views do so with a plain class attribute).
    """

    fields = ('url', 'description', 'tags')

    @property # set the message for each action
    def success_msg(self):
        # Sentinel default; concrete views shadow this with a string.
        return NotImplemented

    def form_valid(self, form):
        messages.info(self.request, self.success_msg)
        # Anonymous submissions are attributed to the shared 'breveurl'
        # account; authenticated ones to the requesting user.
        if not self.request.user.is_authenticated():
            form.instance.user = User.objects.get(username='breveurl')
        else:
            form.instance.user = User.objects.get(id=self.request.user.id)
        # Assign the generated short code before saving.
        form.instance.breveurl = shortenURL()
        return super(BookMarkMixin, self).form_valid(form)
class BookmarkCreateView(BookMarkMixin, CreateView):
    """Create a new bookmark; attribution handled by BookMarkMixin."""

    model = Bookmark
    # Class attribute shadows the mixin's success_msg property.
    success_msg = 'Bookmark added successfully'
    template_name = 'breveurl/create_bookmark.html'
    success_url = '/breveurl/home/'
class BookMarkCreateAnonymous(BookMarkMixin, CreateView):
    """Create a bookmark on behalf of an anonymous visitor.

    Anonymous submissions are attributed to the shared 'breveurl'
    account, mirroring ``BookMarkMixin.form_valid``.
    """

    def form_valid(self, form):
        # BUG FIX: the original referenced ``AnonymousUser.objects.get()``,
        # but ``AnonymousUser`` is never imported in this module (and has
        # no ``objects`` manager), so this raised NameError at runtime.
        # Attribute the bookmark to the shared fallback account instead,
        # consistent with BookMarkMixin.
        form.instance.user = User.objects.get(username='breveurl')
        form.instance.breveurl = shortenURL()
        return super(BookMarkCreateAnonymous, self).form_valid(form)
class BookmarkUpdateView(LoginRequiredMixin, BookMarkMixin, UpdateView):
    """Edit an existing bookmark; requires an authenticated user."""

    model = Bookmark
    # Class attribute shadows the mixin's success_msg property.
    success_msg = "Bookmark updated successfully"
    template_name = 'breveurl/update_bookmark.html'
    success_url = '/breveurl/home/'
def shortenURL(anonymous=True):
    """Generate a short, URL-safe code for the next bookmark.

    Encodes ``last bookmark id + 1`` with hashids (salted, min length 4)
    so consecutive bookmarks get distinct, non-guessable short codes.

    :param anonymous: unused; kept for backward compatibility.
    :returns: the short code string.
    """
    from hashids import Hashids  # local import kept from the original

    hashids = Hashids(salt="Kilimokwanza", min_length=4)
    # Use exists() rather than len(queryset): the original
    # len(Bookmark.objects.all()) pulled every row from the database just
    # to learn whether the table is empty.
    if not Bookmark.objects.exists():
        next_id = 1
    else:
        next_id = Bookmark.objects.latest('id').id + 1
    # NOTE(review): read-then-encode is racy under concurrent inserts —
    # two requests can compute the same next_id; confirm acceptable.
    # (Also dropped an unused ``import random`` from the original.)
    return hashids.encrypt(next_id)
def delete_bookmark(request):
    """AJAX endpoint: delete one of the requesting user's bookmarks.

    Expects a POST with 'the_bookmark' (the bookmark id, set by the
    jQuery client) and returns a JSON payload echoing the id so the
    client can remove the corresponding DOM node.
    """
    if request.method == 'POST':
        bookmark_id = request.POST.get('the_bookmark') # this was set in the jQuery
        response_data = {} # preparing the response data

        bookmark = Bookmark(id=bookmark_id, user=request.user)
        bookmark.delete()

        response_data['messages'] = 'Bookmark delete successfully!'
        response_data['delete_node'] = bookmark_id

        return HttpResponse(
            json.dumps(response_data),
            content_type="application/json"
        )
    else:
        # BUG FIX: the original called json.dumps({"That was another post"}),
        # which passes a *set* literal — sets are not JSON serialisable, so
        # every non-POST request raised TypeError. Return a JSON object
        # carrying the same message instead.
        return HttpResponse(
            json.dumps({"messages": "That was another post"}),
            content_type="application/json"
        )
class BreveURLRedirectView():
    # NOTE(review): this class has no RedirectView base and its attributes
    # are never consumed below — it looks like an abandoned stub; confirm
    # before removing.
    permanent = False
    query_string = True
def takemethere(request, urlid):
    """Resolve a short code, record the click, then redirect to the target.

    ``urlid`` is the hashids short code stored in ``Bookmark.breveurl``.
    NOTE(review): non-GET requests fall through and return None (a 500 in
    Django) — confirm intended.
    """
    from datetime import datetime
    if request.method=="GET":
        # Debug print left in place from the original.
        print("We got {}".format(urlid))
        tinyurl = Bookmark.objects.get(breveurl = urlid)
        # Record the click: which bookmark, from where, when, by whom.
        click = Click()
        click.bookmark = tinyurl
        click.ip_address = request.META['REMOTE_ADDR']
        click.accessed_at = datetime.now()
        if request.user.is_anonymous():
            # Anonymous clicks are attributed to the shared account.
            click.user = User.objects.get(username = 'breveurl')
        else:
            click.user = request.user
        click.save()
        return redirect(tinyurl.url, permanent=False)
| [
"sovellohpmgani@gmail.com"
] | sovellohpmgani@gmail.com |
d0452e6ba4cd264c160db8efac12a5b7888c33f0 | febc7b300d502c0dccd2fe0b0a7fdd707af2a450 | /portfolio-project/jobs/migrations/0001_initial.py | eede75bbbc9407ced16dd0b46ae5445fa6ef73c9 | [] | no_license | OJVELEZ/portfolio | 7d0b5a429524299807e128712b5cc61cf91c2248 | 6467b6fb5c6ebf19ed6ac149bd40f29d11a7b65d | refs/heads/main | 2022-12-25T19:15:59.997981 | 2020-10-07T19:22:47 | 2020-10-07T19:22:47 | 302,137,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | # Generated by Django 3.0.3 on 2020-10-07 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.3; creates the initial Job table.
    # Generated migrations are normally left untouched.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images/')),
                ('summary', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"ojvelez@gmail.com"
] | ojvelez@gmail.com |
b747686cd2550844b2734decf0654e9fecd0328d | 6c551955fc9ed4e42e575e972c303d5ea22971b7 | /ebook_converter_bot/__init__.py | ececf1d475bd63bb5b638a5dd8cfe37ea94a4194 | [
"MIT"
] | permissive | budikesuma/ebook-converter-bot | df04cf1d25fd18f0c7d406448b5207c49e9fbfa1 | a6496bf4cbcf7b446b4898678c33db1debee3554 | refs/heads/master | 2023-08-23T11:32:37.717575 | 2021-11-05T18:15:31 | 2021-11-05T18:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | """ Bot initialization"""
import json
import logging
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path
from sys import stdout, stderr
# Resolve package paths relative to the package directory name.
WORK_DIR = Path(__package__).absolute()
PARENT_DIR = WORK_DIR.parent

# read bot config
# NOTE(review): opened without an explicit encoding — relies on the
# platform default; confirm config.json is ASCII/UTF-8 everywhere.
with open(f'{PARENT_DIR}/config.json', 'r') as f:
    CONFIG = json.load(f)
API_KEY = CONFIG['api_key']
API_HASH = CONFIG['api_hash']
BOT_TOKEN = CONFIG['tg_bot_token']
BOT_ID = CONFIG['tg_bot_id']
TG_BOT_ADMINS = CONFIG['tg_bot_admins']

# locale
LOCALE_PATH = WORK_DIR / "data/locales"
LANGUAGES = ['ar', 'en', 'tr']
# NOTE(review): ``_`` here is the loaded locales mapping, not the usual
# gettext alias — easy to misread.
_ = json.loads(Path(WORK_DIR / "data/locales/locales.json").read_text(encoding="utf-8-sig"))
LOCALES = [_[i] for i in LANGUAGES]
# Tag each locale dict with its language code for later lookup.
for code, locale in zip(LANGUAGES, LOCALES):
    locale["code"] = code

# Logging: file handler rotated daily (3 backups), plus stdout for INFO
# and stderr for WARNING+, all sharing one formatter.
LOG_FILE = PARENT_DIR / 'last_run.log'
LOG_FORMAT = "%(asctime)s [%(levelname)s] %(name)s [%(module)s.%(funcName)s:%(lineno)d]: %(message)s"
FORMATTER: logging.Formatter = logging.Formatter(LOG_FORMAT)
handler = TimedRotatingFileHandler(LOG_FILE, when="d", interval=1, backupCount=3)
logging.basicConfig(filename=str(LOG_FILE), filemode='w', format=LOG_FORMAT)
OUT = logging.StreamHandler(stdout)
ERR = logging.StreamHandler(stderr)
OUT.setFormatter(FORMATTER)
ERR.setFormatter(FORMATTER)
OUT.setLevel(logging.INFO)
ERR.setLevel(logging.WARNING)
# Attach everything to the root logger.
LOGGER = logging.getLogger()
LOGGER.addHandler(OUT)
LOGGER.addHandler(ERR)
LOGGER.addHandler(handler)
LOGGER.setLevel(logging.INFO)
| [
"ysh-alsager@hotmail.com"
] | ysh-alsager@hotmail.com |
15a59428a27529aafc46c577811104b43b63a731 | 460027c62df6a6939c342d2d2f49a727c8fc955c | /src/nuxeo/jcr/interfaces.py | 0cd0ba5c447f9981fb9a2c9e36f5c777740674bf | [] | no_license | nuxeo-cps/zope3--nuxeo.jcr | ef6d52272835fa14375308bf5a51dbee68b2252a | 88e83d30232226ad71b6f24a2c00e5ad9ba5e603 | refs/heads/main | 2023-01-23T19:56:27.515465 | 2006-10-20T16:54:01 | 2006-10-20T16:54:01 | 317,994,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | ##############################################################################
#
# Copyright (c) 2006 Nuxeo and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# Author: Florent Guillaume <fg@nuxeo.com>
# $Id$
"""Capsule JCR interfaces.
"""
from zope.interface import Interface
from ZODB.POSException import ConflictError # for reimport
class ProtocolError(ValueError):
    """Raised for malformed messages on the Zope <-> JCR bridge protocol."""
    pass
class IJCRController(Interface):
    """Commands between Zope and the JCR bridge.

    All commands are synchronous.

    The commands may also return JCR events, if some have been sent.
    They are accumulated and can be read by ``getPendingEvents()``.

    (zope.interface convention: methods are declared without ``self``.)
    """

    def connect():
        """Connect the controller to the server.
        """

    def login(workspaceName):
        """Login to a given workspace.

        This is the first command sent. It creates a session on the
        JCR side and puts it into a transaction.

        Returns the root node UUID.
        """

    def prepare():
        """Prepare the current transaction for commit.

        May raise a ConflictError.
        """

    def commit():
        """Commit the prepared transaction, start a new one.
        """

    def abort():
        """Abort the current transaction, start a new one.
        """

    def checkpoint(uuid):
        """Checkpoint: checkin and checkout
        """

    def restore(uuid, versionName=''):
        """Restore a node.

        Return list of uuids to deactivate.
        """

    def getNodeTypeDefs():
        """Get the schemas of the node type definitions.

        Returns a string containing a set of CND declarations.
        System types may be omitted.
        """

    def getNodeType(uuid):
        """Get the type of a node.
        """

    def getNodeStates(uuids):
        """Get the state of several nodes.

        Additional node states may be returned, to improve network
        transfers.

        Returns a mapping of UUID to a tuple (`name`, `parent_uuid`,
        `children`, `properties`, `deferred`).

        - `name` is the name of the node,

        - `parent_uuid` is the UUID of the node's parent, or None if
          it's the root,

        - `children` is a sequence of tuples representing children
          nodes, usually (`name`, `uuid`, `type`), but for a child with
          same-name siblings, (`name`, [`uuid`s], `type`),

        - `properties` is a sequence of (`name`, `value`),

        - `deferred` is a sequence of `name` of the remaining deferred
          properties.

        An error is returned if there's no such UUID.
        """

    def getNodeProperties(uuid, names):
        """Get the value of selected properties.

        Returns a mapping of property name to value.

        An error is returned if the UUID doesn't exist or if one of the
        names doesn't exist as a property.
        """

    def sendCommands(commands):
        """Send a sequence of modification commands to the JCR.

        `commands` is an iterable returning tuples of the form:
        - 'add', parent_uuid, name, node_type, props_mapping, token
        - 'modify', uuid, props_mapping
        - 'remove', uuid
        - 'order' XXX

        A JCR save() is done after the commands have been sent.

        Returns a mapping of token -> uuid, which gives the new UUIDs
        for created nodes.
        """

    def getPendingEvents():
        """Get pending events.

        The pending events are sent asynchronously by the server and
        accumulated until read by this method.
        """

    def getPath(uuid):
        """Get the path of a given UUID.

        Returns the path or None.

        The path is relative to the JCR workspace root.
        """

    def searchProperty(prop_name, value):
        """Search the JCR for nodes where prop_name = 'value'.

        Returns a sequence of (uuid, path).

        The paths are relative to the JCR workspace root.
        """

    def move(uuid, dest_uuid, name):
        """Move the document to another container.
        """

    def copy(uuid, dest_uuid, name):
        """Copy the document to another container.
        """
| [
"devnull@localhost"
] | devnull@localhost |
5a4e0307e782f952605dbf2d809b6b997e45ba16 | b92d45aac3edec2783cfd2a99fab597761f33199 | /axeshop/urls.py | a22f5d197a7bc47f8c5ad1dd3c6b4707188767b4 | [] | no_license | TonyAJ7/simple_shop | 8ae01c086aa9e44070460701966ac523ff72c5ff | bccc796174581aa852a89d23bf476a27b062363a | refs/heads/main | 2023-08-02T16:04:20.685280 | 2021-10-03T12:02:03 | 2021-10-03T12:02:03 | 413,057,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | """axeshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Shopping-cart URLs, reversed under the "cart" namespace.
    path('cart/', include('cart.urls', namespace='cart')),
    # Storefront mounted at the site root, namespace "onlineshop".
    path('', include('onlineshop.urls', namespace='onlineshop')),
    # static() serves user-uploaded media in development only (it returns
    # an empty list when DEBUG is off).
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"djabbaroov@gmail.com"
] | djabbaroov@gmail.com |
15db9912c847ddc41fe4a73c01f6bf56cbbb57e7 | a19b4b0c8648c4e943ab6e80d67c43fb5c179e67 | /score_predict_site/venv/Scripts/pip-script.py | aad60e709fd04606ac9507905f064ea125533b05 | [] | no_license | jiayouxujin/cs17_depot | f1d9c5d6da2c419ee1132544ce828917f7c12774 | 91908f5b2c1e16352ed3677e7ff077bba15452de | refs/heads/master | 2020-04-11T13:10:44.254388 | 2019-03-14T13:36:43 | 2019-03-14T13:36:43 | 161,806,403 | 0 | 5 | null | 2019-03-14T13:36:44 | 2018-12-14T15:46:07 | Python | UTF-8 | Python | false | false | 434 | py | #!C:\Users\93531\Documents\GitHub\cs17_depot\score_predict_site\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip reports a clean program
    # name in its usage/error messages.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Look up and invoke pip's console-script entry point.
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"1319039722@qq.com"
] | 1319039722@qq.com |
55ab31a1fb8e40f0be7d7c50c80d783ea3e9c73e | b2a1fff667cacc70544438a4ca35f4e009a10455 | /normalizing_flows/models/optimization/__init__.py | 722c81ed1620c3803616520ecc3af150fc2b55b9 | [
"MIT"
] | permissive | NahuelCostaCortez/normalizing-flows | 34cb07915c2f99dfc29bef4964b092559a9a74cf | f365a0f1b2a05662e73965b9255d6127cf44e64c | refs/heads/master | 2023-06-26T23:34:41.616682 | 2021-06-09T11:41:12 | 2021-06-09T11:41:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | from .schedules import LinearWarmupSchedule | [
"brian.groenke@colorado.edu"
] | brian.groenke@colorado.edu |
8335c2807bbb6b0b6c7021c995b6e04e41dd8a69 | 5ae6f679ecb6fd1d8d5dee221c371f43ddd9d05b | /src/pb/entities/regencia/cristaleria.py | e7ea50d2330f774c418e0bffefc88f359eb3d173 | [] | no_license | estalg/CeleqBackEnd | 02e858399a13a186ff3551e7a30f8f52805ea774 | 5f08bf710c97fe1211a88a15c92522462d083806 | refs/heads/master | 2022-04-08T10:15:43.509301 | 2020-03-04T19:33:43 | 2020-03-04T19:33:43 | 234,360,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from sqlalchemy import Column, String, Integer
from ..entity import Base
from marshmallow import Schema, fields
class Cristaleria(Base):
    """Inventory record for a piece of laboratory glassware."""

    __tablename__ = 'cristaleria'

    # Composite primary key: an item is identified by name, material and
    # capacity together.
    nombre = Column(String, primary_key=True)
    material = Column(String, primary_key=True)
    capacidad = Column(String, primary_key=True)
    cantidad = Column(Integer)
    caja = Column(String)

    def __init__(self, nombre, material, capacidad, cantidad, caja):
        """Store each received value on the mapped column of the same name."""
        received = dict(nombre=nombre, material=material, capacidad=capacidad,
                        cantidad=cantidad, caja=caja)
        for column_name, value in received.items():
            setattr(self, column_name, value)
class CristaleriaSchema(Schema):
    """Marshmallow (de)serialization schema mirroring the Cristaleria model."""
    nombre = fields.Str()
    material = fields.Str()
    capacidad = fields.Str()
    cantidad = fields.Int()
    caja = fields.Str()
"estivenalg@gmail.com"
] | estivenalg@gmail.com |
457cb1c7a96954037fd6b141b28a35ae28da34ea | 4596bb41523caedba4d0aa2b60731aa687bc3bf5 | /entries/models.py | 98b1153855cf6f8c0af9ee5303a6aeb7cd233f00 | [] | no_license | naveenijeri/Blog_App | 5f38c80352306891c7bff3d5048605601a942f7c | 5908253fb8aeebf764ed88b073eb6a94fbd49d01 | refs/heads/master | 2022-12-10T11:28:32.767460 | 2020-03-18T07:41:54 | 2020-03-18T07:41:54 | 248,153,899 | 0 | 0 | null | 2022-12-08T03:49:44 | 2020-03-18T06:19:16 | Python | UTF-8 | Python | false | false | 519 | py | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
# Create your models here.
class Entry(models.Model):
    """A single blog entry written by a (possibly anonymous) user."""
    entry_title = models.CharField(max_length=50)
    entry_text = models.TextField()
    # Timestamp set once, when the row is first created (auto_now_add).
    entry_date = models.DateTimeField(auto_now_add=True)
    # Author is optional; deleting the user cascades to their entries.
    entry_author = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE)

    class Meta:
        # Avoid the default auto-plural "entrys" in the admin.
        verbose_name_plural = "entries"

    def __str__(self):
        return self.entry_title
| [
"naveen.ijeri123@gmail.com"
] | naveen.ijeri123@gmail.com |
e8b7a1389f35f4a9c774f36a26469d3e1f246357 | 0301df471aa9d0a957676c56cbf98b384c13fd47 | /menu.py | 422d0cb60988050b86fa33efb68caf5279522edd | [] | no_license | thotran2015/FinalProject | 3798ad5e23d162e371a7132a9937bd5a5eb25677 | 770e8c0738ca37704207d19767a7918a34646178 | refs/heads/main | 2023-04-25T20:28:36.186978 | 2021-05-21T17:02:42 | 2021-05-21T17:02:42 | 364,755,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,341 | py | # import kivy module
import kivy
# base Class of your App inherits from the App class.
# app:always refers to the instance of your application
from kivy.app import App
# this restrict the kivy version i.e
# below this kivy version you cannot
# use the app or software
kivy.require('1.9.0')
# to use this must have to import it
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from animation import MetronomeWidget
from kivy.uix.dropdown import DropDown
from kivy.uix.widget import Widget
from kivy.uix.popup import Popup
from kivy.uix.checkbox import CheckBox
from kivy.clock import Clock
from testing_filechooser import FileSelector
from kivy.core.window import Window
from animation import BG_COLOR, TEXT_COLOR
from kivy.uix.floatlayout import FloatLayout
CONFIGURE_COLOR = (1, 0, 0, 1) # red
class CustomDropDown(Widget):
    """Widget exposing a dropdown of tempo-visual options behind one button."""

    def __init__(self, options=('Moving Block', 'Pulsing')):
        """Build the dropdown.

        :param options: iterable of option labels shown in the dropdown.
            The default used to be a mutable list; an immutable tuple
            avoids the shared-mutable-default pitfall and accepts the
            same values.
        """
        Widget.__init__(self)
        self.vis_dropdown = DropDown()
        for vis_opt in options:
            btn = Button(text=vis_opt, height=40, size_hint_y=None)
            # Releasing an item closes the dropdown and reports its text.
            btn.bind(on_release=lambda b: self.vis_dropdown.select(b.text))
            self.vis_dropdown.add_widget(btn)
        # The always-visible main button that opens the dropdown; its
        # caption is replaced by whatever option gets selected.
        self.mainbutton = Button(text='Tempo Visual', size_hint=(None, None))
        self.mainbutton.bind(on_release=self.vis_dropdown.open)
        self.vis_dropdown.bind(on_select=lambda instance, x: setattr(self.mainbutton, 'text', x))
        self.add_widget(self.mainbutton)
# create App class
class TabbedPanelApp(App):
    """Main Kivy app: a metronome with a configurable tempo visual and an
    optional (MIDI) accompaniment, both set through a settings popup."""

    def __init__(self):
        App.__init__(self)
        Window.clearcolor = BG_COLOR
        self.metronome = MetronomeWidget(1, False)
        self.title = 'Intuitive Metronome (IM)'
        self.layout = GridLayout(cols=1, padding=10)
        self.config_button = Button(text="Configure", font_size=24, size_hint_x=0.35, bold=True, color=TEXT_COLOR,
                                    background_color=CONFIGURE_COLOR)
        # UI state mirrored from the widgets by update().
        self.mode = 'Moving Block'
        self.accomp = False
        self.selected_file = None
        # Feedback labels are created later, in build_app_layout().
        self.viz_feedback = None
        self.accomp_feedback = None
        self.file_chooser = FileSelector()
        # Poll once per second to sync app state with the widgets.
        Clock.schedule_interval(self.update, 1)

    def build(self):
        """Assemble the settings popup and main layout; return the root widget."""
        self.vis_dropdown = DropDown()
        popup_layout = self.build_popup_layout(self.vis_dropdown)
        self.config_popup = Popup(title='Metronome Settings', content=popup_layout,
                                  size_hint=(None, None), size=(700, 500), title_size=24)
        # Attach close button press with popup.dismiss action
        self.close_button.bind(on_press=self.config_popup.dismiss)
        self.build_app_layout()
        # Clock.schedule_interval(self.update, 0.2)
        return self.layout

    def update(self, *args):
        """Periodic callback: read widget state, push it to the metronome
        and refresh the feedback labels."""
        self.mode = self.tempo_vis_button.text
        self.selected_file = self.file_chooser.selected_file
        if self.selected_file == 'No file selected':
            self.accomp = False
        else:
            self.accomp = True
            # NOTE(review): 'midi' is matched against the file *name*;
            # assumes MIDI files carry a .midi-ish name -- confirm.
            if 'midi' in self.selected_file:
                self.metronome.accomp_file = self.selected_file
        self.accomp_feedback.text = 'Accompaniment: %s' % self.accomp

        def is_pulsing(x):
            # Map the dropdown caption to (pulsing flag, feedback text);
            # 'Tempo Visual' is the untouched default caption.
            if x == 'Pulsing':
                return True, 'Tempo Vis.: Pulsing'
            elif x == 'Moving Block' or x == 'Tempo Visual':
                return False, 'Tempo Vis.: Moving Block'

        is_pulsed, vis_text = is_pulsing(self.mode)
        self.metronome.pulsing = is_pulsed
        self.viz_feedback.text = vis_text

    def build_app_layout(self):
        """Build the top feedback row and place the metronome below it."""
        top_row = GridLayout(cols=3, size_hint_y=0.2)
        self.viz_feedback = Label(text='Tempo Vis.: %s' % self.mode, font_size=24, bold=True, color=TEXT_COLOR)
        self.accomp_feedback = Label(text='Accompaniment: %s' % self.accomp, font_size=24, bold=True, color=TEXT_COLOR)
        # Attach a callback for the button press event
        self.config_button.bind(on_press=self.onButtonPress)
        top_row.add_widget(self.viz_feedback)
        top_row.add_widget(self.accomp_feedback)
        top_row.add_widget(self.config_button)
        self.layout.add_widget(top_row)
        self.layout.add_widget(self.metronome)

    def build_popup_layout(self, vis_dropdown):
        """Build the settings popup content: tempo-visual dropdown on the
        left, accompaniment file selector on the right, Close below."""
        popup_layout = GridLayout(cols=1, padding=5)
        self.close_button = Button(text="Close", size_hint_y=0.2, font_size=24)
        config_layout = GridLayout(cols=2)
        # Dropdown with the available tempo visuals.
        vis_opts = GridLayout(cols=1, padding=(30, 0, 30, 95))
        for vis_opt in ['Moving Block', 'Pulsing']:
            btn = Button(text=vis_opt, height=40, size_hint_y=None, font_size=24)
            btn.bind(on_release=lambda b: vis_dropdown.select(b.text))
            vis_dropdown.add_widget(btn)
        # The big main button that opens the dropdown; selection replaces
        # its caption.
        self.tempo_vis_button = Button(text='Moving Block', font_size=24, size_hint=(0.7, None))
        self.tempo_vis_button.bind(on_release=vis_dropdown.open)
        vis_dropdown.bind(on_select=lambda instance, x: setattr(self.tempo_vis_button, 'text', x))
        vis_opts.add_widget(Label(text='Select Tempo Visual Below', font_size=24))
        vis_opts.add_widget(self.tempo_vis_button)
        config_layout.add_widget(vis_opts)
        # Accompaniment file selector column.
        acc_check = GridLayout(cols=1, padding=(20, 15, 20, 95))
        selector_label = Label(text='Accompaniment', font_size=25)
        file_selector = self.file_chooser.overview_layout
        acc_check.add_widget(selector_label)
        acc_check.add_widget(file_selector)
        config_layout.add_widget(acc_check)
        # Stack the settings grid above the Close button.
        popup_layout.add_widget(config_layout)
        popup_layout.add_widget(self.close_button)
        return popup_layout

    # Instantiate the modal popup and display
    # On button press - Create a popup dialog with a label and a close button
    def onButtonPress(self, button):
        self.config_popup.open()
# run the App
if __name__ == '__main__':
    # Start the Kivy event loop only when executed as a script.
    TabbedPanelApp().run()
| [
"thotran9@mit.edu"
] | thotran9@mit.edu |
1ffbd9a7b4755de5985c1918e01e034631e1fc93 | dea3132777935c321973e2ec0af47aa3cbf1f191 | /11 HMM/hmm_template.py | fe547aa79d56d526617c10e6e953ad5eda2dc09f | [] | no_license | SBangslund/SE04_AI | c14a11b1db0bbf8fd642b289d6ecdd6256dbb48f | 7a2f5ac41e7b25b4b10a4033d2c940a79d1fd0ff | refs/heads/master | 2022-05-09T06:10:07.110424 | 2020-04-30T14:39:12 | 2020-04-30T14:39:12 | 247,294,833 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | import numpy as np
"""
Hidden Markov Model using Viterbi algorithm to find most
likely sequence of hidden states.
The problem is to find out the most likely sequence of states
of the weather (hot, cold) from a describtion of the number
of ice cream eaten by a boy in the summer.
"""
def main():
    """Run the classic ice-cream HMM demo: for each observation sequence,
    print its total probability (forward) and the most likely hidden
    state sequence (Viterbi)."""
    np.set_printoptions(suppress=True)
    # states[0] is the initial pseudo-state, states[-1] the final one.
    states = np.array(["initial", "hot", "cold", "final"])
    # To simulate starting from index 1, we add a dummy value at index 0
    observationss = [
        [None, 3, 1, 3],
        [None, 3, 3, 1, 1, 2, 2, 3, 1, 3],
        [None, 3, 3, 1, 1, 2, 3, 3, 1, 2],
    ]
    # Markov transition matrix
    # transitions[start, end]
    transitions = np.array([[.0, .8, .2, .0],  # Initial state
                            [.0, .6, .3, .1],  # Hot state
                            [.0, .4, .5, .1],  # Cold state
                            [.0, .0, .0, .0],  # Final state
                            ])
    # P(v|q): emission probability of observing v ice creams in state q.
    # emission[state, observation]
    emissions = np.array([[.0, .0, .0, .0],  # Initial state
                          [.0, .2, .4, .4],  # Hot state
                          [.0, .5, .4, .1],  # Cold state
                          [.0, .0, .0, .0],  # Final state
                          ])
    for observations in observationss:
        print("Observations: {}".format(' '.join(map(str, observations[1:]))))
        probability = compute_forward(states, observations, transitions, emissions)
        print("Probability: {}".format(probability))
        path = compute_viterbi(states, observations, transitions, emissions)
        print("Path: {}".format(' '.join(path)))
        print('')
def inclusive_range(a, b):
    # range() excludes its upper bound; this helper includes it, matching
    # the 1-based pseudocode indexing common in the HMM literature.
    return range(a, b + 1)
def compute_forward(states, observations, transitions, emissions):
    """Forward algorithm: total probability of the observation sequence.

    observations[0] is a dummy entry (the sequence starts at index 1);
    states[0] is the initial pseudo-state and states[-1] the final state.
    transitions and emissions are indexed [state, state] / [state, obs].
    Returns P(observations | model) as a float.
    """
    n = len(states) - 2          # number of emitting states (e.g. hot, cold)
    big_t = len(observations) - 1  # number of real observations
    final = len(states) - 1
    # forward[s, t] = P(o_1..o_t, q_t = s); rows 0 and `final` stay unused.
    forward = np.zeros((len(states), big_t + 1))
    # Initialization: leave the initial state emitting the first symbol.
    for s in range(1, n + 1):
        forward[s, 1] = transitions[0, s] * emissions[s, observations[1]]
    # Recursion: sum over all predecessors.
    for t in range(2, big_t + 1):
        for s in range(1, n + 1):
            forward[s, t] = sum(forward[sp, t - 1] * transitions[sp, s]
                                for sp in range(1, n + 1)) * emissions[s, observations[t]]
    # Termination: transition into the final state.
    return sum(forward[s, big_t] * transitions[s, final] for s in range(1, n + 1))
def compute_viterbi(states, observations, transitions, emissions):
    """Viterbi algorithm: most likely hidden-state sequence.

    Same conventions as compute_forward: observations[0] is a dummy,
    states[0]/states[-1] are the initial/final pseudo-states.
    Returns the list of state names for t = 1..T.
    """
    n = len(states) - 2            # number of emitting states
    big_t = len(observations) - 1  # number of real observations
    final = len(states) - 1
    viterbi = np.zeros((len(states), big_t + 1))
    backpointer = np.zeros((len(states), big_t + 1), dtype=int)
    # Initialization from the initial pseudo-state.
    for s in range(1, n + 1):
        viterbi[s, 1] = transitions[0, s] * emissions[s, observations[1]]
    # Recursion: keep the best predecessor for each (state, time).
    for t in range(2, big_t + 1):
        for s in range(1, n + 1):
            scores = [viterbi[sp, t - 1] * transitions[sp, s] for sp in range(1, n + 1)]
            best = max(range(n), key=lambda i: scores[i])  # first max wins ties
            backpointer[s, t] = best + 1
            viterbi[s, t] = scores[best] * emissions[s, observations[t]]
    # Termination: best state from which to enter the final state.
    end_scores = [viterbi[s, big_t] * transitions[s, final] for s in range(1, n + 1)]
    best_last = max(range(n), key=lambda i: end_scores[i]) + 1
    # Backtrace from t = T down to t = 2.
    path_idx = [best_last]
    for t in range(big_t, 1, -1):
        path_idx.append(backpointer[path_idx[-1], t])
    path_idx.reverse()
    return [states[s] for s in path_idx]
def argmax(sequence):
    """Return the index at which *sequence* attains its maximum.

    Accepts any iterable, including generators (np.argmax would require a
    list first).  Ties resolve to the smallest index, because max() keeps
    the first maximal item it sees.
    """
    indexed_pairs = enumerate(sequence)
    best_pair = max(indexed_pairs, key=lambda pair: pair[1])
    return best_pair[0]
if __name__ == '__main__':
    # Run the demo only when executed as a script.
    main()
| [
"s.bangslund@hotmail.dk"
] | s.bangslund@hotmail.dk |
3aaf3331c8160b13805128f0a48758614d163f12 | e5f7d7706062b7807daafaf5b670d9f273440286 | /stocks/admin.py | 3da94a24279993788e7694d3af8b4fe75814404d | [] | no_license | fchampalimaud/flydb | bd01839c163aa34277091f454f8ad38e3fd45dc4 | 2d3ad9ff5903a26070258f707228334cd765a647 | refs/heads/master | 2021-06-17T15:38:25.517946 | 2018-01-17T16:16:00 | 2018-01-17T16:16:00 | 185,334,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from pathlib import Path
from django.contrib import admin
from django.apps import apps
app = apps.get_app_config(Path(__file__).parent.name)
for model in app.get_models():
admin.site.register(model)
| [
"hugo.cachitas@research.fchampalimaud.org"
] | hugo.cachitas@research.fchampalimaud.org |
74f037f36854ed429ba78246687bfa075c1ec9b2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/689.py | ade9216b11543d12d4e0795d77c30c952d9e8947 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | from itertools import \
product, \
permutations, \
combinations, \
combinations_with_replacement
from functools import reduce, lru_cache
from math import floor,ceil,inf,sqrt
def intercept_time(i, j):
    """Time at which horse i = (position, speed) reaches horse j.

    Returns math.inf when both horses have the same speed (the gap never
    closes).  A negative result means the meeting happened in the past.
    """
    position_gap = j[0] - i[0]
    speed_gap = i[1] - j[1]
    return inf if speed_gap == 0 else position_gap / speed_gap
def intercept_distance(i, j):
    """Position where horse i catches horse j.

    Returns inf when the interception time is negative (i.e. it would
    have happened before t = 0, so it never happens going forward).
    """
    t = intercept_time(i, j)
    if t < 0:
        return inf
    return t * i[1] + i[0]
def solve(D, horses):
    """Speed required to arrive at D at the same moment as the slowest
    surviving horse.

    Mutates *horses*: sorts it by starting position and repeatedly drops
    one of the two front-most horses until a single horse remains.
    """
    horses.sort()
    while len(horses) > 1:
        trailer, leader = horses[-2], horses[-1]
        if intercept_distance(trailer, leader) < D:
            # The horse behind catches the front one before D.
            del horses[-2]
        else:
            del horses[-1]
    bottleneck = horses[0]
    return D / intercept_time(bottleneck, (D, 0))
if __name__ == '__main__':
    import sys,re
    # Input format: first line is T (number of cases); each case is a line
    # "D N" followed by N lines of "position speed".
    data = iter(sys.stdin.read().splitlines())
    T = int(next(data))
    for (case_num, case) in enumerate(data):
        D,N = map(int, case.split())
        horses = []
        for _ in range(N):
            horses.append(tuple(map(int, next(data).split())))
        # Code Jam output convention: cases are numbered from 1.
        print('Case #{}: {}'.format(case_num+1, solve(D, horses)))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
eae9990fc7d211b95d53ce21001e26a1141bf5a6 | be566f2c2370295895fdcdcffa3de1f7ae0dfc5c | /sugarcrm/sugarerror.py | e610a1e61e02e7fb7598fe98d923044881111ad6 | [] | no_license | sboily/xivo-sugarcrm | f7836ec20520caf529ccf7f13a2f65580ed77d2b | ee9d682dea366629fe85d127f3490c03ffe672cb | refs/heads/master | 2016-09-05T18:02:11.813369 | 2014-02-13T00:58:04 | 2014-02-13T00:58:04 | 15,716,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #
# sugarerror.py
#
# Exceptions to be raised when various problems occur while running sugarcrm software
#
class SugarError(Exception):
    """Error description returned by the SugarCRM API.

    Built from the error payload dict, which must contain the keys
    'name', 'description' and 'number' (a KeyError propagates otherwise).
    """

    def __init__(self, data):
        """Keep the interesting fields of the error payload *data*."""
        for field in ('name', 'description', 'number'):
            setattr(self, field, data[field])

    def is_invalid_session(self):
        """True when the server reported error number 11 (invalid session)."""
        return 11 == self.number

    def is_invalid_login(self):
        """True when the server reported error number 10 (invalid login)."""
        return 10 == self.number
class SugarUnhandledException(Exception):
    """Raised for SugarCRM failures that have no more specific handling."""
    pass
def is_error(data):
    """Return True when *data* looks like a SugarCRM error payload.

    A payload is an error when it carries non-None 'name' and
    'description' entries; a missing key means "not an error".
    """
    try:
        # `is not None` instead of `!= None` (identity check is the idiom
        # and is immune to odd __ne__ implementations).
        return data["name"] is not None and data["description"] is not None
    except KeyError:
        return False
| [
"sboily@proformatique.com"
] | sboily@proformatique.com |
62ef9dfed9b46fa60576c418807f15dfdaffd9b1 | 15c60e42bcda3d4b4b4c4faf62344a29e416e2fe | /cmd.py | 753a64b35afd675f59126c7246a2b58146adafd4 | [] | no_license | luobingfirst/everest | a1cb936e4ee551391c23a52d4f48c4c968b16f58 | 32016a960f50b533a8e385a2992ef4dbace8429d | refs/heads/master | 2020-06-03T16:28:30.961905 | 2019-07-14T22:49:38 | 2019-07-14T22:49:38 | 191,649,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,580 | py | import yaml
import time
from kubernetes import client, config
class cmd:
    """Helper around the Kubernetes/Istio APIs that scales deployments and
    re-balances Istio VirtualService traffic weights according to the
    pods' "status" label (their category / subset)."""

    def __init__(self):
        # Uses the local kubeconfig, so this runs outside the cluster.
        config.load_kube_config()
        self.coreApi = client.CoreV1Api()
        self.appsApi = client.AppsV1Api()
        self.customObjectsApi = client.CustomObjectsApi()

    def getPercentFromWeight(self, selector, weight, namespace="default", adjusts=None):
        """
        Compute the traffic percent per subset from the weight policy.

        :param selector: label selector matching all pods (may be "").
        :param weight: target weight per subset (category) name, e.g.
            {"idle": 30, "normal": 50, "busy": 20}.  Must cover every
            category; the sum need not be 100.
        :param namespace: namespace of the pods.
        :param adjusts: per-category pod-count corrections (positive or
            negative), applied before computing the percentages.
        :return: {"percent": ..., "distribution": ...}, or None when no
            pod matched -- callers indexing the result will then fail, so
            only call this with at least one matching pod.
        """
        if adjusts is None:  # avoid a mutable default argument
            adjusts = {}
        # Count pods per category (value of the "status" label).
        if len(selector) > 0:
            selector = selector + ", "
        distribution = {}
        dissum = 0
        for k, v in weight.items():
            category = selector + "status=" + k
            pods = self.coreApi.list_namespaced_pod(namespace, label_selector=category, watch=False)
            pods = pods.items
            distribution[k] = len(pods)
            dissum = dissum + (v * distribution[k])
        nPods = sum(distribution.values())
        if nPods <= 0:
            return
        # Apply the requested per-category adjustments.
        for k, v in adjusts.items():
            distribution[k] = distribution[k] + v
            dissum = dissum + v * weight[k]
        # A category's percent is its share of the weighted pod count.
        percent = {}
        for k, v in distribution.items():
            percent[k] = int(v * weight[k] * 100 / dissum)
        # int() truncation leaves a remainder; dump it on the last
        # category so the percentages add up to exactly 100.
        res = 100 - sum(percent.values())
        # BUG FIX: dict.keys() is a view and is not subscriptable on
        # Python 3; materialize the keys first.
        lastKey = list(percent)[-1]
        percent[lastKey] = percent[lastKey] + res
        print("target percent : " + str(percent))
        return ({"percent": percent, "distribution": distribution})

    def scaleWithWeightPolicy(self, deployment, nPods, vsName, selector, weight, namespace="default"):
        """
        Scale the deployment, then refresh the VirtualService weights.

        :param deployment: name of the deployment.
        :param nPods: desired number of pods.
        :param vsName: name of the virtual service to re-balance.
        :param selector: label selector matching all pods.
        :param weight: target weight per subset, as in getPercentFromWeight.
        :param namespace: namespace of the pods.
        """
        self.scale(deployment, nPods, namespace)
        time.sleep(1)  # let the scale request propagate before counting pods
        ret = self.getPercentFromWeight(selector, weight, namespace)
        percent = ret["percent"]
        self.patchVSWeight(vsName, percent, namespace)

    def changeStatusWithWeightPolicy(self, pod, status, vsName, selector, weight, namespace="default"):
        """
        Relabel a pod's status and update the VirtualService weights.

        :param pod: name of the pod.
        :param status: target status (category) of the pod.
        :param vsName: name of the virtual service.
        :param selector: label selector matching all pods.
        :param weight: target weight per subset, as in getPercentFromWeight.
        :param namespace: namespace of the pods.
        """
        # Fetch the pod to learn its current status label.
        thePodInfo = self.coreApi.list_namespaced_pod(namespace, field_selector="metadata.name=" + pod, watch=False)
        # BUG FIX: the guard used "< 0", which is never true, so an empty
        # result crashed on items[0] below.
        if len(thePodInfo.items) == 0:
            return
        thePod = thePodInfo.items[0]
        thePodStatus = thePod.metadata.labels["status"]
        if thePodStatus == status:
            return
        ret = self.getPercentFromWeight(selector, weight, namespace, {thePodStatus: -1, status: 1})
        percent = ret["percent"]
        distribution = ret["distribution"]
        # The order of (a) relabelling the pod and (b) patching the
        # VirtualService matters.  Only the old category can shrink from
        # 1 pod to 0, and only the target category can grow from 0 to 1:
        #   - category shrinks only: patch first, relabel second;
        #   - category grows only:  relabel first, patch second;
        #   - both: patch to a middle state, relabel, patch to the final
        #     state.  The middle state parks the target's share on some
        #     third category (tmpPod) that still has traffic.
        tmpPod = ""
        for k, v in percent.items():
            if not (k == status or k == thePodStatus) and v > 0:
                tmpPod = k
                break
        pStatus = percent[status]
        if distribution[thePodStatus] == 0:
            if distribution[status] == 1:
                if not tmpPod == "":
                    # Middle state: zero the target, park its share.
                    percent[status] = 0
                    percent[tmpPod] = percent[tmpPod] + pStatus
                    self.patchVSWeight(vsName, percent, namespace)
            else:
                self.patchVSWeight(vsName, percent, namespace)
        self.setStatus(pod, status, namespace)
        if distribution[status] == 1:
            if distribution[thePodStatus] == 0 and not tmpPod == "":
                # Restore the final weights after the relabel.
                percent[status] = pStatus
                percent[tmpPod] = percent[tmpPod] - pStatus
            self.patchVSWeight(vsName, percent, namespace)

    def setStatus(self, pod, status, namespace="default"):
        """Patch the 'status' label of a pod."""
        print("Set status of " + namespace + "." + pod + " to " + status)
        body = {
            "metadata": {
                "labels": {
                    "status": status
                }
            }
        }
        self.coreApi.patch_namespaced_pod(pod, namespace, body)

    def scale(self, deployment, nPods, namespace="default"):
        """Set the replica count of a deployment."""
        # str() so both int and str replica counts print without a TypeError.
        print("Set # of replicas of " + namespace + "." + deployment + " to " + str(nPods))
        body = {
            "spec": {
                "replicas": int(nPods)
            }
        }
        self.appsApi.patch_namespaced_deployment_scale(deployment, namespace, body)

    def listAllPods(self):
        """Print IP, namespace and name of every pod in the cluster."""
        ret = self.coreApi.list_pod_for_all_namespaces(watch=False)
        for i in ret.items:
            print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))

    def deployVS(self, yamlFile, namespace="default"):
        """Create an Istio VirtualService from the first YAML document in yamlFile."""
        version = "v1alpha3"
        group = "networking.istio.io"
        plural = "virtualservices"
        # Close the file deterministically (the original leaked the handle).
        with open(yamlFile) as f:
            body = list(yaml.safe_load_all(f))[0]
        self.customObjectsApi.create_namespaced_custom_object(group, version, namespace, plural, body)

    def deployDR(self, yamlFile, namespace="default"):
        """Create an Istio DestinationRule from the first YAML document in yamlFile."""
        version = "v1alpha3"
        group = "networking.istio.io"
        plural = "destinationrules"
        with open(yamlFile) as f:
            body = list(yaml.safe_load_all(f))[0]
        self.customObjectsApi.create_namespaced_custom_object(group, version, namespace, plural, body)

    def patchVSWeight(self, vsName, percent, namespace="default"):
        """
        Change the subset weights of a VirtualService.

        :param vsName: name of the virtual service.
        :param percent: new weight per subset name, e.g. {"idle": 30,
            "normal": 70}.  The resulting weights must sum to exactly 100
            or the patch will fail.
        :param namespace: namespace of the virtual service.
        """
        print("change " + vsName + " weight to " + str(percent))
        version = "v1alpha3"
        group = "networking.istio.io"
        plural = "virtualservices"
        vs = self.customObjectsApi.get_namespaced_custom_object(group, version, namespace, plural, vsName)
        for rule in vs["spec"]["http"]:
            for route in rule["route"]:
                category = route["destination"]["subset"]
                # Leave routes alone whose subset is not mentioned in percent.
                newWeight = percent.get(category, -1)
                if newWeight >= 0:
                    route["weight"] = newWeight
        self.customObjectsApi.patch_namespaced_custom_object(group, version, namespace, plural, vsName, vs)
"bing.luo@futurewei.com"
] | bing.luo@futurewei.com |
86fb9e94b090262fcacb889a69841a84681fbe62 | a129f03748a75d4345e67f89a1c2f8b989f32028 | /app/user/views.py | b726d365f1bf257f59c28f9131412c80afb09efe | [
"MIT"
] | permissive | berkayersever/recipe-app-api | 5df365dc5d2487da8a53b726c20619e336c0af76 | 9cab2cd8a32e0bf420f4731bf768fb856f216352 | refs/heads/master | 2022-12-03T11:03:06.681861 | 2020-03-27T01:14:03 | 2020-03-27T01:14:03 | 247,578,582 | 0 | 0 | MIT | 2022-11-22T05:25:42 | 2020-03-16T00:50:07 | Python | UTF-8 | Python | false | false | 935 | py | from rest_framework import authentication, generics, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import AuthTokenSerializer, UserSerializer
class CreateUserView(generics.CreateAPIView):
    """POST endpoint that creates a new user via UserSerializer."""
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """POST endpoint that issues a new auth token for valid credentials."""
    serializer_class = AuthTokenSerializer
    # Use the project's default renderers so the browsable API works too.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Retrieve/update endpoint for the authenticated user's own profile."""
    serializer_class = UserSerializer
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        """Return the requesting user, so the view always operates on "me"."""
        return self.request.user
| [
"berkayersever@sabanciuniv.edu"
] | berkayersever@sabanciuniv.edu |
0a623363e3be9cc953a45672ce090585346ee7d8 | 7a22f54ceaa21d4d396be6f6887d9a382137843f | /largest last word.py | fdbab534cc12135cdc4849b93e823ca496e23437 | [] | no_license | ramdharam/MyPythonPrograms | 5834d18d0e181bc0f6d4c9e11854537c7442930a | 6e4b43925d4378a0b314a6bce1c58377fc1eaa35 | refs/heads/master | 2020-03-20T12:31:54.131641 | 2018-06-15T02:41:23 | 2018-06-15T02:41:23 | 137,432,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | class Solution:
# @param A : string
# @return an integer
def lengthOfLastWord(self, A):
if len(A) == 0:
return 0
wordstart, wordend = 0,0
isStart = False
for i in xrange(len(A)):
if wordstart == 0 and wordend==0 and isStart == False and A[i] != ' ':
wordstart = i
isStart = True
elif isStart == True and A[i] == ' ':
wordend = i-1
isStart = False
elif isStart == False and A[i] != ' ':
wordstart = i
isStart = True
if isStart == True:
wordend = i
print (wordend, wordstart)
out = (wordend - wordstart) +1
if out > 0:
return out
else:
return 0
# Manual smoke test: the last word is "World", so this prints 5.
a = Solution()
print(a.lengthOfLastWord("Hello World "))
"ramdharam@gmail.com"
] | ramdharam@gmail.com |
860cae97491071490e0addb396e65b5c40b370d2 | 676aa014105615e6808727023e2b7e520e55ed85 | /new3.py | 325058827c79fb541eeb6c188ca3b3504f964c10 | [] | no_license | woodypeckers/aizhaoyou | 84708ce5587299d82af8f5868c1eab62424a8dd2 | d3e8675cc7e00b4f2edfe2379f93ec45cd521649 | refs/heads/master | 2021-01-21T20:42:39.781472 | 2017-06-18T08:22:02 | 2017-06-18T08:22:02 | 94,673,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | name = tom
flag = False
if name == "luren"
flag = True
else
print name
| [
"1172941844@qq.com"
] | 1172941844@qq.com |
5a0fbe6057d13a313a9e06fd66c0856d1c9f6828 | 7cb684bef6a03ef2b4ed6ee92e2f9bd8f92bf699 | /python/fit_dengue_my_big.py | 46fa9b98e8c10151332b95e602afa839e05542df | [] | no_license | fccoelho/paperLM1 | a36e70b17539ce5546cc0421c236617085ae6220 | f188e36afcc0b53e1b7ad0aa85c29c4fc09734f6 | refs/heads/master | 2021-01-09T05:58:06.851216 | 2016-03-27T21:13:06 | 2016-03-27T21:13:06 | 23,622,514 | 5 | 2 | null | 2015-06-22T18:46:46 | 2014-09-03T14:00:45 | TeX | UTF-8 | Python | false | false | 8,572 | py | #
# Multi-year fit of influenza epidemics
#
import pyximport;
pyximport.install(pyimport=False)
from BIP.Bayes.Melding import FitModel
from scipy.integrate import odeint
from scipy.interpolate import interp1d
import scipy.stats as st
import numpy as np
import pylab as P
import copy
from collections import defaultdict
import datetime
import sys
import pandas as pd
import numba
from numba import jit
# Model parameters (rates are per day; the population is normalized to N = 1).
beta = 1.0  # Transmission coefficient
b1 = 19.9  # high-season beta -- appears unused in this script; TODO confirm
b0 = 1.5  # low beta during winters
eta = .0  # infectivity of asymptomatic infections relative to clinical ones. FIXED
epsilon = 2.8  # latency rate
mu = 0  # nat/mortality rate
m = 1e-6  # influx of cases
tau = 1  # recovery rate. FIXED
N = 1  # Population of Rio (normalized to 1)
#~ Ss = {0: s0, 1: s1, 2: s2} # Multiple Ss map
# Initial conditions for the state variables [S, I, R].
inits = np.array([1, 0.001, 0.0])  # initial values for state variables.
def model(theta):
    """Piecewise SIR model driven by the interpolated Rt series.

    *theta* holds one initial susceptible fraction per epidemic season
    (s0..s9).  Relies on module-level globals defined in the __main__
    block: iRt, t0s, tfs, dt, wl, inits, tau, m and N.  Returns the
    (wl, 3) array of (S, I, R) trajectories.
    """
    # Unpack one S(0) per season.
    s0, s1, s2, s3, s4, s5, s6, s7, s8, s9 = theta
    Ss = {0: s0, 1: s1, 2: s2, 3: s3, 4: s4, 5: s5, 6: s6, 7: s7, 8: s8, 9: s9}  # Multiple Ss map

    @jit('f8[:](f8[:],f8)')
    def sir(y, t):
        '''ODE right-hand side; transmission follows the interpolated Rt.'''
        S, I, R = y
        # Beyond day 728 there is no Rt data, so transmission is cut off.
        beta = 0 if t > 728 else iRt(t) * tau
        lamb = (beta * (I + m) * S)
        return np.array([-lamb,  # dS/dt
                         lamb - tau * I,  # dI/dt
                         tau * I,  # dR/dt
                         ])

    @jit('f8[:,:](f8[:],f8)')
    def jac(y, t):
        '''Analytical Jacobian of sir(), handed to odeint to speed integration.'''
        S, I, R = y
        beta = 0 if t > 728 else iRt(t) * tau
        return np.array([[-(I + m) * beta, -S * beta, 0],
                         [(I + m) * beta, S * beta - tau, 0],
                         [0, tau, 0]])

    Y = np.zeros((wl, 3))
    # Integrate each season separately, resetting S (and capping I) at the
    # season boundaries.
    for i in range(len(Ss)):
        t0 = t0s[i]
        tf = tfs[i]
        if i > 0:
            # Carry over the observed prevalence, capped so S0 + I0 <= N.
            inits[1] = dt['I'][t0 - 1] if N - Ss[i] > dt['I'][t0 - 1] else N - Ss[i]
        inits[0] = Ss[i];  # Define S0
        inits[-1] = N - sum(inits[:2])  # Define R(0)
        Y[t0:tf, :] = odeint(sir, inits, np.arange(t0, tf, 1), Dfun=jac)  # ,tcrit=tcrit)
    return Y
def prepdata(fname, sday=0, eday=None, mao=7):
    """
    Prepare the data for the analysis.

    :param fname: path to the incidence/Rt CSV file.
    :param sday: starting day (row) of the inference window.
    :param eday: final day (row); None means "to the end of the file".
    :param mao: moving average's order (1 disables smoothing).
    :return: dict with 'time' (dates), 'I' (prevalence) and 'Rt'.
    """
    data = pd.read_csv(fname, header=0, delimiter=',', skiprows=[1, 2, 3], parse_dates=True)
    # slicing to the desired period
    data = data[sday:eday]
    pop = pd.read_csv("pop_rio_1980-2012.csv", header=0, delimiter=',', index_col=0)
    dates = [datetime.datetime.strptime(d, "%Y-%m-%d") for d in data.start]
    pop_d = np.array([pop.loc[d.year] for d in dates])  # population on each date
    # BUG FIX: this read len(df), but `df` is undefined (NameError when
    # eday is None); the sliced frame is called `data`.
    eday = len(data) if eday is None else eday
    incidence = data.cases  # daily incidence
    # Converting incidence to prevalence: sum the cases that are still
    # infectious, i.e. a box-car convolution over the infectious period.
    dur = 1. / tau  # infectious period (days)
    # int(dur): numpy rejects a float array length on recent versions.
    rawprev = np.convolve(incidence, np.ones(int(dur)), 'same')
    rawprev.shape = rawprev.size, 1
    rawprev /= pop_d
    # Doing moving average of order mao
    if mao > 1:
        sw = np.ones(mao, dtype=float) / mao  # smoothing window
        # NOTE(review): rawprev is 2-D here while np.convolve expects 1-D;
        # this branch only works if callers keep passing mao=1 -- confirm.
        prev = np.convolve(rawprev, sw, 'same')  # Smoothing data (ma)
    else:
        prev = rawprev
    # Replace the NaN/inf entries of the reproductive number by zero.
    Rt = fix_rt(data.Rt)
    d = {'time': dates, 'I': np.nan_to_num(prev), 'Rt': Rt}
    return d
@np.vectorize
def fix_rt(rt):
    """
    Replace both NaNs and infinities by zero.

    :param rt: reproductive number, scalar (vectorized over arrays)
    :return: rt unchanged when finite, otherwise 0
    """
    return rt if np.isfinite(rt) else 0
# # running the analysys
if __name__ == "__main__":
    # NOTE: this script is Python 2 (bare print statements, xrange, and
    # integer division for nw below).
    dt = prepdata('aux/data_Rt_dengue_big.csv', 0, 728, 1)
    modname = "Dengue_S0_big"
    tcrit = [i for i in xrange(len(dt['time'])) if i]
    # Defining start and end of the simulations
    t0s = [0,  # Start of the 1996 epidemic
           dt['time'].index(datetime.datetime(1997, 12, 15)),  # Start of the 1998 epidemic
           dt['time'].index(datetime.datetime(1998, 12, 21)),  # Start of the 1999 epidemic
           dt['time'].index(datetime.datetime(1999, 12, 13)),  # Start of the 2000 epidemic
           dt['time'].index(datetime.datetime(2000, 12, 18)),  # Start of the 2001 epidemic
           dt['time'].index(datetime.datetime(2001, 9, 10)),  # Start of the 2002 epidemic
           dt['time'].index(datetime.datetime(2005, 8, 15)),  # Start of the 2006 epidemic
           dt['time'].index(datetime.datetime(2006, 9, 25)),  # Start of the 2007 epidemic
           dt['time'].index(datetime.datetime(2007, 8, 27)),  # Start of the 2008 epidemic
           dt['time'].index(datetime.datetime(2008, 9, 1)),  # Start of the 2009 epidemic
           ]
    # First guess for the season ends, immediately overwritten by the
    # curated list below.
    tfs = t0s[1:] + [len(dt['time'])]
    tfs = [dt['time'].index(datetime.datetime(1996, 7, 29)),  # end of the 1996 epidemic
           dt['time'].index(datetime.datetime(1998, 10, 12)),  # end of the 1998 epidemic
           dt['time'].index(datetime.datetime(1999, 8, 23)),  # end of the 1999 epidemic
           dt['time'].index(datetime.datetime(2000, 10, 2)),  # end of the 2000 epidemic
           dt['time'].index(datetime.datetime(2001, 9, 10)),  # end of the 2001 epidemic
           dt['time'].index(datetime.datetime(2002, 9, 2)),  # end of the 2002 epidemic
           dt['time'].index(datetime.datetime(2006, 7, 31)),  # end of the 2006 epidemic
           dt['time'].index(datetime.datetime(2007, 8, 27)),  # end of the 2007 epidemic
           dt['time'].index(datetime.datetime(2008, 9, 1)),  # end of the 2008 epidemic
           725,  # end of the 2009 epidemic
           ]
    print tfs
    # Interpolated Rt, used by model() as the transmission driver.
    iRt = interp1d(np.arange(dt['Rt'].size), np.array(dt['Rt']), kind='linear', bounds_error=False, fill_value=0)
    P.plot(dt['Rt'], '*')
    P.plot(np.arange(0, 728, .2), [iRt(t) for t in np.arange(0, 728, .2)])
    # (several commented-out debugging plots removed here)
    # One susceptible-fraction parameter per season.
    tnames = ['s_{}'.format(i) for i in range(len(t0s))]
    nt = len(tnames)
    pnames = ['S', 'I', 'R']
    nph = len(pnames)
    wl = dt['I'].shape[0]  # window length
    nw = len(dt['time']) / wl  # number of windows
    tf = wl * nw  # total duration of simulation
    inits[1] = dt['I'][0]
    print inits
    # Sanity-check run of the model with all S(0) near 1.
    y = model([.999 * N] * nt)
    P.figure()
    P.plot(dt['I'], '*')
    P.plot(y[:, 1])
    top = y[:, 1].max()
    P.vlines(t0s, 0, top, colors='g')
    P.vlines(tfs, 0, top, colors='r')
    P.legend([pnames[1]])
    P.show()
    # Priors and limits for all parameters (Beta(2, 1) on [0, 1]).
    tpars = [(2, 1)] * nt
    tlims = [(0, 1)] * nt
    F = FitModel(1000, model, inits, tf, tnames, pnames,
                 wl, nw, verbose=1, burnin=200, constraints=[])
    F.set_priors(tdists=nt * [st.beta],
                 tpars=tpars,
                 tlims=tlims,
                 pdists=[st.beta] * nph, ppars=[(1, 1)] * nph, plims=[(0, 1)] * nph)
    # Bayesian melding fit via the DREAM sampler.
    F.run(dt, 'DREAM', likvar=1e-4, pool=False, ew=0, adjinits=False, dbname=modname, monitor=['I', 'S'])
    F.plot_results(['S', 'I'], dbname=modname, savefigs=1)
| [
"lmax.procc@gmail.com"
] | lmax.procc@gmail.com |
e5f8a8990eaa19e1d7f405e37646c5086cba3203 | 85660d4d8743a9ee5040a68f99f3c45f49c3b3ba | /main.py | 10eb3326ff294dccb767118036cc053323706a54 | [] | no_license | francisco-avalos/LA_covid19_data_cases | fc09d7bcb8b84909812da0d78a4df3470cdeaef2 | 58daae3b67da5e7fdda8642f0e349fc0684da97a | refs/heads/master | 2023-04-05T14:34:49.194144 | 2021-04-17T05:36:42 | 2021-04-17T05:36:42 | 294,277,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,298 | py |
# Scrape the LA County COVID-19 case tables, normalise addresses and write
# one raw CSV plus one "(Parsed)" CSV per setting type to the user's Desktop.
# Relies entirely on project helpers in functions/ and parse_address/.
import re
import requests
from bs4 import BeautifulSoup
from functions.web_scrape import convert_scrapped_data_to_dataframe
from functions.la_cases import return_cases, return_cases_NonResidential, rcac_df, hss_df, es_df, nr_df, return_cases_ResCong, return_cases_home, return_cases_educational
from functions.web_scrape import rcac_section, LAC_NR_section, lac_hss_section, lac_es_section
from parse_address.parse_functions import parse_address
from functions.la_cases import add_ZipCode
# Residential Congragate Settings
r1, r2, r3, r4, r5, No_columns = rcac_section()
cases = return_cases_ResCong(r1, r2, r3, r4, r5)
RCAC_DF = convert_scrapped_data_to_dataframe(cases, data_length=No_columns)
RCAC_DF = rcac_df(RCAC_DF)
RCAC_DF.to_csv(r'~/Desktop/Residual_Congregate_and_Acute_Care_Settings.csv', index=False)
# parse_address appends parsed city/state columns extracted from city_name.
RCAC_DF = parse_address(RCAC_DF, RCAC_DF['city_name'])
RCAC_DF.columns = ['location_name','city_name','number_of_confirmed_staff','number_of_confirmed_residents','total_deaths','city',
                   'state']
RCAC_DF.to_csv(r'~/Desktop/Residual_Congregate_and_Acute_Care_Settings(Parsed).csv', index=False)
RCAC_DF = add_ZipCode(RCAC_DF)
RCAC_DF.to_csv(r'~/Desktop/Residual_Congregate_and_Acute_Care_Settings(Parsed_and_ZipCode).csv', index=False)
# Non-Residential Settings
p1, p2, p3, p4, p5, p6, p7, No_columns = LAC_NR_section()
cases = return_cases_NonResidential(p1, p2, p3, p4, p5, p6, p7)
NR_DF = convert_scrapped_data_to_dataframe(cases, data_length=No_columns)
NR_DF = nr_df(NR_DF)
NR_DF.to_csv(r'~/Desktop/LA_County_Non-Residential_Settings.csv', index=False)
NR_DF = parse_address(NR_DF, NR_DF['address'])
NR_DF.columns = ['setting_name','address','total_confirmed_staff','total_confirmed_non_staff','street_address','city',
                 'state','zipcode']
# NOTE(review): the alternative column lists below suggest the county's table
# schema changes over time; keep whichever list matches the current page.
# NR_DF.columns = ['location_name','address','total_confirmed_staff','total_confirmed_non_staff','street_address','city',
#                  'state','zipcode']
# NR_DF.columns = ['location_name','address','total_confirmed_staff','street_address','city','state','zipcode']
# NR_DF.columns = ['location_name','address','total_confirmed_staff','total_non_confirmed_symptomatic_staff','street_address','city',
#                  'state','zipcode']
NR_DF.to_csv(r'~/Desktop/LA_County_Non-Residential_Settings(Parsed).csv', index=False)
## Homeless Service Settings
pat1, pat2, pat3, pat4, pat5, No_columns = lac_hss_section()
cases = return_cases_home(pat1, pat2, pat3, pat4, pat5)
HSS_DF = convert_scrapped_data_to_dataframe(cases, data_length=No_columns)
HSS_DF = hss_df(HSS_DF)
HSS_DF.to_csv(r'~/Desktop/LA_County_Homeless_Service_Settings.csv', index=False)
# Educational Settings
pat1, pat2, pat3, No_columns = lac_es_section()
cases = return_cases_educational(pat1, pat2, pat3)
ES_DF = convert_scrapped_data_to_dataframe(cases, data_length=No_columns)
ES_DF = es_df(ES_DF)
ES_DF.to_csv(r'~/Desktop/LA_County_Educational_Settings.csv', index=False)
# print(ES_DF)
ES_DF = parse_address(ES_DF, ES_DF['address'])
# print(ES_DF.head(60))
ES_DF.columns = ['location_name','address','total_confirmed_staff','total_confirmed_students','street_address','city','state',
                 'zipcode']
ES_DF.to_csv(r'~/Desktop/LA_County_Educational_Settings(Parsed).csv', index=False)
| [
"avalosjr.francisco@gmail.com"
] | avalosjr.francisco@gmail.com |
0899b95451cbc880db36e6cbd9db263eb3aacee2 | 65d8e08503a1642f91d3fae36147a6c60af8afb0 | /api.py | ab05580fbca914df59a76b049aaa444eccda7602 | [] | no_license | hermixy/nanoScada | 96539fffd6fc2d4d25efdad13f1ce6e6c059dc6b | 2926999b3fd6310ccf9ec8ad4196e4c494d39b9e | refs/heads/master | 2020-12-30T17:32:07.711121 | 2014-06-28T19:53:36 | 2014-06-28T19:53:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | import time
import json
from pymongo import MongoClient
# Module-wide Mongo handles: local server, `myTest` database and the
# `messageSensor` collection of {client, name, ts, val} documents.
client = MongoClient('localhost', 27017)
db = client.myTest
coll = db.messageSensor
# Shared response envelope, mutated in place by makeResp(): `data` holds the
# rows of the most recent query and `query` records which helper produced it.
resp = {"type": "collection", "data":[]}
def makeResp (m, query):
    """Reset the global `resp` envelope and fill it from cursor/iterable *m*.

    *query* is stored under resp['query'] so callers can tell which helper
    produced the payload. Each document is echoed (Python 2 debug print).
    """
    resp['data'] = []
    resp['query']=query
    for d in m:
        print d
        resp['data'].append(d)
def findClients ():
    """Load the distinct client ids present in the collection into `resp`."""
    print "findClients"
    m = coll.find().distinct("client")
    print m
    makeResp(m, "findClients")
def findTags(client):
    """Collect the distinct sensor tag names recorded for one client."""
    tag_names = coll.find({'client': client}).distinct("name")
    makeResp(tag_names, "findTags")
def findValsClientTag(client, tag):
    """Fetch the first 10 (ts, val) samples for a client/tag pair."""
    selector = {"client": client, "name": tag}
    projection = {'ts': 1, 'val': 1, '_id': 0}
    cursor = coll.find(selector, projection).sort("ts", 1).limit(10)
    makeResp(cursor, "findValsClientTag")
def findMinuteValsClientTag(client, tag):
    """Fetch the last 60 seconds of (ts, val) samples for a client/tag pair."""
    now = time.time()
    window = {"$gte": now - 60, "$lt": now}
    cursor = coll.find({"client": client, "name": tag, "ts": window},
                       {'ts': 1, 'val': 1, '_id': 0}).sort("ts", 1)
    makeResp(cursor, "findMinuteValsClientTag")
def findHourValsClientTag(client, tag):
    """Fetch the last hour of (ts, val) samples for a client/tag pair."""
    now = time.time()
    window = {"$gte": now - 3600, "$lt": now}
    cursor = coll.find({"client": client, "name": tag, "ts": window},
                       {'ts': 1, 'val': 1, '_id': 0}).sort("ts", 1)
    makeResp(cursor, "findHourValsClientTag")
def findLastValsClientTag(client, tag):
    """Fetch the single newest (ts, val) sample for a client/tag pair."""
    latest = (coll.find({"client": client, "name": tag},
                        {'ts': 1, 'val': 1, '_id': 0})
              .sort("ts", -1)
              .limit(1))
    makeResp(latest, "findLastValsClientTag")
# http://cookbook.mongodb.org/patterns/date_range/
def findByPeriod (client, min0, max0, sort):
    """Fetch samples with min0 < ts < max0 (exclusive bounds), sorted by ts.

    NOTE(review): when max0 < min0 nothing runs, so `resp` silently keeps
    the result of the previous query -- callers cannot tell the difference.
    """
    if ( max0 >= min0):
        m= coll.find ({"client":client, "ts": {"$gt":min0, "$lt":max0}}).sort("ts",sort)
        makeResp (m, "findByPeriod")
def findMinute(client, ts, sort):
    """Fetch the 60 seconds of samples ending at timestamp *ts*."""
    findByPeriod(client, ts - 60, ts, sort)
def findHour(client, ts, sort):
    """Fetch the hour of samples ending at timestamp *ts*."""
    findByPeriod(client, ts - 3600, ts, sort)
def findlastMinute(client, sort):
    """Fetch every sample recorded during the last 60 seconds."""
    cutoff = time.time() - 60
    cursor = coll.find({"client": client, "ts": {"$gt": cutoff}}).sort("ts", sort)
    makeResp(cursor, "findlastMinute")
def findlastHour(client, sort):
    """Fetch every sample recorded during the last hour."""
    cutoff = time.time() - 3600
    cursor = coll.find({"client": client, "ts": {"$gt": cutoff}}).sort("ts", sort)
    makeResp(cursor, "findlastHour")
def findLast(client):
    """Fetch the single most recent sample for a client."""
    newest = coll.find({"client": client}).sort("ts", -1).limit(1)
    makeResp(newest, "findLast")
def findFirst(client):
    """Fetch the oldest recorded sample for a client."""
    oldest = coll.find({"client": client}).sort("ts", 1).limit(1)
    makeResp(oldest, "findFirst")
def calAverage (listVal):
    """Unimplemented placeholder; the actual averaging lives in average()."""
    pass
def average(data):
    """Print the arithmetic mean of the 'val' fields of the dicts in *data*.

    Prints 0 when *data* is empty.

    Fixed: the original ignored its *data* parameter and read the module
    global resp['data'] instead. The only callers pass resp['data'], so
    using the parameter is behaviour-compatible and makes the function
    testable in isolation.
    """
    mean = 0
    if len(data):
        for entry in data:
            mean += entry['val']
        mean = mean / len(data)
    print(mean)
def averageLastHour (client):
    """Load the last hour of samples into resp, then print their mean."""
    findlastHour (client,1)
    average (resp['data'])
def averageLastMinute (client):
    """Load the last minute of samples into resp, then print their mean."""
    findlastMinute (client,1)
    average (resp['data'])
def mainT ():
    """Ad-hoc manual test driver; uncomment the query you want to exercise."""
    #findClients()
    #findLast ("pepe")
    #findFirst ("pepe")
    #findTags ("et001")
    #findByPeriod("pepe", 1403717170, 1403717212, 1)
    #findHour("pepe", 1403717170, 1)
    #averageLastMinute ("xaltu")
    #findValsClientTag ("pepe", "TE-01" )
    #findHourValsClientTag ("pepe", "TE-01")
    findLastValsClientTag ("pepe", "TE-01")
    # Python 2 print of the module-level response dict filled by makeResp().
    print resp
# Script entry point: run the manual test driver.
if __name__ == "__main__":
    mainT()
    pass  # no-op; retained from the original
| [
"lmpizarro@gmail.com"
] | lmpizarro@gmail.com |
ca80a1230dfd9a397f560b80bbe5b0331c23f062 | 30b063c58d774376bc7e4424376d3fff0c276d42 | /python/socket/udpecho_interactive/udpBroadcast.py | 986e0473654a5e493845e95c027f146e134902c9 | [
"MIT"
] | permissive | simonlovgren/tests | e2ec7e33379373fced50b1f9da850d74cdb8fbeb | 58e9a2c471edd65c5ddaee97428aa6d0413873d5 | refs/heads/master | 2023-04-06T23:26:36.152930 | 2023-03-31T20:52:07 | 2023-03-31T20:52:07 | 76,958,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Main entrypoint for UDP chat test.
'''
'''
MIT License
Copyright (c) 2019 Simon Lövgren
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import argparse
import socket
import threading
import signal
globalStop = threading.Event()
class EchoClient():
    """UDP broadcast chat client.

    Opens a broadcast-capable UDP socket, starts a daemon thread that prints
    every datagram received, and (via run()) forwards each line typed on
    stdin as a broadcast packet on the configured port.
    """

    def __init__(self, port):
        self.port = port
        # Broadcast-capable UDP socket with a short timeout so the receive
        # loop can poll the global stop flag frequently.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.socket.settimeout(0.01)
        # Background receiver; daemonized so it never blocks interpreter exit.
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()

    def _receive_thread(self):
        """Print incoming datagrams until the global stop event is set."""
        while not globalStop.is_set():
            try:
                data, addr = self.socket.recvfrom(2048)
            except socket.timeout:
                # Just a timeout; loop around and re-check the stop flag.
                continue
            except socket.error as e:
                print( f'Socket error: [{e}]' )
                continue
            if len(data) > 0:
                print( f'-> {data}' )

    def run(self):
        """Read stdin lines and broadcast each one until stopped."""
        while not globalStop.is_set():
            try:
                line = input()
                self.socket.sendto(line.encode(), ('<broadcast>', self.port))
            except socket.error as e:
                print( f'Socket error: [{e}]' )
            except EOFError:
                pass
'''
Command line stuff
'''
def killHandler( signum, stackFrame ):
    """SIGINT/SIGTERM handler: announce shutdown and trip the stop event."""
    print( f'Exiting.' )
    # Signal kill event
    globalStop.set()
def main( port ):
    """Wire up signal handlers and run the broadcast client until stopped."""
    print( f'Sending to port {port}')
    # Register SIGTERM and SIGINT handler
    signal.signal( signal.SIGINT, killHandler )
    signal.signal( signal.SIGTERM, killHandler )
    client = EchoClient( port )
    client.run()
def parseargs():
    """Parse command-line options.

    Returns:
        argparse.Namespace with one attribute, ``port`` (int, default 8000).
    """
    parser = argparse.ArgumentParser( description = 'UDP broadcast client.' )
    # remote port to broadcast to
    parser.add_argument(
        '--port'
        ,action = 'store'
        ,metavar = '<port>'
        ,help = 'Port to broadcast to.'
        ,type = int
        # Fixed: the default was the string '8000', which argparse only
        # coerced through `type` by accident; use the int directly. The
        # unreachable trailing `pass` after the return was also removed.
        ,default = 8000
    )
    return parser.parse_args()
# Entry point: parse CLI args, then run until SIGINT/SIGTERM arrives.
if __name__ == "__main__":
    args = parseargs()
    main(
        args.port
    )
"lovgren.simon@gmail.com"
] | lovgren.simon@gmail.com |
e6d2771b543c2d19deacd0ce9a4d50f734e645ad | 25692e58dceec1f5be4c7930d353bacafd3ff7b0 | /dbfs/바이러스.py | 27b5755cdc5037631740804def3f183bfe85bae4 | [] | no_license | ub1n/Algorithm | a8617fc56d934e99370c367af364f308431423d6 | c9761941082b678a2882d04db8887afb0d664737 | refs/heads/master | 2023-06-11T11:11:52.573748 | 2021-07-02T13:32:09 | 2021-07-02T13:32:09 | 375,415,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import sys
from collections import deque
n=int(sys.stdin.readline())
m=int(sys.stdin.readline())
graph=[[] for i in range(n+1)]
for i in range(m):
a,b=map(int,(sys.stdin.readline().split()))
graph[a].append(b)
graph[b].append(a)
answer=0
visited=[False]*(n+1)
def dfs(graph,v,visited):
visited[v]=True
for i in graph[v]: #현재노드와 연결된 다른 노드를 재귀적으로 방문
if not visited[i]:
global answer
answer+=1
dfs(graph,i,visited)
dfs(graph,1,visited)
print(answer) | [
"bin951024@naver.com"
] | bin951024@naver.com |
dd4af8ccd881c4ab3f6b34e12aaf051cde9aa1dd | aada09f621fe43869191ac8a119e54ab1c319b5b | /MachineLearning_Carrier_L3/02 ARIMA_code/telecomm_ARIMA.py | eb9e193d1b85fe034a5620785cb726eac5d91aad | [] | no_license | rouxero/Data-Mining-course | a1e582d8de2ef7ba4a61fb5e415dabee0efb6963 | 1108ea2c83d01eb9023ce5c3531cd990f79481e7 | refs/heads/master | 2020-05-24T21:45:58.195354 | 2018-10-22T10:12:14 | 2018-10-22T10:12:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,429 | py | #!-*- coding:utf-8 -*-
import random
import time
import datetime
import pandas as pd
import numpy as np
from statsmodels.tsa.arima_model import ARMA
import sys
from dateutil.relativedelta import relativedelta
from copy import deepcopy
import matplotlib.pyplot as plt
# Synthesize a daily "income" series from 2017-01-01 up to today
# (uniform random in [9000, 12110]); Python 2 prints below.
start = '2016-12-31'
end = time.strftime('%Y-%m-%d')
datestart = datetime.datetime.strptime(start, '%Y-%m-%d')
dateend = datetime.datetime.strptime(end, '%Y-%m-%d')
dayincomedict = {}
value = 9000  # lower bound of the simulated daily income
while datestart < dateend:
    datestart += datetime.timedelta(days=1)
    dayDate = datestart.strftime('%Y-%m-%d')
    income = random.randint(value, 12110)
    dayincomedict[dayDate] = income
# Sort chronologically (ISO date strings sort lexicographically).
dayincomelist = sorted(dayincomedict.items(), key=lambda x: x[0], reverse=False)
print len(dayincomelist)
dayindex = []
dayincome = []
for item in dayincomelist:
    dayindex.append(item[0])
    dayincome.append(item[1])
print dayindex
print dayincome
from pandas.core.frame import DataFrame
import pandas as pd
daydict = {
    "date": dayindex,
    "income": dayincome
}
# Build a date-indexed frame; `ts` is the income column as a time Series.
df = DataFrame(daydict)
# print df.head()
df = df.set_index('date')
df.index = pd.to_datetime(df.index)
ts = df['income']
# print ts.head().index
# def draw_ts(timeseries):
# timeseries.plot()
# plt.title('date & income')
# plt.ylabel("income(w)")
# plt.show()
# draw_ts(ts)
from statsmodels.tsa.stattools import adfuller
#判断时序数据稳定性
# def test_stationarity(timeseries):
# # 这里以一年为一个窗口,每一个时间t的值由它前面12个月(包括自己)的均值代替,标准差同理。
# rolmean = pd.rolling_mean(timeseries, window=12)
# rolstd = pd.rolling_std(timeseries, window=12)
# # plot rolling statistics:
# fig = plt.figure()
# fig.add_subplot()
# orig = plt.plot(timeseries, color='blue', label='Original')
# mean = plt.plot(rolmean, color='red', label='rolling mean')
# std = plt.plot(rolstd, color='black', label='Rolling standard deviation')
# plt.legend(loc='best')
# plt.title('Rolling Mean & Standard Deviation')
# plt.show(block=False)
# # Dickey-Fuller test:
# print 'Results of Dickey-Fuller Test:'
# dftest = adfuller(timeseries, autolag='AIC')
# # dftest的输出前一项依次为检测值,p值,滞后数,使用的观测数,各个置信度下的临界值
# dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
# for key, value in dftest[4].items():
# dfoutput['Critical value (%s)' % key] = value
#
# print dfoutput
#
#
# # ts = data['#Passengers']
# test_stationarity(ts)
# #由于原数据值域范围比较大,为了缩小值域,同时保留其他信息,常用的方法是对数化,取log。
# Log-transform to shrink the value range while preserving the shape.
ts_log = np.log(ts)
# Moving Average
# moving_avg = pd.rolling_mean(ts_log,12)
# plt.plot(ts_log ,color = 'blue')
# plt.plot(moving_avg, color='red')
# plt.show()
# Then take the difference:
# ts_log_moving_avg_diff = ts_log-moving_avg
# ts_log_moving_avg_diff.dropna(inplace = True)
# test_stationarity(ts_log_moving_avg_diff)
# halflife determines the decay factor alpha: alpha = 1 - exp(log(0.5) / halflife)
# expweighted_avg = pd.ewma(ts_log,halflife=12)
# ts_log_ewma_diff = ts_log - expweighted_avg
# test_stationarity(ts_log_ewma_diff)
#
# Differencing
# First-order difference of the log series; dropna removes the leading NaN.
ts_log_diff = ts_log - ts_log.shift()
ts_log_diff.dropna(inplace=True)
# test_stationarity(ts_log_diff)
#
# #3.Decomposing-分解
# # 分解(decomposing) 可以用来把时序数据中的趋势和周期性数据都分离出来:
# from statsmodels.tsa.seasonal import seasonal_decompose
# def decompose(timeseries):
# # 返回包含三个部分 trend(趋势部分) , seasonal(季节性部分) 和residual (残留部分)
# decomposition = seasonal_decompose(timeseries)
# trend = decomposition.trend
# seasonal = decomposition.seasonal
# residual = decomposition.resid
# plt.subplot(411)
# plt.plot(ts_log, label='Original')
# plt.legend(loc='best')
# plt.subplot(412)
# plt.plot(trend, label='Trend')
# plt.legend(loc='best')
# plt.subplot(413)
# plt.plot(seasonal, label='Seasonality')
# plt.legend(loc='best')
# plt.subplot(414)
# plt.plot(residual, label='Residuals')
# plt.legend(loc='best')
# plt.tight_layout()
# return trend, seasonal, residual
# #
# # 消除了trend 和seasonal之后,只对residual部分作为想要的时序数据进行处理
# trend , seasonal, residual = decompose(ts_log)
# residual.dropna(inplace=True)
# test_stationarity(residual)
#
#ACF and PACF plots:
# from statsmodels.tsa.stattools import acf, pacf
# lag_acf = acf(ts_log_diff, nlags=20)
# lag_pacf = pacf(ts_log_diff, nlags=20, method='ols')
# #Plot ACF:
# plt.subplot(121)
# plt.plot(lag_acf)
# plt.axhline(y=0,linestyle='--',color='gray')
# plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
# plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
# plt.title('Autocorrelation Function')
#
# #Plot PACF:
# plt.subplot(122)
# plt.plot(lag_pacf)
# plt.axhline(y=0,linestyle='--',color='gray')
# plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
# plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
# plt.title('Partial Autocorrelation Function')
# plt.tight_layout()
# plt.show()
#
from statsmodels.tsa.arima_model import ARIMA
# # model = ARIMA(ts_log, order=(1, 1, 0))
# # results_ARIMA = model.fit(disp=-1)
# # plt.plot(ts_log_diff)
# # plt.plot(results_AR.fittedvalues, color='red')
# # plt.title('RSS: %.4f'% sum((results_AR.fittedvalues-ts_log_diff)**2))
# # plt.show()
# Fit ARIMA(1,1,0) on the log series (Python 2 print below).
model = ARIMA(ts_log, order=(1, 1, 0))
results_ARIMA = model.fit(disp=-1)
# NOTE(review): predict() is called on the unfitted `model` object and is
# handed `dayindex` as its first argument; statsmodels usually expects
# results_ARIMA.predict(start, end) -- confirm this call is intentional.
reslist= model.predict(dayindex,"2018-07-22","2018-08-22")
print reslist
# # plt.plot(ts_log_diff)
# # plt.plot(results_MA.fittedvalues, color='red')
# # plt.title('RSS: %.4f'% sum((results_MA.fittedvalues-ts_log_diff)**2))
#
# Re-instantiate with order (1,1,1); the fit/plot code below is commented out.
model = ARIMA(ts_log, order=(1, 1, 1))
# results_ARIMA = model.fit(disp=-1)
# plt.plot(ts_log_diff)
# plt.plot(results_ARIMA.fittedvalues, color='red')
# plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff)**2))
# plt.show()
#
#ARIMA拟合的其实是一阶差分ts_log_diff,predictions_ARIMA_diff[i]是第i个月与i-1个月的ts_log的差值。
#由于差分化有一阶滞后,所以第一个月的数据是空的,
# predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
# print predictions_ARIMA_diff.head()
#累加现有的diff,得到每个值与第一个月的差分(同log底的情况下)。
#即predictions_ARIMA_diff_cumsum[i] 是第i个月与第1个月的ts_log的差值。
# predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
#先ts_log_diff => ts_log=>ts_log => ts
#先以ts_log的第一个值作为基数,复制给所有值,然后每个时刻的值累加与第一个月对应的差值(这样就解决了,第一个月diff数据为空的问题了)
#然后得到了predictions_ARIMA_log => predictions_ARIMA
# predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index)
# predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
# predictions_ARIMA = np.exp(predictions_ARIMA_log)
# plt.figure()
# plt.plot(ts)
# plt.plot(predictions_ARIMA)
# plt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))
# plt.show()
| [
"wang_feicheng@163.com"
] | wang_feicheng@163.com |
e45a1fac5b581c35a286bd8251ccc0e3f6475205 | ee4a0698f75aa2500bf2ce1b5e5331bc8b57157a | /myproject/course/models.py | 738e4fda86e0a0457361f274b8a857115dd7a817 | [] | no_license | coderrohanpahwa/one_to_one_model | 5398732410027bfad91c5d5db01e528397c87703 | df4fd8ce89d74d41d49671ba8dd5759b80af3d43 | refs/heads/main | 2022-12-25T14:12:31.253350 | 2020-10-06T08:58:38 | 2020-10-06T08:58:38 | 301,669,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.db import models
from django.contrib.auth.models import User
from .views import k
# Create your models here.
class Answer(models.Model):
user=models.OneToMany(User,models.CASCADE)
answer=models.CharField(max_length=100) | [
"coderrohanpahwa@gmail.com"
] | coderrohanpahwa@gmail.com |
911e9801008fc4558b02c6013c59c09bcf1cd5be | f6844123ffd7e2b848503fbd6534248c6b891683 | /dimdimkun/solve.py | bdd93ba14c81a43cda684c18fc0bd92ddc71a82b | [] | no_license | nikkoenggaliano/My-Write-Up | 43178acd23afcc8116af8a473cef2a501867d6cc | 0c94cc73fb9ed9e3c197ef0b2e4f7507fb649a77 | refs/heads/master | 2023-01-15T01:44:02.165347 | 2020-11-27T11:02:26 | 2020-11-27T11:02:26 | 154,408,886 | 6 | 2 | null | 2020-10-01T03:34:19 | 2018-10-23T23:17:15 | CSS | UTF-8 | Python | false | false | 318 | py | #!/usr/bin/env python
# Brute-force key search for the 'dimdimkun' challenge: draw a random
# 4-character printable key, build "<k>-<k>-<k>-<k>" and print every
# candidate whose ASCII character sum equals 1655.
from random import choice
import string
s = string.printable
# NOTE(review): the loop never terminates -- it keeps printing every matching
# key it stumbles on; stop it manually once a usable key appears.
while True:
    j = 0
    final = ""
    key1 = "%c%c%c%c"%(choice(s), choice(s), choice(s) ,choice(s))
    padd = "-"
    final += key1 + padd + key1 + padd + key1 + padd + key1
    # Checksum: sum of the ASCII codes of all 19 characters of `final`.
    for i in final:
        j += ord(i)
    if j == 1655:
        print(final)
| [
"nikkoenggaliano@gmail.com"
] | nikkoenggaliano@gmail.com |
ec41abbb22c1161cd04b0b518c6148e10ad97bd8 | 237c47c072df514689d9fab1fea698fb67aba025 | /signal_coverage/catalogue/models.py | c53b878139a07e189e9df41a55428c5530f127ee | [] | no_license | pivarnikjan/signal_coverage | aa51eb4e44fa28c0b3cc610478b6ce574dfd19da | 6c05926fa01c4aa2f78a9336b448bcff9fb16d95 | refs/heads/master | 2021-01-11T10:25:45.367289 | 2016-12-17T10:42:52 | 2016-12-17T10:42:52 | 76,205,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from django.db import models
class SignalCoverage(models.Model):
    """One coverage row: a serving site and the customer premises equipment
    (CPE) reporting signal from it, each with coordinates and a status."""

    # (stored value, human-readable label) pairs for both status fields.
    STATUSES = (
        ('Up', 'UP'),
        ('Down', 'DOWN'),
    )
    # Serving site identity and location.
    site_name = models.CharField(max_length=50)
    site_lat = models.DecimalField(default=48.0, max_digits=12, decimal_places=9)
    site_lon = models.DecimalField(default=20.0, max_digits=12, decimal_places=9)
    site_status = models.CharField(max_length=4, choices=STATUSES)
    # CPE identity: serving cell plus hardware/subscriber identifiers.
    # NOTE(review): 15-digit IMEI/IMSI values overflow a 32-bit IntegerField
    # on some backends -- consider BigIntegerField or CharField; confirm.
    cpe_cell = models.CharField(max_length=50)
    cpe_imei = models.IntegerField()
    cpe_imsi = models.IntegerField()
    # CPE location and reachability.
    cpe_lat = models.DecimalField(default=48.0, max_digits=12, decimal_places=9)
    cpe_lon = models.DecimalField(default=20.0, max_digits=12, decimal_places=9)
    cpe_status = models.CharField(max_length=4, choices=STATUSES)
"pivarnikjan@gmail.com"
] | pivarnikjan@gmail.com |
21f1b08d9f5ac07d16ff087a4f5e8c424965f906 | fc72eaf5a143087a9a60b6e4cfc8401f4ec6adb6 | /downloader.py | 1a5a7b33e261013722d3e45668d37a11e090595a | [] | no_license | ciancolo/TesiAndroid | 22968ad5c057390e7084abfbc7f8b1531ea56e58 | f327f6fde98d9f3178eda5557837f562fbef0116 | refs/heads/master | 2021-05-30T16:06:40.465739 | 2016-03-24T08:12:34 | 2016-03-24T08:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import urllib
# One-shot downloader (Python 2: urllib.URLopener): fetch the next app the
# crawler recorded as not yet downloaded, save it under App/, mark it done.
import sqlite3
import os
conn=sqlite3.connect("/home/michele/Scrivania/Tesi/Crawler/database.db")
c=conn.cursor()
#url="http://f.giveawaycrew.com/f.php?p=4&i=com.camelgames.fantasyland&v=1.24.0&h=N0Y4UWdjYlN1K1BVd0VYanRqdTl2Zz09&d=K010RjU3RlVxcEdTT1c5T0hFdHpyQT09"
c.execute("select identificativo,titolo,linkDownload from prova2 where scaricato='false' LIMIT 1")
ris=c.fetchone()
# NOTE(review): ris is None once every row is downloaded -- the indexing
# below would then raise TypeError; confirm that is acceptable.
testfile = urllib.URLopener()
testfile.retrieve(ris[2], "App/"+ris[1])
# Parameterised UPDATE flags the row as downloaded.
c.execute("UPDATE prova2 set scaricato=? WHERE identificativo=?",('true',int(ris[0]),))
conn.commit()
conn.close()
| [
"michelezanchi94@gmail.com"
] | michelezanchi94@gmail.com |
8b9a2fbe28639624c74adccfd823ce4364586294 | a1d786fe318dd4b1570e6706208e141842c99b36 | /Project2.py | f60a1c7193f7f89d87b1ea3ebea20ed4c991cfdb | [] | no_license | stevenyeh/Project2 | 2563a5480f1c5b4f90fa93554a269ee0c9428171 | aa629f7731a4c2598331f4f15b0a8b6ba4a39f52 | refs/heads/master | 2021-01-10T03:29:10.178037 | 2016-01-15T01:13:02 | 2016-01-15T01:13:02 | 49,543,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,372 | py | import pandas
import pandasql
import ggplot
import numpy as np
import matplotlib.pyplot as plt
import csv
from datetime import datetime
import scipy
import scipy.stats
import statsmodels.api as sm
import sys
#Wrangling Subway Data
def num_rainy_days(filename):
    """Count the days on which it rained, via SQL over the weather CSV."""
    weather_data = pandas.read_csv(filename)
    query = """
    SELECT COUNT(*) FROM weather_data WHERE rain = 1;
    """
    # pandasql resolves table names from locals(), so the DataFrame must
    # stay bound to the name `weather_data` referenced in the SQL.
    return pandasql.sqldf(query.lower(), locals())
def max_temp_aggregate_by_fog(filename):
    """Max of maxtempi for foggy vs. non-foggy days (grouped by fog flag)."""
    weather_data = pandas.read_csv(filename)
    query = """
    SELECT fog, MAX(maxtempi) FROM weather_data GROUP BY fog;
    """
    # The SQL refers to the local name `weather_data`; keep it unchanged.
    return pandasql.sqldf(query.lower(), locals())
def avg_weekend_temperature(filename):
    """Mean of meantempi over weekends (strftime %w: 0 = Sunday, 6 = Saturday)."""
    weather_data = pandas.read_csv(filename)
    query = """
    SELECT avg(meantempi)
    FROM weather_data
    WHERE cast(strftime('%w', date) as integer) = 0
    OR cast(strftime('%w', date) as integer) = 6
    """
    # The SQL refers to the local name `weather_data`; keep it unchanged.
    return pandasql.sqldf(query.lower(), locals())
def avg_min_temperature(filename):
    """Mean minimum temperature over rainy days warmer than 55°F."""
    weather_data = pandas.read_csv(filename)
    query = """
    SELECT avg(mintempi) FROM weather_data WHERE mintempi > 55 AND rain = 1;
    """
    # The SQL refers to the local name `weather_data`; keep it unchanged.
    return pandasql.sqldf(query.lower(), locals())
def fix_turnstile_data(filenames):
    '''
    Update each row in the text files so there is only one entry per row.

    Each input row looks like:
        C/A, UNIT, SCP, (DATE, TIME, DESC, ENTRIES, EXITS) * k
    and is split into k output rows of 8 columns each, written to a file
    named 'updated_' + <input name>.

    Fixed: file handles were leaked when an exception occurred (now closed
    via `with`), and `(len(line)-3)/5` produced a float under Python 3,
    making range() raise (now integer division).
    '''
    for name in filenames:
        with open(name, 'r') as f_in, \
                open('updated_' + name, 'w', newline='') as f_out:
            reader_in = csv.reader(f_in, delimiter=',')
            writer_out = csv.writer(f_out, delimiter=',')
            for line in reader_in:
                for k in range((len(line) - 3) // 5):
                    # First 3 fields are fixed; the k-th 5-field group follows.
                    writer_out.writerow(line[:3] + line[k * 5 + 3:k * 5 + 8])
def create_master_turnstile_file(filenames, output_file):
    """Concatenate turnstile files under a single header row.

    Writes one header line followed by the raw contents of every file in
    *filenames*, in order, to *output_file*. The input files are assumed
    to carry no header rows of their own.
    """
    with open(output_file, 'w') as master:
        master.write('C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn\n')
        for name in filenames:
            with open(name, 'r') as source:
                master.write(source.read())
def filter_by_regular(filename):
    """Load *filename* as a DataFrame and keep only rows whose DESCn
    column equals 'REGULAR'."""
    frame = pandas.read_csv(filename)
    return frame[frame['DESCn'] == 'REGULAR']
def get_hourly_entries(df):
    '''
    Change cumulative entry counts into per-reading deltas.

    Adds an ENTRIESn_hourly column holding the difference between each
    row's ENTRIESn and the previous row's; the first row (which has no
    predecessor) gets 1, and any remaining NaN in the frame is replaced
    with 1, matching the original contract. Mutates and returns *df*.
    '''
    # Fixed: the original used chained assignment (df['ENTRIESn_hourly'][0] = 1),
    # which is unreliable/raising under modern pandas, plus a no-op
    # shift.fillna. Fill the leading NaN on the column directly instead.
    df['ENTRIESn_hourly'] = (df['ENTRIESn'] - df['ENTRIESn'].shift(1)).fillna(1)
    df.fillna(value=1, inplace=True)
    return df
def get_hourly_exits(df):
    '''
    Same as get_hourly_entries, just for exits.

    Adds an EXITSn_hourly column of per-reading deltas of EXITSn; the first
    row gets 0, and any remaining NaN in the frame is replaced with 0.
    Mutates and returns *df*.
    '''
    # Fixed: chained assignment (df['EXITSn_hourly'][0] = 0) and the no-op
    # shift.fillna, as in get_hourly_entries.
    df['EXITSn_hourly'] = (df['EXITSn'] - df['EXITSn'].shift(1)).fillna(0)
    df.fillna(value=0, inplace=True)
    return df
def time_to_hour(time):
    """Return the hour part of an 'HH:MM:SS' timestamp as an integer."""
    return int(time[:2])
def reformat_subway_dates(date):
    """Convert an MTA date ('mm-dd-yy') to weather-underground format
    ('yyyy-mm-dd')."""
    return datetime.strptime(date, "%m-%d-%y").strftime("%Y-%m-%d")
#Analyzing Subway Data
def entries_histogram(turnstile_weather):
    '''
    Plots two overlaid histograms of ENTRIESn_hourly: one for rainy
    readings (rain == 1) and one for dry readings (rain == 0).

    The strong right skew visible here is why a Welch's t-test (which
    assumes normality) is inappropriate for comparing the two groups.
    Returns the matplotlib.pyplot module so the caller can show/save.
    '''
    plt.figure()
    (turnstile_weather['ENTRIESn_hourly'][turnstile_weather['rain'] == 1]).hist(bins = 200, label = 'Rain') # hourly entries when it is raining
    (turnstile_weather['ENTRIESn_hourly'][turnstile_weather['rain'] == 0]).hist(bins = 200, alpha = 0.5, label = 'Non-Rainy') # hourly entries when it is not raining
    plt.title('Rain vs. Non-Rainy Days')
    plt.xlabel('ENTRIESn_hourly')
    plt.ylabel('Frequency')
    plt.legend()
    plt.xlim([0, 4000])
    return plt
def mann_whitney_plus_means(turnstile_weather):
    '''
    Compare hourly entries on rainy vs. dry days.

    Returns a 4-tuple:
      * mean of ENTRIESn_hourly over rows with rain == 1
      * mean of ENTRIESn_hourly over rows with rain == 0
      * the Mann-Whitney U statistic for the two samples
      * its p-value

    A small p-value suggests the entry distributions differ between rainy
    and non-rainy days (reject the null hypothesis).
    '''
    entries = turnstile_weather['ENTRIESn_hourly']
    wet = entries[turnstile_weather['rain'] == 1]
    dry = entries[turnstile_weather['rain'] == 0]
    U, p = scipy.stats.mannwhitneyu(wet, dry, use_continuity=False)
    return np.mean(wet), np.mean(dry), U, p
def linear_regression(features, values):
    """Fit OLS of *values* on *features* plus a constant column.

    Returns (intercept, params) where params are the non-constant
    coefficients in feature order.
    """
    design = sm.add_constant(features)
    fitted = sm.OLS(values, design).fit()
    return fitted.params[0], fitted.params[1:]
def predictions(dataframe):
    '''
    Predict NYC subway ridership (ENTRIESn_hourly) with a linear model
    over weather features plus one dummy column per turnstile UNIT.
    '''
    unit_dummies = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
    features = dataframe[['rain', 'precipi', 'Hour', 'fog']].join(unit_dummies)
    values = dataframe['ENTRIESn_hourly']
    # Fit via the shared OLS helper, then evaluate the linear model.
    intercept, params = linear_regression(features, values)
    return intercept + np.dot(features, params)
def plot_residuals(turnstile_weather, predictions):
    """Histogram of prediction residuals (actual - predicted hourly entries).

    Returns the matplotlib.pyplot module so the caller can show/save.
    """
    plt.figure()
    (turnstile_weather['ENTRIESn_hourly'] - predictions).hist()
    return plt
def compute_r_squared(data, predictions):
    """Coefficient of determination: 1 - SS_residual / SS_total."""
    residual_ss = ((predictions - data) ** 2).sum()
    total_ss = ((data - np.mean(data)) ** 2).sum()
    return 1 - residual_ss / total_ss
#Visualizing Subway Data
def plot_weather_data(turnstile_weather):
    """Plot average ridership (ENTRIESn_hourly) by hour of day with ggplot.

    NOTE(review): the file does `import ggplot`, so the bare names
    ggplot/aes/geom_point/... used below resolve only if a star-import
    exists elsewhere -- confirm before running.
    """
    turnstile_weather['HOUR'] = turnstile_weather['Hour']
    hour_group = turnstile_weather.groupby('Hour')
    hour_mean = hour_group.aggregate(np.mean)
    plot = ggplot(hour_mean, aes(x = 'HOUR', y = 'ENTRIESn_hourly')) + \
        geom_point() + \
        geom_line() + \
        ggtitle('Average Ridership Based on Hour') + \
        stat_smooth(color = 'red') + \
        xlab('Hour') + \
        ylab('Average Entries')
    # Silence pandas' chained-assignment warning triggered by the HOUR copy.
    pandas.options.mode.chained_assignment = None
    return plot
def plot_weather_data(turnstile_weather):
plot = ggplot(turnstile_weather, aes(x = 'precipi', y = 'ENTRIESn_hourly')) + \
geom_point() + \
geom_line()
return plot
'''
| [
"yeh.steven1@gmail.com"
] | yeh.steven1@gmail.com |
0934476d88d102d3ecdad97f93ea2db2840cd912 | 8081704ffd2f9620ddd04b09e7e601ea0ef93f62 | /auth_api/api.py | 05d4263376d033f210a037614b9cea3e01e57125 | [] | no_license | bharris62/djangoTrelloClone | 3a985ff9f8f140a3ec001cfb060c8f544b2333ab | 015426faecd9ccc8b4e6db7b38c47b3a9fe132aa | refs/heads/master | 2020-08-31T05:57:06.201338 | 2017-06-15T23:24:51 | 2017-06-15T23:24:51 | 94,392,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | from django.contrib.auth import authenticate, login, logout
from rest_framework import status, views
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from .serializers import UserSerializer
class LoginView(views.APIView):
    """Authenticate posted credentials and open a session for the user."""

    @method_decorator(csrf_protect)
    def post(self, request):
        """Return the serialized user on success, 401 otherwise."""
        account = authenticate(
            username=request.data.get("username"),
            password=request.data.get("password")
        )
        # authenticate() yields None on bad credentials; inactive accounts
        # are rejected the same way as unknown ones.
        if account is not None and account.is_active:
            login(request, account)
            return Response(UserSerializer(account).data)
        failure_payload = {
            'status': 'Unauthorized',
            'message': 'Username or password incorrect'
        }
        return Response(failure_payload, status=status.HTTP_401_UNAUTHORIZED)
class LogoutView(views.APIView):
    """End the current authenticated session."""

    # NOTE(review): logging out on GET is state-changing; REST convention
    # favors POST here — confirm no callers rely on GET before changing.
    def get(self, request):
        """Log the requesting user out and return 204 No Content."""
        # BUG FIX: the first parameter was misspelled ``selfself``; it is
        # positional so behavior was unaffected, but it broke convention
        # and any attempt to reference ``self`` inside the method.
        logout(request)
        return Response({}, status=status.HTTP_204_NO_CONTENT)
"blakebharris@gmail.com"
] | blakebharris@gmail.com |
24ed08ee2440a58029972c74cc667b12b8251f36 | 9cb3b5e2117377cfda66a69ee7032dd4c688175b | /test/sagemaker_tests/huggingface_pytorch/training/integration/sagemaker/test_smmp.py | a4e68175fa6f87f586b545e090fa9bcb4bd5e4db | [
"Apache-2.0"
] | permissive | mbencherif/deep-learning-containers | 30e10ac616f2d578040373303a52ed18e3550bf2 | 6d75e645fec20c61922ce64893d84713c55b2a1e | refs/heads/master | 2023-04-16T02:06:54.156977 | 2021-04-28T22:03:53 | 2021-04-28T22:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,450 | py | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from ...integration import (DEFAULT_TIMEOUT)
from sagemaker.huggingface import HuggingFace
from ...integration.sagemaker.timeout import timeout
import sagemaker
# hyperparameters, which are passed into the training job
# (run_glue.py fine-tunes roberta-large on the MNLI GLUE task; max_steps=5
# keeps the integration test short rather than training to convergence)
hyperparameters = {
    'model_name_or_path': 'roberta-large',
    'task_name': 'mnli',
    'per_device_train_batch_size': 8,
    'per_device_eval_batch_size': 4,
    'do_train': True,
    'do_eval': True,
    'do_predict': True,
    'output_dir': '/opt/ml/model',  # SageMaker's standard model output path
    'max_steps': 5,
}
# configuration for running training on smdistributed Model Parallel
# MPI launches 8 processes per host — presumably one per GPU on the
# p3.16xlarge used by the tests below; confirm if the instance type changes.
mpi_options = {
    "enabled": True,
    "processes_per_host": 8,
}
# SageMaker model-parallelism options: the model is split into 2 partitions
# with interleaved pipelining over 2 microbatches; ddp enables data
# parallelism across the remaining processes.
smp_options = {
    "enabled": True,
    "parameters": {
        "microbatches": 2,
        "placement_strategy": "spread",
        "pipeline": "interleaved",
        "optimize": "speed",
        "partitions": 2,
        "ddp": True,
    }
}
# Combined distribution config passed to the HuggingFace estimator.
distribution = {
    "smdistributed": {"modelparallel": smp_options},
    "mpi": mpi_options
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/notebooks.git', 'branch': 'master'}
def _run_smmp_training(sagemaker_session, ecr_image, py_version, instance_count, job_name_base):
    """Launch a HuggingFace SMMP training job and wait for it to finish.

    Shared driver for the single-node and multi-node tests below, which
    previously duplicated this estimator setup line for line; only the
    instance count and the job-name prefix differ between them.
    """
    huggingface_estimator = HuggingFace(
        entry_point='run_glue.py',
        source_dir='./sagemaker/04_distributed_training_model_parallelism/scripts/',
        git_config=git_config,
        # Fixed 8-GPU instance type to match mpi_options' processes_per_host=8.
        instance_type='ml.p3.16xlarge',
        instance_count=instance_count,
        volume_size=400,
        role='SageMakerRole',
        image_uri=ecr_image,
        distribution=distribution,
        py_version=py_version,
        hyperparameters=hyperparameters,
        sagemaker_session=sagemaker_session,
    )
    huggingface_estimator.fit(job_name=sagemaker.utils.unique_name_from_base(job_name_base))


@pytest.mark.processor("gpu")
@pytest.mark.integration("smmp")
@pytest.mark.model("hf_qa_smmp")
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
def test_smmp_gpu(sagemaker_session, framework_version, ecr_image, instance_type, py_version, dist_gpu_backend):
    """Single-node SageMaker model-parallel training smoke test.

    NOTE: the ``instance_type`` fixture is intentionally ignored — SMMP
    requires the fixed 8-GPU ml.p3.16xlarge type (the original code
    overwrote the parameter with a local assignment for the same reason).
    """
    _run_smmp_training(sagemaker_session, ecr_image, py_version,
                       instance_count=1, job_name_base='test-hf-pt-qa-smmp')


@pytest.mark.processor("gpu")
@pytest.mark.integration("smmp")
@pytest.mark.model("hf_qa_smmp_multi")
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
@pytest.mark.multinode(2)
def test_smmp_gpu_multinode(sagemaker_session, framework_version, ecr_image, instance_type, py_version, dist_gpu_backend):
    """Two-node variant of the SMMP smoke test (instance_count=2)."""
    _run_smmp_training(sagemaker_session, ecr_image, py_version,
                       instance_count=2, job_name_base='test-hf-pt-qa-smmp-multi')
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.