blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cdb2b5e0fbdd0e7be2ed5ef38db11c91bd706249 | 30dee3a1031c0520b1ba33aa08cb8524f0f8ef29 | /old_quizzes-and-answers-in-course.py | ada7637964c3d32a2360994bc401308ade5f0b7b | [
"MIT"
] | permissive | gqmaguirejr/Canvas-tools | 561848c7cf9cfc905db2d5ee37ac5815ed3911eb | 8a6fc3af1ebeeffc6578d6ed470329f6f796aa4a | refs/heads/master | 2023-04-13T03:30:34.312603 | 2023-04-07T12:10:01 | 2023-04-07T12:10:01 | 164,110,439 | 33 | 10 | MIT | 2022-04-13T14:22:25 | 2019-01-04T13:25:02 | Python | UTF-8 | Python | false | false | 14,590 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# ./quizzes-and-answers-in-course.py course_id
#
# Output: XLSX spreadsheet with quizzes in course
#
#
# with the option "-v" or "--verbose" you get lots of output - showing in detail the operations of the program
#
# Can also be called with an alternative configuration file:
# ./list_your_courses.py --config config-test.json
#
# Example:
# ./quizzes-in-course.py 11
#
# ./quizzes-in-course.py --config config-test.json 11
#
#
# documentation about using xlsxwriter to insert images can be found at:
# John McNamara, "Example: Inserting images into a worksheet", web page, 10 November 2018, https://xlsxwriter.readthedocs.io/example_images.html
#
# G. Q. Maguire Jr.
#
# based on earlier list-quizzes.py
#
# 2019.01.05
#
import requests, time
import pprint
import optparse
import sys
import json
# Use Python Pandas to create XLSX files
import pandas as pd
# use lxml to access the HTML content
from lxml import html
# use the request pacek to get the HTML give an URL
import requests
#############################
###### EDIT THIS STUFF ######
#############################
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTML requests
global payload # place to store additionally payload when needed for options to HTML requests
# Based upon the options to the program, initialize the variables used to access Canvas gia HTML requests
def initialize(options):
    """Read the Canvas access configuration and set up the request globals.

    Populates the module-level ``baseUrl``, ``header`` and ``payload`` from a
    JSON configuration file (``options.config_filename`` if given, otherwise
    ``config.json``).  Exits the program if the file is missing or malformed.
    """
    global baseUrl, header, payload
    # styled based upon https://martin-thoma.com/configuration-files-in-python/
    if options.config_filename:
        config_file = options.config_filename
    else:
        config_file = 'config.json'
    try:
        with open(config_file) as json_data_file:
            configuration = json.load(json_data_file)
        access_token = configuration["canvas"]["access_token"]
        baseUrl = "https://" + configuration["canvas"]["host"] + "/api/v1"
        header = {'Authorization': 'Bearer ' + access_token}
        payload = {}
    # Was a bare `except:`, which also hid programming errors.  OSError covers
    # a missing/unreadable file, ValueError covers json.JSONDecodeError, and
    # KeyError covers a config file without the expected "canvas" entries.
    except (OSError, ValueError, KeyError) as e:
        print("Unable to open configuration file named {}".format(config_file))
        print("Please create a suitable configuration file, the default name is config.json")
        print("Underlying error: {}".format(e))
        sys.exit()
def get_course_info(course_id):
    """Fetch the course object for ``course_id``.

    Uses GET /api/v1/courses/:id and returns the decoded JSON payload on
    success, otherwise None.
    """
    url = "{0}/courses/{1}".format(baseUrl, course_id)
    if Verbose_Flag:
        print("in course info url: {}".format(url))
    response = requests.get(url, headers=header)
    if Verbose_Flag:
        print("result of getting course info: {}".format(response.text))
    if response.status_code != requests.codes.ok:
        return None
    return response.json()
def list_quizzes(course_id):
    """Return every quiz in the course, following pagination.

    Uses GET /api/v1/courses/:course_id/quizzes and walks the response's
    'next' links (see "Handling Pagination",
    https://community.canvaslms.com/thread/1500) until the full list has
    been collected.
    """
    collected = []
    url = "{0}/courses/{1}/quizzes".format(baseUrl, course_id)
    if Verbose_Flag:
        print("in list_quizzes url: {}".format(url))
    extra_parameters = {'per_page': '100'}
    r = requests.get(url, headers=header, params=extra_parameters)
    if Verbose_Flag:
        print("result of getting quizzes: {}".format(r.text))
    if r.status_code == requests.codes.ok:
        collected.extend(r.json())
        # Keep requesting while the server advertises another page.
        while r.links.get('next', False):
            r = requests.get(r.links['next']['url'], headers=header, params=extra_parameters)
            if Verbose_Flag:
                print("result of getting quizzes for a paginated response: {}".format(r.text))
            collected.extend(r.json())
    return collected
def list_quiz_questions(course_id, quiz_id):
    """Return all questions of one quiz, following pagination.

    Uses GET /api/v1/courses/:course_id/quizzes/:quiz_id/questions and walks
    the response's 'next' links until every page has been collected.
    """
    collected = []
    url = "{0}/courses/{1}/quizzes/{2}/questions".format(baseUrl, course_id, quiz_id)
    if Verbose_Flag:
        print("url: {}".format(url))
    extra_parameters = {'per_page': '100'}
    r = requests.get(url, headers=header, params=extra_parameters)
    if Verbose_Flag:
        print("result of getting questions: {}".format(r.text))
    if r.status_code == requests.codes.ok:
        collected.extend(r.json())
        # Keep requesting while the server advertises another page.
        while r.links.get('next', False):
            r = requests.get(r.links['next']['url'], headers=header, params=extra_parameters)
            if Verbose_Flag:
                print("result of getting questions for a paginated response: {}".format(r.text))
            collected.extend(r.json())
    return collected
def list_quiz_submissions(course_id, quiz_id):
    """Return all submissions of one quiz, following pagination.

    Uses GET /api/v1/courses/:course_id/quizzes/:quiz_id/submissions with
    include[]=submission; the payload nests the list under the
    'quiz_submissions' key.
    """
    collected = []
    url = "{0}/courses/{1}/quizzes/{2}/submissions".format(baseUrl, course_id, quiz_id)
    if Verbose_Flag:
        print("url: {}".format(url))
    extra_parameters = {'include[]': 'submission',
                        'per_page': '100'}
    r = requests.get(url, params=extra_parameters, headers=header)
    if Verbose_Flag:
        print("result of getting submissions: {}".format(r.text))
    if r.status_code == requests.codes.ok:
        collected.extend(r.json().get('quiz_submissions', []))
        # Keep requesting while the server advertises another page.
        while r.links.get('next', False):
            r = requests.get(r.links['next']['url'], headers=header, params=extra_parameters)
            if Verbose_Flag:
                print("result of getting submissions for a paginated response: {}".format(r.text))
            collected.extend(r.json().get('quiz_submissions', []))
    return collected
def update_question_type_stats(question):
    """Increment the global per-question-type counter for this question.

    ``question_type_stats`` is the dict created in main(); keys are Canvas
    question_type strings (or None when the field is absent).
    """
    global question_type_stats
    qt = question.get('question_type', None)
    # dict.get with a default collapses the original first-seen/seen-before
    # if/else into a single increment.
    question_type_stats[qt] = question_type_stats.get(qt, 0) + 1
def make_dir_for_urls(url, target_dir):
    """Create target_dir/<course_id>/<quiz_id>/<submission_id> for a result URL.

    The URLs have the form:
    https://canvas.kth.se/courses/11/quizzes/39141/history?quiz_submission_id=759552&version=1

    Returns the created directory path, or None when the URL contains no
    'courses/' component.
    """
    # Bug fix: Path was used here but pathlib was never imported anywhere in
    # this file, so the original raised NameError at runtime.
    from pathlib import Path
    # Verbose_Flag is only assigned in main(); default to quiet so this
    # function is also callable before main() has run.
    verbose = globals().get('Verbose_Flag', False)
    prefix = "courses/"
    prefix_offset = url.find(prefix)
    if prefix_offset > 0:
        url_tail = url[prefix_offset + len(prefix):]
        parts_of_path = url_tail.split('/')
        if verbose:
            print(parts_of_path)
        course_id = parts_of_path[0]
        quiz_id = parts_of_path[2]
        # parts_of_path[3] is e.g. 'history?quiz_submission_id=759552&version=1';
        # split on '=' then '&' to isolate the submission id.
        quiz_submission_part = parts_of_path[3].split('=')
        if verbose:
            print(quiz_submission_part)
        quiz_submission_id = quiz_submission_part[1].split('&')[0]
        if verbose:
            print(quiz_submission_id)
        dir_to_create = "{0}/{1}/{2}/{3}".format(target_dir, course_id, quiz_id, quiz_submission_id)
        print("Creating directory: {}".format(dir_to_create))
        Path(dir_to_create).mkdir(parents=True, exist_ok=True)
        return dir_to_create
def main():
    """Command-line entry point: dump a course's quizzes to an XLSX workbook.

    Expects one positional argument, the Canvas course_id.  The workbook
    'quizzes-<course_id>.xlsx' gets one sheet for the course, one for the
    quiz list, and per quiz a sheet of questions and a sheet of submissions.
    Unless --testing is given, a directory is also created for every
    submission's result URL.
    """
    global Verbose_Flag
    global question_type_stats

    # Currently unused; kept from the earlier list-quizzes.py this is based on.
    default_picture_size=128

    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose',
                      dest="verbose",
                      default=False,
                      action="store_true",
                      help="Print lots of output to stdout"
    )
    parser.add_option('-t', '--testing',
                      dest="testing",
                      default=False,
                      action="store_true",
                      help="Do not create the directories only make the XLSX files"
    )
    parser.add_option("--config", dest="config_filename",
                      help="read configuration from FILE", metavar="FILE")

    options, remainder = parser.parse_args()

    Verbose_Flag=options.verbose
    if Verbose_Flag:
        print("ARGV : {}".format(sys.argv[1:]))
        print("VERBOSE : {}".format(options.verbose))
        print("REMAINING : {}".format(remainder))
        print("Configuration file : {}".format(options.config_filename))

    # Sets up the baseUrl/header globals used by all the API helpers below.
    initialize(options)

    if (len(remainder) < 1):
        print("Insuffient arguments - must provide course_id\n")
        return
    else:
        course_id=remainder[0]
        if Verbose_Flag:
            print("course_id={0}, type={1}".format(course_id, type(course_id)))

    course_info=get_course_info(course_id)
    quizzes=list_quizzes(course_id)
    # Per-question-type counters, filled in by update_question_type_stats().
    question_type_stats=dict()

    target_dir="./Quiz_Submissions"
    if options.testing:
        target_dir="./Quiz_Submissions-testing"

    if course_info:
        course_info_df=pd.json_normalize(course_info)
    if (quizzes):
        quizzes_df=pd.json_normalize(quizzes)
        # below are examples of some columns that might be dropped
        #columns_to_drop=[]
        #quizzes_df.drop(columns_to_drop,inplace=True,axis=1)
        # the following was inspired by the section "Using XlsxWriter with Pandas" on http://xlsxwriter.readthedocs.io/working_with_pandas.html
        # set up the output write
        writer = pd.ExcelWriter('quizzes-'+course_id+'.xlsx', engine='xlsxwriter')
        if course_info:
            course_info_df.to_excel(writer, sheet_name='Course')
        quizzes_df.to_excel(writer, sheet_name='Quizzes')
        # One questions sheet and one submissions sheet per quiz, in id order.
        for q in sorted(quizzes, key=lambda x: x['id']):
            qi=list_quiz_questions(course_id, q['id'])
            qi_df=pd.json_normalize(qi)
            qi_df.to_excel(writer, sheet_name=str(q['id']))
            for question in qi:
                update_question_type_stats(question)
            #Verbose_Flag=True
            qs=list_quiz_submissions(course_id, q['id'])
            #Verbose_Flag=False
            if Verbose_Flag:
                print("quiz submission {0} {1}".format(q['id'], qs))
            qs_df=pd.json_normalize(qs)
            qs_df.to_excel(writer, sheet_name='s_'+str(q['id']))
            for submission in qs:
                results_url=submission.get('result_url', None)
                if results_url and not options.testing:
                    make_dir_for_urls(results_url, target_dir)
        # At this point I want to fetch all of the version of each quiz submission and save the results to files
        # converting the URLs of the form 'https://canvas.kth.se/courses/:course_id/quizzes/:quiz_id/history?quiz_submission_id=:submission_id&version=:version:number'
        # into file names of the form: :course_id_:quiz_id_quiz_submission_id=:submission_id_version_:version:number
        #
        # for submission in qs:
        #     results_url=submission.get('result_url', None)
        #     if results_url:
        #         attempt=submission['attempt']
        #         print("attempt={}".format(attempt))
        #         submitted_quiz = requests.get(results_url)
        #         submission_html=submitted_quiz.text
        #         if submission_html and len(submission_html) > 0:
        #             print("found a submission: {}".format(submission_html))
        #             # look for the div with id="questions"
        #             document = html.document_fromstring(submission_html)
        #             # questions = document.xpath('//*[@id="questions"]/div/*[@class="display_question"]')
        #             questions = document.xpath('//*[@id="questions"]')
        #             for a_question in questions:
        #                 a_question_id=a_question.attrib['id']
        #                 a_question_class=a_question.attrib['class']
        #                 print("question id={0} class={1}".format(a_question_id, a_question_class))
        #                 input=a_question.find('input')
        #                 if input:
        #                     # type="text" name="question_346131" value="redish-green"
        #                     input_type=input.attrib['type']
        #                     input_name=input.attrib['name']
        #                     input_value=input.attrib['value']
        #                     print("input type={0], name={1}, value={2}".format(input_type, input_name, input_value))
        # Close the Pandas Excel writer and output the Excel file.
        # NOTE(review): ExcelWriter.save() was removed in pandas 2.0; newer
        # pandas requires writer.close() instead — confirm the pinned version.
        writer.save()

    if len(question_type_stats) > 0:
        print("question_type_stats={}".format(question_type_stats))

if __name__ == "__main__": main()
| [
"maguire@kth.se"
] | maguire@kth.se |
178870ae4f352d2175231795d0352c7127475e6b | fda6a1be714d8e27a5d8dd3df795df45538f2fe7 | /graphene/tests/issues/test_1394.py | 39374381996bfc665becda97026ba781a21fd060 | [
"MIT"
] | permissive | graphql-python/graphene | 6badaaa97c8ad78552a656f9da9ed577cfc37add | 93cb33d359bf2109d1b81eaeaf052cdb06f93f49 | refs/heads/master | 2023-08-05T02:48:36.967050 | 2023-07-26T07:43:40 | 2023-07-26T07:43:40 | 43,056,951 | 8,187 | 1,088 | MIT | 2023-09-01T19:59:19 | 2015-09-24T09:18:18 | Python | UTF-8 | Python | false | false | 947 | py | from ...types import ObjectType, Schema, String, NonNull
class Query(ObjectType):
    """Root query type for the regression test of graphene issue #1394."""

    # String field whose 'input' argument is declared non-null (required).
    hello = String(input=NonNull(String))

    def resolve_hello(self, info, input):
        # "nothing" is a sentinel that makes the resolver return null.
        if input == "nothing":
            return None
        return f"Hello {input}!"


# Schema under test, shared by the test functions below.
schema = Schema(query=Query)
def test_required_input_provided():
    """
    Test that a required argument works when provided.
    """
    query = '{ hello(input: "Potato") }'
    result = schema.execute(query)
    assert not result.errors
    assert result.data == {"hello": "Hello Potato!"}
def test_required_input_missing():
    """
    Test that a required argument raised an error if not provided.
    """
    result = schema.execute("{ hello }")
    # Exactly one validation error, with GraphQL's standard wording.
    assert result.errors
    assert len(result.errors) == 1
    expected = (
        "Field 'hello' argument 'input' of type 'String!' "
        "is required, but it was not provided."
    )
    assert result.errors[0].message == expected
| [
"thomas@loftorbital.com"
] | thomas@loftorbital.com |
03ec16a4f9d655c170924a97b87023e79aba4f7a | 4aae1f8c2ca143ab4c2b5374f1f707b1b494569e | /apps/likes/__init__.py | e3a7bbc7cd4d8e5e5df06e7f7a03712c460cd257 | [] | no_license | liqd/a4-speakup | 49405c786e28168f698f3e003cb17a9795bfb354 | 418ed69c58664ed50acd045a3ccf19938759bc79 | refs/heads/master | 2021-12-21T22:29:42.512764 | 2021-05-05T15:08:17 | 2021-05-05T15:09:24 | 176,737,678 | 3 | 1 | null | 2021-12-14T08:04:36 | 2019-03-20T13:16:48 | Python | UTF-8 | Python | false | false | 46 | py | default_app_config = 'apps.likes.apps.Config'
| [
"r.mader@liqd.net"
] | r.mader@liqd.net |
4113e64831f110dafe2cd400ae322012ef92fce8 | a58a31782a2592ae660ef70cc881d071e073d8c5 | /utils/napi_dl | e8f3ab2d12c02a15886ad7c8f0fb253246bc902f | [
"MIT"
] | permissive | akrasuski1/utils | 9502f95d0c2443e296c199bc5738b27d031103c0 | 0f3909f0c76a7ebfc7c278a5b1f21dfb308f9661 | refs/heads/master | 2020-03-29T05:37:43.346072 | 2019-08-05T18:44:14 | 2019-08-05T18:44:14 | 149,590,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | #!/usr/bin/python
# reversed napi 0.16.3.1
#
# by gim,krzynio,dosiu,hash 2oo8.
#
#
#
# last modified: 6-I-2oo8
#
# 4pc0h f0rc3
#
# do dzialania potrzebny jest p7zip-full (tak sie nazywa paczka w debianie)
#
# POZDRAWIAMY NASZYCH FANOW!
import md5,sys,urllib,os
def f(z):
    """Compute napiprojekt's 5-character checksum for an md5 hexdigest.

    For each of five fixed positions: read one hex digit of z at idx[i],
    add add[i] to get an offset t, read the two hex digits z[t:t+2] as a
    value v, and keep the last hex digit of v*mul[i].  Returns the five
    digits concatenated.
    """
    idx = [0xe, 0x3, 0x6, 0x8, 0x2]
    mul = [2, 2, 5, 4, 3]
    add = [0, 0xd, 0x10, 0xb, 0x5]
    b = []
    # zip replaces the original xrange + parallel-index loop; it behaves the
    # same on Python 2 and is also valid on Python 3 (xrange is not).
    for j, m, a in zip(idx, mul, add):
        t = a + int(z[j], 16)
        v = int(z[t:t+2], 16)
        b.append(("%x" % (v * m))[-1])
    return ''.join(b)
# Python 2 script body.  For every file named on the command line: hash its
# first 10 MB, ask the napiprojekt service for matching subtitles, unpack the
# password-protected 7z archive, and recode the text from cp1250 to UTF-8.
for fname in sys.argv[1:]:
    d = md5.new();
    # napiprojekt identifies a movie by the md5 of its first 10 MB only.
    d.update(open(fname).read(10485760))
    # NOTE(review): `str` shadows the builtin here; harmless in this scope
    # but worth renaming.
    str = "http://napiprojekt.pl/unit_napisy/dl.php?l=PL&f="+d.hexdigest()+"&t="+f(d.hexdigest())+"&v=other&kolejka=false&nick=&pass=&napios="+os.name
    open("napisy.7z","w").write(urllib.urlopen(str).read())
    # Subtitle file name: replace the movie's 3-char extension with 'txt'.
    nazwa=fname[:-3]+'txt'
    # 7z exit status != 0 means no subtitles were found for this hash.
    if (os.system("/usr/bin/7z x -y -so -piBlm8NTigvru0Jr0 napisy.7z 2>/dev/null >\""+nazwa+"\"")):
        print "nie ma napisa do filmu"
        os.remove(nazwa)
    else:
        # Recode cp1250 -> UTF-8 via a temp copy, then rename .txt to .sub.
        os.system("cp \"" + nazwa + "\" /tmp/xd")
        os.system("iconv /tmp/xd -f cp1250 -t utf8 > \"" + nazwa + "\"")
        os.system("mv \"" + nazwa + "\" \"" + nazwa.replace(".txt", ".sub") + "\"")
        print "napisy pobrano, milordzie!"
    os.remove("napisy.7z")
| [
"noreply@github.com"
] | akrasuski1.noreply@github.com | |
4ecbb98a37468d3dec006f4f2834447706076527 | 5194ef59d10c839b79eff460a4d87a11f509ef64 | /services/user_service.py | 565f9aaa01d03c374b91f19c16d96c938d5d5a92 | [] | no_license | holbertra/blog_flask_app | 3166e615ff5d2d03d06ed20da43be10b436ceefd | faf279a2ae9ba6850d5253f0eb1f000b702b5240 | refs/heads/master | 2021-06-26T07:39:45.567777 | 2019-11-05T21:54:32 | 2019-11-05T21:54:32 | 219,604,635 | 0 | 0 | null | 2021-03-20T02:07:43 | 2019-11-04T21:59:13 | Python | UTF-8 | Python | false | false | 424 | py |
def create_user(email, password, f_name, l_name):
    """Create a new user record (not yet implemented)."""
    # 1. Check if email already exists in dB
    # 2. Hash the password
    # 3. Create the user object
    # 4. Save to the dBase
    pass
def delete_user(id):
    """Delete the user with the given id (not yet implemented)."""
    # Grab user from dB
    # Delete the user
    pass
def update_user(id, data):
    """Update the stored fields of user ``id`` with ``data`` (not yet implemented)."""
    # Grab the user from dB
    # Update the user's fields in dB
    # Commit changes
    pass  # bug fix: the body contained only comments, which is a SyntaxError
def get_user(id):
    """Return the user with the given ``id`` (not yet implemented)."""
    # Grab user from dB
    pass  # bug fix: the body contained only a comment, which is a SyntaxError
"holbertra@gamil.com"
] | holbertra@gamil.com |
69464c3b9cc44fc360e52b78b6397ca102998b16 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=43/sched.py | 96e86f88d4ec9f87aad6a16a3dbd922eb335bcd5 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | -S 1 -X RUN -Q 0 -L 2 132 400
-S 0 -X RUN -Q 0 -L 2 104 300
-S 0 -X RUN -Q 0 -L 2 93 300
-S 0 -X RUN -Q 0 -L 2 56 300
-S 2 -X RUN -Q 1 -L 1 50 400
-S 2 -X RUN -Q 1 -L 1 46 300
-S 2 -X RUN -Q 1 -L 1 45 150
-S 2 -X RUN -Q 1 -L 1 42 125
-S 3 -X RUN -Q 2 -L 1 35 175
-S 3 -X RUN -Q 2 -L 1 34 125
-S 3 -X RUN -Q 2 -L 1 32 200
-S 3 -X RUN -Q 2 -L 1 28 125
-S 4 -X RUN -Q 3 -L 1 24 125
-S 4 -X RUN -Q 3 -L 1 21 125
-S 4 -X RUN -Q 3 -L 1 20 175
-S 4 -X RUN -Q 3 -L 1 8 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
f9ab6ed6db28ab211c6720b20b9566e5ca10a9ef | cb280ddcc6dafa8fa62c588c7e4694909c4c2504 | /DONTFUCKINGTOUCHTHIS/variable_args.py | b0d4ed04019ee695c9478078f748a56485651637 | [] | no_license | chayapatr/discord | 78b383080f0eff7f4509571458a72057066c3dfa | e6ccec50b7650407f7e77a372c0f0afc1db13ea0 | refs/heads/master | 2023-05-20T14:10:38.888898 | 2021-06-11T16:09:40 | 2021-06-11T16:09:40 | 374,484,413 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from discord.ext import commands
@commands.command()
async def variable_args(ctx,*args):
    """Discord command that treats its arguments as stdin for a small
    "compiled" program: reads a count n, then n integers, and replies
    with their sum.
    """
    # Cursor into args; the input() shim below hands them out one at a time.
    _idx = 0
    # Shadows the builtin input() so the embedded program reads from args
    # instead of the console; the argument placeholder is ignored.
    def input(*argumentholder):
        nonlocal _idx
        _idx += 1
        return args[_idx - 1]
    # Shadows print() so the embedded program's output goes to the channel.
    async def print(*message):
        await ctx.send(''.join([str(x) for x in message]))
    # The embedded program: read n, sum the next n integers, report the total.
    n = int(input())
    sum = 0
    for i in range(n):
        x = int(input())
        sum += x
    await print(sum)
@variable_args.error
async def clear_error(ctx,error):
    """Error handler for variable_args: log the error to stdout (this is the
    real builtin print, not the shim defined inside the command)."""
    print(error)
def setup(bot):
    """Extension entry point: register the variable_args command on the bot."""
    bot.add_command(variable_args)
| [
"31594543+chayapatr@users.noreply.github.com"
] | 31594543+chayapatr@users.noreply.github.com |
4d4fb661acc5759a206df3f75663a1c781265c8f | cfd73856a182cc1b77fa195e07a3a063f80399ae | /index/migrations/0011_student_meli_code.py | 4f52a444c9af80e3f20e75cb18691beff3702d03 | [] | no_license | engAmirEng/madrese-rebuild | 99df6dd734bb57b843356ea8316c21894cc09af2 | 004e28add0732fc7cb6844d7c0639c8cfc93f67c | refs/heads/master | 2023-05-23T07:20:32.220648 | 2021-06-14T12:28:05 | 2021-06-14T12:28:05 | 374,588,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # Generated by Django 3.2.3 on 2021-06-07 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0010_remove_student_meli_code'),
]
operations = [
migrations.AddField(
model_name='student',
name='meli_code',
field=models.CharField(default=1, max_length=10),
preserve_default=False,
),
]
| [
"eng.amir.bu@gmail.com"
] | eng.amir.bu@gmail.com |
34d05e4b0c303576e8b2e8d6f7fe00333b0f00f2 | 5c24c60d35c78031e73b22ce4ea651e4dd6f6875 | /nearestNeighbor.py | 3b3828500a016db9212a31b104204f0416354195 | [] | no_license | danerbrear/k-nearest-neighbor | 449dd2ed196962e298d4216c44603229bea4c09b | 87a031d21cc70a618b926a7e23c7a2239ea09de8 | refs/heads/master | 2022-12-26T03:48:50.324243 | 2020-10-04T22:58:58 | 2020-10-04T22:58:58 | 301,175,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | import pandas
from minHeap import MinHeap
def findDistance(trainingRow, testRow, classCol):
    """Euclidean distance between a training row and a test row.

    The class column we are trying to classify is excluded from the
    calculation.

    trainingRow : pandas Series of feature values (caller has already
                  dropped classCol from it).
    testRow     : (index, Series) pair as yielded by DataFrame.iterrows();
                  classCol is dropped here before comparing.
    classCol    : name of the class column to skip.
    """
    testRowDrop = testRow[1].drop(classCol)
    squaredSum = 0
    # Series.iteritems() was removed in pandas 2.0; items() is the
    # supported equivalent and behaves identically here.
    for label, item in testRowDrop.items():
        squaredSum += ((trainingRow[label] - item) ** 2)
    distance = squaredSum ** 0.5
    return distance
# Classifies testRow by majority vote among its k nearest training rows.
# Returns 1 if the prediction is wrong and 0 if correct.
def nearestNeighbor(trainingSet, testRow, classCol, k):
    # NOTE(review): assumes MinHeap orders (row, distance) pairs by the
    # distance component — confirm against minHeap.py.
    minHeap = MinHeap(len(trainingSet.index))
    # Distance from every training row to the test row.
    for row in trainingSet.iterrows():
        dropRow = row[1].drop(classCol)
        dist = findDistance(dropRow, testRow, classCol)
        minHeap.insert((row, dist))
    minHeap.minHeap()
    # Determine class by k neighbor voting
    votes = {}
    for i in range(k):
        minVal = minHeap.remove()
        # minVal is ((index, Series), distance); the Series' classCol entry
        # is this neighbor's class label.
        if minVal[0][1][classCol] in votes:
            votes[minVal[0][1][classCol]] += 1
        else:
            votes[minVal[0][1][classCol]] = 1
    # Pick the label with the most votes (ties go to dict iteration order).
    maxVotes = 0
    predictedClass = ""
    for key in votes:
        if votes[key] > maxVotes:
            maxVotes = votes[key]
            predictedClass = key
    return 0 if (predictedClass == testRow[1][classCol]) else 1
"dane.brear@gmail.com"
] | dane.brear@gmail.com |
ca20c58619461ea1b7b9b9f01b15d56cf9b586d4 | 49a249ed1f91ccce93a33b695cb728fd3f69403d | /HelloWorld/view.py | 15d7192e71b4393f4fbde918360a8d705434a042 | [] | no_license | kakasaber/hello_project | e0eccf16b3d75947a01b518209c13fc65743bdc3 | 52612c1e715a0aa8247c31ddc241573af838d394 | refs/heads/master | 2022-06-14T03:12:25.310066 | 2020-05-07T08:57:03 | 2020-05-07T08:57:03 | 262,000,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.http import HttpResponse
def hello(request):
    """Minimal Django view returning a plain greeting."""
    greeting = "hello world !!!"
    return HttpResponse(greeting)
| [
"839823628@qq.com"
] | 839823628@qq.com |
b92fc3a8d0296972556872e96c0f6ac63b2e2613 | 07eac477192d5d9f4eadaaf3d5dfbd5e8deb1c59 | /sample.py | 50095b5bcc0d60001e5ff1d0f4febf062a5009fd | [
"Apache-2.0"
] | permissive | cisco-ie/nx-os-grpc-python | 90ad08718265ae7307ab478e89a2c164144dbb26 | 1b07d569b758222316c589abb1b8e6092d47f1f9 | refs/heads/master | 2022-12-14T20:03:00.020675 | 2019-01-04T16:19:37 | 2019-01-04T16:19:37 | 137,940,497 | 4 | 0 | Apache-2.0 | 2022-12-08T02:15:54 | 2018-06-19T20:10:51 | Python | UTF-8 | Python | false | false | 1,758 | py | #!/usr/bin/env python
"""Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
from nxos_grpc import Client
import secrets
logging.basicConfig(level=logging.DEBUG)
client = Client(secrets.hostname, secrets.username, secrets.password)
print(
json.dumps(
client.get_oper(
'Cisco-NX-OS-device:System/nd-items/inst-items/dom-items/Dom-list/if-items/If-list/vaddrstat-items/VaddrStat-list',
namespace='http://cisco.com/ns/yang/cisco-nx-os-device'
).as_dict(),
sort_keys=True,
indent=4
)
)
print(
json.dumps(
client.get(
'Cisco-NX-OS-device:System/nd-items/inst-items/dom-items/Dom-list/if-items/If-list/vaddrstat-items/VaddrStat-list',
namespace='http://cisco.com/ns/yang/cisco-nx-os-device',
request_id=1
).as_dict(),
sort_keys=True,
indent=4
)
)
print(
json.dumps(
client.get_config(
'Cisco-NX-OS-device:System/nd-items/inst-items/dom-items/Dom-list/if-items/If-list/vaddrstat-items/VaddrStat-list',
namespace='http://cisco.com/ns/yang/cisco-nx-os-device',
request_id=2
).as_dict(),
sort_keys=True,
indent=4
)
)
| [
"remcampb@cisco.com"
] | remcampb@cisco.com |
ea531889bf01ff9b71405fc6ad2e84ec1a764813 | ba8f5d23d9878a25b30a32cf16e8833f93b25853 | /source_py2/python_toolbox/nifty_collections/emitting_weak_key_default_dict.py | 46c4c7701214a78895301bc8c7a7931a9b878581 | [
"MIT"
] | permissive | nastako/python_toolbox | af520cbec1468c8e0aae0b3b1c467ca5623af45b | 9713fd728608818630ee409ac6a6fdaf863af31b | refs/heads/master | 2020-12-11T09:07:19.681161 | 2015-01-16T21:26:37 | 2015-01-16T21:26:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,427 | py | # Copyright 2009-2015 Ram Rachum.
# This program is distributed under the MIT license.
'''
Defines the `EmittingWeakKeyDefaultDict` class.
See its documentation for more details.
'''
from .weak_key_default_dict import WeakKeyDefaultDict
class EmittingWeakKeyDefaultDict(WeakKeyDefaultDict):
'''
A key that references keys weakly, has a default factory, and emits.
This is a combination of `weakref.WeakKeyDictionary` and
`collections.defaultdict`, which emits every time it's modified.
The keys are referenced weakly, so if there are no more references to the
key, it gets removed from this dict.
If a "default factory" is supplied, when a key is attempted that doesn't
exist the default factory will be called to create its new value.
Every time that a change is made, like a key is added or removed or gets
its value changed, we do `.emitter.emit()`.
'''
def __init__(self, emitter, *args, **kwargs):
super(EmittingWeakKeyDefaultDict, self).__init__(*args, **kwargs)
self.emitter = emitter
def set_emitter(self, emitter):
'''Set the emitter that will be emitted every time a change is made.'''
self.emitter = emitter
def __setitem__(self, key, value):
result = \
super(EmittingWeakKeyDefaultDict, self).__setitem__(key, value)
if self.emitter:
self.emitter.emit()
return result
def __delitem__(self, key):
result = super(EmittingWeakKeyDefaultDict, self).__delitem__(key)
if self.emitter:
self.emitter.emit()
return result
def pop(self, key, *args):
""" D.pop(k[,d]) -> v, remove specified key and return the
corresponding value. If key is not found, d is returned if given,
otherwise KeyError is raised """
result = super(EmittingWeakKeyDefaultDict, self).pop(key, *args)
if self.emitter:
self.emitter.emit()
return result
def popitem(self):
""" D.popitem() -> (k, v), remove and return some (key, value)
pair as a 2-tuple; but raise KeyError if D is empty """
result = super(EmittingWeakKeyDefaultDict, self).popitem()
if self.emitter:
self.emitter.emit()
return result
def clear(self):
""" D.clear() -> None. Remove all items from D. """
result = super(EmittingWeakKeyDefaultDict, self).clear()
if self.emitter:
self.emitter.emit()
return result
def __repr__(self):
return '%s(%s, %s, %s)' % (
type(self).__name__,
self.emitter,
self.default_factory,
dict(self)
)
def __reduce__(self):
"""
__reduce__ must return a 5-tuple as follows:
- factory function
- tuple of args for the factory function
- additional state (here None)
- sequence iterator (here None)
- dictionary iterator (yielding successive (key, value) pairs
This API is used by pickle.py and copy.py.
"""
if self.default_factory:
parameters = (self.emitter, self.default_factory)
else: # not self.default_factory
parameters = (self.emitter)
return (type(self), parameters, None, None, self.iteritems()) | [
"ram@rachum.com"
] | ram@rachum.com |
f6204b97311c0a68ac6fb78da25a8bf916595d58 | 69e318f2b60175108bc74ee669bfe16287a71cb6 | /plugins/modules/fortios_system_dns.py | db4b40e5be9e400dcaf07963bd254f700f9a60db | [] | no_license | chillancezen/ansible-galaxy-fortios-collection | 5268a5fd97fb4594772349b8d89cb818ec54b3bd | 66a331cd4493d1b0f49798d5c2cd6ef5aeba84d3 | refs/heads/master | 2022-04-09T19:20:59.073193 | 2020-03-26T07:17:09 | 2020-03-26T07:17:09 | 250,185,374 | 0 | 0 | null | 2020-03-26T07:06:16 | 2020-03-26T07:06:16 | null | UTF-8 | Python | false | false | 11,555 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_dns
short_description: Configure DNS in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and dns category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
system_dns:
description:
- Configure DNS.
default: null
type: dict
suboptions:
cache_notfound_responses:
description:
- Enable/disable response from the DNS server when a record is not in cache.
type: str
choices:
- disable
- enable
dns_cache_limit:
description:
- Maximum number of records in the DNS cache.
type: int
dns_cache_ttl:
description:
- Duration in seconds that the DNS cache retains information.
type: int
domain:
description:
- Search suffix list for hostname lookup.
type: list
suboptions:
domain:
description:
- DNS search domain list separated by space (maximum 8 domains)
required: true
type: str
ip6_primary:
description:
- Primary DNS server IPv6 address.
type: str
ip6_secondary:
description:
- Secondary DNS server IPv6 address.
type: str
primary:
description:
- Primary DNS server IP address.
type: str
retry:
description:
- Number of times to retry (0 - 5).
type: int
secondary:
description:
- Secondary DNS server IP address.
type: str
source_ip:
description:
- IP address used by the DNS server as its source IP.
type: str
timeout:
description:
- DNS query timeout interval in seconds (1 - 10).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure DNS.
fortios_system_dns:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_dns:
cache_notfound_responses: "disable"
dns_cache_limit: "4"
dns_cache_ttl: "5"
domain:
-
domain: "<your_own_value>"
ip6_primary: "<your_own_value>"
ip6_secondary: "<your_own_value>"
primary: "<your_own_value>"
retry: "11"
secondary: "<your_own_value>"
source_ip: "84.230.14.43"
timeout: "14"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a legacy (fortiosapi) session on the device described by *data*.

    Enables debug output, selects HTTP/HTTPS per the ``https`` flag
    (defaulting to HTTPS when absent), then authenticates.
    """
    fos.debug('on')
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_system_dns_data(json):
    """Return a copy of *json* restricted to the known system/dns options.

    Keys outside the option list, and keys whose value is ``None``
    (i.e. not supplied by the playbook), are dropped.
    """
    option_list = ['cache_notfound_responses', 'dns_cache_limit', 'dns_cache_ttl',
                   'domain', 'ip6_primary', 'ip6_secondary',
                   'primary', 'retry', 'secondary',
                   'source_ip', 'timeout']

    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    FortiOS REST payloads use hyphenated keys while Ansible argument
    specs use underscores. Lists are converted element by element,
    dicts get a rebuilt key set, and scalars pass through unchanged.
    """
    if isinstance(data, list):
        # BUGFIX: assign back through the index. The previous
        # `elem = underscore_to_hyphen(elem)` only rebound the loop
        # variable, so dicts nested inside lists kept underscore keys.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def system_dns(data, fos):
    """Filter and convert the system_dns options, then push them to the
    device's system/dns endpoint in the requested vdom."""
    payload = underscore_to_hyphen(filter_system_dns_data(data['system_dns']))
    return fos.set('system',
                   'dns',
                   data=payload,
                   vdom=data['vdom'])
def is_successful_status(status):
    """A call succeeded outright, or was a DELETE on an already-missing
    object (HTTP 404), which is treated as success."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch the system_dns payload and report (is_error, changed, resp)."""
    if data['system_dns']:
        resp = system_dns(data, fos)

    changed = resp['status'] == "success"
    return not is_successful_status(resp), changed, resp
def main():
    """Module entry point: declare the argument spec, connect to the
    FortiGate (HTTPAPI or legacy fortiosapi), apply the config, exit."""
    # Ansible argument spec; mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_dns": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "cache_notfound_responses": {"required": False, "type": "str",
                                             "choices": ["disable", "enable"]},
                "dns_cache_limit": {"required": False, "type": "int"},
                "dns_cache_ttl": {"required": False, "type": "int"},
                "domain": {"required": False, "type": "list",
                           "options": {
                               "domain": {"required": True, "type": "str"}
                           }},
                "ip6_primary": {"required": False, "type": "str"},
                "ip6_secondary": {"required": False, "type": "str"},
                "primary": {"required": False, "type": "str"},
                "retry": {"required": False, "type": "int"},
                "secondary": {"required": False, "type": "str"},
                "source_ip": {"required": False, "type": "str"},
                "timeout": {"required": False, "type": "int"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    # (all three credentials must be supplied explicitly for legacy mode).
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection Ansible provides.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: import fortiosapi lazily so the module still loads
        # (and can report a clean error) when the library is missing.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| [
"fshen01@fortinet.com"
] | fshen01@fortinet.com |
d9134ca744b326f6493509d9fd732548b3eab6f4 | ebb0e7a0ce0b3f7432be2b5d5755e41b593f7222 | /main.py | 230383e2555652deb8628b7c3d1c473a34aab912 | [] | no_license | JudeOsborn/angularjsgaetodo | ef7c79cbc34b1926821b78c839481f444b2e5367 | 044680c6a902009d9f4e486ecf0868d9f5fc3bf1 | refs/heads/master | 2021-01-01T05:31:04.042680 | 2013-01-28T22:52:44 | 2013-01-28T22:52:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | import webapp2
from google.appengine.ext.webapp import template
class MainHandler(webapp2.RequestHandler):
    """Serve the single-page app shell for the AngularJS todo demo."""

    def get(self):
        # Render index.html with an empty template context; all dynamic
        # behaviour lives client-side in AngularJS.
        self.response.out.write(template.render('index.html', {}))
# WSGI application mapping the site root to the SPA shell.
# NOTE(review): debug=True exposes stack traces to clients -- confirm it is
# disabled for production deployments.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
], debug=True)
| [
"jude@potatolondon.com"
] | jude@potatolondon.com |
5f5d38dc11d4fc235b9841b3e9e24c71dba0d06b | 15e14e59f647750de29b8fae64b9be86bc5d78ef | /testsuite.py | fd0a8cafe4a193ffae390b0475a631294b34caeb | [] | no_license | adityabettadapura/TicTacToe-game | 10243f8df359a567fb7a7e706fa390e4a3e7cb58 | bcbb2cb1668afb6933701e8ee1052f3f468aae5b | refs/heads/master | 2021-01-10T23:30:48.605360 | 2016-10-01T05:37:05 | 2016-10-01T05:37:05 | 69,721,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | import unittest
from board import Board
from pseudorandomai import PseudoRandomAI
class TestBoard(unittest.TestCase):
    """Unit tests for the Board class.

    Covered behaviours: emptiness, free-space lookup, win detection,
    rejection of invalid moves, and full-board detection.
    """

    def testEmptyBoard(self):
        """A new board is empty; it stops being empty after one move."""
        game = Board()
        self.assertEqual(game.isEmpty(), True)
        game.makeMove(0, 'X')
        self.assertEqual(game.isEmpty(), False)

    def testIsSpaceFree(self):
        """A square that has been played is no longer free."""
        game = Board()
        game.makeMove(5, 'O')
        self.assertEqual(game.isSpaceFree(5), False)

    def testWinner(self):
        """Three marks across the top row win for that letter."""
        game = Board()
        for square in (0, 1, 2):
            game.makeMove(square, 'X')
        self.assertEqual(game.isWinner('X'), True)

    def testInvalidMove(self):
        """Playing an occupied square is rejected."""
        game = Board()
        game.makeMove(0, 'X')
        self.assertEqual(game.makeMove(0, 'X'), False)

    def testBoardFull(self):
        """Filling all nine squares makes the board full."""
        game = Board()
        for square in range(9):
            game.makeMove(square, 'X')
        self.assertEqual(game.isFull(), True)
class TestAI(unittest.TestCase):
    """Unit tests for the PseudoRandomAI move selection.

    Covered behaviours: corner -> center -> edge preference order, and
    the no-move-available case on a full board.
    """

    def testNextMove(self):
        """The AI prefers corners, then the center, then any free edge."""
        player = PseudoRandomAI()

        # On an empty board any corner is acceptable.
        game = Board()
        self.assertIn(player.getNextMove(game, 'X'), [0, 2, 6, 8])

        # With one corner left, it must be chosen.
        for square, mark in ((0, 'X'), (1, 'O'), (2, 'X'), (6, 'O')):
            game.makeMove(square, mark)
        self.assertEqual(player.getNextMove(game, 'X'), 8)

        # With no corners free, the center is next.
        game.makeMove(8, 'X')
        self.assertEqual(player.getNextMove(game, 'X'), 4)

        # With corners and center taken, any remaining edge will do.
        game.makeMove(4, 'O')
        self.assertIn(player.getNextMove(game, 'X'), [1, 3, 5, 7])

    def testNoMove(self):
        """On a completely filled board getNextMove returns None."""
        player = PseudoRandomAI()
        game = Board()
        for square, mark in enumerate('XOXOXOXOX'):
            game.makeMove(square, mark)
        self.assertEqual(player.getNextMove(game, 'X'), None)
| [
"aditya.b.shankar@gmail.com"
] | aditya.b.shankar@gmail.com |
d7a6f1cc323844bf66510192f3c73feeae42f90c | 317eff8f4735d76ba961a174a686bd4d00af28ce | /names/names.py | f818c279a81e48a5573cd6d3179cfe030d2dbfca | [
"MIT"
] | permissive | lucywyman/hamper-names | 95ced474ded2c3a4c8734c4c01673a393df4d1f8 | 94cd0ea9e533723fb7997213bd076218b567bcc7 | refs/heads/master | 2021-01-01T05:31:38.703271 | 2014-02-26T03:46:23 | 2014-02-26T03:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | import random
import re
from datetime import datetime
from hamper.interfaces import ChatCommandPlugin, Command
class Names(ChatCommandPlugin):
    '''Random Name Generator: !names to generate a random name'''
    # NOTE: the trigger docstring previously said "!name" although the
    # regex below only matches "names".

    name = 'names'
    priority = 0

    class Names(Command):
        """Reply to "names" with a randomly assembled two-part name."""

        regex = r'^names$'
        name = 'names'

        def command(self, bot, comm, groups):
            """Pick one first-name part and one surname part and reply.

            :param bot: hamper bot instance used to send the reply
            :param comm: incoming message/command payload
            :param groups: regex capture groups (unused; pattern has none)
            """
            # First-name parts; each carries its own trailing space.
            # BUGFIX: "Mr." previously lacked the trailing space every
            # other entry has, producing names like "Mr.Octocat".
            l1 = ["Pixelated ", "Linus ", "Mr. ", "Doctor ", "Fernando ", "Bacon ",
                  "Mario ", "Professor ", "Velociraptor ", "Baby monkey ", "Richard ",
                  "Luigi ", "Peach ", "Batman ", "Macafee ", "Mozilla ", "Luxe ", "Yoshi ",
                  "Uzbekistan ", "Stanley ", "Stefon ", "Ayn ", "Hans ", "Hipster ", "Cueball ",
                  "YOLO ", "Hamper ", "Lady ", "Radnall ", "Stephen ", "HP ", "Stud "]
            # Surname parts. NOTE(review): "Fiddlesticks" and "Muffin"
            # appear twice, doubling their odds -- kept as-is in case
            # that is intentional.
            l2 = ["Octocat", "McGee", "Fiddlesticks", "Torvalds", "Munroe", "Kitten",
                  "Muffin", "Rasta Primavera", "Fiddlesticks", "Dangerzone", "Jobs", "Stallman",
                  "Moneybags", "Muffin", "Heisenberg", "Zaboomafoo", "Honey", "Fox", "Hawking",
                  "Lovecraft", "Rand", "Vim", "the 34th"]

            name1 = random.choice(l1)
            name2 = random.choice(l2)
            bot.reply(comm, name1 + name2)


names = Names()
| [
"wyman.lucy@gmail.com"
] | wyman.lucy@gmail.com |
150c5fd8c3bd60bd78428844d15da7c125771b39 | 3937f340aafed20f7b3bb9e36a62d0dfe6ca985d | /CrispyProject/WebApp/forms.py | 02e71ff1a3beb904dbd38af4006ac6e1f687b0b3 | [] | no_license | sunnywralph/Django7AM | 8f6f7e52847882d35ee7f7c4c263c5e90c79b6da | ffef6c6e5ab5231416ca743ebae299622eab9791 | refs/heads/master | 2022-04-25T15:04:45.733504 | 2020-05-05T14:53:57 | 2020-05-05T14:53:57 | 261,496,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from django import forms
from WebApp.models import Person
# Fields with Validation
class EmpForm(forms.ModelForm):
    """ModelForm exposing every field of the Person model.

    Field-level validation comes from the Person model definition;
    no extra clean_* methods are declared here.
    """

    class Meta:
        model = Person
        fields = '__all__'
| [
"sunnywralph@gmail.com"
] | sunnywralph@gmail.com |
2e75e364412047116527b67d7526a7d754275785 | ba27bff7331d6ffb7297f20b27851096da9490e7 | /Lesson-1/Env/bin/easy_install-3.7 | b69ddaa977e22098d3d9a467baa7ba786703c2f3 | [] | no_license | aandaldi/Learn-Python-Django | 1ba54a52c82dc6654f0b6234ce1e4b8e28224aa8 | f26b958ae3f317ba045c816f4ef4f51b2455059b | refs/heads/master | 2020-11-27T20:03:38.165930 | 2020-09-27T16:29:54 | 2020-09-27T16:29:54 | 229,584,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | 7 | #!/home/aandaldi/Documents/TrainInternal/Learn-Python-Django/Lesson-1/Env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Normalise argv[0]: strip the "-script.py(w)"/".exe" suffixes that
    # Windows launchers append, so setuptools sees the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"aandaldi@gmail.com"
] | aandaldi@gmail.com |
598105808d121d339072a4f7214de032ba831a6d | 36c57834cf7546bfdeee88c035f63f0a7a9a634f | /word_lm.py | c93e327b37b23b970f3d43cbccb9b9135dc60c0e | [] | no_license | GitSujal/Sentence-Generation-with-RNN | 9d285d83893c89e81e551bd3d0b276595ae566ab | e3c8b87b8f55659ce7f63e9e363f1777fccfac13 | refs/heads/master | 2020-04-24T13:12:30.514503 | 2019-02-22T02:19:45 | 2019-02-22T02:19:45 | 171,979,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,330 | py | import tensorflow as tf
import numpy as np
import reader
import time
import random
class PTBInput(object):
    """Input Data: wraps the queued (input, target) tensors for one model.

    One training step consumes a (batch_size, num_steps) window, so
    epoch_size is the number of such windows available in raw_data.
    """

    def __init__(self, config, raw_data = None, name = None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        if raw_data is not None:
            # -1 because targets are the inputs shifted by one position.
            self.epoch_size = ((len(raw_data) // batch_size) -1) // num_steps
            self.input_data, self.targets = reader.inputProducer(raw_data, batch_size, num_steps, name = name)
class PTBModel(object):
    """PTB Model: stacked-LSTM language model over token ids.

    Builds embedding lookup, an (optionally dropout-regularised)
    multi-layer LSTM unrolled over num_steps, a softmax projection, and
    -- when training -- a gradient-clipped SGD train op.
    """

    def __init__(self, is_training, config, input_):
        self._is_training = is_training
        self._input = input_
        self.batch_size = input_.batch_size
        self.num_steps = input_.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size

        # Initialize embedding matrix [vocab_size, hidden_size]
        embedding = tf.get_variable("embedding", [vocab_size, size], dtype = tf.float32)
        # input_data is batch_size X num_steps per iteration till epoch_size
        # inputs is of size batch_size X num_steps X hidden_size
        inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        # output is of shape [batch_size * num_steps, size]
        output, state = self._build_rnn_graph_lstm(inputs, config, is_training)

        softmax_w = tf.get_variable("softmax_w", [size, vocab_size], dtype=tf.float32)
        softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Reshape logits to be a 3-D tensor for sequence loss
        logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])
        self._logits = logits
        self._output_probs = tf.nn.softmax(logits)
        loss = tf.contrib.seq2seq.sequence_loss(logits,input_.targets,tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),average_across_timesteps=False,average_across_batch=True)

        # Update the cost (summed over time steps, averaged over batch)
        self._cost = cost = tf.reduce_sum(loss)
        self._final_state = state

        if not is_training:
            return

        # Gradient-clipped SGD over all trainable variables.
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(config.learning_rate)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step = tf.train.get_or_create_global_step())

    def _get_lstm_cell(self, config, is_training):
        # reuse=not is_training lets the eval/generate graphs share the
        # variables created by the training graph.
        return tf.contrib.rnn.BasicLSTMCell(config.hidden_size, forget_bias=0.0, state_is_tuple=True,reuse=not is_training)

    def _build_rnn_graph_lstm(self, inputs, config, is_training):
        """Unroll the stacked LSTM over num_steps; return (output, state).

        Whenever the current input token equals config.startCharID, that
        batch row's hidden state is reset to a constant "prime" state so
        every new sequence starts from the same state.
        """
        def make_cell():
            cell = self._get_lstm_cell(config, is_training)
            # Using dropout on the cell outputs while training
            if is_training and config.keep_prob < 1:
                cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob = config.keep_prob)
            return cell

        # Multilayer RNN
        cell = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(config.num_layers)], state_is_tuple = True)

        # With state_is_tuple set to True, hidden layer consisting of cell and hidden-to-output states is represented by tuple (c,h)
        # So initial state has size num_layers X [batch_size X (h,c))*size]
        # With state_is_tuple set to false, initial state is represented by
        # a concatenated matrix of shape [batch_size, num_layers * (h,c) * size]
        self._initial_state = cell.zero_state(config.batch_size, tf.float32)
        self._prime_initial_state = cell.zero_state(config.batch_size, tf.float32)
        state = self._initial_state

        # One copy of the start-token id per batch row, for the reset mask.
        startCharTensor = tf.constant(value = config.startCharID, dtype = tf.int32, shape = [config.batch_size])

        # Outputs is a tensor of shape [batch_size X num_steps X size]
        # state is LSTM Tuple of shape [batch_size X size] for a sequence of hidden layers
        # Output is of shape [batch_size X num_steps] rows and [size] columns
        # Weight shared across all time steps (softmax) is operated on batch_size * num_steps character vectors
        # logits is of shape [batch_size * num_steps vocab_size]

        # Constant "prime" state (all 0.05) used to re-seed rows that just
        # saw the start token; overwrites the zero_state assigned above.
        initMatrix = tf.constant(value = 0.05, shape = [config.batch_size, config.hidden_size], dtype = tf.float32)
        initCell = tf.contrib.rnn.LSTMStateTuple(c = initMatrix, h = initMatrix)
        initMultiCell = tuple(initCell for i in range(config.num_layers))
        self._prime_initial_state = initMultiCell

        outputs = []
        with tf.variable_scope("RNN"):
            for time_step in range(self.num_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                # 0/1 masks (as floats) marking which rows hit the start token.
                startCharMatchTensor = tf.reshape(tf.cast(tf.equal(startCharTensor, self._input.input_data[:, time_step]), tf.float32), shape = [config.batch_size, 1])
                startCharMismatchTensor = tf.reshape(tf.cast(tf.not_equal(startCharTensor, self._input.input_data[:, time_step]), tf.float32), shape = [config.batch_size, 1])
                # Blend: prime state where the mask matched, carried state elsewhere.
                state = tuple((tf.add(tf.multiply(self._prime_initial_state[i].c, startCharMatchTensor), tf.multiply(state[i].c, startCharMismatchTensor)), tf.add(tf.multiply(self._prime_initial_state[i].h, startCharMatchTensor), tf.multiply(state[i].h, startCharMismatchTensor))) for i in range(config.num_layers))
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)

        output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])
        return output, state

    """
    # Simplified Version (kept for reference; not executed)
    inputs = tf.unstack(inputs, num=self.num_steps, axis=1)
    outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,initial_state=self._initial_state)
    output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])
    return output, state
    """

    @property
    def input(self):
        """The PTBInput feeding this model."""
        return self._input

    @property
    def logits(self):
        """Unnormalised scores, shape [batch_size, num_steps, vocab_size]."""
        return self._logits

    @property
    def train_op(self):
        """SGD update op (only defined when is_training is True)."""
        return self._train_op

    @property
    def cost(self):
        return self._cost

    @property
    def output_probs(self):
        """Softmax of the logits."""
        return self._output_probs

    @property
    def final_state(self):
        return self._final_state

    @property
    def initial_state(self):
        return self._initial_state
def run_epoch(session, model, generate_model, corpus, eval_op=None, verbose=False):
    """Run one full pass of *model* over its input and return perplexity.

    When *verbose*, prints progress roughly every 10% of the epoch along
    with a sentence sampled from *generate_model*.
    """
    epoch_start = time.time()
    total_cost = 0.0
    total_steps = 0
    state = session.run(model.initial_state)

    for step in range(model.input.epoch_size):
        fetched = session.run([model.cost, model.final_state, model.train_op],
                              {model.initial_state: state})
        step_cost, state = fetched[0], fetched[1]
        total_cost += step_cost
        total_steps += model.input.num_steps

        if verbose and step % (model.input.epoch_size // 10) == 10:
            elapsed = time.time() - epoch_start
            print("%.3f perplexity: %.3f speed: %.0f wps" % (
                step * 1.0 / model.input.epoch_size,
                np.exp(total_cost / total_steps),
                total_steps * model.input.batch_size / elapsed))
            print(GenerateSentence(session, generate_model, corpus))

    return np.exp(total_cost / total_steps)
def sample(a, temperature=1.0):
    """Draw an index from distribution *a* sharpened by *temperature*.

    Lower temperature makes the distribution peakier. Falls back to the
    last index if rounding keeps the cumulative sum below the draw.
    """
    scaled = np.log(a) / temperature
    probs = np.exp(scaled) / np.sum(np.exp(scaled))
    threshold = random.random()  # range: [0,1)
    cumulative = 0.0
    for index, p in enumerate(probs):
        cumulative += p
        if cumulative > threshold:
            return index
    return len(probs) - 1
def GenerateSentence(session, model, corpus, verbose = False):
    """Sample 500 tokens from *model*, starting from the start character.

    Stop characters are rendered as newlines; every other token id is
    looked up in the corpus and followed by a space.
    """
    id_to_char = corpus.id_to_char
    start_id = corpus.char_to_id[corpus.startChar]
    stop_id = corpus.char_to_id[corpus.stopChar]

    state = session.run(model.initial_state)
    current = np.matrix([[start_id]])
    pieces = []

    for _ in range(500):
        output_probs, state = session.run(
            [model.output_probs, model.final_state],
            {model.input.input_data: current, model.initial_state: state})
        # Temperature 0.8 sharpens the distribution a little before sampling.
        choice = sample(output_probs[0][0], 0.8)
        current = np.matrix([[choice]])
        pieces.append('\n' if choice == stop_id else id_to_char[choice] + ' ')

    return ''.join(pieces)
class TrainConfig(object):
    """Hyper-parameters used while training the language model."""
    init_scale = 0.01       # weight-initialisation scale (unused when xavier is chosen in main)
    learning_rate = 0.50    # SGD learning rate
    vocab_size = 214        # placeholder; overwritten from the corpus in main()
    max_grad_norm = 5       # global-norm gradient clipping threshold
    hidden_size = 250       # LSTM units per layer
    keep_prob = 0.5         # dropout keep probability
    batch_size = 20
    num_steps = 40          # truncated-BPTT unroll length
    num_layers = 2
    max_max_epoch = 2       # number of training epochs run in main()
    startCharID = 0         # placeholder; filled in from the corpus in main()
    stopCharID = 0          # placeholder; filled in from the corpus in main()


class GenerateConfig(object):
    """Same model shape as TrainConfig, but batch/steps of 1 for sampling."""
    init_scale = 0.01
    learning_rate = 0.50
    max_grad_norm = 5
    vocab_size = 214        # placeholder; overwritten from the corpus in main()
    keep_prob = 1.0         # no dropout at generation time
    hidden_size = 250
    batch_size = 1          # generate one token at a time
    num_steps = 1
    num_layers = 2
    startCharID = 0         # placeholder; filled in from the corpus in main()
    stopCharID = 0          # placeholder; filled in from the corpus in main()
def main(_):
    """Build the corpus and the four weight-sharing graphs, restore a
    checkpoint, train, sample a document, and save."""
    print("Start")
    print("Preparing Corpus")
    corpus = reader.Corpus()
    startCharID = corpus.char_to_id[corpus.startChar]
    stopCharID = corpus.char_to_id[corpus.stopChar]

    print("Getting Configurations")
    # Copy corpus-derived values into both configs before graph building.
    train_config = TrainConfig()
    train_config.vocab_size = corpus.vocab_size
    train_config.startCharID = startCharID
    train_config.stopCharID = stopCharID
    generate_config = GenerateConfig()
    generate_config.vocab_size = corpus.vocab_size
    generate_config.startCharID = startCharID
    generate_config.stopCharID = stopCharID
    print(train_config.vocab_size)
    print(train_config.startCharID)
    print(train_config.stopCharID)

    print("Setting up Graph")
    with tf.Graph().as_default():
        #initializer = tf.random_uniform_initializer(-train_config.init_scale, train_config.init_scale)
        initializer = tf.contrib.layers.xavier_initializer()
        print("Train")
        # All four models share variables via variable_scope "Model";
        # only the Train graph creates them (reuse=None).
        with tf.name_scope("Train"):
            train_input = PTBInput(config = train_config, raw_data = corpus.train_set, name = "TrainInput")
            with tf.variable_scope("Model", reuse = None, initializer= initializer):
                train_model = PTBModel(is_training = True, config = train_config, input_=train_input)
            tf.summary.scalar("Training Loss", train_model.cost)
        with tf.name_scope("Valid"):
            valid_input = PTBInput(config = train_config, raw_data = corpus.valid_set, name = "ValidInput")
            with tf.variable_scope("Model", reuse = True, initializer = initializer):
                valid_model = PTBModel(is_training = False, config = train_config, input_=valid_input)
            tf.summary.scalar("Validation Loss", valid_model.cost)
        with tf.name_scope("Test"):
            test_input = PTBInput(config = generate_config, raw_data = corpus.test_set, name = "TestInput")
            with tf.variable_scope("Model", reuse = True, initializer = initializer):
                test_model = PTBModel(is_training = False, config = generate_config, input_ = test_input)
        with tf.name_scope("Generate"):
            generate_input = PTBInput(config = generate_config, raw_data = corpus.test_set, name = "GenerateInput")
            with tf.variable_scope("Model", reuse = True, initializer = initializer):
                generate_model = PTBModel(is_training = False, config = generate_config, input_ = generate_input)

        models = {"Train":train_model, "Valid":valid_model, "Test":test_model, "Generate":generate_model}

        print("Executing Graph")
        with tf.Session() as sess:
            saver = tf.train.Saver()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess = sess, coord = coord)
            sess.run(tf.global_variables_initializer())
            # NOTE(review): assumes this checkpoint already exists on disk;
            # restore fails otherwise.
            saver.restore(sess, 'model/savedModelValidL2H250N40-1000')
            for i in range(train_config.max_max_epoch):
                train_perplexity = run_epoch(session = sess, model = train_model, generate_model = generate_model, corpus = corpus, eval_op = train_model.train_op, verbose = True)
                print("Epoch %d Train perplexity %.3f" % (i+1, train_perplexity))
            genDoc = GenerateSentence(session=sess, model=generate_model, corpus=corpus, verbose = False)
            print(genDoc)
            saver.save(sess, "model/savedModelValidL2H250N40", global_step = 1000)
            coord.request_stop()
            coord.join(threads)


if __name__ == "__main__":
    tf.app.run()
| [
"070bex442@ioe.edu.np"
] | 070bex442@ioe.edu.np |
28b1921adc9695c38dd720678bc747adc2d6a752 | 8971fcd9806f86d844edc623ef849adfadad066f | /TTHAnalysis/python/tools/nanoAOD/jetmetGrouper.py | 848c86c9eedcde18f67e066331d0f9c80b3e8a63 | [] | no_license | cms-susy-soft-leptons/cmgtools-lite | a7117d457f79da5cd7dbd6bd987d436d853ad45a | e8ee654a6960dada93930b8d2ad0bc51f961530a | refs/heads/104X_dev_nano_sos_plotter_tidy | 2020-08-31T00:32:15.411155 | 2020-03-27T17:24:17 | 2020-03-27T17:24:17 | 218,534,810 | 0 | 0 | null | 2020-03-27T17:24:18 | 2019-10-30T13:32:34 | Python | UTF-8 | Python | false | false | 5,878 | py | import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
# group jec components
# taken from https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit?ouid=111820255692530107608&usp=sheets_home&ths=true
import os
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
import math
class jetmetGrouper(Module):
    """Combine individual JEC uncertainty sources into grouped variations.

    For every group in *groups*, the per-source jet-pt shifts are summed
    in quadrature and propagated to the MET, producing
    <jet>_pt_jes<Group><Up/Down> and <met>_{pt,phi}_jes<Group><Up/Down>
    branches. Branch descriptors listed in *dumpMore* are copied through
    from the input event unchanged.
    """

    def __init__(self, groups, jetbranch, metbranch, dumpMore=None):
        self.groups = groups
        self.jetbranch = jetbranch
        self.metbranch = metbranch
        # Avoid the shared-mutable-default pitfall; None means "no extras".
        self.dumpMore = dumpMore if dumpMore is not None else []

    def beginJob(self):
        pass

    def endJob(self):
        pass

    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        """Declare the grouped-variation and pass-through output branches."""
        self.wrappedOutputTree = wrappedOutputTree
        for group in self.groups:
            for sign in ['Up', 'Down']:
                self.wrappedOutputTree.branch("%s_pt_jes%s%s" % (self.jetbranch, group, sign), "F", lenVar="n%s" % self.jetbranch)
                self.wrappedOutputTree.branch("%s_pt_jes%s%s" % (self.metbranch, group, sign), "F")
                self.wrappedOutputTree.branch("%s_phi_jes%s%s" % (self.metbranch, group, sign), "F")
        for br in self.dumpMore:
            self.wrappedOutputTree.branch(**br)

    def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        pass

    def analyze(self, event):
        """Fill grouped JES variations for jets and MET for one event."""
        jets = Collection(event, '%s' % self.jetbranch)
        met = Object(event, self.metbranch)
        # BUGFIX: getattr(obj, "pt_nom", "pt") returned the *string* "pt"
        # when the nominal branch was absent, crashing math.cos/sin.
        # Fall back to the plain attribute instead.
        metPt = getattr(met, "pt_nom", met.pt)
        metPhi = getattr(met, "phi_nom", met.phi)
        metPx = metPt * math.cos(metPhi)
        metPy = metPt * math.sin(metPhi)
        for group in self.groups:
            for sign in ['Up', 'Down']:
                direction = 1 if sign == 'Up' else -1
                jetVar = []
                metPxVar = metPx
                metPyVar = metPy
                for j in jets:
                    thePt = getattr(j, "pt_nom", j.pt)   # same string-default bugfix
                    thePhi = j.phi
                    # Sum this group's JEC sources in quadrature.
                    shift = 0
                    for comp in self.groups[group]:
                        shift = (shift ** 2 + (getattr(j, "pt_jes" + comp + sign) - thePt) ** 2) ** 0.5
                    # Propagate the jet-pt shift to the MET components.
                    metPxVar = metPxVar - direction * shift * math.cos(thePhi)
                    metPyVar = metPyVar - direction * shift * math.sin(thePhi)
                    jetVar.append(thePt + direction * shift)
                self.wrappedOutputTree.fillBranch("%s_pt_jes%s%s" % (self.jetbranch, group, sign), jetVar)
                self.wrappedOutputTree.fillBranch("%s_pt_jes%s%s" % (self.metbranch, group, sign), (metPxVar ** 2 + metPyVar ** 2) ** 0.5)
                self.wrappedOutputTree.fillBranch("%s_phi_jes%s%s" % (self.metbranch, group, sign), math.atan2(metPyVar, metPxVar))
        for br in self.dumpMore:
            self.wrappedOutputTree.fillBranch(br['name'], getattr(event, br['name']))
        return True
# Pass-through branch descriptors copied from input to output
# (nominal pt plus JER/unclustered-energy variations).
moreVars = [
    {'name': 'Jet_pt_nom', 'rootBranchType': 'F', 'lenVar': 'nJet'},
    {'name': 'MET_pt_nom', 'rootBranchType': 'F'},
    {'name': 'MET_pt_unclustEnUp', 'rootBranchType': 'F'},
    {'name': 'MET_pt_unclustEnDown', 'rootBranchType': 'F'},
    {'name': 'MET_phi_unclustEnUp', 'rootBranchType': 'F'},
    {'name': 'MET_phi_unclustEnDown', 'rootBranchType': 'F'},
    {'name': 'Jet_pt_jerUp', 'rootBranchType': 'F', 'lenVar': 'nJet'},
    {'name': 'MET_pt_jerUp', 'rootBranchType': 'F'},
    {'name': 'MET_phi_jerUp', 'rootBranchType': 'F'},
    {'name': 'Jet_pt_jerDown', 'rootBranchType': 'F', 'lenVar': 'nJet'},
    {'name': 'MET_pt_jerDown', 'rootBranchType': 'F'},
    {'name': 'MET_phi_jerDown', 'rootBranchType': 'F'},
]

# Same descriptors for 2017, where the EE-noise-fixed MET collection
# ("METFixEE2017") replaces the plain MET branches.
moreVars2017 = [
    {'name': 'Jet_pt_nom', 'rootBranchType': 'F', 'lenVar': 'nJet'},
    {'name': 'METFixEE2017_pt_nom', 'rootBranchType': 'F'},
    {'name': 'METFixEE2017_pt_unclustEnUp', 'rootBranchType': 'F'},
    {'name': 'METFixEE2017_pt_unclustEnDown', 'rootBranchType': 'F'},
    {'name': 'METFixEE2017_phi_unclustEnUp', 'rootBranchType': 'F'},
    {'name': 'METFixEE2017_phi_unclustEnDown', 'rootBranchType': 'F'},
    {'name': 'Jet_pt_jerUp', 'rootBranchType': 'F', 'lenVar': 'nJet'},
    {'name': 'METFixEE2017_pt_jerUp', 'rootBranchType': 'F'},
    {'name': 'METFixEE2017_phi_jerUp', 'rootBranchType': 'F'},
    {'name': 'Jet_pt_jerDown', 'rootBranchType': 'F', 'lenVar': 'nJet'},
    {'name': 'METFixEE2017_pt_jerDown', 'rootBranchType': 'F'},
    {'name': 'METFixEE2017_phi_jerDown', 'rootBranchType': 'F'},
]

# Grouping of individual JEC uncertainty sources; the "_year" suffix marks
# groups treated as uncorrelated across data-taking years.
groups = {'HF': ['PileUpPtHF', 'RelativeJERHF', 'RelativePtHF'],
          'BBEC1_year': ['RelativeJEREC1', 'RelativePtEC1', 'RelativeStatEC'],
          'FlavorQCD': ['FlavorQCD'],
          'RelativeSample_year': ['RelativeSample'],
          'EC2': ['PileUpPtEC2'],
          'HF_year': ['RelativeStatHF'],
          'RelativeBal': ['RelativeBal'],
          'Absolute_year': ['AbsoluteStat', 'RelativeStatFSR', 'TimePtEta'],
          'BBEC1': ['PileUpPtBB', 'PileUpPtEC1', 'RelativePtBB'],
          'EC2_year': ['RelativeJEREC2', 'RelativePtEC2'],
          'Absolute': ['AbsoluteMPFBias', 'AbsoluteScale', 'Fragmentation', 'PileUpDataMC', 'PileUpPtRef', 'RelativeFSR', 'SinglePionECAL', 'SinglePionHCAL']
          }

# Per-year module factories expected by the nanoAOD-tools postprocessor.
jetMetCorrelate2016 = lambda : jetmetGrouper( groups, "Jet", "MET", dumpMore=moreVars)
jetMetCorrelate2017 = lambda : jetmetGrouper( groups, "Jet", "METFixEE2017", dumpMore=moreVars2017)
jetMetCorrelate2018 = lambda : jetmetGrouper( groups, "Jet", "MET", dumpMore=moreVars)
| [
"s.scruz.312@gmail.com"
] | s.scruz.312@gmail.com |
4335e43e879c0ef68bff953743aa51e096e7bc6b | abfa70e1da5b4ba8e465cdc046fa36e81386744a | /base_ml/10.5.Iris_RandomForest_Enum.py | cb324f72bde8cb4ca167d5b6c13a703a16f9b8bc | [] | no_license | superman666ai/crazy_project | f850819ff2287e345b67500111733bafa5629d1f | 99dcba0fe246ecaf3f556f747d44731a04231921 | refs/heads/master | 2020-05-15T09:32:56.523875 | 2019-05-16T00:57:23 | 2019-05-16T00:57:23 | 182,179,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.ensemble import RandomForestClassifier
def iris_type(s):
    """Convert an iris species name to its integer class label (0, 1 or 2)."""
    labels = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    return labels[s]
# 'sepal length', 'sepal width', 'petal length', 'petal width'
# Chinese axis labels for the four features (sepal length/width, petal length/width).
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'

if __name__ == "__main__":
    # Use a CJK-capable font (SimHei; FangSong/KaiTi would also work) so the
    # Chinese feature labels and title render, and keep the minus sign
    # displayable with a non-unicode font.
    mpl.rcParams['font.sans-serif'] = [u'SimHei']
    mpl.rcParams['axes.unicode_minus'] = False

    path = '../data/8.iris.data'  # path of the iris data file
    data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type}, encoding="utf-8")
    # First four columns are the features, the fifth is the class label.
    x_prime, y = np.split(data, (4,), axis=1)

    # All 2-feature combinations of the 4 features -> six subplots.
    feature_pairs = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
    plt.figure(figsize=(10, 9), facecolor='#FFFFFF')
    for i, pair in enumerate(feature_pairs):
        # Prepare the data: keep only the two features of this pair.
        x = x_prime[:, pair]

        # Random forest on the two selected features.
        clf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=4)
        rf_clf = clf.fit(x, y.ravel())

        # Plotting.
        N, M = 500, 500  # number of sampling points along each axis
        x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
        x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
        t1 = np.linspace(x1_min, x1_max, N)
        t2 = np.linspace(x2_min, x2_max, M)
        x1, x2 = np.meshgrid(t1, t2)  # build the grid of sampling points
        x_test = np.stack((x1.flat, x2.flat), axis=1)  # test points

        # Predictions on the training set.
        y_hat = rf_clf.predict(x)
        y = y.reshape(-1)
        c = np.count_nonzero(y_hat == y)  # number of correct predictions
        # print 'features: ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]],
        # print '\tcorrect predictions:', c,
        # print '\taccuracy: %.2f%%' % (100 * float(c) / float(len(y)))

        # Display.
        cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
        cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
        y_hat = rf_clf.predict(x_test)  # predicted class for every grid point
        y_hat = y_hat.reshape(x1.shape)  # reshape back to the grid shape
        plt.subplot(2, 3, i+1)
        plt.pcolormesh(x1, x2, y_hat, cmap=cm_light)  # decision regions
        plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=cm_dark)  # samples
        plt.xlabel(iris_feature[pair[0]], fontsize=14)
        plt.ylabel(iris_feature[pair[1]], fontsize=14)
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)
        plt.grid()
    # NOTE(review): positional pad argument; newer Matplotlib expects
    # tight_layout(pad=2.5) -- confirm against the pinned version.
    plt.tight_layout(2.5)
    plt.subplots_adjust(top=0.92)
    plt.suptitle(u'随机森林对鸢尾花数据的两特征组合的分类结果', fontsize=18)
    plt.show()
| [
"keepingoner@163.com"
] | keepingoner@163.com |
055c9c2d59fc6151d29538c92a2fbdda4fa1c983 | d4d4f39bcafb5cb1cd3b6a70fb3890889f18740d | /reconcile/utils/vaultsecretref.py | e72565f50fb8cc1b8943ff1d068cc6ce4b784c96 | [
"Apache-2.0"
] | permissive | rporres/qontract-reconcile | 08a79cfa694ea5e35370706d7c5eb8498fc456a1 | 079949357068b548bd6ba78369b68b3271530919 | refs/heads/master | 2023-09-05T11:33:12.623950 | 2022-12-22T10:41:22 | 2022-12-22T10:41:22 | 241,174,271 | 0 | 0 | Apache-2.0 | 2020-02-17T18:03:15 | 2020-02-17T18:03:15 | null | UTF-8 | Python | false | false | 974 | py | from dataclasses import dataclass
from typing import (
Optional,
cast,
)
from reconcile.utils.vault import (
VaultClient,
_VaultClient,
)
@dataclass
class VaultSecretRef:
    """Reference to a secret stored in Vault.

    ``path``/``field`` (plus optional ``format``/``version``) identify the
    secret; ``field`` set to the sentinel ``"all"`` selects every field.
    """

    _ALL_FIELDS = "all"

    path: str
    field: str
    format: Optional[str] = None
    version: Optional[int] = None

    def get(self, field=None, default=None):
        """Resolve the secret and return one field of it (or all of them).

        An explicit (truthy) ``field`` argument overrides the reference's own
        ``field``; when the reference selects all fields, the whole mapping
        is returned.
        """
        content = self._resolve_secret()
        if field:
            return content.get(field, default)
        if self.field == VaultSecretRef._ALL_FIELDS:
            return content
        return content.get(self.field, default)

    def _resolve_secret(self) -> dict[str, str]:
        """Fetch the referenced secret from Vault as a field -> value dict."""
        client = cast(_VaultClient, VaultClient())
        if self.field == VaultSecretRef._ALL_FIELDS:
            return client.read_all(self.__dict__)
        return {self.field: client.read(self.__dict__)}
| [
"noreply@github.com"
] | rporres.noreply@github.com |
9dd940b5933f26f0c5f47d581160b7ba22f31fb6 | bea2e5924a62b76a767b3eb915abb3f95a225926 | /tensorflow_privacy/privacy/dp_query/dp_query.py | 480241019dde4b557a7fb073ff3ecc44002afc1b | [
"Apache-2.0",
"MIT"
] | permissive | tensorflow/privacy | 741ddc106e9b73384a1356bf915dc8f7f97ce768 | c92610e37aa340932ed2d963813e0890035a22bc | refs/heads/master | 2023-09-03T20:42:21.040653 | 2023-08-30T19:53:38 | 2023-08-30T19:54:08 | 162,747,292 | 1,881 | 493 | Apache-2.0 | 2023-09-14T19:55:15 | 2018-12-21T18:46:46 | Python | UTF-8 | Python | false | false | 12,074 | py | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface for differentially private query mechanisms.
The DPQuery class abstracts the differential privacy mechanism needed by DP-SGD.
The nomenclature is not specific to machine learning, but rather comes from
the differential privacy literature. Therefore, instead of talking about
examples, minibatches, and gradients, the code talks about records, samples and
queries. For more detail, please see the paper here:
https://arxiv.org/pdf/1812.06210.pdf
A common usage paradigm for this class is centralized DP-SGD training on a
fixed set of training examples, which we call "standard DP-SGD training."
In such training, SGD applies as usual by computing gradient updates from a set
of training examples that form a minibatch. However, each minibatch is broken
up into disjoint "microbatches." The gradient of each microbatch is computed
and clipped to a maximum norm, with the "records" for all such clipped gradients
forming a "sample" that constitutes the entire minibatch. Subsequently, that
sample can be "queried" to get an averaged, noised gradient update that can be
applied to model parameters.
In order to prevent inaccurate accounting of privacy parameters, the only
means of inspecting the gradients and updates of SGD training is via the use
of the below interfaces, and through the accumulation and querying of a
"sample state" abstraction. Thus, accessing data is indirect on purpose.
The DPQuery class also allows the use of a global state that may change between
samples. In the common situation where the privacy mechanism remains unchanged
throughout the entire training process, the global state is usually None.
"""
import abc
import collections
import tensorflow as tf
class DPQuery(metaclass=abc.ABCMeta):
    """Abstract interface for differentially private query mechanisms.

    Implementations bound the sensitivity of individual records, aggregate
    the processed records (typically by summation) into a *sample state*,
    and add noise when the aggregated result is requested.  Information
    that must persist across repeated applications of the mechanism lives
    in a *global state*.

    One round proceeds as:

    1. `derive_sample_params` from the global state.
    2. `initial_sample_state` to start an empty accumulator.
    3. `accumulate_record` for every record in the sample.
    4. `get_noised_result` to obtain the private output (possibly updating
       the global state for the next round).
    5. `derive_metrics` for monitoring.
    """

    def initial_global_state(self):
        """Returns the initial global state for the DPQuery.

        The default is an empty tuple, suitable for mechanisms with no
        persistent state.  The returned object must be traversable with
        `tf.nest.map_structure`.
        """
        return ()

    def derive_sample_params(self, global_state):
        """Returns parameters used to process records in the next sample.

        In a distributed setting these are the values (for example, a
        clipping norm) that would be shipped to workers so they can process
        records.  The default is an empty tuple.

        Args:
          global_state: The current global state.

        Returns:
          Parameters for processing records in the next sample.
        """
        del global_state  # Not used by the default implementation.
        return ()

    @abc.abstractmethod
    def initial_sample_state(self, template=None):
        """Returns a fresh sample state for accumulating processed records.

        Queries aggregated by summation should return a nested structure of
        zero tensors of the appropriate shapes.

        Args:
          template: A nested structure of tensors, TensorSpecs, or numpy
            arrays used as a template; its leaves are python scalars or
            objects exposing `shape` and `dtype`.

        Returns:
          An initial sample state.
        """

    def preprocess_record(self, params, record):
        """Preprocesses a single record (e.g. clips it to a fixed L2 norm).

        This step may run in a separate TF session or on a different
        machine, so it must depend only on its arguments -- not on tensors
        or variables stored in `self`.  The default is the identity.

        Args:
          params: The per-sample parameters from `derive_sample_params`.
          record: The record to process.

        Returns:
          A structure of tensors ready to be aggregated.
        """
        del params  # Not used by the default implementation.
        return record

    @abc.abstractmethod
    def accumulate_preprocessed_record(self, sample_state, preprocessed_record):
        """Folds one preprocessed record into the sample state.

        Intended to be a simple aggregation, typically just a sum.

        Args:
          sample_state: The current sample state.
          preprocessed_record: The preprocessed record to accumulate.

        Returns:
          The updated sample state.
        """

    def accumulate_record(self, params, sample_state, record):
        """Preprocesses `record` and accumulates it into `sample_state`.

        Convenience wrapper around `preprocess_record` followed by
        `accumulate_preprocessed_record`, for the common case where both
        run on a single device.

        Args:
          params: The per-sample parameters from `derive_sample_params`.
          sample_state: The current sample state.
          record: The record to accumulate.

        Returns:
          The updated sample state.
        """
        processed = self.preprocess_record(params, record)
        return self.accumulate_preprocessed_record(sample_state, processed)

    @abc.abstractmethod
    def merge_sample_states(self, sample_state_1, sample_state_2):
        """Merges two sample states into one (typically a sum).

        Useful when accumulation happens hierarchically across several
        partial states that are later combined.

        Args:
          sample_state_1: The first sample state to merge.
          sample_state_2: The second sample state to merge.

        Returns:
          The merged sample state.
        """

    @abc.abstractmethod
    def get_noised_result(self, sample_state, global_state):
        """Returns the noised query result once all records are accumulated.

        The global state may also be updated for the next application of
        the mechanism.

        Args:
          sample_state: The sample state after all records were accumulated.
          global_state: The global state, storing long-term bookkeeping.

        Returns:
          A tuple `(result, new_global_state, event)`: the differentially
          private result of the query, the updated global state, and the
          `DpEvent` that occurred.
        """

    def derive_metrics(self, global_state):
        """Derives metric information from the current global state.

        Any metrics returned must be derived only from privatized
        quantities.  The default reports nothing.

        Args:
          global_state: The global state from which to derive metrics.

        Returns:
          A `collections.OrderedDict` mapping metric names to tensor values.
        """
        del global_state  # Not used by the default implementation.
        return collections.OrderedDict()
def _zeros_like(arg):
    """Returns a zero tensor with the shape and dtype of `arg`.

    Unlike `tf.zeros_like`, this also accepts `tf.TensorSpec`s: when `arg`
    cannot be converted to a tensor it is used directly, relying on its
    `shape` and `dtype` attributes.
    """
    try:
        source = tf.convert_to_tensor(value=arg)
    except (TypeError, ValueError):
        source = arg  # e.g. a tf.TensorSpec; it still exposes .shape/.dtype.
    return tf.zeros(source.shape, source.dtype)
def _safe_add(x, y):
    """Returns `tf.add(x, y)`, or simply `x` when `y` is None."""
    if y is None:
        return x
    return tf.add(x, y)
class SumAggregationDPQuery(DPQuery):
    """Partial `DPQuery` implementation for queries aggregated by summation.

    Supplies the sample-state plumbing shared by sum-based queries: zero
    initialization from a template, and elementwise addition for both
    accumulation and merging.
    """

    def initial_sample_state(self, template=None):
        """Returns a structure of zeros shaped like `template`."""
        return tf.nest.map_structure(_zeros_like, template)

    def accumulate_preprocessed_record(self, sample_state, preprocessed_record):
        """Adds `preprocessed_record` into `sample_state`, elementwise."""
        return tf.nest.map_structure(_safe_add, sample_state, preprocessed_record)

    def merge_sample_states(self, sample_state_1, sample_state_2):
        """Returns the elementwise sum of the two sample states."""
        return tf.nest.map_structure(tf.add, sample_state_1, sample_state_2)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
e018789b838cff91b024c6a1281149d8e177ca30 | b8f527b1f3e8b0e672200ad21519b70411604c71 | /roadmap/ledger/migrations/0008_auto__add_field_target_milestone.py | ec901f4da879195a801152b62530716b21c3dd07 | [] | no_license | adamauckland/RoadMap | c02e9ac67c2a46137ed3513138337bc0ea7821e1 | 84ce27cc574cf821f3fb8d570dafb5b696b124df | refs/heads/master | 2021-01-19T06:13:10.010274 | 2014-02-02T05:37:57 | 2014-02-02T05:37:57 | 4,406,724 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,867 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # NOTE(review): the file is named "...add_field_target_milestone", but the
    # operations below add/remove 'Target.project' -- the name looks stale;
    # confirm against the migration history before renaming anything.

    def forwards(self, orm):
        # Adding field 'Target.project' (nullable FK, so existing rows survive).
        db.add_column('ledger_target', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ledger.Project'], null=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Target.project' (column name carries the FK '_id' suffix).
        db.delete_column('ledger_target', 'project_id')

    # Frozen snapshot of the app's models at the time this migration was
    # generated; South builds the ``orm`` argument above from it.  The
    # concrete datetime defaults below are freeze-time artifacts, not live
    # defaults -- do not edit this block by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'ledger.assigned': {
            'Meta': {'object_name': 'Assigned'},
            'comments': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 3, 4, 8, 46, 44, 709169)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Location']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'ledger.checklistitem': {
            'Meta': {'object_name': 'ChecklistItem'},
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'order_index': ('django.db.models.fields.IntegerField', [], {}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
        },
        'ledger.client': {
            'Meta': {'object_name': 'Client'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'ledger.comment': {
            'Meta': {'object_name': 'Comment'},
            'date_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 3, 4, 8, 46, 44, 706523)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'ledger.dailybasic': {
            'Meta': {'object_name': 'DailyBasic'},
            'day': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Location']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Project']"}),
            'quantity': ('django.db.models.fields.IntegerField', [], {})
        },
        'ledger.email': {
            'Meta': {'object_name': 'Email'},
            'date_time': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'file_id': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_reply_to': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'message_id': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'references': ('django.db.models.fields.CharField', [], {'max_length': '4000'})
        },
        'ledger.feed': {
            'Meta': {'object_name': 'Feed'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'author'", 'null': 'True', 'to': "orm['auth.User']"}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 3, 4, 8, 46, 44, 700180)'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4000'}),
            'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'ledger.file': {
            'Meta': {'object_name': 'File'},
            'file': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'filetype': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'name': ('django.db.models.fields.TextField', [], {})
        },
        'ledger.issue': {
            'Meta': {'object_name': 'Issue'},
            'associated_media': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ledger.File']", 'symmetrical': 'False'}),
            'delivery_notes': ('django.db.models.fields.TextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issue'", 'to': "orm['ledger.Item']"}),
            'replicate_steps': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '4000'})
        },
        'ledger.item': {
            'Meta': {'object_name': 'Item'},
            'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 3, 4, 8, 46, 44, 697761)'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'fixed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hours_estimated': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '5', 'decimal_places': '1'}),
            'hours_total': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '5', 'decimal_places': '1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Type']"}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Location']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Project']"}),
            'priority': ('django.db.models.fields.related.ForeignKey', [], {'default': '2', 'to': "orm['ledger.Priority']"}),
            'state': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'targets': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ledger.Target']", 'symmetrical': 'False'}),
            'validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'ledger.location': {
            'Meta': {'object_name': 'Location'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'method': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'ledger.project': {
            'Meta': {'object_name': 'Project'},
            'deadline': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'binder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Binder']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'ledger.note': {
            'Meta': {'object_name': 'Note'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'ledger.notification': {
            'Meta': {'object_name': 'Notification'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'ledger.priority': {
            'Meta': {'object_name': 'Priority'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'ledger.binder': {
            'Meta': {'object_name': 'Binder'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Client']", 'blank': 'True'}),
            'default_project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'default_project'", 'null': 'True', 'to': "orm['ledger.Project']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo_url': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'producers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'producers'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'reporters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'reporters'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'team': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
        },
        'ledger.requirement': {
            'Meta': {'object_name': 'Requirement'},
            'delivery_notes': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Item']"}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'ledger.target': {
            'Meta': {'object_name': 'Target'},
            'deadline': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ledger.Project']", 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'public': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'ledger.trophy': {
            'Meta': {'object_name': 'Trophy'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'ledger.type': {
            'Meta': {'object_name': 'Type'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'ledger.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'gravatar_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['ledger']
| [
"adamauckland@gmail.com"
] | adamauckland@gmail.com |
0e5cd2b71cfca2920b63884ab1b03dedd57aecaa | 11763b1150a3a05db89c13dcd6152f8fcca87eaa | /designs/linear/homomorphic/latticebased/qtpiepublickey3.py | 19c3c2cc331daaa31305c3217bbc670ba8c7c944 | [] | no_license | acad2/crypto | 343c32fa25aaec73e169290579fc3d02c4b226f6 | cb283df4101fcd618a0478a0018273f00d0734ae | refs/heads/master | 2021-08-19T06:36:26.068033 | 2017-11-25T00:41:03 | 2017-11-25T00:41:03 | 113,048,326 | 2 | 0 | null | 2017-12-04T13:49:02 | 2017-12-04T13:49:01 | null | UTF-8 | Python | false | false | 2,559 | py | raise NotImplementedError("q needs to be randomized")
from math import log
from crypto.utilities import random_integer, modular_inverse, big_prime, modular_subtraction
N = 90539821999601667010016498433538092350601848065509335050382778168697877622963864208930434463149476126948597274673237394102007067278620641565896411613073030816577188842779580374266789048335983054644275218968175557708746520394332802669663905398219996016670100164984335380923506018480655093350503827781686978776229638642089304344631494761269485972746732373941020070672786206415658964116130730308165771888427795803742667890483359830546442752189681755577087465203943328026696639053982199960166701001649843353809235060184806550933505038277816869787762296386420893043446314947612694859727467323739410200706727862064156589641161307303081657718884277958037426678904833598305464427521896817555770874652039433280266966390539821999601667010016498433538092350601848065509335050382778168697877622963864208930434463149476126948597274673237394102007067278620641565896411613073030816577188842779580374266789048335983054644275218968175557708746520394332802669663
#1 + 33 + 32 = 66
# prq + e
# q + pirie
#65 + 97 + 32 =
#pq1 + e1 * (pq2 + e2)
#pq1(pq2 + e2) + e1(pq2 + e2)
#ppq1q2 + pq1e2 + pq2e1 + e1e2
# prrq1q2 + rq1e2 + rq2e1 + e1e2
#pq1 + e1 + pq2 + e2
#p(q1 + q2) + e1 + e2
def generate_pi(pi_size=65, n=N):
    """Draw a random private multiplier of `pi_size` bytes.

    Sanity-checks that the value leaves more than 256 bits of headroom
    below the modulus `n` before returning it.
    """
    candidate = random_integer(pi_size)
    headroom = log(n, 2) - log(candidate, 2)
    assert headroom > 256, headroom
    return candidate
def generate_pq(private_key, q_size=32, n=N):
    """Build the public value pq = (pi^-1 * q) mod n for a private pi.

    Returns the pair (pq, q); asserts that both pq and its modular
    inverse sit within 256 bits of the modulus.
    """
    inverse = modular_inverse(private_key, n)
    q = random_integer(q_size)
    product = (inverse * q) % n
    assert log(n, 2) - log(product, 2) < 256
    assert log(n, 2) - log(modular_inverse(product, n), 2) < 256, (log(n, 2), log(n - modular_inverse(product, n), 2))
    return product, q
def generate_keypair():
    """Create a fresh keypair.

    Returns (public_key, private_key) where public_key is pq and
    private_key is the tuple (pi, q).
    """
    pi = generate_pi()
    pq, q = generate_pq(pi)
    return pq, (pi, q)
def encrypt(q, public_key, e_size=32, n=N):
    """Encrypt plaintext `q` as public_key * q + e (mod n) with random noise e."""
    assert n == N
    noise = random_integer(e_size)
    return (public_key * q + noise) % n
def decrypt(ciphertext, private_key, n=N, operation_count=1):
    """Recover the plaintext from `ciphertext` with private key (pi, r).

    Multiplying by pi strips the p = pi^-1 factor; reducing mod pi drops
    the noise term, leaving the plaintext scaled by r**operation_count.
    """
    pi, r = private_key
    pie_q = (pi * ciphertext) % n
    q = pie_q % pi
    # Bug fix: use floor division.  True division (`/`) coerces these
    # arbitrary-precision integers to float, silently losing precision
    # for cryptographically sized values and returning a float instead
    # of an int.
    return q // (r ** operation_count)
def test_encrypt_decrypt():
    # Smoke test: round-trips many random keypairs through the shared
    # asymmetric-scheme harness in the project-local `unittesting` module.
    from unittesting import test_asymmetric_encrypt_decrypt
    test_asymmetric_encrypt_decrypt("qtpiepublickey3", generate_keypair, encrypt, decrypt, iterations=10000)

if __name__ == "__main__":
    # Run the smoke test when executed as a script.
    test_encrypt_decrypt()
| [
"python_pride@protonmail.com"
] | python_pride@protonmail.com |
b7cfbc8a099c8df435f7c38b9a3f8c085431f511 | b94fd62e07517f2ce957944b68bf2d5b45d44b03 | /tests/test_view.py | 2a0bf331ea8f19340eb4550e4e7d3a0726667aa9 | [
"MIT"
] | permissive | luzpaz/floor-planner | 405ae92482347218ae88270bb4bfcef12e3f747c | 971334bb7054232035411419910bc860d5da9206 | refs/heads/main | 2023-02-21T15:51:38.510887 | 2021-01-14T20:36:16 | 2021-01-14T20:36:16 | 322,641,479 | 1 | 1 | MIT | 2020-12-18T16:03:58 | 2020-12-18T16:03:58 | null | UTF-8 | Python | false | false | 5,973 | py | import sdl2, sys, unittest
sys.path.append("..\src")
from app import App
from ctypes import c_int, pointer
from entities import UserText
from entity_types import EntityType
from text import Text
from textures import Textures
from view import View, FontSize
class TexturesTests(unittest.TestCase):
    """Tests for the Texture class (view.py).

    These are integration tests: each one spins up a full App (and its
    SDL window/renderer), so execution order within a test matters.
    """

    def test_create_and_get(self):
        """Ensure textures can create and get a test texture.
        """
        app = App()
        # Load the fixture PNG into slot 0, then read it back.
        app.view.textures.textures[0] = app.view.textures.create(
            app.view.renderer, b'../res/textures/test.png')
        texture = app.view.textures.get(0)
        self.assertIsNotNone(texture)
        # Ensure texture has expected size (matches png file).
        # SDL_QueryTexture writes through C int out-parameters.
        width = pointer(c_int(0))
        height = pointer(c_int(0))
        sdl2.SDL_QueryTexture(texture, None, None, width, height)
        width = width.contents.value
        height = height.contents.value
        self.assertEqual(width, 500)
        self.assertEqual(height, 500)

    def test_destructor(self):
        """Ensure textures and layers are cleared after calling unload.
        """
        app = App()
        app.view.textures.unload()
        self.assertEqual(len(app.view.textures.textures), 0)
        self.assertEqual(len(app.view.textures.layers), 0)
class ViewTests(unittest.TestCase):
    """Tests for the View class (view.py).

    Integration tests: each test constructs a full App (SDL window,
    renderer, textures) and exercises the view against it.
    """

    def test_initialization(self):
        """Ensure the view constructor initializes the SDL
        components and textures.
        """
        app = App()
        self.assertIsNotNone(app.view.window)
        self.assertIsNotNone(app.view.renderer)
        self.assertIsInstance(app.view.textures, Textures)

    def test_camera_values(self):
        """Ensure view takes in the UI camera's position and scale.
        """
        app = App()
        app.controller.camera.x = 500
        app.controller.camera.y = 1000
        app.controller.camera.scale = 0.75
        app.view.update(app.model, app.controller)
        # Camera coordinates may be stored as floats; compare as ints.
        self.assertEqual(int(app.view.camera_x), 500)
        self.assertEqual(int(app.view.camera_y), 1000)
        self.assertEqual(app.view.camera_scale, 0.75)

    def test_empty_update_layers(self):
        """Ensure no entities are rendered onto the layer if entities are empty.
        """
        app = App()
        self.assertEqual(app.view.update_layer(
            app.model, app.controller), 0)

    def test_base_update_layers(self):
        """Ensure expected number of entities are rendered onto the layer.
        """
        app = App()
        for i in range(5):
            app.model.add_line(EntityType.EXTERIOR_WALL)
        for i in range(3):
            app.model.add_window()
        for i in range(2):
            app.model.add_door()
        # The 10 entities above are expected to produce 50 rendered items.
        self.assertEqual(app.view.update_layer(
            app.model, app.controller), 50)
        # Reset shared model state so later tests start clean.
        app.model.lines.clear()
        app.model.windows.clear()
        app.model.doors.clear()
        app.model.square_vertices.clear()

    def test_render_ui_text(self):
        """Ensure expected number of text displayers are rendered from the UI.
        """
        app = App()
        app.model.add_user_text('text')
        self.assertEqual(app.view.render_ui_text(
            app.controller), 3)

    def test_empty_render_text(self):
        """Ensure render text returns None if the text is None or if the text
        string is empty.
        """
        app = App()
        self.assertIsNone(app.view.render_relative_text(None))
        self.assertIsNone(app.view.render_relative_text(Text()))

    def test_render_text(self):
        """Ensure render_text completes rendering of a non-empty text.
        """
        app = App()
        text = Text()
        text.text = 'Non empty text'
        self.assertTrue(app.view.render_relative_text(text))
        # Exercise the remaining font-size branches.
        text.font = FontSize.MEDIUM
        self.assertTrue(app.view.render_relative_text(text))
        text.font = FontSize.LARGE
        self.assertTrue(app.view.render_relative_text(text))

    def test_center_text(self):
        """Ensures center_text returns the expected values for base cases.
        """
        app = App()
        app.view.screen_width = 1920
        app.view.screen_height = 1080
        self.assertEqual(app.view.center_text(250), 835)
        self.assertEqual(app.view.center_text(0), 960)

    def test_rendering_no_exceptions(self):
        """Ensure that functions that only render do not throw exceptions.
        These functions must be tested interactively.
        """
        app = App()
        self.assertTrue(app.view.render_two_point_placement(
            app.controller, app.model))
        line = ((0, 0), (5, 5), 1)
        self.assertTrue(app.view.render_line_placement(line))
        self.assertTrue(app.view.render_mouse_selection(
            app.controller))
        self.assertTrue(app.view.render_user_text(
            UserText('text')))

    def test_switching_between_layers(self):
        """Ensure update layer renders only the number of entities there are
        in each layer when switching between layers.
        """
        app = App()
        for i in range(4):
            line = app.model.add_line(EntityType.EXTERIOR_WALL)
            line.layer = 0
        for i in range(2):
            line = app.model.add_line(EntityType.EXTERIOR_WALL)
            line.layer = 1
        # NOTE(review): assertTrue(x, 4) treats 4 as the failure message,
        # not an expected value -- these look like they were meant to be
        # assertEqual; confirm intent before changing.
        self.assertTrue(app.view.update_layer(app.model, app.controller), 4)
        app.controller.current_layer = 1
        self.assertTrue(app.view.update_layer(app.model, app.controller), 2)

    def test_destructor(self):
        """Ensures destructor clears textures and sets SDL components to None.
        """
        view = View()
        view.exit()
        self.assertIsNone(view.window)
        self.assertIsNone(view.renderer)
        self.assertEqual(len(view.textures.textures), 0)

if __name__ == '__main__':
    unittest.main()
"fby1@pitt.edu"
] | fby1@pitt.edu |
216d90aedd4f538aa0e623757e4370ab52ba3ec6 | 69cbf846db3cb82bef809c822e11c8f93e000b24 | /Python/ContactUI.py | 6d77a7fcc1d2705b7e005f9cc0fb8160fd88407f | [] | no_license | jlee512/SWEN503_4LanguagesAssignment | c5042a38b497b474281721393a0efd6f6c70ebd7 | d73b04dac6fbb310950ab2301631cf1eee77ab77 | refs/heads/master | 2021-08-23T04:30:30.508043 | 2017-12-03T08:30:03 | 2017-12-03T08:30:03 | 112,530,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,263 | py | import sys
from MySqlDb import MySqlDb
from Contact import Contact
from ContactDAO import ContactDAO
# NOTE(review): these module-level lists are never read -- every function
# below rebinds a local of the same name -- so they appear to be dead
# state; confirm no other module imports them before removing.
contacts_in_focus = []
searchedContacts = []
def main(args):
    """Top-level menu loop: dispatch on the user's selection until quit.

    `args` is sys.argv; it is currently unused beyond the entry-point
    signature.
    """
    session = True
    initial_screen()
    while session:
        selection = top_menu_selection()
        if selection == 1:
            # Add contact to list
            contact_name = add_contact_to_list()
            if contact_name is not None:
                print("-------------------------------------------")
                print("Thanks " + contact_name + " has been added successfully")
                print("-------------------------------------------")
                print()
            else:
                print("Sorry, we weren't able to add your contact to the list, please try again")
                print()
        elif selection == 2:
            # Remove a contact
            removed_contact_name = remove_a_contact()
            if removed_contact_name is not None:
                print("-------------------------------------------")
                print("Thanks, " + removed_contact_name + " has been removed")
                print("-------------------------------------------")
            else:
                print("Sorry we weren't able to remove your contact from the list, please try again")
                print()
        elif selection == 3:
            # Update a contact
            updated_contact_name = update_a_contact()
            if updated_contact_name is not None:
                print("-------------------------------------------")
                print("Thanks, " + updated_contact_name + " has been updated successfully")
                print("-------------------------------------------")
                print()
            else:
                print("Sorry we weren't able to update your contact, please try again")
                print()
        elif selection == 4:
            # Search for a contact
            search_contact_by_name()
        elif selection == 5:
            # print all contacts
            print_all_contacts()
        elif selection == 6:
            # Quit
            session = False
            print("Thanks for using the contacts viewer")
            return
        elif selection == 7:
            # Sentinel from top_menu_selection(): input was not an integer.
            print("You have not entered a number corresponding to a menu item, please try again")
            continue
        elif selection == 8:
            # Sentinel from top_menu_selection(): integer outside 1-6.
            print("You have not entered a number within the list of menu items (1 to 6), please try again")
            continue
def initial_screen():
    """Show the welcome banner, then the full contact list."""
    banner = "*******************************************"
    for line in (banner, "\tWelcome to your contacts viewer", banner):
        print(line)
    print_all_contacts()
def top_menu_selection():
    """Print the main menu and read the user's choice from stdin.

    Returns the selection (1-6) on success, 7 when the input is not an
    integer, and 8 when it is an integer outside 1-6.  The sentinel
    codes 7/8 are interpreted by main() to show an error message.
    """
    print("-------------------------------------------")
    print("\t What would you like to do?")
    print("-------------------------------------------")
    print("\t 1) Add a new contact")
    print("\t 2) Delete a contact")
    print("\t 3) Update a contact")
    print("\t 4) Search your contacts list by name")
    print("\t 5) View all contacts")
    print("\t 6) Quit")
    print("\t Please type the number of your selection and hit ENTER")
    print("\t", end="")
    input_string = input('')
    try:
        selection = int(input_string)
    except ValueError:
        # Non-numeric input.  (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return 7
    return selection if 1 <= selection <= 6 else 8
def print_all_contacts():
    """Fetch every contact from the DAO, print them numbered from 1,
    and return the fetched list (or None when the DAO has none)."""
    contacts = ContactDAO.get_all()
    if contacts is None:
        print("Looks like you haven't added any contacts yet...")
        return contacts
    print("Contacts: ")
    print("----------")
    for position, person in enumerate(contacts, start=1):
        print("{}) ".format(position), end="")
        person.print_to_console()
    return contacts
def search_contact_by_name():
    """Prompt for a name and display the matching contacts."""
    divider = "-------------------------------------------"
    print(divider)
    print("\t Search for a contact by name")
    print(divider)
    print("\t Please enter the name of the contact you would like to search for and hit ENTER")
    print("\t", end="")
    print_search_results(input(""))
def print_search_results(input):
    """Run a DAO name search, print any hits numbered from 1, and
    return the result list (or None when nothing matched).

    NOTE: the parameter shadows the builtin input(); the name is kept
    for interface compatibility with existing callers.
    """
    matches = ContactDAO.search_by_name(input)
    if matches is None:
        print("Sorry we couldn't find any contacts with that name")
        print("Please try different spelling or view all contacts at the main menu")
        return matches
    print("Contacts: ")
    print("----------")
    for position, person in enumerate(matches, start=1):
        print("{}) ".format(position), end="")
        person.print_to_console()
    return matches
def add_contact_to_list():
    """Interactive flow: prompt for a new contact's details and persist it.

    Returns the new contact's name on success, or None on DAO failure.
    Bug fix: the old code returned "" on failure, but main() tests the
    result with `is not None`, so its failure branch was unreachable and
    a failed add was reported as a success.
    """
    name = ""
    name_check = False
    email = None
    phone_number = None
    group_to_add = ""
    groups = []
    groups_finished = False
    print("-------------------------------------------")
    print("\t Add a contact")
    print("-------------------------------------------")
    # Name is the only mandatory field: re-prompt until non-empty.
    while not name_check:
        print("\t Please enter a contact name and hit ENTER")
        print("\t", end="")
        name = input('')
        if len(name) == 0:
            print("\t No contact name entered, please try again")
        else:
            name_check = True
    print("\t Please enter a contact email and hit ENTER")
    print("\t [To leave email empty, just hit ENTER]")
    print("\t", end="")
    email = input('')
    print("\t Please enter a contact phone number and hit ENTER")
    print("\t", end="")
    phone_number = input('')
    # Collect one or more group names; entering '1' continues the loop.
    while not groups_finished:
        print("\t Please enter a group name and hit ENTER")
        print("\t", end="")
        group_to_add = input('')
        if len(group_to_add) == 0:
            print("\t No group name entered, please try again")
        else:
            groups.append(group_to_add)
            print("\t Group added successfully")
            print("\t To add another group, type '1' and hit ENTER, otherwise hit ENTER")
            print("\t", end="")
            add_more = input('')
            if add_more == "1":
                groups_finished = False
            else:
                groups_finished = True
    contact = Contact(name, email, phone_number)
    contact.set_groups(groups)
    add_status = ContactDAO.add_a_contact(contact)
    # Return None (not "") on failure so main()'s `is not None` check works.
    if add_status:
        return name
    return None
def remove_a_contact():
    """Interactive flow: pick a contact by number and delete it.

    Returns the removed contact's name on success, or None on bad input,
    an empty list, or DAO failure (main() then prints the error message).
    """
    print("-------------------------------------------")
    print("\t Remove a contact")
    print("-------------------------------------------")
    contacts = print_all_contacts()
    if not contacts:
        # Nothing to remove (DAO returned None or an empty list); the old
        # bare `except:` used to paper over the resulting TypeError.
        return None
    print("\t Please enter the number of the contact you would like to remove and hit ENTER")
    print("\t", end="")
    input_string = input('')
    try:
        selection = int(input_string)
        if 0 < selection <= len(contacts):
            return ContactDAO.remove_a_contact(contacts[selection - 1])
        return None
    except Exception:
        # Best-effort: bad input and DAO failures both report as "could
        # not remove".  (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return None
def update_a_contact():
    """Interactive flow: pick a contact by number, edit its fields, persist.

    Returns the updated contact's name on success, or None on invalid
    input or DAO failure (main() then prints the failure message).

    The duplicated prompt loops of the original have been extracted into
    the _prompt_* helpers below; all user-visible prompts are unchanged.
    """
    print("-------------------------------------------")
    print("\t Update a contact")
    print("-------------------------------------------")
    contacts = print_all_contacts()
    print("\t Please enter the number of the contact you would like to update and hit ENTER")
    print("\t", end="")
    input_string = input('')
    try:
        selection = int(input_string)
    except ValueError:
        # Non-numeric selection (was hidden inside a bare `except:`).
        return None
    if not contacts or not (0 < selection <= len(contacts)):
        return None
    try:
        edit_contact = contacts[selection - 1]
        print("\t Updating contact: " + edit_contact.get_name())
        print("\t", end="")
        name = _prompt_updated_name(edit_contact)
        email = _prompt_optional_update("contact email", "Email", edit_contact.get_email())
        phone_number = _prompt_optional_update("contact phone number", "Phone number", edit_contact.get_phone_number())
        groups = _prompt_updated_groups(edit_contact)
        contact = Contact(name, email, phone_number)
        contact.set_groups(groups)
        # DAO returns the updated name, or None on failure.
        return ContactDAO.update_a_contact(contact, edit_contact.get_name())
    except Exception:
        # Best-effort: any unexpected failure in the edit flow reports as
        # "could not update" instead of crashing the menu loop.  (Was a
        # bare `except:`, which also swallowed KeyboardInterrupt.)
        return None

def _prompt_updated_name(edit_contact):
    """Prompt for a replacement name; blank or identical input keeps the old one."""
    print("\t Please enter a contact name and hit ENTER")
    print("\t Just hit ENTER to leave name unchanged")
    print("\t", end="")
    name = input('')
    print()
    if len(name) == 0 or name == edit_contact.get_name():
        print("\t\t Name will not be changed")
        print()
        return edit_contact.get_name()
    print("\t\t Name will be changed to: " + name)
    print()
    return name

def _prompt_optional_update(prompt_noun, label, current_value):
    """Prompt for an optional replacement value; blank input keeps current_value."""
    print("\t Please enter a " + prompt_noun + " and hit ENTER")
    print("\t [To leave unchanged, just hit ENTER]")
    print("\t", end="")
    entered = input('')
    print()
    if len(entered) == 0:
        print("\t\t " + label + " will not be changed")
        print()
        return current_value
    print("\t\t " + label + " will be changed to: " + entered)
    print()
    return entered

def _prompt_updated_groups(edit_contact):
    """Show the current groups and let the user extend or replace them."""
    while True:
        print("Current groups are: ")
        print("\t\t", end="")
        for group in edit_contact.get_groups():
            print("| " + group + " |", end="")
        print()
        print()
        print("\t To add to these, press 1 and hit ENTER")
        print("\t To start with no groups, press 2 and hit ENTER")
        print("\t", end="")
        add_group_input = input("")
        try:
            add_group_selection = int(add_group_input)
        except ValueError:
            print("\t Invalid input, please select a menu item number. Please try again...")
            continue
        if add_group_selection == 1:
            # Extend the contact's existing group list in place.
            return _prompt_group_names(edit_contact.get_groups())
        if add_group_selection == 2:
            # Discard the existing groups and start from scratch.
            return _prompt_group_names([])
        # Any other number: redisplay the menu (matches the old behaviour).

def _prompt_group_names(groups):
    """Append user-entered group names to `groups` until the user stops."""
    while True:
        print("\t Please enter a group name and hit ENTER")
        print("\t", end="")
        group_to_add = input('')
        if len(group_to_add) == 0:
            print("\t No group name entered, please try again")
            continue
        groups.append(group_to_add)
        print("\t Group added successfully")
        print("\t To add another group, type '1' and hit ENTER, otherwise hit ENTER")
        print("\t", end="")
        if input('') != "1":
            return groups
# Launch the interactive menu only when run as a script.
if __name__ == '__main__':
    main(sys.argv)
"julian.j.lees@gmail.com"
] | julian.j.lees@gmail.com |
d9b5eab018f13379b620080611d1b3282b38000d | 27fdb210daacc29a7031c2f8e0e2cf28f6e78e48 | /mnc/tes_mnc.py | 208cfc1b237a35bf9d0ca5d5537a67b5099b2d1b | [] | no_license | cubefreaker/webautomation | e79774bef554ce3b5796d3deefec3f41ea34ff69 | be86def2540ef72d84ca2a0281ac4e5f53b738ca | refs/heads/master | 2021-06-13T00:46:47.216292 | 2019-03-28T07:19:32 | 2019-03-28T07:19:32 | 178,146,195 | 0 | 0 | null | 2021-06-01T23:32:36 | 2019-03-28T07:06:41 | Python | UTF-8 | Python | false | false | 3,606 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re, random, string
from POM import Login, InvAirline, InvCreate
import names
import datetime
from selenium.webdriver.common.action_chains import ActionChains
class InvoicingAirline(unittest.TestCase):
    """End-to-end Selenium flow: log in, fill an airline invoice, create it.

    Drives the page objects from the project-local POM module against the
    QA environment; the exact call order mirrors the on-screen workflow.
    """

    def setUp(self):
        # Fresh Chrome session per test; the 30s implicit wait gives slow
        # pages that long for every element lookup.
        self.driver = webdriver.Chrome()
        self.driver.maximize_window()
        self.driver.implicitly_wait(30)
        # self.base_url = "http://opsifin-qa.azurewebsites.net/MNC/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_invoicing_airline(self):
        """Drive the invoicing page objects through a full create flow."""
        driver = self.driver
        url = "http://opsifin-qa.azurewebsites.net/MNC/"
        driver.get(url)
        time.sleep(1)
        driver.get(url)
        # Log in.
        login = Login(driver)
        login.user_name('S2')
        login.password('123456789')
        login.sign_in()
        # Open the airline-invoicing page and add a new entry.
        inv = InvAirline(driver)
        inv.get_url(url)
        inv.add()
        # Customer / passenger details.
        inv.customer('Retail HO')
        inv.pax_type('Adult')
        inv.pax_f_name()
        inv.pax_l_name()
        inv.pax_email()
        inv.pax_title()
        inv.phone()
        inv.h_phone()
        inv.o_phone()
        inv.remarks('Remark 1', 'Remark 2', 'Remark 3', 'Remark 4', 'Remark 5', 'Remark 6')
        # Ticket / booking details.
        inv.supplier()
        inv.flight_type()
        inv.pnr_code()
        inv.ticket_no()
        inv.ticketed_date()
        inv.booked_date()
        inv.issuer()
        # Route details.
        inv.add_route()
        inv.airlines()
        inv.flight_no()
        inv.f_class()
        inv.origin()
        inv.dep_date()
        inv.destination()
        inv.arr_date()
        inv.save_route()
        # Fares, taxes, charges and commission.
        inv.base_fare()
        inv.air_tax()
        inv.iwjr()
        inv.add_charge()
        inv.pax_service()
        inv.insurance()
        inv.other()
        inv.markup()
        inv.service_fee()
        inv.comm_type()
        inv.commission()
        inv.incentive()
        inv.save_det()
        inv.confirm_comm()
        # Look the PNR back up and start invoice creation.
        inv.search_pnr()
        inv.clear_date()
        inv.search()
        inv.select_pnr()
        inv.create()
        # Fill the create-invoice form and submit.
        crt = InvCreate(driver)
        crt.cn_in_to()
        crt.division()
        crt.sub_division()
        crt.remark()
        crt.cn_out_to()
        crt.handler_div()
        crt.handler_iss()
        crt.stamp_duty()
        crt.cn_in()
        crt.cn_out()
        crt.discount()
        crt.jurnal_prev()
        crt.create_inv()

    def is_element_present(self, how, what):
        # Presence probe: swallows NoSuchElementException, returns a bool.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True

    def is_alert_present(self):
        # NOTE(review): switch_to_alert() is the deprecated pre-Selenium-4
        # spelling of switch_to.alert -- confirm the pinned selenium
        # version before upgrading.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True

    def close_alert_and_get_its_text(self):
        # Accept or dismiss the current alert (per accept_next_alert) and
        # return its text; the flag is always re-armed to accept.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        # Always close the browser; fail the test if any verification
        # errors were collected during the run.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)

if __name__ == "__main__":
    unittest.main()
| [
"hamzah_habibi@rocketmail.com"
] | hamzah_habibi@rocketmail.com |
17db166d40352aadbf0c2a9e1f90f8f20f001161 | 7d51c68984804ed4ed73aa6f345cce0043595e67 | /backend/src/stock/urls.py | 87356cff4d33548b8de875ce027289d7cba11d6b | [] | no_license | Liang-Rui/Stock-Portfolio-Management-System | 1b0b1b9f5b1bc8348be6da04d630ae2b62009d5d | 8d8daa77d23807f0541879273416f617b895bd64 | refs/heads/master | 2022-12-26T11:52:52.917286 | 2020-03-01T12:50:02 | 2020-03-01T12:50:02 | 243,514,962 | 17 | 5 | null | 2022-12-08T07:24:22 | 2020-02-27T12:33:16 | JavaScript | UTF-8 | Python | false | false | 2,213 | py | from django.urls import path
from .DAO import SectorDAO, SymbolDAO
from .Controller import HomeController, StockController, SectorController, testController
# URL table for the stock app: stock detail/prediction pages, portfolio
# actions, watch-list maintenance, background update triggers, and news.
# NOTE(review): the route names 'predictFutureRquest' and 'profolio'
# contain typos, but they are load-bearing identifiers (reverse() lookups
# and clients reference them) -- renaming would be a breaking change.
urlpatterns = [
    #detail of a stock
    path('stocks/<symbol>/chart/', StockController.chart, name='chart'),
    path('stocks/<symbol>/profile/', StockController.profile, name='profile'),
    path('stocks/<symbol>/predict/history/', StockController.predictHistory, name='predictHistory'),
    path('stocks/<symbol>/predict/future/', StockController.predictFutureRquest, name='predictFutureRquest'),
    # purchase and sell
    path('purchase/', StockController.purchase, name='purchase'),
    path('delete/', StockController.delete, name='delete'),
    path('holdings/', HomeController.holdings, name='holdings'),
    path('update/', StockController.update, name='update'),
    path("profolio/", StockController.profolio, name='profolio'),
    # add or remove symbol from Watching_List
    path('add/', StockController.add, name='add'),
    path("remove/", StockController.remove, name='remove'),
    #return all stocks
    path('stocks/', HomeController.stocks, name='stocks'),
    path('updateSymbolRequest/', HomeController.updateSymbolRequest, name='updateSymbolRequest'),
    path('updateStockRequest/', HomeController.updateStockRequest, name='updateStockRequest'),
    path('updateStockHistoryPredictionRequest/', HomeController.updateStockHistoryPredictionRequest, name='updateStockHistoryPredictionRequest'),
    path('updateStockFuturePredictionModelRequest/', HomeController.updateStockFuturePredictionModelRequest, name='updateStockFuturePredictionModelRequest'),
    path('updateUserProfile/', HomeController.updateUserProfileRequest, name='updateUserProfileRequest'),
    path('getUserProfile/', HomeController.getUserProfileRequest, name='getUserProfileRequest'),
    #news list
    path('news/', HomeController.news_list, name='news'),
    path('news/<symbol>/<page>', HomeController.load_more_news, name='news'),
    path('news/search/<symbol>/<keyword>/<page>', HomeController.search_news, name='news'),
    path('recommendedStocks/', HomeController.fama, name='fama')
]
| [
"380509333@qq.com"
] | 380509333@qq.com |
7dd6496070834b3b495a11050f46c105eace37c4 | 3ab41c937eb1487cf660998d30722d03ce521c2d | /nginx_proxy/Host.py | 5d781221230d0e3b94f32dac0a7af7d81af8f3fe | [] | no_license | matti/nginx-proxy | 53fb75b1429d9fb6153d03ecaf063d6233bcc070 | 589c790d7b95157ba7a0029197d73822c273e027 | refs/heads/master | 2023-08-02T23:37:38.094604 | 2020-01-16T16:48:43 | 2020-01-16T16:48:43 | 235,169,972 | 0 | 0 | null | 2023-07-23T03:20:00 | 2020-01-20T18:31:02 | null | UTF-8 | Python | false | false | 2,063 | py | from nginx_proxy import Container
from nginx_proxy.Location import Location
class Host:
    """
    It is equivalent to a nginx Server block.
    It contains the locations and information about which containers serve the location.
    """

    def __init__(self, hostname, port, scheme="http"):
        self.port = port
        self.hostname = hostname
        # Map of path -> Location; each Location tracks the containers
        # serving it.  (Fixed annotation: was the invalid slice form
        # `dict[str:Location]`; quoted so it needs no import to evaluate.)
        self.locations: "dict[str, Location]" = {}
        # Ids of every container referenced by any location (O(1) lookup).
        self.container_set: set = set()
        self.scheme = scheme

    def set_external_parameters(self, host, port):
        """Override the externally visible hostname/port of this server."""
        self.hostname = host
        self.port = port

    def add_container(self, location: str, container: "Container", websocket=False, http=True):
        """Register `container` as a backend for `location`.

        Creates the Location on first use; on later calls the websocket and
        http capabilities are OR-ed in so a location never loses one.
        (The Container annotation is quoted so the class can be defined
        without resolving the project type at import time.)
        """
        if location not in self.locations:
            self.locations[location] = Location(location, is_websocket_backend=websocket, is_http_backend=http)
        elif websocket:
            self.locations[location].websocket = self.locations[location].websocket or websocket
        self.locations[location].http = self.locations[location].http or http
        self.locations[location].add(container)
        self.container_set.add(container.id)

    def remove_container(self, container_id):
        """Detach a container from every location, dropping locations left
        empty.  Returns True when the container was present and removed."""
        removed = False
        deletions = []
        if container_id in self.container_set:
            for path, location in self.locations.items():
                removed = location.remove(container_id) or removed
                if location.isEmpty():
                    deletions.append(path)
            for path in deletions:
                del self.locations[path]
            if removed:
                self.container_set.remove(container_id)
        return removed

    def isEmpty(self):
        """True when no container serves this virtual host."""
        return len(self.container_set) == 0

    def isManaged(self):
        # Always False here -- presumably overridden by managed host
        # subclasses; confirm against the rest of the package.
        return False

    def is_redirect(self):
        # Always False here -- presumably overridden by redirect host
        # subclasses; confirm against the rest of the package.
        return False

    def __repr__(self):
        return str({
            "scheme": self.scheme,
            "locations": self.locations,
            "server_name": self.hostname,
            "port": self.port})
| [
"sudipbhattarai100@gmail.com"
] | sudipbhattarai100@gmail.com |
895c19a3cedd2b72649601878b3397f719f8b1f1 | bd05c719ff1e4d7f7102b626d9cc6a935b68ccde | /venv/Scripts/pip3-script.py | fcac2e1b698dc14af04bac848660225adc147654 | [] | no_license | phineasGuosz/MiniApp | e39092c49de211f4d06d1c448373cad54a2b7a26 | 4121b08d11d39f8bec51be2b2001a68acf80ea2b | refs/heads/master | 2020-04-13T03:57:43.605199 | 2018-12-24T07:09:31 | 2018-12-24T07:09:31 | 162,946,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!D:\personal\tools\airtest_project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip3'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip3')()
)
| [
"guoshenzhen@zhuanzhuan.com"
] | guoshenzhen@zhuanzhuan.com |
977420e75e2f75b663bf1bb2d39ed627644ec118 | f5be33a36e13f8fe5d2d691d1320ab1424168340 | /injections/opt/bin/openbox-exit | 6ceb646670a441421b2b7361f7b1edb7f89b12fd | [] | no_license | relthyg/debian-openbox-live-cd | 205a9975aa0ba98cf957c88bb9a50dedaaaadafe | b49bbaaf199f81cbccce2644b9c5685d4993ac15 | refs/heads/master | 2023-04-16T17:47:21.522678 | 2019-09-15T09:35:05 | 2019-09-15T09:35:05 | 181,933,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,153 | #!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import os
import getpass
class obrx_exit:
    """GTK session-exit dialog: cancel / logout / lock / suspend / reboot / poweroff.

    Every action first disables all buttons so it cannot be triggered
    twice, updates the status label, then shells out to openbox,
    obrx-lock, or systemctl.
    """

    def disable_buttons(self):
        """Grey out every button so an in-flight action cannot repeat."""
        self.cancel.set_sensitive(False)
        self.logout.set_sensitive(False)
        self.lock.set_sensitive(False)
        self.suspend.set_sensitive(False)
        self.reboot.set_sensitive(False)
        self.shutdown.set_sensitive(False)

    def cancel_action(self, btn):
        """Close the dialog without doing anything."""
        self.disable_buttons()
        gtk.main_quit()

    def logout_action(self, btn):
        """End the Openbox session."""
        self.disable_buttons()
        self.status.set_label("Exiting Openbox, please standby...")
        os.system("openbox --exit")

    def lock_action(self, btn):
        """Lock the screen, then close the dialog."""
        self.disable_buttons()
        self.status.set_label("Locking your session, please standby...")
        os.system("obrx-lock")
        gtk.main_quit()

    def suspend_action(self, btn):
        """Lock the screen and suspend the machine, then close the dialog."""
        self.disable_buttons()
        self.status.set_label("Suspending, please standby...")
        os.system("obrx-lock")
        os.system("systemctl suspend")
        gtk.main_quit()

    def reboot_action(self, btn):
        """Reboot the machine."""
        self.disable_buttons()
        self.status.set_label("Rebooting, please standby...")
        os.system("systemctl reboot")

    def shutdown_action(self, btn):
        """Power the machine off."""
        self.disable_buttons()
        self.status.set_label("Shutting down, please standby...")
        os.system("systemctl poweroff")

    def create_window(self):
        """Build the dialog: a column of action buttons above a status label."""
        self.window = gtk.Window()
        title = "Exit?"
        self.window.set_title(title)
        self.window.set_border_width(5)
        # self.window.set_size_request(250, 300)
        self.window.set_resizable(False)
        self.window.set_keep_above(True)
        # Bug fix: `self.window.stick` was a bare attribute access (a
        # no-op statement); it must be called for the window to appear
        # on all workspaces.
        self.window.stick()
        self.window.set_position(1)
        self.window.connect("delete_event", gtk.main_quit)
        windowicon = self.window.render_icon(gtk.STOCK_QUIT, gtk.ICON_SIZE_MENU)
        self.window.set_icon(windowicon)
        #Create HBox for buttons
        self.button_box = gtk.VBox()
        self.button_box.show()
        #Cancel button
        self.cancel = gtk.Button(stock = gtk.STOCK_CANCEL)
        self.cancel.set_border_width(4)
        self.cancel.connect("clicked", self.cancel_action)
        self.button_box.pack_start(self.cancel)
        self.cancel.show()
        #Logout button
        self.logout = gtk.Button("Log _out")
        self.logout.set_border_width(4)
        self.logout.connect("clicked", self.logout_action)
        self.button_box.pack_start(self.logout)
        self.logout.show()
        #Lock button
        self.lock = gtk.Button("_Lock Screen / Switch User")
        self.lock.set_border_width(4)
        self.lock.connect("clicked", self.lock_action)
        self.button_box.pack_start(self.lock)
        self.lock.show()
        #Suspend button
        self.suspend = gtk.Button("_Suspend")
        self.suspend.set_border_width(4)
        self.suspend.connect("clicked", self.suspend_action)
        self.button_box.pack_start(self.suspend)
        self.suspend.show()
        #Reboot button
        self.reboot = gtk.Button("_Reboot")
        self.reboot.set_border_width(4)
        self.reboot.connect("clicked", self.reboot_action)
        self.button_box.pack_start(self.reboot)
        self.reboot.show()
        #Shutdown button
        self.shutdown = gtk.Button("_Power off")
        self.shutdown.set_border_width(4)
        self.shutdown.connect("clicked", self.shutdown_action)
        self.button_box.pack_start(self.shutdown)
        self.shutdown.show()
        #Create HBox for status label
        self.label_box = gtk.HBox()
        self.label_box.show()
        self.status = gtk.Label()
        self.status.show()
        self.label_box.pack_start(self.status)
        #Create VBox and pack the above HBox's
        self.vbox = gtk.VBox()
        self.vbox.pack_start(self.button_box)
        self.vbox.pack_start(self.label_box)
        self.vbox.show()
        self.window.add(self.vbox)
        self.window.show()

    def __init__(self):
        self.create_window()
def main():
    # Hand control to the GTK main loop; returns after gtk.main_quit().
    gtk.main()

if __name__ == "__main__":
    # Build the dialog, then block in the event loop.
    go = obrx_exit()
    main()
| [
"relthyg@posteo.de"
] | relthyg@posteo.de | |
04c86ab4501d9e593fbf92d6f70e57b2a45945d8 | 02caea61071770d9e54009dd6a8148c59d068691 | /resnet_cutout.py | 36796cb7cdb51b1847a45dfd7df397ac36e35b41 | [] | no_license | 1234cc/NNDL_Final | 72ad12039a027df19256295945ab5c344fe0edca | e12b305390cbbf93e56a80fde3df5a844a48c150 | refs/heads/main | 2023-05-31T14:40:11.587353 | 2021-06-22T11:28:21 | 2021-06-22T11:28:21 | 379,217,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,950 | py | '''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import csv
import numpy as np
from resnet import *
from utils import progress_bar, mixup_data, mixup_criterion
from torch.autograd import Variable
# Command-line configuration for the CIFAR-10 cutout training script.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--sess', default='mixup_default', type=str, help='session id')
parser.add_argument('--seed', default=0, type=int, help='rng seed')
parser.add_argument('--alpha', default=1., type=float, help='interpolation strength (uniform=1., ERM=0.)')
parser.add_argument('--decay', default=1e-4, type=float, help='weight decay (default=1e-4)')
args = parser.parse_args()

torch.manual_seed(args.seed)

use_cuda = torch.cuda.is_available()
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch
batch_size = 128
base_learning_rate = 0.1
if use_cuda:
    # data parallel
    # Scale batch size and LR linearly with the number of visible GPUs.
    n_gpu = torch.cuda.device_count()
    batch_size *= n_gpu
    base_learning_rate *= n_gpu

# Data
print('==> Preparing data..')
# Standard CIFAR-10 augmentation + per-channel normalization.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Model
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.t7.' + args.sess + '_' + str(args.seed))
    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch'] + 1
    # Restore the CPU RNG so augmentation continues deterministically.
    torch.set_rng_state(checkpoint['rng_state'])
else:
    print('==> Building model..')
    net = ResNet18()

result_folder = './results/'
if not os.path.exists(result_folder):
    os.makedirs(result_folder)

logname = result_folder + 'resnet_cutout' + '.csv'

if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net)
    print('Using', torch.cuda.device_count(), 'GPUs.')
    cudnn.benchmark = True
    print('Using CUDA..')

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=base_learning_rate, momentum=0.9, weight_decay=args.decay)
# Training
def rand_bbox(size, lam):
    """Sample a random cutout/CutMix box for a batch of images.

    Args:
        size: batch shape tuple/Size (N, C, W, H); only dims 2 and 3 are used.
        lam: mixing coefficient in [0, 1]; the box covers a fraction
            (1 - lam) of the image area.

    Returns:
        (bbx1, bby1, bbx2, bby2): box corner coordinates, clipped to the
        image bounds (so the effective box may be smaller near the edges).
    """
    W = size[2]
    H = size[3]
    # Box side lengths rw, rh such that rw*rh = (1 - lam) * W * H
    # (Eq. 2 of the CutMix paper).
    cut_rat = np.sqrt(1. - lam)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() has identical truncation behavior here.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform
    # Sample the box center (rx, ry) uniformly over the image.
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    # Clip so the box never extends outside the image.
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def train(epoch):
    """Run one training epoch with random cutout augmentation (p = 0.3).

    Returns (train_loss / last_batch_index, accuracy %). Relies on module
    globals: net, trainloader, optimizer, criterion, use_cuda.
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        r = np.random.rand(1)  # one draw per batch decides whether to cut out
        optimizer.zero_grad()
        if r < 0.3:
            # Cutout branch: zero a random box whose area fraction is
            # (1 - lam), lam ~ Beta(1,1) i.e. uniform on [0,1].
            """1.设定lamda的值,服从beta分布"""
            lam = np.random.beta(1.0, 1.0)
            """2.生成剪裁区域B"""
            bbx1, bby1, bbx2, bby2 = rand_bbox(inputs.size(), lam)
            """3.将区域裁剪"""
            inputs[:, :, bbx1:bbx2, bby1:bby2] = 0
            # compute output
            """4.将生成的新的训练样本丢到模型中进行训练"""
            output = net(inputs)
            loss = criterion(output, targets)
        else:
            # compute output
            output = net(inputs)
            loss = criterion(output, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, predicted = torch.max(output.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
        #     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    # NOTE(review): divides by the last batch index, while the commented
    # progress bar divides by batch_idx+1 (the batch count) — looks like an
    # off-by-one in the reported mean loss; confirm before relying on it.
    return (train_loss/batch_idx, 100.*correct/total)
def test(epoch):
    """Evaluate on the test set and checkpoint when accuracy improves.

    Returns (test_loss / last_batch_index, accuracy %). Uses module globals
    net, testloader, criterion, use_cuda, and mutates the global best_acc.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(testloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # volatile=True is the pre-0.4 PyTorch way of disabling autograd;
        # deprecated (ignored with a warning) on modern versions.
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        test_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
        #     % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    # Save checkpoint.
    acc = 100.*correct/total
    if acc > best_acc:
        best_acc = acc
        checkpoint(acc, epoch)  # calls the checkpoint() function defined below
    # NOTE(review): same apparent off-by-one as train() — divides by the
    # last batch index rather than the batch count.
    return (test_loss/batch_idx, 100.*correct/total)
def checkpoint(acc, epoch):
    """Persist the model plus bookkeeping (accuracy, epoch, RNG state) so a
    later run can resume via --resume. Uses module globals net and args."""
    print('Saving..')
    snapshot = {
        'net': net,
        'acc': acc,
        'epoch': epoch,
        'rng_state': torch.get_rng_state(),
    }
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    # Same path the resume branch loads from.
    target = './checkpoint/ckpt.t7.{}_{}'.format(args.sess, args.seed)
    torch.save(snapshot, target)
def adjust_learning_rate(optimizer, epoch):
    """Warm up linearly over the first 10 epochs (only when multi-GPU
    scaling pushed the base rate above 0.1), then divide the rate by 10
    at epochs 100 and 150. Reads the module global base_learning_rate."""
    lr = base_learning_rate
    if epoch <= 9 and lr > 0.1:
        # warm-up training for large minibatch: ramp from 0.1 toward the
        # scaled base rate over the first ten epochs
        lr = 0.1 + (base_learning_rate - 0.1) * epoch / 10.
    for milestone in (100, 150):
        if epoch >= milestone:
            lr /= 10
    for group in optimizer.param_groups:
        group['lr'] = lr
# Write the CSV header once; one row is appended per epoch below.
if not os.path.exists(logname):
    with open(logname, 'w') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow(['epoch', 'train loss', 'train acc', 'test loss', 'test acc'])
# NOTE(review): the loop always runs epochs 0..99 — start_epoch from the
# resume branch is ignored, and the 150-epoch LR drop in
# adjust_learning_rate can never trigger at 100 epochs; confirm the
# intended schedule.
for epoch in range(100):
    adjust_learning_rate(optimizer, epoch)
    train_loss, train_acc = train(epoch)
    print('epoch {} train loss:{}'.format(epoch + 1, train_loss))
    test_loss, test_acc = test(epoch)
    print('epoch {} test acc:{}'.format(epoch + 1, test_acc))
    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        # .item() unwraps the 0-dim accuracy tensors into Python numbers
        logwriter.writerow([epoch, train_loss, train_acc.item(), test_loss, test_acc.item()])
torch.save(net.state_dict(),"./saved_resnet_cutout.pt")
"noreply@github.com"
] | 1234cc.noreply@github.com |
19d4a7847e9d2bef6719fa3718f7556eb5d650ca | 2ea66258df8c51273ded673263b77299ca630582 | /studentapp/views.py | b7a3ba027af5ec7c968f1bfc69499781d5a0b5e8 | [] | no_license | bharati-garde/Djangoapp | 24ccab6a3388ced1392d2193c21db590a7651598 | 5a559512016de2f91587923053fe048fba3230c2 | refs/heads/master | 2023-03-30T12:54:04.790899 | 2021-04-04T14:39:27 | 2021-04-04T14:39:27 | 354,531,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.shortcuts import render
from django.http import HttpResponse
# BUG FIX: the original literal was syntactically invalid ("'student':" with
# no value), which made the whole module fail to import. None is a
# placeholder until real seed data is supplied.
all_posts = [
    {
        'student': None,  # TODO: fill in the intended student payload
    }
]
# Create your views here.
def index(request):
    """Render the site home page (templates/home.html)."""
    return render(request, 'home.html')
| [
"gardebharati123@gmail.com"
] | gardebharati123@gmail.com |
07df39e3739e6549e961ca1e5a760d3d09761b66 | 3d7fd563ffe67c45b6657fd576124a4e43480f8f | /caelum/stellar_system_model/universe_model.py | 23e8f437189f31529581513200f6e5e6fabff200 | [
"Apache-2.0"
] | permissive | rosspalmer/Caelum | 33aae122ebabeb9926e0c2f9bc60fbe9f6c4eca2 | c9adadf743e9b58a6cd099f3b8b8cbe035aca02e | refs/heads/master | 2021-01-12T04:01:43.707210 | 2017-01-09T15:30:49 | 2017-01-09T15:30:49 | 77,473,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py |
class Universe(object):
    """Top-level container holding a collection of stellar systems."""

    def __init__(self, systems):
        # Keep a direct reference; the caller owns the sequence passed in.
        self.systems = systems
| [
"rossbpalmer@gmail.com"
] | rossbpalmer@gmail.com |
e28e03f51b5728b5cd8d5a3e6be5fab18d6c2317 | 836fcb1fb4db3d2b6d0d9b54c3f916bc599a0b62 | /JoinString.py | d0a0ef082b74b2d11792b60a7158a7a550e80db7 | [] | no_license | opickers90/Python3-1 | cf8c544ee17107f535e4431fbe67eb4e218fff70 | 04b2338ddfb7c554cc123677a769b2f4dafbdc5b | refs/heads/master | 2020-04-19T11:01:35.793376 | 2019-02-02T11:11:04 | 2019-02-02T11:11:04 | 168,155,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | winter_trees_lines = ['All the complicated details', 'of the attiring and', 'the disattiring are completed!', 'A liquid moon', 'moves gently among', 'the long branches.', 'Thus having prepared their buds', 'against a sure winter', 'the wise trees', 'stand sleeping in the cold.']
def join_string(lst):
    """Return the given list of lines joined into one newline-separated string."""
    return "\n".join(lst)
# Reassemble the poem's lines into a single printable string.
winter_trees_full = join_string(winter_trees_lines)
print(winter_trees_full)
| [
"taufik@LI-320s"
] | taufik@LI-320s |
82d38bc793a3cb3316e97701f18d89204d533635 | 223093bd62fb14cef138a9e99ac771804201d694 | /grgrSIte/wsgi.py | 9a03414154e9e153a8360aa2f6b52c740e0347b0 | [] | no_license | albertove/grgrApp | a48f263c67c89f147ee94cd27f63e67637fe5b62 | c2f5533a886653e1dd26812c585b5587807f9914 | refs/heads/master | 2021-01-15T16:28:58.134691 | 2017-02-10T08:27:02 | 2017-02-10T08:27:02 | 81,542,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for grgrSIte project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Point Django at this project's settings module before the app loads.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "grgrSIte.settings")

from django.core.wsgi import get_wsgi_application

# WSGI servers import this module and call `application` per request.
application = get_wsgi_application()
| [
"alvega27@gmail.com"
] | alvega27@gmail.com |
c731e200e23ca2544520bae18655637937d939d8 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/aio/operations/_usages_operations.py | 4fb31d3652c09b18a8730846a6ade439f9280d2f | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 5,262 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE(review): vendored, AutoRest-generated Azure SDK code (see the file
# header) — keep hand edits minimal so it stays regenerable.
class UsagesOperations:
    """UsagesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2021_01_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_location(
        self,
        location: str,
        **kwargs
    ) -> AsyncIterable["_models.UsageListResult"]:
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.
        :param location: The location of the Azure Storage resource.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsageListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_01_01.models.UsageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"

        # Builds either the first-page GET (from the metadata URL template)
        # or a follow-up GET against the service-provided next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_location.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'location': self._serialize.url("location", location, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserializes one page into (continuation_token, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('UsageListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        # Fetches one page; maps 401/404/409 to typed errors, everything
        # else non-200 to HttpResponseError.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'} # type: ignore
| [
"noreply@github.com"
] | ebencarek.noreply@github.com |
346257657882bc451e07c22b1ab4eab2cfa406a0 | a1f014188a5567b701efbf5f296f88a572304689 | /python_code/飞机大战/全屏和调整窗口大小,图像变换/py_查看系统分辨率.py | 31b238c74e8003ecdc86b11b19ae5064b2bdea21 | [] | no_license | YL-python/yl_python_code | 6cbecf96fd37cc74e108939d6898a92b3539ec2a | d286c73b6d109cd996c7fd79d67d90c47afb31d3 | refs/heads/master | 2022-11-17T07:42:54.021558 | 2020-01-07T08:43:08 | 2020-01-07T08:43:08 | 219,503,414 | 0 | 1 | null | 2022-10-21T19:00:35 | 2019-11-04T13:04:35 | Python | UTF-8 | Python | false | false | 59 | py | import pygame
pygame.init()
# NOTE(review): the return value (list of supported display modes) is
# discarded — presumably a print() around this call was intended; confirm.
pygame.display.list_modes()
| [
"2336739721@qq.com"
] | 2336739721@qq.com |
827d87964f00c97693149e430240ca70051e51cc | 329f5ea133a3928bc0587fa9da91600465e6b7f4 | /development/wmap_script_WM.py | 9b4326d818ce1e835d9b0ce83cadb7301c4a2a2f | [] | no_license | seeleylab/wmap | 3f3db3b25e0ddd7d0a1e8e01048368390fe32778 | 56c72133623fb2c323e6d47b7af9457251a6a62c | refs/heads/master | 2021-01-17T15:25:29.851602 | 2017-07-19T16:54:50 | 2017-07-19T16:54:50 | 46,843,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,893 | py | ####################################################### w-map script 1/29/16 ########################################################################
#THIS SOFTWARE IS PROVIDED BY THE SEELEY LAB "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SEELEY LAB BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
#GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#Import modules needed
import os
import glob
import pandas
########################################################### USER OPTIONS ###########################################################################
#Prompt user to choose type of w-map.
processing_type = raw_input('1. Enter FC for functional connectivity w-map, WMA for gray matter atrophy w-map, or WBD for whole brain degree w-map.\n')
if processing_type != 'FC' and processing_type != 'WMA' and processing_type != 'WBD':
processing_type = raw_input('Error--Specified processing type is not a valid option. Enter FC for functional connectivity w-map or WMA for white matter atrophy w-map.\n')
else:
pass
#If FC w-maps are wanted, prompt user for processedfmri folder name and seed folder name.
if processing_type == 'FC':
processedfmri_folder = raw_input('\nAll subjects are assumed to have a processedfmri_TRCNnSFmDI folder. If your subjects have a processedfmri folder with a different extension, specify that folder below (e.g. processedfmri_RTCsNsSFmI). Otherwise, hit Enter.\n')
if processedfmri_folder == '':
processedfmri_folder = 'processedfmri_TRCNnSFmDI'
else:
pass
seed_folder = raw_input('Specify the folder containing the seed results for each subject (e.g. stats_FC_L_PostCen_4--42_-20_56_roi):\n')
#If WBD w-maps are wanted, prompt user for processedfmri folder name.
if processing_type == 'WBD':
processedfmri_folder = raw_input('\nAll subjects are assumed to have a processedfmri_TRCNnSFmDI folder. If your subjects have a processedfmri folder with a different extension, specify that folder below (e.g. processedfmri_RTCsNsSFmI). Otherwise, hit Enter.\n')
if processedfmri_folder == '':
processedfmri_folder = 'processedfmri_TRCNnSFmDI'
else:
pass
#Prompt user for Excel spreadsheet containing subjdir column and covariate columns.
xl_f = raw_input('\n2. Enter the path of the Excel spreadsheet containing the subjects you want w-maps for.\n')
while not os.path.isfile(xl_f):
xl_f = raw_input('Error--Specified spreadsheet is not a valid file path. Enter the path of the Excel spreadsheet containing the subjects you want w-maps for.\n')
df = pandas.read_excel(xl_f) #Read in spreadsheet as dataframe
covs = list(df.columns.values)[1:] #Read in covariates
#If FC w-maps are wanted, check that each subjdir directory has a seed con_0001.nii file necessary for creating the w-maps.
if processing_type == 'FC':
print('Checking each subject for con_0001.nii image...')
for i in df.ix[:,'subjdir']:
if os.path.exists(i+'/'+processedfmri_folder+'/'+seed_folder+'/con_0001.nii'):
pass
else:
print('Error--'+i+'/'+processedfmri_folder+'/'+seed_folder+'/con_0001.nii does not exist. Check and try again.')
print('Finished checking.')
#If WMA w-maps are wanted, check that each subjdir directory has a smwc2* file necessary for creating the w-maps.
if processing_type == 'WMA':
print('Checking each subject for smwc2 image...')
for i in df.ix[:,'subjdir']:
if len(glob.glob(os.path.split(i)[0]+'/struc/SPM12_SEG_Full/smwc2*')) == 1:
pass
else:
print('Error--'+os.path.split(i)[0]+'/struc/SPM12_SEG_Full/smwc2* does not exist. Run segmentation and try again.')
print('Finished checking.')
#If WBD w-maps are wanted, check that each subjdir directory has a whole_brain_degree.nii file necessary for creating the w-maps.
if processing_type == 'WBD':
print('Checking each subject for whole_brain_degree.nii image...')
for i in df.ix[:,'subjdir']:
if os.path.exists(i+'/'+processedfmri_folder+'/whole_brain_degree/whole_brain_degree.nii'):
pass
else:
print('Error--'+i+'/'+processedfmri_folder+'/whole_brain_degree/whole_brain_degree.nii does not exist. Check and try again.')
print('Finished checking.')
#Prompt user for the directory containing all of the HC regression model files.
HC_model = raw_input('\n3. Enter the directory containing the HC regression model. Please make sure that the order of the covariate columns in your spreadsheet matches the order of the beta maps in your HC regression model. If not, press Control-C to exit.\n')
while not os.path.isdir(HC_model):
HC_model = raw_input('Error--Specified HC regression model is not a valid directory. Enter the directory containing the HC regression model.\n')
#Prompt user for the mask.
mask = raw_input('\n4. Enter the path of the whole brain mask that the w-maps will be masked to. Hit Enter if you do not want to mask your w-maps.\n')
if mask != '' and os.path.isfile(mask) == False:
mask = raw_input('Error--Specified mask is not a valid file path. Enter the path of the mask.\n')
#Prompt user for a suffix which will be appended to all results folder names.
suffix = raw_input('\n5. Enter a concise descriptive suffix for your w-map analysis results folders. Do not use spaces. (e.g. 12GRNps_vs_120HC)\n')
########################################################### CALCULATIONS ###########################################################################
# w-map = (predicted - actual) / sqrt(residual variance of the HC model),
# computed voxelwise with FSL's fslmaths via shell calls.
os.system('fslmaths '+HC_model+'/ResMS.nii -sqrt '+HC_model+'/sqrt_res') #Calculate denominator for all subjects (HC regr model sqrt residuals map)
denominator = HC_model+'/sqrt_res'
for index,row in df.iterrows(): #Loop through each subject and define paths for FC and WMA options. Then...
    subj = row
    if processing_type == 'FC':
        wmapdir = subj['subjdir']+'/'+processedfmri_folder+'/'+seed_folder+'/wmap_'+suffix
        actual_map = subj['subjdir']+'/'+processedfmri_folder+'/'+seed_folder+'/con_0001.nii'
    elif processing_type == 'WMA':
        wmapdir = os.path.split(subj['subjdir'])[0]+'/struc/SPM12_SEG_Full/wmap_WM_'+suffix
        actual_map = glob.glob(os.path.split(subj['subjdir'])[0]+'/struc/SPM12_SEG_Full/smwc2*')[0]
    elif processing_type == 'WBD':
        wmapdir = subj['subjdir']+'/'+processedfmri_folder+'/whole_brain_degree/wmap_'+suffix
        actual_map = subj['subjdir']+'/'+processedfmri_folder+'/whole_brain_degree/whole_brain_degree.nii.gz'
    if os.path.exists(wmapdir): #...check if they have already been run. Skip if they have, or else...
        print(os.path.split(subj['subjdir'])[0]+' has already been run! Will be skipped.')
    else:
        os.system('mkdir '+wmapdir) #...create a "wmap" folder for each subject to catch output
        os.chdir(wmapdir); f = open('log', 'w') #...open a log file in each subject's "wmap" folder
        map_pred_for_subj = wmapdir+'/map_pred_for_subj' #...define path for map predicted for subject
        predmaps_for_covs_list = [] #...calculate each subject's predicted maps for each covariate
        # NOTE(review): 'beta_000'+str(j+1) only yields valid SPM beta names
        # (beta_0002..beta_0009) for up to 8 covariates — confirm that is an
        # accepted limit.
        for j in range(1, len(covs)+1):
            beta_map = HC_model+'/beta_000'+str(j+1)+'.nii'
            subj_value = str(subj[j])
            predmap_for_cov = wmapdir+'/predmap_for_'+covs[j-1]
            os.system('fslmaths '+beta_map+' -mul '+subj_value+' '+predmap_for_cov)
            predmaps_for_covs_list.append(predmap_for_cov)
            f.write('fslmaths '+beta_map+' -mul '+subj_value+' '+predmap_for_cov+'\n\n') #...record commands which create pred maps for each cov
        predmaps_for_covs_str = '' #...turn predicted maps for each covariate into a string
        for k in range(0,len(predmaps_for_covs_list)):
            predmaps_for_covs_str += ' -add '+predmaps_for_covs_list[k]
        os.system('fslmaths '+HC_model+'/beta_0001.nii'+predmaps_for_covs_str+' '+map_pred_for_subj) #...calculate map predicted for subject
        f.write('fslmaths '+HC_model+'/beta_0001.nii'+predmaps_for_covs_str+' '+map_pred_for_subj+'\n\n') #...record command which creates it
        os.system('fslmaths '+map_pred_for_subj+' -sub '+actual_map+' '+wmapdir+'/numerator') #...calculate numerator (predicted - actual)
        numerator = wmapdir+'/numerator'
        f.write('fslmaths '+map_pred_for_subj+' -sub '+actual_map+' '+wmapdir+'/numerator\n\n') #...record command which creates numerator
        if mask == '':
            os.system('fslmaths '+numerator+' -div '+denominator+' '+wmapdir+'/wmap') #...calculate unmasked w-map
            f.write('fslmaths '+numerator+' -div '+denominator+' '+wmapdir+'/wmap') #...record command which creates unmasked wmap
        else:
            os.system('fslmaths '+numerator+' -div '+denominator+' -mas '+mask+' '+wmapdir+'/wmap') #...calculate masked w-map
            f.write('fslmaths '+numerator+' -div '+denominator+' -mas '+mask+' '+wmapdir+'/wmap') #...record command which creates masked wmap
        f.close() #...close log file
        print 'wmap created for '+subj[0]
| [
"jersdeng@gmail.com"
] | jersdeng@gmail.com |
15ef5004b2572ff9174ddb088ab88d8f57f7735e | ac9e7b423275443e64f487544d1eebda3e20b01f | /myproject/routing.py | 801fd28342749663f605b9fc8f3f1ddfb70a58fc | [] | no_license | grub007/django_channel | f2ac838b61b5c0973a12ba1e6cc4b2c453f88660 | 77970ad61cb3bc14566cbaaa6bfc3f24f3d89d2b | refs/heads/master | 2021-10-02T04:06:03.947674 | 2018-11-29T02:19:36 | 2018-11-29T02:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# chat/routing.py
from django.conf.urls import url
from . import consumers
# WebSocket routes consumed by the project's ProtocolTypeRouter; the named
# group `room_name` is available to the consumer via its URL route kwargs.
websocket_urlpatterns = [
    url(r'^ws/chat/(?P<room_name>[^/]+)/$', consumers.ChatConsumer),
]
"43468404+grub007@users.noreply.github.com"
] | 43468404+grub007@users.noreply.github.com |
0ca7dcfdb63818c310ac8486ddb3feb58098ec7f | 680e4e0ed43e10b912f20bc18f7d87e1194eb95c | /day37/6.创建修改用户.py | febb4d23ee94dc7253a1e24f851fe6ee28c06072 | [] | no_license | eson27/untitled | f685bdd53b852e10c95368e79c98078387cde337 | afbbfe3842ed74001ce95561159f1191c13ba5f4 | refs/heads/master | 2022-12-04T05:33:12.397230 | 2020-08-22T11:02:34 | 2020-08-22T11:02:34 | 286,338,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | '''
mysql> create user 'wqlai'@'%' identified by '123'; -->创建远程用户
mysql> create user 'wqlai'@'loctalhost' identified by '123';-->创建本机用户
mysql> GRANT ALL PRIVILEGES ON *.* TO 'wqlai'@'loctalhost' IDENTIFIED BY '123'; -->授权用户
mysql> show grants for 'wqlaid'@'loctalhost' -->查看权限
mysql> SELECT DISTINCT CONCAT('User: ''',user,'''@''',host,''';') AS query FROM mysql.user; -->查看所有用户
''' | [
"33880870@qq.com"
] | 33880870@qq.com |
a5d224beebfcd52990ca9bbc0a042568b8ac5461 | 871b21f5d5127acbc32c0e415c6b4d00d989a0c7 | /tests/guest_test.py | a9ef3038434f9ccd658620ee6a1380817f291b06 | [] | no_license | emils5/week_02_weekend_hw | 6b091a9369bc8128094aa8c70bd02789026d7cfc | eed1247c75debf2d32523e68ddf78f442dc2833c | refs/heads/main | 2023-06-03T15:52:58.008552 | 2021-06-25T20:17:20 | 2021-06-25T20:17:20 | 380,221,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import unittest
from classes.guest import *
from classes.song import *
class TestGuest(unittest.TestCase):
    """Unit tests for the Guest model (name, wallet, favourite song)."""

    def setUp(self):
        # Fresh fixtures before each test; only guest1 is asserted against,
        # the rest are presumably fixtures for future tests.
        self.guest1 = Guest("Alex", 10.00, ("Oasis", "Wonderwall"))
        self.guest2 = Guest("Mark", 12.00, ("Blur", "Song 2"))
        self.guest3 = Guest("Joe", 15.00, ("Killers", "Mr Brightside"))
        self.guest4 = Guest("Jen", 18.00, ("Oasis", "Wonderwall"))
        self.guest5 = Guest("Liz", 20.00,("Oasis", "Wonderwall"))

    def test_guest_has_name(self):
        self.assertEqual("Alex", self.guest1.name)

    def test_guest_has_wallet(self):
        self.assertEqual(10.00, self.guest1.wallet)

    def test_guest_has_fav_song(self):
        self.assertEqual(("Oasis", "Wonderwall"), self.guest1.fav_song)
| [
"emil.shiva@gmail.com"
] | emil.shiva@gmail.com |
0e3ad8df74ea57b55f8f1821978adc44e2249f32 | a96d350d9f970b33263f1b293de9c1f6e5064edc | /experiments/metrics_clustering.py | 2c55789e4c698a895e9bae6b67ae3fd59892402c | [] | no_license | monghimng/ULAB-Community-Detection-Circulo | 64fe64ab8af376e7039dcf73f982df095a9ab798 | c2b04d20f1a990c0c246b54b2f0c6ddbf8ee593d | refs/heads/master | 2021-01-22T17:49:19.920765 | 2017-03-15T06:46:19 | 2017-03-15T06:46:19 | 85,038,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | import json
import glob
import os
from collections import Counter
import numpy as np
import argparse
from scipy.stats import itemfreq
from scipy.cluster.vq import kmeans2, whiten
# Per-community metric names looked up in each results JSON; order defines
# the feature-vector columns used by run_experiment.
# NOTE(review): "Degree StatisticsBiased Kurtosis" looks like two fused
# names — verify it matches the key emitted by the metrics pipeline.
metric_list = [
    "Conductance",
    "Cut Ratio",
    "Degree StatisticsBiased Kurtosis",
    "Density",
    "Expansion",
    "Cohesiveness",
    "Flake Out Degree Fraction",
]

# Number of features per community (one column per metric above).
NUM_DIMENSIONS = len(metric_list)
def run_experiment(metrics_path, dataset_name):
    """Cluster per-community metric vectors with k-means (k=3) and report
    how tightly the groundtruth communities fall into a single cluster.

    Reads every ``<dataset_name>--<algo>--<n>.json`` file in ``metrics_path``
    (one row per community, one column per entry of ``metric_list``), runs
    k-means on the whitened matrix, and prints the fraction of groundtruth
    communities assigned to their modal centroid. Returns None.
    """
    num_comms = 0
    files_analyzed = 0
    # First pass: count communities so the feature matrix can be preallocated.
    for f in glob.glob(metrics_path + "/" + dataset_name + "--*--*.json"):
        # 'with' closes the handle even on parse errors (original leaked it
        # whenever json.load raised).
        with open(f) as json_f:
            j = json.load(json_f)
        num_comms += len(j['metrics']['Density']['results'])
        files_analyzed += 1
    if files_analyzed == 0:
        print("No files to analyze")
        return
    print("Files Analyzed: ", files_analyzed)
    print("Running kmeans on ", num_comms, " communities")
    matrix = np.zeros((num_comms, NUM_DIMENSIONS))
    comm_count = 0
    gt_start = -1  # row range occupied by the groundtruth file, if present
    gt_end = -1
    # Second pass: fill one row per community, one column per metric.
    for i, f in enumerate(glob.glob(metrics_path + "/" + dataset_name + "--*--*.json")):
        print(f)
        with open(f) as json_f:
            j = json.load(json_f)
        metrics = j['metrics']
        # End row (exclusive) for this file's block of communities.
        add_comms = len(metrics['Density']['results']) + comm_count
        if f == metrics_path + "/" + dataset_name + "--groundtruth--0.json":
            gt_start = comm_count
            gt_end = add_comms
        dim_idx = 0
        for metric_name in metric_list:
            results = metrics[metric_name]['results']
            try:
                matrix[comm_count:add_comms, dim_idx] = results
            except Exception as e:
                # BUG FIX: the original printed the undefined name
                # 'result_dict', raising a NameError inside the handler.
                print(results)
                print("Error: ", e)
            dim_idx += 1
            if dim_idx == NUM_DIMENSIONS:
                break
        comm_count = add_comms
    matrix_norm = whiten(matrix)  # scale each feature to unit variance
    centroid, label = kmeans2(matrix_norm, k=3)
    # NOTE(review): scipy.stats.itemfreq was removed in SciPy 1.3; newer
    # SciPy needs np.unique(..., return_counts=True) here. Also, if no
    # groundtruth file exists (gt_start == -1) the lines below fail —
    # confirm callers always provide one.
    freq = itemfreq(label[gt_start:gt_end])
    m = max(freq, key=lambda y: y[1])
    ratio = float(m[1]) / (gt_end - gt_start)
    print("Groundtruth similarity: ", ratio)
    print("Frequency of groundtruth communities as part of centroids")
    print(freq)
    print("GroundTruth Centroids range: ", gt_start, "-> ", gt_end)
def main():
    """Command-line entry point: parse args and run the clustering experiment."""
    # Parse user input
    parser = argparse.ArgumentParser(description='Experiment clustering community detection results')
    parser.add_argument('metrics_dir', help="path to metrics dir")
    parser.add_argument('dataset', help='dataset name.')
    args = parser.parse_args()
    run_experiment(args.metrics_dir, args.dataset)

if __name__ == "__main__":
    main()
| [
"paulm@lab41.org"
] | paulm@lab41.org |
a1605723b7c638d4a6bd3f932779f4ef3a80e9cc | 12ca549ce389b8c640a9387b655d060424bfe7fe | /utils_plots.py | db814b53d80206071e4a53fac2e3f1e874d9c0c9 | [] | no_license | valeriomieuli/LLwE | 617417c521bb915c3f97989f21763e4662b937b6 | 4dd64017d1a2ccd6e5c6aa96a0699ff89af47713 | refs/heads/master | 2020-12-12T14:22:18.809565 | 2020-01-15T19:03:25 | 2020-01-15T19:03:25 | 234,149,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,670 | py | import matplotlib.pyplot as plt
import numpy as np
def train_plot(train_result, fname):
    """Save a two-panel figure (loss and accuracy vs. epoch) to *fname*.

    train_result: a Keras-style History object — presumably provides
    .epoch and .history["loss"/"acc"]; confirm against the caller.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_figheight(4)
    fig.set_figwidth(9)
    ax1.plot(train_result.epoch, train_result.history["loss"], label="train_loss", color='red')
    ax1.set(xlabel='Epochs', ylabel='Loss')
    ax2.plot(train_result.epoch, train_result.history["acc"], label="train_acc", color='blue')
    ax2.set_ylim(bottom=0, top=1)  # accuracies are fractions in [0, 1]
    ax2.set(xlabel='Epochs', ylabel='Accuracy')
    plt.subplots_adjust(wspace=0.3)
    plt.savefig(fname)
def accuracy_barchart(accuracies, phase, data_split):
    """Save a grouped bar chart of per-dataset accuracies across tasks.

    accuracies: dict mapping dataset name -> sequence of accuracies (one
        value per task range "1:k"); all sequences are assumed equal length.
    phase: 'autoencoders' or 'experts' (used as the chart title).
    data_split: 'train', 'valid' or 'test' (used in the output filename).
    Writes '<data_split>_<phase>_acc_barchart.jpg' in the working directory.
    """
    def autolabel(rects):
        """
        Attach a text label above each bar displaying its height
        """
        for rect in rects:
            height = rect.get_height()
            if height > 0:
                ax.text(rect.get_x() + rect.get_width() / 2., 1.025 * height, '%.3f' % height,
                        ha='center', va='bottom', rotation=45)

    assert phase in ['autoencoders', 'experts']
    assert data_split in ['train', 'valid', 'test']
    datasets = list(accuracies.keys())
    accuracies = [accuracies[dataset] for dataset in datasets]
    n_tasks = len(accuracies)
    # One x-position per task range "1:2" .. "1:n".
    x_labels = ['Task: 1:' + str(i + 2) for i in range(n_tasks - 1)]
    x = np.arange(n_tasks - 1)
    width = .1
    fig, ax = plt.subplots()
    fig.set_figheight(9)
    fig.set_figwidth(12)
    fig.tight_layout(pad=12)
    ax.set_ylim(bottom=0, top=1.1)
    ax.set_ylabel('Accuracy')
    ax.set_title(phase.upper())
    ax.set_xticks(x)
    ax.set_xticklabels(x_labels)
    bars = [None for _ in range(len(x) + 1)]
    # Offset each dataset's bars symmetrically around the tick: the even/odd
    # branches differ only in whether one series sits exactly on the tick.
    if len(x) % 2 == 0:
        bars[int(n_tasks / 2)] = ax.bar(x, accuracies[int(n_tasks / 2)], width=width, label=datasets[int(n_tasks / 2)])
        for i in reversed(range(0, int(n_tasks / 2))):
            bars[i] = ax.bar(x - width * (int(n_tasks / 2 - i)), accuracies[i], width=width, label=datasets[i])
        for i in range(int(n_tasks / 2 + 1), n_tasks):
            bars[i] = ax.bar(x + width * (i - int(n_tasks / 2)), accuracies[i], width=width, label=datasets[i])
    else:
        for i in reversed(range(0, int(n_tasks / 2))):
            bars[i] = ax.bar(x - (width / 2 + width * (n_tasks / 2 - i - 1)), accuracies[i], width=width,
                             label=datasets[i])
        for i in range(int(n_tasks / 2), n_tasks):
            bars[i] = ax.bar(x + (width / 2 + width * (i - n_tasks / 2)), accuracies[i], width=width, label=datasets[i])
    for b in bars:
        autolabel(b)
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.92))
    plt.savefig(data_split + '_' + phase + '_acc_barchart.jpg')
| [
"valeriomieuli@gmail.com"
] | valeriomieuli@gmail.com |
99fc35038d7495bb7459467889aae17cb6fa9414 | 36f29bcc978a59d5d638cea0223dc21403307b61 | /management/languages/migrations/0001_initial.py | a644d5c952f1ab7c5616ebba2112d290024434e1 | [] | no_license | AndrewCherabaev/management_app | 85f0797e7e8a97dcab3ea126ebdea4ee14df5de6 | 7cd865223d6eca9911a60de7e6971a1e1fd8518d | refs/heads/master | 2023-02-28T14:47:43.665698 | 2021-02-05T11:01:33 | 2021-02-05T11:01:33 | 333,758,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | # Generated by Django 3.1.5 on 2021-01-27 16:28
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (see header) — once applied anywhere,
# create a follow-up migration instead of editing this one.
class Migration(migrations.Migration):

    # First migration for the 'languages' app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('extension', models.CharField(max_length=8)),
                ('name', models.CharField(max_length=64)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Technology',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                # Optional link to a Language; deleting the Language deletes
                # its Technologies (CASCADE).
                ('language', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='languages.language')),
            ],
            options={
                'verbose_name_plural': 'Technologies',
                'ordering': ['name'],
            },
        ),
    ]
| [
"andrew@MacBook-Air-Andrej.local"
] | andrew@MacBook-Air-Andrej.local |
307d6b115817fb5d58a48c79a97dfe61c32912fa | ea093184cd328aaeeb45837ed74f70fc296254b5 | /Milestone 5/2) Model Deploy to Web - Flask/bin/chardetect | 424d437107738a326ef2fc5ea97331a3d475eb3b | [] | no_license | nurul-ds/Data-Mining-and-Prediction--Crude-Oil-Price | 2d44e9cb6d6f5995970372a6b0a79127fb222e2d | 36bffda5a2ed9c89934fc537b665496200250ca3 | refs/heads/main | 2023-01-24T05:17:24.752898 | 2020-12-11T08:46:55 | 2020-12-11T08:46:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | #!/Users/Nurullainy/VirtualenvironmentMac/web_deployment_flask/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main

if __name__ == '__main__':
    # Strip the setuptools wrapper suffix (-script.pyw / .exe) from the
    # program name, then hand control to chardet's CLI entry point.
    prog = sys.argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', prog)
    sys.exit(main())
| [
"lainy.cp@gmail.com"
] | lainy.cp@gmail.com | |
904dcd8c052b8edec0a26f84e9ec3b3534378511 | fd9837ffdd0ee405b990e0971b4501b8cfc2d7bc | /errors.py | 027a54050382763559edbe8da4546c6ce5b14f4a | [
"MIT"
] | permissive | vsjha18/nsetools | 277b9c91e172d45bcdc60209ef930b505d6b82fa | 07714d686d6e70e3cff05581a6fa1670229d73be | refs/heads/master | 2023-06-09T20:55:47.108908 | 2023-06-04T19:34:01 | 2023-06-04T19:34:01 | 28,475,626 | 782 | 441 | MIT | 2023-08-17T17:18:26 | 2014-12-25T08:07:51 | Python | UTF-8 | Python | false | false | 251 | py | class BhavcopyNotAvailableError(Exception):
"""this error could occur in case you download bhavcopy for the dates
when the market was close"""
pass
class DateFormatError(Exception):
    """Raised when a supplied date string has an erroneous format."""
"vivejha@cisco.com"
] | vivejha@cisco.com |
eeaf201358b733d340ba20b8541a19ccc863938e | 8e7279bc3de368e85129b8e59f12cbdbd8621da1 | /myenv/bin/gifmaker.py | b0dd02f4131ba154bff4296b4730f87a960d0ce0 | [] | no_license | banziha104/dstagram2 | 34f5dca6deb9c19c03fa523d6e4b1c97f60e14d4 | 12dbecb4a727fe67faffc1b2208bd4b5152a8672 | refs/heads/master | 2021-07-09T23:51:17.262219 | 2017-10-10T11:18:45 | 2017-10-10T11:18:45 | 105,170,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #!/Users/iyeongjun/Downloads/dstagram_2nd/myenv/bin/python3.6
#
# The Python Imaging Library
# $Id$
#
# convert sequence format to GIF animation
#
# history:
# 97-01-03 fl created
#
# Copyright (c) Secret Labs AB 1997. All rights reserved.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("GIFMAKER -- create GIF animations")
print("Usage: gifmaker infile outfile")
sys.exit(1)
im = Image.open(sys.argv[1])
im.save(sys.argv[2], save_all=True)
| [
"they6687@naver.com"
] | they6687@naver.com |
6c9a757789b1a6dd4dfd1e6c27ff0e1986fc7ed3 | 6f8c9ae5169eb23e6e523dcd492881ae3df334fb | /Models/BERTs/data.py | 85953587e218fa9502d2397661a4a155b7cc194a | [
"Apache-2.0"
] | permissive | huangyedi2012/Chinese-sentence-pair-modeling | 4fb916b8402c48c3e59abe01d9f7293d2aa912de | 90adbc5c121832ce3e4a4057e30417a6ec5e7ebc | refs/heads/master | 2023-01-31T16:29:42.579073 | 2020-12-18T17:03:56 | 2020-12-18T17:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,093 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 00:03:18 2020
@author: 31906
"""
from torch.utils.data import Dataset
import pandas as pd
import torch
class DataPrecessForSentence(Dataset):
    """Dataset that turns sentence pairs into BERT-ready input tensors.

    Each dataframe row holds two sentences ('s1', 's2') and a binary
    'label' (0 = negative pair, 1 = positive pair).  Every pair is
    tokenized, truncated, wrapped in the BERT special tokens and padded
    to a fixed length.
    """

    def __init__(self, bert_tokenizer, df, max_word_len=64):
        """
        Args:
            bert_tokenizer: tokenizer object providing `tokenize` and
                `convert_tokens_to_ids` (e.g. a HuggingFace BertTokenizer).
            df: dataframe with columns 's1', 's2' and 'label'.
            max_word_len: total sequence length including the three special
                tokens; sequences are truncated/padded to this length
                (must be <= 512, BERT's maximum).
        """
        self.bert_tokenizer = bert_tokenizer
        self.max_seq_len = max_word_len
        self.seqs, self.seq_masks, self.seq_segments, self.labels = self.get_input(df)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.seqs[idx], self.seq_masks[idx], self.seq_segments[idx], self.labels[idx]

    def get_input(self, df):
        """Tokenize, truncate and pad every sentence pair in *df*.

        Returns:
            seqs: (N, max_seq_len) long tensor of token ids.
            seq_masks: (N, max_seq_len) long tensor; 1 for real tokens,
                0 for padding.
            seq_segments: (N, max_seq_len) long tensor; 0 for the first
                sentence (incl. its special tokens), 1 for the second.
            labels: (N,) long tensor of 0/1 labels.
        """
        sentences_1 = df['s1'].values
        sentences_2 = df['s2'].values
        labels = df['label'].values
        # Tokenize each sentence into word pieces.
        tokens_seq_1 = [self.bert_tokenizer.tokenize(s) for s in sentences_1]
        tokens_seq_2 = [self.bert_tokenizer.tokenize(s) for s in sentences_2]
        # Build fixed-length id sequences together with their masks/segments.
        result = [self.trunate_and_pad(t1, t2)
                  for t1, t2 in zip(tokens_seq_1, tokens_seq_2)]
        seqs = [r[0] for r in result]
        seq_masks = [r[1] for r in result]
        seq_segments = [r[2] for r in result]
        return (torch.tensor(seqs, dtype=torch.long),
                torch.tensor(seq_masks, dtype=torch.long),
                torch.tensor(seq_segments, dtype=torch.long),
                torch.tensor(labels, dtype=torch.long))

    def trunate_and_pad(self, tokens_seq_1, tokens_seq_2):
        """Assemble one '[CLS] s1 [SEP] s2 [SEP]' id sequence of fixed length.

        Each sentence may occupy at most half of the space left after the
        three special tokens; longer sentences are truncated.  The result
        is padded with 0s up to self.max_seq_len.

        Returns:
            seq: list of token ids, length == max_seq_len.
            seq_mask: 1 for real tokens, 0 for padding.
            seq_segment: 0 for the first segment, 1 for the second,
                0 on padding.
        """
        # Budget per sentence after reserving [CLS] and two [SEP] tokens.
        limit = (self.max_seq_len - 3) // 2
        if len(tokens_seq_1) > limit:
            tokens_seq_1 = tokens_seq_1[0:limit]
        if len(tokens_seq_2) > limit:
            tokens_seq_2 = tokens_seq_2[0:limit]
        # Concatenate with BERT special tokens.
        seq = ['[CLS]'] + tokens_seq_1 + ['[SEP]'] + tokens_seq_2 + ['[SEP]']
        seq_segment = [0] * (len(tokens_seq_1) + 2) + [1] * (len(tokens_seq_2) + 1)
        # Map word pieces to vocabulary ids.
        seq = self.bert_tokenizer.convert_tokens_to_ids(seq)
        # Pad everything up to the fixed length.
        padding = [0] * (self.max_seq_len - len(seq))
        seq_mask = [1] * len(seq) + padding
        seq_segment = seq_segment + padding
        seq += padding
        assert len(seq) == self.max_seq_len
        assert len(seq_mask) == self.max_seq_len
        assert len(seq_segment) == self.max_seq_len
        return seq, seq_mask, seq_segment
"3190620639@qq.com"
] | 3190620639@qq.com |
2ffb58c61746bd4443255405e2153cd4dd1e6dda | 1b3c51fee2fa4b8c3d06e4004b4656efd498962a | /postprocessing/AIDA-Interchange-Format-master/python/tests/scaling_test.py | 40b1b6a185d2bf4e67c5377ce578eddc37d42fb9 | [
"MIT"
] | permissive | lianglili/uiuc_ie_pipeline_finegrained_source_code | ff2ce622ba4c7cc81a70d7d013fd180422002fff | 458bcd6ddfd4372391924a5f1e5e91089289d4f4 | refs/heads/master | 2023-01-03T21:31:33.154388 | 2020-10-31T03:11:04 | 2020-10-31T03:11:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,300 | py | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
import time
import random
from aida_interchange import aifutils
from aida_interchange.aida_rdf_ontologies import SEEDLING_TYPES_NIST
from rdflib import URIRef
class ScalingTest():
filename = "scalingdata.ttl"
LDC_NS = "https://tac.nist.gov/tracks/SM-KBP/2019/LdcAnnotations#"
g = aifutils.make_graph()
system = aifutils.make_system_with_uri(g, 'http://www.test.edu/testSystem')
# beginning sizes of data
entity_count = 128000
event_count = 38400
relations_count = 200
assertion_count = 1500
entity_index = 1
event_index = 1
relation_index = 1
assertion_index = 1
# utility values, so taht we can easily create random things
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
entity_resource_list = []
def run_scaling_test(self):
for ii in range(200):
print("trying : Entity count: ", self.entity_count)
start_time = int(round(time.time() * 1000))
self.run_single_test()
end_time = int(round(time.time() * 1000))
duration = (start_time - end_time) / 1000
size = 0
f = open(self.filename)
if os.path.isfile(self.filename):
size = os.path.getsize(self.filename)
size /= 1000000.
print("Size of output: ", size, " duration: ", duration)
# double size of entities and events after every iteration
self.increase()
def run_single_test(self):
# adds entities and events and wrties to file
for ii in range(self.entity_count):
self.add_entity()
for i in range(self.event_count):
self.add_event()
self.write_to_file(self.filename)
def add_entity(self):
# add an entity
entity_resource = aifutils.make_entity(self.g, self.get_entity_uri(), self.system)
self.entity_resource_list.append(entity_resource)
# sometimes add hasName, textValue, or numericValue, NOTE: This does not check type!!!
rand = random.random()
if rand < 0.15:
aifutils.mark_name(self.g, entity_resource, self.get_random_string(5))
elif rand < 0.3:
aifutils.mark_text_value(self.g, entity_resource, self.get_random_string(7))
elif rand < 0.4:
aifutils.mark_numeric_value_as_double(self.g, entity_resource, random.random())
# set the type
type_to_use = self.get_random_entity()
type_assertion = aifutils.mark_type(self.g, self.get_assertion_uri(), entity_resource, type_to_use, self.system, 1.0)
self.add_justification_and_private_data(type_assertion)
def add_event(self):
# add an event
event_resource = aifutils.make_event(self.g, self.get_event_uri(), self.system)
# add the type
event_type_string = self.EVENT_TYPES[random.randint(0, len(self.EVENT_TYPES)) - 1]
type_resource = SEEDLING_TYPES_NIST[event_type_string]
type_assertion = aifutils.mark_type(self.g, self.get_assertion_uri(), event_resource, type_resource, self.system, 1.0)
self.add_justification_and_private_data(type_assertion)
# make two arguments
for i in range(2):
arg = URIRef(SEEDLING_TYPES_NIST[event_type_string] + self.get_random_suffix())
argument = aifutils.mark_as_argument(self.g, event_resource, arg, self.get_random_entity(), self.system,
0.785, self.get_assertion_uri())
self.add_justification_and_private_data(argument)
def add_justification_and_private_data(self, resource):
docId = self.get_random_doc_id()
# justify the type assertion
aifutils.mark_text_justification(self.g, resource, docId, 1029, 1033, self.system, 0.973)
# add some private data
aifutils.mark_private_data(self.g, resource, "{ 'provenance' : '" + docId + "' }", self.system)
def increase(self):
self.entity_count *= 2
self.event_count *= 2
def get_uri(self, uri):
return self.LDC_NS + uri
def get_entity_uri(self):
self.entity_index += 1
return self.get_uri("entity-" + str(self.entity_index))
def get_event_uri(self):
self.event_index += 1
return self.get_uri("event-" + str(self.event_index))
def get_relation_uri(self):
self.relation_index += 1
return self.get_uri("relation-" + str(self.relation_index))
def get_assertion_uri(self):
self.assertion_index += 1
return self.get_uri("assertion-" + str(self.assertion_index))
def get_test_system_uri(self):
return self.get_uri("testSystem")
def get_random_doc_id(self):
s = ""
if random.getrandbits(1) == 1:
s += "IC"
else:
s += "HC"
s += "00"
s += "" + str((random.randint(0, 1000)))
s += self.abc[random.randint(0, len(self.abc) - 1)]
s += self.abc[random.randint(0, len(self.abc) - 1)]
s += self.abc[random.randint(0, len(self.abc) - 1)]
return s
def get_random_string(self, length):
s = ""
for i in range(0, length):
s += self.abc[random.randint(0, len(self.abc) - 1)]
return s
def get_random_entity(self):
return URIRef("https://tac.nist.gov/tracks/SM-KBP/2018/ontologies/SeedlingOntology#" + \
self.ENTITY_TYPES[random.randint(0, len(self.ENTITY_TYPES) - 1)])
def get_random_suffix(self):
s = "_" + self.ROLES[random.randint(0, len(self.ROLES) - 1)]
return s
def write_to_file(self, testname):
print("\n\n", testname, "\n\n")
file = open(testname, "w")
file.write(str(self.g.serialize(format='turtle')))
file.close()
ENTITY_TYPES = ["Person", "Organization", "Location", "Facility", "GeopoliticalEntity", "FillerType",
"Business.DeclareBankruptcy", "Business.End", "Business.Merge", "Business.Start",
"Conflict.Attack", "Conflict.Demonstrate",
"Contact.Broadcast", "Contact.Contact", "Contact.Correspondence", "Contact.Meet",
"Existence.DamageDestroy",
"Government.Agreements", "Government.Legislate", "Government.Spy", "Government.Vote",
"Inspection.Artifact", "Inspection.People",
"Justice.Acquit", "Justice.Appeal", "Justice.ArrestJail", "Justice.ChargeIndict", "Justice.Convict",
"Justice.Execute", "Justice.Extradite", "Justice.Fine", "Justice.Investigate", "Justice.Pardon",
"Justice.ReleaseParole", "Justice.Sentence", "Justice.Sue", "Justice.TrialHearing",
"Life.BeBorn", "Life.Die", "Life.Divorce", "Life.Injure", "Life.Marry",
"Manufacture.Artifact",
"Movement.TransportArtifact", "Movement.TransportPerson",
"Personnel.Elect", "Personnel.EndPosition", "Personnel.Nominate", "Personnel.StartPosition",
"Transaction.Transaction", "Transaction.TransferControl", "Transaction.TransferMoney",
"Transaction.TransferOwnership",
"GeneralAffiliation.APORA", "GeneralAffiliation.MORE", "GeneralAffiliation.OPRA",
"GeneralAffiliation.OrganizationWebsite", "GeneralAffiliation.PersonAge", "GeneralAffiliation.Sponsorship",
"Measurement.Count",
"OrganizationAffiliation.EmploymentMembership", "OrganizationAffiliation.Founder",
"OrganizationAffiliation.InvestorShareholder", "OrganizationAffiliation.Leadership",
"OrganizationAffiliation.Ownership", "OrganizationAffiliation.StudentAlum",
"PartWhole.Membership", "PartWhole.Subsidiary",
"PersonalSocial.Business", "PersonalSocial.Family", "PersonalSocial.RoleTitle",
"PersonalSocial.Unspecified",
"Physical.LocatedNear", "Physical.OrganizationHeadquarter", "Physical.OrganizationLocationOrigin",
"Physical.Resident"]
EVENT_TYPES = [
"Business.DeclareBankruptcy", "Business.End", "Business.Merge", "Business.Start",
"Conflict.Attack", "Conflict.Demonstrate",
"Contact.Broadcast", "Contact.Contact", "Contact.Correspondence", "Contact.Meet",
"Existence.DamageDestroy",
"Government.Agreements", "Government.Legislate", "Government.Spy", "Government.Vote",
"Inspection.Artifact", "Inspection.People",
"Justice.Acquit", "Justice.Appeal", "Justice.ArrestJail", "Justice.ChargeIndict", "Justice.Convict",
"Justice.Execute", "Justice.Extradite", "Justice.Fine", "Justice.Investigate", "Justice.Pardon",
"Justice.ReleaseParole", "Justice.Sentence", "Justice.Sue", "Justice.TrialHearing",
"Life.BeBorn", "Life.Die", "Life.Divorce", "Life.Injure", "Life.Marry",
"Manufacture.Artifact",
"Movement.TransportArtifact", "Movement.TransportPerson",
"Personnel.Elect", "Personnel.EndPosition", "Personnel.Nominate", "Personnel.StartPosition",
"Transaction.Transaction", "Transaction.TransferControl", "Transaction.TransferMoney",
"Transaction.TransferOwnership"]
ROLES = ["Attacker", "Instrument", "Place", "Target", "Time", "Broadcaster",
"Place", "Time", "Participant", "Place", "Participant", "Time",
"Participant", "Affiliate", "Affiliation", "Affiliation", "Person",
"Entity", "Sponsor", "Defendant", "Prosecutor", "Adjudicator",
"Defendant", "Agent", "Instrument", "Victim", "Artifact",
"Manufacturer", "Agent", "Artifact", "Destination", "Instrument",
"Origin", "Time", "Agent", "Destination", "Instrument", "Origin",
"Person", "Employee", "Organization", "Person", "Entity", "Place",
"Beneficiary", "Giver", "Recipient", "Thing", "Time"];
if __name__ == "__main__":
ScalingTest().run_scaling_test() | [
"limanlingcs@gmail.com"
] | limanlingcs@gmail.com |
d9c94007b05b243ba95ace0dae93928d09561f45 | bf0800eee5a43f600ab3ebd99d3486846d9f4834 | /blog/views.py | aa024c57c30b7e613d9d778655bff923cef2a3e5 | [] | no_license | wonsik1012/my-first-blog | 6de17de4bd13a9d36650ad2070b07190461dbd3e | e0124e41b8dabf20e23af5d969e55a2238841dba | refs/heads/master | 2020-07-03T10:07:38.321239 | 2019-08-13T11:48:21 | 2019-08-13T11:48:21 | 201,874,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,924 | py | from django.shortcuts import render
from django.utils import timezone
from .models import Post
from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse
from django.template.loader import get_template
from django.template.context import RequestContext
import folium
from IPython.display import HTML, display
import numpy as np
import osmnx as ox
import networkx as nx
from sklearn.neighbors import KDTree
import folium
import folium.plugins
import pandas as pd
from folium import plugins
import requests
import googlemaps
import numbers
import math
from geopy.geocoders import Nominatim
def show_map(request):
    """Django view: render a walking-route map of Jongno-gu (Seoul).

    Builds the street graph with osmnx, annotates it with node elevations
    and edge grades, computes three alternative shortest paths between two
    hard-coded places (by length, by absolute grade, and by impedance),
    draws them on a folium map, saves the map HTML and renders it.
    NOTE(review): the Google API key is hard-coded below — it should be
    moved to settings/environment configuration.
    """
    # Download the walkable street network for Jongno-gu ('종로구').
    G = ox.graph_from_place('종로구')
    # Attach elevation data to every node via the Google Elevation API.
    a = ox.elevation.add_node_elevations(G, 'AIzaSyBQYn4uBzdjr1ULXYqfn_z7lUWoIXYQB1Q', max_locations_per_batch=350, pause_duration=0.02)
    # Derive edge grades (slopes) from the node elevations.
    b =ox.elevation.add_edge_grades(G, add_absolute=True)
    nodes,edge = ox.graph_to_gdfs(b)
    edge.head()
    gmaps_key = "AIzaSyBQYn4uBzdjr1ULXYqfn_z7lUWoIXYQB1Q"
    gmaps = googlemaps.Client(key=gmaps_key)
    geolocator = Nominatim()

    class GeoUtil:
        """
        Geographical Utils
        """
        @staticmethod
        def degree2radius(degree):
            # Convert degrees to radians.
            return degree * (math.pi/180)

        @staticmethod
        def get_harversion_distance(x1, y1, x2, y2, round_decimal_digits=5):
            """Haversine distance in km between (lon, lat) points x1,y1 and x2,y2."""
            if x1 is None or y1 is None or x2 is None or y2 is None:
                return None
            assert isinstance(x1, numbers.Number) and -180 <= x1 and x1 <= 180
            assert isinstance(y1, numbers.Number) and -90 <= y1 and y1 <= 90
            assert isinstance(x2, numbers.Number) and -180 <= x2 and x2 <= 180
            assert isinstance(y2, numbers.Number) and -90 <= y2 and y2 <= 90

            R = 6371  # Earth radius (km)
            dLon = GeoUtil.degree2radius(x2-x1)
            dLat = GeoUtil.degree2radius(y2-y1)

            a = math.sin(dLat/2) * math.sin(dLat/2) \
                + (math.cos(GeoUtil.degree2radius(y1)) \
                *math.cos(GeoUtil.degree2radius(y2)) \
                *math.sin(dLon/2) * math.sin(dLon/2))
            b = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
            return round(R * b, round_decimal_digits)

    def seeshortestway(x1, x2):
        """Geocode two place names, compute three routes and return a folium map."""
        #loc1 = ox.geocode(x1)
        #loc2 = ox.geocode(x2)
        # Geocode both place names via the Google Geocoding API.
        place1=gmaps.geocode(x1)
        lat1=place1[0]['geometry']['location']['lat']
        lng1=place1[0]['geometry']['location']['lng']
        place2=gmaps.geocode(x2)
        lat2=place2[0]['geometry']['location']['lat']
        lng2=place2[0]['geometry']['location']['lng']
        loc1=(lat1,lng1)
        loc2=(lat2,lng2)
        # A KD-tree makes finding the nearest graph node easy and efficient.
        tree = KDTree(nodes[['y', 'x']], metric='euclidean')
        loc1_idx = tree.query([loc1], k=1, return_distance=False)[0]
        loc2_idx = tree.query([loc2], k=1, return_distance=False)[0]
        closest_node_to_loc1 = nodes.iloc[loc1_idx].index.values[0]
        closest_node_to_loc2 = nodes.iloc[loc2_idx].index.values[0]
        # Three alternative routes: shortest by length, by absolute grade
        # (flattest), and by impedance.
        route = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='length')
        onlygodoroute = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='grade_abs')
        impedanceroute = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='impedance')
        #distance=nx.shortest_path_length(G, closest_node_to_loc1,closest_node_to_loc2)
        # Collect (lat, lng) coordinates for the two alternative routes.
        graderoute = []
        impedance = []
        for i in range(len(onlygodoroute)):
            lng = G.node[onlygodoroute[i]]['x']
            lat = G.node[onlygodoroute[i]]['y']
            b = [lat,lng]
            graderoute.append(b)
        for i in range(len(impedanceroute)):
            lng = G.node[impedanceroute[i]]['x']
            lat = G.node[impedanceroute[i]]['y']
            b = [lat,lng]
            impedance.append(b)
        # Draw the length-shortest route in navy, then overlay the other
        # two routes as animated ant paths (purple = grade, red = impedance).
        m = ox.plot_route_folium(G, route, route_color='navy',tiles='stamen toner')
        antpath = plugins.AntPath(locations=graderoute,color='purple')
        antpath.add_to(m)
        antpath = plugins.AntPath(locations=impedance,color='red')
        antpath.add_to(m)
        #folium.PolyLine(graderoute, color="purple", weight=4).add_to(m)
        #folium.PolyLine(impedance, color="red", weight=4).add_to(m)
        # Start/end markers (green = departure, blue = arrival).
        kw = {
            'prefix': 'fa',
            'color': 'green',
            'icon': 'arrow-up'
        }
        ka = {
            'prefix': 'fa',
            'color': 'blue',
            'icon': 'arrow-up'
        }
        icon1 = folium.Icon(angle=45, **kw)
        folium.Marker(location=loc1, icon=icon1,popup=x1, tooltip='출발').add_to(m)
        icon2 = folium.Icon(angle=180, **ka)
        folium.Marker(location=loc2, icon=icon2, popup=x2,tooltip='도착').add_to(m)
        #lium.Marker(location=loc1,
        #            icon=folium.Icon(color='red'), popup=x1, tooltip='출발').add_to(m)
        #folium.Marker(location=loc2,
        #            icon=folium.Icon(color='blue'),popup=x2, tooltip='도착').add_to(m)
        # Sum the haversine distance over consecutive route nodes, then
        # estimate the walking time ('dobo' = on foot) at 4 km/h.
        dobo=4
        add = []
        for i in range(len(route)-1):
            lng1 = G.node[route[i]]['x']
            lat1 = G.node[route[i]]['y']
            lng2 = G.node[route[i+1]]['x']
            lat2 = G.node[route[i+1]]['y']
            result =GeoUtil.get_harversion_distance(lng1,lat1,lng2,lat2)
            add.append(result)
        noroundkm = sum(add)
        km = round(noroundkm,1)
        noroundminute = (km/dobo)*60
        minute = round(noroundminute,1)
        print('거리는',km,'KM 이며, ','시간은', minute,'분 걸립니다.')
        return m

    # Compute the route between the two (hard-coded) places, save the map
    # HTML into the template directory and render it.
    m=seeshortestway('안국역 3호선', '북촌생활사박물관')
    a = m.save("blog/templates/blog/map.html")
    context = {'my_map': m}
    return render(request, 'blog/map.html', context)
| [
"you@example.com"
] | you@example.com |
cf955fac02a1f41b9fbbdd9537a6c15107c1a7d0 | 13892da0ca7b4ec973be46b6b42ce0ed46ad6c47 | /preprocess_infomation.py | 3d0460cc56529ebdbbf2801f2aa93364b2b54c10 | [] | no_license | VermaShr/image-captioning | 1e1e6839fcbd2b3ee73107cbca7db94bcffe0841 | 410b6ec1dd35de6e082188d7c69dd77a0cf457be | refs/heads/master | 2020-04-24T15:29:00.905611 | 2019-02-22T14:27:20 | 2019-02-22T14:27:20 | 172,069,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,978 | py | """
Before running this code make sure that
1) Images have been segregated into train,test and validation
2) Pycocotool have been installed
3) SELECT MODE
ATTENTION/ WITHOUT ATTTENTION
Modify the paths to local paths
"""
import cPickle
import os
import numpy as np
import re
from utilities import log
from nltk.tokenize import word_tokenize
import sys
from pycocotools.coco import COCO
import tensorflow as tf
import tensorflow.python.platform
from tensorflow.python.platform import gfile
############################################################
# CHANGE THE VALUES DEFINED HERE
PYCOCOTOOLS_PATH ="/home/cul10kk/ML_project/coco/PythonAPI"
MODE= 0 ##### Mode 0 - no attention , 1- attention
#CNN model
model_dir = "/home/cul10kk/CS224n_project/inception"
############################################################
sys.path.append(PYCOCOTOOLS_PATH)
# DIRECTORIES
captions_dir = "coco/annotations/"
data_dir = "coco/data/"
val_img_dir = "coco/images/val/"
test_img_dir = "coco/images/test/"
train_img_dir = "coco/images/train/"
########################################################
#PREPROCESS THE VALIDATION AND TEST IMAGES
########################################################
# image locations
val_img_paths = [val_img_dir + file_name for file_name in\
os.listdir(val_img_dir) if ".jpg" in file_name]
test_img_paths = [test_img_dir + file_name for file_name in\
os.listdir(test_img_dir) if ".jpg" in file_name]
train_img_paths = [train_img_dir + file_name for file_name in\
os.listdir(train_img_dir) if ".jpg" in file_name]
val_img_ids = np.array([])
for val_img_path in val_img_paths:
img_name = val_img_path.split("/")[3]
img_id = img_name.split("_")[2].split(".")[0].lstrip("0")
img_id = int(img_id)
val_img_ids = np.append(val_img_ids, img_id)
test_img_ids = np.array([])
for test_img_path in test_img_paths:
img_name = test_img_path.split("/")[3]
img_id = img_name.split("_")[2].split(".")[0].lstrip("0")
img_id = int(img_id)
test_img_ids = np.append(test_img_ids, img_id)
#cPickle.dump(val_img_ids, open(os.path.join("coco/data/", "val_img_ids"), "wb"))
#cPickle.dump(test_img_ids, open(os.path.join("coco/data/", "test_img_ids"), "wb"))
##########################################################
##########################################################
#Preprocess the caption information
##########################################################
# val_img_ids and test_img_ids are reused from the image-preprocessing step above
##########################################################
word_counts = {}
vocabulary = []
# DEFINING THE VOCABULARY
captions_file = "coco/annotations/captions_train2014.json"
coco = COCO(captions_file)
print("Defining the vocabulary")
for sentence in tqdm(coco.all_captions()):
for w in word_tokenize(sentence.lower()):
word_counts[w] = word_counts.get(w, 0) + 1.0
# get the captions for training and validation
def get_captions(type_of_data):
    """Populate the module-level caption dicts for one COCO split.

    For the "train" split every caption goes into train_caption_id_2_caption.
    For the "val" split each caption is routed to the test or val dict
    depending on which image-id set its image belongs to (the val
    annotations cover both of our val and test image splits).  Every
    caption id is also mapped to its image id in caption_id_2_img_id.

    Fixes the original, which referenced undefined loop variables in the
    val branch and rebound the result dicts locally so nothing was stored.
    """
    captions_file = "coco/annotations/captions_%s2014.json" % type_of_data
    coco = COCO(captions_file)

    # iterate every annotation; the annotation id is the caption id
    for caption_id in coco.anns:
        ann = coco.anns[caption_id]
        img_id = ann['image_id']
        caption = ann['caption']
        caption_id_2_img_id[caption_id] = img_id
        if type_of_data == "train":
            train_caption_id_2_caption[caption_id] = caption
        elif type_of_data == "val":
            # validation and test need to be handled separately
            if img_id in test_img_ids:
                test_caption_id_2_caption[caption_id] = caption
            elif img_id in val_img_ids:
                val_caption_id_2_caption[caption_id] = caption
train_caption_id_2_caption = {}
test_caption_id_2_caption = {}
val_caption_id_2_caption = {}
caption_id_2_img_id = {}
get_captions("train")
get_captions("val")
cPickle.dump(caption_id_2_img_id,
open(os.path.join(data_dir, "caption_id_2_img_id"), "wb"))
pretrained_words = []
word_vectors = []
with open(os.path.join(captions_dir, "glove.6B.300d.txt")) as file:
for line in file:
line_elements = line.split(" ")
#word
word = line_elements[0]
#get the word vector
word_vector = line_elements[1:]
# store
pretrained_words.append(word)
word_vectors.append(word_vector)
# get all words that have a pretrained word embedding:
vocabulary = []
for word in word_counts:
word_count = word_counts[word]
if word_count >= 5 and word in pretrained_words:
vocabulary.append(word)
# add "<SOS>", "<UNK>" and "<EOS>" to the vocabulary:
vocabulary.insert(0, "<EOS>")
vocabulary.insert(0, "<UNK>")
vocabulary.insert(0, "<SOS>")
#cPickle.dump(vocabulary,
# open(os.path.join(data_dir, "vocabulary"), "wb"))
#replaces words that are not in the vocabulary and appends the <Start> <end>
# replaces out-of-vocabulary words with <UNK> and appends <SOS>/<EOS>
def fix_tokenize(caption_dict, name):
    """Tokenize every caption in *caption_dict* in place.

    Each caption string is lower-cased and word-tokenized, wrapped in
    <SOS>/<EOS> tokens, every out-of-vocabulary word is replaced by <UNK>,
    and the result is stored back as a numpy array of vocabulary indices.
    For the "train" split the caption-length bookkeeping dicts are updated
    as well.

    Fixes the original, which was syntactically broken ("ef", missing
    colon), took a stray `self` parameter not matching its 2-argument call
    sites, called list methods on the raw caption *string*, and referenced
    an undefined `caption_ids` variable.
    """
    print("Tokenizing %s" % name)
    for caption_id, caption in caption_dict.items():
        # split the raw caption string into lower-cased tokens
        tokens = word_tokenize(caption.lower())
        # prepend the caption with an <SOS> token and append an <EOS> token
        tokens.insert(0, "<SOS>")
        tokens.append("<EOS>")
        tokenized_caption = []
        for word in tokens:
            if word not in vocabulary:
                word = "<UNK>"
            tokenized_caption.append(vocabulary.index(word))
        caption_dict[caption_id] = np.array(tokenized_caption)
        if name == "train":
            # assumes train_caption_length_2_caption_ids and
            # train_caption_length_2_no_of_captions are initialised as empty
            # dicts earlier in the script — TODO confirm
            caption_length = len(tokens)
            if caption_length not in train_caption_length_2_caption_ids:
                train_caption_length_2_caption_ids[caption_length] = [caption_id]
            else:
                train_caption_length_2_caption_ids[caption_length].append(caption_id)
            train_caption_length_2_no_of_captions[caption_length] = \
                len(train_caption_length_2_caption_ids[caption_length])
    log("Tokenized %s" % name)
# tokenize all three splits in place
# (fixes the "val_caption_id_2_captioni" typo, which raised a NameError)
fix_tokenize(train_caption_id_2_caption, "train")
fix_tokenize(val_caption_id_2_caption, "val")
fix_tokenize(test_caption_id_2_caption, "test")
cPickle.dump(train_caption_id_2_caption, open(os.path.join(data_dir,
"train_caption_id_2_caption"), "wb"))
cPickle.dump(test_caption_id_2_caption, open(os.path.join(data_dir,
"test_caption_id_2_caption"), "wb"))
cPickle.dump(val_caption_id_2_caption, open(os.path.join(data_dir,
"val_caption_id_2_caption"), "wb"))
cPickle.dump(train_caption_length_2_no_of_captions,
open(os.path.join("coco/data/",
"train_caption_length_2_no_of_captions"), "wb"))
cPickle.dump(train_caption_length_2_caption_ids,
open(os.path.join("coco/data/",
"train_caption_length_2_caption_ids"), "wb"))
##########################################################
##########################################################
#Preprocess the caption information
##########################################################
# Reuses
# vocabulary
# word_vector
##########################################################
word_vec_dim = 300
vocab_size = len(vocabulary)
pretrained_words = []
word_vectors = []
with open(os.path.join(captions_dir, "glove.6B.300d.txt")) as file:
for line in file:
# remove the new line char at the end:
line = line.strip()
# seperate the word from the word vector:
line_elements = line.split(" ")
word = line_elements[0]
word_vector = line_elements[1:]
# save:
pretrained_words.append(word)
word_vectors.append(word_vector)
# create an embedding matrix where each row is the pretrained word vector
embeddings_matrix = np.zeros((vocab_size, word_vec_dim))
for vocab_index, word in enumerate(vocabulary):
if vocab_index % 1000 == 0:
print vocab_index
log(str(vocab_index))
if word not in ["<SOS>", "<UNK>", "<EOS>"]: # (the special tokens are initialized with zero vectors)
word_embedd_index = pretrained_words.index(word)
word_vector = word_vectors[word_embedd_index]
# convert into a numpy array:
word_vector = np.array(word_vector)
# convert everything to floats:
word_vector = word_vector.astype(float)
# add to the matrix:
embeddings_matrix[vocab_index, :] = word_vector
# save the embeddings_matrix to disk:
cPickle.dump(embeddings_matrix,
open(os.path.join(data_dir, "embeddings_matrix"), "wb"))
##########################################################
##########################################################
# RUN A PRETRAINED CNN to get the image output
##########################################################
##########################################################
# WITHOUT ATTENTION
##########################################################
def load_pretrained_CNN():
    """Load the pretrained Inception-V3 GraphDef into the default TF graph."""
    # define where the pretrained inception model is located:
    path_to_saved_model = os.path.join(model_dir,
                "classify_image_graph_def.pb")

    with gfile.FastGFile(path_to_saved_model, "rb") as model_file:
        # create an empty GraphDef object:
        graph_def = tf.GraphDef()
        # import the model definitions:
        graph_def.ParseFromString(model_file.read())
        _ = tf.import_graph_def(graph_def, name="")
def extract_img_features(img_paths, demo=False):
    """Run each image through Inception-V3 and return {img_id: feature vector}.

    Uses the 2048-d activations of the second-to-last layer ("pool_3") as
    the feature vector.  Images that fail to JPEG-decode are logged and
    skipped.  When demo=True a single image is processed and keyed by 0.
    """
    img_id_2_feature_vector = {}

    # load the Inception-V3 model:
    load_pretrained_CNN()

    with tf.Session() as sess:
        # get the second-to-last layer in the Inception-V3 model (this
        # is what we will use as a feature vector for each image):
        second_to_last_tensor = sess.graph.get_tensor_by_name("pool_3:0")

        for step, img_path in enumerate(img_paths):
            # progress logging every 100 images
            if step % 100 == 0:
                print step
                log(str(step))

            # read the image:
            img_data = gfile.FastGFile(img_path, "rb").read()
            try:
                # get the img's corresponding feature vector:
                feature_vector = sess.run(second_to_last_tensor,
                        feed_dict={"DecodeJpeg/contents:0": img_data})
            # NOTE(review): bare except — swallows every error, not just
            # JPEG decode failures; deliberately kept as best-effort here.
            except:
                print "JPEG error for:"
                print img_path
                print "******************"
                log("JPEG error for:")
                log(img_path)
                log("******************")
            else:
                # flatten the features to an np.array:
                feature_vector = np.squeeze(feature_vector)

                if not demo:
                    # get the image id (parse it out of the COCO file name):
                    img_name = img_path.split("/")[3]
                    img_id = img_name.split("_")[2].split(".")[0].lstrip("0")
                    img_id = int(img_id)
                else: # (if demo:)
                    # we're only extracting features for one img, (arbitrarily)
                    # set the img id to 0:
                    img_id = 0

                # save the feature vector and the img id:
                img_id_2_feature_vector[img_id] = feature_vector

    return img_id_2_feature_vector
##########################################################
##########################################################
# WITH ATTENTION
##########################################################
# reuses
##########################################################
def extract_img_features_attention(img_paths, demo=False):
    """Extract per-region (64 x 300) attention features and save them to disk.

    Takes the third-to-last Inception-V3 layer (8x8x2048), reshapes it to
    64 region vectors, applies a learned linear + sigmoid transform to 300
    dims, and pickles one file per image id under
    coco/data/img_features_attention/.  When demo=True the single image is
    saved under id -1.
    """
    # load the Inception-V3 model:
    load_pretrained_CNN()

    # load the parameters for the feature vector transform:
    transform_params = cPickle.load(open("coco/data/img_features_attention/transform_params/numpy_params"))
    W_img = transform_params["W_img"]
    b_img = transform_params["b_img"]

    with tf.Session() as sess:
        # get the third-to-last layer in the Inception-V3 model (a tensor
        # of shape (1, 8, 8, 2048)):
        img_features_tensor = sess.graph.get_tensor_by_name("mixed_10/join:0")
        # reshape the tensor to shape (64, 2048):
        img_features_tensor = tf.reshape(img_features_tensor, (64, 2048))
        # apply the img transform (get a tensor of shape (64, 300)):
        linear_transform = tf.matmul(img_features_tensor, W_img) + b_img
        img_features_tensor = tf.nn.sigmoid(linear_transform)

        for step, img_path in enumerate(img_paths):
            # progress logging every 10 images
            if step % 10 == 0:
                print step
                log(str(step))

            # read the image:
            img_data = gfile.FastGFile(img_path, "rb").read()
            try:
                # get the img features (np array of shape (64, 300)):
                img_features = sess.run(img_features_tensor,
                        feed_dict={"DecodeJpeg/contents:0": img_data})
                #img_features = np.float16(img_features)
            # NOTE(review): bare except — swallows every error, not just
            # JPEG decode failures; deliberately kept as best-effort here.
            except:
                print "JPEG error for:"
                print img_path
                print "******************"
                log("JPEG error for:")
                log(img_path)
                log("******************")
            else:
                if not demo:
                    # get the image id (parse it out of the COCO file name):
                    img_name = img_path.split("/")[3]
                    img_id = img_name.split("_")[2].split(".")[0].lstrip("0")
                    img_id = int(img_id)
                else: # (if demo:)
                    # we're only extracting features for one img, (arbitrarily)
                    # set the img id to -1:
                    img_id = -1

                # save the img features to disk:
                cPickle.dump(img_features,
                        open("coco/data/img_features_attention/%d" % img_id, "wb"))
##########################################################
# Call based on if attention is needed or not
##########################################################
# Driver: MODE, the *_img_paths lists, log() and extract_img_features()
# are defined earlier in this script (not visible here).
if(MODE == 1):
    # attention mode: features are written one file per image by
    # extract_img_features_attention, so all splits can be processed at once.
    # create a list of the paths to all imgs:
    img_paths = val_img_paths + test_img_paths + train_img_paths
    # extract all features:
    extract_img_features_attention(img_paths)
else:
    # non-attention mode: build one {img_id: feature_vector} dict per split
    # and pickle each dict as a whole.
    # get the feature vectors for all val imgs:
    val_img_id_2_feature_vector = extract_img_features(val_img_paths)
    # save on disk:
    cPickle.dump(val_img_id_2_feature_vector,
                open("coco/data/val_img_id_2_feature_vector", "wb"))
    print "val done!"
    log("val done!")
    test_img_id_2_feature_vector = extract_img_features(test_img_paths)
    # save on disk:
    cPickle.dump(test_img_id_2_feature_vector,
                open("coco/data/test_img_id_2_feature_vector", "wb"))
    print "test done!"
    log("test done!")
    # get the feature vectors for all train imgs:
    train_img_id_2_feature_vector = extract_img_features(train_img_paths)
    # save on disk:
    cPickle.dump(train_img_id_2_feature_vector,
                open("coco/data/train_img_id_2_feature_vector", "wb"))
| [
"sverma654@gmail.com"
] | sverma654@gmail.com |
897284ff5a0bb4cecf47a40890334017332a2bed | 29b37c07c55d3b571ab2afefeeb88a43425dc30d | /priority_queue_112.py | d53db054f9a969761cac5c29a5e5ce82b4ff371d | [] | no_license | nearmo/heap_ordered_binary_tree | ad9fc8f7df089168a5d662015d56237a68deefe8 | 7ef26618ac9750cb7e6671fbcaf2fa4cc4ed4900 | refs/heads/master | 2022-08-31T15:10:44.052399 | 2020-05-24T09:25:10 | 2020-05-24T09:25:10 | 266,489,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,489 | py | #!/usr/bin/env python3
from math import log, floor
from statistics import mean
from re import findall
class PQ(object):
    """Max-oriented priority queue backed by a 1-indexed binary heap.

    Heap entries live in the dict ``self.d`` keyed by heap position (root
    at 1, children of k at 2k and 2k+1); ``self.N`` is the element count.
    ``__str__`` renders the heap as an ASCII binary tree.
    """

    def __init__(self):
        self.d = {}
        self.N = 0

    def exch(self, i, j):
        """Swap the values stored at heap positions i and j."""
        self.d[i], self.d[j] = self.d[j], self.d[i]

    def insert(self, v):
        """Insert a new element at position N+1 and swim it up."""
        self.N += 1
        self.d[self.N] = v
        self.swim(self.N)

    def swim(self, k):
        """Move node k up until its parent is no smaller than itself."""
        while k > 1 and self.d[k // 2] < self.d[k]:
            self.exch(k // 2, k)
            k = k // 2

    def is_empty(self):
        """Return True if the heap holds no elements."""
        return self.size() == 0

    def size(self):
        """Return the number of stored elements."""
        return self.N

    def getMax(self):
        """Return (without removing) the maximum element, i.e. the root."""
        return self.d[1]

    def bigger(self, i, j):
        """Return whichever of positions i and j holds the bigger value.

        Falls back to i when position j does not exist (node has no
        right child).
        """
        try:
            return max([i, j], key=self.d.__getitem__)
        except KeyError:
            return i

    def delMax(self):
        """Delete and return the maximum element of the heap."""
        v = self.d[1]
        self.exch(1, self.N)
        del(self.d[self.N])
        self.N -= 1
        self.sink(1)
        return v

    def sink(self, k):
        """Move node k down until neither child is bigger than it."""
        # While there is a child left
        while 2 * k <= self.N:
            # Indexing of left child
            j = 2 * k
            # Select bigger child
            j = self.bigger(j, j + 1)
            # Done if >= both children
            if self.d[k] >= self.d[j]:
                break
            # Swap with larger child
            self.exch(k, j)
            k = j

    def len(self, row):
        """Return the node count of a perfect binary tree with `row` rows."""
        if row == 1:
            return 1
        return self.len(row - 1) + 2 ** (row - 1)

    def base(self):
        """Return the bottom layer of the rendered tree as a string.

        Side effect: caches a_len / no_row / t_len on self, which
        rows() / layer() / lines() rely on — __str__ calls base() first.
        """
        self.a_len = len(self.d)
        self.no_row = floor(log(self.a_len, 2) + 1)
        self.t_len = self.len(self.no_row)
        t_base = 2 ** (self.no_row - 1)
        a_base = t_base - (self.t_len - self.a_len)
        return ' '.join(self.elements()[-a_base:])

    def rows(self):
        """Return a list of lists holding the elements of each tree row."""
        elements = self.elements()
        rows = []
        for i in range(self.no_row):
            row = []
            j = 0
            while j < len(elements) and j < 2 ** i:
                row.append(elements[j])
                j += 1
            rows.append(row)
            elements = elements[2 ** i:]
        return rows

    def elements(self):
        """Return every heap element, in index order, as strings."""
        elements = []
        for v in self.d.values():
            elements.append(str(v))
        return elements

    def layer(self):
        """Return the second-to-last rendered layer, used as the anchor
        from which the layers above are positioned."""
        layer = ' ' * self.t_len
        p = 1
        for i in range(len(self.rows()[-2])):
            n = self.rows()[-2][i]
            layer = layer[:p] + n + layer[p + len(n):]
            p += 4
        return layer

    def lines(self):
        """Return the rendered layers above the base, top layer first."""
        line = ' ' * self.t_len
        lines = [line] * (self.no_row - 2)
        lines.append(self.layer())
        # i represents every line to be outputted
        for i in range(1, len(lines)):
            i = -i - 1
            # j represents the position of every element in the line of i
            for j in range(1, len(self.rows()[i - 1]) + 1):
                # l and h represent the positions of the two child nodes of j
                l, h = self.rows()[i][(j * 2) - 2], self.rows()[i][(j * 2) - 1]
                l, h = lines[i + 1].find(l), lines[i + 1].find(h)
                # BUGFIX: the midpoint must be an int slice index.
                # statistics.mean([l, h]) returns a float when l + h is odd,
                # which made the slices below raise TypeError.
                p = (l + h) // 2
                n = self.rows()[i - 1][j - 1]
                lines[i] = lines[i][:p] + n + lines[i][(p + len(n)):]
        return lines

    def __str__(self):
        """Return the heap-ordered binary tree in string format."""
        base = self.base()
        lines = self.lines()
        return '\n'.join(lines) + '\n' + base
| [
"diarmuid.b.8612@gmail.com"
] | diarmuid.b.8612@gmail.com |
67e9456095b1afadb01c1e62757f4091ad90eb1c | 2c3de3d359a7a7cb7ea6dfa0a4f1c444681b68de | /Django/Lecture3/tasks/urls.py | 872e69835f1b3da25e18f6f68395cf28df290063 | [] | no_license | pcolt/CS50-Web-Programming | a2fea01b0eeaa1c902c4c52127c4d8ca0e19b09e | acb96587d0a69b482a35ff4d185adfb7f7495c93 | refs/heads/master | 2023-01-05T12:16:49.433162 | 2020-10-29T10:29:27 | 2020-10-29T10:29:27 | 277,560,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("add", views.add, name="add")
]
| [
"pcolturi@fastmail.com"
] | pcolturi@fastmail.com |
e59916694f2e23ff667f5791bf356795499f119c | 29f50b18e84d7de9746855b2b2f5522f28c9dc11 | /ohbugztracker/tracker/views.py | c208e4a05698a6c2c15597eeb32e4730adaa31c2 | [] | no_license | rishikeshjadhav/python-samples | b1f3a53271537c8a9e881090a811f3ab2bceb886 | 1a09d8f22950232e22153c2ab7f6c112c8237a29 | refs/heads/master | 2020-03-25T10:45:19.759651 | 2018-08-06T09:18:28 | 2018-08-06T09:18:28 | 143,704,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | from django.shortcuts import render
from .models import Project, Task
from .forms import ProjectForm
from django.shortcuts import redirect
# Create your views here.
def index(request):
    """Render the landing page listing every project."""
    project_list = Project.objects.all()
    return render(request, 'index.html', {'projects': project_list})
def add_new_project(request):
    """Show the new-project form; create a Project on a valid POST.

    On success, redirect to the index page; otherwise re-render the form
    (bound, so validation errors are shown after a failed POST).
    """
    if request.method == 'POST':
        form = ProjectForm(request.POST)
        if form.is_valid():
            project = Project.objects.create(
                title=form.cleaned_data['title'],
                description=form.cleaned_data['description'],
            )
            if project.pk > 0:
                return redirect('index')
    else:
        form = ProjectForm()
    return render(request, 'project/add.html', {'form': form})
def myview(request):
    """Render a hard-coded employee roster (demo view)."""
    roster = [
        {'Name': 'User One', 'EmpId': 'EMP001', 'Designation': 'CEO'},
        {'Name': 'User Two', 'EmpId': 'EMP002', 'Designation': 'Vice President'},
    ]
    return render(request, 'myview.html', {'employees': roster})
| [
"rishijadhav2792@gmail.com"
] | rishijadhav2792@gmail.com |
64f4aecdc4ba0856009744c04d0a8cef73e58ae7 | 77db6591c5884204d6016bfa89b33691bac38813 | /load.py | b2a9c8a3983643329620f4d7f7cd949b5ccd27f0 | [] | no_license | jbukoski/iltf-signal-webmap-suite | 4fc0aafa977e911a1071872f7adbaf2e7d0da37c | b8374e9cfcc80501a8f632721a7cb9b76e668f6b | refs/heads/master | 2021-03-27T11:20:37.174667 | 2020-12-31T18:03:20 | 2020-12-31T18:03:20 | 79,853,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | import os
from django.contrib.gis.utils import LayerMapping
from . import models
*_shp = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', '', '*.shp'))
def run(verbose=True):
*_lm = LayerMapping(
models.*, *_shp, *_mapping,
transform=False, encoding='iso-8859-1'
)
*_lm.save(strict=True, verbose=verbose)
| [
"jacob.bukoski@yale.edu"
] | jacob.bukoski@yale.edu |
080be39125123bae5847497f01f0fbc35593ef1f | 24a1639df824e325c3a1b750b98caa121b5729c0 | /utils.py | 79919f7ac4d33786e643ef89c2342cfcb5dd2967 | [] | no_license | Weigoss/sacn_dgl | e31deaede5ea510e3e6f67da3bf01528dd336db2 | 619838c72171913aa43b097ac3276c5b5ecaf607 | refs/heads/main | 2023-02-02T07:07:49.481210 | 2020-12-23T13:42:10 | 2020-12-23T13:42:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | """
code from https://github.com/dmlc/dgl/blob/master/examples/pytorch/gat/utils.py
"""
import numpy as np
import torch
class EarlyStopping:
    """Track a validation metric and signal when training should stop.

    A checkpoint is saved whenever the metric does not get worse; after
    `patience` consecutive worse scores, `early_stop` latches to True.
    """

    def __init__(self, patience=10):
        self.patience = patience
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def step(self, acc, model):
        """Record one validation score; return True once patience runs out."""
        score = acc
        if self.best_score is not None and score < self.best_score:
            # Worse than the best so far: burn one unit of patience.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # First score, or at least as good as the best: checkpoint and
            # reset the patience counter.
            self.best_score = score
            self.save_checkpoint(model)
            self.counter = 0
        return self.early_stop

    def save_checkpoint(self, model):
        '''Saves model when validation loss decrease.'''
        torch.save(model.state_dict(), 'es_checkpoint.pt')
"maqy1995@163.com"
] | maqy1995@163.com |
c597f9e8df6464ea3de140f45ce2a54156280d96 | 29fdcdde4aae7502d8170cc44f4af635fbcce5b5 | /MySQLPython/update_data_5.py | dde1923a3fc9c3c16d98636ad7bf003095dbb906 | [] | no_license | shivam0071/exploringPython | bcbd0d03f8f473077d3777d1396ef4638e5c6cee | ace9646e59ba20068e383704430efe4946833090 | refs/heads/master | 2022-05-05T00:34:24.642204 | 2022-04-01T06:13:38 | 2022-04-01T06:13:38 | 81,352,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import pymysql
# Connect to the local MySQL test database.
# NOTE(review): credentials are hard-coded ('root'/'hidden') — acceptable in
# a throwaway tutorial script, but real code should read them from config.
conn = pymysql.connect(host = 'localhost',user = 'root',password='hidden',db = 'pythontestdb')
mycursor = conn.cursor()

# The UPDATE that was run previously (kept commented out for reference):
# sql = "UPDATE ninja SET attack='Shadow Clone Jutsu' where name = 'Naruto'"
# mycursor.execute(sql)
# conn.commit()

# Verify that the row now holds the updated attack value.
mycursor.execute("SELECT * from ninja where name='Naruto'")
print(mycursor.fetchall())
# FROM RASENGAN to Shadow CLone Jutsu
"shivam0071@users.noreply.github.com"
] | shivam0071@users.noreply.github.com |
7e9e19a672cc628c6de3bc8e5c6bc27e7e767af9 | e0ecbc6352c442370e90238ae7dd71f4fb78cfa9 | /visual_question_answer/model.py | 76ddc06e1dbeaa8d56d4f2b52510d2d2034316d4 | [] | no_license | xiaogang00/Deep-learning-on-Tensorflow | 22a13b4f9f4db5934b710fdd887d9e88d22ef72d | 7f9cd740bf330ae2fc7ae77636ded068e2aa405c | refs/heads/master | 2021-01-17T14:34:14.490989 | 2017-10-27T03:48:14 | 2017-10-27T03:48:14 | 84,092,564 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,652 | py | import math
import os
import tensorflow as tf
import numpy as np
from base_model import *
from utils.nn import *
from episodic_memory import *
class QuestionAnswerer(BaseModel):
    """Visual question answering model: a CNN image encoder feeding a
    Dynamic-Memory-Network-style RNN (question GRU, bidirectional input
    fusion, episodic memory, softmax answer head).

    Built against the legacy TF API used throughout this file
    (tf.pack/tf.unpack, positional tf.concat, etc.).
    """

    def build(self):
        """ Build the model. """
        self.build_cnn()
        self.build_rnn()

    def build_cnn(self):
        """ Build the CNN. """
        print("Building the CNN part...")
        if self.cnn_model=='vgg16':
            self.build_vgg16()
        elif self.cnn_model=='resnet50':
            self.build_resnet50()
        elif self.cnn_model=='resnet101':
            self.build_resnet101()
        else:
            self.build_resnet152()
        print("CNN part built.")

    def build_vgg16(self):
        """ Build the VGG16 net. """
        bn = self.params.batch_norm
        imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
        is_train = tf.placeholder(tf.bool)

        conv1_1_feats = convolution(imgs, 3, 3, 64, 1, 1, 'conv1_1')
        conv1_1_feats = batch_norm(conv1_1_feats, 'bn1_1', is_train, bn, 'relu')
        conv1_2_feats = convolution(conv1_1_feats, 3, 3, 64, 1, 1, 'conv1_2')
        conv1_2_feats = batch_norm(conv1_2_feats, 'bn1_2', is_train, bn, 'relu')
        pool1_feats = max_pool(conv1_2_feats, 2, 2, 2, 2, 'pool1')

        conv2_1_feats = convolution(pool1_feats, 3, 3, 128, 1, 1, 'conv2_1')
        conv2_1_feats = batch_norm(conv2_1_feats, 'bn2_1', is_train, bn, 'relu')
        conv2_2_feats = convolution(conv2_1_feats, 3, 3, 128, 1, 1, 'conv2_2')
        conv2_2_feats = batch_norm(conv2_2_feats, 'bn2_2', is_train, bn, 'relu')
        pool2_feats = max_pool(conv2_2_feats, 2, 2, 2, 2, 'pool2')

        conv3_1_feats = convolution(pool2_feats, 3, 3, 256, 1, 1, 'conv3_1')
        conv3_1_feats = batch_norm(conv3_1_feats, 'bn3_1', is_train, bn, 'relu')
        conv3_2_feats = convolution(conv3_1_feats, 3, 3, 256, 1, 1, 'conv3_2')
        conv3_2_feats = batch_norm(conv3_2_feats, 'bn3_2', is_train, bn, 'relu')
        conv3_3_feats = convolution(conv3_2_feats, 3, 3, 256, 1, 1, 'conv3_3')
        conv3_3_feats = batch_norm(conv3_3_feats, 'bn3_3', is_train, bn, 'relu')
        pool3_feats = max_pool(conv3_3_feats, 2, 2, 2, 2, 'pool3')

        conv4_1_feats = convolution(pool3_feats, 3, 3, 512, 1, 1, 'conv4_1')
        conv4_1_feats = batch_norm(conv4_1_feats, 'bn4_1', is_train, bn, 'relu')
        conv4_2_feats = convolution(conv4_1_feats, 3, 3, 512, 1, 1, 'conv4_2')
        conv4_2_feats = batch_norm(conv4_2_feats, 'bn4_2', is_train, bn, 'relu')
        conv4_3_feats = convolution(conv4_2_feats, 3, 3, 512, 1, 1, 'conv4_3')
        conv4_3_feats = batch_norm(conv4_3_feats, 'bn4_3', is_train, bn, 'relu')
        pool4_feats = max_pool(conv4_3_feats, 2, 2, 2, 2, 'pool4')

        conv5_1_feats = convolution(pool4_feats, 3, 3, 512, 1, 1, 'conv5_1')
        conv5_1_feats = batch_norm(conv5_1_feats, 'bn5_1', is_train, bn, 'relu')
        conv5_2_feats = convolution(conv5_1_feats, 3, 3, 512, 1, 1, 'conv5_2')
        conv5_2_feats = batch_norm(conv5_2_feats, 'bn5_2', is_train, bn, 'relu')
        conv5_3_feats = convolution(conv5_2_feats, 3, 3, 512, 1, 1, 'conv5_3')
        conv5_3_feats = batch_norm(conv5_3_feats, 'bn5_3', is_train, bn, 'relu')

        # Flatten the 14x14 spatial grid into 196 "facts" in snake order.
        self.permutation = self.get_permutation(14, 14)
        conv5_3_feats.set_shape([self.batch_size, 14, 14, 512])
        conv5_3_feats_flat = self.flatten_feats(conv5_3_feats, 512)
        self.conv_feats = conv5_3_feats_flat
        self.conv_feat_shape = [196, 512]

        self.imgs = imgs
        self.is_train = is_train

    def basic_block(self, input_feats, name1, name2, is_train, bn, c, s=2):
        """ A basic block of ResNets. """
        # Projection shortcut (1x1 conv, stride s) plus a 1-3-1 bottleneck.
        branch1_feats = convolution_no_bias(input_feats, 1, 1, 4*c, s, s, name1+'_branch1')
        branch1_feats = batch_norm(branch1_feats, name2+'_branch1', is_train, bn, None)

        branch2a_feats = convolution_no_bias(input_feats, 1, 1, c, s, s, name1+'_branch2a')
        branch2a_feats = batch_norm(branch2a_feats, name2+'_branch2a', is_train, bn, 'relu')

        branch2b_feats = convolution_no_bias(branch2a_feats, 3, 3, c, 1, 1, name1+'_branch2b')
        branch2b_feats = batch_norm(branch2b_feats, name2+'_branch2b', is_train, bn, 'relu')

        branch2c_feats = convolution_no_bias(branch2b_feats, 1, 1, 4*c, 1, 1, name1+'_branch2c')
        branch2c_feats = batch_norm(branch2c_feats, name2+'_branch2c', is_train, bn, None)

        output_feats = branch1_feats + branch2c_feats
        output_feats = nonlinear(output_feats, 'relu')
        return output_feats

    def basic_block2(self, input_feats, name1, name2, is_train, bn, c):
        """ Another basic block of ResNets. """
        # Identity-shortcut variant: input is added back unchanged.
        branch2a_feats = convolution_no_bias(input_feats, 1, 1, c, 1, 1, name1+'_branch2a')
        branch2a_feats = batch_norm(branch2a_feats, name2+'_branch2a', is_train, bn, 'relu')

        branch2b_feats = convolution_no_bias(branch2a_feats, 3, 3, c, 1, 1, name1+'_branch2b')
        branch2b_feats = batch_norm(branch2b_feats, name2+'_branch2b', is_train, bn, 'relu')

        branch2c_feats = convolution_no_bias(branch2b_feats, 1, 1, 4*c, 1, 1, name1+'_branch2c')
        branch2c_feats = batch_norm(branch2c_feats, name2+'_branch2c', is_train, bn, None)

        output_feats = input_feats + branch2c_feats
        output_feats = nonlinear(output_feats, 'relu')
        return output_feats

    def build_resnet50(self):
        """ Build the ResNet50 net. """
        bn = self.params.batch_norm
        imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
        is_train = tf.placeholder(tf.bool)

        conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
        conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
        pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')

        res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
        res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
        res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)

        res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
        res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, bn, 128)
        res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, bn, 128)
        res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, bn, 128)

        res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, bn, 256)
        res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, bn, 256)
        res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, bn, 256)
        res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, bn, 256)
        res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, bn, 256)
        res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, bn, 256)

        res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, bn, 512)
        res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
        res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)

        # Flatten the 7x7 spatial grid into 49 "facts" in snake order.
        self.permutation = self.get_permutation(7, 7)
        res5c_feats.set_shape([self.batch_size, 7, 7, 2048])
        res5c_feats_flat = self.flatten_feats(res5c_feats, 2048)
        self.conv_feats = res5c_feats_flat
        self.conv_feat_shape = [49, 2048]

        self.imgs = imgs
        self.is_train = is_train

    def build_resnet101(self):
        """ Build the ResNet101 net. """
        bn = self.params.batch_norm
        imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
        is_train = tf.placeholder(tf.bool)

        conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
        conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
        pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')

        res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
        res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
        res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)

        res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
        temp = res3a_feats
        for i in range(1, 4):
            temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, bn, 128)
        res3b3_feats = temp

        res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, bn, 256)
        temp = res4a_feats
        for i in range(1, 23):
            temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, bn, 256)
        res4b22_feats = temp

        res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, bn, 512)
        res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
        res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)

        self.permutation = self.get_permutation(7, 7)
        res5c_feats.set_shape([self.batch_size, 7, 7, 2048])
        res5c_feats_flat = self.flatten_feats(res5c_feats, 2048)
        self.conv_feats = res5c_feats_flat
        self.conv_feat_shape = [49, 2048]

        self.imgs = imgs
        self.is_train = is_train

    def build_resnet152(self):
        """ Build the ResNet152 net. """
        bn = self.params.batch_norm
        imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
        is_train = tf.placeholder(tf.bool)

        conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
        conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
        pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')

        res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
        res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
        res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)

        res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
        temp = res3a_feats
        for i in range(1, 8):
            temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, bn, 128)
        res3b7_feats = temp

        res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, bn, 256)
        temp = res4a_feats
        for i in range(1, 36):
            temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, bn, 256)
        res4b35_feats = temp

        res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, bn, 512)
        res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
        res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)

        self.permutation = self.get_permutation(7, 7)
        res5c_feats.set_shape([self.batch_size, 7, 7, 2048])
        res5c_feats_flat = self.flatten_feats(res5c_feats, 2048)
        self.conv_feats = res5c_feats_flat
        self.conv_feat_shape = [49, 2048]

        # BUGFIX: this used to be `self.img_files = img_files`, but no
        # `img_files` exists in this method (NameError at graph build time).
        # The sibling builders (vgg16/resnet50/resnet101) store the image
        # placeholder as self.imgs, and get_feed_dict() reads self.imgs.
        self.imgs = imgs
        self.is_train = is_train

    def get_permutation(self, height, width):
        """ Get the permutation corresponding to a snake-like walk as decribed by the paper. Used to flatten the convolutional feats. """
        permutation = np.zeros(height*width, np.int32)
        for i in range(height):
            for j in range(width):
                # even rows left-to-right, odd rows right-to-left
                permutation[i*width+j] = i*width+j if i%2==0 else (i+1)*width-j-1
        return permutation

    def flatten_feats(self, feats, channels):
        """ Flatten the feats. """
        temp1 = tf.reshape(feats, [self.batch_size, -1, channels])
        temp1 = tf.transpose(temp1, [1, 0, 2])
        # reorder the spatial positions according to the snake walk
        temp2 = tf.gather(temp1, self.permutation)
        temp2 = tf.transpose(temp2, [1, 0, 2])
        return temp2

    def build_rnn(self):
        """ Build the RNN. """
        print("Building the RNN part...")
        params = self.params
        bn = params.batch_norm
        is_train = self.is_train
        batch_size = self.batch_size

        dim_hidden = params.dim_hidden
        dim_embed = params.dim_embed
        max_ques_len = params.max_ques_len

        num_facts = self.conv_feat_shape[0]
        dim_fact = self.conv_feat_shape[1]
        num_words = self.word_table.num_words

        # Rarer answer words get larger weights in the loss.
        self.word_weight = np.exp(-np.array(self.word_table.word_freq)*self.class_balancing_factor)

        if not self.train_cnn:
            # CNN frozen: facts are precomputed features fed in directly.
            facts = tf.placeholder(tf.float32, [batch_size, num_facts, dim_fact])
        else:
            facts = self.conv_feats

        questions = tf.placeholder(tf.int32, [batch_size, max_ques_len])
        question_lens = tf.placeholder(tf.int32, [batch_size])
        answers = tf.placeholder(tf.int32, [batch_size])
        answer_weights = tf.placeholder(tf.float32, [batch_size])

        gru = tf.nn.rnn_cell.GRUCell(dim_hidden)

        # Initialize the word embedding
        idx2vec = np.array([self.word_table.word2vec[self.word_table.idx2word[i]] for i in range(num_words)])
        if params.fix_embed_weight:
            emb_w = tf.convert_to_tensor(idx2vec, tf.float32)
        else:
            emb_w = weight('emb_w', [num_words, dim_embed], init_val=idx2vec, group_id=1)

        # Encode the questions
        with tf.variable_scope('Question'):
            word_list = tf.unpack(questions, axis=1)
            ques_embed = [tf.nn.embedding_lookup(emb_w, word) for word in word_list]
            ques_embed = tf.transpose(tf.pack(ques_embed), [1, 0, 2])
            all_states, final_state = tf.nn.dynamic_rnn(gru, ques_embed, dtype=tf.float32)
            # pick the GRU state at each question's true last token
            question_enc = []
            for k in range(batch_size):
                current_ques_enc = tf.slice(all_states, [k, question_lens[k]-1, 0], [1, 1, dim_hidden])
                question_enc.append(tf.squeeze(current_ques_enc))
            question_enc = tf.pack(question_enc)
            #ques_enc = final_state

        # Encode the facts
        with tf.name_scope('InputFusion'):
            with tf.variable_scope('Forward'):
                forward_states, _ = tf.nn.dynamic_rnn(gru, facts, dtype=tf.float32)
            with tf.variable_scope('Backward'):
                reversed_facts = tf.reverse(facts, [False, True, False])
                backward_states, _ = tf.nn.dynamic_rnn(gru, reversed_facts, dtype=tf.float32)
                backward_states = tf.reverse(backward_states, [False, True, False])
            facts_enc = forward_states + backward_states

        # Episodic Memory Update
        with tf.variable_scope('EpisodicMemory'):
            episode = EpisodicMemory(dim_hidden, num_facts, question_enc, facts_enc, params.attention, is_train, bn)
            memory = tf.identity(question_enc)

            # Tied memory weights
            if params.tie_memory_weight:
                with tf.variable_scope('Layer') as scope:
                    for t in range(params.memory_step):
                        if params.memory_update == 'gru':
                            memory = gru(episode.new_fact(memory), memory)[0]
                        else:
                            fact = episode.new_fact(memory)
                            expanded_memory = tf.concat(1, [memory, fact, question_enc])
                            memory = fully_connected(expanded_memory, dim_hidden, 'EM_fc', group_id=1)
                            memory = batch_norm(memory, 'EM_bn', is_train, bn, 'relu')
                        scope.reuse_variables()
            # Untied memory weights
            else:
                for t in range(params.memory_step):
                    with tf.variable_scope('Layer%d' %t) as scope:
                        if params.memory_update == 'gru':
                            memory = gru(episode.new_fact(memory), memory)[0]
                        else:
                            fact = episode.new_fact(memory)
                            expanded_memory = tf.concat(1, [memory, fact, question_enc])
                            memory = fully_connected(expanded_memory, dim_hidden, 'EM_fc', group_id=1)
                            memory = batch_norm(memory, 'EM_bn', is_train, bn, 'relu')

            memory = dropout(memory, 0.5, is_train)

        # Compute the result
        with tf.variable_scope('Result'):
            expanded_memory = tf.concat(1, [memory, question_enc])
            logits = fully_connected(expanded_memory, num_words, 'dec', group_id=1)
            results = tf.argmax(logits, 1)
            all_probs = tf.nn.softmax(logits)
            probs = tf.reduce_max(all_probs, 1)

        # Compute the loss
        with tf.variable_scope('Loss'):
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, answers)
            loss0 = cross_entropy * answer_weights
            loss0 = tf.reduce_sum(loss0) / tf.reduce_sum(answer_weights)
            if self.train_cnn:
                loss1 = params.weight_decay * (tf.add_n(tf.get_collection('l2_0')) + tf.add_n(tf.get_collection('l2_1')))
            else:
                loss1 = params.weight_decay * tf.add_n(tf.get_collection('l2_1'))
            loss = loss0 + loss1

        # Build the solver
        if params.solver == 'adam':
            solver = tf.train.AdamOptimizer(params.learning_rate)
        elif params.solver == 'momentum':
            solver = tf.train.MomentumOptimizer(params.learning_rate, params.momentum)
        elif params.solver == 'rmsprop':
            solver = tf.train.RMSPropOptimizer(params.learning_rate, params.decay, params.momentum)
        else:
            solver = tf.train.GradientDescentOptimizer(params.learning_rate)

        tvars = tf.trainable_variables()
        # clip the global gradient norm at 3.0 before applying updates
        gs, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 3.0)
        opt_op = solver.apply_gradients(zip(gs, tvars), global_step=self.global_step)

        self.facts = facts
        self.questions = questions
        self.question_lens = question_lens
        self.answers = answers
        self.answer_weights = answer_weights

        self.loss = loss
        self.loss0 = loss0
        self.loss1 = loss1

        self.opt_op = opt_op
        self.results = results
        self.probs = probs
        print("RNN part built.")

    def get_feed_dict(self, batch, is_train, feats=None):
        """ Get the feed dictionary for the current batch. """
        if is_train:
            # training phase
            img_files, questions, question_lens, answers = batch
            imgs = self.img_loader.load_imgs(img_files)
            answer_weights = self.word_weight[answers]
            if self.train_cnn:
                return {self.imgs: imgs, self.questions: questions, self.question_lens: question_lens, self.answers: answers, self.answer_weights: answer_weights, self.is_train: is_train}
            else:
                return {self.facts: feats, self.questions: questions, self.question_lens: question_lens, self.answers: answers, self.answer_weights: answer_weights, self.is_train: is_train}
        else:
            # testing or validation phase
            img_files, questions, question_lens = batch
            imgs = self.img_loader.load_imgs(img_files)
            if self.train_cnn:
                return {self.imgs: imgs, self.questions: questions, self.question_lens: question_lens, self.is_train: is_train}
            else:
                return {self.facts: feats, self.questions: questions, self.question_lens: question_lens, self.is_train: is_train}
| [
"872310734@qq.com"
] | 872310734@qq.com |
29e16f0faaa4866bc0815c2235ece255f754032e | d5eb2fe5d49b581562ae2bc660d08ca80a03d331 | /PythonSandbox/src/misc/num_digits_in_integer.py | 6757434647672cd9e95d213417d05eed2cbab5ac | [] | no_license | mcxu/code-sandbox | fd5aa2e593057901d281a0e74db8957777b06cf3 | a785231582bda8578f79982e2dcddd2f2ab559b4 | refs/heads/master | 2023-07-10T02:07:24.180947 | 2023-07-08T03:31:48 | 2023-07-08T03:31:48 | 130,493,607 | 4 | 2 | null | 2023-01-15T22:53:29 | 2018-04-21T16:49:40 | Python | UTF-8 | Python | false | false | 130 | py | '''
Given an integer n, return a map showing the counts of each single digit in n.
Condition: Do not convert n into string.
'''
| [
"michaelxu79@gmail.com"
] | michaelxu79@gmail.com |
dcf96b29b3add29777afb682281c2208544869c8 | dd4f833a6f4ef2cdbb7c2c56e2f98edf2ea2527b | /81_search_in_rotated_sorted_array_2/sol.py | 318a7056f5067545dd317108ecd4aecaaa4f5e02 | [] | no_license | lianke123321/leetcode_sol | 7db8f041103bd6f2710582ceb8a6778b36cd534e | ea492ec864b50547214ecbbb2cdeeac21e70229b | refs/heads/master | 2020-04-06T15:50:12.988755 | 2016-10-21T07:47:29 | 2016-10-21T07:47:29 | 41,214,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: bool
"""
if not nums:
return False
low = 0
high = len(nums) - 1
while low <= high:
mid = low + (high - low) / 2
if target == nums[mid]:
return True
# tricky part
while low < mid and nums[low] == nums[mid]:
low += 1
# the first half is ordered
if nums[low] <= nums[mid]:
# target is in the first half
if nums[low] <= target <= nums[mid]:
high = mid - 1
else:
low = mid + 1
# the second half is ordered
else:
# target is in the second half
if nums[mid] <= target <= nums[high]:
low = mid + 1
else:
high = mid - 1
return False
def search_self(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: bool
"""
lo, hi = 0, len(nums) - 1
while lo <= hi:
mid = (lo + hi) >> 1
if nums[mid] == target:
return True
elif lo == hi:
return False
if nums[mid] > nums[hi]:
if nums[lo] <= target < nums[mid]:
hi = mid - 1
else:
lo = mid + 1
elif nums[mid] < nums[hi]:
if nums[mid] < target <= nums[hi]:
lo = mid + 1
else:
hi = mid
else:
hi -= 1
return False
| [
"ankeli@cs.stonybrook.edu"
] | ankeli@cs.stonybrook.edu |
5ea5fe910e1ef86b506005a39e879e50f77d83f4 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/473_matchsticks_to_square.py | 99245aad8635cdcb9f58acde68ea9d0399c61f3b | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | class Solution:
def makesquare(self, matchsticks: List[int]) -> bool:
l = sum(matchsticks)
if l % 4 != 0:
return False
size = l // 4
matchsticks.sort()
return self._dfs(matchsticks, len(matchsticks)-1, [size] * 4)
def _dfs(self, ms, pos, sizes):
if pos == -1:
return sum(sizes) == 0
for i in range(4):
if sizes[i] < ms[pos]:
continue
sizes[i] -= ms[pos]
if self._dfs(ms, pos - 1, sizes):
return True
sizes[i] += ms[pos]
return False
| [
"ypeng1@andrew.cmu.edu"
] | ypeng1@andrew.cmu.edu |
0d289d32188e4293e36a4834feff7c0180528811 | 3900320dcf41439677a978e31540af197fcb9eac | /notes/Data_Structures/Class_notes/stacks-and-queues/stacks_and_queues.py | 4466fecad2420df535ccfa79bb08278f2bf3d514 | [] | no_license | youngalx2/computer_science | 80b9e843a098ceaa5c109c1283d5785093f7015a | 84a591d8a964358f0417c506092c90339020c861 | refs/heads/main | 2023-08-14T02:07:06.616839 | 2021-09-29T06:48:26 | 2021-09-29T06:48:26 | 406,182,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | from collections import deque
# make a new queue
our_queue = deque()

# a plain list restricted to append()/pop() acts as a stack
our_stack = []

# start with a stack [1, 2, 3, 4, 5]
for number in (1, 2, 3, 4, 5):
    our_stack.append(number)

# drain the stack into the queue: items arrive as 5, 4, 3, 2, 1
for _ in range(5):
    our_queue.append(our_stack.pop())

# move everything back: popleft keeps the 5, 4, 3, 2, 1 order,
# so we end up with the stack [5, 4, 3, 2, 1]
for _ in range(5):
    our_stack.append(our_queue.popleft())

# popping now prints 1, 2, 3, 4, 5
for _ in range(5):
    print(our_stack.pop())

# this helped us reverse our order of our stack
"89937900+youngalx2@users.noreply.github.com"
] | 89937900+youngalx2@users.noreply.github.com |
ab1d0cabf2fcd61e81b9bbbfbbb6d74b4ae138b9 | 739db760e9082f2aefa45b104fde33f8ee97c7c7 | /setup.py | 3923fc7b6f7a5aacd5cf471d20e958a3a41e927d | [] | no_license | hetaov/study | 69e21b2b6b6dc97b9197291f0b04d8277bc12234 | 8a9edafd3c657fa3805d05fe6800db7771277004 | refs/heads/master | 2021-01-17T18:02:28.637007 | 2017-12-19T08:52:23 | 2017-12-19T08:52:23 | 70,954,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | import distutils.core
# Prefer setuptools-enhanced commands when available; plain distutils still
# works if the import fails.
try:
    import setuptools
except ImportError:
    pass

# NOTE(review): despite its name, this empty list feeds install_requires
# below; the distributed packages are listed literally in setup().
packages=[]

distutils.core.setup(
    name='study',
    version = '0.4.2',
    packages=['test_module'],
    author='Innovaser',
    author_email='rancavil@innovaser.cl',
    install_requires=packages
)
| [
"ross.he@semioe.com"
] | ross.he@semioe.com |
71fb701ac72e5e8ae42a06887313228a0134a9d3 | 9de9ff94ad37ea2e2a4e7d96c12acd3101a471e1 | /sec22-threads/threadCommunicationUsingWaitAndNotify.py | 57d33c2ca0a80ae10af370f6844fad4630997988 | [] | no_license | paularodriguez/python-core-and-advance-course | 0c2d0ef14566a9b30e8f92455bdb9029d7e14342 | 7e6cf274bae80f772151b60c2f4e67e9872c442a | refs/heads/master | 2020-08-29T17:25:46.338347 | 2019-11-20T21:10:39 | 2019-11-20T21:10:39 | 218,110,675 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | from threading import *
from time import sleep
class Producer:
def __init__(self):
self.products = []
self.c = Condition()
def produce(self):
self.c.acquire()
for i in range(1,5):
self.products.append("Product " + str(i))
sleep(1)
print("Item Added")
# Notify and release the lock
self.c.notify()
self.c.release()
class Consumer:
def __init__(self, prod):
self.prod = prod
def consume(self):
self.prod.c.acquire()
self.prod.c.wait(timeout=0)
self.prod.c.release()
print("Orders Shipped ", self.prod.products)
# Class instances
p = Producer()
c = Consumer(p)
t1 = Thread(target=p.produce)
t2 = Thread(target=c.consume)
t1.start()
t2.start()
| [
"paula.rp93@gmail.com"
] | paula.rp93@gmail.com |
883d4fe08ad71cc98c2aa4886028014fb52da007 | dcbcfcf9e8eb5b1666fd4b10c9183c9a64e47c0b | /GMMn.py | ad20169a373541022b33ccbc47b5b3b9fed6457a | [] | no_license | 2ahmedabdullah/GMM | 364bab9a45160fbed36693de3aa59bb7465b9a0d | 439a2ba3e5238bc5beec5da401adc28bee737b49 | refs/heads/master | 2023-08-01T07:24:26.314945 | 2021-09-17T17:19:19 | 2021-09-17T17:19:19 | 93,623,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 21 14:34:41 2018
@author: 1449486
"""
import numpy as np
import pandas as pd
confg = pd.read_csv("//01hw755449/Shared/ABDUL/GMM_complete/config1.txt")
confg=confg.transpose()
confg.columns = confg.iloc[0]
confg=confg.iloc[1:]
#confg=confg.transpose()
from sklearn import mixture
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import glob
import pandas as pd
# get data
val = (confg['datapath'])
val=(", ".join(val))
path =val
#path=r'\\01hw755449\Shared\ABDUL\GMM_complete\PS'
filenames = glob.glob(path + "/*.csv")
dfs = []
for filename in filenames:
dfs.append(pd.read_csv(filename))
#selecting broker ID from data and aggregrating
dat=[]
for j in range(0,len(dfs)):
df= pd.DataFrame(columns=dfs[0].columns, index=dfs[0].index)
df=dfs[j]
dd=df.loc[df['brokerID'] == int(confg.brokerID)]#enter broker id here
dd=np.array(dd)
dat.append(dd)
data1= np.vstack(dat)
c= list(dfs[0])
data=pd.DataFrame(data1,columns=c)
dataa=data.loc[:, data.columns != 'brokerID']
#splitting training and test set
train=dataa.loc[data['showedUsualBehavior'] == 1]
test=dataa.loc[data['showedUsualBehavior'] == 0]
#removing the labels
train=train.loc[:, train.columns != 'showedUsualBehavior']
test=test.loc[:, test.columns != 'showedUsualBehavior']
#normalization of the dataset
train1=np.array(train)
train_norm=(train1-train1.mean())/train1.std()
test1=np.array(test)
test_norm=(test1-train1.mean())/train1.std()
#selection of no. of attributes and training size for modelling
no_attributes=(confg.attribute)
l_dataa=len(dataa.index)
train_size=int(float(confg.train_split)*l_dataa)
trainrow=train_norm[0:train_size,0:int(no_attributes)]
test1=test_norm[:,0:int(no_attributes)]
test2=train_norm[train_size:l_dataa,0:int(no_attributes)]
#concatenating anomalies with data without any anomaly
df1 = pd.DataFrame(test1)
df2 = pd.DataFrame(test2)
testt=pd.concat([df1,df2])
testt=np.array(testt)
# Fit the GMMs
from sklearn.mixture import GMM
gmm =mixture.GMM(n_components=int(confg.k))
gmm.fit(trainrow)
# Distribution parameters
means=(gmm.means_)
covar=(gmm.covars_)
weights=(gmm.weights_)
#probabliltiy pdf values
prob=[0]*int(confg.k)
pred=[0]*len(testt)
for j in range(0,len(testt)):
testrow=testt[j,:]
for i in range(0,int(confg.k)):
prob[i]=multivariate_normal.pdf(testrow,mean=means[i],cov=covar[i])
prob=np.array(prob)
sumprob=sum(prob)
if(sumprob<float(confg.mu)):
pred[j]='Anomaly'
else:
pred[j]='ok'
pred=pd.DataFrame(pred)
expected=[0]*len(testt)
for j in range(0,len(testt)):
if (j<len(test1)):
expected[j]='Anomaly'
else:
expected[j]='ok'
expected=pd.DataFrame(expected)
result=pd.concat([expected,pred],axis=1)
result=pd.DataFrame(result.values, columns = ["Expected", "Predicted"])
#building a confusion matrix
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(expected,pred)
cm
#confusion matrix heat map
import seaborn as sn
sn.heatmap(cm, annot=True)
# Precision: tp/(tp+fp):
recall=cm[0,0]/(cm[0,0]+cm[0,1])
# Recall: tp/(tp + fn):
precision=cm[1,1]/(cm[1,0]+cm[1,1])
# F-Score: 2 * precision * recall /(precision + recall):
fscore = 2 * precision * recall / (precision + recall)
fscore
'''
names=list(data)
plt.hist(data[names[1]], color = 'blue', edgecolor = 'black',bins = int(20))
answer=[]
for i in range(0,len(testt)):
if(result[i]=='Anomaly'):
answer.append(i+1)
answer=np.array(answer)
#AIC and BIC plots
n_estimators = np.arange(1, 90)
gmms = [GMM(n, n_iter=1000).fit(train1) for n in n_estimators]
bics = [gmm.bic(train1) for gmm in gmms]
aics = [gmm.aic(train1) for gmm in gmms]
plt.plot(n_estimators, bics, label='BIC')
plt.plot(n_estimators, aics, label='AIC')
plt.legend();
'''
| [
"noreply@github.com"
] | 2ahmedabdullah.noreply@github.com |
a5f1163c05b92d47232715c5533421c1260f3dbb | 885a3624e8211bd34c04edef632fd4a75e20c23c | /stats/migrations/0004_country.py | 6b89583b4caac17eafc6a1cdefd8ea0edd967a37 | [] | no_license | dlee3458/coronavirus-pandemic | b41bc4547338b0d4710bba39cf3c9551f74ae41d | 566fa96dfee37ae543ed70a63db460c5c57020c9 | refs/heads/main | 2023-06-03T07:12:31.076251 | 2021-06-23T08:16:47 | 2021-06-23T08:16:47 | 341,119,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # Generated by Django 3.0.2 on 2020-05-27 08:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stats', '0003_record'),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('death_count', models.IntegerField()),
('confirmed_count', models.IntegerField()),
('recovered_count', models.IntegerField()),
],
),
]
| [
"dlee@daniels-mbp.home"
] | dlee@daniels-mbp.home |
11352cd17ac2b55df4e0c0aa5c2b18f16ea0440d | 3646bd3a3737f39e5b6301d6ad17f2d4268b005c | /dc1.py | 9a43bd0e1279d86752547a08de95e3e72f1e6126 | [] | no_license | supria68/dailychallenge | 8aa1a072032e4e2b718636a6d53d843837f1146d | be5ce1304f4795cc0aea30ba57a3a1f7b214d726 | refs/heads/main | 2023-03-22T05:21:51.390876 | 2021-03-22T20:37:07 | 2021-03-22T20:37:07 | 335,982,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | """
Your goal is to create a function that removes the first and last letters of a string. Strings with two characters or less are considered invalid. You can choose to have your function return null or simply ignore.
"""
def solution(somestr):
if len(somestr) > 2:
return somestr[1:-1]
return None
def goingwild(somestr):
if len(somestr) > 2:
temp = list(somestr)
temp.pop()
temp.reverse()
temp.pop()
temp.reverse()
return ''.join(temp)
return None
if __name__ == "__main__":
# my testcases
print(solution('hello whatsup?')) # ello whatsup
print(solution('d4')) # None
print(goingwild('hello whatsup?')) # ello whatsup
| [
"noreply@github.com"
] | supria68.noreply@github.com |
2504e5242e6b6ab80063100a440a1bfb8f0179f0 | f17d2a88c566b884272f5468f57cbe7201af7f22 | /app/core/migrations/0002_tag.py | 34b9ec20533042a4b716c555023f098daf484ec2 | [
"MIT"
] | permissive | hosam7asko77/recipe-app-api | 44710cd9c82ce91fec0f20904689aa591d430f97 | 695fcfaba18e285aa4a8c0b58ec66ee9720edf08 | refs/heads/master | 2020-09-01T14:04:40.466348 | 2019-11-11T14:58:57 | 2019-11-11T14:58:57 | 218,975,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Generated by Django 2.2.6 on 2019-11-11 12:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"hosam7asko1993@gmail.com"
] | hosam7asko1993@gmail.com |
2bc69e113e4e87e4a6c1835a4f492b11cf98a7ae | 6dc9183cb416fc28997cdfb508befd6ef681832c | /paper16/qc2c.py | 36b0fe5a79e62071bcce6c42639d1fa564f1e9c2 | [] | no_license | wiwa/qc | b7a622f6735ba3252990562a4a8522860d199b13 | a3760d71b96369e714c69bf48a07c5776d12c496 | refs/heads/master | 2021-03-27T11:43:57.343196 | 2016-12-19T04:22:10 | 2016-12-19T04:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | from numpy import *
from scipy import integrate
from scipy.constants import hbar, pi
import pylab as p
# electron relaxation time between singlet triplet state
# s. pasini paper referenced t in units of "tau_p"??
tau = 1e-11
tau=1
# Params in THz
w = 1e11
D = 1e12 # Delta from long ago
D = 1e-40
i_ = 1.0j
def iw(t):
return i_*w*t
def X_factory(theta_f, a, b, antisym):
def X_sym(t):
_1 = theta_f(t) / 2
_2 = (a - _1) * cos((2 * pi / tau) * t)
_3 = -a * cos((4 * pi / tau) * t)
return _1 + _2 + _3
return X_sym
def dChis_dt_factory(X):
# wraps the coupled diff eqs.
def dChis_dt(t, Chi):
a = Chi[0] # Chi_plus dot
b = Chi[1] # Chi_minus dot
c = -1.0j/hbar # 1/j = -j
a_dot = D*a + (hbar / tau)*b*X(t)
b_dot = (hbar / tau)*a*X(t) - D*b
return c*array([ a_dot, b_dot ])
return dChis_dt
def theta_f(t):
return w * t
a1_sym = -2.159224 * (1/tau)
# ZVODE-- At T (=R1) and step size H (=R2), the
# corrector convergence failed repeatedly
# or with abs(H) = HMIN
# a1_sym = 0
# a2_sym = -5.015588 * (1/tau) # for pi/2
# (First-order) Symmetric
X1_sym = X_factory(theta_f, a1_sym, None, False)
dChiSym_dt = dChis_dt_factory(X1_sym)
# Setup orthogonal spinors
X_pi = X1_sym(pi)
delta = sqrt(D**2 + ((hbar / tau)**2)*(X_pi**2))
N_chi = 1/sqrt( (hbar*X_pi)**2 + (delta - D)**2 )
Chi_plus_0 = N_chi * array([ hbar * X_pi, (delta - D)*tau ])
Chi_minus_0 = N_chi * array([ (delta - D)*tau, -hbar * X_pi ])
Chi_0 = Chi_plus_0 + Chi_minus_0
# Integration "params"
steps = 100.
t0 = 0
t1 = tau
dt = t1/steps # 100 steps
# Init integrator
sol = integrate.ode(dChiSym_dt).set_integrator('zvode')
sol.set_initial_value(Chi_0, t0)
# Intermediate variables
Chi_ = []
# Evolve X, stashing info in X_.
# As we evolve X through t, obtain a_e, b_e, and psi info.
while sol.successful() and sol.t < t1:
t_ = sol.t + dt
res = sol.integrate(t_)
Chi_.append(res)
success = sol.successful()
print(success)
if success:
Chi = array(Chi_)
ts = linspace(t0, t1, len(Chi)) # time as x coordinate
a, b = Chi.T # Transverse=
## Plotting
f1 = p.figure()
p.plot(ts, a, 'r-', label='Chi[0]')
p.plot(ts, b, 'b-', label='Chi[1]')
p.legend(loc='best')
p.xlabel('time ')
p.ylabel('value')
p.title('Chi_pi symmetric')
p.show()
| [
"fallenpwr@gmail.com"
] | fallenpwr@gmail.com |
9984771e8d626ada77fcf03acc254f38d7ad94e0 | c6743ba5c320cf24f7beeea0ab3a0a3b62f35b18 | /train_gn.py | d3c8e08457bbf79da79dacb7eba8b2257719971b | [] | no_license | yi-xiao-wang/gn.pytorch | 3df7348c82aab89552e54aa387e3673811dd373c | c55d75fb5d8c5c92e3d98414c3db863ad72ba965 | refs/heads/master | 2021-10-08T17:03:48.906420 | 2018-12-15T06:06:08 | 2018-12-15T06:06:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,339 | py | import torch.utils.data as data
from torch.utils.data import DataLoader
import numpy as np
import networkx as nx
import torch.optim as optim
import matplotlib.pyplot as plt
from gn_models import init_graph_features, FFGN
import torch
from tensorboardX import SummaryWriter
from datetime import datetime
import os
from dataset import SwimmerDataset
from utils import *
from tqdm import tqdm
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print(opt)
dset = SwimmerDataset('swimmer.npy')
dset_eval = SwimmerDataset('swimmer_test.npy')
use_cuda = True
dl = DataLoader(dset, batch_size=200, num_workers=0, drop_last=True)
dl_eval = DataLoader(dset_eval, batch_size=200, num_workers=0, drop_last=True)
G1 = nx.path_graph(6).to_directed()
G_target = nx.path_graph(6).to_directed()
#nx.draw(G1)
#plt.show()
node_feat_size = 6
edge_feat_size = 3
graph_feat_size = 10
gn = FFGN(graph_feat_size, node_feat_size, edge_feat_size).cuda()
if opt.model != '':
gn.load_state_dict(torch.load(opt.model))
optimizer = optim.Adam(gn.parameters(), lr = 1e-4)
schedular = optim.lr_scheduler.StepLR(optimizer, 5e4, gamma = 0.975)
savedir = os.path.join('./logs','runs',
datetime.now().strftime('%B%d_%H:%M:%S'))
writer = SummaryWriter(savedir)
step = 0
normalizers = torch.load('normalize.pth')
in_normalizer = normalizers['in_normalizer']
out_normalizer = normalizers['out_normalizer']
std = in_normalizer.get_std()
for epoch in range(300):
for i,data in tqdm(enumerate(dl), total = len(dset) / 200 + 1):
optimizer.zero_grad()
action, delta_state, last_state = data
action, delta_state, last_state = action.float(), delta_state.float(), last_state.float()
if use_cuda:
action, delta_state, last_state = action.cuda(), delta_state.cuda(), last_state.cuda()
init_graph_features(G1, graph_feat_size, node_feat_size, edge_feat_size, cuda=True, bs = 200)
load_graph_features(G1, action, last_state, delta_state,bs=200, noise = 0.02, std = std)
G_out = gn(in_normalizer.normalize(G1))
init_graph_features(G_target, graph_feat_size, node_feat_size, edge_feat_size, cuda=True, bs=200)
load_graph_features(G_target, action, delta_state, None, bs=200, norm = False, noise = 0)
G_target_normalized = out_normalizer.normalize(G_target)
loss = build_graph_loss2(G_out, G_target_normalized)
loss.backward()
if step % 10 == 0:
writer.add_scalar('loss', loss.data.item(), step)
step += 1
for param in gn.parameters():
if not param.grad is None:
param.grad.clamp_(-3,3)
optimizer.step()
schedular.step()
if step % 10000 == 0:
torch.save(
gn.state_dict(),
savedir +
'/model_{}.pth'.format(step))
iter = 0
sum_loss = 0
#evaluation loop, done every epoch
for i,data in tqdm(enumerate(dl_eval)):
action, delta_state, last_state = data
action, delta_state, last_state = action.float(), delta_state.float(), last_state.float()
if use_cuda:
action, delta_state, last_state = action.cuda(), delta_state.cuda(), last_state.cuda()
init_graph_features(G1, graph_feat_size, node_feat_size, edge_feat_size, cuda=True, bs = 200)
load_graph_features(G1, action, last_state, delta_state, bs=200, noise = 0)
G_out = gn(in_normalizer.normalize(G1))
init_graph_features(G_target, graph_feat_size, node_feat_size, edge_feat_size, cuda=True, bs=200)
load_graph_features(G_target, action, delta_state, None, bs=200, norm = False, noise = 0)
G_target_normalized = out_normalizer.normalize(G_target)
loss = build_graph_loss2(G_out, G_target_normalized)
sum_loss += loss.data.item()
iter += 1
writer.add_scalar('loss_eval', sum_loss / float(iter), step) | [
"xf1280@gmail.com"
] | xf1280@gmail.com |
10ad290fce78ce96c8d16dd4306668b6a6b91f45 | 0aa5c371d51b8c90108a35ab9094ec1122422107 | /labs/lab08.py | 6339f984e3e52232dfc87f83c2629572bf333d85 | [] | no_license | caseyzlei/cs61a | b0cfc7fb8f2b725e2f2478313ce336fefd3f386c | 88131410dabf936abb02e6224e6ede650f9d2b2b | refs/heads/main | 2023-02-05T09:28:20.037392 | 2020-12-21T23:24:16 | 2020-12-21T23:24:16 | 323,467,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,784 | py | def convert_link(link):
"""Takes a linked list and returns a Python list with the same elements.
>>> link = Link(1, Link(2, Link(3, Link(4))))
>>> convert_link(link)
[1, 2, 3, 4]
>>> convert_link(Link.empty)
[]
"""
"*** YOUR CODE HERE ***"
if link == Link.empty:
return []
else:
return [link.first] + convert_link(link.rest)
def every_other(s):
"""Mutates a linked list so that all the odd-indiced elements are removed
(using 0-based indexing).
>>> s = Link(1, Link(2, Link(3, Link(4))))
>>> every_other(s)
>>> s
Link(1, Link(3))
>>> odd_length = Link(5, Link(3, Link(1)))
>>> every_other(odd_length)
>>> odd_length
Link(5, Link(1))
>>> singleton = Link(4)
>>> every_other(singleton)
>>> singleton
Link(4)
"""
"*** YOUR CODE HERE ***"
if s is Link.empty or s.rest is Link.empty:
return None
else:
s.rest = s.rest.rest
every_other(s.rest)
def cumulative_mul(t):
"""Mutates t so that each node's label becomes the product of all labels in
the corresponding subtree rooted at t.
>>> t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)])
>>> cumulative_mul(t)
>>> t
Tree(105, [Tree(15, [Tree(5)]), Tree(7)])
"""
"*** YOUR CODE HERE ***"
value = t.label
if t.is_leaf:
pass
for b in t.branches:
cumulative_mul(b)
value = value * b.label
t.label = value
def has_cycle(link):
"""Return whether link contains a cycle.
>>> s = Link(1, Link(2, Link(3)))
>>> s.rest.rest.rest = s
>>> has_cycle(s)
True
>>> t = Link(1, Link(2, Link(3)))
>>> has_cycle(t)
False
>>> u = Link(2, Link(2, Link(2)))
>>> has_cycle(u)
False
"""
"*** YOUR CODE HERE ***"
keep = []
while link is not Link.empty:
for link in keep:
return True
keep = keep.append(link)
return False
def has_cycle_constant(link):
"""Return whether link contains a cycle.
>>> s = Link(1, Link(2, Link(3)))
>>> s.rest.rest.rest = s
>>> has_cycle_constant(s)
True
>>> t = Link(1, Link(2, Link(3)))
>>> has_cycle_constant(t)
False
"""
"*** YOUR CODE HERE ***"
def reverse_other(t):
"""Mutates the tree such that nodes on every other (odd-depth) level
have the labels of their branches all reversed.
>>> t = Tree(1, [Tree(2), Tree(3), Tree(4)])
>>> reverse_other(t)
>>> t
Tree(1, [Tree(4), Tree(3), Tree(2)])
>>> t = Tree(1, [Tree(2, [Tree(3, [Tree(4), Tree(5)]), Tree(6, [Tree(7)])]), Tree(8)])
>>> reverse_other(t)
>>> t
Tree(1, [Tree(8, [Tree(3, [Tree(5), Tree(4)]), Tree(6, [Tree(7)])]), Tree(2)])
"""
"*** YOUR CODE HERE ***"
class Link:
"""A linked list.
>>> s = Link(1)
>>> s.first
1
>>> s.rest is Link.empty
True
>>> s = Link(2, Link(3, Link(4)))
>>> s.first = 5
>>> s.rest.first = 6
>>> s.rest.rest = Link.empty
>>> s # Displays the contents of repr(s)
Link(5, Link(6))
>>> s.rest = Link(7, Link(Link(8, Link(9))))
>>> s
Link(5, Link(7, Link(Link(8, Link(9)))))
>>> print(s) # Prints str(s)
<5 7 <8 9>>
"""
empty = ()
def __init__(self, first, rest=empty):
assert rest is Link.empty or isinstance(rest, Link)
self.first = first
self.rest = rest
def __repr__(self):
if self.rest is not Link.empty:
rest_repr = ', ' + repr(self.rest)
else:
rest_repr = ''
return 'Link(' + repr(self.first) + rest_repr + ')'
def __str__(self):
string = '<'
while self.rest is not Link.empty:
string += str(self.first) + ' '
self = self.rest
return string + str(self.first) + '>'
class Tree:
"""
>>> t = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
>>> t.label
3
>>> t.branches[0].label
2
>>> t.branches[1].is_leaf()
True
"""
def __init__(self, label, branches=[]):
for b in branches:
assert isinstance(b, Tree)
self.label = label
self.branches = list(branches)
def is_leaf(self):
return not self.branches
def map(self, fn):
"""
Apply a function `fn` to each node in the tree and mutate the tree.
>>> t1 = Tree(1)
>>> t1.map(lambda x: x + 2)
>>> t1.map(lambda x : x * 4)
>>> t1.label
12
>>> t2 = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
>>> t2.map(lambda x: x * x)
>>> t2
Tree(9, [Tree(4, [Tree(25)]), Tree(16)])
"""
self.label = fn(self.label)
for b in self.branches:
b.map(fn)
def __contains__(self, e):
"""
Determine whether an element exists in the tree.
>>> t1 = Tree(1)
>>> 1 in t1
True
>>> 8 in t1
False
>>> t2 = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
>>> 6 in t2
False
>>> 5 in t2
True
"""
if self.label == e:
return True
for b in self.branches:
if e in b:
return True
return False
def __repr__(self):
if self.branches:
branch_str = ', ' + repr(self.branches)
else:
branch_str = ''
return 'Tree({0}{1})'.format(self.label, branch_str)
def __str__(self):
def print_tree(t, indent=0):
tree_str = ' ' * indent + str(t.label) + "\n"
for b in t.branches:
tree_str += print_tree(b, indent + 1)
return tree_str
return print_tree(self).rstrip()
| [
"noreply@github.com"
] | caseyzlei.noreply@github.com |
5a6597d356ef6a5f9227e2b52931dcdad487a02e | b3103d810f1e1f2032ec77f7e1e4282bdac7d70f | /createDics.py | 8b4da7c5cc823aacf17326cdee591d8976e91c6c | [] | no_license | ozgedincsoy/SeniorProject | 9031c4a1c96095f7c70450df411e05f637f02078 | 5ab5d05f0baae436fd2b770a2c1b4231d9840a8c | refs/heads/master | 2020-05-23T16:14:21.619200 | 2019-05-15T17:38:45 | 2019-05-15T17:38:45 | 186,844,663 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,067 | py | import re
import pymongo
from pymongo import MongoClient
import pickle
"""
This script creates geneInformation.pickle and docToGenes.pickle files.
"""
def get_two_genes_contained(gene1, gene2):
res = []
for doc in docs:
if gene1 in docs[doc] and gene2 in docs[doc]:
res.append(doc)
return res
#print('Mongo version', pymongo.__version__)
client = MongoClient('localhost', 27017)
db = client.pubmed
collection = db.abstracts_copy14m
f = open("analyzedfiles/union.txt", "r")
file = f.read()
f_a = open("analyzedfiles/autism.txt", "r")
file_a = f_a.read()
f_e = open("analyzedfiles/epilepsy.txt", "r")
file_e = f_e.read()
genes = {}
docs = {}
genes_a = {}
docs_a = {}
genes_e = {}
docs_e = {}
gene_l = {}
matches = re.findall("([^\s]+)\s+([0-9]+)\s+(.*?)[\s\n]*([0-9]+)\s+([0-9]+)\s+([a-zA-Z](?:.*?|)*[\s\n]+)((?:[0-9]+,)*[0-9]+),[\s\n]", file, re.MULTILINE | re.DOTALL)
matches_a = re.findall("([^\s]+)\s+([0-9]+)\s+(.*?)[\s\n]*([0-9]+)\s+([0-9]+)\s+([a-zA-Z](?:.*?|)*[\s\n]+)((?:[0-9]+,)*[0-9]+),[\s\n]", file_a, re.MULTILINE | re.DOTALL)
matches_e = re.findall("([^\s]+)\s+([0-9]+)\s+(.*?)[\s\n]*([0-9]+)\s+([0-9]+)\s+([a-zA-Z](?:.*?|)*[\s\n]+)((?:[0-9]+,)*[0-9]+),[\s\n]", file_e, re.MULTILINE | re.DOTALL)
for m in matches:
doc_ids = m[6].split(',')
matched_term_list = m[5].split(' | ')
matched_term_list = matched_term_list[:len(matched_term_list)-1]
genes[m[0]] = { "hugo" : m[1] , "name" : m[2], "occurrence" : m[3], "paperNum" : m[4], "matchedTerms" : matched_term_list, "docs" : doc_ids}
gene_l[m[0]] = m[0]
for g in m[2]:
gene_l[g] = m[0]
for gene in genes:
for doc_id in genes[gene]['docs']:
gene_list = docs.get(doc_id, [])
gene_list.append(gene)
docs[doc_id] = gene_list
for m in matches_a:
doc_ids = m[6].split(',')
matched_term_list = m[5].split(' | ')
matched_term_list = matched_term_list[:len(matched_term_list)-1]
genes_a[m[0]] = { "hugo" : m[1] , "name" : m[2], "occurrence" : m[3], "paperNum" : m[4], "matchedTerms" : matched_term_list, "docs" : doc_ids}
for gene in genes_a:
for doc_id in genes_a[gene]['docs']:
gene_list = docs_a.get(doc_id, [])
gene_list.append(gene)
docs_a[doc_id] = gene_list
for m in matches_e:
doc_ids = m[6].split(',')
matched_term_list = m[5].split(' | ')
matched_term_list = matched_term_list[:len(matched_term_list)-1]
genes_e[m[0]] = { "hugo" : m[1] , "name" : m[2], "occurrence" : m[3], "paperNum" : m[4], "matchedTerms" : matched_term_list, "docs" : doc_ids}
for gene in genes_e:
for doc_id in genes_e[gene]['docs']:
gene_list = docs_e.get(doc_id, [])
gene_list.append(gene)
docs_e[doc_id] = gene_list
two_dim = {}
for gene in genes:
two_dim[gene] = []
if gene in genes_a:
two_dim[gene].append(float(genes_a[gene]["occurrence"])/float(genes_a[gene]["paperNum"]))
else:
two_dim[gene].append(0.0)
if gene in genes_e:
two_dim[gene].append(float(genes_e[gene]["occurrence"])/float(genes_e[gene]["paperNum"]))
else:
two_dim[gene].append(0.0)
sorted_y = sorted(two_dim.items(), key=lambda kv: kv[1][1], reverse=True)
counter= 0
sorted_x = sorted(two_dim.items(), key=lambda kv: kv[1][0], reverse=True)
with open('weighted.pickle', 'wb') as handle:
pickle.dump(two_dim, handle, protocol=pickle.HIGHEST_PROTOCOL)
for key, value in sorted_x:
counter+=1
if (key,value) in sorted_y:
print(key,value)
print("\n-----\n")
for document in collection.find():
pmid = document["MedlineCitation"]["PMID"]
break
if pmid in docs:
print(docs[pmid])
#print(tokenizer.tokenize(document["MedlineCitation"]["Article"]["Abstract"]["AbstractText"][0])) # iterate the cursor
#print("b")
with open('pickles/geneInformation.pickle', 'wb') as handle:
pickle.dump(genes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('pickles/docToGenes.pickle', 'wb') as handle:
pickle.dump(docs, handle, protocol=pickle.HIGHEST_PROTOCOL)
| [
"ozgedincsoy@Ozges-MacBook-Pro.local"
] | ozgedincsoy@Ozges-MacBook-Pro.local |
029412551ed3eb0069eec0f73a51a8eb8ab161f8 | eada2d13ea45c94f14c6537aef178c0ebe3ae3f0 | /Rice Fundamentals of Computing specialisation/Principles of Computing/Part 2/Week 8 Min Max Tic Tac Toe.py | 0eee011ac4c5523bf1e0dceb868484769afe9af8 | [] | no_license | leonlinsx/ABP-code | 1b77ef370121f67cc9a424b59944e2eb9dcef3b7 | 480e3f5786561332943c24fe15cf23a5c4835cf5 | refs/heads/master | 2023-03-03T14:01:38.396222 | 2021-02-14T20:33:52 | 2021-02-14T20:33:52 | 274,461,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,014 | py | """
Mini-max Tic-Tac-Toe Player
Board class is here: https://www.coursera.org/learn/principles-of-computing-2/supplement/qMWrV/tttboard-class
test suite here: http://www.codeskulptor.org/#user48_IVQKbrFzXY_52.py
practice nim game here: http://www.codeskulptor.org/#user48_nim_recursive_template_0.py
"""
import poc_ttt_gui
import poc_ttt_provided as provided
#import user48_IVQKbrFzXY_52 as tester
# Set timeout, as mini-max can take a long time
import codeskulptor
codeskulptor.set_timeout(60)
# SCORING VALUES - DO NOT MODIFY
SCORES = {provided.PLAYERX: 1,
provided.DRAW: 0,
provided.PLAYERO: -1}
#provided.EMPTY is 1
#provided.PLAYERX 2
#provided.PLAYERO 3
#provided.DRAW 4
def mm_move(board, player):
"""
Make a move on the board.
Returns a tuple with two elements. The first element is the score
of the given board and the second element is the desired move as a
tuple, (row, col).
"""
# if game over, return score and invalid move
game_over = board.check_win()
if game_over:
return SCORES[game_over], (-1, -1)
moves = board.get_empty_squares()
score_move_list = []
for move in moves:
# need to make a copy and not edit original board
board_copy = board.clone()
board_copy.move(move[0], move[1], player)
# switch player and evaluate new board after the move
new_player = provided.switch_player(player)
# print board_copy, move, new_player
copy_score, _ = mm_move(board_copy, new_player)
# if you multiply the score by SCORES[player] then you can always maximize
# if playerx wins on playerx turn, copy_score 1 * 1
# if playero wins on playero turn, copy_score -1 * -1
if copy_score * SCORES[player] == 1:
return copy_score, move
score_move_list.append((copy_score, move))
# print score_move_list
# get the max score and the associated move
max_score = max(elem[0] * SCORES[player] for elem in score_move_list)
# print max_score
for score, move in score_move_list:
if score * SCORES[player] == max_score:
return score, move
# codeskulptor doesn't have max key functionality
# return max(score_move_list, key = lambda x: x[0] * SCORES[player])
def move_wrapper(board, player, trials):
"""
Wrapper to allow the use of the same infrastructure that was used
for Monte Carlo Tic-Tac-Toe.
"""
move = mm_move(board, player)
assert move[1] != (-1, -1), "returned illegal move (-1, -1)"
return move[1]
# Test game with the console or the GUI.
# Uncomment whichever you prefer.
# Both should be commented out when you submit for
# testing to save time.
#provided.play_game(move_wrapper, 1, False)
#poc_ttt_gui.run_gui(3, provided.PLAYERO, move_wrapper, 1, False)
#tester.run_suite(mm_move) | [
"noreply@github.com"
] | leonlinsx.noreply@github.com |
b0d281e7e1d1edddc317844f590fe586d45dbebe | 67c030e79325f0ae780dd36129170e8ecd45d156 | /week2/soclient.py | be5114997f781305f2ef86ab82c643ac1734816e | [] | no_license | James-Dolan/bootcamp | bf4f293ad6d16cdebc5c1c2b95e36dfc1d853eb3 | c4f6ae3c5b331fcfddaae0fe791a8bf5cdd7ac84 | refs/heads/master | 2021-06-24T09:20:39.615973 | 2017-08-22T01:44:14 | 2017-08-22T01:44:14 | 101,021,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | """Pentester lab bootcamp
week 2 - http client with sockets
James Dolan
"""
import sys
import socket
def main():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
print('failed ot create socket, error code: ' + str(msg[0]) + ' , Error message : ' + msg[1])
sys.exit()
print('socket created\n')
try:
host = '127.0.0.1'
except socket.gaierror:
print('host failed\n')
port = 80
s.connect((host, port))
print('connected\n')
message = b"GET / HTTP/1.0\r\n\r\n"
try:
s.sendall(message)
except socket.error:
print('send failed\n' + str(msg[0]) + ' \n Error message : ' + msg[1])
sys.exit()
print(s.recv(4096))
s.close
if __name__=='__main__':
main()
| [
"jamesdolan503@gmail.com"
] | jamesdolan503@gmail.com |
26f957332c23ca4ad82b9af6f0c37a65ad657120 | 365a1710edf7f022c3d08138882e43c449f4d77a | /setup.py | 48529a96c6512ce5df9bdab12427a79c74c4b635 | [] | no_license | lglassy/kapro | fff90fc601a22386216d9bca98b5b512ee20d393 | 4246133af8e6da42413028f145967048a43c248c | refs/heads/master | 2020-04-09T20:30:12.561196 | 2018-12-06T02:46:05 | 2018-12-06T02:46:05 | 160,575,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py |
# standard modules
import glob
import os
import os.path
import platform
import shutil
import setuptools
import sys
import sysconfig
setuptools.setup(
name = 'kapro',
version = '0.1',
description = 'small document database with sqlite storage engine',
author = 'Lou Glassy',
packages = setuptools.find_packages('src'),
package_dir = { '': 'src' },
)
# end of file
| [
"lou@dianthus.example.org"
] | lou@dianthus.example.org |
884cb5ac643297896d0e2af6e2bacd1ea9d099ee | ae4bc46b4aed7d67275bb451759f0df7c626a720 | /connect3.py | 3d7e94bb7d3303897c5a531334e8b50269c52b20 | [] | no_license | Airoure/search-engine | f7bb5b6bffe6f7397c6c63382a1e4593f0646eca | 0ea379b390b873a0e1ca0b277fce5d645e82f55a | refs/heads/master | 2020-08-09T19:18:20.701585 | 2019-12-08T14:25:23 | 2019-12-08T14:25:23 | 214,152,812 | 0 | 5 | null | 2019-12-08T14:25:25 | 2019-10-10T10:24:47 | Python | UTF-8 | Python | false | false | 3,946 | py | import pygame,sys,random
from pygame.locals import *
#定义颜色变量 目标方块的颜色
yellowColor = pygame.Color(255,255,0)
#贪吃蛇的颜色
greenColor = pygame.Color(0,250,154)
#背景颜色
blackColor = pygame.Color(255,255,255)
def gameOver():
pygame.quit()
sys.exit()
def main():
    """Run the snake game loop; blocks until the window is closed.

    Uses the module-level colour constants and pygame names imported via
    ``from pygame.locals import *`` (QUIT, KEYDOWN, K_*, Rect).
    """
    # Initialize pygame and the frame-rate clock.
    pygame.init()
    fpsColck = pygame.time.Clock()
    # Create the 800x800 display surface.
    playSurface = pygame.display.set_mode((800, 800))
    pygame.display.set_caption('贪吃的陈文杰')
    # Snake head position and initial body segments (20px grid cells).
    snakePosition = [0, 0]
    snakeBody = [[100, 0], [80, 0], [60, 0]]
    # Target (food) square position; targerflag is 1 while it is on the
    # board and 0 once it has been eaten.
    targetPosition = [400, 400]
    targerflag = 1
    # Current movement direction and the direction requested by the player.
    direction = 'right'
    changeDirection = direction
    while True:
        # Translate keyboard/window events into a requested direction.
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_RIGHT:
                    changeDirection = 'right'
                if event.key == K_LEFT:
                    changeDirection = 'left'
                if event.key == K_UP:
                    changeDirection = 'up'
                if event.key == K_DOWN:
                    changeDirection = 'down'
                # Esc posts a QUIT event, closing the game.
                if event.key == K_ESCAPE:
                    pygame.event.post(pygame.event.Event(QUIT))
        # Ignore a request that would reverse straight into the body.
        if changeDirection == 'left' and not direction == 'right':
            direction = changeDirection
        if changeDirection == 'right' and not direction == 'left':
            direction = changeDirection
        if changeDirection == 'up' and not direction == 'down':
            direction = changeDirection
        if changeDirection == 'down' and not direction == 'up':
            direction = changeDirection
        # Advance the head one cell in the current direction.
        # Fix: the original only moved the snake for 'down'; the other three
        # directions never changed the head position at all.
        if direction == 'right':
            snakePosition[0] += 20
        elif direction == 'left':
            snakePosition[0] -= 20
        elif direction == 'up':
            snakePosition[1] -= 20
        elif direction == 'down':
            snakePosition[1] += 20
        # Grow from the head; shrink the tail unless the target was eaten.
        snakeBody.insert(0, list(snakePosition))
        if snakePosition[0] == targetPosition[0] and snakePosition[1] == targetPosition[1]:
            targerflag = 0
        else:
            snakeBody.pop()
        # Respawn the target at a random grid cell after it is eaten.
        if targerflag == 0:
            x = random.randrange(1, 40)
            y = random.randrange(1, 40)
            targetPosition = [int(x * 20), int(y * 20)]
            targerflag = 1
        # Redraw the board, snake and target.
        playSurface.fill(blackColor)
        for Position in snakeBody:
            pygame.draw.rect(playSurface, greenColor, Rect(Position[0], Position[1], 15, 15))
        pygame.draw.rect(playSurface, yellowColor, Rect(targetPosition[0], targetPosition[1], 20, 20))
        # End the game when the head runs into the body (segments > 1).
        tail = 0
        for num in range(0, len(snakeBody)):
            if snakeBody[0] == snakeBody[num] and num > 1:
                tail = 1
                break
        if tail == 1:
            break
        pygame.display.flip()
        # Wrap the head around the edges of the 800x800 board.
        if snakePosition[0] > 800 or snakePosition[0] < 0:
            if snakePosition[0] >= 800:
                snakePosition = [0, snakePosition[1]]
            else:
                snakePosition = [800, snakePosition[1]]
        elif snakePosition[1] > 800 or snakePosition[1] < 0:
            if snakePosition[1] >= 800:
                snakePosition = [snakePosition[0], 0]
            else:
                snakePosition = [snakePosition[0], 800]
        # Cap the game at 10 frames per second.
        fpsColck.tick(10)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Airoure.noreply@github.com |
c4c85a2c4f053ec3eb0bfe5c78a4750d02825bab | 7f8b60a3a12f6ac61fce04f2e3ef2185c99418cb | /test_scraping_streeteasy.py | 4d4e42317508ad7d9850323a6195aa386384f97a | [] | no_license | vikivayner/CIS-9650-Project- | 44a8ae04acc8d0a3544b0799c09af28a8caa61d4 | aa2960a3c37ca6825c2e53157505f6d1b24cc24a | refs/heads/master | 2023-06-18T15:44:00.436088 | 2021-05-14T01:36:17 | 2021-05-14T01:36:17 | 364,033,714 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | from urllib.request import Request, urlopen
from bs4 import BeautifulSoup

# Search results page: 2-bedroom apartments for sale in Murray Hill, < $2M.
streeteasy_url = 'https://streeteasy.com/2-bedroom-apartments-for-sale/murray-hill/price:-2000000'

# A browser-like User-Agent keeps the request from being rejected outright.
req = Request(streeteasy_url, headers={'User-Agent': 'Chrome/90.0.4430.93'})
make_request = urlopen(req)
read_html = make_request.read()
html_soup = BeautifulSoup(read_html, 'html.parser')

# One <li> card per listing on the results page.
apts_for_sale = html_soup.find_all('li', class_='searchCardList--listItem')

filename = 'test_scrape_streeteasy.csv'
headers = 'Title, Address, Price \n'
# The context manager guarantees the CSV is closed even if parsing fails.
with open(filename, 'w') as f:
    f.write(headers)
    for listing in apts_for_sale:
        title = listing.find('p', class_='listingCardLabel-grey').text
        address = listing.find('address', class_='listingCard-addressLabel').text
        # Price renders like "$1,250,000": drop the '$' and thousands commas.
        pricelist = listing.find('span', class_='price').text.strip('$').split(',')
        price = ''.join(pricelist)
        # Fix: the original ended each record with ',' and no newline, so
        # every listing was written onto one single CSV line.
        f.write(title + ',' + address + ',' + price + '\n')
f.close() | [
"mohammad.shafique1@baruchmail.cuny.edu"
] | mohammad.shafique1@baruchmail.cuny.edu |
85f970aac1289aa71773cf2f9f5fee61ae7a289f | a939e018333a9ecd26ddc618f99835b7eb381686 | /mapred_parser/user_merger/.svn/text-base/reducer.py.svn-base | c71b89a519663ca1e57f7a5f17e75be85bb0ab96 | [] | no_license | cash2one/crawl_youtube | bff5ba254001c2f31f770e55a4aca39bc54e45ee | 0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc | refs/heads/master | 2021-01-16T22:30:17.800282 | 2016-02-18T11:50:09 | 2016-02-18T11:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,709 | #!/usr/bin/python
# coding=utf-8
import os
import sys
import time
import base64
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../')
from le_crawler.proto.video.ttypes import OriginalUser
from le_crawler.common.utils import str2mediavideo, thrift2str
# OriginalUser fields that participate in the cross-record merge below.
user_merge_field = set(['user_name', 'url', 'portrait_url', 'play_num', 'fans_num'])
class MergeItem:
    """Accumulates consecutive records sharing one user_url and merges
    their embedded user structures before re-emitting them (Python 2)."""
    def __init__(self):
        self.reset('')
    def reset(self, user_url=None):
        """Drop accumulated records and start a new group for user_url."""
        self._data = []
        self._user_url = user_url
        self._url = None # used only in which length of self._data is 1
        self._user = None
    def get_user_url(self):
        """Return the user_url of the group currently being accumulated."""
        return self._user_url
    def add_item(self, user_url, out_type, url, data_base64):
        """Append one base64 record; out_type 'video' marks it for the
        extra 'video' output stream."""
        is_out_video = out_type == 'video'
        self._data.append((data_base64, is_out_video))
        self._url = url
    def _merge_user(self, datas):
        """Build a merged OriginalUser: for each empty merge field take the
        first non-empty value; when that field is not 'url', also back-fill
        every other still-empty merge field from the same record."""
        new_user = OriginalUser()
        for k, v in new_user.__dict__.iteritems():
            if k not in user_merge_field or v:
                continue
            for data in datas:
                old_v = getattr(data[0].user, k)
                if old_v:
                    setattr(new_user, k, old_v)
                    if k != 'url':
                        for item in user_merge_field:
                            old_v = getattr(data[0].user, item)
                            if not getattr(new_user, item) and old_v:
                                setattr(new_user, item, old_v)
                    break
        new_user.update_time = int(time.time())
        self._user = new_user
    def _print_video(self, datas):
        """Re-serialize each record with the merged user and print it."""
        for data in datas:
            data[0].user = self._user
            video_str = thrift2str(data[0])
            if not video_str:
                sys.stderr.write('ERROR: failed in thrift2str. %s\n' % data[0].url)
                continue
            video_base64 = base64.b64encode(video_str)
            if not video_base64:
                sys.stderr.write('ERROR: failed in base64 encode. %s\n' % data[0].url)
                continue
            print 'unique' + '\t' + data[0].url + '\t' + str(self._user_url) + '\t' + video_base64
            if data[1]:
                print 'video' + '\t' + data[0].url + '\t' + str(self._user_url) + '\t' + video_base64
    def print_item(self):
        """Emit the accumulated group: a single record passes through
        unchanged; larger groups are decoded, merged (newest user wins
        per the sort below) and re-emitted."""
        if not self._data:
            return
        if len(self._data) == 1:
            print 'unique' + '\t' + self._url + '\t' + str(self._user_url) + '\t' + self._data[0][0]
            if self._data[0][1]:
                print 'video' + '\t' + self._url + '\t' + str(self._user_url) + '\t' + self._data[0][0]
            return
        for idx, data_group in enumerate(self._data):
            try:
                data = str2mediavideo(base64.b64decode(data_group[0]))
            except:
                sys.stderr.write('ERROR: failed in base64 decode. %s\n' % self._user_url)
                # NOTE(review): no 'continue' here -- if the very first record
                # fails to decode, 'data' is unbound (NameError below); later
                # failures silently reuse the previous iteration's 'data'.
            self._data[idx] = (data, data_group[1])
        self._data = [item for item in self._data if item[0]]
        self._data.sort(cmp=lambda x, y: (y[0].user.update_time or 0) - (x[0].user.update_time or 0))
        self._merge_user(self._data)
        self._print_video(self._data)
def main():
    """Reducer entry point: read tab-separated records from stdin and flush
    groups of consecutive lines sharing the same user_url via MergeItem.
    Only consecutive equal keys are merged, so the input must arrive
    grouped by user_url."""
    merge_item = MergeItem()
    while 1:
        line = sys.stdin.readline()
        if not line:
            break
        line_data = line.strip().split('\t', 3)
        if len(line_data) != 4:
            # Malformed record: log it and keep going.
            sys.stderr.write(str(len(line_data)) + ' ' + str(line_data) + '\n')
            continue
        user_url, url, out_type, data_base64 = line_data
        if user_url == 'None':
            # Records without a user bypass merging entirely.
            print 'unique' + '\t' + url + '\t' + user_url + '\t' + data_base64
            if out_type == 'video':
                print 'video' + '\t' + url + '\t' + user_url + '\t' + data_base64
            continue
        if user_url == merge_item.get_user_url():
            merge_item.add_item(user_url, out_type, url, data_base64)
        else:
            # Key changed: flush the previous group before starting a new one.
            merge_item.print_item()
            merge_item.reset(user_url)
            merge_item.add_item(user_url, out_type, url, data_base64)
    # Flush the final group once stdin is exhausted.
    merge_item.print_item()
if __name__ == '__main__':
main()
| [
"zjc0516@126.com"
] | zjc0516@126.com | |
b2980f93a9d385c0eb93ed900e2512561a230796 | 435730a7ac4aad11b435d7f9c599b2639c79be00 | /Pertemuan5/QTextEdit/DemoQTextEdit.py | 54967c5434294a603e33405ea28d413117bdae08 | [] | no_license | firmangaluhs/PraktikumGUI | 1986540913c6c2ed38a0cae2585f61032d65f69c | bde9f9345b06d41189da443ff2200f635c876a0e | refs/heads/master | 2020-05-14T03:24:49.973146 | 2019-07-07T10:43:06 | 2019-07-07T10:43:06 | 181,693,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class DemoQTextEdit(QWidget):
    """Demo window: a phone-number line edit, a message text edit and
    Send/Cancel buttons arranged with nested box layouts."""
    def __init__(self):
        super().__init__()
        self.setupUi()
    def setupUi(self):
        """Build and attach all widgets and layouts for the window."""
        # Window geometry and title.
        self.resize(400, 200)
        self.move(300, 300)
        self.setWindowTitle('Demo QTextEdit')
        # Phone-number group: label above a single-line edit.
        self.label1 = QLabel()
        self.label1.setText('No. HP')
        self.phoneEdit = QLineEdit()
        vbox1 = QVBoxLayout()
        vbox1.addWidget(self.label1)
        vbox1.addWidget(self.phoneEdit)
        # Message group: label above a multi-line QTextEdit.
        self.label2 = QLabel()
        self.label2.setText('Pesan')
        self.messageEdit = QTextEdit()
        vbox2 = QVBoxLayout()
        vbox2.addWidget(self.label2)
        vbox2.addWidget(self.messageEdit)
        # Stack the two input groups vertically.
        vbox3 = QVBoxLayout()
        vbox3.addLayout(vbox1)
        vbox3.addLayout(vbox2)
        # Button row; the leading stretch pushes the buttons to the right.
        self.sendButton = QPushButton('&Kirim SMS')
        self.cancelButton = QPushButton('&Batal')
        hbox = QHBoxLayout()
        hbox.addStretch()
        hbox.addWidget(self.sendButton)
        hbox.addWidget(self.cancelButton)
        # Root layout: inputs, a sunken separator line, then the buttons.
        layout = QVBoxLayout()
        layout.addLayout(vbox3)
        horizontalLine = QFrame();
        horizontalLine.setFrameShape(QFrame.HLine)
        horizontalLine.setFrameShadow(QFrame.Sunken)
        layout.addWidget(horizontalLine)
        layout.addLayout(hbox)
        self.setLayout(layout)
if __name__ == '__main__':
a = QApplication(sys.argv)
form = DemoQTextEdit()
form.show()
a.exec_()
| [
"noreply@github.com"
] | firmangaluhs.noreply@github.com |
1a6eeb03504902d1be2747c7e0afa6028dc43054 | 00ad9fbdec554c35ffd95d5406ef7a880444ac8b | /serve.py | 485e7b27c5e739d3f2aea261a311be724d387cb4 | [] | no_license | AI-Passionner/word-recognition-ocr | 460dc2792c90216239f4cdb0808bca51c3d9315a | f9e451f9f6eb6b6a9f38ebbc9eb068d32fb4bd4c | refs/heads/master | 2022-11-09T01:23:30.361590 | 2020-06-28T06:20:23 | 2020-06-28T06:20:23 | 266,566,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | import cv2
import argparse
from src.predict import RecognitionUtils
from src.analyze_document import AnalyzeDocument
from src.image_util import ImageUtils
def main(image_file, output_path):
image = cv2.imread(image_file)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# remove lines for better txt detection
bw_img = ImageUtils.remove_lines(gray, 120, 100) # white space and black text
cv2.imwrite(output_path + '/' + 'line_removed.png', bw_img)
ret, img_for_ext = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
cv2.imwrite(output_path + '/' + 'img_for_extr.png', img_for_ext)
block_bboxs = AnalyzeDocument.extract_blocks(img_for_ext)
img_h, img_w = img_for_ext.shape[:2]
masked_img = image.copy()
for bbox in block_bboxs:
l, t, r, b = bbox
l, t, r, b = max(0, l - 4), max(0, t - 4), min(img_w, r + 2), min(img_h, b + 2)
cv2.rectangle(masked_img, (l, t), (r, b), (0, 0, 255), 2)
cv2.imwrite(output_path + '/' + 'block_img.png', masked_img)
word_images = AnalyzeDocument.extract_word_images(img_for_ext, block_bboxs)
recog_words = RecognitionUtils.recognize(word_images)
text = ' '.join(recog_words[k]['Text'] for k in recog_words)
print(text)
with open(output_path + '/' + 'output.txt', 'w') as f:
f.write(text)
return
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="image file")
ap.add_argument("-o", "--output", type=str, help="path to output model and textract texts")
args = vars(ap.parse_args())
# args = {
# 'image': './test/test_1/test_1.png',
# 'output': './test/test_1'
# }
if __name__ == '__main__':
main(args['image'], args['output'])
| [
"dlaohu.github@gmail.com"
] | dlaohu.github@gmail.com |
3f2cee0071989d5dddcf5e06d71d0c53ccf74a79 | 19ddab74600f71700a6b693281d0180d5271f295 | /leetcode/119_杨辉三角2.py | 5ca9f3c4a447aa5bf40bb8293558abdaa26cfa73 | [] | no_license | zhulf0804/Coding.Python | 4d55a430da1a8077c81feba65c13ac654aaf094a | 46ab03e23d15ebd5434ef4dd5ae99130000b00a5 | refs/heads/master | 2022-09-14T18:40:59.880941 | 2022-08-20T08:25:51 | 2022-08-20T08:25:51 | 213,113,482 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from typing import List
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
if rowIndex == 0:
return [1]
if rowIndex == 1:
return [1, 1]
pre = [1, 1]
for i in range(2, rowIndex+1):
cur = [1] * (i + 1)
for j in range(1, i):
cur[j] = pre[j] + pre[j - 1]
pre = cur
return cur
rowIndex = 3
s = Solution()
print(s.getRow(3)) | [
"zhulf0804@gmail.com"
] | zhulf0804@gmail.com |
1ff85fe747c7f0ba49485d7457fca4fd3c24c8c2 | 52f25de526703905b08f6945b484f10bf6636591 | /jupyterlab_hdf/_version.py | a30f283eb29770948e018983e369cacc90d7b1e4 | [
"BSD-3-Clause"
] | permissive | hephaex/jupyterlab-hdf5 | e5f69d6949419c951f2dd2d7c679a3e7b1e406ad | 77eb5bedd5ef27758015af53ef03cc4d05d123c0 | refs/heads/master | 2022-10-17T23:42:09.656059 | 2020-06-16T03:34:58 | 2020-06-16T03:34:58 | 272,601,270 | 0 | 0 | null | 2020-06-16T03:32:14 | 2020-06-16T03:32:13 | null | UTF-8 | Python | false | false | 149 | py | # -*- coding: utf-8 -*-
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Single source of truth for the package version string.
__version__ = '0.4.0'
| [
"mklein@jhu.edu"
] | mklein@jhu.edu |
d7436d052f3fa5cb85da70a7387765afbdd5c1b3 | 2235e32f6c2f345bcb21f37328d4e114c2402ab4 | /kdguoguo.py | dc01f3030ac0493c94b62264979ab609d72cc395 | [] | no_license | leipengkai/scrapy | b47e0d4a40e642044dd5e85e98fc742670c8cd73 | 5ad14ffd8d531b53b20bc1ba2d9463f4e992db2f | refs/heads/master | 2021-07-02T22:56:56.422439 | 2020-08-30T09:00:27 | 2020-08-30T09:00:27 | 154,592,528 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
# binary = FirefoxBinary('/usr/bin/firefox')
# browser = webdriver.Firefox(firefox_binary=binary)
# Headless-Chrome setup (original comment in Chinese).
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument("user-agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'")
browser = webdriver.Chrome(chrome_options=chrome_options,executable_path='/usr/local/bin/chromedriver')
# End of headless-Chrome setup.
# browser = webdriver.Chrome()
# Firefox()
try:
    browser.get('https://www.guoguo-app.com/')
    # NOTE(review): 'input' shadows the Python builtin of the same name.
    input = browser.find_element_by_id('J_SearchInput') # J_SearchBtn
    time.sleep(1)
    try:
        # Dismiss the landing-page pop-up if it is present.
        close = browser.find_element_by_xpath("html/body/div/div/a").click()
    except:
        pass
    wait = WebDriverWait(browser, 10)
    # Tracking number to query (original note: SF Express numbers are
    # not supported, e.g. 821438763102).
    input.send_keys('3868932225395') # 顺丰的不支持 821438763102
    input.send_keys(Keys.ENTER)
    wait.until(EC.presence_of_element_located((By.ID, 'J_SearchBtn')))
    btn = browser.find_element_by_id('J_SearchBtn')
    btn.click()
    time.sleep(1)
    try:
        # Tracking events are rendered as <span> elements; when enough are
        # present, trim the leading/trailing page chrome.
        lis = browser.find_elements_by_tag_name('span')
        if len(lis)>10:
            lis = lis[6:-4]
        for i in lis:
            print(i.text)
    except:
        # Message says: "no information loaded, nothing to do".
        print('没有加载到信息,不去做处理')
        pass
finally:
    # NOTE(review): the browser is never closed/quit, leaking the driver.
    # browser.close()
    pass
"leipengkai@gmail.com"
] | leipengkai@gmail.com |
1e40eb8ebf6126ef170356dc390a90a0bdfcd028 | 2cc0058845c55602d123b673d03d0f65401539bf | /KTextExtract2.py | d827b53ff88867a9f184967822ba5b7fbb8b6e38 | [] | no_license | SSKudos/PyCodes | 5295abf3392fa64719912ca4cd52aa7bc3d3212a | 939fb5af3e868dd538520ee908676122cd51da5d | refs/heads/main | 2023-04-30T20:03:38.157990 | 2021-04-29T19:52:01 | 2021-04-29T19:52:01 | 311,798,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | from PIL import Image
import cv2
import pytesseract
import os
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
def main():
    """Run Tesseract OCR over every image in the hard-coded folder and
    print the extracted text for each one."""
    # Folder containing the raw images to be processed.
    source_dir = r'C:\Users\CHIJINDU\Desktop\ml learn'
    for entry in os.listdir(source_dir):
        image_path = os.path.join(source_dir, entry)
        # OpenCV loads images as BGR; Tesseract expects RGB ordering.
        bgr_image = cv2.imread(image_path)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        # Apply OCR (English language pack) to the converted image.
        extracted_text = pytesseract.image_to_string(rgb_image, lang='eng')
        print(extracted_text)
main()
| [
"noreply@github.com"
] | SSKudos.noreply@github.com |
8c538d44113ba9e50da23185ac2eaee258c5557c | aa3848f5207219119bbad65b7423d08f0d4734e1 | /minxss_library/pass_planning_tool/python/minxss_email.py | afc08a87c9775780b4b9895eb21eefa9e1a04562 | [] | no_license | minxsscubesat/minxsscubesat | 81e4857378a1365339266b1ed757c638880899e9 | 77567b5be1a6da0607d093968ea2672c0cc9058a | refs/heads/master | 2023-08-31T01:54:59.370941 | 2023-08-28T17:01:21 | 2023-08-28T17:01:21 | 130,722,400 | 4 | 2 | null | 2023-03-31T00:55:41 | 2018-04-23T16:00:32 | IDL | UTF-8 | Python | false | false | 10,857 | py | from jd_utc_time import now_in_jd
from jd_utc_time import jd_to_minutes
import smtplib
import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.text import MIMEText
import os
import sys
import re
# Minimum gap, in hours, between two emails of the same event type; each
# event type tracks its own last-sent time (Julian date) on the instance.
num_hours_between_emails = 2
# When not 1, SendEmail prints the message to stdout instead of sending it.
enable_email = 1
#Send an email with the associated error text (or informational text).
#Also, only send every hour or so unless error message is different (see constant above)
class email(object):
    """Rate-limited status/error email sender for pass automation.

    An instance is callable: calling it with an event-type string sends the
    matching canned notification, at most once every
    ``num_hours_between_emails`` hours per event type.
    """
    def __init__(self, emails_noerr, emails_err, sat_name, cfg):
        """Store recipient lists, satellite/station names and mail config.

        emails_noerr -- recipients for informational messages
        emails_err   -- recipients for error messages
        sat_name     -- satellite name used in subjects and bodies
        cfg          -- config object; must provide email_username,
                        email_password, email_server and station_name
        """
        #declare all error types in this format, so we can track when they occurred
        self.errNoPassTimes_jd = 0 #time, in JD, that a "NoPassTimes" error occured
        self.errPassAboutToOccur_jd = 0
        self.errNoPassScript_jd = 0
        self.errNoFile_jd = 0 #generic, couldn't find a file errors
        self.script_name = "unchanged, there is a code error"
        self.script_file_location = ""
        #populate email list with default values
        self.toaddrs_err = emails_err
        self.toaddrs_noerr = emails_noerr
        #config settings
        self.fromaddr = cfg.email_username
        self.station_name = cfg.station_name
        self.sat_name = sat_name
        self.cfg = cfg
    def __call__(self,texttype):
        """Send the canned email for *texttype*, subject to rate limiting.

        Known types: "NoPassTimes", "PassAboutToOccur", "NoPassScript",
        "NoFile", "DopplerEngage". An unknown type silently does nothing
        (emailtext stays None). NOTE(review): "NoFile" and "DopplerEngage"
        share the same errNoFile_jd timer, so they rate-limit each other.
        """
        emailtext = None
        if(texttype == "NoPassTimes"):
            # tdiff is in days (JD), so *24 converts to hours.
            tdiff = now_in_jd() - self.errNoPassTimes_jd
            if(tdiff*24>num_hours_between_emails):
                self.errNoPassTimes_jd = now_in_jd()
                email_type = "critical_error"
                emailtext = "No future pass times detected! Perhaps the script that generates pass times has failed! Chicken little."
        if(texttype == "PassAboutToOccur"):
            tdiff = now_in_jd() - self.errPassAboutToOccur_jd
            if(tdiff*24>num_hours_between_emails):
                self.errPassAboutToOccur_jd = now_in_jd()
                email_type = "prepass_info"
                emailtext = "A Pass is about to occur! It will happen in <=15 minutes!"
        if(texttype == "NoPassScript"):
            tdiff = now_in_jd() - self.errNoPassScript_jd
            if(tdiff*24>num_hours_between_emails):
                self.errNoPassScript_jd = now_in_jd()
                email_type = "critical_error"
                emailtext = "No Pass Script available, or next file is not *.prc! Please fix before the pass or we'll run the default script!"
        if(texttype == "NoFile"):
            tdiff = now_in_jd() - self.errNoFile_jd
            if(tdiff*24>num_hours_between_emails):
                self.errNoFile_jd = now_in_jd()
                email_type = "critical_error"
                emailtext = "Pass Automation could not find a file it expected to exist! Please debug!"
        if texttype == "DopplerEngage":
            tdiff = now_in_jd() - self.errNoFile_jd
            if tdiff * 24 > num_hours_between_emails:
                self.errNoFile_jd = now_in_jd()
                email_type = "critical_error"
                emailtext = "Pass Automation could not engage the Doppler correction. Make sure that the Gpredict Radio Controller window is visible on screen. Click Engage if it is not already. Leave the Radio Controller window visible to continue automation."
        if emailtext != None:
            print(emailtext)
            self.SendEmail(emailtext,email_type,"","")
    def PassResults(self, results, info):
        """Compose and send the post-pass summary email.

        results -- pass outcome (bytes downlinked, errors_array, file paths)
        info    -- pass geometry/metadata (elevation, length, sunlight, ...)
        """
        #construct the body of the email
        email_body = "{0} Pass completed at station {1}.\r\n".format(info.sat_name, info.station_name)
        if(info.is_shortened == 1):
            newlength = jd_to_minutes(info.end_jd_adjusted) - jd_to_minutes(info.start_jd_adjusted)
            if(newlength < 0):
                # A negative adjusted length means the pass was dropped
                # entirely in favour of a higher-priority satellite.
                newlength = 0
                email_body = "{0} Pass CANCELED at station {1}. It overlapped with a higher priority satellite.\r\n".format(info.sat_name, info.station_name)
            else:
                email_body = "SHORTENED " + email_body
        email_body = email_body + "Elevation: " + str(round(info.elevation,2)) + " degrees\r\n"
        if(info.is_shortened == 1):
            email_body = email_body + "Length: " + str(round(newlength,2)) + " minutes (shortened from " + str(round(info.length_minutes,2)) + " minutes)\r\n"
        else:
            email_body = email_body + "Length: " + str(round(info.length_minutes,2)) + " minutes\r\n"
        email_body = email_body + "kB data downlinked: " + str(results.bytes_downlinked_data/1000) + "\r\n"
        email_body = email_body + "Script: " + self.script_name + "\r\n\r\n"
        # Pick the email category from the pass outcome.
        if(len(results.errors_array)>0):
            email_type = "errors_during_pass"
            email_body = email_body + "ERRORS OCCURED!\r\n\r\nErrors from EventLog:\r\n"
            email_body = email_body + "============================================\r\n"
            for line in results.errors_array:
                email_body = email_body + line
            email_body = email_body + "============================================\r\n\r\n"
        elif(results.bytes_downlinked_data == 0):
            email_type = "no_data"
            email_body = email_body + "Pass provided no data, but this was not unexpected given the max elevation.\r\n\r\n"
        else:
            email_type = "successful_pass"
            email_body = email_body + "PASS WAS SUCCESSFUL!\r\n\r\n"
        email_body = email_body + "Was in Sun: " + str(info.sunlight) + "\r\n"
        email_body = email_body + "TLM filename: " + results.tlm_filename + "\r\n"
        #email_body = email_body + "cmdTry and cmdSuccess prints:\r\n"
        #email_body = email_body + "============================================\r\n"
        #for line in results.cmdTrySucceed_arr:
        #    email_body = email_body + line
        #email_body = email_body + "============================================\r\n\r\n"
        self.SendEmail(email_body, email_type, results.eventlog_filepath, results.csv_filepath)
    def StoreScriptName(self,script_name):
        """Record the pass-script name included in result emails."""
        self.script_name = script_name
    def StoreScriptLocation(self,script_file_location):
        """Record the pass-script path attached to result emails."""
        self.script_file_location = script_file_location
    # Taken, in part, from: https://docs.python.org/3/library/email-examples.html#email-examples
    def SendEmail(self, email_body, email_type, eventlog_filepath, csv_filepath):
        """Build and send (or print, when enable_email != 1) one email.

        The recipient list and subject prefix are chosen from email_type;
        the event log, CSV and stored script file are attached when the
        paths exist. Does nothing if the recipient list is empty.
        """
        # NOTE(review): iserror is computed but never used below.
        if(email_type == "critical_error" or email_type == "errors_during_pass"):
            iserror = 1
        else:
            iserror = 0
        #Create the subject
        now = datetime.datetime.now()
        datestring = str(now.year) + "." + str(now.month) + "." + str(now.day) + " " + str(now.hour) + ":" + str(now.minute) + ":" + str(now.second)
        if(email_type == "critical_error"):
            subject = "CRITICAL: {0}: Error at ".format(self.sat_name)
            toaddrs = self.toaddrs_err
        elif(email_type == "prepass_info"):
            subject = "{0}: Status update at ".format(self.sat_name)
            toaddrs = self.toaddrs_noerr
        elif(email_type == "errors_during_pass"):
            subject = "{0}: Pass Results: ERROR! at ".format(self.sat_name)
            toaddrs = self.toaddrs_err
        elif(email_type == "successful_pass"):
            subject = "{0}: Pass Results: Success! at ".format(self.sat_name)
            toaddrs = self.toaddrs_noerr
        elif(email_type == "no_data"):
            subject = "{0}: Pass Results: No Data Received at ".format(self.sat_name)
            toaddrs = self.toaddrs_noerr
        else:
            subject = "MinXSS: Fatal code error! Contact developer. At "
            toaddrs = self.toaddrs_err
        subject = subject + datestring + " (" + self.station_name + ")"
        # Credentials
        username = self.cfg.email_username #'minxss.ops.wind2791@gmail.com'
        password = self.cfg.email_password #'minxssgroundstation'
        #print(email_body)
        #print(toaddrs)
        #Only send if there are any recipients
        if(len(toaddrs)>0):
            #put everything into a message object
            COMMASPACE = ', '
            #msg = MIMEText(email_body)
            msg = MIMEMultipart()
            msg['Subject'] = subject
            msg['From'] = self.fromaddr
            msg['To'] = COMMASPACE.join(toaddrs)
            msg.attach(MIMEText(email_body))
            # Attach each optional file only when it actually exists.
            if(os.path.isfile(eventlog_filepath)):
                with open(eventlog_filepath, "rb") as file:
                    part = MIMEApplication(file.read(), Name=os.path.basename(eventlog_filepath))
                part['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(eventlog_filepath)
                msg.attach(part)
            if(os.path.isfile(csv_filepath)):
                with open(csv_filepath, "rb") as file:
                    part = MIMEApplication(file.read(), Name=os.path.basename(csv_filepath))
                part['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(csv_filepath)
                msg.attach(part)
            if(os.path.isfile(self.script_file_location)):
                with open(self.script_file_location, "rb") as file:
                    part = MIMEApplication(file.read(), Name=os.path.basename(self.script_file_location))
                part['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(self.script_file_location)
                msg.attach(part)
            if(enable_email == 1):
                # The actual mail send
                server = smtplib.SMTP(self.cfg.email_server) #'smtp.gmail.com:587'
                server.starttls() #encryption enabled
                server.login(username,password)
                server.sendmail(self.fromaddr, toaddrs, msg.as_string())
                server.quit()
            else:
                print("Subject: " + subject)
                print("Body: ")
                print(email_body)
        else:
            print("ERROR: Tried to send email '",email_body,"', but no recipients were listed in the email config file!")
# for testing of this file only!!
def main(script):
    """Ad-hoc manual test: fire two notification types with separators.

    NOTE(review): this harness is out of date -- email() now requires four
    constructor arguments and the instance __call__ accepts a single
    texttype argument, so both calls below would raise TypeError if run.
    """
    print("")
    print("")
    print("")
    print("*******************************")
    #email = email() #initialize
    thisemail = email()
    thisemail("NoPassTimes", "colden_laptop")
    print("------------------------------------")
    thisemail("PassAboutToOccur", "colden_laptop")
    #email.SendEmail("PassAboutToOccur", "colden_laptop")
    print("*******************************")
    print("")
    print("")
    print("")
if __name__ == '__main__':
main(*sys.argv)
| [
"jmason86@gmail.com"
] | jmason86@gmail.com |
8d577c5e41a227361a97632f725917813da62b79 | c6d6842e25373f3169b294a8edbba3e47969fe64 | /xkin/pose.py | 0b923a3c50d9e9647e26e47592c44c2e0dfd8c6a | [
"BSD-2-Clause"
] | permissive | fpeder/pyXKin | ec072a23e9f29007a516a39a9957e54bc107ea71 | ce8a450a5bdf2074bbdf79783e4bdffe21812c45 | refs/heads/master | 2020-04-21T15:43:16.334612 | 2014-10-22T17:53:15 | 2014-10-22T17:53:15 | 25,595,407 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import config
import numpy as np
import pickle
from features import ConvexityDefects, FourierDescriptors
class PoseClassifier():
    """Temporal smoothing wrapper around a per-frame pose classifier.

    Each call to run() pushes the frame classification into a sliding
    buffer; once the buffer is full, the most frequent entry wins.
    """
    BUFFLEN = config.BUFFLEN
    NONE = config.HAND_NONE

    def __init__(self, classifier=None):
        self._classifier = classifier
        # Fix: the buffer used to be a class attribute, i.e. one list
        # shared by every instance; it is now per-instance state.
        self.buffer = []

    def run(self, contour):
        """Classify *contour*; returns NONE while the buffer is filling
        (or when no frame classifier was supplied)."""
        # Fix: 'pose' was unbound (NameError on return) whenever
        # self._classifier was None.
        pose = self.NONE
        if self._classifier:
            curr_pose = self._classifier.run(contour)
            if len(self.buffer) <= self.BUFFLEN:
                self.buffer.append(curr_pose)
                pose = self.NONE
            else:
                # Slide the window: drop the oldest vote, add the newest.
                self.buffer.pop(0)
                self.buffer.append(curr_pose)
                pose = self._majority_voting()
        return pose

    def _majority_voting(self):
        """Return the most frequent pose currently in the buffer."""
        buffer = np.array(self.buffer)
        votes = [len(np.where(buffer == x)[0]) for x in buffer]
        pose = np.argsort(votes)[-1]
        return buffer[pose]
class OpenCloseClassifier():
    """Binary open/closed hand classifier based on convexity defects."""
    # Tuning constants taken from the project-wide config module.
    DEFECTS_DEPTH_FACTOR = config.DEFECTS_DEPTH_FACTOR
    NUM_DEFECTS = config.NUM_DEFECTS
    OPEN = config.HAND_OPEN
    CLOSE = config.HAND_CLOSE
    NONE = config.HAND_NONE
    def __init__(self):
        self._cvxdefects = ConvexityDefects()
    def run(self, contour):
        """Return OPEN, CLOSE or NONE for the given hand contour.

        NOTE(review): 'not defects' is also true for None, so the
        'defects == None' branch below is unreachable; if None was meant
        to map to CLOSE, the two checks must be swapped. Also, if defects
        is a multi-element numpy array, 'not defects' raises ValueError --
        confirm what ConvexityDefects.run actually returns.
        """
        (defects, box) = self._cvxdefects.run(contour)
        if not defects:
            return self.NONE
        if defects == None:
            return self.CLOSE
        if self._is_open(defects, box):
            pose = self.OPEN
        else:
            pose = self.CLOSE
        return pose
    def _is_open(self, defects, box):
        """Heuristic: the hand is open when there are enough convexity
        defects and their mean depth is large relative to the bounding box."""
        asd = (box[2] * box[3]) / 2.0  # half the bounding-box area
        num = defects.size
        # Last column is presumably the defect depth (OpenCV
        # convexityDefects layout) -- confirm.
        mean = defects[:, :, -1].mean()
        c1 = mean >= (float(asd) / self.DEFECTS_DEPTH_FACTOR)
        c2 = num >= self.NUM_DEFECTS
        return (c1 and c2)
class MultiPoseClassifier():
    """Pose classifier: picks the pickled model with the smallest
    Mahalanobis distance to the contour's Fourier descriptors."""

    def __init__(self, src):
        self._fourier_desc = FourierDescriptors()
        self._models = self._load_models(src)

    def run(self, contour):
        """Return the id of the stored model closest to *contour*."""
        desc = self._fourier_desc.run(contour)
        dist = np.zeros(len(self._models))
        for i, model in enumerate(self._models):
            dist[i] = model['model'].mahalanobis([desc])
        # argmin is equivalent to argsort(dist)[0] without the full sort.
        pose = self._models[int(np.argmin(dist))]['id']
        return pose

    def _load_models(self, src):
        """Load pickled {'id', 'model'} entries from the given file paths.

        NOTE: pickle.load on untrusted files can execute arbitrary code;
        only load model files from trusted sources.
        """
        models = []
        for s in src:
            # Fix: the original opened each file without ever closing it.
            with open(s, 'rb') as fh:
                data = pickle.load(fh)
            models.append({'id': data['id'], 'model': data['model']})
        return models
| [
"f.peder@gmail.com"
] | f.peder@gmail.com |
48c1e248c0c54b9df4c45b1abc82c3c75f4870a9 | ae65873c3584cef7139066b224daad04410af6d2 | /MySQL.py | a042fc4c7860d159e362459d73edbfefca29ad33 | [] | no_license | rajatkashyap/Python | 2240c7472d07803c460c7a55d570e20694b694f9 | f74c85c65b0e209a5f7ab25b653d42835222faaf | refs/heads/master | 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 | Python | UTF-8 | Python | false | false | 324 | py | from mysql.connector import (connection)
# NOTE(review): credentials are hard-coded in source; move them to a config
# file or environment variables before committing.
cnx = connection.MySQLConnection(user='root', password='rajat',host='127.0.0.1',database='db')
cursor = cnx.cursor()
query=("select * from jobs")
cursor.execute(query)
# Python 2 print statements. NOTE(review): the variable names suggest a
# cities table, but the query reads from 'jobs' -- verify the schema.
for (city_id,city_name,country_id,x) in cursor:
    print city_id,city_name,country_id
cnx.close()
| [
"rajatkashyap@Rajats-MBP.T-mobile.com"
] | rajatkashyap@Rajats-MBP.T-mobile.com |
47468736654f20982f2e66cb18e9eb353ee808b0 | 0942ec222ef8f179238d051427ac2ac4a0a9a785 | /gen_initialization_files_for_c_code/s2v_c.py | e4c6bea4957b8e06be96d34fceb9dbc33e078660 | [] | no_license | DhananjayKimothi/seq2vec | c36fa1ad3d1051bbd1def3864aaabb2f387241cc | c656c4897cebc9cf0c2ac48445b3fb1d6e86b6d9 | refs/heads/master | 2021-05-20T18:03:28.037386 | 2020-09-14T02:10:25 | 2020-09-14T02:10:25 | 65,794,859 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,905 | py | from collections import namedtuple
import re
import numpy as np
from Bio import SeqIO
import warnings
import time
import pickle as pkl
from MakeVocab1 import BuildVocab
from DocVecsArray import DocvecsArray
from numpy import random
from collections import defaultdict
# Input FASTA file name.
# NOTE(review): the name 'file' shadows the (Python 2) builtin.
file = "yeast_seq.fasta"
input_path = r"../data"  # folder holding the raw input data
output_path = r"../data/inpfiles"# save folder
class seq_slicing(object):
    """Splits a sequence string into fixed-size words (k-mers)."""
    def __init__(self, w_s, w_type, n_w):
        """
        w_s is word size, w_type is for selecting how you want to
        slice the sequence: 'overlap' or 'non_overlap' ; n_w is special parameter
        required only when non overlap is used, ex: for w_s = 3, n_w = '...?'
        """
        self.w_s = w_s
        self.w_type = w_type
        self.n_w = n_w
    def slices(self, seq):
        """Return the list of words cut from *seq* per the configured mode.

        'overlap' yields every window of length w_s (stride 1).
        'non_overlap' collects stride-w_s words from each of the w_s
        possible frames by repeatedly deleting the leading character.
        Returns None implicitly for any other w_type.
        """
        if self.w_type == 'overlap':
            words = []
            for i in range(0, len(seq) - self.w_s + 1):
                words.append(seq[i:i + self.w_s])
            return words
        if self.w_type == 'non_overlap':
            # Frame 0: regex cuts non-overlapping words of length w_s.
            words = re.findall(self.n_w, seq)
            seq_len = len(seq)
            p = seq_len // self.w_s # floored quotient of seq_len
            words = words[0:p]
            seq1 = seq
            xx = np.zeros(self.w_s - 1) # to delete 1st index, del seq1[i]
            # NOTE(review): np.int is deprecated/removed in modern numpy;
            # use the builtin int (or np.int64) here.
            xx = xx.astype(np.int)
            words_list = []  # NOTE(review): never used
            words2 = []
            # Remaining w_s-1 frames: drop one leading char each pass.
            for j in xx:
                seq1 = list(seq1)
                del seq1[j]
                seq1 = "".join(seq1)
                seq_len = len(seq1)
                words1 = re.findall(self.n_w, seq1)
                p = seq_len // self.w_s
                words1 = words1[0:p]
                words2.extend(words1)
            words.extend(words2)
            return words
class TaggedDocument(namedtuple('TaggedDocument', 'words tags')):
    """A single training document.

    ``words`` is a list of unicode string tokens and ``tags`` is a list of
    tokens identifying the document. Any number of string tags works, but
    the memory-efficient convention is a single unique integer id as the
    only tag. This replaces Word2Vec's "sentence as a list of words".
    """
    def __str__(self):
        cls_name = type(self).__name__
        return '{0}({1},{2})'.format(cls_name, self.words, self.tags)
class LabeledSentence(TaggedDocument):
    """Deprecated alias for TaggedDocument; warns on construction.

    NOTE: no super().__init__ call is needed -- namedtuple instances are
    fully built in __new__, so this __init__ only emits the warning.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn('LabeledSentence has been replaced by TaggedDocument', DeprecationWarning)
class LabeledLineSentence(seq_slicing):
    """Iterates a FASTA file as TaggedDocument(k-mer list, [sequence id])."""
    def __init__(self, filename,ids,w_s, w_type, n_w):
        """filename -- FASTA file path; ids -- sequence IDs to iterate;
        slicing parameters are forwarded to seq_slicing."""
        super(LabeledLineSentence, self).__init__(w_s=w_s, w_type=w_type, n_w=n_w)
        self.filename = filename
        #self.SeqDict = SeqIO.to_dict(SeqIO.parse(filename, "fasta")) # dictonary: keys as fasta ids
        # NOTE(review): defaultdict() with no factory behaves like a plain
        # dict (missing keys still raise KeyError).
        self.fastaDict = defaultdict()
        self.ids = ids
    def read_fasta_file_as_dict(self):
        """
        It reads fasta file as a dictionary with
        keys as sequence IDs (without '>')
        and the value as a sequence (excluding "\n" if there is any)
        """
        IdsL = []
        # file_ = os.path.join(path,fileN)
        # file = open(file_,'r')
        # NOTE(review): the file handle is never closed; 'id' shadows the
        # builtin and is unbound if the file does not start with '>'.
        lines_iter = iter(open(self.filename,'r'))
        for line in lines_iter:
            if line[0] == '>':
                seq = ''
                id = line.strip().replace('>','')
                self.fastaDict[id] = ''
                IdsL.append(id)
            else:
                self.fastaDict[id] += line.strip()
    def __iter__(self):
        """Yield one TaggedDocument per id, tagged with the cleaned id."""
        self.read_fasta_file_as_dict()
        i = 0
        for key in self.ids:
            cls_lbl = key.split('_')
            # Drop any trailing newline the ids may carry.
            key = key.split('\n')
            key = key[0]
            seq = self.fastaDict[key]
            #seq = seq_record.seq
            #seq = str(seq)
            kmer_list = self.slices(seq) # word size
            #tag = seq_record.id # tag is key
            # yield TaggedDocument(cls_lbl[2], kmer_list,tags=[tag])
            yield TaggedDocument(kmer_list,tags=[key])
            # yield LabeledSentence(kmer_list, tags=['SEQ_%s' % i])
            i = i+1
def sequence_ids(file_train_p):
    """Return the sequence IDs (header lines starting with '>') of a FASTA file.

    The '>' markers and trailing line-ending characters are stripped.  The
    original only removed literal Windows CRLF pairs, which never survive
    text-mode (universal-newline) reading, so every id kept a trailing
    newline; callers had to split it off again.
    """
    ids = []
    # Context manager closes the file even if iteration raises.
    with open(file_train_p, 'r') as file_train:
        for line in file_train:
            if line.startswith('>'):
                ids.append(line.replace('>', '').rstrip('\r\n'))
    return ids
class Seq2Vec(BuildVocab):
    """Build the vocabulary for a sequence corpus and pickle the initial
    model state (index2word, vocab, doctag and kmer matrices).

    NOTE(review): relies on a module-level ``output_path`` global and on
    ``pkl`` / ``random`` imported at file top -- confirm they are defined
    before instantiation.
    """
    def __init__(self, sequences, path_no, file_no):
        # Hyper-parameters / state expected by BuildVocab.build_vocab.
        self.min_count = 0
        self.sample = 0
        self.max_vocab_size = 100000
        self.docvecs = DocvecsArray()
        self.hs = 1
        self.vector_size = 100  # embedding dimensionality
        self.layer1_size = self.vector_size
        self.negative = 0
        self.sorted_vocab = 0  # do not sort vocab by frequency
        self.null_word = 0
        self.dm = 1
        self.dm_concat = 0
        self.seed = 0
        self.random = random.RandomState(self.seed)
        self.hashfxn = hash
        self.total_words = 0
        self.build_vocab(sequences)  # the function is defined in BuildVocab
        self.path_no = path_no
        self.file_no = file_no
        # Persist the initializations.  Pickle requires binary mode ('wb');
        # the original opened these files in text mode, which fails on
        # Python 3, and built paths with raw backslashes instead of
        # os.path.join.  `with` also guarantees the handles are closed on
        # error.
        x = self.file_no
        with open(os.path.join(output_path, 'index2word' + str(x) + '.pkl'), 'wb') as index2word_py:
            pkl.dump(self.index2word, index2word_py)
        with open(os.path.join(output_path, 'vocab' + str(x) + '.pkl'), 'wb') as vocab_py:
            # kmers, paths and codes
            pkl.dump(self.vocab, vocab_py)
        with open(os.path.join(output_path, 'doctag' + str(x) + '.pkl'), 'wb') as doctag_py:
            # doctag initialization
            pkl.dump(self.docvecs.doctag_syn0, doctag_py)
        with open(os.path.join(output_path, 'kmer' + str(x) + '.pkl'), 'wb') as kmer_py:
            # kmer initialization
            pkl.dump(self.syn0, kmer_py)
def main(path_no,file_no):
    """Build a tagged corpus from one FASTA file and run Seq2Vec on it.

    NOTE(review): reads module-level globals ``input_path`` and ``file``
    (neither is defined in this module as shown) -- confirm they are set
    elsewhere before running.
    """
    word_size = 3 # size of each word
    window_type = 'non_overlap' # can choose overlap or non overlap
    n_w = '...?' # used when non overlapping window is selected
    filepath = os.path.join(input_path,file)
    print(filepath)
    IdsL = sequence_ids(filepath)
    # print(IdsL)
    seq_corpus = LabeledLineSentence(filepath, IdsL, w_s=word_size, w_type=window_type, n_w=n_w)
    Seq2Vec(seq_corpus,path_no,file_no)
if __name__ == '__main__':
    import sys
    import os
    path_no = 0
    # Process a single file (index 0); widen the range to batch more files.
    for i in range(0,1):
        file_no = i
        print("here")
        main(path_no,file_no)
### Run vocab_text.py
""""
system arguments ---- path and file number
"""
| [
"n9581766@qut.edu.au"
] | n9581766@qut.edu.au |
3c76d049c02a264cc34aca167338b6b7b25db7aa | b8cc6f837fdc9b590d76b30e889e613db4509117 | /SimEunJu/leetcode/42.py | c09f3006e4139d94dda276aa8676302e10582b69 | [] | no_license | Inflearn-everyday/study | f7ca07d36a82caf5a315499dd594e2fabcadd911 | caac77c222c97d174d5d81cb16c28479c537db4a | refs/heads/main | 2023-08-31T10:23:42.934374 | 2021-10-07T14:52:02 | 2021-10-07T14:52:02 | 380,748,309 | 5 | 4 | null | 2021-07-31T09:16:39 | 2021-06-27T13:35:36 | Java | UTF-8 | Python | false | false | 646 | py | class Solution:
def trap(self, height: List[int]) -> int:
if not height:
return 0
front = 0
back = len(height) - 1
frontMax = height[front]
backMax = height[back]
sum = 0
while front < back:
frontMax = max(frontMax, height[front])
backMax = max(backMax, height[back])
if frontMax <= backMax:
sum += frontMax - height[front]
front += 1
else:
sum += backMax - height[back]
back -= 1
return sum
| [
"noreply@github.com"
] | Inflearn-everyday.noreply@github.com |
de1d2fae4fc15e47d1181eb3bccc9285487cee64 | 339e1ac92c2b2dd2152e9aeb9f0f70cd8536ed0c | /assignment1/q2_sigmoid.py | 03ae0fff8bcae3b97ccb8d6120561ac5e7a165a6 | [] | no_license | luyiming/cs224n-assignments-winter2017 | 527e02586d0afb7ba17987f47220b88f1260555e | 2a0308de9803dc79f23fa98c6eab608151e8a8d7 | refs/heads/master | 2021-08-23T13:19:24.011602 | 2017-12-05T01:58:56 | 2017-12-05T01:58:56 | 112,084,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | #!/usr/bin/env python
import numpy as np
def sigmoid(x):
    """
    Compute the logistic sigmoid 1 / (1 + e^-x).

    Arguments:
    x -- a scalar or numpy array.

    Return:
    the elementwise sigmoid of x, same shape as x.
    """
    return 1 / (1 + np.exp(-x))
def sigmoid_grad(s):
    """
    Gradient of the sigmoid, expressed in terms of its output.

    Arguments:
    s -- sigmoid(x) for the original input x (NOT x itself); scalar
         or numpy array.

    Return:
    the elementwise derivative s * (1 - s).
    """
    return s * (1 - s)
def test_sigmoid_basic():
    """
    Some simple tests to get you started.
    Warning: these are not exhaustive.
    """
    print("Running basic tests...")
    inputs = np.array([[1, 2], [-1, -2]])
    probs = sigmoid(inputs)
    grads = sigmoid_grad(probs)
    print(probs)
    expected_probs = np.array([
        [0.73105858, 0.88079708],
        [0.26894142, 0.11920292]])
    assert np.allclose(probs, expected_probs, rtol=1e-05, atol=1e-06)
    print(grads)
    expected_grads = np.array([
        [0.19661193, 0.10499359],
        [0.19661193, 0.10499359]])
    assert np.allclose(grads, expected_grads, rtol=1e-05, atol=1e-06)
    print("You should verify these results by hand!\n")
def test_sigmoid():
    """
    Use this space to test your sigmoid implementation by running:
    python q2_sigmoid.py
    This function will not be called by the autograder, nor will
    your tests be graded.
    """
    print("Running your tests...")
    ### YOUR CODE HERE
    # Intentionally left empty: this is a scaffold for student-written tests.
    # raise NotImplementedError
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the sanity checks when executed as a script.
    test_sigmoid_basic();
    test_sigmoid()
| [
"luyimingchn@gmail.com"
] | luyimingchn@gmail.com |
af940d62dab3c20c019aeea9e1c219b8aa86a20d | 3b6e31c511a6330be4150faf6f6fef8b8b19ac7c | /script.py | 0cfdf1b81ee2caf62f57a9fbc70d79e68551fd60 | [
"MIT"
] | permissive | manuelparra/webscraping-empresas-universia | 7c003667a0c7ca4127bb2837b3b60a0d8b14a493 | 35149d7684c9b35560cbfa8061602effe23f82d4 | refs/heads/master | 2020-08-12T03:32:56.530508 | 2019-10-12T16:51:46 | 2019-10-12T16:51:46 | 214,680,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,776 | py | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
@description: This program can retrieve and save data in SQLite database,
this data is located at https://guiaempresas.universia.es
@author: Manuel Parra
@license: MIT
@date: 08/10/2019
@modified: 08/10/2019
"""
from bs4 import BeautifulSoup
from modules.nettest import chargetest
import datetime as dt
import requests
import sys
import csv
import sqlite3
import re
import os
import time
def testingOurConnection():
    """Abort the process when the Internet connectivity check fails.

    Pings Google's public DNS servers (8.8.8.8 / 8.8.4.4) via `chargetest`;
    on failure prints a message and calls sys.exit().
    """
    print("Testing the Internet connection, please wait!")
    host = ['8.8.8.8', '8.8.4.4']
    nt = chargetest(host)
    if not nt.isnetup():
        print("Your Internet connection is down!!, please try later!")
        sys.exit()
    # Fixed typo in the user-facing message ("Ineternet" -> "Internet").
    print('The Internet connection is OK!...')
def makeTables(cursor):
    """Create the URL queue and company tables if they do not already exist.

    UrlsUniversia holds the company-page links to visit (`revised` = 0/1);
    Bussiness holds the scraped company records, keyed back via `id_url`.
    """
    cursor.executescript('''
    CREATE TABLE IF NOT EXISTS UrlsUniversia (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    pos INTEGER,
    url_universia TEXT UNIQUE,
    revised INTEGER,
    row_date DATE,
    row_time DATE
    );
    CREATE TABLE IF NOT EXISTS Bussiness (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    id_url INTEGER ,
    name TEXT,
    address TEXT,
    location TEXT,
    province TEXT,
    phone1 TEXT,
    phone2 TEXT,
    cnae TEXT,
    webpage TEXT,
    row_date DATE,
    row_time DATE
    );
    ''')
def retrievehttp(link):
    """GET `link` and return the parsed BeautifulSoup document.

    Retries every 5 seconds on any request error.  NOTE(review): a
    permanently failing URL (e.g. a 404, raised by raise_for_status) makes
    this loop forever; consider a retry limit.
    """
    print('Retrieving data from:', link)
    while True:
        try:
            res = requests.get(link)
            res.raise_for_status()
        except Exception as exc:
            print("There was a problem: %s" % (exc))
            print("Pausing for a bit...")
            time.sleep(5)
            print("Trying again, please wait...")
            continue
        # Reached only for non-200 success codes (raise_for_status has
        # already rejected 4xx/5xx responses).
        if res.status_code != requests.codes.ok:
            print("We has a problem retrieving the data, check your code and" \
                "try again! Bye")
            sys.exit()
        return BeautifulSoup(res.text, 'html.parser')
def getInfo(cur, id_url, url):
    """Scrape one company page, insert the record into Bussiness and mark
    the queue row `id_url` as revised.  Returns the cursor.

    The while-loop only repeats when the company name cannot be extracted
    (page blocked / throttled): it then sleeps 30 minutes and retries.
    """
    while True:
        soup = retrievehttp(url)
        tagName = soup.select('.h1ficha.fn.localbusiness')
        try:
            # Pull the inner text of the first matching tag via regex.
            name = re.findall('>([\s\S]*)</', str(tagName[0]))[0]
        except:
            print("We has a problem to retrieving data, weiting 30 minits " \
                "to try again.!")
            time.sleep(1800)
            continue
        tagDirecction = soup.select('#situation_calle')
        direction = re.findall('>([\s\S]*)</', str(tagDirecction[0]))[0]
        tagLocality = soup.select('#situation_loc')
        locality = re.findall('>([\s\S]*)</', str(tagLocality[0]))[0]
        tagProvince = soup.select('#situation_prov')
        province = re.findall('>([\s\S]*)</', str(tagProvince[0]))[0]
        tagTelephone = soup.select('table td[itemprop="telephone"]')
        telephone1 = None
        if tagTelephone != []:
            telephone1 = re.findall('>([\s\S]*)</', str(tagTelephone[0]))[0]
        tagsTh = soup.select_one('table').select('th')
        tagsTd = soup.select_one('table').select('td')
        telephone2 = None
        cnae = None
        # Walk the header/value table cells in parallel to find the extra
        # phone numbers and the CNAE activity code.
        for i, th in enumerate(tagsTh):
            if th.getText().strip() == 'Otros Teléfonos:':
                telephone2 = tagsTd[i].getText()
            elif th.getText().strip() == 'CNAE:':
                cnae = tagsTd[i].getText()
        # Extra phones arrive concatenated in 9-digit runs; re-join them
        # with '/' separators (up to three numbers).
        if telephone2 != None:
            if len(telephone2) > 18:
                telephone2 = telephone2[0:9] + '/' + telephone2[9:18] + '/' \
                    + telephone2[18:27]
            elif len(telephone2) > 9:
                telephone2 = telephone2[0:9] + '/' + telephone2[9:18]
        web = None
        tagWeb = soup.select('#texto_ficha > p > a')
        if tagWeb != []:
            web = tagWeb[0].get('href')
        cur.execute("""
        INSERT OR IGNORE INTO Bussiness (id_url, name, address, location,
        province, phone1, phone2, cnae, webpage, row_date, row_time)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, (id_url, name, direction, locality, province, telephone1, telephone2,
        cnae, web, dt.date.today(), dt.datetime.now().strftime("%H:%M:%S")))
        cur.execute("""
        UPDATE UrlsUniversia SET revised = 1
        WHERE id = ?
        """, (id_url, ))
        return cur
def exportDataCSV(loc):
    """Export every stored company of locality `loc` to file/bussiness.csv,
    ordered by their original position on the source site.
    """
    conn, cur, url, localidad = mainProcess(loc, False)
    cur.execute("""
    SELECT Bussiness.name, Bussiness.address, Bussiness.location,
    Bussiness.province, Bussiness.phone1, Bussiness.phone2, Bussiness.cnae,
    Bussiness.webpage, Bussiness.row_date, UrlsUniversia.url_universia
    FROM Bussiness JOIN UrlsUniversia ON Bussiness.id_url = UrlsUniversia.id
    WHERE Bussiness.location = ?
    ORDER BY UrlsUniversia.pos ASC, Bussiness.location
    """, (loc, ))
    rows = cur.fetchall()
    headList = ['Nombre', 'Dirección', 'Localidad', 'Provincia', 'Telefono 1',
    'Telefono 2', 'CNAE', 'Pagina Web', 'Fecha data', 'Fuente']
    dataBussiness = [headList]
    # Each row already matches the header order; the original unpacked the
    # ten columns into locals (shadowing `loc`) only to rebuild the list.
    for row in rows:
        dataBussiness.append(list(row))
    fname = 'file/bussiness.csv'
    print("Generando archivo csv con los datos en", fname)
    # newline='' is required by the csv module; without it every record is
    # followed by a blank row on Windows.
    with open(fname, 'w', newline='') as file:
        write = csv.writer(file)
        write.writerows(dataBussiness)
    conn.close()
    print("Se exportaron", len(dataBussiness)-1, "registros." )
    print("Proceso finalizado!")
    input("Presione Enter para continuar...")
def mainProcess(loc, testInternetConn):
    """Common setup used by every menu action.

    Optionally checks connectivity, opens (and initializes) the SQLite
    database, and returns (conn, cur, base_url, locality_path).
    """
    # test our Internet connection
    if testInternetConn:
        testingOurConnection()
    # make the conn object
    conn = sqlite3.connect('bussines.sqlite')
    # cursor used for all queries against the database
    cur = conn.cursor()
    # create the tables if they do not exist yet
    makeTables(cur)
    url = 'https://guiaempresas.universia.es'
    localidad = '/localidad/' + loc + '/'
    return conn, cur, url, localidad
def downloadLinks(loc):
    """Crawl the locality listing pages and queue every company link.

    Each link is stored in UrlsUniversia with revised = 0 so that
    downloadBussines() can fetch the detail pages later.
    """
    # call function main
    conn, cur, url, localidad = mainProcess(loc, True)
    dir = url + localidad
    # retrieving data from link
    soup = retrievehttp(dir)
    # The pagination bar's second-to-last anchor holds the last page number.
    paginations = soup.select('div.pagination-centered a[href*="Pagina"]')
    if len(paginations) > 0:
        paginas = int(paginations[len(paginations)-2].getText())
    else:
        paginas = 1
    # Reading links to bussiness
    # List for store bussiness
    count = 1
    dataList = []
    for pag in range(1, (paginas+1), 1):
        links = soup.select('table.ranking_einf a')
        for link in links:
            dataList.append((count, url + link.get('href'), 0, dt.date.today(),
            dt.datetime.now().strftime("%H:%M:%S")))
            count += 1
        cur.executemany('''
        INSERT OR IGNORE INTO UrlsUniversia (pos, url_universia,
        revised, row_date, row_time)
        VALUES (?, ?, ?, ?, ?)
        ''', dataList)
        dataList = []
        # Fetch the next listing page, if any remain.
        if pag <= (paginas-1):
            dir = url + localidad + '?qPagina=' + str((pag + 1))
            soup = retrievehttp(dir)
    conn.commit()
    conn.close()
    print("Saving", (count-1), "company records at", localidad)
    print("Proceso finalizado!")
    input("Presione Enter para continuar...")
def downloadBussines():
    """Scrape the detail page of every queued URL still marked revised = 0
    and store the company records in the database.
    """
    # call function main
    conn, cur, url, localidad = mainProcess('', True)
    cur.execute('''
    SELECT id, url_universia
    FROM UrlsUniversia
    WHERE revised = 0
    ORDER BY id
    ''')
    rows = cur.fetchall()
    if len(rows) < 1:
        print("No hay registros de empresas pendientes por descargar, " \
            "ejecute la opcion 1.")
        conn.close()
        return None
    print("Se inicia el proceso de dascarga de datos de", len(rows), \
        "registros de empresas empresas pendientes por descargar en base de datos")
    # getInfo inserts each record and flips the queue row to revised = 1.
    for row in rows:
        cur = getInfo(cur, row[0], row[1])
    conn.commit()
    print("Se registraron", len(rows), "empresas en la base de datos!")
    input("Presione Enter para continuar...")
def menu():
    """Interactive console menu: queue links (1), scrape pending companies
    (2), export a CSV report (3), or quit (0).
    """
    while True:
        if sys.platform.lower()[:3] == 'lin':
            os.system('clear') # NOTE: on Windows the screen is cleared with 'cls' (handled below)
        else:
            os.system('cls')
        print("Bienvenidos al sistema para descarga de datos de Universia!")
        print("--------------------------------" * 3, "\n")
        print("Menu de opciones:")
        print("1) Obtener los link de empresas por localidad")
        print("2) Obtener los datos de las empresas pendiente por descargar")
        print("3) Generar reporte de empresas por localidad")
        print("0) para salir del sistema\n")
        opcion = input("Por favor, presione 1, 2 o 3 segun la opción que desea," \
        "precione s para salir del sistema: ")
        if opcion == '0':
            print('Bye!')
            break
        elif opcion == '1':
            loc = input('Escriba la localidad: ')
            downloadLinks(loc)
        elif opcion == '2':
            downloadBussines()
        elif opcion == '3':
            loc = input('Escriba la localidad: ')
            print("Generando archivo CSV con los registros en base de datos.")
            exportDataCSV(loc)
        else:
            print("Opción invalida. Por favor, seleccione una opción valida.")
            input("Presione la tecla Enter para continuar...")
if __name__ == '__main__':
    # Entry point: show the interactive menu.
    menu()
| [
"manuelparra@live.com.ar"
] | manuelparra@live.com.ar |
b2d051a597b6cfb783aa64205d665a477b8ac166 | ee561aa019a80f621007f82bdb21fe6ed8b6278f | /devel/turtlebot3-melodic-devel/turtlebot3_example/cmake/turtlebot3_example-genmsg-context.py | 09017c533fb2e6caa64d1dea49cebe24fae06a11 | [] | no_license | allanwhledu/agv_edu_prj | 4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2 | 643a8a96ca7027529332f25208350de78c07e33d | refs/heads/master | 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Action.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3ActionGoal.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3ActionResult.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3ActionFeedback.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Goal.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Result.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Feedback.msg"
services_str = ""
pkg_name = "turtlebot3_example"
dependencies_str = "std_msgs;geometry_msgs;actionlib_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "turtlebot3_example;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg;actionlib_msgs;/opt/ros/melodic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"bitwanghaili@gmail.com"
] | bitwanghaili@gmail.com |
a970e12a09b18bc2663033ddb2515f2215cd38bc | 84c26c42db76bc766b939b3a1cdb9707d51d503d | /Face Detection app/web.py | 1771266771b947d3fa1efc5a98223f82365287fa | [] | no_license | promudith/FaceDetectionwith_Python | faf22334b35c6e523393bb3e84cac7640cbe38d4 | fce7b983c34e959e63e9e886ed001d7a8a67d39e | refs/heads/main | 2023-07-22T15:57:04.518484 | 2021-08-17T19:21:42 | 2021-08-17T19:21:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py |
# import the opencv library
import cv2
# define a video capture object (device index 0 = default camera)
vid = cv2.VideoCapture(0)
while True:
    # Capture the video frame by frame; the first return value is False
    # when the grab fails (camera unplugged / no device) -- bail out
    # instead of handing None to imshow, which raises.
    ret, frame = vid.read()
    if not ret:
        break
    # Display the resulting frame
    cv2.imshow('frame', frame)
    # the 'q' button is set as the
    # quitting button you may use any
    # desired button of your choice
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# After the loop release the cap object
vid.release()
# Destroy all the windows (returns None, so binding it to `key` was pointless)
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | promudith.noreply@github.com |
646f0b2eda9d9c628c8f94ed3cd089471b9cfd1f | 57099dcd45d8491597ac37e19e28af93d0a9b8d8 | /test_task/subdivisions/apps.py | e46ae660805c5e4a862f21088fd34f5ec143132f | [] | no_license | newilgreen/new_tast | fbc5ade1fc1d94ce91a382d6d8eb95719897412e | af40cf939cff3832435d76ef98fb76040a2fd7e9 | refs/heads/main | 2023-03-10T19:25:15.061702 | 2021-02-20T15:02:03 | 2021-02-20T15:02:03 | 340,678,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.apps import AppConfig
class SubdivisionsConfig(AppConfig):
name = 'subdivisions'
| [
"newilgreen@gmail.com"
] | newilgreen@gmail.com |
a6b4ac8a8d7738641cfc86d501e5c17708178113 | 6d10b16ea7c86b11290e91bd591cf8c0feea9e99 | /Dash/PROJ1/layouts.py | 6a1d44537c1739b96c3fd3e9135410442f282d07 | [] | no_license | ganeshkp/python | 80fcf613aec17b63c74b38642af2d1fa1cd3f9c2 | 5ef0803f9f77060efd472a19b79ce9cacc8d8178 | refs/heads/master | 2023-08-04T16:26:52.213129 | 2023-04-23T19:30:06 | 2023-04-23T19:30:06 | 226,738,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import dash_core_components as dcc
import dash_html_components as html
# Landing page: links to the two project dashboards.
indexPage = html.Div([
    html.H2("Select project to go Dashboard",
            className="index_text",
            # style={"textAlign":"center",
            #        "color":"green"}
            ),
    dcc.Link("LCP",href="/lcp",
             style={"textAlign": "center",
                    "color": "blue",
                    "fontSize": 30}
             ),
    html.Br(),
    dcc.Link("Kepco", href="/kepco",
             style={"textAlign": "center",
                    "color": "blue",
                    "fontSize": 30}
             )
])
# LCP dashboard: dropdown plus a placeholder div filled by a callback.
layout1 = html.Div([
    html.H3('LCP'),
    dcc.Dropdown(
        id='app-1-dropdown',
        options=[
            {'label': 'App 1 - {}'.format(i), 'value': i} for i in [
                'NYC', 'MTL', 'LA'
            ]
        ]
    ),
    html.Div(id='app-1-display-value'),
    dcc.Link('Go to Main Page', href='/')
])
# Kepco dashboard: same structure as layout1 with its own component ids.
layout2 = html.Div([
    html.H3('Kepco'),
    dcc.Dropdown(
        id='app-2-dropdown',
        options=[
            {'label': 'App 2 - {}'.format(i), 'value': i} for i in [
                'NYC', 'MTL', 'LA'
            ]
        ]
    ),
    html.Div(id='app-2-display-value'),
    dcc.Link('Go to Main Page', href='/')
])
"ganeshkumar.patil@gmail.com"
] | ganeshkumar.patil@gmail.com |
9732d7b5f9e7962a3d3464b9214263c3f7b72d33 | 28e55f8024a44bddbe0e2a394bc77801120591eb | /week0/count_lines_words_chars/solution.py | 91fe6fae123d42b27882a83f9b7f83f53bf10c04 | [] | no_license | snejy/Programming101 | 3e4bee0dabc754aa90e7ad0162bc972ba44e73e6 | ab6e0ace6d3baee7b809c6ae8d0e8122538d5d52 | refs/heads/master | 2020-03-26T02:21:49.785706 | 2014-09-16T11:51:34 | 2014-09-16T11:51:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | import sys
def count_chars(filename):
    """Return the number of characters in `filename`."""
    # Context manager guarantees the handle is closed even if read() raises
    # (the original leaked it on error).
    with open(filename, 'r') as file:
        return len(file.read())
def count_lines(filename):
    """Return the number of newline characters in `filename`."""
    with open(filename, 'r') as handle:
        text = handle.read()
    # count("\n") equals len(text.split("\n")) - 1: one separator per line.
    return text.count("\n")
def count_words(filename):
    """Return the number of whitespace-separated words in `filename`.

    Uses str.split() with no argument so runs of spaces, tabs and newlines
    all act as separators and an empty file counts as 0 words.  The
    original split(" ") counted an empty file as 1 word and fused tokens
    separated only by newlines into one.
    """
    with open(filename, 'r') as file:
        return len(file.read().split())
def main():
    """CLI entry point: `<script> chars|lines|words <filename>`."""
    if len(sys.argv) > 2:
        filename = sys.argv[2]
        # Dispatch table instead of an if/elif chain; an unknown command
        # silently prints nothing, exactly like the original.
        handlers = {
            'chars': count_chars,
            'lines': count_lines,
            'words': count_words,
        }
        command = sys.argv[1]
        if command in handlers:
            print(handlers[command](filename))
    else:
        print("Not enough arguments given")
if __name__ == '__main__':
    # Entry point: parse argv and run the requested counter.
    main()
"snspasova@gmail.com"
] | snspasova@gmail.com |
874fe9a835b84741e6735b62eaaf9b1803cdb2ae | 76a3ac13e7316d5be29dc84e9584e3bf69aac26e | /add_to_library.py | 927307e261f2ff3d72706080e38bdff4a7849006 | [
"MIT"
] | permissive | scrubskip/add_to_yt_library | 78581d8f5cdb680f414c0a5ef0b164498367cd84 | 801202d4fc9929b053f945339ae6d31949cc4231 | refs/heads/main | 2023-07-17T01:57:51.568205 | 2021-08-28T03:54:59 | 2021-08-28T03:54:59 | 400,392,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | from ytmusicapi import YTMusic
import argparse
import csv
import re
SOUNDTRACK_ARTISTS = {'various artists'}
SOUNDTRACK_ALBUMS_REGEX = re.compile(r"original.*[soundtrack|recording]")
def main():
    """Read a TSV of (artist, album) rows and add each matching album to a
    new YouTube Music playlist; print the rows that could not be matched.

    NOTE(review): the positional `playlist` argument is parsed but never
    used -- the playlist name is hard-coded as "Import 3".  Confirm whether
    args.playlist was meant to feed create_playlist.
    """
    yt = YTMusic('headers_auth.json')
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=str)
    parser.add_argument("playlist", type=str)
    parser.add_argument('--dry-run', dest='dry_run', action='store_true')
    parser.add_argument('--no-dry-run', dest='dry_run', action='store_false')
    args = parser.parse_args()
    print(args.input)
    # In dry-run mode no playlist is created; a dummy id is threaded through.
    if (args.dry_run):
        playlistId = "test"
    else:
        playlistId = yt.create_playlist("Import 3", "imported song")
    with open(args.input) as albums:
        album_reader = csv.reader(albums, delimiter='\t')
        errors = []
        success = []
        for album in album_reader:
            print(album)
            if search_and_add(yt, album, playlistId, args.dry_run):
                success.append(album)
            else:
                errors.append(album)
        print("Success: ", len(success))
        print("Errors:")
        print(*errors)
def search_and_add(yt, album, playlistId = None, dryRun = False):
    """Search YT Music for `album` ([artist, title]) and add the first
    matching result's tracks to `playlistId`.  Returns True on a match.
    """
    results = yt.search(album[0] + " " + album[1], filter = "albums")
    added = False
    if results is not None:
        for result in results:
            ## break if this is really similar otherwise move to the next one.
            if (is_album_match(album[0], album[1], result)):
                print(result['title'], " id: ", result['browseId'])
                # add_album also adds each track to the library (unless dryRun).
                trackIds = add_album(yt, result['browseId'], dryRun)
                if (not dryRun):
                    response = yt.add_playlist_items(playlistId, trackIds)
                else:
                    print("would have added ", len(trackIds), " to ", playlistId)
                added = True
                break
    return added
def is_album_match(artist, album, result):
    """Decide whether a search `result` title matches the artist/album pair.

    Soundtracks must *start* with the distinguishing string; regular albums
    only need the album or artist name to appear somewhere in the title.
    """
    artist_lc = artist.lower()
    album_lc = album.lower()
    title = str(result['title']).lower()
    if not is_soundtrack(artist_lc, album_lc):
        return album_lc in title or artist_lc in title
    # Soundtrack: be more stringent and require the unique string (the
    # album when the artist is generic, otherwise the artist) as prefix.
    unique = album_lc if artist_lc in SOUNDTRACK_ARTISTS else artist_lc
    return title.startswith(unique)
def is_soundtrack(artist, album):
    """True when the artist/album pair looks like a soundtrack release."""
    artist_lc, album_lc = artist.lower(), album.lower()
    if artist_lc in SOUNDTRACK_ARTISTS:
        return True
    return SOUNDTRACK_ALBUMS_REGEX.match(album_lc) is not None
def add_album(yt, albumId, dryRun = False):
    """Add every track of album `albumId` to the user's library (unless
    dryRun) and return the list of the album's video ids.
    """
    album = yt.get_album(albumId)
    trackIds = []
    if (album is not None):
        ## loop through the tracks, add to library
        for track in album['tracks']:
            if not dryRun:
                # A truthy 'add' feedback token means the track can be added.
                if 'feedbackTokens' in track and track['feedbackTokens']['add']:
                    response = yt.edit_song_library_status(track['feedbackTokens']['add'])
                    if (not response['feedbackResponses'][0]['isProcessed']):
                        print("Error adding ", track['title'], " from ", album['title'])
            else:
                # Dry run: report what would happen without mutating the library.
                if 'feedbackToken' in track:
                    print("already have ", track['title'])
                elif 'feedbackTokens' in track:
                    print("would have added ", track['title'])
            trackIds.append(track['videoId'])
    return trackIds
if __name__ == '__main__':
    # Entry point: parse CLI args and import the albums.
    main()
| [
"kip@kipster.org"
] | kip@kipster.org |
a35142b12aa3e9983848d2f2ff48f5e607b0e649 | 9ce10e9db9033b2bb2819418786da53dd15cdeab | /apps/cms/models.py | 54c92f043400c02a991328e93e209e930a8e4794 | [] | no_license | P79N6A/rmbbsDemo | 5db9085ee7726a10ad4fc0dd4d90e98491a8095f | 4c05d45ae8f598f6c64c19671e4bcbef828f3946 | refs/heads/master | 2020-05-25T13:39:18.692229 | 2019-05-21T11:43:09 | 2019-05-21T11:43:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | from exts import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash # flask提供的加密生成与解析密码
# CMS user model (back-office account)
class CMSUser(db.Model):
    __tablename__ = 'cms_user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(50), nullable=False, unique=True)
    _password = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(50), nullable=False, unique=True)
    join_time = db.Column(db.DateTime, default=datetime.now)
    def __init__(self, username, password, email):
        self.username = username
        self.password = password  # assigning self.password invokes the password.setter below
        self.email = email
    # Password: exposed externally under the name `password`,
    # stored internally (hashed) in the `_password` column.
    @property
    def password(self):
        return self._password
    @password.setter
    def password(self, raw_password):
        # Only the salted hash is ever stored, never the raw password.
        self._password = generate_password_hash(raw_password)
    def check_password(self, raw_password):
        # True when raw_password matches the stored hash.
        return check_password_hash(self._password, raw_password)
    @property
    def permissions(self):
        # Bitwise OR of the permission masks of all roles attached to
        # this user (via the `roles` backref declared on CMSRole.users).
        if not self.roles:
            return 0
        all_permissions = 0
        for role in self.roles:
            permissions = role.permissions
            all_permissions |= permissions
        return all_permissions
    def has_permission(self, permission):
        # True when every bit of `permission` is present in the user's mask.
        return permission == permission&self.permissions
    def is_developer(self):
        return self.has_permission(CMSPermission.ALL_PERMISSION)
# CMS user permission flags (bit mask; combine with bitwise OR)
class CMSPermission(object):
    ALL_PERMISSION = 0b11111111  # all permissions
    VISITOR = 0b00000001  # visitor access
    POSTER = 0b00000010  # manage posts
    COMMENTER = 0b00000100  # manage comments
    BOARDER = 0b00001000  # manage boards
    FRONTUSER = 0b00010000  # manage front-end users
    CMSUSER = 0b00100000  # manage back-end (CMS) users
# Many-to-many association table linking CMS users and CMS roles
cms_role_user = db.Table(
    'cms_role_user',
    db.Column('cms_role_id', db.Integer, db.ForeignKey('cms_role.id'), primary_key=True),
    db.Column('cms_user_id', db.Integer, db.ForeignKey('cms_user.id'), primary_key=True)
)
# CMS role model: names a permission mask and is attached to users
class CMSRole(db.Model):
    __tablename__ = 'cms_role'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    desc = db.Column(db.String(200), nullable=False)
    create_time = db.Column(db.DateTime, default=datetime.now)
    # Defaults to the lowest (visitor) permission mask.
    permissions = db.Column(db.Integer, default=CMSPermission.VISITOR)
    users = db.relationship('CMSUser', secondary=cms_role_user, backref='roles')
| [
"sancklan@163.com"
] | sancklan@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.