blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ef3cbcc77cab71f7a5eebd3a3f56af1d7ecfc36 | f3d0a99937bb29330bef5faf49ee691bda65e4f0 | /transaction/forms.py | 908b58c18630ebb746115ff6dc2e69940aa7a952 | [] | no_license | tandrone1/COMP307_FinalProject | 79c0e5eeecbacd08093ed2e85fd26c1cc2ed7dfe | a2e963468baeaae075c6fa223fde7e052b853167 | refs/heads/master | 2022-04-21T13:42:47.346554 | 2020-04-23T00:39:42 | 2020-04-23T00:39:42 | 249,532,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.forms import ModelForm
from django import forms
class checkoutForm(forms.Form):
buy = forms.CharField(label='buy', max_length=100) | [
"robertmseliga@gmail.com"
] | robertmseliga@gmail.com |
f3c9121ab59ec8991c1528f6a78a0e8f6c47001f | de7e78887293a68ef4b307a92adb951f1665e1e4 | /tests/test_mtnlion.py | 1873a3571c1e248dec26f0fd5bf473b6b062d9db | [
"MIT"
] | permissive | macklenc/mtnlion | 6716a2ceb60ac1e4c06c1606350cd0fec75fd3bf | ba2e93faeed3004d344a8c14f37a409da572271d | refs/heads/devel | 2021-11-24T17:15:00.941173 | 2019-04-05T03:26:18 | 2019-04-05T03:26:18 | 122,787,824 | 0 | 1 | MIT | 2021-06-09T17:40:08 | 2018-02-24T23:05:01 | Python | UTF-8 | Python | false | false | 1,004 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `mtnlion` package."""
from click.testing import CliRunner
from mtnlion import cli
import pytest
# from mtnlion import mtnlion
@pytest.fixture
def response():
    """Sample pytest fixture (cookiecutter placeholder).

    Currently returns None; the commented lines show the intended
    requests-based fixture.  See more at:
    http://doc.pytest.org/en/latest/fixture.html
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Sample pytest test using the fixture as an argument (placeholder: no assertions yet)."""
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
    """Smoke-test the CLI: default invocation and --help both exit cleanly."""
    runner = CliRunner()
    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert "mtnlion.cli.main" in result.output
    # --help must also succeed and print click's standard usage epilogue
    help_result = runner.invoke(cli.main, ["--help"])
    assert help_result.exit_code == 0
    assert "--help Show this message and exit." in help_result.output
| [
"cmacklen@uccs.edu"
] | cmacklen@uccs.edu |
547eb57dce3a613aced8d9b8e89e6d3265f33a59 | 283f976923d6fd01389a32131926b7fa7141e756 | /Chapter 5 exercises.py | bfb38fd255a708f58ab5499b6e341e347ebb7a0c | [] | no_license | peterscj/Beginner_Programming_Exercises | e3bd106c3a2c589e1f7e56cce23404944866ccf2 | ff61438b823855c4d5f2f092b9fb755efbd9e0de | refs/heads/master | 2021-08-30T16:20:06.593652 | 2017-12-18T16:11:36 | 2017-12-18T16:11:36 | 112,038,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,077 | py | '''
Chapter 5 Exercises
Exercise 1
As discussed in the chapter, string formatting could be used to simplify the
dateconvert2.py program. Go back and redo this program making use
of the string-formatting method.
Original program:
#--------------------------------------------------------------------------
def dateconvert2():
# get the day, month, and year
day, month, year = eval(input("Enter day, month, year >>> "))
date1 = str(month)+"/"+str(day)+"/"+str(year)
#months = ['January', 'February', 'March', 'April', 'May', 'June', 'July','August', 'September', 'October', 'November', 'December']
months = ['January','February','March','April','May',
'June', 'July', 'August', 'September', 'October', 'November', 'December']
monthStr = months[month-1]
date2 = monthStr + " " + str(day) + ", " + str(year)
print(date1)
print(date2)
print("First date: {0}/{1}/{2}".format(day, month, year))
print("Second date: {0} {1}, {2}".format(monthStr, day, year))
#--------------------------------------------------------------------------
'''
def date():
    """Read a day, month, year triple and print the date in two formats."""
    # eval() on the comma-separated reply yields a (day, month, year) tuple
    d, m, y = eval(input("Enter day, month, year >>> "))
    month_names = ("January", "February", "March", "April", "May", "June",
                   "July", "August", "September", "October", "November",
                   "December")
    # Month numbers are 1-based, so shift down one for the tuple lookup
    print("First date: {0}/{1}/{2}".format(m, d, y))
    print("Second date: {0} {1}, {2}".format(month_names[m - 1], d, y))
'''
Exercise 2
A certain CS professor gives 5-point quizzes that are graded on the
scale 5-A, 4-B, 3-C, 2-D, 1-F. Write a program that accepts a quiz score
as an input and prints out the corresponding grade.
'''
def qs():
    """Map a 5-point quiz score onto a letter grade and print it."""
    score = int(input("Enter quiz score >>> "))
    # Letters in ascending score order: 1-F, 2-D, 3-C, 4-B, 5-A
    grades = "FDCBA"
    print("Quiz grade: ", grades[score - 1])
'''
Exercise 3
A certain CS professor gives 100-point exams that are graded on the
scale 90-100:A, 80-89:B, 70-79:C, 60-69:D, <60:F. Write a program that
accepts an exam score as input and prints out the corresponding grade
'''
def quiz():
    """Read a 100-point exam score and print its letter grade.

    Scale: 90-100 A, 80-89 B, 70-79 C, 60-69 D, below 60 F.
    """
    score = int(input("Enter quiz score >>> "))
    # Explicit boundary checks replace the original 101-character lookup
    # string, which was fragile (an extra trailing 'F' patched score 0 via
    # Python's negative indexing) and raised IndexError for scores above 101.
    if score >= 90:
        grade = 'A'
    elif score >= 80:
        grade = 'B'
    elif score >= 70:
        grade = 'C'
    elif score >= 60:
        grade = 'D'
    else:
        grade = 'F'
    print(grade)
'''
Exercise 4
An acronym is a word formed by taking the first letters of the words in
a phrase and making a word from them. For example, RAM is an acronym for
"random access memory." Write a program that allows the user to type in a
phrase and then outputs the acronym for that phrase. Note: the acronym
should be all uppercase, even if the words in the phrase are not
capitalized.
'''
def e4():
    """Build and print the acronym for a phrase typed by the user."""
    phrase = input("Enter phrase >>> ")
    # First letter of each word, forced to uppercase, joined together
    acronym = "".join(w[0].upper() for w in phrase.split())
    print(acronym)
'''
Exercise 5
Numerologists claim to be able to determine a person's character traits
based on the "numeric value" of a name. The value of a name is
determined by summing up the values of the letters of the name where "a" is
1, "b" is 2, "c" is 3, etc., up to "z" being 26. For example, the name
"Zelle" would have the value 26 + 5 + 12 + 12 + 5 = 60 (which happens
to be a very auspicious number, by the way).
Write a program that calculates the numeric values of all the names.
'''
def name_calc():
    """Return the numerological value of a name typed by the user.

    Each letter scores its position in the alphabet (a=1 ... z=26) and the
    values are summed; "Zelle" is 26+5+12+12+5 = 60.  Spaces are ignored,
    so full names such as "John Marvin Zelle" work too (this also answers
    exercise 6).

    Changes from the original: removed the dead local ``new_variable``,
    dropped the redundant second ``.upper()`` call, and replaced the
    per-character ``alphabet.index`` scan with ord() arithmetic.
    """
    entered = input('Enter name >>> ')
    # Drop spaces so multi-part names are valued letter by letter
    name = entered.replace(' ', '').upper()
    name_value = 0
    for ch in name:
        # ord(ch) - ord('A') is the 0-based alphabet position; +1 makes A=1
        name_value += ord(ch) - ord('A') + 1
    return name_value
'''
Exercise 6
Expand your solution to the previous problem to allow the calculation
of a complete name shc as "John Marvin Zelle" or "John Jacob Jingleheimer
Smith". The total value is just the sub of the numeric values of all the names.
'''
# See comments in exercise 5 for the answer
'''
Exercise 7
A Caesar cipher is a simple substitution cipher based on the idea of shifting
each letter of the plaintext message a fixed number (called the key) of positions
in the alphabet. For example, if the key value is 2, the word "Sourpuss" would be
encoded as "Uqwtrwuu". The original message can be recovered by " reenconding" it
using the negative of the key.
Write a program that can encode and decode Caesar ciphers. The input to
the program will be a string of plaintext and the value of the key. The out-
put will be an encoded message where each character in the original message
is replaced by shifting it KEY characters in the Unicode character set.
For example, if ch is a character in the string and key is the amount to shift,
then the character that replaces ch can be calculated as: chr(ord(ch)) + key.
'''
# I'm going to skip writing out problem 8, because it addresses the obvious
# issue with the problem listed above, in that what happens when the shift
# moves past 'z'. The below output indicates how to constantly iterate over
# the alphabet string loop, so that if the shift is 1 and the letter is 'z',
# then the encoded version should be 'a'.
def ceasar_cipher():
    """Encode a message with a Caesar cipher and return the cipher text.

    Prompts for the plaintext and an integer key, then shifts each
    lowercase letter ``key`` positions through the alphabet, wrapping from
    'z' back to 'a' (answers exercise 8's wrap-around requirement).
    Decode by running again with the negated key.
    """
    word = input('Enter word >>> ')
    key = int(input('Enter key >>> '))
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    code_word = ''
    for ch in word:
        if ch in alphabet:
            # Modulo 26 wraps the shift around the end of the alphabet
            code_word += alphabet[(alphabet.index(ch) + key) % 26]
        else:
            # Robustness fix: the original only special-cased spaces and
            # crashed with ValueError on any other non-lowercase character
            # (uppercase, digits, punctuation); pass them through unchanged.
            code_word += ch
    return code_word
'''
Exercise 9
Write a program that counts the number of words in a sentence entered
by the user.
'''
def word_count():
    """Prompt for a sentence and return how many words it contains."""
    # str.split() with no argument splits on any run of whitespace,
    # so multiple spaces between words are not over-counted
    return len(input('Enter sentence >>> ').split())
'''
Exercise 10
Write a program that calculates the average word length in a sentence
entered by the user.
'''
def avg_word_length():
    """Prompt for a sentence and return the mean length of its words."""
    words = input('Enter sentence >>> ').split()
    # Total characters across all words divided by the word count
    # (raises ZeroDivisionError on an empty sentence, as before)
    return sum(len(w) for w in words) / len(words)
'''
Exercise 11
Write an improved version of the Chaos program from Chapter 1 that allows
a user to input two initial values and the number of iterations and
then prints a nicely formatted table showing how the values change over
time. For example, if the starting values were .25 and .26 with 10
iterations, the table might look like this (didn't feel like typing it
out lol)
'''
# chaos.py
##def main():
##
## print("This program illustrates a cahotic function")
## x = eval(input("Enter a number between 0 and 1: "))
## for i in range(10):
## x = 3.9 * x * (1 - x)
## print(x)
##
##main()
def chaos():
    """Print a side-by-side table of the logistic map x -> 3.9*x*(1-x).

    Reads two seeds (entered as 0-100, then scaled into 0-1) and an
    iteration count, and tabulates both trajectories so their divergence
    is visible (exercise 11's improved Chaos program).
    """
    # print("This program illustrates a cahotic function")
    x = eval(input("Enter a number between 0 and 100: ")) / 100
    y = eval(input("Enter a second number between 0 and 100: ")) / 100
    k = int(input("Enter number of iterations >>> "))
    # Table header: index column followed by the two starting values
    print('\nIndex', '       ', x, '           ', y)
    print('--------------------------------------\n')
    for i in range(k):
        # One logistic-map step for each trajectory
        x = 3.9 * x * (1 - x)
        y = 3.9 * y * (1 - y)
        print("{0:2}".format(i+1), '     ', "{0:6f}".format(x),
              '     ', "{0:6f}".format(y))
'''
Exercise 12
Write an improved version of the future value program from Chapter 2.
Your program will prompt the user for the amount of the investment, the
annualized interest rate, and the number of years of the investment. The
program will then output a nicely formatted table that tracks the value of
the investment year by year.
'''
##def futval():
##
## print("This program calculates the future value")
## print("of a ten year investment.")
##
## principal = eval(input("Enter the initial principal: "))
## apr = eval(input("Enter the annual interest rate: "))
##
## for i in range(10):
## principal = principal * (1 + apr)
##
## print("The value of the investment in 10 years is: ", principal)
def futval():
    """Print a year-by-year future-value table for a 10-year investment.

    Prompts for the starting principal and the annual interest rate
    (e.g. 0.05 for 5%), compounds annually, and prints the balance at
    the end of each year.
    """
    print("This program calculates the future value")
    print("of a ten year investment.")

    principal = eval(input("Enter the initial principal: "))
    apr = eval(input("Enter the annual interest rate: "))

    # The "^" symbol centers the string in the specified width
    print("\n{0:^}".format('Year'), "{0:^20}".format('Principal'))
    print('------------------------------------------\n')

    for i in range(10):
        principal = principal * (1 + apr)
        # Off-by-one fix: after the first compounding we are at the END of
        # year 1, so label the rows 1..10 instead of the original 0..9.
        print("{0:2}".format(i + 1), "{0:^23.2f}".format(principal))
'''
Exercise 13
Redo any of the previous programming problems to make them batch-oriented
(using text files for input and output)
'''
def ceasar_cipher_batch():
    """Batch (file-to-file) version of the Caesar cipher (exercise 13).

    Reads a hard-coded input file, shifts every lowercase letter by the
    interactively entered key, and writes the result to a hard-coded
    output file.  NOTE(review): newline characters are skipped and the
    whole file is accumulated into one string, so the output is a single
    line regardless of the input's line structure — confirm that is the
    intent.
    """
    # Gather inputs for the batch file process
    #input_file = input('Input file path >>> ' )
    #output_file = input('Output file path >>> ')
    input_file = 'C:/Users/Cole/Documents/Read files/the number of the beast.txt'
    output_file = 'C:/Users/Cole/Documents/Read files/Write here.txt'
    key = int(input('Enter key >>> ' )) # This is serious encryption
    # Open input and output files
    infile = open(input_file, 'r')
    outfile = open(output_file, 'w')
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    code_word = '' # Set empty string for encrypted message
    for line in infile: # First loop processes each line in the file
        for char in line: # Process each character in each line
            if char != ' ' and char != '\n': # Ignore new line characters and spaces
                # Shift the letter key positions, wrapping past 'z' via % 26
                # (will raise ValueError on uppercase/punctuation, as in the
                # interactive version)
                shift = alphabet[(alphabet.index(char) + key) % 26]
                code_word = code_word + shift
            elif char == ' ': # Maintain integrity of words by keeping spaces
                            # in the same locations
                code_word = code_word + ' '
    print(code_word, file=outfile) # Print the completed string to the write file
    infile.close() # Close both files for bookkeeping purposes
    outfile.close()
    print('Process complete, please see: ', output_file)
'''
Exercise 14
Word count. A common utility on Unix/Linux systems is a small program
called "wc." This program analyzes a file to determine the number of
lines, words, and characters contained therein. Write your own version of
wc. The program should accept a file name as input and then print three
numbers howing the count of lines, words, and characters in the file.
'''
def wc():
    """Report line, character, and word counts for a file, like Unix ``wc``.

    The path is hard-coded; uncomment the input() line to prompt for it.
    Character count includes spaces but excludes the stripped newlines.
    """
    # Can be modified to accept any filepath
    input_file = 'C:/Users/Cole/Documents/Read files/the number of the beast.txt'
    #input_file = input('Input file path >>> ' )
    # Open file
    f = open(input_file, 'r')
    # Returns each line of the file as item in a list
    contents = f.readlines()
    # Line count
    line_count = len(contents)
    # Character Count
    # Loop through each line of the file to remove newline characters
    # Uses standard accumulator pattern
    newline_removed = ''
    for i in contents:
        newline_removed = newline_removed + i.rstrip('\n')
    # len(list(s)) is equivalent to len(s) here
    character_count = len(list(newline_removed))
    # Word count
    # Split method creates list of words in a string
    word_count = len(newline_removed.split())
    # Close file
    f.close()
    # Print output for user
    print('\n')
    print('LINE COUNT: ', line_count, '\n')
    print('CHARACTER COUNT (with spaces): ', character_count, '\n')
    print('WORD COUNT: ', word_count, '\n')
'''
Exercise 15
Write a program to plot a horizontal bar chart of student exam scores.
Your program should get input from a file. The first line of the file contains
the count of the number of students in the file, and each subsequent line
contains a student's last name followed by a score in the range 0 to 100.
Your program should draw a horizontal rectangle for each student where
the lenghth of the bar represents the student's score. The bars should all
determine the size of the window and its coordinates. Bonus: label the
bars at the left end with the student name.
'''
from graphics import *
def e():
# Graphics library
# Get input from file, first line contains number of students in file,
# all lines after contains the student's last name followed by a score
# in range 0 - 100
# -----------
# File format must be as specified below:
# <header>
# <blank line>
# Student-<name>-<score>
# Student-<name>-<score>
# <1etc>
f = 'C:/Users/Cole/Documents/Read files/Exam scores.txt'
file = open(f, 'r')
# First line of file contains count of students, loop through it to identify
# digits and append to new string. Then, convert string to int for math.
# readline() operation reads next line of the file, so I saved this to
# a new string for analysis.
student_count_line = file.readline()
count_str = ''
for i in student_count_line:
if i.isdigit():
count_str = count_str + i
# Final saved count value
count = int(count_str)
# get student scores
# Step 1: Identify where student scores begin
line = file.readline()
while line != '\n':
line = file.readline()
# Variable line is now equal to the first line with a student's score in
# it.
# Step 2: Assign each student's last name
# exam_scores will be a list of lists containing the student's name
# and score
exam_scores = []
for line in file:
# The name and score for each student will be appended to the
# below list
name_and_score = []
# Remove newline character
line = line.rstrip('\n')
# Student names and scores are located between hyphen characters,
# so string slicing was used to isolate the necessary indexes of
# the string
name = line[(line.find('-')+1):line.rfind('-')]
score = line[line.rfind('-')+1:]
# Append both the name and score to the inner list
name_and_score.append(name)
name_and_score.append(score)
# Append the inner list to the list of list
exam_scores.append(name_and_score)
# Create window to diplay grade output
win = GraphWin("Exam Scores", count*120, count*120)
win.setCoords(-25, -25, 110, 110)
win.setBackground("white")
# Draw student names and bars
x = -13
y = -3
for i in exam_scores:
# Draw each student's name (first index in the list of list)
Text(Point(x,y), i[0]).draw(win)
# Draw a rectange corresponding to student's grade (second
# index in the list of list)
Rectangle(Point(x+15,y-3), Point(i[1], y+4)).draw(win)
Text(Point(int(i[1])+3, y), i[1]).draw(win)
y = y + (120/count)
# Draw axis
L1 = Point(2, -8)
L2 = Point(2,102)
L3 = Point(2, -8)
L4 = Point(102, -8)
Line(L1, L2).draw(win)
Line(L3, L4).draw(win)
print(exam_scores)
# Draw labels for the number of students
| [
"noreply@github.com"
] | peterscj.noreply@github.com |
8af8d59d58e58c46468bd6d99cfbfdecee3ecb7b | 30e3e64ccaf28a8c9c9d3667a36a0fb479f03884 | /django_2.2/bin/django-admin.py | 96f75f4c3001999624ba6bb53ecbccbe05a056f9 | [] | no_license | TrellixVulnTeam/Django_Framework_2_GN4L | b5c4c8548bbc455b537aeade328c2f849b17c920 | aaf022cb1a3ce6f60ef1228f3f5e5c4eb1842091 | refs/heads/master | 2023-06-17T04:13:45.923063 | 2021-07-16T07:57:50 | 2021-07-16T07:57:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | #!/Users/olegmaslov/work/python/geekbrains/src_lesson_1/step_1(adapt_to_django_2.0)/geekshop/django_2.2/bin/python
from django.core import management
if __name__ == "__main__":
    # Entry point: delegate to Django's command-line utility
    # (equivalent to running django-admin / manage.py commands)
    management.execute_from_command_line()
| [
"danil129766@gmail.com"
] | danil129766@gmail.com |
d55e12de5a967d2a7526e9c3e807a20b32f9f98b | 0a26a6aaf03723a1dee334668b4da3e844d846d8 | /pyfu/util/columnize.py | dc79719edf035d13e7175c1aaf5a3784c6b2b3fd | [
"MIT"
] | permissive | danbarrese/pyfu | e855f227ebbf9e703093e38ac8711e2d6fb3564e | 4e3e4427685cb922ff4d5b32be55203397915e33 | refs/heads/master | 2022-12-14T07:13:58.612057 | 2019-11-12T16:46:57 | 2019-11-12T16:46:57 | 101,839,744 | 0 | 0 | MIT | 2021-06-01T22:08:26 | 2017-08-30T05:02:15 | Python | UTF-8 | Python | false | false | 156 | py | import columnize
__author__ = 'Dan Barrese'
__pythonver__ = '3.6'
def asdf(lines):
    """Return *lines* formatted into columns via the third-party columnize package."""
    return columnize.columnize(lines)
print(asdf("abc a\nx xyz"))
| [
"danielbarrese@gmail.com"
] | danielbarrese@gmail.com |
b89c30de7c0c80fe25787e1e2ad58423f5dfce4a | cc600057b839853c40a644054fd66a777eb23226 | /vrcar/carcontrol/motorTest.py | 91e9f3f67d055f71cd10159fcaf0e2be5d613b20 | [] | no_license | GuanyiLi-Craig/pi | 7263e9a03b64221a9214e30142e2a3374e3539ea | c110f2de1b3b2e0ac6c2a73c602a960b55636634 | refs/heads/master | 2021-06-23T15:21:15.995767 | 2017-08-20T23:24:58 | 2017-08-20T23:24:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | # Picon Zero Motor Test
# Moves: Forward, Reverse, turn Right, turn Left, Stop - then repeat
# Press Ctrl-C to stop
#
# To check wiring is correct ensure the order of movement as above is correct
import piconzero as pz, time
#======================================================================
# Reading single character by forcing stdin to raw mode
import sys
import tty
import termios
def readchar():
    """Read one raw character from stdin, restoring terminal settings.

    The terminal is switched to raw mode for the single read and always
    restored afterwards (finally).  Raises KeyboardInterrupt on Ctrl-C,
    mirroring the normal terminal behaviour that raw mode suppresses.
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    # Bug fix: the original compared against the four-character string
    # '0x03', which can never equal a single character, so Ctrl-C was
    # silently ignored; compare the actual ETX control character instead.
    if ch == '\x03':
        raise KeyboardInterrupt
    return ch
def readkey(getchar_fn=None):
    """Return the next keypress, collapsing ANSI arrow-key escape sequences.

    Plain characters are returned as-is.  Arrow keys arrive as the 3-byte
    sequence ESC (0x1b), '[' (0x5b), then 'A'-'D'; those are folded into
    the single characters chr(16)-chr(19) as documented below.

    getchar_fn: optional replacement for readchar (e.g. for testing).
    """
    getchar = getchar_fn or readchar
    c1 = getchar()
    if ord(c1) != 0x1b:
        return c1
    c2 = getchar()
    if ord(c2) != 0x5b:
        return c1
    c3 = getchar()
    return chr(0x10 + ord(c3) - 65)  # 16=Up, 17=Down, 18=Right, 19=Left arrows
# End of single character reading
#======================================================================
# Main driving loop (Python 2): maps keys/arrows to Picon Zero motor calls.
speed = 100

print "Tests the motors by using the arrow keys to control"
print "Use , or < to slow down"
print "Use . or > to speed up"
print "Speed changes take effect when the next arrow key is pressed"
print "Press Ctrl-C to end"
print

pz.init()

# main loop: readkey() returns chr(16..19) for Up/Down/Right/Left arrows
try:
    while True:
        keyp = readkey()
        if keyp == 'w' or ord(keyp) == 16:
            pz.forward(speed)
            print 'Forward', speed
        elif keyp == 'z' or ord(keyp) == 17:
            pz.reverse(speed)
            print 'Reverse', speed
        elif keyp == 's' or ord(keyp) == 18:
            pz.spinRight(speed)
            print 'Spin Right', speed
        elif keyp == 'a' or ord(keyp) == 19:
            pz.spinLeft(speed)
            print 'Spin Left', speed
        elif keyp == '.' or keyp == '>':
            # Speed is clamped to the 0..100 range expected by piconzero
            speed = min(100, speed+10)
            print 'Speed+', speed
        elif keyp == ',' or keyp == '<':
            speed = max (0, speed-10)
            print 'Speed-', speed
        elif keyp == ' ':
            pz.stop()
            print 'Stop'
        elif ord(keyp) == 3:
            # Ctrl-C delivered as a character: leave the loop cleanly
            break
        time.sleep(0.5)
    pz.stop()
except KeyboardInterrupt:
    print
finally:
    # Always release the Picon Zero board, even on error/interrupt
    pz.cleanup()
| [
"bitforce.studio@gmail.com"
] | bitforce.studio@gmail.com |
0b2aba8b8cb7829320c43ba2b52cf1e83256685e | cedf062fa6e8e7d48509ddb7c7b3630e9dc8bf5a | /ProjectFinder.py | d99390fdbfa46757fefd97c7543560c8ca0efe06 | [] | no_license | asluis/Project-Finder | 13f80a332320312c2c39a3caceae4caffdf6e85b | 8101f18171db83ddd7e840582722028bdefddb05 | refs/heads/master | 2020-07-17T07:37:41.002648 | 2019-09-03T04:22:20 | 2019-09-03T04:22:20 | 205,976,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,412 | py | #This program was made by Luis Alvarez
import re
import sys
import imapclient
import pyzmail
import smtplib
# NOTE(review): credentials are hard-coded in source. Load them from
# environment variables or a config file instead of committing them,
# and rotate any password that was ever committed.
emailPassword = "REDACTED"
emailUsername = "asluisinfo@gmail.com"
workEmail = "REDACTED"

#Establishes a connection with the domain of the email
server = imapclient.IMAPClient("imap.gmail.com", ssl = True)
#Logs in to a specific acc that matches the connection we made above
server.login(emailUsername, emailPassword)
#Specifies the specific folder within the email we want to access
selectInfo = server.select_folder("INBOX")
#Shows us how many messages are in the inbox
print("%d messages in inbox" % selectInfo[b'EXISTS'])
#Gives us a list of unique IDs for each email received from a specific sender
UIDs = server.search(["FROM", workEmail])
print(UIDs)
#Returns a list of unique IDs from a specific sender
myUID = selectInfo[b'EXISTS']
#Returns a specific email (thru its UID) as gibberish unreadable by humans
rawMsg = server.fetch([myUID], ["BODY[]", "FLAGS"])
rawMsg
#Gives us a version of the unreadable gibberish of a specific email (thru UID)
pyzMsg = pyzmail.PyzMessage.factory(rawMsg[myUID][b'BODY[]'])
pyzMsg
#this returns a
#normal string, although it still has a lot of escape characters
message = pyzMsg.text_part.get_payload().decode("UTF-8")
print("Message = " + str(message))
#=======Processing/parsing text seection========
projectRegex = re.compile(r"""
(
(\# \d{1,2} \s \( \w{1,3} \) )
( ([^.])? (.+)? ){3,5}
)
""", re.VERBOSE)
rawProjectsList = projectRegex.findall(message) #Creates a multideimensional list
#that contains a list of projects
#and each project has its components split up
#First group (item/element) contains entire project listing
print(str(rawProjectsList))
print(len(rawProjectsList)) #Prints number of projects available
cleanProjList = []
for i in range(0, len(rawProjectsList)):
cleanProjList.append(rawProjectsList[i][0])
#The for loop above cleans up the list to ONLY contain complete project details,
#not complete project details AND the same project details split up into different
#items within the list
#Now each individual entry is a complete project and can be parsed for location
#each entry is now something that can be sent as an email without any editing of
#the entry
#An entry will be sent as an email if it matches my list of work-able locations
print("========CLEAN===========")
print(str(cleanProjList))
#============================================================
#Creates a separate location list with a valid location
locationRegex = re.compile(r"""
(
\s?Los\sGatos\s?
|
\s?Campbell\s?
|
\s?Milpitas\s?
|
\s?Mountain\sView\s?
|
\s?Morgan\sHill\s?
|
\s?Gilroy\s?
|
\s?Sunnyvale\s?
|
\s?Menlo\sPark\s?
|
\s?Palo\sAlto\s?
|
\s?Stanford\s?
|
\s?Redwood\sCity\s?
|
\s?Scotts\sValley\s?
|
\s?Pleasanton\s?
|
\s?Newark\s?
|
\s?Union\sCity\s?
)
""", re.VERBOSE)
#Creates a list that shows which entries in the cleanProjList have valid locations
#0 = not a valid location, 1 = valid location
locationIndex = []
validProjList = []
for i in range(0, len(cleanProjList)):
locMatch = locationRegex.search(str(cleanProjList[i]))
if locMatch == None:
locationIndex.append(0)
else:
locationIndex.append(1)
#Exits program if no project is in a suitable location
if len(cleanProjList) == 0:
print("\nTHERE ARE NO SUITABLE PROJECTS.\nExiting program...")
server.logout()
sys.exit()
print("\n=====HERE ARE THE VALID LOCATIONS:=====\n")
print("Which project would you like? Enter a number from 1 to whatever\n\
the maximum number of projects there are. \nPLEASE NOTE:\n\
Do NOT enter the project number. Numbering starts at 1 from top to bottom.")
#Prints valid projects
for i in range(0, len(locationIndex)):
if locationIndex[i] == 1:
print("\n")
print(str(cleanProjList[i]))
validProjList.append(cleanProjList[i])
print("Which project would you like to go to? Enter 0 for none of the above")
answer = str(input())
stop = False;
while True:
if answer.isdigit() == False:
answer = str(input("Try again, enter a number:\n"))
continue
elif int(answer) > len(validProjList):
answer = str(input("Try again, enter a number:\n"))
continue;
elif int(answer) == 0:
stop = True
break;
else:
break;
#This is done to match the index of the list
answer = int(answer) - 1
if stop == False:
print("You chose number: " + str((answer + 1)) + " which is this project:\n")
print(validProjList[answer])
else:
print("You did not choose a project.")
server.logout()
sys.exit() #Exits if user says none
print("\n=============Response==============\n")
#==============Crafting a response section===================
response = "Hello,\n\nI am interested in the following job opportunity:\n" + validProjList[answer] + "\n\nThanks.\n\n"
signature = "- Luis"
response+= signature
print(response)
response = response.encode("utf-8")
#===========Sending Message============
outServer = smtplib.SMTP("smtp.gmail.com", 587)
outServer.ehlo()
outServer.starttls()
outServer.login(emailUsername, emailPassword)
outServer.sendmail(emailUsername, workEmail, response)
| [
"noreply@github.com"
] | asluis.noreply@github.com |
ed02d4f5bc906a860edeaf51d7a1bb6d5c25b65e | 0550a6655187cc7f2e23270782b29af786f2711d | /data/test/unit_tests/distances_test.py | 5d34bffb932f77ef1533e288f0296387e7ca4140 | [] | no_license | SCIInstitute/dSpaceX | 01135cf2d034660de1c4a796b65244bbadac2a5b | 8176401859315824d278b642038032b99208fb53 | refs/heads/master | 2023-01-11T16:17:44.716002 | 2021-02-14T00:02:57 | 2021-02-14T00:02:57 | 101,233,360 | 5 | 2 | null | 2022-12-29T09:44:50 | 2017-08-23T23:28:24 | C++ | UTF-8 | Python | false | false | 1,030 | py | import glob
import numpy as np
import os
import unittest
from data.test.unit_tests.generate_volumes import generate_volumes, generate_ground_truth_distance
from data.distances.nrrd_distances import calculate_hamming_distance_nrrd
class TestDistances(unittest.TestCase):
    """Checks calculate_hamming_distance_nrrd against a brute-force ground truth."""

    def setUp(self):
        # Workspace for the generated .nrrd volumes (created by generate_volumes)
        self.directory = './test_volumes/'
        number_of_samples = 100
        volume_size = 100
        print('Generating volumes.')
        generate_volumes(self.directory, number_of_samples, volume_size)
        print('Generating ground truth')
        # Pairwise distances computed directly from the same synthetic data
        self.gt_distance = generate_ground_truth_distance(number_of_samples, volume_size)

    def test_volume_hamming_distance(self):
        # Distance matrix computed from the files on disk must match the
        # analytically generated ground truth elementwise.
        calc_distance = calculate_hamming_distance_nrrd(self.directory)
        print('Testing equality.')
        self.assertTrue(np.allclose(calc_distance, self.gt_distance))

    def tearDown(self):
        # Remove every generated volume so test runs stay independent
        files = glob.glob(self.directory + '*')
        for f in files:
            os.remove(f)
if __name__ == '__main__':
unittest.main()
| [
"kyli.nmb@gmail.com"
] | kyli.nmb@gmail.com |
2a77811f2a9512b8708da398ab7a0e89efd4b157 | 14f50607da61f252f164a2fbe146d55ad72eccd9 | /classwork/networking/MultiClient.py | 82d4317fc3979e67ce99e6d0e3fe83c8265f22eb | [] | no_license | nyeddi/MyPracticeCode | ae1f55caad157caf9e7bd17b6cc36b8e2cc87c7a | 6de079c9f9a19d0270421a3d9a4969ed3e8cd417 | refs/heads/master | 2021-01-20T11:40:57.627045 | 2020-12-15T05:30:04 | 2020-12-15T05:30:04 | 47,257,419 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from socket import *
# Interactive TCP chat client (Python 2): connects to a local server,
# sends the user's name, then relays typed lines until a blank line.
host = 'localhost'
name = raw_input('Enter name: ')
port = int(raw_input('Enter server port: '))
bufsiz = 1024  # receive buffer size — unused here; presumably kept to mirror the server
addr = (host, port)

tcpClient = socket(AF_INET , SOCK_STREAM)
tcpClient.connect(addr)

# sending name
tcpClient.send(name)

while True:
    data = raw_input('> ')
    if not data:
        # Blank input ends the chat loop
        break
    tcpClient.send(data)

raw_input('Enter to Quit')
tcpClient.close()
| [
"navk24@gmail.com"
] | navk24@gmail.com |
479524c784ad72ec8201cd61e2ded88131aef995 | 4e168d6dc15f1b9118685eeed61f0bd07f0ca52c | /api/routes/edit_blog.py | 183b9fe335d9aa753dfe755736f5ead712345e61 | [] | no_license | whyyouman/bloginterapi | c7941edb8401c2e7fe9896e83d4e5fe6a4012ade | 94e62b52fcc9a07a060f06ed5eb1b09c395b2c43 | refs/heads/main | 2023-05-09T17:58:12.262421 | 2021-06-22T14:12:26 | 2021-06-22T14:12:26 | 379,293,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from rest_framework.response import Response
from rest_framework.views import APIView
from ..models import Blog
from ..serializers import BlogApi
class EditBlog(APIView):
    """Update an existing Blog entry from POSTed form data."""

    def post(self, request):
        """Overwrite the blog row whose ``id`` is given in the payload.

        Expects ``id``, ``image``, ``title``, ``content`` and ``date``
        keys in ``request.data``; responds with a success or failure
        message (same payloads as before).
        """
        try:
            # DRF only routes POST requests to this method, so the original
            # ``if request.method == "POST"`` guard — which let non-POST
            # fall through and implicitly return None — is unnecessary.
            ids = request.data['id']
            Blog.objects.filter(id=ids).update(
                image=request.data['image'],
                title=request.data['title'],
                content=request.data['content'],
                date=request.data['date'],
            )
            return Response({'message': 'Success'})
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; missing keys or
            # database errors still yield the same failure payload.
            return Response({'message': 'Something Went Wrong !'})
| [
"jayant1998.sha@gmail.com"
] | jayant1998.sha@gmail.com |
0ae22456c9ad3e276f83136a13ba73091470d173 | af2104d96828857b328098391504d33956cdbd56 | /user/views.py | e04ae2d4ff92d4b753dafe21b4c7f167448809b3 | [] | no_license | boscoseries/python_django | 1f92997e2afd3722439094725faa8af00bce10b7 | aa38a6b4f121f3deadbb1bdd6385b54132a44c9f | refs/heads/master | 2021-03-18T18:14:44.678280 | 2020-03-13T17:04:35 | 2020-03-13T17:04:35 | 247,088,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from user.models import Person
from user.serializers import PersonSerializer
class PersonView(APIView):
def get_persons(self):
try:
return Person.objects.all()
except Person.DoesNotExist:
raise status.HTTP_400_BAD_REQUEST
def get(self, request, format=None):
queryset = Person.objects.all()
serializer = PersonSerializer(queryset, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
| [
"boscoseries@gmail.com"
] | boscoseries@gmail.com |
babb97b2bd3f510d5943b26d25dcb82a03757b7b | 4a0f1c7d39dd403c0f61a93c6647099bba52a304 | /lda2vec/embedding_mixture.py | 7d3ce482ad6582f96d89e6a31520ffe405ae6fde | [] | no_license | afcarl/Lda2vec-Tensorflow | c610dd1daf7714cf153b6e41bda0c65b851d08dc | c9a9207d0cd9250c770b9382abe91db5f2d46e3e | refs/heads/master | 2020-03-22T06:26:16.511571 | 2018-06-29T19:21:02 | 2018-06-29T19:21:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | import numpy as np
import tensorflow as tf
def _orthogonal_matrix(shape):
# Stolen from blocks:
# github.com/mila-udem/blocks/blob/master/blocks/initialization.py
M1 = np.random.randn(shape[0], shape[0])
M2 = np.random.randn(shape[1], shape[1])
# QR decomposition of matrix with entries in N(0, 1) is random
Q1, R1 = np.linalg.qr(M1)
Q2, R2 = np.linalg.qr(M2)
# Correct that NumPy doesn"t force diagonal of R to be non-negative
Q1 = Q1 * np.sign(np.diag(R1))
Q2 = Q2 * np.sign(np.diag(R2))
n_min = min(shape[0], shape[1])
return np.dot(Q1[:, :n_min], Q2[:n_min, :])
class EmbedMixture():
def __init__(self, n_documents, n_topics, n_dim, temperature=1.0,
W_in=None, factors_in=None, name=""):
self.n_documents = n_documents
self.temperature = temperature
self.name = name
# Sets the dropout value
#self.dropout = tf.placeholder_with_default(1., shape=[], name="dropout")
scalar = 1 / np.sqrt(n_documents + n_topics)
# self.W in original
if not isinstance(W_in, np.ndarray):
self.Doc_Embedding = tf.Variable(tf.random_normal([n_documents, n_topics], mean=0, stddev=50 * scalar),
name=self.name+ "_" +"doc_embedding")
else:
# Initialize the weights as a constant
init = tf.constant(W_in)
# Convert the weights to a tensorflow variable
self.Doc_Embedding = tf.get_variable(self.name+ "_" +"doc_embedding", initializer=init)
with tf.name_scope(self.name+ "_" +"Topics"):
# self.factors in original... Unnormalized embedding weights
if not isinstance(factors_in, np.ndarray):
self.topic_embedding = tf.get_variable(self.name+ "_" +"topic_embedding", shape=[n_topics, n_dim], dtype=tf.float32,
initializer=tf.orthogonal_initializer(gain=scalar))
else:
# Initialize the weights as a constant
init = tf.constant(factors_in)
# Convert the weights to a tensorflow variable
self.topic_embedding = tf.get_variable(self.name+ "_" +"topic_embedding", initializer=init)
#self.topic_embedding = tf.nn.dropout(topic_embedding, self.dropout, name="topic_dropout")
def __call__(self, doc_ids=None, update_only_docs=False):
# Get proportions from function below this one
proportions = self.proportions(doc_ids, softmax=True)
# multiply proportions by the factors_in
w_sum = tf.matmul(proportions, self.topic_embedding, name=self.name+ "_" +"docs_mul_topics")
return w_sum
def proportions(self, doc_ids=None, softmax=False):
# Given an array of document indices, return a vector for
# each document of just the unnormalized topic weights
if doc_ids == None:
w = self.Doc_Embedding
else:
w = tf.nn.embedding_lookup(self.Doc_Embedding, doc_ids, name=self.name+ "_" +"doc_proportions")
if softmax:
return tf.nn.softmax(w / self.temperature)
else:
return w
| [
"nxr9266@g.rit.edu"
] | nxr9266@g.rit.edu |
52bc7d5c2ee4a30db87229588d43cff54193260c | 3b3081bfe6c5e9159287395d26e81e359cb9e30f | /Library/users/views.py | a94657d869e78fce2e6c466a15afa72ecf9d296e | [
"MIT"
] | permissive | archx64/FaceEmotionRecognitionDjango | 710bfba0e398e5b3298ae3f8af04f3dbedbcc8ee | b8c0830d05602417949aee172630afcaf1e0652a | refs/heads/main | 2023-02-21T15:48:43.222325 | 2021-01-24T02:47:06 | 2021-01-24T02:47:06 | 331,115,151 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Account successfully created for {username}! You can now login')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'register.html', {'form': form})
@login_required()
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form,
}
return render(request, 'profile.html', context)
| [
"neuclyon@gmail.com"
] | neuclyon@gmail.com |
965136bc0b42a3aabe975b1fa258634b2f69fa3e | 395ebd49c4f3a083369406f1cdcb89cfb79fa57f | /tests/core/test_templatetags.py | 76c80869a6d716dc37f8963d8da04fb543a6c65a | [
"MIT"
] | permissive | philgyford/django-spectator | dbf76d93d1ccce9225c4a907c368f6e2cc4462c1 | 2d89dcdb624b01452a5b6ca0ee092774fcc0aa52 | refs/heads/main | 2023-07-24T06:43:13.846287 | 2023-07-11T14:16:34 | 2023-07-11T14:16:34 | 83,340,861 | 45 | 9 | MIT | 2023-07-11T09:48:16 | 2017-02-27T18:04:48 | Python | UTF-8 | Python | false | false | 9,851 | py | from unittest.mock import Mock, patch
from django.http import QueryDict
from django.test import TestCase
from spectator.core.apps import Apps
from spectator.core.factories import IndividualCreatorFactory
from spectator.core.templatetags.spectator_core import (
change_object_link_card,
domain_urlize,
get_enabled_apps,
get_item,
most_read_creators,
most_read_creators_card,
most_visited_venues,
most_visited_venues_card,
query_string,
)
from spectator.events.factories import MiscEventFactory, VenueFactory
from spectator.reading.factories import (
PublicationFactory,
PublicationRoleFactory,
ReadingFactory,
)
from .. import make_date
class GetEnabledAppsTestCase(TestCase):
@patch.object(Apps, "all")
def test_results(self, patched_all):
# all() will return an app that is not installed:
patched_all.return_value = ["events", "reading", "NOPE"]
# So 'NOPE' shouldn't be returned here:
enabled_apps = get_enabled_apps()
self.assertEqual(2, len(enabled_apps))
self.assertEqual(enabled_apps[0], "events")
self.assertEqual(enabled_apps[1], "reading")
class GetItemTestCase(TestCase):
def test_key(self):
dict = {"a": 1}
self.assertEqual(get_item(dict, "a"), 1)
def test_key_none(self):
dict = {"a": 1}
self.assertIsNone(get_item(dict, "b"))
class DomainUrlizeTestCase(TestCase):
def test_domain_urlize(self):
self.assertEqual(
domain_urlize("http://www.example.org/foo/"),
'<a href="http://www.example.org/foo/" rel="nofollow">example.org</a>',
)
class ChangeObjectLinkCardTestCase(TestCase):
def test_output_can_change(self):
creator = IndividualCreatorFactory(pk=5)
perms = ["spectator.can_edit_creator"]
result = change_object_link_card(creator, perms)
self.assertTrue(result["display_link"])
self.assertEqual(
result["change_url"], "/admin/spectator_core/creator/5/change/"
)
class QueryStringTestCase(TestCase):
def test_adds_arg(self):
"It adds your key/value to the existing GET string."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertIn(
query_string(context, "foo", "bar"), ["foo=bar&a=1", "a=1&foo=bar"]
)
def test_replaces_arg(self):
"It replaces an existing GET arg with what you supply."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertEqual(query_string(context, "a", "bar"), "a=bar")
def test_handles_missing_request(self):
"If there's no request object, it doesn't complain."
context = {}
self.assertEqual(query_string(context, "foo", "bar"), "foo=bar")
def test_urlencodes(self):
"It URL-encodes the returned string."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertIn(
query_string(context, "foo", "bar&bar"),
["foo=bar%26bar&a=1", "a=1&foo=bar%26bar"],
)
class MostReadCreatorsTestCase(TestCase):
def test_returns_queryset(self):
"It should return 10 items by default."
d = make_date("2017-02-15")
for i in range(11):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
ReadingFactory(publication=pub, start_date=d, end_date=d, is_finished=True)
creators = most_read_creators()
self.assertEqual(len(creators), 10)
def test_num(self):
"It should return `num` items."
d = make_date("2017-02-15")
for i in range(4):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
ReadingFactory(publication=pub, start_date=d, end_date=d, is_finished=True)
creators = most_read_creators(num=3)
self.assertEqual(len(creators), 3)
def test_finished(self):
"It should only return finished readings"
d = make_date("2017-02-15")
# A finished reading
c1 = IndividualCreatorFactory()
pub1 = PublicationFactory()
PublicationRoleFactory(publication=pub1, creator=c1, role_name="")
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=True)
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=True)
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=False)
# An unfinished reading
c2 = IndividualCreatorFactory()
pub2 = PublicationFactory()
PublicationRoleFactory(publication=pub2, creator=c2, role_name="")
ReadingFactory(publication=pub2, start_date=d, end_date=d, is_finished=False)
creators = most_read_creators()
self.assertEqual(len(creators), 1)
self.assertEqual(creators[0], c1)
self.assertEqual(creators[0].num_readings, 2)
class MostReadCreatorsCardTestCase(TestCase):
def test_returns_correct_data(self):
d = make_date("2017-02-15")
for i in range(2, 13):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
i, publication=pub, start_date=d, end_date=d, is_finished=True
)
data = most_read_creators_card()
self.assertIn("card_title", data)
self.assertIn("score_attr", data)
self.assertIn("object_list", data)
self.assertEqual(data["card_title"], "Most read authors")
self.assertEqual(data["score_attr"], "num_readings")
self.assertEqual(len(data["object_list"]), 10)
def test_num(self):
"It should return `num` items."
d = make_date("2017-02-15")
for i in range(2, 6):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
i, publication=pub, start_date=d, end_date=d, is_finished=True
)
data = most_read_creators_card(num=3)
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 3)
def test_finished(self):
"It should only return finished readings"
d = make_date("2017-02-15")
# A finished reading
c1 = IndividualCreatorFactory()
pub1 = PublicationFactory()
PublicationRoleFactory(publication=pub1, creator=c1, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
3, publication=pub1, start_date=d, end_date=d, is_finished=True
)
# Another finished reading (so there's a chart)
c2 = IndividualCreatorFactory()
pub2 = PublicationFactory()
PublicationRoleFactory(publication=pub2, creator=c2, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
2, publication=pub2, start_date=d, end_date=d, is_finished=True
)
# An unfinished reading for the same author - they should still be in the
# chart though, because they have one finished reading.
ReadingFactory(publication=pub2, start_date=d, end_date=d, is_finished=False)
# An unfinished reading
c3 = IndividualCreatorFactory()
pub3 = PublicationFactory()
PublicationRoleFactory(publication=pub3, creator=c3, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
2, publication=pub3, start_date=d, end_date=d, is_finished=False
)
data = most_read_creators_card()
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 2)
self.assertEqual(data["object_list"][0], c1)
self.assertEqual(data["object_list"][0].num_readings, 3)
self.assertEqual(data["object_list"][1], c2)
self.assertEqual(data["object_list"][1].num_readings, 2)
class MostVisitedVenuesTestCase(TestCase):
def test_returns_queryset(self):
"It should return 10 items by default."
for i in range(11):
MiscEventFactory(venue=VenueFactory())
venues = most_visited_venues()
self.assertEqual(len(venues), 10)
def test_num(self):
"It should return `num` items."
for i in range(4):
MiscEventFactory(venue=VenueFactory())
venues = most_visited_venues(num=3)
self.assertEqual(len(venues), 3)
class MostVisitedVenuesCardTestCase(TestCase):
def test_returns_correct_data(self):
for i in range(2, 13):
# It'll cut off any with only 1 reading, so:
MiscEventFactory.create_batch(i, venue=VenueFactory())
data = most_visited_venues_card()
self.assertIn("card_title", data)
self.assertIn("score_attr", data)
self.assertIn("object_list", data)
self.assertEqual(data["card_title"], "Most visited venues")
self.assertEqual(data["score_attr"], "num_visits")
self.assertEqual(len(data["object_list"]), 10)
def test_num(self):
"It should return `num` items."
for i in range(2, 6):
# It'll cut off any with only 1 reading, so:
MiscEventFactory.create_batch(i, venue=VenueFactory())
data = most_visited_venues_card(num=3)
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 3)
| [
"phil@gyford.com"
] | phil@gyford.com |
89dece8d86d548eb18d50cf9020cc5d85d9c4d93 | 4b4d21f6a2aaf8cb0ece595e4aaf9cb705ffdd49 | /marketing_message/controllers/controllers.py | c83db22f29b37ed9ac02bcf44de219ae2e23a33a | [] | no_license | sc4you/odoo-project-10.0 | e8c82b4cd42c0672e996561e75e0f9d0717821fa | bca7e400b6316bcbcefe6f0d088cb97a28f644bb | refs/heads/master | 2020-03-21T13:41:08.042847 | 2018-05-15T07:41:58 | 2018-05-15T07:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | py | # -*- coding: utf-8 -*-
import babel.dates
import time, json
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import werkzeug.urls
from werkzeug.exceptions import NotFound
import random
from odoo import http
from odoo import tools
from odoo.http import request
from odoo.tools.translate import _
from odoo.exceptions import UserError, ValidationError
import httplib
import urllib
import json
# 服务条款
class SmsEvent(http.Controller):
def __init__(self):
param = request.env()['ir.config_parameter']
self.account = param.get_param('account') or ''
self.password = param.get_param('password') or ''
self.host_sign = param.get_param('host_sign') or ''
self.host_marketing = param.get_param('host_marketing') or ''
self.sms_heard = param.get_param('sms_heard') or ''
# 发送请求
def send_post(self, datas, host, sms_send_uri):
try:
datas = json.dumps(datas)
"""发送post请求"""
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(host, port=80, timeout=30)
conn.request("POST", sms_send_uri, datas, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
except Exception:
return False
# 发送短信验证码
def commit_send_message(self, tel, code):
sms_send_uri = "/msg/variable/json"
phone = tel
code = code
params = phone + ',' + code
msg = self.sms_heard + u"您好!验证码是:{$var}"
print self.account
print self.account
datas = {
'account': self.account,
'password': self.password,
'msg': msg,
'params': params
}
send_result = self.send_post(datas, self.host_sign, sms_send_uri)
print send_result
if not send_result:
return False
else:
sort_data = json.loads(send_result)
print sort_data
if int(sort_data["code"]) == 0:
return code
else:
raise UserError(_(sort_data['errorMsg']))
| [
"guwenfengvip@163.com"
] | guwenfengvip@163.com |
0961c7df4a2719e2dfeeece2c5a57cf3f59e263c | 2cb507ecd6629b9ff457a36e462f987913d94c1a | /python核心技术与实战/23/gil.py | f3ccf2898fadf6e776549b48a927183257a39720 | [
"Apache-2.0"
] | permissive | youaresherlock/PythonPractice | 6869e0a5949675198826e5a07552237a636d6f5b | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | refs/heads/master | 2021-08-16T03:09:44.203035 | 2021-08-02T07:40:00 | 2021-08-02T07:40:00 | 146,625,560 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | #!usr/bin/python
# -*- coding:utf8 -*-
import time
from threading import Thread
import sys
import threading
# 单线程版
def CountDown(n):
while n > 0:
n -= 1
if __name__ == "__main__":
n = 3000000
start_time = time.perf_counter()
CountDown(n)
end_time = time.perf_counter()
print("n = {},单线程版耗时{}".format(n, end_time-start_time))
# 多线程版
start_time = time.perf_counter()
t1 = Thread(target=CountDown, args = [n//2])
t2 = Thread(target=CountDown, args = [n//2])
t1.start()
t2.start()
t1.join()
t2.join()
end_time = time.perf_counter()
print("n = {},多线程版耗时{}".format(n, end_time-start_time))
# 对象引用计数
for k in range(100):
a = []
b = a
print(sys.getrefcount(a))
# 线程安全
n = 0
def foo():
global n
n += 1
threads = []
for i in range(100):
t = threading.Thread(target=foo)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print(n) | [
"2263194561@qq.com"
] | 2263194561@qq.com |
58ce61b7f582ce7941345cabdd91cbb99c06692c | 78ee2d20722287f547c406a1cff1efc36d020ba3 | /flare_portal/versioning.py | b5ae7e3705b2d24587dd22d5d6233ad8eded23e8 | [
"MIT"
] | permissive | flare-kcl/flare-portal | db660b4ccc39a6f125d548fc9efb21026f097563 | a1cef9d22ba3f1bafac55bb6ee1c8223425101dd | refs/heads/main | 2023-07-24T10:00:27.807734 | 2022-07-19T02:08:38 | 2022-07-19T02:08:38 | 305,943,258 | 1 | 2 | MIT | 2023-07-03T14:40:33 | 2020-10-21T07:30:12 | Python | UTF-8 | Python | false | false | 2,608 | py | """Provides functions to fetch versions from Git
Copied from Raven Python
https://github.com/getsentry/raven-python/blob/d7d14f61b7fb425bcb15512f659626648c494f98/raven/utils/compat.py
"""
import os.path
class InvalidGitRepository(Exception):
pass
def fetch_git_sha(path: str, head: str = None) -> str:
"""
>>> fetch_git_sha(os.path.dirname(__file__))
"""
if not head:
head_path = os.path.join(path, ".git", "HEAD")
if not os.path.exists(head_path):
raise InvalidGitRepository(
"Cannot identify HEAD for git repository at %s" % (path,)
)
with open(head_path, "r") as fp:
head = str(fp.read()).strip()
if head.startswith("ref: "):
head = head[5:]
revision_file = os.path.join(path, ".git", *head.split("/"))
else:
return head
else:
revision_file = os.path.join(path, ".git", "refs", "heads", head)
if not os.path.exists(revision_file):
if not os.path.exists(os.path.join(path, ".git")):
raise InvalidGitRepository(
"%s does not seem to be the root of a git repository" % (path,)
)
# Check for our .git/packed-refs' file since a `git gc` may have run
# https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery
packed_file = os.path.join(path, ".git", "packed-refs")
if os.path.exists(packed_file):
with open(packed_file) as fh:
for line in fh:
line = line.rstrip()
if line and line[:1] not in ("#", "^"):
try:
revision, ref = line.split(" ", 1)
except ValueError:
continue
if ref == head:
return str(revision)
raise InvalidGitRepository(
'Unable to find ref to head "%s" in repository' % (head,)
)
with open(revision_file) as fh:
return str(fh.read()).strip()
def fetch_package_version(dist_name: str) -> str:
"""
>>> fetch_package_version('sentry')
"""
try:
# Importing pkg_resources can be slow, so only import it
# if we need it.
import pkg_resources
except ImportError:
# pkg_resource is not available on Google App Engine
raise NotImplementedError(
"pkg_resources is not available " "on this Python install"
)
dist = pkg_resources.get_distribution(dist_name)
return dist.version
| [
"mixxorz@gmail.com"
] | mixxorz@gmail.com |
aba3ebf54e254f17e5cdf9ec8820c92e0f6fe85a | f676f5b576808a002ae4a17a2110ecadd7a87c4d | /beam_search.py | c17370fec1a0e8fc0938f703b53b2d26a1b52b17 | [
"Apache-2.0"
] | permissive | taorui-plus/deepspeech2_pinyin_recognize | 07c8517c1956e6544a8bb2a7a9ac08955544717b | 56122abe8d2718171b606625adc9fe90d8286afb | refs/heads/master | 2022-01-30T19:50:34.975905 | 2019-06-25T12:24:02 | 2019-06-25T12:24:02 | 193,670,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,388 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
# created by: hongyu.shao@qunar.com
# date: 2017-03-09 11:56
"""
执行beam search查询
见:https://arxiv.org/pdf/1408.2873.pdf
"""
import os
from collections import namedtuple
import numpy as np
import kenlm
import random
import time
import heapq
from char_map import char_map, index_map
WORD_BLANK = 0 # 表示语音空白
LANG_ATTENUATION_PARAM = 0.25 # 语言模型的衰减参数
LONG_TAIL_PARAM = 0.9 # 网络输出值只处理概率前x的
BETA = 0.25 # softmax的衰减参数
GAMA=0.6
TOP_SUM = 0.0
SORT_SUM = 0.0
MAIN_SUM = 0.0
M_SUM = 0.0
def load_lm(lm_model_path):
model = kenlm.Model(lm_model_path)
return model
# 根据语言模型,中文之间用空格隔开
# kenlm score的输出为log10(P)
def __lm_prob(lm_model, next_prefix, prefix):
if '' == next_prefix:
return 1.0
score = -10.0
try:
next_prefix = ' '.join(next_prefix)
all_score = [x for (x, _, _) in lm_model.full_scores(next_prefix, bos=False, eos=False)]
score = all_score[-1]
except Exception, e:
print next_prefix + 'a'
print e
#write_to_file(next_prefix + " " + str(10 ** score))
return 10 ** score
def __top_k(probs, k):
if len(probs) <= k:
return probs
pivot = probs[-1]
right = [x for x in probs[:-1] if x[1] > pivot[1]]
rlen = len(right)
if rlen == k:
return right
if rlen > k:
return __top_k(right, k)
else:
left = [x for x in probs[:-1] if x[1] <= pivot[1]]
right.append(pivot)
left_result = __top_k(left, k - rlen - 1)
left_result.extend(right)
return left_result
def __top_k_from_map(prefix_probs, k):
"""
采用quick select算法求出概率最大的前k个prefix返回
时间复制度 O(n)
:param prefix_probs: 前缀及对应的概率 dict
:param k: 取概率最大的前k个数
:return: 返回前k个对应的prefix
"""
prefix_arr = [i for i in prefix_probs.items()]
if len(prefix_arr) <= k:
return prefix_arr
# 转换为数组,便于处理, 这个比较耗时
top_list = __top_k(prefix_arr, k)
return top_list
# 每个timestamp里, 抛弃小概率的词
def __top_props_np(timestep, prop_limit):
IndexVal = namedtuple('IndexVal', ['index', 'proba'])
timestep = [IndexVal(index=i, proba=proba) for i, proba in enumerate(timestep)]
timestep = sorted(timestep, key=lambda x: x.proba, reverse=True)
filter_indexes = []
added_proba = 0
for index_val in timestep:
added_proba += index_val.proba
filter_indexes.append(index_val.index)
if added_proba >= prop_limit:
break
return filter_indexes
def __add_prob(next_prefixes, prefix, prob_no_blanks, prob_blanks, i):
if not prob_no_blanks[i + 1].has_key(prefix):
prob_no_blanks[i + 1][prefix] = 0
if not prob_blanks[i + 1].has_key(prefix):
prob_blanks[i + 1][prefix] = 0
next_prefixes[prefix] = prob_no_blanks[i + 1][prefix] + prob_blanks[i + 1][prefix]
def __softmax(timestep):
timestep = np.array(timestep)
e_vals = np.e ** timestep
total = np.sum(e_vals)
return e_vals/total
def __get_prefixes(prefix_probs):
prefixes = []
for item in prefix_probs:
prefixes.append(item[0])
return prefixes
def prefix_beam_search(lang_model, prediction, beam_width, result_num):
"""
前缀束搜索
:param lm_model_path:语言模型路径
:param prediction: 网络输出概率. 包含map的list
:param beam_width: 束搜索宽度限制
:param result_num: 返回前k的概率
:return: 概率最大的前K个句子
"""
prob_blanks = [{'': 1.}] # 记录序列结尾为blank的概率
prob_no_blanks = [{'': 0.}] # 记录序列结尾不为blank的概率
prev_prefixes = [('', 0.)] # 当前序列及其对应的概率
for i, timestep in enumerate(prediction):
next_prefixes = {}
# 只处理概率前x%的网络输出
softmax_timestep = __softmax(timestep)
choose_word_index = __top_props_np(softmax_timestep, LONG_TAIL_PARAM)
prefixes = __get_prefixes(prev_prefixes)
if len(prob_blanks) < i + 2:
prob_blanks.append({})
if len(prob_no_blanks) < i + 2:
prob_no_blanks.append({})
# 循环当前序列
for prefix in prefixes:
# 遍历所有词表中所有的词
for word_index in choose_word_index:
if WORD_BLANK == word_index:
# 计算下一轮序列结尾空白的概率
prefix_black_prob = ((softmax_timestep[WORD_BLANK]) * (prob_blanks[i][prefix] + prob_no_blanks[i][prefix]))
if len(prefix) == 0:
prefix_black_prob = prefix_black_prob
else:
prefix_black_prob = prefix_black_prob * float((len(prefix))**GAMA)
prob_blanks[i + 1][prefix] = prefix_black_prob
__add_prob(next_prefixes, prefix, prob_no_blanks, prob_blanks, i)
else:
next_prefix = prefix + index_map[word_index]
# 语言模型概率
next_lang_prob = __lm_prob(lang_model, next_prefix, prefix)
# 语言模型衰减后概率
atte_prob = next_lang_prob ** LANG_ATTENUATION_PARAM
# 语言模型结合网络输出概率**BETA
prefix_word_prob = (softmax_timestep[word_index]**BETA) * (
prob_blanks[i][prefix] + prob_no_blanks[i][prefix]) * atte_prob
# # positive weight for length
if len(prefix) == 0:
prefix_word_prob = prefix_word_prob
else:
prefix_word_prob = prefix_word_prob * float((len(prefix))**GAMA)
prob_no_blanks[i + 1][next_prefix] = prefix_word_prob
__add_prob(next_prefixes, next_prefix, prob_no_blanks, prob_blanks, i)
# 取概率值最大的前K个为下次迭代的prev_prefixes
top_next_prefixes = __top_k_from_map(next_prefixes, beam_width)
prev_prefixes = top_next_prefixes
# print top_next_prefixes
return __top_k(prev_prefixes, result_num)
| [
"rui.tao@11bee.com"
] | rui.tao@11bee.com |
f0174be116afed87e93eaaefa4dc607940135311 | 32320f9e98643d26329dab588f8b5231e07f1039 | /hack.py | 4ded570e26d7e7a95368e85e8b5ed6425be845ff | [] | no_license | iamtalhaasghar/facebook-group-posting-bot | 93dc744b9087e43ffd7c8acd2179503cae94eb62 | 551c3457ec4b17adec43ccc77a8ea85a3b201d2b | refs/heads/master | 2023-08-27T15:35:44.379702 | 2021-09-07T10:37:15 | 2021-09-07T10:37:15 | 333,841,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | from bs4 import BeautifulSoup as bs
from urllib.request import Request
from urllib.request import urlopen
PUBLIC_GROUP = 'Public Group'
headers = {'User-Agent': 'Chrome 74.0'}
counter = 0
g = {'id':'1991145924444488', 'name':'test'}
url = 'https://en-gb.facebook.com/groups/%s' % (g['id'])
print('Group # %d: %s, id: %s' %(counter, g['name'], g['id']))
request = Request(url, headers=headers)
site = urlopen(request)
site = bs(site.read(), 'html.parser')
# find the tag with this class, it is a div
status_div = site.find(class_='_19s_')
if(status_div == None):
print('Unable to find group status. You might need to login.')
exit()
# find the span tag inside previously found div and extract text from that span
status = status_div.find('span').text
if(status.lower() == PUBLIC_GROUP):
# open all meta tags in head
for i in site.find_all('meta'):
# check the content attribute of all meta tags
string = i.get('content')
# if it contains a phrase like 'X' members
if(string != None and 'has' in string and 'member' in string):
# extract the number of members
index1 = string.find('has')
index2 = string.find('member')
members = string[index1+3:index2]
members = members.strip()
members = members.replace(',','')
g['members'] = int(members)
print(members)
print('*' * 10)
break
else:
code = str()
# find all 'code' tags
codes = site.find_all('code')
for c in codes:
temp = c.string
temp = str(temp)
temp = temp.strip()
# if the comment in this code tag startswith this line then this code contains our data
if(temp.startswith('<div class="_4-u2 _3-96 _4-u8">')):
code = temp
break
# convert code to html
site = bs(code, 'html.parser')
# extract the desired data
stats = site.find_all(class_='_63om _6qq6')
posts_today = '0'
total_members = '0'
if(len(stats) == 2):
posts_today = stats[0].text
total_members = stats[1].text
# if there were no posts today then
else:
total_members = stats[0].text
total_members = total_members.replace(',','')
stats = site.find_all(class_='_63op _6qqa')
recent_posts = stats[0].text
recent_members = stats[1].text
g['members'] = int(total_members)
print("Total Members: ",total_members)
print("Recent Members: ",recent_members)
print("Posts Today: ",posts_today)
print("Recent Posts: ",recent_posts)
print('*' * 10)
| [
"talhaasghar.contact@simplelogin.fr"
] | talhaasghar.contact@simplelogin.fr |
b0f9c9a30835f1b454344b2216cc0eb83331a28a | de8f9a45dabad940bd79d0f61b838a87e8cbcff6 | /qdk/qdk/chemistry/tests/test_nwchem.py | 47ac03f194414bcaf8cb2aebf867bc278df48e97 | [
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"MIT",
"LicenseRef-scancode-python-cwi",
"EPL-1.0",
"LGPL-2.1-or-later",
"LGPL-2.1-only",
"ISC",
"Python-2.0",
"PSF-2.0",
"ZPL-2.1",
"Apache-2.0",
"BSD-2-Clause",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | AnudeepGunukula/qdk-python | d1440d295de207a988593d996127292ca97ef44c | 3a1f556d2d0e41fc1f4dc1dd0a9b79b57b989e6d | refs/heads/main | 2023-07-27T05:46:37.403589 | 2021-09-01T06:39:32 | 2021-09-01T06:39:32 | 400,722,035 | 3 | 0 | MIT | 2021-08-28T06:40:31 | 2021-08-28T06:40:31 | null | UTF-8 | Python | false | false | 1,638 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from unittest import mock
import pytest
import ruamel.yaml as yaml
from qdk.chemistry.solvers.nwchem import create_input_deck, parse_nwchem_output
@pytest.fixture()
def test_deck():
return """
start HHO_test
echo
memory stack 1000 mb heap 100 mb global 1000 mb noverify
geometry units au
symmetry c1
O 0.002 0.398 0.0
H 0.762 -0.203 0.0
H -0.764 -0.195 0.0
end
basis
* library sto-3g
end
scf
thresh 1.0e-08
tol2e 1e-09
singlet
rhf
maxiter 200
end
tce
ccsd
2eorb
2emet 13
tilesize 20
thresh 1.0e-08
end
set tce:print_integrals T
set tce:qorb 7
set tce:qela 3
set tce:qelb 3
task tce energy
"""
def test_nwchem(geometry, h2o, test_deck):
mol_name = "HHO_test"
with mock.patch("qdk.chemistry.geometry.Geometry.from_mol") as _m:
_m.return_value = geometry
nw_chem_input = create_input_deck(
mol=h2o,
mol_name=mol_name,
num_active_orbitals=7
)
assert nw_chem_input == test_deck
def test_nwchem_pass_geometry(geometry, h2o, test_deck):
mol_name = "HHO_test"
nw_chem_input = create_input_deck(
mol_name=mol_name,
geometry=geometry,
num_active_orbitals=7,
mol=h2o
)
assert nw_chem_input == test_deck
def test_parse_nwchem_output(caffeine_nw, caffeine_output):
assert parse_nwchem_output(caffeine_nw, caffeine_output) == {
'number of atoms': 24,
'number of orbitals': 80,
'SCF energy': -627.628748906485,
'CCSD correlation energy': -0.002197738334726,
'geometry snapshot': []
}
| [
"noreply@github.com"
] | AnudeepGunukula.noreply@github.com |
ea5d5e55d54477a28c3d0d03081e37950effcb73 | ca17adac27ce0fc199a111db0e786bdbfd24f849 | /02-asyncio-basic/e02-http-server.py | fa21cd3155f88612807884c45b0db4c6eeb30ad7 | [] | no_license | genzj/asyncio-training-course | 862c1edb19bd3d25cb8a927fdb9942a9838c8d80 | 34e72a51f79945709fbd496391295e7cd92ec8e1 | refs/heads/master | 2023-08-08T05:25:01.438483 | 2023-07-17T08:53:59 | 2023-07-17T08:59:14 | 150,000,887 | 1 | 2 | null | 2023-07-25T23:36:11 | 2018-09-23T16:05:10 | Python | UTF-8 | Python | false | false | 353 | py | # -*- encoding: utf-8 -*-
from aiohttp import web
async def handle(request):
name = request.match_info.get('name', "Anonymous")
text = "Hello, " + name
return web.Response(text=text)
app = web.Application()
app.add_routes([web.get('/', handle),
web.get('/{name}', handle)])
web.run_app(app, host='127.0.0.1', port=5000)
| [
"zj0512@gmail.com"
] | zj0512@gmail.com |
ee9349bef93f082d739c5c022c370a2af4282f01 | 760125998a1c7266bbe3e26ee8835221a2b8c167 | /plugin_georchestra/tasks.py | 5b543ce1ba4a6b419af5d2f7866ffb56602ee25f | [
"Apache-2.0"
] | permissive | neogeo-technologies/geocontrib | 88ca470844b1c493e0b3158ad03ec169e331025e | 7010d2b9d5f7d1bb6c9d9bdb94dd6db42518facf | refs/heads/develop | 2023-08-20T08:00:39.189105 | 2023-08-10T07:05:51 | 2023-08-10T07:05:51 | 180,320,428 | 5 | 11 | Apache-2.0 | 2023-03-31T15:05:06 | 2019-04-09T08:26:52 | JavaScript | UTF-8 | Python | false | false | 173 | py | from celery import shared_task
from django.core.management import call_command
@shared_task()
def task_georchestra_user_sync():
call_command('georchestra_user_sync')
| [
"cbenhabib@neogeo.fr"
] | cbenhabib@neogeo.fr |
e17c9c101f644b8d4c606e4edef9f9ae7f9e1fb6 | c1e2ae8bfb015f0d750f3fe43a0968d77a211af7 | /17 - mkad.py | 21937f6a6758d9d1406a70014c4c548d4d138b5d | [] | no_license | janevis/mypython | e5d23985dc981c71dbfd96abda9054ee877baec5 | 03a189883f2397edc581ad90aea2bd2cadd4ddb4 | refs/heads/master | 2022-08-14T12:05:07.541300 | 2020-05-24T10:54:30 | 2020-05-24T10:54:30 | 260,536,258 | 0 | 1 | null | 2020-05-05T12:48:07 | 2020-05-01T18:58:57 | Python | UTF-8 | Python | false | false | 104 | py | v = int(input())
t = int(input())
route = v * t
lap = route // 109
mile = route - lap * 109
print(mile)
| [
"64321654+janevis@users.noreply.github.com"
] | 64321654+janevis@users.noreply.github.com |
58f4c40eb8c52f99c0002350e82dc95a31f3baa3 | 180dc578d12fff056fce1ef8bd1ba5c227f82afc | /official/legacy/transformer/attention_layer.py | fcdce774b03f1b27cdf8350104946a44372bf458 | [
"Apache-2.0"
] | permissive | jianzhnie/models | 6cb96c873d7d251db17afac7144c4dbb84d4f1d6 | d3507b550a3ade40cade60a79eb5b8978b56c7ae | refs/heads/master | 2023-07-12T05:08:23.314636 | 2023-06-27T07:54:20 | 2023-06-27T07:54:20 | 281,858,258 | 2 | 0 | Apache-2.0 | 2022-03-27T12:53:44 | 2020-07-23T05:22:33 | Python | UTF-8 | Python | false | false | 7,119 | py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of multiheaded attention and self-attention layers."""
import math
import tensorflow as tf
from official.modeling import tf_utils
class Attention(tf.keras.layers.Layer):
"""Multi-headed attention layer."""
def __init__(self, hidden_size, num_heads, attention_dropout):
"""Initialize Attention.
Args:
hidden_size: int, output dim of hidden layer.
num_heads: int, number of heads to repeat the same attention structure.
attention_dropout: float, dropout rate inside attention for training.
"""
if hidden_size % num_heads:
raise ValueError(
"Hidden size ({}) must be divisible by the number of heads ({})."
.format(hidden_size, num_heads))
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.attention_dropout = attention_dropout
def build(self, input_shape):
"""Builds the layer."""
# Layers for linearly projecting the queries, keys, and values.
size_per_head = self.hidden_size // self.num_heads
def _glorot_initializer(fan_in, fan_out):
limit = math.sqrt(6.0 / (fan_in + fan_out))
return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit)
attention_initializer = _glorot_initializer(input_shape.as_list()[-1],
self.hidden_size)
self.query_dense_layer = tf.keras.layers.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=tf_utils.clone_initializer(attention_initializer),
bias_axes=None,
name="query")
self.key_dense_layer = tf.keras.layers.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=tf_utils.clone_initializer(attention_initializer),
bias_axes=None,
name="key")
self.value_dense_layer = tf.keras.layers.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=tf_utils.clone_initializer(attention_initializer),
bias_axes=None,
name="value")
output_initializer = _glorot_initializer(self.hidden_size, self.hidden_size)
self.output_dense_layer = tf.keras.layers.EinsumDense(
"BTNH,NHE->BTE",
output_shape=(None, self.hidden_size),
kernel_initializer=output_initializer,
bias_axes=None,
name="output_transform")
super(Attention, self).build(input_shape)
def get_config(self):
return {
"hidden_size": self.hidden_size,
"num_heads": self.num_heads,
"attention_dropout": self.attention_dropout,
}
def call(self,
query_input,
source_input,
bias,
training,
cache=None,
decode_loop_step=None):
"""Apply attention mechanism to query_input and source_input.
Args:
query_input: A tensor with shape [batch_size, length_query, hidden_size].
source_input: A tensor with shape [batch_size, length_source,
hidden_size].
bias: A tensor with shape [batch_size, 1, length_query, length_source],
the attention bias that will be added to the result of the dot product.
training: A bool, whether in training mode or not.
cache: (Used during prediction) A dictionary with tensors containing
results of previous attentions. The dictionary must have the items:
{"k": tensor with shape [batch_size, i, heads, dim_per_head],
"v": tensor with shape [batch_size, i, heads, dim_per_head]} where
i is the current decoded length for non-padded decode, or max
sequence length for padded decode.
decode_loop_step: An integer, step number of the decoding loop. Used only
for autoregressive inference on TPU.
Returns:
Attention layer output with shape [batch_size, length_query, hidden_size]
"""
# Linearly project the query, key and value using different learned
# projections. Splitting heads is automatically done during the linear
# projections --> [batch_size, length, num_heads, dim_per_head].
query = self.query_dense_layer(query_input)
key = self.key_dense_layer(source_input)
value = self.value_dense_layer(source_input)
if cache is not None:
# Combine cached keys and values with new keys and values.
if decode_loop_step is not None:
cache_k_shape = cache["k"].shape.as_list()
indices = tf.reshape(
tf.one_hot(decode_loop_step, cache_k_shape[1], dtype=key.dtype),
[1, cache_k_shape[1], 1, 1])
key = cache["k"] + key * indices
cache_v_shape = cache["v"].shape.as_list()
indices = tf.reshape(
tf.one_hot(decode_loop_step, cache_v_shape[1], dtype=value.dtype),
[1, cache_v_shape[1], 1, 1])
value = cache["v"] + value * indices
else:
key = tf.concat([tf.cast(cache["k"], key.dtype), key], axis=1)
value = tf.concat([tf.cast(cache["v"], value.dtype), value], axis=1)
# Update cache
cache["k"] = key
cache["v"] = value
# Scale query to prevent the dot product between query and key from growing
# too large.
depth = (self.hidden_size // self.num_heads)
query *= depth**-0.5
# Calculate dot product attention
logits = tf.einsum("BTNH,BFNH->BNFT", key, query)
logits += bias
# Note that softmax internally performs math operations using float32
# for numeric stability. When training with float16, we keep the input
# and output in float16 for better performance.
weights = tf.nn.softmax(logits, name="attention_weights")
if training:
weights = tf.nn.dropout(weights, rate=self.attention_dropout)
attention_output = tf.einsum("BNFT,BTNH->BFNH", weights, value)
# Run the outputs through another linear projection layer. Recombining heads
# is automatically done --> [batch_size, length, hidden_size]
attention_output = self.output_dense_layer(attention_output)
return attention_output
class SelfAttention(Attention):
"""Multiheaded self-attention layer."""
def call(self,
query_input,
bias,
training,
cache=None,
decode_loop_step=None):
return super(SelfAttention, self).call(query_input, query_input, bias,
training, cache, decode_loop_step)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
ed8ad6067089f4bd299a184a17a17133e9ddb9d1 | ac8c1e9ebaac05f0a213041bf0ad4a1270f1a131 | /authentication.py | dde71ab4ec2b3f1974455083be6009f7bcd55f59 | [] | no_license | xvblack/thunderlive | 125701b13912c2a2a589bbba46bd925b04fe462f | 524535b0945589e13878df997c71c578274952b6 | refs/heads/master | 2021-03-30T17:57:50.908261 | 2015-01-18T20:50:36 | 2015-01-18T20:50:36 | 29,405,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py |
class Authentication: | [
"xvblue@gmail.com"
] | xvblue@gmail.com |
cd0028cb9e3682273c0b46f161d6ac82d6d0a4db | b149c320daf770acedc84d90f758c1c14a83e99d | /News/management/commands/scraper.py | 39191351763c515e62107f87f03f47820bf9cfc5 | [
"MIT"
] | permissive | OleksandrShcherbinin/Ukrpravda | bb643f8d99f7145b114c1269ee89f94311974a0f | 70850a4fb51e895e19d477ff9666d481bc42dcc9 | refs/heads/master | 2020-08-30T12:00:04.694146 | 2019-11-24T12:28:44 | 2019-11-24T12:28:44 | 218,373,471 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,350 | py | from django.core.management.base import BaseCommand
from concurrent.futures import ThreadPoolExecutor
from requests_html import HTMLSession
from django.contrib import messages
from googletrans import Translator
from datetime import datetime
from threading import Lock
from time import sleep
from News.models import *
from queue import Queue
import random
import logging
import sys
import os
LOCKER = Lock()
logger = logging.getLogger('django')
with open(os.path.join('News/management/commands/user_agents.txt'), 'r') as f:
user_agents = f.read().split('\n')
with open(os.path.join('News/management/commands/fresh_socks.txt'), 'r') as f:
proxies_list = f.read().split('\n')
COUNTER = 0
NEWS_COUNTER = 0
ARTICLES_COUNTER = 0
COLUMNS_COUNTER = 0
def news_scraper(qu):
while True:
url = qu.get()
prox = random.choice(proxies_list)
proxies = {'http': prox, 'https': prox}
user_agent = str(random.choice(user_agents)).strip("\t\t\t\t")
headers = {'User-Agent': user_agent}
try:
with HTMLSession() as session:
response = session.get(url, proxies=proxies, headers=headers, timeout=10)
h1_start, h1_end = response.text.index('<h1 class="post_news__title">'), response.text.index('</h1>')
title = response.text[h1_start: h1_end]
title = title.split(">")[-1]
news_text1 = response.text.index('<div class="post_news__text"')
news_text2 = response.text.index('</div> <div class="post__source">')
news_text = response.text[news_text1: news_text2]
news_text = news_text.split(">", maxsplit=1)
news_text = ''.join([f"<p>{p}</p>" for p in news_text[1].split("\n") if p])
slug = url.split('news')[-1]
slug = slug[1:-1].replace('/', '-')
try:
image_url = response.html.xpath('//div[@class="post_news__photo clearfix"]/img/@src')
with HTMLSession() as session2:
resp_img = session2.get(image_url[0])
image_name = 'images/' + image_url[0].split("/")[-1]
with open(f'media/{image_name}', 'wb') as picture:
picture.write(resp_img.content)
del resp_img
except Exception as e:
print(e, type(e), sys.exc_info()[-1].tb_lineno)
image_name = 'images/default.jpg'
image_url = ['default.jpg']
post_date_index = response.text.index('<div class="post_news__date"')
post_date_index2 = response.text.index('<div class="post__social-container">')
post_date = response.text[post_date_index + 29: post_date_index2-7]
days = {'Понеділок': 'Monday', 'Вівторок': 'Tuesday', 'Середа': 'Wednesday', 'Четвер': 'Thursday',
"П'ятниця": 'Friday', 'Субота': 'Saturday', 'Неділя': 'Sunday'}
for day, d_tarns in days.items():
post_date = post_date.replace(day, d_tarns)
months = {'січеня': 'January', 'лютого': 'February', 'березня': 'March', 'квітня': 'April',
'травня': 'May', 'червня': 'June', 'липня': 'July', 'серпня': 'August',
'вересня': 'September', 'жовтня': 'October', 'листопада': 'November', 'грудня': 'December'}
for month, m_trans in months.items():
post_date = post_date.replace(month, m_trans)
date_time_obj = datetime.strptime(post_date, '%A, %d %B %Y, %H:%M')
reviews = response.html.xpath('//div[@class="post__views"]')
reviews = [elem.text for elem in reviews]
reviews = str(reviews[0]).split(' ')[0]
try:
tags = response.html.xpath('//span[@class="post__tags__item"]/a/@href')
except Exception as e:
tags = ['/tags/just-news/']
new_tags = []
for tag in tags:
new_tag = tag.replace("/tags/", '').replace("/", '')
new_tags.append(new_tag.upper())
news = {
'title': title,
'slug': slug,
'news_text': news_text,
'image': image_name,
'image_url': image_url[0],
'news_date': date_time_obj,
'news_source': url,
'parsing_date': datetime.now().date(),
'source_reviews': int(reviews),
}
with LOCKER:
try:
item = News.objects.create(**news)
global COUNTER, NEWS_COUNTER
COUNTER += 1
print(f'[Item number {COUNTER} saved]', news["title"], news['news_date'])
except Exception as e:
print('Не удалось записать', type(e), e)
NEWS_COUNTER -= COUNTER
print(f'{NEWS_COUNTER} left to be parsed!')
return
for t in new_tags:
tag = {'name': t, 'slug': t}
tag, created = NewsTag.objects.get_or_create(**tag)
item.news_tag.add(tag)
del response, title, news_text, reviews, slug, news, image_name, image_url, item, new_tags, new_tag, \
date_time_obj, tags, prox, proxies, user_agent, headers, post_date
# logger.debug(item)
except Exception as e:
print(url)
print(type(e))
qu.put(url)
if qu.empty():
break
def columns_scraper(qu):
while True:
url = qu.get()
translator = Translator()
prox = random.choice(proxies_list)
proxies = {'http': prox, 'https': prox}
user_agent = str(random.choice(user_agents)).strip("\t\t\t\t")
headers = {'User-Agent': user_agent}
try:
with HTMLSession() as session:
response = session.get(url, proxies=proxies, headers=headers, timeout=10)
h1_start, h1_end = response.text.index('<h1 class="post_news__title'), response.text.index('</h1>')
title = response.text[h1_start: h1_end]
title = title.split(">")[-1]
news_text1 = response.text.index('<div class="post_news__text"')
try:
news_text2 = response.text.index('<div class="post__tags">')
news_text = response.text[news_text1: news_text2]
except Exception as e:
news_text2 = response.text.index("Точка зору редакції УП може не збігатися з "
"точкою зору автора колонки.")
news_text = response.text[news_text1: news_text2 + 86]
news_text = news_text.split(">", maxsplit=1)
news_text = ''.join([f"<p>{p}</p>" for p in news_text[1].split("\n") if p])
slug = url.split('columns')[-1]
slug = f"column-{slug[1:-1].replace('/', '-')}"
author_index1 = response.text.index('<div class="post_news__author"')
author = response.text[author_index1+31: author_index1+200].split("<")
author = author[1].split(">")[-1]
author = translator.translate(author, dest='en')
sleep(1)
author = author.text
try:
image_url = response.html.xpath('//div[@class="post_news__column-author"]/img/@src')
with HTMLSession() as session2:
resp_img = session2.get(image_url[0])
image_name = 'images/' + image_url[0].split("/")[-1]
with open(f'media/{image_name}', 'wb') as picture:
picture.write(resp_img.content)
del resp_img
except Exception as e:
print(e, sys.exc_info()[-1].tb_lineno)
image_name = 'images/default.jpg'
image_url = ['default.jpg']
post_date_index = response.text.index('<div class="post_news__date"')
post_date = response.text[post_date_index + 29: post_date_index + 70]
post_date = post_date.split("</div>")[0]
days = {'Понеділок': 'Monday', 'Вівторок': 'Tuesday', 'Середа': 'Wednesday', 'Четвер': 'Thursday',
"П'ятниця": 'Friday', 'Субота': 'Saturday', 'Неділя': 'Sunday'}
for day, d_tarns in days.items():
post_date = post_date.replace(day, d_tarns)
months = {'січеня': 'January', 'лютого': 'February', 'березня': 'March', 'квітня': 'April',
'травня': 'May', 'червня': 'June', 'липня': 'July', 'серпня': 'August',
'вересня': 'September', 'жовтня': 'October', 'листопада': 'November', 'грудня': 'December'}
for month, m_trans in months.items():
post_date = post_date.replace(month, m_trans)
date_time_obj = datetime.strptime(post_date, '%A, %d %B %Y, %H:%M')
reviews = response.html.xpath('//div[@class="post__views"]')
reviews = [elem.text for elem in reviews]
reviews = str(reviews[0]).split(' ')[0]
try:
tags = response.html.xpath('//span[@class="post__tags__item"]/a/@href')
except Exception as e:
tags = ['/tags/just-news/']
new_tags = []
for tag in tags:
new_tag = tag.replace("/tags/", '').replace("/", '')
new_tags.append(new_tag.upper())
columns = {
'title': title,
'slug': slug,
'column_text': news_text,
'image': image_name,
'image_url': image_url[0],
'column_date': date_time_obj,
'column_source': url,
'parsing_date': datetime.now().date(),
'source_reviews': int(reviews),
}
with LOCKER:
try:
item = Columns.objects.create(**columns)
global COUNTER
COUNTER += 1
print(f'[Item number {COUNTER} saved]', columns["title"], columns['column_date'])
except Exception as e:
print('Не удалось записать', e, type(e))
return
for t in new_tags:
tag = {'name': t, 'slug': t}
tag, created = NewsTag.objects.get_or_create(**tag)
item.news_tag.add(tag)
author_slug = '-'.join(author.split(" "))
authors = {'name': author, 'slug': author_slug}
authors, created = Author.objects.get_or_create(**authors)
item.author_tag.add(authors)
except Exception as e:
print(url)
print(type(e))
qu.put(url)
if qu.empty():
break
def articles_scraper(qu):
while True:
url = qu.get()
translator = Translator()
prox = random.choice(proxies_list)
proxies = {'http': prox, 'https': prox}
user_agent = str(random.choice(user_agents)).strip("\t\t\t\t")
headers = {'User-Agent': user_agent}
try:
with HTMLSession() as session:
response = session.get(url, proxies=proxies, headers=headers, timeout=10)
h1_start, h1_end = response.text.index('<h1 class="post_news__title'), response.text.index('</h1>')
title = response.text[h1_start: h1_end]
title = title.split(">")[-1]
news_text1 = response.text.index('<div class="post_news__text"')
try:
news_text2 = response.text.index('<div class="post__tags">')
news_text = response.text[news_text1: news_text2]
except Exception as e:
news_text2 = response.text.index("Точка зору редакції УП може не збігатися з "
"точкою зору автора колонки.")
news_text = response.text[news_text1: news_text2 + 86]
news_text = news_text.split(">", maxsplit=1)
news_text = ''.join([f"<p>{p}</p>" for p in news_text[1].split("\n") if p])
slug = url.split('articles')[-1]
slug = f"article-{slug[1:-1].replace('/', '-')}"
author_index1 = response.text.index('<div class="post_news__author"')
author_index2 = response.text.index('<div class="post_news__photo__about"')
author = response.text[author_index1 + 31: author_index2-25]
author = author.split(">")[1][:-3]
author = translator.translate(author, dest='en')
sleep(1)
author = author.text
try:
image_url = response.html.xpath('//div[@class="article__wide-header__back"]')
image_url = str(image_url[0]).split("url(")
image_url = image_url[1].split(")")
with HTMLSession() as session2:
resp_img = session2.get(image_url[0])
image_name = 'images/' + image_url[0].split("/")[-1]
with open(f'media/{image_name}', 'wb') as picture:
picture.write(resp_img.content)
del resp_img
except Exception as e:
print(e, sys.exc_info()[-1].tb_lineno)
image_url = ['http://default.jpg']
image_name = 'images/default.jpg'
post_date_index = response.text.index('<div class="post_news__date"')
post_date = response.text[post_date_index + 29: post_date_index + 70]
post_date = post_date.split("</div>")[0]
days = {'Понеділок': 'Monday', 'Вівторок': 'Tuesday', 'Середа': 'Wednesday', 'Четвер': 'Thursday',
"П'ятниця": 'Friday', 'Субота': 'Saturday', 'Неділя': 'Sunday'}
for day, d_tarns in days.items():
post_date = post_date.replace(day, d_tarns)
months = {'січеня': 'January', 'лютого': 'February', 'березня': 'March', 'квітня': 'April',
'травня': 'May', 'червня': 'June', 'липня': 'July', 'серпня': 'August',
'вересня': 'September', 'жовтня': 'October', 'листопада': 'November', 'грудня': 'December'}
for month, m_trans in months.items():
post_date = post_date.replace(month, m_trans)
date_time_obj = datetime.strptime(post_date, '%A, %d %B %Y, %H:%M')
reviews = response.html.xpath('//div[@class="post__views"]')
reviews = [elem.text for elem in reviews]
reviews = str(reviews[0]).split(' ')[0]
try:
tags = response.html.xpath('//span[@class="post__tags__item"]/a/@href')
except Exception as e:
tags = ['/tags/just-news/']
new_tags = []
for tag in tags:
new_tag = tag.replace("/tags/", '').replace("/", '')
new_tags.append(new_tag.upper())
articles = {
'title': title,
'slug': slug,
'article_text': news_text,
'image': image_name,
'image_url': image_url[0],
'article_date': date_time_obj,
'article_source': url,
'parsing_date': datetime.now().date(),
'source_reviews': int(reviews),
}
with LOCKER:
try:
item = Article.objects.create(**articles)
global COUNTER
COUNTER += 1
print(f'[Item number {COUNTER} saved]', articles["title"], articles['article_date'])
except Exception as e:
print('Не удалось записать', type(e), e)
return
for t in new_tags:
tag = {'name': t, 'slug': t}
tag, created = NewsTag.objects.get_or_create(**tag)
item.news_tag.add(tag)
author_slug = '-'.join(author.split(" "))
authors = {'name': author, 'slug': author_slug}
authors, created = Author.objects.get_or_create(**authors)
item.author_tag.add(authors)
except Exception as e:
print(url)
print(type(e))
qu.put(url)
if qu.empty():
break
def get_news_links(start, task):
if task:
task.status = 'Started Getting Links'
task.save()
for _ in range(10):
with HTMLSession() as primary_session:
prox = random.choice(proxies_list)
proxies = {'http': prox, 'https': prox}
user_agent = str(random.choice(user_agents)).strip("\t\t\t\t")
headers = {'User-Agent': user_agent}
#print(headers)
#breakpoint()
#headers = {'User-Agent': 'Googlebot-News'}
try:
prime_response = primary_session.get("https://www.pravda.com.ua/sitemap/sitemap-news.xml",
proxies=proxies, headers=headers, timeout=10)
current_month = str(datetime.now().date()).replace("-", '/')[:8]
urls = prime_response.html.xpath('//@href')
if current_month in urls[30]:
urls = set(urls)
num_links = len(urls)
with open(f'News/management/commands/fresh_news_links.txt', 'w') as sitemap:
sitemap.write('\n'.join(urls))
break
urls = prime_response.html.xpath('//url/loc/text()')
if current_month in urls[30]:
urls = set(urls)
num_links = len(urls)
with open(f'News/management/commands/fresh_news_links.txt', 'w') as sitemap:
sitemap.write('\n'.join(urls))
break
except Exception as e:
print(type(e))
if task:
task.status = f'COLLECTED {num_links} TO TEXT FILE FOR BACK UP!'
task.end_time = datetime.now()
task.save()
print('Fresh Links Collected!')
def run_news_scraper(start, task):
if task:
task.status = 'Started Getting News'
task.save()
workers_count = 30
news_queue = Queue()
with open(os.path.join('News/management/commands/fresh_news_links.txt'), 'r') as file:
for url in file:
if "/rus/" in url:
continue
elif "ua/news/" in url:
global NEWS_COUNTER
url = url.strip("\n")
news_queue.put(url)
NEWS_COUNTER += 1
print('TOTAL NEWS TO PARSE', NEWS_COUNTER)
with ThreadPoolExecutor(max_workers=workers_count) as executor:
for _ in range(workers_count):
executor.submit(news_scraper, news_queue)
if task:
global COUNTER
task.status = f'THERE ARE {COUNTER} NEWS COLLECTED!'
task.end_time = datetime.now()
task.save()
def run_columns_scraper(start, task):
if task:
task.status = 'Started Getting Columns'
task.save()
columns_queue = Queue()
with open(os.path.join('News/management/commands/fresh_news_links.txt'), 'r') as file:
for url in file:
if "/rus/" in url:
continue
elif "ua/columns/" in url:
global COLUMNS_COUNTER
url = url.strip("\n")
columns_queue.put(url)
COLUMNS_COUNTER += 1
print(url)
print('TOTAL COLUMNS TO PARSE', COLUMNS_COUNTER)
with ThreadPoolExecutor(max_workers=COLUMNS_COUNTER) as executor:
for _ in range(COLUMNS_COUNTER):
executor.submit(columns_scraper, columns_queue)
if task:
task.status = 'COLUMNS COLLECTED!'
task.end_time = datetime.now()
task.save()
print('COLUMNS COLLECTED!')
def run_articles_scraper(start, task):
if task:
task.status = 'Started Getting Articles'
task.save()
articles_queue = Queue()
with open(os.path.join('News/management/commands/fresh_news_links.txt'), 'r') as file:
for url in file:
if "/rus/" in url:
continue
elif "ua/articles/" in url:
global ARTICLES_COUNTER
url = url.strip("\n")
articles_queue.put(url)
ARTICLES_COUNTER += 1
print(url)
print('TOTAL ARTICLES TO PARSE', ARTICLES_COUNTER)
with ThreadPoolExecutor(max_workers=ARTICLES_COUNTER) as executor:
for _ in range(ARTICLES_COUNTER):
executor.submit(articles_scraper, articles_queue)
if task:
task.status = 'ARTICLES COLLECTED!'
task.end_time = datetime.now()
task.save()
print('ARTICLES COLLECTED!')
messages.add_message(messages.INFO, 'New Articles Needs To Be Moderated')
class Command(BaseCommand):
help = 'Running news scraper to database'
def handle(self, *args, **options):
from task.models import Task
#task = Task.objects.create(name='run_parser')
task1 = Task.objects.create(name='get_fresh_links')
task2 = Task.objects.create(name='get_articles')
task3 = Task.objects.create(name='get_columns')
task4 = Task.objects.create(name='get_news')
get_news_links(1, task1)
run_articles_scraper(1, task2)
run_columns_scraper(1, task3)
run_news_scraper(1, task4)
#if task:
# task.status = 'Started Collecting Full Data'
# task.save()
# task.status = 'DATA COLLECTED!'
# task.end_time = datetime.now()
# task.save()
print('Done')
| [
"shcherbininalex81@gmail.com"
] | shcherbininalex81@gmail.com |
d4454ae10f9dc229bf38300c4b7a23bd69e7c8ac | ac2bdcf4c4e764366219c9f25f67fa69c6ff0e7d | /games/migrations/0007_auto_20170129_1745.py | 73443f6cf1694a97709044f1cdf0604d949000d9 | [] | no_license | goodfootclub/api | 6b9d93a1a161504a8849b506ff5748ef44230045 | ff01ee4eb06e83f3895ae05d6b96c71836403668 | refs/heads/develop | 2021-01-18T04:59:47.133321 | 2017-10-08T12:39:53 | 2017-10-08T12:39:53 | 68,627,493 | 0 | 0 | null | 2017-08-19T15:30:32 | 2016-09-19T17:06:44 | Python | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-29 17:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0006_auto_20170129_1704'),
]
operations = [
migrations.AlterField(
model_name='game',
name='description',
field=models.CharField(blank=True, default='', max_length=255),
),
]
| [
"mail@igonato.com"
] | mail@igonato.com |
f46caa94f9cfe92383336b2bf8c61c45c1d2646e | 8407cbf20f510d2519a6eca312458094ff4fd419 | /build/lib.linux-x86_64-2.7/manage1/validation/secure_password.py | a0fb4b39219b823d79089888289c3534f16bb9bf | [] | no_license | duongthequan1509/manage1 | 683c2af146546315a4e33bbac5376c154dc7f679 | a16d6b066904317cf3fcf05a936e23ef9a1c1a8a | refs/heads/master | 2021-01-01T18:38:52.315536 | 2017-07-14T03:02:13 | 2017-07-14T03:02:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import formencode
import re
class SecurePassword(formencode.validators.ByteString):
def _to_python(self, value, state):
x = True
while x:
if (len(value) < 6):
break
elif not re.search('[a-z]', value):
break
elif not re.search('[0-9]', value):
break
elif re.search('\s', value):
break
else:
x = False
break
if x:
raise formencode.Invalid('Password co tu 6 tro len. gom cac chu cai thuong va cac chu so', value, state)
return value
| [
"tranduytung1994@gmail.com"
] | tranduytung1994@gmail.com |
281955bd57b6731c1d3c22a054361f5f90202055 | a7a25e1b702e342a6153f6df8af418939a96c68d | /result_processing/process_simplelm.py | 9a9a058a9f39bf24a4abb70df3cd28792de91a72 | [
"CC-BY-4.0"
] | permissive | alexwarstadt/blimp | 101c70de7b347c3f7920b50e80a29c6f46b46435 | 3e56b06fcabca9b30822fc66435fca6b1aa40bb1 | refs/heads/master | 2022-12-21T13:06:44.104532 | 2022-12-13T21:02:35 | 2022-12-13T21:02:35 | 224,895,354 | 117 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | import pandas as pd
import numpy as np
results = []
for model in ["ngram", "lstm", "txl", "gpt2"]:
df = pd.read_json(f"../raw_results/all_outputs/{model}_outputs.jsonl", lines=True, orient="records")
df["correct"] = df.apply(lambda x: x["prob_good"] > x["prob_bad"], axis=1)
df["model"] = model
correct = df[["correct", "UID", "linguistics_term", "model"]]
correct = pd.pivot_table(correct, values="correct", index=["UID", "linguistics_term"], aggfunc=np.mean)
correct = correct.rename({"correct": model}, axis=1)
results.append(correct)
results = pd.concat(results, axis=1).reset_index()
results = results.sort_values(by=["linguistics_term", "UID"])
term_overall = results.set_index(["UID", "linguistics_term"]).stack().reset_index()
term_overall = pd.pivot_table(term_overall, index="linguistics_term", columns="level_2", values=0, aggfunc=np.mean)
term_overall = term_overall.reset_index()
term_overall["UID"] = "overall"
total_overall = results.set_index(["UID", "linguistics_term"]).stack().reset_index()
total_overall = pd.pivot_table(total_overall, columns="level_2", values=0, aggfunc=np.mean)
total_overall = total_overall.reset_index()
total_overall["UID"] = "overall"
total_overall["linguistics_term"] = "overall"
results = pd.concat([results, term_overall, total_overall])
results = results.drop(["index"], axis=1)
results.to_json("../raw_results/summary/models_summary.jsonl", lines=True, orient="records")
| [
"alexwarstadt@gmail.com"
] | alexwarstadt@gmail.com |
988fd659b25344bb07d811071add6cc1485ebabe | a34c1e7f415c0e535185014d66452f57beb5793a | /cms/check_rule_counts | ae15dce13313d27753aa074cd5bfdbfd873eee6b | [
"Apache-2.0"
] | permissive | mrceyhun/probes | 930e25815ff2429cac5f02298fd7c658ebc62e8b | dfc6c93b5c81f37aebf58999d37cf991f860265d | refs/heads/master | 2023-07-14T06:29:04.260199 | 2021-07-12T07:50:15 | 2021-07-12T07:50:15 | 395,551,407 | 1 | 0 | Apache-2.0 | 2021-08-13T14:23:36 | 2021-08-13T07:03:15 | null | UTF-8 | Python | false | false | 4,678 | #!/usr/bin/env python
# Copyright 2012-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Donata Mielaikaite, <donata.mielaikaite@cern.ch>, 2020
# - Eric Vaandering, <ewv@fnal.gov>, 2021
"""
Probe to check rules.
"""
from __future__ import print_function
import datetime
import sys
import traceback
from rucio.core import monitor
from rucio.db.sqla import models
from rucio.db.sqla.constants import (RuleState)
from rucio.db.sqla.session import get_session
from rucio.db.sqla.util import get_count
from sqlalchemy import func
# Exit statuses
OK, WARNING, CRITICAL, UNKNOWN = 0, 1, 2, 3
if __name__ == '__main__':
try:
session = get_session()
# check rules
state_map = {'REPLICATING': 'rules_replicating',
'OK': 'rules_ok',
'INJECT': 'rules_injecting',
'STUCK': 'rules_stuck',
'SUSPENDED': 'rules_suspend',
'WAITING_APPROVAL': 'rules_waiting_approval', }
result = (session.query(models.ReplicationRule.state, func.count(models.ReplicationRule.state))
.group_by(models.ReplicationRule.state)
.with_hint(models.ReplicationRule, 'INDEX_FFS(rules RULES_PK)', 'oracle')
.all())
for state, num in result:
gauge_state = state_map.get(repr(state), 'rules_' + repr(state).lower())
print('rules.count.%s %s' % (gauge_state, num))
monitor.record_gauge(stat='rules.count.%s' % (gauge_state), value=num)
ages = {
'created_24hours_ago': datetime.timedelta(days=1),
'created_1week_ago': datetime.timedelta(days=7),
'created_3weeks_ago': datetime.timedelta(days=21),
}
query = session.query(models.ReplicationRule.scope).filter(models.ReplicationRule.state != RuleState.OK)
result = get_count(query)
monitor.record_gauge(stat='judge.total_not_OK_rules', value=result)
query = (session.query(func.sum(models.ReplicationRule.locks_stuck_cnt))
.filter(models.ReplicationRule.state == RuleState.STUCK))
result = query.scalar() or 0
print('rules.no_of_files.total.sum_locks_stuck_cnt %s' % (result))
monitor.record_gauge(stat='rules.no_of_files.total.sum_locks_stuck_cnt', value=result)
# check left replicating files
query = (session.query(func.sum(models.ReplicationRule.locks_replicating_cnt))
.filter(models.ReplicationRule.state.in_([RuleState.STUCK, RuleState.REPLICATING])))
result = query.scalar() or 0
print('rules.no_of_files.total.sum_locks_replicating_cnt %s' % (result))
monitor.record_gauge(stat='rules.no_of_files.total.sum_locks_replicating_cnt', value=result)
# check stuck and replicating files which are more than X old
for a_name, a_delta in ages.items():
timeLimit = datetime.datetime.utcnow() - a_delta
query = (session.query(func.sum(models.ReplicationRule.locks_stuck_cnt))
.filter(models.ReplicationRule.state == RuleState.STUCK)
.filter(models.ReplicationRule.created_at <= timeLimit))
result = query.scalar() or 0
print('rules.no_of_files.stuck.%s.sum_locks_stuck_cnt %s' % (a_name, result))
monitor.record_gauge(stat='rules.no_of_files.stuck.%s.sum_locks_stuck_cnt' % a_name, value=result)
query = (session.query(func.sum(models.ReplicationRule.locks_replicating_cnt))
.filter(models.ReplicationRule.state.in_([RuleState.STUCK, RuleState.REPLICATING]))
.filter(models.ReplicationRule.created_at <= timeLimit))
result = query.scalar() or 0
print('rules.no_of_files.replicating.%s.sum_locks_replicating_cnt %s' % (a_name, result))
monitor.record_gauge(stat='rules.no_of_files.replicating.%s.sum_locks_replicating_cnt' % a_name,
value=result)
except:
print(traceback.format_exc())
sys.exit(UNKNOWN)
finally:
session.remove()
sys.exit(OK)
| [
"ericvaandering@gmail.com"
] | ericvaandering@gmail.com | |
1b702f7773f26dd8aa692107b2def69e07485853 | e54c21b9a2eee2c8a28e3b69a346585f9b7f325c | /offlineVoice.py | 9fd69a2080ed85d34c99e45db89d2e2dc37e09de | [] | no_license | interfacekun/pyaudio | d7ad92e2c5710f04338857c3bc8429cb97434807 | 66c9fba8d3c5bea6ef4774b6f5b166605fa1536b | refs/heads/master | 2020-12-30T13:28:19.101565 | 2017-06-18T05:07:37 | 2017-06-18T05:07:37 | 91,219,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | #! /usr/bin/env python
#-*- coding:utf-8 -*-
import sys
import os
import fcntl
import thread
import commands
reload(sys)
sys.setdefaultencoding('utf8')
# cmar='arecord -r 16000 -d 2 -D plughw:1 -f S16 /root/pyaudio/audio/output.wav'
# os.system(cmar)
cmre='export LD_LIBRARY_PATH=/root/linux_voice/libs/RaspberryPi/ && /root/linux_voice/bin/iat_sample'
(status, say) = commands.getstatusoutput(cmre)
#print "say:"+say
print say
def showSay():
# f=open('/root/gpio/say.txt','w')
# fcntl.flock(f, fcntl.LOCK_EX)
# f.write(say)
# fcntl.flock(f, fcntl.LOCK_UN)
cmd = 'python /root/pyaudio/showMsg.py 2$'+say
os.system(cmd)
showSay()
| [
"627795061@qq.com"
] | 627795061@qq.com |
cbf304bae3ef7c746d0dd2cb940d9fcc753ce19b | fa59c7bc546b0615951cce9988f286028a8aad5f | /video_chatt_app/migrations/0002_userrole.py | 3f63eb1c5749e6481bcb52822dad443644059236 | [] | no_license | codedeb/remote_education-2 | 7b01f882b529f26cd1b5530b4f353b63050346be | 9ff2f8cfc6306d9ddced0ce81b9cda53ee4ff278 | refs/heads/master | 2022-11-27T10:39:34.301524 | 2020-07-30T07:20:31 | 2020-07-30T07:20:31 | 279,833,241 | 0 | 0 | null | 2020-07-15T10:05:01 | 2020-07-15T10:04:59 | null | UTF-8 | Python | false | false | 828 | py | # Generated by Django 2.0.5 on 2020-07-05 12:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('video_chatt_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='video_chatt_app.Role')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"hr109sh@gmail.com"
] | hr109sh@gmail.com |
416fe78e694dbbcbd3c0b8814e15b6d1d015a4c9 | 629ebfff7c3a0410c6d0518962aae267b503b147 | /mysite/urls.py | c748d62544b3d5a218a2676ceb29d5502570e4c7 | [] | no_license | tulztime/my-first-blog | d72e88366e636b9ffa18cf1571959a6363061407 | be997ee62987fcd3fcbd4e794969cb3d96c8f242 | refs/heads/master | 2021-01-10T02:26:05.652040 | 2015-12-04T18:21:34 | 2015-12-04T18:21:34 | 47,089,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # We also want to keep the mysite/urls.py file clean, so we will
# import urls from our blog application to the main mysite/urls.py file.
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'', include('blog.urls')),
]
| [
"tulztime@yahoo.com"
] | tulztime@yahoo.com |
79700ce48d4aacbecddc068d807ecf3f56d9dc9c | c7a1470d2f6a15265e1f884c86439dc6d98b4484 | /LintCode/trie/0442_Implement_Trie_(Prefix_Tree).py | 7a674f32a78d465c8b92d461ef3d7d86a6c3d96c | [] | no_license | GuanYangCLU/AlgoTestForPython | 5239774fb6c840f3d65c4e4290ce8125fe8c94d3 | dddbc8115f69dec636c62c755f02905c469155e0 | refs/heads/master | 2022-01-19T15:03:54.835403 | 2021-12-30T02:19:37 | 2021-12-30T02:19:37 | 122,312,195 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | class TrieNode:
def __init__(self):
self.children = {}
self.isWord = False
class Trie:
def __init__(self):
# do intialization if necessary
self.root = TrieNode()
"""
@param: word: a word
@return: nothing
"""
def insert(self, word):
# write your code here
node = self.root
for c in word:
if c not in node.children:
node.children[c] = TrieNode()
node = node.children[c]
node.isWord = True
def find(self, word):
node = self.root
for c in word:
if c not in node.children:
return None
node = node.children[c]
return node
"""
@param: word: A string
@return: if the word is in the trie.
"""
def search(self, word):
# write your code here
res = self.find(word)
return False if not res else res.isWord
"""
@param: prefix: A string
@return: if there is any word in the trie that starts with the given prefix.
"""
def startsWith(self, prefix):
# write your code here
return self.find(prefix) is not None
| [
"noreply@github.com"
] | GuanYangCLU.noreply@github.com |
f13120c1a7f3823fbc2dae509d0ade475e57c264 | 1605a3af7211932b619c1fed045c3c99261a9028 | /display.py | 6ca903e1b6764019dca913e773f9528262933620 | [] | no_license | glebmamedov/strike_game | 377be5502b88a1d97fbb39e3be8f2fc2f1c86864 | ff4b4c5a81d6669794ba7bbdf73636c758240835 | refs/heads/master | 2023-01-21T11:36:50.037989 | 2020-11-29T10:38:11 | 2020-11-29T10:38:11 | 316,929,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | import pygame
pygame.init()
screen = pygame.display.set_mode((800, 600))
x = 50
y = 50
width = 60
height = 90
speed = 10
run = True
while run:
pygame.time.delay(100)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pressed_keys = pygame.key.get_pressed()
if pressed_keys[pygame.K_DOWN]:
y += speed
if pressed_keys[pygame.K_LEFT]:
x -= speed
if pressed_keys[pygame.K_RIGHT]:
x += speed
if pressed_keys[pygame.K_UP]:
y -= speed
pygame.draw.rect(screen, (0, 0, 250), (x, y, width, height))
pygame.display.update()
pygame.displa
| [
"GlebMamedov@icloud.com"
] | GlebMamedov@icloud.com |
4796879ca33cc09ec9eb7d9ee45c79b781a1ca1a | b7b0266cedfbd67a2b708b4b94fd02e9c33e5042 | /test_project/test_project/settings.py | 1a15cc208101276d296b316281fb3dcf9bce6694 | [
"MIT"
] | permissive | vstoykov/django-wpadmin | 8587b2fde15979617a0e9a48b8017e50aacf2d34 | 06d7d059a0ca4739817f6fcd320250cf8688acc8 | refs/heads/master | 2020-04-07T22:00:25.283613 | 2014-07-23T22:44:21 | 2014-07-23T22:44:21 | 22,183,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | """
Django settings for test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# add apps to path
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# add wpadmin to path
sys.path.insert(0, os.path.join(BASE_DIR, '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&478os2g_tt5g!e+wqgs5h8#-u8ydqhkohnc6u&*yxg9cu@rm5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# Django WP Admin must be before django.contrib.admin
'wpadmin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'authors',
'books',
'cds',
'dvds',
'test_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'files/static-collected/')
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
)
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
WPADMIN = {
'adminpanel': {
'admin_site': 'test_project.admin.admin',
'title': 'Django admin panel',
'menu': {
'top': 'wpadmin.menu.menus.BasicTopMenu',
'left': 'wpadmin.menu.menus.BasicLeftMenu',
},
'dashboard': {
'breadcrumbs': True,
},
'custom_style': STATIC_URL + 'wpadmin/css/themes/sunrise.css',
},
'userpanel': {
'admin_site': 'test_project.admin.user',
'title': 'Django user panel',
'menu': {
'top': 'test_project.wp.UserTopMenu',
'left': 'test_project.wp.UserLeftMenu',
},
'dashboard': {
'breadcrumbs': False,
},
'custom_style': STATIC_URL + 'wpadmin/css/themes/ocean.css',
},
}
| [
"maciej@marczewski.net.pl"
] | maciej@marczewski.net.pl |
14296dca237f4840a537425f8837877dc3058e5e | e3a939063d51f93e89f6a1ec52ca5ed1de60987a | /orders/migrations/0010_sub.py | 2fda320e2df02f31174ee96780c61023f86fcf88 | [] | no_license | Aurian-Nogues/CS50W-Project3 | c3fa9c356e71635b60a66c5996d4488e46cc171b | 6b247ef392e9301e689802e4cf5098f617394ca5 | refs/heads/master | 2020-04-24T00:04:10.160366 | 2019-02-28T14:18:21 | 2019-02-28T14:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | # Generated by Django 2.0.3 on 2019-02-24 18:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0009_auto_20190224_1828'),
]
operations = [
migrations.CreateModel(
name='Sub',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=64)),
('price', models.FloatField(max_length=8)),
('size', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_size', to='orders.Size')),
],
),
]
| [
"aurian.nogues@gmail.com"
] | aurian.nogues@gmail.com |
34eeaa9aec196bd76b5ea1cdc3fdcf0586ce7a70 | 7501691eceb21431b7cd199764855a6919a5859e | /backend/python-flask-server/swagger_server/aquadomeproxy/aquadomeproxy.py | c31e284bc5d90b094a417fca68437605aef411ae | [] | no_license | bwade59/aquadome_backend | 3ee976868cc718f1f6eb86d27fe5df5a430380f2 | 1158b0d15f1eafbab619f6db78be80e42b44a6fa | refs/heads/master | 2021-08-15T16:50:38.490980 | 2017-11-18T00:14:16 | 2017-11-18T00:14:16 | 111,162,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | #!/usr/bin/python
from airheater import AirHeater
from fishtank import FishTank
from sumptank import SumpTank
from growbed import GrowBed
from pymongo import MongoClient
class AquadomeProxy:
def _init_(self):
self.airheater = AirHeater()
self.fishtank = FishTank()
self.sumptank = SumpTank()
self.growbed = GrowBed()
self.dbclient = MongoClient()
def setstatus(self, status):
pass
def getstatus(self):
pass
| [
"bwade1@nc.rr.com"
] | bwade1@nc.rr.com |
462ac9a85d6bc6fb7b67357293dc32fc8f1a8490 | 0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce | /Python/LeetCode/102_binary_tree_level_order_traversal.py | 9bf5ae9c253e87223e6611d5901e3a0a777bd81d | [] | no_license | shouliang/Development | c56fcc69e658393c138b63b507b96c48232128d5 | b7e3b02c50d54515e584cb18dff83109224245d0 | refs/heads/master | 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,498 | py | '''
二叉树按层次遍历
102. Binary Tree Level Order Traversal:https://leetcode.com/problems/binary-tree-level-order-traversal/
思路: 使用队列这种数据结构:首先根节点进入队列,然后在队列头部弹出节点的同时,将其左右分支依次插入队列的尾部,
直至队列为空
其实这就是图的bfs,但是二叉树就是一种特殊的图
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
result = []
queue = [] # 队列
queue.append(root) # 根节点进入队列
while queue:
cur_level = []
level_size = len(queue)
for _ in range(level_size): # 遍历当前层,处理完当前层,再将当前层的一维数组加入到二维结果中
node = queue.pop(0) # 在队列头部弹出节点的同时,将其左右分支依次append()到队列的尾部
cur_level.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
result.append(cur_level)
return result
class Solution2(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
result = []
queue = [] # 队列
queue.append(root) # 根节点进入队列
while queue:
node = queue.pop(0) # 在队列头部弹出节点的同时,将其左右分支依次append()到队列的尾部
result.append(node.val) # 处理结点,访问其相邻的节点并进入队列
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return result
s = Solution()
root = TreeNode(3)
treeNode1 = TreeNode(9)
treeNode2 = TreeNode(20)
root.left = treeNode1
root.right = treeNode2
treeNode3 = TreeNode(15)
treeNode4 = TreeNode(7)
treeNode2.left = treeNode3
treeNode2.right = treeNode4
ret = s.levelOrder(root)
print(ret)
s2 = Solution2()
ret = s2.levelOrder(root)
print(ret)
| [
"git@git.dxl.cc:node/hunqing.git"
] | git@git.dxl.cc:node/hunqing.git |
3f98b24d75e3a2029400afca25879d0dedd0dc56 | 9048f27ec5bbb3a20c71ba627875c30e6400e191 | /Testes_python-master/Testes_python-master/python_avancado/modulo1/filtrar_sequencia.py | b50119a9b3dd468fd3f28eb9975fc75540226e7f | [] | no_license | DevHosts/WebScraping | 2303aa14ff34103200d7ba9199f434aeb9bb630b | 77fa009625f133e63b4b272f0797b4d56c66f94a | refs/heads/master | 2023-04-22T19:32:02.411902 | 2021-05-03T17:59:00 | 2021-05-03T17:59:00 | 364,006,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py |
lista = [14, 77, 54, 34, -1, 2, 55, -90]
maiores_vinte = [i for i in lista if i>20]
print(maiores_vinte)
| [
"Devhost73@gmail.com"
] | Devhost73@gmail.com |
2b32be7d04f85fb5ae968a2fe4d3fff4f8cc79ea | cb832c44cad9e4dad844ae174aff0cba5bcef481 | /game_client.py | 8fcb36adab288a5ca15054d0cbc6ab34221825d5 | [] | no_license | Jazz0006/battle_ship | 6f77b1f687021a929fcc86ed22824d06646962f9 | 71c3ccc83fc5fd5fbbf7de8ba9a9a29f85be3b17 | refs/heads/main | 2023-08-01T17:46:32.519009 | 2021-10-03T09:43:51 | 2021-10-03T09:43:51 | 407,540,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,733 | py | import game_class
import socket
import os
SERVER_IP = "172.26.59.193"
def clear_screen():
if os.name == "posix":
_ = os.system('clear')
else:
_ = os.system('cls')
if __name__ == "__main__":
clear_screen()
print(" Welcome to the battle ship game client.")
print(" You will be playing with the computer.")
print(" Let's start! \n")
print(f"Connecting to the server at {SERVER_IP}")
print(" You can edit SERVER_IP in this file to change the setting.")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.connect((SERVER_IP, 9587))
except Exception as err:
print("-"*71)
print(f" Error: Cannot connect to host: {SERVER_IP}")
print(err)
print(" Please check if the server app is running.")
print(f" And the IP address {SERVER_IP} is correct.\n\n")
exit(1)
my_game = game_class.GamePlay()
print(""" Symbles:
'O' -- empty space
'H' -- ship
'X' -- attacked ship
'.' -- attacked empty space
""")
my_game.print_board()
is_lost = b'0'
while True:
# Let the player input the attack target
target_xy = my_game.get_hit_target()
# Encode the target
byte_2_send = (str(target_xy[0]) + str(target_xy[1])).encode()
# Send the attacking target in 2 bytes
s.sendall(byte_2_send)
# Receive attack result
rcv_buffer = s.recv(2)
if not rcv_buffer:
print(f" Error: Server's connection has lost.")
print(" This game is ended.")
break
clear_screen()
print(f" This round: you attacked at {target_xy}")
# First byte is gome over or not
# Second byte is hit or not
is_game_over = rcv_buffer[0]
you_hit = rcv_buffer[1]
if you_hit == ord(b'Y'):
print(" You hit the enemy's ship!")
hit_result = 'X'
else:
print(" You missed...")
hit_result = '.'
# Update the game board
my_game.opponent_board.update_opponent_board(target_xy, hit_result)
if is_game_over == ord(b'E'):
print(" Congratualation! You won the game!\n\n")
my_game.print_board()
break
# Receive attack
buffer = s.recv(2)
if not rcv_buffer:
print(f" Error: Server's connection has lost.")
print(" This game is ended.")
break
decode_buffer = buffer.decode()
target_cord = (int(decode_buffer[0]), int(decode_buffer[1]))
# Check attack result
attack_result = my_game.my_board.update_my_board(target_cord)
print(f" \nThe computer attacked you at {target_cord}")
if attack_result:
print(" Your ship was hit")
else:
print(" You are lucky, the enemy just missed.")
# Send back the attack result
if my_game.my_board.is_game_end:
is_lost = b'E'
if attack_result:
is_hit = b'Y'
else:
is_hit = b'N'
buffer_result = is_lost + is_hit
s.sendall(buffer_result)
print("\n After this round, the game board is:")
my_game.print_board()
if is_lost == b'E': # Game is over
print(" Game Over, You lost.\n\n")
break
| [
"jazzzeng@LAPTOP-ASUS.localdomain"
] | jazzzeng@LAPTOP-ASUS.localdomain |
4f320ac5111e3b5a6a7f0c9fe42f733e60673d06 | 77faf3264425f6f7374d04b8726d736eb436fcd8 | /bin/pip3 | 1a5ad3080f7b5f077f7c384a91116094117ba7ce | [] | no_license | julian-garcia/julian-garcia-uk | 366572b52c2a6c9039b737b0d415290dd5e393a0 | e4edcd71d373e3aec53c25f5c1305f8ee2564fe2 | refs/heads/master | 2022-12-16T13:44:19.081842 | 2019-10-22T23:48:21 | 2019-10-22T23:48:21 | 150,034,388 | 0 | 0 | null | 2022-11-22T04:29:38 | 2018-09-23T23:30:13 | Python | UTF-8 | Python | false | false | 253 | #!/Users/Cerberus/projects/julian-garcia-uk/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"julian.garcia.leoni@gmail.com"
] | julian.garcia.leoni@gmail.com | |
a5d743868dd81dffa722d547d60ffe54cf008e98 | dd9face6b4ee2700f34201304e47c1ab8be244cd | /PythonPJ1/PythonPJ1/Info.py | c00339a93b44ef0a8dfd7152d78a136d888491c6 | [] | no_license | ej0703/jeju | 07badd42916f0be47bd188617624cb5fd8f0043d | e1aa20837fbf8e4eb122bfacd8606587f7b13d39 | refs/heads/master | 2021-02-10T13:03:58.485643 | 2020-04-09T07:17:39 | 2020-04-09T07:17:39 | 244,384,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,575 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Info.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets, QtWebEngineWidgets
from PyQt5.QtWidgets import QAbstractItemView, QTableWidgetItem
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QPixmap
import pymysql
import pandas as pd
import matplotlib as plt
import folium
import functools
import io
import sys
class Ui_infoWidget(object):
def setupUi(self, infoWidget):
infoWidget.setObjectName("infoWidget")
infoWidget.resize(1080, 900)
infoWidget.move(350,50)
infoWidget.setStyleSheet("background-color:rgb(255,255,219) ")
#제주도광광지도 label
self.infoLbl = QtWidgets.QLabel(infoWidget)
self.infoLbl.setGeometry(QtCore.QRect(50, 20, 392, 60))
self.infoLbl.setPixmap(QPixmap('제주도관광지도.png'))
self.infoLbl.setObjectName("cbLbl")
#제주도 관광지 검색결과 나타내는 table
self.tableWidget = QtWidgets.QTableWidget(infoWidget)
self.tableWidget.setGeometry(QtCore.QRect(430, 590, 600, 300))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(4)
table_colum_headers = ['분류','이름','전화번호','주소']
self.tableWidget.setHorizontalHeaderLabels(table_colum_headers)
self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tableWidget.setStyleSheet("background-color:rgb(255,255,255) ")
#검색 button
self.searchBtn = QtWidgets.QPushButton(infoWidget)
self.searchBtn.setGeometry(QtCore.QRect(960, 555, 70, 30))
self.searchBtn.setObjectName("searchBtn")
self.searchBtn.setIcon(QtGui.QIcon("검색1.png"))
self.searchBtn.setStyleSheet("background-color:rgb(255,255,244) ")
self.searchBtn.setIconSize(QtCore.QSize(40,25))
self.searchBtn.clicked.connect(self.click_search)
#검색창
self.lineEdit = QtWidgets.QLineEdit(infoWidget)
self.lineEdit.setGeometry(QtCore.QRect(655, 555, 300, 30))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit.setStyleSheet("background-color:rgb(255,255,255) ")
#제주도 관광 map
self.info_webEngineView = QtWebEngineWidgets.QWebEngineView(infoWidget)
self.info_webEngineView.setGeometry(QtCore.QRect(60, 90, 971, 441))
self.info_webEngineView.setUrl(QtCore.QUrl("about:blank"))
self.info_webEngineView.setObjectName("info_webEngineView")
#귤하르방 그림 삽입
self.imageLbl = QtWidgets.QLabel(infoWidget)
self.imageLbl.setGeometry(QtCore.QRect(70, 620, 300, 222))
self.imageLbl.setPixmap(QPixmap('귤하르방.png'))
self.imageLbl.setObjectName("imageLbl")
self.retranslateUi(infoWidget)
QtCore.QMetaObject.connectSlotsByName(infoWidget)
def retranslateUi(self, infoWidget):
_translate = QtCore.QCoreApplication.translate
infoWidget.setWindowTitle(_translate("infoWidget", "제주도 관광 지도"))
def click_search(self):
#DB 연동 -MySQL
conn = pymysql.connect(host='34.64.133.169', user='root', password='1234', db='jeju', charset='utf8')
#Connection 으로부터 Dictoionary Cursor 생성
curs = conn.cursor(pymysql.cursors.DictCursor)
search = self.lineEdit.text()
#SQL문으로 검색(필요하다고 생각한 정보만 검색했음)
sql = "select x, y, category, loc_name, tel, address from jeju where category like '%"+search+"%' or loc_name like '%"+search+"%';"
curs.execute(sql)
#데이타 Fetch
rows = curs.fetchall()
#DB에서 가져온 값을 리스트에 입력 & 딕셔너리로 변경
self.tableWidget.setRowCount(len(rows))
result1 =[]
result2 =[]
result3 =[]
result4 =[]
for row in rows:
result1.append(row['category'])
result2.append(row['loc_name'])
result3.append(row['tel'])
result4.append(row['address'])
dataframe = pd.DataFrame(rows)
dataframe.to_csv("jeju_info.csv",header=None, index=False, encoding ='euc-kr')
info = {
'category' : result1,
'loc_name' : result2,
'tel' : result3,
'address' : result4
}
#검색한 DB table에 나타내기
column_idx_loockup = {'category':0, 'loc_name':1, 'tel':2, 'address':3}
for k, v in info.items():
col = column_idx_loockup[k]
for row, val in enumerate(v):
item = QTableWidgetItem(val)
self.tableWidget.setItem(row, col, item)
self.tableWidget.resizeColumnToContents(1)
#idx_col = 'category' # 인덱스를 종류구분체계 열로 지정
df =pd.read_csv('jeju_info.csv',encoding='euc-kr', names=['x','y','category','loc_name','tel','addr'])
print("---------------------------------------------------------------")
pd.set_option('display.width',None)
pd.set_option('display.max_rows',100)
pd.set_option('display.max_columns',10)
pd.set_option('display.max_colwidth',20)
pd.set_option('display.unicode.east_asian_width',True)
print(df)
# 지도에 위치 표시
jeju_map = folium.Map(location=[33.389398,126.541236], tiles='stamen Terrain', zoom_start=10)
for name, lat, lng in zip(df.loc_name, df.x, df.y):
folium.CircleMarker([lat,lng],
radius=3,
color='brown',
fill=True,
fill_color='coral',
fill_opacity=0.7,
popup=name
).add_to(jeju_map)
# HTML 파일로 저장
jeju_map.save('./jeju_locationEX.html')
# HTML 파일 map으로 나타내기
url = QtCore.QUrl.fromLocalFile('/jeju_locationEX.html')
self.info_webEngineView.load(url)
conn.close()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
infoWidget = QtWidgets.QWidget()
ui = Ui_infoWidget()
ui.setupUi(infoWidget)
infoWidget.show()
sys.exit(app.exec_())
| [
"ej_0703@naver.com"
] | ej_0703@naver.com |
51804225a2de553369dc622a86dea9e9644d416a | b7e0a7b0f2817f624309e4715f2b5fd4f9ed4df7 | /runners/mlcommons_box_singularity/mlcommons_box_singularity/singularity_run.py | e6a773a12610e0410a89284f4be08640c22a890a | [
"Apache-2.0"
] | permissive | vibhatha/mlbox | 50936f5dbb98226a35b134e0e72b3a4b05bda4f3 | f76aa0043175988109d020be42dad5f32003190f | refs/heads/master | 2022-12-29T18:29:12.250664 | 2020-10-16T16:49:29 | 2020-10-16T16:49:29 | 301,155,105 | 3 | 0 | Apache-2.0 | 2020-10-04T14:56:52 | 2020-10-04T14:56:52 | null | UTF-8 | Python | false | false | 2,520 | py | import os
import logging
from mlcommons_box.common import mlbox_metadata
from mlcommons_box.common.utils import Utils
from mlcommons_box_singularity import metadata
logger = logging.getLogger(__name__)
class SingularityRun(object):
def __init__(self, mlbox: mlbox_metadata.MLBox):
"""Singularity Runner.
Args:
mlbox (mlbox_metadata.MLBox): MLBox specification including platform configuration for Singularity.
"""
self.mlbox: mlbox_metadata.MLBox = mlbox
if not isinstance(self.mlbox.platform, metadata.SingularityPlatform):
raise ValueError("Incorrect platform ({})".format(type(self.mlbox.platform)))
def configure(self):
"""Build Singularity Image on a current host."""
# Get full path to a singularity image. By design, we compute it relative to {mlbox.root}/workspace.
image_path: str = os.path.join(self.mlbox.workspace_path, self.mlbox.platform.image)
if os.path.exists(image_path):
logger.info("Image found (%s).", image_path)
return
# Make sure directory to store s. image exists. If paths are like "/opt/...", the call may fail.
os.makedirs(os.path.dirname(image_path), exist_ok=True)
# According to MLBox specs (?), build directory is {mlbox.root}/build that contains all files to build MLBox.
# Singularity recipes are built taking into account that {mlbox.root}/build is the context (build) directory.
recipe_path: str = self.mlbox.build_path
recipe_file: str = os.path.join(recipe_path, 'Singularity.recipe')
if not os.path.exists(recipe_file):
raise RuntimeError(f"Singularity recipe not found: {recipe_file}")
cmd: str = "cd {}; singularity build --fakeroot '{}' 'Singularity.recipe'".format(recipe_path, image_path)
logger.info(cmd)
Utils.run_or_die(cmd)
def run(self):
""" """
# The 'mounts' dictionary maps host path to container path
mounts, args = Utils.container_args(self.mlbox)
print(f"mounts={mounts}, args={args}")
volumes_str = ' '.join(['--bind {}:{}'.format(t[0], t[1]) for t in mounts.items()])
image_path: str = os.path.join(self.mlbox.workspace_path, self.mlbox.platform.image)
# Let's assume singularity containers provide entry point in the right way.
cmd = "singularity run {} {} {}".format(volumes_str, image_path, ' '.join(args))
logger.info(cmd)
Utils.run_or_die(cmd)
| [
"smodeel@redhat.com"
] | smodeel@redhat.com |
3703f80c8a35f44e25ab5acfc87a2c94b2001201 | 876de904572c611b8cbad21f50877cdc812f2946 | /Leetcode/529. 扫雷游戏.py | 3e649e9aaf806904b938e610485bcf270d5df164 | [
"MIT"
] | permissive | QDylan/Learning- | 66a33de0e15f26672fb63c0b393866721def27ae | f09e0aa3de081883b4a7ebfe4d31b5f86f24b64f | refs/heads/master | 2023-02-08T02:34:26.616116 | 2020-12-25T05:02:32 | 2020-12-25T05:02:32 | 263,805,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,152 | py | # -*- coding: utf-8 -*-
"""
@Time : 2020/8/20 10:17
@Author : QDY
@FileName: 529. 扫雷游戏.py
@Software: PyCharm
"""
"""
让我们一起来玩扫雷游戏!
给定一个代表游戏板的二维字符矩阵。'M'代表一个未挖出的地雷,'E'代表一个未挖出的空方块,
'B'代表没有相邻(上,下,左,右,和所有4个对角线)地雷的已挖出的空白方块,
数字('1' 到 '8')表示有多少地雷与这块已挖出的方块相邻,'X'则表示一个已挖出的地雷。
现在给出在所有未挖出的方块中('M'或者'E')的下一个点击位置(行和列索引),根据以下规则,返回相应位置被点击后对应的面板:
如果一个地雷('M')被挖出,游戏就结束了- 把它改为'X'。
如果一个没有相邻地雷的空方块('E')被挖出,修改它为('B'),并且所有和其相邻的未挖出方块都应该被递归地揭露。
如果一个至少与一个地雷相邻的空方块('E')被挖出,修改它为数字('1'到'8'),表示相邻地雷的数量。
如果在此次点击中,若无更多方块可被揭露,则返回面板。
示例 1:
输入:
[['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'M', 'E', 'E'],
['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'E', 'E', 'E']]
Click : [3,0]
输出:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'M', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
解释:
示例 2:
输入:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'M', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
Click : [1,2]
输出:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'X', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
解释:
注意:
输入矩阵的宽和高的范围为 [1,50]。
点击的位置只能是未被挖出的方块 ('M' 或者 'E'),这也意味着面板至少包含一个可点击的方块。
输入面板不会是游戏结束的状态(即有地雷已被挖出)。
简单起见,未提及的规则在这个问题中可被忽略。例如,当游戏结束时你不需要挖出所有地雷,考虑所有你可能赢得游戏或标记方块的情况。
"""
from collections import deque
class Solution:
def updateBoard(self, board, click):
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
h, w = len(board), len(board[0])
def mine_count(x, y):
res = 0
for dx in (-1, 1, 0):
for dy in (-1, 1, 0):
if 0 <= x + dx < h and 0 <= y + dy < w and board[x + dx][y + dy] in ('M', 'X'):
res += 1
return res
def dfs(x, y):
board[x][y] = mine_count(x, y)
if board[x][y] == 0:
board[x][y] = 'B'
for dx in (-1, 1, 0):
for dy in (-1, 1, 0):
nxt_x, nxt_y = x + dx, y + dy
if 0 <= nxt_x < h and 0 <= nxt_y < w and board[nxt_x][nxt_y] == 'E':
dfs(nxt_x, nxt_y)
else:
board[x][y] = str(board[x][y])
# dfs(click[0],click[1])
q = deque([(click[0], click[1])])
while q:
length = len(q)
for i in range(length):
x, y = q.popleft()
board[x][y] = mine_count(x, y)
if board[x][y] == 0:
board[x][y] = 'B'
for dx in (-1, 1, 0):
for dy in (-1, 1, 0):
nxt_x, nxt_y = x + dx, y + dy
if 0 <= nxt_x < h and 0 <= nxt_y < w and board[nxt_x][nxt_y] == 'E':
q.append((nxt_x, nxt_y))
board[nxt_x][nxt_y] = 'B'
else:
board[x][y] = str(board[x][y])
return board
| [
"qdy960411@outlook.com"
] | qdy960411@outlook.com |
a3a216ba5eb2add7fd1e92e28f32ec90873d2f02 | f7d4993c3f9d840b3505e82567e673de00d91afc | /Code/rearrange.py | 95a3ffe7b898c2f242d8bb2a8905229d5b2251a6 | [] | no_license | Andre-Williams22/CS-1.2-Intro-Data-Structures | 026bb08c219ffcb7bafe43d3ea8426f821d6bc5c | a9effc2257a539456688c408ec4ae9e4d4d67e11 | refs/heads/master | 2022-12-10T00:12:31.879273 | 2019-12-12T07:10:33 | 2019-12-12T07:10:33 | 216,670,821 | 0 | 0 | null | 2022-09-23T22:30:43 | 2019-10-21T21:44:10 | Python | UTF-8 | Python | false | false | 1,080 | py | import random
import sys
# a = input('please type a word: ')
# b = input('please type a word: ')
# c = input('please type a word: ')
# d = input('please type a word: ')
# e = input('please type a word: ')
# words = []
# words.append(a)
# words.append(b)
# words.append(c)
# words.append(d)
# words.append(e)
# print ("The list before shuffling is : ", end="")
# for i in range(0, len(words)):
# print(words[i], end=" ")
# print("\r")
# random.shuffle(words)
# print(random.choice(words))
# # Printing list after shuffling
# print ("The list after shuffling is : ", end="")
# for i in range(0, len(words)):
# print (words[i], end=" ")
# print("\r")
def rearrange(words):
result = []
for i in range(len(words)):
word = random.choice(words)
result.append(word)
words.remove(word)
result = result [:-1]
return(result)
def reverse(words):
new_list = words[::-1]
print(new_list)
if __name__ == '__main__':
words = list(sys.argv[1:])
temp = rearrange(words)
print(temp)
print(reverse(temp)) | [
"awjrs22@gmail.com"
] | awjrs22@gmail.com |
18c5902eea4f722190b2bab2698edbd686728315 | d167f7c08122024f23d823aadabcbda3658b9410 | /range_est_fig_mft.py | 2bad4a64a840974f1d751c28845d3b682b26ffea | [] | no_license | hunterakins/coh_mfp | cb0b15659c1c8f7315f9d1dcdcf643848973515c | 27a1c1790cc5a9955eff06a4f4e9b2df6de2fce0 | refs/heads/master | 2023-07-07T14:28:37.512228 | 2021-08-13T20:31:36 | 2021-08-13T20:31:36 | 298,682,183 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,186 | py | import numpy as np
from matplotlib import pyplot as plt
from comparison_plot import get_tracking_spo
from wnc_test import get_cov_time, check_v_arr
from proc_out import SwellProcObj, load_spo
from vel_estimation import load_vel_arr
from scipy.interpolate import interp1d
from copy import deepcopy
from swellex.audio import make_snapshots as ms
vv = ms.get_vv()
"""
Description:
Make a figure that shows the range of maximum correlation
Compares the various methods
Date:
3/8/2021
Author: Hunter Akins
Institution: Scripps Institution of Oceanography, UC San Diego
"""
if __name__ == '__main__':
proj_str = 's5_deep'
#proj_str = 's5_quiet2'
N_fft = 2048
num_snapshots = 36
fact = 8
long_snap = False
if long_snap == True:
N_fft = fact*N_fft
#synth_N_fft = fact*N_fft
synth_N_fft = N_fft
synth_num_snapshots = int(num_snapshots / fact)
num_snapshots = synth_num_snapshots
fig_name = proj_str + '_range_est_color_long_fft.png'
else:
synth_N_fft = fact*N_fft
synth_num_snapshots = int(num_snapshots / fact)
fig_name = proj_str + '_range_est_color.png'
subfolder = str(N_fft)
synth_subfolder = str(synth_N_fft)
num_synth_els = 5
num_tracking_els = num_synth_els
tilt_angle = -1
num_freqs = 13
synth_wn_gain = -.5
wn_gain = -2
cov_times=get_cov_time(proj_str, subfolder, num_snapshots, num_synth_els)
v_arr = load_vel_arr(proj_str, subfolder, num_snapshots)
v_arr = check_v_arr(v_arr, cov_times)
v_interp = interp1d(v_arr[0,:], v_arr[1,:])
synth_cov_times = get_cov_time(proj_str, synth_subfolder, synth_num_snapshots, 5)
synth_v_arr = load_vel_arr(proj_str, synth_subfolder, synth_num_snapshots,num_synth_els)
synth_v_arr = check_v_arr(synth_v_arr, synth_cov_times)
synth_v_interp = interp1d(synth_v_arr[0,:], synth_v_arr[1,:])
root_folder ='pickles/'
cmap = plt.cm.get_cmap('viridis')
#wnc = True
wnc = True
for cov_index in range(cov_times.size):
cov_time = cov_times[cov_index]
v_source = v_interp(cov_time)
if cov_index not in range(cov_times.size - num_tracking_els, cov_times.size):
tracking_spo = get_tracking_spo(root_folder, proj_str, subfolder, num_snapshots, tilt_angle,num_freqs, num_tracking_els, v_interp, cov_index, wnc=True, wn_gain = wn_gain)
tracking_spo.wnc_out -= np.max(tracking_spo.wnc_out)
tracking_spo.bart_out -= np.max(tracking_spo.bart_out)
tracking_spo.get_bathy_corr()
spo = load_spo(root_folder, proj_str, subfolder, num_snapshots, tilt_angle, 1, num_freqs, v_source, cov_time, wn_gain)
spo.get_bathy_corr()
spo.wnc_out -= np.max(spo.wnc_out)
spo.bart_out -= np.max(spo.bart_out)
if cov_index == 0:
tracking_range_vals = np.zeros((cov_times.size-num_tracking_els, tracking_spo.corr_grid_r.size))
simple_range_vals = np.zeros((cov_times.size, spo.corr_grid_r.size))
tracking_depth_vals = np.zeros((cov_times.size-num_tracking_els, tracking_spo.corr_grid_z.size))
simple_depth_vals = np.zeros((cov_times.size, spo.corr_grid_z.size))
if wnc == True:
if cov_index not in range(cov_times.size - num_tracking_els, cov_times.size):
tracking_range_vals[cov_index, :] = np.max(tracking_spo.wnc_out, axis=0)
tracking_depth_vals[cov_index, :] = np.max(tracking_spo.wnc_out, axis=1)
simple_range_vals[cov_index, :] = np.max(spo.wnc_out, axis=0)
simple_depth_vals[cov_index, :] = np.max(spo.wnc_out, axis=1)
else:
if cov_index not in range(cov_times.size - num_tracking_els, cov_times.size):
tracking_range_vals[cov_index, :] = np.max(tracking_spo.bart_out, axis=0)
tracking_depth_vals[cov_index, :] = np.max(tracking_spo.bart_out, axis=1)
simple_range_vals[cov_index, :] = np.max(spo.bart_out, axis=0)
simple_depth_vals[cov_index, :] = np.max(spo.bart_out, axis=1)
""" NOW DO SYNTHETIC """
for cov_index in range(synth_cov_times.size):
cov_time = synth_cov_times[cov_index]
v_source = synth_v_interp(cov_time)
v_source = vv[np.argmin([abs(v_source -x) for x in vv])]
print('v_source', v_source)
synth_spo = load_spo(root_folder, proj_str, synth_subfolder, synth_num_snapshots, tilt_angle, num_synth_els, num_freqs, v_source, cov_time, synth_wn_gain)
synth_spo.get_bathy_corr()
synth_spo.wnc_out -= np.max(synth_spo.wnc_out)
synth_spo.bart_out -= np.max(synth_spo.bart_out)
if cov_index == 0:
synth_range_vals = np.zeros((synth_cov_times.size, synth_spo.corr_grid_r.size))
synth_depth_vals = np.zeros((synth_cov_times.size, synth_spo.corr_grid_z.size))
if wnc == True:
synth_range_vals[cov_index, :] = np.max(synth_spo.wnc_out, axis=0)
synth_depth_vals[cov_index, :] = np.max(synth_spo.wnc_out, axis=1)
else:
synth_range_vals[cov_index, :] = np.max(synth_spo.bart_out, axis=0)
synth_depth_vals[cov_index, :] = np.max(synth_spo.bart_out, axis=1)
fig, axes = plt.subplots(2,3, sharex='col', sharey='row')
tracking_cov_times = cov_times[:-num_tracking_els]
db_min = -15
print(np.min(synth_spo.corr_grid_r), np.max(synth_spo.corr_grid_r))
print(np.min(spo.corr_grid_r), np.max(spo.corr_grid_r))
print(np.min(tracking_spo.corr_grid_r), np.max(tracking_spo.corr_grid_r))
cf = axes[1,2].pcolormesh(synth_cov_times/60, synth_spo.corr_grid_r,synth_range_vals.T, vmin=db_min, vmax=0, cmap=cmap)
cf = axes[1,0].pcolormesh(cov_times/60, spo.corr_grid_r,simple_range_vals.T, vmin=db_min, vmax=0, cmap=cmap)
cf = axes[1,1].pcolormesh(tracking_cov_times/60, spo.corr_grid_r,tracking_range_vals.T, vmin=db_min, vmax=0, cmap=cmap)
cf = axes[0,2].pcolormesh(synth_cov_times/60, synth_spo.corr_grid_z,synth_depth_vals.T, vmin=db_min, vmax=0, cmap=cmap)
cf = axes[0,0].pcolormesh(cov_times/60, spo.corr_grid_z,simple_depth_vals.T, vmin=db_min, vmax=0, cmap=cmap)
cf = axes[0,1].pcolormesh(tracking_cov_times/60, spo.corr_grid_z,tracking_depth_vals.T, vmin=db_min, vmax=0, cmap=cmap)
cb = fig.colorbar(cf, ax=axes.ravel().tolist())
cb.set_label('dB', rotation='horizontal')
cols = ['Traditional', 'MFT', 'Range-coherent']
for i in range(3):
axes[0,i].invert_yaxis()
axes[0,i].set_title(cols[i])
fig.text(0.5, 0.02, 'Event Time (m)', ha='center')
axes[0,0].set_ylabel('Depth (m)')
axes[1,0].set_ylabel('Range (m)')
letters = ['a)', 'b)', 'c)', 'd)', 'e)', 'f)']
i = 0
for ax in axes.ravel().tolist():
if i < 3:
ax.text(40.5, 175, letters[i], color='w', fontsize=15)
else:
ax.text(40.5, 1000, letters[i], color='w', fontsize=15)
i += 1
fig.set_size_inches(8, 4)
plt.savefig('/home/hunter/research/coherent_matched_field/paper/pics/' + fig_name, dpi=500, orientation='landscape')
plt.show()
| [
"yunterakins@gmail.com"
] | yunterakins@gmail.com |
3635e82352eafbe6d125989955af71b35b06e14c | c5d266e4658d7b2656ffaa5c149b95174fef312f | /lib/python2.7/site-packages/django/db/backends/mysql/base.py | 237e3dc3540451b2573649daa6138f6a534dcd0c | [] | no_license | sumitsk20/dp | da444307fad98b2ebc955b980e3f4bbf575b2654 | 697d0ffc6190653ccd9427c02b645e4b429d5c67 | refs/heads/master | 2021-01-24T12:42:00.258970 | 2018-02-27T15:32:36 | 2018-02-27T15:32:36 | 123,146,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,811 | py | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (
version[:3] == (1, 2, 1) and (len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value, conv):
# Remove this function and rely on the default adapter in Django 2.0.
if settings.USE_TZ and timezone.is_aware(value):
warnings.warn(
"The MySQL database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
# This doesn't account for the database connection's timezone,
# which isn't known. (That's why this adapter is deprecated.)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: backend_utils.typecast_time,
FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
datetime.datetime: adapt_datetime_warn_on_aware_datetime,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same).
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior.
self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
_data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BigAutoField': 'bigint AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
@cached_property
def data_types(self):
if self.features.supports_microsecond_precision:
return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
else:
return self._data_types
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
if six.PY2:
kwargs['use_unicode'] = True
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
if self.features.is_sql_auto_is_null_enabled:
with self.cursor() as cursor:
# SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on
# a recently inserted row will return when the field is tested
# for NULL. Disabling this brings this aspect of MySQL in line
# with SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
def create_cursor(self):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
""" % (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection() as cursor:
cursor.execute('SELECT VERSION()')
server_info = cursor.fetchone()[0]
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple(int(x) for x in match.groups())
| [
"sumitsk20@gmail.com"
] | sumitsk20@gmail.com |
d9ff93194785baa0357d2c7fdc383b818f862095 | 65953e264491b1f42ea35497dacd91e43fbc9ac1 | /MyAlarm.py | 86a407bd381495b88ba58463cd016a7c2f039838 | [] | no_license | zaverisid/Virtual-Assistant | cfacbe07df66a9e5fc50e6a0abe2cfcea4a87760 | df8eba78e4110cc6b1795daf0d2999562ea49630 | refs/heads/main | 2023-04-08T03:42:17.869562 | 2021-04-22T11:30:28 | 2021-04-22T11:30:28 | 316,928,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | import datetime
import winsound
from playsound import playsound
def alarm(Timing):
altime = str(datetime.datetime.now().strptime(Timing,"%I:%M %p"))
altime = altime[11:-3]
print(altime)
Horeal = altime[:2]
Horeal = int(Horeal)
Mireal = altime[3:5]
Mireal = int(Mireal)
print(f"Done, the alarm is set for {Timing}")
while True:
if Horeal == datetime.datetime.now().hour:
if Mireal == datetime.datetime.now().minute:
print("alarm is running")
playsound('alarm_tone.mp3', winsound.SND_LOOP)
elif Mireal<datetime.datetime.now().minute:
break
if __name__ == '__main__':
alarm('10:47 PM')
| [
"zaverisid934@gmail.com"
] | zaverisid934@gmail.com |
9fda26a8b43003b04bc9440f05a4b72b185b2763 | 683862a301ccf6bb691bc5973ab648f264c82b2e | /automata/envs/pgv_ex.py | e124feeabc19c8754040045094285d37302df482 | [] | no_license | lucasvolkmer/automata_gym | 6a54f03190b62852f6bdac7e7056d609af5b31d3 | 016121202bb74545ea70479abaac960a9ba10e8e | refs/heads/master | 2022-12-12T14:12:07.235142 | 2020-09-05T00:15:32 | 2020-09-05T00:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 16:44:28 2019
@author: kallil
"""
from pygraphviz import *
A=AGraph()
# set some default node attributes
A.node_attr['style']='filled'
A.node_attr['shape']='circle'
A.node_attr['fixedsize']='true'
A.node_attr['fontcolor']='#FFFFFF'
# make a star in shades of red
for i in range(16):
A.add_edge(0,i)
n=A.get_node(i)
n.attr['fillcolor']="#%2x0000"%(i*16)
n.attr['height']="%s"%(i/16.0+0.5)
n.attr['width']="%s"%(i/16.0+0.5)
print(A.string()) # print to screen
A.write("star.dot") # write to simple.dot
print("Wrote star.dot")
A.draw('star.png',prog="circo") # draw to png using circo
print("Wrote star.png") | [
"kallil_cz_miguel@live.com"
] | kallil_cz_miguel@live.com |
561996514c424b30b04bd3845fcbf55ce9e2e138 | 85cce4d4bf2fcc952176d7ab482d08c88885fe1d | /DIPLOM_DUEL/asgi.py | b0c6496de96e8cdde8eb97cd1bca3acb89b862e2 | [] | no_license | vadim-globuz/DUplicate_Django | 2482669757528f24b38890f7c02e9602ed6211e2 | b6a03603ea07cfa30bd009f94053a66538dda230 | refs/heads/master | 2023-06-06T08:35:05.488626 | 2021-06-16T16:27:30 | 2021-06-16T16:27:30 | 377,560,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for DIPLOM_DUEL project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DIPLOM_DUEL.settings')
application = get_asgi_application()
| [
"vadimglobuz@gmail.com"
] | vadimglobuz@gmail.com |
ec36724db42b5960ee90aa0f972412339ba03c76 | 1244d9eb8194ff87aa03e7686ba626c85291ecf0 | /nearby_locations.py | ea354314e2154dcce7a254e72ac7f6fc30ffc996 | [] | no_license | iamjai-3/code | b0c0ff99294e887454de6bf0707dcb71cb0a512f | 9468d2f65d743c8dd3dc3667839556eb1a3b46ce | refs/heads/master | 2021-05-19T07:00:50.175830 | 2020-05-02T13:49:09 | 2020-05-02T13:49:09 | 251,576,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | import requests, json
import speech_recognition as sr
import re
import googlemaps
import pyttsx3
from stt_conversion import voice
# say nearby hotels , restaurants, petrol bunks etc...
api_key = 'AIzaSyDevDlcD-iiGG4qOs1OE8ZKsi11HTODjtA'
engine = pyttsx3.init()
text = voice()
t = str(text)
print("You said : {}".format(t))
url = "https://maps.googleapis.com/maps/api/place/textsearch/json?"
query = t
r = requests.get(url + 'query=' + query +
'&key=' + api_key)
x = r.json()
y = x['results']
for i in range(len(y)):
print(y[i]['name'])
res = y[i]['name']
engine.say(res)
engine.runAndWait()
| [
"mail2jai1123@gmail.com"
] | mail2jai1123@gmail.com |
4e31ac6553d8a70ebd5fa0489d0c6f937348cb76 | b80a2e4d39c96212455746c64de35709a289d117 | /Classes practice pt 2.py | dc1ab5b539d2c12dcb780ca5618988b314bbe255 | [] | no_license | NAKO41/Classes | 3b5797c14c323f893adafa75c63aea7a458279a0 | 5105ddd64106f6944d53160377876a104d8f04f4 | refs/heads/master | 2020-03-07T06:47:31.548812 | 2018-03-29T18:31:49 | 2018-03-29T18:31:49 | 127,331,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | #this file is more focused on movements and starting objects at diffirent positions
class MDart():
def __init__(self, x = 0, y = 0):
self.x = x
self.y = y
def move_up (self):
self.y += 1
def move_Dart(self, x_increment = 0, y_increment = 1):#the increments act as a default movement unless specified otherwise
self.x += x_increment
self.y += y_increment
#this will make a list of Magic darts at diffirent starting positions
M_Darts = []
M_Darts.append(MDart())
M_Darts.append(MDart(10,40))
M_Darts.append(MDart(50,50))
#print the darts in their starting postions
for i in range(len(M_Darts)):
print("dart", str(int(i+1)), "starting location is", M_Darts[i].x, M_Darts[i].y)
#this will move each dart into a new position
M_Darts[0].move_Dart()#since no movements are spcified, the default movement will take place
M_Darts[1].move_Dart(10,10)
M_Darts[2].move_Dart(-10,0)
#print the new positions of the darts
for i in range(len(M_Darts)):
print("dart", str(int(i+1)), "new location is", M_Darts[i].x, M_Darts[i].y)
| [
"nspro1@ocdsb.ca"
] | nspro1@ocdsb.ca |
85d6d96659e6ab8df9179e891d05df56649e2e6d | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N431_Encode_Nary_Tree_To_Binary_Tree.py | 29be7e11dec99008b385e8fc593469702e866409 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | """
For any node in the N_ary tree:
his first child to Binary Tree's left child
all other children will be this first child(left child of BT)'s right child.
"""
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def encode(self, root):
"""Encodes an n-ary tree to a binary tree.
:type root: Node
:rtype: TreeNode
"""
if not root:
return
t = TreeNode(root.val)
if root.children:
t.left = self.encode(root.children[0])
cur = t.left
for node in root.children[1:]:
cur.right = self.encode(node)
cur = cur.right
return t
def decode(self, data):
"""Decodes your binary tree to an n-ary tree.
:type data: TreeNode
:rtype: Node
"""
if not data:
return
root = Node(data.val, [])
cur = data.left
while cur:
root.children.append(self.decode(cur))
cur = cur.right
return root | [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
a6fb074a131298c7b8b149ddd1e2a01e9d30fc87 | 67cb00b4bb4ce5d1c7bb04dd316e3f7054eedf22 | /main/admin.py | c9fe2f4a8c49e7b8459c8a746e9d85fa95a46c7d | [] | no_license | AnvarAka1/Secret | c7854c50c45b0394df077d97cd65d1a178ca5d15 | 1eaa71ce876b6e4b32dcc70e18d60d8887f62151 | refs/heads/master | 2020-03-26T16:28:28.759429 | 2018-08-17T17:54:58 | 2018-08-17T17:54:58 | 145,104,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from django.contrib import admin
from .models import *
class TagsAdmin(admin.ModelAdmin):
list_display = ['title', 'description']
class GalleryInline(admin.StackedInline):
model = Gallery
fieldsets = (
(None, {
'fields': ('image',),
}),
)
class PostAdmin(admin.ModelAdmin):
list_display = ['title',]
inlines = [
GalleryInline,
]
admin.site.register(Tags, TagsAdmin)
admin.site.register(Post, PostAdmin)
| [
"the.phenomenon@mail.ru"
] | the.phenomenon@mail.ru |
057b9ad78e1f2ca8f9418d74459fd6bfe1f61eca | 4028c67e87492b607d9a31e6bae90f7247c784ea | /tryten/manage.py | 8a7734d28d82f043238c742a9172f5c8730575c5 | [] | no_license | fujs-uci/Dango_eCommerce_Website | 6e3fe0b22c1807168b1db1bbbea3b9fc475212a6 | b01daee8250528656c25dee26a740655f9904cb4 | refs/heads/master | 2020-05-02T07:42:14.041095 | 2019-03-27T21:44:10 | 2019-03-27T21:44:10 | 177,824,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tryten.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"noreply@github.com"
] | fujs-uci.noreply@github.com |
22bd76b2c564cf07060dd05faeb177fbc42e493a | d6deb77f19395697c804ad9e2f4446c5e3643fab | /scanBAM_peptide.py | f0d6d7a2ddb335d4da9f54ae259e43f6c9143d6d | [] | no_license | hussius/scanBAM | 447bf099ebffe6c4f909d6c38c6ce4baf7510baf | 6081cd1745b6c9c1bb96fe154e0d27901505fa51 | refs/heads/master | 2021-01-10T10:13:31.834291 | 2016-12-08T10:08:54 | 2016-12-08T10:08:54 | 47,552,935 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,692 | py | from pysam import *
import sys
from pyfaidx import Fasta
def has_mismatch_in_interval(reference, bamfile, chrom, start, end):
"""
Return whether there is a mismatch in the interval (start, end) in any read mapping to the given chromosome.
reference -- an IndexedFasta object
"""
for column in bamfile.pileup(chrom, start, end):
refbase = reference[chrom][column.pos:column.pos+1] #.decode()
for piledup in column.pileups:
if piledup.indel != 0: # Insertion is positive; deletion is negative
continue
querybase = piledup.alignment.query_sequence[piledup.query_position]
if refbase != querybase:
# Mismatch
return True
return False
def reads_with_mismatch_in_interval(reference, bamfile, chrom, start, end):
"""
Return reads with at least one mismatch in the interval (start, end) on the given chromosome.
reference -- an IndexedFasta object
"""
mreads = []
for column in bamfile.pileup(chrom, start, end):
refbase = reference[chrom][column.pos:column.pos+1] #.decode()
for piledup in column.pileups:
if piledup.indel != 0: # Insertion is positive; deletion is negative
continue
querybase = piledup.alignment.query_sequence[piledup.query_position]
if refbase != querybase:
# Mismatch
mreads.append(piledup.alignment.query_name)
return mreads
def parse_coords(cstr):
return(cstr.split('_')[:-1])
def is_in_interval(coord, interval_start, interval_end):
return ( ( int(coord) >= int(interval_start)) and (int(coord) <= int(interval_end) ) )
def get_overlap(s1, e1, s2, e2):
"""
Get the coordinates of the overlap between two intervals
"""
if s1 > e2 or e1 < s2: return None
if s1 <= s2 and e1 <= e2: return (s2, e1) # Will also work for s1 == s2 and e1 == e2
if s1 <= s2 and e1 >= e2: return (s2, e2) # Alignment enclosed in peptide
if s1 >= s2 and e1 <= e2: return (s1, e1) # Peptide enclosed in alignment
if s1 >= s2 and e1 >= e2: return (s1, e2)
sys.exit('Check your numbers')
def get_peptide_segments(pep_info, p, suspected_format):
    """
    In some cases, peptides come from spliced segments. In these cases we need to
    infer the peptide coding regions from the tsv file.

    pep_info -- dict of dicts keyed by peptide sequence ('start', 'end',
                'chrom', 'pepcoord', ...), as built from the input TSV
    p -- the peptide sequence used as key into pep_info
    suspected_format -- chromosome naming convention of the BAM file
                        ("UCSC" means 'chr'-prefixed names)

    Returns a list of (chromosome, start, end) tuples: one tuple for an
    unspliced peptide, two for a peptide spanning a splice junction.
    """
    start = pep_info['start'][p]
    end = pep_info['end'][p]
    chr = pep_info['chrom'][p]
    pepcoor = pep_info['pepcoord'][p]
    # parse_coords() splits "chr6_31620200_31620244_-" into its components
    # (defined earlier in this file).
    pep_coord = parse_coords(pepcoor)
    #print(pep_info['chrom'][p])
    #print(pepcoor)
    assert(pep_coord[0]==chr)
    pep_chr = pep_coord[0]
    # Harmonize the chromosome name with the BAM file's convention.
    # CASE 1: BAM file in UCSC, TSV in ENSEMBL
    if suspected_format=="UCSC":
        if not pep_chr.startswith("chr"): pep_chr = 'chr' + pep_chr
        if pep_chr == "chrMT": pep_chr = "chrM"
    # CASE 2: BAM file in Ensembl format, TSV file in UCSC
    else:
        if pep_chr.startswith("chr"): pep_chr = pep_chr[3:]
        if pep_chr == "M": pep_chr = "MT"
    # Multiple exon starts are ';'-separated in the TSV; a single entry
    # means the transcript region is unspliced.
    if len(start.split(';')) == 1: # Not spliced
        return([(pep_chr, int(pep_coord[1]), int(pep_coord[2]) )])
    # If spliced
    estarts = start.split(';')
    eends = end.split(';')
    #print(chr)
    #print(pep_coord)
    # Find which exon contains the peptide's start and end coordinates.
    pep_start_coord_exon = -1
    pep_end_coord_exon = -1
    for i in range(0, len(estarts)):
        assert(int(estarts[i]) < int(eends[i]))
        if is_in_interval(pep_coord[1], estarts[i], eends[i]): pep_start_coord_exon = i
        if is_in_interval(pep_coord[2], estarts[i], eends[i]): pep_end_coord_exon = i
    # For peptides where start or end cannot be assigned to an exon, assume it belongs to the same one as the end that can be assigned.
    # This is a rare case
    #print('Peptide coordinates:', pep_coord)
    #print('Inferred start exon: ' + str(pep_start_coord_exon) + ' (' + str(estarts[pep_start_coord_exon]) + '-' + str(eends[pep_start_coord_exon]))
    #print('Inferred end exon: ' + str(pep_end_coord_exon) + ' (' + str(estarts[pep_end_coord_exon]) + '-' + str(eends[pep_end_coord_exon]))
    #input()
    if pep_start_coord_exon == -1 or pep_end_coord_exon == -1:
        if pep_start_coord_exon == -1: pep_start_coord_exon = pep_end_coord_exon
        if pep_end_coord_exon == -1: pep_end_coord_exon = pep_start_coord_exon
    if pep_start_coord_exon == pep_end_coord_exon:
        # Peptide contained within one exon. Can print out the original coordinates.
        return([(pep_chr, int(pep_coord[1]), int(pep_coord[2]) )])
    else:
        # Need to stitch together the regions from annotation.
        # First entry, for the starting exon
        # Chromosome, peptide start coord, end of the starting exon, etc
        entries = []
        entries.append( (pep_chr, int(pep_coord[1]), int(eends[pep_start_coord_exon])) )
        # Second entry, for the ending exon
        # NOTE(review): any exons strictly between the start and end exon are
        # not emitted — confirm peptides never span more than one junction.
        entries.append( (pep_chr, int(estarts[pep_end_coord_exon]), int(pep_coord[2])) )
        return(entries)
def aln_has_mismatch_in_interval(reference, bamformat, aln, chrom, start, end):
    """
    Check if an (unspliced) alignment has at least one mismatch in the interval (start, end) on the given chromosome.

    reference -- an IndexedFasta object
    bamformat -- 'UCSC' or 'ENS'; UCSC chromosome names are converted to
                 Ensembl style before indexing into *reference*
    aln -- a pysam aligned segment overlapping the interval
    chrom, start, end -- the peptide interval to check

    Returns True when a mismatch (or an indel, detected via a length
    difference) falls inside the interval, False otherwise.
    """
    qseq = ''
    rseq = ''
    dbg = False
    if bamformat=="UCSC": # Need to give chromosome name that fits with ENSEMBL for now
        chrom = chrom[3:]
        if chrom == "M": chrom = "MT"
    if (end-start) > 1000:
        # NOTE(review): input() here pauses the whole pipeline waiting for
        # a keypress — presumably leftover interactive debugging; confirm.
        print('Long peptide')
        print(str(start) + ' ' + str(end) + ' ' + str(end-start))
        print('Reference sequence of peptide:\t' + str(reference[chrom][start:end]))
        input()
    # No overlap between alignment and interval: nothing to check.
    if (aln.get_overlap(start, end)) == 0:
        return False
    if dbg:
        pass
        #print('Alignment reference start:\t' + str(aln.reference_start))
        #print('Alignment reference end:\t' + str(aln.reference_end))
        #print('Peptide chromosome:\t' + chrom)
        #print('Peptide start:\t' + str(start))
        #print('Peptide end:\t' + str(end))
        #print(aln)
        #print('Sequence of whole alignment :\t' + aln.query_alignment_sequence)
        #print('Reference sequence of alignment:\t' + str(reference[chrom][aln.reference_start:aln.reference_end]))
        #print('Reference sequence of peptide:\t' + str(reference[chrom][start:end]))
    # Extract the query (qseq) and reference (rseq) sub-sequences for the
    # part of the alignment overlapping the peptide; four overlap cases.
    if start >= aln.reference_start and end <= aln.reference_end:
        if dbg: sys.stderr.write('Case 1: peptide contained within aligned segment\n')
        # CASE 1: Peptide contained within aligned segment
        # Peptide                   start ------ end
        # Alignment aln.reference_start ----------------------- aln.reference_end
        ds = start - aln.reference_start # Offset on aligned sequence, from its start
        de = aln.reference_end - end # Offest on aligned sequence, from its end
        qseq = aln.query_alignment_sequence[ds:(aln.query_alignment_end-aln.query_alignment_start-de)]
        rseq = reference[chrom][(aln.reference_start+ds):(aln.reference_end-de)]
    elif start < aln.reference_start:
        # Peptide starts before aligned segment
        if end <= aln.reference_end:
            if dbg: sys.stderr.write('Case 2: peptide left-overlapping aligned segment\n')
            # CASE 2: Peptide left-overlapping aligned segment
            # Peptide   start ---------- end
            # Alignment     aln.reference_start ---------------- aln.reference_end
            ds = end - aln.reference_start
            # Offset on aligned sequence, from the start
            qseq = aln.query_alignment_sequence[:ds]
            rseq = reference[chrom][aln.reference_start:(aln.reference_start+ds)]
        else:
            if dbg: sys.stderr.write('Case 3: alignment contained within peptide\n')
            # CASE 3: (rare) alignment contained within peptide. start < ref start and end > ref end
            # Peptide   start --------------------------------- end
            # Alignment     aln.reference_start --------------------- aln.reference_end
            qseq = aln.query_alignment_sequence
            rseq = reference[chrom][aln.reference_start:aln.reference_end]
    else: # start >= ref start and end >= ref end
        # CASE 4: Peptide right-overlapping aligned segment
        # Peptide                          start ---------------- end
        # Alignment aln.reference_start --------------------- aln.reference_end
        if dbg: sys.stderr.write('Case 4: peptide right-overlapping aligned segment\n')
        assert start >= aln.reference_start and end > aln.reference_end
        de = aln.reference_end - start
        qseq = aln.query_alignment_sequence[(aln.query_alignment_end-aln.query_alignment_start-de):(aln.query_alignment_end-aln.query_alignment_start)]
        rseq = reference[chrom][(aln.reference_end-de):(aln.reference_end)]
    if dbg: sys.stderr.write(str(qseq) + '\n')
    if dbg: sys.stderr.write(str(rseq) + '\n')
    if len(qseq) != len(rseq):
        # Should happen for insertions and deletions only
        debug_file.write('Indel: ' + aln.cigarstring + '\n')
        return True
    if qseq == '' or rseq == '':
        sys.exit('Could not extract sequence.')
    # Case-insensitive base-by-base comparison; log details on the first
    # mismatch and report failure.
    for i in range(0, len(qseq)):
        if str(qseq[i]).upper() != str(rseq[i]).upper():
            debug_file.write('Mismatch: ' + '\n' + str(qseq) + '\n' + str(rseq) + '\n')
            # More details
            debug_file.write("Start of peptide: " + '\t' + str(start) + '\n')
            debug_file.write("End of peptide: " + '\t' + str(end) + '\n')
            debug_file.write("Start of alignment: " + '\t' + str(aln.reference_start) + '\n')
            debug_file.write("End of alignment: " + '\t' + str(aln.reference_end) + '\n')
            debug_file.write(str(aln))
            return True
    return False
def mismatches_ok(aln, max_mismatches=1):
    """Determine if an alignment has few enough mismatches to pass.

    Looks for the 'nM' tag first (STAR), then 'NM' (BWA/others).
    Returns True/False from the comparison, or -1 (truthy!) when neither
    tag is present on the alignment.
    """
    for tag_name in ('nM', 'NM'):
        try:
            n_mismatches = aln.get_tag(tag_name)
        except Exception:
            continue
        return n_mismatches <= max_mismatches
    return -1  # Could not find a tag for number of mismatches
def pairing_ok(aln):
    """Check the pairing status of an alignment.

    Single-end reads pass automatically; paired-end reads pass only when
    they are properly paired.
    """
    if aln.is_paired and not aln.is_proper_pair:
        return False
    return True
def multimapping_ok(aln, max_loci=1):
    """Check whether a read maps to at most *max_loci* loci.

    Prefers the 'NH' tag (number of hits); falls back to 'XT'
    (XT:A:U means unique, XT:A:R means repeat, apparently). Returns
    True/False, or -1 (truthy!) when neither tag can be found.
    """
    try:
        return aln.get_tag('NH') <= max_loci
    except Exception:
        pass
    try:
        return aln.get_tag('XT') != 'R'
    except Exception:
        return -1  # Could not find a tag for number of loci
# Identify alignments that pass all the criteria defined above
def find_nice_alns(region, bamfile, max_mismatches=1):
    """Return alignments overlapping *region* that pass all quality filters.

    region -- a (chromosome, start, end) tuple
    bamfile -- an open, indexed pysam AlignmentFile
    max_mismatches -- maximum mismatches allowed per alignment; now actually
                      forwarded to mismatches_ok() (it was silently ignored
                      before, so only the default of 1 was ever applied)
    """
    good_alns = []
    try:
        iter = bamfile.fetch(region[0], region[1], region[2])
    except Exception:
        # Fix: the original concatenated the AlignmentFile object itself into
        # the message ('...' + bamfile), which raises a TypeError and masked
        # the real error; use its filename instead.
        sys.exit("Region " + region[0] + ' ' + str(region[1]) + ' ' + str(region[2]) +
                 '\nBAM file ' + str(getattr(bamfile, 'filename', bamfile)) +
                 '\nMake sure that you have an indexed BAM file!')
    for x in iter:
        if mismatches_ok(x, max_mismatches) and pairing_ok(x) and multimapping_ok(x):
            good_alns.append(x)
    return good_alns
def find_fusion_alns(regions, bamfile):
    """
    Try to find alignments corresponding to a fusion event.
    Right now just deals with 2 regions.

    An alignment supports the fusion when it maps inside regions[0] and its
    mate maps inside regions[1], and it passes the standard quality filters
    (mismatches_ok / pairing_ok / multimapping_ok).
    """
    fus_alns = []
    chrom1, start1, end1 = regions[0]
    try:
        chrom2, start2, end2 = regions[1]
    except:
        # Fewer than two regions supplied — cannot look for a fusion partner.
        print("Abnormal exit")
        print(regions)
        sys.exit(0)
    #print('Partner should be at:' + str(chrom2) + ':' + str(start2) + '-' + str(end2))
    iter = bamfile.fetch(chrom1, start1, end1)
    for x in iter:
        # Read itself on chrom1, mate on chrom2 ...
        if bamfile.getrname(x.reference_id) == chrom1 and bamfile.getrname(x.next_reference_id) == chrom2:
            # ... and the mate's start falls inside the partner region.
            if (x.next_reference_start >= start2 and x.next_reference_start <= end2):
                if mismatches_ok(x) and pairing_ok(x) and multimapping_ok(x):
                    fus_alns.append(x)
    return(fus_alns)
def compare_seqs(aln, seg, reference, chrformat, chrom):
    """Count mismatches between the aligned read and the reference within
    the peptide segment *seg*, walking the CIGAR string.

    NOTE(review): the unconditional `return False` on the first line
    disables this function entirely — everything below it is unreachable
    dead code (mismatch_in_spliced() appears to be the live replacement).
    Kept as-is to preserve behavior; confirm before deleting.
    """
    return False
    if chrformat == 'UCSC':
        chrom = chrom[3:]
        if chrom == 'M': chrom = 'MT'
    mm = 0
    # Step through alignment entries
    ct = aln.cigartuples
    curr_loc = aln.reference_start
    offset_in_read = 0
    for tup in ct:
        if tup[0] == 0: # Match
            aln_seg_start = curr_loc
            aln_seg_end = curr_loc + tup[1]
            ol = get_overlap(int(seg[1]),int(seg[2]),aln_seg_start,aln_seg_end)
            if ol:
                print('Peptide: ' + str(seg[1]) + '-' + str(seg[2]))
                print('Aln: ' + str(aln_seg_start) + '-' + str(aln_seg_end))
                overlap_length = ol[1] - ol[0] + 1
                overlap_offset = ol[0] - curr_loc
                print('Overlap length: ' + str(overlap_length))
                print('Overlap: ')
                print(ol)
                qseq = aln.query_sequence[(aln.query_alignment_start+offset_in_read+overlap_offset):(aln.query_alignment_start+offset_in_read+overlap_offset+overlap_length)]
                rseq = reference[chrom][ol[0]:(ol[1]+1)]
                print(rseq)
                print(aln.query_sequence)
                assert( overlap_offset >= 0)
                for i in range(0, len(qseq)): # It can happen that len(rseq) > len(qseq) if we are at the end of the read, but that's ok! We are only interested in mismatches in qseq w r t rseq
                    if str(qseq[i]) != str(rseq[i]):
                        mm += 1
                        print('Mismatch in spliced segment: ' + '\n' + str(qseq) + '\n' + str(rseq) + '\n')
                        #input()
                        debug_file.write('Mismatch in spliced segment: ' + '\n' + str(qseq) + '\n' + str(rseq) + '\n')
        offset_in_read += tup[1] # Keep track of location in read
        curr_loc += tup[1] # Keep track of location on reference
    return(mm)
def mismatch_in_spliced(aln, pcoords, reference, suspected_bamchromformat):
    """Count mismatches between a (possibly spliced) alignment and the
    reference within the peptide segment *pcoords*.

    Walks the CIGAR tuples: for each match segment (op 0) that overlaps
    the peptide interval, compares the aligned bases against the reference
    case-insensitively. Soft-clips (op 4) advance only the read offset;
    every other operation advances the reference position.

    pcoords -- (chromosome, start, end) of the peptide segment
    Returns the number of mismatching bases found (0 means a clean overlap).
    """
    #print(aln)
    #print(pcoords)
    mm = 0
    chrom, pstart, pend = pcoords
    if suspected_bamchromformat == 'UCSC':
        chrom = chrom[3:]
        if chrom == 'M': chrom = 'MT'
    whole_qseq = aln.query_sequence
    #print(pcoords)
    rseq = reference[chrom][pstart:pend]
    #print('============')
    #print('Whole query sequence: ' + str(aln.query_sequence))
    #print('Peptide-corresponding reference sequence: ' + str(rseq))
    curr_loc = aln.reference_start
    offset_in_read = 0
    ct = aln.cigartuples
    for tup in ct:
        #print('CIGAR string:', aln.cigarstring, str(tup))
        if tup[0]==0: # match
            aln_seg_start = curr_loc
            aln_seg_end = curr_loc + tup[1]
            sequence_of_seg = aln.query_sequence[offset_in_read:offset_in_read+tup[1]]
            #print('Sequence of this segment: ' + ' '*offset_in_read + sequence_of_seg )
            ol = get_overlap(pstart, pend, aln_seg_start,aln_seg_end)
            #print('Peptide coordinates: ' + str(pstart) + '-' + str(pend))
            #print('Aligned coordinates: ' + str(aln_seg_start) + '-' + str(aln_seg_end))
            if ol:
                overlap_length = ol[1] - ol[0]
                overlap_offset_aln = ol[0] - curr_loc
                #print('Overlap: ')
                #print(ol)
                #print('Overlap offset for alignment: ' + str(overlap_offset_aln))
                #print('Overlap offset for peptide: ' + str(overlap_offset_pep))
                #print('Overlap length: ' + str(overlap_length))
                aln_bit = aln.query_sequence[offset_in_read+overlap_offset_aln:(offset_in_read+overlap_offset_aln+overlap_length)]
                try:
                    pep_rseq = reference[chrom][ol[0]:ol[1]]
                    #sys.stderr.write('aligned bit: ' + aln_bit + '\n')
                    #sys.stderr.write('matching peptide seq: ' + str(pep_rseq) + '\n')
                    for i in range(0, len(aln_bit)):
                        if aln_bit[i].lower() != str(pep_rseq[i]).lower():
                            mm += 1
                            #print('Mismatch in spliced segment')
                            # print('aligned bit: ' + aln_bit)
                            # print('matching peptide seq: ' + str(pep_rseq))
                            #print(ol)
                            #input()
                # NOTE(review): this bare except swallows any error during the
                # comparison (e.g. an index error), not just "no overlap".
                except:
                    pass # No overlap
            else:
                pass
                #print('No overlap')
                #input()
            offset_in_read += tup[1]
        elif tup[0]==4:
            offset_in_read += tup[1]
        if tup[0] != 4: curr_loc += tup[1]
    return mm
####
#
# Check input arguments
if len(sys.argv) < 4:
    sys.exit("python analyse_sam.py <peptide file> <indexed FASTA file> <BAM files>")
aln_table = {} # A dictionary that will contain, for each BAM file, a dictionary of peptide alignment counts.
debug_file = open("debug.log","w")
# Start by parsing the TSV file
peptides = []
tsv_info = {'score':{}, 'psmcount':{}, 'txtype':{}, 'chrom':{},'start':{},'end':{}, 'pepcoord':{}}
with open(sys.argv[1]) as tsv:
    tsv.readline() # Skip header
    for line in tsv:
        # NOTE(review): on a malformed line the except only prints it, then
        # the code below runs with the *previous* line's variables (or raises
        # NameError on the first line) — confirm malformed lines cannot occur.
        try:
            [pepseq, pepcoord, annotation, chrom, strand, start, stop, msgf_score, psm_count] = line.rsplit('\t')
        except:
            print(line.strip())
        peptides.append(pepseq) # Peptide sequence
        tsv_info['score'][pepseq] = msgf_score # MSGF score
        tsv_info['psmcount'][pepseq] = psm_count # PSM count
        # Type of transcript annotated for peptide regions
        if annotation[0:3] == 'lnc':
            tsv_info['txtype'][pepseq]='lnc'
        elif annotation[0:3] == 'PGO':
            tsv_info['txtype'][pepseq]='pg'
        elif annotation[0:3] == "Fus":
            tsv_info['txtype'][pepseq]='fus'
        else:
            tsv_info['txtype'][pepseq]='nov'
        tsv_info['chrom'][pepseq] = chrom # The chromosome for the annotated transcript
        tsv_info['start'][pepseq] = start # Start coordinate of annotated transcript
        tsv_info['end'][pepseq] = stop # End coordinate of annotated transcript
        tsv_info['pepcoord'][pepseq] = pepcoord # Coordinates for the peptide, e g chr6_31620200_31620244_-
# Read indexed FASTA reference file.
refFasta = Fasta(sys.argv[2])
# Go through all the BAM files.
# For each BAM file, count for every peptide the alignments that overlap the
# peptide locus with no mismatches inside the peptide region.
for bam in sys.argv[3:]:
    sys.stderr.write(bam + '\n')
    aln_count = {} # A dictionary that will collect 'good alignment' counts by peptide.
    bamfile = AlignmentFile(bam,"rb")
    # Sniff the chromosome naming convention from the BAM header references.
    suspected_bamchromformat = 'ENS' # 'ENS' or 'UCSC'
    for ref in bamfile.references:
        if ref.startswith('chr'): suspected_bamchromformat = 'UCSC'
    max_mismatches = 1 # Refers to the maximum allowed number of mismatched *for the whole RNA alignment* (no mismatched are allowed in the peptide-overlapping region)
    for p in peptides:
        # Coordinates for the actual peptide locus or loci (in the case of spliced peptides). (example of coordinate format: chr6_29894499_29894540_+)
        # If the peptides are spliced the coordinates of the segments must be inferred from the transcript annotation
        pcoords = get_peptide_segments(tsv_info, p, suspected_bamchromformat)
        # It is for these regions that we want to find overlapping alignments.
        passed_alns = set()
        n_failed_due_to_mismatch = 0
        for r in pcoords: # For each peptide segment (usually 1)
            # Find alignments overlapping this segment that have a maximal number of mismatches, are not multimapping and paired if paired-end seq is used
            good_alns = find_nice_alns(r, bamfile)
            #sys.stderr.write('Found ' + str(len(good_alns)) + ' good alignments' + '\n')
            spliced = 0
            unspliced = 0
            not_overlapping_peptide = 0
            for a in good_alns:
                # Check if spliced alignment
                if 'N' in a.cigarstring:
                    spliced += 1
                    # If spliced, find out the aligned bits
                    ct = a.cigartuples
                    curr_loc = a.reference_start
                    aln_starts = []
                    aln_ends = []
                    for tup in ct:
                        if tup[0] == 0:
                            aln_starts.append(curr_loc)
                            aln_ends.append(curr_loc + tup[1])
                        # NOTE(review): curr_loc advances for every CIGAR op,
                        # including insertions/soft-clips which do not consume
                        # reference — confirm this is intended.
                        curr_loc += tup[1]
                    # If there is any segment that overlaps the peptide without mismatches, accept it.
                    matching_overlap = False
                    overlap = False
                    for e in zip(aln_starts, aln_ends):
                        ol = get_overlap(r[1], r[2], e[0], e[1])
                        if ol:
                            overlap = True
                            # Check if they have mismatches
                            mm = mismatch_in_spliced(a, r, refFasta, suspected_bamchromformat)
                            if mm == 0:
                                matching_overlap = True
                    if matching_overlap:
                        passed_alns.add(a)
                    elif overlap:
                        n_failed_due_to_mismatch += 1
                    else:
                        not_overlapping_peptide += 1
                # Not spliced alignment - just make sure there is no mismatch in the peptide region
                else:
                    unspliced += 1
                    if aln_has_mismatch_in_interval(refFasta, suspected_bamchromformat, a, r[0], int(r[1]), int(r[2])):
                        debug_file.write('Peptide mismatch ' + a.qname + ' ' + str(r[1]) + ' ' + str(r[2]) + '\n')
                        n_failed_due_to_mismatch += 1
                    else:
                        passed_alns.add(a)
        aln_count[p] = len(passed_alns)
        #print('Passed through ' + str(aln_count[p]) + ' alignments in total')
        #print(str(n_failed_due_to_mismatch) + ' rejected due to mismatch')
        #print('Spliced: ' + str(spliced))
        #print('Of these, ' + str(not_overlapping_peptide) + ' did not overlap the peptide segment')
        #print('Unspliced: ' + str(unspliced))
        debug_file.write('BAM file: ' + bam + ' Peptide locus ' + pcoords[0][0] + '-' + str(pcoords[0][1]) + str(pcoords[0][2]) + '\n')
        debug_file.write('Passed through ' + str(len(passed_alns)) + ' alignments. Found ' + str(n_failed_due_to_mismatch) + ' alignments with mismatches in peptide locus' + '\n#####################\n')
    aln_table[bam]=aln_count
# Emit a TSV: one header row of BAM-file basenames, then one row per peptide
# with its good-alignment count in each BAM file.
bam = sorted(list(aln_table.keys()))
# Write output file header.
#sys.stdout.write('sequence\ttxtype\tscore\tpsmcount\t')
sys.stdout.write('sequence\t')
for i in range(0,len(bam)):
    sys.stdout.write(bam[i].split('.')[0].split('/')[-1])
    if (i < (len(bam)-1)): sys.stdout.write('\t')
    else: sys.stdout.write('\n')
# And the counts.
for pep in sorted(aln_table[bam[0]].keys()):
    #sys.stdout.write(pep + '\t' + tsv_info['txtype'][pep] + '\t' + tsv_info['score'][pep] + '\t' + tsv_info['psmcount'][pep] + '\t')
    sys.stdout.write(pep + '\t')
    for i in range(0,len(bam)):
        sys.stdout.write(str(aln_table[bam[i]][pep]))
        if (i < (len(bam)-1)): sys.stdout.write('\t')
    sys.stdout.write('\n')
| [
"mikael.huss@gmail.com"
] | mikael.huss@gmail.com |
002d0c2c589e6c7482fd92e33dc1850f82704abf | 29d1c4ed8fc2e5c0eb5bd453ecedd1f8f0672416 | /chapter_11/cmdLineEmailer.py | 9ac701f7f78bf0b1c36c7e20e6871a19784368d9 | [] | no_license | aaronfox/AutomateTheBoringStuff | 01a5bbd8c0d3fccccad4899eea5cda9a03bfd07f | 1c7fd69cc96c6cbd0d90b29af7f6881406ded76a | refs/heads/master | 2021-01-19T20:26:52.853082 | 2017-03-14T00:43:32 | 2017-03-14T00:43:32 | 83,754,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #! python3
#
# cmdLineEmailer.py - takes an email address and a string of text and emails it
# using yahoo's mailing client
import sys, time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
##from selenium.webdriver.support.ui import WebDriverWait
##from selenium.webdriver.support import expected_conditions as EC
##from selenium.webdriver.common.by import By
##from selenium.common.exceptions import TimeoutException
# Parse command-line arguments: recipient address, then the message words.
emailRecipient = ""
message = ""
print(len(sys.argv))
if len(sys.argv) > 1:
    emailRecipient = sys.argv[1]
    message = ' '.join(sys.argv[2:])
else:
    # NOTE(review): after printing usage the script continues with empty
    # recipient/message instead of exiting — confirm this is intended.
    print("Usage: cmdLineEmailer.py email [message-to-send]")
# Drive Yahoo Mail through a Firefox browser via Selenium.
driver = webdriver.Firefox()
driver.get('http://yahoo.com')
delay = 2 # seconds
#time.sleep(delay)
##try:
##    mailElement = WebDriverWait(driver, delay).until(
##        EC.presence_of_element_located((By.ID,
##                                        driver.find_element_by_id(
##                                            'uh-mail-link'))))
##except TimeoutException:
##    print('timed out waiting for page to load')
mailButton = driver.find_element_by_id('uh-mail-link')
mailButton.click()
driver.implicitly_wait(delay)
time.sleep(delay)
# Log in. The credentials below are redacted placeholders ('*****') and must
# be replaced before the script can work. Fixed sleeps make this brittle.
#usernameInput = driver.find_element_by_id('login-username')
usernameInput = driver.find_element_by_css_selector('#login-username')
usernameInput.click()
usernameInput.send_keys('*****@yahoo.com')
usernameInput.send_keys(Keys.ENTER)
time.sleep(delay)
passwordInput = driver.find_element_by_css_selector('#login-passwd')
passwordInput.send_keys('*****')
passwordInput.send_keys(Keys.ENTER)
time.sleep(delay)
time.sleep(7)
# Compose and fill in the message.
composeButton = driver.find_element_by_css_selector('#Compose')
composeButton.click()
time.sleep(delay)
# toBoxDict = driver.switch_to_active_element()
# toBox = toBoxDict["value"]
# toBox.send_keys('Bob.smith@yahoo.com')
# toBox.send_keys(Keys.TAB)
toBox = driver.find_element_by_css_selector('#to-field')
toBox.send_keys(emailRecipient)#'Bob.Smith@yahoo.com')
subjectBox = driver.find_element_by_css_selector('#subject-field')
subjectBox.send_keys('SUBJECT')
contentBox = driver.find_element_by_css_selector('#rtetext')
contentBox.click()
contentBox.send_keys(message)#'WOO WE MADE IT BRO!')
# Ctrl+Enter sends the message from the compose window.
contentBox.send_keys(Keys.CONTROL + Keys.ENTER)
##sendButton = driver.find_element_by_partial_link_text('Send')
##sendButton.click()
| [
"aaron.m.fox3@gmail.com"
] | aaron.m.fox3@gmail.com |
fc1d19e6d53ee0d80f38c8df91e1bc9062e99624 | 6547262baa89554b8e98a697302c74e84852f88a | /server/openslides/chat/migrations/0001_initial.py | 1aed4aaa78b6c707567ce0b8bf129af414ec0318 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | FinnStutzenstein/OpenSlides | f957ec63fd5224e9fce1a3ea0ea24a0cf5cf7454 | 7dc35dce404339b41c7729eb3de29010ca63f9a0 | refs/heads/master | 2021-11-26T21:19:42.408206 | 2021-07-15T08:38:26 | 2021-07-15T08:38:26 | 64,663,189 | 0 | 0 | MIT | 2020-08-18T10:52:58 | 2016-08-01T11:58:39 | TypeScript | UTF-8 | Python | false | false | 2,316 | py | # Generated by Django 2.2.15 on 2020-12-03 12:52
from django.db import migrations, models
import openslides.utils.models
# Auto-generated Django migration: creates the ChatGroup and ChatMessage
# models for the chat app. Keep in sync with the models; do not hand-edit
# the schema operations.
class Migration(migrations.Migration):
    initial = True

    dependencies = [
        ("users", "0015_user_vote_delegated_to"),
    ]

    operations = [
        # ChatGroup: a named group whose visibility is restricted to the
        # listed access groups (empty = unrestricted per model logic).
        migrations.CreateModel(
            name="ChatGroup",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=256)),
                (
                    "access_groups",
                    models.ManyToManyField(
                        blank=True, related_name="chat_access_groups", to="users.Group"
                    ),
                ),
            ],
            options={
                "permissions": (("can_manage", "Can manage chat"),),
                "default_permissions": (),
            },
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        # ChatMessage: a message in a chat group; username/user_id are stored
        # denormalized (plain fields, not a FK to the user model).
        migrations.CreateModel(
            name="ChatMessage",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.CharField(max_length=512)),
                ("timestamp", models.DateTimeField(auto_now_add=True)),
                ("username", models.CharField(max_length=256)),
                ("user_id", models.IntegerField()),
                (
                    "chatgroup",
                    models.ForeignKey(
                        on_delete=openslides.utils.models.CASCADE_AND_AUTOUPDATE,
                        related_name="messages",
                        to="chat.ChatGroup",
                    ),
                ),
            ],
            options={
                "default_permissions": (),
            },
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
    ]
| [
"sean.f.t.engelhardt@gmail.com"
] | sean.f.t.engelhardt@gmail.com |
1b6d6a907590c700ea789042edc93e8d6ad3c64a | a20ff16d5ceda636e23b74c60c79e496cf26b9aa | /URI/1 - INICIANTE/Python/1132 - MultiplosDe13.py | d0e4638c6269c3a5ad5df465a179373547605bec | [
"MIT"
] | permissive | william-james-pj/LogicaProgramacao | 10246faf85358091f2483c81ef7dab14dbc458f1 | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | refs/heads/master | 2022-04-24T06:32:46.376978 | 2020-04-29T01:36:06 | 2020-04-29T01:36:06 | 255,641,904 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | soma = 0
x = int(input())
y = int(input())
for z in range(min(x,y), max(x,y)):
if z % 13 != 0:
soma += z
print(soma) | [
"52788612+william-james-pj@users.noreply.github.com"
] | 52788612+william-james-pj@users.noreply.github.com |
5126ed4437d75a2dc38246725d8e7c906b59dbaf | 17d28bc8e3cd3ba64e9dd3cfd291fb7b85de0738 | /train.py | 73ab2caf13d1271326afe29b6b808bc950c59449 | [] | no_license | brod4910/CS4230-Final-Project | 0d66f064fdf3786198e7d9c9f17cb9d329691a9d | a4c4319255eb78f45617654d2d0bb3d798eea785 | refs/heads/master | 2020-03-11T04:23:39.455514 | 2018-04-30T17:39:01 | 2018-04-30T17:39:01 | 129,775,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | import os
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
import time
import sys
def train(args, model, device):
    """Train *model* on CIFAR-10 for ``args.epochs`` epochs and report timings.

    args -- namespace with batch_size, lr, momentum, epochs, log_interval
    model -- the network to train (already on *device*)
    device -- torch device to run on

    Prints the total wall time and per-sample averages of the data-loading,
    forward and backward phases (CIFAR-10 train set has 50000 samples).
    """
    # torch.manual_seed(args.seed + rank)
    data_tot, forward_tot, backward_tot = 0., 0., 0.
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../data', train=True, download=True,
                    transform=transforms.Compose([
                        transforms.Resize((224,224)),
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                    ])),
        batch_size=args.batch_size, shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../data', train=False, transform=transforms.Compose([
                        transforms.Resize((224,224)),
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                    ])),
        batch_size=args.batch_size, shuffle=True, num_workers=2)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    # .to(device) instead of .cuda() so CPU-only runs also work.
    criterion = torch.nn.CrossEntropyLoss().to(device)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended wall-clock replacement.
    total_time = time.perf_counter()
    for epoch in range(1, args.epochs + 1):
        data_t0, forward_t1, backward_t2 = train_epoch(epoch, args, model, train_loader, optimizer, criterion, device)
        data_tot += data_t0
        forward_tot += forward_t1
        # Bug fix: was "backward_t2 += backward_t2", which doubled a local
        # and left backward_tot at 0 for the final report.
        backward_tot += backward_t2
        test_epoch(model, test_loader, device)
    print("The Total Training and Inference time: {:.4f}".format(time.perf_counter() - total_time))
    print("The Data Loading Average: {:.10f}".format(data_tot / (50000*args.epochs)))
    print("The Forwardpass Average: {:.10f}".format(forward_tot / (50000*args.epochs)))
    print("The Backwardpass Average: {:.10f}".format(backward_tot / (50000*args.epochs)))
def train_epoch(epoch, args, model, data_loader, optimizer, criterion, device):
    """Run one training epoch and time its phases.

    Returns a tuple (data_load_tot, forward_tot, backward_tot) of the
    accumulated seconds spent loading batches, in the forward pass, and in
    the backward pass, respectively.
    """
    model.train()
    data_load_tot = 0.
    forward_tot = 0.
    backward_tot = 0.
    num_batches = 0
    # time.clock() was removed in Python 3.8; use perf_counter() instead.
    data_load_t0 = time.perf_counter()
    for batch_idx, (data, target) in enumerate(data_loader):
        data_load_tot += time.perf_counter() - data_load_t0
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        forward_t1 = time.perf_counter()
        output = model(data)
        loss = criterion(output, target)
        forward_tot += time.perf_counter() - forward_t1
        backward_t2 = time.perf_counter()
        loss.backward()
        backward_tot += time.perf_counter() - backward_t2
        optimizer.step()
        num_batches += 1
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(data_loader.dataset),
                100. * batch_idx / len(data_loader), loss.item()))
        # Restart the data-loading timer for the next batch fetch.
        data_load_t0 = time.perf_counter()
    # Per-batch averages. Bug fix: originally divided by the last batch_idx
    # (off by one, and ZeroDivisionError for a single-batch loader).
    if num_batches:
        print("Data Load Time: {:.4f}".format(data_load_tot / num_batches))
        print("Forwardpass Time: {:.4f}".format(forward_tot / num_batches))
        print("Backwardpass Time: {:.4f}".format(backward_tot / num_batches))
    return data_load_tot, forward_tot, backward_tot
def test_epoch(model, data_loader, device):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
output = model(data)
loss = F.cross_entropy(output, target, size_average=False) # sum up batch loss
test_loss += loss.item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).sum()
test_loss /= len(data_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
| [
"brod4910@gmail.com"
] | brod4910@gmail.com |
01281754fae0374b5c3dc2411c4e4f5bede6ff1c | c5538b14a4ee1bd99dd70a80f6f859716799bbe9 | /blog15-textcluster/blog15_textcluster_07.py | ae02211dd7ccfd59d9464a8990075f08b5e1ac1b | [] | no_license | sigma-random/Python-zero2one | 21f1cadbed91f4ac50ad60134520c1d6c75161f9 | 7c025b3a1c26679b111dfbe5b9c34786e4cd6b74 | refs/heads/master | 2023-09-05T03:30:33.492625 | 2021-11-23T06:39:30 | 2021-11-23T06:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # coding:utf-8
#By:Eastmount CSDN
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# Step 1: build the term-frequency matrix from the lines of result.txt
corpus = []
for line in open('result.txt', 'r', encoding="utf-8").readlines():
    corpus.append(line.strip())
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus)
word = vectorizer.get_feature_names()
for n in range(len(word)):
    print(word[n],end=" ")
print('')
print(X.toarray())
# Step 2: compute the TF-IDF weights from the term-frequency matrix
transformer = TfidfTransformer()
print(transformer)
tfidf = transformer.fit_transform(X)
print(tfidf.toarray())
weight = tfidf.toarray()
# Step 3: K-Means clustering on the TF-IDF weight matrix
from sklearn.cluster import KMeans
clf = KMeans(n_clusters=3)
s = clf.fit(weight)
y_pred = clf.fit_predict(weight)
print(clf)
print(clf.cluster_centers_) # cluster centers
print(clf.inertia_) # inertia: used to judge whether the cluster count fits; smaller means better clustering
print(y_pred) # predicted cluster labels
# Step 4: dimensionality reduction with PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=2) # reduce to two dimensions for plotting
newData = pca.fit_transform(weight)
print(newData)
x = [n[0] for n in newData]
y = [n[1] for n in newData]
# Step 5: visualization of the clusters in 2-D
import numpy as np
import matplotlib.pyplot as plt
plt.scatter(x, y, c=y_pred, s=100, marker='s')
plt.title("Kmeans")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
| [
"noreply@github.com"
] | sigma-random.noreply@github.com |
d9f02ffdaab8a4ff5841043029623e7de1da4fdc | e004ab989d60ae01cbf0a68cb797af888ce8270b | /scripts/alloc_alg_wm.py | 2c340be8176d2d30471d75a119525cc184d0475c | [] | no_license | felipeqc/allocation | a074eb92cfa2c0efb23d2f7e3a5b9a36862c3253 | 39c85d0ec17ea3120d4a7e4c6611d42b2e13e7fe | refs/heads/master | 2016-09-02T00:50:14.461145 | 2012-09-20T14:05:11 | 2012-09-20T14:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,933 | py | from alloc_task_wm import *
from alloc_part_wm import *
class WmTaskSet:
    def __init__(self, cpus):
        """Create an empty EDF-WM task set for *cpus* processors.

        Builds one WmPartition per processor and resets the global Task
        numbering so task IDs start from 0 for each new task set.
        """
        self._cpus = int(cpus)
        self._partition = [WmPartition(x) for x in xrange(self._cpus)]
        self._tasks = []
        self._verbose = False
        Task.taskNumber = 0
def setVerbose(self, option):
self._verbose = option
def tasks(self):
"""Return list of tasks"""
return self._tasks
def partitions(self):
"""Return list of partitions"""
return sorted(self._partition, key = lambda part : part.number())
def cpus(self):
"""Return the number of cpus"""
return self._cpus
def createTask(self, c, p):
"""Create a task with WCET c and period p"""
self._tasks.append(Task(c, p))
    def allocate(self, bp = 'f', order = 'u'):
        """Execute the allocation and fill the partitions.

        bp -- bin-packing heuristic: 'f' (First-Fit) or 'w' (Worst-Fit)
        order -- task ordering: 'u' (decreasing utilization) or
                 'p' (decreasing period)

        Tasks that do not fit whole on any processor are split across
        processors via _split_task() (EDF-WM). Returns True on success,
        False as soon as a task can neither be packed nor split.
        """
        self._partition = [WmPartition(x) for x in xrange(self._cpus)] # Clear previous allocation
        if order == 'u':
            # In case of equal utilizations, we prioritize lesser periods.
            task_cmp = lambda t1, t2 : cmp(t1.util(), t2.util()) if t1.util() != t2.util() else -cmp(t1.period(), t2.period())
        elif order == 'p':
            # In case of equal periods, we prioritize greater utilizations.
            task_cmp = lambda t1, t2 : cmp(t1.period(), t2.period()) if t1.period() != t2.period() else cmp(t1.util(), t2.util())
        remaining_tasks = sorted(self._tasks, cmp=task_cmp) # Sort in descending order (actually it's ascending, but we use pop() to take the elements for efficiency)
        if self._verbose:
            print 'Scheduling %d tasks using EDF-WM %s-%s' % (len(self._tasks), 'D' + order.upper(), bp.upper() + 'F');
            for task in reversed(remaining_tasks):
                print '%d: C: %Lf, T: %Lf, U: %Lf' % (task.number(), task.wcet(), task.period(), task.util())
            print
        while remaining_tasks:
            curTask = remaining_tasks.pop()
            if not self._binpacking(curTask, bp) and not self._split_task(curTask, bp, order, remaining_tasks): # If bin-packing failed and task splitting also failed
                if self._verbose:
                    print '\nAllocation FAILED!'
                return False
        if self._verbose:
            print '\nAllocation OK!\n'
        # NOTE(review): this summary dump is outside the verbose check above,
        # so it prints regardless of the verbose flag — confirm intended.
        for part in self._partition:
            print 'CPU %d, U = %Lf:' % (part.number(), part.util())
            if part.tasks():
                for task in part.normalTasks():
                    print '- Task %d, C = %Lf, T = %Lf, U = %Lf' % (task.number(), task.wcet(), task.period(), task.util())
                for s in part.slices():
                    print '- Slice %d:%d, C = %Lf, D = %Lf, \n\t\tT = %Lf, U = %Lf' % (s.task().number(), s.slicenumber(), s.wcet(), s.deadline(), s.period(), s.util())
                print
        return True # Allocation completed
    def _binpacking(self, curTask, bp):
        """Try to place *curTask* whole on some processor.

        bp -- 'f' for First-Fit, 'w' for Worst-Fit (with a merge fallback).
        Returns True when the task was placed, False when no processor can
        accept it (the caller will then try task splitting).
        """
        if bp == 'f': # First-Fit
            for part in self._partition: # For each partition
                part._addTask(curTask) # Add task temporarily
                if part._schedTest():
                    if self._verbose:
                        print 'Task %d -> Processor %d' % (curTask.number(), part.number())
                    return True
                else:
                    part._removeLastTask()
        elif bp == 'w': # Worst-Fit
            # Pick the schedulable partition with the most remaining slack.
            max_slack = -1
            max_slack_part = None
            for part in self._partition: # For each partition
                part._addTask(curTask) # Add task temporarily
                if part._schedTest():
                    if 1.0 - part.util() > max_slack:
                        max_slack = 1.0 - part.util()
                        max_slack_part = part
                part._removeLastTask() # Remove task anyway
            if max_slack_part != None:
                max_slack_part._addTask(curTask) # Add task permanently
                if self._verbose:
                    print 'Task %d -> Processor %d' % (curTask.number(), max_slack_part.number())
                return True
            else:
                # Try to merge the two partitions with minimum utilization and no migratory tasks (worst-Fit improvement)
                part_without_mig = [x for x in self._partition if not x._hasSlices()]
                if(len(part_without_mig) >= 2):
                    p1 = min(part_without_mig, key = lambda part : part.util()) # Partition with minimum utilization
                    part_without_mig.remove(p1)
                    if part_without_mig:
                        p2 = min(part_without_mig, key = lambda part : part.util()) # Partition with second to the minimum utilization
                        # Let's put all tasks in a temporary partition and apply sched test
                        temp_part = WmPartition(-1)
                        for t in p1._tasks + p2._tasks:
                            temp_part._addTask(t)
                        if temp_part._schedTest():
                            # Merge frees p2 entirely, so curTask gets p2 to itself.
                            p1._merge(p2)
                            p2._addTask(curTask)
                            if self._verbose:
                                print 'MERGE: Processors %d and %d -> Processor %d' % (p1.number(), p2.number(), p1.number())
                                print 'Task %d -> Processor %d' % (curTask.number(), p2.number())
                            return True
        return False # Bin-Packing failed
def _split_task(self, curTask, bp, order, remaining_tasks):
# Split curTask into s slices (s = 2..cpus) with shortened deadline D/s,
# greedily filling the partitions with the most available execution time.
# Returns True on success, False when no s allows a complete split.
# NOTE(review): `bp` and `order` are accepted but unused in this body.
s = 2
while s <= self._cpus:
dPrime = curTask.deadline()/s
if self._verbose:
print "\ns =",s
# Let's order partitions by available exec time (decreasing order)
orderedPartitions = sorted(self._partition, key = lambda part : part._calcExecTime(curTask, dPrime), reverse=True)
# We called CalcExecTime() of every partition, so now memoExecTime() returns updated values
i = 0
remaining = curTask.wcet()
# While the task is too large to fit in the current processor, break it in slices
while i < self._cpus and i < s and remaining > 0.0: # Split the task in s slices
available = orderedPartitions[i]._memoExecTime()
#if remaining <= available:
# break
orderedPartitions[i]._addSlice(WmSlice(i+1, available, dPrime, curTask))
if self._verbose:
print 'Trying Task %d Slice %d -> Processor %d, remaining: %Lf, available: %Lf' % (curTask.number(), i+1, orderedPartitions[i].number(), remaining, available)
remaining -= min(available, remaining)
i += 1
# Find the CPU to put the last piece of the task (minimizing waste)
# The triple-quoted block below is a disabled alternative strategy kept for
# reference: place the final slice on the busiest partition that still fits
# it, then back-fill that partition with small remaining tasks.
"""j = i
max_util_part = None
max_util = -1.0
while j < self._cpus:
available = orderedPartitions[j]._memoExecTime()
if self._verbose:
print 'Checking Task %d Slice %d -> Processor %d, remaining: %Lf, available: %Lf' % (curTask.number(), i+1, orderedPartitions[j].number(), remaining, available)
if orderedPartitions[j].util() > max_util and remaining <= available:
max_util_part = orderedPartitions[j]
max_util = orderedPartitions[j].util()
j += 1
if(max_util_part != None):
available = max_util_part._memoExecTime()
max_util_part._addSlice(WmSlice(i+1, remaining, dPrime, curTask))
if self._verbose:
print 'Done! Task %d Slice %d -> Processor %d' % (curTask.number(), i+1, max_util_part.number())
# Try to put small tasks in max_util_part
temp_list = reversed(remaining_tasks)
for smallTask in temp_list:
max_util_part._addTask(smallTask) # Add task temporarily
if max_util_part._schedTest():
remaining_tasks.remove(smallTask)
if self._verbose:
print 'Task %d -> Processor %d' % (smallTask.number(), max_util_part.number())
else:
max_util_part._removeLastTask()
return True"""
if remaining == 0.0:
return True
# Allocation failed, we have to remove the tasks to try again with greater s.
# There are slices in orderedPartitions[0...s-1].
if self._verbose:
print "s =", s, "is not enough."
for j in xrange(s):
orderedPartitions[j]._removeLastSlice()
s += 1
# Can't split the task in slices
return False
| [
"felipe@felipe-EP45-DS3R.(none)"
] | felipe@felipe-EP45-DS3R.(none) |
2e1edde9abd6e425e7051a6e949395bc5e1250bf | 4dfc482b153367d3637f647b269e4282f2c47e0d | /src/mapreduce/job1/mapper.py | 69c48fd79ce1301afd5611ae277653ddf1297b83 | [] | no_license | taku-k/pagerank-hadoop-app | c0cf645b8eca96c680cf8f52b49c62ec2d266ca7 | 4a2a73046cef0ec895bf59ceccd323dc8687d89d | refs/heads/master | 2021-01-18T00:06:56.137494 | 2015-05-28T23:11:25 | 2015-05-28T23:11:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | #!/usr/bin/env python
# coding: utf-8
import sys
import doctest
def page_split(row):
    """ Split string and return page_id and page_title
    >>> page_split("1,4,'アップロードログ_2004年4月','sysop',498,0,0,0.00326005555992951,'20150420034300',NULL,2168855,106607,NULL")
    ('1', 'アップロードログ_2004年4月')
    """
    # The id ends at the first comma; the quoted title starts after the
    # second comma and ends at the "','" separator that follows it.
    id_end = row.find(',')
    title_start = row[id_end + 1:].find(',') + id_end + 1
    title_stop = row[title_start + 1:].find("','") + title_start + 2
    quoted_title = row[title_start + 1:title_stop]
    return row[:id_end], quoted_title[1:-1]
def pagelinks_split(row):
    """ Split string and return pl_from and pl_title
    >>> pagelinks_split("7688,0,'2&4モータリング社',0")
    ('7688', '2&4モータリング社')
    """
    # Source id ends at the first comma; the quoted title sits between the
    # second comma and the last comma of the row.
    first = row.find(',')
    second = row[first + 1:].find(',') + first + 1
    last = row.rfind(',')
    quoted_title = row[second + 1:last]
    return row[:first], quoted_title[1:-1]
def main():
    """Hadoop-streaming mapper: read rows from stdin and emit TSV records.

    Lines prefixed 'p' are page rows, lines prefixed 'l' are pagelink rows;
    anything else is ignored.
    """
    for raw_line in sys.stdin:
        record = raw_line.strip()
        if record.startswith('p'):
            page_id, page_title = page_split(record[1:])
            print("{0}\tp\t{1}".format(page_id, page_title))
        elif record.startswith('l'):
            pl_from, pl_title = pagelinks_split(record[1:])
            print("{0}\tl\t{1}".format(pl_from, pl_title))
if __name__ == '__main__':
# doctest.testmod()
# Entry point for Hadoop streaming; doctest run is intentionally disabled.
main()
| [
"taakuu19@gmail.com"
] | taakuu19@gmail.com |
d5925f72ca98c0bcd544b9dbb702e12be592c408 | a5b044ded84c380e6faa677dea757912513148b8 | /harriscorner.py | c37c511934b8808cbf9e8ce8b3e03a000508bf5d | [] | no_license | holmes27/open_cv | 21d56413cadca58f408ae18f632df69d553446bc | f2aef9a3e879f718e3a6e6c6dd006824789a9357 | refs/heads/main | 2023-05-31T04:59:28.360166 | 2021-06-11T07:05:51 | 2021-06-11T07:05:51 | 375,821,503 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import numpy as np
import cv2 as cv
# Harris corner detection demo: load a chessboard image, mark detected
# corners in red, and display both windows until ESC is pressed.
img=cv.imread("chessboard.png")
cv.imshow("image",img)
# cornerHarris requires a single-channel float32 image.
gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
gray=np.float32(gray)
dst=cv.cornerHarris(gray,2,3,0.04)# 2 is the block size ,3 is for sobel(aperture parameter),0.04 is for harris free detector parameter
dst=cv.dilate(dst,None)# for better results
# Paint every pixel whose corner response is within 1% of the max in red (BGR).
img[dst>0.01*dst.max()]=[0,0,255]# max change in intensity is the corner
cv.imshow("image with corners",img)
# 27 == ESC: close all windows on Escape.
if cv.waitKey(0) & 0xff==27:
cv.destroyAllWindows()
| [
"homiraghuvanshi27@gmail.com"
] | homiraghuvanshi27@gmail.com |
b2ea44fad0db54a7e9d2d7ab518827c928f19cd9 | 23f41db81eb6628581be8c693f25c47732694e83 | /tests/test_form.py | b6cc02ab044730f10f929e83051742e249191821 | [
"MIT"
] | permissive | ortkin/django-internal-external-comments | 2ecb4d5c7854ac4bf7c4726285a48c37e8dabea4 | ea608c34398549ca053d7b50a19cc8f614f91bf5 | refs/heads/master | 2021-09-07T12:04:59.516765 | 2018-02-22T16:01:32 | 2018-02-22T16:01:32 | 109,493,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,598 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from internal_external_comments.forms import InternalExternalCommentForm
class InternalExternalFormTests(TestCase):
# Tests for InternalExternalCommentForm: initial/security fields, validation,
# and the custom 'internal_external' field, which defaults to "internal".
def getData(self):
# Canned POST payload (no form-security fields).
return {
'name': "Frodo Baggins",
'email': "frodo.baggins@bagend.org",
'comment': ("Come on, Sam. Remember what Bilbo used to say: "
"\"It\'s a dangerous business, Frodo, going out your door. "
"You step onto the road, and if you don\'t keep your feet, "
"there\'s no knowing where you might be swept off to.\""),
}
def getValidData(self):
# create form for target self.user
# Merge the canned payload with the form's generated initial data
# (security hash, timestamp, content type, ...) to make a valid POST.
f = InternalExternalCommentForm(self.user)
d = self.getData()
d.update(f.initial)
return d
def setUp(self):
super(InternalExternalFormTests, self).setUp()
self.user = get_user_model().objects.create_user('gandalf the white')
def test_form_init(self):
# A fresh form targets the right object and seeds the security fields.
f = InternalExternalCommentForm(self.user)
self.assertEqual(f.initial['content_type'], ContentType.objects.get_for_model(self.user))
self.assertEqual(f.initial['object_pk'], self.user.pk)
self.assertNotEqual(f.initial['security_hash'], None)
self.assertNotEqual(f.initial['timestamp'], None)
# check custom field
self.assertEqual(f.initial['internal_external'], "internal")
def test_form_internal_external_initial_exists(self):
# Caller-supplied initial value for internal_external is preserved.
init_data = self.getData()
init_data['internal_external'] = "external"
f = InternalExternalCommentForm(self.user, initial=init_data)
self.assertTrue('internal_external' in f.initial)
self.assertEqual(init_data['internal_external'], f.initial['internal_external'])
def test_form_internal_external_exists(self):
f = InternalExternalCommentForm(self.user)
self.assertTrue('internal_external' in f.initial)
def test_valid_form(self):
form = InternalExternalCommentForm(self.user, data=self.getValidData())
self.assertTrue(form.is_valid())
def test_blank_data(self):
form = InternalExternalCommentForm(self.user, {})
self.assertFalse(form.is_valid())
def test_get_comment_model(self):
form = InternalExternalCommentForm(self.user, self.getValidData())
self.assertEqual(InternalExternalCommentForm, form.get_comment_model())
def test_get_comment_object(self):
form = InternalExternalCommentForm(self.user, self.getValidData())
form.is_valid()
comment = form.get_comment_object()
self.assertTrue("internal_external" in dir(comment))
def test_get_comment_object_invalid_form(self):
# get_comment_object must refuse to build a comment from an invalid form.
form = InternalExternalCommentForm(self.user, {})
try:
form.get_comment_object()
self.fail("get_comment_object should fail when form not valid")
except Exception:
pass
def test_get_comment_create_data(self):
form = InternalExternalCommentForm(self.user, self.getValidData())
form.is_valid()
data = form.get_comment_create_data()
self.assertTrue("internal_external" in data)
self.assertEqual(data['internal_external'], 'internal')
def test_get_comment_create_data_external(self):
data = self.getValidData()
data['internal_external'] = 'external'
form = InternalExternalCommentForm(self.user, data)
form.is_valid()
data = form.get_comment_create_data()
self.assertTrue("internal_external" in data)
self.assertEqual(data['internal_external'], 'external')
def test_get_comment_create_data_external_none_is_internal(self):
# A None value falls back to "internal", both in the create data and on
# the comment widget.
data = self.getValidData()
data['internal_external'] = None
form = InternalExternalCommentForm(self.user, data)
form.is_valid()
data = form.get_comment_create_data()
self.assertTrue("internal_external" in data)
self.assertEqual(data['internal_external'], "internal")
widget = form.fields['comment'].widget
self.assertEqual(widget.internal_external, "internal")
def test_get_comment_create_data_external_external(self):
# An explicit 'external' value propagates to the comment widget.
data = self.getValidData()
data['internal_external'] = 'external'
form = InternalExternalCommentForm(self.user, data=data)
form.is_valid()
widget = form.fields['comment'].widget
self.assertEqual(widget.internal_external, "external")
| [
"amyort@wharton.upenn.edu"
] | amyort@wharton.upenn.edu |
88d3dd854018f601e7960c53e13223c135447a52 | 9db281fbed35bb8384eeacaa81d1a32a9dcc5cca | /class-17/demo/monster-jobs/monster_jobs/scraper.py | 0bb9e23c6da0da5a1287792a996e2dcec15b38c1 | [] | no_license | corey-marchand/seattle-python-401d14 | aab3f48c82229f1958989ce8318de60b9abbe4e2 | ae9ffebc9e5250cb5ec1760fd7764da0d3ad4e4c | refs/heads/master | 2022-11-15T16:09:37.248530 | 2020-07-09T19:10:49 | 2020-07-09T19:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import requests
from bs4 import BeautifulSoup
# Scrape Monster's job-search results for "software engineer" in Seattle and
# collect title/location/company for each posting into final_results.
# NOTE(review): depends on Monster's 2020-era markup (id='SearchResults',
# 'card-content' sections) — selectors will need updating if the site changed.
# Send a request to Monster webpage
URL = 'https://www.monster.com/jobs/search/?q=software-engineer&where=Seattle__2C-WA'
response = requests.get(URL)
# print(dir(response))
# Extract content
content = response.content
# Convert to BS object
soup = BeautifulSoup(content, 'html.parser')
# Find an element
results = soup.find(id='SearchResults')
# print(results.prettify())
jobs_list = results.find_all('section', class_='card-content')
# print(len(jobs_list))
final_results = []
for job in jobs_list:
# Missing fields stay as empty strings rather than raising.
job_dict = {'title': '', 'location':'', 'company':''}
found_title = job.find('h2', class_='title')
if found_title:
title = found_title.text.strip()
job_dict['title'] = title
found_location = job.find('div', class_='location')
if found_location:
location = found_location.text.strip()
job_dict['location'] = location
found_company = job.find('div', class_='company')
if found_company:
company = found_company.text.strip()
job_dict['company'] = company
final_results.append(job_dict)
# print(title)
# print('********************************')
# print(location)
# print('********************************')
# print(company)
# print('\n ############################# \n')
print(final_results)
| [
"ahmad.alawad.sf@gmail.com"
] | ahmad.alawad.sf@gmail.com |
5397b361705d553e3e3310f32c847b29f535c167 | 60d5ea4f007d49768d250ef394003f554003e4d0 | /python/Depth-first Search/116.Populating Next Right Pointers in Each Node.py | 885f028bc97eeb83c99f1867befd8577674b88a1 | [] | no_license | EvanJamesMG/Leetcode | dd7771beb119ea1250dbb3b147a09053298cd63b | fa638c7fda3802e9f4e0751a2c4c084edf09a441 | refs/heads/master | 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # coding=utf-8
'''
Given a binary tree
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Note:
You may only use constant extra space.
You may assume that it is a perfect binary tree (ie, all leaves are at the same level, and every parent has two children).
For example,
Given the following perfect binary tree,
1
/ \
2 3
/ \ / \
4 5 6 7
After calling your function, the tree should look like:
1 -> NULL
/ \
2 -> 3 -> NULL
/ \ / \
4->5->6->7 -> NULL
'''
# Definition for singly-linked list.
'''
深度优先搜索
'''
class TreeNode(object):
    """Binary-tree node: a payload value plus left/right child links."""

    def __init__(self, x):
        # Children start detached; callers wire the tree up explicitly.
        self.val = x
        self.left = self.right = None
class ListNode(object):
    """Singly-linked list node holding a value and a next pointer."""

    def __init__(self, x):
        # The node starts unlinked.
        self.val = x
        self.next = None
'''
一个递归就搞定了,就是递归让每一个节点他的左右子树通过next链接,直至到最后一层,
然后递归左右节点,继续让他们的左右子树通过next链接。
'''
class Solution(object):
    """LeetCode 116: wire each node's ``next`` pointer to its right
    neighbour in a perfect binary tree, using O(1) extra space beyond
    the recursion stack."""

    def connect(self, root):
        """
        :type root: TreeLinkNode
        :rtype: nothing
        """
        if not root:
            return
        # Zip the inner seam: the rightmost path of the left subtree is
        # linked to the leftmost path of the right subtree.
        left_edge, right_edge = root.left, root.right
        while left_edge and right_edge:
            left_edge.next = right_edge
            left_edge = left_edge.right
            right_edge = right_edge.left
        # Recurse to stitch the seams inside each subtree.
        self.connect(root.left)
        self.connect(root.right)
| [
"Evan123mg@gmail.com"
] | Evan123mg@gmail.com |
40c50ce4d2f03bde8befeb5f410d94000238daf2 | f4e28006c728263acc96031cc4eded92369d36df | /nms_services/nms_services/wsgi.py | d048a2919a4629ed64ea3f1d08bab21f352156e2 | [] | no_license | truongdtnms/MW | 95cf9e4b44fe7d12ce6b76a03044249f37add800 | 37ddf25a2c4494ac19b80b61270d1c0eaa94b9cb | refs/heads/master | 2022-11-30T03:38:23.106553 | 2020-08-16T13:07:49 | 2020-08-16T13:07:49 | 286,219,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for nms_services project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nms_services.settings')
# Module-level WSGI callable; servers (gunicorn/uwsgi/mod_wsgi) import this.
application = get_wsgi_application()
| [
"tientrien2505@gmail.com"
] | tientrien2505@gmail.com |
ccc7992960506612a2a2b5cf112aec07ec10c737 | b339b70cd5ee8a48ba6b49f0031cd43dbfa55b41 | /src/inquiry.py | 2ebad3139cde27d82beb08053883e12c061ee226 | [] | no_license | JeffinSE/mynewtrial1 | 4adddcaecc30a82075b0f96b0b5ea95f747eb008 | c1559cbe890695df242fc9220f2faa25239920b9 | refs/heads/master | 2020-03-07T19:33:46.603279 | 2018-04-01T21:52:25 | 2018-04-01T21:52:25 | 127,674,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | __author__ = "Jeff Xia"
# Module-level project metadata constant.
__project__ = "MyProject"
| [
"xiajihong75@gmail.com"
] | xiajihong75@gmail.com |
83aafba4187bb26dfef831f2cb3ecf91c7677d01 | 04dddbf04893913b0b24c6c02ebd2672b774a616 | /다이나믹 프로그래밍/11052 카드 구매하기.py | 565e7ec0c89ae6471533ed01d0209c88d36b7020 | [] | no_license | hatssww/BOJ | ca16345dbe24641e1ca5adee136a858a64a080b0 | bd7363d5c84de281de9b34667e9c0b76a904cffc | refs/heads/main | 2023-05-24T22:23:35.127397 | 2021-06-08T23:36:40 | 2021-06-08T23:36:40 | 370,254,375 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import sys
# BOJ 11052: maximum total price when buying exactly n cards from packs.
n = int(sys.stdin.readline())
# prices[k] = cost of the pack containing k cards (1-indexed; prices[0] unused).
prices = [0] + [int(tok) for tok in sys.stdin.readline().split()]
# best[k] = maximum price achievable for exactly k cards.
best = [0] * (n + 1)
best[1] = prices[1]
for count in range(2, n + 1):
    for first_pack in range(1, count + 1):
        candidate = best[count - first_pack] + prices[first_pack]
        if best[count] < candidate:
            best[count] = candidate
print(best[n])
"83650060+hatssww@users.noreply.github.com"
] | 83650060+hatssww@users.noreply.github.com |
ac05e4033b4f6b8b1554aebf03f1a85250b105c8 | 7f8660729c81b3401147d813ec30fc86e2642c30 | /python/Fibonacci.py | 51c0c8924d985943c311201fd21aa91e85d3d98e | [] | no_license | Patrycja13/gitrepo | 881e0451933919ef007bf649ea41af3014edb254 | ea9f9fdce484c477b26cbb25f28261747657261a | refs/heads/master | 2021-07-10T00:01:41.796938 | 2019-03-25T08:29:54 | 2019-03-25T08:29:54 | 103,923,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def fib_iter(n):
    """Return the n-th Fibonacci number, computed iteratively.

    F(0) = 0, F(1) = 1, F(n) = F(n-2) + F(n-1) for n > 1.

    Bug fix: the original updated the pair sequentially (``a = b`` then
    ``b = a + b``), which doubles ``b`` each iteration instead of advancing
    the recurrence — e.g. fib_iter(5) returned 8 instead of 5, so the
    module's own ``assert fib_iter(5) == 5`` failed. A simultaneous tuple
    assignment (and a simplified loop bound) restores the recurrence.
    """
    a, b = 0, 1
    for _ in range(n):
        # Invariant: (a, b) == (F(k), F(k+1)) after k iterations.
        a, b = b, a + b
    return a
def main(args):
# Prompt for an index and print the corresponding Fibonacci number.
# (User-facing strings are Polish and are kept as-is.)
n = int(input("Podaj wyraz ciągu: "))
# Self-checks of the helper against known values F(0)..F(5).
assert fib_iter(0) == 0
assert fib_iter(1) == 1
assert fib_iter(2) == 1
assert fib_iter(5) == 5
print("Wyraz {:d} = {:d}".format(n, fib_iter(n)))
return 0
if __name__ == '__main__':
# Local import: `sys` is only needed for the script entry point.
import sys
sys.exit(main(sys.argv))
| [
"patrycja.gawel13@onet.pl"
] | patrycja.gawel13@onet.pl |
810c374d5845fa02cb9141659fad67f933c09195 | 3abe7b4d572ae81a8222996821569bf3a684ec14 | /text/__init__.py | e9461d87f13166dac13aea90ab80aead3a0ef212 | [
"BSD-3-Clause",
"MIT"
] | permissive | creotiv/RussianTTS-Tacotron2 | 6c8defdd5a9cafdd46b71f8006162c4bab586d0f | 8ac15eea9450d141cb84d4d1a96b600f43d206c9 | refs/heads/master | 2023-06-01T09:43:12.209652 | 2021-06-10T12:54:24 | 2021-06-10T12:54:24 | 334,964,314 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | """ from https://github.com/keithito/tacotron """
import re
from text import cleaners
from text.symbols import symbols, ctc_symbols
# Mappings from symbol to numeric ID and vice versa:
symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
_ctc_symbole_to_id = {s: i for i, s in enumerate(ctc_symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    Text wrapped in curly braces is treated as an ARPAbet sequence, e.g.
    "Turn left on {HH AW1 S S T AH0 N} Street."; everything else is cleaned
    and encoded symbol-by-symbol.
    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through
    Returns:
      List of integers corresponding to the symbols in the text
    '''
    ids = []
    remaining = text
    while len(remaining):
        match = _curly_re.match(remaining)
        if match is None:
            # No more braced groups: clean and encode the tail, then stop.
            ids += _symbols_to_sequence(_clean_text(remaining, cleaner_names))
            break
        # Plain prefix, then the ARPAbet group, then continue with the rest.
        ids += _symbols_to_sequence(_clean_text(match.group(1), cleaner_names))
        ids += _arpabet_to_sequence(match.group(2))
        remaining = match.group(3)
    return ids
def sequence_to_ctc_sequence(sequence):
    """Map symbol IDs onto CTC symbol IDs, dropping symbols absent from the CTC set."""
    ctc_ids = []
    for sym_id in sequence:
        symbol = _id_to_symbol[sym_id]
        if symbol in ctc_symbols:
            ctc_ids.append(_ctc_symbole_to_id[symbol])
    return ctc_ids
def sequence_to_text(sequence):
    '''Converts a sequence of IDs back to a string'''
    pieces = []
    for symbol_id in sequence:
        if symbol_id not in _id_to_symbol:
            # Unknown IDs are silently skipped.
            continue
        symbol = _id_to_symbol[symbol_id]
        if len(symbol) > 1 and symbol[0] == '@':
            # Re-wrap ARPAbet symbols in curly braces.
            symbol = '{%s}' % symbol[1:]
        pieces.append(symbol)
    # Adjacent ARPAbet groups collapse into a single space-separated group.
    return ''.join(pieces).replace('}{', ' ')
def _clean_text(text, cleaner_names):
    """Run ``text`` through each named cleaner from the cleaners module, in order.

    Raises:
        Exception: 'Unknown cleaner: ...' when a name is missing or falsy.

    Fix: the original called getattr without a default, so a missing cleaner
    name raised AttributeError and the intended 'Unknown cleaner' message
    below was unreachable; a ``None`` default makes it fire as advertised.
    """
    for name in cleaner_names:
        cleaner = getattr(cleaners, name, None)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
def _symbols_to_sequence(symbols):
    """Translate raw symbols into their integer IDs, skipping filtered symbols."""
    ids = []
    for symbol in symbols:
        if _should_keep_symbol(symbol):
            ids.append(symbol_to_id[symbol])
    return ids
def _arpabet_to_sequence(text):
    """Encode whitespace-separated ARPAbet tokens ('@'-prefixed) as symbol IDs."""
    prefixed = ['@' + token for token in text.split()]
    return _symbols_to_sequence(prefixed)
def _should_keep_symbol(s):
    """Keep symbols that map to an ID, excluding the pad ('_') and EOS ('~') markers.

    Fix: the original used ``s is not '_'`` — identity comparison against a
    string literal, which depends on interpreter string interning and is a
    SyntaxWarning on modern CPython. Equality is the correct test.
    """
    return s in symbol_to_id and s != '_' and s != '~'
| [
"creotiv@gmail.com"
] | creotiv@gmail.com |
73ac9d74e0abf953a4ebfc25959c67b2a3588ebd | 528764cb8103d57fc6aeba3a23a0881de7e2f9db | /292. Nim Game.py | b38a653cda05735230b27efb0210100ed5badd93 | [] | no_license | creageng2016/lc2017July | 44ef8d9cf6f2793627aaa9fd5e852e32db603cf1 | 020f94699e58c40cc5fa39b29b6296c941b525d6 | refs/heads/master | 2021-01-16T11:30:45.089600 | 2018-06-28T06:18:49 | 2018-06-28T06:18:49 | 99,999,222 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
if __name__ == '__main__':
n = 4
print Solution().canWinNim(n)
| [
"creageng@gatech.edu"
] | creageng@gatech.edu |
1092692612326586488a9e0542b6e348c22be9ce | 2c93a4af166fe57d833ec972bef11e2b3c4bffcd | /0_corruptedGene/replaceCorruptedGenes.py | 8c3cc47992b36ccdc07139e9adbff1c76ea8fbe8 | [] | no_license | XuperX/diffnet | 4b7ac413e4feef8d429107d20ab8c94a1816fbe4 | 88bc43bd611c4aa42074de9744787c2174cdb939 | refs/heads/master | 2021-09-18T19:30:52.005750 | 2018-07-18T15:29:40 | 2018-07-18T15:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | # -*- coding: utf-8 -*-
"""
This file replaces all corrupted genes in the original ST_data
Created on Wed Jun 27 15:51:03 2018
@author: fuqixu
"""
import csv
# csv package can read large csv files line by line
import os
import sys
import re
import pandas
import pandas
rawFile = sys.argv[1]
# sys.argv[2] = refFile
# Get corresponding corrupted gene list
sample = re.compile(r'\d.\d')
sampleID = sample.findall(str(rawFile))[0]
refName = 'object' + str(sampleID) + '.csv'
refFile = os.getcwd() + '/corruptedGeneSymbols/' + refName
outFile = os.getcwd()
with open(refFile, 'r') as ref:
geneDict = {}
for lines in ref:
geneList = lines.strip().split(',')
# list[1]: old name, list[0]: new name
geneDict.update({geneList[1]: geneList[0]})
# Load the file to be processed.
# Read file to replace
count = 0
with open(rawFile, 'r') as raw:
rawReader = csv.reader(raw)
newRows = []
for row in rawReader:
temp = []
for rowi in row:
for key, value in geneDict.items():
if key in rowi:
rowi = rowi.replace(key, value)
count = count + 1
temp.append(rowi)
newRows.append(temp)
newDf = pandas.DataFrame(newRows)
outName = str(rawFile)+'_'
newDf.to_csv(outName,header = False)
print(str(len(geneDict)) + ' genes in the dict\n')
print( str(count) + " replacements")
| [
"itisfuqixu@gmail.com"
] | itisfuqixu@gmail.com |
c042f6ea69e8bec496c9cc206cdd8fb7ff5ed9e8 | b08ce26e1873df051ca1fcf306ad7e665632e738 | /util/run_on_me_or_pid_quit | ed902de4b62cc3473d35d84bd119b9bbcbe19ea5 | [
"BSD-3-Clause"
] | permissive | Yidann/action-recognition-visual-attention | 49ec069f115d51d4bdbd80ba5019ec81be4ad0d9 | 7ed6c4c6e211096454f036ff5659a3969c3abb3a | refs/heads/master | 2021-01-09T06:36:15.255180 | 2016-12-27T03:06:24 | 2016-12-27T03:06:24 | 48,528,372 | 0 | 0 | null | 2016-12-27T03:06:25 | 2015-12-24T06:20:50 | Jupyter Notebook | UTF-8 | Python | false | false | 711 | #!/usr/bin/python
"""
run_on_me_or_pid_quit PID cmd arg1 arg2
Runs a command after the process PID has completed, or if this process is
interrupted.
Iain Murray, November 2009, January 2010
"""
# "Daemonize" this job to stop it getting killed by KeyboardInterrupt when
# pressing Ctrl-c in an interactive python session.
import os
if os.fork() != 0:
os._exit(0)
os.setsid()
if os.fork() != 0:
os._exit(0)
import sys, os.path, time, signal
pid = sys.argv[1]
proc_file = '/proc/' + pid
def final():
os.execv(sys.argv[2], sys.argv[2:])
signal.signal(signal.SIGTERM, final)
try:
while os.path.exists(proc_file):
time.sleep(2)
finally:
final()
| [
"wyd@461server.(none)"
] | wyd@461server.(none) | |
a0af01108c13fc966f89021c5c91150515e97d0d | b9c9215eb12ab8f0dcc4a5d964dc97ac2ad62257 | /supervised_learning/0x11-attention/6-multihead_attention.py | 76729225e985b407847cb68ae5dc2a513672b6cb | [] | no_license | AndrewMiranda/holbertonschool-machine_learning-1 | 0318c2f45c863721b478acae26a5a874290e6445 | e8a98d85b3bfd5665cb04bec9ee8c3eb23d6bd58 | refs/heads/main | 2023-01-19T00:34:15.264705 | 2022-07-25T15:10:43 | 2022-07-25T15:10:43 | 386,514,270 | 0 | 0 | null | 2021-07-16T04:58:08 | 2021-07-16T04:58:07 | null | UTF-8 | Python | false | false | 3,218 | py | #!/usr/bin/env python3
"""File that conatins the class MultiHeadAttention"""
import tensorflow as tf
sdp_attention = __import__('5-sdp_attention').sdp_attention
class MultiHeadAttention(tf.keras.layers.Layer):
"""Class that perform multi head attention"""
def __init__(self, dm, h):
"""
Class constructor
dm is an integer representing the dimensionality of the model
h is an integer representing the number of heads
dm is divisible by h
Sets the following public instance attributes:
h - the number of heads
dm - the dimensionality of the model
depth - the depth of each attention head
Wq - a Dense layer with dm units, used to generate the query matrix
Wk - a Dense layer with dm units, used to generate the key matrix
Wv - a Dense layer with dm units, used to generate the value matrix
linear - a Dense layer with dm units, used to generate the attention
output
"""
self.h = h
self.dm = dm
self.depth = dm // h
self.Wq = tf.keras.layers.Dense(units=dm)
self.Wk = tf.keras.layers.Dense(units=dm)
self.Wv = tf.keras.layers.Dense(units=dm)
self.linear = tf.keras.layers.Dense(units=dm)
super(MultiHeadAttention, self).__init__()
def call(self, Q, K, V, mask):
"""
Publci instance method
Args:
Q is a tensor of shape (batch, seq_len_q, dk) containing the input to
generate the query matrix
K is a tensor of shape (batch, seq_len_v, dk) containing the input to
generate the key matrix
V is a tensor of shape (batch, seq_len_v, dv) containing the input to
generate the value matrix
mask is always None
Returns: output, weights
outputa tensor with its last two dimensions as (..., seq_len_q, dm)
containing the scaled dot product attention
weights a tensor with its last three dimensions as
(..., h, seq_len_q, seq_len_v) containing the attention weights
"""
def split_heads(x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size,
num_heads, seq_len, depth)"""
x = tf.reshape(x, (batch_size, -1, self.h, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
batch_size = tf.shape(Q)[0]
q = self.Wq(Q) # (batch_size, seq_len, d_model)
k = self.Wk(K) # (batch_size, seq_len, d_model)
v = self.Wv(V) # (batch_size, seq_len, d_model)
q = split_heads(q, batch_size)
k = split_heads(k, batch_size)
v = split_heads(v, batch_size)
scaled_attention, attention_weights = sdp_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention,
perm=[0, 2, 1, 3])
concat_attention = tf.reshape(scaled_attention, (batch_size, -1,
self.dm))
output = self.linear(concat_attention)
return output, attention_weights
| [
"juand0145@gmail.com"
] | juand0145@gmail.com |
0282b0bbcc645a264989a9a6c8b499484f51c94e | 16b4b31a0538696c16de079e4626b2e85ff5e135 | /apps/wish_app/migrations/0001_initial.py | 4fc733c43f4b462170c27102e72265e9a8899bfe | [] | no_license | kamil-wowczuk/wishlist | 614188c310ffeb67698c3c27e7faf5e5876a16f4 | e815a290d13dae50797a292a846ebf8f9edacb4b | refs/heads/master | 2021-01-23T10:48:09.828125 | 2017-06-01T20:20:56 | 2017-06-01T20:20:56 | 93,079,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-31 19:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=45)),
('date_hired', models.DateField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('salt', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='product',
name='users',
field=models.ManyToManyField(related_name='products', to='wish_app.User'),
),
]
| [
"kamilwowczuk@gmail.com"
] | kamilwowczuk@gmail.com |
bb0376e68d6da9334512d8ac0d9c60d24e1b1b3c | 49c046f76842338674de25474235cfef0ba26ae5 | /0.杂七杂八的东西/pygame的学习/1.5.标题和图标函数.py | 1d6b82a9c532557ef5e6d6525b860a4f34d92bdd | [] | no_license | fafaovo/python_Advanced | 4cb7600b0575f065fc122b921bb132994de9ad4c | c84a90c409ac9bf603b70bd9fce4104fdc58a121 | refs/heads/main | 2023-05-27T00:01:49.764524 | 2021-06-09T15:50:29 | 2021-06-09T15:50:29 | 346,731,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | """
pygame.display.set_caption()设置标题信息
pygame.display.get_caption()可以获得标题信息 返回一个元组
pygame.display.set_icon(surface对象) 设置icon图标
需要使用 image.load读进来图片
"""
import pygame
from sys import exit
pygame.init()
"""设置标题"""
pygame.display.set_caption('展示icon图标')
pygame.display.set_mode((100, 100), pygame.RESIZABLE)
"""导入图片并且作为icon"""
icon = pygame.image.load("favicon.ico")
pygame.display.set_icon(icon)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
pygame.display.update()
| [
"aa1206180803@sina.cn"
] | aa1206180803@sina.cn |
4e61e93dfb0cd473cbc80b80c38c6ad6b7dafc4a | 7d02c5c912a2a4dc8fd144c32333b5d15746fa1e | /program.pyw | 4eb463aafc4dcc7a2c8da510878702d3acd84e3b | [
"MIT"
] | permissive | mozancetin/Image-to-ASCII-Art | 3315c515b398827ca11d4eacfdbf4266c54bf58e | d5355f3fb0b3bc63c4519fde79487f4df8115786 | refs/heads/main | 2023-04-01T00:41:42.349619 | 2021-03-19T13:31:41 | 2021-03-19T13:31:41 | 344,885,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | pyw | import sys
import os
import drawASCIIart
from PyQt5.QtWidgets import QWidget, QApplication, QLineEdit, QLabel, QPushButton,QVBoxLayout, QFileDialog, QHBoxLayout
class ASCIIArt(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.ratioString = QLabel("Ratio (0.2 - 1.0)")
self.ratio = QLineEdit()
self.nameString = QLabel("Name")
self.name = QLineEdit()
self.ratio.setText(str(drawASCIIart.WIDTHRATIO))
self.sizeString = QLabel("Size (50 - 1000)")
self.size = QLineEdit()
self.size.setText(str(drawASCIIart.SIZE))
self.filePath = QLabel("")
self.button = QPushButton("Add Photo")
self.startButton = QPushButton("Start")
self.startButton.setEnabled(False)
v_box = QVBoxLayout()
v_box.addStretch()
v_box.addWidget(self.nameString)
v_box.addWidget(self.name)
v_box.addWidget(self.sizeString)
v_box.addWidget(self.size)
v_box.addWidget(self.ratioString)
v_box.addWidget(self.ratio)
v_box.addWidget(self.filePath)
v_box.addWidget(self.button)
v_box.addWidget(self.startButton)
v_box.addStretch()
self.setLayout(v_box)
self.setWindowTitle("ASCII Art")
self.button.clicked.connect(self.open_dir)
self.startButton.clicked.connect(self.Start)
self.setMinimumHeight(250)
self.setMaximumHeight(250)
self.setMinimumWidth(300)
self.setMaximumWidth(300)
self.show()
def open_dir(self):
file_path = QFileDialog.getOpenFileName(self, "Select an Image", os.getenv("DESKTOP"),"Images (*.png *.jpg)")
drawASCIIart.path = file_path[0]
self.filePath.setText(file_path[0])
self.startButton.setEnabled(True)
def Start(self):
if self.name.text() != "":
drawASCIIart.SAVE_NAME = self.name.text() + ".txt"
else:
self.filePath.setText("Name field cannot be left blank!")
return False
self.filePath.setText("Wait, please.")
drawASCIIart.main(drawASCIIart.setSize(int(self.size.text())), drawASCIIart.setWidth(float(self.ratio.text())))
self.filePath.setText("Done!")
app = QApplication(sys.argv)
menu = ASCIIArt()
sys.exit(app.exec_())
| [
"noreply@github.com"
] | mozancetin.noreply@github.com |
0c24daedded2881c22f5beb167c8ee8b0efba4f0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/112/23295/submittedfiles/av2_p3_civil.py | 190a7d9f68a0de2937c3818addab0a1181fc2f81 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
# NOTE: Python 2 script — input() evaluates the typed expression, so the
# user's answers become numbers directly.
linhas=input('Digite a quandidade de linhas:')
a=np.zeros((linhas,linhas))
# Fill the square matrix term by term from user input.
for i in range (0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=input('Digite o termo:')
print a
# Tower coordinates chosen by the user; read as globals by the functions below.
x=input('Digite a coordenada x da localização da torre:')
y=input('Digite a coordenada y da localização da torre:')
def locali(a, i=None, j=None):
    """Return the matrix entry at the tower coordinates.

    :param a: square numpy matrix of terms
    :param i: row index; defaults to the globally chosen ``x``
    :param j: column index; defaults to the globally chosen ``y``
    """
    # The original nested loops evaluated a[x, y] once per matrix cell
    # without using the result; a single indexed lookup is equivalent.
    if i is None:
        i = x
    if j is None:
        j = y
    return a[i, j]
print locali(a)
def soma_linha(a, linha=None):
    """Return the sum of one row of ``a``.

    :param a: numpy matrix
    :param linha: row index to sum; defaults to the globally chosen ``y``
    """
    if linha is None:
        linha = y
    # Sum only the requested row; the original summed every row into a
    # list and then kept just entry ``y``, discarding the rest.
    soma = 0
    for j in range(0, a.shape[1], 1):
        soma = soma + a[linha, j]
    return soma
print soma_linha(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e660e8bdd77c84ac445656a3f855a77ef3202a7e | 554c50bb8271c7fd5f793afe235ee87fd4a07455 | /src/epubcheck/models.py | 8825eb8c4aeb7565eb7510b73dcbc91d6aa4f518 | [
"BSD-2-Clause",
"BSD-3-Clause",
"MIT"
] | permissive | finnatsea/epubcheck | fd6c47391201c2654adb508f4bc80d820d49751e | 88ee0dafc53fda41b2df6bf8b1016b973a8cb1e5 | refs/heads/master | 2022-04-02T13:42:44.625621 | 2020-01-10T14:57:46 | 2020-01-10T14:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,960 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
_BaseChecker = namedtuple(
    'Checker', 'path filename checkerVersion checkDate elapsedTime nFatal '
               'nError nWarning nUsage')


class Checker(_BaseChecker):
    """Checker related information from epubcheck json data.

    :param str path: Relative path to checked epub
    :param str filename: Filename of checked epub
    :param str checkerVersion: Version string of epubcheck
    :param str checkDate: When the epub was checked
    :param int elapsedTime: processing time
    :param int nFatal: number of fatal errors
    :param int nError: number of errors
    :param int nWarning: number of warnings
    :param int nUsage: number of usage messages
    """

    @classmethod
    def from_data(cls, data):
        """Build a Checker from the ``checker`` key of decoded json data."""
        checker_info = data['checker']
        return cls(**checker_info)
_BaseMeta = namedtuple(
    'Meta',
    'publisher title creator date subject description rights identifier '
    'language nSpines checkSum renditionLayout renditionOrientation '
    'renditionSpread ePubVersion isScripted hasFixedFormat isBackwardCompatible '
    'hasAudio hasVideo charsCount embeddedFonts refFonts hasEncryption '
    'hasSignatures contributors '
)


class Meta(_BaseMeta):
    """EPUB metadata from the `publication` key in epubcheck json data.

    Carries bibliographic fields (publisher, title, creator, date, subject,
    description, rights, identifier, language, contributors), structural
    counters (nSpines, checkSum, charsCount), rendition settings
    (renditionLayout, renditionOrientation, renditionSpread, ePubVersion)
    and capability flags (isScripted, hasFixedFormat, isBackwardCompatible,
    hasAudio, hasVideo, hasEncryption, hasSignatures) plus font lists
    (embeddedFonts, refFonts).  List-valued fields hold lists of strings.
    """

    @classmethod
    def from_data(cls, data):
        """Build a Meta from the ``publication`` key of decoded json data."""
        publication = data['publication']
        return cls(**publication)

    def flatten(self):
        """Return the metadata as a flat tuple with lists joined by ';'."""
        flat = []
        for value in self:
            flat.append(';'.join(value) if isinstance(value, list) else value)
        return tuple(flat)
_BaseMessage = namedtuple('Message', 'id level location message suggestion')


class Message(_BaseMessage):
    """A validation message representing a single error condition.

    :param str id: Error type id (ex: "OPF-049")
    :param str level: Severity of message (ex: "ERROR")
    :param str location: Location of error (ex: <file>:<line>:<column>)
    :param str message: Description of the error condition
    :param str suggestion: How to resolve error condition
    """

    def __str__(self):
        field_values = self._asdict().values()
        return ' | '.join(field_values)

    @classmethod
    def from_data(cls, data):
        """Create a list of Messages from deserialized epubcheck json output.

        :param dict data: Decoded epubcheck json data
        :return list[Message]: List of messages
        """
        filename = data['checker']['filename']
        result = []
        for entry in data['messages']:
            # One Message is produced per reported location.
            for loc in entry['locations']:
                where = loc['path']
                if not where.startswith(filename):
                    where = filename + '/' + where
                # -1 marks "no line/column information".
                if loc['line'] != -1:
                    where += ':{}'.format(loc['line'])
                if loc['column'] != -1:
                    where += ':{}'.format(loc['column'])
                result.append(cls(entry['ID'], entry['severity'], where,
                                  entry['message'], entry['suggestion']))
        return result

    @property
    def short(self):
        """Short string representation of message"""
        return ' - '.join((self.level, self.id, self.location, self.message))
| [
"tp@py7.de"
] | tp@py7.de |
f4458a84db941593b0519628a47b3311209f8d24 | 70dccaf2181de8da40642b1c3e5c28a88ebfa1c8 | /pages/views.py | df0104566319a3ea262db6d6994dd5ad4ae732bd | [
"MIT"
] | permissive | gsw945/grid-admin | 3cbba4ed45a4589d28213d6f54301c781c6991a2 | fbe1d5fceb14e2a26f05ac35f26eeee59a4a4ed8 | refs/heads/master | 2020-05-31T14:48:49.156147 | 2019-06-06T02:21:26 | 2019-06-06T02:21:26 | 190,339,337 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from django.shortcuts import render
# Create your views here.
def view_index(request):
    """Render the site index page with an empty template context."""
    return render(request, 'views/index.html', {})
| [
"gsw945@foxmail.com"
] | gsw945@foxmail.com |
ae11bd94ca8bcefa0163a7f0650331b5cc0d506e | f24bfbc7c7298e84848634c2a69dfe65ff9cfe2d | /python_sandbox_starter/conditionals.py | 5c2a6eb794dd8420ec3ee3ed42fb57ec05483a59 | [] | no_license | ZY1N/pythontutorial | 910d56d3973541a9ed1643fd7cd719c5209ad9c9 | 9b6b5098f4d456afba6b386155ff1936d342ac4b | refs/heads/master | 2020-07-08T10:54:30.613497 | 2019-08-21T19:30:18 | 2019-08-21T19:30:18 | 203,651,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # If/ Else conditions are used to decide to do something based on something being true or false
# Demo values: x is smaller than y, which decides every branch below.
x = 3
y = 10
# Comparison Operators (==, !=, >, <, >=, <=) - Used to compare values
if x > y:
    print(f'{x} is greater than {y}')
#if/else
#if x > y:
#    print(f'{x} is greater than {y}')
#else:
#    print(f'{y} is greater than {x}')
#elif
if x > y:
    print(f'{x} is greater than {y}')
elif x == y:
    print(f'{y} is equal {x}')
else:
    print(f'{y} is greater than {x}')
#nested if
# Logical operators (and, or, not) - Used to combine conditional statements
#if x > 2 and x <= 10:
#    print(f"{x} is greater than 2 and less than 10")
#or
#if x > 2 or x <= 10:
#    print(f"{x} is greater than 2 and less than 10")
#not
#if not x == y:
#    print(f"{x} is greater than 2 and less than 10")
# Membership Operators (in, not in) - Membership operators are used to test if a sequence is presented in an object
numbers = [1,2,3,4,5]
#in
if x in numbers:
    print(x in numbers)  # prints True: 3 is in the list
#not in
if x not in numbers:
    print(x in numbers)  # branch is skipped here, since x IS in numbers
# Identity Operators (is, is not) - Compare the objects, not if they are equal, but if they are actually the same object, with the same memory location:
#is
if x is y:
    print(x is y)
#is not
if x is not y:
    print(x is y)  # prints False: 3 and 10 are different objects
"zhang.yine@husky.neu.edu"
] | zhang.yine@husky.neu.edu |
0321f3960cf46408a6e47fbf0a686d4125623d3a | b2332bac3272501363d141567bdd3c54f4f58140 | /text_classification/UCAS_NLP_TC/data_English/data_01_fasttext.py | 8516dd6bbd67f858e2f599e5ea16904b6f24c61d | [
"Apache-2.0"
] | permissive | q759729997/qyt_clue | a5b9abfb92f5d852febf8ceff465a5b5f0d18d75 | 740df80bf4b58a670f5b3fb34375e505a58b80ee | refs/heads/master | 2022-11-14T22:46:25.957129 | 2020-07-13T01:04:38 | 2020-07-13T01:04:38 | 263,373,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | # coding:utf-8
"""fasttext数据集
"""
import codecs
import os
import sys
import json
import random
from tqdm import tqdm
sys.path.insert(0, './') # 定义搜索路径的优先顺序,序号从0开始,表示最大优先级
import myClue # noqa
print('myClue module path :{}'.format(myClue.__file__)) # 输出测试模块文件位置
from myClue.core import logger # noqa
from myClue.tools.file import read_file_texts # noqa
from myClue.tools.file import init_file_path # noqa
def _write_fasttext_file(texts, output_file_name):
    """Convert translated news rows to fastText format and write them out.

    Each input line is a JSON row with a ``label`` and Baidu-translation
    results; the English segments (``dst`` fields) are joined into one line
    written as the fastText label prefix, a tab, then the text.  Rows with
    no translated text are skipped.
    """
    with codecs.open(output_file_name, mode='w', encoding='utf8') as fw:
        for text in tqdm(texts):
            row_data = json.loads(text)
            label = row_data['label']
            eng_texts = [trans_result['dst'] for trans_result in row_data['trans_results']]
            news_content = ' '.join(eng_texts)
            if len(news_content) == 0:
                continue
            fw.write('__label__{}\t{}\n'.format(label, news_content))


if __name__ == "__main__":
    train_file_config = {
        'train': './data/UCAS_NLP_TC/data_baidu_trans/train_trans.json',
        'dev': './data/UCAS_NLP_TC/data_baidu_trans/dev_trans.json',
        'test': './data/UCAS_NLP_TC/data_baidu_trans/test_trans.json',
    }
    output_path = './data/UCAS_NLP_TC/data_English/data_01_fasttext'
    init_file_path(output_path)
    # Train and dev rows are shuffled together into one training file.
    texts = read_file_texts(train_file_config['train'])
    texts.extend(read_file_texts(train_file_config['dev']))
    random.shuffle(texts)
    _write_fasttext_file(texts, os.path.join(output_path, 'train_data.txt'))
    # Test rows keep their original order (shuffle intentionally disabled).
    texts = read_file_texts(train_file_config['test'])
    _write_fasttext_file(texts, os.path.join(output_path, 'test_data.txt'))
| [
"759729997@qq.com"
] | 759729997@qq.com |
3254cd43b56ed586998ec7c6e815f7d1ec7eb526 | 68b768aef871a9fda01f3a62ec08e7f6c6950edc | /st2mistral/functions/json_escape.py | 4f7813323972801ce928ffa1d8aeebe919fcacd2 | [
"Apache-2.0"
] | permissive | StackStorm/st2mistral | af3098ad68abafea20e688e84a0696fb686c9dec | 0dd3510a6f7ffda36666cf0d6da461f45d961e0d | refs/heads/master | 2023-05-31T21:23:40.398213 | 2020-09-15T10:49:15 | 2020-09-15T10:49:15 | 25,490,223 | 2 | 13 | null | 2020-09-15T10:49:17 | 2014-10-20T22:51:57 | Python | UTF-8 | Python | false | false | 1,354 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
__all__ = [
'json_escape'
]
def json_escape(context, value):
"""Adds escape sequences to problematic characters in the string
This function simply passes the value to json.dumps
as a convenient way of escaping characters in it
However, before returning, we want to strip the double
quotes at the ends of the string, since we're not looking
for a valid JSON value at the end, just conveniently using
this function to do the escaping. The value could be any
arbitrary value
"""
return json.dumps(value).strip('"')
| [
"oswaltm@brocade.com"
] | oswaltm@brocade.com |
3d753eee73c94b858c52d1e6d561825f5839fb8d | a70697ef62978117467695fd3507e4d08e186ab4 | /source/res/scripts/client/gui/scaleform/daapi/view/lobby/server_events/events_helpers.py | 339785d042db63990f89e8ea039173f16cc705a1 | [] | no_license | chipsi007/WorldOfTanks-Decompiled | d208678a6f2f094b02281d09ecc30f3e32725ce9 | 3b9dc21321429e4dee146c23c7250f2c62757937 | refs/heads/master | 2020-03-19T01:21:09.883951 | 2018-05-04T13:19:56 | 2018-05-04T13:19:56 | 135,538,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,682 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/server_events/events_helpers.py
import operator
from collections import defaultdict
import BigWorld
import constants
from constants import EVENT_TYPE
from gui import makeHtmlString
from gui.Scaleform.daapi.view.lobby.server_events.awards_formatters import OldStyleBonusesFormatter
from gui.Scaleform.genConsts.QUESTS_ALIASES import QUESTS_ALIASES
from gui.Scaleform.locale.PERSONAL_MISSIONS import PERSONAL_MISSIONS
from gui.Scaleform.locale.QUESTS import QUESTS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.server_events import formatters, conditions, settings as quest_settings
from gui.server_events.events_helpers import EventInfoModel, MISSIONS_STATES, QuestInfoModel
from gui.shared.formatters import text_styles
from helpers import i18n, int2roman, time_utils, dependency
from personal_missions import PM_BRANCH
from quest_xml_source import MAX_BONUS_LIMIT
from skeletons.gui.shared import IItemsCache
_AWARDS_PER_PAGE = 3
FINISH_TIME_LEFT_TO_SHOW = time_utils.ONE_DAY
START_TIME_LIMIT = 5 * time_utils.ONE_DAY
class _EventInfo(EventInfoModel):
    """Presentation model that flattens a server event into flash-ready dicts.

    Base class: progress/bonus hooks return empty values here and are
    overridden by quest-specific subclasses below.
    """

    def getInfo(self, svrEvents, pCur=None, pPrev=None, noProgressInfo=False):
        """Build the quest-list renderer dict for this event.

        :param svrEvents: mapping of all known server events
        :param pCur: current progress data, if any
        :param pPrev: previous progress data, if any
        :param noProgressInfo: when True, bonus/progress fields are blanked
        """
        if noProgressInfo:
            # Decompiled artifact: _getStatus()[1] is computed and discarded.
            status, _ = MISSIONS_STATES.NONE, self._getStatus()[1]
            bonusCount = self.NO_BONUS_COUNT
            qProgCur, qProgTot, qProgbarType, tooltip = (0,
             0,
             formatters.PROGRESS_BAR_TYPE.NONE,
             None)
        else:
            bonusCount = self._getBonusCount(pCur)
            status, _ = self._getStatus(pCur)
            qProgCur, qProgTot, qProgbarType, tooltip = self._getProgressValues(svrEvents, pCur, pPrev)
        isAvailable, _ = self.event.isAvailable()
        return {'questID': str(self.event.getID()),
         'eventType': self.event.getType(),
         'IGR': self.event.isIGR(),
         'taskType': self.event.getUserType(),
         'tasksCount': bonusCount,
         'progrBarType': qProgbarType,
         'progrTooltip': tooltip,
         'maxProgrVal': qProgTot,
         'currentProgrVal': qProgCur,
         'rendererType': QUESTS_ALIASES.RENDERER_TYPE_QUEST,
         'timerDescription': self.getTimerMsg(),
         'status': status,
         'description': self.event.getUserName(),
         'tooltip': TOOLTIPS.QUESTS_RENDERER_LABEL,
         'isSelectable': True,
         'isNew': quest_settings.isNewCommonEvent(self.event),
         'isAvailable': isAvailable}

    def getPostBattleInfo(self, svrEvents, pCur, pPrev, isProgressReset, isCompleted):
        """Build the post-battle window dict, or None when nothing progressed."""
        index = 0
        progresses = []
        if not isProgressReset and not isCompleted:
            # Collect per-condition progress deltas earned in this battle.
            for cond in self.event.bonusCond.getConditions().items:
                if isinstance(cond, conditions._Cumulativable):
                    for _, (curProg, totalProg, diff, _) in cond.getProgressPerGroup(pCur, pPrev).iteritems():
                        label = cond.getUserString()
                        if not diff or not label:
                            continue
                        index += 1
                        progresses.append({'progrTooltip': None,
                         'progrBarType': formatters.PROGRESS_BAR_TYPE.SIMPLE,
                         'maxProgrVal': totalProg,
                         'currentProgrVal': curProg,
                         'description': '%d. %s' % (index, label),
                         'progressDiff': '+ %s' % BigWorld.wg_getIntegralFormat(diff)})
            if not progresses:
                return
        alertMsg = ''
        if isProgressReset:
            alertMsg = i18n.makeString('#quests:postBattle/progressReset')
        # Awards are only shown when the quest completed without a reset.
        _, awards = ('', None)
        if not isProgressReset and isCompleted:
            awards = self._getBonuses(svrEvents)
        return {'title': self.event.getUserName(),
         'awards': awards,
         'progressList': progresses,
         'alertMsg': alertMsg,
         'questInfo': self.getInfo(svrEvents, pCur, pPrev),
         'personalInfo': [],
         'questType': self.event.getType()}

    @classmethod
    def _getEventsByIDs(cls, ids, svrEvents):
        """Return the subset of svrEvents whose IDs are in *ids*."""
        result = {}
        for eID in ids:
            if eID in svrEvents:
                result[eID] = svrEvents[eID]
        return result

    def _getBonusCount(self, pCur=None):
        # Base events expose no bonus counter.
        return self.NO_BONUS_COUNT

    def _getProgressValues(self, svrEvents=None, pCur=None, pPrev=None):
        # Base events expose no progress bar.
        return (0,
         0,
         formatters.PROGRESS_BAR_TYPE.NONE,
         None)

    def _getBonuses(self, svrEvents, bonuses=None):
        # Base events expose no award list.
        return []
class _QuestInfo(_EventInfo, QuestInfoModel):
    """Info model for regular quests: status text, bonuses and progress bars."""
    # Max number of sibling group names shown in the progress tooltip note.
    PROGRESS_TOOLTIP_MAX_ITEMS = 4
    itemsCache = dependency.descriptor(IItemsCache)

    def _getStatus(self, pCur=None):
        """Return a (state, user-facing message) pair for this quest."""
        if self.event.isCompleted(progress=pCur):
            if self.event.bonusCond.isDaily():
                msg = self._getCompleteDailyStatus('#quests:details/status/completed/daily')
            else:
                msg = i18n.makeString('#quests:details/status/completed')
            return (MISSIONS_STATES.COMPLETED, msg)
        else:
            isAvailable, errorMsg = self.event.isAvailable()
            if not isAvailable:
                timeLeftInfo = self.event.getNearestActivityTimeLeft()
                if errorMsg in ('in_future', 'invalid_weekday', 'invalid_time_interval') and timeLeftInfo is not None:
                    startTimeLeft = timeLeftInfo[0]
                    # Far-off start times render as a date, near ones as a countdown.
                    if startTimeLeft > START_TIME_LIMIT:
                        fmt = self._getDateTimeString(self.event.getStartTime())
                    else:
                        fmt = self._getTillTimeString(startTimeLeft)
                    msg = i18n.makeString('#quests:details/status/notAvailable/%s' % errorMsg, time=fmt)
                else:
                    msg = i18n.makeString('#quests:details/status/notAvailable/%s' % errorMsg)
                return (MISSIONS_STATES.NOT_AVAILABLE, msg)
            bonus = self.event.bonusCond
            bonusLimit = bonus.getBonusLimit()
            if bonusLimit is None or bonusLimit >= MAX_BONUS_LIMIT:
                msg = i18n.makeString(QUESTS.DETAILS_HEADER_COMPLETION_UNLIMITED)
            else:
                # Pick the completion header matching daily/single and grouping.
                groupBy = bonus.getGroupByValue()
                if bonus.isDaily():
                    key = QUESTS.DETAILS_HEADER_COMPLETION_DAILY
                    if groupBy is not None:
                        key = '#quests:details/header/completion/daily/groupBy%s' % groupBy.capitalize()
                else:
                    key = QUESTS.DETAILS_HEADER_COMPLETION_SINGLE
                    if groupBy is not None:
                        key = '#quests:details/header/completion/single/groupBy%s' % groupBy.capitalize()
                msg = i18n.makeString(key, count=bonusLimit)
            return (MISSIONS_STATES.NONE, msg)

    def _getBonuses(self, svrEvents, bonuses=None):
        """Format quest bonuses; falls back to a 'not available' text block."""
        bonuses = bonuses or self.event.getBonuses()
        result = OldStyleBonusesFormatter(self.event).getFormattedBonuses(bonuses)
        return formatters.todict(result) if result else formatters.todict([formatters.packTextBlock(text_styles.alert('#quests:bonuses/notAvailable'))])

    def _getBonusCount(self, pCur=None):
        """Return the bonus counter, or NO_BONUS_COUNT when it is hidden."""
        if not self.event.isCompleted(progress=pCur):
            bonusLimit = self.event.bonusCond.getBonusLimit()
            # Single-shot, ungrouped quests do not show a counter.
            if bonusLimit is None or bonusLimit > 1 or self.event.bonusCond.getGroupByValue() is not None:
                return self.event.getBonusCount(progress=pCur)
        return self.NO_BONUS_COUNT

    def _getProgressValues(self, svrEvents=None, pCur=None, pPrev=None):
        """Compute (current, total, bar type, tooltip) for the progress bar."""
        current, total, progressType, tooltip = (0,
         0,
         formatters.PROGRESS_BAR_TYPE.NONE,
         None)
        groupBy = self.event.bonusCond.getGroupByValue()
        condsRoot = self.event.bonusCond.getConditions()
        if self.event.isCompleted(pCur) or condsRoot.isEmpty():
            return (current,
             total,
             progressType,
             tooltip)
        else:
            # Gather unfinished cumulative progress per grouping key.
            countOfCumulatives = 0
            cumulatives = defaultdict(list)
            for cond in condsRoot.items:
                if isinstance(cond, conditions._Cumulativable):
                    countOfCumulatives += 1
                    for groupByKey, (cur, tot, _, isCompleted) in cond.getProgressPerGroup(pCur, pPrev).iteritems():
                        if not isCompleted:
                            cumulatives[groupByKey].append((cur, tot))
            if groupBy is None and countOfCumulatives == 1 and cumulatives[None]:
                # Single ungrouped condition: show its raw progress directly.
                (current, total), progressType = cumulatives[None][0], formatters.PROGRESS_BAR_TYPE.SIMPLE
            else:
                # Otherwise average each group's completion ratios to a percentage
                # and show the best-progressed group.
                avgProgressesPerGroup = []
                for groupByKey, values in cumulatives.iteritems():
                    progressesSum = sum([ c / float(t) for c, t in values ])
                    avgProgressesPerGroup.append((groupByKey, int(round(100.0 * progressesSum / len(values))), 100))
                avgProgresses = sorted(avgProgressesPerGroup, key=operator.itemgetter(1), reverse=True)
                if avgProgresses:
                    (groupByKey, current, total), nearestProgs = avgProgresses[0], avgProgresses[1:]
                    progressType = formatters.PROGRESS_BAR_TYPE.COMMON
                    if groupBy is not None and groupByKey is not None:
                        # Resolve a display name for the leading group and the
                        # runners-up listed in the tooltip note.
                        name, names = ('', '')
                        if groupBy == 'vehicle':
                            name = self.itemsCache.items.getItemByCD(groupByKey).shortUserName
                            names = [ self.itemsCache.items.getItemByCD(intCD).shortUserName for intCD, _, __ in nearestProgs ]
                        elif groupBy == 'nation':
                            name = i18n.makeString('#menu:nations/%s' % groupByKey)
                            names = [ i18n.makeString('#menu:nations/%s' % n) for n, _, __ in nearestProgs ]
                        elif groupBy == 'class':
                            name = i18n.makeString('#menu:classes/%s' % groupByKey)
                            names = [ i18n.makeString('#menu:classes/%s' % n) for n, _, __ in nearestProgs ]
                        elif groupBy == 'level':
                            # Keys look like 'level 5'; rendered as roman numerals.
                            def makeLvlStr(lvl):
                                return i18n.makeString(QUESTS.TOOLTIP_PROGRESS_GROUPBY_NOTE_LEVEL, int2roman(lvl))
                            name = makeLvlStr(int(groupByKey.replace('level ', '')))
                            names = [ makeLvlStr(int(l.replace('level ', ''))) for l, _, __ in nearestProgs ]
                        note = None
                        if names:
                            note = makeHtmlString('html_templates:lobby/quests/tooltips/progress', 'note', {'names': ', '.join(names[:self.PROGRESS_TOOLTIP_MAX_ITEMS])})
                        tooltip = {'header': i18n.makeString(QUESTS.TOOLTIP_PROGRESS_GROUPBY_HEADER),
                         'body': makeHtmlString('html_templates:lobby/quests/tooltips/progress', 'body', {'name': name}),
                         'note': note}
            return (current,
             total,
             progressType,
             tooltip)
class _PersonalMissionInfo(_QuestInfo):
    """Info model for personal missions (separate main/additional conditions)."""

    def _getBonuses(self, svrEvents, _=None):
        """Return a (main, additional) pair of formatted bonus lists."""
        mainBonuses = self.event.getBonuses(isMain=True)
        addBonuses = self.event.getBonuses(isMain=False)
        return (_QuestInfo._getBonuses(self, None, bonuses=mainBonuses), _QuestInfo._getBonuses(self, None, bonuses=addBonuses))

    def getPostBattleInfo(self, svrEvents, pCur, pPrev, isProgressReset, isCompleted):
        """Build the post-battle dict; *isCompleted* is an indexable pair of
        (main condition done, additional condition done) flags."""

        def _packCondition(titleKey, text):
            # Title styled as a middle title with the condition text below it.
            return '%s\n%s' % (text_styles.middleTitle(i18n.makeString(titleKey)), text_styles.main(text))

        def _packStatus(completed):
            return 'done' if completed else 'notDone'

        return {'title': self.event.getUserName(),
         'questInfo': self.getInfo(svrEvents),
         'awards': None,
         'progressList': [],
         'alertMsg': '',
         'personalInfo': [{'statusStr': _packStatus(isCompleted[0]),
                           'text': _packCondition(PERSONAL_MISSIONS.TASKDETAILSVIEW_MAINCONDITIONS, self.event.getUserMainCondition())}, {'statusStr': _packStatus(isCompleted[1]),
                           'text': _packCondition(PERSONAL_MISSIONS.TASKDETAILSVIEW_ADDITIONALCONDITIONS, self.event.getUserAddCondition())}],
         'questType': self.event.getType()}
class _MotiveQuestInfo(_QuestInfo):
    """Info model for motive quests; adds a link-button visibility flag."""

    def getPostBattleInfo(self, svrEvents, pCur, pPrev, isProgressReset, isCompleted):
        # The link button is shown while any motive quest is still open.
        motiveQuests = [ q for q in svrEvents.values() if q.getType() == EVENT_TYPE.MOTIVE_QUEST and not q.isCompleted() ]
        info = super(_MotiveQuestInfo, self).getPostBattleInfo(svrEvents, pCur, pPrev, isProgressReset, isCompleted)
        info.update({'isLinkBtnVisible': len(motiveQuests) > 0})
        return info
def getEventInfoData(event):
    """Return the info-model wrapper that matches the event's type."""
    if event.getType() == constants.EVENT_TYPE.PERSONAL_MISSION:
        return _PersonalMissionInfo(event)
    elif event.getType() == constants.EVENT_TYPE.MOTIVE_QUEST:
        return _MotiveQuestInfo(event)
    elif event.getType() in constants.EVENT_TYPE.QUEST_RANGE:
        return _QuestInfo(event)
    else:
        return _EventInfo(event)
def getEventPostBattleInfo(event, svrEvents=None, pCur=None, pPrev=None, isProgressReset=False, isCompleted=False):
    """Build the post-battle info dict for *event* via its info model."""
    eventInfo = getEventInfoData(event)
    return eventInfo.getPostBattleInfo(svrEvents, pCur or {}, pPrev or {}, isProgressReset, isCompleted)
_questBranchToTabMap = {PM_BRANCH.REGULAR: QUESTS_ALIASES.SEASON_VIEW_TAB_RANDOM}
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
181d1d5084af6522c9e3c33e95be5e086608176e | a38b4c82feabe5be163ad2eeb5a46f38aeb88d77 | /regressions/checkPageRank.py | 6980c594526fb6e07683fdcf02458c065697e1c9 | [
"Apache-2.0"
] | permissive | zzmjohn/vertexAPI2 | a9ae240c2fde55dc5be4a96f0017e8a2e204b258 | cf59a50d1239f3ea892a7473f8175958c7ac0051 | refs/heads/master | 2020-12-29T01:23:04.602915 | 2013-12-16T18:32:17 | 2013-12-16T18:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | #!/usr/bin/python2
#script to compare pagerank outputs
import sys
from math import fabs
#load ranks for file fn
def load( f ):
    """Parse '<vertex id> <rank>' lines into a {int id: float rank} dict."""
    rank_by_vertex = {}
    for row in f:
        vertex_str, rank_str = row.strip().split()
        rank_by_vertex[int(vertex_str)] = float(rank_str)
    return rank_by_vertex
def compare( tol_vals, tol_allowed, test, gold ):
    """Histogram |test - gold| per vertex into tolerance buckets and check
    the fraction falling in each bucket stays within tol_allowed."""
    # histo_counts[0] counts diffs below tol_vals[0]; bucket i counts diffs
    # in [tol_vals[i-1], tol_vals[i]); the last bucket anything larger.
    histo_counts = [0] * (len(tol_vals) + 1)
    for vid, val in test.items():
        try:
            diff = fabs( gold[ vid ] - val )
            # Walk down while the diff is still below the current threshold.
            pos = len(tol_vals) - 1
            while pos >= 0 and diff < tol_vals[pos]:
                pos -= 1
            histo_counts[pos + 1] += 1
        except KeyError:
            print "vid ", vid, " is in test but not in gold"
            #this is not an error, we just output all vertices
            #but powergraph does not
            #return False
    # Convert bucket counts to fractions of all tested vertices and
    # reject as soon as one exceeds its allowance.
    totalItems = float(len(test))
    for idx in range(len(histo_counts)):
        histo_counts[idx] /= totalItems
        if histo_counts[idx] > tol_allowed[idx]:
            print "Percentage too high: ", tol_allowed[idx], histo_counts[idx]
            return False
    return True
if __name__ == '__main__':
    # Usage: checkPageRank.py <test ranks file> <gold ranks file>
    if len( sys.argv ) != 3:
        print "Usage: checkPageRank.py test gold"
        sys.exit(1)
    test = sys.argv[1]
    gold = sys.argv[2]
    td = load( open(test) )
    gd = load( open(gold) )
    #this means we allow up to 100% of values differing by less than .0001
    #.9% of values by more than .0001 and less than .001
    #.09% of values by more than .001 and less than .01
    #.009% of values by more than .01 and less than .1
    #0 values more than .1
    # Exit non-zero so the regression harness flags the mismatch.
    if not compare( [.0001, .001, .01, .1, 1, 10], [1., 1e-2, 5e-3, 5e-4, 5e-5, 5e-6, 0], td, gd ):
        sys.exit(1)
| [
"a@b.c"
] | a@b.c |
488f7e68087720526b63ab477cf97540b233164b | 1ae8c6a7baa936b7a6b0ee6c70c4af7513be9af0 | /mysite/polls/admin.py | dcba3090f8b37b5e9be842522b48f80af3c0baea | [] | no_license | lincolnn/Bioinformatics | 19c00c5d829f2806336c48b4127b4f18570e1807 | ea26870995bc1f16a03645ba5391e5079476e61e | refs/heads/master | 2020-07-04T02:32:18.706764 | 2011-08-10T19:13:10 | 2011-08-10T19:13:10 | 2,064,346 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | from polls.models import Poll
from django.contrib import admin
from polls.models import Choice
class ChoiceInline(admin.TabularInline):
    # Edit Choices inline on the Poll page, with 3 extra blank rows.
    model = Choice
    extra = 3
class PollAdmin(admin.ModelAdmin):
    # Edit form: question first, then a collapsible publication-date section.
    fieldsets = [
        (None, {'fields': ['question']}),
        ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    # Choices are edited inline on the poll page.
    inlines = [ChoiceInline]
    # Change-list columns, filtering, search and date drill-down.
    list_display = ('question', 'pub_date',
     'was_published_today')
    list_filter = ['pub_date']
    search_fields = ['question']
    date_hierarchy = 'pub_date'
admin.site.register(Poll, PollAdmin)
| [
"lincolnn07@gmail.com"
] | lincolnn07@gmail.com |
b5aab17911c032c7a93a159e063628fc4536e61e | bcb56cc126ea1885eb5ecc920884e2e331def045 | /Part A/Déjà Vu.py | a510d4a81463d13148adb3624a1c08c02197962b | [] | no_license | priyanshkedia04/Codeforces-Solutions | 2d11cb7b8329fe658f983b7212c17fc89fd784f0 | a5197c633bf4c3238f48bfb5b308144c2ffba473 | refs/heads/main | 2023-06-06T13:10:13.787843 | 2021-07-01T14:06:52 | 2021-07-01T14:06:52 | 382,000,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | for i in range(int(input())):
    s = input()
    # Try prepending or appending a single 'a'; a candidate is valid
    # when the result is NOT a palindrome.
    temp1 = s + 'a'
    temp2 = 'a' + s
    if temp2 != temp2[::-1]:
        print('YES')
        print(temp2)
    elif temp1 != temp1[::-1]:
        print('YES')
        print(temp1)
    else:
        # Both candidates are palindromes — presumably only possible when
        # s consists entirely of 'a' characters.
        print('NO')
"noreply@github.com"
] | priyanshkedia04.noreply@github.com |
a373c25612d158c45a37dc78cace10f973142be9 | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/datacatalog/v1/get_taxonomy_iam_policy.py | f0fed1764ad2f9761170c7b668b41b7acf80b119 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,399 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTaxonomyIamPolicyResult',
'AwaitableGetTaxonomyIamPolicyResult',
'get_taxonomy_iam_policy',
'get_taxonomy_iam_policy_output',
]
@pulumi.output_type
class GetTaxonomyIamPolicyResult:
    """Result of the getTaxonomyIamPolicy invoke: bindings, etag and version."""
    def __init__(__self__, bindings=None, etag=None, version=None):
        # Validate argument types, then store them via pulumi.set so the
        # @pulumi.output_type machinery can expose them as getters.
        if bindings and not isinstance(bindings, list):
            raise TypeError("Expected argument 'bindings' to be a list")
        pulumi.set(__self__, "bindings", bindings)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if version and not isinstance(version, int):
            raise TypeError("Expected argument 'version' to be a int")
        pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def bindings(self) -> Sequence['outputs.BindingResponse']:
        """
        Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
        """
        return pulumi.get(self, "bindings")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def version(self) -> int:
        """
        Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
        """
        return pulumi.get(self, "version")
class AwaitableGetTaxonomyIamPolicyResult(GetTaxonomyIamPolicyResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Never actually yields (the `if False` is unreachable); it simply
        # returns the already-resolved result to the awaiting coroutine.
        if False:
            yield self
        return GetTaxonomyIamPolicyResult(
            bindings=self.bindings,
            etag=self.etag,
            version=self.version)
def get_taxonomy_iam_policy(location: Optional[str] = None,
                            project: Optional[str] = None,
                            taxonomy_id: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTaxonomyIamPolicyResult:
    """
    Gets the IAM policy for a policy tag or a taxonomy.

    :param location: forwarded as the `location` invoke argument
    :param project: forwarded as the `project` invoke argument
    :param taxonomy_id: forwarded as the `taxonomyId` invoke argument
    :param opts: options controlling how the invoke is executed
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['project'] = project
    __args__['taxonomyId'] = taxonomy_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:datacatalog/v1:getTaxonomyIamPolicy', __args__, opts=opts, typ=GetTaxonomyIamPolicyResult).value

    return AwaitableGetTaxonomyIamPolicyResult(
        bindings=__ret__.bindings,
        etag=__ret__.etag,
        version=__ret__.version)
# Output-typed variant generated by lifting get_taxonomy_iam_policy; the
# decorator supplies the implementation, so the body is intentionally `...`.
@_utilities.lift_output_func(get_taxonomy_iam_policy)
def get_taxonomy_iam_policy_output(location: Optional[pulumi.Input[str]] = None,
                                   project: Optional[pulumi.Input[Optional[str]]] = None,
                                   taxonomy_id: Optional[pulumi.Input[str]] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTaxonomyIamPolicyResult]:
    """
    Gets the IAM policy for a policy tag or a taxonomy.
    """
    ...
| [
"noreply@github.com"
] | 24601.noreply@github.com |
7e79b503b18d0387b9dfa5034bb0f9a4e2e53d84 | 48d1002394d233cf5932c7ef69300400af79118a | /examples/widgets/effectwidget.py | aeaf2d149fa96c8762405a9a404318773e80f479 | [
"LGPL-2.1-only",
"MIT",
"Apache-2.0"
] | permissive | kivy/kivy | ba2668bffe4e125fd1c5aace54f671343802850e | ca1b918c656f23e401707388f25f4a63d9b8ae7d | refs/heads/master | 2023-09-04T02:27:05.311875 | 2023-08-26T08:00:20 | 2023-08-26T08:00:20 | 1,049,095 | 16,076 | 4,161 | MIT | 2023-09-09T07:55:18 | 2010-11-03T20:27:32 | Python | UTF-8 | Python | false | false | 5,485 | py | '''
Example usage of the effectwidget.
Currently highly experimental.
'''
from kivy.app import App
from kivy.uix.effectwidget import EffectWidget
from kivy.uix.spinner import Spinner
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.uix.effectwidget import (MonochromeEffect,
InvertEffect,
ChannelMixEffect,
ScanlinesEffect,
FXAAEffect,
PixelateEffect,
HorizontalBlurEffect,
VerticalBlurEffect)
class ComparisonWidget(EffectWidget):
    """EffectWidget whose demo canvas content is supplied by the kv rule below."""
    pass
class EffectSpinner(Spinner):
    """Spinner whose values (the available effect names) are set in kv."""
    pass
class SpinnerRow(BoxLayout):
    """A row of effect spinners driving one EffectWidget's effect chain."""

    # The EffectWidget whose ``effects`` list this row controls (set in kv).
    effectwidget = ObjectProperty()

    # Spinner text -> effect class. 'none' (and any unknown text, e.g. the
    # row's Label) maps to no effect at all, exactly as before.
    _EFFECT_CLASSES = {
        'fxaa': FXAAEffect,
        'monochrome': MonochromeEffect,
        'invert': InvertEffect,
        'mix': ChannelMixEffect,
        'blur_h': HorizontalBlurEffect,
        'blur_v': VerticalBlurEffect,
        'postprocessing': ScanlinesEffect,
        'pixelate': PixelateEffect,
    }

    def update_effectwidget(self, *args):
        """Rebuild the target widget's effect list from the spinners."""
        selected = []
        # ``children`` is stored newest-first; reverse to on-screen order.
        for child in reversed(self.children):
            effect_cls = self._EFFECT_CLASSES.get(child.text)
            if effect_cls is not None:
                selected.append(effect_cls())
        if self.effectwidget:
            self.effectwidget.effects = selected
example = Builder.load_string('''
#:import Vector kivy.vector.Vector
BoxLayout:
orientation: 'vertical'
FloatLayout:
ComparisonWidget:
pos_hint: {'x': 0, 'y': 0}
size_hint: 0.5, 1
id: effect1
ComparisonWidget:
pos_hint: {'x': pos_slider.value, 'y': 0}
size_hint: 0.5, 1
id: effect2
background_color: (rs.value, gs.value, bs.value, als.value)
SpinnerRow:
effectwidget: effect1
text: 'left effects'
SpinnerRow:
effectwidget: effect2
text: 'right effects'
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'control overlap:'
Slider:
min: 0
max: 0.5
value: 0.5
id: pos_slider
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'right bg r,g,b,a'
Slider:
min: 0
max: 1
value: 0
id: rs
Slider:
min: 0
max: 1
value: 0
id: gs
Slider:
min: 0
max: 1
value: 0
id: bs
Slider:
min: 0
max: 1
value: 0
id: als
<ComparisonWidget>:
Widget:
canvas:
Color:
rgba: 1, 0, 0, 1
Ellipse:
pos: Vector(self.pos) + 0.5*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 1, 0.3, 1
Ellipse:
pos: Vector(self.pos) + 0.1*Vector(self.size)
size: 0.6*Vector(self.size)
Color:
rgba: 0.5, 0.3, 0.8, 1
Ellipse:
pos: Vector(self.pos) + Vector([0, 0.6])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 1, 0.8, 0.1, 1
Ellipse:
pos: Vector(self.pos) + Vector([0.5, 0])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 0, 0.8, 1
Line:
points:
[self.x, self.y,
self.x + self.width, self.y + 0.3*self.height,
self.x + 0.2*self.width, self.y + 0.1*self.height,
self.x + 0.85*self.width, self.y + 0.72*self.height,
self.x + 0.31*self.width, self.y + 0.6*self.height,
self.x, self.top]
width: 1
Color:
rgba: 0, 0.9, 0.1, 1
Line:
points:
[self.x + self.width, self.y + self.height,
self.x + 0.35*self.width, self.y + 0.6*self.height,
self.x + 0.7*self.width, self.y + 0.15*self.height,
self.x + 0.2*self.width, self.y + 0.22*self.height,
self.x + 0.3*self.width, self.y + 0.92*self.height]
width: 2
<SpinnerRow>:
orientation: 'horizontal'
size_hint_y: None
height: dp(40)
text: ''
Label:
text: root.text
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
<EffectSpinner>:
text: 'none'
values:
['none', 'fxaa', 'monochrome',
'invert', 'mix',
'blur_h', 'blur_v',
'postprocessing', 'pixelate',]
''')
class EffectApp(App):
    """Minimal app wrapper around the kv-built comparison UI."""

    def build(self):
        # Root widget is the tree created by Builder.load_string above.
        return example


EffectApp().run()
| [
"alexanderjohntaylor@gmail.com"
] | alexanderjohntaylor@gmail.com |
ca1805568a26ec5c40307b8c4a03f203ec9fca14 | 2f122b441d9dcb103e58d395885ad9835ef71abf | /return.py | 2fac3f65fc262bedcec1ec594b6490427faad4c8 | [] | no_license | pattharaphon/My-Script | 0e5ef27ea42e07b834051b8cb15569bf0eff2bf9 | 911fc252aeb5db105fe05c302820ac4a6eaae57b | refs/heads/master | 2020-07-25T09:01:59.039646 | 2019-09-13T10:13:56 | 2019-09-13T10:13:56 | 208,239,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | num = input("Enter An Integer:")
def square(num):
    """Return num*num for a digit string `num`, else 'Invalid Entry'."""
    # str.isdigit() rejects signs, decimals and the empty string alike.
    if num.isdigit():
        return int(num) ** 2
    return "Invalid Entry"
print(num,"SquaredIs:",square(num))  # echo the input and its square (or the error message)
| [
"noreply@github.com"
] | pattharaphon.noreply@github.com |
316cf0246d388c1bd08c8b61cc5d9a625c3825a5 | 2580a1025b81838d28217aea705a1c324c2ba967 | /firstsite/local_settings.py | ee85c33ef0dbbaf7b67e7e208e545b443ae4777f | [] | no_license | Kurmaev/firstsite | c7e90a195dd5d73c9d3d94ecf17c0b04dc450c6e | 783253a305651cd39d2473bbf485af0f6d392d87 | refs/heads/master | 2016-08-07T14:06:24.678986 | 2013-06-01T12:38:28 | 2013-06-01T12:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | import os
from settings import MEDIA_ROOT
mydb = os.path.join(os.path.dirname(__file__),'../../mydb1.db')
MEDIA_PIC = os.path.join(MEDIA_ROOT, 'images/')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': mydb, # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'eventsite', # Or path to database file if using sqlite3.
'USER': 'djangouser', # Not used with sqlite3.
'PASSWORD': 'pass28577', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
'TEST_CHARSET': 'utf8'
}
}
| [
"bigmozgizerg@gmail.com"
] | bigmozgizerg@gmail.com |
4ecd6ca5c60b50e7658fd8758fd04450d4852f74 | eb8ff670ae56b63ceabb7c50a7e3961c7dad48e8 | /code_scanner/code_scanner/const.py | 312448f888e1b6bfe88bcab3128ed993ab329e11 | [
"MIT"
] | permissive | stargrep/rmm-utils | d387541d577666847a3a44f6127a5b858554c3bd | 55725cb117015bbe35653120779c53ff39a999bc | refs/heads/master | 2022-11-09T08:21:52.508901 | 2020-06-21T15:56:59 | 2020-06-21T15:56:59 | 267,919,168 | 0 | 0 | MIT | 2020-06-12T21:30:52 | 2020-05-29T17:42:00 | Python | UTF-8 | Python | false | false | 1,165 | py | from pathlib import Path
APP_VERSION = "0.2.4"
APP_NAME = "code-scanner"
APP_TITLE = "SCANNER"
IGNORED_FOLDER_START_PATTERN = (".", "env", "_", "__", "dist", "build")
TEST_FOLDER_START_PATTERN = ("tests")
IGNORED_FILE_START_PATTERN = (".", "license")
CONFIG_SUFFIX = (".json", ".yaml", ".config")
DOCUMENT_SUFFIX = (".json", ".md", ".txt")
PYTHON_SOURCE_SUFFIX = (".py", ".c", ".sql")
JAVA_SOURCE_SUFFIX = (".java", ".sql")
DEFAULT_OUTCOME_PATH = ".__scanned_result.txt"
TEMPLATE_STANDARD_OUTCOME = """
--------------------------------------------------
-------------- start with code_scanner -----------
v{{app_version}}
{{current_time}}
Total Files #: {{total_files}}
Python Source Files #: {{python_files}}
Source Lines (w/o Comments): {{src_total_lines}} ({{src_logic_lines}}) Lines {{src_logic_lines/src_total_lines*100//1}}%
Total Lines (w/o Comments): {{total_lines}} ({{logic_lines}}) Lines {{logic_lines/total_lines*100//1}}%
Non-tests Ratio (w/o Comments): {{src_total_lines/total_lines*100//1}}% ({{src_logic_lines/logic_lines*100//1}}%)
--------------------- end ------------------------
--------------------------------------------------
"""
| [
"mikenyc1207@gmail.com"
] | mikenyc1207@gmail.com |
6643954eaa122b23f49c8f4fdebb6ab840d5aabe | 750ca776e1ce2f896535bdf2dfd681502782207c | /guyj319/B2B_v1/myadmin/manage_deal.py | 2ff032c70b3ea7054c2e62a58618ea22738b6261 | [] | no_license | songting77/newB2B_v1 | 8c62bdee66a09d8944121cf683040325a15af3d0 | 9e47e4eb0e65a0a849cc351c79bc0f2b07ac37ae | refs/heads/master | 2020-03-27T00:57:22.578683 | 2018-02-27T07:41:52 | 2018-02-27T07:41:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,895 | py | #coding=utf-8
from myadmin.models import manage_deal
from car.models import car_table
from car.models import car_table_people
from account.models import user_info_table
from account.models import user_info_people
import datetime
class Deal(object):
    """Admin helpers for querying/mutating deal records and merchant info.

    Conventions kept from the original code: every method returns 0 on any
    failure; mutating methods return 666 on success.
    """

    @staticmethod
    def _car_people_list(car_id):
        """Contact list [{'name', 'phone'}, ...] for one car listing."""
        return [{'name': p.people_name, 'phone': p.people_phone}
                for p in car_table_people.objects.filter(car_id=car_id)]

    @staticmethod
    def _merchant_people_list(merchant_id):
        """Contact list [{'name', 'phone'}, ...] for one merchant."""
        return [{'name': p.user_name, 'phone': p.user_phone}
                for p in user_info_people.objects.filter(merchant_id=merchant_id)]

    def get_all_deal_records(self):
        """Return every deal record, newest first, as a list of dicts.

        Each dict carries buyer/seller merchant ids, the price, a
        broken-down timestamp and a `source_detail_list` describing the car
        that was traded. Returns 0 on any error.
        """
        try:
            records = manage_deal.objects.all().order_by("-deal_time")
            result = []
            for record in records:
                deal_time = record.deal_time
                entry = {
                    'buyer_merchant_id': record.buyer_merchant_id,
                    'seller_merchant_id': record.seller_merchant_id,
                    'deal_price': record.deal_price,
                    'deal_time': {
                        'year': deal_time.year,
                        'month': deal_time.month,
                        'day': deal_time.day,
                        'hour': deal_time.hour,
                        'minute': deal_time.minute,
                        'second': deal_time.second,
                    },
                }
                car = car_table.objects.get(id=record.car_id)
                delivery = car.delivery_time   # delivery date
                valid_until = car.date_valid   # listing expiry date
                entry['source_detail_list'] = {
                    'id': car.id,
                    'series_detail': {
                        'brand': car.car_brand,           # brand
                        'displacement': car.car_series,   # series
                        'other': car.car_model,           # trim/model
                    },
                    'price': {
                        'lowest_price': car.lowest_price,    # lowest quote
                        'highest_price': car.highest_price,  # highest quote
                    },
                    'delivery_date': {'year': delivery.year,
                                      'month': delivery.month,
                                      'day': delivery.day},
                    'out_date': {'year': valid_until.year,
                                 'month': valid_until.month,
                                 'day': valid_until.day},
                    'carType': car.car_type,              # vehicle type
                    'color': car.color,
                    # hex code of the color; placeholder field for now
                    'color_hex': car.color_hex,
                    'delivery_type': car.delivery_type,   # spot/futures type
                    'payType': car.pay_method,            # payment method
                    'saleRegion': car.sell_area,          # sales region
                    'logisticsType': car.method_logistics,
                    'remark': car.introduction,           # free-text notes
                    'peoples': self._car_people_list(record.car_id),
                }
                result.append(entry)
            # BUG FIX: the original returned the raw queryset (`records`)
            # even though it had just built this list of dicts.
            return result
        except Exception:
            return 0

    def get_all_user_info_list(self):
        """Return all non-blacklisted merchants (is_black=0), newest first.

        The "all members" admin page must only show merchants that are not
        blacklisted. Returns 0 on error.
        """
        try:
            users = user_info_table.objects.filter(is_black=0).order_by('-add_time')
            return [{
                'company_name': u.user_company_name,
                'region': u.user_address,
                'user_trademark': u.user_trademark,
                'state': u.state,
                'id': u.id,
                'peoples': self._merchant_people_list(u.id),
            } for u in users]
        except Exception:
            return 0

    def get_each_user_info_list(self, merchant_id):
        """Return one merchant's profile dict (no 'state' key), or 0 on error."""
        try:
            user = user_info_table.objects.get(id=merchant_id)
            return {
                'id': user.id,
                'company_name': user.user_company_name,
                'region': user.user_address,
                'user_trademark': user.user_trademark,
                'peoples': self._merchant_people_list(merchant_id),
            }
        except Exception:
            return 0

    def insert_deal_table(self, seller_merchant_id, buyer_merchant_id, car_id, deal_price):
        """Create a deal record stamped with the current time; 666/0."""
        try:
            manage_deal.objects.create(
                seller_merchant_id=seller_merchant_id,
                buyer_merchant_id=buyer_merchant_id,
                car_id=car_id,
                deal_price=deal_price,
                deal_time=datetime.datetime.now())
            return 666
        except Exception:
            return 0

    def delete_deal_record_one_by_one(self, operation_id):
        """Delete a single deal record by id; 666 on success, 0 on error."""
        try:
            manage_deal.objects.get(id=operation_id).delete()
            return 666
        except Exception:
            return 0

    def delete_deal_record_some(self, operation_id_list):
        """Batch-delete the deal records whose ids are in the given list."""
        try:
            manage_deal.objects.filter(id__in=operation_id_list).delete()
            return 666
        except Exception:
            return 0

    def delete_deal_record_all(self):
        """Delete every deal record; 666 on success, 0 on error."""
        try:
            manage_deal.objects.all().delete()
            return 666
        except Exception:
            return 0

    def get_user_list(self):
        """Same data as get_all_user_info_list; kept as a separate entry
        point because existing views call it under this name."""
        return self.get_all_user_info_list()
| [
"1422297148@qq.com"
] | 1422297148@qq.com |
d650de5f9953a7e086d73ad941196e733adde1ed | ba44dc116cbc3c6e5b0fd417b79b8699cc867fa7 | /MassDOT Workzone/Create_VolProfile_Sheet.py | 5bf131588ac5883b35414c5fe306a536da5a1050 | [
"MIT"
] | permissive | Apoorb/Freeval-Data-Processing | 89d4ff605344d26670ba7045e7513d93c2b38708 | e4806f9aaf993545ace5c40df8ba4ddc5bb0d5ff | refs/heads/master | 2023-02-24T22:32:15.576838 | 2022-07-26T22:01:05 | 2022-07-26T22:01:05 | 201,122,575 | 2 | 1 | MIT | 2023-02-10T22:48:40 | 2019-08-07T20:24:34 | HTML | UTF-8 | Python | false | false | 5,850 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 10:22:48 2019
@author: abibeka
"""
# 0.0 Housekeeping. Clear variable space
from IPython import get_ipython # run magic commands
ipython = get_ipython()
ipython.magic("reset -f")
ipython = get_ipython()
import pandas as pd
import os
import glob
import numpy as np
import seaborn as sns
import subprocess
from itertools import product
import matplotlib.pyplot as plt
import xlrd
import re
# ****************************************************************************
# EB (eastbound): mainline volume profile from the AET07-EB toll counter.
EB_Mainline = r"C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\MassDOT\Volumes\Main-Counters\AET07-EB\AET07-EB_VolProfile.xlsx"
x1 = pd.ExcelFile(EB_Mainline)
x1.sheet_names  # inspection leftover; expression result is discarded
Dat_EB_MainLine = x1.parse("VolProfile")
Dat_EB_MainLine = Dat_EB_MainLine.drop(columns=["Unnamed: 0", "Unnamed: 1"])
# https://stackoverflow.com/questions/46250972/split-columns-into-multiindex-with-missing-columns-in-pandas
# Multiindex from column names
idx = Dat_EB_MainLine.columns.str.split("_", expand=True)
Dat_EB_MainLine.columns = idx
# ****************************************************************************
# I-495 ramp counts workbook (shared by the EB and WB sections below).
I_495 = r"C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\MassDOT\Volumes\RampVolumes\I-495\I495_Volumes.xlsx"
x1 = pd.ExcelFile(I_495)
x1.sheet_names
I_495_Dat_EB_Off = x1.parse("I-90 EB Offramp")
I_495_Dat_EB_Off = I_495_Dat_EB_Off.drop(
    columns=[I_495_Dat_EB_Off.columns[0], I_495_Dat_EB_Off.columns[1]]
)
idx = I_495_Dat_EB_Off.columns.str.split("_", expand=True)
I_495_Dat_EB_Off.columns = idx
# ****************************************************************************
I_495_Dat_EB_On = x1.parse("I-90 EB Onramp")
I_495_Dat_EB_On = I_495_Dat_EB_On.drop(
    columns=[I_495_Dat_EB_On.columns[0], I_495_Dat_EB_On.columns[1]]
)
idx = I_495_Dat_EB_On.columns.str.split("_", expand=True)
I_495_Dat_EB_On.columns = idx
# ****************************************************************************
# Join mainline + ramp profiles on the shared (time-interval) index, then
# reorder columns into a (Day, Segment) MultiIndex layout.
EB_Dat = pd.merge(Dat_EB_MainLine, I_495_Dat_EB_Off, left_index=True, right_index=True)
EB_Dat = pd.merge(EB_Dat, I_495_Dat_EB_On, left_index=True, right_index=True)
mux = pd.MultiIndex.from_product(
    [
        ["Friday", "Saturday", "Sunday", "Monday"],
        [
            "AET07-EB",
            "I-90 EB Offramp-I495",
            "I-90 EB Onramp-I495",
            "EB Route 9 Off-Ramp",
            "EB Route 9 On-Ramp",
            "VSep",
        ],
    ],
    names=["Day", "Seg"],
)
EB_Dat = EB_Dat.reindex(mux, axis=1)
idx = pd.IndexSlice
# R11527: RAMP-RT 90 EB TO RT 9
EB_Dat.loc[0, idx[:, ["EB Route 9 Off-Ramp"]]] = 5600
# R11528: RAMP-RT 9 TO RT 90 EB
EB_Dat.loc[0, idx[:, ["EB Route 9 On-Ramp"]]] = 11645
# Row 1 holds the FREEVAL segment number for every (day, segment) column;
# -999 marks the "VSep" separator columns. The 6-element list repeats once
# per day (4 days).
EB_Dat.loc[
    1,
    idx[
        :,
        [
            "AET07-EB",
            "I-90 EB Offramp-I495",
            "I-90 EB Onramp-I495",
            "EB Route 9 Off-Ramp",
            "EB Route 9 On-Ramp",
            "VSep",
        ],
    ],
] = [1, 3, 5, 14, 16, -999] * 4
EB_Dat.rename(index={1: "Freeval Seg"}, inplace=True)
# ****************************************************************************
# WB (westbound): same processing, based on the AET09-WB counter.
# ****************************************************************************
WB_Mainline = r"C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\MassDOT\Volumes\Main-Counters\AET09-WB\AET09-WB_VolProfile.xlsx"
x1 = pd.ExcelFile(WB_Mainline)
x1.sheet_names
Dat_WB_MainLine = x1.parse("VolProfile")
Dat_WB_MainLine = Dat_WB_MainLine.drop(columns=["Unnamed: 0", "Unnamed: 1"])
# https://stackoverflow.com/questions/46250972/split-columns-into-multiindex-with-missing-columns-in-pandas
# Multiindex from column names
idx = Dat_WB_MainLine.columns.str.split("_", expand=True)
Dat_WB_MainLine.columns = idx
# ****************************************************************************
x1 = pd.ExcelFile(I_495)
x1.sheet_names
I_495_Dat_WB_Off = x1.parse("I-90 WB Offramp")
I_495_Dat_WB_Off = I_495_Dat_WB_Off.drop(
    columns=[I_495_Dat_WB_Off.columns[0], I_495_Dat_WB_Off.columns[1]]
)
idx = I_495_Dat_WB_Off.columns.str.split("_", expand=True)
I_495_Dat_WB_Off.columns = idx
# ****************************************************************************
I_495_Dat_WB_On = x1.parse("I-90 WB Onramp")
I_495_Dat_WB_On = I_495_Dat_WB_On.drop(
    columns=[I_495_Dat_WB_On.columns[0], I_495_Dat_WB_On.columns[1]]
)
idx = I_495_Dat_WB_On.columns.str.split("_", expand=True)
I_495_Dat_WB_On.columns = idx
# ****************************************************************************
WB_Dat = pd.merge(Dat_WB_MainLine, I_495_Dat_WB_Off, left_index=True, right_index=True)
WB_Dat = pd.merge(WB_Dat, I_495_Dat_WB_On, left_index=True, right_index=True)
mux = pd.MultiIndex.from_product(
    [
        ["Friday", "Saturday", "Sunday", "Monday"],
        [
            "AET09-WB",
            "WB Route 9 Off-Ramp",
            "WB Route 9 On-Ramp",
            "I-90 WB Offramp-I495",
            "I-90 WB Onramp-I495",
            "VSep",
        ],
    ],
    names=["Day", "Seg"],
)
WB_Dat = WB_Dat.reindex(mux, axis=1)
idx = pd.IndexSlice
# R11544: RAMP-RT 90 WB TO RT 9
WB_Dat.loc[0, idx[:, ["WB Route 9 Off-Ramp"]]] = 13713
# R11545: RAMP-RT 9 TO RT 90 WB
WB_Dat.loc[0, idx[:, ["WB Route 9 On-Ramp"]]] = 5533
WB_Dat.loc[
    1,
    idx[
        :,
        [
            "AET09-WB",
            "WB Route 9 Off-Ramp",
            "WB Route 9 On-Ramp",
            "I-90 WB Offramp-I495",
            "I-90 WB Onramp-I495",
            "VSep",
        ],
    ],
] = [1, 5, 7, 16, 18, -999] * 4
WB_Dat.rename(index={1: "Freeval Seg"}, inplace=True)

# Write both directions to one workbook, then open it in Excel.
OutFi = r"C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\MassDOT\Volumes\Volumes-Processed.xlsx"
writer = pd.ExcelWriter(OutFi)
EB_Dat.to_excel(writer, "EB Data")
WB_Dat.to_excel(writer, "WB Data")
writer.save()
# NOTE(review): shell=True is fine here with a fixed local path, but avoid
# this pattern for any untrusted input.
subprocess.Popen([OutFi], shell=True)
| [
"apoorb2510@gmail.com"
] | apoorb2510@gmail.com |
bb614ca622dae5c7c78c8d3679a7c49844a9a869 | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.buildout/branches/bungeni.buildout-refactor-2010-06-02/src/bungeni.main/bungeni/core/workflows/dia.py | a3415a29b83f39e1c008d34ec51f5e69e252daee | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | #
from bungeni.core.workflows import adapters
from bungeni.core.workflows import bill
from bungeni.core.workflows import question
from bungeni.core.workflows import motion
from bungeni.core.workflows import version
from bungeni.core.workflows import groupsitting
from bungeni.core.workflows import groups
from bungeni.core.workflows import address
from bungeni.core.workflows import tableddocument
from bungeni.core.workflows import agendaitem
from bungeni.core.workflows import committee
from bungeni.core.workflows import parliament
def main():
    """Write each workflow's state machine to a Graphviz `.dot` file.

    One file per workflow (e.g. `bill.dot`) is created in the current
    working directory; the content comes from each module's `wf.dot()`.

    Fixes over the original: the eleven copy-pasted open/write/close
    sequences are collapsed into one loop, and `with` guarantees the file
    handle is closed even if `wf.dot()` or `write()` raises.
    """
    # (name, module) pairs; a list keeps the original, deterministic order.
    workflows = [
        ("bill", bill),
        ("question", question),
        ("motion", motion),
        ("version", version),
        ("groupsitting", groupsitting),
        ("groups", groups),
        ("address", address),
        ("tableddocument", tableddocument),
        ("agendaitem", agendaitem),
        ("committee", committee),
        ("parliament", parliament),
    ]
    for name, module in workflows:
        with open("%s.dot" % name, "w") as dot_file:
            dot_file.write(module.wf.dot())


if __name__ == "__main__":
    main()
| [
"christian.ledermann@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | christian.ledermann@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
5c0f91846cc2feeb9b6bc2da717f71a9af08ea58 | d99e5b65624f115db6982dd88af9390e8d766042 | /tensorflow/contrib/distributions/python/ops/bijector.py | 7a089bb53be8450af76210f20a513e17fe54b1c3 | [
"Apache-2.0"
] | permissive | golbin/tensorflow | 03dbecb6f093f5628c072086c780659bcc14dba8 | 8a58a304bdcf909f8b55ec49e9280fc3af01c7d3 | refs/heads/master | 2021-01-12T07:05:41.360503 | 2016-12-20T00:15:41 | 2016-12-20T00:15:41 | 76,907,006 | 2 | 0 | null | 2016-12-19T23:58:44 | 2016-12-19T23:58:43 | null | UTF-8 | Python | false | false | 93,743 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Bijector Ops.
An API for invertible, differentiable transformations of random variables.
## Background
Differentiable, bijective transformations of continuous random variables alter
the calculations made in the cumulative/probability distribution functions and
sample function. This module provides a standard interface for making these
manipulations.
For more details and examples, see the `Bijector` docstring.
To apply a `Bijector`, use `distributions.TransformedDistribution`.
## Bijectors
@@Affine
@@AffineLinearOperator
@@Bijector
@@Chain
@@CholeskyOuterProduct
@@Exp
@@Identity
@@Inline
@@Invert
@@SigmoidCentered
@@SoftmaxCentered
@@Softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import itertools
import math
import re
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"Affine",
"AffineLinearOperator",
"Bijector",
"Chain",
"CholeskyOuterProduct",
"Exp",
"Identity",
"Inline",
"Invert",
"PowerTransform",
"SigmoidCentered",
"SoftmaxCentered",
"Softplus",
]
# TODO(jvdillon): deprecate this function once tf.expm1 exists.
def _expm1(x):
  """Approximate exp{y}-1~=y for small |y|, and exp{y}-1 elsewhere."""
  # Smallest positive float eps such that 1 + eps != 1 for x's base dtype.
  eps = np.finfo(x.dtype.base_dtype.as_numpy_dtype).eps
  # Both branches of `where` are evaluated; each is NaN-free for all finite
  # inputs, so no NaN can leak through the unselected branch.
  is_tiny = math_ops.less(math_ops.abs(x), eps)
  return array_ops.where(is_tiny, x, math_ops.exp(x) - 1.)
def _as_tensor(x, name):
  """Convert `x` to a `Tensor` named `name`; pass `None` through unchanged."""
  if x is None:
    return None
  return ops.convert_to_tensor(x, name=name)
class _Mapping(collections.namedtuple("_Mapping",
                                      ["x", "y", "ildj", "condition_kwargs"])):
  """Immutable record used by `Bijector` to cache forward/inverse results."""

  def __new__(cls, x=None, y=None, ildj=None, condition_kwargs=None):
    """Custom __new__ so namedtuple items have defaults.

    Args:
      x: `Tensor`. Forward.
      y: `Tensor`. Inverse.
      ildj: `Tensor`. Inverse log det Jacobian.
      condition_kwargs: Python dictionary. Extra args supplied to
        forward/inverse/etc functions.

    Returns:
      mapping: New instance of _Mapping.
    """
    return super(_Mapping, cls).__new__(cls, x, y, ildj, condition_kwargs)

  @property
  def x_key(self):
    """Key used for caching the forward evaluation Y=g(X)."""
    return (self.x,) + self._canonical_kwargs()

  @property
  def y_key(self):
    """Key used for caching the inverse evaluation X=g^{-1}(Y)."""
    return (self.y,) + self._canonical_kwargs()

  def merge(self, x=None, y=None, ildj=None,
            condition_kwargs=None, mapping=None):
    """Returns a new `_Mapping` combining `self` with the given values.

    Pass either individual fields or a ready-made `mapping`, never both.

    Args:
      x: `Tensor`. Forward.
      y: `Tensor`. Inverse.
      ildj: `Tensor`. Inverse log det Jacobian.
      condition_kwargs: Python dictionary. Extra args supplied to
        forward/inverse/etc functions.
      mapping: Instance of _Mapping to merge. Can only be specified if no
        other arg is specified.

    Returns:
      mapping: New instance of `_Mapping` which has inputs merged with self.

    Raises:
      ValueError: if `mapping` is given together with any other argument, or
        if a non-None field disagrees with the corresponding field of `self`.
    """
    if mapping is None:
      mapping = _Mapping(x=x, y=y, ildj=ildj,
                         condition_kwargs=condition_kwargs)
    elif any(arg is not None
             for arg in [x, y, ildj, condition_kwargs]):
      raise ValueError("Cannot specify mapping and individual args.")
    return _Mapping(
        x=self._merge(self.x, mapping.x),
        y=self._merge(self.y, mapping.y),
        ildj=self._merge(self.ildj, mapping.ildj),
        condition_kwargs=self._merge(self.condition_kwargs,
                                     mapping.condition_kwargs))

  def _canonical_kwargs(self):
    # Hashable, order-independent encoding of condition_kwargs:
    # sorted items, with any nested lists/tuples deeply converted to tuples.
    return self._deep_tuple(tuple(sorted(self.condition_kwargs.items())))

  def _merge(self, old, new):
    """Keep `old` unless it is None; a disagreement raises ValueError."""
    if old is None:
      return new
    if new is not None and old != new:
      raise ValueError("Incompatible values: %s != %s" % (old, new))
    return old

  def _deep_tuple(self, x):
    """Recursively convert lists/tuples into nested tuples."""
    if isinstance(x, (list, tuple)):
      return tuple(self._deep_tuple(item) for item in x)
    return x
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
"""Interface for transforming a `Distribution` sample.
A `Bijector` implements a
[diffeomorphism](https://en.wikipedia.org/wiki/Diffeomorphism), i.e., a
bijective, differentiable function. A `Bijector` is used by
`TransformedDistribution` but can be generally used for transforming a
`Distribution` generated `Tensor`. A `Bijector` is characterized by three
operations:
1. Forward Evaluation
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse Evaluation
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. (log o det o Jacobian o inverse)(x)
"The log of the determinant of the matrix of all first-order partial
derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the det(Jacobian) is the volume of the
transformation and is used to scale the probability.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples, the
inverse is useful for computing probabilities.
Example Use:
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_pdf(bijector, log_pdf, x):
return (bijector.inverse_log_det_jacobian(x) +
log_pdf(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
Example transformations:
- "Exponential"
```
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
Here is an example of how one might implement the `Exp` bijector:
```
class Exp(Bijector):
def __init__(self, event_ndims=0, validate_args=False, name="exp"):
super(Exp, self).__init__(batch_ndims=0, event_ndims=event_ndims,
validate_args=validate_args, name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
x = math_ops.log(y)
return x, -self._forward_log_det_jacobian(x)
def _forward_log_det_jacobian(self, x):
if self.shaper is None:
raise ValueError("Jacobian requires known event_ndims.")
_, _, event_dims = self.shaper.get_dims(x)
return math_ops.reduce_sum(x, reduction_indices=event_dims)
```
- "Affine"
```
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
Example of why a `Bijector` needs to understand sample, batch, event
partitioning:
- Consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch,
and event (S, B, E) shape semantics. Suppose
the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
For `Exp`, the shape of the `Tensor` returned by `forward` and `inverse` is
unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
`inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
over the event dimensions.
Subclass Requirements:
- Typically subclasses implement `_forward` and one or both of:
- `_inverse`, `_inverse_log_det_jacobian`,
- `_inverse_and_inverse_log_det_jacobian`.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
to implement all of `_forward` and `_inverse` functions. Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See `Example Use` [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
- If computation can be shared among `_inverse` and
`_inverse_log_det_jacobian` it is preferable to implement
`_inverse_and_inverse_log_det_jacobian`. This usually reduces
graph-construction overhead because a `Distribution`'s implementation of
`log_prob` will need to evaluate both the inverse Jacobian as well as the
inverse function.
- If an additional use case needs just `inverse` or just
`inverse_log_det_jacobian` then he or she may also wish to implement these
functions to avoid computing the `inverse_log_det_jacobian` or the
`inverse`, respectively.
- Subclasses should implement `_get_forward_event_shape`,
`_forward_event_shape` (and `inverse` counterparts) if the transformation is
shape-changing. By default the event-shape is assumed unchanged from input.
Tips for implementing `_inverse` and `_inverse_log_det_jacobian`:
- As case 3 [above] indicates, under some circumstances the inverse function
can be implemented as a cache lookup.
- The inverse `log o det o Jacobian` can be implemented as the negative of the
forward `log o det o Jacobian`. This is useful if the `inverse` is
implemented as a cache or the inverse Jacobian is computationally more
expensive (e.g., `CholeskyOuterProduct` `Bijector`). The following
demonstrates the suggested implementation.
```python
def _inverse_and_log_det_jacobian(self, y):
x = # ... implement inverse, possibly via cache.
return x, -self._forward_log_det_jac(x) # Note negation.
```
By overriding the `_inverse_and_log_det_jacobian` function we have access to
the inverse in one call.
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y=g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `d/dX g(X)!=0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
The same theorem also implies `g{-1}'` is non-singular therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
- If possible, prefer a direct implementation of the inverse Jacobian. This
should have superior numerical stability and will often share subgraphs with
the `_inverse` implementation.
"""
  @abc.abstractmethod
  def __init__(self,
               batch_ndims=None,
               event_ndims=None,
               graph_parents=None,
               is_constant_jacobian=False,
               validate_args=False,
               dtype=None,
               name=None):
    """Constructs Bijector.

    A `Bijector` transforms random variables into new random variables.

    Examples:

    ```python
    # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
    identity = Identity(batch_ndims=4, event_ndims=1)

    # Create the Y = g(X) = exp(X) transform which operates on matrices.
    exp = Exp(batch_ndims=0, event_ndims=2)
    ```

    See `Bijector` subclass docstring for more details and specific examples.

    Args:
      batch_ndims: number of dimensions associated with batch coordinates.
      event_ndims: number of dimensions associated with event coordinates.
      graph_parents: Python list of graph prerequisites of this `Bijector`.
      is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
        function of the input.
      validate_args: `Boolean`, default `False`. Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
        enforced.
      name: The name to give Ops created by the initializer. If empty, a
        snake_case version of the subclass name is used.
    """
    if batch_ndims is None or event_ndims is None:
      self._shaper = None  # Apparently subclass will create.
    else:
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          validate_args=validate_args)
    self._graph_parents = graph_parents or []
    self._is_constant_jacobian = is_constant_jacobian
    self._validate_args = validate_args
    self._dtype = dtype
    # Memoization caches: map hashable keys of previously-seen y (resp. x)
    # values to their full `_Mapping`, so `forward`/`inverse` (and the
    # Jacobian functions) can short-circuit repeated evaluations.
    self._from_y = {}
    self._from_x = {}
    # Using abbreviation ildj for "inverse log det Jacobian."
    # This variable is not `None` iff is_constant_jacobian is `True`.
    self._constant_ildj = None
    if name:
      self._name = name
    else:
      # We want the default convention to be snake_case rather than CamelCase
      # since `Chain` uses bijector.name as the condition_kwargs dictionary key.
      def camel_to_snake(name):
        # Insert "_" before capitalized word boundaries, then lowercase.
        s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
        return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
      self._name = camel_to_snake(type(self).__name__)
  @property
  def shaper(self):
    """Returns shape object used to manage shape constraints.

    May be `None` when `batch_ndims`/`event_ndims` were not supplied to
    `__init__` (a subclass is then expected to set `self._shaper` itself).
    """
    return self._shaper
  @property
  def graph_parents(self):
    """Returns this `Bijector`'s graph_parents as a Python list.

    Empty list when no `graph_parents` were passed at construction.
    """
    return self._graph_parents
  @property
  def is_constant_jacobian(self):
    """Returns true iff the Jacobian is not a function of x.

    Note: Jacobian is either constant for both forward and inverse or neither.
    When `True`, the first computed inverse-log-det-Jacobian is memoized in
    `self._constant_ildj` and reused by later calls.

    Returns:
      `Boolean`.
    """
    return self._is_constant_jacobian
  @property
  def validate_args(self):
    """Returns True if Tensor arguments will be validated with runtime asserts."""
    return self._validate_args
  @property
  def dtype(self):
    """dtype of `Tensor`s transformable by this distribution.

    `None` means dtype is not enforced (see `_maybe_assert_dtype`).
    """
    return self._dtype
  @property
  def name(self):
    """Returns the string name of this `Bijector`.

    Used as the outer op name scope and as the `condition_kwargs` key when
    this bijector is composed inside a `Chain`.
    """
    return self._name
  def _forward_event_shape(self, input_shape):
    """Subclass implementation for `forward_event_shape` public function.

    Default: the event shape is assumed unchanged by `forward`.
    """
    return input_shape
def forward_event_shape(self, input_shape, name="forward_event_shape"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
forward_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
name="input_shape")
return self._forward_event_shape(input_shape)
  def _get_forward_event_shape(self, input_shape):
    """Subclass implementation for `get_forward_event_shape` public function.

    Default: the static event shape is assumed unchanged by `forward`.
    """
    return input_shape
def get_forward_event_shape(self, input_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `forward_event_shape`. May be only partially defined.
Args:
input_shape: `TensorShape` indicating event-portion shape passed into
`forward` function.
Returns:
forward_event_shape: `TensorShape` indicating event-portion shape after
applying `forward`. Possibly unknown.
"""
return self._get_forward_event_shape(tensor_shape.TensorShape(input_shape))
  def _inverse_event_shape(self, output_shape):
    """Subclass implementation for `inverse_event_shape` public function.

    Default: the event shape is assumed unchanged by `inverse`.
    """
    return output_shape
def inverse_event_shape(self, output_shape, name="inverse_event_shape"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
inverse_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
name="output_shape")
return self._inverse_event_shape(output_shape)
def _get_inverse_event_shape(self, output_shape):
"""Subclass implementation for `get_inverse_event_shape` public function."""
return self._get_inverse_event_shape(tensor_shape.TensorShape(output_shape))
def get_inverse_event_shape(self, output_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape: `TensorShape` indicating event-portion shape after
applying `inverse`. Possibly unknown.
"""
return self._get_inverse_event_shape(output_shape)
  def _forward(self, x):
    """Subclass implementation for `forward` public function.

    Raises:
      NotImplementedError: always; subclasses must override to support
        `forward` (and, transitively, sampling in `TransformedDistribution`).
    """
    raise NotImplementedError("forward not implemented.")
def forward(self, x, name="forward", **condition_kwargs):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, condition_kwargs=condition_kwargs)
if mapping.y is not None:
return mapping.y
mapping = mapping.merge(y=self._forward(x, **condition_kwargs))
self._cache(mapping)
return mapping.y
  def _inverse(self, y):
    """Subclass implementation for `inverse` public function.

    Raises:
      NotImplementedError: always; subclasses override this or
        `_inverse_and_inverse_log_det_jacobian` (the public `inverse` falls
        back to the latter).
    """
    raise NotImplementedError("inverse not implemented")
  def inverse(self, y, name="inverse", **condition_kwargs):
    """Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).

    Args:
      y: `Tensor`. The input to the "inverse" evaluation.
      name: The name to give this op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      `Tensor`.

    Raises:
      TypeError: if `self.dtype` is specified and `y.dtype` is not
        `self.dtype`.
      NotImplementedError: if neither `_inverse` nor
        `_inverse_and_inverse_log_det_jacobian` are implemented.
    """
    with self._name_scope(name, [y]):
      y = ops.convert_to_tensor(y, name="y")
      self._maybe_assert_dtype(y)
      # Short-circuit if this y was seen before (e.g. produced by `forward`).
      mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
      if mapping.x is not None:
        return mapping.x
      ildj = None
      try:
        x = self._inverse(y, **condition_kwargs)
      except NotImplementedError as original_error:
        # Since _inverse was not implemented, try to see if it's implemented
        # by the _inverse_and_inverse_log_det_jacobian member.
        try:
          x, ildj = self._inverse_and_inverse_log_det_jacobian(
              y, **condition_kwargs)
        except NotImplementedError:
          # Surface the error about the primary hook, not the fallback.
          raise original_error
      if self._constant_ildj is not None:
        ildj = self._constant_ildj  # Use the "global" result.
      elif self.is_constant_jacobian:
        # First evaluation of a constant-Jacobian bijector: memoize ildj.
        self._constant_ildj = ildj
      # Prefer a previously-cached x (if any) over the freshly computed one.
      x = x if mapping.x is None else mapping.x
      mapping = mapping.merge(x=x, ildj=ildj)
      self._cache(mapping)
      return mapping.x
  def _inverse_log_det_jacobian(self, y):
    """Subclass implementation for `inverse_log_det_jacobian` public function.

    Raises:
      NotImplementedError: always; subclasses override this or
        `_inverse_and_inverse_log_det_jacobian`.
    """  # pylint: disable=line-too-long
    raise NotImplementedError("inverse_log_det_jacobian not implemented.")
  def inverse_log_det_jacobian(
      self, y, name="inverse_log_det_jacobian", **condition_kwargs):
    """Returns the (log o det o Jacobian o inverse)(y).

    Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)

    Note that `forward_log_det_jacobian` is the negative of this function.

    Args:
      y: `Tensor`. The input to the "inverse" Jacobian evaluation.
      name: The name to give this op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      `Tensor`.

    Raises:
      TypeError: if `self.dtype` is specified and `y.dtype` is not
        `self.dtype`.
      NotImplementedError: if neither `_inverse_log_det_jacobian` nor
        `_inverse_and_inverse_log_det_jacobian` are implemented.
    """
    with self._name_scope(name, [y]):
      # Constant-Jacobian bijectors can answer without touching y.
      if self._constant_ildj is not None:
        return self._constant_ildj
      y = ops.convert_to_tensor(y, name="y")
      self._maybe_assert_dtype(y)
      mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
      if mapping.ildj is not None:
        return mapping.ildj
      try:
        # x may be None here (cache miss); it is only cached, never used.
        x = mapping.x
        ildj = self._inverse_log_det_jacobian(y, **condition_kwargs)
      except NotImplementedError as original_error:
        # Since _inverse_log_det_jacobian was not implemented, try to see if
        # it's implemented by the _inverse_and_inverse_log_det_jacobian member.
        try:
          x, ildj = self._inverse_and_inverse_log_det_jacobian(
              y, **condition_kwargs)
        except NotImplementedError:
          raise original_error
        if mapping.x is not None:
          x = mapping.x
      if self.is_constant_jacobian:
        self._constant_ildj = ildj
      # Prefer a previously-cached x (if any) over the freshly computed one.
      x = x if mapping.x is None else mapping.x
      mapping = mapping.merge(x=x, ildj=ildj)
      self._cache(mapping)
      return mapping.ildj
  def _inverse_and_inverse_log_det_jacobian(self, y):
    """Subclass implementation for `inverse_and_inverse_log_det_jacobian` public function.

    Raises:
      NotImplementedError: always; subclasses override this or the pair
        {`_inverse`, `_inverse_log_det_jacobian`}.
    """  # pylint: disable=line-too-long
    raise NotImplementedError(
        "inverse_and_inverse_log_det_jacobian not implemented.")
  def inverse_and_inverse_log_det_jacobian(
      self, y, name="inverse_and_inverse_log_det_jacobian", **condition_kwargs):
    """Returns both the inverse evaluation and inverse_log_det_jacobian.

    Enables possibly more efficient calculation when both inverse and
    corresponding Jacobian are needed.

    See `inverse()`, `inverse_log_det_jacobian()` for more details.

    Args:
      y: `Tensor`. The input to the "inverse" Jacobian evaluation.
      name: The name to give this op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      A tuple `(x, ildj)` of `Tensor`s: `x = g^{-1}(y)` and `ildj` the
      inverse log det Jacobian evaluated at `y`.

    Raises:
      TypeError: if `self.dtype` is specified and `y.dtype` is not
        `self.dtype`.
      NotImplementedError: if neither `_inverse_and_inverse_log_det_jacobian`
        nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
    """
    with self._name_scope(name, [y]):
      y = ops.convert_to_tensor(y, name="y")
      self._maybe_assert_dtype(y)
      mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
      if mapping.x is not None and mapping.ildj is not None:
        return mapping.x, mapping.ildj
      try:
        x, ildj = self._inverse_and_inverse_log_det_jacobian(
            y, **condition_kwargs)
      except NotImplementedError as original_error:
        # Since _inverse_and_inverse_log_det_jacobian was not implemented, try
        # to see if we can separately use _inverse and
        # _inverse_log_det_jacobian members.
        try:
          # We want this same try/except to catch either NotImplementedError.
          x = self._inverse(y, **condition_kwargs)
          if self._constant_ildj is None:
            ildj = self._inverse_log_det_jacobian(y, **condition_kwargs)
        except NotImplementedError:
          raise original_error
      if self._constant_ildj is not None:
        ildj = self._constant_ildj  # Ignore any ildj we may/not have.
      elif self.is_constant_jacobian:
        self._constant_ildj = ildj
      # We use the mapped version of x, even if we re-computed x above with a
      # call to self._inverse_and_inverse_log_det_jacobian. This prevents
      # re-evaluation of the inverse in a common case.
      x = x if mapping.x is None else mapping.x
      mapping = mapping.merge(x=x, ildj=ildj)
      self._cache(mapping)
      return mapping.x, mapping.ildj
  def _forward_log_det_jacobian(self, x):
    """Subclass implementation for `forward_log_det_jacobian` public function.

    Raises:
      NotImplementedError: always; subclasses override this, or the public
        function falls back to the inverse-side Jacobian.
    """  # pylint: disable=line-too-long
    raise NotImplementedError(
        "forward_log_det_jacobian not implemented.")
def forward_log_det_jacobian(
self, x, name="forward_log_det_jacobian", **condition_kwargs):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [x]):
if self._constant_ildj is not None:
# Need "-1. *" to avoid invalid-unary-operand-type linter warning.
return -1. * self._constant_ildj
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, condition_kwargs=condition_kwargs)
if mapping.ildj is not None:
return -mapping.ildj
y = None
try:
ildj = -self._forward_log_det_jacobian(x, **condition_kwargs)
except NotImplementedError as original_error:
try:
# We want this same try/except to catch either NotImplementedError.
y = self.inverse(x, **condition_kwargs) if y is None else y
ildj = self.inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self.is_constant_jacobian:
self._constant_ildj = ildj
y = y if mapping.y is None else mapping.y
mapping = mapping.merge(y=y, ildj=ildj)
self._cache(mapping)
return -mapping.ildj
  @contextlib.contextmanager
  def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope.

    Nests `name` under this bijector's own name scope and threads
    `graph_parents` through the scope's `values` so created ops get
    consistent graph names.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(
          name, values=(values or []) + self.graph_parents) as scope:
        yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
  def _cache(self, mapping):
    """Helper which stores mapping info in forward/inverse dicts.

    The same `_Mapping` is stored under both its x-key and y-key so a later
    `_lookup` from either side finds the full (x, y, ildj) record.

    Raises:
      ValueError: if neither `mapping.x` nor `mapping.y` is known.
    """
    if self._constant_ildj is not None:
      # Fold in ildj if known constant Jacobian.
      mapping = mapping.merge(ildj=self._constant_ildj)
    # Merging from lookup is an added check that we're not overwriting anything
    # which is not None.
    mapping = mapping.merge(mapping=self._lookup(
        mapping.x, mapping.y, mapping.condition_kwargs))
    if mapping.x is None and mapping.y is None:
      raise ValueError("Caching expects at least one of (x,y) to be known, "
                       "i.e., not None.")
    self._from_x[mapping.x_key] = mapping
    self._from_y[mapping.y_key] = mapping
def _lookup(self, x=None, y=None, condition_kwargs=None):
"""Helper which retrieves mapping info from forward/inverse dicts."""
mapping = _Mapping(x=x, y=y, condition_kwargs=condition_kwargs)
# Since _cache requires both x,y to be set, we only need to do one cache
# lookup since the mapping is always in both or neither.
if mapping.x is not None:
return self._from_x.get(mapping.x_key, mapping)
if mapping.y is not None:
return self._from_y.get(mapping.y_key, mapping)
return mapping
class Inline(Bijector):
  # pylint: disable=line-too-long
  """Bijector constructed from callables implementing forward, inverse, and inverse_log_det_jacobian.

  Example Use:

  ```python
  exp = Inline(
      forward_fn=tf.exp,
      inverse_fn=tf.log,
      inverse_log_det_jacobian_fn=(
          lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1)),
      name="exp")
  ```

  The above example is equivalent to the `Bijector` `Exp(event_ndims=1)`.
  """
  # pylint: enable=line-too-long

  def __init__(self,
               forward_fn=None,
               inverse_fn=None,
               inverse_log_det_jacobian_fn=None,
               forward_log_det_jacobian_fn=None,
               get_forward_event_shape_fn=None,
               forward_event_shape_fn=None,
               get_inverse_event_shape_fn=None,
               inverse_event_shape_fn=None,
               is_constant_jacobian=False,
               validate_args=False,
               name="inline"):
    """Creates a `Bijector` from callables.

    Any transformation hook left as `None` raises `NotImplementedError` when
    invoked; any event-shape hook left as `None` behaves as the identity.

    Args:
      forward_fn: Python callable implementing the forward transformation.
      inverse_fn: Python callable implementing the inverse transformation.
      inverse_log_det_jacobian_fn: Python callable implementing the
        log o det o jacobian of the inverse transformation.
      forward_log_det_jacobian_fn: Python callable implementing the
        log o det o jacobian of the forward transformation.
      get_forward_event_shape_fn: Python callable implementing non-identical
        static event shape changes. Default: shape is assumed unchanged.
      forward_event_shape_fn: Python callable implementing non-identical event
        shape changes. Default: shape is assumed unchanged.
      get_inverse_event_shape_fn: Python callable implementing non-identical
        static event shape changes. Default: shape is assumed unchanged.
      inverse_event_shape_fn: Python callable implementing non-identical event
        shape changes. Default: shape is assumed unchanged.
      is_constant_jacobian: `Boolean` indicating that the Jacobian is constant
        for all input arguments.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String`, name given to ops managed by this object.
    """
    super(Inline, self).__init__(
        batch_ndims=0,
        event_ndims=0,
        is_constant_jacobian=is_constant_jacobian,
        validate_args=validate_args,
        name=name)
    self._forward_fn = forward_fn
    self._inverse_fn = inverse_fn
    self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn
    self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn
    self._get_forward_event_shape_fn = get_forward_event_shape_fn
    self._forward_event_shape_fn = forward_event_shape_fn
    self._get_inverse_event_shape_fn = get_inverse_event_shape_fn
    self._inverse_event_shape_fn = inverse_event_shape_fn

  def _maybe_call(self, fn, fn_name, tensor, **condition_kwargs):
    """Invokes `fn`, or raises `NotImplementedError` when it isn't callable."""
    if not callable(fn):
      raise NotImplementedError(
          "%s is not a callable function." % fn_name)
    return fn(tensor, **condition_kwargs)

  def _get_forward_event_shape(self, input_shape):
    # Identity when no static-shape callable was supplied.
    fn = self._get_forward_event_shape_fn
    return input_shape if fn is None else fn(input_shape)

  def _forward_event_shape(self, input_shape):
    fn = self._forward_event_shape_fn
    return input_shape if fn is None else fn(input_shape)

  def _get_inverse_event_shape(self, output_shape):
    fn = self._get_inverse_event_shape_fn
    return output_shape if fn is None else fn(output_shape)

  def _inverse_event_shape(self, output_shape):
    fn = self._inverse_event_shape_fn
    return output_shape if fn is None else fn(output_shape)

  def _forward(self, x, **condition_kwargs):
    return self._maybe_call(self._forward_fn, "forward_fn", x,
                            **condition_kwargs)

  def _inverse(self, y, **condition_kwargs):
    return self._maybe_call(self._inverse_fn, "inverse_fn", y,
                            **condition_kwargs)

  def _inverse_log_det_jacobian(self, y, **condition_kwargs):
    return self._maybe_call(self._inverse_log_det_jacobian_fn,
                            "inverse_log_det_jacobian_fn", y,
                            **condition_kwargs)

  def _forward_log_det_jacobian(self, x, **condition_kwargs):
    return self._maybe_call(self._forward_log_det_jacobian_fn,
                            "forward_log_det_jacobian_fn", x,
                            **condition_kwargs)
class Invert(Bijector):
  """Bijector which inverts another Bijector.

  Example Use: [ExpGammaDistribution (see Background & Context)](
  https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
  models `Y=log(X)` where `X ~ Gamma`.

  ```python
  exp_gamma_distribution = TransformedDistribution(
    Gamma(alpha=1., beta=2.),
    bijector.Invert(bijector.Exp())
  ```

  """

  def __init__(self, bijector, validate_args=False, name=None):
    """Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.

    Note: An inverted bijector's `inverse_log_det_jacobian` is often more
    efficient if the base bijector implements `_forward_log_det_jacobian`. If
    `_forward_log_det_jacobian` is not implemented then the following code is
    used:

    ```python
    y = self.inverse(x, **condition_kwargs)
    return -self.inverse_log_det_jacobian(y, **condition_kwargs)
    ```

    Args:
      bijector: Bijector instance.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String`, name given to ops managed by this object.
    """
    self._bijector = bijector
    super(Invert, self).__init__(
        graph_parents=bijector.graph_parents,
        is_constant_jacobian=bijector.is_constant_jacobian,
        validate_args=validate_args,
        dtype=bijector.dtype,
        name=name if name else "_".join(["invert", bijector.name]))
    self._shaper = bijector.shaper

  @property
  def bijector(self):
    """The wrapped `Bijector` whose direction is being reversed."""
    return self._bijector

  # Every hook below simply delegates to the wrapped bijector with forward
  # and inverse roles exchanged.

  def _get_forward_event_shape(self, input_shape):
    return self._bijector.get_inverse_event_shape(input_shape)

  def _forward_event_shape(self, input_shape):
    return self._bijector.inverse_event_shape(input_shape)

  def _get_inverse_event_shape(self, output_shape):
    return self._bijector.get_forward_event_shape(output_shape)

  def _inverse_event_shape(self, output_shape):
    return self._bijector.forward_event_shape(output_shape)

  def _forward(self, x, **condition_kwargs):
    return self._bijector.inverse(x, **condition_kwargs)

  def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
    x = self._bijector.forward(y, **condition_kwargs)
    fldj = self._bijector.forward_log_det_jacobian(y, **condition_kwargs)
    return x, fldj

  def _forward_log_det_jacobian(self, x, **condition_kwargs):
    return self._bijector.inverse_log_det_jacobian(x, **condition_kwargs)
class Chain(Bijector):
  """Bijector which applies a sequence of bijectors.

  Example Use:

  ```python
  chain = Chain([Exp(), Softplus()], name="one_plus_exp")
  ```

  Results in:

  * Forward:

    ```python
    exp = Exp()
    softplus = Softplus()
    Chain([exp, softplus]).forward(x)
    = exp.forward(softplus.forward(x))
    = tf.exp(tf.log(1. + tf.exp(x)))
    = 1. + tf.exp(x)
    ```

  * Inverse:

    ```python
    exp = Exp()
    softplus = Softplus()
    Chain([exp, softplus]).inverse(y)
    = softplus.inverse(exp.inverse(y))
    = tf.log(tf.exp(tf.log(y)) - 1.)
    = tf.log(y - 1.)
    ```

  """

  def __init__(self, bijectors=(), validate_args=False, name=None):
    """Instantiates `Chain` bijector.

    Args:
      bijectors: Python list of bijector instances. An empty list makes this
        bijector equivalent to the `Identity` bijector.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String`, name given to ops managed by this object. Default: E.g.,
        `Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.

    Raises:
      ValueError: if bijectors have different dtypes.
    """
    self._bijectors = bijectors
    # Resolve the common dtype. Valid configurations: all None (dtype not
    # enforced), a single concrete dtype, or a mix of None and exactly one
    # concrete dtype (the concrete one wins).
    dtype = list(set([b.dtype for b in bijectors]))
    if len(dtype) > 2:
      raise ValueError("incompatible dtypes: %s" % dtype)
    elif len(dtype) == 2:
      # Bug fix: two *distinct, non-None* dtypes were previously accepted
      # silently (picking dtype[0]), contradicting the documented
      # `ValueError` contract. Only the {None, concrete} pairing is valid.
      if dtype[0] is not None and dtype[1] is not None:
        raise ValueError("incompatible dtypes: %s" % dtype)
      dtype = dtype[1] if dtype[0] is None else dtype[0]
    elif len(dtype) == 1:
      dtype = dtype[0]
    else:
      dtype = None
    super(Chain, self).__init__(
        graph_parents=list(itertools.chain.from_iterable(
            b.graph_parents for b in bijectors)),
        is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors),
        validate_args=validate_args,
        dtype=dtype,
        name=name or ("identity" if not bijectors else
                      "_of_".join(["chain"] + [b.name for b in bijectors])))

  @property
  def bijectors(self):
    """The list of chained bijectors, outermost first."""
    return self._bijectors

  def _shape_helper(self, func_name, input_shape, reverse):
    """Threads `input_shape` through each bijector's `func_name` method."""
    new_shape = input_shape
    for b in reversed(self.bijectors) if reverse else self.bijectors:
      func = getattr(b, func_name, None)
      if func is None:
        raise ValueError("unable to call %s on bijector %s (%s)" %
                         (func_name, b.name, func))
      new_shape = func(new_shape)
    return new_shape

  def _get_forward_event_shape(self, input_shape):
    return self._shape_helper("get_forward_event_shape", input_shape,
                              reverse=True)

  def _forward_event_shape(self, input_shape):
    return self._shape_helper("forward_event_shape", input_shape, reverse=True)

  def _get_inverse_event_shape(self, output_shape):
    return self._shape_helper("get_inverse_event_shape", output_shape,
                              reverse=False)

  def _inverse_event_shape(self, output_shape):
    return self._shape_helper("inverse_event_shape", output_shape,
                              reverse=False)

  def _forward(self, x, **condition_kwargs):
    # Forward composes right-to-left: Chain([f, g]).forward(x) == f(g(x)).
    # Per-bijector kwargs are keyed by each bijector's name.
    y = x
    for b in reversed(self.bijectors):
      y = b.forward(y, **condition_kwargs.get(b.name, {}))
    return y

  def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
    # Inverse composes left-to-right; log-det-Jacobians add along the way.
    x = y
    ildj = constant_op.constant(0., dtype=x.dtype,
                                name="inverse_log_det_jacobian")
    for b in self.bijectors:
      x, j = b.inverse_and_inverse_log_det_jacobian(
          x, **condition_kwargs.get(b.name, {}))
      ildj += j
    return x, ildj

  def _forward_log_det_jacobian(self, x, **condition_kwargs):
    # Accumulate each bijector's forward Jacobian at the intermediate value
    # it actually receives, then advance that value.
    y = x
    fldj = constant_op.constant(0., dtype=x.dtype,
                                name="forward_log_det_jacobian")
    for b in reversed(self.bijectors):
      bijector_condition_kwargs = condition_kwargs.get(b.name, {})
      fldj += b.forward_log_det_jacobian(y, **bijector_condition_kwargs)
      y = b.forward(y, **bijector_condition_kwargs)
    return fldj
class Identity(Bijector):
  """Bijector which computes Y = g(X) = X.

  Example Use:

  ```python
  # Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
  # ndim and 1 event ndim (i.e., vector of vectors).
  identity = Identity(batch_ndims=1, event_ndims=1)
  x = [[1., 2],
       [3, 4]]
  x == identity.forward(x) == identity.inverse(x)
  ```

  """

  def __init__(self, validate_args=False, name="identity"):
    # The identity map's Jacobian is trivially constant (always 1).
    super(Identity, self).__init__(
        is_constant_jacobian=True,
        validate_args=validate_args,
        name=name)

  def _forward(self, x):
    # Output is the input, untouched.
    return x

  def _inverse_and_inverse_log_det_jacobian(self, y):
    # dX/dY is the identity matrix, whose log-determinant is zero.
    zero = constant_op.constant(0., dtype=y.dtype)
    return y, zero

  def _forward_log_det_jacobian(self, x):
    # log|det(dY/dX)| == log(1) == 0.
    return constant_op.constant(0., dtype=x.dtype)
class PowerTransform(Bijector):
  """Bijector which computes `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`.

  The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps
  inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse`
  of this bijector.

  This bijector is equivalent to the `Exp` bijector when `c=0`.
  """

  def __init__(self,
               power=0.,
               event_ndims=0,
               validate_args=False,
               name="power_transform"):
    """Instantiates the `PowerTransform` bijector.

    Args:
      power: Python `float` scalar indicating the transform power, i.e.,
        `Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
      event_ndims: Python scalar indicating the number of dimensions associated
        with a particular draw from the distribution.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String` name given to ops managed by this object.

    Raises:
      ValueError: if `power < 0` or is not known statically.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    with self._name_scope("init", values=[power]):
      # `power` must be a statically-known (graph-construction-time) constant
      # so the `power == 0` special cases below can be resolved in Python.
      power = tensor_util.constant_value(
          ops.convert_to_tensor(power, name="power"))
      if power is None or power < 0:
        raise ValueError("`power` must be a non-negative TF constant.")
      self._power = power
    super(PowerTransform, self).__init__(
        batch_ndims=0,
        event_ndims=event_ndims,
        validate_args=validate_args,
        name=name)

  @property
  def power(self):
    """The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`."""
    return self._power

  def _forward(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.power == 0.:
      # Limit of (1 + x*c)**(1/c) as c -> 0 is exp(x).
      return math_ops.exp(x)
    # TODO(jvdillon): If large x accuracy is an issue, consider using
    # (1. + x * self.power)**(1. / self.power) when x >> 1.
    # exp(log1p(x*c)/c) is a numerically stabler form of (1 + x*c)**(1/c).
    return math_ops.exp(math_ops.log1p(x * self.power) / self.power)

  def _inverse_and_inverse_log_det_jacobian(self, y):
    y = self._maybe_assert_valid_y(y)
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(y)
    if self.power == 0.:
      # power == 0 (Exp case): x = log(y); ildj = -sum(log(y)) over events.
      x = math_ops.log(y)
      ildj = -math_ops.reduce_sum(x, reduction_indices=event_dims)
      return x, ildj
    # TODO(jvdillon): If large y accuracy is an issue, consider using
    # (y**self.power - 1.) / self.power when y >> 1.
    # x = (y**c - 1) / c, computed via expm1 for stability near y == 1.
    x = _expm1(math_ops.log(y) * self.power) / self.power
    # ildj = (c - 1) * sum(log(y)) over event dimensions.
    ildj = (self.power - 1.) * math_ops.reduce_sum(
        math_ops.log(y),
        reduction_indices=event_dims)
    return x, ildj

  def _forward_log_det_jacobian(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(x)
    if self.power == 0.:
      # power == 0 (Exp case): fldj = sum(x) over event dimensions.
      return math_ops.reduce_sum(x, reduction_indices=event_dims)
    # fldj = (1/c - 1) * sum(log1p(x*c)) over event dimensions.
    return (1. / self.power - 1.) * math_ops.reduce_sum(
        math_ops.log1p(x * self.power),
        reduction_indices=event_dims)

  def _maybe_assert_valid_x(self, x):
    """Optionally asserts `x >= -1/c` (the forward domain)."""
    if not self.validate_args or self.power == 0.:
      return x
    is_valid = check_ops.assert_non_negative(
        1. + self.power * x,
        message="Forward transformation input must be at least {}.".format(
            -1. / self.power))
    return control_flow_ops.with_dependencies([is_valid], x)

  def _maybe_assert_valid_y(self, y):
    """Optionally asserts `y > 0` (the inverse domain)."""
    if not self.validate_args:
      return y
    is_valid = check_ops.assert_positive(
        y, message="Inverse transformation input must be greater than 0.")
    return control_flow_ops.with_dependencies([is_valid], y)
class Exp(PowerTransform):
  """Bijector which computes Y = g(X) = exp(X).

  Example Use:

  ```python
  # Create the Y=g(X)=exp(X) transform which works only on Tensors with 1
  # batch ndim and 2 event ndims (i.e., vector of matrices).
  exp = Exp(batch_ndims=1, event_ndims=2)
  x = [[[1., 2],
        [3, 4]],
       [[5, 6],
        [7, 8]]]
  exp(x) == exp.forward(x)
  log(x) == exp.inverse(x)
  ```

  Note: the exp(.) is applied element-wise but the Jacobian is a reduction
  over the event space.
  """

  def __init__(self,
               event_ndims=0,
               validate_args=False,
               name="exp"):
    """Instantiates the `Exp` bijector.

    `Exp` is exactly `PowerTransform` with `power == 0` (the base-class
    default), so no extra state is needed here.

    Args:
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String` name given to ops managed by this object.
    """
    super(Exp, self).__init__(
        event_ndims=event_ndims,
        validate_args=validate_args,
        name=name)
# TODO(srvasude): Deprecate this class with a dedicated Linear Operator
# corresponding to TriL + V D V.T.
class _TriLPlusVDVTLightweightOperatorPD(object):
  """Helper/hidden class fake an OperatorPD for TriL+VDV.T.

  Represents the square matrix `M + V D V.T`, where `M` is lower-triangular
  and `V D V.T` is a (possibly low-rank) perturbation. Only the operations
  `Affine` actually needs (`sqrt_matmul`, `sqrt_solve`, `sqrt_log_abs_det`)
  are implemented; despite the "sqrt" names, no square root is taken (the
  names merely mirror the OperatorPD API).
  """

  def __init__(self, tril, v, diag=None, validate_args=False):
    """Creates an instance of _TriLPlusVDVTLightweightOperatorPD.

    WARNING: This object is not to be used outside of `Affine` where it is
    currently being temporarily used for refactoring purposes.

    Args:
      tril: `Tensor` of shape `[B1,..,Bb, d, d]`.
      v: `Tensor` of shape `[B1,...,Bb, d, k]`.
      diag: `Tensor` of shape `[B1,...,Bb, k, k]` or None
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
    """
    self._m = tril
    self._v = v
    self._validate_args = validate_args
    self._inputs = [tril, v]
    if diag is not None:
      self._inputs += [diag]
      # D = diag(diag). Also precompute D^-1, needed by the Woodbury solve in
      # `_woodbury_sandwiched_term`.
      self._d = operator_pd_diag.OperatorPDDiag(diag, verify_pd=validate_args)
      self._d_inv = operator_pd_diag.OperatorPDDiag(1. / diag,
                                                    verify_pd=validate_args)
      return
    # No diag given: D is the k x k identity (so V D V.T == V V.T) and D is
    # its own inverse. Build the identity shape from V's trailing dimension,
    # statically when possible, dynamically otherwise.
    if v.get_shape().is_fully_defined():
      v_shape = v.get_shape().as_list()
      id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
    else:
      v_shape = array_ops.shape(v)
      id_shape = array_ops.concat_v2(
          [v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
    self._d = operator_pd_identity.OperatorPDIdentity(
        id_shape, v.dtype, verify_pd=self.validate_args)
    self._d_inv = self._d

  @property
  def inputs(self):
    # Tensors this operator was constructed from; used by `Affine` for
    # graph-parent bookkeeping.
    return self._inputs

  @property
  def dtype(self):
    return self._m.dtype.base_dtype

  @property
  def validate_args(self):
    return self._validate_args

  def rank(self):
    """Returns `rank(self)`."""
    return array_ops.rank(self._m)

  def sqrt_matmul(self, x):
    """Computes `matmul(self, x)`.

    Doesn't actually do the sqrt! Named as such to agree with API.

    Args:
      x: `Tensor`

    Returns:
      self_times_x: `Tensor`
    """
    # (M + V D V.T) x expanded as M x + V (D (V.T x)); the grouping avoids
    # ever materializing the d x d perturbation matrix.
    m_x = math_ops.matmul(self._m, x)
    vt_x = math_ops.matmul(self._v, x, adjoint_a=True)
    d_vt_x = self._d.matmul(vt_x)
    v_d_vt_x = math_ops.matmul(self._v, d_vt_x)
    return m_x + v_d_vt_x

  def sqrt_solve(self, x):
    """Computes `solve(self, x)`.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute (M + V D V.T), we use the the Woodbury matrix identity:
      inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
    where,
      C = inv(D) + V.T inv(M) V.
    See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity

    Args:
      x: `Tensor`

    Returns:
      inv_of_self_times_x: `Tensor`
    """
    # inv(M) x via a triangular solve: M is lower-triangular by construction.
    minv_x = linalg_ops.matrix_triangular_solve(self._m, x)
    vt_minv_x = math_ops.matmul(self._v, minv_x, transpose_a=True)
    # inv(C) (V.T inv(M) x): C is small (k x k), so a dense solve is cheap.
    cinv_vt_minv_x = linalg_ops.matrix_solve(
        self._woodbury_sandwiched_term(), vt_minv_x)
    v_cinv_vt_minv_x = math_ops.matmul(self._v, cinv_vt_minv_x)
    minv_v_cinv_vt_minv_x = linalg_ops.matrix_triangular_solve(
        self._m, v_cinv_vt_minv_x)
    return minv_x - minv_v_cinv_vt_minv_x

  def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, ie,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # det(M) for triangular M is the product of its diagonal entries.
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), reduction_indices=[-1])
    # NOTE(review): `self._d.sqrt_log_abs_det()` is scaled by 2 here --
    # presumably because OperatorPD's sqrt_log_abs_det reports half of
    # log|det(D)|; confirm against the OperatorPD contract.
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m

  def _woodbury_sandwiched_term(self):
    """Computes the sandwiched term in the Woodbury identity.

    Computes the "`C`" in the the identity:
       inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
    where,
       C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity

    Returns:
      woodbury_sandwich_term: A `Tensor` to be used like `C`, above.
    """
    minv_v = linalg_ops.matrix_triangular_solve(self._m, self._v)
    vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
    # add_to_tensor computes inv(D) + (V.T inv(M) V) without materializing
    # inv(D) as a dense matrix.
    return self._d_inv.add_to_tensor(vt_minv_v)
class Affine(Bijector):
  # pylint: disable=line-too-long
  """Bijector which computes `Y = g(X; shift, scale) = matmul(scale, X) + shift` where `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.

  Write `A @ X` for `matmul(A, X)`. In TF parlance, the `scale` term is
  logically equivalent to:

  ```python
  scale = (
    scale_identity_multiplier * tf.diag(tf.ones(d)) +
    tf.diag(scale_diag) +
    scale_tril +
    scale_perturb_factor @ diag(scale_perturb_diag) @
      tf.transpose([scale_perturb_factor])
  )
  ```

  The `scale` term is applied without necessarily materializing constituent
  matrices, i.e., the matmul is [matrix-free](
  https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.

  Examples:

  ```python
  # Y = X
  b = Affine()

  # Y = X + shift
  b = Affine(shift=[1., 2, 3])

  # Y = 2 * I @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_identity_multiplier=2.)

  # Y = tf.diag(d1) @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_diag=[-1., 2, 1])  # Implicitly 3x3.

  # Y = (I + v * v.T) @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_perturb_factor=[[1., 0],
                                   [0, 1],
                                   [1, 1]])

  # Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_diag=[1., 3, 3],  # Implicitly 3x3.
             scale_perturb_diag=[2., 1],  # Implicitly 2x2.
             scale_perturb_factor=[[1., 0],
                                   [0, 1],
                                   [1, 1]])

  ```

  """
  # pylint: enable=line-too-long

  def __init__(self,
               shift=None,
               scale_identity_multiplier=None,
               scale_diag=None,
               scale_tril=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               event_ndims=1,
               validate_args=False,
               name="affine"):
    """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Numeric `Tensor`. If this is set to `None`, no shift is applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag=scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Numeric `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ... k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Numeric `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ... k, k], which represents a k x k
        lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
      scale_perturb_factor: Numeric `Tensor` representing factor matrix with
        last two dimensions of shape `(k, r)`.
        When `None`, no rank-r update is added to `scale`.
      scale_perturb_diag: Numeric `Tensor` representing the diagonal matrix.
        `scale_perturb_diag` has shape [N1, N2, ... r], which represents an
        r x r Diagonal matrix.
        When `None` low rank updates will take the form `scale_perturb_factor *
        scale_perturb_factor.T`.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    # Ambiguous definition of low rank update.
    if scale_perturb_diag is not None and scale_perturb_factor is None:
      raise ValueError("When scale_perturb_diag is specified, "
                       "scale_perturb_factor must be specified.")
    # Special case, only handling a scaled identity matrix. We don't know its
    # dimensions, so this is special cased.
    # We don't check identity_multiplier, since below we set it to 1. if all
    # other scale args are None.
    self._is_only_identity_multiplier = (scale_tril is None and
                                         scale_diag is None and
                                         scale_perturb_factor is None)
    # When no args are specified, pretend the scale matrix is the identity
    # matrix.
    if self._is_only_identity_multiplier and scale_identity_multiplier is None:
      scale_identity_multiplier = 1.
    with self._name_scope("init", values=[
        shift, scale_identity_multiplier, scale_diag, scale_tril,
        scale_perturb_diag, scale_perturb_factor, event_ndims]):
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if validate_args:
        is_less_than_two = check_ops.assert_less(
            event_ndims, 2,
            message="event_ndims must be 0 or 1")
        event_ndims = control_flow_ops.with_dependencies(
            [is_less_than_two], event_ndims)
      self._shift = _as_tensor(shift, "shift")
      # self._create_scale_operator returns an OperatorPD in all cases except if
      # self._is_only_identity_multiplier; in which case it returns a scalar
      # Tensor.
      self._scale = self._create_scale_operator(
          identity_multiplier=scale_identity_multiplier,
          diag=scale_diag,
          tril=scale_tril,
          perturb_diag=scale_perturb_diag,
          perturb_factor=scale_perturb_factor,
          event_ndims=event_ndims,
          validate_args=validate_args)
      if (self._shift is not None and
          self._shift.dtype.base_dtype != self._scale.dtype.base_dtype):
        raise TypeError("shift.dtype({}) does not match scale.dtype({})".format(
            self._shift.dtype, self._scale.dtype))
      super(Affine, self).__init__(
          batch_ndims=self._infer_batch_ndims(),
          event_ndims=event_ndims,
          # NOTE(review): conditional expressions bind looser than `+`, so
          # this parses as `([event_ndims] + [self._scale]) if is_tensor(...)
          # else ((self._scale.inputs + [self._shift]) if shift else [])`,
          # which drops `event_ndims` (and `shift`) from some branches --
          # confirm this grouping is intended.
          graph_parents=(
              [event_ndims] +
              [self._scale] if contrib_framework.is_tensor(self._scale)
              else self._scale.inputs +
              [self._shift] if self._shift is not None else []),
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name)

  def _create_scale_operator(self, identity_multiplier, diag, tril,
                             perturb_diag, perturb_factor, event_ndims,
                             validate_args):
    """Construct `scale` from various components.

    Args:
      identity_multiplier: floating point rank 0 `Tensor` representing a scaling
        done to the identity matrix.
      diag: Numeric `Tensor` representing the diagonal matrix. `scale_diag` has
        shape [N1, N2, ... k], which represents a k x k diagonal matrix.
      tril: Numeric `Tensor` representing the diagonal matrix. `scale_tril` has
        shape [N1, N2, ... k], which represents a k x k lower triangular matrix.
      perturb_diag: Numeric `Tensor` representing the diagonal matrix of the
        low rank update.
      perturb_factor: Numeric `Tensor` representing factor matrix.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.

    Returns:
      scale and batch_ndims. In the case of scaling by a constant, scale is a
      floating point `Tensor`. Otherwise, scale is an `OperatorPD`.

    Raises:
      ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
    """
    identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
    diag = _as_tensor(diag, "diag")
    tril = _as_tensor(tril, "tril")
    perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
    perturb_factor = _as_tensor(perturb_factor, "perturb_factor")
    identity_multiplier = self._maybe_validate_identity_multiplier(
        identity_multiplier, validate_args)
    # Pad the low-rank update terms to batch-matrix form up front.
    if perturb_factor is not None:
      perturb_factor = self._process_matrix(
          perturb_factor, min_rank=2, event_ndims=event_ndims)
    if perturb_diag is not None:
      perturb_diag = self._process_matrix(
          perturb_diag, min_rank=1, event_ndims=event_ndims)
    # The following if-statments are ordered by increasingly stronger
    # assumptions in the base matrix, i.e., we process in the order:
    # TriL, Diag, Identity.
    if tril is not None:
      # diag / identity_multiplier are folded into tril's diagonal here, so
      # the remaining branches need not consider them.
      tril = self._preprocess_tril(
          identity_multiplier, diag, tril, event_ndims)
      if perturb_factor is None:
        return operator_pd_cholesky.OperatorPDCholesky(
            tril, verify_pd=validate_args)
      return _TriLPlusVDVTLightweightOperatorPD(
          tril=tril, v=perturb_factor, diag=perturb_diag,
          validate_args=validate_args)
    if diag is not None:
      diag = self._preprocess_diag(identity_multiplier, diag, event_ndims)
      if perturb_factor is None:
        return operator_pd_diag.OperatorPDSqrtDiag(
            diag, verify_pd=validate_args)
      return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
          operator=operator_pd_diag.OperatorPDDiag(
              diag, verify_pd=validate_args),
          v=perturb_factor,
          diag=perturb_diag,
          verify_pd=validate_args)
    if identity_multiplier is not None:
      if perturb_factor is None:
        # Pure scaled identity: return the scalar directly (not an
        # OperatorPD); callers special-case on _is_only_identity_multiplier.
        return identity_multiplier
      # Infer the shape from the V and D.
      v_shape = array_ops.shape(perturb_factor)
      identity_shape = array_ops.concat_v2((v_shape[:-1], (v_shape[-2],)), 0)
      scaled_identity = operator_pd_identity.OperatorPDIdentity(
          identity_shape,
          perturb_factor.dtype.base_dtype,
          scale=identity_multiplier,
          verify_pd=validate_args)
      return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
          operator=scaled_identity,
          v=perturb_factor,
          diag=perturb_diag,
          verify_pd=validate_args)
    raise ValueError("One of tril, diag and/or identity_multiplier must be "
                     "specified.")

  def _maybe_validate_identity_multiplier(self, identity_multiplier,
                                          validate_args):
    """Check that the init arg `identity_multiplier` is valid."""
    if identity_multiplier is None or not validate_args:
      return identity_multiplier
    # NOTE(review): this inner `if validate_args` is always True here (the
    # early return above already handled the False case).
    if validate_args:
      identity_multiplier = control_flow_ops.with_dependencies(
          [check_ops.assert_positive(identity_multiplier)],
          identity_multiplier)
    return identity_multiplier

  def _preprocess_tril(self, identity_multiplier, diag, tril, event_ndims):
    """Helper to preprocess a lower triangular matrix."""
    tril = array_ops.matrix_band_part(tril, -1, 0)  # Zero out TriU.
    if identity_multiplier is None and diag is None:
      return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
    # Fold the identity multiplier and/or diag contribution into tril's
    # diagonal, so a single operator represents the combined base matrix.
    new_diag = array_ops.matrix_diag_part(tril)
    if identity_multiplier is not None:
      new_diag += identity_multiplier
    if diag is not None:
      new_diag += diag
    tril = array_ops.matrix_set_diag(tril, new_diag)
    return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)

  def _preprocess_diag(self, identity_multiplier, diag, event_ndims):
    """Helper to preprocess a diagonal matrix."""
    if identity_multiplier is not None:
      diag += identity_multiplier
    return self._process_matrix(diag, min_rank=1, event_ndims=event_ndims)

  def _process_matrix(self, matrix, min_rank, event_ndims):
    """Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input. Keep track if the matrix was padded, to distinguish between a
    # rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. Its currently unbroken
    # but error-prone since the function call order may change in the future.
    self._rank_two_event_ndims_one = math_ops.logical_and(
        math_ops.equal(array_ops.rank(matrix), min_rank),
        math_ops.equal(event_ndims, 1))
    # Prepend a size-1 dim iff the padded case applies (left is 0 or 1).
    left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    pad = array_ops.concat_v2([
        array_ops.ones([left], dtype=dtypes.int32),
        array_ops.shape(matrix)], 0)
    return array_ops.reshape(matrix, pad)

  def _infer_batch_ndims(self):
    """Return batch_ndims."""
    if self._is_only_identity_multiplier:
      return 0
    # The real batch dims is one less when we pad in the case of event_ndims =
    # 1, and the rank of the underlying scale being 2. This allows us to have
    # non-negative sample dims.
    return (self._scale.rank() - 2 -
            array_ops.where(self._rank_two_event_ndims_one, 1, 0))

  @property
  def shift(self):
    # The `shift` term in Y = scale @ X + shift (or None).
    return self._shift

  @property
  def scale(self):
    # TODO(srvasude): Remove this exception once TriLPlusVDVT is properly
    # implemented.
    if isinstance(self._scale, _TriLPlusVDVTLightweightOperatorPD):
      raise NotImplementedError("Cannot access scale when Tril+VDV.T.")
    return self._scale

  def _forward(self, x):
    y = x
    if self._is_only_identity_multiplier:
      # Scalar scale: plain elementwise multiply; no matrix reshaping needed.
      y *= self._scale
      if self.shift is not None:
        return y + self.shift
      return y
    # Reshape to a batch of event-sample matrices so the operator matmul
    # applies, then undo the reshaping.
    y, sample_shape = self.shaper.make_batch_of_event_sample_matrices(y)
    y = self._scale.sqrt_matmul(y)
    y = self.shaper.undo_make_batch_of_event_sample_matrices(y, sample_shape)
    if self.shift is not None:
      return y + self.shift
    return y

  def _inverse(self, y):
    x = y
    # Undo shift first, then undo scale (reverse order of _forward).
    if self.shift is not None:
      x -= self.shift
    if self._is_only_identity_multiplier:
      return x / self._scale
    x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
    x = self._scale.sqrt_solve(x)
    x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
    return x

  def _inverse_log_det_jacobian(self, y):
    # The Jacobian is constant (affine map), so it is just the negation of
    # the forward log-det-Jacobian.
    return -self._forward_log_det_jacobian(y)

  def _forward_log_det_jacobian(self, x):
    if self._is_only_identity_multiplier:
      # TODO(jvdillon): We don't pad in this case and instead let the fldj be
      # applied via broadcast.
      # For scale = c*I over a d-dim event, log|det| = d * log|c| (or just
      # log|c| for scalar events).
      d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
      return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
          math_ops.equal(self.shaper.event_ndims, 0), 1., d)
    fldj = self._scale.sqrt_log_abs_det()
    # We need to squeeze off the padded dimension.
    start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    return array_ops.reshape(fldj, array_ops.shape(fldj)[start:])
class AffineLinearOperator(Bijector):
  """Bijector which computes `Y = g(X; shift, scale) = scale @ X.T + shift`.

  `shift` is a numeric `Tensor` and `scale` is a `LinearOperator`.

  If `X` is a scalar then the forward transformation is: `scale * X + shift`
  where `*` denotes the scalar product.

  Note: we don't always simply transpose `X` (but write it this way for
  brevity). Actually the input `X` undergoes the following transformation
  before being premultiplied by `scale`:

  1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e.,
     `new_sample_shape = [1]`. Otherwise do nothing.
  2. The sample shape is flattened to have one dimension, i.e.,
     `new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`.
  3. The sample dim is cyclically rotated left by 1, i.e.,
     `new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the
     event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch
     dimensions.

  (For more details see `shape.make_batch_of_event_sample_matrices`.)

  The result of the above transformation is that `X` can be regarded as a batch
  of matrices where each column is a draw from the distribution. After
  premultiplying by `scale`, we take the inverse of this procedure. The input
  `Y` also undergoes the same transformation before/after premultiplying by
  `inv(scale)`.

  Example Use:

  ```python
  linalg = tf.contrib.linalg

  x = [1., 2, 3]

  shift = [-1., 0., 1]
  diag = [1., 2, 3]
  scale = linalg.LinearOperatorDiag(diag)
  affine = AffineLinearOperator(shift, scale)
  # In this case, `forward` is equivalent to:
  # diag * x + shift
  y = affine.forward(x)  # [0., 4, 10]

  shift = [2., 3, 1]
  tril = [[1., 0, 0],
          [2, 1, 0],
          [3, 2, 1]]
  scale = linalg.LinearOperatorTriL(tril)
  affine = AffineLinearOperator(shift, scale)
  # In this case, `forward` is equivalent to:
  # np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
  y = affine.forward(x)  # [3., 7, 11]
  ```

  """

  def __init__(self,
               shift=None,
               scale=None,
               event_ndims=1,
               validate_args=False,
               name="affine_linear_operator"):
    """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Numeric `Tensor`.
      scale: Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String` name given to ops managed by this object.

    Raises:
      ValueError: if `event_ndims` is not 0 or 1.
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    graph_parents = []
    with self._name_scope("init", values=[shift]):
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if tensor_util.constant_value(event_ndims) is not None:
        # Statically-known value: validate eagerly at graph-build time.
        event_ndims = tensor_util.constant_value(event_ndims)
        if event_ndims not in (0, 1):
          raise ValueError("event_ndims({}) was not 0 or 1".format(event_ndims))
      else:
        # Dynamic value: defer the check to runtime (only when validating).
        if validate_args:
          # Shape tool will catch if event_ndims is negative.
          event_ndims = control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  event_ndims, 2, message="event_ndims must be 0 or 1")],
              event_ndims)
        graph_parents += [event_ndims]
      if shift is not None:
        shift = ops.convert_to_tensor(shift, name="shift")
        graph_parents += [shift]
      self._shift = shift
      if scale is not None:
        if (shift is not None and
            shift.dtype.base_dtype != scale.dtype.base_dtype):
          raise TypeError(
              "shift.dtype({}) is incompatible with scale.dtype({}).".format(
                  shift.dtype, scale.dtype))
        if not isinstance(scale, linear_operator.LinearOperator):
          raise TypeError("scale is not an instance of tf.LinearOperator")
        if validate_args and not scale.is_non_singular:
          raise ValueError("Scale matrix must be non-singular.")
        graph_parents += scale.graph_parents
        # Prefer the statically-known rank; otherwise fall back to a dynamic
        # rank tensor (which must then be tracked as a graph parent).
        if scale.tensor_rank is not None:
          batch_ndims = scale.tensor_rank - 2
        else:
          batch_ndims = scale.tensor_rank_dynamic() - 2
          graph_parents += [batch_ndims]
      else:
        batch_ndims = 0  # We won't need shape inference when scale is None.
      self._scale = scale
      super(AffineLinearOperator, self).__init__(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          graph_parents=graph_parents,
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name)

  @property
  def shift(self):
    """The `shift` `Tensor` in `Y = scale @ X.T + shift`."""
    return self._shift

  @property
  def scale(self):
    """The `scale` `LinearOperator` in `Y = scale @ X.T + shift`."""
    return self._scale

  def _forward(self, x):
    y = x
    if self.scale is not None:
      # Reshape into a batch of event-sample matrices so LinearOperator.apply
      # can premultiply; then restore the original layout.
      y, sample_shape = self.shaper.make_batch_of_event_sample_matrices(
          y, expand_batch_dim=False)
      with ops.control_dependencies([self.scale.assert_non_singular()] if
                                    self.validate_args else []):
        y = self.scale.apply(y)
      y = self.shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
    if self.shift is not None:
      y += self.shift
    return y

  def _inverse(self, y):
    x = y
    # Undo shift first, then undo scale (reverse of _forward).
    if self.shift is not None:
      x -= self.shift
    if self.scale is not None:
      x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      # Solve fails if the op is singular so we may safely skip this assertion.
      x = self.scale.solve(x)
      x = self.shaper.undo_make_batch_of_event_sample_matrices(
          x, sample_shape, expand_batch_dim=False)
    return x

  def _inverse_log_det_jacobian(self, y):
    # Constant Jacobian (affine map): just negate the forward value.
    return -self._forward_log_det_jacobian(y)

  def _forward_log_det_jacobian(self, x):  # pylint: disable=unused-argument
    if self.scale is None:
      # Identity scale: the Jacobian determinant is 1, so its log is 0.
      return constant_op.constant(0, dtype=x.dtype.base_dtype)
    with ops.control_dependencies([self.scale.assert_non_singular()] if
                                  self.validate_args else []):
      return self.scale.log_abs_determinant()
class Softplus(Bijector):
  """Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.

  The softplus `Bijector` has the following two useful properties:

  * The domain is the positive real numbers
  * `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
    the `Exp` `Bijector`.

  Example Use:

  ```python
  # Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
  # batch ndim and 2 event ndims (i.e., vector of matrices).
  softplus = Softplus(batch_ndims=1, event_ndims=2)
  x = [[[1., 2],
        [3, 4]],
       [[5, 6],
        [7, 8]]]
  log(1 + exp(x)) == softplus.forward(x)
  log(exp(x) - 1) == softplus.inverse(x)
  ```

  Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
  reduction over the event space.
  """

  def __init__(self,
               event_ndims=0,
               validate_args=False,
               name="softplus"):
    # All non-event dims are treated as sample/batch dims by the base class.
    super(Softplus, self).__init__(
        batch_ndims=0,
        event_ndims=event_ndims,
        validate_args=validate_args,
        name=name)

  def _forward(self, x):
    """Element-wise softplus: log(1 + exp(x))."""
    return nn_ops.softplus(x)

  def _inverse_and_inverse_log_det_jacobian(self, y):
    """Returns the inverse transform of `y` and its inverse log-det-Jacobian."""
    # The most stable inverse of softplus is not the most obvious one.
    # y = softplus(x) = Log[1 + exp{x}], (which means y > 0).
    # ==> exp{y} = 1 + exp{x}                                (1)
    # ==> x = Log[exp{y} - 1]                                (2)
    #       = Log[(exp{y} - 1) / exp{y}] + Log[exp{y}]
    #       = Log[(1 - exp{-y}) / 1] + Log[exp{y}]
    #       = Log[1 - exp{-y}] + y                           (3)
    # (2) is the "obvious" inverse, but (3) is more stable than (2) for large y.
    # For small y (e.g. y = 1e-10), (3) will become -inf since 1 - exp{-y} will
    # be zero. To fix this, we use 1 - exp{-y} approx y for small y > 0.
    #
    # Stable inverse log det jacobian.
    # Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
    # ==> dX/dY = exp{Y} / (exp{Y} - 1)
    #           = 1 / (1 - exp{-Y}),
    # which is the most stable for large Y > 0. For small Y, we use
    # 1 - exp{-Y} approx Y.
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(y)
    # -_expm1(-y) == 1 - exp(-y); presumably _expm1 is a numerically-stable
    # expm1 helper defined elsewhere in this file -- it implements the
    # small-y accuracy fix described above.
    log_one_minus_exp_neg = math_ops.log(-_expm1(-y))
    x = y + log_one_minus_exp_neg
    ildj = -math_ops.reduce_sum(
        log_one_minus_exp_neg, reduction_indices=event_dims)
    return x, ildj

  def _forward_log_det_jacobian(self, x):  # pylint: disable=unused-argument
    """log|dY/dX| = -softplus(-x), reduced over the event dimensions."""
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(x)
    # dY/dX = sigmoid(x) and log(sigmoid(x)) = -softplus(-x).
    return -math_ops.reduce_sum(
        nn_ops.softplus(-x), reduction_indices=event_dims)
class SoftmaxCentered(Bijector):
"""Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`.
To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a
bijection, the forward transformation appends a value to the input and the
inverse removes this coordinate. The appended coordinate represents a pivot,
e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last
coordinate.
Because we append a coordinate, this bijector only supports `event_ndim in [0,
1]`, i.e., scalars and vectors.
Example Use:
```python
bijector.SoftmaxCentered(event_ndims=1).forward(tf.log([2, 3, 4]))
# Result: [0.2, 0.3, 0.4, 0.1]
# Extra result: 0.1
bijector.SoftmaxCentered(event_ndims=1).inverse([0.2, 0.3, 0.4, 0.1])
# Result: tf.log([2, 3, 4])
# Extra coordinate removed.
```
At first blush it may seem like the [Invariance of domain](
https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this
implementation is not a bijection. However, the appended dimension
makes the (forward) image non-open and the theorem does not directly apply.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softmax_centered"):
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 1]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
self._static_event_ndims = event_ndims
super(SoftmaxCentered, self).__init__(
batch_ndims=0, # We'll regard all non-event dims as sample dims.
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _get_forward_event_shape(self, input_shape):
if input_shape.ndims is None:
return input_shape
if input_shape.ndims != self._static_event_ndims:
raise ValueError("input_shape.dims = %d != %d" %
(input_shape.ndims, self._static_event_ndims))
if input_shape.ndims == 0:
return tensor_shape.TensorShape([2])
if input_shape.ndims == 1:
return tensor_shape.TensorShape(input_shape[0] + 1)
# Unreachable code:
raise ValueError("event_ndims = %d must be 0 or 1" % input_shape.ndims)
def _forward_event_shape(self, input_shape):
ndims = array_ops.shape(input_shape)
if self.validate_args:
# It is not possible for a negative shape so we need only check <= 1.
is_zero_or_one = check_ops.assert_equal(
ndims, 0 if self._static_event_ndims == 0 else 1,
message="event_ndims must be 0 or 1")
ndims = control_flow_ops.with_dependencies([is_zero_or_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor(
[2], dtype=dtypes.int32, name="output_shape")
return input_shape + 1
def _get_inverse_event_shape(self, output_shape):
if output_shape.ndims is None:
return output_shape
if output_shape.ndims != 1:
raise ValueError("output_shape.ndims = %d != 1" % output_shape.ndims)
if self._static_event_ndims == 0:
return tensor_shape.TensorShape([])
return tensor_shape.TensorShape(output_shape[0] - 1)
def _inverse_event_shape(self, output_shape):
ndims = array_ops.shape(output_shape)[0]
if self.validate_args:
# It is not possible for a negative shape so we need only check <= 1.
is_one = check_ops.assert_equal(
ndims, 1, message="event_ndims must be 1")
ndims = control_flow_ops.with_dependencies([is_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor([], dtype=dtypes.int32, name="output_shape")
return array_ops.expand_dims(output_shape[0] - 1, dim=0)
def _forward(self, x):
# Pad the last dim with a zeros vector. We need this because it lets us
# infer the scale in the inverse function.
y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
else array_ops.rank(y))
y = array_ops.pad(y,
paddings=array_ops.concat_v2(
(array_ops.zeros(
(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
0))
# Set shape hints.
if x.get_shape().ndims is not None:
shape = x.get_shape().as_list()
if self._static_event_ndims == 0:
shape += [2]
elif shape[-1] is not None:
shape[-1] += 1
shape = tensor_shape.TensorShape(shape)
y.get_shape().assert_is_compatible_with(shape)
y.set_shape(shape)
# Since we only support event_ndims in [0, 1] and we do padding, we always
# reduce over the last dimension, i.e., dim=-1 (which is the default).
return nn_ops.softmax(y)
def _inverse(self, y):
# To derive the inverse mapping note that:
# y[i] = exp(x[i]) / normalization
# and
# y[end] = 1 / normalization.
# Thus:
# x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
# = log(exp(x[i])/normalization) - log(y[end])
# = log(y[i]) - log(y[end])
shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
if y.get_shape().is_fully_defined()
else array_ops.shape(y, name="shape"))
ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")
# Do this first to make sure CSE catches that it'll happen again in
# _inverse_log_det_jacobian.
x = math_ops.log(y)
# We now extract the last coordinate of the rightmost dimension.
# Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
begin = array_ops.one_hot(indices=ndims-1,
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
size = array_ops.concat_v2(
(shape[:-1], np.asarray(
[1], dtype=shape.dtype)), 0)
log_normalization = -array_ops.strided_slice(x, begin, begin + size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
size = array_ops.concat_v2((shape[:-1], [shape[-1] - 1]), 0)
x = array_ops.strided_slice(x, begin, begin + size)
x += log_normalization
if self._static_event_ndims == 0:
x = array_ops.squeeze(x, squeeze_dims=[ndims-1])
# Set shape hints.
if y.get_shape().ndims is not None:
shape = y.get_shape().as_list()
if self._static_event_ndims == 0:
shape = shape[:-1]
elif shape[-1] is not None:
shape[-1] -= 1
shape = tensor_shape.TensorShape(shape)
x.get_shape().assert_is_compatible_with(shape)
x.set_shape(shape)
return x
def _inverse_log_det_jacobian(self, y):
# WLOG, consider the vector case:
# x = log(y[:-1]) - log(y[-1])
# where,
# y[-1] = 1 - sum(y[:-1]).
# We have:
# det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
# = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1)
# = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
# = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) *
# det(diag(y[:-1])) } (2)
# = 1 / { y[-1] prod(y[:-1]) }
# = 1 / prod(y)
# (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
# or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
# docstring "Tip".
# (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
return -math_ops.reduce_sum(math_ops.log(y), reduction_indices=-1)
def _forward_log_det_jacobian(self, x):
  """Returns log|det J(g)(x)| of the centered-softmax forward map.

  For the scalar case (event_ndims == 0) this is the log-derivative of the
  sigmoid: log(sigmoid(x) * (1 - sigmoid(x))) = x - 2 * softplus(x).
  """
  if self._static_event_ndims == 0:
    return x - 2. * nn_ops.softplus(x)
  else:
    # This code is similar to nn_ops.log_softmax but different because we have
    # an implicit zero column to handle. I.e., instead of:
    #   reduce_sum(logits - reduce_sum(exp(logits), dim))
    # we must do:
    #   log_normalization = 1 + reduce_sum(exp(logits))
    #   -log_normalization + reduce_sum(logits - log_normalization)
    # softplus(logsumexp(x)) = log(1 + sum(exp(x))) accounts for the implicit
    # zero logit appended by the forward transform.
    log_normalization = nn_ops.softplus(
        math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
    fldj = (-log_normalization +
            math_ops.reduce_sum(x - log_normalization,
                                reduction_indices=-1,
                                keep_dims=True))
    # Drop the kept reduction dim so the result has one fewer rank than x.
    return array_ops.squeeze(fldj, squeeze_dims=-1)
class SigmoidCentered(SoftmaxCentered):
  """Bijector which computes Y = g(X) = exp([X 0]) / (1 + exp(-X)).

  Equivalent to: `bijector.SoftmaxCentered(event_ndims=0)`.

  See `bijector.SoftmaxCentered` for more details.
  """

  def __init__(self, validate_args=False, name="sigmoid_centered"):
    # No event_ndims is passed; this relies on SoftmaxCentered's default
    # (presumably event_ndims=0, per the class docstring -- confirm against
    # the parent __init__).
    super(SigmoidCentered, self).__init__(
        validate_args=validate_args, name=name)
class CholeskyOuterProduct(Bijector):
  # pylint: disable=line-too-long
  """Bijector which computes Y = g(X) = X X.T where X is a lower-triangular, positive-diagonal matrix.

  `event_ndims` must be 0 or 2, i.e., scalar or matrix.

  Note: the upper-triangular part of X is ignored (whether or not it's zero).

  Examples:

  ```python
  bijector.CholeskyOuterProduct(event_ndims=2).forward(x=[[1., 0], [2, 1]])
  # Result: [[1, 1], [1, 5]], i.e., x x.T

  bijector.CholeskyOuterProduct(event_ndims=2).inverse(y=[[1., 1], [1, 5]])
  # Result: [[1, 0], [2, 1]], i.e., chol(y).
  ```
  """
  # pylint: enable=line-too-long

  def __init__(self, event_ndims=2, validate_args=False,
               name="cholesky_outer_product"):
    """Instantiates the `CholeskyOuterProduct` bijector.

    Args:
      event_ndims: `constant` `int32` scalar `Tensor` indicating the number of
        dimensions associated with a particular draw from the distribution. Must
        be 0 or 2.
      validate_args: `Boolean` indicating whether arguments should be checked
        for correctness.
      name: `String` name given to ops managed by this object.

    Raises:
      ValueError: if event_ndims is neither 0 or 2.
    """
    self._graph_parents = []
    self._name = name
    with self._name_scope("init", values=[event_ndims]):
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      # event_ndims must be resolvable at graph-construction time;
      # constant_value returns None when it cannot be statically determined.
      event_ndims = tensor_util.constant_value(event_ndims)
      if event_ndims is None or event_ndims not in [0, 2]:
        raise ValueError("`event_ndims` must be a TF constant which is 0 or 2")
      self._static_event_ndims = event_ndims
    super(CholeskyOuterProduct, self).__init__(
        validate_args=validate_args,
        name=name)

  def _forward(self, x):
    """Computes y = x**2 (scalar case) or y = x x.T (matrix case)."""
    if self._static_event_ndims == 0:
      return math_ops.square(x)
    if self.validate_args:
      # Runtime checks: x must be a (batch of) square matrix.
      is_matrix = check_ops.assert_rank_at_least(x, 2)
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(shape[-2], shape[-1])
      x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
    # For safety, explicitly zero-out the upper triangular part.
    x = array_ops.matrix_band_part(x, -1, 0)
    return math_ops.matmul(x, x, adjoint_b=True)

  def _inverse_and_inverse_log_det_jacobian(self, y):
    """Returns x = sqrt(y) or chol(y), plus the inverse log-det Jacobian."""
    x = (math_ops.sqrt(y) if self._static_event_ndims == 0
         else linalg_ops.cholesky(y))
    # log|det dX/dY| evaluated at y equals -log|det dY/dX| evaluated at x.
    return x, -self._forward_log_det_jacobian(x)

  def _forward_log_det_jacobian(self, x):
    """Returns log|det J(g)(x)| for g(X) = X X.T (or x**2 in the scalar case)."""
    # Let Y be a symmetric, positive definite matrix and write:
    #   Y = X X.T
    # where X is lower-triangular.
    #
    # Observe that,
    #   dY[i,j]/dX[a,b]
    #   = d/dX[a,b] { X[i,:] X[j,:] }
    #   = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
    #
    # To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
    # symmetric and X is lower-triangular, we need vectors of dimension:
    #   d = p (p + 1) / 2
    # where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
    #   k = { i (i + 1) / 2 + j   i>=j
    #       { undef               i<j
    # and assume zero-based indexes. When k is undef, the element is dropped.
    # Example:
    #           j  k
    #        0 1 2 3  /
    #    0 [ 0 . . . ]
    # i  1 [ 1 2 . . ]
    #    2 [ 3 4 5 . ]
    #    3 [ 6 7 8 9 ]
    # Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
    # slight abuse: k(i,j)=undef means the element is dropped.)
    #
    # We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
    # defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
    # In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
    # (1) j<=i<a thus i,j!=a.
    # (2) i=a>j  thus i,j!=a.
    #
    # Since the Jacobian is lower-triangular, we need only compute the product
    # of diagonal elements:
    #   d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
    #   = X[j,j] + I[i=j] X[i,j]
    #   = 2 X[j,j].
    # Since there is a 2 X[j,j] term for every lower-triangular element of X we
    # conclude:
    #   |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
    if self._static_event_ndims == 0:
      if self.validate_args:
        is_positive = check_ops.assert_positive(
            x, message="All elements must be positive.")
        x = control_flow_ops.with_dependencies([is_positive], x)
      # Scalar case: y = x**2, so dy/dx = 2x and log|dy/dx| = log(2) + log(x).
      return math.log(2.) + math_ops.log(x)

    diag = array_ops.matrix_diag_part(x)
    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(
          x, 2, message="Input must be a (batch of) matrix.")
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(
          shape[-2], shape[-1],
          message="Input must be a (batch of) square matrix.")
      # Assuming lower-triangular means we only need check diag>0.
      is_positive_definite = check_ops.assert_positive(
          diag, message="Input must be positive definite.")
      x = control_flow_ops.with_dependencies(
          [is_matrix, is_square, is_positive_definite], x)

    # Create a column vector equal to: [p, p-1, ..., 2, 1].T.
    if x.get_shape().ndims is None or x.get_shape()[-1].value is None:
      p = array_ops.shape(x)[-1]
    else:
      p = x.get_shape()[-1].value
    exponents = array_ops.expand_dims(
        math_ops.linspace(math_ops.cast(p, dtype=x.dtype), 1., p),
        dim=1)
    # Weighted sum sum_j (p-j) log X[j,j], matching the derivation above.
    sum_weighted_log_diag = array_ops.squeeze(
        math_ops.matmul(math_ops.log(diag), exponents), squeeze_dims=-1)
    fldj = p * math.log(2.) + sum_weighted_log_diag

    if x.get_shape().ndims is not None:
      # Result has the batch shape of x (event dims [-2:] are consumed).
      fldj.set_shape(x.get_shape()[:-2])

    return fldj
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
c419cd527913ba8ffac223f35ecc2a9c486be6f5 | bb4a0c26fad59565ea6d95d1f63d062b00cf951c | /singleton.py | 6d5e31845e71ebb1c189ea29e6c93265659df55d | [] | no_license | buptxiaomiao/python_trick | cfafc6e56c212235c812a6430974268c7b436b7c | 17f0154f35d328df6e827f5cb7cf7edab8b34d7e | refs/heads/master | 2020-03-27T13:37:08.483145 | 2018-09-13T17:42:09 | 2018-09-13T17:42:09 | 146,619,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | #########################################################################
# coding: utf-8
#!/usr/bin/python
# File Name: singleton.py
# Author: buptxiaomiao
# mail: buptwjh@outlook.com
# Created Time: 四 8/30 10:48:24 2018
#########################################################################
# 使用__new__
class Singleton(object):
    """Singleton implemented via ``__new__``.

    Repeated instantiation returns one shared object. The cached instance
    is checked through ``cls.__dict__`` (not ``hasattr``) so that each
    subclass gets its own singleton instead of silently inheriting and
    returning the instance cached on its parent class.
    """

    def __new__(cls, *args, **kwargs):
        # Accept (and ignore) constructor arguments so subclasses that
        # define an __init__ with parameters still work; the original
        # zero-argument signature broke any Singleton(args...) call.
        if 'instance' not in cls.__dict__:
            cls.instance = super(Singleton, cls).__new__(cls)
        return cls.instance
# 使用装饰器
def singleton(cls):
    """Class decorator that caches a single instance per decorated class.

    The first call constructs the object, forwarding whatever arguments
    were supplied; every subsequent call returns that same cached object
    (later arguments are ignored).
    """
    cache = dict()

    def get_instance(*args, **kwargs):
        # EAFP: the cache hit is the common path after the first call.
        try:
            return cache[cls]
        except KeyError:
            cache[cls] = cls(*args, **kwargs)
            return cache[cls]

    return get_instance
# Demo class: with @singleton applied, TEST() always returns the same cached
# instance. Note that the name TEST now refers to the decorator's closure
# (get_instance), not the original class object.
@singleton
class TEST(object):
    pass
if __name__ == '__main__':
    # Demo: both singleton mechanisms return one shared object, so the two
    # printed ids on each line are identical.
    o1 = Singleton()
    o2 = Singleton()
    # Format into a single string so the statement behaves identically under
    # Python 2 and Python 3 (the original bare `print a, b` statement is a
    # SyntaxError on Python 3).
    print('%d %d' % (id(o1), id(o2)))

    m1 = TEST()
    m2 = TEST()
    print('%d %d' % (id(m1), id(m2)))
| [
"buptwjh@outlook.com"
] | buptwjh@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.