blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a981616802effa951af949ca29f15efb99f8b3f8 | 8d8788a356767d58a2aa4ebe0db476e0ca07b39b | /src/mqttsub_bsp.py | 2d60cd3cf4da3bd1a010f8f29a30f1034b99961e | [
"CC0-1.0"
] | permissive | essentialprogramming/IoTDeeperDiveOOP2020 | da64a1f3044c3bbb5fca6f5913b2c30e9904c667 | a8080076e3182623d1b69e52cb106eced8e233de | refs/heads/master | 2022-03-30T22:20:45.752250 | 2020-02-07T19:38:03 | 2020-02-07T19:38:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import paho.mqtt.client as mqtt
# Callback: invoked once the connection to the broker has been established
# (original German comment: "Callback für Verbindungsaufbau").
def on_connect(client, userdata, flags, rc):
    # rc is the connect result code (0 means success); the printed German
    # message reads "connection established with error code <rc>".
    print("Verbindung aufgebaut mit Fehlercode " + str(rc))
    # Subscribe once connected, so the subscription exists on every (re)connect.
    client.subscribe("oop2020/test")

# Callback: invoked for every message received on a subscribed topic
# (original German comment: "Callback für Nachrichtenempfang").
def on_message(client, userdata, msg):
    # msg.payload is bytes; str() therefore prints the b'...' representation.
    print("Neue Nachricht im Topic " + msg.topic + " Inhalt: " + str(msg.payload))

# Create the MQTT client.
client = mqtt.Client()
# Attach the callback handlers.
client.on_connect = on_connect
client.on_message = on_message
# Connect to the broker: host, port, keep-alive interval in seconds.
client.connect("localhost", 1883, 60)
# Wait for incoming messages (blocks forever).
client.loop_forever()
| [
"noreply@github.com"
] | essentialprogramming.noreply@github.com |
269747f3eeef5378c3f14c48fb365a3b2159e355 | d7757d2215d00307504426ccf87ceaeb1f4536ed | /2708.py | 476536aea6289425fe14dcc72f57c81362af51d8 | [] | no_license | paulaandrezza/URI-Solutions | e23a69164cbdbac636db73a3ba29cabb08a32db6 | 7694fa27d68b2ec2dbd426505ab680fb8ea9ace7 | refs/heads/master | 2022-10-29T06:45:27.791115 | 2020-06-06T00:35:17 | 2020-06-06T00:35:17 | 265,044,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | p = 0
# Accumulate two running totals from "<word> <number>" lines on stdin until
# the sentinel word "ABEND" appears.  "SALIDA" lines add, any other word
# subtracts.  (`p` is initialised on the line just before this block;
# `j` counts the net number of events.)
j = 0
while True:
    tokens = input().split()
    word = tokens[0]
    if word == "ABEND":
        break
    amount = int(tokens[1])
    if word == "SALIDA":
        j += 1
        p += amount
    else:
        j -= 1
        p -= amount
print(p)
print(j) | [
"noreply@github.com"
] | paulaandrezza.noreply@github.com |
0d7158803ac2c899a965502d20b12d441c224e5f | 51e1c73f751f28c738dd55f465bc074cd5a6bd6e | /data_structures/linked_list/linkedlist.py | bfe3e2af6f529c6147b4bf58531309ed06287023 | [
"MIT"
] | permissive | MthwBrwn/data_structures_and_algorithms | 10c5e1d289590ac8f613f285f13e52485baba3fa | 376f02346c67d495d8a3e8580c101f1244b78a78 | refs/heads/master | 2020-04-08T13:40:23.799620 | 2019-01-23T07:29:02 | 2019-01-23T07:29:02 | 159,401,541 | 0 | 0 | MIT | 2019-01-24T07:51:24 | 2018-11-27T21:27:20 | Python | UTF-8 | Python | false | false | 4,701 | py | from .node import Node
class LinkedList(object):
    """Singly linked list of ``Node`` objects with an O(1) length.

    ``self.head`` references the first node (or ``None`` when the list is
    empty) and ``self._size`` is updated by every mutator, so ``len()``
    never has to traverse the list.
    """

    def __init__(self, iterable=None):
        """Create a list, front-inserting each value of *iterable* in order.

        Because every value is inserted at the head, the values end up in
        reverse order relative to the input list.

        :param iterable: optional ``list`` of initial values.
        :raises TypeError: if *iterable* is given but is not a ``list``.
        """
        self.head = None
        self._size = 0
        if iterable is None:
            iterable = []
        if type(iterable) is not list:
            raise TypeError('iterable must be of type list')
        for val in iterable:
            self.insert(val)

    def __str__(self):
        """Return user level information about the list."""
        output = f'Linked List: Head val - { self.head }'
        return output

    def __repr__(self):
        """Return technical information (head node and node count)."""
        output = f'<LinkedList: head - { self.head } size - {self._size}>'
        return output

    def __len__(self):
        """Return the number of nodes currently in the list."""
        return self._size

    def insert(self, val):
        """Insert *val* at the front of the list in O(1)."""
        node = Node(val)
        node._next = self.head
        self.head = node
        self._size += 1

    def includes(self, val):
        """Return True when some node holds *val*, else False (O(n) scan)."""
        current = self.head
        while current:
            if current.val == val:
                return True
            current = current._next
        return False

    def append(self, val):
        """Add a node holding *val* at the tail of the list (O(n)).

        Fixed: the original fell through to the tail traversal even when the
        list was empty, briefly linking the new node to itself before
        patching the link back to ``None``; an early return avoids that.
        """
        new_node = Node(val)
        self._size += 1
        if self.head is None:
            self.head = new_node
            return
        current = self.head
        while current._next:
            current = current._next
        current._next = new_node

    def insert_before(self, find_val, new_val):
        """Insert a node holding *new_val* directly before the first node
        whose value equals *find_val*.

        :returns: self, to allow chaining.
        :raises ValueError: if the list is empty or *find_val* is absent.
        """
        new_node = Node(new_val)
        current = self.head
        previous = None
        if current is None:
            raise ValueError("There aren't any nodes in linkedlist")
        # Special case: inserting before the head re-points self.head.
        if current.val == find_val:
            new_node._next = current
            self.head = new_node
            self._size += 1
            return self
        while current:
            if current.val == find_val:
                new_node._next = current
                previous._next = new_node
                self._size += 1
                return self
            previous = current
            current = current._next
        raise ValueError("A node did not match your find value")

    def insert_after(self, find_val, new_val):
        """Insert a node holding *new_val* directly after the first node
        whose value equals *find_val*.

        :returns: self, to allow chaining.
        :raises ValueError: if the list is empty or *find_val* is absent.
        """
        new_node = Node(new_val)
        current = self.head
        if current is None:
            raise ValueError("There aren't any nodes in linkedlist")
        while current:
            if current.val == find_val:
                new_node._next = current._next
                current._next = new_node
                self._size += 1
                return self
            current = current._next
        raise ValueError("A node did not match your find value")

    def kth_from_end(self, k):
        """Return the value of the node *k* positions from the tail, where
        ``k == 0`` is the tail itself.

        Fixed off-by-one: the original raised the "k is longer than list"
        message whenever the lookahead pointer landed exactly on the tail
        (e.g. ``k == 1`` on a two-node list), rejecting valid requests.
        The error-string returns are kept for backward compatibility with
        existing callers.
        """
        outpoint = self.head
        # Advance the lookahead pointer k nodes ahead of the start.
        for _ in range(k):
            if outpoint is None:
                return "Exception, k is longer than list"
            outpoint = outpoint._next
        if outpoint is None:
            # k equals or exceeds the list length (or the list is empty).
            return "Exception, k is longer than list"
        # Walk both pointers until the lookahead reaches the tail; `current`
        # is then exactly k nodes before the tail.
        current = self.head
        while outpoint._next:
            current = current._next
            outpoint = outpoint._next
        return current.val
| [
"mthwbrwn@gmail.com"
] | mthwbrwn@gmail.com |
343f224c3b95a445b3a306ce80c44244a21ca995 | 0224a22c3b9960a177a5c962eeb75d53a76a9841 | /src/0-9/4/4.py | 3bd05b4269182b36a6c68891b44079fd872ce269 | [] | no_license | jepatti/euler_solutions | 6619621efb9da62de3ae983b4fc260da87b881bc | bcf11d87178fda4f30a471762d14e38cabe4b293 | refs/heads/master | 2021-01-13T01:56:00.946092 | 2012-01-19T05:38:09 | 2012-01-19T05:38:09 | 3,159,581 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | '''
Created on Jan 11, 2011
@author: Jeff Patti
'''
def isPalindrom(s):
    """Return True when *s* reads identically forwards and backwards."""
    # A sequence is a palindrome exactly when it equals its own reverse;
    # the empty and one-element cases are trivially covered by the test.
    return s == s[::-1]
# Project Euler #4: track the largest palindromic product i*j with i, j < 1000.
# NOTE(review): `max` shadows the builtin of the same name; it must be kept
# because the final print statement (the line after this block) uses it.
max = 0
for i in range(1000):
    for j in range(1000):
        prod = i * j
        # Do the cheap integer comparison first so the recursive palindrome
        # check is skipped for most products.
        if prod > max and isPalindrom(str(prod)):
            max = prod
print max | [
"jeffpatti@gmail.com"
] | jeffpatti@gmail.com |
71c821509417c94ee842caec376a6a4c2803b333 | d9a22d4dcdfc0c28176c0e8afd784b30d275597e | /test_suite/shared_data/dispersion/Fyn_SH3_R1rho/relax_results/solution_tp02.py | 6e2250c6b7fdf8ea287e0c2e8ad080017c2505a3 | [] | no_license | jlec/relax | fda1b3ff77be0afc21c2e6cc52348ae7635cd07a | c317326ddeacd1a1c608128769676899daeae531 | refs/heads/master | 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,039 | py | """Compare the synthetic cpmg_fit data to the relax solution.
To run this, type:
$ rm -f solution_tp02.log; ../../../../../relax --tee solution_tp02.log solution_tp02.py
"""
# Python module imports.
from os import remove
from shutil import move
# relax module imports.
from lib.dispersion.variables import EXP_TYPE_R1RHO
from lib.nmr import frequency_to_ppm
from specific_analyses.relax_disp.data import generate_r20_key
# Create a relaxation-dispersion data pipe to hold all data below.  (pipe,
# spin, relax_disp, etc. are presumably injected into the script namespace by
# the relax interpreter that runs this file -- see the module docstring.)
pipe.create('R2eff', 'relax_disp')

# Create the spin system: a single 15N spin on residue 14.
spin.create(res_name='X', res_num=14, spin_name='N')
spin.element('N', spin_id='@N')
spin.isotope('15N', spin_id='@N')
# The spectral data - experiment ID, R2eff file name, experiment type, spin ID string, spectrometer frequency in Hertz, relaxation time.
data = [
['600_MHz_nu1_50_Hz', 'T14_600_50.dsp', ':14@N', 600e6, 50, 0.04],
['600_MHz_nu1_75_Hz', 'T14_600_75.dsp', ':14@N', 600e6, 75, 0.04],
['600_MHz_nu1_100_Hz', 'T14_600_100.dsp', ':14@N', 600e6, 100, 0.04],
['600_MHz_nu1_150_Hz', 'T14_600_150.dsp', ':14@N', 600e6, 150, 0.04],
['600_MHz_nu1_200_Hz', 'T14_600_200.dsp', ':14@N', 600e6, 200, 0.04],
['800_MHz_nu1_100_Hz', 'T14_800_100.dsp', ':14@N', 800e6, 100, 0.04],
['800_MHz_nu1_200_Hz', 'T14_800_200.dsp', ':14@N', 800e6, 200, 0.04],
['800_MHz_nu1_400_Hz', 'T14_800_400.dsp', ':14@N', 800e6, 400, 0.04]
]
spin_lock_offset = {}
spin_lock_offset['600_MHz_nu1_50_Hz'] = [ 340.0, 330.0, 320.0, 310.0, 300.0, 290.0, 280.0, 270.0, 260.0, 250.0, 240.0, 230.0, 220.0, 210.0, 200.0, 190.0, 180.0, 170.0, 160.0, 150.0, 140.0, 130.0, 120.0, 110.0, 100.0, 90.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 0.0, -10.0, -20.0, -30.0, -40.0, -50.0, -60.0, -70.0, -80.0, -90.0]
spin_lock_offset['600_MHz_nu1_75_Hz'] = [ 340.0, 330.0, 320.0, 310.0, 300.0, 290.0, 280.0, 270.0, 260.0, 250.0, 240.0, 230.0, 220.0, 210.0, 200.0, 190.0, 180.0, 170.0, 160.0, 150.0, 140.0, 130.0, 120.0, 110.0, 100.0, 90.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 0.0, -10.0, -20.0, -30.0, -40.0, -50.0, -60.0, -70.0, -80.0, -90.0]
spin_lock_offset['600_MHz_nu1_100_Hz'] = [ 340.0, 330.0, 320.0, 310.0, 300.0, 290.0, 280.0, 270.0, 260.0, 250.0, 240.0, 230.0, 220.0, 210.0, 200.0, 190.0, 180.0, 170.0, 160.0, 150.0, 140.0, 130.0, 120.0, 110.0, 100.0, 90.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 0.0, -10.0, -20.0, -30.0, -40.0, -50.0, -60.0, -70.0, -80.0, -90.0]
spin_lock_offset['600_MHz_nu1_150_Hz'] = [ 385.0, 370.0, 355.0, 340.0, 325.0, 310.0, 295.0, 280.0, 265.0, 250.0, 235.0, 220.0, 205.0, 190.0, 175.0, 160.0, 145.0, 130.0, 115.0, 100.0, 85.0, 70.0, 55.0, 40.0, 25.0, 10.0, -5.0, -20.0, -35.0, -50.0, -65.0, -80.0, -95.0, -110.0, -125.0, -140.0, -155.0, -170.0, -185.0]
spin_lock_offset['600_MHz_nu1_200_Hz'] = [ 385.0, 370.0, 355.0, 340.0, 325.0, 310.0, 295.0, 280.0, 265.0, 250.0, 235.0, 220.0, 205.0, 190.0, 175.0, 160.0, 145.0, 130.0, 115.0, 100.0, 85.0, 70.0, 55.0, 40.0, 25.0, 10.0, -5.0, -20.0, -35.0, -50.0, -65.0, -80.0, -95.0, -110.0, -125.0, -140.0, -155.0, -170.0, -185.0]
spin_lock_offset['800_MHz_nu1_100_Hz'] = [ 780.0, 750.0, 720.0, 690.0, 660.0, 630.0, 600.0, 570.0, 540.0, 510.0, 480.0, 450.0, 420.0, 390.0, 360.0, 330.0, 300.0, 270.0, 240.0, 210.0, 180.0, 150.0, 120.0, 90.0, 60.0, 30.0, 0.0, -30.0, -60.0, -90.0, -120.0, -150.0, -180.0, -210.0, -240.0, -270.0, -300.0, -330.0, -360.0]
spin_lock_offset['800_MHz_nu1_200_Hz'] = [ 960.0, 920.0, 880.0, 840.0, 800.0, 760.0, 720.0, 680.0, 640.0, 600.0, 560.0, 520.0, 480.0, 440.0, 400.0, 360.0, 320.0, 280.0, 240.0, 200.0, 160.0, 120.0, 80.0, 40.0, 0.0, -40.0, -80.0, -120.0, -160.0, -200.0, -240.0, -280.0, -320.0, -360.0, -400.0, -440.0, -480.0, -520.0, -560.0]
spin_lock_offset['800_MHz_nu1_400_Hz'] = [ 1150.0, 1100.0, 1050.0, 1000.0, 950.0, 900.0, 850.0, 800.0, 750.0, 700.0, 650.0, 600.0, 550.0, 500.0, 450.0, 400.0, 350.0, 300.0, 250.0, 200.0, 150.0, 100.0, 50.0, 0.0, -50.0, -100.0, -150.0, -200.0, -250.0, -300.0, -350.0, -400.0, -450.0, -500.0, -550.0, -600.0, -650.0, -700.0, -750.0]
# Loop over the files, reading in the data.
# NOTE: `id` and `file` shadow Python builtins; kept unchanged for
# consistency with the rest of this generated-style script.
for id, file, spin_id, H_frq, field, relax_time in data:
    # Loop over each CPMG frequency.
    for offset in spin_lock_offset[id]:
        # Build a unique spectrum ID from the experiment ID and the offset.
        new_id = "%s_%.3f" % (id, offset)

        # Set the NMR field strength.
        spectrometer.frequency(id=new_id, frq=H_frq)

        # Set the relaxation dispersion experiment type.
        relax_disp.exp_type(spectrum_id=new_id, exp_type=EXP_TYPE_R1RHO)

        # Relaxation dispersion CPMG constant time delay T (in s).
        relax_disp.relax_time(spectrum_id=new_id, time=relax_time)

        # Set the relaxation dispersion spin-lock field strength (nu1).
        relax_disp.spin_lock_field(spectrum_id=new_id, field=field)

        # Set the spin-lock offset, converting back to ppm.
        relax_disp.spin_lock_offset(spectrum_id=new_id, offset=-frequency_to_ppm(frq=offset, B0=H_frq, isotope='15N'))

    # Read the R2eff data (once per experiment file).
    relax_disp.r2eff_read_spin(id=id, file=file, dir='..', spin_id=spin_id, offset_col=1, data_col=2, error_col=3)

# Load the R1 data.
relax_data.read(ri_id='600MHz', ri_type='R1', frq=600e6, file='R1_600MHz.out', dir='..', mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5, data_col=6, error_col=7)
relax_data.read(ri_id='800MHz', ri_type='R1', frq=800e6, file='R1_800MHz.out', dir='..', mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5, data_col=6, error_col=7)

# Change the model.
relax_disp.select_model('TP02')

# The R20 keys.
r20_600_key = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=600e6)
r20_800_key = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=800e6)

# Manually set the parameter values (the reference solution being checked).
spin_N = cdp.mol[0].res[0].spin[0]
spin_N.r2 = {
    r20_600_key: 9.108060397660111,
    r20_800_key: 13.793213528551924,
}
spin_N.pA = 0.945912353996981
spin_N.pB = 0.054087646003019
spin_N.kex = 367.981715073974556
spin_N.dw = 4.305697497613982
spin_N.ri_data['600MHz'] = 3.179051390898238
spin_N.ri_data['800MHz'] = 4.452840879991469

# Calculate the chi-squared value at the fixed parameters and print it next
# to the cpmg_fit reference value for comparison.
minimise.calculate()
print("%-40s %20.15f" % ("relax chi2:", spin_N.chi2))
print("%-40s %20.15f" % ("cpmg_fit chi2 (corrections turned off):", 472.400507470708874))

# Minimisation.
minimise.grid_search(inc=7)
minimise.execute('simplex', constraints=True)

# Plot the dispersion curves.
relax_disp.plot_disp_curves(dir='.', num_points=100, extend=0, force=True)

# Save the results.
state.save('solution_tp02', dir='.', compress_type=1, force=True)

# Cleanup of files produced as side effects of the plotting step.
print("\n\nMoving 'disp_14_N.agr' to 'solution_tp02.agr'.")
move('disp_14_N.agr', 'solution_tp02.agr')
print("Deleting 'grace2images.py'.")
remove('grace2images.py')
| [
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] | bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5 |
400c4825be91859d206dbc84ac0bef043e1582b7 | 66a05459831aef06fc86316ecb782848c116b226 | /collective/etherpad/Extensions/Install.py | be56c049b8b686cc2c19c1f049088f75de18d462 | [] | no_license | toutpt/collective.etherpad | 8d01323b3e31ff0872afa1fd8e4bc85b14a3f123 | 590414ddd3ed7437cefea91c853d291feb9b328f | refs/heads/master | 2020-05-18T05:31:36.603712 | 2013-09-26T12:45:35 | 2013-09-26T12:45:35 | 8,142,351 | 2 | 0 | null | 2013-09-18T16:42:41 | 2013-02-11T17:13:59 | Python | UTF-8 | Python | false | false | 709 | py |
def uninstall(portal, reinstall=False):
    """Remove what the quickinstaller does not handle: the 'etherpad'
    object action this product added to several content types.

    :param portal: the Plone site root.
    :param reinstall: when True this call is part of a reinstall and the
        actions are deliberately left in place.
    """
    if reinstall:
        return
    types_tool = portal.portal_types
    for portal_type in ('Document', 'News Item', 'Event', 'Topic'):
        type_info = getattr(types_tool, portal_type, None)
        if not type_info:
            continue
        # Only touch types where our action was actually installed.
        if not type_info.getActionObject('object/etherpad'):
            continue
        category_id_pairs = [(a.category, a.id) for a in type_info.listActions()]
        position = category_id_pairs.index(('object', 'etherpad'))
        type_info.deleteActions((position, ))
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
294685dafab114b23a0eb33d3a01804c6c97700c | c9d0f92ac66c5a3985561644af95104e280989ff | /dynamic_정수삼각형.py | d98dbe5d4765b113590344bab5bf4703ade49c5e | [] | no_license | yuheunk/practice_codes | e0dcafd9c0a9cadef65ac08608502e92123b37b5 | 4a32b89bc970d1a8fecd69246fa9a8564bd25a60 | refs/heads/main | 2023-06-13T08:22:41.164562 | 2021-07-06T16:24:42 | 2021-07-06T16:24:42 | 359,150,260 | 0 | 0 | null | 2021-06-22T10:49:15 | 2021-04-18T13:25:58 | Python | UTF-8 | Python | false | false | 402 | py | n = int(input())
# Maximum path sum in a number triangle read from stdin (`n` rows; n is read
# on the preceding line).  Each row is updated in place to hold the best
# running total reaching that cell, so the answer is the max of the last row.
rows = []
for _ in range(n):
    rows.append(list(map(int, input().split())))

for i in range(1, len(rows)):
    above = rows[i - 1]
    for j in range(len(rows[i])):
        if j == 0:
            rows[i][j] += above[0]
        elif j == len(rows[i]) - 1:
            rows[i][j] += above[-1]
        else:
            rows[i][j] += max(above[j], above[j - 1])

print(max(rows[-1]))
| [
"noreply@github.com"
] | yuheunk.noreply@github.com |
86315daf9a0d96a287b57218e982382650d20fb7 | e71e6f545790e58447dc94bc404b215388b7e65b | /apps/inscricao/urls.py | 3e8075b6211c029ccdfd658f3262b30b14c27af3 | [
"MIT"
] | permissive | akaytatsu/inscricao_conferencia | e53e04314ebabd9a71d634d3ea3d6f456edd3310 | 9ab774c6fe30cdb1a45d3732ade394df6e3b4258 | refs/heads/master | 2020-09-23T08:50:36.352756 | 2020-07-19T17:33:10 | 2020-07-19T17:33:10 | 225,456,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | from django.urls import path
from .views import (
HomeView, LoginView, PagarView, LogoutView, ContatoView, DependentesView, NovaInscricaoView, inscricaoView
)
urlpatterns = [
path('', LoginView.as_view(), name="home"),
path('dashboard', HomeView.as_view(), name="dashboard"),
path('inscricao', inscricaoView.as_view(), name="inscricao"),
path('dependentes', DependentesView.as_view(), name="dependentes"),
path('nova-inscricao', NovaInscricaoView.as_view(), name="nova_inscricao"),
path('pagar', PagarView.as_view(), name="pagar_inscricao"),
path('contato', ContatoView.as_view(), name="contato"),
path('logout', LogoutView.as_view(), name="logout"),
]
| [
"thiagosistemas3@gmail.com"
] | thiagosistemas3@gmail.com |
eb2ee2569095255a25d0a28fed528f065fdf0706 | 7bd8dfafc3494110faabcf6c677e6c2def533f1f | /src/features/generate_design_features.py | 5fe8ba44630b87e93180f5ec6768c15251023185 | [] | no_license | leaflettuce/tdwpDB | 3d3be8c365853e104f1414d2cadc112624186fbe | c72c5a93ea06ac474f6a23cb31b04b2fee4b663e | refs/heads/master | 2021-08-06T20:45:39.798834 | 2018-12-02T21:44:27 | 2018-12-02T21:44:27 | 147,001,421 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 13:04:58 2018
@author: andyj
"""
import pandas as pd
import numpy as np
from generate_sales_features import add_region, add_day_of_week, states
# import data
data_dir = '../../data/processed/design/'
file_name = 'design_1.0.csv'
df = pd.read_csv(data_dir + file_name)

''' minor cleanup '''
# Replace the 'na' placeholder with 0 so the column can be used numerically.
df['num_print_colors'] = np.where(df['num_print_colors']=='na', 0, df['num_print_colors'])
# Drop the per-day aggregate rows, keeping only individual item rows.
df = df[df['name'] != 'day_total']

''' Generate Feature calls'''
# MONTH: take the first two characters of the date string
# (assumes MM/... formatted dates -- TODO confirm against the CSV).
df = df.rename(columns={'date': 'Date'})
df['month'] = df['Date'] # CUT OFF AFTER '/'
df['month'] = df['month'].str[0:2]
df['month'] = pd.to_numeric(df['month'])

# STATE & City: split a "City, ST" style location around the comma position
# stored temporarily in the 'tmp' column.
df['State'] = df['location']
df['city'] = df['location']
df['tmp'] = df['State'].str.find(',')
for i, row in df.iterrows():
    # Two characters after ", " form the state abbreviation.
    df.at[i, 'State'] = df.at[i, 'State'][df['tmp'][i] + 2: df['tmp'][i] + 4]
    df.at[i, 'city'] = df.at[i, 'city'][:df['tmp'][i]]
df = df.drop(['tmp'], axis = 1)

# REGION: helper imported from generate_sales_features.
add_region(df, states)
# DAY OF WEEK
add_day_of_week(df, clip_front = False)
''' Other Edits '''
def setup_pred(df):
    """Return a modelling view of *df*: identifier-like columns that carry
    no predictive signal are dropped and the 'value' column is renamed to
    RESULT_VALUE (the target)."""
    trimmed = df.drop(columns=['Unnamed: 0', 'Date', 'location', 'merch_id', 'tour_id'])
    return trimmed.rename(columns={'value': 'RESULT_VALUE'})
''' Get Analytical FORMAT '''
pred_df = setup_pred(df)
# Organize columns: features first, RESULT_VALUE (the target) last.
pred_df = pred_df[['name', 'tour_name', 'tour_type', 'venue', 'State', 'city', 'region',
                   'year', 'month', 'day_of_week', 'season', 't_color', 'num_print_colors',
                   'print_colors', 'logo', 'tour', 'elite' ,'evil' ,'lyrics', 'minimal', 'RESULT_VALUE']]

''' WRITE OUT TO CSV's '''
upload_dir = '../../data/processed/design/'
# Name the index so it round-trips as an 'id' column in the CSVs.
df.index.name = 'id'
pred_df.index.name = 'id'
df.to_csv(upload_dir + 'design_2.0' + '.csv')
pred_df.to_csv(upload_dir + 'design_3.0' + '.csv') | [
"andyjtrick@gmail.com"
] | andyjtrick@gmail.com |
fb76c4be1b0cf81a073e5af609eb98406eaa239c | fe528368f75a5123bd07e1337e57c11e042ee358 | /lesson4/problem4.py | c4b72c5d2e2630651adc5f44520b367f17b40c4e | [] | no_license | kailaaa/unit4-lesson4 | 7bda1ba128656b9b2e5602c9f83bf184381ecad4 | 2ba095024f16ac0269b338d94d647489d6971416 | refs/heads/master | 2020-04-22T05:55:54.419815 | 2019-02-11T17:37:05 | 2019-02-11T17:37:05 | 170,172,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | from turtle import *
# Two turtles share the same pen settings and only differ in colour.
boogie = Turtle()
noel = Turtle()

for critter, shade in ((boogie, "turquoise"), (noel, "red")):
    critter.color(shade)
    critter.pensize(8)
    critter.speed(3)
    critter.turtlesize(4, 4, 4)
    critter.shape("turtle")

# Boogie traces an equilateral triangle; Noel draws a circle.
for _ in range(3):
    boogie.forward(80)
    boogie.left(120)
noel.circle(50)

mainloop()
| [
"noreply@github.com"
] | kailaaa.noreply@github.com |
ab94d7bd5e5676a2761a12e1ed5b005ffe772cb9 | cb6cfbb5c44b47ca5cd153cb950462d6a59c9bf4 | /get-subcat.py | 74a506d0023f436df8f77517e8f2e421f1a2eeed | [] | no_license | ludwig/cat3 | 56d789ca14529a00a113738b782e64b2a25aeb04 | 54cb60b1ba6cc79906e7feab1f1231f36774d947 | refs/heads/master | 2020-05-24T11:05:08.260492 | 2017-03-19T17:08:15 | 2017-03-19T17:08:15 | 84,850,553 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | #!/usr/bin/env python3
import sys
import json
import wiki
# Example: https://en.wikipedia.org/w/api.php?action=query&format=json&list=categorymembers&cmtype=subcat&cmtitle=Category:Anatomy

def main():
    """Dump (as indented JSON on stdout) the subcategories of the Wikipedia
    category named on the command line."""
    # NOTE: The category can be specified with either spaces or underscores.
    if len(sys.argv) < 2:
        sys.stderr.write("{0} CATEGORY\n".format(sys.argv[0]))
        sys.exit(1)

    category = sys.argv[1]
    api = wiki.API('wikipedia')
    subcats = api.get_category_subcategories(category)

    json.dump(subcats, sys.stdout, indent=2)
    sys.stdout.write('\n')


if __name__ == "__main__":
    main()
| [
"luis.armendariz@gmail.com"
] | luis.armendariz@gmail.com |
947991d0f4f5393e08a326b1cd55967248d091f8 | 7a25f12d96ba6f1fa87f0618bd839186096bccb2 | /vision/template_matching.py | fc927e47f7810ab2cf7731353fd9ec2d8a5ba307 | [] | no_license | asack20/ME-184-AdvancedRobotics | aba6259d59a4395af3b687c2d88f2da8aac13ead | 90e525ac12a604dc7650a411c62808dacc96a7d7 | refs/heads/master | 2020-07-25T04:46:29.367413 | 2019-12-11T22:40:55 | 2019-12-11T22:40:55 | 208,168,193 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 15:40:12 2019
@author: Andrew
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Load the search image and the template to find inside it, both grayscale
# (flag 0 == IMREAD_GRAYSCALE).
# NOTE(review): cv.imread returns None for a missing path -- confirm the
# training images exist relative to the working directory.
img = cv.imread('training/wood/img103.jpg',0)
img2 = img.copy()
template = cv.imread('training/wood_Template/img002.jpg',0)
# shape is (rows, cols); reverse it to get (width, height).
w, h = template.shape[::-1]

# All the 6 methods for comparison in a list
methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
           'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']

for meth in methods:
    img = img2.copy()  # restore the unannotated image for each method
    # eval() maps the constant's name to its cv value; acceptable here only
    # because the strings come from the hard-coded list above, not user input.
    method = eval(meth)

    # Apply template Matching
    res = cv.matchTemplate(img,template,method)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    # Draw the best-match rectangle in white (255), thickness 2.
    cv.rectangle(img,top_left, bottom_right, 255, 2)

    # Show the raw score map next to the annotated image.
    plt.figure()
    plt.subplot(121),plt.imshow(res,cmap = 'gray')
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122),plt.imshow(img,cmap = 'gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
plt.show() | [
"andrew.sack@tufts.edu"
] | andrew.sack@tufts.edu |
01e31b5def65ba66a0b5b8c58dd666c03742a49f | 00ed1eb9f4875be9c116eae90c850b4c5f0ebd4d | /tests/funcat/utils/test_yahoo.py | 8cf228d677cb84b693c54063b84d932589854b5c | [
"Apache-2.0"
] | permissive | pchaos/funcat2 | a64fbcfc5c1d7b6ed1356cd9558a2efabae90c0e | ff554cc134906a5a182fc31774488d62a839b314 | refs/heads/master | 2023-09-02T19:56:16.017728 | 2021-09-03T01:57:15 | 2021-09-03T01:57:15 | 356,155,099 | 12 | 5 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
import unittest
import warnings
from funcat.utils import save_sp500_tickers, get_data_from_yahoo
__updated__ = "2021-08-10"
class TestYahoo(unittest.TestCase):
    """Smoke tests for the funcat Yahoo-Finance helper functions."""

    @classmethod
    def setUpClass(cls):
        super(TestYahoo, cls).setUpClass()
        # Hide "ResourceWarning: Enable tracemalloc to get the object
        # allocation traceback" noise emitted during the downloads.
        warnings.simplefilter('ignore', ResourceWarning)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_save_sp500_tickers(self):
        sp500 = save_sp500_tickers()
        # At least 500 tickers should be scraped from the S&P 500 listing.
        failure_message = f"返回长度不够{len(sp500)=}\n: {sp500=}"
        self.assertTrue(len(sp500) >= 500, failure_message)
        print(f"{len(sp500)=}, {sp500=}")

    def test_get_data_from_yahoo(self):
        # Only checks that the download helper runs without raising.
        get_data_from_yahoo()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"drifthua@gmail.com"
] | drifthua@gmail.com |
d5fa36d59cc984969ce718738f359abd86ae766d | 5536c48b1d65a998edcd906ed04aafd47430359a | /music/urls.py | 9a3d34939e7b6f8de99b2d72a0a25c3d9e5ad718 | [] | no_license | Stilen/MyMusic | c29aeec4163a6a80f38edf26adca89566cd8b884 | ff4ed2b22e50de53d30f3abc2234921f80a805b0 | refs/heads/master | 2020-12-04T07:10:25.609352 | 2016-09-03T16:29:56 | 2016-09-03T16:29:56 | 67,233,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from django.conf.urls import url
from . import views
# URL namespace: lets callers reverse these routes as 'music:<name>'.
app_name = 'music'

urlpatterns = [
    # /music/
    url(r'^$', views.IndexView.as_view(), name='index'),
    # /music/register/
    url(r'^register/$', views.UserFormView.as_view(), name='register'),
    # /music/album_id/
    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
    # /music/album/add
    # NOTE(review): the album/* patterns below are not anchored with '^',
    # so they can match URLs that merely contain 'album/...' -- confirm
    # this is intended.
    url(r'album/add/$', views.AlbumCreate.as_view(), name='album-add'),
    # /music/album/id/
    url(r'album/(?P<pk>[0-9]+)/$', views.AlbumUpdate.as_view(), name='album-update'),
    # /music/album/id/delete
    url(r'album/(?P<pk>[0-9]+)/delete$', views.AlbumDelete.as_view(), name='album-delete'),
]
| [
"Pedro Silva"
] | Pedro Silva |
f1c5d60ef3020dba050246f8b6a601a8e31a3732 | a53a4a0b4e8f3c617cbe979f74b8a7a71c4cd489 | /learning_logs/migrations/0001_initial.py | 1d27dd77adddeea19811e183d06ba4c30e3e636a | [] | no_license | Afra55/learning_log | af36cf72ce3931fa119360035cb92ab35d436a80 | 1ae5a174d284badbcd97b80012cadaf4632dedfc | refs/heads/master | 2021-05-06T02:36:19.794596 | 2017-12-27T15:20:51 | 2017-12-27T15:20:51 | 114,651,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # Generated by Django 2.0 on 2017-12-18 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``Topic`` table."""

    # First migration of the app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=200)),
                # Stamped automatically when the row is first created.
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"1937525930@qq.com"
] | 1937525930@qq.com |
f70e536426fbc6618d6c2e9737ac42819cdfa80c | 8c5db66899c4dc428e699becce752a3fba03b302 | /opencv/face_detection_using_haarCascade.py | 508a1461e6b074262e3b0a935e1c33a9ffc3e32a | [
"MIT"
] | permissive | aliakbar09a/Practising_CV_using_opencv-python | 477280f14942408db37de937b43899c68a70b64d | ed797d4a29ae16aeec5f833c32a94b10cf92ffe7 | refs/heads/master | 2020-03-14T06:20:48.110610 | 2018-05-25T12:15:04 | 2018-05-25T12:15:04 | 131,482,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import cv2
import numpy as np
# Haar cascade classifiers for faces, eyes and smiles (XML files must be in
# the working directory).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')

# Capture from the default webcam.
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    # NOTE(review): `ret`/`frame` are unchecked; if the camera fails,
    # cvtColor below will raise.  Confirm whether that is acceptable here.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x,y), (x + w, y + h), (0, 255, 255), 2)
        # BUG FIX: the column slice previously used x:x+h (the face height)
        # instead of x:x+w, giving the region of interest the wrong width
        # whenever the detected box was not square.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        # Look for eyes and smiles only inside the detected face region.
        eyes = eye_cascade.detectMultiScale(roi_gray, 2.3, 10)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)
        smile = smile_cascade.detectMultiScale(roi_gray, 2.3, 10)
        for (sx, sy, sw, sh) in smile:
            cv2.rectangle(roi_color, (sx, sy), (sx + sw, sy + sh), (255, 0, 0), 2)
    cv2.imshow('image', frame)
    # Quit when the 'q' key is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
| [
"aliakbar09a@gmail.com"
] | aliakbar09a@gmail.com |
be8b873cdb97399f0932d2e2bf37c94e9760b317 | 6b51b7729812b5b7c06128feca32889c31e87ae3 | /runtests.py | 7479d9a69a1e7fcc12eb26398b946c3a4e3a1e46 | [
"MIT"
] | permissive | Krishnamurtyp/pytest-example-1 | 50ec8deb997ddd2eec2657fa32a190b9e50846d7 | 70d0ed2a69dc4631f85d8cec1c2c5617d7b24e78 | refs/heads/master | 2021-12-07T12:20:42.807914 | 2015-11-27T22:04:13 | 2015-11-27T22:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | #!/usr/bin/env python
import os
import re
import subprocess
html_prefix = "./html/"

# Build an HTML index with one row per test module, colour-coded by result.
idx_html = "<table>\n<tr><td>test</td><td>status</td></tr>\n"
for root, dirs, files in os.walk("tests"):
    for fname in files:
        if not fname.endswith(".py"):
            continue
        sname = os.path.join(root, fname)
        html = html_prefix + sname + ".html"
        # shell=True is required so the PYTHONPATH=... prefix takes effect.
        cmd = ("PYTHONPATH=. py.test --doctest-modules -v -l " +
               sname + " --html=" + html)
        code = subprocess.call(cmd, shell = True)
        if code == 0:
            status = "<p style='color:green;'>Success</p>"
        else:
            status = "<p style='color:red;'>FAILURE</p>"
        idx_html += ("<tr><td><a href='" + sname + ".html'>" + sname +
                     "</a></td><td>" + status + "</td></tr>\n")

idx_html += "</table>\n"
with open(html_prefix + "index.html", "w") as f:
    f.write(idx_html)
| [
"afiskon@gmail.com"
] | afiskon@gmail.com |
ebadb75044a2caaba7d6421c16390c9a2c51c683 | fa4f2c0e17d397ac5756bd13197847dda71dd5a1 | /sympy/matrices/expressions/tests/test_permutation.py | 9be3a82f3773008e6eb10070825467bde2930445 | [
"BSD-3-Clause"
] | permissive | MaanasVohra/sympy | 14911337e4d2a4250de71f449ffcaba66458f5e9 | f5af41f27a3f5542a76077176f2990d49922a817 | refs/heads/master | 2020-11-28T13:36:08.221923 | 2019-12-23T18:01:23 | 2019-12-23T18:01:23 | 229,828,941 | 1 | 0 | NOASSERTION | 2019-12-23T22:03:15 | 2019-12-23T22:03:14 | null | UTF-8 | Python | false | false | 5,585 | py | from sympy.combinatorics import Permutation, SymmetricGroup
from sympy.core.expr import unchanged
from sympy.matrices import Matrix
from sympy.matrices.expressions import \
MatMul, BlockDiagMatrix, Determinant, Inverse
from sympy.matrices.expressions.matexpr import \
MatrixSymbol, Identity, ZeroMatrix, OneMatrix
from sympy.matrices.expressions.permutation import \
MatrixPermute, PermutationMatrix
from sympy.utilities.pytest import raises
from sympy import Symbol
def test_PermutationMatrix_basic():
    """Constructor validation, explicit matrix form, and MatMul interaction."""
    p = Permutation([1, 0])
    # A valid Permutation argument is accepted without auto-evaluation.
    assert unchanged(PermutationMatrix, p)
    # A plain tuple (not a Permutation instance) must be rejected.
    raises(ValueError, lambda: PermutationMatrix((0, 1, 2)))
    assert PermutationMatrix(p).as_explicit() == Matrix([[0, 1], [1, 0]])
    # Multiplying with an ordinary MatrixSymbol stays an unevaluated MatMul.
    assert isinstance(PermutationMatrix(p) * MatrixSymbol('A', 2, 2), MatMul)
def test_PermutationMatrix_matmul():
    """Multiplying by a PermutationMatrix matches the explicit dense product,
    and the product of two permutation matrices is again one."""
    perm = Permutation([1, 2, 0])
    pmat = PermutationMatrix(perm)
    dense = Matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    assert (pmat * dense).as_explicit() == pmat.as_explicit() * dense
    assert (dense * pmat).as_explicit() == dense * pmat.as_explicit()

    left = PermutationMatrix(Permutation([1, 2, 0]))
    right = PermutationMatrix(Permutation([2, 1, 0]))
    composed = PermutationMatrix(Permutation([1, 0, 2]))
    assert left * right == composed
def test_PermutationMatrix_matpow():
    """Powers of a permutation matrix follow the permutation's cycle structure."""
    pmat = PermutationMatrix(Permutation([1, 2, 0]))
    squared = PermutationMatrix(Permutation([2, 0, 1]))
    assert pmat**2 == squared
    # A 3-cycle has order 3, so the cube collapses to the identity.
    assert pmat**3 == Identity(3)
def test_PermutationMatrix_identity():
    """is_Identity is True exactly for the identity permutation."""
    assert PermutationMatrix(Permutation([0, 1])).is_Identity
    assert not PermutationMatrix(Permutation([1, 0])).is_Identity
def test_PermutationMatrix_determinant():
    """The determinant equals the signature of the underlying permutation."""
    cases = [([0, 1, 2], 1), ([0, 2, 1], -1), ([2, 0, 1], 1)]
    for form, signature in cases:
        pmat = PermutationMatrix(Permutation(form))
        assert Determinant(pmat).doit() == signature
def test_PermutationMatrix_inverse():
    """Inverting a permutation matrix inverts the underlying cycle."""
    forward = PermutationMatrix(Permutation(0, 1, 2))
    backward = PermutationMatrix(Permutation(0, 2, 1))
    assert Inverse(forward).doit() == backward
def test_PermutationMatrix_rewrite_BlockDiagMatrix():
    """rewrite(BlockDiagMatrix) splits a permutation matrix into diagonal
    blocks corresponding to consecutive sub-permutations."""
    trivial = PermutationMatrix(Permutation([0]))

    # The identity on 6 elements splits into six trivial 1x1 blocks.
    pmat = PermutationMatrix(Permutation([0, 1, 2, 3, 4, 5]))
    assert pmat.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(trivial, trivial, trivial, trivial, trivial, trivial)

    # One adjacent transposition yields a single 2x2 block.
    swap = PermutationMatrix(Permutation(0, 1))
    pmat = PermutationMatrix(Permutation([0, 1, 3, 2, 4, 5]))
    assert pmat.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(trivial, trivial, swap, trivial, trivial)

    # Three disjoint adjacent transpositions give three 2x2 blocks.
    pmat = PermutationMatrix(Permutation([1, 0, 3, 2, 5, 4]))
    assert pmat.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(swap, swap, swap)

    # A reversal of the four middle entries produces one 4x4 block.
    reversal = PermutationMatrix(Permutation([3, 2, 1, 0]))
    pmat = PermutationMatrix(Permutation([0, 4, 3, 2, 1, 5]))
    assert pmat.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(trivial, reversal, trivial)

    mixed = PermutationMatrix(Permutation([3, 1, 2, 0]))
    pmat = PermutationMatrix(Permutation([0, 4, 2, 3, 1, 5]))
    assert pmat.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(trivial, mixed, trivial)

    # No decomposable prefix: the matrix stays one block.
    pmat = PermutationMatrix(Permutation(0, 3)(1, 4)(2, 5))
    assert pmat.rewrite(BlockDiagMatrix) == BlockDiagMatrix(pmat)
def test_MartrixPermute_basic():
    """Argument validation and canonicalization of MatrixPermute.

    NOTE: the 'Martrix' typo in the name is kept deliberately; renaming
    would change the collected (public) test name.
    """
    perm = Permutation(0, 1)
    pmat = PermutationMatrix(perm)
    A = MatrixSymbol('A', 2, 2)

    # First argument must be a matrix, second a permutation (or its matrix).
    raises(ValueError, lambda: MatrixPermute(Symbol('x'), perm))
    raises(ValueError, lambda: MatrixPermute(A, Symbol('x')))
    assert MatrixPermute(A, pmat) == MatrixPermute(A, perm)
    # The axis argument must be 0 or 1.
    raises(ValueError, lambda: MatrixPermute(A, perm, 2))

    # The same permutation declared with a larger size compares equal...
    assert MatrixPermute(A, Permutation(0, 1, size=3)) == MatrixPermute(A, perm)
    # ...but a genuinely larger cycle does not fit a 2x2 matrix.
    raises(ValueError, lambda: MatrixPermute(A, Permutation(0, 1, 2)))
def test_MatrixPermute_shape():
    """MatrixPermute preserves the shape of its matrix argument."""
    expr = MatrixPermute(MatrixSymbol('A', 2, 3), Permutation(0, 1))
    assert expr.shape == (2, 3)
def test_MatrixPermute_explicit():
    """The explicit form matches Matrix.permute along the chosen axis."""
    perm = Permutation(0, 1, 2)
    sym = MatrixSymbol('A', 3, 3)
    dense = sym.as_explicit()
    assert MatrixPermute(sym, perm, 0).as_explicit() == \
        dense.permute(perm, orientation='rows')
    assert MatrixPermute(sym, perm, 1).as_explicit() == \
        dense.permute(perm, orientation='cols')
def test_MatrixPermute_rewrite_MatMul():
    """Rewriting as a MatMul is explicit-form equivalent on both axes."""
    perm = Permutation(0, 1, 2)
    sym = MatrixSymbol('A', 3, 3)
    for axis in (0, 1):
        expr = MatrixPermute(sym, perm, axis)
        assert expr.rewrite(MatMul).as_explicit() == expr.as_explicit()
def test_MatrixPermute_doit():
    """doit() evaluates MatrixPermute where possible and stays lazy otherwise."""
    perm3 = Permutation(0, 1, 2)
    sym = MatrixSymbol('A', 3, 3)
    # A purely symbolic matrix cannot be permuted eagerly.
    assert MatrixPermute(sym, perm3).doit() == MatrixPermute(sym, perm3)

    # The identity permutation evaluates consistently with the explicit form.
    trivial = Permutation(0, size=3)
    assert MatrixPermute(sym, trivial).doit().as_explicit() == \
        MatrixPermute(sym, trivial).as_explicit()

    # Identity matrices permute explicitly on either axis...
    ident = Identity(3)
    assert MatrixPermute(ident, perm3, 0).doit().as_explicit() == \
        MatrixPermute(ident, perm3, 0).as_explicit()
    assert MatrixPermute(ident, perm3, 1).doit().as_explicit() == \
        MatrixPermute(ident, perm3, 1).as_explicit()
    # ...while zero and all-ones matrices are invariant under permutation.
    assert MatrixPermute(ZeroMatrix(3, 3), perm3).doit() == ZeroMatrix(3, 3)
    assert MatrixPermute(OneMatrix(3, 3), perm3).doit() == OneMatrix(3, 3)

    # Nested permutations compose under doit() on either axis.
    big = MatrixSymbol('A', 4, 4)
    inner = Permutation(0, 1, 2, 3)
    outer = Permutation(0, 2, 3, 1)
    for axis in (0, 1):
        expr = MatrixPermute(MatrixPermute(big, inner, axis), outer, axis)
        assert expr.as_explicit() == expr.doit().as_explicit()
| [
"sylee957@gmail.com"
] | sylee957@gmail.com |
323b2f19eecc771d3c9a96cedb3d48e2ed15d5d8 | f6d5a09f9de8a66e764d593171fd9f27235ea013 | /comic.py | cf4cbfa4d406e1a1c81c2633b351797ea78699b4 | [] | no_license | Popoola-Sinaayo/Python-Projects | bd443c49c970b2296488e0382aa78de295514d11 | 2a703dd2d36f34d1f8a4cae33e2246dc37c1fb8a | refs/heads/master | 2022-12-10T02:18:19.089516 | 2020-08-31T08:20:16 | 2020-08-31T08:20:16 | 291,574,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import requests
# Fetch the public meme list from the Imgflip API and print the raw payload.
# A timeout keeps the script from hanging forever on a dead connection.
joke = requests.get("https://api.imgflip.com/get_memes", timeout=10)
jokes = joke.json()
# The API response is {"success": ..., "data": {"memes": [...]}} -- the list
# lives under the "memes" key. The previous flat key "data.memes" does not
# exist and raised KeyError.
koke = jokes["data"]["memes"]
print(jokes)
| [
"olusegunpopoola4real@gmail.com"
] | olusegunpopoola4real@gmail.com |
4533e7f4106a08d6c60606ae85e081356575fbf3 | 0c40e97b69dcd00f0b0b05f249d0fce448320fd8 | /test/functional/feature_segwit.py | 2d82afd598a56df99062522ce243f7c32495d618 | [
"MIT"
] | permissive | Arhipovladimir/Earthcoin | 9908912df9b10b97512c545b855c3670767039d9 | bc5b5ee538c76e7232e93434aedd8688bae70792 | refs/heads/main | 2023-07-16T05:50:52.755250 | 2021-08-25T09:19:40 | 2021-08-25T09:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,664 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Earthcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from decimal import Decimal
from test_framework.address import (
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
program_to_witness,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP
from test_framework.test_framework import EarthcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes, hex_str_to_bytes, sync_blocks, try_rpc
from io import BytesIO
# Node indices used when indexing the wit_ids/p2sh_ids arrays in run_test.
NODE_0 = 0
NODE_2 = 2
# Witness output variants passed as the version argument of send_to_witness.
WIT_V0 = 0
WIT_V1 = 1
def getutxo(txid):
    """Return a minimal utxo reference dict (always output index 0) for txid."""
    return {"vout": 0, "txid": txid}
def find_spendable_utxo(node, min_value):
    """Return the node's first spendable utxo worth at least min_value.

    Raises AssertionError when no such output exists.
    """
    candidates = node.listunspent(query_options={'minimumAmount': min_value})
    for candidate in candidates:
        if candidate['spendable']:
            return candidate
    raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
class SegWitTest(EarthcoinTestFramework):
    def set_test_params(self):
        """Configure a clean-chain 3-node network with per-node SegWit options."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
        self.extra_args = [
            # Node 0: serves RPC results in pre-segwit (version 0) serialization.
            [
                "-rpcserialversion=0",
                "-vbparams=segwit:0:999999999999",
                "-addresstype=legacy",
                "-deprecatedrpc=addwitnessaddress",
            ],
            # Node 1: mines blockversion=4 blocks, segwit (version 1) RPC serialization.
            [
                "-blockversion=4",
                "-rpcserialversion=1",
                "-vbparams=segwit:0:999999999999",
                "-addresstype=legacy",
                "-deprecatedrpc=addwitnessaddress",
            ],
            # Node 2: mines blocks signalling the segwit version bit (0x20000003).
            [
                "-blockversion=536870915",
                "-vbparams=segwit:0:999999999999",
                "-addresstype=legacy",
                "-deprecatedrpc=addwitnessaddress",
            ],
        ]
    def skip_test_if_missing_module(self):
        """Skip the whole test when the node binary was built without wallet support."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Build the default topology, then additionally connect node0 to node2
        and wait for all nodes to sync."""
        super().setup_network()
        connect_nodes(self.nodes[0], 2)
        self.sync_all()
    def success_mine(self, node, txid, sign, redeem_script=""):
        """Spend txid to a P2WSH output and mine one block; the block must
        contain 2 transactions (presumably coinbase plus the spend — i.e. the
        witness tx was accepted into the block)."""
        send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
        block = node.generate(1)
        assert_equal(len(node.getblock(block[0])["tx"]), 2)
        sync_blocks(self.nodes)
    def skip_mine(self, node, txid, sign, redeem_script=""):
        """Spend txid to a P2WSH output and mine one block; the block must
        contain only 1 transaction — i.e. the witness tx was left out of the
        block (expected before segwit activation)."""
        send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
        block = node.generate(1)
        assert_equal(len(node.getblock(block[0])["tx"]), 1)
        sync_blocks(self.nodes)
    def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
        """Assert that spending txid to a P2WSH output is rejected by the node
        (RPC error -26, i.e. mempool rejection) with the given error message."""
        assert_raises_rpc_error(-26, error_msg, send_to_witness, use_p2wsh=1, node=node, utxo=getutxo(txid), pubkey=self.pubkey[0], encode_p2sh=False, amount=Decimal("49.998"), sign=sign, insert_redeem_script=redeem_script)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_addr = self.nodes[i].addwitnessaddress(newaddress)
bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False)
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1]))
assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1]))
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify default node can't accept txs with missing witness")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', wit_ids[NODE_2][WIT_V0][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', wit_ids[NODE_2][WIT_V1][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', p2sh_ids[NODE_2][WIT_V0][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', p2sh_ids[NODE_2][WIT_V1][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress
solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable
unseen_anytime = [] # These outputs should never be seen
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].getaddressinfo(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3,5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
    """Fund one output per script, mine a block, then check listunspent visibility.

    ismine semantics: 2 -> every output must be spendable; 1 -> every output
    must be watch-only (seen but not spendable); anything else -> none visible.
    Returns the txid of the funding transaction.
    """
    funding = find_spendable_utxo(self.nodes[0], 50)
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(int('0x' + funding['txid'], 0), funding['vout'])))
    tx.vout.extend(CTxOut(10000000, script) for script in script_list)
    tx.rehash()
    signed_hex = self.nodes[0].signrawtransactionwithwallet(
        bytes_to_hex_str(tx.serialize_without_witness()))['hex']
    txid = self.nodes[0].sendrawtransaction(signed_hex, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
    # Tally how many of our outputs the wallet sees, and how many are spendable.
    watch_total = 0
    spend_total = 0
    for unspent in self.nodes[0].listunspent():
        if unspent['txid'] == txid:
            watch_total += 1
            if unspent['spendable']:
                spend_total += 1
    if ismine == 2:
        assert_equal(spend_total, len(script_list))
    elif ismine == 1:
        assert_equal(watch_total, len(script_list))
        assert_equal(spend_total, 0)
    else:
        assert_equal(watch_total, 0)
    return txid
def p2sh_address_to_script(self, v):
    """Derive script variants for a P2SH address from getaddressinfo output.

    Returns [bare redeemscript, P2SH script, P2WSH script, P2SH-wrapped P2WSH].
    """
    redeem = CScript(hex_str_to_bytes(v['hex']))
    script_p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
    script_p2wsh = CScript([OP_0, sha256(redeem)])
    script_p2sh_p2wsh = CScript([OP_HASH160, hash160(script_p2wsh), OP_EQUAL])
    return [redeem, script_p2sh, script_p2wsh, script_p2sh_p2wsh]
def p2pkh_address_to_script(self, v):
    """Derive every script variant for a P2PKH address from getaddressinfo output."""
    pubkey = hex_str_to_bytes(v['pubkey'])
    # Base scripts first; wrapped variants are built from them below.
    p2pk = CScript([pubkey, OP_CHECKSIG])
    p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
    p2wpkh = CScript([OP_0, hash160(pubkey)])
    p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
    p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
    p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
    p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
    p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
    p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
    p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
    return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh,
            p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success=True):
    """Spend every output of every listed txid in one transaction and mine it.

    `success` is accepted for interface compatibility but is not read.
    """
    spender = CTransaction()
    for txid in txids:
        raw = self.nodes[0].getrawtransaction(txid)
        prev = CTransaction()
        prev.deserialize(BytesIO(hex_str_to_bytes(raw)))
        for vout_index in range(len(prev.vout)):
            spender.vin.append(CTxIn(COutPoint(int('0x' + txid, 0), vout_index)))
    spender.vout.append(CTxOut(0, CScript()))
    spender.rehash()
    signed_hex = self.nodes[0].signrawtransactionwithwallet(
        bytes_to_hex_str(spender.serialize_without_witness()))['hex']
    self.nodes[0].sendrawtransaction(signed_hex, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
# Standard functional-test entry point: run the suite when invoked directly.
if __name__ == '__main__':
    SegWitTest().main()
| [
"mail@deveac.com"
] | mail@deveac.com |
110f03f1ca08186fa594f660cabd56e1c35ab2e9 | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/network/azure-mgmt-network/generated_samples/private_link_service_delete.py | 97258f9c138bda212fe46732ae74449e11fcba41 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 1,518 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python private_link_service_delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Delete the sample private link service using the default Azure credential."""
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subId",
    )
    # begin_delete returns a long-running-operation poller; block on completion.
    poller = network_client.private_link_services.begin_delete(
        resource_group_name="rg1",
        service_name="testPls",
    )
    poller.result()
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/PrivateLinkServiceDelete.json
# Allow running this sample directly as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | rdomenzain.noreply@github.com |
815555a0b9ba8d3eef9e459b9d19cd9f6e6e9305 | 824f831ce0921b3e364060710c9e531f53e52227 | /Leetcode/Python_Basics/02_C_Collection_OrderedDict.py | 4c7578f24feadc8a52aabb12cbb8fd63c8f4f69d | [] | no_license | adityakverma/Interview_Prepration | e854ff92c10d05bc2c82566ea797d2ce088de00a | d08a7f728c53943e9a27c33f8e4249633a69d1a6 | refs/heads/master | 2020-04-19T19:36:06.527353 | 2019-06-15T23:02:30 | 2019-06-15T23:02:30 | 168,392,921 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 948 | py |
# 8.3.6.1. OrderedDict Examples and Recipes
#
# Since an ordered dictionary remembers its insertion order, it can be used in
# conjunction with sorting to make a sorted dictionary:
# >>>
#
# >>> # regular unsorted dictionary
# >>> d = {'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}
#
# >>> # dictionary sorted by key
# >>> OrderedDict(sorted(d.items(), key=lambda t: t[0]))
# OrderedDict([('apple', 4), ('banana', 3), ('orange', 2), ('pear', 1)])
#
# >>> # dictionary sorted by value
# >>> OrderedDict(sorted(d.items(), key=lambda t: t[1]))
# OrderedDict([('pear', 1), ('orange', 2), ('banana', 3), ('apple', 4)])
#
# >>> # dictionary sorted by length of the key string
# >>> OrderedDict(sorted(d.items(), key=lambda t: len(t[0])))
# OrderedDict([('pear', 1), ('apple', 4), ('orange', 2), ('banana', 3)])
# ------------------------------------------------------------------------------ | [
"noreply@github.com"
] | adityakverma.noreply@github.com |
a74ea7563849fb0b7db1fc75e082d1c42015a4ea | 91dc3061601924efb35d0fa4faf3e87ddc68ae91 | /lab/pytest/pytest-tut/test_calculate_stat.py | ec5ae6ad72be550d5a9b0240640cc554c0cf6061 | [
"MIT"
] | permissive | MaxIsWell42/SPD-refactoring-challenges | 6a189882ddbf87dd8dd21ba362bfd8b6e4846d37 | ef2a8a13cb1fcd412e2fb0035bb1cf76527b0dc9 | refs/heads/master | 2023-03-14T02:13:14.011597 | 2021-03-02T18:36:30 | 2021-03-02T18:36:30 | 339,496,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | # Written by Kamran Bigdely
# Example for Compose Methods: Extract Method.
# Refactored.
import math
def display_grade_stat():
    """Read grades from the user, then print their mean and standard deviation."""
    grades = read_input()
    mean, standard_deviation = calculate_stat(grades)
    print_stat(mean, standard_deviation)
def read_input():
    """Prompt for a fixed number of integer grades and return them as a list."""
    n_student = 5
    return [int(input('Enter a number: ')) for _ in range(n_student)]
def calculate_stat(grade_list):
    """Return (mean, population standard deviation) of the given grades."""
    count = len(grade_list)
    mean = sum(grade_list) / count
    variance = sum((grade - mean) ** 2 for grade in grade_list) / count
    return mean, math.sqrt(variance)
def print_stat(mean, sd):
    """Pretty-print the mean and the population standard deviation (3 d.p.)."""
    banner = '****** Grade Statistics ******'
    footer = '****** END ******'
    print(banner)
    print("The grades's mean is:", mean)
    print('The population standard deviation of grades is: ', round(sd, 3))
    print(footer)
# NOTE(review): runs at import time and blocks on input() — presumably meant
# as a manual demo; confirm before importing this module from automated tests.
display_grade_stat()
def test_calculate_stat():
    """calculate_stat returns the mean and the population standard deviation.

    BUGFIX: the original asserted `calculate_stat(...) == 82, 14.35...`,
    which compares the (mean, sd) tuple against the int 82 (always False)
    and uses the expected deviation as the assert *message* — the test
    could never pass.  Unpack the tuple and compare each part properly.
    """
    grade_list = [60, 100, 80, 75, 95]
    mean, sd = calculate_stat(grade_list)
    assert mean == 82
    # Population sd of the sample above is sqrt(206) ~= 14.3527.
    assert math.isclose(sd, 14.352700094407323, rel_tol=1e-9)
| [
"maxfin13@gmail.com"
] | maxfin13@gmail.com |
d59271d0be300afe258e06412fce1d17daee84a4 | f7e592ac06e642b0c1db6779db919b136fea09cf | /blog/views.py | 8756ab2aabac1d3a317a9d316643010cc2a91e08 | [] | no_license | xero7689/myDjangoBlog | c2ccf02b64b452ad162b24a4a43dc900ba4d18be | 9507e2fad44955521b97160d9456be30a623cbe9 | refs/heads/master | 2016-09-15T17:27:38.455029 | 2014-06-09T12:15:29 | 2014-06-09T12:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,190 | py | from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from blog.models import Post, postTag
from work.models import Image
from django.utils import timezone
from django.views.generic.dates import YearArchiveView, MonthArchiveView
from django.views.generic import ListView
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from blog2.settings import MEDIA_URL
from blog.forms import ContactForm
from django.views.generic.edit import FormView
from calendar import month_name
# Module-level querysets shared by every view below.
# NOTE(review): QuerySets are lazy and re-evaluate on iteration, but
# current_year/current_month are frozen at server start — confirm that a
# stale month boundary is acceptable for the "current posts" sidebar.
allTags = postTag.objects.all()
allPubPosts = Post.objects.filter(published=True)
# Posts created in the month the server process started.
current_year = timezone.now().year
current_month = timezone.now().month
current_posts = Post.objects.filter(created__year=current_year, created__month=current_month)
#User Method
def get_archive_dict():
    """Map each year with published posts to the list of months that have posts."""
    years = {}
    for entry in allPubPosts:
        created = entry.created
        months = years.setdefault(created.year, [])
        if created.month not in months:
            months.append(created.month)
    return years

archive = get_archive_dict()
# Create your views here.
def index(request):
    """Render the blog landing page with all published posts and sidebar data."""
    context = {
        'allPubPosts': allPubPosts,
        'allTags': allTags,
        'current_posts': current_posts,
        'archive': archive,
    }
    return render(request, 'blog/index3.html', context)
def post(request, slug):
    """Render one published post looked up by slug (404 when absent)."""
    entry = get_object_or_404(Post, slug=slug)
    context = {
        'post': entry,
        'postTags': entry.tags.all(),
        'allTags': allTags,
        'current_posts': current_posts,
        'archive': archive,
    }
    return render(request, 'blog/post3.html', context)
def tag(request, tagName):
    """Render every post carrying the given tag name (404 when the tag is absent)."""
    matched = get_object_or_404(postTag, tagName=tagName)
    context = {
        'tagPost': matched.post_tags.all(),
        'tagName': matched.tagName,
        'allTags': allTags,
        'current_posts': current_posts,
        'archive': archive,
    }
    return render(request, 'blog/tag3.html', context)
class tagList(ListView):
    """Generic list view over every postTag, rendered by blog/taglist.html."""
    model = postTag
    template_name = "blog/taglist.html"
def about(request):
    """Render the static about page."""
    return render(request, 'blog/about.html')
class work(ListView):
    """Gallery view: lists every work Image plus MEDIA_URL for building links."""
    model = Image
    # NOTE(review): evaluated once when the class body runs (import time);
    # QuerySets are lazy and re-evaluate per iteration, but confirm images
    # added later really do show up as expected.
    image = Image.objects.all()
    template_name = "work/work.html"

    def get(self, request, *args, **kwargs):
        # Hand the template the shared queryset and the media URL prefix.
        return render(request, self.template_name, {"image": self.image,"media_url": MEDIA_URL})
class archiveList(ListView):
    """Archive page: year/month map plus every published post."""
    model = Post
    template_name = "blog/archive.html"

    def get(self, request, *args, **kwargs):
        # `archive` and `allPubPosts` are the module-level globals above.
        return render(request, self.template_name, {"archive": archive, "allPubPosts": allPubPosts})
class ContactView(FormView):
    """Contact form: save valid submissions and redirect to the success page."""
    template_name = "blog/contact.html"
    form_class = ContactForm

    def post(self, request, *args, **kwargs):
        """Validate the submitted form; re-render with errors when invalid."""
        submitted = self.form_class(request.POST)
        if not submitted.is_valid():
            return render(request, self.template_name, {'form': submitted})
        submitted.save()
        return HttpResponseRedirect('/contactSuccess/')
def ContactSuccess(request):
    """Render the contact-form 'thanks' page."""
    return render(request, 'blog/contactSuccess.html')
class ArticleYearArchiveView(YearArchiveView):
    """Yearly archive of posts keyed on the `created` timestamp."""
    queryset = Post.objects.all()
    date_field = "created"
    make_object_list = True
    allow_future = True
class ArticleMonthArchiveView(MonthArchiveView):
    """Monthly archive of posts keyed on the `created` timestamp."""
    queryset = Post.objects.all()
    date_field = "created"
    make_object_list = True
    allow_future = True
| [
"volleyp7689@gmail.com"
] | volleyp7689@gmail.com |
b8ba4c0311d5dd7238535ad3b2ce2e24d54e21ec | 870932f5edf527b643db7bf63b42f873d9adffc6 | /p4-smartcab/smartcab/q_table.py | c6aa1759cccddcbe977fb83b67e2911231736415 | [] | no_license | aalhour/udacity-machine-learning-nanodegree | 4342229c7212ef48d6d919c3c6ea759547d88d6f | 45285374fd30848552cbe331d2376a6358dd906c | refs/heads/master | 2021-01-20T23:36:42.197748 | 2018-01-17T11:30:45 | 2018-01-17T11:30:45 | 61,887,809 | 0 | 1 | null | 2018-01-17T11:30:46 | 2016-06-24T13:53:54 | Jupyter Notebook | UTF-8 | Python | false | false | 3,857 | py | # Given a state, figure out which action to take
# best_action_given_state
# Take an action, get reward
# Update entry for old state-action
# TODO: refactor by extracting value object StateActionMapper
import json
class QTable():
    """Flat Q-table for the smartcab agent.

    A state is (light, left, oncoming, next_waypoint) and an action is one of
    'forward', 'left', 'right' or None; each state/action pair maps to one
    Q-value stored under a joined string key (str() maps None to 'None').
    """

    def __init__(self, alpha=1.00, gamma=0.5):
        # alpha: learning rate; gamma: discount factor used in update().
        self._alpha = alpha
        self._gamma = gamma
        self.__initialize_table()

    def get_value_at(self, light=None, left=None, oncoming=None,
                     next_waypoint=None, action=None):
        """Return the stored Q-value for the given state/action pair."""
        return self._table[self.__state_action(light=light, left=left,
                                               oncoming=oncoming,
                                               next_waypoint=next_waypoint,
                                               action=action)]

    def set_value_at(self, light=None, next_waypoint=None, left=None,
                     oncoming=None, action=None, new_value=0.0):
        """Overwrite the Q-value for the given state/action pair."""
        self._table[self.__state_action(light=light, left=left,
                                        oncoming=oncoming,
                                        next_waypoint=next_waypoint,
                                        action=action)] = new_value

    def alpha(self):
        """Learning rate used by update()."""
        return self._alpha

    def gamma(self):
        """Discount factor used by update()."""
        return self._gamma

    def max_q(self, light=None, next_waypoint=None, left=None, oncoming=None):
        """Return the best Q-value over all four actions for the given state."""
        values = [self.get_value_at(light=light, next_waypoint=next_waypoint,
                                    left=left, oncoming=oncoming, action=action)
                  for action in ['forward', 'left', 'right', None]]
        return max(values)

    def best_action(self, light=None, next_waypoint=None, left=None, oncoming=None):
        """Choose between following next_waypoint and doing nothing.

        Ties resolve in favour of moving toward the waypoint.
        """
        go_to_next_waypoint = self.get_value_at(light=light,
                                                next_waypoint=next_waypoint,
                                                left=left, oncoming=oncoming,
                                                action=next_waypoint)
        do_nothing = self.get_value_at(light=light, next_waypoint=next_waypoint,
                                       left=left, oncoming=oncoming, action=None)
        if go_to_next_waypoint >= do_nothing:
            return next_waypoint
        return None

    def update(self, light=None, next_waypoint=None, left=None, oncoming=None,
               action=None, reward=0.0):
        """Apply one Q-learning step for the given state/action and reward.

        BUGFIX: this previously called self.__value_at / self.__set_value_at,
        which do not exist (the accessors are get_value_at / set_value_at),
        so every update raised AttributeError.
        """
        old_value = self.get_value_at(light=light, next_waypoint=next_waypoint,
                                      left=left, oncoming=oncoming, action=action)
        new_value = old_value * (1 - self._alpha) + self._alpha * (reward + self._gamma * old_value)
        self.set_value_at(light=light, next_waypoint=next_waypoint, left=left,
                          oncoming=oncoming, action=action, new_value=new_value)

    def __next_waypoint(self, light, next_waypoint):
        # NOTE(review): leftover from an earlier nested-table layout; the
        # current table is flat, so this lookup would raise if ever called.
        return self._table['light'][light]['next_waypoint'][next_waypoint]

    def __state_action(self, light=None, left=None, oncoming=None,
                       next_waypoint=None, action=None):
        """Build the flat table key; str() maps the None action to 'None'."""
        return "{}-{}-{}-{}-{}".format(str(light), str(left), str(oncoming),
                                       str(next_waypoint), str(action))

    def __initialize_table(self):
        """Zero-fill every (light, left, oncoming, next_waypoint, action) key."""
        self._table = {}
        for light in ['red', 'green']:
            for left in ['forward', 'left', 'right', 'None']:
                for oncoming in ['forward', 'left', 'right', 'None']:
                    for next_waypoint in ['forward', 'left', 'right', 'None']:
                        for action in ['forward', 'left', 'right', 'None']:
                            self._table[self.__state_action(
                                light=light, left=left, oncoming=oncoming,
                                next_waypoint=next_waypoint,
                                action=action)] = 0.0
| [
"edderic@gmail.com"
] | edderic@gmail.com |
da26f0c6745f08a990a23ec69c9a44cd5661d825 | 3e91e2ec469a6eaf47986c8f2d42e417d77df181 | /unregisteredCustomer.py | 1af94dcdbd65e35f0d867df9fcb62b62b5207bde | [] | no_license | Kowenjko/Python_Homework_15_Kowenjko | cafa799822c078dd0546b4cd8a900d1c6aae6f7d | 968022726bb17bffa268e19edfe3dc7028dc108d | refs/heads/master | 2023-06-20T14:09:10.005936 | 2021-07-25T07:39:17 | 2021-07-25T07:39:17 | 389,289,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | import psycopg2
from settings import *
from connection import Connection
class UnregisteredCuctomer(Connection):
    """Storefront operations available before login: registration, product info.

    NOTE(review): the class name keeps the original 'Cuctomer' spelling
    because callers import it under this name.
    """

    # Register a new customer account (comment translated from Ukrainian).
    def register_self(self, first_name, last_name, city, login, password):
        # Create the login first; only insert the customer row when it is new.
        if self._register(login, password, 'customer'):
            table = 'customer'
            data = [{
                'city_id': city,
                'first_name': first_name,
                'last_name': last_name,
                # NOTE(review): assumes _getNextId('login') returns the *next*
                # sequence value, so -1 is the id just assigned — confirm this
                # holds under concurrent registrations.
                'reg_id': self._getNextId('login')-1,
            }]
            result = self._postData(table, data)
        else:
            result = 'Login is exist!'
        return result

    # Return product details, optionally filtered on one category column
    # (comment translated from Ukrainian).
    def get_product_info(self, category='', selector='',):
        """
        category must be one of the item from the list:
        ['product_name','country_name', 'category_name']
        """
        categoryes = ['product_name', 'country_name', 'category_name']
        table = ('product p',)
        fields = (
            """p.id, p.product_name ,p.unit_price,c.country_name,pc.category_name """,)
        fieldNames = ["id", "product_name", "unit_price",
                      "country_name", "category_name"]
        # SECURITY(review): `selector` is interpolated straight into the SQL
        # WHERE clause — an injection risk if it ever carries user input.
        # Parameterize via the Connection layer if possible.
        if category and category in categoryes and selector:
            where = f"""where {category} = '{selector}'"""
        else:
            where = ''
        selector = f""" inner join country c on c.id =p.country_id
        inner join product_category pc on pc.id =p.product_catagery_id {where}"""
        result = self._getData(table, fields, selector)
        # Re-shape each row tuple into a dict keyed by fieldNames.
        changeRes = []
        for item in result:
            cort = {}
            for index, element in enumerate(item):
                cort[fieldNames[index]] = element
            changeRes.append(cort)
        return changeRes
| [
"OrtosWT@gmail.com"
] | OrtosWT@gmail.com |
c80b70662baa200840e1c1155e06769053238ef2 | 9a28a82f6c3b9ccff3f7a7c0d157dc6ddf0af232 | /User/views.py | 2935cec4a0129b814ee248bd75b1afe4ee8c140f | [
"MIT"
] | permissive | KarryBanana/ckr-zfy | df30811ddfee379908a5dcd4dede3b8cf8df1e9e | 926160fc23e18b10189cd17861b81448b770c50d | refs/heads/master | 2022-12-01T23:49:22.645046 | 2020-08-19T16:28:47 | 2020-08-19T16:28:47 | 288,340,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,338 | py |
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.hashers import check_password, make_password
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .models import Profile
from django.contrib.auth.models import User
from django.core import serializers
from django.db.models import Q
# Create your views here.
# 主页面
def index(request):
    """Placeholder home endpoint; returns the literal string '主页' as JSON."""
    return JsonResponse("主页", safe=False)
# 用户登录
def user_login(request):
    """POST login endpoint: authenticate credentials and open a session.

    Responds with user info on success, a plain error string otherwise.
    """
    if request.method != 'POST':
        return JsonResponse("Invalid method", safe=False)
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is None:
        return JsonResponse("账号或密码输入有误。请重新输入!", safe=False)
    login(request, user)
    return JsonResponse({'info': "Login Success!", 'userID': user.id, 'username': username})
# 用户登出
def user_logout(request):
    """Terminate the current session and acknowledge with a JSON string."""
    logout(request)
    return JsonResponse("Logout Successfully!", safe=False)
# 用户注册
def user_register(request):
    """POST registration endpoint: create a user when the chosen name is free.

    Requires matching `password`/`password2` fields; answers with plain
    error strings on any failure.
    """
    if request.method != 'POST':
        return JsonResponse("Invalid method", safe=False)
    username = request.POST.get('username')
    password = request.POST.get('password')
    password2 = request.POST.get('password2')
    if password != password2:
        return JsonResponse("两次密码不一致,请重新填写!", safe=False)
    try:
        User.objects.get(Q(username=username))
    except User.DoesNotExist:
        # Name is free: create the account and echo the chosen name back.
        User.objects.create_user(username=username, password=password)
        return JsonResponse({'info': "成功", 'usernameList': username})
    return JsonResponse("用户名已存在!", safe=False)
def get_usernamelist(request):
    """POST-only endpoint returning every username in the system."""
    if request.method != 'POST':
        return JsonResponse("Invalid response", safe=False)
    names = [user.username for user in User.objects.all()]
    return JsonResponse({'usernameList': names})
def profile(request, id):
    """Return a user's profile data, creating an empty Profile on first access.

    The payload also carries every username (client-side autocomplete).
    """
    user = User.objects.get(id=id)
    if Profile.objects.filter(user_id=id).exists():
        user_profile = Profile.objects.get(user_id=id)
        print("get profile")
    else:
        user_profile = Profile.objects.create(user=user)
        print("create profile!")
    payload = {
        'usernameList': [u.username for u in User.objects.all()],
        'id': user.id,
        'username': user.username,
        'age': user_profile.age,
        'hobby': user_profile.hobby,
        'introduction': user_profile.introduction,
        'gender': user_profile.gender,
        'email': user.email,
        'phone': user_profile.phone,
    }
    return JsonResponse([payload], safe=False)
# @login_required(login_url='/User/login/')
def profile_edit(request, id):
    """POST endpoint updating a user's profile fields and e-mail.

    NOTE(review): nothing verifies that the requester owns this profile —
    the identity check below is commented out; confirm whether that is
    intentional.  Non-POST requests fall through without a response.
    """
    user = User.objects.get(id=id)
    # user_id is the column auto-generated by the OneToOneField.
    if Profile.objects.filter(user_id=id).exists():
        profile = Profile.objects.get(user_id=id)
    else:
        profile = Profile.objects.create(user=user)
    # asker_id = request.POST.get('id')
    # print(asker_id)
    # asker = User.objects.get(id=asker_id)
    if request.method == 'POST':
        # Verify the requester is the profile owner (currently disabled):
        # if asker != user:
        #     ret = []
        #     ret.append(user)
        #     retUser = serializers.serialize("json", ret)
        #     response = {'user': retUser}
        #     return JsonResponse(response)
        # profile_form = ProfileForm(request.POST, request.FILES)
        # if profile_form.is_valid():
        # (would take the cleaned, validated data here)
        # else:
        # uname = request.POST.get('username')  (username editing deferred)
        phone = request.POST.get('phone')
        intro = request.POST.get('introduction')
        gender = request.POST.get('gender')
        hobby = request.POST.get('hobby')
        age = request.POST.get('age')
        email = request.POST.get('email')
        profile.phone = phone
        profile.introduction = intro
        profile.age = age
        profile.gender = gender
        profile.hobby = hobby
        # user.username = uname
        user.email = email
        # If an image was uploaded in FILES:
        # if 'img' in request.FILES:
        #     profile.img = data["img"]
        profile.save()
        user.save()
        print(profile.introduction)
        print(user.email)
        return JsonResponse("成功", safe=False)
def change_password(request, id):
    """Replace the user's password when the supplied old password matches."""
    user = User.objects.get(id=id)
    old_pwd = request.POST.get('formerPwd')
    new_pwd = request.POST.get('newPwd')
    if not check_password(old_pwd, user.password):
        return JsonResponse("failed", safe=False)
    user.password = make_password(new_pwd)
    user.save()
    return JsonResponse("success", safe=False)
| [
"bill881@126.com"
] | bill881@126.com |
dcc88e38c7a728e2513325e8b7fea3dc04f5b694 | aaa55b54be6a2c52b1e1ea3f36ef25d0e9b6fd53 | /quotes_multiple_items.py | 46c2fd738ebb7f05e34da9c392f604da56b7b00e | [] | no_license | fairlyoddparents/Scrapy-Tutorials-My-Answers- | 312f3c373ae06cf4855f2db74dbc5d411180a2b8 | ba745057b55d837e7d2e22459725075cd9d7c8c4 | refs/heads/master | 2022-12-15T12:54:12.345895 | 2020-09-19T06:46:25 | 2020-09-19T06:46:25 | 296,803,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # -*- coding: utf-8 -*-
import scrapy
class QuotesSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com, yielding author/text/tags for each quote
    and following the pagination chain to the end."""
    name = 'quotes'
    allowed_domains = ['toscrape.com']
    start_urls = ['http://quotes.toscrape.com']

    def parse(self, response):
        self.log('I just visited: ' + response.url)
        # Iterate selector objects (not extracted strings) so each quote's
        # sub-elements can still be queried with css().
        for quote in response.css('div.quote'):
            yield {
                'author_name': quote.css('small.author::text').extract_first(),
                'text': quote.css('span.text::text').extract_first(),
                'tags': quote.css('a.tag::text').extract(),
            }
        # Follow pagination when a "next" link exists.
        next_href = response.css('li.next > a::attr(href)').extract_first()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href), callback=self.parse)
| [
"noreply@github.com"
] | fairlyoddparents.noreply@github.com |
ee174d0cc480a106ac3d73263d219cb9230f19cc | b9f98b763ca714908ece20e03075659512e1fe53 | /4th Lab/tttlib+.py | 694e00f34a5034ae42ebcc5c7df7bbd4dc5983c7 | [] | no_license | TheozZeng/CSC180 | 83e8ad1ecfcdca39ab822e67657be57026197786 | 45e4fa42524620dae73fc6d3d978c32b0b91518e | refs/heads/master | 2022-03-06T22:09:43.608914 | 2019-08-20T10:49:04 | 2019-08-20T10:49:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,526 | py | class ttt:
T=[0,0,0,0,0,0,0,0,0]
#printBoard
def printBoard(self):
    """Print the 3x3 board; return False when the board list is malformed.

    Cells: 0 -> shown as the cell index, 1 -> 'X', 2 -> 'O'.
    BUGFIX: reads the board via self.T; the original referenced a bare
    name T that is only defined as a class attribute, so every call
    raised NameError.
    """
    board = self.T
    if len(board) != 9 or any(cell not in (0, 1, 2) for cell in board):
        return False
    symbols = [str(i) if cell == 0 else ('X' if cell == 1 else 'O')
               for i, cell in enumerate(board)]
    print(symbols[0], '|', symbols[1], '|', symbols[2])
    print('---|---|---')
    print(symbols[3], '|', symbols[4], '|', symbols[5])
    print('---|---|---')
    print(symbols[6], '|', symbols[7], '|', symbols[8])
    return True
#analyzeBoard(T)
def analyzeBoard(self):
    """Score the board: -1 malformed, 1 X wins, 2 O wins, 3 draw, 0 ongoing.

    BUGFIX: reads the board via self.T (the original used a bare name T
    that is only defined as a class attribute) and validates *all* nine
    cells before scoring (the original returned a verdict as soon as the
    first cell was valid, skipping validation of the rest).
    """
    board = self.T
    if len(board) != 9 or any(cell not in (0, 1, 2) for cell in board):
        return -1
    # All eight winning lines: rows, columns, diagonals.
    lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6)]
    for player in (1, 2):
        if any(board[a] == board[b] == board[c] == player for a, b, c in lines):
            return player
    if 0 not in board:
        return 3
    return 0
#Random Move
def genRandomMove(self, player):
    """Return the index of a random empty cell, or -1 when the board is
    malformed or full.

    BUGFIX: the original compared int cells against the strings '1'/'2'
    (never true, since the board holds ints) and read an undefined bare
    name T, so it could hand back an occupied square.  `player` is kept
    for interface symmetry with genWinningMove but does not affect the
    choice.
    """
    board = self.T
    if len(board) != 9 or any(cell not in (0, 1, 2) for cell in board):
        return -1
    open_cells = [i for i, cell in enumerate(board) if cell == 0]
    if not open_cells:
        return -1
    from random import choice
    return choice(open_cells)
#Wining Move
def genWinningMove(self,player):
for i in range(0,9):
if(len(T)!=9)or((T[i]!=1)and(T[i]!=2)and(T[i]!=0)):
return(-1)
if((player!=1)and(player!=2)):
return(-1)
else:
if(player==1):
if((T[0]==T[1]==1)and(T[2]==0)):
return(2)
if((T[0]==T[2]==1)and(T[1]==0)):
return(1)
if((T[1]==T[2]==1)and(T[0]==0)):
return(0)
if((T[3]==T[4]==1)and(T[5]==0)):
return(5)
if((T[3]==T[5]==1)and(T[4]==0)):
return(4)
if((T[4]==T[5]==1)and(T[3]==0)):
return(3)
if((T[6]==T[7]==1)and(T[8]==0)):
return(8)
if((T[6]==T[8]==1)and(T[7]==0)):
return(7)
if((T[7]==T[8]==1)and(T[6]==0)):
return(6)
if((T[0]==T[3]==1)and(T[6]==0)):
return(6)
if((T[0]==T[6]==1)and(T[3]==0)):
return(3)
if((T[6]==T[3]==1)and(T[0]==0)):
return(0)
if((T[1]==T[4]==1)and(T[7]==0)):
return(7)
if((T[1]==T[7]==1)and(T[4]==0)):
return(4)
if((T[4]==T[7]==1)and(T[1]==0)):
return(1)
if((T[2]==T[5]==1)and(T[8]==0)):
return(8)
if((T[2]==T[8]==1)and(T[5]==0)):
return(5)
if((T[5]==T[8]==1)and(T[2]==0)):
return(2)
if((T[0]==T[4]==1)and(T[8]==0)):
return(8)
if((T[0]==T[8]==1)and(T[4]==0)):
return(4)
if((T[4]==T[8]==1)and(T[0]==0)):
return(0)
if((T[6]==T[4]==1)and(T[2]==0)):
return(2)
if((T[6]==T[2]==1)and(T[4]==0)):
return(4)
if((T[4]==T[2]==1)and(T[6]==0)):
return(6)
if(player==2):
if((T[0]==T[1]==2)and(T[2]==0)):
return(2)
if((T[0]==T[2]==2)and(T[1]==0)):
return(1)
if((T[1]==T[2]==2)and(T[0]==0)):
return(0)
if((T[3]==T[4]==2)and(T[5]==0)):
return(5)
if((T[3]==T[5]==2)and(T[4]==0)):
return(4)
if((T[4]==T[5]==2)and(T[3]==0)):
return(3)
if((T[6]==T[7]==2)and(T[8]==0)):
return(8)
if((T[6]==T[8]==2)and(T[7]==0)):
return(7)
if((T[7]==T[8]==2)and(T[6]==0)):
return(6)
if((T[0]==T[3]==2)and(T[6]==0)):
return(6)
if((T[0]==T[6]==2)and(T[3]==0)):
return(3)
if((T[6]==T[3]==2)and(T[0]==0)):
return(0)
if((T[1]==T[4]==2)and(T[7]==0)):
return(7)
if((T[1]==T[7]==2)and(T[4]==0)):
return(4)
if((T[4]==T[7]==2)and(T[1]==0)):
return(1)
if((T[2]==T[5]==2)and(T[8]==0)):
return(8)
if((T[2]==T[8]==2)and(T[5]==0)):
return(5)
if((T[5]==T[8]==2)and(T[2]==0)):
return(2)
if((T[0]==T[4]==2)and(T[8]==0)):
return(8)
if((T[0]==T[8]==2)and(T[4]==0)):
return(4)
if((T[4]==T[8]==2)and(T[0]==0)):
return(0)
if((T[6]==T[4]==2)and(T[2]==0)):
return(2)
if((T[6]==T[2]==2)and(T[4]==0)):
return(4)
if((T[4]==T[2]==2)and(T[6]==0)):
return(6)
#NonLoser Move
def genNonLoser(self,player):
for i in range(0,9):
if(len(T)!=9)or((T[i]!=1)and(T[i]!=2)and(T[i]!=0)):
return(-1)
if((player!=1)and(player!=2)):
return(-1)
else:
if(player==2):
if((T[0]==T[1]==1)and(T[2]==0)):
return(2)
if((T[0]==T[2]==1)and(T[1]==0)):
return(1)
if((T[1]==T[2]==1)and(T[0]==0)):
return(0)
if((T[3]==T[4]==1)and(T[5]==0)):
return(5)
if((T[3]==T[5]==1)and(T[4]==0)):
return(4)
if((T[4]==T[5]==1)and(T[3]==0)):
return(3)
if((T[6]==T[7]==1)and(T[8]==0)):
return(8)
if((T[6]==T[8]==1)and(T[7]==0)):
return(7)
if((T[7]==T[8]==1)and(T[6]==0)):
return(6)
if((T[0]==T[3]==1)and(T[6]==0)):
return(6)
if((T[0]==T[6]==1)and(T[3]==0)):
return(3)
if((T[6]==T[3]==1)and(T[0]==0)):
return(0)
if((T[1]==T[4]==1)and(T[7]==0)):
return(7)
if((T[1]==T[7]==1)and(T[4]==0)):
return(4)
if((T[4]==T[7]==1)and(T[1]==0)):
return(1)
if((T[2]==T[5]==1)and(T[8]==0)):
return(8)
if((T[2]==T[8]==1)and(T[5]==0)):
return(5)
if((T[5]==T[8]==1)and(T[2]==0)):
return(2)
if((T[0]==T[4]==1)and(T[8]==0)):
return(8)
if((T[0]==T[8]==1)and(T[4]==0)):
return(4)
if((T[4]==T[8]==1)and(T[0]==0)):
return(0)
if((T[6]==T[4]==1)and(T[2]==0)):
return(2)
if((T[6]==T[2]==1)and(T[4]==0)):
return(4)
if((T[4]==T[2]==1)and(T[6]==0)):
return(6)
if(player==1):
if((T[0]==T[1]==2)and(T[2]==0)):
return(2)
if((T[0]==T[2]==2)and(T[1]==0)):
return(1)
if((T[1]==T[2]==2)and(T[0]==0)):
return(0)
if((T[3]==T[4]==2)and(T[5]==0)):
return(5)
if((T[3]==T[5]==2)and(T[4]==0)):
return(4)
if((T[4]==T[5]==2)and(T[3]==0)):
return(3)
if((T[6]==T[7]==2)and(T[8]==0)):
return(8)
if((T[6]==T[8]==2)and(T[7]==0)):
return(7)
if((T[7]==T[8]==2)and(T[6]==0)):
return(6)
if((T[0]==T[3]==2)and(T[6]==0)):
return(6)
if((T[0]==T[6]==2)and(T[3]==0)):
return(3)
if((T[6]==T[3]==2)and(T[0]==0)):
return(0)
if((T[1]==T[4]==2)and(T[7]==0)):
return(7)
if((T[1]==T[7]==2)and(T[4]==0)):
return(4)
if((T[4]==T[7]==2)and(T[1]==0)):
return(1)
if((T[2]==T[5]==2)and(T[8]==0)):
return(8)
if((T[2]==T[8]==2)and(T[5]==0)):
return(5)
if((T[5]==T[8]==2)and(T[2]==0)):
return(2)
if((T[0]==T[4]==2)and(T[8]==0)):
return(8)
if((T[0]==T[8]==2)and(T[4]==0)):
return(4)
if((T[4]==T[8]==2)and(T[0]==0)):
return(0)
if((T[6]==T[4]==2)and(T[2]==0)):
return(2)
if((T[6]==T[2]==2)and(T[4]==0)):
return(4)
if((T[4]==T[2]==2)and(T[6]==0)):
return(6)
#Move
def Move(self,x,player):
if(T[x]==0):
self.T[x] = player
return True
else:
return False
| [
"noreply@github.com"
] | TheozZeng.noreply@github.com |
29cebf2f85aaf258c124b6e61c0068e22a3be8d3 | 7ed1f496f9d4a936c8d07192f625e930018490f6 | /wrapped_qt.py | 262ccc8c1934386209f891a7e785252c0f3f760b | [
"MIT"
] | permissive | akashiro000/Superslick | ac28af5f97d81a139a062b5071cf55457b861398 | f7a9827cbc974522e2ac2bee330c1e43ca0e9e15 | refs/heads/master | 2021-03-10T15:21:17.245284 | 2020-03-15T20:40:25 | 2020-03-15T20:40:25 | 246,463,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # coding: utf-8
from PySide2 import QtWidgets, QtGui, QtCore
class QIconLabel(QtWidgets.QLabel):
    """Clickable QLabel that shows a scaled icon with an optional dark overlay."""
    # Overlay tint colour (RGB) and its alpha component.
    COLOR = (30, 30, 30)
    OPACITY = 255 / 2
    # Class-level flag: the shared overlay pixmap is built only once.
    CONSTRUCTED = False
    # Emitted with the originating QMouseEvent on every mouse press.
    clicked = QtCore.Signal(QtGui.QMouseEvent)
    @classmethod
    def _construct(self):
        # Lazily build the 1x1 tint pixmap shared by every instance.
        # NOTE(review): OPACITY is 127.5 (a float); presumably QColor
        # coerces it to an int -- confirm against the PySide2 docs.
        if self.CONSTRUCTED is False:
            self.overlay_pix = QtGui.QPixmap(QtCore.QSize(1, 1))
            self.overlay_pix.fill(QtGui.QColor(*self.COLOR, self.OPACITY))
            self.CONSTRUCTED = True
    def mousePressEvent(self, ev):
        # Re-expose Qt's low-level press event as a simple signal.
        self.clicked.emit(ev)
    def __init__(self, filename: str, size=(128, 128), overlay=False, *args, **kwargs):
        """Load `filename`, scale it to `size` and display it.

        `overlay=True` paints the translucent tint on top of the icon.
        Remaining arguments are forwarded to QLabel.
        """
        super(QIconLabel, self).__init__(*args, **kwargs)
        self._construct()
        self.filename = filename
        self.size = size
        self.overlay = overlay
        self.draw()
    def set_overlay(self, value=True):
        # Toggle the tint and repaint immediately.
        self.overlay = value
        self.draw()
    def draw(self):
        """Rebuild the displayed pixmap from the source image file."""
        _pixmap = QtGui.QPixmap(self.filename)
        pixmap = _pixmap.scaled(*self.size, QtCore.Qt.KeepAspectRatio)
        if self.overlay:
            # Stretch the shared 1x1 tint pixmap over the whole icon.
            painter = QtGui.QPainter(pixmap)
            painter.drawPixmap(
                0, 0,
                self.overlay_pix.scaled(*self.size, QtCore.Qt.KeepAspectRatio),
            )
            painter.end()
        self.setPixmap(pixmap)
| [
"Takumi.Akashiro000@gmail.com"
] | Takumi.Akashiro000@gmail.com |
601b7e3a344d8fd631ff88dae9299ee5af5140c9 | d6ef240cc3b0babd0a96d51b4a54477f36725f4b | /setup.py | 19acbbaf660b7bd84e3d37dfcaa4adfe953dafef | [] | no_license | alorenzo175/grib2nc | bc972b2cd4e523947533b02628d53311f66e9ea8 | ee6a54c17a4acaec059a97de1102763bdade22fd | refs/heads/master | 2021-01-18T14:05:44.314776 | 2016-02-14T18:38:40 | 2016-02-14T18:38:40 | 25,052,295 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | import os
import sys
try:
    from setuptools import setup, find_packages
except ImportError:
    raise RuntimeError('setuptools is required')
import versioneer
# Distribution metadata for the grib2nc package.
PACKAGE = 'grib2nc'
if sys.version_info[:2] < (2, 7):
    sys.exit('%s requires Python 2.7 or higher.' % PACKAGE)
requirements = ['numpy', 'pandas', 'pygrib', 'netCDF4', 'setuptools']
# concurrent.futures entered the stdlib in 3.2; older interpreters need the backport.
if sys.version_info[:2] < (3, 2):
    requirements.append('futures')
SHORT_DESC = 'Convert grib2 files to netCDF format'
AUTHOR = 'Tony Lorenzo'
MAINTAINER_EMAIL = 'alorenzo175@gmail.com'
URL = 'https://github.com/alorenzo175/grib2nc'
setup(
    name=PACKAGE,
    # versioneer derives the version string and build commands from VCS tags.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description=SHORT_DESC,
    author=AUTHOR,
    maintainer_email=MAINTAINER_EMAIL,
    url=URL,
    packages=find_packages(),
    install_requires=requirements,
    include_package_data=True,
    data_files=[('.', ['./settings.txt'])],
    scripts=[os.path.join('scripts', s) for s in os.listdir('scripts')])
| [
"alorenzo175@users.noreply.github.com"
] | alorenzo175@users.noreply.github.com |
2d2395a8709438a1b0590fef8e3b2f746722f1e2 | b9702abaddfecfe045f3a03d9a3100f42b45d4bd | /flask-example.py | 74f6125e2081a7c8f4d39ab23637a49a5f513755 | [] | no_license | CachingFoX/j-test | fa1cbee9afe7de7bbedb56b894ffde572fbe1405 | a15f41ed4e6a2b0f22d326f93e4b8979b43f6c23 | refs/heads/master | 2021-03-05T21:54:35.406154 | 2020-03-09T23:20:43 | 2020-03-09T23:20:43 | 246,155,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
    """Greet the caller; honours an optional ?name= query parameter."""
    # BUG FIX: `request` and `escape` were referenced but never imported
    # (the module only imports Flask), so every request raised NameError.
    # Import them locally; Flask re-exports markupsafe's escape here in
    # the Flask 1.x line contemporary with this file.
    from flask import request, escape
    name = request.args.get("name", "World")
    return f'Hello, {escape(name)}!'
if __name__ == '__main__':
    # Start the Flask development server with default settings
    # (not suitable for production deployment).
    app.run()
| [
"noreply@github.com"
] | CachingFoX.noreply@github.com |
30debcb3894b2bc2521cb65ad73c0f8d57f19288 | 8952661cf90c1c6e65e792b0499b8183f0fcb3f5 | /Browser.py | 76b26658ac39d728061c50bcd408dc7d09a22ac7 | [] | no_license | 527088995/s-analysetooldemo | 46c9c9ddb5bd9e6f654eb29f8194435d07b2033a | ccd511c688cabb17c65de60c5cc39aa27831b92e | refs/heads/master | 2022-12-17T21:51:34.376473 | 2020-09-15T07:13:33 | 2020-09-15T07:13:33 | 295,623,705 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,054 | py | from selenium import webdriver
from bs4 import BeautifulSoup
from SearchEngine import EngineConfManage
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import hashlib
import time
import xlwt
class Browser:
    """Drives a Chrome WebDriver session against a configured search engine."""
    def __init__(self,conf):
        self.browser=webdriver.Chrome()
        self.conf=conf
        self.conf['kw']=''
        # Engine-specific element ids/xpaths/URLs looked up by engine name.
        self.engine_conf=EngineConfManage().get_Engine_conf(conf['engine']).get_conf()
    # Set the search keyword.
    def set_kw(self,kw):
        self.conf['kw']=kw
    # Type the keyword into the engine's search box.
    def send_keyword(self):
        input = self.browser.find_element_by_id(self.engine_conf['searchTextID'])
        input.send_keys(self.conf['kw'])
    # Click the search button.
    def click_search_btn(self):
        search_btn = self.browser.find_element_by_id(self.engine_conf['searchBtnID'])
        search_btn.click()
    # Collect result URLs together with their page sources.
    def get_search_res_url(self):
        """Open each result in a new tab and map its resolved URL to the
        page source, paging until conf['target_page'] links are gathered."""
        res_link={}
        WebDriverWait(self.browser,timeout=30,poll_frequency=1).until(EC.presence_of_element_located((By.ID, "page")))
        # Parse the result listing with BeautifulSoup.
        content=self.browser.page_source
        soup = BeautifulSoup(content, "html.parser")
        search_res_list=soup.select('.'+self.engine_conf['searchContentHref_class'])
        while len(res_link)<self.conf['target_page']:
            for el in search_res_list:
                js = 'window.open("'+el.a['href']+'")'
                self.browser.execute_script(js)
                handle_this=self.browser.current_window_handle # current window handle
                handle_all=self.browser.window_handles # all window handles
                handle_exchange=None # handle to switch to
                for handle in handle_all: # any handle that differs is the new tab
                    if handle != handle_this: # switch to the non-current one
                        handle_exchange = handle
                self.browser.switch_to.window(handle_exchange) # switch
                real_url=self.browser.current_url
                # NOTE(review): when the URL is white-listed, `continue`
                # skips browser.close() and the switch back, leaving the
                # tab open and focused -- looks unintended, confirm.
                if real_url in self.conf['white_list']: # white list
                    continue
                time.sleep(1)
                res_link[real_url]=self.browser.page_source # store the result
                self.browser.close()
                self.browser.switch_to.window(handle_this)
            content_md5=hashlib.md5(self.browser.page_source.encode(encoding='UTF-8')).hexdigest() # md5 fingerprint for page-change detection
            self.click_next_page(content_md5)
        return res_link
    # Advance to the next result page.
    def click_next_page(self,md5):
        """Click 'next page', then poll until the page md5 changes.
        Returns False after ~100 polls without a change, True otherwise."""
        WebDriverWait(self.browser,timeout=30,poll_frequency=1).until(EC.presence_of_element_located((By.ID, "page")))
        # Baidu uses a different next-button xpath once past page one;
        # try the non-first-page xpath first, fall back to the first-page one.
        try:
            next_page_btn = self.browser.find_element_by_xpath(self.engine_conf['nextPageBtnID_xpath_s'])
        except:
            next_page_btn = self.browser.find_element_by_xpath(self.engine_conf['nextPageBtnID_xpath_f'])
        next_page_btn.click()
        # Compare page-source md5 hashes to detect that the page changed
        # (temporary approach; original author notes it is buggy).
        i=0
        while md5==hashlib.md5(self.browser.page_source.encode(encoding='UTF-8')).hexdigest():# md5 comparison
            time.sleep(0.3)# brute-force delay to keep things stable
            i+=1
            if i>100:
                return False
        return True
class BrowserManage(Browser):
    # Open the target search engine and run the full search flow.
    def search(self):
        """Run keyword entry + search and return {url: page_source}."""
        self.browser.get(self.engine_conf['website']) # open the engine's site
        self.send_keyword() # type the search keyword
        self.click_search_btn() # click search
        return self.get_search_res_url() # harvest the result pages
| [
"527088995@qq.com"
] | 527088995@qq.com |
1e0916ff486568e516869d103e124d84262fa9f7 | 57ea2c9205d3e288af4e31aef5d70249f92bcbfc | /armature/cli/packer.py | 338e569a2e4cab5ccb0a3f2bff724de598c08721 | [
"MIT"
] | permissive | buuren/armature | 6899b6946e8dc2bf5f8912cee01ed13e38d24d9b | 6675ba872dbeee59dd992890fb22b37490aa145c | refs/heads/master | 2022-12-11T18:18:02.309200 | 2018-03-07T20:23:19 | 2018-03-07T20:23:19 | 123,180,203 | 0 | 0 | MIT | 2021-06-01T21:54:27 | 2018-02-27T19:52:05 | Python | UTF-8 | Python | false | false | 1,105 | py | import click
from modules.executor import Executor
from utils.config_parser import ConfigParser
# Module configuration loaded at import time.
# NOTE(review): the path is hard-coded to a developer machine and
# `json_data` is not referenced below -- verify before relying on it.
json_data = ConfigParser(
    path="/home/vlad/infra/armature/armature/conf/modules.json"
).return_json()
# Name passed to Executor so it picks the packer module's settings.
MODULE = "packer"
@click.group()
def cli():
    # Root click group: the commands below attach to it.
    pass
@cli.command()
def prepare_template():
    """Prepare the Packer template"""
    click.echo('prepare_template')
    with Executor(module=MODULE, cli="prepare_template") as cli_executor:
        cli_executor.run(
            cli="prepare_template",
            use_docker_run_wrapper=True
        )
@cli.command()
def validate_template():
    """Validate configuration file"""
    click.echo('validate_template')
    with Executor(module=MODULE, cli="validate_template") as cli_executor:
        cli_executor.run(
            cli="validate_template",
            use_docker_run_wrapper=True
        )
@cli.command()
def build_template():
    """Build virtual machine image"""
    click.echo('build')
    # PACKER_LOG=${PACKER_LOG} ${PACKER_BIN} build $(PACKER_CONFIG)
@cli.command()
def clean():
    """Cleanup"""
click.echo('clean') | [
"vkolesni@gmail.com"
] | vkolesni@gmail.com |
9b06754ff330693fa3c3a481c929c2371cbfe84d | 15e0ae665ffbed837c4912dfbea8db1ced8a43db | /show_help.py | 843d36b9f3b16409a6fa3eacaba625a8008b4f79 | [] | no_license | mhcrnl/TkEditor | a664af328c1f69d6a776c1ed90130d08b569c9c9 | 90e45abcfc8e9f7ce25cc9fb8ac183eb68d85037 | refs/heads/master | 2023-08-25T13:55:26.086446 | 2021-10-24T10:41:19 | 2021-10-24T10:41:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | from tkinter import *
from tkinter import ttk
from platform import uname
class HelpWindow(Toplevel):
    """The "About TkEditor" window: builds its widgets, then runs mainloop."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # # # # # # Window Options # # # # # # #
        # Transient
        self.transient(self.master)
        # Title
        self.title("About TkEditor")
        # Geometry, windows default size
        self.geometry("380x580+30+50")
        # self.tk.call("source", "C:/Users/MW/Desktop/programs/text editor/text editor/sun-valley.tcl")
        # self.tk.call("set_theme", "light")
        # Disable Resizing
        # self.resizable(0, 0)
        # # # # # # Main Frame # # # # # # #
        self.main_frame = Frame(self, bd=4, bg="#bbbbbb", relief=GROOVE)
        self.main_frame.pack(fill=BOTH, expand=True, pady=5, padx=10)
        # NOTE(review): `pic` is a local; it survives only because
        # mainloop() runs inside __init__ -- keep that in mind when refactoring.
        pic = PhotoImage(master=self, file="icon small.png")
        # # # # # # Main Items # # # # # # #
        # # # # # # Labels # # # # # # #
        Label(self.main_frame, image=pic, text=" TkEditor", bg="#bbbbbb",
              compound=LEFT, font=("Monospace", 15, "bold")).pack(pady=15, padx=10)
        Label(self.main_frame, text="My Python Editor,\nA simple python Editor.\nMade in Python3! And Tkinter",
              bg="#bbbbbb", font=("Ubuntu Mono", 13)).pack(pady=8, padx=13)
        Label(self.main_frame, text="Email: musaiw@outlook.com", bg="#bbbbbb",
              font=("Ubuntu Mono", 13, "bold")).pack(pady=8, padx=13,)
        ttk.Separator(self.main_frame, orient=HORIZONTAL).pack(fill=X)
        Label(self.main_frame, text=f"Version: 2", bg="#bbbbbb",
              font=("Ubuntu Mono", 13, "bold")).pack(pady=8, padx=13)
        Label(self.main_frame, text=f"Tk Version: {TkVersion}", bg="#bbbbbb",
              font=("Ubuntu Mono", 13, "bold")).pack(pady=8, padx=13)
        ttk.Separator(self.main_frame, orient=HORIZONTAL).pack(fill=X)
        Label(self.main_frame, text=f"OS: {uname().system}", bg="#bbbbbb",
              font=("Ubuntu Mono", 13, "bold")).pack(pady=8, padx=13)
        Label(self.main_frame, text=f"Copyright © Musa Wong 2019-2021", bg="#bbbbbb",
              font=("Ubuntu Mono", 13, "bold")).pack(pady=8, padx=13)
        # # # # # # Buttons # # # # # # #
        closebtn = ttk.Button(self, text="Close", command=lambda: self.destroy())
        # closebtn.configure(highlightbackground="black")
        closebtn.pack(pady=5, side=BOTTOM)
        # Call the mainloop
        self.mainloop()
if __name__ == '__main__':
HelpWindow()
# print(uname().system + ",", uname().node, uname().processor, "bit")
| [
"noreply@github.com"
] | mhcrnl.noreply@github.com |
900556a23162d352469f42f1d15d3b22de3a5b9c | b598183f47c3ffdc5e2610b63a2e579c24ecafce | /from_image_cli.py | 3ad531649cff8d3d4a1f48b72ca1be6b09586ebf | [] | no_license | bouttier/jps | 81ba8ee05a19aee0b6fa6868733215996f516d92 | cec8093a4ad877bdd5dc809d0080e6739bd14ead | refs/heads/master | 2020-05-09T10:38:34.167917 | 2013-01-18T01:59:41 | 2013-01-18T01:59:41 | 7,328,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
from PIL import Image
from jps import *
import sys
if __name__ == "__main__":
    # Usage: prog image.(png|jpeg) [xStart yStart xEnd yEnd]
    if len(sys.argv) !=2 and len(sys.argv) !=6:
        print "\nUtilisation :\n\t"+sys.argv[0]+" image.(png|jpeg) [xDepart yDepart xFin yFin]\n"
        sys.exit(1)
    mapImage = Image.open(sys.argv[1])
    width, height = mapImage.size
    # `Map` and `compute` come from the star-import of the jps module.
    map = Map((width, height))
    # The pixel at (1,1) is taken as the reference "void" colour.
    voidColor = mapImage.getpixel((1,1))
    count = 0
    # Walk every pixel: void-coloured cells are removed from the map,
    # others are added (presumably free vs. obstacle -- confirm in jps.Map).
    for p in mapImage.getdata():
        x,y = count % width, count // width
        if p == voidColor:
            map.remove((x, y))
        else:
            map.add((x, y))
        count += 1
    if len(sys.argv) == 6:
        # Start/end coordinates supplied on the command line; check bounds
        # and that the start is not past the end on either axis.
        xBegin = int(sys.argv[2])
        yBegin = int(sys.argv[3])
        xEnd = int(sys.argv[4])
        yEnd = int(sys.argv[5])
        if xBegin<0 or xBegin>=width or xEnd<0 or xEnd>=width\
        or yBegin<0 or yBegin>=height or yEnd<0 or yEnd>=height\
        or xBegin>xEnd or yBegin>yEnd:
            print(u"Les coordonnées que vous avez entrées sont incorrectes.")
            sys.exit(1)
    else:
        # Default path: one corner of the map to the opposite corner.
        xBegin = 1
        yBegin = 1
        xEnd = width - 1
        yEnd = height - 1
    # Run the jump-point search and report the result.
    sol = compute(map, (xBegin, yBegin), (xEnd, yEnd))
    print(map)
    print(u"\nDépart : ("+str(xBegin)+","+str(yBegin)+")")
    print(u"Arrivée : ("+str(xEnd)+","+str(yEnd)+")")
    if sol:
        print(sol)
    else:
        print("Il n'y a pas de solution.")
| [
"franklin.delehelle@etu.enseeiht.fr"
] | franklin.delehelle@etu.enseeiht.fr |
14a3acc0bab16e60abbf783682b7aa042165a154 | 20bb1ae805cd796a7c377e55966633441d1d9fd5 | /uva/10680 LCM/lcm.py | b168af4bd0eee42ae91ee9c4dc81041695a8fe27 | [] | no_license | nathantheinventor/solved-problems | 1791c9588aefe2ebdc9293eb3d58317346d88e83 | c738e203fa77ae931b0ec613e5a00f9a8f7ff845 | refs/heads/master | 2022-10-27T08:58:23.860159 | 2022-10-13T20:18:43 | 2022-10-13T20:18:43 | 122,110,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | ans = [0, 1]
# Precompute, for every n up to 10**6, a truncated form of lcm(1..n).
# `ans` (started as [0, 1] above) is indexed by n; `cur` carries the running
# product with trailing zeros stripped, reduced to its last three digits.
cur = 1
factors = [[] for _ in range(1000001)]
# Sieve of distinct prime factors: factors[i] lists every prime dividing i.
for i in range(2, 1000001, 2):
    factors[i].append(2)
for i in range(3, 1000001, 2):
    if len(factors[i]) == 0:
        for k in range(i, 1000001, i):
            factors[k].append(i)
for i in range(2, 1000001):
    # len(factors[i]) == 1 means i is a prime power p**k; lcm(1..i) gains
    # exactly one extra factor of p at such i, so multiply it in.
    if len(factors[i]) == 1:
        for factor in factors[i]:
            cur *= factor
            # Strip trailing zeros, then keep only the last three digits --
            # presumably enough to keep the final non-zero digit stable
            # (TODO confirm the carry analysis).
            while cur % 10 == 0:
                cur //= 10
            cur %= 1000
    ans.append(cur)
# Answer queries until a non-positive n is entered: print the last
# non-zero digit of lcm(1..n).
n = int(input())
while n > 0:
    # print(n)
    print(ans[n] % 10)
n = int(input()) | [
"nathantheinventor@gmail.com"
] | nathantheinventor@gmail.com |
03d379607640948529f39b747829d1c818888cf2 | 3ba73ee31160769d1736b1516024fdf40dd7492c | /10.Trees/CheckIfvalidBT.py | a85952b9bf39dc31b41aaec8d4e6c5100cb36233 | [] | no_license | chaerim-kim/Data-Structures-and-Algorithms | 86d4dfcd4948a08b16ead1492e9a825adc4deaad | 80d685ef2fcb54f3299225fa080df7d64e672735 | refs/heads/master | 2023-03-31T01:32:49.638962 | 2021-04-07T16:12:20 | 2021-04-07T16:12:20 | 290,717,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | # Check if an array of integer pairs can form a binary tree
# 1. Parent node can only have 2 children
# 2.*All the children node should be unique;
from collections import Counter
def TreeConstructor(strArr):
    """Return True when the pairs in strArr can form a binary tree.

    Each element looks like "(child,parent)".  The pairs form a valid
    binary tree when every parent has at most two children and every
    child appears only once (i.e. has a single parent).

    Fixes the original implementation, which unpacked the integers of
    each eval()'d tuple (`for i,v in eval(strTup)` -- a TypeError on any
    non-empty input), relied on eval() for parsing untrusted strings,
    and emitted debug prints.
    """
    parents = []
    children = []
    for pair in strArr:
        # Parse "(1,2)" safely: strip the parentheses, split on the comma.
        child, parent = pair.strip("()").split(",")
        children.append(int(child))
        parents.append(int(parent))
    # Rule 1: a parent node may have at most two children.
    for count in Counter(parents).values():
        if count > 2:
            return False
    # Rule 2: every child node must be unique (one parent each).
    for count in Counter(children).values():
        if count > 1:
            return False
    return True
print (TreeConstructor(["(1,2)", "(2,4)", "(5,7)", "(7,2)", "(9,5)"])) # True
print (TreeConstructor(["(1,2)", "(3,2)", "(2,12)", "(5,2)"] )) # False
| [
"kcl0720@gmail.com"
] | kcl0720@gmail.com |
2bcf2ce88da8de40f0c8896ff08e2b09b79815cb | e60573cca7a015aace1eb03bb14fbb10eab0a699 | /profiler.py | d663c58292fcb26faf428e377ffe961cd913c1cd | [] | no_license | Atoqaz/ludo | 424f4e9e9ec3caf984efeec2f7e309389a03b8ca | e6140120d3962d2936e505bf7b4dd2b347d0bc92 | refs/heads/master | 2023-03-17T18:02:14.220857 | 2023-03-02T19:47:04 | 2023-03-02T19:47:04 | 249,055,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | """ Tool to measure the time each function takes.
Use it by adding the @profile decorator to a function:
@profile
def myFun(args):
...
"""
import cProfile, pstats, io
def profile(fnc):
    """ A decorator that profiles `fnc` with cProfile.

        Every call runs under the profiler; the statistics, sorted by
        cumulative time, are printed to stdout and the wrapped function's
        return value is passed through unchanged.

        Other sort keys accepted by pstats.sort_stats(), for reference:
        calls/ncalls/pcalls (call counts), cumulative/cumtime,
        file/filename/module, line, name, nfl, stdname, time/tottime.
    """
    import functools

    @functools.wraps(fnc)  # keep the wrapped function's name/docstring
    def inner(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        try:
            retval = fnc(*args, **kwargs)
        finally:
            # Ensure profiling stops even when fnc raises.
            pr.disable()
        s = io.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
        ps.print_stats()
        print(s.getvalue())
        return retval

    return inner
| [
"atoqaz@gmail.com"
] | atoqaz@gmail.com |
15e529e0b3ced8be4239c5ac1780af25170e7b51 | 0a02fb9f8c2439a10847ffb666c07965e8e5fabc | /BuddyString/buddystrings.py | eb953dc0a3f285144e75bbba42fb4875143b590f | [] | no_license | HJ23/Algorithms-for-interview- | cf40125789a6a7378e376035ac8fe6b4e4c96eb5 | 28525bd097a702d3597d5ffd3cc4800e0499e5b5 | refs/heads/master | 2021-07-11T07:40:00.044046 | 2020-12-08T19:44:12 | 2020-12-08T19:44:12 | 223,262,957 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import sys
sys.path.append("..")
from BasicTester import BasicTester
def find(C):
    """Buddy-strings check for the pair C = (first, second).

    True when swapping exactly two characters of `first` can make it
    equal to `second`; identical strings qualify only when they contain
    a repeated character that can be swapped with itself.
    """
    first, second = C
    if len(first) != len(second):
        return False
    if first == second:
        # A swap must touch two positions; it leaves the string intact
        # only when some character occurs more than once.
        return len(set(first)) != len(first)
    # Positions where the two strings disagree.
    diffs = [k for k, (a, b) in enumerate(zip(first, second)) if a != b]
    if len(diffs) == 2:
        i, j = diffs
        # A single swap works iff the mismatched characters cross over.
        return first[i] == second[j] and first[j] == second[i]
    # Zero, one, or three-plus mismatches can never be fixed by one swap.
    return False
testme=BasicTester(find)
testme.test(("ab","ba"),True)
testme.test(("abc","abd"),False)
testme.test(("","aaa"),False)
testme.test(("abab","abab"),True)
testme.test(("aaa","aaa"),True)
| [
"carleuler@outlook.com"
] | carleuler@outlook.com |
29c3de812a7aef8d6a7d2ed00d6ccaedd0e88d7b | 366f551437d4e014d6ba8943ac080009fd2f74ca | /pro1.py | dec346ee50ac9f947709460e2ce36a1f6e742498 | [] | no_license | Raviraghul2210/GuviPythonPrograms | feb294804e0c7c0f6da3af9601bf857d57392cf1 | 42f6439bcf4358332c44ba1f653dbc977aa7f8c5 | refs/heads/master | 2020-06-17T00:28:11.611190 | 2019-08-09T09:32:34 | 2019-08-09T09:32:34 | 195,743,797 | 0 | 1 | null | 2019-07-08T05:40:33 | 2019-07-08T05:40:33 | null | UTF-8 | Python | false | false | 178 | py | sum=int(input())
# Read `sum` strings (the count was read above; note that the variable
# shadows the builtin sum()) and print their longest common prefix.
p=[]
for x in range(0,sum):
    la=input()
    p.append(la)
new=[]
# zip(*p) walks the strings column by column; stop at the first column
# whose characters are not all identical.
for x in zip(*p):
    if(x.count(x[0])==len(x)):
        new.append(x[0])
    else:
        break
print(''.join(new))
| [
"noreply@github.com"
] | Raviraghul2210.noreply@github.com |
01e5ecf71f7dfe0f32d7efff5e02ec11532e4f44 | 8ee64f13d2b837113ba86d42d0a308d550ee8297 | /aiyuke.py | 072038998f066853ee60a8039ddcfc00e127e2a6 | [] | no_license | wang598842739/GenerateRSSFeedForWebPages | 9eb9c126e5f7d308c752133f11cc12942d9fa741 | aaadda52b4a2d7c56f5aaf0b9fab8b955cb432f5 | refs/heads/master | 2021-01-12T08:37:44.568388 | 2016-12-12T13:12:05 | 2016-12-12T13:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | #coding:UTF-8
import re
import urllib
import rss
# Aiyuke "Jian Zhao Chai Zhao" badminton video special (Python 2: urllib.urlopen).
c = urllib.urlopen("http://www.aiyuke.com/video_special/254.html").read().decode('utf8')
site = "http://www.aiyuke.com/video_play/"
# Scrape every video id linked from the special page and hand the
# {title, link} records to the project's rss helper.
rss.rssify("aiyuke", site, [{'title': id, 'link':site+id} for (id) in re.findall('<a href="http://www.aiyuke.com/video_play/(.*?)">', c)])
| [
"ning.liu4@hpe.com"
] | ning.liu4@hpe.com |
e2a04da04bde7810a7710cff56f97ed8ec041933 | 5c48106efcc2f9de006a3c4bbe66d08a17c6e97f | /zuoye/kaoshi3.py | 4098696654c24ccf005557485885b83d192ef3df | [] | no_license | phpcolt/python | 83631eb00481d8cbbe317196183efc1ae605c69b | 493b1a5c132650f4c7c1fb33ed4c01f1e91c13be | refs/heads/master | 2020-03-09T04:32:10.420134 | 2018-04-09T09:24:18 | 2018-04-09T09:24:18 | 128,590,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,427 | py | #coding=utf-8
import os
goods = [
{"name": "电脑", "price": 1999},
{"name": "鼠标", "price": 10},
{"name": "游艇", "price": 20},
{"name": "美女", "price": 998},
]
'''
功能要求:
基础要求:
1、启动程序后,输入用户名密码后,让用户输入工资,然后打印商品列表
2、允许用户根据商品编号购买商品
3、用户选择商品后,检测余额是否够,够就直接扣款,不够就提醒
4、可随时退出,退出时,打印已购买商品和余额
5、在用户使用过程中, 关键输出,如余额,商品已加入购物车等消息,需高亮显示
扩展需求:
1、用户下一次登录后,输入用户名密码,直接回到上次的状态,即上次消费的余额什么的还是那些,再次登录可继续购买
2、允许查询之前的消费记录
'''
# last_shopping = []
# salacy = 0#工资
# balance = 0#余额
# s = [] #列表
# sava = []
# while True:
# username = input("用户名:")
# password = input("密码:")
# if username.strip() == 'root' and password.strip() == '123456':
# print("登录成功")
# while True:
# pay = input("请输入您的工资:")
# if pay.isdigit():
# salacy = int(pay)
# for key,value in enumerate(goods):
# #print("%s.%s"%(key,value['name']))
# s.append([value['name'],value['price']])
# while True:
# for key, value in enumerate(goods):
# print("%s.%s" % (key, value['name']))
# shop = input("请输入选择的商品号:")
# if shop == 'q':
# if len(last_shopping) != 0:
# print(("已经购买的商品%s,余额是%s")%(last_shopping,salacy))
#
# else:
# print("你都还没购物,就离开了,太遗憾了,下次再再来")
# exit()
#
# if salacy > int(s[0][1]) and int(shop) < len(goods) and shop.isdigit():
# salacy -= int(s[0][1])
#
# if s[int(shop)] in last_shopping:
#
# # print(s[int(shop)])
# number = last_shopping.index(s[int(shop)])
# last_shopping[number][1] = last_shopping[number][1] * 2
# #last_shopping.append(s[int(shop)])
# print(last_shopping)
# else:
# last_shopping.append(s[int(shop)])
# print(last_shopping)
# sava.append(last_shopping)
# sava.append(salacy)
# sf = open('1.txt','w')
# sf.write(str(sava))
# sf.close()
# elif salacy > int(s[0][1]) and int(shop) > len(goods):
# print("当前选的太坑爹了,请重新选")
# continue
# else:
# print("您的余额不足,请下次购物,退出请按q")
#
# else:
# print("请重新输入您的工资")
#
# else:
# print("请重新输入账户和密码")
# last_shopping = []#
# salacy = 0#工资
# balance = 0#余额
# s = [] #列表
# sava = []
# dj = 0
# while True:
# username = input("用户名:")
# password = input("密码:")
# if username.strip() == 'root' and password.strip() == '123456':
# print("登录成功")
# for key, value in enumerate(goods):
# # print("%s.%s"%(key,value['name']))
# s.append([value['name'], value['price']])
# #print(s)
# while True:
# if os.path.getsize('1.txt') > 0:
# sr = open('1.txt','r')
# strs = sr.read()
# sr.close()
# #print("您上次购物时记录%s"%strs)
# #print(list(strs))
# new_list = eval(strs)
# salacy = new_list[len(new_list)-1]
# print(salacy)
# del new_list[len(new_list)-1]
# last_shopping = new_list
# print(last_shopping)
# # salacy = list1[len(list1)-1]
# # print(salacy)
#
# else:
# pay = input("请输入您的工资:")
# if pay.isdigit():
# salacy = int(pay)
# else:
# print("请重新输入您的工资")
#
# while True:
# for key, value in enumerate(goods):
# print("%s.%s %s" % (key, value['name'],value['price']))
# shop = input("请输入选择的商品号:")
#
# if shop == 'q':
# if len(last_shopping) != 0:
# print(("已经购买的商品%s,余额是%s")%(last_shopping,salacy))
# for i in last_shopping:
# sava.append(i)
# sava.append(salacy)
# sf = open('1.txt', 'w')
# sf.write(str(sava))
# sf.close()
#
# else:
# print("你都还没购物,就离开了,太遗憾了,下次再再来")
# exit()
# for key, value in enumerate(goods):
# if key == int(shop):
# dj = value['price']
# if salacy > dj and int(shop) < len(goods) and shop.isdigit():
# salacy -= dj
#
# if s[int(shop)] in last_shopping and len(last_shopping) > 0:
# number = last_shopping.index(s[int(shop)])
# last_shopping[number][1] += dj
# #last_shopping.append(s[int(shop)])
# print(last_shopping)
#
# else:
# last_shopping.append(s[int(shop)])
# print(last_shopping)
#
# elif salacy > dj and int(shop) > len(goods):
# print("当前选的太坑爹了,请重新选")
# continue
# else:
# print("您的余额不足,请下次购物,退出请按q")
#
#
#
# else:
# print("请重新输入账户和密码")
# Minimal login + product-listing loop (see the requirements docstring
# near the top of the file); prompts and messages are in Chinese.
while True:
    username = input("请输入您的用户名:")
    password = input("请输入您的密码")
    # Hard-coded credentials: root / 123456.
    if username == "root" and password == "123456":
        print("登录成功")
        # List every product with its index so it can be chosen by number.
        for key,value in enumerate(goods):
            print(key,value)
    else:
        print("请重新登录")
| [
"macbookpro@192.168.100.109"
] | macbookpro@192.168.100.109 |
b9dde9ef681c952721f0165bff9acbd9a024ba59 | 95740c67e49e1528919eb8f96ae8086e7386e558 | /project/reports/election_prediction/pattern/web/__init__.py | 67bf0c5ef521ac625225815154e73cffa6a8de32 | [
"MIT"
] | permissive | mdeff/ntds_2016 | 5449fd5b7a1e4aa8721d0ae33a1f8a097f73b265 | 2d597838cb2688471cc6122a5570441585393148 | refs/heads/master | 2021-01-17T17:47:01.434340 | 2019-12-16T17:53:04 | 2019-12-16T17:53:04 | 69,178,943 | 109 | 51 | null | null | null | null | UTF-8 | Python | false | false | 149,004 | py | #### PATTERN | WEB #################################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# Python API interface for various web services (Google, Twitter, Wikipedia, ...)
# smgllib.py is removed from Python 3, a warning is issued in Python 2.6+. Ignore for now.
import warnings; warnings.filterwarnings(action='ignore', category=DeprecationWarning, module="sgmllib")
import threading
import time
import os
import socket, urlparse, urllib, urllib2
import base64
import htmlentitydefs
import httplib
import sgmllib
import re
import xml.dom.minidom
import StringIO
import bisect
import itertools
import new
import api
import feed
import oauth
import json
import locale
from feed import feedparser
from soup import BeautifulSoup
try:
# Import persistent Cache.
# If this module is used separately,
# a dict is used (i.e. this Python session only).
from cache import Cache, cache, TMP
except:
cache = {}
try:
from imap import Mail, MailFolder, Message, GMAIL
from imap import MailError, MailServiceError, MailLoginError, MailNotLoggedIn
from imap import FROM, SUBJECT, DATE, BODY, ATTACHMENTS
except:
pass
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
#### UNICODE #######################################################################################
# Latin-1 (ISO-8859-1) encoding is identical to Windows-1252 except for the code points 128-159:
# Latin-1 assigns control codes in this range, Windows-1252 has characters, punctuation, symbols
# assigned to these code points.
def decode_string(v, encoding="utf-8"):
    """ Returns the given value as a Unicode string (if possible).

        `encoding` may be a codec name or a sequence of (name[, errors])
        argument tuples; a plain name is expanded to the fallback chain
        (encoding,), ("windows-1252",), ("utf-8", "ignore") -- see the
        module note on the Latin-1 / Windows-1252 difference.
        Byte strings that resist every codec are returned unchanged;
        non-string values are converted with unicode().
        (Python 2 only: relies on the basestring/unicode builtins.)
    """
    if isinstance(encoding, basestring):
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, str):
        for e in encoding:
            try: return v.decode(*e)
            except:
                pass
        return v
    return unicode(v)
def encode_string(v, encoding="utf-8"):
    """ Returns the given value as a Python byte string (if possible).

        `encoding` may be a codec name or a sequence of (name[, errors])
        argument tuples; a plain name is expanded to the fallback chain
        (encoding,), ("windows-1252",), ("utf-8", "ignore").
        Unicode strings that resist every codec are returned unchanged;
        non-string values are converted with str().
        (Python 2 only: relies on the basestring/unicode builtins.)
    """
    if isinstance(encoding, basestring):
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, unicode):
        for e in encoding:
            try: return v.encode(*e)
            except:
                pass
        return v
    return str(v)
u = decode_utf8 = decode_string
s = encode_utf8 = encode_string
# For clearer source code:
bytestring = s
#### ASYNCHRONOUS REQUEST ##########################################################################
class AsynchronousRequest(object):
    def __init__(self, function, *args, **kwargs):
        """ Executes the function in the background.
            AsynchronousRequest.done is False as long as it is busy, but the program will not halt in the meantime.
            AsynchronousRequest.value contains the function's return value once done.
            AsynchronousRequest.error contains the Exception raised by an erroneous function.
            For example, this is useful for running live web requests while keeping an animation running.
            For good reasons, there is no way to interrupt a background process (i.e. Python thread).
            You are responsible for ensuring that the given function doesn't hang.
        """
        self._response = None # The return value of the given function.
        self._error = None # The exception (if any) raised by the function.
        self._time = time.time()
        self._function = function
        # The worker thread starts immediately on construction.
        self._thread = threading.Thread(target=self._fetch, args=(function,)+args, kwargs=kwargs)
        self._thread.start()
    def _fetch(self, function, *args, **kwargs):
        """ Executes the function and sets AsynchronousRequest.response.
        """
        try:
            self._response = function(*args, **kwargs)
        except Exception, e:
            # Stored instead of raised: the caller inspects .error later.
            self._error = e
    def now(self):
        """ Waits for the function to finish and yields its return value.
        """
        self._thread.join(); return self._response
    @property
    def elapsed(self):
        """ Seconds elapsed since the request was created. """
        return time.time() - self._time
    @property
    def done(self):
        """ True once the worker thread has finished. """
        return not self._thread.isAlive()
    @property
    def value(self):
        """ The function's return value (None while busy or on error). """
        return self._response
    @property
    def error(self):
        """ The exception raised by the function, if any. """
        return self._error
    def __repr__(self):
        return "AsynchronousRequest(function='%s')" % self._function.__name__
def asynchronous(function, *args, **kwargs):
    """ Returns an AsynchronousRequest object for the given function.
        The function starts executing in a background thread immediately.
    """
    return AsynchronousRequest(function, *args, **kwargs)
# Alias (e.g., send(fetch, url) reads naturally for web requests).
send = asynchronous
#### URL ###########################################################################################
# User agent and referrer.
# Used to identify the application accessing the web.
USER_AGENT = "Pattern/2.6 +http://www.clips.ua.ac.be/pattern"
REFERRER = "http://www.clips.ua.ac.be/pattern"
# Mozilla user agent.
# Websites can include code to block out any application except browsers,
# so a browser-like user agent can be passed to URL.open() / URL.download().
MOZILLA = "Mozilla/5.0"
# HTTP request method.
GET = "get" # Data is encoded in the URL (query string).
POST = "post" # Data is encoded in the message body.
# URL parts.
# protocol://username:password@domain:port/path/page?query_string#anchor
PROTOCOL, USERNAME, PASSWORD, DOMAIN, PORT, PATH, PAGE, QUERY, ANCHOR = \
    "protocol", "username", "password", "domain", "port", "path", "page", "query", "anchor"
# MIME type.
# Lists of content types, used with URL.mimetype (e.g., URL.mimetype in MIMETYPE_IMAGE).
MIMETYPE_WEBPAGE = ["text/html"]
MIMETYPE_STYLESHEET = ["text/css"]
MIMETYPE_PLAINTEXT = ["text/plain"]
MIMETYPE_PDF = ["application/pdf"]
MIMETYPE_NEWSFEED = ["application/rss+xml", "application/atom+xml"]
MIMETYPE_IMAGE = ["image/gif", "image/jpeg", "image/png", "image/tiff"]
MIMETYPE_AUDIO = ["audio/mpeg", "audio/mp4", "audio/x-aiff", "audio/x-wav"]
MIMETYPE_VIDEO = ["video/mpeg", "video/mp4", "video/avi", "video/quicktime", "video/x-flv"]
MIMETYPE_ARCHIVE = ["application/x-stuffit", "application/x-tar", "application/zip"]
MIMETYPE_SCRIPT = ["application/javascript", "application/ecmascript"]
def extension(filename):
    """ Returns the file extension in the given filename: "cat.jpg" => ".jpg".
        Yields an empty string if the filename has no extension.
    """
    base, ext = os.path.splitext(filename)
    return ext
def urldecode(query):
    """ Inverse operation of urllib.urlencode.
        Returns a dictionary of (name, value)-items from a URL query string.
        Values are coerced: "" and "None" => None, digits => int, floats => float,
        everything else stays a (unicode) string.
    """
    def _format(s):
        if s == "" or s == "None":
            return None
        if s.lstrip("-").isdigit():
            return int(s)
        try: return float(s)
        except:
            return s
    if query:
        # "?a=1&b" => [["a", "1"], ["b", None]]; parameters without "=" get value None.
        query = query.lstrip("?").split("&")
        query = ((kv.split("=") + [None])[:2] for kv in query)
        # Unquote "+"-escaped keys/values and decode them to unicode.
        query = ((u(urllib.unquote_plus(bytestring(k))),
          _format(u(urllib.unquote_plus(bytestring(v))))) for k, v in query if k != "")
        return dict(query)
    return {}
url_decode = urldecode
def proxy(host, protocol="https"):
    """ Returns a (host, protocol)-tuple for the URL.open() proxy parameter.
        - host    : host address of the proxy server.
        - protocol: the protocol the proxy handles (default: "https").
    """
    return host, protocol
class Error(Exception):
    """ Base class for pattern.web errors.
        Optional keyword arguments:
        - src: the underlying exception / connection object (if any).
        - url: the URL that caused the error (if any).
    """
    def __init__(self, *args, **kwargs):
        self.src = kwargs.pop("src", None)
        self.url = kwargs.pop("url", None)
        Exception.__init__(self, *args)
    @property
    def headers(self):
        # HTTP response headers of the failed request, as a dictionary.
        return dict(self.src.headers.items())
class URLError(Error):
    pass # URL contains errors (e.g. a missing t in htp://).
class URLTimeout(URLError):
    pass # URL takes too long to load.
class HTTPError(URLError):
    pass # URL causes an error on the contacted server.
class HTTP301Redirect(HTTPError):
    pass # Too many redirects.
         # The site may be trying to set a cookie and waiting for you to return it,
         # or taking other measures to discern a browser from a script.
         # For specific purposes you should build your own urllib2.HTTPRedirectHandler
         # and pass it to urllib2.build_opener() in URL.open()
class HTTP400BadRequest(HTTPError):
    pass # URL contains an invalid request.
class HTTP401Authentication(HTTPError):
    pass # URL requires a login and password.
class HTTP403Forbidden(HTTPError):
    pass # URL is not accessible (user-agent?)
class HTTP404NotFound(HTTPError):
    pass # URL doesn't exist on the internet.
class HTTP420Error(HTTPError):
    pass # Used by Twitter for rate limiting.
class HTTP429TooMayRequests(HTTPError):
    pass # Used by Twitter for rate limiting.
         # NOTE: class name typo ("May" for "Many") kept for backward compatibility.
class HTTP500InternalServerError(HTTPError):
    pass # Generic server error.
class HTTP503ServiceUnavailable(HTTPError):
    pass # Used by Bing for rate limiting.
class URL(object):
def __init__(self, string=u"", method=GET, query={}, **kwargs):
""" URL object with the individual parts available as attributes:
For protocol://username:password@domain:port/path/page?query_string#anchor:
- URL.protocol: http, https, ftp, ...
- URL.username: username for restricted domains.
- URL.password: password for restricted domains.
- URL.domain : the domain name, e.g. nodebox.net.
- URL.port : the server port to connect to.
- URL.path : the server path of folders, as a list, e.g. ['news', '2010']
- URL.page : the page name, e.g. page.html.
- URL.query : the query string as a dictionary of (name, value)-items.
- URL.anchor : the page anchor.
If method is POST, the query string is sent with HTTP POST.
"""
self.__dict__["method"] = method # Use __dict__ directly since __setattr__ is overridden.
self.__dict__["_string"] = u(string)
self.__dict__["_parts"] = None
self.__dict__["_headers"] = None
self.__dict__["_redirect"] = None
if isinstance(string, URL):
self.__dict__["method"] = string.method
self.query.update(string.query)
if len(query) > 0:
# Requires that we parse the string first (see URL.__setattr__).
self.query.update(query)
if len(kwargs) > 0:
# Requires that we parse the string first (see URL.__setattr__).
self.parts.update(kwargs)
def _parse(self):
""" Parses all the parts of the URL string to a dictionary.
URL format: protocal://username:password@domain:port/path/page?querystring#anchor
For example: http://user:pass@example.com:992/animal/bird?species=seagull&q#wings
This is a cached method that is only invoked when necessary, and only once.
"""
p = urlparse.urlsplit(self._string)
P = {PROTOCOL: p[0], # http
USERNAME: u"", # user
PASSWORD: u"", # pass
DOMAIN: p[1], # example.com
PORT: u"", # 992
PATH: p[2], # [animal]
PAGE: u"", # bird
QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
ANCHOR: p[4] # wings
}
# Split the username and password from the domain.
if "@" in P[DOMAIN]:
P[USERNAME], \
P[PASSWORD] = (p[1].split("@")[0].split(":")+[u""])[:2]
P[DOMAIN] = p[1].split("@")[1]
# Split the port number from the domain.
if ":" in P[DOMAIN]:
P[DOMAIN], \
P[PORT] = P[DOMAIN].split(":")
P[PORT] = P[PORT].isdigit() and int(P[PORT]) or P[PORT]
# Split the base page from the path.
if "/" in P[PATH]:
P[PAGE] = p[2].split("/")[-1]
P[PATH] = p[2][:len(p[2]) - len(P[PAGE])].strip("/").split("/")
P[PATH] = filter(lambda v: v != "", P[PATH])
else:
P[PAGE] = p[2].strip("/")
P[PATH] = []
self.__dict__["_parts"] = P
# URL.string yields unicode(URL) by joining the different parts,
# if the URL parts have been modified.
def _get_string(self): return unicode(self)
def _set_string(self, v):
self.__dict__["_string"] = u(v)
self.__dict__["_parts"] = None
string = property(_get_string, _set_string)
@property
def parts(self):
""" Yields a dictionary with the URL parts.
"""
if not self._parts: self._parse()
return self._parts
@property
def querystring(self):
""" Yields the URL querystring: "www.example.com?page=1" => "page=1"
"""
s = self.parts[QUERY].items()
s = dict((bytestring(k), bytestring(v if v is not None else "")) for k, v in s)
s = urllib.urlencode(s)
return s
def __getattr__(self, k):
if k in self.__dict__ : return self.__dict__[k]
if k in self.parts : return self.__dict__["_parts"][k]
raise AttributeError, "'URL' object has no attribute '%s'" % k
def __setattr__(self, k, v):
if k in self.__dict__ : self.__dict__[k] = u(v); return
if k == "string" : self._set_string(v); return
if k == "query" : self.parts[k] = v; return
if k in self.parts : self.__dict__["_parts"][k] = u(v); return
raise AttributeError, "'URL' object has no attribute '%s'" % k
def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
""" Returns a connection to the url from which data can be retrieved with connection.read().
When the timeout amount of seconds is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError (e.g. HTTP404NotFound).
"""
url = self.string
# Use basic urllib.urlopen() instead of urllib2.urlopen() for local files.
if os.path.exists(url):
return urllib.urlopen(url)
# Get the query string as a separate parameter if method=POST.
post = self.method == POST and self.querystring or None
socket.setdefaulttimeout(timeout)
if proxy:
proxy = urllib2.ProxyHandler({proxy[1]: proxy[0]})
proxy = urllib2.build_opener(proxy, urllib2.HTTPHandler)
urllib2.install_opener(proxy)
try:
request = urllib2.Request(bytestring(url), post, {
"User-Agent": user_agent,
"Referer": referrer
})
# Basic authentication is established with authentication=(username, password).
if authentication is not None:
request.add_header("Authorization", "Basic %s" %
base64.encodestring('%s:%s' % authentication))
return urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 301: raise HTTP301Redirect(src=e, url=url)
if e.code == 400: raise HTTP400BadRequest(src=e, url=url)
if e.code == 401: raise HTTP401Authentication(src=e, url=url)
if e.code == 403: raise HTTP403Forbidden(src=e, url=url)
if e.code == 404: raise HTTP404NotFound(src=e, url=url)
if e.code == 420: raise HTTP420Error(src=e, url=url)
if e.code == 429: raise HTTP429TooMayRequests(src=e, url=url)
if e.code == 500: raise HTTP500InternalServerError(src=e, url=url)
if e.code == 503: raise HTTP503ServiceUnavailable(src=e, url=url)
raise HTTPError(str(e), src=e, url=url)
except httplib.BadStatusLine, e:
raise HTTPError(str(e), src=e, url=url)
except socket.timeout, e:
raise URLTimeout(src=e, url=url)
except socket.error, e:
if "timed out" in e.args[0]:
raise URLTimeout(src=e, url=url)
raise URLError(str(e), src=e, url=url)
except urllib2.URLError, e:
if "timed out" in e.args[0] \
or "timed out" in e.reason:
raise URLTimeout(src=e, url=url)
raise URLError(str(e), src=e, url=url)
except ValueError, e:
raise URLError(str(e), src=e, url=url)
def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
""" Downloads the content at the given URL (by default it will be cached locally).
Unless unicode=False, the content is returned as a unicode string.
"""
# Filter OAuth parameters from cache id (they will be unique for each request).
if self._parts is None and self.method == GET and "oauth_" not in self._string:
id = self._string
else:
id = repr(self.parts)
id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", "", id)
# Keep a separate cache of unicode and raw download for same URL.
if unicode is True:
id = "u" + id
if cached and id in cache:
if isinstance(cache, dict): # Not a Cache object.
return cache[id]
if unicode is True:
return cache[id]
if unicode is False:
return cache.get(id, unicode=False)
t = time.time()
# Open a connection with the given settings, read it and (by default) cache the data.
try:
data = self.open(timeout, proxy, user_agent, referrer, authentication).read()
except socket.timeout, e:
raise URLTimeout(src=e, url=self.string)
if unicode is True:
data = u(data)
if cached:
cache[id] = data
if throttle:
time.sleep(max(throttle-(time.time()-t), 0))
return data
def read(self, *args, **kwargs):
return self.open(**kwargs).read(*args)
@property
def exists(self, timeout=10):
""" Yields False if the URL generates a HTTP404NotFound error.
"""
try: self.open(timeout)
except HTTP404NotFound:
return False
except HTTPError, URLTimeoutError:
return True
except URLError:
return False
except:
return True
return True
@property
def mimetype(self, timeout=10):
""" Yields the MIME-type of the document at the URL, or None.
MIME is more reliable than simply checking the document extension.
You can then do: URL.mimetype in MIMETYPE_IMAGE.
"""
try:
return self.headers["content-type"].split(";")[0]
except KeyError:
return None
@property
def headers(self, timeout=10):
""" Yields a dictionary with the HTTP response headers.
"""
if self.__dict__["_headers"] is None:
try:
h = dict(self.open(timeout).info())
except URLError:
h = {}
self.__dict__["_headers"] = h
return self.__dict__["_headers"]
@property
def redirect(self, timeout=10):
""" Yields the redirected URL, or None.
"""
if self.__dict__["_redirect"] is None:
try:
r = self.open(timeout).geturl()
except URLError:
r = None
self.__dict__["_redirect"] = r != self.string and r or ""
return self.__dict__["_redirect"] or None
def __str__(self):
return bytestring(self.string)
def __unicode__(self):
# The string representation includes the query attributes with HTTP GET.
P = self.parts
u = []
if P[PROTOCOL]:
u.append("%s://" % P[PROTOCOL])
if P[USERNAME]:
u.append("%s:%s@" % (P[USERNAME], P[PASSWORD]))
if P[DOMAIN]:
u.append(P[DOMAIN])
if P[PORT]:
u.append(":%s" % P[PORT])
if P[PORT] or P[DOMAIN] and not P[PATH] and not P[PAGE]:
u.append("/")
if P[PATH]:
u.append("/%s/" % "/".join(P[PATH]))
if P[PAGE] and len(u) > 0:
u[-1] = u[-1].rstrip("/")
if P[PAGE]:
u.append("/%s" % P[PAGE])
if P[QUERY] and self.method == GET:
u.append("?%s" % self.querystring)
if P[ANCHOR]:
u.append("#%s" % P[ANCHOR])
u = u"".join(u)
u = u.lstrip("/")
return u
def __repr__(self):
return "URL(%s, method=%s)" % (repr(self.string), repr(self.method))
def copy(self):
return URL(self.string, self.method, self.query)
def download(url=u"", method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
    """ Downloads the content at the given URL (by default it will be cached locally).
        With unicode=True the content is returned as a unicode string,
        otherwise as a raw byte string.
        Convenience wrapper, equivalent to URL(url, method, query).download(...).
    """
    return URL(url, method, query).download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)
#url = URL("http://user:pass@example.com:992/animal/bird?species#wings")
#print url.parts
#print url.query
#print url.string
#--- STREAMING URL BUFFER --------------------------------------------------------------------------
def bind(object, method, function):
    """ Attaches the function as a method with the given name to the given object.
        The function's first parameter will receive the object (like self).
        Uses the Python 2-only "new" module (new.instancemethod).
    """
    setattr(object, method, new.instancemethod(function, object))
class Stream(list):
    def __init__(self, url, delimiter="\n", **kwargs):
        """ Buffered stream of data from a given URL.
            Parsed packets accumulate in the list itself; call Stream.update()
            periodically to read more bytes from the socket.
            Keyword arguments are passed on to URL.open() (timeout, proxy, ...).
        """
        self.socket = URL(url).open(**kwargs)
        self.buffer = ""
        self.delimiter = delimiter
    def update(self, bytes=1024):
        """ Reads a number of bytes from the stream.
            If a delimiter is encountered, calls Stream.parse() on the packet.
        """
        packets = []
        self.buffer += self.socket.read(bytes)
        # Split off one complete packet at a time; buffer becomes
        # [packet, remainder] while a delimiter is present.
        self.buffer = self.buffer.split(self.delimiter, 1)
        while len(self.buffer) > 1:
            data = self.buffer[0]
            data = self.parse(data)
            if data is not None:
                packets.append(data)
            # Keep splitting the remainder until no delimiter is left.
            self.buffer = self.buffer[-1]
            self.buffer = self.buffer.split(self.delimiter, 1)
        # Remainder (incomplete packet) is kept for the next update().
        self.buffer = self.buffer[-1]
        self.extend(packets)
        return packets
    def parse(self, data):
        """ Must be overridden in a subclass.
        """
        return data
    def clear(self):
        # Discards all collected packets (the buffer is kept).
        list.__init__(self, [])
def stream(url, delimiter="\n", parse=lambda data: data, **kwargs):
    """ Returns a new Stream with the given parse method.
        - parse: a function that takes one packet of raw data and returns
          the parsed value (return None to discard a packet).
    """
    stream = Stream(url, delimiter, **kwargs)
    # Attach the given function as the instance's parse() method.
    bind(stream, "parse", lambda stream, data: parse(data))
    return stream
#--- FIND URLs -------------------------------------------------------------------------------------
# Functions for parsing URL's and e-mail adresses from strings.
# Punctuation that may precede (0) or follow (1) a URL in running text.
RE_URL_PUNCTUATION = ("\"'{(>", "\"'.,;)}")
RE_URL_HEAD = r"[%s|\[|\s]" % "|".join(RE_URL_PUNCTUATION[0]) # Preceded by space, parenthesis or HTML tag.
RE_URL_TAIL = r"[%s|\]]*[\s|\<]" % "|".join(RE_URL_PUNCTUATION[1]) # Followed by space, punctuation or HTML tag.
RE_URL1 = r"(https?://.*?)" + RE_URL_TAIL # Starts with http:// or https://
RE_URL2 = RE_URL_HEAD + r"(www\..*?\..*?)" + RE_URL_TAIL # Starts with www.
RE_URL3 = RE_URL_HEAD + r"([\w|-]*?\.(com|net|org|edu|de|uk))" + RE_URL_TAIL # Bare domain name.
# Compile once (case-insensitive); reused by find_urls().
RE_URL1, RE_URL2, RE_URL3 = (
    re.compile(RE_URL1, re.I),
    re.compile(RE_URL2, re.I),
    re.compile(RE_URL3, re.I))
def find_urls(string, unique=True):
    """ Returns a list of URLs parsed from the string.
        Works on http://, https://, www. links or domain names ending in .com, .org, .net.
        Links can be preceded by leading punctuation (open parens)
        and followed by trailing punctuation (period, comma, close parens).
        With unique=True (default), duplicates are removed (first occurrence wins).
    """
    string = u(string)
    string = string.replace(u"\u2024", ".")  # "one dot leader" => period.
    # NOTE(review): the replacement below looks like a no-op as displayed; it
    # presumably replaces a non-breaking space (or "&nbsp;") with a plain
    # space — confirm against the upstream source.
    string = string.replace(" ", " ")
    matches = []
    for p in (RE_URL1, RE_URL2, RE_URL3):
        # Pad with spaces so head/tail patterns match at the string boundaries.
        for m in p.finditer(" %s " % string):
            s = m.group(1)
            s = s.split("\">")[0].split("'>")[0] # google.com">Google => google.com
            if not unique or s not in matches:
                matches.append(s)
    return matches
links = find_urls
# Matches e-mail addresses, e.g. tom.de+smedt@clips.ua.ac.be
RE_EMAIL = re.compile(r"[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+")
def find_email(string, unique=True):
    """ Returns a list of e-mail addresses parsed from the string.
        With unique=True (default), duplicates are removed (first occurrence wins).
    """
    # "one dot leader" (u2024) => period, so obfuscated addresses still match.
    string = u(string).replace(u"\u2024", ".")
    matches = []
    for m in RE_EMAIL.finditer(string):
        s = m.group(0)
        if not unique or s not in matches:
            matches.append(s)
    return matches
def find_between(a, b, string):
    """ Returns a list of substrings between a and b in the given string.
        a and b are treated as regular expression patterns
        (case-insensitive, and "." matches newlines too).
    """
    pattern = re.compile("%s(.*?)%s" % (a, b), re.DOTALL | re.I)
    return pattern.findall(string)
#### PLAIN TEXT ####################################################################################
# Functions for stripping HTML tags from strings.
# Block-level HTML elements (start on a new line when rendered).
BLOCK = [
    "title", "h1", "h2", "h3", "h4", "h5", "h6", "p",
    "center", "blockquote", "div", "table", "ul", "ol", "dl", "pre", "code", "form"
]
# Elements that have no closing tag.
SELF_CLOSING = ["br", "hr", "img"]
# Element tag replacements for a stripped version of HTML source with strip_tags().
# Block-level elements are followed by linebreaks,
# list items are preceded by an asterisk ("*").
LIST_ITEM = "*"
# Maps tag name => (text inserted before, text inserted after) when stripping.
blocks = dict.fromkeys(BLOCK+["br", "tr", "td"], ("", "\n\n"))
blocks.update({
    "li": ("%s " % LIST_ITEM, "\n"),
   "img": ("", ""),
    "br": ("", "\n"),
    "th": ("", "\n"),
    "tr": ("", "\n"),
    "td": ("", "\t"),
})
class HTMLParser(sgmllib.SGMLParser):
    """ Generic SGML/HTML parser.
        Subclasses override handle_starttag() / handle_endtag().
    """
    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
    def handle_starttag(self, tag, attrs):
        pass
    def handle_endtag(self, tag):
        pass
    def unknown_starttag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
    def unknown_endtag(self, tag):
        self.handle_endtag(tag)
    def clean(self, html):
        """ Normalizes the HTML source so sgmllib can digest it:
            self-closing tags get a space before "/>", and "<!" declarations
            are escaped, except for known doctypes and comments.
        """
        html = decode_utf8(html)
        html = html.replace("/>", " />")
        # Fix: the literals below were HTML-unescaped into no-op replacements;
        # restored the intended entity-escaping of "<!" declarations.
        html = html.replace("  />", " />")
        html = html.replace("<!", "&lt;!")
        html = html.replace("&lt;!DOCTYPE", "<!DOCTYPE")
        html = html.replace("&lt;!doctype", "<!doctype")
        html = html.replace("&lt;!--", "<!--")
        return html
    def parse_declaration(self, i):
        # We can live without sgmllib's parse_declaration().
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            return i + 1
    def convert_charref(self, name):
        # This fixes a bug in older versions of sgmllib when working with Unicode.
        # Fix: ASCII ends at 127, not 255
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127:
            return
        return chr(n)
class HTMLTagstripper(HTMLParser):
    """ Removes HTML element tags from a string, see HTMLTagstripper.strip(). """
    def __init__(self):
        HTMLParser.__init__(self)
    def strip(self, html, exclude=[], replace=blocks):
        """ Returns the HTML string with all element tags (e.g. <p>) removed.
            - exclude : a list of tags to keep. Element attributes are stripped.
                        To preserve attributes a dict of (tag name, [attribute])-items can be given.
            - replace : a dictionary of (tag name, (replace_before, replace_after))-items.
                        By default, block-level elements are separated with linebreaks.
        """
        if html is None:
            return None
        self._exclude = isinstance(exclude, dict) and exclude or dict.fromkeys(exclude, [])
        self._replace = replace
        self._data = [] # Output fragments, joined on return.
        self.feed(self.clean(html))
        self.close()
        self.reset()
        return "".join(self._data)
    def clean(self, html):
        # Escape ampersands so entities survive tag stripping as literal text
        # (they can be decoded again afterwards, e.g. by plaintext()).
        # Fix: was a no-op replace("&", "&") after the source was HTML-unescaped.
        return HTMLParser.clean(self, html).replace("&", "&amp;")
    def handle_starttag(self, tag, attributes):
        if tag in BLOCK and self._data and self._data[-1][-1:] != "\n":
            # Block-level elements always break to a new line.
            self._data.append("\n")
        if tag in self._exclude:
            # Create the tag attribute string,
            # including attributes defined in the HTMLTagStripper._exclude dict.
            a = len(self._exclude[tag]) > 0 and attributes or []
            a = ["%s=\"%s\"" % (k,v) for k, v in a if k in self._exclude[tag]]
            a = (" "+" ".join(a)).rstrip()
            self._data.append("<%s%s>" % (tag, a))
        if tag in self._replace:
            self._data.append(self._replace[tag][0])
        if tag in self._replace and tag in SELF_CLOSING:
            # Self-closing tags never get an end tag, so emit the "after" text now.
            self._data.append(self._replace[tag][1])
    def handle_endtag(self, tag):
        if tag in self._exclude and self._data and self._data[-1].startswith("<"+tag):
            # Never keep empty elements (e.g. <a></a>).
            self._data.pop(-1); return
        if tag in self._exclude:
            self._data.append("</%s>" % tag)
        if tag in self._replace:
            self._data.append(self._replace[tag][1])
    def handle_data(self, data):
        self._data.append(data.strip("\n\t"))
    def handle_comment(self, comment):
        if "comment" in self._exclude or \
               "!--" in self._exclude:
            self._data.append("<!--%s-->" % comment)
# As a function:
strip_tags = HTMLTagstripper().strip
def strip_element(string, tag, attributes=""):
    """ Removes all elements with the given tagname and attributes from the string.
        Open and close tags are kept in balance.
        No HTML parser is used: strip_element(s, "a", 'class="x"') matches
        '<a class="x">' or '<a href="x" class="x">' but not "<a class='x'>".
    """
    s = string.lower() # Case-insensitive.
    t = tag.strip("</>")
    a = (" " + attributes.lower().strip()).rstrip()
    i = 0  # Index of the next opening tag.
    j = 0  # Index of its matching closing tag.
    while j >= 0:
        #i = s.find("<%s%s" % (t, a), i)
        m = re.search(r"<%s[^\>]*?%s" % (t, a), s[i:])
        i = i + m.start() if m else -1
        j = s.find("</%s>" % t, i+1)
        # Count nested opening tags between i and j; advance j past one closing
        # tag per extra opening tag so open/close stay balanced.
        opened, closed = s[i:j].count("<%s" % t), 1
        while opened > closed and j >= 0:
            k = s.find("</%s>" % t, j+1)
            opened += s[j:k].count("<%s" % t)
            closed += 1
            j = k
        if i < 0: return string
        if j < 0: return string[:i]
        # Cut out [i, j + len("</t>")) and re-lowercase for the next round.
        string = string[:i] + string[j+len(t)+3:]; s=string.lower()
    return string
def strip_between(a, b, string):
    """ Removes anything between (and including) string a and b inside the given string.
        a and b are treated as regular expression patterns
        (case-insensitive, and "." matches newlines too).
    """
    pattern = re.compile("%s.*?%s" % (a, b), re.DOTALL | re.I)
    return pattern.sub("", string)
def strip_javascript(html):
    # Removes <script>...</script> elements, including their content.
    return strip_between("<script.*?>", "</script>", html)
def strip_inline_css(html):
    # Removes <style>...</style> elements, including their content.
    return strip_between("<style.*?>", "</style>", html)
def strip_comments(html):
    # Removes <!-- ... --> comments.
    return strip_between("<!--", "-->", html)
def strip_forms(html):
    # Removes <form>...</form> elements, including their content.
    return strip_between("<form.*?>", "</form>", html)
RE_AMPERSAND = re.compile("\&(?!\#)") # "&" not followed by "#" (e.g., not "&#38;").
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # Entities: "&#38;", "&#x26;" or "&amp;".
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;"
        (otherwise "hello" in italic is displayed).
        Non-string values are returned unchanged.
    """
    # basestring only exists in Python 2; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(string, string_types):
        # Fix: the replacement literals below had been HTML-unescaped into
        # no-ops (e.g. replace("<", "<")); restored the intended entities.
        string = RE_AMPERSAND.sub("&amp;", string)
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
def decode_entities(string):
    """ Decodes HTML entities in the given string ("&lt;" => "<").
        Handles decimal ("&#38;"), hexadecimal ("&#x26;") and named ("&amp;")
        entities; unknown named entities are left untouched.
    """
    # http://snippets.dzone.com/posts/show/4569
    def replace_entity(match):
        hash, hex, name = match.group(1), match.group(2), match.group(3)
        if hash == "#" or name.isdigit():
            if hex == "":
                return unichr(int(name)) # "&#38;" => "&"
            if hex.lower() == "x":
                return unichr(int("0x" + name, 16)) # "&#x26;" => "&"
        else:
            cp = htmlentitydefs.name2codepoint.get(name) # "&amp;" => "&"
            return unichr(cp) if cp else match.group() # "&foo;" => "&foo;"
    if isinstance(string, basestring):
        return RE_UNICODE.subn(replace_entity, string)[0]
    return string
def encode_url(string):
    # Percent-encodes the string for safe use in a URL.
    return urllib.quote_plus(bytestring(string)) # "black/white" => "black%2Fwhite".
def decode_url(string):
    # Inverse of encode_url(): "black%2Fwhite" => "black/white".
    return urllib.unquote_plus(string)
RE_SPACES = re.compile("( |\xa0)+", re.M) # One or more spaces (incl. non-breaking).
RE_TABS = re.compile(r"\t+", re.M)        # One or more tabs.
def collapse_spaces(string, indentation=False, replace=" "):
    """ Returns a string with consecutive spaces collapsed to a single space.
        Whitespace on empty lines and at the end of each line is removed.
        With indentation=True, retains leading whitespace on each line.
    """
    collapsed = []
    for line in string.splitlines():
        n = len(line) - len(line.lstrip()) if indentation else 0
        collapsed.append(line[:n] + RE_SPACES.sub(replace, line[n:]).strip())
    return "\n".join(collapsed)
def collapse_tabs(string, indentation=False, replace=" "):
    """ Returns a string with (consecutive) tabs replaced by a single space.
        Whitespace on empty lines and at the end of each line is removed.
        With indentation=True, retains leading whitespace on each line.
    """
    collapsed = []
    for line in string.splitlines():
        n = len(line) - len(line.lstrip()) if indentation else 0
        collapsed.append(line[:n] + RE_TABS.sub(replace, line[n:]).strip())
    return "\n".join(collapsed)
def collapse_linebreaks(string, threshold=1):
    """ Returns a string with consecutive linebreaks collapsed to at most the given threshold.
        Whitespace at the end of each line is removed.
    """
    limit = "\n" * threshold
    stripped = "\n".join(line.rstrip() for line in string.splitlines())
    return re.sub(limit + r"+", limit, stripped)
def plaintext(html, keep=[], replace=blocks, linebreaks=2, indentation=False):
    """ Returns a string with all HTML tags removed.
        Content inside HTML comments, the <style> tag and the <script> tags is removed.
        - keep        : a list of tags to keep. Element attributes are stripped.
                        To preserve attributes a dict of (tag name, [attribute])-items can be given.
        - replace     : a dictionary of (tag name, (replace_before, replace_after))-items.
                        By default, block-level elements are followed by linebreaks.
        - linebreaks  : the maximum amount of consecutive linebreaks,
        - indentation : keep left line indentation (tabs and spaces)?
    """
    # Idiom fix: use the "in" operator instead of calling keep.__contains__().
    if "script" not in keep:
        html = strip_javascript(html)
    if "style" not in keep:
        html = strip_inline_css(html)
    if "form" not in keep:
        html = strip_forms(html)
    if "comment" not in keep and "!--" not in keep:
        html = strip_comments(html)
    html = html.replace("\r", "\n")
    html = strip_tags(html, exclude=keep, replace=replace)
    html = decode_entities(html)
    html = collapse_spaces(html, indentation)
    html = collapse_tabs(html, indentation)
    html = collapse_linebreaks(html, linebreaks)
    html = html.strip()
    return html
#### SEARCH ENGINE #################################################################################
# Query types for SearchEngine.search().
SEARCH = "search" # Query for pages (i.e. links to websites).
IMAGE = "image" # Query for images.
NEWS = "news" # Query for news items.
# Image size hints (approximate pixel dimensions).
TINY = "tiny" # Image size around 100x100.
SMALL = "small" # Image size around 200x200.
MEDIUM = "medium" # Image size around 500x500.
LARGE = "large" # Image size around 1000x1000.
# Sort order for results.
RELEVANCY = "relevancy" # Sort results by most relevant.
LATEST = "latest" # Sort results by most recent.
class Result(dict):
    def __init__(self, url):
        """ An item in a list of results returned by SearchEngine.search().
            All dictionary entries are available as unicode string attributes.
            - url     : the URL of the referred web content,
            - title   : the title of the content at the URL,
            - text    : the content text,
            - language: the content language,
            - author  : for news items and images, the author,
            - date    : for news items, the publication date.
            Missing keys yield u"" instead of raising (see __getattr__/__getitem__).
        """
        dict.__init__(self)
        self.url = url
    @property
    def txt(self):
        # Shorthand for Result.text.
        return self.text
    @property
    def description(self):
        return self.text # Backwards compatibility.
    def download(self, *args, **kwargs):
        """ Download the content at the given URL.
            By default it will be cached - see URL.download().
        """
        return URL(self.url).download(*args, **kwargs)
    def __getattr__(self, k):
        # Missing attributes yield an empty unicode string instead of raising.
        return self.get(k, u"")
    def __getitem__(self, k):
        # Missing keys yield an empty unicode string instead of raising.
        return self.get(k, u"")
    def __setattr__(self, k, v):
        dict.__setitem__(self, u(k), v is not None and u(v) or u"") # Store strings as unicode.
    def __setitem__(self, k, v):
        dict.__setitem__(self, u(k), v is not None and u(v) or u"")
    def setdefault(self, k, v):
        dict.setdefault(self, u(k), u(v))
    def update(self, *args, **kwargs):
        # Coerce all incoming keys and values to unicode before storing.
        map = dict()
        map.update(*args, **kwargs)
        dict.update(self, [(u(k), u(v)) for k, v in map.items()])
    def __repr__(self):
        return "Result(%s)" % dict.__repr__(self)
class Results(list):
    def __init__(self, source=None, query=None, type=SEARCH, total=0):
        """ A list of results returned from SearchEngine.search().
            - source: the service that yields the results (e.g. GOOGLE, TWITTER).
            - query : the query that yields the results.
            - type  : the query type (SEARCH, IMAGE, NEWS).
            - total : the total result count.
                      This is not the length of the list,
                      but the total number of matches for the given query.
        """
        for attribute, value in (("source", source), ("query", query), ("type", type), ("total", total)):
            setattr(self, attribute, value)
class SearchEngine(object):
    def __init__(self, license=None, throttle=1.0, language=None):
        """ A base class for a web service.
            - license : license key for the API.
            - throttle: delay between requests in seconds (avoid hammering the server).
            Inherited by: Google, Bing, Wikipedia, Twitter, Facebook, Flickr, ...
        """
        self.license  = license
        self.throttle = throttle       # Amount of sleep time after executing a query.
        self.language = language       # Result.language restriction (e.g., "en").
        self.format   = lambda x: x    # Formatter applied to each attribute of each Result.
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Stub: subclasses override this to return a Results list for the query. """
        return Results(source=None, query=query, type=type)
class SearchEngineError(HTTPError):
    pass # Base class for search-engine-specific errors.
class SearchEngineTypeError(SearchEngineError):
    pass # Raised when an unknown type is passed to SearchEngine.search().
class SearchEngineLimitError(SearchEngineError):
    pass # Raised when the query limit for a license is reached.
#--- GOOGLE ----------------------------------------------------------------------------------------
# Google Search is a web search engine owned by Google Inc.
# Google Custom Search is a paid service.
# https://code.google.com/apis/console/
# http://code.google.com/apis/customsearch/v1/overview.html
GOOGLE = "https://www.googleapis.com/customsearch/v1?" # API endpoint.
GOOGLE_LICENSE = api.license["Google"] # Default license key from the bundled api module.
GOOGLE_CUSTOM_SEARCH_ENGINE = "000579440470800426354:_4qo2s0ijsi" # The "cx" engine id.
# Search results can start with: "Jul 29, 2007 ...",
# which is the date of the page parsed by Google from the content.
RE_GOOGLE_DATE = re.compile("^([A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}) {0,1}...")
class Google(SearchEngine):
    """ Google Custom Search API wrapper (paid service).
        See: http://code.google.com/apis/customsearch/v1/overview.html
    """
    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or GOOGLE_LICENSE, throttle, language)
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Google for the given query.
            - type : SEARCH,
            - start: maximum 100 results => start 1-10 with count=10,
            - count: maximum 10.
            There is a daily limit of 10,000 queries. Google Custom Search is a paid service.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        if not query or count < 1 or start < 1 or start > (100 / count):
            return Results(GOOGLE, query, type)
        # 1) Create request URL.
        url = URL(GOOGLE, query={
            "key": self.license or GOOGLE_LICENSE,
            "cx": GOOGLE_CUSTOM_SEARCH_ENGINE,
            "q": query,
            "start": 1 + (start-1) * count,
            "num": min(count, 10),
            "alt": "json"
        })
        # 2) Restrict language.
        if self.language is not None:
            url.query["lr"] = "lang_" + self.language
        # 3) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = json.loads(data)
        if data.get("error", {}).get("code") == 403:
            raise SearchEngineLimitError
        results = Results(GOOGLE, query, type)
        results.total = int(data.get("queries", {}).get("request", [{}])[0].get("totalResults") or 0)
        for x in data.get("items", []):
            r = Result(url=None)
            r.url = self.format(x.get("link"))
            r.title = self.format(x.get("title"))
            # Bug fix: "htmlSnippet" can be missing from an item;
            # guard against None before calling str.replace().
            r.text = self.format((x.get("htmlSnippet") or "").replace("<br> ","").replace("<b>...</b>", "..."))
            r.language = self.language or ""
            # Google Search results can start with a date (parsed from the content):
            r.date = ""
            m = RE_GOOGLE_DATE.match(r.text)
            if m:
                r.date = m.group(1)
                r.text = "..." + r.text[len(m.group(0)):]
            results.append(r)
        return results
    def translate(self, string, input="en", output="fr", **kwargs):
        """ Returns the translation of the given string in the desired output language.
            Google Translate is a paid service, license without billing raises HTTP401Authentication.
        """
        url = URL("https://www.googleapis.com/language/translate/v2?", method=GET, query={
            "key": self.license or GOOGLE_LICENSE,
            "q": string, # 1000 characters maximum
            "source": input,
            "target": output
        })
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(**kwargs)
        except HTTP403Forbidden:
            # Bug fix: Python 2-only "raise E, msg" / "except E, e" syntax
            # replaced with forms valid in both Python 2.6+ and 3 (e was unused).
            raise HTTP401Authentication("Google translate API is a paid service")
        data = json.loads(data)
        data = data.get("data", {}).get("translations", [{}])[0].get("translatedText", "")
        data = decode_entities(data)
        return u(data)
    def identify(self, string, **kwargs):
        """ Returns a (language, confidence)-tuple for the given string.
            Google Translate is a paid service, license without billing raises HTTP401Authentication.
        """
        url = URL("https://www.googleapis.com/language/translate/v2/detect?", method=GET, query={
            "key": self.license or GOOGLE_LICENSE,
            "q": string[:1000]
        })
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(**kwargs)
        except HTTP403Forbidden:
            raise HTTP401Authentication("Google translate API is a paid service")
        data = json.loads(data)
        data = data.get("data", {}).get("detections", [[{}]])[0][0]
        # Robustness: "confidence" may be missing; default to 0.0 instead of crashing.
        data = u(data.get("language")), float(data.get("confidence") or 0.0)
        return data
#--- YAHOO -----------------------------------------------------------------------------------------
# Yahoo! Search is a web search engine owned by Yahoo! Inc.
# Yahoo! BOSS ("Build Your Own Search Service") is a paid service.
# http://developer.yahoo.com/search/
YAHOO = "http://yboss.yahooapis.com/ysearch/"
YAHOO_LICENSE = api.license["Yahoo"]
class Yahoo(SearchEngine):
    """ Yahoo! BOSS ("Build Your Own Search Service") API wrapper (paid service).
    """
    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or YAHOO_LICENSE, throttle, language)
    def _authenticate(self, url):
        """ Signs the given URL with OAuth 1.0 credentials.
            license = (consumer key, consumer secret).
        """
        url.query.update({
            "oauth_version": "1.0",
            "oauth_nonce": oauth.nonce(),
            "oauth_timestamp": oauth.timestamp(),
            "oauth_consumer_key": self.license[0],
            "oauth_signature_method": "HMAC-SHA1"
        })
        url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query,
            method = GET,
            secret = self.license[1]
        )
        return url
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Yahoo for the given query.
            - type : SEARCH, IMAGE or NEWS,
            - start: maximum 1000 results => start 1-100 with count=10, 1000/count,
            - count: maximum 50, or 35 for images.
            There is no daily limit, however Yahoo BOSS is a paid service.
        """
        if type not in (SEARCH, IMAGE, NEWS):
            raise SearchEngineTypeError
        # Idiom: mutually exclusive branches as if/elif instead of repeated ifs.
        if type == SEARCH:
            url = YAHOO + "web"
        elif type == IMAGE:
            url = YAHOO + "images"
        elif type == NEWS:
            url = YAHOO + "news"
        if not query or count < 1 or start < 1 or start > 1000 / count:
            return Results(YAHOO, query, type)
        # 1) Create request URL.
        url = URL(url, method=GET, query={
            "q": query.replace(" ", "+"),
            "start": 1 + (start-1) * count,
            "count": min(count, type==IMAGE and 35 or 50),
            "format": "json"
        })
        # 2) Restrict language.
        if self.language is not None:
            market = locale.market(self.language)
            if market:
                url.query["market"] = market.lower()
        # 3) Authenticate.
        url = self._authenticate(url)
        # 4) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(cached=cached, **kwargs)
        except HTTP401Authentication:
            # Bug fix: Python 2-only "raise E, msg" syntax replaced with the
            # call form, valid in both Python 2 and 3.
            raise HTTP401Authentication("Yahoo %s API is a paid service" % type)
        except HTTP403Forbidden:
            raise SearchEngineLimitError
        data = json.loads(data)
        data = data.get("bossresponse") or {}
        data = data.get({SEARCH:"web", IMAGE:"images", NEWS:"news"}[type], {})
        results = Results(YAHOO, query, type)
        results.total = int(data.get("totalresults") or 0)
        for x in data.get("results", []):
            r = Result(url=None)
            r.url = self.format(x.get("url", x.get("clickurl")))
            r.title = self.format(x.get("title"))
            r.text = self.format(x.get("abstract"))
            r.date = self.format(x.get("date"))
            r.author = self.format(x.get("source"))
            r.language = self.format(x.get("language") and \
                x.get("language").split(" ")[0] or self.language or "")
            results.append(r)
        return results
#--- BING ------------------------------------------------------------------------------------------
# Bing is a web search engine owned by Microsoft.
# Bing Search API is a paid service.
# https://datamarket.azure.com/dataset/5BA839F1-12CE-4CCE-BF57-A49D98D29A44
# https://datamarket.azure.com/account/info
BING = "https://api.datamarket.azure.com/Bing/Search/"
BING_LICENSE = api.license["Bing"]
class Bing(SearchEngine):
    """ Bing Search API wrapper (paid service on Azure DataMarket).
    """
    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or BING_LICENSE, throttle, language)
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Bing for the given query.
            - type : SEARCH, IMAGE or NEWS,
            - start: maximum 1000 results => start 1-100 with count=10, 1000/count,
            - count: maximum 50, or 15 for news,
            - size : for images, either SMALL, MEDIUM or LARGE.
            There is no daily query limit.
        """
        if type not in (SEARCH, IMAGE, NEWS):
            raise SearchEngineTypeError
        # Idiom: mutually exclusive branches as if/elif instead of repeated ifs.
        if type == SEARCH:
            src = "Web"
        elif type == IMAGE:
            src = "Image"
        elif type == NEWS:
            src = "News"
        if not query or count < 1 or start < 1 or start > 1000 / count:
            return Results(BING + src + "?", query, type)
        # 1) Construct request URL.
        url = URL(BING + "Composite", method=GET, query={
            "Sources": "'" + src.lower() + "'",
            "Query": "'" + query + "'",
            "$skip": 1 + (start-1) * count,
            "$top": min(count, type==NEWS and 15 or 50),
            "$format": "json",
        })
        # 2) Restrict image size.
        if size in (TINY, SMALL, MEDIUM, LARGE):
            url.query["ImageFilters"] = {
                TINY: "'Size:Small'",
                SMALL: "'Size:Small'",
                MEDIUM: "'Size:Medium'",
                LARGE: "'Size:Large'" }[size]
        # 3) Restrict language.
        if type in (SEARCH, IMAGE) and self.language is not None:
            url.query["Query"] = url.query["Query"][:-1] + " language: %s'" % self.language
        #if self.language is not None:
        #    market = locale.market(self.language)
        #    if market:
        #        url.query["market"] = market
        # 4) Parse JSON response.
        kwargs["authentication"] = ("", self.license)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(cached=cached, **kwargs)
        except HTTP401Authentication:
            # Bug fix: Python 2-only "raise E, msg" syntax replaced with the
            # call form, valid in both Python 2 and 3.
            raise HTTP401Authentication("Bing %s API is a paid service" % type)
        except HTTP503ServiceUnavailable:
            raise SearchEngineLimitError
        data = json.loads(data)
        data = data.get("d", {})
        data = data.get("results", [{}])[0]
        results = Results(BING, query, type)
        results.total = int(data.get(src+"Total", 0))
        for x in data.get(src, []):
            r = Result(url=None)
            r.url = self.format(x.get("MediaUrl", x.get("Url")))
            r.title = self.format(x.get("Title"))
            r.text = self.format(x.get("Description", x.get("Snippet")))
            r.language = self.language or ""
            r.date = self.format(x.get("DateTime", x.get("Date")))
            r.author = self.format(x.get("Source"))
            results.append(r)
        return results
#--- DUCKDUCKGO ------------------------------------------------------------------------------------
# DuckDuckGo is a privacy-respecting aggregate search engine,
# with information from Wikipedia, WikiHow, Wikia, GitHub, The Free Dictionary, etc.
# https://duckduckgo.com/api.html
# https://duckduckgo.com/params.html
DUCKDUCKGO = "http://api.duckduckgo.com/"
DUCKDUCKGO_LICENSE = api.license["DuckDuckGo"]
# Results from DuckDuckGo have a Result.type with semantic information,
# e.g., "apple" => "plant and plant parts". Known types:
REFERENCE, CATEGORY, DEFINITION = \
"reference", "category", "definition"
class DuckDuckGo(SearchEngine):
    """ DuckDuckGo Instant Answer API wrapper.
        Aggregates reference content (Wikipedia, Wikia, GitHub, The Free Dictionary, ...).
    """
    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or DUCKDUCKGO_LICENSE, throttle, language)
    def search(self, query, type=SEARCH, start=None, count=None, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from DuckDuckGo for the given query.
            Only type=SEARCH is supported; start and count are not used by the API.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        # 1) Construct request URL.
        url = URL(DUCKDUCKGO, method=GET, query={
            "q": query,
            "o": "json"
        })
        # 2) Restrict language.
        if type == SEARCH and self.language is not None:
            url.query["kl"] = self.language
        # 3) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = json.loads(data)
        results = Results(DUCKDUCKGO, query, type)
        # The API does not report a total result count.
        results.total = None
        for x in data.get("Results", []):
            if x.get("FirstURL"):
                r = Result(url=None)
                # Parse official website link.
                r.url = self.format(x.get("FirstURL"))
                r.title = self.format(data.get("Heading"))
                r.text = self.format(data.get("Abstract"))
                r.author = self.format(data.get("AbstractSource"))
                r.type = self.format(REFERENCE)
                results.append(r)
        # Related topics can be flat, or grouped by category name under "Topics".
        for topic in data.get("RelatedTopics", []):
            for x in topic.get("Topics", [topic]):
                r = Result(url=None)
                r.url = x.get("FirstURL")
                # Parse title and type from URL (e.g., http://duckduckgo.com/d/Cats?kl=en).
                m = re.match(r"^http://duckduckgo.com/([a-z]/)?(.*?)(\?|$)", r.url)
                # Parse title: "Cats".
                s1 = m and m.group(2) or "" # Title: "Cats"
                s1 = u(decode_url(s1.encode("utf-8")))
                s1 = s1.strip().replace("_", " ")
                s1 = s1[:1].upper() + s1[1:]
                # Parse description; the part before the first "-" or "," was the link.
                s2 = x.get("Text", "").strip()
                s2 = re.sub(r" +", " ", s2)
                s2 = s2[:1].upper() + s2[1:] or ""
                # Old-style "cond and a or b" conditional (pre-2.5 idiom):
                # re-link the title inside the description text.
                s2 = s2.startswith(s1) \
                    and "<a href=\"%s\">%s</a>%s" % (r.url, s1, s2[len(s1):]) \
                     or re.sub(r"^(.*?)( - | or |, )(.*?)", "<a href=\"%s\">\\1</a>\\2\\3" % r.url, s2)
                # Parse type: "d/" => "definition".
                s3 = m and m.group(1) or ""
                s3 = {"c": CATEGORY, "d": DEFINITION}.get(s3.rstrip("/"), "")
                s3 = topic.get("Name", "").lower() or s3
                s3 = re.sub("^in ", "", s3)
                # Format result.
                r.url = self.format(r.url)
                r.title = self.format(s1)
                r.text = self.format(s2)
                r.type = self.format(s3)
                results.append(r)
        return results
    def answer(self, string, **kwargs):
        """ Returns a DuckDuckGo answer for the given string (e.g., math, spelling, ...)
        """
        url = URL(DUCKDUCKGO, method=GET, query={
            "q": string,
            "o": "json"
        })
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(**kwargs)
        data = json.loads(data)
        # A different response field (e.g., "Definition") can be requested with field="...".
        data = data.get(kwargs.get("field", "Answer"))
        return u(data)
    def spelling(self, string):
        """ Returns a list of spelling suggestions for the given string.
        """
        s = self.answer("spell " + string, cached=True)
        # Suggestions are returned as a list of <a> links.
        s = re.findall(r"<a.*?>(.*?)</a>", s)
        return s
    def definition(self, string):
        """ Returns a dictionary definition for the given string.
        """
        s = self.answer(string, field="Definition", cached=True)
        # Strip the "<term> definition: " prefix and capitalize the first letter
        # (skipping an optional leading '''bold''' marker).
        s = re.sub(r"^.*? definition: ", "", s)
        s = re.sub(r"(^'''.*?''' |^)(.)(.*?)$",
            lambda m: m.group(1) + m.group(2).upper() + m.group(3), s)
        return s
DDG = DuckDuckGo
#for r in DDG().search("cats"):
# print r.url
# print r.title # Can be used as a new query.
# print plaintext(r.text)
# print r.type # REFERENCE, CATEGORY, DEFINITION, "people", "sports" ...
# print
#print DDG().definition("cat")
#print DDG().spelling("catnpa")
#--- TWITTER ---------------------------------------------------------------------------------------
# Twitter is an online social networking service and microblogging service,
# that enables users to post and read text-based messages of up to 140 characters ("tweets").
# https://dev.twitter.com/docs/api/1.1
TWITTER = "https://api.twitter.com/1.1/"
TWITTER_STREAM = "https://stream.twitter.com/1.1/statuses/filter.json"
TWITTER_STATUS = "https://twitter.com/%s/status/%s"
TWITTER_LICENSE = api.license["Twitter"]
TWITTER_HASHTAG = re.compile(r"(\s|^)(#[a-z0-9_\-]+)", re.I) # Word starts with "#".
TWITTER_RETWEET = re.compile(r"(\s|^RT )(@[a-z0-9_\-]+)", re.I) # Word starts with "RT @".
class Twitter(SearchEngine):
    """ Wrapper for the Twitter REST API v1.1 (search, profiles, trends)
        and the Streaming API (see Twitter.stream()).
    """
    def __init__(self, license=None, throttle=0.5, language=None):
        SearchEngine.__init__(self, license or TWITTER_LICENSE, throttle, language)
        # Maps (query, geo, date, page, count) => last seen tweet id,
        # so integer-based pagination keeps working; see the note in search().
        self._pagination = {}
    def _authenticate(self, url):
        """ Signs the given URL with OAuth 1.0 credentials.
            license = (consumer key, consumer secret, (token, token secret)).
        """
        url.query.update({
            "oauth_version": "1.0",
            "oauth_nonce": oauth.nonce(),
            "oauth_timestamp": oauth.timestamp(),
            "oauth_consumer_key": self.license[0],
            "oauth_token": self.license[2][0],
            "oauth_signature_method": "HMAC-SHA1"
        })
        url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query,
            method = GET,
            secret = self.license[1],
            token = self.license[2][1]
        )
        return url
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
        """ Returns a list of results from Twitter for the given query.
            - type : SEARCH,
            - start: Result.id or int,
            - count: maximum 100.
            There is a limit of 150+ queries per 15 minutes.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        if not query or count < 1 or (isinstance(start, (int, long, float)) and start < 1):
            return Results(TWITTER, query, type)
        if not isinstance(start, (int, long, float)):
            # start is a tweet id (string): return tweets older than it.
            id = int(start) - 1 if start and start.isdigit() else ""
        else:
            # start is a page number: translate it to a tweet id via the page cache.
            if start == 1:
                self._pagination = {}
            if start <= 10000:
                id = (query, kwargs.get("geo"), kwargs.get("date"), int(start)-1, count)
                id = self._pagination.get(id, "")
            else:
                id = int(start) - 1
        # 1) Construct request URL.
        url = URL(TWITTER + "search/tweets.json?", method=GET)
        url.query = {
            "q": query,
            "max_id": id,
            "count": min(count, 100)
        }
        # 2) Restrict location with geo=(latitude, longitude, radius).
        #    It can also be a (latitude, longitude)-tuple with default radius "10km".
        if "geo" in kwargs:
            url.query["geocode"] = ",".join((map(str, kwargs.pop("geo")) + ["10km"])[:3])
        # 3) Restrict most recent with date="YYYY-MM-DD".
        #    Only older tweets are returned.
        if "date" in kwargs:
            url.query["until"] = kwargs.pop("date")
        # 4) Restrict language.
        url.query["lang"] = self.language or ""
        # 5) Authenticate.
        url = self._authenticate(url)
        # 6) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(cached=cached, **kwargs)
        except HTTP420Error:
            raise SearchEngineLimitError
        except HTTP429TooMayRequests:
            raise SearchEngineLimitError
        data = json.loads(data)
        results = Results(TWITTER, query, type)
        # The API does not report a total result count.
        results.total = None
        for x in data.get("statuses", []):
            r = Result(url=None)
            r.id = self.format(x.get("id_str"))
            r.url = self.format(TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
            r.text = self.format(x.get("text"))
            r.date = self.format(x.get("created_at"))
            r.author = self.format(x.get("user", {}).get("screen_name"))
            r.profile = self.format(x.get("user", {}).get("profile_image_url")) # Profile picture URL.
            r.language = self.format(x.get("metadata", {}).get("iso_language_code"))
            results.append(r)
        # Twitter.search(start=id, count=10) takes a tweet.id,
        # and returns 10 results that are older than this id.
        # In the past, start took an int used for classic pagination.
        # However, new tweets may arrive quickly,
        # so that by the time Twitter.search(start=2) is called,
        # it will yield results from page 1 (or even newer results).
        # For backward compatibility, we keep page cache,
        # that remembers the last id for a "page" for a given query,
        # when called in a loop.
        #
        # Store the last id retrieved.
        # If search() is called again with start+1, start from this id.
        if isinstance(start, (int, long, float)):
            k = (query, kwargs.get("geo"), kwargs.get("date"), int(start), count)
            if results:
                self._pagination[k] = str(int(results[-1].id) - 1)
            else:
                self._pagination[k] = id
        return results
    def profile(self, query, start=1, count=10, **kwargs):
        """ For the given author id, alias or search query,
            returns a list of (id, handle, name, description, location, picture, tweets)-tuple.
        """
        # 1) Construct request URL.
        url = URL(TWITTER + "users/search.json?", method=GET, query={
            "q": query,
            "page": start,
            "count": count
        })
        url = self._authenticate(url)
        # 2) Parse JSON response.
        kwargs.setdefault("cached", True)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = URL(url).download(**kwargs)
            data = json.loads(data)
        except HTTP400BadRequest:
            return []
        return [(
            u(x.get("id_str", "")),
            u(x.get("screen_name", "")),
            u(x.get("name", "")),
            u(x.get("description", "")),
            u(x.get("location", "")),
            u(x.get("profile_image_url", "")),
            u(x.get("statuses_count", ""))) for x in data]
    def trends(self, **kwargs):
        """ Returns a list with 10 trending topics on Twitter.
        """
        # 1) Construct request URL (id=1 is the worldwide WOEID).
        url = URL("https://api.twitter.com/1.1/trends/place.json?id=1")
        url = self._authenticate(url)
        # 2) Parse JSON response.
        kwargs.setdefault("cached", False)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = url.download(**kwargs)
            data = json.loads(data)
        except HTTP400BadRequest:
            return []
        return [u(x.get("name")) for x in data[0].get("trends", [])]
    def stream(self, query, **kwargs):
        """ Returns a live stream of Result objects for the given query.
        """
        url = URL(TWITTER_STREAM)
        url.query["track"] = query
        url = self._authenticate(url)
        return TwitterStream(url, delimiter="\n", format=self.format, **kwargs)
class TwitterStream(Stream):
    def __init__(self, socket, delimiter="\n", format=lambda s: s, **kwargs):
        """ Buffered stream of realtime tweets, as returned by Twitter.stream().
            Each delimited chunk is parsed into a Result (see parse()).
        """
        kwargs.setdefault("timeout", 30)
        Stream.__init__(self, socket, delimiter, **kwargs)
        self.format = format
    def parse(self, data):
        """ TwitterStream.queue will populate with Result objects as
            TwitterStream.update() is called iteratively.
        """
        data = data.strip()
        if not data:
            return
        tweet = json.loads(data)
        user = tweet.get("user", {})
        r = Result(url=None)
        r.id = self.format(tweet.get("id_str"))
        r.url = self.format(TWITTER_STATUS % (user.get("screen_name"), tweet.get("id_str")))
        r.text = self.format(tweet.get("text"))
        r.date = self.format(tweet.get("created_at"))
        r.author = self.format(user.get("screen_name"))
        r.profile = self.format(user.get("profile_image_url")) # Profile picture URL.
        r.language = self.format(tweet.get("metadata", {}).get("iso_language_code"))
        return r
def author(name):
    """ Returns a query that restricts Twitter search results to tweets by the given author.
        For example: Twitter().search(author("tom_de_smedt"))
    """
    query = "from:%s" % name
    return query
def hashtags(string):
    """ Returns a list of hashtags (words starting with a #hash) from a tweet.
    """
    # Each regex match is a (leading whitespace, "#tag")-tuple; keep the tag.
    return [tag for whitespace, tag in TWITTER_HASHTAG.findall(string)]
def retweets(string):
    """ Returns a list of retweets (words starting with a RT @author) from a tweet.
    """
    # Each regex match is a (leading "RT " marker, "@author")-tuple; keep the author.
    return [handle for marker, handle in TWITTER_RETWEET.findall(string)]
#engine = Twitter()
#for i in range(2):
# for tweet in engine.search("cat nap", cached=False, start=i+1, count=10):
# print
# print tweet.id
# print tweet.url
# print tweet.text
# print tweet.author
# print tweet.profile
# print tweet.language
# print tweet.date
# print hashtags(tweet.text)
# print retweets(tweet.text)
#stream = Twitter().stream("cat")
#for i in range(10):
# print i
# stream.update()
# for tweet in reversed(stream):
# print tweet.id
# print tweet.text
# print tweet.url
# print tweet.language
# print
#stream.clear()
#--- MEDIAWIKI -------------------------------------------------------------------------------------
# MediaWiki is a free wiki software application.
# MediaWiki powers popular websites such as Wikipedia, Wiktionary and Wikia.
# http://www.mediawiki.org/wiki/API:Main_page
# http://en.wikipedia.org/w/api.php
WIKIA = "http://wikia.com"
WIKIPEDIA = "http://wikipedia.com"
WIKIPEDIA_LICENSE = api.license["Wikipedia"]
MEDIAWIKI_LICENSE = None
MEDIAWIKI = "http://{SUBDOMAIN}.{DOMAIN}{API}"
# Pattern for meta links (e.g. Special:RecentChanges).
# http://en.wikipedia.org/wiki/Main_namespace
MEDIAWIKI_NAMESPACE = ["Main", "User", "Wikipedia", "File", "MediaWiki", "Template", "Help", "Category", "Portal", "Book"]
MEDIAWIKI_NAMESPACE += [s+" talk" for s in MEDIAWIKI_NAMESPACE] + ["Talk", "Special", "Media"]
MEDIAWIKI_NAMESPACE += ["WP", "WT", "MOS", "C", "CAT", "Cat", "P", "T", "H", "MP", "MoS", "Mos"]
_mediawiki_namespace = re.compile(r"^("+"|".join(MEDIAWIKI_NAMESPACE)+"):", re.I)
# Pattern to identify disambiguation pages.
MEDIAWIKI_DISAMBIGUATION = "<a href=\"/wiki/Help:Disambiguation\" title=\"Help:Disambiguation\">disambiguation</a> page"
# Pattern to identify references, e.g. [12]
MEDIAWIKI_REFERENCE = r"\s*\[[0-9]{1,3}\]"
# Mediawiki.search(type=ALL).
ALL = "all"
class MediaWiki(SearchEngine):
    """ Generic MediaWiki API wrapper.
        Subclasses (e.g., Wikipedia, Wikia) must override MediaWiki._url
        with the API endpoint of the wiki.
    """
    def __init__(self, license=None, throttle=5.0, language="en"):
        SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
    @property
    def _url(self):
        # Must be overridden in a subclass; see Wikia and Wikipedia.
        return None
    @property
    def MediaWikiArticle(self):
        # Article/Section/Table classes are exposed as properties so that
        # subclasses can substitute their own (e.g., WikipediaArticle).
        return MediaWikiArticle
    @property
    def MediaWikiSection(self):
        return MediaWikiSection
    @property
    def MediaWikiTable(self):
        return MediaWikiTable
    def __iter__(self):
        return self.articles()
    def articles(self, **kwargs):
        """ Returns an iterator over all MediaWikiArticle objects.
            Optional parameters can include those passed to
            MediaWiki.index(), MediaWiki.search() and URL.download().
        """
        for title in self.index(**kwargs):
            yield self.search(title, **kwargs)
    # Backwards compatibility.
    all = articles
    def index(self, namespace=0, start=None, count=100, cached=True, **kwargs):
        """ Returns an iterator over all article titles (for a given namespace id).
        """
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        # Fetch article titles (default) or a custom id.
        id = kwargs.pop("_id", "title")
        # Loop endlessly (= until the last request no longer yields an "apcontinue").
        # See: http://www.mediawiki.org/wiki/API:Allpages
        while start != -1:
            url = URL(self._url, method=GET, query={
                "action": "query",
                "list": "allpages",
                "apnamespace": namespace,
                "apfrom": start or "",
                "aplimit": min(count, 500),
                "apfilterredir": "nonredirects",
                "format": "json"
            })
            data = url.download(cached=cached, **kwargs)
            data = json.loads(data)
            for x in data.get("query", {}).get("allpages", {}):
                if x.get(id):
                    yield x[id]
            start = data.get("query-continue", {}).get("allpages", {})
            start = start.get("apcontinue", start.get("apfrom", -1))
        # Bug fix: the original "raise StopIteration" inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); falling off the end of the
        # generator ends iteration identically in Python 2.
        return
    # Backwards compatibility.
    list = index
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ With type=SEARCH, returns a MediaWikiArticle for the given query (case-sensitive).
            With type=ALL, returns a list of results.
            Each result.title is the title of an article that contains the given query.
        """
        if type not in (SEARCH, ALL, "*"):
            raise SearchEngineTypeError
        if type == SEARCH: # Backwards compatibility.
            return self.article(query, cached=cached, **kwargs)
        if not query or start < 1 or count < 1:
            return Results(self._url, query, type)
        # 1) Construct request URL (e.g., Wikipedia for a given language).
        url = URL(self._url, method=GET, query={
            "action": "query",
            "list": "search",
            "srsearch": query,
            "sroffset": (start - 1) * count,
            "srlimit": min(count, 100),
            "srprop": "snippet",
            "format": "json"
        })
        # 2) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = json.loads(data)
        data = data.get("query", {})
        results = Results(self._url, query, type)
        results.total = int(data.get("searchinfo", {}).get("totalhits", 0))
        for x in data.get("search", []):
            # Renamed local from "u" to avoid shadowing the module-level u() helper.
            link = "http://%s/wiki/%s" % (URL(self._url).domain, x.get("title").replace(" ", "_"))
            r = Result(url=link)
            r.id = self.format(x.get("title"))
            r.title = self.format(x.get("title"))
            r.text = plaintext(self.format(x.get("snippet")))
            results.append(r)
        return results
    def article(self, query, cached=True, **kwargs):
        """ Returns a MediaWikiArticle for the given query.
            The query is case-sensitive, for example on Wikipedia:
            - "tiger" = Panthera tigris,
            - "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
        """
        url = URL(self._url, method=GET, query={
            "action": "parse",
            "page": query.replace(" ", "_"),
            "redirects": 1,
            "format": "json"
        })
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("timeout", 30) # Parsing the article takes some time.
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = json.loads(data)
        data = data.get("parse", {})
        a = self._parse_article(data, query=query)
        a = self._parse_article_sections(a, data)
        a = self._parse_article_section_structure(a)
        if not a.html or "id=\"noarticletext\"" in a.html:
            return None
        return a
    def _parse_article(self, data, **kwargs):
        """ Builds a MediaWikiArticle from the JSON "parse" payload. """
        return self.MediaWikiArticle(
            title = plaintext(data.get("displaytitle", data.get("title", ""))),
            source = data.get("text", {}).get("*", ""),
            disambiguation = data.get("text", {}).get("*", "").find(MEDIAWIKI_DISAMBIGUATION) >= 0,
            links = [x["*"] for x in data.get("links", []) if not _mediawiki_namespace.match(x["*"])],
            categories = [x["*"] for x in data.get("categories", [])],
            external = [x for x in data.get("externallinks", [])],
            media = [x for x in data.get("images", [])],
            redirects = [x for x in data.get("redirects", [])],
            languages = dict([(x["lang"], x["*"]) for x in data.get("langlinks", [])]),
            language = self.language,
            parser = self, **kwargs)
    def _parse_article_sections(self, article, data):
        """ Populates article.sections using the <span class="mw-headline">
            markers in the HTML source.
        """
        # If "References" is a section in the article,
        # the HTML will contain a marker <h*><span class="mw-headline" id="References">.
        # http://en.wikipedia.org/wiki/Section_editing
        t = article.title
        d = 0
        i = 0
        for x in data.get("sections", {}):
            a = x.get("anchor")
            if a:
                p = r"<h.>\s*.*?\s*<span class=\"mw-headline\" id=\"%s\">" % a
                p = re.compile(p)
                m = p.search(article.source, i)
                if m:
                    j = m.start()
                    t = plaintext(t)
                    article.sections.append(self.MediaWikiSection(article,
                        title = t,
                        start = i,
                        stop = j,
                        level = d))
                    t = plaintext(x.get("line", ""))
                    d = int(x.get("level", 2)) - 1
                    i = j
        return article
    def _parse_article_section_structure(self, article):
        """ Nests sections: a section with a higher level becomes a child of
            the nearest preceding section with a lower level.
        """
        for i, s2 in enumerate(article.sections):
            for s1 in reversed(article.sections[:i]):
                if s1.level < s2.level:
                    s2.parent = s1
                    s1.children.append(s2)
                    break
        return article
class MediaWikiArticle(object):
    def __init__(self, title=u"", source=u"", links=None, categories=None, languages=None, disambiguation=False, **kwargs):
        """ A MediaWiki article returned from MediaWiki.search().
            MediaWikiArticle.html contains the raw HTML source;
            MediaWikiArticle.string the plain text content.
        """
        # Bug fix: the original used mutable default arguments ([], {}),
        # which are shared between all calls; use None sentinels instead.
        self.title = title # Article title.
        self.source = source # Article HTML content.
        self.sections = [] # Article sections.
        self.links = links if links is not None else [] # List of titles of linked articles.
        self.categories = categories if categories is not None else [] # As links, prepend "Category:".
        self.external = [] # List of external links.
        self.media = [] # List of linked media (images, sounds, ...)
        self.disambiguation = disambiguation # True when the article is a disambiguation page.
        self.languages = languages if languages is not None else {} # (language, article)-items, e.g. Cat => ("nl", "Kat")
        self.language = kwargs.get("language", "en")
        self.redirects = kwargs.get("redirects", [])
        # Avoid constructing a throwaway MediaWiki() when a parser is given
        # (the original dict.get(k, default) evaluated the default eagerly).
        self.parser = kwargs.get("parser") or MediaWiki()
        for k, v in kwargs.items():
            setattr(self, k, v)
    def _plaintext(self, string, **kwargs):
        """ Strips HTML tags, whitespace and wiki markup from the HTML source, including:
            metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
            This is called internally from MediaWikiArticle.string.
        """
        s = string
        # Strip meta <table> elements.
        s = strip_element(s, "table", "id=\"toc") # Table of contents.
        s = strip_element(s, "table", "class=\"infobox") # Infobox.
        s = strip_element(s, "table", "class=\"navbox") # Navbox.
        s = strip_element(s, "table", "class=\"mbox") # Message.
        s = strip_element(s, "table", "class=\"metadata") # Metadata.
        s = strip_element(s, "table", "class=\".*?wikitable") # Table.
        s = strip_element(s, "table", "class=\"toc") # Table (usually footer).
        # Strip meta <div> elements.
        s = strip_element(s, "div", "id=\"toc") # Table of contents.
        s = strip_element(s, "div", "class=\"infobox") # Infobox.
        s = strip_element(s, "div", "class=\"navbox") # Navbox.
        s = strip_element(s, "div", "class=\"mbox") # Message.
        s = strip_element(s, "div", "class=\"metadata") # Metadata.
        s = strip_element(s, "div", "id=\"annotation") # Annotations.
        s = strip_element(s, "div", "class=\"dablink") # Disambiguation message.
        s = strip_element(s, "div", "class=\"magnify") # Thumbnails.
        s = strip_element(s, "div", "class=\"thumb ") # Thumbnail captions.
        s = strip_element(s, "div", "class=\"barbox") # Bar charts.
        s = strip_element(s, "div", "class=\"noprint") # Hidden from print.
        s = strip_element(s, "sup", "class=\"noprint")
        # Strip absolute elements (don't know their position).
        s = strip_element(s, "div", "style=\"position:absolute")
        # Strip meta <span> elements.
        s = strip_element(s, "span", "class=\"error")
        # Strip math formulas, add [math] placeholder.
        s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s) # LaTex math images.
        s = plaintext(s, **kwargs)
        # Strip [edit] link (language dependent.)
        s = re.sub(r"\[edit\]\s*", "", s)
        s = re.sub(r"\[%s\]\s*" % {
            "en": "edit",
            "es": u"editar código",
            "de": "Bearbeiten",
            "fr": "modifier le code",
            "it": "modifica sorgente",
            "nl": "bewerken",
        }.get(self.language, "edit"), "", s)
        # Insert space before inline references, then collapse double spaces.
        s = s.replace("[", " [").replace("  [", " [")
        # Strip inline references.
        #s = re.sub(r" \[[0-9]+\]", "", s)
        return s
    def plaintext(self, **kwargs):
        return self._plaintext(self.source, **kwargs)
    @property
    def html(self):
        return self.source
    @property
    def string(self):
        return self.plaintext()
    def __repr__(self):
        return "MediaWikiArticle(title=%s)" % repr(self.title)
class MediaWikiSection(object):
    def __init__(self, article, title=u"", start=0, stop=0, level=1):
        """ A (nested) section in the content of a MediaWikiArticle.
        """
        self.article = article # MediaWikiArticle the section is part of.
        self.parent = None # MediaWikiSection the section is part of.
        self.children = [] # MediaWikiSections belonging to this section.
        self.title = title # Section title.
        self._start = start # Section start index in MediaWikiArticle.string.
        self._stop = stop # Section stop index in MediaWikiArticle.string.
        self._level = level # Section depth (main title + intro = level 0).
        self._links = None # Cache for MediaWikiSection.links.
        self._tables = None # Cache for MediaWikiSection.tables.
    def plaintext(self, **kwargs):
        # Wiki markup is stripped by the parent article (see MediaWikiArticle._plaintext).
        return self.article._plaintext(self.source, **kwargs)
    @property
    def source(self):
        # Raw HTML slice of the parent article for this section.
        return self.article.source[self._start:self._stop]
    @property
    def html(self):
        return self.source
    @property
    def string(self):
        return self.plaintext()
    @property
    def content(self):
        # ArticleSection.string, minus the title.
        s = self.plaintext()
        t = plaintext(self.title)
        if s == t or (len(s) > len(t)) and s.startswith(t) and s[len(t)] not in (",", " "):
            return s[len(t):].lstrip()
        return s
    @property
    def links(self, path="/wiki/"):
        """ Yields a list of Wikipedia links in this section. Similar
            in functionality to MediaWikiArticle.links.
        """
        # NOTE(review): extra parameters on a @property getter cannot be passed
        # by attribute access, so path is effectively always "/wiki/".
        if self._links is None:
            # Generator pipeline: parse <a> tags, decode URLs,
            # keep wiki-internal links, drop meta-namespace pages.
            a = HTMLLinkParser().parse(self.source)
            a = (decode_url(a.url) for a in a)
            a = (a[len(path):].replace("_", " ") for a in a if a.startswith(path))
            a = (a for a in a if not _mediawiki_namespace.match(a))
            self._links = sorted(set(a))
        return self._links
    @property
    def tables(self):
        """ Yields a list of MediaWikiTable objects in the section.
        """
        if self._tables is None:
            self._tables = []
            for style in ("wikitable", "sortable wikitable"):
                # b = (opening tag prefix, closing tag) for tables of this style.
                b = "<table class=\"%s\"" % style, "</table>"
                p = self.article._plaintext
                f = find_between
                for s in f(b[0], b[1], self.source):
                    t = self.article.parser.MediaWikiTable(self,
                        title = p((f(r"<caption.*?>", "</caption>", s) + [""])[0]),
                        source = b[0] + s + b[1])
                    # 1) Parse <td> and <th> content and format it as plain text.
                    # 2) Parse <td colspan=""> attribute, duplicate spanning cells.
                    # 3) For <th> in the first row, update MediaWikiTable.headers.
                    for i, row in enumerate(f(r"<tr", "</tr>", s)):
                        r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
                        r1 = (((f(r'colspan="', r'"', v)+[1])[0], v[v.find(">")+1:]) for v in r1)
                        r1 = ((int(n), v) for n, v in r1)
                        # Duplicate each cell n times for colspan=n
                        # (list comprehension used for its append side effect).
                        r2 = []; [[r2.append(p(v)) for j in range(n)] for n, v in r1]
                        if i == 0 and "</th>" in row:
                            t.headers = r2
                        else:
                            t.rows.append(r2)
                    self._tables.append(t)
        return self._tables
    @property
    def level(self):
        return self._level
    # Backwards compatibility.
    depth = level
    def __repr__(self):
        return "MediaWikiSection(title=%s)" % repr(self.title)
class MediaWikiTable(object):
    def __init__(self, section, title=u"", headers=None, rows=None, source=u""):
        """ A <table class="wikitable"> in a MediaWikiSection.
            - section: the MediaWikiSection the table belongs to,
            - title  : the table <caption>, as plain text,
            - headers: list of column headers,
            - rows   : list of rows, each a list of cell strings,
            - source : the raw table HTML.
        """
        self.section = section # MediaWikiSection the table is part of.
        self.source = source # Table HTML.
        self.title = title # Table title.
        # Bug fix: the original headers=[] / rows=[] defaults were shared
        # mutable lists; MediaWikiSection.tables appends to MediaWikiTable.rows,
        # so rows from one table leaked into every other table.
        self.headers = headers if headers is not None else [] # List of table headers.
        self.rows = rows if rows is not None else [] # List of table rows, each a list of cells.
    def plaintext(self, **kwargs):
        # Bug fix: MediaWikiTable has no "article" attribute (AttributeError);
        # reach the owning article through the section.
        return self.section.article._plaintext(self.source, **kwargs)
    @property
    def html(self):
        # Alias of MediaWikiTable.source.
        return self.source
    @property
    def string(self):
        # Table content as plain text.
        return self.plaintext()
    def __repr__(self):
        return "MediaWikiTable(title=%s)" % repr(self.title)
#--- MEDIAWIKI: WIKIPEDIA --------------------------------------------------------------------------
# Wikipedia is a collaboratively edited, multilingual, free Internet encyclopedia.
# Wikipedia depends on MediaWiki.
class Wikipedia(MediaWiki):
    def __init__(self, license=None, throttle=5.0, language="en"):
        """ Mediawiki search engine for http://[language].wikipedia.org.
        """
        SearchEngine.__init__(self, license or WIKIPEDIA_LICENSE, throttle, language)
        self._subdomain = language
    @property
    def _url(self):
        # Fill in the MediaWiki URL template for this Wikipedia subdomain.
        url = MEDIAWIKI
        for placeholder, value in (
                ("{SUBDOMAIN}", self._subdomain),
                ("{DOMAIN}", "wikipedia.org"),
                ("{API}", "/w/api.php")):
            url = url.replace(placeholder, value)
        return url
    @property
    def MediaWikiArticle(self):
        # Article class instantiated by the MediaWiki base class.
        return WikipediaArticle
    @property
    def MediaWikiSection(self):
        # Section class instantiated by the MediaWiki base class.
        return WikipediaSection
    @property
    def MediaWikiTable(self):
        # Table class instantiated by the MediaWiki base class.
        return WikipediaTable
class WikipediaArticle(MediaWikiArticle):
    def download(self, media, **kwargs):
        """ Downloads an item from MediaWikiArticle.media and returns the content.
            Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
            so Wikipedia might not like it that you download media in this way.
            To save the media in a file:
            data = article.download(media)
            open(filename+extension(media),"w").write(data)
        """
        # Scrape the File: description page for the given media file name.
        # NOTE(review): language is read via __dict__.get() with an "en" fallback —
        # presumably to avoid attribute-lookup machinery; confirm against MediaWikiArticle.
        url = "http://%s.wikipedia.org/wiki/File:%s" % (self.__dict__.get("language", "en"), media)
        if url not in cache:
            # Be polite and pause before hitting Wikipedia with a fresh request.
            time.sleep(1)
        data = URL(url).download(**kwargs)
        # Find the direct upload.wikimedia.org URL of the media file in the page HTML.
        data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
        # Returns None when no direct URL could be found.
        data = data and URL("http://" + data.group(0)).download(**kwargs) or None
        return data
    def __repr__(self):
        return "WikipediaArticle(title=%s)" % repr(self.title)
class WikipediaSection(MediaWikiSection):
    """ A section in a WikipediaArticle (see MediaWikiSection). """
    def __repr__(self):
        label = repr(self.title)
        return "WikipediaSection(title=%s)" % label
class WikipediaTable(MediaWikiTable):
    """ A table in a WikipediaSection (see MediaWikiTable). """
    def __repr__(self):
        label = repr(self.title)
        return "WikipediaTable(title=%s)" % label
#article = Wikipedia().search("cat")
#for section in article.sections:
# print " "*(section.level-1) + section.title
#if article.media:
# data = article.download(article.media[2])
# f = open(article.media[2], "w")
# f.write(data)
# f.close()
#
#article = Wikipedia(language="nl").search("borrelnootje")
#print article.string
#for result in Wikipedia().search("\"cat's\"", type="*"):
# print result.title
# print result.text
# print
#--- MEDIAWIKI: WIKTIONARY -------------------------------------------------------------------------
# Wiktionary is a collaborative project to produce a free-content multilingual dictionary.
class Wiktionary(MediaWiki):
    def __init__(self, license=None, throttle=5.0, language="en"):
        """ Mediawiki search engine for http://[language].wiktionary.com.
        """
        SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
        self._subdomain = language
    @property
    def _url(self):
        # Fill in the MediaWiki URL template for this Wiktionary subdomain.
        url = MEDIAWIKI
        for placeholder, value in (
                ("{SUBDOMAIN}", self._subdomain),
                ("{DOMAIN}", "wiktionary.org"),
                ("{API}", "/w/api.php")):
            url = url.replace(placeholder, value)
        return url
    @property
    def MediaWikiArticle(self):
        # Article class instantiated by the MediaWiki base class.
        return WiktionaryArticle
    @property
    def MediaWikiSection(self):
        # Section class instantiated by the MediaWiki base class.
        return WiktionarySection
    @property
    def MediaWikiTable(self):
        # Table class instantiated by the MediaWiki base class.
        return WiktionaryTable
class WiktionaryArticle(MediaWikiArticle):
    """ An article on Wiktionary (see MediaWikiArticle). """
    def __repr__(self):
        label = repr(self.title)
        return "WiktionaryArticle(title=%s)" % label
class WiktionarySection(MediaWikiSection):
    """ A section in a WiktionaryArticle (see MediaWikiSection). """
    def __repr__(self):
        label = repr(self.title)
        return "WiktionarySection(title=%s)" % label
class WiktionaryTable(MediaWikiTable):
    """ A table in a WiktionarySection (see MediaWikiTable). """
    def __repr__(self):
        label = repr(self.title)
        return "WiktionaryTable(title=%s)" % label
#--- MEDIAWIKI: WIKIA ------------------------------------------------------------------------------
# Wikia (formerly Wikicities) is a free web hosting service and a wiki farm for wikis.
# Wikia hosts several hundred thousand wikis using MediaWiki.
# Author: Robert Elwell (2012)
class Wikia(MediaWiki):
    def __init__(self, domain="www", license=None, throttle=5.0, language="en"):
        """ Mediawiki search engine for http://[domain].wikia.com.
        """
        SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
        self._subdomain = domain
    @property
    def _url(self):
        # Fill in the MediaWiki URL template for this Wikia domain.
        s = MEDIAWIKI
        s = s.replace("{SUBDOMAIN}", self._subdomain)
        s = s.replace("{DOMAIN}", "wikia.com")
        s = s.replace("{API}", '/api.php')
        return s
    @property
    def MediaWikiArticle(self):
        # Article class instantiated by the MediaWiki base class.
        return WikiaArticle
    @property
    def MediaWikiSection(self):
        # Section class instantiated by the MediaWiki base class.
        return WikiaSection
    @property
    def MediaWikiTable(self):
        # Table class instantiated by the MediaWiki base class.
        return WikiaTable
    def articles(self, **kwargs):
        """ Generator yielding a WikiaArticle for each article on the wiki.
            With batch=True (default), articles are fetched 10 at a time.
        """
        if kwargs.pop("batch", True):
            # We can take advantage of Wikia's search API to reduce bandwidth.
            # Instead of executing a query to retrieve each article,
            # we query for a batch of (10) articles.
            iterator = self.index(_id="pageid", **kwargs)
            while True:
                batch, done = [], False
                try:
                    # Python 2 iterator protocol (iterator.next()).
                    for i in range(10): batch.append(iterator.next())
                except StopIteration:
                    done = True # No more articles, finish batch and raise StopIteration.
                url = URL(self._url.replace("api.php", "wikia.php"), method=GET, query={
                    "controller": "WikiaSearch",
                    "method": "getPages",
                    "ids": '|'.join(str(id) for id in batch),
                    "format": "json"
                })
                kwargs.setdefault("unicode", True)
                kwargs.setdefault("cached", True)
                # Allow more download time for larger batches.
                kwargs["timeout"] = 10 * (1 + len(batch))
                data = url.download(**kwargs)
                data = json.loads(data)
                for x in (data or {}).get("pages", {}).values():
                    yield WikiaArticle(title=x.get("title", ""), source=x.get("html", ""))
                if done:
                    # NOTE(review): raising StopIteration inside a generator is a
                    # Python 2 idiom; under PEP 479 (Python 3.7+) this raises
                    # RuntimeError — replace with "return" when porting.
                    raise StopIteration
        # Non-batch fallback: one search() query per article title.
        for title in self.index(**kwargs):
            yield self.search(title, **kwargs)
class WikiaArticle(MediaWikiArticle):
    """ An article on a Wikia wiki (see MediaWikiArticle). """
    def __repr__(self):
        label = repr(self.title)
        return "WikiaArticle(title=%s)" % label
class WikiaSection(MediaWikiSection):
    """ A section in a WikiaArticle (see MediaWikiSection). """
    def __repr__(self):
        label = repr(self.title)
        return "WikiaSection(title=%s)" % label
class WikiaTable(MediaWikiTable):
    """ A table in a WikiaSection (see MediaWikiTable). """
    def __repr__(self):
        label = repr(self.title)
        return "WikiaTable(title=%s)" % label
#--- DBPEDIA --------------------------------------------------------------------------------------------------
# DBPedia is a database of structured information mined from Wikipedia.
# DBPedia data is stored as RDF triples: (subject, predicate, object),
# e.g., X is-a Actor, Y is-a Country, Z has-birthplace Country, ...
# DBPedia can be queried using SPARQL:
# http://www.w3.org/TR/rdf-sparql-query/
# A SPARQL query yields rows that match all triples in the WHERE clause.
# A SPARQL query uses ?wildcards in triple subject/object to select fields.
# For example:
# > PREFIX dbo: <http://dbpedia.org/ontology/>
# > SELECT ?actor ?place
# > WHERE {
# > ?actor a dbo:Actor; dbo:birthPlace ?place.
# > ?place a dbo:Country.
# > }
#
# - Each row in the results has an "actor" and a "place" field.
# - The actor is of the class "Actor".
# - The place is of the class "Country".
# - Only actors for which a place of birth is known are retrieved.
#
# The fields are RDF resources, e.g.:
# http://dbpedia.org/resource/Australia
# Author: Kenneth Koch (2013) <kkoch986@gmail.com>
DBPEDIA = "http://dbpedia.org/sparql?" # DBPedia SPARQL endpoint.
SPARQL = "sparql" # Search type for DBPedia.search(type=SPARQL).
class DBPediaQueryError(HTTP400BadRequest):
    """ Raised when the DBPedia endpoint rejects a SPARQL query (HTTP 400). """
    pass
class DBPediaResource(unicode):
    """ An RDF resource URI returned by DBPedia,
        e.g., http://dbpedia.org/resource/Australia.
    """
    @property
    def name(self):
        # http://dbpedia.org/resource/Australia => Australia
        label = re.sub("^http://dbpedia.org/resource/", "", self)
        label = label.replace("_", " ")
        # Resolve %-escapes by round-tripping through UTF-8 bytes.
        label = encode_utf8(label)
        label = decode_url(label)
        label = decode_utf8(label)
        return label
class DBPedia(SearchEngine):
    def __init__(self, license=None, throttle=1.0, language=None):
        SearchEngine.__init__(self, license, throttle, language)
    def search(self, query, type=SPARQL, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
        """ Returns a list of results from DBPedia for the given SPARQL query.
            - type : SPARQL,
            - start: no maximum,
            - count: maximum 1000,
            There is a limit of 10 requests/second.
            Maximum query execution time is 120 seconds.
        """
        if type not in (SPARQL,):
            raise SearchEngineTypeError
        if not query or count < 1 or start < 1:
            return Results(DBPEDIA, query, type)
        # 1) Construct request URL.
        url = URL(DBPEDIA, method=GET)
        url.query = {
            "format": "json",
            # NOTE(review): LIMIT is start * count rather than count,
            # so the page size grows with start — confirm this is intended
            # before changing; OFFSET is the usual (start-1) * count.
            "query": "%s OFFSET %s LIMIT %s" % (query,
                (start-1) * min(count, 1000),
                (start-0) * min(count, 1000)
            )
        }
        # 2) Parse JSON response.
        try:
            data = URL(url).download(cached=cached, timeout=30, **kwargs)
            data = json.loads(data)
        except HTTP400BadRequest, e:
            # Report the endpoint's first error line (usually the parse error).
            raise DBPediaQueryError, e.src.read().splitlines()[0]
        except HTTP403Forbidden:
            raise SearchEngineLimitError
        results = Results(DBPEDIA, url.query, type)
        results.total = None
        # Each SPARQL "binding" is one result row; each ?wildcard is a field.
        for x in data["results"]["bindings"]:
            r = Result(url=None)
            for k in data["head"]["vars"]:
                t1 = x[k].get("type", "literal") # uri | literal | typed-literal
                t2 = x[k].get("datatype", "?") # http://www.w3.org/2001/XMLSchema#float | int | date
                v = x[k].get("value")
                v = self.format(v)
                if t1 == "uri":
                    v = DBPediaResource(v)
                if t2.endswith("float"):
                    v = float(v)
                if t2.endswith("int"):
                    v = int(v)
                # Bypass Result's own __setitem__ formatting.
                dict.__setitem__(r, k, v)
            results.append(r)
        return results
#--- FLICKR ----------------------------------------------------------------------------------------
# Flickr is a popular image hosting and video hosting website.
# http://www.flickr.com/services/api/
FLICKR = "http://api.flickr.com/services/rest/" # Flickr REST API endpoint.
FLICKR_LICENSE = api.license["Flickr"] # Default API key.
INTERESTING = "interesting" # Sort order: Flickr "interestingness".
class Flickr(SearchEngine):
    def __init__(self, license=None, throttle=5.0, language=None):
        SearchEngine.__init__(self, license or FLICKR_LICENSE, throttle, language)
    def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Flickr for the given query.
            Retrieving the URL of a result (i.e. image) requires an additional query.
            - type : SEARCH, IMAGE,
            - start: maximum undefined,
            - count: maximum 500,
            - sort : RELEVANCY, LATEST or INTERESTING.
            There is no daily limit.
        """
        if type not in (SEARCH, IMAGE):
            raise SearchEngineTypeError
        # Cap paging so that start * count never exceeds 500 results.
        if not query or count < 1 or start < 1 or start > 500/count:
            return Results(FLICKR, query, IMAGE)
        # 1) Construct request URL.
        url = FLICKR+"?"
        url = URL(url, method=GET, query={
            "api_key": self.license or "",
            "method": "flickr.photos.search",
            "text": query.replace(" ", "_"),
            "page": start,
            "per_page": min(count, 500),
            "sort": { RELEVANCY: "relevance",
                LATEST: "date-posted-desc",
                INTERESTING: "interestingness-desc" }.get(sort)
        })
        if kwargs.get("copyright", True) is False:
            # With copyright=False, only returns Public Domain and Creative Commons images.
            # http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
            # 5: "Attribution-ShareAlike License"
            # 7: "No known copyright restriction"
            url.query["license"] = "5,7"
        # 2) Parse XML response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = url.download(cached=cached, **kwargs)
        data = xml.dom.minidom.parseString(bytestring(data))
        results = Results(FLICKR, query, IMAGE)
        results.total = int(data.getElementsByTagName("photos")[0].getAttribute("total"))
        for x in data.getElementsByTagName("photo"):
            r = FlickrResult(url=None)
            # Stash the state FlickrResult.url needs for its follow-up query.
            r.__dict__["_id"] = x.getAttribute("id")
            r.__dict__["_size"] = size
            r.__dict__["_license"] = self.license
            r.__dict__["_throttle"] = self.throttle
            r.text = self.format(x.getAttribute("title"))
            r.author = self.format(x.getAttribute("owner"))
            results.append(r)
        return results
class FlickrResult(Result):
    @property
    def url(self):
        # Retrieving the url of a FlickrResult (i.e. image location) requires another query.
        # Note: the "Original" size no longer appears in the response,
        # so Flickr might not like it if we download it.
        url = FLICKR + "?method=flickr.photos.getSizes&photo_id=%s&api_key=%s" % (self._id, self._license)
        data = URL(url).download(throttle=self._throttle, unicode=True)
        data = xml.dom.minidom.parseString(bytestring(data))
        # Map pattern size constants to Flickr size labels (default "Medium").
        size = { TINY: "Thumbnail",
            SMALL: "Small",
            MEDIUM: "Medium",
            LARGE: "Original" }.get(self._size, "Medium")
        for x in data.getElementsByTagName("size"):
            if size == x.getAttribute("label"):
                return x.getAttribute("source")
            if size == "Original":
                # Guess the original's URL by inserting "_o" before the extension.
                # NOTE(review): this rewrite runs for every <size> element and the
                # last rewritten URL is returned after the loop — confirm the
                # "_o" offset arithmetic against actual Flickr source URLs.
                url = x.getAttribute("source")
                url = url[:-len(extension(url))-2] + "_o" + extension(url)
        # Fallback: the last guessed URL (or the getSizes query URL if no sizes).
        return u(url)
#images = Flickr().search("kitten", count=10, size=SMALL)
#for img in images:
# print bytestring(img.description)
# print img.url
#
#data = img.download()
#f = open("kitten"+extension(img.url), "wb")
#f.write(data)
#f.close()
#--- FACEBOOK --------------------------------------------------------------------------------------
# Facebook is a popular online social networking service.
# https://developers.facebook.com/docs/reference/api/
FACEBOOK = "https://graph.facebook.com/" # Facebook Graph API endpoint.
FACEBOOK_LICENSE = api.license["Facebook"] # Default access token.
FEED = "feed" # Facebook timeline.
COMMENTS = "comments" # Facebook comments (for a given news feed post).
LIKES = "likes" # Facebook likes (for a given post or comment).
FRIENDS = "friends" # Facebook friends (for a given profile id).
class FacebookResult(Result):
    """ A result from the Facebook Graph API, identified by its object id. """
    def __repr__(self):
        label = repr(self.id)
        return "Result(id=%s)" % label
class Facebook(SearchEngine):
    def __init__(self, license=None, throttle=1.0, language=None):
        SearchEngine.__init__(self, license or FACEBOOK_LICENSE, throttle, language)
    @property
    def _token(self):
        # Yields the "application access token" (stored in api.license["Facebook"]).
        # With this license, we can view public content.
        # To view more information, we need a "user access token" as license key.
        # This token can be retrieved manually from:
        #  http://www.clips.ua.ac.be/pattern-facebook
        # Or parsed from this URL:
        #  https://graph.facebook.com/oauth/authorize?type=user_agent
        #   &client_id=332061826907464
        #   &redirect_uri=http://www.clips.ua.ac.be/pattern-facebook
        #   &scope=read_stream,user_birthday,user_likes,user_photos,friends_birthday,friends_likes
        # The token is valid for a limited duration.
        # SECURITY NOTE(review): the app id and client secret are hard-coded here,
        # so they are effectively public.
        return URL(FACEBOOK + "oauth/access_token?", query={
            "grant_type": "client_credentials",
            "client_id": "332061826907464",
            "client_secret": "81ff4204e73ecafcd87635a3a3683fbe"
        }).download().split("=")[1]
    def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
        """ Returns a list of results from Facebook public status updates for the given query.
            - query: string, or Result.id for NEWS and COMMENTS,
            - type : SEARCH,
            - start: 1,
            - count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
            There is an hourly limit of +-600 queries (actual amount undisclosed).
        """
        # Facebook.search(type=SEARCH) returns public posts + author.
        # Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
        # Facebook.search(type=COMMENTS) returns comments for the given post id.
        # Facebook.search(type=LIKES) returns authors for the given author, post or comments.
        # Facebook.search(type=FRIENDS) returns authors for the given author.
        # An author is a Facebook user or other entity (e.g., a product page).
        if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
            raise SearchEngineTypeError
        # Per-type page size cap ("max" shadows the builtin here).
        if type in (SEARCH, NEWS):
            max = 100
        if type in (COMMENTS, LIKES):
            max = 1000
        if type in (FRIENDS,):
            max = 10000
        if not query or start < 1 or count < 1:
            return Results(FACEBOOK, query, SEARCH)
        if isinstance(query, FacebookResult):
            query = query.id
        # 1) Construct request URL.
        if type == SEARCH:
            url = FACEBOOK + type
            url = URL(url, method=GET, query={
                "q": query,
                "type": "post",
                "access_token": self.license,
                "offset": (start-1) * min(count, max),
                "limit": (start-0) * min(count, max)
            })
        if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
            # Graph API object URL: /<id>/<connection>; "news" maps to "feed".
            url = FACEBOOK + (u(query) or "me").replace(FACEBOOK, "") + "/" + type.replace("news", "feed")
            url = URL(url, method=GET, query={
                "access_token": self.license,
                "offset": (start-1) * min(count, max),
                "limit": (start-0) * min(count, max),
            })
        if type in (SEARCH, NEWS, FEED):
            # Request only the fields we parse below (plus like/comment counts).
            url.query["fields"] = ",".join((
                "id", "from", "name", "story", "message", "link", "picture", "created_time",
                "comments.limit(1).summary(true)",
                "likes.limit(1).summary(true)"
            ))
        # 2) Parse JSON response.
        kwargs.setdefault("cached", cached)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        try:
            data = URL(url).download(**kwargs)
        except HTTP400BadRequest:
            # The Graph API responds 400 to an invalid/expired token.
            raise HTTP401Authentication
        data = json.loads(data)
        results = Results(FACEBOOK, query, SEARCH)
        results.total = None
        for x in data.get("data", []):
            r = FacebookResult(url=None)
            r.id = self.format(x.get("id"))
            r.url = self.format(x.get("link"))
            r.text = self.format(x.get("story", x.get("message", x.get("name"))))
            r.date = self.format(x.get("created_time"))
            # Store likes & comments count as int, author as (id, name)-tuple
            # (by default Result will store everything as Unicode strings).
            s = lambda r, k, v: dict.__setitem__(r, k, v)
            # NOTE(review): "+ 0" presumably asserts/keeps an int — this requires
            # self.format() to return a number here; verify against its definition.
            s(r, "likes", \
                self.format(x.get("like_count", x.get("likes", {}).get("summary", {}).get("total_count", 0))) + 0)
            s(r, "comments", \
                self.format(x.get("comments", {}).get("summary", {}).get("total_count", 0)) + 0)
            s(r, "author", (
                u(self.format(x.get("from", {}).get("id", ""))),
                u(self.format(x.get("from", {}).get("name", "")))))
            # Set Result.text to author name for likes.
            if type in (LIKES, FRIENDS):
                s(r, "author", (
                    u(self.format(x.get("id", ""))),
                    u(self.format(x.get("name", "")))))
                r.text = \
                    self.format(x.get("name"))
            # Set Result.url to full-size image.
            if re.match(r"^http(s?)://www\.facebook\.com/photo", r.url) is not None:
                r.url = x.get("picture", "").replace("_s", "_b") or r.url
            # Set Result.title to object id.
            if re.match(r"^http(s?)://www\.facebook\.com/", r.url) is not None:
                r.title = r.url.split("/")[-1].split("?")[0]
            results.append(r)
        return results
    def profile(self, id=None, **kwargs):
        """ For the given author id or alias,
            returns a (id, name, date of birth, gender, locale, likes)-tuple.
        """
        # 1) Construct request URL.
        url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
        url = URL(url, method=GET, query={"access_token": self.license})
        kwargs.setdefault("cached", True)
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        # 2) Parse JSON response.
        try:
            data = URL(url).download(**kwargs)
            data = json.loads(data)
        except HTTP400BadRequest:
            raise HTTP401Authentication
        return (
            u(data.get("id", "")),
            u(data.get("name", "")),
            u(data.get("birthday", "")),
            u(data.get("gender", "")[:1]), # "m" or "f".
            u(data.get("locale", "")),
            int(data.get("likes", 0)) # For pages.
        )
    # Alias: Facebook.page(id) == Facebook.profile(id).
    page = profile
#--- PRODUCT REVIEWS -------------------------------------------------------------------------------
# ProductWiki is an open web-based product information resource.
# http://connect.productwiki.com/connect-api/
PRODUCTWIKI = "http://api.productwiki.com/connect/api.aspx" # ProductWiki API endpoint.
PRODUCTWIKI_LICENSE = api.license["ProductWiki"] # Default API key.
class ProductWiki(SearchEngine):
    def __init__(self, license=None, throttle=5.0, language=None):
        SearchEngine.__init__(self, license or PRODUCTWIKI_LICENSE, throttle, language)
    def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
        """ Returns a list of results from Productwiki for the given query.
            Each Result.reviews is a list of (review, score)-items.
            - type : SEARCH,
            - start: maximum undefined,
            - count: 20,
            - sort : RELEVANCY.
            There is no daily limit.
        """
        if type != SEARCH:
            raise SearchEngineTypeError
        if not query or start < 1 or count < 1:
            return Results(PRODUCTWIKI, query, type)
        # 1) Construct request URL.
        url = PRODUCTWIKI+"?"
        url = URL(url, method=GET, query={
            "key": self.license or "",
            "q": query,
            "page": start,
            "op": "search",
            "fields": "proscons", # "description,proscons" is heavy.
            "format": "json"
        })
        # 2) Parse JSON response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        data = URL(url).download(cached=cached, **kwargs)
        data = json.loads(data)
        results = Results(PRODUCTWIKI, query, type)
        results.total = None
        for x in data.get("products", [])[:count]:
            r = Result(url=None)
            # __dict__ assignment bypasses Result's Unicode formatting.
            r.__dict__["title"] = u(x.get("title"))
            r.__dict__["text"] = u(x.get("text"))
            r.__dict__["reviews"] = []
            reviews = x.get("community_review") or {}
            # Pros score +1 by default, cons score -1 by default.
            for p in reviews.get("pros", []):
                r.reviews.append((p.get("text", ""), int(p.get("score")) or +1))
            for p in reviews.get("cons", []):
                r.reviews.append((p.get("text", ""), int(p.get("score")) or -1))
            r.__dict__["score"] = int(sum(score for review, score in r.reviews))
            results.append(r)
        # Highest score first.
        results.sort(key=lambda r: r.score, reverse=True)
        return results
# Backwards compatibility.
Products = ProductWiki # Deprecated alias for ProductWiki.
#for r in ProductWiki().search("tablet"):
# print r.title
# print r.score
# print r.reviews
# print
#--- NEWS FEED -------------------------------------------------------------------------------------
# Based on the Universal Feed Parser by Mark Pilgrim:
# http://www.feedparser.org/
class Newsfeed(SearchEngine):
    def __init__(self, license=None, throttle=1.0, language=None):
        SearchEngine.__init__(self, license, throttle, language)
    def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
        """ Returns a list of results from the given RSS or Atom newsfeed URL.
            The "query" argument is the feed URL itself.
        """
        if type != NEWS:
            raise SearchEngineTypeError
        if not query or start < 1 or count < 1:
            return Results(query, query, NEWS)
        # 1) Construct request URL.
        # 2) Parse RSS/Atom response.
        kwargs.setdefault("unicode", True)
        kwargs.setdefault("throttle", self.throttle)
        # Optional custom tags to copy onto each Result (see loop below).
        tags = kwargs.pop("tags", [])
        data = URL(query).download(cached=cached, **kwargs)
        data = feedparser.parse(bytestring(data))
        results = Results(query, query, NEWS)
        results.total = None
        for x in data["entries"][:count]:
            # Prefer full content blocks; fall back to the entry summary.
            s = "\n\n".join([v.get("value") for v in x.get("content", [])]) or x.get("summary")
            r = Result(url=None)
            r.id = self.format(x.get("id"))
            r.url = self.format(x.get("link"))
            r.title = self.format(x.get("title"))
            r.text = self.format(s)
            r.date = self.format(x.get("updated"))
            r.author = self.format(x.get("author"))
            # Entry-level language wins over the feed-level language.
            r.language = self.format(x.get("content") and \
                x.get("content")[0].get("language") or \
                data.get("language"))
            for tag in tags:
                # Parse custom tags.
                # Newsfeed.search(tags=["dc:identifier"]) => Result.dc_identifier.
                tag = tag.replace(":", "_")
                r[tag] = self.format(x.get(tag))
            results.append(r)
        return results
# Example newsfeed URLs for use with Newsfeed.search().
feeds = {
    "Nature": "http://feeds.nature.com/nature/rss/current",
    "Science": "http://www.sciencemag.org/rss/podcast.xml",
    "Herald Tribune": "http://www.iht.com/rss/frontpage.xml",
    "TIME": "http://feeds.feedburner.com/time/topstories",
    "CNN": "http://rss.cnn.com/rss/edition.rss",
}
#for r in Newsfeed().search(feeds["Nature"]):
# print r.title
# print r.author
# print r.url
# print plaintext(r.text)
# print
#--- QUERY -----------------------------------------------------------------------------------------
def query(string, service=GOOGLE, **kwargs):
""" Returns the list of search query results from the given service.
For service=WIKIPEDIA, this is a single WikipediaArticle or None.
"""
service = service.lower()
if service in (GOOGLE, "google", "g"):
engine = Google
if service in (YAHOO, "yahoo", "y!"):
engine = Yahoo
if service in (BING, "bing"):
engine = Bing
if service in (DUCKDUCKGO, "duckduckgo", "ddg"):
engine = DuckDuckGo
if service in (TWITTER, "twitter", "tw"):
engine = Twitter
if service in (FACEBOOK, "facebook", "fb"):
engine = Facebook
if service in (WIKIPEDIA, "wikipedia", "wp"):
engine = Wikipedia
if service in (WIKIA, "wikia"):
engine = Wikia
if service in (DBPEDIA, "dbpedia", "dbp"):
engine = DBPedia
if service in (FLICKR, "flickr"):
engine = Flickr
try:
kw = {}
for a in ("license", "throttle", "language"):
if a in kwargs:
kw[a] = kwargs.pop(a)
return engine(kw).search(string, **kwargs)
except UnboundLocalError:
raise SearchEngineError, "unknown search engine '%s'" % service
#--- WEB SORT --------------------------------------------------------------------------------------
# Service name => SearchEngine subclass, used by sort() below.
SERVICES = {
    GOOGLE : Google,
    YAHOO : Yahoo,
    BING : Bing,
    TWITTER : Twitter,
    WIKIPEDIA : Wikipedia,
    WIKIA : Wikia,
    FLICKR : Flickr,
    FACEBOOK : Facebook
}
def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, prefix=False, **kwargs):
    """ Returns a list of (percentage, term)-tuples for the given list of terms.
        Sorts the terms in the list according to search result count.
        When a context is defined, sorts according to relevancy to the context, e.g.:
        sort(terms=["black", "green", "red"], context="Darth Vader") =>
        yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
        - terms  : list of search terms,
        - context: term used for sorting,
        - service: web service name (GOOGLE, YAHOO, BING),
        - license: web service license id,
        - strict : when True the query constructed from term + context is wrapped in quotes,
        - prefix : when True the context is placed before each term instead of after it.
    """
    # Rebind "service" to an engine instance (SearchEngine if name unknown).
    service = SERVICES.get(service, SearchEngine)(license, language=kwargs.pop("language", None))
    R = []
    for word in terms:
        q = prefix and (context + " " + word) or (word + " " + context)
        # Bug fix: str.strip() returns a new string; the original discarded it,
        # leaving a stray space when context is empty.
        q = q.strip()
        q = strict and "\"%s\"" % q or q
        # Bug fix: "service" is an engine *instance* here, so comparing it to the
        # WIKIPEDIA / WIKIA constants was always False; check the class instead.
        # MediaWiki engines use the "*" search type.
        t = isinstance(service, (Wikipedia, Wikia)) and "*" or SEARCH
        r = service.search(q, type=t, count=1, **kwargs)
        R.append(r)
    # Normalize result counts to percentages (guard against a zero total).
    s = float(sum([r.total or 1 for r in R])) or 1.0
    R = [((r.total or 1)/s, r.query) for r in R]
    R = sorted(R, reverse=kwargs.pop("reverse", True))
    return R
#print sort(["black", "happy"], "darth vader", GOOGLE)
#### DOCUMENT OBJECT MODEL #########################################################################
# The Document Object Model (DOM) is a cross-platform and language-independent convention
# for representing and interacting with objects in HTML, XHTML and XML documents.
# The pattern.web DOM can be used to traverse HTML source code as a tree of nested elements.
# The pattern.web DOM is based on Beautiful Soup.
# Beautiful Soup is wrapped in DOM, Element and Text classes, resembling the Javascript DOM.
# Beautiful Soup can also be used directly, since it is imported here.
# L. Richardson (2004), http://www.crummy.com/software/BeautifulSoup/
# The BeautifulSoup classes that a Node may wrap directly.
SOUP = (
    BeautifulSoup.BeautifulSoup,
    BeautifulSoup.Tag,
    BeautifulSoup.NavigableString,
    BeautifulSoup.Comment
)
# DOM node type constants (Node.type).
NODE, TEXT, COMMENT, ELEMENT, DOCUMENT = \
    "node", "text", "comment", "element", "document"
#--- NODE ------------------------------------------------------------------------------------------
class Node(object):
    def __init__(self, html, type=NODE, **kwargs):
        """ The base class for Text, Comment and Element.
            All DOM nodes can be navigated in the same way (e.g. Node.parent, Node.children, ...)
        """
        self.type = type
        # Parse the string with BeautifulSoup, unless it is already a soup object.
        self._p = not isinstance(html, SOUP) and BeautifulSoup.BeautifulSoup(u(html), **kwargs) or html
    @property
    def _beautifulSoup(self):
        # If you must, access the BeautifulSoup object with Node._beautifulSoup.
        return self._p
    def __eq__(self, other):
        # Two Node objects containing the same BeautifulSoup object, are the same.
        return isinstance(other, Node) and hash(self._p) == hash(other._p)
    def _wrap(self, x):
        # Navigating to other nodes yields either Text, Element or None.
        # NOTE(review): the isinstance order matters — Comment and Declaration
        # are tested before the more general NavigableString.
        if isinstance(x, BeautifulSoup.Comment):
            return Comment(x)
        if isinstance(x, BeautifulSoup.Declaration):
            return Text(x)
        if isinstance(x, BeautifulSoup.NavigableString):
            return Text(x)
        if isinstance(x, BeautifulSoup.Tag):
            return Element(x)
    @property
    def parent(self):
        # Parent node, or None at the document root.
        return self._wrap(self._p.parent)
    @property
    def children(self):
        # Direct child nodes; text-only nodes have no "contents" attribute.
        return hasattr(self._p, "contents") and [self._wrap(x) for x in self._p.contents] or []
    @property
    def html(self):
        # The node serialized back to HTML (alias of Node.source).
        return self.__unicode__()
    @property
    def source(self):
        return self.__unicode__()
    @property
    def next_sibling(self):
        return self._wrap(self._p.nextSibling)
    @property
    def previous_sibling(self):
        return self._wrap(self._p.previousSibling)
    next, previous = next_sibling, previous_sibling
    def traverse(self, visit=lambda node: None):
        """ Executes the visit function on this node and each of its child nodes.
        """
        # Depth-first, pre-order.
        visit(self); [node.traverse(visit) for node in self.children]
    def __nonzero__(self):
        # Python 2 truth protocol: a Node is always truthy, even with no children.
        return True
    def __len__(self):
        return len(self.children)
    def __iter__(self):
        return iter(self.children)
    def __getitem__(self, index):
        return self.children[index]
    def __repr__(self):
        return "Node(type=%s)" % repr(self.type)
    def __str__(self):
        return bytestring(self.__unicode__())
    def __unicode__(self):
        return u(self._p)
#--- TEXT ------------------------------------------------------------------------------------------
class Text(Node):
    """ Text represents a chunk of text without formatting in a HTML document.
        For example: "the <b>cat</b>" is parsed to [Text("the"), Element("cat")].
    """
    def __init__(self, string):
        # A Text node simply tags the wrapped string with type=TEXT.
        Node.__init__(self, string, type=TEXT)
    def __repr__(self):
        wrapped = repr(self._p)
        return "Text(%s)" % wrapped
class Comment(Text):
    """ Comment represents a comment in the HTML source code.
        For example: "<!-- comment -->".
    """
    def __init__(self, string):
        # Note: bypasses Text.__init__ so the node type is COMMENT, not TEXT.
        Node.__init__(self, string, type=COMMENT)
    def __repr__(self):
        wrapped = repr(self._p)
        return "Comment(%s)" % wrapped
#--- ELEMENT ---------------------------------------------------------------------------------------
class Element(Node):
    def __init__(self, html):
        """ Element represents an element or tag in the HTML source code.
            For example: "<b>hello</b>" is a "b"-Element containing a child Text("hello").
        """
        Node.__init__(self, html, type=ELEMENT)
    @property
    def tagname(self):
        # The tag name, e.g. "div".
        return self._p.name
    tag = tagName = tagname
    @property
    def attributes(self):
        """ A dictionary of tag attribute names and values.
        """
        # NOTE(review): _getAttrMap() is a BeautifulSoup 3 internal —
        # the cached map breaks if attributes are modified afterwards.
        if "_attributes" not in self.__dict__:
            self._attributes = self._p._getAttrMap()
        return self._attributes
    attr = attrs = attributes
    @property
    def id(self):
        # The value of the id="..." attribute, or None.
        return self.attributes.get("id")
    @property
    def content(self):
        """ Yields the element content as a unicode string.
        """
        # Inner HTML: children serialized, without this element's own tag.
        return u"".join([u(x) for x in self._p.contents])
    string = content
    @property
    def source(self):
        """ Yields the HTML source as a unicode string (tag + content).
        """
        return u(self._p)
    html = source
    def get_elements_by_tagname(self, v):
        """ Returns a list of nested Elements with the given tag name.
            The tag name can include a class (e.g. div.header) or an id (e.g. div#content).
        """
        # "div#content" => findAll by tag and id.
        if isinstance(v, basestring) and "#" in v:
            v1, v2 = v.split("#")
            # "*" or "" means any tag: findAll accepts True as a wildcard.
            v1 = v1 in ("*","") or v1.lower()
            return [Element(x) for x in self._p.findAll(v1, id=v2)]
        # "div.header" => findAll by tag and CSS class.
        if isinstance(v, basestring) and "." in v:
            v1, v2 = v.split(".")
            v1 = v1 in ("*","") or v1.lower()
            return [Element(x) for x in self._p.findAll(v1, v2)]
        return [Element(x) for x in self._p.findAll(v in ("*","") or v.lower())]
    by_tag = getElementsByTagname = get_elements_by_tagname
    def get_element_by_id(self, v):
        """ Returns the first nested Element with the given id attribute value.
        """
        # Returns None when no element matches.
        return ([Element(x) for x in self._p.findAll(id=v, limit=1) or []]+[None])[0]
    by_id = getElementById = get_element_by_id
    def get_elements_by_classname(self, v):
        """ Returns a list of nested Elements with the given class attribute value.
        """
        # findAll(True, v) matches any tag with CSS class v.
        return [Element(x) for x in (self._p.findAll(True, v))]
    by_class = getElementsByClassname = get_elements_by_classname
    def get_elements_by_attribute(self, **kwargs):
        """ Returns a list of nested Elements with the given attribute value.
        """
        return [Element(x) for x in (self._p.findAll(True, attrs=kwargs))]
    by_attribute = by_attr = getElementsByAttribute = get_elements_by_attribute
    def __call__(self, selector):
        """ Returns a list of nested Elements that match the given CSS selector.
            For example: Element("div#main p.comment a:first-child") matches:
        """
        return SelectorChain(selector).search(self)
    def __getattr__(self, k):
        # Fall back to tag attributes, e.g. element.href.
        if k in self.__dict__:
            return self.__dict__[k]
        if k in self.attributes:
            return self.attributes[k]
        raise AttributeError, "'Element' object has no attribute '%s'" % k
    def __contains__(self, v):
        # "x in element" tests the inner HTML (Elements compare by content).
        if isinstance(v, Element):
            v = v.content
        return v in self.content
    def __repr__(self):
        return "Element(tag=%s)" % repr(self.tagname)
#--- DOCUMENT --------------------------------------------------------------------------------------
class Document(Element):
    def __init__(self, html, **kwargs):
        """ Document is the top-level element in the Document Object Model.
            It contains nested Element, Text and Comment nodes.
        """
        # Aliases for BeautifulSoup optional parameters:
        # "self_closing" is accepted as a friendlier name for "selfClosingTags".
        kwargs["selfClosingTags"] = kwargs.pop("self_closing", kwargs.get("selfClosingTags"))
        Node.__init__(self, u(html).strip(), type=DOCUMENT, **kwargs)
    @property
    def declaration(self):
        """ Yields the <!doctype> declaration, as a TEXT Node or None.
        """
        for child in self.children:
            if isinstance(child._p, BeautifulSoup.Declaration):
                return child
    @property
    def head(self):
        # Wraps the parser's <head> node (see Node._wrap).
        return self._wrap(self._p.head)
    @property
    def body(self):
        # Wraps the parser's <body> node (see Node._wrap).
        return self._wrap(self._p.body)
    @property
    def tagname(self):
        # The document root itself has no tag name.
        return None
    tag = tagname
    def __repr__(self):
        return "Document()"
# Alias: DOM is shorthand for Document (the root of the parse tree).
DOM = Document
#article = Wikipedia().search("Document Object Model")
#dom = DOM(article.html)
#print dom.get_element_by_id("References").source
#print [element.attributes["href"] for element in dom.get_elements_by_tagname("a")]
#print dom.get_elements_by_tagname("p")[0].next.previous.children[0].parent.__class__
#print
#--- DOM CSS SELECTORS -----------------------------------------------------------------------------
# CSS selectors are pattern matching rules (or selectors) to select elements in the DOM.
# CSS selectors may range from simple element tag names to rich contextual patterns.
# http://www.w3.org/TR/CSS2/selector.html
# "*" = <div>, <p>, ... (all elements)
# "*#x" = <div id="x">, <p id="x">, ... (all elements with id="x")
# "div#x" = <div id="x"> (<div> elements with id="x")
# "div.x" = <div class="x"> (<div> elements with class="x")
# "div[class='x']" = <div class="x"> (<div> elements with attribute "class"="x")
# "div:first-child" = <div><a>1st<a><a></a></div> (first child inside a <div>)
# "div a" = <div><p><a></a></p></div> (all <a>'s inside a <div>)
# "div, a" = <div>, <a> (all <a>'s and <div> elements)
# "div + a" = <div></div><a></a> (all <a>'s directly preceded by <div>)
# "div > a" = <div><a></a></div> (all <a>'s directly inside a <div>)
# "div < a" (all <div>'s directly containing an <a>)
# Selectors are case-insensitive.
class Selector(object):
    def __init__(self, s):
        """ A simple CSS selector is a type (e.g., "p") or universal ("*") selector
            followed by id selectors, attribute selectors, or pseudo-elements.
            Parses into: Selector.tag, .id, .classes, .pseudo, .attributes.
        """
        self.string = s
        s = s.strip()
        s = s.lower()
        # A selector starting with "#", "." or ":" implies the universal tag "*".
        s = s.startswith(("#", ".", ":")) and "*" + s or s
        s = s.replace("#", " #") + " #" # #id (the trailing " #" guarantees a match below)
        s = s.replace(".", " .") # .class
        s = s.replace(":", " :") # :pseudo-element
        s = s.replace("[", " [") # [attribute="value"]
        s = s.split(" ")
        self.tag, self.id, self.classes, self.pseudo, self.attributes = (
            s[0],
            [x[1:] for x in s if x[0] == "#"][0],
            set([x[1:] for x in s if x[0] == "."]),
            set([x[1:] for x in s if x[0] == ":"]),
            dict(self._parse_attribute(x) for x in s if x[0] == "[")
        )
    def _parse_attribute(self, s):
        """ Returns an (attribute, value)-tuple for the given attribute selector.
            The value is True (attribute presence), a plain string (exact match),
            or a compiled regex for the ~=, |=, ^=, $= and *= operators.
        """
        s = s.strip("[]")
        s = s.replace("'", "")
        s = s.replace('"', "")
        s = s.replace("<!space>", " ") # Placeholder inserted by SelectorChain.
        s = re.sub(r"(\~|\||\^|\$|\*)\=", "=\\1", s)
        s = s.split("=") + [True]
        s = s[:2]
        if s[1] is not True and s[1].startswith(("~", "|", "^", "$", "*")):
            p, s[1] = s[1][0], s[1][1:]
            if p == "~": r = r"(^|\s)%s(\s|$)"
            if p == "|": r = r"^%s(-|$)" # XXX doesn't work with spaces.
            if p == "^": r = r"^%s"
            if p == "$": r = r"%s$"
            if p == "*": r = r"%s"
            s[1] = re.compile(r % s[1], re.I)
        return s[:2]
    def _first_child(self, e):
        """ Returns the first child Element of the given element.
        """
        if isinstance(e, Node):
            for e in e.children:
                if isinstance(e, Element):
                    return e
    def _first_sibling(self, e):
        """ Returns the first next sibling Element of the given element.
        """
        while isinstance(e, Node):
            e = e.next
            if isinstance(e, Element):
                return e
    def _contains(self, e, s):
        """ Returns True if string s occurs in the given element (case-insensitive).
        """
        s = re.sub(r"^contains\((.*?)\)$", "\\1", s)
        s = re.sub(r"^[\"']|[\"']$", "", s)
        return re.search(s.lower(), e.content.lower()) is not None
    def match(self, e):
        """ Returns True if the given element matches the simple CSS selector.
        """
        if not isinstance(e, Element):
            return False
        if self.tag not in (e.tag, "*"):
            return False
        if self.id not in ((e.id or "").lower(), "", None):
            return False
        if self.classes.issubset(set(map(lambda s: s.lower(), e.attr.get("class", "").split()))) is False:
            return False
        if "first-child" in self.pseudo and self._first_child(e.parent) != e:
            return False
        if any(x.startswith("contains") and not self._contains(e, x) for x in self.pseudo):
            return False # jQuery :contains("...") selector.
        # Fix: iterate (attribute, value) pairs; iterating the dict itself yields
        # bare keys and raised ValueError when attribute selectors were present.
        # NOTE(review): compiled-regex values (~=, ^=, ...) never equal the
        # attribute string here; regex matching only happens in search() -- confirm.
        for k, v in self.attributes.items():
            if k not in e.attrs or v not in (e.attrs[k].lower(), True):
                return False
        return True
    def search(self, e):
        """ Returns the nested elements that match the simple CSS selector.
        """
        # Map tag to True if it is "*".
        tag = self.tag == "*" or self.tag
        # Map id into a case-insensitive **kwargs dict.
        i = lambda s: re.compile(r"\b%s\b" % s, re.I)
        a = {"id": i(self.id)} if self.id else {}
        # dict.update() accepts an iterable of (key, value) pairs; the former
        # tuple-unpacking lambda was Python 2-only syntax.
        a.update((k, i(v)) for k, v in self.attributes.items())
        # Match tag + id + all classes + relevant pseudo-elements.
        if not isinstance(e, Element):
            return []
        if len(self.classes) == 0 or len(self.classes) >= 2:
            e = map(Element, e._p.findAll(tag, **a))
        if len(self.classes) == 1:
            e = map(Element, e._p.findAll(tag, **dict(a, **{"class": i(list(self.classes)[0])})))
        if len(self.classes) >= 2:
            e = filter(lambda e: self.classes.issubset(set(e.attr.get("class", "").lower().split())), e)
        if "first-child" in self.pseudo:
            e = filter(lambda e: e == self._first_child(e.parent), e)
        if any(x.startswith("contains") for x in self.pseudo):
            e = filter(lambda e: all(not x.startswith("contains") or self._contains(e, x) for x in self.pseudo), e)
        return e
    def __repr__(self):
        return "Selector(%s)" % repr(self.string)
class SelectorChain(list):
    def __init__(self, s):
        """ A selector is a chain of one or more simple selectors,
            separated by combinators (e.g., ">").
        """
        self.string = s
        for s in s.split(","):
            # Normalize whitespace and glue each combinator to the selector that
            # follows it ("a > b" => "a >b"), so splitting on spaces below yields
            # one token per step.
            s = s.lower()
            s = s.strip()
            s = re.sub(r" +", " ", s)
            s = re.sub(r" *\> *", " >", s)
            s = re.sub(r" *\< *", " <", s)
            s = re.sub(r" *\+ *", " +", s)
            # Protect spaces inside [attribute="..."] values from the split;
            # Selector._parse_attribute() restores them.
            s = re.sub(r"\[.*?\]", lambda m: m.group(0).replace(" ", "<!space>"), s)
            self.append([])
            for s in s.split(" "):
                if not s.startswith((">", "<", "+")):
                    self[-1].append((" ", Selector(s)))
                elif s.startswith(">"):
                    self[-1].append((">", Selector(s[1:])))
                elif s.startswith("<"):
                    self[-1].append(("<", Selector(s[1:])))
                elif s.startswith("+"):
                    self[-1].append(("+", Selector(s[1:])))
    def search(self, e):
        """ Returns the nested elements that match the CSS selector chain.
        """
        m, root = [], e
        # Each comma-separated chain is searched from the root; results accumulate in m.
        for chain in self:
            e = [root]
            for combinator, s in chain:
                # Search Y, where:
                if combinator == " ":
                    # X Y => X is ancestor of Y
                    e = map(s.search, e)
                    e = list(itertools.chain(*e))
                if combinator == ">":
                    # X > Y => X is parent of Y
                    e = map(lambda e: filter(s.match, e.children), e)
                    e = list(itertools.chain(*e))
                if combinator == "<":
                    # X < Y => X is child of Y
                    e = map(lambda e: e.parent, e)
                    e = filter(s.match, e)
                if combinator == "+":
                    # X + Y => X directly precedes Y
                    e = map(s._first_sibling, e)
                    e = filter(s.match, e)
            m.extend(e)
        return m
#dom = DOM("""
#<html>
#<head></head>
#<body>
# <div id="#main">
# <span class="11 22 33">x</span>
# </div>
#</body>
#</html>
#""")
#
#print dom("*[class='11']")
#print dom("*[class^='11']")
#print dom("*[class~='22']")
#print dom("*[class$='33']")
#print dom("*[class*='3']")
#### WEB CRAWLER ###################################################################################
# Tested with a crawl across 1,000 domains so far.
class Link(object):
    def __init__(self, url, text="", relation="", referrer=""):
        """ A hyperlink parsed from a HTML document:
            <a href="url" title="text" rel="relation">...</a>.
        """
        self.url = u(url)
        self.text = u(text)
        self.relation = u(relation)
        self.referrer = u(referrer)
    @property
    def description(self):
        # Alias for the link's title text.
        return self.text
    def __repr__(self):
        return "Link(url={0})".format(repr(self.url))
    # Links compare and order by URL (used for sorting in Crawler.links):
    def __eq__(self, link):
        return self.url == link.url
    def __ne__(self, link):
        return self.url != link.url
    def __lt__(self, link):
        return self.url < link.url
    def __gt__(self, link):
        return self.url > link.url
class HTMLLinkParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
    def parse(self, html, url=""):
        """ Returns a list of Links parsed from the given HTML string.
            The given url is stored as each Link's referrer.
        """
        if html is None:
            return None
        self._url = url   # Referrer for the parsed links.
        self._data = []   # Collected Link objects.
        # clean() is provided by the project's HTMLParser wrapper.
        self.feed(self.clean(html))
        self.close()
        self.reset()
        return self._data
    def handle_starttag(self, tag, attributes):
        # Only anchor tags with a href attribute yield a Link.
        if tag == "a":
            attributes = dict(attributes)
            if "href" in attributes:
                link = Link(url = attributes.get("href"),
                           text = attributes.get("title"),
                       relation = attributes.get("rel", ""),
                       referrer = self._url)
                self._data.append(link)
def base(url):
    """ Returns the domain name part of the given URL, e.g.,
        http://en.wikipedia.org/wiki/Web_crawler => en.wikipedia.org
    """
    parsed = urlparse.urlparse(url)
    return parsed.netloc
def abs(url, base=None):
    """ Returns the absolute URL, e.g.,
        ../media + http://en.wikipedia.org/wiki/ => http://en.wikipedia.org/media
        (Note: shadows the builtin abs() within this module.)
    """
    # Fragment links ("#section") resolve against the referring page itself,
    # so a base without a path component first gets a trailing slash.
    if url.startswith("#") and base is not None and not base.endswith("/"):
        if not re.search("[^/]/[^/]", base):
            base += "/"
    return urlparse.urljoin(base, url)
# Crawl strategies and queue orderings (see Crawler.crawl() and Crawler.push()):
DEPTH = "depth"
BREADTH = "breadth"
FIFO = "fifo" # First In, First Out.
FILO = "filo" # First In, Last Out.
LIFO = "lifo" # Last In, First Out (= FILO).
class Crawler(object):
    def __init__(self, links=[], domains=[], delay=20.0, parse=HTMLLinkParser().parse, sort=FIFO):
        """ A crawler can be used to browse the web in an automated manner.
            It visits the list of starting URLs, parses links from their content, visits those, etc.
            - Links can be prioritized by overriding Crawler.priority().
            - Links can be ignored by overriding Crawler.follow().
            - Each visited link is passed to Crawler.visit(), which can be overridden.
        """
        # NOTE(review): links=[], domains=[] and parse=HTMLLinkParser().parse are
        # evaluated once at class-definition time and shared across instances;
        # harmless as long as they are never mutated in place -- confirm.
        self.parse = parse
        self.delay = delay # Delay between visits to the same (sub)domain.
        self.domains = domains # Domains the crawler is allowed to visit.
        self.history = {} # Domain name => time last visited.
        self.visited = {} # URLs visited.
        self._queue = [] # URLs scheduled for a visit: (priority, time, Link).
        self._queued = {} # URLs scheduled so far, lookup dictionary.
        self.QUEUE = 10000 # Increase or decrease according to available memory.
        self.sort = sort
        # Queue given links in given order:
        for link in (isinstance(links, basestring) and [links] or links):
            self.push(link, priority=1.0, sort=FIFO)
    @property
    def done(self):
        """ Yields True if no further links are scheduled to visit.
        """
        return len(self._queue) == 0
    def push(self, link, priority=1.0, sort=FILO):
        """ Pushes the given link to the queue.
            Position in the queue is determined by priority.
            Equal ranks are sorted FIFO or FILO.
            With priority=1.0 and FILO, the link is inserted to the queue.
            With priority=0.0 and FIFO, the link is appended to the queue.
        """
        if not isinstance(link, Link):
            link = Link(url=link)
        dt = time.time()
        # FIFO keeps the timestamp (older first); FILO stores 1/t so that newer
        # links sort first among equal priorities.
        dt = sort == FIFO and dt or 1 / dt
        # The queue is kept sorted: lower (1 - priority) sorts first.
        bisect.insort(self._queue, (1 - priority, dt, link))
        self._queued[link.url] = True
    def pop(self, remove=True):
        """ Returns the next Link queued to visit and removes it from the queue.
            Links on a recently visited (sub)domain are skipped until Crawler.delay has elapsed.
        """
        now = time.time()
        for i, (priority, dt, link) in enumerate(self._queue):
            # Take the first link whose domain was not visited in the last `delay` seconds.
            if self.delay <= now - self.history.get(base(link.url), 0):
                if remove is True:
                    self._queue.pop(i)
                    self._queued.pop(link.url, None)
                return link
    @property
    def next(self):
        """ Returns the next Link queued to visit (without removing it).
        """
        return self.pop(remove=False)
    def crawl(self, method=DEPTH, **kwargs):
        """ Visits the next link in Crawler._queue.
            If the link is on a domain recently visited (< Crawler.delay) it is skipped.
            Parses the content at the link for new links and adds them to the queue,
            according to their Crawler.priority().
            Visited links (and content) are passed to Crawler.visit().
            Returns True if a link was visited, False otherwise.
        """
        link = self.pop()
        if link is None:
            return False
        if link.url not in self.visited:
            t = time.time()
            url = URL(link.url)
            if url.mimetype == "text/html":
                try:
                    kwargs.setdefault("unicode", True)
                    html = url.download(**kwargs)
                    for new in self.parse(html, url=link.url):
                        new.url = abs(new.url, base=url.redirect or link.url)
                        new.url = self.normalize(new.url)
                        # 1) Parse new links from HTML web pages.
                        # 2) Schedule unknown links for a visit.
                        # 3) Only links that are not already queued are queued.
                        # 4) Only links for which Crawler.follow() is True are queued.
                        # 5) Only links on Crawler.domains are queued.
                        if new.url == link.url:
                            continue
                        if new.url in self.visited:
                            continue
                        if new.url in self._queued:
                            continue
                        if self.follow(new) is False:
                            continue
                        if self.domains and not base(new.url).endswith(tuple(self.domains)):
                            continue
                        # 6) Limit the queue (remove tail), unless you are Google.
                        if self.QUEUE is not None and \
                           self.QUEUE * 1.25 < len(self._queue):
                            self._queue = self._queue[:self.QUEUE]
                            self._queued.clear()
                            self._queued.update(dict((q[2].url, True) for q in self._queue))
                        # 7) Position in the queue is determined by Crawler.priority().
                        # 8) Equal ranks are sorted FIFO or FILO.
                        self.push(new, priority=self.priority(new, method=method), sort=self.sort)
                    self.visit(link, source=html)
                except URLError:
                    # URL can not be reached (HTTP404NotFound, URLTimeout).
                    self.fail(link)
            else:
                # URL MIME-type is not HTML, don't know how to handle.
                self.fail(link)
            # Log the current time visited for the domain (see Crawler.pop()).
            # Log the URL as visited.
            self.history[base(link.url)] = t
            self.visited[link.url] = True
            return True
        # Nothing happened, we already visited this link.
        return False
    def normalize(self, url):
        """ Called from Crawler.crawl() to normalize URLs.
            For example: return url.split("?")[0]
        """
        # All links pass through here (visited or not).
        # This can be a place to count backlinks.
        return url
    def follow(self, link):
        """ Called from Crawler.crawl() to determine if it should follow this link.
            For example: return "nofollow" not in link.relation
        """
        return True
    def priority(self, link, method=DEPTH):
        """ Called from Crawler.crawl() to determine the priority of this link,
            as a number between 0.0-1.0. Links with higher priority are visited first.
        """
        # Depth-first search dislikes external links to other (sub)domains.
        external = base(link.url) != base(link.referrer)
        if external is True:
            if method == DEPTH:
                return 0.75
            if method == BREADTH:
                return 0.85
        return 0.80
    def visit(self, link, source=None):
        """ Called from Crawler.crawl() when the link is crawled.
            When source=None, the link is not a web page (and was not parsed),
            or possibly a URLTimeout occured (content size too big).
        """
        pass
    def fail(self, link):
        """ Called from Crawler.crawl() for link whose MIME-type could not be determined,
            or which raised a URLError on download.
        """
        pass
# Backwards-compatible alias: Spider is another name for Crawler.
Spider = Crawler
#class Polly(Crawler):
# def visit(self, link, source=None):
# print "visited:", link.url, "from:", link.referrer
# def fail(self, link):
# print "failed:", link.url
#
#p = Polly(links=["http://nodebox.net/"], domains=["nodebox.net"], delay=5)
#while not p.done:
# p.crawl(method=DEPTH, cached=True, throttle=5)
#--- CRAWL FUNCTION --------------------------------------------------------------------------------
# Functional approach to crawling.
def crawl(links=[], domains=[], delay=20.0, parse=HTMLLinkParser().parse, sort=FIFO, method=DEPTH, **kwargs):
    """ Returns a generator that yields (Link, source)-tuples of visited pages.
        When the crawler is idle, it yields (None, None).
    """
    # The scenarios below defines "idle":
    # - crawl(delay=10, throttle=0)
    #   The crawler will wait 10 seconds before visiting the same subdomain.
    #   The crawler will not throttle downloads, so the next link is visited instantly.
    #   So sometimes (None, None) is returned while it waits for an available subdomain.
    # - crawl(delay=0, throttle=10)
    #   The crawler will wait 10 seconds after each and any visit.
    #   The crawler will not delay before visiting the same subdomain.
    #   So usually a result is returned each crawl.next(), but each call takes 10 seconds.
    # - asynchronous(crawl().next)
    #   AsynchronousRequest.value is set to (Link, source) once AsynchronousRequest.done=True.
    #   The program will not halt in the meantime (i.e., the next crawl is threaded).
    crawler = Crawler(links, domains, delay, parse, sort)
    # Patch Crawler.visit() so each successful visit records (link, source).
    bind(crawler, "visit", \
        lambda crawler, link, source=None: \
            setattr(crawler, "crawled", (link, source))) # Define Crawler.visit() on-the-fly.
    while not crawler.done:
        crawler.crawled = (None, None)
        crawler.crawl(method, **kwargs)
        yield crawler.crawled
#for link, source in crawl("http://www.clips.ua.ac.be/", delay=0, throttle=1, cached=False):
# print link
#g = crawl("http://www.clips.ua.ac.be/")
#for i in range(10):
# p = asynchronous(g.next)
# while not p.done:
# print "zzz..."
# time.sleep(0.1)
# link, source = p.value
# print link
#### DOCUMENT PARSER ###############################################################################
# Not to be confused with Document, which is the top-level element in the HTML DOM.
class DocumentParserError(Exception):
    """ Base class for errors raised while parsing a document. """
class DocumentParser(object):
    def __init__(self, path, *args, **kwargs):
        """ Parses a text document (e.g., .pdf or .docx),
            given as a file path or a string.
            The parsed plaintext is stored in DocumentParser.content.
        """
        self.content = self._parse(path, *args, **kwargs)
    def _open(self, path):
        """ Returns a file-like object with a read() method,
            from the given file path or string.
        """
        if isinstance(path, basestring) and os.path.exists(path):
            return open(path, "rb")
        if hasattr(path, "read"):
            return path
        return StringIO.StringIO(path)
    def _parse(self, path, *args, **kwargs):
        """ Returns a plaintext Unicode string parsed from the given document.
        """
        # Fix: the helper is named _open(); self.open() raised AttributeError.
        return plaintext(decode_utf8(self._open(path).read()))
    @property
    def string(self):
        # Alias for the parsed content.
        return self.content
    def __unicode__(self):
        return self.content
#--- PDF PARSER ------------------------------------------------------------------------------------
# Yusuke Shinyama, PDFMiner, http://www.unixuser.org/~euske/python/pdfminer/
class PDFError(DocumentParserError):
    """ Raised when a .pdf document can not be parsed. """
class PDF(DocumentParser):
    def __init__(self, path, output="txt"):
        # output="txt" yields plain text; output="html" preserves some layout.
        self.content = self._parse(path, format=output)
    def _parse(self, path, *args, **kwargs):
        # The output is useful for mining but not for display.
        # Alternatively, PDF(format="html") preserves some layout.
        from pdf.pdfinterp import PDFResourceManager, process_pdf
        from pdf.converter import TextConverter, HTMLConverter
        from pdf.layout import LAParams
        try:
            m = PDFResourceManager()
            s = StringIO.StringIO()
            # Pick the converter for the requested format ("html" => HTMLConverter).
            p = kwargs.get("format", "txt").endswith("html") and HTMLConverter or TextConverter
            p = p(m, s, codec="utf-8", laparams=LAParams())
            process_pdf(m, p, self._open(path), set(), maxpages=0, password="")
        except Exception, e:
            # Wrap any pdfminer error in the module's exception type.
            raise PDFError, str(e)
        s = s.getvalue()
        s = decode_utf8(s)
        s = s.strip()
        s = re.sub(r"([a-z])\-\n", "\\1", s) # Hyphenation.
        s = s.replace("\n\n", "<!-- #p -->") # Paragraphs.
        s = s.replace("\n", " ")
        s = s.replace("<!-- #p -->", "\n\n")
        s = collapse_spaces(s)
        return s
#--- OOXML PARSER ----------------------------------------------------------------------------------
# Mike Maccana, Python docx, https://github.com/mikemaccana/python-docx
class DOCXError(DocumentParserError):
    """ Raised when a .docx document can not be parsed. """
class DOCX(DocumentParser):
    def _parse(self, path, *args, **kwargs):
        from docx.docx import opendocx
        from docx.docx import getdocumenttext
        try:
            s = opendocx(self._open(path))
            s = getdocumenttext(s)
        except Exception, e:
            # Wrap any python-docx error in the module's exception type.
            raise DOCXError, str(e)
        # getdocumenttext() yields paragraphs; separate them with blank lines.
        s = "\n\n".join(p for p in s)
        s = decode_utf8(s)
        s = collapse_spaces(s)
        return s
#---------------------------------------------------------------------------------------------------
def parsepdf(path, *args, **kwargs):
    """ Returns the content of the given .pdf file as a Unicode string.
    """
    document = PDF(path, *args, **kwargs)
    return document.content
def parsedocx(path, *args, **kwargs):
    """ Returns the content of the given .docx file as a Unicode string.
    """
    document = DOCX(path, *args, **kwargs)
    return document.content
def parsehtml(path, *args, **kwargs):
    """ Returns the content of the given .html file as a Unicode string.
    """
    document = DOM(path, *args, **kwargs)
    return plaintext(document.body)
def parsedoc(path, format=None):
    """ Returns the content of the given document (.html, .pdf, .docx)
        as a Unicode string.
    """
    if isinstance(path, basestring):
        # Dispatch on the explicit format, or on the file name suffix.
        known = (
            (parsepdf,  "pdf",  (".pdf",)),
            (parsedocx, "docx", (".docx",)),
            (parsehtml, "html", (".htm", ".html", ".xhtml")))
        for parser, fmt, suffixes in known:
            if format == fmt or path.endswith(suffixes):
                return parser(path)
    # Unknown format: best-effort, try each parser in turn.
    for parser in (parsepdf, parsedocx, parsehtml):
        try:
            return parser(path)
        except:
            pass
| [
"michael.defferrard@epfl.ch"
] | michael.defferrard@epfl.ch |
486bbddf0faf146b052d081cc375d1cb0c315478 | 04819640c733981008b41e1829da5b2158e2a689 | /books.py | e7aa06556f5318efd5a8bc6beb1743889f6ad5af | [] | no_license | FlorentRevest/WCPS2017 | a5147ff88f1ed5c44ddefed7ff8d1b529aa0cd7d | a3dafc149b6fe33549945b3123fcad915fc56e5d | refs/heads/master | 2020-03-09T19:09:31.907021 | 2018-04-10T15:00:20 | 2018-04-10T19:57:01 | 128,950,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/env python3
def adjustedVal(m, n, sub, ai, endVal):
    # Prefix-sum total of the first (m+1) books read in circular order.
    # NOTE(review): this reads the module-level global `firstBookIndex` set by
    # the loop at the bottom of the file (it shadows the parameter of the same
    # name in howManyBooksUnder) -- confirm both stay in sync.
    if(m+firstBookIndex >= n):
        return abs(ai[m+firstBookIndex-n]-sub+endVal)
    else:
        return abs(ai[m+firstBookIndex]-sub)
def howManyBooksUnder(x, ai, n, firstBookIndex, sub):
    # Binary search for the largest count of consecutive books, starting at
    # firstBookIndex (prefix sums in ai, offset sub), whose total time is <= x.
    # NOTE(review): adjustedVal() ignores this function's firstBookIndex
    # parameter and reads the module-level global of the same name instead.
    l = 0
    r = n-1-firstBookIndex
    endVal = ai[n-1]
    while r > l + 1:
        m = round((l+r)/2)
        if adjustedVal(m, n, sub, ai, endVal) <= x:
            l = m
        else:
            r = m
    # l/r now bracket the boundary; pick the larger count that still fits.
    if adjustedVal(l, n, sub, ai, endVal) > x:
        return 0
    if adjustedVal(r, n, sub, ai, endVal) <= x:
        return r+1
    else:
        return l+1
firstLine = [int(x) for x in input().split()] # n = number of books, t = time available
n = firstLine[0]
t = firstLine[1]
ai = [int(x) for x in input().split()] # time per book
# Turn ai into prefix sums: ai[i] = total time of books 0..i.
for i in range(1, n):
    ai[i] += ai[i-1]
maxBooks = 0
# Try every starting book; adjustedVal() reads `firstBookIndex` as a global,
# so this loop variable name must not change.
for firstBookIndex in range(n):
    sub = 0
    if(firstBookIndex > 0):
        sub = ai[firstBookIndex-1]
    newMax = howManyBooksUnder(t, ai, n, firstBookIndex, sub)
    if newMax > maxBooks:
        maxBooks = newMax
print(maxBooks)
| [
"revestflo@gmail.com"
] | revestflo@gmail.com |
f8c788c479d7d8f11e632b6695c8b920e39dfdce | f546e965b0ecf452ceec74e20d53d7c0844336a8 | /wechat/weixin/views.py | 34e609bf6e6557e4cb1b19a6185fb078aa4d63a4 | [] | no_license | hanwenlu/WeChat | 6add8365673084badb3895555b8803e2b32ddf48 | a01c824ce8ed88453cb17173429ba0fbbf862d66 | refs/heads/master | 2021-01-01T19:05:29.865329 | 2016-03-13T16:16:23 | 2016-03-13T16:16:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | #-*- coding:utf-8 -*-
# deal with unicode error(support chinese)
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# import module from django & wechat_sdk
# pip install django && pip install wechat_sdk first
from django.http.response import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from wechat_sdk import WechatConf
from wechat_sdk import WechatBasic
from wechat_sdk.exceptions import ParseError
from wechat_sdk.messages import (TextMessage, VoiceMessage, ImageMessage, VideoMessage, LinkMessage, LocationMessage, EventMessage, ShortVideoMessage)
# config wechat parameters
# NOTE: replace the YOUR_* placeholders with real credentials from the WeChat
# admin console; keep real secrets out of version control (e.g., load them
# from environment variables instead).
conf = WechatConf(
    token='YOUR_TOKEN',
    appid='YOUR_APPID',
    appsecret='YOUR_APPSECRET',
    encrypt_mode='YOUR_MODE',
    encoding_aes_key='YOUR_AES_KEY'
)
# pass csrf check
@csrf_exempt
def wechat_home(request):
    """ Single WeChat webhook endpoint.
        GET: server verification handshake -- echoes the 'echostr' parameter.
        POST: an incoming user message -- replies with the message's type name.
        Requests failing the signature check get a 400 response.
    """
    # get signature, timestamp and nonce
    signature = request.GET.get('signature')
    timestamp = request.GET.get('timestamp')
    nonce = request.GET.get('nonce')
    # create a newInstance
    wechat_instance = WechatBasic(conf=conf)
    # check_signature function tells whether the request is sent by wechat
    # not checked
    if not wechat_instance.check_signature(signature=signature, timestamp=timestamp, nonce=nonce):
        return HttpResponseBadRequest('Verify Failed')
    else:
        # checked
        # GET method represents that Wechat sent verification information
        if request.method == 'GET':
            response = request.GET.get('echostr', 'error')
        # POST method stands for Wechat sent user's messages
        else:
            try:
                wechat_instance.parse_data(request.body) # parse data from instance
                message = wechat_instance.get_message() # get message
                # classify the type of message
                if isinstance(message, TextMessage): # text message
                    reply_text = 'text'
                elif isinstance(message, VoiceMessage): # voice message
                    reply_text = 'voice'
                elif isinstance(message, ImageMessage): # image message
                    reply_text = 'image'
                elif isinstance(message, LinkMessage): # link message
                    reply_text = 'link'
                elif isinstance(message, LocationMessage): # location message
                    reply_text = 'location'
                elif isinstance(message, VideoMessage): # video message
                    reply_text = 'video'
                elif isinstance(message, ShortVideoMessage): # shortvideo message
                    reply_text = 'shortvideo'
                else:
                    reply_text = 'other' # other message
                response = wechat_instance.response_text(content=reply_text)
            except ParseError: # ERROR when parsing
                return HttpResponseBadRequest('Invalid XML Data')
    # reply with our defined message
    return HttpResponse(response, content_type="application/xml")
# END | [
"fanhr5@gmail.com"
] | fanhr5@gmail.com |
ed769e058750777e05debebbc99e7842fd3bfc5f | a5b53e7b32713015d9ce970a4cc622756188f4b0 | /app/catalog/urls.py | f5f311f32a156fb468b49d717cd0f03c9c1b2ab9 | [] | no_license | kimzod/library_app | f1eef4ccc011569af10f7db7327d5d647498505e | 73ef6f383f901de2f17dacd8f6170c0f3ed3b856 | refs/heads/master | 2021-05-22T01:15:29.411376 | 2020-04-08T16:09:43 | 2020-04-08T16:09:43 | 252,902,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from django.urls import path
from . import views
app_name = 'catalog'
urlpatterns = [
path('', views.index, name='index'),
path('books/', views.BookListView.as_view(), name='books'),
path('book/<int:pk>/', views.BookDetailView.as_view(), name='book-detail'),
path('authors/', views.AuthorListView.as_view(), name='authors'),
path('author/<int:pk>/', views.AuthorDetailView.as_view(), name='author-detail'),
]
| [
"51065991+kimzod@users.noreply.github.com"
] | 51065991+kimzod@users.noreply.github.com |
fa9b86655cac7f7c177ccfad6c58f3e8c1771c23 | 8932c700bd52683670e10e870f8b116f25fb6e1f | /melanoma_classification/isicdownload/loadisicmetadata.py | 8a59435b0113bb7f4b54374d98935bc62fbdc6d6 | [
"Unlicense"
] | permissive | michaelsiemmeister/melanomaclassification | 1d09b0ccfe67e16e4052242eaf1b81f8e9deb555 | bf8866e1a4b35faecf89168f5af2ff23139cdf5a | refs/heads/master | 2023-01-10T00:02:18.270578 | 2019-12-18T15:06:09 | 2019-12-18T15:06:09 | 228,863,541 | 0 | 0 | Unlicense | 2022-12-26T21:09:37 | 2019-12-18T15:00:40 | Python | UTF-8 | Python | false | false | 461 | py |
import os
import pandas as pd
def load_isic_df(metadata_path):
    """Read the ISIC metadata CSV into a pandas DataFrame.

    metadata_path: str, relative or absolute path to the metadata csv file.
    Returns a DataFrame where the 'name' column is kept as a regular column
    and also used as the index, with the index renamed to 'id'.
    """
    csv_path = os.path.abspath(metadata_path)
    frame = pd.read_csv(csv_path, low_memory=False)
    frame = frame.set_index('name', drop=False)
    frame.index = frame.index.rename('id')
    return frame
| [
"m.siemmeister@gmail.com"
] | m.siemmeister@gmail.com |
4cd078afb5311f126095acf7b92ad0506caea81c | 87796f8b79b325cdfca7a98b120d690a13ebd267 | /capture/noworkflow/tests/prov_definition/__init__.py | b9e742c0dad1702ab87430c5fbb2e8279693e496 | [
"MIT"
] | permissive | raffaelfoidl/noworkflow | c53c6be45960a93aa546fd8f6c967af2b7c31c93 | aa4ca189df24fec6c7abd32bcca6a097b21fdf31 | refs/heads/master | 2021-01-06T16:32:57.036864 | 2020-03-23T14:12:37 | 2020-03-23T14:12:37 | 241,398,493 | 0 | 0 | MIT | 2020-02-18T15:36:16 | 2020-02-18T15:36:15 | null | UTF-8 | Python | false | false | 426 | py | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from .slicing_test import TestSlicingDependencies
# b"..." keeps the entry a native byte string under Py2's unicode_literals.
# NOTE(review): under Python 3 a bytes entry in __all__ breaks
# `from ... import *` -- confirm this package only targets Python 2 here.
__all__ = [
    b"TestSlicingDependencies",
]
| [
"joaofelipenp@gmail.com"
] | joaofelipenp@gmail.com |
793e9294a57ac70deb5e9b9239bca0e35c91a5b5 | a609f05150d73cae683ccffd9f892366277891f8 | /faculty_list_crawler/deploy.py | f2a2a40c79c6a6ed69a8b8a995383358cf2b9e2b | [] | no_license | bt3101-project-1/bt3101-project | f7e4ed7eafc6023ea0887cff3311ffffdc2f9377 | 2260bc7b1520bac5c0adabab35968ab3fa97f08b | refs/heads/master | 2021-01-21T03:20:46.668560 | 2017-11-03T08:24:09 | 2017-11-03T08:24:09 | 101,890,028 | 0 | 1 | null | 2017-10-29T05:09:32 | 2017-08-30T14:15:50 | JavaScript | UTF-8 | Python | false | false | 621 | py | from apscheduler.schedulers.blocking import BlockingScheduler
from main import *
import logging
from datetime import datetime
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    downloader = Downloader()
    analyser = Analyser()
    scheduler = BlockingScheduler(logger=logger)
    # Run both jobs every 30 seconds; next_run_time=now fires them immediately.
    scheduler.add_job(downloader.run, 'interval', seconds=30, next_run_time=datetime.now())
    scheduler.add_job(analyser.run, 'interval', seconds=30, next_run_time=datetime.now())
    try:
        # BlockingScheduler.start() blocks the main thread until interrupted.
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
| [
"syk950527@gmail.com"
] | syk950527@gmail.com |
af1500f48507bd5b50ceff204c9f8e2e4da6218b | 3a5c94dce25e38fc083619a2a0bf8a657b9afb8c | /vol_001/p188.py | 387317b23d9bda987694f67124fd476f61f4c1fa | [] | no_license | tkoz0/problems-online-judge | a322e296fb14fdf4ca616352ead28895465879e9 | 3372c6fe75556a9dd3e6eb8b06ae65058290ab31 | refs/heads/master | 2022-07-23T01:20:57.380524 | 2022-07-10T07:15:56 | 2022-07-10T07:15:56 | 157,949,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import sys
import random
def hash(word):
    # Base-32 polynomial hash: each letter maps to 1..26 ('a'..'z') and is
    # weighted by 32**position, counting positions from the right end.
    # (Shadows the builtin hash(); the name is kept for callers below.)
    return sum((1 + ord(ch) - ord('a')) << (5 * i)
               for i, ch in enumerate(reversed(word)))
def find_collision(W,C):
n = len(W)
new_C = 0
for i in range(n):
for j in range(n):
if i != j and ((C//W[i])%n) == ((C//W[j])%n): # collision
#new_C = max(new_C,min((C//W[i]+1)*W[i],(C//W[j]+1)*W[j]))
return min((C//W[i]+1)*W[i],(C//W[j]+1)*W[j])
return 0#new_C # 0 if no collision
def find_C(W): # W is integer list
C = 1
while True: # loop until found
new_C = find_collision(W,C)
if new_C == 0: break # none
else: C = new_C
return C
for line in sys.stdin:
words = line.split()
W = [hash(word) for word in words]
print(line,end='')
print(find_C(W))
print()
| [
"tkozlowski1999@gmail.com"
] | tkozlowski1999@gmail.com |
bdbb71121709de6f4abd49f0053eba8ee7a0b09b | 6477bd1a2da7ce9426c41a04674e19d801d358ef | /AOJ_introductionToProgramming/Topic05/printARectangle.py | 50f699ebfc9deeb0d08c53a34840d8efa1d9cf8b | [] | no_license | takuma375/AtCoder | 6b917153d1597cdd6178db16417bf3b0a0d41850 | 9c24ac600ec80c3ebbc718c66437997462196ee3 | refs/heads/main | 2023-06-28T10:09:26.018064 | 2021-07-25T09:54:53 | 2021-07-25T09:54:53 | 382,631,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | # たてH cm よこ W cm の長方形を描くプログラムを作成して下さい。
# 1 cm × 1cm の長方形を '#'で表します。
# 入力は複数のデータセットから構成されています。各データセットの形式は以下のとおりです:
#H W
# H, W がともに 0 のとき、入力の終わりとします。
while True:
H, W = list(map(int, input().split()))
if H == 0 and W == 0:
break
else:
for i in range(H):
for j in range(W):
print("#", end="")
print()
print()
| [
"takuma-shimizu-10174@keio.jp"
] | takuma-shimizu-10174@keio.jp |
0d015aa9ad6f1df598a7cf9e8e117438071a312b | 2ad7fcfbc6bac1e39ebe0aa4be8c1de058857619 | /Problemas_repaso/lista.py | 4006768e1d7cff7f02cc162ca7ff01ab0579236e | [] | no_license | alulec/CYPAlexisCC | 0fc50b714516da9ed5336e03dd19ed869780e37b | fce0617205178c8380d9b65b986a50f485a1a753 | refs/heads/master | 2022-06-09T01:28:05.005107 | 2022-05-31T01:23:55 | 2022-05-31T01:23:55 | 207,672,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | lista = []
lista2 = list()
print(lista)
print(lista2)
numeros = [3,4,52,45,7,64,546,567,0]
print(numeros)
print(numeros[2])
print(numeros[-1])
#slicing
print(numeros[3:-1:1])
print(numeros[::-1])
cosas = ["Alexis",545,9,34,35,True,None,[3,4,5,6,7,8]]
print(cosas)
print(cosas[7])
print(cosas[7][2])
cosas[1] = 10001
cosas[5] = False
print(cosas)
#tupla
fecha = '12/05/2019'
lista_fecha = fecha.split('/')
print(lista_fecha)
| [
"alexisjesuscordovadelacruz@gmail.com"
] | alexisjesuscordovadelacruz@gmail.com |
130fc956a500655a11b7c5323910c4856b1883bc | cdab20971d99fcf94b414e1060ffb15b6e60d821 | /ara/steps/printer.py | bb736509c71b774a97336ba23ddd8d9164b78314 | [] | no_license | florommel/ara | d27f60b2334bc03fb711dec5722c324a20eb2be9 | 0b0dac6083af10134dccd23db426a354c054a9ba | refs/heads/main | 2023-04-21T10:29:17.084071 | 2021-05-03T13:47:29 | 2021-05-03T13:47:29 | 363,943,723 | 0 | 0 | null | 2021-05-03T13:45:05 | 2021-05-03T13:45:05 | null | UTF-8 | Python | false | false | 9,282 | py | """Container for Printer."""
from ara.graph import ABBType, CFType, Graph, NodeLevel, CFGView
from .option import Option, String, Choice, Bool
from .step import Step
import pydot
import html
import os
import os.path
import graph_tool.draw
class Printer(Step):
"""Print graphs to dot."""
SHAPES = {
ABBType.computation: ("oval", "blue"),
ABBType.call: ("box", "red"),
ABBType.syscall: ("diamond", "green")
}
dot = Option(name="dot",
help="Path to a dot file, '-' will write to stdout.",
ty=String())
graph_name = Option(name="graph_name",
help="Name of the graph.",
ty=String())
subgraph = Option(name="subgraph",
help="Choose, what subgraph should be printed.",
ty=Choice("abbs", "instances", "callgraph"))
entry_point = Option(name="entry_point",
help="system entry point",
ty=String())
from_entry_point = Option(name="from_entry_point",
help="Print only from the given entry point.",
ty=Bool(),
default_value=False)
gen_html_links = Option(name="gen_html_links",
help="Generate source code links (with Pygments)",
ty=Bool(),
default_value=True)
def _print_init(self):
dot = self.dot.get()
if not dot:
self._fail("dot file path must be given.")
name = self.graph_name.get()
if not name:
name = ''
return name
def _write_dot(self, dot):
dot_path = self.dot.get()
assert dot_path
dot_path = os.path.abspath(dot_path)
os.makedirs(os.path.dirname(dot_path), exist_ok=True)
dot.write(dot_path)
self._log.info(f"Write {self.subgraph.get()} to {dot_path}.")
def _gen_html_file(self, filename):
try:
from pygments import highlight
from pygments.lexers import CppLexer
from pygments.formatters import HtmlFormatter
except ImportError:
self._log.warn("Pygments not found, skip source code linking")
return None
filename = os.path.abspath(filename)
if filename in self._graph.file_cache:
return self._graph.file_cache[filename]
hfile = os.path.join(os.path.dirname(self.dump_prefix.get()),
'html_files',
os.path.basename(filename) + ".html")
hfile = os.path.realpath(hfile)
self._graph.file_cache[filename] = hfile
with open(filename) as f:
code = f.read()
os.makedirs(os.path.dirname(hfile), exist_ok=True)
with open(hfile, 'w') as g:
g.write(highlight(code, CppLexer(),
HtmlFormatter(linenos='inline',
lineanchors='line', full=True)))
return hfile
def print_abbs(self):
name = self._print_init()
cfg = self._graph.cfg
entry_label = self.entry_point.get()
if self.from_entry_point.get():
entry_func = self._graph.cfg.get_function_by_name(entry_label)
functions = self._graph.cfg.reachable_functs(entry_func)
else:
functs = self._graph.functs
functions = functs.vertices()
dot_nodes = set()
dot_graph = pydot.Dot(graph_type='digraph', label=name)
for function in functions:
function = cfg.vertex(function)
dot_func = pydot.Cluster(cfg.vp.name[function],
label=cfg.vp.name[function])
dot_graph.add_subgraph(dot_func)
for abb in cfg.get_abbs(function):
if cfg.vp.type[abb] == ABBType.not_implemented:
assert not cfg.vp.implemented[function]
dot_abb = pydot.Node(str(hash(abb)),
label="",
shape="box")
dot_nodes.add(str(hash(abb)))
dot_func.set('style', 'filled')
dot_func.set('color', '#eeeeee')
else:
dot_abb = pydot.Node(
str(hash(abb)),
label=cfg.vp.name[abb],
shape=self.SHAPES[self._graph.cfg.vp.type[abb]][0],
color=self.SHAPES[self._graph.cfg.vp.type[abb]][1]
)
if cfg.vp.part_of_loop[abb]:
dot_abb.set('style', 'dashed')
dot_nodes.add(str(hash(abb)))
dot_func.add_node(dot_abb)
for edge in cfg.edges():
if cfg.ep.type[edge] not in [CFType.lcf, CFType.icf]:
continue
if not all([str(hash(x)) in dot_nodes
for x in [edge.source(), edge.target()]]):
continue
color = "black"
if cfg.ep.type[edge] == CFType.lcf:
color = "red"
if cfg.ep.type[edge] == CFType.icf:
color = "blue"
dot_graph.add_edge(pydot.Edge(str(hash(edge.source())),
str(hash(edge.target())),
color=color))
self._write_dot(dot_graph)
def print_instances(self):
name = self._print_init()
instances = self._graph.instances
dot_graph = pydot.Dot(graph_type='digraph', label=name)
default_fontsize = 14
default_fontsize_diff = 2
def p_str(p_map, key):
"""Convert to a pretty string"""
value = p_map[key]
if p_map.python_value_type() == bool:
value = bool(value)
return html.escape(str(value))
for instance in instances.vertices():
inst_obj = instances.vp.obj[instance]
if inst_obj and hasattr(inst_obj, 'as_dot'):
attrs = inst_obj.as_dot()
else:
attrs = {}
if "label" in attrs:
del attrs["label"]
attrs["fontsize"] = attrs.get("fontsize", 14)
if self.gen_html_links.get():
src_file = instances.vp.file[instance]
src_line = instances.vp.line[instance]
if (src_file != '' and src_line != 0):
hfile = self._gen_html_file(src_file)
if hfile is not None:
attrs["URL"] = f"file://{hfile}#line-{src_line}"
size = attrs["fontsize"] - default_fontsize_diff
label = instances.vp.label[instance]
graph_attrs = '<br/>'.join([f"<i>{k}</i>: {p_str(v, instance)}"
for k, v in instances.vp.items()
if k not in ["label", "obj"]])
graph_attrs = f"<font point-size='{size}'>{graph_attrs}</font>"
label = f"<{label}<br/>{graph_attrs}<br/><br/>{{}}>"
sublabel = attrs.get("sublabel", "")
if len(sublabel) > 0:
sublabel = f"<font point-size='{size}'>{sublabel}</font>"
label = label.format(sublabel)
if "sublabel" in attrs:
del attrs["sublabel"]
dot_node = pydot.Node(
str(hash(instance)),
label=label,
**attrs
)
dot_graph.add_node(dot_node)
for edge in self._graph.instances.edges():
dot_graph.add_edge(pydot.Edge(
str(hash(edge.source())),
str(hash(edge.target())),
label=self._graph.instances.ep.label[edge]))
self._write_dot(dot_graph)
def print_callgraph(self):
name = self._print_init()
shapes = {
True: ("box", "green"),
False: ("box", "black")
}
dot_graph = pydot.Dot(graph_type='digraph', label=name)
callgraph = self._graph.callgraph
cfg = callgraph.gp.cfg
for node in callgraph.vertices():
dot_node = pydot.Node(
str(hash(node)),
label=cfg.vp.name[callgraph.vp.function[node]],
shape=shapes[callgraph.vp.syscall_category_every[node]][0],
color=shapes[callgraph.vp.syscall_category_every[node]][1]
)
if callgraph.vp.recursive[node]:
dot_node.set('style', 'dashed')
dot_graph.add_node(dot_node)
for edge in callgraph.edges():
dot_graph.add_edge(pydot.Edge(
str(hash(edge.source())),
str(hash(edge.target())),
label=cfg.vp.name[callgraph.ep.callsite[edge]]))
self._write_dot(dot_graph)
def run(self):
subgraph = self.subgraph.get()
if subgraph == 'abbs':
self.print_abbs()
if subgraph == 'instances':
self.print_instances()
if subgraph == 'callgraph':
self.print_callgraph()
| [
"entrup@sra.uni-hannover.de"
] | entrup@sra.uni-hannover.de |
8614ee5eeee6d74b19ddc1e1113d47b06dddb8bd | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ethernet-lldp-cfg/gn-create-xr-ethernet-lldp-cfg-20-ydk.py | 3d8398de12dc3fa89df78ddc852a3fa57177f1dc | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,824 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ethernet-lldp-cfg.
usage: gn-create-xr-ethernet-lldp-cfg-20-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ethernet_lldp_cfg \
as xr_ethernet_lldp_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_lldp(lldp):
"""Add config data to lldp object."""
lldp.enable = True
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
lldp = xr_ethernet_lldp_cfg.Lldp() # create object
config_lldp(lldp) # add object configuration
# create configuration on gNMI device
crud.create(provider, lldp)
exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
429b4412b055802871c256416b0d6854b4f8d28e | d0baa28c977d8828389e12dd09dc1766cd870c7f | /DecisionTree.py | 47019ede0a891b9a9b5d8e54ee9dc24d0d1bc6f8 | [] | no_license | hsbarbosa/teachingmachines2019 | b586d49657dc933b8b238c7fbf01dc02da57f969 | 198a2d6e78b757f7982a27265208ba144ed4afaa | refs/heads/master | 2020-05-07T00:56:49.949999 | 2019-04-09T13:01:17 | 2019-04-09T13:01:17 | 180,251,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,710 | py | """Code to accompany Machine Learning Recipes #8.
We'll write a Decision Tree Classifier, in pure Python.
"""
# For Python 2 / 3 compatability
from __future__ import print_function
# Toy dataset.
# Format: each row is an example.
# The last column is the label.
# The first two columns are features.
# Feel free to play with it by adding more features & examples.
# Interesting note: I've written this so the 2nd and 5th examples
# have the same features, but different labels - so we can see how the
# tree handles this case.
# Column labels.
# These are used only to print the tree.
def unique_vals(rows, col):
"""Find the unique values for a column in a dataset."""
return set([row[col] for row in rows])
#######
# Demo:
# unique_vals(training_data, 0)
# unique_vals(training_data, 1)
#######
def class_counts(rows):
"""Counts the number of each type of example in a dataset."""
counts = {} # a dictionary of label -> count.
for row in rows:
# in our dataset format, the label is always the last column
label = row[-1]
if label not in counts:
counts[label] = 0
counts[label] += 1
return counts
#######
# Demo:
# class_counts(training_data)
#######
def is_numeric(value):
"""Test if a value is numeric."""
return isinstance(value, int) or isinstance(value, float)
#######
# Demo:
# is_numeric(7)
# is_numeric("Red")
#######
class Question:
"""A Question is used to partition a dataset.
This class just records a 'column number' (e.g., 0 for Color) and a
'column value' (e.g., Green). The 'match' method is used to compare
the feature value in an example to the feature value stored in the
question. See the demo below.
"""
def __init__(self, column, value,columns):
self.column = column
self.value = value
self.columns = columns
def match(self, example):
# Compare the feature value in an example to the
# feature value in this question.
val = example[self.column]
if is_numeric(val):
return val >= self.value
else:
return val == self.value
def __repr__(self):
# This is just a helper method to print
# the question in a readable format.
condition = "=="
if is_numeric(self.value):
condition = ">="
return "Is %s %s %s?" % (
self.columns[self.column], condition, str(self.value))
#######
# Demo:
# Let's write a question for a numeric attribute
# Question(1, 3)
# How about one for a categorical attribute
# q = Question(0, 'Green')
# Let's pick an example from the training set...
# example = training_data[0]
# ... and see if it matches the question
# q.match(example)
#######
def partition(rows, question):
"""Partitions a dataset.
For each row in the dataset, check if it matches the question. If
so, add it to 'true rows', otherwise, add it to 'false rows'.
"""
true_rows, false_rows = [], []
for row in rows:
if question.match(row):
true_rows.append(row)
else:
false_rows.append(row)
return true_rows, false_rows
#######
# Demo:
# Let's partition the training data based on whether rows are Red.
# true_rows, false_rows = partition(training_data, Question(0, 'Red'))
# This will contain all the 'Red' rows.
# true_rows
# This will contain everything else.
# false_rows
#######
def gini(rows):
"""Calculate the Gini Impurity for a list of rows.
There are a few different ways to do this, I thought this one was
the most concise. See:
https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
"""
counts = class_counts(rows)
impurity = 1
for lbl in counts:
prob_of_lbl = counts[lbl] / float(len(rows))
impurity -= prob_of_lbl**2
return impurity
#######
# Demo:
# Let's look at some example to understand how Gini Impurity works.
#
# First, we'll look at a dataset with no mixing.
# no_mixing = [['Apple'],
# ['Apple']]
# this will return 0
# gini(no_mixing)
#
# Now, we'll look at dataset with a 50:50 apples:oranges ratio
# some_mixing = [['Apple'],
# ['Orange']]
# this will return 0.5 - meaning, there's a 50% chance of misclassifying
# a random example we draw from the dataset.
# gini(some_mixing)
#
# Now, we'll look at a dataset with many different labels
# lots_of_mixing = [['Apple'],
# ['Orange'],
# ['Grape'],
# ['Grapefruit'],
# ['Blueberry']]
# This will return 0.8
# gini(lots_of_mixing)
#######
def info_gain(left, right, current_uncertainty):
"""Information Gain.
The uncertainty of the starting node, minus the weighted impurity of
two child nodes.
"""
p = float(len(left)) / (len(left) + len(right))
return current_uncertainty - p * gini(left) - (1 - p) * gini(right)
#######
# Demo:
# Calculate the uncertainy of our training data.
# current_uncertainty = gini(training_data)
#
# How much information do we gain by partioning on 'Green'?
# true_rows, false_rows = partition(training_data, Question(0, 'Green'))
# info_gain(true_rows, false_rows, current_uncertainty)
#
# What about if we partioned on 'Red' instead?
# true_rows, false_rows = partition(training_data, Question(0,'Red'))
# info_gain(true_rows, false_rows, current_uncertainty)
#
# It looks like we learned more using 'Red' (0.37), than 'Green' (0.14).
# Why? Look at the different splits that result, and see which one
# looks more 'unmixed' to you.
# true_rows, false_rows = partition(training_data, Question(0,'Red'))
#
# Here, the true_rows contain only 'Grapes'.
# true_rows
#
# And the false rows contain two types of fruit. Not too bad.
# false_rows
#
# On the other hand, partitioning by Green doesn't help so much.
# true_rows, false_rows = partition(training_data, Question(0,'Green'))
#
# We've isolated one apple in the true rows.
# true_rows
#
# But, the false-rows are badly mixed up.
# false_rows
#######
def find_best_split(rows,columns):
"""Find the best question to ask by iterating over every feature / value
and calculating the information gain."""
best_gain = 0 # keep track of the best information gain
best_question = None # keep train of the feature / value that produced it
current_uncertainty = gini(rows)
n_features = len(rows[0]) - 1 # number of columns
for col in range(n_features): # for each feature
values = set([row[col] for row in rows]) # unique values in the column
for val in values: # for each value
question = Question(col, val,columns)
# try splitting the dataset
true_rows, false_rows = partition(rows, question)
# Skip this split if it doesn't divide the
# dataset.
if len(true_rows) == 0 or len(false_rows) == 0:
continue
# Calculate the information gain from this split
gain = info_gain(true_rows, false_rows, current_uncertainty)
# You actually can use '>' instead of '>=' here
# but I wanted the tree to look a certain way for our
# toy dataset.
if gain >= best_gain:
best_gain, best_question = gain, question
return best_gain, best_question
#######
# Demo:
# Find the best question to ask first for our toy dataset.
# best_gain, best_question = find_best_split(training_data)
# FYI: is color == Red is just as good. See the note in the code above
# where I used '>='.
#######
class Leaf:
"""A Leaf node classifies data.
This holds a dictionary of class (e.g., "Apple") -> number of times
it appears in the rows from the training data that reach this leaf.
"""
def __init__(self, rows):
self.predictions = class_counts(rows)
class Decision_Node:
"""A Decision Node asks a question.
This holds a reference to the question, and to the two child nodes.
"""
def __init__(self,
question,
true_branch,
false_branch):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
def build_tree(rows,columns):
"""Builds the tree.
Rules of recursion: 1) Believe that it works. 2) Start by checking
for the base case (no further information gain). 3) Prepare for
giant stack traces.
"""
# Try partitioing the dataset on each of the unique attribute,
# calculate the information gain,
# and return the question that produces the highest gain.
gain, question = find_best_split(rows,columns)
# Base case: no further info gain
# Since we can ask no further questions,
# we'll return a leaf.
if gain == 0:
return Leaf(rows)
# If we reach here, we have found a useful feature / value
# to partition on.
true_rows, false_rows = partition(rows, question)
# Recursively build the true branch.
true_branch = build_tree(true_rows,columns)
# Recursively build the false branch.
false_branch = build_tree(false_rows,columns)
# Return a Question node.
# This records the best feature / value to ask at this point,
# as well as the branches to follow
# dependingo on the answer.
return Decision_Node(question, true_branch, false_branch)
def print_tree(node, spacing=""):
"""World's most elegant tree printing function."""
# Base case: we've reached a leaf
if isinstance(node, Leaf):
print (spacing + "Predict", node.predictions)
return
# Print the question at this node
print (spacing + str(node.question))
# Call this function recursively on the true branch
print (spacing + '--> True:')
print_tree(node.true_branch, spacing + " ")
# Call this function recursively on the false branch
print (spacing + '--> False:')
print_tree(node.false_branch, spacing + " ")
def classify(row, node):
"""See the 'rules of recursion' above."""
# Base case: we've reached a leaf
if isinstance(node, Leaf):
return node.predictions.keys()
# Decide whether to follow the true-branch or the false-branch.
# Compare the feature / value stored in the node,
# to the example we're considering.
if node.question.match(row):
return classify(row, node.true_branch)
else:
return classify(row, node.false_branch)
#######
# Demo:
# The tree predicts the 1st row of our
# training data is an apple with confidence 1.
# my_tree = build_tree(training_data)
# classify(training_data[0], my_tree)
#######
def print_leaf(counts):
"""A nicer way to print the predictions at a leaf."""
total = sum(counts.values()) * 1.0
probs = {}
for lbl in counts.keys():
probs[lbl] = str(int(counts[lbl] / total * 100)) + "%"
return probs
#######
# Demo:
# Printing that a bit nicer
# print_leaf(classify(training_data[0], my_tree))
#######
#######
# Demo:
# On the second example, the confidence is lower
# print_leaf(classify(training_data[1], my_tree))
#######
if __name__ == '__main__':
my_tree = build_tree(training_data)
print_tree(my_tree)
# Evaluate
testing_data = [
['Green', 3, 'Apple'],
['Yellow', 4, 'Apple'],
['Red', 2, 'Grape'],
['Red', 1, 'Grape'],
['Yellow', 3, 'Lemon'],
]
for row in testing_data:
print ("Actual: %s. Predicted: %s" %
(row[-1], print_leaf(classify(row, my_tree))))
# Next steps
# - add support for missing (or unseen) attributes
# - prune the tree to prevent overfitting
# - add support for regression
| [
"hugobarbosa@Hugos-MacBook-Pro.local"
] | hugobarbosa@Hugos-MacBook-Pro.local |
174b23a0701205e2da87894ca11e6fddd5a246d5 | 38a263d52c52a8834b175e867330d8415dad7384 | /Regression_suite_bigc/api/test_shipping_method_api.py | 39dd52af582926da309c4c1bc141b4d413de60a9 | [] | no_license | testing-sravan/tests-scripts-worked | 4e57c47c4ea9a9bc22e85a8b6d628615907537bd | 33c579918356f6ff1cdfd5635d6eba1d85eba0b7 | refs/heads/master | 2021-01-10T20:39:12.805680 | 2014-09-15T04:54:02 | 2014-09-15T04:54:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,893 | py | from lib.api_lib import *
from lib.shipping_class import *
AUSPOST_AUTH_KEY = "bzfac4efaf7e7e51a4b1dbd7cc76cb31"
@pytest.mark.skipif("True")
def ttest_disable_shipping_reboot(browser, url, email, password):
shipping = ShippingClass(browser)
shipping.go_to_admin(browser, url, email, password)
shipping.set_feature_flag(browser, 'disable', 'ShippingReboot')
def test_create_australian_post_in_control_panel(browser, url, email, password):
shipping = ShippingClass(browser)
shipping.go_to_admin(browser, url, email, password)
shipping.navigate_to_shipping()
shipping.skip_shipping_intro()
if not shipping.is_new_ui(browser):
shipping.setup_store_location_new(shipping.au_store_location)
shipping.add_country_zone(shipping.us_country_zone)
shipping.open_country_zone("United States")
shipping.setup_australia_post()
return
pytest.skip("Not new UI")
shipping.setup_store_location(browser, "Australia","New South Wales","2000")
browser.find_element_by_id('tab1').click()
browser.execute_script("$('#div1 .dropdown-trigger:first').click()")
WebDriverWait(browser, 30).until(lambda s: s.find_element_by_link_text('Edit Methods').is_displayed() and s.find_element_by_link_text('Edit Methods'))
browser.find_element_by_link_text('Edit Methods').click()
shipping.disable_the_shipping_method(browser)
WebDriverWait(browser, 30).until(lambda s: s.find_element_by_xpath('//input[@value="Add a Shipping Method..."]').is_displayed() and s.find_element_by_xpath('//input[@value="Add a Shipping Method..."]'))
browser.find_element_by_xpath('//input[@value="Add a Shipping Method..."]').click()
browser.find_element_by_xpath('//span[text()="Australia Post"]').click()
element= shipping.wait_until_element_present('shipping_australiapost_auth_key', "ID")
element.send_keys(AUSPOST_AUTH_KEY)
element = shipping.wait_until_element_present('Select All', 'LINK')
element.click()
browser.find_element_by_name('SubmitButton1').click()
shipping.verify_and_assert_success_message(browser, "The shipping method has been created successfully.", ".alert-success")
#JSON Payload
def test_get_shipping_methods(auth_token, url, username, state):
api = urlparse.urljoin(url, 'api/v2/shipping/methods')
result = basic_auth_get(api, username, auth_token)
newdata = json.loads(result.text)
state['shipping_method_id'] = newdata[0]['id']
assert newdata[0]['id'] > 0
for item in newdata:
try:
assert newdata[item].find('name').text == "Australia Post"
assert newdata[item].find('method_name').text == "shipping_australiapost"
return
except:
pass
return False
def test_get_shipping_methods_by_id(auth_token, url, username, state):
api = urlparse.urljoin(url, 'api/v2/shipping/methods/' + str(state['shipping_method_id']) + '')
result = basic_auth_get(api, username, auth_token)
newdata = json.loads(result.text)
assert newdata[0]['id'] > 0
for item in newdata:
try:
assert newdata[item].find('name').text == "Australia Post"
assert newdata[item].find('method_name').text == "shipping_australiapost"
return
except:
pass
return False
# XML Payload
def test_get_shipping_methods_xml_payload(auth_token, url, username, state):
api = urlparse.urljoin(url, 'api/v2/shipping/methods')
result = basic_auth_get(api, username, auth_token, payload_format = 'xml')
newdata = etree.fromstring(result.text)
state['shipping_method_id_xml'] = newdata[0].find('id').text
assert newdata[0].find('id').text > 0
for item in newdata:
try:
assert newdata[item].find('name').text == "Australia Post"
assert newdata[item].find('method_name').text == "shipping_australiapost"
return
except:
pass
return False
def test_get_shipping_methods_by_id_xml_payload(auth_token, url, username, state):
api = urlparse.urljoin(url, 'api/v2/shipping/methods/' + str(state['shipping_method_id_xml']) + '')
result = basic_auth_get(api, username, auth_token, payload_format = 'xml')
newdata = etree.fromstring(result.text)
assert newdata[0].find('id').text > 0
for item in newdata:
try:
assert newdata[item].find('name').text == "Australia Post"
assert newdata[item].find('method_name').text == "shipping_australiapost"
return
except:
pass
return False
@pytest.mark.skipif("True")
def ttest_delete_australian_post_in_control_panel(browser, url, email, password):
shipping = ShippingClass(browser)
shipping.go_to_admin(browser, url, email, password)
browser.find_element_by_link_text("Setup & Tools").click()
browser.find_element_by_link_text('Shipping').click()
browser.find_element_by_id('tab1').click()
browser.execute_script("$('#div1 .dropdown-trigger:first').click()")
WebDriverWait(browser, 30).until(lambda s: s.find_element_by_link_text('Edit Methods').is_displayed() and s.find_element_by_link_text('Edit Methods'))
browser.find_element_by_link_text('Edit Methods').click()
browser.execute_script("$('.GridRow').find('td:contains(Australia Post)').parent('tr').children('td:eq(0)').find('input').attr('checked','checked')")
browser.find_element_by_xpath('//input[@value="Delete Selected"]').click()
try:
alert = browser.switch_to_alert()
alert.accept()
except WebDriverException:
browser.execute_script("window.confirm = function(){return true;}");
browser.find_element_by_xpath('//input[@value="Delete Selected"]').click()
shipping.verify_and_assert_success_message(browser, "The selected shipping methods have been deleted successfully.", ".alert-success")
| [
"jenkins@localhost"
] | jenkins@localhost |
71b887b18f04dbb09643ce203964d622b53650b9 | 0a7f44c008390453ad4ae433d653b3b6356d5116 | /projects/Translator/08/translator.py | 7d519f98baed1b6aee76f9bc3c6ae608403ae0d3 | [] | no_license | njnareshjoshi/nand2tetris | 3bd4d47da10b1735727c05a772d01e1b9146df2e | 95461633662723398268952c161b001a08990b35 | refs/heads/main | 2023-07-01T07:06:16.388884 | 2021-08-06T15:08:28 | 2021-08-06T15:08:28 | 354,068,797 | 0 | 0 | null | 2021-04-02T16:20:40 | 2021-04-02T16:07:51 | null | UTF-8 | Python | false | false | 10,915 | py | """
Translator program to convert vm instructions to assembly instructions.
We can run the translator in following ways
python3 translator.py vm_file.vm
python3 translator.py vm_file1.vm,vm_file2.vm
python3 translator.py directory_of_vm_file
If present then Sys.vm file needs to be passed in as first file to the translator program to initiate the execution
By
Naresh Joshi
"""
import os
import random
import re
import sys
segments = {
"local": "LCL",
"argument": "ARG",
"this": "THIS",
"that": "THAT"
}
ADD = [
"@SP",
"AM=M-1",
"D=M",
"M=0",
"A=A-1",
"M=D+M"
]
SUB = [
"@SP",
"AM=M-1",
"D=M",
"M=0",
"A=A-1",
"M=M-D"
]
NEG = [
"@SP",
"A=M-1",
"M=-M"
]
AND = [
"@SP",
"AM=M-1",
"D=M",
"M=0",
"A=A-1",
"M=D&M"
]
OR = [
"@SP",
"AM=M-1",
"D=M",
"M=0",
"A=A-1",
"M=D|M"
]
NOT = [
"@SP",
"A=M-1",
"M=!M"
]
PUSH = [
"@SP",
"A=M",
"M=D",
"@SP",
"M=M+1"
]
POP = [
"@R13",
"M=D",
"@SP",
"AM=M-1",
"D=M",
"M=0",
"@R13",
"A=M",
"M=D"
]
RETURN = [
# frame = R14 = LCL
"@LCL",
"D=M",
"@R14",
"M=D",
# RET = R13 = *(LCL - 5)
"@5",
"A=D-A",
"D=M",
"@R13",
"M=D",
# *ARG = *(SP - 1)
"@SP",
"A=M-1",
"D=M",
"@ARG",
"A=M",
"M=D ",
# SP = ARG + 1
"D=A+1",
"@SP",
"M=D",
# THAT = *(frame - 1)
"@R14",
"AM=M-1",
"D=M",
"@THAT",
"M=D",
# THIS = *(frame - 1)
"@R14",
"AM=M-1",
"D=M",
"@THIS",
"M=D",
# ARG = *(frame - 1)
"@R14",
"AM=M-1",
"D=M",
"@ARG",
"M=D",
# LCL = *(frame - 1)
"@R14",
"AM=M-1",
"D=M",
"@LCL",
"M=D",
# goto RET R13
"@R13",
"A=M",
"0;JMP"
]
def run():
file_path = 'input.vm'
if len(sys.argv) > 1:
file_path = sys.argv[1]
if '.vm' in file_path:
if "," in file_path or " " in file_path:
vm_files = re.split(r"[, ]", file_path)
path_elements = vm_files[0].split('/')
path_elements = path_elements[0:-1]
asm_file = '/'.join(path_elements) + '/' + path_elements[-1] + '.asm'
else:
asm_file = file_path.replace('.vm', '.asm')
vm_files = [file_path]
else:
file_path = file_path[:-1] if file_path[-1] == '/' else file_path
path_elements = file_path.split('/')
path = '/'.join(path_elements)
asm_file = path + '/' + path_elements[-1] + '.asm'
dirpath, dirnames, filenames = next(os.walk(file_path), [[], [], []])
vm_files = filter(lambda x: '.vm' in x, filenames)
vm_files = [path + '/' + vm_file for vm_file in vm_files]
asm_instructions = []
for vm_file in vm_files:
asm_instructions.append(f"// {vm_file}")
print(f"Reading input file {vm_file}")
vm_instructions = de_comment(vm_file)
program = vm_file.replace('.vm', '').split("/")[-1]
asm_instructions += parse_vm_instructions(program, vm_instructions)
print(f'VM Instructions : {[line for line in vm_instructions if line]}')
print(f'Asm Instructions : {asm_instructions}')
print(f"Writing to output file {asm_file}")
generate_asm(asm_file, asm_instructions)
def de_comment(vm_file):
    """Read `vm_file` and return one entry per source line, comments removed.

    Strips `//` line comments and `/* ... */` comments (inline and
    multi-line).  Lines that are blank or entirely comment become empty
    strings, so indices still correspond to original line numbers.
    """
    cleaned = []
    in_block_comment = False
    with open(vm_file) as source:
        for raw_line in source:
            line = raw_line.strip()
            line = re.sub(r'//.*', '', line)      # drop // to end of line
            line = re.sub(r'/\*.*\*/', '', line)  # drop inline /* ... */
            if line.startswith('/*'):
                # Block comment opens here and continues on later lines.
                in_block_comment = True
                line = ''
            elif line.endswith('*/'):
                # Block comment closes on this line.
                in_block_comment = False
                line = ''
            cleaned.append('' if in_block_comment else line.strip())
    return cleaned
def parse_vm_instructions(program, vm_instructions):
    """Translate a list of de-commented VM lines into Hack assembly lines.

    `program` namespaces labels/static symbols.  Blank entries are skipped
    but still counted, so error messages report original line numbers.
    """
    line_count = 0
    asm_instructions = []
    if program == "Sys":
        # Sys.vm carries the bootstrap (SP = 256, then call Sys.init).
        asm_instructions += sys_init(program)
    for instruction in vm_instructions:
        line_count += 1
        if instruction:
            asm_instructions.append(f"// {instruction}")  # keep the VM source as a comment
            asm_instructions += parse_vm_instruction(program, line_count, instruction)
    return asm_instructions
def parse_vm_instruction(program, line_count, instruction):
    """Translate one VM instruction into its Hack assembly lines.

    `program` namespaces labels/static symbols; `line_count` is used for
    error messages and unique comparison labels.

    Raises Exception for any malformed or unknown instruction.  (The
    original indexed tokens[1]/tokens[2] unguarded, so e.g. "push constant"
    crashed with IndexError, and its `len(tokens) <= 0` check could never
    fire because str.split always yields at least one token.)
    """
    # Zero-operand arithmetic/logic commands map straight to canned snippets.
    arithmetic = {"add": ADD, "sub": SUB, "neg": NEG,
                  "and": AND, "or": OR, "not": NOT}
    if instruction in arithmetic:
        return arithmetic[instruction]
    if instruction in ("eq", "gt", "lt"):
        return equality(program, line_count, instruction)
    tokens = instruction.split(" ")
    command = tokens[0]
    # Dispatch on exact arity so missing arguments produce the proper error.
    if len(tokens) == 1 and command == "return":
        return RETURN
    if len(tokens) == 2:
        if command == "label":
            return [f"({program}_{tokens[1]})"]
        if command == "goto":
            return goto(program, tokens[1])
        if command == "if-goto":
            return if_goto(program, tokens[1])
    elif len(tokens) == 3:
        if command == "push":
            return parse_push(program, tokens[1], tokens[2])
        if command == "pop":
            return parse_pop(program, tokens[1], tokens[2])
        if command == "function":
            return function(tokens[1], tokens[2])
        if command == "call":
            return call(program, tokens[1], tokens[2])
    raise Exception(f'Error at line {line_count}, {instruction} is not a valid instruction')
def equality(program, line_count, operation):
    """Emit assembly for eq/gt/lt: pop y, compare x (new top) against it.

    Writes -1 (true) or 0 (false) into the stack top.  The jump uses the
    INVERSE condition to skip over the "write true" sequence, and the skip
    label is made unique via program name and line number.
    """
    inverse_jumps = {
        "eq": "JNE",
        "gt": "JLE",
        "lt": "JGE",
    }
    skip_label = f"{operation}_{program}_{line_count}"
    return [
        "@SP",
        "AM=M-1",
        "D=M",
        "M=0",
        "A=A-1",
        "D=M-D",
        "M=0",                               # default result: false
        f"@{skip_label}",
        f"D;{inverse_jumps[operation]}",     # condition fails -> keep false
        "@SP",
        "A=M",
        "A=A-1",
        "M=-1",                              # condition holds -> true
        f"({skip_label})",
    ]
def parse_push(program, segment, index):
    """Emit assembly for `push <segment> <index>`.

    Loads the addressed value into D, then appends the shared PUSH snippet.
    Raises Exception on an unknown segment or invalid pointer index.
    """
    if segment in ("local", "argument", "this", "that"):
        # Base pointer held in a named register; add the index to it.
        load_value = [f"@{segments[segment]}", "D=M", f"@{index}", "A=D+A", "D=M"]
        return load_value + PUSH
    if segment == "pointer":
        if index not in ("0", "1"):
            raise Exception(f'Error at - push {segment} {index} is not a valid instruction')
        register = "@THIS" if index == "0" else "@THAT"
        return [register, "D=M"] + PUSH
    if segment == "constant":
        # Constants are immediates: load the literal into D.
        return [f"@{index}", "D=A"] + PUSH
    if segment == "static":
        # Statics live in program-namespaced symbols.
        return [f"@{program}_{index}", "D=M"] + PUSH
    if segment == "temp":
        # Temp segment is fixed at R5..R12.
        return ["@R5", "D=A", f"@{index}", "A=D+A", "D=M"] + PUSH
    raise Exception(f'Error at - push {segment} {index} is not a valid instruction')
def parse_pop(program, segment, index):
    """Emit assembly for `pop <segment> <index>`.

    For base-pointer and temp segments the target address is computed into D
    and the shared POP snippet finishes the job; pointer/static targets are
    fixed symbols, so the value is popped into D and stored directly.
    Raises Exception on an unknown segment or invalid pointer index.
    """
    pop_to_d = ["@SP", "AM=M-1", "D=M", "M=0"]
    if segment in ("local", "argument", "this", "that"):
        return [f"@{segments[segment]}", "D=M", f"@{index}", "D=D+A"] + POP
    if segment == "pointer":
        if index not in ("0", "1"):
            raise Exception(f'Error at - pop {segment} {index} is not a valid instruction')
        register = "@THIS" if index == "0" else "@THAT"
        return pop_to_d + [register, "M=D"]
    if segment == "static":
        return pop_to_d + [f"@{program}_{index}", "M=D"]
    if segment == "temp":
        # Temp segment is fixed at R5..R12.
        return ["@R5", "D=A", f"@{index}", "D=D+A"] + POP
    raise Exception(f'Error at - pop {segment} {index} is not a valid instruction')
def goto(program, label):
    """Emit an unconditional jump to the program-scoped label."""
    return [f"@{program}_{label}", "0;JMP"]
def if_goto(program, label):
    """Pop the top of the stack and jump to the label when it is nonzero."""
    return [
        "@SP",
        "AM=M-1",
        "D=M",
        "M=0",
        f"@{program}_{label}",
        "D;JNE",
    ]
def function(f, k):
    """Emit the entry sequence for `function f k`.

    Declares the function label and zeroes k local variables by walking A
    up the stack, then stores the advanced address back into SP.
    """
    prologue = [f"({f})", "@SP", "A=M"]
    zero_one_local = ["M=0", "A=A+1"]
    return prologue + zero_one_local * int(k) + ["D=A", "@SP", "M=D"]
def call(program, f, n):
    """Emit the calling convention for `call f n`.

    Pushes the return address and the caller's LCL/ARG/THIS/THAT, repoints
    ARG at the n arguments already on the stack (SP - n - 5), sets LCL = SP,
    jumps to f, and finally declares the return-address label.

    NOTE(review): the return label uses random.randint(1, 100), so two calls
    in one program can collide — a monotonically increasing counter would be
    safer; confirm before relying on large programs.
    """
    marker = random.randint(1, 100)
    return_label = f"ret_{program}_{marker}"
    asm = [
        # Push the return address (a code address, hence D=A).
        f"@{return_label}",
        "D=A",
        "@SP", "A=M", "M=D", "@SP", "M=M+1",
    ]
    # Save the caller's four segment pointers, one push each.
    for pointer in ("@LCL", "@ARG", "@THIS", "@THAT"):
        asm += [pointer, "D=M", "@SP", "A=M", "M=D", "@SP", "M=M+1"]
    asm += [
        # ARG = SP - n - 5
        f"@{int(n) + 5}",
        "D=A", "@SP", "D=M-D", "@ARG", "M=D",
        # LCL = SP
        "@SP", "D=M", "@LCL", "M=D",
        # goto f
        f"@{f}", "0;JMP",
        # the return address label
        f"({return_label})",
    ]
    return asm
def sys_init(program):
    """Emit the bootstrap: set SP to 256, then call Sys.init.

    Sys.init never returns, so the trailing 0;JMP is effectively a halt
    guard after the generated call sequence.
    """
    set_stack_pointer = ["@256", "D=A", "@SP", "M=D"]
    return set_stack_pointer + call(program, "Sys.init", "0") + ["0;JMP"]
def generate_asm(asm_file, asm_instructions):
    """Write the assembly instructions to `asm_file`, one per line."""
    with open(asm_file, 'w') as output:
        output.writelines(line + '\n' for line in asm_instructions)
run()
| [
"njnareshjoshi@gmail.com"
] | njnareshjoshi@gmail.com |
56ad5604f7e320ff4f9ebd5141cab661611ffddc | cc2a18b152391ffaf437b1b3fe32bd0cec019179 | /setup.py | 0ff918d318adc1d6ecc7628483b3fb0b10ef91b8 | [
"BSD-2-Clause"
] | permissive | theobdt/boundary_tracing | 7da71a69c8f18ba32df3f18a6a7e8f54cf2b6565 | 615883b620f119c85c944ff2a5073619a8e32024 | refs/heads/master | 2021-07-09T12:30:17.718750 | 2020-08-31T09:51:30 | 2020-08-31T09:51:30 | 161,060,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | """Compare a python and a cython version of the boundary tracing algorithm
$ python setup.py build_ext --inplace
"""
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(ext_modules=cythonize('bt_cy.pyx'),
include_dirs=[numpy.get_include()])
| [
"theo.bodrito@hotmail.fr"
] | theo.bodrito@hotmail.fr |
ae79db95820afa78fc580aa49376922c0c238952 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/ms_data/datatypes/facets/float_pkg/float_min_inclusive005_xsd/__init__.py | 2aa44ebe2b8660a5c90d5e944cdea45e6804713b | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 180 | py | from output.models.ms_data.datatypes.facets.float_pkg.float_min_inclusive005_xsd.float_min_inclusive005 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
e189976241518966256753784a9ed61e7a1eaae9 | a4bf1ffc22f9124818f20a5c737ecb1b73ef4ff2 | /navi/gui/ui/analyse_threshold_dialog.py | ccc8cd1b229a59eff8cff04d3b9410b4343b420d | [] | no_license | ivancrneto/tcc | 06d9b8e706a6706083994c51503f6acc725b7cb3 | 58a25c14322a5dc30d22bd7bea8b1447ae89e33d | refs/heads/master | 2021-01-20T10:42:51.058583 | 2011-12-16T18:50:42 | 2011-12-16T18:50:42 | 1,485,093 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'analyse_threshold_dialog.ui'
#
# Created: Mon Oct 17 21:32:14 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_AnalyseThresholdDialog(object):
def setupUi(self, AnalyseThresholdDialog):
AnalyseThresholdDialog.setObjectName(_fromUtf8("AnalyseThresholdDialog"))
AnalyseThresholdDialog.resize(400, 127)
self.buttonBox = QtGui.QDialogButtonBox(AnalyseThresholdDialog)
self.buttonBox.setGeometry(QtCore.QRect(50, 90, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayoutWidget = QtGui.QWidget(AnalyseThresholdDialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 79))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, -1, -1, 0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.distance_radiobutton = QtGui.QRadioButton(self.verticalLayoutWidget)
self.distance_radiobutton.setObjectName(_fromUtf8("distance_radiobutton"))
self.horizontalLayout.addWidget(self.distance_radiobutton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, -1, -1, 0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem1 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.largestcluster_radiobutton = QtGui.QRadioButton(self.verticalLayoutWidget)
self.largestcluster_radiobutton.setObjectName(_fromUtf8("largestcluster_radiobutton"))
self.horizontalLayout_2.addWidget(self.largestcluster_radiobutton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.retranslateUi(AnalyseThresholdDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), AnalyseThresholdDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), AnalyseThresholdDialog.reject)
QtCore.QMetaObject.connectSlotsByName(AnalyseThresholdDialog)
def retranslateUi(self, AnalyseThresholdDialog):
AnalyseThresholdDialog.setWindowTitle(QtGui.QApplication.translate("AnalyseThresholdDialog", "Analyse Threshold", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("AnalyseThresholdDialog", "Choose Method:", None, QtGui.QApplication.UnicodeUTF8))
self.distance_radiobutton.setText(QtGui.QApplication.translate("AnalyseThresholdDialog", "Distance", None, QtGui.QApplication.UnicodeUTF8))
self.largestcluster_radiobutton.setText(QtGui.QApplication.translate("AnalyseThresholdDialog", "Largest Cluster", None, QtGui.QApplication.UnicodeUTF8))
| [
"ivan.cr.neto@gmail.com"
] | ivan.cr.neto@gmail.com |
9fa7e5be11a4f14d83f3b8f814d5be4788b10a96 | 93b8e319817b9e317b25a469bc6e4ee60ac780cb | /stonks_COMPLETE/solve.py | cc190e983d7e4494a539fb687319b4408fe288ca | [] | no_license | cyberchaud/ctf_picoctf2021 | 08de27d116b62929c7b71b72a32cf4c3e4c1b3b2 | 327b7a9391eb1f34ce65184a19b87d6db28dbf0d | refs/heads/master | 2023-05-09T13:19:15.315119 | 2021-06-03T00:23:49 | 2021-06-03T00:23:49 | 372,087,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | revstring = 'ocip{FTC0l_I4_t5m_ll0m_y_y3n58a025e3ÿ'
n = 4
revlist = [revstring[i:i+n] for i in range(0, len(revstring), n)]
chunks = [chunk[::-1] for chunk in revlist]
print(''.join(chunks))
| [
"cyberchaud@gmail.com"
] | cyberchaud@gmail.com |
b8819c397f4ac71efe866b205c79d513ff737cc9 | 1c74e301621022e1afa006d8cede10dc8f5100a1 | /ex40a.py | 3311b41306a2694f8aed9f771cd9cb91dee6bcbb | [] | no_license | JohnnyB95/LearnPython3 | ab9cd8d0f257936c77eeec594a084954386241aa | 7c20e30ba2908193175c8ae23007d92485b5c56a | refs/heads/master | 2020-04-12T16:11:33.317910 | 2019-01-06T23:51:03 | 2019-01-06T23:51:03 | 162,605,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py |
# VERSION 1
# import ex40_mystuff #import library
# ex40_mystuff.apple() #execute function apple()
# print(ex40_mystuff.mystuff['apple']) #access an item in a list in module
# print(ex40_mystuff.tangerine) #print variable tangerine in module
# VERSION 2
class MyStuff(object):
    """Tiny demo class: one string attribute plus one printing method."""
    def __init__(self):
        # Set in the constructor; read below via thing.tangerine.
        self.tangerine = "And now a thousand years between"
    def apple(self):
        print("I AM CLASSY APPLES!")
thing = MyStuff()  # instantiate the class
thing.apple()  # prints the greeting
print(thing.tangerine)  # prints the attribute set in __init__
| [
"Johnny.Benitez@gmail.com"
] | Johnny.Benitez@gmail.com |
25e8f520ef46a65b0ef6c388729b48c84eba5852 | a6e2e9c9c79dd3e067d3c92402e56b541abed22b | /covid_chatbot/_data_refresh.py | 0d0301b56b9b773396d90938ffa086f99236f8fe | [] | no_license | kavurisrikanth/COVID_chatbot_Telegram_Implemented | 857b112ddbc215e75ef8adc1b73902a2267d0c62 | 90edfa9847ad81ed8a965448470996a57e41b520 | refs/heads/master | 2023-03-17T06:10:33.042674 | 2021-01-24T19:16:55 | 2021-01-24T19:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | import pandas as pd
import requests
import json
url1 = f'https://api.covid19india.org/data.json'
url2 = f'https://api.covid19india.org/v2/state_district_wise.json'
response1 = requests.get(url1)
response1_dict = json.loads(response1.text)
response2 = requests.get(url2)
response2_dict = json.loads(response2.text)
final_state_list = []
final_district_list = []
valid_distrcit_key = ['district','confirmed']
valid_state_key = ['active','recovered','state','deaths','confirmed']
for state_dict in con_dic['statewise']:
temp = (list(map(lambda key,value : key+"#"+value if key in valid_state_key else None , state_dict.keys(),state_dict.values() )))
final_state_list.append(" ".join(list(filter(None, temp))) )
for response in response2_dict:
state = response['state']
for district in response['districtData']:
temp = (list(map(lambda key,value : key+"#"+str(value) if key in valid_distrcit_key else None , district.keys(),district.values())))
final_district_list.append(" ".join(list(filter(None, temp))))
state_dataframe = pd.DataFrame({'Question' : final_state_list, 'Answer' : final_state_list})
district_dataframe = pd.DataFrame({'Question':final_district_list,'Answer':final_district_list})
final_dataframe = pd.concat([d1,d2])
final_dataframe.to_excel('Data.xlsx',index=False); | [
"noreply@github.com"
] | kavurisrikanth.noreply@github.com |
5d9ce4429edd0489a94eab7a8b153785da989b4b | 64f96027c700c13c621affd0574cfc56e57394ed | /courseinfo/migrations/0004_semseter_data.py | 97959abb651a07630d971fff986b9fe4d04c0d25 | [] | no_license | JourneyWoo/UniversityEducationSystem | d251cffb6307aeabd598c64c4ac68f314d9ee89a | 7ada09b7354f915d7269714ecf15000f8a4b9dff | refs/heads/master | 2020-04-25T00:19:28.573609 | 2019-04-21T07:30:03 | 2019-04-21T07:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | # Generated by Django 2.1.1 on 2019-03-17 19:08
from django.db import migrations
SEMESTERS = [
{
"semester_name": "2019 - Summer",
},
{
"semester_name": "2019 - Fall",
},
{
"semester_name": "2020 - Spring",
},
{
"semester_name": "2020 - Summer",
},
{
"semester_name": "2020 - Fall",
},
]
def add_semester_data(apps, schema_editor):
    """Forward data migration: insert one Semester row per SEMESTERS entry."""
    # Use the historical model via apps.get_model, not a direct import,
    # so the migration stays valid as the model evolves.
    semester_model_class = apps.get_model('courseinfo', 'Semester')
    for semester in SEMESTERS:
        semester_object = semester_model_class.objects.create(
            semester_name=semester['semester_name'],
        )
def remove_semester_data(apps, schema_editor):
    """Reverse data migration: delete the Semester rows named in SEMESTERS."""
    semester_model_class = apps.get_model('courseinfo', 'Semester')
    for semester in SEMESTERS:
        # .get() raises DoesNotExist if the row was already removed.
        semester_object = semester_model_class.objects.get(
            semester_name=semester['semester_name']
        )
        semester_object.delete()
class Migration(migrations.Migration):
    """Reversible data migration that seeds the Semester table."""
    dependencies = [
        ('courseinfo', '0003_auto_20190317_1838'),
    ]
    operations = [
        # RunPython(forward, reverse) makes the migration reversible.
        migrations.RunPython(
            add_semester_data,
            remove_semester_data
        )
    ]
| [
"zw35@illinois.edu"
] | zw35@illinois.edu |
2ab2d7b03e3c836115023be23e59321e084e65e9 | 6fc07cd05bfe0dcf9d1f5c6d040e5391e53ba65e | /gCloud/google-cloud-sdk/lib/surface/compute/target_pools/get_health.py | 86b76ea44bdb0780e9f23e45177a4310856b5f74 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/MeedMems | a43ca33704da52cb0bd989ed8d45c018190313bf | 69fa61c1a326159be4c30a0506869b6ad15525cf | refs/heads/master | 2022-11-23T16:23:50.642724 | 2016-04-26T00:17:24 | 2016-04-26T00:17:24 | 282,374,101 | 0 | 0 | null | 2020-07-25T05:00:53 | 2020-07-25T05:00:52 | null | UTF-8 | Python | false | false | 3,566 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for getting a target pool's health."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.command_lib.compute import flags
class GetHealth(base_classes.BaseCommand):
"""Get the health of instances in a target pool."""
@staticmethod
def Args(parser):
base_classes.AddFieldsFlag(parser, 'targetPoolInstanceHealth')
flags.AddRegionFlag(
parser,
resource_type='target pool',
operation_type='get health information for')
parser.add_argument(
'name',
completion_resource='targetPools',
help='The name of the target pool.')
@property
def service(self):
return self.compute.targetPools
@property
def resource_type(self):
return 'targetPoolInstanceHealth'
def GetTargetPool(self):
"""Fetches the target pool resource."""
errors = []
objects = list(request_helper.MakeRequests(
requests=[(self.service,
'Get',
self.messages.ComputeTargetPoolsGetRequest(
project=self.project,
region=self.target_pool_ref.region,
targetPool=self.target_pool_ref.Name()))],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch target pool:')
return objects[0]
def Run(self, args):
"""Returns a list of TargetPoolInstanceHealth objects."""
self.target_pool_ref = self.CreateRegionalReference(
args.name, args.region, resource_type='targetPools')
target_pool = self.GetTargetPool()
instances = target_pool.instances
# If the target pool has no instances, we should return an empty
# list.
if not instances:
return
requests = []
for instance in instances:
request_message = self.messages.ComputeTargetPoolsGetHealthRequest(
instanceReference=self.messages.InstanceReference(
instance=instance),
project=self.project,
region=self.target_pool_ref.region,
targetPool=self.target_pool_ref.Name())
requests.append((self.service, 'GetHealth', request_message))
errors = []
resources = request_helper.MakeRequests(
requests=requests,
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None)
for resource in resources:
yield resource
if errors:
utils.RaiseToolException(
errors,
error_message='Could not get health for some targets:')
GetHealth.detailed_help = {
'brief': 'Get the health of instances in a target pool',
'DESCRIPTION': """\
*{command}* displays the health of instances in a target pool.
""",
}
| [
"89445.seenivasan@students.itu.edu"
] | 89445.seenivasan@students.itu.edu |
563677ad56806cad1bc1fd74bdc8d5c4027bd6ef | cf8c8118c16e7f38a190bc4a96b28d8c4b89ff58 | /tests/test_pipeline/test_test_augment_utils.py | 26bf00681f12374676096fa9d0004b46994c3e9c | [
"Apache-2.0"
] | permissive | encore-zhou/mmdetection3d | a7a5e21c0ec8f99843301b89b1e9b079f0a983c5 | c3d7f97baecd1beff1e9757d51523778c38f118b | refs/heads/master | 2023-03-21T17:03:22.723259 | 2020-09-22T07:08:45 | 2020-09-22T07:08:45 | 283,723,083 | 1 | 1 | Apache-2.0 | 2020-09-22T07:08:46 | 2020-07-30T09:07:38 | Python | UTF-8 | Python | false | false | 2,104 | py | import numpy as np
import torch
from mmdet3d.datasets.pipelines import MultiScaleFlipAug3D
def test_multi_scale_flip_aug_3D():
np.random.seed(0)
transforms = [{
'type': 'GlobalRotScaleTrans',
'rot_range': [-0.1, 0.1],
'scale_ratio_range': [0.9, 1.1],
'translation_std': [0, 0, 0]
}, {
'type': 'RandomFlip3D',
'sync_2d': False,
'flip_ratio_bev_horizontal': 0.5
}, {
'type': 'IndoorPointSample',
'num_points': 5
}, {
'type':
'DefaultFormatBundle3D',
'class_names': ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
'dresser', 'night_stand', 'bookshelf', 'bathtub'),
'with_label':
False
}, {
'type': 'Collect3D',
'keys': ['points']
}]
img_scale = (1333, 800)
pts_scale_ratio = 1
multi_scale_flip_aug_3D = MultiScaleFlipAug3D(transforms, img_scale,
pts_scale_ratio)
pts_file_name = 'tests/data/sunrgbd/points/000001.bin'
sample_idx = 4
file_name = 'tests/data/sunrgbd/points/000001.bin'
bbox3d_fields = []
points = np.array([[0.20397437, 1.4267826, -1.0503972, 0.16195858],
[-2.2095256, 3.3159535, -0.7706928, 0.4416629],
[1.5090443, 3.2764456, -1.1913797, 0.02097607],
[-1.373904, 3.8711405, 0.8524302, 2.064786],
[-1.8139812, 3.538856, -1.0056694, 0.20668638]])
results = dict(
points=points,
pts_file_name=pts_file_name,
sample_idx=sample_idx,
file_name=file_name,
bbox3d_fields=bbox3d_fields)
results = multi_scale_flip_aug_3D(results)
expected_points = torch.tensor(
[[-2.2095, 3.3160, -0.7707, 0.4417], [-1.3739, 3.8711, 0.8524, 2.0648],
[-1.8140, 3.5389, -1.0057, 0.2067], [0.2040, 1.4268, -1.0504, 0.1620],
[1.5090, 3.2764, -1.1914, 0.0210]],
dtype=torch.float64)
assert torch.allclose(
results['points'][0]._data, expected_points, atol=1e-4)
| [
"noreply@github.com"
] | encore-zhou.noreply@github.com |
def classifica_triangulo(l1, l2, l3):
    """Classify a triangle by its side lengths l1, l2, l3.

    Returns "equilátero" (all sides equal), "isóceles" (exactly two equal)
    or "escaleno" (all different).
    """
    if l1 == l2 == l3:
        return "equilátero"
    # BUG FIX: the original never compared l2 with l3, so e.g. (2, 3, 3)
    # was wrongly classified as "escaleno".
    if l1 == l2 or l1 == l3 or l2 == l3:
        return "isóceles"
    return "escaleno"
"you@example.com"
] | you@example.com |
8b315feae897c34875ac54d7346c9f3fa36a7ae9 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_28421.py | e88056b6d7429d3371507942c15ea0699c845088 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | # Python yagmail attachment not working
pip3 install -U yagmail
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
12b57b06ca8399fdc27fa38e011de43de67a8a30 | 94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af | /559.py | 36b10d1ab3ede26c95a47dba50b8ef0aa9c74592 | [] | no_license | huosan0123/leetcode-py | f1ec8226bae732369d4e1989b99ab0ba4b4061c4 | 22794e5e80f534c41ff81eb40072acaa1346a75c | refs/heads/master | 2021-01-25T11:48:17.365118 | 2019-09-12T15:45:34 | 2019-09-12T15:45:34 | 93,934,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | """
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        :type root: Node (with a .children list) or None
        :rtype: int
        """
        if not root:
            return 0
        child_depths = [self.maxDepth(child) for child in root.children]
        # A leaf has an empty children list, so its depth is just 1.
        return 1 + (max(child_depths) if child_depths else 0)
| [
"noreply@github.com"
] | huosan0123.noreply@github.com |
abc71170bebc5eeabbc454115b8a9dcc7b9a858e | 7db0883137d119565540f2d071638c4016f39213 | /Note/Spider/2.28-selenium/selenium+phantomjs学习/selenium_study3.py | 452662361c9123151650e6ef605f1db84e6b3d4e | [] | no_license | PhilHuang-d/python--- | cf22a4cc00d4beaaf75ef7ca87a4c5d31a9d5efe | 152c18f51838ce652b79a0cd24765b1a1c237eee | refs/heads/master | 2021-09-13T05:32:53.754865 | 2018-04-25T13:36:40 | 2018-04-25T13:36:40 | 108,812,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,975 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
__author__ = 'Terry'
USER_AGENTS = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'
]
phantomjs_driver_path = 'D:/phantomjs/bin/phantomjs.exe'
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
# 引入配置对象DesiredCapabilities
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def save_vcode(driver, element):
    """Screenshot the page and crop `element` (the captcha) out to vcode.png.

    element.location/element.size are page coordinates, which line up with
    the full-page screenshot PhantomJS produces.
    """
    # Take a full-page screenshot first; the captcha is cropped from it.
    driver.get_screenshot_as_file('screenshot.png')
    left = int(element.location['x'])
    top = int(element.location['y'])
    right = int(element.location['x'] + element.size['width'])
    bottom = int(element.location['y'] + element.size['height'])
    # Crop the captcha region with PIL and save it for the user to read.
    from PIL import Image
    im = Image.open('screenshot.png')
    im = im.crop((left, top, right, bottom))
    im.save('vcode.png')
dcap = dict(DesiredCapabilities.PHANTOMJS)
#从USER_AGENTS列表中随机选一个浏览器头,伪装浏览器
# dcap["phantomjs.page.settings.userAgent"] = (random.choice(USER_AGENTS))
dcap["phantomjs.page.settings.userAgent"] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'
# 不载入图片,爬页面速度会快很多
# dcap["phantomjs.page.settings.loadImages"] = False
#打开带配置信息的phantomJS浏览器
driver = webdriver.PhantomJS(phantomjs_driver_path, desired_capabilities=dcap)
# 设置10秒页面超时返回,类似于requests.get()的timeout选项,driver.get()没有timeout选项
# 以前遇到过driver.get(url)一直不返回,但也不报错的问题,这时程序会卡住,设置超时选项能解决这个问题。
driver.set_page_load_timeout(10)
# 设置10秒脚本超时时间
driver.set_script_timeout(10)
# 设置屏幕尺寸
driver.set_window_size(1366, 768)
# 访问百度
driver.get('https://www.baidu.com')
WebDriverWait(driver, 30, 1).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u1"]/a[7]')))
print(driver.title)
time.sleep(1)
# 点击 弹出登录的窗口
login_index = driver.find_element_by_xpath('//*[@id="u1"]/a[7]')
login_index.click()
time.sleep(.5)
# 选择 用户名登录
login_user_and_pwd = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__footerULoginBtn"]')
login_user_and_pwd.click()
time.sleep(.5)
# 用户名元素
user = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__userName"]')
user.clear()
user.send_keys('用户名')
# 密码元素
pwd = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__password"]')
pwd.clear()
pwd.send_keys('密码')
while True:
# 换下一张 验证码
next_vcode = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__verifyCodeChange"]')
next_vcode.click()
# 验证码图片的元素
vcode_img = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__verifyCodeImg"]')
save_vcode(driver, vcode_img)
# 输入验证码
vcode_input = input('请输入验证码:')
vcode = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__verifyCode"]')
# 在页面上填写验证码
vcode.send_keys(vcode_input)
# 登录
login = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__submit"]')
login.click()
time.sleep(1)
try:
# 判断是否登录成功
user_name = driver.find_element_by_xpath('//*[@id="s_username_top"]/span')
print("登录名为:"+user_name.text)
print("登录成功:")
break
except:
time.sleep(.3)
driver.get('http://index.baidu.com/?tpl=trend&word=%BB%C6%BD%F0')
# 需要手动退出driver
driver.quit() | [
"32259595+PhilHuang-d@users.noreply.github.com"
] | 32259595+PhilHuang-d@users.noreply.github.com |
bfc54acf5cfe74cdca27af81c9e898c7ad2005c3 | 37a67a9531f8c32739ae70fc5be55b4c6acae60d | /multinomial.py | 61b5f29f47a1620d45a703bb42a374da45ab4e3d | [
"MIT"
] | permissive | gongkecun/distribution-is-all-you-need | da1b1b363df3343e0753e55564311e323cd6c890 | 93ae5ed7fa63607474d61723d2d28d1a4b3c653a | refs/heads/master | 2020-08-27T10:03:21.144561 | 2019-10-24T15:04:43 | 2019-10-24T15:04:43 | 217,326,807 | 0 | 0 | MIT | 2019-10-24T15:02:48 | 2019-10-24T15:02:48 | null | UTF-8 | Python | false | false | 1,217 | py | """
Code by Tae-Hwan Hung(@graykode)
https://en.wikipedia.org/wiki/Multinomial_distribution
3-Class Example
"""
import numpy as np
from matplotlib import pyplot as plt
import operator as op
from functools import reduce
def factorial(n):
    """Return n! computed iteratively (1 for n <= 0, matching the original)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def const(n, a, b, c):
    """Return the multinomial coefficient n! / (a! * b! * c!).

    Precondition (asserted): a + b + c == n.  Result is a float because of
    the true division.
    """
    assert a + b + c == n
    numer = factorial(n)
    denom = factorial(a) * factorial(b) * factorial(c)
    return numer / denom
def multinomial(n):
    """Enumerate 3-class multinomial coefficients for n trials.

    For every triple (a, b, c) with 1 <= a <= b <= c and a + b + c == n,
    computes n! / (a! b! c!).

    :param n: total number of trials split across the three classes
    :returns: (x, y, mean, std) — y are the coefficients, x their indices
    """
    # collect all a,b,c with a+b+c == n and a <= b <= c (order-insensitive)
    ls = []
    for i in range(1, n + 1):
        for j in range(i, n + 1):
            for k in range(j, n + 1):
                if i + j + k == n:
                    ls.append([i, j, k])
    y = [const(n, l[0], l[1], l[2]) for l in ls]
    x = np.arange(len(y))
    return x, y, np.mean(y), np.std(y)
for n_experiment in [20, 21, 22]:
x, y, u, s = multinomial(n_experiment)
plt.scatter(x, y, label=r'$trial=%d$' % (n_experiment))
plt.legend()
plt.savefig('graph/multinomial.png')
plt.show()
| [
"nlkey2022@gmail.com"
] | nlkey2022@gmail.com |
92ec23a0074b50b4007c2cf6742887a1c9e52911 | 3a8293c7f766c75c6bbb5858497566594fd049d1 | /7.数据类型详解/9.元组-生成器与 yield 关键字.py | 0bd9f11d4144ecfb40ea60fd286cd1091c870c29 | [] | no_license | 18303585361/Zero | fe986d966096989e752fa90cf7c913d0b07ca4d8 | f96ddcd26180afc724575e1b33fcb26468c34118 | refs/heads/master | 2021-02-14T05:15:18.080310 | 2020-05-05T15:03:03 | 2020-05-05T15:03:03 | 244,773,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | # yield 关键字
'''
yield 关键字使用在生成器函数中
+ yield 和函数中的 return 有点像
+ 共同点:执行到这个关键字后会把结果返回
+ 不同点:
+ return 会把结果返回,并结束当前函数的调用
+ yield 会返回结果,并记住当前代码执行的位置,下一次调用时,会从上一次离开的位置继续向下执行
'''
# 定义一个普通函数
# def hello():
# print('hello 1')
# return 1 # return 在函数中会把结果返回,并且结束当前的函数,后面的代码不再执行
# print('world 2')
# return 2
# hello()
# hello()
# 使用 yield 定义一个 生成器函数
# def hello():
# print('hello 1')
# yield 1 # return 在函数中会把结果返回,并且结束当前的函数,后面的代码不再执行
# print('world 2')
# yield 2
# print('haha 3')
# yield 3
# 调用生成器函数,返回一个迭代器
# res = hello()
# 使用生成器返回的迭代器
# r = next(res)
# print(r)
# r = next(res)
# print(r)
# 使用 list 函数去调用生成器返回的迭代器时,会把迭代器的返回结果,作为容器的元素
# r = list(res)
# print(r)
# 使用生成器返回的迭代器
# for i in res:
# print(i)
'''
上面的生成器函数调用时的过程
首先,调用了生成器函数,返回了一个迭代器
1.第一次去调用迭代器:
走到当前的生成器函数中,遇到了 yield 1,把 1 返回,并且记住了当前的执行状态(位置),暂停了执行,等待下一次的调用
2.第二次去调用迭代器:
从上一次遇到的 yield 位置开始执行,遇到了 yield 2,把 2 返回,并记住状态,暂停执行,等待下一次的调用
3.第三次去调用迭代器
从上一次遇到的 yield 位置开始执行,遇到了 yield 3,把 3 返回,并记住了状态,暂停执行,等待下一次调用
如果在最后又调用了迭代器,那么会从上一次的 yield 位置开始,结果后面没有了,直接就超出范围,报错。
'''
| [
"58509299+18303585361@users.noreply.github.com"
] | 58509299+18303585361@users.noreply.github.com |
d88ff0811eb5f461c537ac6d26faeed75c32d583 | c23a6efb27b02f4656d0c4782ba2377879dffb4e | /python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_vgg.py | e83afeed2f72635a40aa2ac21dc0c8611c309de4 | [
"Apache-2.0"
] | permissive | wanglei828/Paddle | 785ff6027bd445eae7aa2c8fbd4179e9607be193 | 177324b0674d9f325823a02336d32319f4925601 | refs/heads/develop | 2021-04-15T08:41:39.885729 | 2018-05-11T21:52:32 | 2018-05-11T21:52:32 | 126,545,587 | 0 | 0 | Apache-2.0 | 2018-05-04T04:02:46 | 2018-03-23T22:39:59 | C++ | UTF-8 | Python | false | false | 4,242 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import numpy
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=4096, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=4096, act=None)
predict = fluid.layers.fc(input=fc2, size=10, act='softmax')
return predict
def inference_network():
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
predict = vgg16_bn_drop(images)
return predict
def train_network():
predict = inference_network()
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=predict, label=label)
return avg_cost, accuracy
def train(use_cuda, save_path):
BATCH_SIZE = 128
EPOCH_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
def event_handler(event):
if isinstance(event, fluid.EndIteration):
if (event.batch_id % 10) == 0:
avg_cost, accuracy = trainer.test(reader=test_reader)
print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format(
event.batch_id + 1, avg_cost, accuracy))
if accuracy > 0.01: # Low threshold for speeding up CI
trainer.params.save(save_path)
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_network,
optimizer=fluid.optimizer.Adam(learning_rate=0.001),
place=place,
event_handler=event_handler)
trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)
def infer(use_cuda, save_path):
params = fluid.Params(save_path)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(inference_network, params, place=place)
# The input's dimension of conv should be 4-D or 5-D.
# Use normilized image pixels as input data, which should be in the range
# [0, 1.0].
tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
results = inferencer.infer({'pixel': tensor_img})
print("infer results: ", results)
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
save_path = "image_classification_vgg.inference.model"
train(use_cuda, save_path)
infer(use_cuda, save_path)
if __name__ == '__main__':
for use_cuda in (False, True):
main(use_cuda=use_cuda)
| [
"noreply@github.com"
] | wanglei828.noreply@github.com |
121e800c3ac86e4da98ebf88fb6d4c414cff0c2b | 0328188ecef8313c564e0aca2a8c30bc865a41c7 | /test_fixture.py | 05ba5fcfb63d3a9728f1d1712076df9bdc95102b | [] | no_license | manitagahlayan/PyTest | 2c69baaf29064c160738f6666a97a96bf3b19c2f | bd5ad05b6476abeb0dc445c48a2daae125644250 | refs/heads/master | 2020-12-30T10:50:48.828549 | 2017-08-07T06:35:23 | 2017-08-07T06:35:23 | 98,830,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 30 15:06:38 2017
@author: manitagahlayan
"""
import pytest
@pytest.fixture
def i_set_things_up():
projector = {'status': 'doing fine',
'flashing': "dicts can't flash!"}
return projector
def test_fixture_contents(i_set_things_up):
assert i_set_things_up['status'] == 'doing fine'
| [
"manitagahlayan87@gmail.com"
] | manitagahlayan87@gmail.com |
0800900797c3fab99952243dd452d66397117afe | 3ca58506042b4023fb6c16df5aec5a34b0a82c73 | /manage.py | 4b7df74437dd65d65c75eb020d57e8253cb5ccb5 | [] | no_license | jeanpzanatta/PatchApp | ddd8658813f2318a3108f0720e182468629edd0d | b334556fa1b2efa6b0a0d4acc173c2760b9b73a7 | refs/heads/master | 2022-05-27T18:56:03.163466 | 2019-07-03T22:31:21 | 2019-07-03T22:31:21 | 172,410,275 | 0 | 0 | null | 2022-04-22T21:05:05 | 2019-02-25T00:59:09 | Python | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python3
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PatchApp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"jeanpedrozanatta@gmail.com"
] | jeanpedrozanatta@gmail.com |
779939847b2d66cd1cb2763b157c70d4e565b6d8 | c68fd1e7827fc702e096ee94449e8cd2b845b54f | /utils/re_ranking_feature.py | d602da3ca11decd3c65367e0ec8c11ab5e0ac314 | [] | no_license | ChengpengChen/Reid-mask | b61ddccb7dc9b0b19fa19e5b8567b354b3cbec8a | 03e0c12b91b4c6280388c1b36e2014b5699115fe | refs/heads/master | 2020-03-28T12:04:00.690557 | 2018-09-11T07:31:36 | 2018-09-11T07:31:36 | 148,267,700 | 9 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,558 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 14:46:56 2017
@author: luohao
"""
"""
CVPR2017 paper:Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
"""
"""
API
probFea: all feature vectors of the query set, shape = (image_size, feature_dim)
galFea: all feature vectors of the gallery set, shape = (image_size, feature_dim)
k1,k2,lambda: parameters, the original paper is (k1=20,k2=6,lambda=0.3)
MemorySave: set to 'True' when using MemorySave mode
Minibatch: avaliable when 'MemorySave' is 'True'
"""
import numpy as np
from scipy.spatial.distance import cdist
def re_ranking(probFea, galFea, k1, k2, lambda_value, MemorySave=False, Minibatch=2000):
query_num = probFea.shape[0]
all_num = query_num + galFea.shape[0]
feat = np.append(probFea, galFea, axis=0)
feat = feat.astype(np.float16)
print('computing original distance')
if MemorySave:
original_dist = np.zeros(shape=[all_num, all_num], dtype=np.float16)
i = 0
while True:
it = i + Minibatch
if it < np.shape(feat)[0]:
original_dist[i:it, ] = np.power(cdist(feat[i:it, ], feat), 2).astype(np.float16)
else:
original_dist[i:, :] = np.power(cdist(feat[i:, ], feat), 2).astype(np.float16)
break
i = it
else:
original_dist = cdist(feat, feat).astype(np.float16)
original_dist = np.power(original_dist, 2).astype(np.float16)
del feat
gallery_num = original_dist.shape[0]
original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
V = np.zeros_like(original_dist).astype(np.float16)
initial_rank = np.argsort(original_dist).astype(np.int32)
print('starting re_ranking')
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2)) + 1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
original_dist = original_dist[:query_num, ]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float16)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
for i in range(query_num):
temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist
| [
"ChengpengChen@github.com"
] | ChengpengChen@github.com |
92059b88931811604299e929a3b325fe2aae275b | 8ebf4276a30d56e8cba1458940fe258bfb8613ec | /eith/interpreter/errors.py | 4e267f1309d0794628f2982bfc7bfbb62a902615 | [
"MIT"
] | permissive | mufeedvh/jam0001 | a71bcd467d714f8b41874c442587232a98902731 | a730c739daf2c99870d4873ac9b2d65ede9f5bad | refs/heads/main | 2023-07-19T01:54:44.898023 | 2021-08-22T18:45:10 | 2021-08-22T18:45:10 | 398,377,041 | 1 | 0 | null | 2021-08-20T19:09:51 | 2021-08-20T19:09:51 | null | UTF-8 | Python | false | false | 97 | py | class NotCommentException(Exception):
"""Exception if line is not a comment.
"""
pass | [
"noreply@github.com"
] | mufeedvh.noreply@github.com |
9bcd652c815dcdaf6b8d072681a0b441049ad36c | 4fe66c71db6d2338f42cc811f4ebcb0d83830c90 | /main.py | 903206e9a895d188d83abec61ebedbbd8ef042a7 | [] | no_license | srivatsa96/image-captioning | 4ab32d91a920ea4c0a6ca532ba75e897d9540117 | 969641819edefbd110820d8b9e57bde12b05c0dd | refs/heads/master | 2021-08-26T09:13:41.412566 | 2017-11-22T20:35:51 | 2017-11-22T20:35:51 | 108,882,691 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | import tensorflow as tf
import numpy as np
from model.ImageCaptioning import ImageCaptioning
from operations.train import TrainCaptionModel
from configuration.config import ModelConfig, TrainingConfig
## Load Configuration
print("Loading Model Configuration")
model_config = ModelConfig()
train_config = TrainingConfig()
## Create Model
print("Setting up model")
model = ImageCaptioning(model_config,"train")
print("Model Loaded")
print("Training Model")
train = TrainCaptionModel(model,train_config)
train.train_model(restore_model=False)
| [
"srivatsasinha@gmail.com"
] | srivatsasinha@gmail.com |
cd321ea545950a4fdef7d3d855a59651b053ef01 | a29e436db2fb4569ffc896c836bfd6f28b9f6b16 | /2020/2020_17.py | f10f4a909b9c19fb372163ef327dd9916f324f56 | [] | no_license | sjshide/AdventOfCode | a25033747f41630c208d7fd021f3427b9c758792 | 4c5323d7945482d925a1a345ccb9d54c6517dd49 | refs/heads/master | 2022-02-16T17:34:17.448417 | 2022-01-04T23:44:51 | 2022-01-04T23:44:51 | 228,530,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py | from utils import *
from copy import deepcopy as dc
from collections import defaultdict as dd
inp = get_input(2020,17)
# lol
# truly hideous
# seems like everyone was thrown by his
# shifting indices in example
# spent a decent amount of time trying to debug against his examples
l_A = dd(str)
l_B = dd(str)
for i in range(len(inp)):
for j in range(len(inp[0])):
l_A[(0,i,j)]=inp[i][j]
l_B[(0,i,j,0)]=inp[i][j]
def update_A(state):
s = dc(state)
t = dd(str)
m_z = min([keys[0] for keys in s])
M_z = max([keys[0] for keys in s])
m_x = min([keys[1] for keys in s])
M_x = max([keys[1] for keys in s])
m_y = min([keys[2] for keys in s])
M_y = max([keys[2] for keys in s])
for z in range(m_z-1,M_z+2):
for x in range(m_x-1,M_x+2):
for y in range(m_y-1,M_y+2):
on_ct = 0
for dx in [-1,0,1]:
for dy in [-1,0,1]:
for dz in [-1,0,1]:
if (dz,dx,dy)!=(0,0,0):
if s[(z+dz,x+dx,y+dy)]=='#':
on_ct+=1
if s[(z,x,y)]=='#':
if on_ct in [2,3]:
t[(z,x,y)]='#'
elif s[(z,x,y)] in ['','.']:
if on_ct==3:
t[(z,x,y)]='#'
else:
t[(z,x,y)]='.'
return(t)
def update_B(state):
s = dc(state)
t = dd(str)
m_z = min([keys[0] for keys in s])
M_z = max([keys[0] for keys in s])
m_x = min([keys[1] for keys in s])
M_x = max([keys[1] for keys in s])
m_y = min([keys[2] for keys in s])
M_y = max([keys[2] for keys in s])
m_w = min([keys[3] for keys in s])
M_w = max([keys[3] for keys in s])
for z in range(m_z-1,M_z+2):
for x in range(m_x-1,M_x+2):
for y in range(m_y-1,M_y+2):
for w in range(m_w-1,M_w+2):
on_ct = 0
for dx in [-1,0,1]:
for dy in [-1,0,1]:
for dz in [-1,0,1]:
for dw in [-1,0,1]:
if (dz,dx,dy,dw)!=(0,0,0,0):
if s[(z+dz,x+dx,y+dy,w+dw)]=='#':
on_ct+=1
if s[(z,x,y,w)]=='#':
if on_ct in [2,3]:
t[(z,x,y,w)]='#'
elif s[(z,x,y,w)] in ['','.']:
if on_ct==3:
t[(z,x,y,w)]='#'
else:
t[(z,x,y,w)]='.'
return(t)
t_A = dc(l_A)
t_B = dc(l_B)
for _ in range(6):
t_A = update_A(t_A)
t_B = update_B(t_B)
ans_A = [t_A[key] for key in t_A].count('#')
ans_B = [t_B[key] for key in t_B].count('#')
print('Part A Solution:', ans_A)
print('Part B Solution:', ans_B) | [
"noreply@github.com"
] | sjshide.noreply@github.com |
4d6845e592d1f3488d99b7a800a98e65b252ade7 | 4e8cb93aa4fb941030319fae339c291a2aabffe2 | /wonderbits/WBNebulier.py | f2c79128ea99a5c55ba15596f35b6c69058f3481 | [
"MIT"
] | permissive | BigCircleLaw/wonderguy | 9e63cba6066bc826ad2aced99a63704e4b167775 | d78a100784c414b9c3945c301d07c4e1b04aab8f | refs/heads/master | 2020-06-20T03:37:36.432422 | 2020-04-28T03:03:13 | 2020-04-28T03:03:13 | 196,978,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | from .WBits import WBits
from .event import Event
def _format_str_type(x):
if isinstance(x, str):
x = str(x).replace('"', '\\"')
x = "\"" + x + "\""
return x
class Nebulier(WBits):
def __init__(self, index = 1):
WBits.__init__(self)
self.index = index
def set_onboard_rgb(self, rgb):
command = 'nebulier{}.set_onboard_rgb({})'.format(self.index, rgb)
self._set_command(command)
def turn_on(self):
"""
开启雾化器
"""
command = 'nebulier{}.turn_on()'.format(self.index)
self._set_command(command)
def turn_off(self):
"""
关闭雾化器
"""
command = 'nebulier{}.turn_off()'.format(self.index)
self._set_command(command)
| [
"794557226@qq.com"
] | 794557226@qq.com |
245925707f4f6c6c98786d629ccf351760017361 | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-Python/Matrix Script.py | 114fa5ab8380753bbc2fe298d07e8f760aeb97f0 | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | #!/bin/python3
import math
import os
import random
import re
import sys
n, m = map(int, input().rstrip().split())
matrix = []
for _ in range(n):
matrix_item = input()
matrix.append(matrix_item)
print(re.sub(r'(?<=[A-Za-z0-9])([^A-Za-z0-9]+)(?=[A-Za-z0-9])',' ',"".join("".join(decode) for decode in zip(*matrix))))
| [
"rastogiritvik99@gmail.com"
] | rastogiritvik99@gmail.com |
796454219f8845841252c8af4719c448a62f6eeb | cab2340e8279179e9810e3a3abd024db5be78fec | /tvm/urls.py | 3dc8c8399b83995595755ed5d75e940ac452d8aa | [] | no_license | pankajanand26/fabric-tvm-frontend | 93f72a3f1240425546139b553ff8fc4bfe4833b6 | 2eb1e68fa3a85aa183a4b192c26884b7ef1ad0b2 | refs/heads/master | 2020-04-08T08:16:42.958667 | 2018-11-30T13:02:16 | 2018-11-30T13:02:16 | 159,172,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | """tvm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include,path
from django.conf.urls import url
from ideas import views
urlpatterns = [
path('ideas/', include('ideas.urls'), name='ideas'),
url(r'^login/$', views.login, name='login'),
path('profile/', views.profile, name='profile'),
url(r'^oauth/', include('social_django.urls', namespace='social')), # <--
path('admin/', admin.site.urls),
]
| [
"rarunk16@ford.com"
] | rarunk16@ford.com |
ff8d2f7f813791d5786182273a3be57319e04640 | edc22de2bd6917e5fbc61bfba9e655b390c3c1c4 | /source/input_data_initialization.py | 972c86eff2522d58f0b7261af18596baa6648f09 | [] | no_license | cycbill/Local-Vol-Calibration | 0d35cc2de510ad80b1206e53e6350b86925cec86 | eeda77d760d2376afd0b5f747cb0c5465aaa6466 | refs/heads/master | 2020-08-26T22:16:44.519980 | 2019-12-11T04:13:51 | 2019-12-11T04:13:51 | 217,164,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import numpy as np
import xlwings as xw
from rate_curve_class import RateCurve
from strike_grid_discretization import StrikeGridsAllTenors
from tenor_market_data import TenorMarketData
from implied_vol_class import ImpliedVolatility
from compute_sum_sqr_vol_T import compute_sum_sqr_vol_T
from new_pillar_strike_extrapolation import NewPillarStrikeExtrapolation
from compute_maturity_grid import compute_maturity_grid
from compute_local_vol_init_guess import compute_local_vol_init_guess
def input_data_initialization():
np.set_printoptions(linewidth=150)
wb = xw.Book('LocVol Parameters.xlsx')
#wb = xw.Book(r'source\LocVol Parameters.xlsx')
sht = wb.sheets['IRSMFORM']
## Read data from file
S = sht.range('B2').value
r_tenors = sht.range('B6').options(np.array, expand='down').value
r_quotes = sht.range('C6').options(np.array, expand='down').value
rf_tenors = sht.range('E6').options(np.array, expand='down').value
rf_quotes = sht.range('F6').options(np.array, expand='down').value
csc_tenors = sht.range('H6').options(np.array, expand='down').value
csc_quotes = sht.range('I6').options(np.array, expand='down').value
imp_vol_tenors = sht.range('K6').options(np.array, expand='down').value
imp_vol_strikes = sht.range('N6').options(np.array, expand='table').value
imp_vol_quotes = sht.range('T6').options(np.array, expand='table').value
## Build up rate curve class
r_para = RateCurve(r_tenors, r_quotes)
rf_para = RateCurve(rf_tenors, rf_quotes)
csc_para = RateCurve(csc_tenors, csc_quotes)
return S, r_para, rf_para, csc_para, imp_vol_tenors, imp_vol_strikes, imp_vol_quotes
| [
"cuiyechang1991@gmail.com"
] | cuiyechang1991@gmail.com |
879419e98b0be90eff8d2a02105700d40f59e5d9 | 27033910ff8f2ad621866080d0e2cf62231fac6e | /banks_main.py | 12b1fb0c1d74edde427751e40a78b4fceef79ca0 | [] | no_license | johnurb/scrape_shadow_bank_websites | b0fef6fcc23cd19b99bb84cfb53937a56450b20f | b0800cfe2ce9376c9de3527666a11d313805362b | refs/heads/master | 2020-08-31T16:54:29.189776 | 2019-10-31T10:10:55 | 2019-10-31T10:10:55 | 218,737,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,292 | py | import os
import csv
import json
import re
import requests
from bs4 import BeautifulSoup
import csv
import tldextract
from time import sleep
import html2text
import browser_cookie3
import random
import signal
from contextlib import contextmanager
import io
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
def extract_pdf_text(file_path):
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(resource_manager, fake_file_handle)
page_interpreter = PDFPageInterpreter(resource_manager, converter)
with open(file_path, 'rb') as fh:
for page in PDFPage.get_pages(fh, caching=True, check_extractable=True):
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
# close open handles
converter.close()
fake_file_handle.close()
if text:
return text
else:
return ''
def convert_pdf_to_text(content):
with open('temp.pdf', 'wb') as fout:
fout.write(content)
pdf_text = extract_pdf_text('temp.pdf')
os.remove('temp.pdf')
return pdf_text
@contextmanager
def timeout(time):
# Register a function to raise a TimeoutError on the signal.
signal.signal(signal.SIGALRM, raise_timeout)
# Schedule the signal to be sent after ``time``.
signal.alarm(time)
try:
yield
except TimeoutError:
pass
finally:
# Unregister the signal so it won't be triggered
# if the timeout is not reached.
signal.signal(signal.SIGALRM, signal.SIG_IGN)
def raise_timeout(signum, frame):
raise TimeoutError
def load_banks_data():
file_name = 'shadow_only.csv'
banks_data = {}
with open(file_name, 'r') as fin:
reader = csv.reader(fin)
for i, line in enumerate(reader):
if i == 0:
pass
else:
bank_name = line[2].replace(' ', '_')
bank_name = re.sub(r'\W+', '', bank_name)
banks_data[bank_name] = line[4]
return banks_data
def create_bank_folders(banks_data):
root_folder = os.getcwd()
bank_folders_name = 'directory'
bank_folders_path = os.path.join(root_folder, bank_folders_name)
try:
os.mkdir(bank_folders_path)
except:
# folder already exists
pass
for key, value in banks_data.items():
folder_name = key.replace(' ', '_')
folder_path = os.path.join(bank_folders_path, folder_name)
try:
os.mkdir(folder_path)
except:
# folder already exists
pass
banks_data[key] = [value, folder_path]
# function to extract the text content from a page.
# page_body -> text object
def get_page_text(page_body):
text_maker = html2text.HTML2Text()
text_maker.ignore_links = True
text_maker.ignore_images = True
text_maker.ignore_anchors = True
text_maker.body_width = 0
text = text_maker.handle(page_body).lower().replace('#', '').replace('*', '').replace('_', '').replace(' - ', ' ').replace('|', '').replace('(icon)', '').strip()
return text
def init_session():
session = requests.Session()
session.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36',
}
cookie = browser_cookie3.chrome()
session.cookies.update(cookie)
return session
def get_page(current_session):
url = 'https://start.loandepot.com/assets/img/aflac/Aflac_loanDepot_CustomerFlyer.pdf'
response = current_session.get(url)
content_type = response.headers['content-type']
if 'pdf' in content_type:
pdf_text = convert_pdf_to_text(response.content)
if 'text/html' in content_type:
pass
def current_urls(urls_list, session):
bad_contents = ['/media/', '/images/', '.css', '.png', '.jpeg', 'jpg', '.gif',
'.ashx', '=', '.ico', '%20', '/css/', '/ajax', '.mp4', '.wav', 'facebook', 'twitter',
'instagram', 'pinterest', '.mp3', '.mov', 'trustpilot', 'mailto', '.zip', '.tar', '.gz', 'webmaxstaging', '#', '.docx', '.doc', '.xlsx', '.csv' ]
while len(urls_list[0]) > 0:
try:
for url in urls_list[0]:
if url in urls_list[1]:
pass
else:
print(url)
urls_list[1].append(url)
with timeout(15):
try:
r = session.get(url, timeout=5)
except Exception as e:
pass
if not r.status_code == 200:
print(r.status_code)
urls_list[0].remove(url)
else:
urls_list[0].remove(url)
content_type = r.headers['content-type']
if 'pdf' in content_type:
out_text = convert_pdf_to_text(response.content)
if 'text/html' in content_type:
r_text = r.text
out_text = get_page_text(r_text)
page_name = url.replace('https://', '').replace('http://', '').replace('/', '_').replace('www.', '').replace('.', '-') + '.txt'
out_dir = urls_list[3][0]
try:
os.mkdir(out_dir)
except Exception:
pass
out_path = os.path.join(out_dir, page_name)
with open(out_path, 'w') as fout:
fout.write(out_text)
soup = BeautifulSoup(r_text, 'html.parser')
for anchor in soup.find_all('a', href=True):
link = anchor['href']
if link.startswith('#'):
pass
if link.endswith('/'):
link = link[:-1]
if link.startswith('/'):
link = 'https://www.' + urls_list[2][0] + link
if not urls_list[2][0] in link:
pass
else:
if any(substring in link for substring in bad_contents):
pass
else:
link = link.replace('https://', 'http://')
urls_list[0].append(link)
for url in urls_list[0]:
if url in urls_list[1]:
urls_list[0].remove(url)
urls_list[0] = list(set(urls_list[0]))
except Exception as e:
print(e)
def main():
if os.path.exists('already_scraped.txt'):
pass
else:
with open('already_scraped.txt', 'w') as fout:
fout.write('')
banks = load_banks_data()
create_bank_folders(banks)
session = init_session()
already_scraped = []
with open('already_scraped.txt', 'r') as fin:
for line in fin:
if line == '':
pass
else:
already_scraped.append(line.strip())
for key, value in banks.items():
bank_name = key
bank_root_url = value[0]
bank_dir = value[1]
if bank_name.strip() in already_scraped:
pass
else:
with open('already_scraped.txt', 'a') as fout:
fout.write(bank_name.strip() + '\n')
urls_list = [[bank_root_url], [], [], [bank_dir]] #[ [current_urls_list], [seen_urls], [current domain], [live folder] ]
domain = tldextract.extract(bank_root_url)
root = '{}.{}'.format(domain[1], domain[2])
urls_list[2].append(root)
current_urls(urls_list, session)
main()
| [
"urbine.5@buckeyemail.osu.edu"
] | urbine.5@buckeyemail.osu.edu |
ddef20ef25a9208e085f1a1eb4c35bc805874298 | 9a0eeaeaffdeeec9228851c763851772ea898741 | /examples/contextmanager.py | 3b715e43309fcce17776ad0fe7d27358c7690605 | [
"MIT"
] | permissive | Gr1N/aiodogstatsd | ff8ccfabb633d36b585f5a634af573c1bf7bfb50 | 4c363d795df04d1cc4c137307b7f91592224ed32 | refs/heads/master | 2021-12-22T08:41:15.105876 | 2021-12-12T21:48:00 | 2021-12-12T21:48:00 | 171,884,198 | 32 | 11 | MIT | 2021-12-12T21:31:30 | 2019-02-21T14:13:45 | Python | UTF-8 | Python | false | false | 392 | py | import asyncio
from random import random
import aiodogstatsd
async def main():
async with aiodogstatsd.Client(
host="0.0.0.0", port=9125, constant_tags={"whoami": "I am Batman!"}
) as client:
for _ in range(5000):
client.timing("fire", value=random())
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"gr1n@protonmail.com"
] | gr1n@protonmail.com |
5084136ecc21efc516abb29d7cfe6c0acda3e829 | 27225cfbbc5e03a0ab48ea1457038b37d4d9fe37 | /app/core/config.py | aa7fd612a0429a367bb067996e910358d8dc2b88 | [] | no_license | CameronXie/python-api | 2d936d6da3849cff4bd5058caa060e85acb53e99 | 2ab8d71ccf7edadf4a74d4df9d22b8a75964655c | refs/heads/master | 2023-03-26T07:10:59.520959 | 2021-03-22T20:27:17 | 2021-03-22T20:27:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from typing import List
from pydantic import BaseSettings
class Settings(BaseSettings):
API_VERSION: int = 1
ALLOWED_ORIGINS: List[str] = [
'*'
]
class Config:
case_sensitive = True
settings = Settings()
| [
"cameron@cameron.com"
] | cameron@cameron.com |
e4076d56d19e0b60f79ef0b63139cbc98873367c | e92d752737f3e90a47355d5502a364a9e2d0c08b | /tests/test_reverseproxy.py | c93c3521ab514877f5527c50f379fbd88223bb84 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | miri64/aiocoap | c7815758371f0a83c51b058fb0c24e8024feae9f | 93e699280b644465213fc8ba29bae556475fb6fc | refs/heads/master | 2023-08-15T16:46:27.985718 | 2021-10-05T17:15:00 | 2021-10-05T17:15:00 | 404,324,558 | 0 | 0 | NOASSERTION | 2021-09-08T11:39:12 | 2021-09-08T11:39:12 | null | UTF-8 | Python | false | false | 4,526 | py | # This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
import asyncio
import unittest
from . import common
from .test_server import WithAsyncLoop, Destructing, WithClient, WithTestServer, CLEANUPTIME
import aiocoap.proxy.client
import aiocoap.cli.proxy
from aiocoap.util import hostportjoin
class WithReverseProxy(WithAsyncLoop, Destructing):
def setUp(self):
super(WithReverseProxy, self).setUp()
self.reverseproxy = aiocoap.cli.proxy.Main(
["--reverse", "--bind", hostportjoin(self.proxyhost, self.proxyport), "--namebased", "%s:%s"%(self.name_for_real_server, self.servernetloc), "--pathbased", "%s:%s"%("/".join(self.path_for_real_server), self.servernetloc)],
loop=self.loop,
)
self.loop.run_until_complete(self.reverseproxy.initializing)
def tearDown(self):
super(WithReverseProxy, self).tearDown()
self.loop.run_until_complete(self.reverseproxy.shutdown())
# creating a reference loop between the cli instance and its contexts,
# so that the cli instance's gc-ing is linked o the contexts'.
# TODO how can we handle this more smoothly?
self.reverseproxy.outgoing_context._cli = self.reverseproxy
self.reverseproxy.proxy_context._cli = self.reverseproxy
self._del_to_be_sure('reverseproxy')
self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME))
proxyport = 56839
proxyhost = common.loopbackname_v6 or common.loopbackname_v46
proxyaddress = '%s:%d'%(proxyhost, proxyport)
name_for_real_server = 'aliasedname'
path_for_real_server = ('aliased', 'name')
class TestReverseProxy(WithReverseProxy, WithClient, WithTestServer):
@unittest.skipIf(common.using_simple6, "Some proxy tests fail with simple6 (https://github.com/chrysn/aiocoap/issues/88)")
def test_routing(self):
yieldfrom = lambda f: self.loop.run_until_complete(f)
def req():
request = aiocoap.Message(code=aiocoap.GET)
request.unresolved_remote = self.proxyaddress
request.opt.uri_path = ('big',)
return request
request = req()
response = yieldfrom(self.client.request(request).response)
self.assertEqual(response.code, aiocoap.BAD_REQUEST, "GET without hostname gave resource (something like BAD_REQUEST expected)")
request = req()
request.opt.uri_host = self.name_for_real_server
response = yieldfrom(self.client.request(request).response)
self.assertEqual(response.code, aiocoap.CONTENT, "GET with hostname based proxying was not successful)")
request = req()
request.opt.uri_path = self.path_for_real_server + request.opt.uri_path
response = yieldfrom(self.client.request(request).response)
self.assertEqual(response.code, aiocoap.CONTENT, "GET with path based proxying was not successful)")
@unittest.skipIf(common.using_simple6, "Some proxy tests fail with simple6 (https://github.com/chrysn/aiocoap/issues/88)")
def test_options(self):
yieldfrom = lambda f: self.loop.run_until_complete(f)
def req():
request = aiocoap.Message(code=aiocoap.GET)
request.unresolved_remote = self.proxyaddress
request.opt.uri_path = ('big',)
request.opt.uri_host = self.name_for_real_server
return request
request = req()
request.opt.proxy_scheme = 'coap'
response = yieldfrom(self.client.request(request).response)
self.assertEqual(response.code, aiocoap.BAD_OPTION, "Reverse proxy supports proxying even though it shouldn't.")
request = req()
request.opt.add_option(aiocoap.optiontypes.StringOption(2**10 + 2, "can't proxy this"))
response = yieldfrom(self.client.request(request).response)
self.assertEqual(response.code, aiocoap.BAD_OPTION, "Proxy did not react to unsafe option.")
request = req()
request.opt.add_option(aiocoap.optiontypes.StringOption(2**10, "nothing to see here"))
response = yieldfrom(self.client.request(request).response)
self.assertEqual(response.code, aiocoap.CONTENT, "Proxy did not ignore to safe-to-forward option.")
| [
"chrysn@fsfe.org"
] | chrysn@fsfe.org |
4ed9816040733bed4f0d0f33e93b66edd9fd06ed | 71b6495c16859d2d6c61b5f56c51fad53a23feb9 | /AppStore_logic/admin.py | ebc2e6d03b436eabf18327007a39b43d130cbaae | [] | no_license | didghwns0514/Learning_Django | 46368ac21f802886150691a6a636116da3fae529 | 8f731c7f885fe5e51e4970bdc7882e7825217723 | refs/heads/master | 2023-03-07T02:03:42.427154 | 2021-02-15T08:30:32 | 2021-02-15T08:30:32 | 338,614,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django.contrib import admin
# Register your models here.
from .models import AddOrder, RobotState
admin.site.register(AddOrder)
admin.site.register(RobotState)
| [
"uib58518@contiwan.com"
] | uib58518@contiwan.com |
fcc2088991a5e82f1fa12fd20e80de370702d0a6 | fd3eff76297b61f7045d4180115ff300e09b4208 | /01 神经网络与深度学习/02 编码与实战/01 Python Basic/02 broadcasting.py | 4ee743232e53b6a8adcbd41806ed07b47efbd132 | [] | no_license | xiangjl623/AI | c220b2c88a8fdcd4e371576ff20c2498cdef45ff | 0dc945b95fcf0658519a0e9292660efa6720f91d | refs/heads/master | 2020-03-29T06:20:15.613089 | 2018-11-01T06:44:54 | 2018-11-01T06:44:54 | 149,620,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import numpy as np
A = np.array([[56.0, 0.0, 4.4, 68.0],
[1.2, 104.0, 52.0, 8.0],
[1.8, 135.0, 99.0, 0.9]])
print(A)
print(A.shape) #(3,4)三行四列
print(A[2])
#sum的参数axis=0表示求和运算按列执行
#axis用来指明将要进行的运算是沿着哪个轴执行,在
#numpy中,0轴是垂直的,也就是列,而1轴是水平的,也就是行。
cal = A.sum(axis=0)
print(cal)
#百分比
percent = 100*A/cal.reshape(1,4)
print(percent) | [
"xiangjl@glodon.com"
] | xiangjl@glodon.com |
f137566360c570f51a4657f4d857b543cb8dc0aa | 49a0241e7478fd860fdd57288c85184dd086f7e4 | /Visualisation/bullet_visualisation.py | b18d06eea6817e50066c71ef5911908b4ed28b74 | [] | no_license | Killjoyer/Battle-City | 4520e2634fd0e5cf2b610f75a27302af37430967 | a5347a3059148992d24f6d994911b06080acd0d2 | refs/heads/master | 2023-02-04T18:50:55.127693 | 2020-12-28T10:25:22 | 2020-12-28T10:25:22 | 247,949,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from constants import Bullets, MovingWills
from tank import Bullet
from Visualisation.moving_entity_visualisation import MovingEntityVisualisation
class BulletVisualisation(MovingEntityVisualisation):
def __init__(self, father, bullet: Bullet):
bullet.speed = 8
super().__init__(father, bullet, Bullets.Texture)
self.moves = True
self.moving_will = MovingWills.Forward
self.show()
| [
"saidelsam@ya.ru"
] | saidelsam@ya.ru |
144ef0cc0cda5eab5d3bbf7481c744754b6bdcb1 | 18a6ead35b3402323110fb83507f1ed386d3e5d5 | /ipynb_images/fig_gen.py | 3138a9f2c79d6f772c6ab9e12d0c0620d8ce547c | [
"MIT"
] | permissive | WhoIsJack/python-bioimage-analysis-tutorial | 9cd5badbc0a2b9573635b826cb9f4e2febe12dc4 | 1d2473994c0151d8b83f0385f007425ad4c7a055 | refs/heads/master | 2023-03-08T18:33:21.013828 | 2023-02-17T12:08:27 | 2023-02-17T12:08:27 | 104,102,696 | 72 | 34 | MIT | 2021-06-18T14:12:18 | 2017-09-19T16:56:54 | Jupyter Notebook | UTF-8 | Python | false | false | 4,846 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 01 00:30:59 2017
@author: Jonas Hartmann @ Gilmour group @ EMBL Heidelberg
@descript: Quick & dirty script to generate illustrations for the python image
analysis course's tutorial pipeline.
"""
# IMPORTS
import numpy as np
import scipy.ndimage as ndi
import matplotlib.pyplot as plt
# GAUSSIAN KERNEL GRID
# Create the Gaussian kernel
a = np.zeros((11,11),dtype=np.uint8)
a[5,5] = 255
a = ndi.gaussian_filter(a,2)
# Generate figure
pig,ax = plt.subplots()
ax.matshow(a,cmap='Blues',vmax=12)
# Add the labels
for (i, j), z in np.ndenumerate(a*10):
ax.text(j, i, z, ha='center', va='center')
# Cosmetics, saving and showing
plt.axis('off')
#plt.savefig('gaussian_kernel_grid.png')
#plt.show()
# Clean
plt.clf()
plt.close()
# 1D ADAPTIVE THRESHOLDING
# Create 1D 'membrane' data
a = np.zeros(100) + 10
a[35] = 1000
a[65] = 1000
a = ndi.gaussian_filter(a,2)
# Create adaptive background
b = ndi.uniform_filter(a,size=20)
# Plot stuff
plt.plot(a)
plt.plot(b,c='r')
plt.ylim([-10,270])
# Label, save and show
plt.legend(['Raw 1D Membrane Signal','Adaptive Background'])
plt.xlabel('space [pixels]')
plt.ylabel('intensity [a.u.]')
#plt.savefig('adaptive_bg_1D.png')
#plt.show()
# Clean
plt.clf()
plt.close()
# UNIFORM KERNEL WITH STRUCTURING ELEMENT
# Create data
i = 11
a = (np.mgrid[:i+2,:i+2][0] - np.floor(i/2) - 1)**2 + (np.mgrid[:i+2,:i+2][1] - np.floor(i/2) - 1)**2 <= np.floor(i/2)**2
# Generate figure
pig,ax = plt.subplots()
ax.matshow(a,cmap='Blues',vmax=2)
# Add the labels
for (i, j), z in np.ndenumerate(a*1):
ax.text(j, i, z, ha='center', va='center')
# Cosmetics, saving and showing
plt.axis('off')
#plt.savefig('uniform_filter_SE.png')
#plt.show()
# Clean
plt.clf()
plt.close()
# DISTANCE TRANSFORM
# Create data
a = np.zeros((16,28),dtype=np.uint8)
a[6:10,6:10] = 255
a[6:10,18:22] = 255
a = ndi.gaussian_filter(a,3)
a = a >= 9
# Distance transform
b = ndi.distance_transform_edt(a)
# Generate figure
pig,ax = plt.subplots(1,2)
ax[0].matshow(a,cmap='Blues',vmax=2)
ax[1].matshow(b,cmap='Blues')
# Add the labels
for (i, j), z in np.ndenumerate(a.astype(np.uint8)):
ax[0].text(j, i, z, ha='center', va='center')
for (i, j), z in np.ndenumerate(b.astype(np.uint8)):
ax[1].text(j, i, z, ha='center', va='center')
# Cosmetics
ax[0].axis('off')
ax[1].axis('off')
ax[0].set_title('Boolean Mask')
ax[1].set_title('Distance Transform')
plt.tight_layout()
## Saving
#manager = plt.get_current_fig_manager()
#manager.window.showMaximized()
#plt.savefig('distance_transform.png')
# Showing
#plt.show()
# Clean
plt.clf()
plt.close()
# WATERSHED 1D ILLUSTRATION
# Create 1D 'membrane' data
a = np.zeros(150,dtype=np.uint8)
a[[25,50,90,125]] = [150,180,80,120]
a = ndi.gaussian_filter(a,2)
a = (a.astype(np.float) / float(a.max()) * 200.0).astype(np.uint8) + 10
# Create seed data
b = (np.array([10,38,70,110,140]),np.array([10,10,10,10,10]))
# Three watershed steps
w1 = np.ones_like(a) + 70
w2 = np.ones_like(a) + 140
w3 = np.ones_like(a) + 240
# Plotting function
def plot_stuff(ax,l1=None,l2=None):
# Plot intensity
ax.plot(a,lw=2,label=l1,color='#6AADD5')
ax.fill_between(np.arange(a.shape[0]),a,color='#F7FBFF')
# Plot seeds
ax.scatter(b[0],b[1],label=l2,color='#C71B11',zorder=3,s=30,marker='D')
# Cosmetics
ax.set_ylim([0,255])
ax.set_xlim([0,149])
# Done
return ax
# Make the figure
pig,ax = plt.subplots(2,2,sharex=True,sharey=True)
# Add plot before watershed
ax[0,0] = plot_stuff(ax[0,0],l1='membrane signal',l2='seeds')
ax[0,0].legend()
ax[0,0].set_title("watershed level 0",fontsize=14)
# Add plot with watershed at 70
ax[0,1].fill_between(np.arange(w1.shape[0]),w1,color='#0B559F',
label='watershed')
ax[0,1] = plot_stuff(ax[0,1])
ax[0,1].legend()
ax[0,1].set_title("watershed level 70",fontsize=14)
# Add plot with watershed at 140
ax[1,0].fill_between(np.arange(w2.shape[0]),w2,color='#0B559F')
ax[1,0] = plot_stuff(ax[1,0])
ax[1,0].vlines(90,0,255,lw=2,color='#C71B11',zorder=3)
ax[1,0].set_title("watershed level 140",fontsize=14)
# Add plot with watershed at 240
ax[1,1].fill_between(np.arange(w3.shape[0]),w3,color='#0B559F')
ax[1,1] = plot_stuff(ax[1,1])
ax[1,1].vlines([25,50,90,125],0,255,lw=2,color='#C71B11',zorder=3,
label='final cell boundaries')
ax[1,1].legend()
ax[1,1].set_title("watershed level 240",fontsize=14)
# General labels
pig.text(0.5, 0.02, 'space [pixels]', ha='center', fontsize=14)
pig.text(0.04, 0.5, 'intensity [a.u.]', va='center', rotation='vertical',
fontsize=14)
## Saving
#manager = plt.get_current_fig_manager()
#manager.window.showMaximized()
#plt.savefig('watershed_illustration.png')
# Tighten layout and show
plt.tight_layout()
#plt.show()
# Clean
plt.clf()
plt.close()
| [
"jonas.hartmann@embl.de"
] | jonas.hartmann@embl.de |
449248113ab98dd46f92d9e76576d832177aefbd | f9acfbff2744c014cd4adbc53d75316cacc00896 | /pycspr/api/get_node_peers.py | caff8d058e48be88cc650078538c0c1ab16f9b24 | [
"Apache-2.0"
] | permissive | Shr1ftyy/casper-python-sdk | 30fb3edc42551faef0b9bf10bf5a13ed8b5ac9f5 | 1c32ef89ef269f0307d530cfd635cfcbb3f29290 | refs/heads/main | 2023-07-27T17:17:40.054075 | 2021-07-29T09:58:12 | 2021-07-29T09:58:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import jsonrpcclient as rpc_client
from pycspr.client import NodeConnectionInfo
# Method upon client to be invoked.
_API_ENDPOINT = "info_get_peers"
def execute(connection_info: NodeConnectionInfo) -> dict:
"""Returns node peers information.
:param connection_info: Information required to connect to a node.
:returns: Node peers information.
"""
response = rpc_client.request(connection_info.address_rpc, _API_ENDPOINT)
return response.data.result["peers"] | [
"mark@casperlabs.io"
] | mark@casperlabs.io |
99e8a8fefb709cc55f68edfa97511059a68da167 | 6e0b0add44605c5864fef6ac62aec0567d80ebee | /src/models/trainer.py | adb8aaf25b4e4cc2c10f3f958329354be0a23d12 | [] | no_license | tomoyaf/YouyakuMan | 7ea160d1bde97f9c357bd752994dcb7b869f16dd | 9960829491b04a3ca7da48cb0fa869e07eb8564d | refs/heads/master | 2022-05-30T22:22:45.419082 | 2020-05-06T07:27:22 | 2020-05-06T07:27:22 | 259,449,354 | 0 | 0 | null | 2020-04-27T20:47:08 | 2020-04-27T20:47:08 | null | UTF-8 | Python | false | false | 4,367 | py | import os
import torch
from src.models.reporter import ReportMgr
from src.models.stats import Statistics
def build_trainer(args, model, optim):
gpu_rank = 0
print('gpu_rank %d' % gpu_rank)
report_manager = ReportMgr(args.report_every, start_time=-1)
trainer = Trainer(args, model, optim, report_manager)
return trainer
class Trainer(object):
def __init__(self, args, model, optim, report_manager,
grad_accum_count=1, n_gpu=1, gpu_rank=0):
self.args = args
self.save_checkpoint_steps = self.args.save_checkpoint_steps
self.model = model
self.optim = optim
self.grad_accum_count = grad_accum_count
self.n_gpu = n_gpu
self.gpu_rank = gpu_rank
self.report_manager = report_manager
self.loss = torch.nn.BCELoss(reduction='none')
assert grad_accum_count > 0
if model:
self.model.train()
def train(self, train_iter_fct, train_steps):
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = Statistics()
report_stats = Statistics()
self._start_report_manager(start_time=total_stats.start_time)
while step <= train_steps:
reduce_counter = 0
batch = next(train_iter)
true_batchs.append(batch)
normalization += batch.batch_size
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0:
self._save(step)
step += 1
if step > train_steps:
break
train_iter = train_iter_fct()
return total_stats
def _gradient_accumulation(self, true_batchs, normalization, total_stats,
report_stats):
if self.grad_accum_count > 1:
self.model.zero_grad()
for batch in true_batchs:
if self.grad_accum_count == 1:
self.model.zero_grad()
src = batch.src
labels = batch.labels
segs = batch.segs
clss = batch.clss
mask = batch.mask
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
(loss / loss.numel()).backward()
batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
if self.grad_accum_count == 1:
self.optim.step()
if self.grad_accum_count > 1:
self.optim.step()
def _save(self, step):
real_model = self.model
model_state_dict = real_model.state_dict()
checkpoint = {
'model': model_state_dict,
'opt': self.args,
'optim': self.optim,
}
checkpoint_path = os.path.join(self.args.save_path, 'model_step_%d.pt' % step)
print("Saving checkpoint %s" % checkpoint_path)
if not os.path.exists(checkpoint_path):
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _report_training(self, step, num_steps, learning_rate,
report_stats):
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats)
| [
"keitei_go@lscc.co.jp"
] | keitei_go@lscc.co.jp |
d0f53eed70c9349c2199a4b19053373dff0df022 | ef03753086b41d9afd528d33e4ee0f6ca56f7600 | /find-hackerrank/ans.py.3 | 7be8c8d8d1d2897ed41ac12c72b27ed2f99ffa91 | [] | no_license | chris-r-harwell/HackerRankRegex | 6c15f61d14541c0087a69d17c08ac842fb493653 | 72aaa5bafdbffd0c9c037ebddbc5d63b32ff1397 | refs/heads/master | 2021-01-23T09:47:34.652257 | 2017-09-06T11:44:11 | 2017-09-06T11:44:11 | 102,601,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | 3 | #!/bin/env python3
import re
if __name__ == '__main__':
"""
-1 if line neither starts nore ends with hackerrank
1 if line starts with hackerrank
2 if line ends with hackerrank
0 if line both starts and ends with hackerrank
"""
n = int(input())
for i in range(n):
s = str(input())
score = -1 # assume none
start = re.search(r'^hackerrank', s) is not None
end = re.search(r'hackerrank$', s) is not None
if start and end:
score = 0
elif end:
score = 2
elif start:
score = 1
print(score)
| [
"chris-r-harwell@gmail.com"
] | chris-r-harwell@gmail.com |
149ec873645dfb5e8d20b0153a10ad6d72a42aca | 5f0308ac178902eed5147e8141904853aacb70a5 | /scripts/cli.py | e505800d883498cf7d04507268234068cddb3187 | [] | no_license | jfjlaros/pyvcf-cli | 39b9a3c1233cab535fce9174a44018d0faa80e8a | 92405685502eb5e4b1d3328b378b77a1cae9fe04 | refs/heads/master | 2021-01-25T12:31:13.072541 | 2014-08-29T10:00:39 | 2018-03-01T18:21:56 | 123,475,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,886 | py | #!/usr/bin/env python
"""
VCF manipulation toolkit.
Copyright (c) 2014 Leiden University Medical Center <humgen@lumc.nl>
Copyright (c) 2014 Jeroen F.J. Laros <J.F.J.Laros@lumc.nl>
Licensed under the MIT license, see the LICENSE file.
"""
from __future__ import division
import argparse
import itertools
import os
import wiggelen
import vcf
import vcf.utils as vcfutils
__version_info__ = ('0', '0', '1')
__version__ = '.'.join(__version_info__)
__author__ = 'LUMC, Jeroen F.J. Laros'
__contact__ = 'J.F.J.Laros@lumc.nl'
__homepage__ = 'https://github.com/jfjlaros/PyVCF'
usage = __doc__.split("\n\n\n")
def doc_split(func):
return func.__doc__.split("\n\n")[0]
def version(name):
return "%s version %s\n\nAuthor : %s <%s>\nHomepage : %s" % (name,
__version__, __author__, __contact__, __homepage__)
def diff(input_handles, output_handle, precision=10):
"""
Calculate the Jaccard distance between two VCF files.
:arg input_handles: List of two open readable handles to VCF files.
:type input_handles: list(stream)
:arg output_handle: An open writable handle.
:type output_handle: stream
:arg precision: Number of decimals in the output.
:type precision: int
"""
first_vcf = vcf.Reader(input_handles[0])
second_vcf = vcf.Reader(input_handles[1])
symmetric_difference = 0
total = 0
walker = vcfutils.walk_together(first_vcf, second_vcf)
for first_record, second_record in walker:
if first_record and second_record and not (first_record.is_indel or
second_record.is_indel):
if (first_record.alleles[1].sequence !=
second_record.alleles[1].sequence):
symmetric_difference += 1
total +=1
#if
#for
output_handle.write('{value:.{precision}f}\n'.format(
value=symmetric_difference / total, precision=precision))
#diff
def vcf_record_to_wig(input_handle, field="AF", unpack=lambda x: x, prefix="",
snp_only=True):
"""
Convert a VCF record to a wiggle record.
:arg input_handle: Open readable handle to a VCF file.
:type input_handle: stream
:arg field:
:type field: str
:arg unpack:
:type unpack: str
:arg prefix:
:type prefix: str
:arg snp_only:
:type snp_only: bool
:returns:
:rtype: object
"""
old_position = ""
for record in vcf.Reader(input_handle):
if (record.is_snp or not snp_only) and field in record.INFO:
if record.CHROM + str(record.POS) != old_position:
yield ("{}{}".format(prefix, record.CHROM), record.POS,
unpack(record.INFO[field]))
old_position = record.CHROM + str(record.POS)
#if
#vcf_record_to_wig
def vcf2wig(input_handle, output_handle, field="AF", prefix="",
unpack="x: x", snp_only=True):
"""
Convert a VCF file to a wiggle track.
:arg input_handle: Open readable handle to a VCF file.
:type input_handle: stream
:arg output_handle: Open writable handle to a wiggle track.
:type output_handle: stream
:arg field:
:type field: str
:arg prefix:
:type prefix: str
:arg unpack:
:type unpack: str
:arg snp_only:
:type snp_only: bool
"""
unpack_function = eval("lambda {}".format(unpack))
wiggelen.write(vcf_record_to_wig(input_handle, field=field,
unpack=unpack_function, prefix=prefix, snp_only=snp_only),
track=output_handle,
name=os.path.splitext(os.path.basename(output_handle.name))[0])
#vcf2wig
def main():
input_parser = argparse.ArgumentParser(add_help=False)
input_parser.add_argument("input_handle", metavar="INPUT",
type=argparse.FileType('r'), help="input file")
pair_in_parser = argparse.ArgumentParser(add_help=False)
pair_in_parser.add_argument("input_handles", metavar="INPUT", nargs=2,
type=argparse.FileType('r'), help="pair of input files")
output_parser = argparse.ArgumentParser(add_help=False)
output_parser.add_argument("output_handle", metavar="OUTPUT",
type=argparse.FileType('w'), default='-', help="output file")
prefix_parser = argparse.ArgumentParser(add_help=False)
prefix_parser.add_argument("-x", dest="prefix", type=str, default="",
help='prefix for chromosome names (%(type)s default="%(default)s")')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage[0], epilog=usage[1])
parser.add_argument('-v', action="version", version=version(parser.prog))
subparsers = parser.add_subparsers()
parser_diff = subparsers.add_parser("diff", parents=[pair_in_parser,
output_parser], description=doc_split(diff))
parser_diff.add_argument('-p', dest='precision', type=int, default=10,
help='precision (%(type)s default=%(default)s)')
parser_diff.set_defaults(func=diff)
parser_vcf2wig = subparsers.add_parser("vcf2wig", parents=[input_parser,
output_parser, prefix_parser], description=doc_split(vcf2wig))
parser_vcf2wig.add_argument('-f', dest='field', type=str, default="AF",
help='INFO field to convert (%(type)s default="%(default)s")')
parser_vcf2wig.add_argument('-u', dest='unpack', type=str, default="x: x",
help='unpack function (%(type)s default="%(default)s")')
parser_vcf2wig.add_argument('-s', dest='snp_only', default=False,
action="store_true", help="only convert SNPs")
parser_vcf2wig.set_defaults(func=vcf2wig)
try:
arguments = parser.parse_args()
except IOError, error:
parser.error(error)
try:
arguments.func(**{k: v for k, v in vars(arguments).items()
if k not in ('func', 'subcommand')})
except ValueError, error:
parser.error(error)
#main
if __name__ == '__main__':
main()
| [
"J.F.J.Laros@lumc.nl"
] | J.F.J.Laros@lumc.nl |
dfe782ae44d0c826f1cf828ff12d497febd5767c | 7b1a4d3c3ccdbb95202f8f38babaae087165928c | /backend/home/management/commands/load_initial_data.py | 7f4e12075bdbcce12eaff178de5c46492cd6885a | [] | no_license | crowdbotics-apps/find-me-4086 | cf5d7e6c9c48a3c0fd3cad669008e17f9056329c | e715d40c825b325ac4bd09f267f1c0a66e0b6645 | refs/heads/master | 2023-01-10T13:25:12.442913 | 2019-05-31T22:46:36 | 2019-05-31T22:46:36 | 189,666,285 | 0 | 0 | null | 2023-01-03T23:14:40 | 2019-05-31T22:39:01 | Python | UTF-8 | Python | false | false | 717 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
homepage_body = """
<h1 class="display-4 text-center">Find me</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
customtext_title = 'Find me'
CustomText.objects.create(title=customtext_title)
HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
can_import_settings = True
help = 'Load initial data to db'
def handle(self, *args, **options):
load_initial_data()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
70f0c42a9f45b0c0587d68145739c19c27479312 | 6efea391d0dd6087d8753057cff45867884fe5f1 | /google/cloud/logging_v2/proto/logging_pb2_grpc.py | e1759bbc1b990fe9d20bf576c1c5f12009895806 | [
"Apache-2.0"
] | permissive | tswast/python-logging | d9c4ae1ee87fb29436e2f16d9adac2a7a2d08378 | c4387b307f8f3502fb53ae1f7e1144f6284280a4 | refs/heads/master | 2022-12-30T19:50:14.840163 | 2020-08-12T20:28:40 | 2020-08-12T20:28:40 | 298,009,362 | 0 | 0 | Apache-2.0 | 2020-09-23T15:12:47 | 2020-09-23T15:12:46 | null | UTF-8 | Python | false | false | 6,922 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.logging_v2.proto import (
logging_pb2 as google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class LoggingServiceV2Stub(object):
"""Service for ingesting and querying logs.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.DeleteLog = channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/DeleteLog",
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.DeleteLogRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.WriteLogEntries = channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/WriteLogEntries",
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesResponse.FromString,
)
self.ListLogEntries = channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/ListLogEntries",
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesResponse.FromString,
)
self.ListMonitoredResourceDescriptors = channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.FromString,
)
self.ListLogs = channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/ListLogs",
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsResponse.FromString,
)
class LoggingServiceV2Servicer(object):
"""Service for ingesting and querying logs.
"""
def DeleteLog(self, request, context):
"""Deletes all the log entries in a log. The log reappears if it receives new
entries. Log entries written shortly before the delete operation might not
be deleted. Entries received after the delete operation with a timestamp
before the operation will be deleted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def WriteLogEntries(self, request, context):
"""Writes log entries to Logging. This API method is the
only way to send log entries to Logging. This method
is used, directly or indirectly, by the Logging agent
(fluentd) and all logging libraries configured to use Logging.
A single request may contain log entries for a maximum of 1000
different resources (projects, organizations, billing accounts or
folders)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListLogEntries(self, request, context):
"""Lists log entries. Use this method to retrieve log entries that originated
from a project/folder/organization/billing account. For ways to export log
entries, see [Exporting Logs](/logging/docs/export).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListMonitoredResourceDescriptors(self, request, context):
"""Lists the descriptors for monitored resource types used by Logging.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListLogs(self, request, context):
"""Lists the logs in projects, organizations, folders, or billing accounts.
Only logs that have entries are listed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_LoggingServiceV2Servicer_to_server(servicer, server):
rpc_method_handlers = {
"DeleteLog": grpc.unary_unary_rpc_method_handler(
servicer.DeleteLog,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.DeleteLogRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"WriteLogEntries": grpc.unary_unary_rpc_method_handler(
servicer.WriteLogEntries,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesResponse.SerializeToString,
),
"ListLogEntries": grpc.unary_unary_rpc_method_handler(
servicer.ListLogEntries,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesResponse.SerializeToString,
),
"ListMonitoredResourceDescriptors": grpc.unary_unary_rpc_method_handler(
servicer.ListMonitoredResourceDescriptors,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.SerializeToString,
),
"ListLogs": grpc.unary_unary_rpc_method_handler(
servicer.ListLogs,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.logging.v2.LoggingServiceV2", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| [
"noreply@github.com"
] | tswast.noreply@github.com |
1c191670b95cd97eb7e9927c5966fe0fe092eed3 | c259bd9e4a570a1fa37949655530d778e5f5c46d | /mysite/.history/mysite/settings_20211014220254.py | edf1209e555479d4892a4fb712109c1d5b7bea7a | [] | no_license | ritikalohia/django-rest-students | 0cc56f435b7b2af881adfd7cace54eef98213c57 | ca5f9f466fcd74fef8ce91f019bcb6e7d83c8e20 | refs/heads/main | 2023-08-15T21:51:18.988691 | 2021-10-14T18:19:04 | 2021-10-14T18:19:04 | 417,219,011 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-vw0@qaq+af@an^ipzwchu$p*ywufp074e73!dtzcbara-qicvk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"rtklohia@gmail.com"
] | rtklohia@gmail.com |
f565c7f1049dafaeb6f59a5d1402e33d61f66f26 | cccf8da8d41ae2c14f5f4313c1edcf03a27956bb | /python/python2latex/writeLTXnonfrenchspacing.py | 500b80bebf55e18223970983e73099ddd5dc5c8a | [] | no_license | LucaDiStasio/transpilers | e8f8ac4d99be3b42a050148ca8fbc5d025b83290 | c55d4f5240083ffd512f76cd1d39cff1016909b8 | refs/heads/master | 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,438 | py | # Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXnonfrenchspacing(filepath=None,args=None,options=None,*varargs,**kwargs):
    r"""Append a ``\nonfrenchspacing`` command to the LaTeX file at *filepath*.

    The file is opened in append mode; a blank line is written, followed by
    ``\nonfrenchspacing`` optionally decorated with ``[options]`` and a
    ``{...}`` group built from *args*.

    Parameters (SMOP/MATLAB calling convention):
      filepath -- path of the LaTeX source file to append to.
      args     -- matlabarray (or nested cell-like array) of strings joined
                  inside a trailing ``{...}`` group; skipped when empty.
      options  -- string placed in ``[...]`` after the command, unless it is
                  'none'/'NONE'/'None'.

    Returns nothing; the side effect is the appended line in the file.

    NOTE(review): this is SMOP-autogenerated code. The original signature
    declared both ``args=None`` and ``*args`` — a duplicate parameter name,
    which is a SyntaxError, so the module could never be imported. The star
    parameter is renamed to ``*varargs`` (no caller impact; it is unused).
    All helpers (fopen, fprintf, strcat, ...) come from ``smop.core``'s
    star import, and indexing like ``args[i]`` / ``dims[1]`` presumably
    relies on smop's 1-based matlabarray semantics — confirm before editing.
    """
    varargin = writeLTXnonfrenchspacing.varargin
    nargin = writeLTXnonfrenchspacing.nargin
    ##
    #==============================================================================
    # Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
    # Author: Luca Di Stasio <luca.distasio@gmail.com>
    #         <luca.distasio@ingpec.eu>
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided that the following conditions are met:
    #
    # Redistributions of source code must retain the above copyright
    # notice, this list of conditions and the following disclaimer.
    # Redistributions in binary form must reproduce the above copyright
    # notice, this list of conditions and the following disclaimer in
    # the documentation and/or other materials provided with the distribution
    # Neither the name of the Universite de Lorraine or Lulea tekniska universitet
    # nor the names of its contributors may be used to endorse or promote products
    # derived from this software without specific prior written permission.
    #
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    # POSSIBILITY OF SUCH DAMAGE.
    #==============================================================================
    # DESCRIPTION
    #
    # A function to create a Latex file.
    # Setting the command untoggles the command\frenchspacingand activates LaTeX standards to insert more space after a period (.) than after an ordinary character.#
    ##
    # Open in append mode and separate from previous content with a newline.
    fileId=fopen(filepath,'a')
    fprintf(fileId,'\\n')
    # The literal LaTeX command; options/args are appended to this string below.
    line='\\nonfrenchspacing'
    if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
        line=strcat(line,'[',options,']')
    if logical_not(isempty(args)):
        line=strcat(line,'{')
        for i in arange(1,length(args)).reshape(-1):
            # dims distinguishes scalar / column / row array layouts of args.
            dims=size(args)
            if dims[1] == 1 and dims[2] == 1:
                line=strcat(line,args[i])
            else:
                if dims[1] > 1 and dims[2] == 1:
                    # NOTE(review): try/finally with pass is a no-op guard —
                    # artifact of SMOP's MATLAB try/end translation; it does
                    # NOT swallow exceptions the way MATLAB's try did.
                    try:
                        line=strcat(line,args[i][1])
                    finally:
                        pass
                else:
                    if dims[1] == 1 and dims[2] > 1:
                        try:
                            line=strcat(line,args[1][i])
                        finally:
                            pass
                    else:
                        line=strcat(line,args[i])
        line=strcat(line,'}')
    # Emit the assembled command and release the file handle.
    fprintf(fileId,strcat(line,'\\n'))
    fclose(fileId)
    return
"luca.distasio@gmail.com"
] | luca.distasio@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.