blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c404d83db7963f05d6ea5bbeea41481d45b822ea | 79588b10fe6d4e056b7e3c67bf742bc70c5df280 | /blitzem/console.py | f32883d230dc99df28095621a91bd5fe6b24a3bf | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | rnorth/blitzemp | 37969be450835e220a45cddee6f899b759e0c251 | 6967a48c2374c13d4328657c44efc3e6cd721eb7 | refs/heads/master | 2023-08-30T03:53:22.660954 | 2012-01-01T11:59:31 | 2012-01-01T11:59:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | #!/usr/bin/env python
""""
console.py
Provides the command line interface to blitzem.
Copyright (c) 2011 Richard North. All rights reserved.
"""
import sys
from blitzem.core import sync
def main():
    """Command-line entry point for blitzem (Python 2).

    Usage: blitzem <command> [<tag>]

    Loads the user's environment definition, then runs the requested
    command against all nodes or only those matching the optional tag.
    Raises IndexError if no command argument is supplied.
    """
    # Load environment.py into this module's namespace so the node/tag
    # definitions it declares are visible to sync().
    execfile("environment.py", locals(), globals())
    command = sys.argv[1]
    if len(sys.argv) > 2:
        # A name/tag filter was supplied; only matching nodes are affected.
        tag = sys.argv[2]
        print "Blitzing nodes named/tagged '%s' %s\n\n" % (tag, command)
    else:
        # No filter: operate on every node in the environment.
        tag = ""
        print "Blitzing all nodes %s\n\n" % command
    sync(command, tag)
| [
"rich.north@gmail.com"
] | rich.north@gmail.com |
def summer_69(lst):
    """Sum the numbers in lst, ignoring any section that starts with a 6
    and runs through the next 9 (the 6..9 span itself is excluded).

    A 9 encountered while recording is included normally; a 9 that closes
    an ignore section is skipped. Returns 0 for an empty list.
    """
    total = 0      # renamed from `sum`, which shadowed the builtin
    record = True  # False while inside a 6..9 "ignore" section
    for n in lst:
        if n == 6:
            record = False
        if record:
            total += n
        if n == 9:
            # A 9 re-enables recording; it was already added above if we
            # were recording when we saw it.
            record = True
    return total
# Read the element count, then that many integers, and print the
# summer_69 total of the collected values.
count = int(input("enter the no. of element:"))
values = [int(input()) for _ in range(count)]
print(summer_69(values))
"noreply@github.com"
] | noreply@github.com |
a79fbdd011e160d0b420e3b2489bb4c3dec7eac7 | 39fd21569d67287c516a3c42378f3f4c1708b3fe | /burbger-CS2340-master/space_trader/app/forms/__init__.py | a256ce9d1bbbaa5e58f57ab977f3a2d4256ebb51 | [] | no_license | sam1993316/CS2340_project | 1cfd03375aac48629f7186608539cb42684595ca | f683475b2dfb81683dc5001ab6464fdd9dbe1d66 | refs/heads/master | 2020-07-24T08:18:49.033003 | 2020-01-08T19:21:52 | 2020-01-08T19:21:52 | 207,862,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,384 | py | from flask import render_template, url_for
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired
from wtforms import (StringField, RadioField, IntegerField, SubmitField, ValidationError,
HiddenField)
from ordered_set import OrderedSet
from app.objects import Game, Universe, Player
from app import instance
def skill_check(form, field):
    """WTForms validator: reject negative skill values and enforce the
    form's total skill-point budget as fields are validated in turn."""
    points = field.data
    if points < 0:
        raise ValidationError("Can't allocate negative skill points")
    # Accumulate across the four skill fields; validate() resets the budget.
    form.allocated_skill_points += points
    if form.allocated_skill_points > form.max_skill_points:
        raise ValidationError(
            "Can't allocate more than {} skill points on the {} difficulty"
            .format(form.max_skill_points, form.difficulty_setting)
        )
# superclass for all new forms we'll make - abstracts away a little bit of code
class SpaceTraderForm(FlaskForm):
    """Base class for the game's forms: renders a template with the shared
    instance/game/universe/player context injected."""

    # Page title shown by templates; subclasses override as needed.
    title = None
    # Template rendered by render(); subclasses override as needed.
    template_file = 'index.html'

    def render(self, **kwargs):
        """Render this form's template with fresh Game/Universe/Player
        objects; extra keyword arguments pass straight to the template."""
        return render_template(
            self.template_file,
            instance=instance,
            game=Game(),
            universe=Universe(),
            player=Player(),
            form=self,
            **kwargs
        )

    def make_pylint_happy(self, arg):
        """Dynamic attribute lookup; exists only to silence a pylint warning."""
        return self.__dict__[arg]
class StartForm(SpaceTraderForm):
    """New-game form: player name, difficulty, and skill-point allocation."""

    title = 'Start Game'
    template_file = 'start.html'
    # Running total accumulated by skill_check across the four skill fields.
    allocated_skill_points = 0
    # Skill-point budget; recomputed from the chosen difficulty in validate().
    max_skill_points = 8
    difficulty_setting = 'Medium'
    # De-duplicated validation messages for the template (rebuilt each validate).
    error_message_set = OrderedSet()

    name = StringField('Name', validators=[DataRequired("Must input a name")])
    difficulty = RadioField('Difficulty', default='1', choices=[
        ('0', 'Easy'),
        ('1', 'Medium'),
        ('2', 'Hard')])
    pilot_skill = IntegerField(
        'Pilot Skill',
        validators=[DataRequired("Must input a pilot skill level"), skill_check]
    )
    fighter_skill = IntegerField(
        'Fighter Skill',
        validators=[DataRequired("Must input a fighter skill level"), skill_check]
    )
    merchant_skill = IntegerField(
        'Merchant Skill',
        validators=[DataRequired("Must input a merchant skill level"), skill_check]
    )
    engineer_skill = IntegerField(
        'Engineer Skill',
        validators=[DataRequired("Must input an engineer skill level"), skill_check]
    )
    done = SubmitField('Start New Game')

    def validate(self):
        """Validate the form after deriving the skill budget from the
        difficulty (16/12/8 points for Easy/Medium/Hard), then collect
        one de-duplicated error message per failing field."""
        self.difficulty_setting = self.difficulty.choices[int(self.difficulty.data)][1]
        self.max_skill_points = 16 - (4 * int(self.difficulty.data))
        super_return = super().validate()
        self.error_message_set = OrderedSet()
        for error in self.errors:
            self.error_message_set.add(self.errors[error][0])
        return super_return
class IndexForm(SpaceTraderForm):
    """Landing-page form with a single 'Start Game' button."""
    start_game = SubmitField('Start Game')
class GameForm(SpaceTraderForm):
    """Main in-game form; hidden fields carry routing state for the template."""
    template_file = 'game.html'
    post_location = HiddenField()
    game_over_url = HiddenField()
class WinForm(SpaceTraderForm):
    """Victory-screen form offering a new game."""
    player = Player()
    template_file = 'win.html'
    new_game = SubmitField('New Game')
class LoseForm(SpaceTraderForm):
    """Game-over screen form offering a new game."""
    title = "Game Over"
    template_file = 'lose.html'
    new_game = SubmitField('New Game')
class ReturnForm(SpaceTraderForm):
    """Returning-player form: resume the saved game or start a new one."""
    template_file = 'return.html'
    new_game = SubmitField('New Game')
    continue_game = SubmitField('Continue')
| [
"sam1993316@hotmail.com"
] | sam1993316@hotmail.com |
c1e5a8a2013d707dbc31b1a4de39a609690a48f4 | f442f43ac1d7feb808d6ed8dce1db5e68dc5b719 | /testFunctions/BICOP1.py | 18d1af003e24d9ab7f5fe350050de77bbee77a2f | [
"MIT"
] | permissive | RoydeZomer/Multi-Point-SAMO-COBRA | 36b0bae48c57d0118e9a1a45006fbd963d09d9d7 | de139c6facbcc41afe287635e233271fea78c67b | refs/heads/main | 2023-05-10T22:54:35.202718 | 2023-05-01T09:03:22 | 2023-05-01T09:03:22 | 479,006,481 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 15:13:45 2020
@author: r.dewinter
"""
import numpy as np
def BICOP1(x):
    """Evaluate the BICOP1 bi-objective constrained test problem.

    Given a decision vector x (first component in [0, 1], remaining
    components feed the g-function), returns a two-element list:
    [objectives (f1, f2), constraints]. The single constraint is negated
    because the surrounding SACOBRA-style code expects g(x) <= 0.
    """
    vec = np.array(x)
    shape = 1 + 9 * np.sum(vec[1:] / 9)
    obj1 = vec[0] * shape
    obj2 = shape - np.sqrt(obj1 / shape)
    objectives = np.array([obj1, obj2])
    # -1 factor: flip the constraint sign for the optimizer's convention.
    constraints = -1 * np.array([shape])
    return [objectives, constraints]
# amount = 1000000
# x = np.random.rand(amount*10)
# x = np.reshape(x, (amount, 10))
# objs = np.zeros((amount,2))
# cons = np.zeros((amount,1))
# for i in range(len(x)):
# objs[i], cons[i] = BICOP1(x[i])
# import matplotlib.pyplot as plt
# plt.plot(objs[:,0], objs[:,1], 'ro') | [
"r.dewinter@c-job.com"
] | r.dewinter@c-job.com |
8ff433deb370820425f491c07e8a9facbe7a6d34 | 74c4a11f216537c5755a876b867a3b33b11d3ec2 | /env/bin/easy_install | 0ac1fbefae98b7dd53bb6f9fa61091ee746c895e | [] | no_license | macinnis82/tbay | 59462416646bf367fbf74bdcbe0e9ffd67756a00 | c67574a0a5f2febe731b318e19c6de1cbd15dc7a | refs/heads/master | 2020-05-09T14:06:41.226151 | 2015-08-03T18:46:40 | 2015-08-03T18:46:40 | 40,141,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/home/ubuntu/workspace/thinkful/projects/tbay/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# setuptools-generated console-script entry point: normalise argv[0]
# (Windows wrappers append "-script.pyw"/".exe") before delegating to
# easy_install's main() and exiting with its status code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"MacInnis82@gmail.com"
] | MacInnis82@gmail.com | |
1a6f968edf5fdb4c61c2389a23c364c8b3fffc69 | c11c337d4f2a609326fe8545c70dafb918ad8110 | /maintenance/mtrack/scripts.py | 757f45ca27e71c689cf94262e0725a2cd3a0d47e | [
"MIT",
"BSD-2-Clause"
] | permissive | summertriangle-dev/arposandra | 7b7f62b63cebe07c6b3b24321a0d01623dfed2b3 | d4fcbec32e86a96c7d810d3d146695eb0b384889 | refs/heads/master | 2023-07-25T02:55:37.534890 | 2023-07-07T01:05:12 | 2023-07-07T01:18:02 | 213,795,406 | 19 | 4 | NOASSERTION | 2023-03-04T05:48:36 | 2019-10-09T01:48:47 | Python | UTF-8 | Python | false | false | 3,905 | py | # TODO: I haven't looked very carefully at optimizing these queries.
# May want to come back after a couple years and see how they're doing.
# We sort based on latest release.
def update_set_sort_table():
    """Return SQL that refreshes the card-set sort table.

    Upserts each set representative's latest per-server release date, then
    back-fills nijigasaki_member_state where it is still NULL: 0 if the
    set's earliest JP release predates 2020-08-05 08:00, else 1, and a
    final pass forces 0 for sets with no release date at all.
    """
    return f"""
    INSERT INTO card_p_set_index_v2__sort_dates
    (SELECT representative, server_id, MAX(date) FROM card_index_v1__release_dates
        INNER JOIN card_p_set_index_v2__card_ids ON (id = card_ids)
        GROUP BY (representative, server_id))
    ON CONFLICT (representative, server_id) DO UPDATE SET
        date = excluded.date;

    WITH rd AS (
        SELECT representative, (CASE WHEN MIN(date) < '2020-08-05 08:00:00'::timestamp THEN 0 ELSE 1 END) AS have_shio
        FROM card_index_v1__release_dates
        INNER JOIN card_p_set_index_v2__card_ids ON (id = card_ids)
        WHERE server_id = 'jp'
        GROUP BY (representative)
    )
    UPDATE card_p_set_index_v2 SET nijigasaki_member_state =
        (SELECT have_shio FROM rd WHERE rd.representative = card_p_set_index_v2.representative)
    WHERE nijigasaki_member_state IS NULL;

    -- Do it twice for sets without a release date.
    UPDATE card_p_set_index_v2 SET nijigasaki_member_state = 0
    WHERE nijigasaki_member_state IS NULL
    """
# Tries to set the release date based on feature list from newly added history
# records. If a card was released without a feature and featured later, the
# date will be set wrong. This won't happen though. In theory...
def update_card_release_dates(prefix):
    """Return SQL that back-fills card release dates and sources from the
    history tables.

    `prefix` names the history table set to read (e.g. "" for the full
    table or a partial-update prefix); it is interpolated directly into
    the SQL, so callers must pass trusted values only.
    """
    return f"""
    WITH rdates AS (
        SELECT DISTINCT ON (card_id, {prefix}history_v5__dates.serverid)
            card_id, {prefix}history_v5__dates.serverid, {prefix}history_v5__dates.date
        FROM {prefix}history_v5__card_ids
        INNER JOIN {prefix}history_v5__dates ON (
            {prefix}history_v5__dates.id = {prefix}history_v5__card_ids.id
            AND {prefix}history_v5__card_ids.serverid = {prefix}history_v5__dates.serverid
            AND type = (CASE
                WHEN what = 2 THEN 1
                WHEN what = 3 THEN 2
                WHEN what = 4 THEN 2
                ELSE 2
            END)
        )
        ORDER BY card_id, {prefix}history_v5__dates.serverid, date
    )
    INSERT INTO card_index_v1__release_dates (
        (SELECT card_id, serverid, date FROM rdates)
    ) ON CONFLICT DO NOTHING;

    -- First try the entire history table, because we want the oldest source, but restrict to cards that appeared in the partial update.
    UPDATE card_index_v1 SET
        source = (SELECT history_v5__card_ids.what FROM history_v5__card_ids
            INNER JOIN history_v5 USING (id, serverid) WHERE card_id = card_index_v1.id
            ORDER BY sort_date LIMIT 1)
    WHERE (SELECT what FROM {prefix}history_v5__card_ids WHERE card_id = card_index_v1.id LIMIT 1) IS NOT NULL;

    -- If still null it wasn't featured before, so go ahead and use the new hist list
    UPDATE card_index_v1 SET
        source = (SELECT what FROM {prefix}history_v5__card_ids WHERE card_id = card_index_v1.id LIMIT 1)
    WHERE source IS NULL
    """
def update_hist_event_link():
    """Return SQL that links history rows to events.

    Matches type-1 history dates whose timestamp equals an event's start
    time exactly, then inserts type-7 date rows carrying the event_id
    (idempotent via ON CONFLICT DO NOTHING).
    """
    return """
    WITH event_match AS (
        SELECT event_v2.serverid AS sid, event_id, history_v5__dates.id AS hid FROM history_v5__dates
        INNER JOIN event_v2 ON (history_v5__dates.serverid=event_v2.serverid
            AND EXTRACT(epoch FROM history_v5__dates.date - event_v2.start_t) = 0)
        WHERE type = 1
    )
    INSERT INTO history_v5__dates (
        (SELECT hid, sid, 7, NULL, event_id FROM event_match)
    ) ON CONFLICT DO NOTHING;
    """
| [
"summertriangle.dev@gmail.com"
] | summertriangle.dev@gmail.com |
e8d197368d8a83bbf36c5e39a424a7e7a44b5b7c | 632dcb4e37cadd87cb7ff8715b0048df5cd0d11b | /CompuCell3D/core/Demos/SBMLSolverExamples/SBMLSolverAntimony/SBMLSolverAntimony2/Simulation/SBMLSolverAntimony2Steppables.py | 007262772d5e9b8242557c617ed99a115ce20b47 | [
"MIT"
] | permissive | CompuCell3D/CompuCell3D | df638e3bdc96f84b273978fb479842d071de4a83 | 65a65eaa693a6d2b3aab303f9b41e71819f4eed4 | refs/heads/master | 2023-08-26T05:22:52.183485 | 2023-08-19T17:13:19 | 2023-08-19T17:13:19 | 12,253,945 | 51 | 41 | null | 2023-08-27T16:36:14 | 2013-08-20T20:53:07 | C++ | UTF-8 | Python | false | false | 4,920 | py | from cc3d.core.PySteppables import *
class SBMLSolverSteppable(SteppableBasePy):
    """Attaches per-cell-type Antimony/SBML reaction models and steps them
    once per Monte Carlo step."""

    def __init__(self, frequency=1):
        SteppableBasePy.__init__(self, frequency)

    def start(self):
        """Define the two Antimony models (S1->S2 for Type1, S2->S1 for
        Type2) and bind them to their cell types."""
        # Antimony model string: cell type 1
        model_string_type1 = """model type1()
# Model
S1 => S2; k1*S1
# Initial conditions
S1 = 0
S2 = 1
k1 = 1
end"""

        # Antimony model string: cell type 2
        model_string_type2 = """model type2()
# Model
S2 => S1; k2*S2
# Initial conditions
S1 = 0
S2 = 0
k2 = 1
end"""

        # adding options that setup SBML solver integrator
        # these are optional but useful when encountering integration instabilities
        options = {'relative': 1e-10, 'absolute': 1e-12}
        self.set_sbml_global_options(options)

        step_size = 0.001
        # Apply model strings to cell types
        self.add_antimony_to_cell_types(model_string=model_string_type1, model_name='dpType1', cell_types=[self.TYPE1],
                                        step_size=step_size)
        self.add_antimony_to_cell_types(model_string=model_string_type2, model_name='dpType2', cell_types=[self.TYPE2],
                                        step_size=step_size)

    def step(self, mcs):
        # Advance every cell's SBML model by one integrator step per MCS.
        self.timestep_sbml()

    def finish(self):
        # this function may be called at the end of simulation - used very infrequently though
        return
class SecretionSteppable(SecretionBasePy):
    """Exchanges S1/S2 between the chemical fields and each cell's SBML model.

    Type1 cells take up S1 from Field1 and (once their internal S2 exceeds
    0.75) secrete S2 into Field2; Type2 cells do the mirror-image exchange.
    Field changes are spread uniformly over each cell's pixels.
    """

    # FIX: the constructor was misspelled `__init` (single trailing
    # underscore pair missing), so it was never invoked as __init__.
    def __init__(self, frequency=1):
        SecretionBasePy.__init__(self, frequency)

    def step(self, mcs):
        # Per-unit uptake/secretion rate constants.
        consume_s1 = 1
        consume_s2 = 1
        secrete_s1 = 1
        secrete_s2 = 1
        field1 = self.field.Field1
        field2 = self.field.Field2

        for cell in self.cell_list_by_type(self.TYPE1):
            this_cell_s1 = cell.sbml.dpType1['S1']
            this_cell_s2 = cell.sbml.dpType1['S2']
            cell_volume = cell.volume
            # Secrete S2 only once the internal store passes the threshold.
            if this_cell_s2 > 0.75:
                this_secrete_s2 = secrete_s2
            else:
                this_secrete_s2 = 0
            pixel_list = CellPixelList(self.pixelTrackerPlugin, cell)
            # (Removed an unused `sbml_values` lookup that had no effect.)
            s1_consumed = 0
            for pixel_data in pixel_list:
                pt = pixel_data.pixel
                s1_consumed += field1.get(pt) * consume_s1
            s2_secreted = this_cell_s2 * this_secrete_s2
            # Update the cell's internal model state...
            cell.sbml.dpType1['S1'] = this_cell_s1 + s1_consumed
            cell.sbml.dpType1['S2'] = this_cell_s2 - s2_secreted
            # ...and apply the exchange per pixel, scaled by cell volume.
            for pixel_data in pixel_list:
                pt = pixel_data.pixel
                field1.set(pt, field1.get(pt) - s1_consumed / cell_volume)
                field2.set(pt, field2.get(pt) + s2_secreted / cell_volume)

        for cell in self.cell_list_by_type(self.TYPE2):
            this_cell_s1 = cell.sbml.dpType2['S1']
            this_cell_s2 = cell.sbml.dpType2['S2']
            cell_volume = cell.volume
            # Secrete S1 only once the internal store passes the threshold.
            if this_cell_s1 > 0.75:
                this_secrete_s1 = secrete_s1
            else:
                this_secrete_s1 = 0
            pixel_list = CellPixelList(self.pixelTrackerPlugin, cell)
            s2_consumed = 0
            for pixel_data in pixel_list:
                pt = pixel_data.pixel
                s2_consumed += field2.get(pt) * consume_s2
            s1_secreted = this_cell_s1 * this_secrete_s1
            cell.sbml.dpType2['S1'] = this_cell_s1 - s1_secreted
            cell.sbml.dpType2['S2'] = this_cell_s2 + s2_consumed
            for pixel_data in pixel_list:
                pt = pixel_data.pixel
                field1.set(pt, field1.get(pt) + s1_secreted / cell_volume)
                field2.set(pt, field2.get(pt) - s2_consumed / cell_volume)
# Demo: accessing SBML values for further manipulation/coupling with other components
class IdFieldVisualizationSteppable(SteppableBasePy):
    """Mirrors each cell's SBML S1/S2 values into cell-level scalar fields
    so they can be visualized."""

    def __init__(self, frequency=1):
        SteppableBasePy.__init__(self, frequency)
        # Cell-level scalar display fields for the per-cell S1/S2 values.
        self.create_scalar_field_cell_level_py("IdFieldS1")
        self.create_scalar_field_cell_level_py("IdFieldS2")

    def step(self, mcs):
        """Copy the current S1/S2 from each cell's model (dpType1 for Type1
        cells, dpType2 for Type2) into the display fields."""
        id_field_s1 = self.field.IdFieldS1
        id_field_s2 = self.field.IdFieldS2
        for cell in self.cell_list_by_type(self.TYPE1):
            id_field_s1[cell] = cell.sbml.dpType1['S1']
            id_field_s2[cell] = cell.sbml.dpType1['S2']
        for cell in self.cell_list_by_type(self.TYPE2):
            id_field_s1[cell] = cell.sbml.dpType2['S1']
            id_field_s2[cell] = cell.sbml.dpType2['S2']
| [
"maciekswat@gmail.com"
] | maciekswat@gmail.com |
9dc7d208b99d32efd620095f13cb728df26b4641 | add160c54fd21154a108ea687aaa0030b07ad01f | /Browser.py | c78acbfc7b7dd455c5dda9d5ed6f667d481d50ec | [] | no_license | raphaelmiy/MOSH-CODE | bb56f3d3d6a981ea9ceb57a7cb1e05c93ac67ebd | 6722e2b0f263b21300ce4b2c51bc0c8fe8b5a6f8 | refs/heads/master | 2020-09-04T17:10:35.082128 | 2019-12-19T06:39:32 | 2019-12-19T06:39:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | import webbrowser
# Announce completion, then open the default browser -- presumably as a
# post-deployment smoke test; verify the target URL against the deploy docs.
print("Deployment completed.")
webbrowser.open("https://www.google.com") | [
"37901809+AquaticRaphael@users.noreply.github.com"
] | 37901809+AquaticRaphael@users.noreply.github.com |
ec8f47a1f2a8f9a329a2a94a4cc7b3132b0deb39 | 0ae225da5956147207cf52b00fe8242b551f58cc | /event_management/accounts/forms.py | 3958bd575f1da1de38c8add756bde9b598efe9c8 | [] | no_license | IIITL-DeV/College-Event-Management-System | 984100d9fd97d9211e6c8d2df0d2123159d5f2ef | 23f380994821b762d1e8169360b6998416d40f7f | refs/heads/main | 2023-09-03T21:40:29.660611 | 2021-11-14T10:59:55 | 2021-11-14T10:59:55 | 402,279,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from accounts.models import Accounts
from django.contrib.auth import authenticate
class RegistrationForm(UserCreationForm):
    """Sign-up form for Accounts: profile image, contact details and a
    double-entry password, all styled with Bootstrap form-control classes."""
    image = forms.ImageField(label="Upload Your Image",widget = forms.FileInput(attrs={'class':'form-control','placeholder':"Your Image"}))
    fullname = forms.CharField(label="Full Name",widget= forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter Your Full Name'}))
    email = forms.EmailField(label="Email Address",help_text="We'll never share your email with anyone else.", widget= forms.EmailInput(attrs={'class': 'form-control','aria-describedby':'emailHelp','placeholder':'Enter email'}))
    username = forms.CharField(label="Username",widget= forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter Username'}))
    phone_number = forms.CharField(label="Phone Number",widget= forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter Your Phone Number'}))
    password1 = forms.CharField(label="Password",help_text="Don't share your password with others",widget= forms.PasswordInput(attrs={'class': 'form-control','placeholder':'Enter Password'}))
    password2 = forms.CharField(label="Confirm Password",help_text="",widget= forms.PasswordInput(attrs={'class': 'form-control','placeholder':'Re-Enter Password'}))

    class Meta:
        """Bind the form to the Accounts model and its registration fields."""
        model = Accounts
        fields = ('image','fullname','email','username','phone_number','password1','password2')
class Log_in_Form(forms.Form):
    """Login form: email + password, checked against Django's auth backend."""
    email = forms.EmailField(label="Email Address",help_text="We'll never share your email with anyone else.", widget= forms.EmailInput(attrs={'class': 'form-control','aria-describedby':'emailHelp','placeholder':'Enter email'}))
    password = forms.CharField(label="Password",help_text="Don't share your password with others",widget= forms.PasswordInput(attrs={'class': 'form-control','placeholder':'Enter Password'}))

    class Meta:
        """Kept for parity with the registration form (plain Forms ignore it)."""
        model = Accounts
        fields = ('email','password')

    def clean(self):
        """Form-wide validation: verify the credentials authenticate.

        Uses .get() rather than direct indexing so a missing/invalid field
        (previously a KeyError) surfaces as a normal field error, and only
        attempts authentication when both values are present.
        """
        cleaned_data = super().clean()
        email = cleaned_data.get('email')
        password = cleaned_data.get('password')
        if email and password and not authenticate(email=email, password=password):
            raise forms.ValidationError("Invalid login")
        return cleaned_data
| [
"lit2019073@iiitl.ac.in"
] | lit2019073@iiitl.ac.in |
8cf47f34b56a9495fb0a2bde9cd7ebcfb3394d4f | 8b540332a95f08de99aa639e884d3f5cceea2d40 | /py/compress/ywpress/compress.py | 3ba2ab74579ece226a5f39ca5fd95c7f10d020ba | [] | no_license | goforeverSmile/knowledgeAll | 8cebe81ec5062c5929bc3b019a014319618ba390 | 416fe76790d489739b18faf1bdc0bed948bcfbed | refs/heads/master | 2023-04-17T20:39:06.275966 | 2023-04-03T03:12:38 | 2023-04-03T03:12:38 | 135,508,332 | 0 | 0 | null | 2023-02-21T06:50:48 | 2018-05-30T23:42:12 | JavaScript | UTF-8 | Python | false | false | 2,225 | py | #!/usr/bin/python
# encoding=utf-8
import os
import sys
import shutil
import json
#本python文件相对于项目工程根目录的相对路径
#本项目所有游戏短名
mjGames = ['bjmj']
pkGames = []
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../src/tools/hall/utils')))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../src/tools/png_quant')))
import utils
import png_quant
from logUtils import Logging
cur_dir = utils.flat_path(os.path.dirname(__file__))
print("cur_dir==========="+cur_dir)
proj_root = utils.flat_path(os.path.join(cur_dir, '../../src/tools/png_quant'))
compressCmd = 'python ' + proj_root + '/png_quant.py '
print("proj_root============",proj_root)
# 支持的文件后缀
img_prefix = ['png', 'jpg', 'jpeg']
def start(nameStr):
    """Compress images for one or more games.

    nameStr is either a catch-all command ('all', 'allmj', 'allpk') or a
    comma-separated list of game short names.
    """
    Logging.debug_msg("\n")
    if nameStr in ('all', 'allmj', 'allpk'):
        targets = getAllGames(nameStr)
    else:
        targets = nameStr.split(',')
    for short_name in targets:
        compress_imgs_in_path(short_name)
def compress_imgs_in_path(name):
    """Run the png_quant compressor over every image under ../<name>/images.

    Walks the game's image tree and spawns the compress command for each
    file whose extension is in img_prefix.
    """
    resPath = '../' + name + '/images'
    for root, dirs, files in os.walk(resPath):
        for file in files:
            # os.path.splitext handles dotless names and multi-dot names
            # correctly; the old `file.find(".")` slice treated a dotless
            # name as all-extension and "a.tar.png" as extension "tar.png".
            ext = os.path.splitext(file)[1][1:].lower()
            if ext not in img_prefix:
                continue
            # os.walk yields each file relative to `root`; the old code
            # joined dirs[0] as well, producing paths that don't exist.
            image = os.path.join(cur_dir, root, file)
            cmd = compressCmd + image
            os.popen(cmd, 'r')
            print(cmd)
def getAllGames(cmd):
    """Map an 'all*' command to the corresponding list of game short names;
    any other command yields an empty list."""
    catalogs = {
        'all': mjGames + pkGames,
        'allmj': mjGames,
        'allpk': pkGames,
    }
    return catalogs.get(cmd, [])
if __name__ == "__main__":
    # sys.argv always contains at least the script path, so the old
    # `len(sys.argv) < 1` test could never trigger and sys.argv[1] raised
    # IndexError when no short name was supplied; require a real argument.
    if len(sys.argv) < 2:
        Logging.debug_msg("输入短名错误!")
    else:
        start(sys.argv[1])
| [
"chengangbao@weile.com"
] | chengangbao@weile.com |
f279c532104dc9ce42d1c72e69c61c76f97d0d98 | e66780b65152d0c17bbafb4857941ac9056cee6a | /django/django_intro/first_Django_project/first_Django_project/settings.py | 47fc4767da9509faeff284f8e2a4db040275a28f | [] | no_license | Bereketetefa/Python | 0a271d7ab98503f71ce58c7b82216c472be93c71 | 01e347ad6a27f4fa32cf119dd9a04900e0f530f2 | refs/heads/master | 2022-12-03T10:59:06.327454 | 2020-08-06T21:24:44 | 2020-08-06T21:24:44 | 285,673,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | """
Django settings for first_Django_project project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any non-local deployment.
SECRET_KEY = 'qg%%vz8@ve3v&g-3oyu(og(ub0eaza%y_($04o!7v7sgp6qst5'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    # NOTE(review): placeholder left by the project template -- replace with
    # the real app label; Django will fail at startup with this entry.
    'your_app_name_here',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Default Django 2.2 middleware stack (security, sessions, CSRF, auth, ...).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'first_Django_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # No project-level template dirs; per-app templates/ folders are
        # discovered because APP_DIRS is True.
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'first_Django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

# Template default: a local SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
| [
"bereketetefa@gmail.com"
] | bereketetefa@gmail.com |
f225e82576f7863526fcd3ee41b099b672fcee2c | 7a47a66d35ffc9cd391fb52c054b04fda9f381ac | /alien_invasion/bullet.py | b287bac0b2b09c60a78ba0c13c6951b98e7f7f91 | [] | no_license | mmiirroo/python | 82f916d73082d44475bf378ca3101ebe6384efe1 | ae13cd9f3430b6f916f3ea3227bf0aad2192dc31 | refs/heads/master | 2021-10-23T09:01:33.150708 | 2019-03-16T09:55:01 | 2019-03-16T09:55:01 | 102,234,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
    """A single bullet fired upward from the ship's current position."""

    def __init__(self, ai_settings, screen, ship):
        """Create the bullet rect at the tip of the ship."""
        super(Bullet, self).__init__()
        self.screen = screen

        # Build the bullet at (0, 0), then snap its top-center to the
        # ship's top-center (same as setting centerx and top separately).
        self.rect = pygame.Rect(0, 0, ai_settings.bullet_width,
                                ai_settings.bullet_height)
        self.rect.midtop = ship.rect.midtop

        # Track vertical position as a float so fractional speeds work.
        self.y = float(self.rect.y)

        self.color = ai_settings.bullet_color
        self.speed_factor = ai_settings.bullet_speed_factor

    def update(self):
        """Move the bullet up the screen."""
        self.y -= self.speed_factor
        self.rect.y = self.y

    def draw_bullet(self):
        """Draw the bullet onto the screen surface."""
        pygame.draw.rect(self.screen, self.color, self.rect)
| [
"seiyamiro@163.com"
] | seiyamiro@163.com |
7c66f962600a93eac899ce8e78b47514877212f8 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/numpy/core/_add_newdocs.py | aa3210020cc4228d0e9d702b9be0b9074a6b7b7f | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-numpy-1.18.3-c5tvrr2q5vwgtvc6f3ld57v6y4ahvr2h/lib/python3.7/site-packages/numpy/core/_add_newdocs.py | [
"mersoj@rpi.edu"
] | mersoj@rpi.edu |
1dbe9c4baf6a1fde0051a818a55439143b3a22ba | aeb4f955cb15c2b7c97cfcff166dd5b8fa06fc4c | /sorting/utils.py | bd58272d983b84117bf51bce8b9fc73504f2073e | [] | no_license | chisler/basic_algorithms | 0f9f2afdb81790f8a9108a9b31c1c28551694026 | 7ccd7bdc74882ef6286d6beb3fecd01c25a47bb0 | refs/heads/master | 2021-01-19T01:22:29.417126 | 2017-04-19T11:21:58 | 2017-04-19T11:21:58 | 87,241,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from random import randint
def rand_array():
    """Return a random-length (1-10) list of random ints in [-10000, 10000]."""
    size = randint(1, 10)
    return [randint(-10000, 10000) for _ in range(size)]
def test_sorting(sorting_function):
    """Check sorting_function against the builtin sorted() on 100 random
    arrays; raises AssertionError on the first mismatch."""
    for _ in range(100):
        data = rand_array()
        assert sorted(data) == sorting_function(data)
"alexander.chisler@gmail.com"
] | alexander.chisler@gmail.com |
8e547b29dfdd757e9da010a8fcb2e0a74ff18ac0 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-web/azure/mgmt/web/models/hosting_environment_profile.py | de68d8bc558ed823419c5846beb0d775e9a116d2 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,438 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HostingEnvironmentProfile(Model):
    """Specification for an App Service Environment to use for this resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID of the App Service Environment.
    :type id: str
    :ivar name: Name of the App Service Environment.
    :vartype name: str
    :ivar type: Resource type of the App Service Environment.
    :vartype type: str
    """

    # name/type are server-populated, hence marked read-only here.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # msrest (de)serialization map: attribute -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HostingEnvironmentProfile, self).__init__(**kwargs)
        # Only `id` is client-settable; name/type come back from the service.
        self.id = kwargs.get('id', None)
        self.name = None
        self.type = None
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
d85e910cdb8df069910878d52d50620e4badb86f | 5950e8bfe3f12398de3b7055a60f9028fe4b9790 | /bin/oc_common.py | f0e4ddcbf2ab49e9105bd97b8783985fba5349be | [] | no_license | stevekuznetsov/jenkins-jjb | 721b4a7bbff93827b7eea9353ea4feeecac07511 | d69aae4284130299b3447b437e70ae87e4b9f801 | refs/heads/master | 2021-01-18T16:37:00.728351 | 2017-03-30T20:26:47 | 2017-03-30T20:26:47 | 86,748,052 | 0 | 0 | null | 2017-03-30T20:56:40 | 2017-03-30T20:56:40 | null | UTF-8 | Python | false | false | 582 | py | import os
import kubernetes.client
def connect_to_kube_core():
    """Build a CoreV1Api client authenticated with the pod's in-cluster
    service-account token and CA certificate.

    Returns a kubernetes.client.CoreV1Api instance targeting the
    in-cluster API endpoint.
    """
    token_path = '/var/run/secrets/kubernetes.io/serviceaccount/token'
    # Close the token file promptly instead of leaking the handle.
    with open(token_path, 'r') as token_file:
        api_token = token_file.read()
    ca_crt = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
    kubernetes.client.configuration.api_key['authorization'] = api_token
    kubernetes.client.configuration.api_key_prefix['authorization'] = "Bearer"
    kubernetes.client.configuration.ssl_ca_cert = ca_crt
    kubernetes.client.configuration.host = 'https://kubernetes.default.svc'
    core_instance = kubernetes.client.CoreV1Api()
    return core_instance
| [
"cewong@redhat.com"
] | cewong@redhat.com |
d09732d24ed5c663b058fda1ea1a9f991a6ba5c1 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/5823ce64bfec97cdfa0a781253795a6945d469f3-<diff>-bug.py | a06396edf7b6ec24911dd1c5288c295e62b8976d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | @set_module('mxnet.symbol.numpy')
def diff(a, n=1, axis=(- 1), prepend=None, append=None):
'\n Calculate the n-th discrete difference along the given axis.\n\n Parameters\n ----------\n a : ndarray\n Input array\n n : int, optional\n The number of times values are differenced. If zero, the input is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the last axis.\n prepend, append : ndarray, optional\n Not supported yet\n\n Returns\n -------\n diff : ndarray\n The n-th differences.\n The shape of the output is the same as a except along axis where the dimension is smaller by n.\n The type of the output is the same as the type of the difference between any two elements of a.\n This is the same as the type of a in most cases.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n Notes\n -----\n Optional inputs `prepend` and `append` are not supported yet\n '
if (prepend or append):
raise NotImplementedError('prepend and append options are not supported yet')
return _npi.diff(a, n=n, axis=axis) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
a1c6972754b517a1e605b4fc348250331b6a865b | b14b211fe7e7aeec05b908bb6c348524a6c6f279 | /client.py | 25c25d1f435ce2531beb56947b5b9099c6edf819 | [] | no_license | Tej4401/CHAT-APPLICATION | b7d6d8c47d360c1b8dfc214e04c7a260662a97b7 | d5d6bbb7265dfeed3bd6b02398158a5e03876edd | refs/heads/master | 2020-07-22T19:31:03.729111 | 2019-10-11T19:03:55 | 2019-10-11T19:03:55 | 207,304,622 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | import socket
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("192.168.43.26", 4442))
msg = connection.recv(1024).decode()
print(msg)
while(True):
msg1 = input(">> ").encode()
connection.send(msg1)
msg = connection.recv(1024).decode()
print("<< " + msg) | [
"noreply@github.com"
] | noreply@github.com |
94f5393c43fe86e9fc55011ab648e5e523878e30 | ce1fc088fac542e4aff5f6b4e974a62788b7f632 | /evaluate.py | 5b241271618faff31b7250252ad807e3fe987b9e | [] | no_license | nkaralis/X-RayClassification | b302e0dbaf31c02f88ebcc8028db1e762aa75377 | 940738d557ae4fae1ee3864c01b1baa5ccf2befa | refs/heads/master | 2020-03-07T17:36:46.511309 | 2018-05-22T17:03:43 | 2018-05-22T17:03:43 | 127,616,162 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | """
Author: Nikos Karalis
Big Data Project, Chest XRay Classification using TensorFlow
Dataset is available at: https://www.kaggle.com/nih-chest-xrays/data
Source code is based on the tutorial provided by TensorFlow: https://www.tensorflow.org/tutorials/layers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from data_processing import read_test_data
from model import cnn_model_fn
BATCH_SIZE = 10
TEST_EPOCHS = 1
def main(unused_arg):
# Load test data
test_data, test_labels = read_test_data()
# Load the Estimator
xray_classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn, model_dir="./models/4conv_2")
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": test_data},
y=test_labels,
batch_size=BATCH_SIZE,
num_epochs=TEST_EPOCHS,
shuffle=False)
# eval_results = xray_classifier.evaluate(input_fn=eval_input_fn)
# print(eval_results)
eval_results = xray_classifier.predict(input_fn=eval_input_fn)
i = 0
for x in eval_results:
print("Truth: %s Predicted: %s" % (test_labels[i], x["classes"]))
i += 1
if __name__ == "__main__":
tf.app.run() | [
"nkaralis@di.uoa.gr"
] | nkaralis@di.uoa.gr |
efe7223ea2704bde99e63f484470f60f94b09317 | 61a9027b1bf8e160c00520b40abd1c61bb973a7c | /delete_from_s3.py | 0665eb4e36eab517fad65b15e6e8e8e2ece0a2fe | [] | no_license | nithintkv/s3-with-python | d7a899756dad7b087f7df48783136c309c789294 | fe95681731659ffc155e6d58e7347ea2823d2ba0 | refs/heads/master | 2020-05-04T04:20:28.690798 | 2019-04-01T23:56:12 | 2019-04-01T23:56:12 | 178,963,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from boto.s3.connection import S3Connection, Bucket, Key
AWS_S3_ACCESS_KEY_ID = 'ACCESS_KEY'
AWS_S3_SECRET_ACCESS_KEY = 'SECRET_KEY'
AWS_STORAGE_BUCKET_NAME = 'BUCKET_NAME'
conn = S3Connection(AWS_S3_ACCESS_KEY_ID, AWS_S3_SECRET_ACCESS_KEY)
b = Bucket(conn, AWS_STORAGE_BUCKET_NAME)
k = Key(b)
k.key = 'file_to_delete'
b.delete_key(k)
| [
"nithinthank@gmail.com"
] | nithinthank@gmail.com |
95d5c5cadc9fb00f3c1f71d28ec0233c15f404b7 | 5a1e1756025bacae88b619d388ebf61b330001ab | /1.Class/Language_Python-master/Language_Python-master/LC4_HW3.py | 7acbc1e8e531e1ec64cb6af03598dee0507db0cb | [] | no_license | reshmaladi/Python | d1953497703aa15e163cd8ac27be23e3e5c3e947 | 8e9092af63476fef35d221e20acf418983957e53 | refs/heads/master | 2021-10-15T00:55:08.136039 | 2021-10-01T14:32:16 | 2021-10-01T14:32:16 | 165,836,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | x = input("Enter a 1st string \t")
y = input("Enter a 2nd string \t")
print("Swap : \n" + y[0:2] +x[2:] + "\n" + x[0:2] + y[2:]) | [
"reshma.ladi@gmail.com"
] | reshma.ladi@gmail.com |
587d3e84917673843903666b363e93bfde6ea5c6 | a5bba5b2850b1ccb7c0595a6973bd6b97a9a6c55 | /extlib/couchdbkit/schema/properties.py | d3852de189e7fbe6febacdbf2432012046b4b926 | [] | no_license | bjornua/webtemplate | 12fe7a0f10576560ace53f9fc8bd8f9620a65452 | aee465b9f7555e74c007ae89b6e14ec566224fdf | refs/heads/master | 2020-05-17T15:13:53.818074 | 2012-01-30T11:41:49 | 2012-01-30T11:41:49 | 3,298,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,331 | py | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
""" properties used by Document object """
import decimal
import datetime
import re
import time
try:
from collections import MutableSet, Iterable
def is_iterable(c):
return isinstance(c, Iterable)
except ImportError:
from sets import Set as MutableSet
def is_iterable(o):
return hasattr(c, '__iter__')
from couchdbkit.exceptions import BadValueError
__all__ = ['ALLOWED_PROPERTY_TYPES', 'Property', 'StringProperty',
'IntegerProperty', 'DecimalProperty', 'BooleanProperty',
'FloatProperty', 'DateTimeProperty', 'DateProperty',
'TimeProperty', 'DictProperty', 'ListProperty',
'StringListProperty', 'SetProperty',
'dict_to_json', 'list_to_json',
'value_to_json', 'MAP_TYPES_PROPERTIES', 'value_to_python',
'dict_to_python', 'list_to_python', 'convert_property',
'value_to_property', 'LazyDict', 'LazyList', 'LazySet']
ALLOWED_PROPERTY_TYPES = set([
basestring,
str,
unicode,
bool,
int,
long,
float,
datetime.datetime,
datetime.date,
datetime.time,
decimal.Decimal,
dict,
list,
set,
type(None)
])
re_date = re.compile('^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])$')
re_time = re.compile('^([01]\d|2[0-3])\D?([0-5]\d)\D?([0-5]\d)?\D?(\d{3})?$')
re_datetime = re.compile('^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])(\D?([01]\d|2[0-3])\D?([0-5]\d)\D?([0-5]\d)?\D?(\d{3})?([zZ]|([\+-])([01]\d|2[0-3])\D?([0-5]\d)?)?)?$')
re_decimal = re.compile('^(\d+)\.(\d+)$')
class Property(object):
""" Property base which all other properties
inherit."""
creation_counter = 0
def __init__(self, verbose_name=None, name=None,
default=None, required=False, validators=None,
choices=None):
""" Default constructor for a property.
:param verbose_name: str, verbose name of field, could
be use for description
:param name: str, name of field
:param default: default value
:param required: True if field is required, default is False
:param validators: list of callable or callable, field validators
function that are executed when document is saved.
"""
self.verbose_name = verbose_name
self.name = name
self.default = default
self.required = required
self.validators = validators
self.choices = choices
self.creation_counter = Property.creation_counter
Property.creation_counter += 1
def __property_config__(self, document_class, property_name):
self.document_class = document_class
if self.name is None:
self.name = property_name
def __property_init__(self, document_instance, value):
""" method used to set value of the property when
we create the document. Don't check required. """
if value is not None:
value = self.to_json(self.validate(value, required=False))
document_instance._doc[self.name] = value
def __get__(self, document_instance, document_class):
if document_instance is None:
return self
value = document_instance._doc.get(self.name)
if value is not None:
value = self._to_python(value)
return value
def __set__(self, document_instance, value):
value = self.validate(value, required=False)
document_instance._doc[self.name] = self._to_json(value)
def __delete__(self, document_instance):
pass
def default_value(self):
""" return default value """
default = self.default
if callable(default):
default = default()
return default
def validate(self, value, required=True):
""" validate value """
if required and self.empty(value):
if self.required:
raise BadValueError("Property %s is required." % self.name)
else:
if self.choices and value is not None:
if isinstance(self.choices, list): choice_list = self.choices
if isinstance(self.choices, dict): choice_list = self.choices.keys()
if isinstance(self.choices, tuple): choice_list = [key for (key, name) in self.choices]
if value not in choice_list:
raise BadValueError('Property %s is %r; must be one of %r' % (
self.name, value, choice_list))
if self.validators:
if isinstance(self.validators, (list, tuple,)):
for validator in self.validators:
if callable(validator):
validator(value)
elif callable(self.validators):
self.validators(value)
return value
def empty(self, value):
""" test if value is empty """
return not value or value is None
def _to_python(self, value):
if value == None:
return value
return self.to_python(value)
def _to_json(self, value):
if value == None:
return value
return self.to_json(value)
def to_python(self, value):
""" convert to python type """
return unicode(value)
def to_json(self, value):
""" convert to json, Converted value is saved in couchdb. """
return self.to_python(value)
data_type = None
class StringProperty(Property):
""" string property str or unicode property
*Value type*: unicode
"""
to_python = unicode
def validate(self, value, required=True):
value = super(StringProperty, self).validate(value,
required=required)
if value is None:
return value
if not isinstance(value, basestring):
raise BadValueError(
'Property %s must be unicode or str instance, not a %s' % (self.name, type(value).__name__))
return value
data_type = unicode
class IntegerProperty(Property):
""" Integer property. map to int
*Value type*: int
"""
to_python = int
def empty(self, value):
return value is None
def validate(self, value, required=True):
value = super(IntegerProperty, self).validate(value,
required=required)
if value is None:
return value
if value is not None and not isinstance(value, (int, long,)):
raise BadValueError(
'Property %s must be %s or long instance, not a %s'
% (self.name, type(self.data_type).__name__,
type(value).__name__))
return value
data_type = int
LongProperty = IntegerProperty
class FloatProperty(Property):
""" Float property, map to python float
*Value type*: float
"""
to_python = float
data_type = float
def validate(self, value, required=True):
value = super(FloatProperty, self).validate(value,
required=required)
if value is None:
return value
if not isinstance(value, float):
raise BadValueError(
'Property %s must be float instance, not a %s'
% (self.name, type(value).__name__))
return value
Number = FloatProperty
class BooleanProperty(Property):
""" Boolean property, map to python bool
*ValueType*: bool
"""
to_python = bool
data_type = bool
def validate(self, value, required=True):
value = super(BooleanProperty, self).validate(value,
required=required)
if value is None:
return value
if value is not None and not isinstance(value, bool):
raise BadValueError(
'Property %s must be bool instance, not a %s'
% (self.name, type(value).__name__))
return value
def empty(self, value):
"""test if boolean is empty"""
return value is None
class DecimalProperty(Property):
""" Decimal property, map to Decimal python object
*ValueType*: decimal.Decimal
"""
data_type = decimal.Decimal
def to_python(self, value):
return decimal.Decimal(value)
def to_json(self, value):
return unicode(value)
class DateTimeProperty(Property):
"""DateTime property. It convert iso3339 string
to python and vice-versa. Map to datetime.datetime
object.
*ValueType*: datetime.datetime
"""
def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False,
**kwds):
super(DateTimeProperty, self).__init__(verbose_name, **kwds)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
def validate(self, value, required=True):
value = super(DateTimeProperty, self).validate(value, required=required)
if value is None:
return value
if value and not isinstance(value, self.data_type):
raise BadValueError('Property %s must be a %s, current is %s' %
(self.name, self.data_type.__name__, type(value).__name__))
return value
def default_value(self):
if self.auto_now or self.auto_now_add:
return self.now()
return Property.default_value(self)
def to_python(self, value):
if isinstance(value, basestring):
try:
value = value.split('.', 1)[0] # strip out microseconds
value = value[0:19] # remove timezone
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
except ValueError, e:
raise ValueError('Invalid ISO date/time %r [%s]' %
(value, str(e)))
return value
def to_json(self, value):
if self.auto_now:
value = self.now()
if value is None:
return value
return value.replace(microsecond=0).isoformat() + 'Z'
data_type = datetime.datetime
@staticmethod
def now():
return datetime.datetime.utcnow()
class DateProperty(DateTimeProperty):
""" Date property, like DateTime property but only
for Date. Map to datetime.date object
*ValueType*: datetime.date
"""
data_type = datetime.date
@staticmethod
def now():
return datetime.datetime.now().date()
def to_python(self, value):
if isinstance(value, basestring):
try:
value = datetime.date(*time.strptime(value, '%Y-%m-%d')[:3])
except ValueError, e:
raise ValueError('Invalid ISO date %r [%s]' % (value,
str(e)))
return value
def to_json(self, value):
if value is None:
return value
return value.isoformat()
class TimeProperty(DateTimeProperty):
""" Date property, like DateTime property but only
for time. Map to datetime.time object
*ValueType*: datetime.time
"""
data_type = datetime.time
@staticmethod
def now(self):
return datetime.datetime.now().time()
def to_python(self, value):
if isinstance(value, basestring):
try:
value = value.split('.', 1)[0] # strip out microseconds
value = datetime.time(*time.strptime(value, '%H:%M:%S')[3:6])
except ValueError, e:
raise ValueError('Invalid ISO time %r [%s]' % (value,
str(e)))
return value
def to_json(self, value):
if value is None:
return value
return value.replace(microsecond=0).isoformat()
class DictProperty(Property):
""" A property that stores a dict of things"""
def __init__(self, verbose_name=None, default=None,
required=False, **kwds):
"""
:args verbose_name: Optional verbose name.
:args default: Optional default value; if omitted, an empty list is used.
:args**kwds: Optional additional keyword arguments, passed to base class.
Note that the only permissible value for 'required' is True.
"""
if default is None:
default = {}
Property.__init__(self, verbose_name, default=default,
required=required, **kwds)
data_type = dict
def validate(self, value, required=True):
value = super(DictProperty, self).validate(value, required=required)
if value and value is not None:
if not isinstance(value, dict):
raise BadValueError('Property %s must be a dict' % self.name)
value = self.validate_dict_contents(value)
return value
def validate_dict_contents(self, value):
try:
value = validate_dict_content(value)
except BadValueError:
raise BadValueError(
'Items of %s dict must all be in %s' %
(self.name, ALLOWED_PROPERTY_TYPES))
return value
def default_value(self):
"""Default value for list.
Because the property supplied to 'default' is a static value,
that value must be shallow copied to prevent all fields with
default values from sharing the same instance.
Returns:
Copy of the default value.
"""
value = super(DictProperty, self).default_value()
if value is None:
value = {}
return dict(value)
def to_python(self, value):
return LazyDict(value)
def to_json(self, value):
return value_to_json(value)
class ListProperty(Property):
"""A property that stores a list of things.
"""
def __init__(self, verbose_name=None, default=None,
required=False, item_type=None, **kwds):
"""Construct ListProperty.
:args verbose_name: Optional verbose name.
:args default: Optional default value; if omitted, an empty list is used.
:args**kwds: Optional additional keyword arguments, passed to base class.
"""
if default is None:
default = []
if item_type is not None and item_type not in ALLOWED_PROPERTY_TYPES:
raise ValueError('item_type %s not in %s' % (item_type, ALLOWED_PROPERTY_TYPES))
self.item_type = item_type
Property.__init__(self, verbose_name, default=default,
required=required, **kwds)
data_type = list
def validate(self, value, required=True):
value = super(ListProperty, self).validate(value, required=required)
if value and value is not None:
if not isinstance(value, list):
raise BadValueError('Property %s must be a list' % self.name)
value = self.validate_list_contents(value)
return value
def validate_list_contents(self, value):
value = validate_list_content(value, item_type=self.item_type)
try:
value = validate_list_content(value, item_type=self.item_type)
except BadValueError:
raise BadValueError(
'Items of %s list must all be in %s' %
(self.name, ALLOWED_PROPERTY_TYPES))
return value
def default_value(self):
"""Default value for list.
Because the property supplied to 'default' is a static value,
that value must be shallow copied to prevent all fields with
default values from sharing the same instance.
Returns:
Copy of the default value.
"""
value = super(ListProperty, self).default_value()
if value is None:
value = []
return list(value)
def to_python(self, value):
return LazyList(value, item_type=self.item_type)
def to_json(self, value):
return value_to_json(value, item_type=self.item_type)
class StringListProperty(ListProperty):
""" shorthand for list that should containe only unicode"""
def __init__(self, verbose_name=None, default=None,
required=False, **kwds):
super(StringListProperty, self).__init__(verbose_name=verbose_name,
default=default, required=required, item_type=basestring, **kwds)
class SetProperty(Property):
"""A property that stores a Python set as a list of unique
elements.
Note that Python set operations like union that return a set
object do not alter list that will be stored with the next save,
while operations like update that change a set object in-place do
keep the list in sync.
"""
def __init__(self, verbose_name=None, default=None, required=None,
item_type=None, **kwds):
"""Construct SetProperty.
:args verbose_name: Optional verbose name.
:args default: Optional default value; if omitted, an empty
set is used.
:args required: True if field is required, default is False.
:args item_type: Optional data type of items that set
contains. Used to assist with JSON
serialization/deserialization when data is
stored/retireved.
:args **kwds: Optional additional keyword arguments, passed to
base class.
"""
if default is None:
default = set()
if item_type is not None and item_type not in ALLOWED_PROPERTY_TYPES:
raise ValueError('item_type %s not in %s'
% (item_type, ALLOWED_PROPERTY_TYPES))
self.item_type = item_type
super(SetProperty, self).__init__(
verbose_name=verbose_name, default=default, required=required,
**kwds)
data_type = set
def validate(self, value, required=True):
value = super(SetProperty, self).validate(value, required=required)
if value and value is not None:
if not isinstance(value, MutableSet):
raise BadValueError('Property %s must be a set' % self.name)
value = self.validate_set_contents(value)
return value
def validate_set_contents(self, value):
try:
value = validate_set_content(value, item_type=self.item_type)
except BadValueError:
raise BadValueError(
'Items of %s set must all be in %s' %
(self.name, ALLOWED_PROPERTY_TYPES))
return value
def default_value(self):
"""Return default value for set.
Because the property supplied to 'default' is a static value,
that value must be shallow copied to prevent all fields with
default values from sharing the same instance.
Returns:
Copy of the default value.
"""
value = super(SetProperty, self).default_value()
if value is None:
return set()
return value.copy()
def to_python(self, value):
return LazySet(value, item_type=self.item_type)
def to_json(self, value):
return value_to_json(value, item_type=self.item_type)
# structures proxy
class LazyDict(dict):
""" object to make sure we keep updated of dict
in _doc. We just override a dict and maintain change in
doc reference (doc[keyt] obviously).
if init_vals is specified, doc is overwritten
with the dict given. Otherwise, the values already in
doc are used.
"""
def __init__(self, doc, item_type=None, init_vals=None):
dict.__init__(self)
self.item_type = item_type
self.doc = doc
if init_vals is None:
self._wrap()
else:
for key, value in init_vals.items():
self[key] = value
def _wrap(self):
for key, json_value in self.doc.items():
if isinstance(json_value, dict):
value = LazyDict(json_value, item_type=self.item_type)
elif isinstance(json_value, list):
value = LazyList(json_value, item_type=self.item_type)
else:
value = value_to_python(json_value, self.item_type)
dict.__setitem__(self, key, value)
def __setitem__(self, key, value):
if isinstance(value, dict):
self.doc[key] = {}
value = LazyDict(self.doc[key], item_type=self.item_type, init_vals=value)
elif isinstance(value, list):
self.doc[key] = []
value = LazyList(self.doc[key], item_type=self.item_type, init_vals=value)
else:
self.doc.update({key: value_to_json(value, item_type=self.item_type) })
super(LazyDict, self).__setitem__(key, value)
def __delitem__(self, key):
del self.doc[key]
super(LazyDict, self).__delitem__(key)
def pop(self, key, *args):
default = len(args) == 1
if default:
self.doc.pop(key, args[-1])
return super(LazyDict, self).pop(key, args[-1])
self.doc.pop(key)
return super(LazyDict, self).pop(key)
def setdefault(self, key, default):
if key in self:
return self[key]
self.doc.setdefault(key, value_to_json(default, item_type=self.item_type))
super(LazyDict, self).setdefault(key, default)
return default
def update(self, value):
for k, v in value.items():
self[k] = v
def popitem(self, value):
new_value = super(LazyDict, self).popitem(value)
self.doc.popitem(value_to_json(value, item_type=self.item_type))
return new_value
def clear(self):
self.doc.clear()
super(LazyDict, self).clear()
class LazyList(list):
""" object to make sure we keep update of list
in _doc. We just override a list and maintain change in
doc reference (doc[index] obviously).
if init_vals is specified, doc is overwritten
with the list given. Otherwise, the values already in
doc are used.
"""
def __init__(self, doc, item_type=None, init_vals=None):
list.__init__(self)
self.item_type = item_type
self.doc = doc
if init_vals is None:
# just wrap the current values
self._wrap()
else:
# initialize this list and the underlying list
# with the values given.
del self.doc[:]
for item in init_vals:
self.append(item)
def _wrap(self):
for json_value in self.doc:
if isinstance(json_value, dict):
value = LazyDict(json_value, item_type=self.item_type)
elif isinstance(json_value, list):
value = LazyList(json_value, item_type=self.item_type)
else:
value = value_to_python(json_value, self.item_type)
list.append(self, value)
def __delitem__(self, index):
del self.doc[index]
list.__delitem__(self, index)
def __setitem__(self, index, value):
if isinstance(value, dict):
self.doc[index] = {}
value = LazyDict(self.doc[index], item_type=self.item_type, init_vals=value)
elif isinstance(value, list):
self.doc[index] = []
value = LazyList(self.doc[index], item_type=self.item_type, init_vals=value)
else:
self.doc[index] = value_to_json(value, item_type=self.item_type)
list.__setitem__(self, index, value)
def __delslice__(self, i, j):
del self.doc[i:j]
list.__delslice__(self, i, j)
def __getslice__(self, i, j):
return LazyList(self.doc[i:j], self.item_type)
def __setslice__(self, i, j, seq):
self.doc[i:j] = (value_to_json(v, item_type=self.item_type) for v in seq)
list.__setslice__(self, i, j, seq)
def __contains__(self, value):
jvalue = value_to_json(value)
for m in self.doc:
if m == jvalue: return True
return False
def append(self, *args, **kwargs):
if args:
assert len(args) == 1
value = args[0]
else:
value = kwargs
index = len(self)
if isinstance(value, dict):
self.doc.append({})
value = LazyDict(self.doc[index], item_type=self.item_type, init_vals=value)
elif isinstance(value, list):
self.doc.append([])
value = LazyList(self.doc[index], item_type=self.item_type, init_vals=value)
else:
self.doc.append(value_to_json(value, item_type=self.item_type))
super(LazyList, self).append(value)
def extend(self, x):
self.doc.extend(
[value_to_json(v, item_type=self.item_type) for v in x])
super(LazyList, self).extend(x)
def index(self, x, *args):
x = value_to_json(x, item_type=self.item_type)
return self.doc.index(x)
def insert(self, i, x):
self.__setslice__(i, i, [x])
def pop(self, i=-1):
del self.doc[i]
v = super(LazyList, self).pop(i)
return value_to_python(v, item_type=self.item_type)
def remove(self, x):
del self[self.index(x)]
def sort(self, cmp=None, key=None, reverse=False):
self.doc.sort(cmp, key, reverse)
list.sort(self, cmp, key, reverse)
def reverse(self):
self.doc.reverse()
list.reverse(self)
class LazySet(MutableSet):
"""Object to make sure that we keep set and _doc synchronized.
We sub-class MutableSet and maintain changes in doc.
Note that methods like union that return a set object do not
alter _doc, while methods like update that change a set object
in-place do keep _doc in sync.
"""
def _map_named_operation(opname):
fn = getattr(MutableSet, opname)
if hasattr(fn, 'im_func'):
fn = fn.im_func
def method(self, other, fn=fn):
if not isinstance(other, MutableSet):
other = self._from_iterable(other)
return fn(self, other)
return method
issubset = _map_named_operation('__le__')
issuperset = _map_named_operation('__ge__')
symmetric_difference = _map_named_operation('__xor__')
def __init__(self, doc, item_type=None):
self.item_type = item_type
self.doc = doc
self.elements = set(value_to_python(value, self.item_type)
for value in self.doc)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, list(self))
@classmethod
def _from_iterable(cls, it):
return cls(it)
def __iand__(self, iterator):
for value in (self.elements - iterator):
self.elements.discard(value)
return self
def __iter__(self):
return iter(element for element in self.elements)
def __len__(self):
return len(self.elements)
def __contains__(self, item):
return item in self.elements
def __xor__(self, other):
if not isinstance(other, MutableSet):
if not is_iterable(Other):
return NotImplemented
other = self._from_iterable(other)
return (self.elements - other) | (other - self.elements)
def __gt__(self, other):
if not isinstance(other, MutableSet):
return NotImplemented
return other < self.elements
def __ge__(self, other):
if not isinstance(other, MutableSet):
return NotImplemented
return other <= self.elements
def __ne__(self, other):
return not (self.elements == other)
def add(self, value):
self.elements.add(value)
if value not in self.doc:
self.doc.append(value_to_json(value, item_type=self.item_type))
def copy(self):
return self.elements.copy()
def difference(self, other, *args):
return self.elements.difference(other, *args)
def difference_update(self, other, *args):
for value in other:
self.discard(value)
for arg in args:
self.difference_update(arg)
def discard(self, value):
self.elements.discard(value)
try:
self.doc.remove(value)
except ValueError:
pass
def intersection(self, other, *args):
return self.elements.intersection(other, *args)
def intersection_update(self, other, *args):
if not isinstance(other, MutableSet):
other = set(other)
for value in self.elements - other:
self.discard(value)
for arg in args:
self.intersection_update(arg)
def symmetric_difference_update(self, other):
if not isinstance(other, MutableSet):
other = set(other)
for value in other:
if value in self.elements:
self.discard(value)
else:
self.add(value)
def union(self, other, *args):
return self.elements.union(other, *args)
def update(self, other, *args):
self.elements.update(other, *args)
for element in self.elements:
if element not in self.doc:
self.doc.append(
value_to_json(element, item_type=self.item_type))
# some mapping
MAP_TYPES_PROPERTIES = {
decimal.Decimal: DecimalProperty,
datetime.datetime: DateTimeProperty,
datetime.date: DateProperty,
datetime.time: TimeProperty,
str: StringProperty,
unicode: StringProperty,
bool: BooleanProperty,
int: IntegerProperty,
long: LongProperty,
float: FloatProperty,
list: ListProperty,
dict: DictProperty,
set: SetProperty,
}
def convert_property(value):
""" convert a value to json from Property._to_json """
if type(value) in MAP_TYPES_PROPERTIES:
prop = MAP_TYPES_PROPERTIES[type(value)]()
value = prop.to_json(value)
return value
def value_to_property(value):
""" Convert value in a Property object """
if type(value) in MAP_TYPES_PROPERTIES:
prop = MAP_TYPES_PROPERTIES[type(value)]()
return prop
else:
return value
# utilities functions
def validate_list_content(value, item_type=None):
""" validate type of values in a list """
return [validate_content(item, item_type=item_type) for item in value]
def validate_dict_content(value, item_type=None):
""" validate type of values in a dict """
return dict([(k, validate_content(v,
item_type=item_type)) for k, v in value.iteritems()])
def validate_set_content(value, item_type=None):
""" validate type of values in a set """
return set(validate_content(item, item_type=item_type) for item in value)
def validate_content(value, item_type=None):
""" validate a value. test if value is in supported types """
if isinstance(value, list):
value = validate_list_content(value, item_type=item_type)
elif isinstance(value, dict):
value = validate_dict_content(value, item_type=item_type)
elif item_type is not None and not isinstance(value, item_type):
raise BadValueError(
'Items must all be in %s' % item_type)
elif type(value) not in ALLOWED_PROPERTY_TYPES:
raise BadValueError(
'Items must all be in %s' %
(ALLOWED_PROPERTY_TYPES))
return value
def dict_to_json(value, item_type=None):
""" convert a dict to json """
return dict([(k, value_to_json(v, item_type=item_type)) for k, v in value.iteritems()])
def list_to_json(value, item_type=None):
""" convert a list to json """
return [value_to_json(item, item_type=item_type) for item in value]
def value_to_json(value, item_type=None):
""" convert a value to json using appropriate regexp.
For Dates we use ISO 8601. Decimal are converted to string.
"""
if isinstance(value, datetime.datetime) and is_type_ok(item_type, datetime.datetime):
value = value.replace(microsecond=0).isoformat() + 'Z'
elif isinstance(value, datetime.date) and is_type_ok(item_type, datetime.date):
value = value.isoformat()
elif isinstance(value, datetime.time) and is_type_ok(item_type, datetime.time):
value = value.replace(microsecond=0).isoformat()
elif isinstance(value, decimal.Decimal) and is_type_ok(item_type, decimal.Decimal):
value = unicode(value)
elif isinstance(value, (list, MutableSet)):
value = list_to_json(value, item_type)
elif isinstance(value, dict):
value = dict_to_json(value, item_type)
return value
def is_type_ok(item_type, value_type):
    """ True when no expected type is set, or when the expected and
    actual types are equal. """
    if item_type is None:
        return True
    return item_type == value_type
def value_to_python(value, item_type=None):
    """ convert a json value to python type using regexp. values converted
    have been put in json via `value_to_json` .

    Strings matching the date/time/datetime/decimal regexps are decoded
    through the matching property class from ``MAP_TYPES_PROPERTIES``;
    lists/sets and dicts are converted recursively.  Anything else is
    returned unchanged.
    """
    data_type = None
    if isinstance(value, basestring):
        # pick the python type whose serialisation regexp matches,
        # honouring an explicit expected item_type when given
        if re_date.match(value) and is_type_ok(item_type, datetime.date):
            data_type = datetime.date
        elif re_time.match(value) and is_type_ok(item_type, datetime.time):
            data_type = datetime.time
        elif re_datetime.match(value) and is_type_ok(item_type, datetime.datetime):
            data_type = datetime.datetime
        elif re_decimal.match(value) and is_type_ok(item_type, decimal.Decimal):
            data_type = decimal.Decimal
        if data_type is not None:
            prop = MAP_TYPES_PROPERTIES[data_type]()
            try:
                # the regexps occasionally match strings the property
                # cannot actually parse; keep the raw value in that case
                value = prop.to_python(value)
            except Exception:
                # was a bare ``except:``; narrowed so KeyboardInterrupt
                # and SystemExit are no longer swallowed
                pass
    elif isinstance(value, (list, MutableSet)):
        value = list_to_python(value, item_type=item_type)
    elif isinstance(value, dict):
        value = dict_to_python(value, item_type=item_type)
    return value
def list_to_python(value, item_type=None):
    """ Convert a list of json values back into python objects. """
    result = []
    for item in value:
        result.append(value_to_python(item, item_type=item_type))
    return result
def dict_to_python(value, item_type=None):
    """ Convert a json object back into a python dict. """
    result = {}
    for key, val in value.iteritems():
        result[key] = value_to_python(val, item_type=item_type)
    return result
| [
"rugkiks@gmail.com"
] | rugkiks@gmail.com |
9ffccd5225785ef2b10916b4fb566d1b0662e776 | d81e47cb813d2265d5d1cdfb9642560ce28ae9c9 | /IterativeDeepeningAI.py | b44b789cfb89ab49f22b828b806aceb3625f4605 | [] | no_license | p-takagi-atilano/ChessAI | 26c22c26301c92ce632d1dfe25a55131b53e045b | 7cf6669c9cf8250269de3c653863fe18283b96da | refs/heads/master | 2020-11-25T18:47:28.432556 | 2019-12-18T09:01:59 | 2019-12-18T09:01:59 | 228,799,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import chess
from MinimaxAI import MinimaxAI
class IterativeDeepeningAI():
    """Chess AI that re-runs minimax at every depth from 0 up to a
    fixed limit, keeping the deepest search's best move."""

    def __init__(self, heuristic_fn, depth):
        # maximum search depth and the board-evaluation heuristic
        self.depth = depth
        self.heuristic_fn = heuristic_fn

    def choose_move(self, board):
        """Return the best move found by iterative-deepening minimax,
        printing the choice made at each depth."""
        chosen = None
        # depth is reassigned on every iteration of the loop below
        searcher = MinimaxAI(self.heuristic_fn, 0)
        for current_depth in range(self.depth + 1):
            searcher.depth = current_depth
            searcher.reset_depth_fix()
            chosen = searcher.choose_move(board)
            print(str(current_depth) + ": " + str(chosen))
        return chosen
| [
"ptakagia@gmail.com"
] | ptakagia@gmail.com |
50826902f861d5fc6fb8c492c5e315705fa1b725 | f6de81b89c9e5dd034262dc7a89275b265f9f8c7 | /ex15.py | 675def34c27a3225d867f6217da1201542375e08 | [] | no_license | marcin93/lpthw | 98047d5facdc2a46d146f035d6b95ea47ce84348 | 6f56824ea5520de95816b83f2400678b06456fd0 | refs/heads/master | 2021-01-01T18:41:27.557507 | 2013-06-19T07:36:14 | 2013-06-19T07:36:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
# NOTE: Python 2 script (print statements, raw_input).
from sys import argv # command-line arguments: [script, filename]
script, filename = argv # unpack the script name and the file to read
txt = open(filename) # open the file named on the command line
print "Here's your file %r:" % filename # echo which file is being shown
print txt.read() # dump the whole file to stdout
txt.close()
print "Type the filename again:"
file_again = raw_input("> ") # ask the user for a file name interactively
txt_again = open(file_again) # open the file the user typed
print txt_again.read() # print its contents
txt_again.close()
"marcin93@gmail.com"
] | marcin93@gmail.com |
c74b7b5e17bda9f26d14cc36fa0e16ff52fffa6b | db4ced2bf1d06a886b18947edf434093f0698b88 | /picturesque/settings.py | 9883c8510fcbe7c771c6bf1ee6d9f47bcd8f719c | [] | no_license | Code-Institute-Submissions/picturesque-1 | a9235df044b6eb3d8b7cf4deaefaddfd41631562 | 58dfdb58da863aa0d4e07e05fa0e6481e83d21ac | refs/heads/master | 2023-02-08T05:21:47.975486 | 2020-12-30T23:02:17 | 2020-12-30T23:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,389 | py | """
Django settings for picturesque project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): falls back to '' when the env var is missing — verify
# the deployment always provides SECRET_KEY.
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
# Debug mode is enabled only when a DEVELOPMENT env var exists.
DEBUG = 'DEVELOPMENT' in os.environ
ALLOWED_HOSTS = ['abg-picturesque-2.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # django-allauth authentication
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    # project apps
    'webpages',
    'posters',
    'bag',
    'checkout',
    'profiles',
    # Other
    'crispy_forms',
    'storages',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'picturesque.urls'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
            os.path.join(BASE_DIR, 'templates', 'allauth'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request', #required by allauth
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
                # makes bag contents available in every template
                'bag.contexts.bag_contents',
            ],
            'builtins': [
                'crispy_forms.templatetags.crispy_forms_tags',
                'crispy_forms.templatetags.crispy_forms_field',
            ]
        },
    },
]
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
AUTHENTICATION_BACKENDS = [
    # Needed to login by username in Django admin, regardless of `allauth`
    'django.contrib.auth.backends.ModelBackend',
    # `allauth` specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
]
SITE_ID = 1
# allauth behaviour: login by username or email, verified email required
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE = True
ACCOUNT_USERNAME_MIN_LENGTH = 4
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
WSGI_APPLICATION = 'picturesque.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Use the DATABASE_URL (e.g. Heroku Postgres) when provided, otherwise
# fall back to a local sqlite file.
if 'DATABASE_URL' in os.environ:
    DATABASES = {
        'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Serve static/media from S3 when USE_AWS is set; the storage classes
# referenced below live in the project's custom_storages module.
if 'USE_AWS' in os.environ:
    # Cache control
    AWS_S3_OBJECT_PARAMETERS = {
        'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
        'CacheControl': 'max-age=94608000',
    }
    # Bucket Config
    AWS_STORAGE_BUCKET_NAME = 'abg-picturesque'
    AWS_S3_REGION_NAME = 'eu-west-2'
    AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
    AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
    # Static and media files
    STATICFILES_STORAGE = 'custom_storages.StaticStorage'
    STATICFILES_LOCATION = 'static'
    DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
    MEDIAFILES_LOCATION = 'media'
    # Override static and media URLs in production
    STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATICFILES_LOCATION}/'
    MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIAFILES_LOCATION}/'
#Stripe
# Orders under the threshold pay a percentage-based delivery charge.
FREE_DELIVERY_THRESHOLD = 75
STANDARD_DELIVERY_PERCENTAGE = 10
STRIPE_CURRENCY = 'gbp'
STRIPE_PUBLIC_KEY = os.getenv('STRIPE_PUBLIC_KEY', '')
STRIPE_SECRET_KEY = os.getenv('STRIPE_SECRET_KEY', '')
STRIPE_WH_SECRET = os.getenv('STRIPE_WH_SECRET', '')
# Email: print to console in development, send via Gmail SMTP otherwise.
if 'DEVELOPMENT' in os.environ:
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    DEFAULT_FROM_EMAIL = 'picturesque@cmail.com'
else:
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
    EMAIL_USE_TLS = True
    EMAIL_PORT = 587
    EMAIL_HOST = 'smtp.gmail.com'
    EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASS')
    DEFAULT_FROM_EMAIL = os.environ.get('EMAIL_HOST_USER')
"a.guillermo8@yahoo.com"
] | a.guillermo8@yahoo.com |
cfb356d0b792ad11e304f1c3e18e01215d7d858e | 793a777e5c788286a8ebe67bdb6b5ea4ebdf046a | /env/bin/python-config | bc6de9b390f4163d18cc47f1e18ac19838fd8672 | [] | no_license | 9394974/learnDjango | 94e08cce353a6623c3ddae8ff50ac22a8808e90a | 62b5f0a5917be0ae4f663b1ab10b65469f16f235 | refs/heads/master | 2020-07-31T15:23:33.756475 | 2017-01-25T15:37:20 | 2017-01-25T15:37:20 | 73,597,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | #!/Users/crazyLeaves/Desktop/Python/learnDjango/env/bin/python
import sys
import getopt
import sysconfig
# Vendored CPython `python-config` helper: prints compiler/linker flags
# for embedding or building extensions against this interpreter.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # print the accepted --options and terminate with the given code
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# emit one line of output per requested flag, in the order given
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # older interpreters exposed the suffix under 'SO'
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"529342824@qq.com"
] | 529342824@qq.com | |
765503c6c7b8f463814da29afd3332f8f03d5e40 | a053d60e2c84750cf1c51142bfdf6dec5048bf25 | /demo.py | 4cfc291b204313593868375ac5df2099451fc16d | [] | no_license | Sharpiless/paddlex-driver-state-recognition | ed57e58bebcdccc19302dcb49e950dd66be9ed45 | 81f81f72e9b893c8adca8f9aaba3615dc7aff7c7 | refs/heads/master | 2023-03-18T23:02:15.255664 | 2020-06-02T15:42:38 | 2020-06-02T15:42:38 | 268,839,488 | 2 | 3 | null | 2021-03-07T13:43:21 | 2020-06-02T15:32:52 | Java | UTF-8 | Python | false | false | 1,480 | py | import matplotlib
import paddlex as pdx
import paddle.fluid as fluid
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from facedet import FaceDet
fontC = ImageFont.truetype('./platech.ttf', 20, 0)
def drawText(img, addText, x1, y1):
    """Draw *addText* on the PIL image *img* at (x1, y1) in green using
    the module-level font, and return the result as a numpy array."""
    text_color = (20, 255, 20)
    painter = ImageDraw.Draw(img)
    label = addText.encode("utf-8").decode("utf-8")
    painter.text((x1, y1), label, text_color, font=fontC)
    return np.array(img)
# Load the trained paddlex classifier from disk.
save_dir = './best_model'
model = pdx.load_model(save_dir)
# Map the c0..c9 class ids to human-readable driver-state labels.
classes = {'c0': 'normal driving',
           'c1': 'texting-right',
           'c2': 'talking on the phone-right',
           'c3': 'texting-left',
           'c4': 'talking on the phone-left',
           'c5': 'operating the radio',
           'c6': 'drinking',
           'c7': 'reaching behind',
           'c8': 'hair and makeup',
           'c9': 'talking to passenger'}
base = './test_images'
det = FaceDet(thread=0.1)
# Classify every image in the test directory and display it annotated
# with the predicted label, score, and detected faces.
for im in os.listdir(base):
    pt = os.path.join(base, im)
    result = model.predict(pt)
    print(result)
    lbl = classes[result[0]['category']]+' '+str(result[0]['score'])
    image = cv2.imread(pt)
    image = det.detect(image)
    # OpenCV loads BGR; convert to RGB before handing over to PIL
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    image = drawText(image, lbl, 0, 10)
    plt.imshow(image)
    plt.show()
"1691608003@qq.com"
] | 1691608003@qq.com |
17602b5d69705a85bd265bfeff78a7e5aa65ed40 | 21ddbc57c13d4a9f76b0f5bac74119354f724a94 | /stockupdate-project/stocks/migrations/0001_initial.py | b1e8a281db6f943c0daa0d4e2055ebee6ced397d | [] | no_license | iamsubhajit/stock-update | 4e795bb551602ce4a6169161ba132bed90f64913 | e170bd7a59c3e621b8c3db5518a3d21512f5d3cc | refs/heads/master | 2020-12-11T04:54:32.393817 | 2020-01-15T12:38:08 | 2020-01-15T12:38:08 | 233,782,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # Generated by Django 2.2.7 on 2020-01-13 11:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Stock model with an
    # image field (stored under media 'images/') and a short summary.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images/')),
                ('summary', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"smy5@3ds.com"
] | smy5@3ds.com |
f791d11822e740af3f079e5f5adbd2832bc6c5de | a469f33b36ac9783740dbed9173ba76a6f18b3cd | /libs/scripts/users_who_finished_the_course.py | 623c4cf08f95e8ef34b193bf601e3cfddf1a5643 | [] | no_license | mandarinSh/OpenPolyEdu | 0f783622a828ef10413725594c5c0bdbdac554f2 | 7f0a052e148f321f09f6eb5f806c15eaa26d364a | refs/heads/master | 2020-07-27T14:04:57.441770 | 2020-05-12T11:13:44 | 2020-05-12T11:13:44 | 209,116,670 | 2 | 6 | null | 2020-06-08T20:40:07 | 2019-09-17T17:28:59 | HTML | UTF-8 | Python | false | false | 2,662 | py | import sys
import datetime
from tabulate import tabulate
from database_services import *
def calculate_users_who_finished_the_course(connection):
    """Return (user_id, user_name) rows for every user that attempted at
    least one special exam (any 'edx.special_exam%' event in the logs).

    NOTE(review): the result variable is named
    ``started_but_not_finished_users`` while the function name says
    "finished"; the SQL keeps users WITH special-exam events — confirm
    which semantics is intended.
    """
    print('Start query execution at ', datetime.datetime.now())
    # Left side: all distinct user_ids joined with their (non-null)
    # usernames; inner join keeps only users with special-exam events.
    get_users_who_finished_the_course_query = '''select allUsers.user_id as user_id, allUsers.user_name as user_name from (
    select uniqueUserIds.user_id as user_id, userAndIDs.user_name as user_name from (
    select
    log_line #>> '{context, user_id}' AS user_id
    from logs
    GROUP BY user_id
    ) uniqueUserIds
    LEFT JOIN (
    select
    log_line -> 'username' as user_name,
    log_line #>> '{context, user_id}' AS user_id
    from logs
    where log_line -> 'username' != 'null' and log_line -> 'username' != '""' and log_line -> 'username' is not null
    GROUP BY user_id, user_name
    ) userAndIDs
    ON uniqueUserIds.user_id = userAndIDs.user_id
    ) allUsers
    INNER JOIN (
    select
    log_line #>> '{context, user_id}' as user_id
    from logs where log_line ->> 'name' LIKE 'edx.special_exam%'
    group by user_id
    ) usersWhoStartedAnyExam
    ON allUsers.user_id = usersWhoStartedAnyExam.user_id
    group by allUsers.user_id, user_name
    order by user_name desc'''
    # autocommit so the read does not hold a transaction open
    connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cursor = connection.cursor()
    cursor.execute(get_users_who_finished_the_course_query)
    started_but_not_finished_users = cursor.fetchall()
    cursor.close()
    connection.commit()
    print('End query execution at ', datetime.datetime.now())
    print("Users who started teh course, but not finished it are calculated")
    return started_but_not_finished_users
def write_result_to_file(result_file, result):
    """Render *result* as a text table and write it to *result_file*."""
    print('Start writing the data to file.')
    table = tabulate(result, headers=['user_id', 'user_name'])
    with open(result_file, "w") as out:
        out.write(table)
def main(argv):
    """Entry point: connect to the DB, run the analytics query and dump
    the report.  argv = [script, database_name, user_name, result_file].
    """
    print('Start calculating user who finished the course.')
    print('It means that user tried to pass any exam type.')
    db_name = argv[1]
    db_user = argv[2]
    report_path = argv[3]
    conn = open_db_connection(db_name, db_user)
    finished = calculate_users_who_finished_the_course(conn)
    write_result_to_file(report_path, finished)
    print('The analytics result can be found at ', report_path)
    close_db_connection(conn)
main(sys.argv)
| [
"igor.nikiforovv@gmail.com"
] | igor.nikiforovv@gmail.com |
4c98a9f88ca9eb6021d9773eed475018a705f4f1 | 33d2477dbbf43e81d15fc2256543b0f6160dcafb | /spleeter/spleeter/dataset.py | b6d8a527506b99474076d4b9532004d7f57b6930 | [
"MIT",
"CC-BY-3.0"
] | permissive | kimurakeigo888/spleeeer | edd94245365f9e855b3eced333021bb38234fc17 | 6e9c8d46ddac7b247618121e407360adc269c319 | refs/heads/master | 2022-11-14T22:49:48.835139 | 2020-07-07T07:00:20 | 2020-07-07T07:00:20 | 277,733,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,982 | py | #!/usr/bin/env python
# coding: utf8
"""
Module for building data preprocessing pipeline using the tensorflow
data API. Data preprocessing such as audio loading, spectrogram
computation, cropping, feature caching or data augmentation is done
using a tensorflow dataset object that output a tuple (input_, output)
where:
- input is a dictionary with a single key that contains the (batched)
mix spectrogram of audio samples
- output is a dictionary of spectrogram of the isolated tracks
(ground truth)
"""
import time
import os
from os.path import exists, join, sep as SEPARATOR
# pylint: disable=import-error
import pandas as pd
import numpy as np
import tensorflow as tf
# pylint: enable=import-error
from .audio.convertor import (
db_uint_spectrogram_to_gain,
spectrogram_to_db_uint)
from .audio.spectrogram import (
compute_spectrogram_tf,
random_pitch_shift,
random_time_stretch)
from .utils.logging import get_logger
from .utils.tensor import (
check_tensor_shape,
dataset_from_csv,
set_tensor_shape,
sync_apply)
__email__ = 'research@deezer.com'
__author__ = 'Deezer Research'
__license__ = 'MIT License'
# Default audio parameters to use.
DEFAULT_AUDIO_PARAMS = {
    'instrument_list': ('vocals', 'accompaniment'),  # isolated sources (the mix is added separately)
    'mix_name': 'mix',          # key used for the mixture track
    'sample_rate': 44100,       # audio sample rate
    'frame_length': 4096,       # STFT window length (samples)
    'frame_step': 1024,         # STFT hop size (samples)
    'T': 512,                   # time frames per training segment
    'F': 1024                   # frequency bins kept (<= frame_length/2 + 1)
}
def get_training_dataset(audio_params, audio_adapter, audio_path):
    """ Builds training dataset.

    :param audio_params: Audio parameters.
    :param audio_adapter: Adapter to load audio from.
    :param audio_path: Path of directory containing audio.
    :returns: Built dataset.
    """
    chunk_duration = audio_params.get('chunk_duration', 20.0)
    random_seed = audio_params.get('random_seed', 0)
    builder = DatasetBuilder(
        audio_params, audio_adapter, audio_path,
        chunk_duration=chunk_duration,
        random_seed=random_seed)
    return builder.build(
        audio_params.get('train_csv'),
        cache_directory=audio_params.get('training_cache'),
        batch_size=audio_params.get('batch_size'),
        n_chunks_per_song=audio_params.get('n_chunks_per_song', 2),
        random_data_augmentation=False,
        convert_to_uint=True,
        wait_for_cache=False)
def get_validation_dataset(audio_params, audio_adapter, audio_path):
    """ Builds validation dataset.

    :param audio_params: Audio parameters.
    :param audio_adapter: Adapter to load audio from.
    :param audio_path: Path of directory containing audio.
    :returns: Built dataset.
    """
    builder = DatasetBuilder(
        audio_params, audio_adapter, audio_path,
        chunk_duration=12.0)
    # Evaluation settings: one central chunk per song, a single pass,
    # no shuffling and no data augmentation.
    eval_options = {
        'batch_size': audio_params.get('batch_size'),
        'cache_directory': audio_params.get('validation_cache'),
        'convert_to_uint': True,
        'infinite_generator': False,
        'n_chunks_per_song': 1,
        'random_data_augmentation': False,
        'random_time_crop': False,
        'shuffle': False,
    }
    return builder.build(
        audio_params.get('validation_csv'), **eval_options)
class InstrumentDatasetBuilder(object):
    """ Instrument based filter and mapper provider.

    Each method is a per-sample transform (or predicate) meant to be
    passed to ``tf.data.Dataset.map``/``filter``; samples are dicts of
    tensors and each transform returns a new dict.
    """
    def __init__(self, parent, instrument):
        """ Default constructor.
        :param parent: Parent dataset builder.
        :param instrument: Target instrument.
        """
        self._parent = parent
        self._instrument = instrument
        # dict keys used for this instrument's spectrogram and its
        # min/max dB values (needed for uint8 <-> float conversion)
        self._spectrogram_key = f'{instrument}_spectrogram'
        self._min_spectrogram_key = f'min_{instrument}_spectrogram'
        self._max_spectrogram_key = f'max_{instrument}_spectrogram'
    def load_waveform(self, sample):
        """ Load waveform for given sample. """
        return dict(sample, **self._parent._audio_adapter.load_tf_waveform(
            sample[f'{self._instrument}_path'],
            offset=sample['start'],
            duration=self._parent._chunk_duration,
            sample_rate=self._parent._sample_rate,
            waveform_name='waveform'))
    def compute_spectrogram(self, sample):
        """ Compute spectrogram of the given sample. """
        return dict(sample, **{
            self._spectrogram_key: compute_spectrogram_tf(
                sample['waveform'],
                frame_length=self._parent._frame_length,
                frame_step=self._parent._frame_step,
                spec_exponent=1.,
                window_exponent=1.)})
    def filter_frequencies(self, sample):
        """ Keep only the first F frequency bins of the spectrogram. """
        return dict(sample, **{
            self._spectrogram_key:
                sample[self._spectrogram_key][:, :self._parent._F, :]})
    def convert_to_uint(self, sample):
        """ Convert given sample's spectrogram from float to uint8 dB,
        recording min/max for the inverse conversion. """
        return dict(sample, **spectrogram_to_db_uint(
            sample[self._spectrogram_key],
            tensor_key=self._spectrogram_key,
            min_key=self._min_spectrogram_key,
            max_key=self._max_spectrogram_key))
    def filter_infinity(self, sample):
        """ Filter out samples whose recorded min dB is infinite. """
        return tf.logical_not(
            tf.math.is_inf(
                sample[self._min_spectrogram_key]))
    def convert_to_float32(self, sample):
        """ Convert given sample's spectrogram from uint8 dB back to
        float gain, using the recorded min/max. """
        return dict(sample, **{
            self._spectrogram_key: db_uint_spectrogram_to_gain(
                sample[self._spectrogram_key],
                sample[self._min_spectrogram_key],
                sample[self._max_spectrogram_key])})
    def time_crop(self, sample):
        """ Take the central T-frame window of the spectrogram. """
        def start(sample):
            """ mid_segment_start """
            return tf.cast(
                tf.maximum(
                    tf.shape(sample[self._spectrogram_key])[0]
                    / 2 - self._parent._T / 2, 0),
                tf.int32)
        return dict(sample, **{
            self._spectrogram_key: sample[self._spectrogram_key][
                start(sample):start(sample) + self._parent._T, :, :]})
    def filter_shape(self, sample):
        """ Filter badly shaped sample. """
        return check_tensor_shape(
            sample[self._spectrogram_key], (
                self._parent._T, self._parent._F, 2))
    def reshape_spectrogram(self, sample):
        """ Set the static tensor shape (T, F, 2) on the spectrogram. """
        return dict(sample, **{
            self._spectrogram_key: set_tensor_shape(
                sample[self._spectrogram_key],
                (self._parent._T, self._parent._F, 2))})
class DatasetBuilder(object):
    """ Builds tf.data pipelines that yield (mix spectrogram, isolated
    track spectrograms) pairs for training or validation.
    """
    # Margin at beginning and end of songs in seconds.
    MARGIN = 0.5
    # Wait period for cache (in seconds).
    WAIT_PERIOD = 60
    def __init__(
            self,
            audio_params, audio_adapter, audio_path,
            random_seed=0, chunk_duration=20.0):
        """ Default constructor.
        NOTE: Probably need for AudioAdapter.
        :param audio_params: Audio parameters to use.
        :param audio_adapter: Audio adapter to use.
        :param audio_path: Root directory containing the audio files.
        :param random_seed: Seed used for shuffling and random crops.
        :param chunk_duration: Duration (s) of the chunk loaded per song.
        """
        # Length of segment in frames (if fs=22050 and
        # frame_step=512, then T=512 corresponds to 11.89s)
        self._T = audio_params['T']
        # Number of frequency bins to be used (should
        # be less than frame_length/2 + 1)
        self._F = audio_params['F']
        self._sample_rate = audio_params['sample_rate']
        self._frame_length = audio_params['frame_length']
        self._frame_step = audio_params['frame_step']
        self._mix_name = audio_params['mix_name']
        # NOTE(review): assumes instrument_list is a list here
        # (DEFAULT_AUDIO_PARAMS stores a tuple) — confirm.
        self._instruments = [self._mix_name] + audio_params['instrument_list']
        self._instrument_builders = None
        self._chunk_duration = chunk_duration
        self._audio_adapter = audio_adapter
        self._audio_params = audio_params
        self._audio_path = audio_path
        self._random_seed = random_seed
    def expand_path(self, sample):
        """ Expands audio paths for the given sample. """
        return dict(sample, **{f'{instrument}_path': tf.string_join(
            (self._audio_path, sample[f'{instrument}_path']), SEPARATOR)
            for instrument in self._instruments})
    def filter_error(self, sample):
        """ Filter errored sample. """
        return tf.logical_not(sample['waveform_error'])
    def filter_waveform(self, sample):
        """ Drop the raw waveform from the sample (spectrograms only). """
        return {k: v for k, v in sample.items() if not k == 'waveform'}
    def harmonize_spectrogram(self, sample):
        """ Ensure same size for vocals and mix spectrograms. """
        def _reduce(sample):
            # shortest time dimension across all instruments
            return tf.reduce_min([
                tf.shape(sample[f'{instrument}_spectrogram'])[0]
                for instrument in self._instruments])
        return dict(sample, **{
            f'{instrument}_spectrogram':
                sample[f'{instrument}_spectrogram'][:_reduce(sample), :, :]
            for instrument in self._instruments})
    def filter_short_segments(self, sample):
        """ Filter out too short segment.
        NOTE(review): keeps the sample if ANY instrument has at least T
        frames (reduce_any), not all — confirm intended. """
        return tf.reduce_any([
            tf.shape(sample[f'{instrument}_spectrogram'])[0] >= self._T
            for instrument in self._instruments])
    def random_time_crop(self, sample):
        """ Random time crop of 11.88s, applied in sync to all
        instruments (spectrograms are concatenated before cropping). """
        return dict(sample, **sync_apply({
            f'{instrument}_spectrogram': sample[f'{instrument}_spectrogram']
            for instrument in self._instruments},
            lambda x: tf.image.random_crop(
                x, (self._T, len(self._instruments) * self._F, 2),
                seed=self._random_seed)))
    def random_time_stretch(self, sample):
        """ Randomly time stretch the given sample (factor 0.9-1.1),
        same factor for every instrument. """
        return dict(sample, **sync_apply({
            f'{instrument}_spectrogram':
                sample[f'{instrument}_spectrogram']
            for instrument in self._instruments},
            lambda x: random_time_stretch(
                x, factor_min=0.9, factor_max=1.1)))
    def random_pitch_shift(self, sample):
        """ Randomly pitch shift the given sample (-1 to +1 semitone),
        same shift for every instrument. """
        return dict(sample, **sync_apply({
            f'{instrument}_spectrogram':
                sample[f'{instrument}_spectrogram']
            for instrument in self._instruments},
            lambda x: random_pitch_shift(
                x, shift_min=-1.0, shift_max=1.0), concat_axis=0))
    def map_features(self, sample):
        """ Select features and annotation of the given sample:
        returns (input dict with mix spectrogram, output dict with the
        isolated instrument spectrograms). """
        input_ = {
            f'{self._mix_name}_spectrogram':
                sample[f'{self._mix_name}_spectrogram']}
        output = {
            f'{instrument}_spectrogram': sample[f'{instrument}_spectrogram']
            for instrument in self._audio_params['instrument_list']}
        return (input_, output)
    def compute_segments(self, dataset, n_chunks_per_song):
        """ Computes segments for each song of the dataset.
        :param dataset: Dataset to compute segments for.
        :param n_chunks_per_song: Number of segment per song to compute.
        :returns: Segmented dataset.
        """
        if n_chunks_per_song <= 0:
            raise ValueError('n_chunks_per_song must be positif')
        datasets = []
        for k in range(n_chunks_per_song):
            if n_chunks_per_song > 1:
                # k-th chunk start, spread evenly across the song while
                # keeping MARGIN seconds away from both ends
                datasets.append(
                    dataset.map(lambda sample: dict(sample, start=tf.maximum(
                        k * (
                            sample['duration'] - self._chunk_duration - 2
                            * self.MARGIN) / (n_chunks_per_song - 1)
                        + self.MARGIN, 0))))
            elif n_chunks_per_song == 1:  # Take central segment.
                datasets.append(
                    dataset.map(lambda sample: dict(sample, start=tf.maximum(
                        sample['duration'] / 2 - self._chunk_duration / 2,
                        0))))
        dataset = datasets[-1]
        for d in datasets[:-1]:
            dataset = dataset.concatenate(d)
        return dataset
    @property
    def instruments(self):
        """ Instrument dataset builder generator (lazily built once).
        :yield InstrumentBuilder instance.
        """
        if self._instrument_builders is None:
            self._instrument_builders = []
            for instrument in self._instruments:
                self._instrument_builders.append(
                    InstrumentDatasetBuilder(self, instrument))
        for builder in self._instrument_builders:
            yield builder
    def cache(self, dataset, cache, wait):
        """ Cache the given dataset if cache is enabled. Eventually waits for
        cache to be available (useful if another process is already computing
        cache) if provided wait flag is True.
        :param dataset: Dataset to be cached if cache is required.
        :param cache: Path of cache directory to be used, None if no cache.
        :param wait: If caching is enabled, True is cache should be waited.
        :returns: Cached dataset if needed, original dataset otherwise.
        """
        if cache is not None:
            if wait:
                # poll until another process finishes writing the cache
                while not exists(f'{cache}.index'):
                    get_logger().info(
                        'Cache not available, wait %s',
                        self.WAIT_PERIOD)
                    time.sleep(self.WAIT_PERIOD)
            cache_path = os.path.split(cache)[0]
            os.makedirs(cache_path, exist_ok=True)
            return dataset.cache(cache)
        return dataset
    def build(
            self, csv_path,
            batch_size=8, shuffle=True, convert_to_uint=True,
            random_data_augmentation=False, random_time_crop=True,
            infinite_generator=True, cache_directory=None,
            wait_for_cache=False, num_parallel_calls=4, n_chunks_per_song=2,):
        """ Builds the dataset pipeline from a CSV of tracks.

        :param csv_path: Path of the CSV listing tracks and durations.
        :param batch_size: Size of the output batches.
        :param shuffle: If True, shuffle songs, and chunks post-cache.
        :param convert_to_uint: If True, store spectrograms as uint8 dB
            while caching and convert back to float32 afterwards.
        :param random_data_augmentation: If True, apply random time
            stretch and pitch shift.
        :param random_time_crop: If True, crop a random T-frame window,
            otherwise take the central window of each segment.
        :param infinite_generator: If True, repeat the dataset forever.
        :param cache_directory: Optional path used for feature caching.
        :param wait_for_cache: If True, wait for the cache to exist.
        :param num_parallel_calls: Parallelism of the map operations.
        :param n_chunks_per_song: Number of chunks extracted per song.
        :returns: Dataset yielding batched (input, output) dict tuples.
        """
        dataset = dataset_from_csv(csv_path)
        dataset = self.compute_segments(dataset, n_chunks_per_song)
        # Shuffle data
        if shuffle:
            dataset = dataset.shuffle(
                buffer_size=200000,
                seed=self._random_seed,
                # useless since it is cached :
                reshuffle_each_iteration=True)
        # Expand audio path.
        dataset = dataset.map(self.expand_path)
        # Load waveform, compute spectrogram, and filtering error,
        # K bins frequencies, and waveform.
        N = num_parallel_calls
        for instrument in self.instruments:
            dataset = (
                dataset
                .map(instrument.load_waveform, num_parallel_calls=N)
                .filter(self.filter_error)
                .map(instrument.compute_spectrogram, num_parallel_calls=N)
                .map(instrument.filter_frequencies))
        dataset = dataset.map(self.filter_waveform)
        # Convert to uint before caching in order to save space.
        if convert_to_uint:
            for instrument in self.instruments:
                dataset = dataset.map(instrument.convert_to_uint)
        dataset = self.cache(dataset, cache_directory, wait_for_cache)
        # Check for INFINITY (should not happen)
        for instrument in self.instruments:
            dataset = dataset.filter(instrument.filter_infinity)
        # Repeat indefinitly
        if infinite_generator:
            dataset = dataset.repeat(count=-1)
        # Ensure same size for vocals and mix spectrograms.
        # NOTE: could be done before caching ?
        dataset = dataset.map(self.harmonize_spectrogram)
        # Filter out too short segment.
        # NOTE: could be done before caching ?
        dataset = dataset.filter(self.filter_short_segments)
        # Random time crop of 11.88s
        if random_time_crop:
            dataset = dataset.map(self.random_time_crop, num_parallel_calls=N)
        else:
            # frame_duration = 11.88/T
            # take central segment (for validation)
            for instrument in self.instruments:
                dataset = dataset.map(instrument.time_crop)
        # Post cache shuffling. Done where the data are the lightest:
        # after croping but before converting back to float.
        if shuffle:
            dataset = dataset.shuffle(
                buffer_size=256, seed=self._random_seed,
                reshuffle_each_iteration=True)
        # Convert back to float32
        if convert_to_uint:
            for instrument in self.instruments:
                dataset = dataset.map(
                    instrument.convert_to_float32, num_parallel_calls=N)
        M = 8  # Parallel call post caching.
        # Must be applied with the same factor on mix and vocals.
        if random_data_augmentation:
            dataset = (
                dataset
                .map(self.random_time_stretch, num_parallel_calls=M)
                .map(self.random_pitch_shift, num_parallel_calls=M))
        # Filter by shape (remove badly shaped tensors).
        for instrument in self.instruments:
            dataset = (
                dataset
                .filter(instrument.filter_shape)
                .map(instrument.reshape_spectrogram))
        # Select features and annotation.
        dataset = dataset.map(self.map_features)
        # Make batch (done after selection to avoid
        # error due to unprocessed instrument spectrogram batching).
        dataset = dataset.batch(batch_size)
        return dataset
| [
"noreply@github.com"
] | noreply@github.com |
33941d22a22cc3b78a3a56dfb400379ab3bdf2b7 | 0f8eda752a66f5cbf12fd39ac2f8e7c659d1db55 | /.local/bin/oslopolicy-checker | d83d95bccfb7f860149dead034258fc908a93c4b | [] | no_license | apiz19/konfiq | 91ab949a06fedf64c028c5aa668b6893a2605c23 | 733be5f9a561de95737f3a6b8067853e029dac05 | refs/heads/main | 2023-06-03T16:07:25.130600 | 2021-06-17T16:10:57 | 2021-06-17T16:10:57 | 377,864,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Console-script entry point for the `oslopolicy-checker` command: a thin,
# generated-style wrapper that delegates to oslo.policy's CLI main().
import re
import sys
from oslo_policy.shell import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix (-script.pyw / .exe) from argv[0]
    # so usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"hafiz.chairy@yahoo.com"
] | hafiz.chairy@yahoo.com | |
25e4d40ce31b86ade2aecefffc41a865c35c129a | 0a5540bc9abca153ee1f90e08d3765617369ad51 | /Tres_en_raya.py | bde11d6daf0e44734b14173b667f73ffe78627fb | [] | no_license | Migarve55/Python | 4dbcb13e73da0759dfc830479c955b43a5886681 | 52496594fb60c912e1a1e555a54412f8e102211f | refs/heads/master | 2020-07-23T13:04:19.185494 | 2016-11-20T15:28:47 | 2016-11-20T15:28:47 | 73,808,548 | 2 | 1 | null | 2016-11-18T08:43:47 | 2016-11-15T11:48:11 | Python | UTF-8 | Python | false | false | 4,690 | py | from random import randint
# --- Global game state for the tic-tac-toe board ---
board = []  # size x size grid of single-character cells, mutated in place
size = 3  # board dimension (other helpers below hard-code 3 in places)
filler = '-'  # marker for an empty cell
player1_wins = 0  # human ('X') score across rounds
player2_wins = 0  # computer ('O') score across rounds
# Build an empty size x size board.
for i in range(size):
	board.append([filler] * size)
def reboot():
	"""Reset every cell back to the filler mark and print the running score.

	Mutates the global ``board`` in place and reads the global win counters.
	"""
	for item in board:
		for i in range(len(item)):
			item[i] = filler
	print("Your wins: %i - Computer's wins: %i" % (player1_wins,player2_wins))
def print_board(brd):
	"""Render *brd* to stdout, one space-separated row per line."""
	rendered = '\n'.join(' '.join(row) for row in brd)
	print(rendered)
def check_for_row(row, char):
	"""Return the index of the first cell in *row* equal to *char*.

	Returns 0 when *char* is absent.  Note this is ambiguous with
	"found at index 0"; callers re-check the cell before using it.
	"""
	for idx, cell in enumerate(row):
		if cell == char:
			return idx
	return 0
def check_for_col(col, char, brd=None):
	"""Return the row index of the first cell equal to *char* in column *col*.

	*brd* defaults to the module-global board, keeping existing
	check_for_col(col, char) call sites working while making the function
	testable in isolation.  Returns 0 when *char* is absent, which is
	ambiguous with "found at row 0"; callers re-check the cell contents.
	"""
	if brd is None:
		brd = board  # fall back to the global game board
	# len(brd[0]) is the column count; the board is square so it also
	# bounds the row index used here (preserves the original's behaviour).
	for i in range(len(brd[0])):
		if brd[i][col] == char:
			return i
	return 0
def check(char, brd=None, n=None):
	"""Return a truthy value when *char* owns a full row, column or diagonal.

	Fixes the original's hard-coded diagonal target length of 3 (and the
	``2 - i`` anti-diagonal index) so the test works for any board size.
	*brd* defaults to the module-global board and *n* to its size, so the
	existing ``check('X')`` call sites are unchanged.
	"""
	if brd is None:
		brd = board
	if n is None:
		n = len(brd)
	target = char * n
	# Horizontals
	for row in brd:
		if ''.join(row) == target:
			return True
	# Verticals
	for col in range(n):
		if ''.join(brd[r][col] for r in range(n)) == target:
			return True
	# Main diagonal (top-left to bottom-right)
	if ''.join(brd[i][i] for i in range(n)) == target:
		return True
	# Anti-diagonal (top-right to bottom-left)
	if ''.join(brd[i][n - 1 - i] for i in range(n)) == target:
		return True
	return False
def about_to_win(brd, char):
	"""Return candidate completing cells for *char* on *brd*.

	For every row/column/diagonal holding exactly len(brd)-1 cells of
	*char*, a coordinate pair shaped [[x],[y]] is appended.  The pair may
	point at an occupied cell: check_for_row/check_for_col return 0 when no
	filler cell exists in the line, so callers must verify
	``brd[y][x] == filler`` before playing the move (player_2 does this).
	"""
	lenght = len(brd) - 1  # (sic) cells needed before a line is complete
	check = 0
	result = []
	for i in range(len(brd)): #Horizontal lines
		for j in range(len(brd[i])):
			if brd[i][j] == char:
				check += 1
		if check == lenght:
			j = check_for_row(brd[i],filler)
			result.append([[j],[i]]) # X, Y
		check = 0
	for i in range(len(brd)): #Vertical lines (i is the column index here)
		for j in range(len(brd[i])):
			if brd[j][i] == char:
				check += 1
		if check == lenght:
			j = check_for_col(i,filler)
			result.append([[i],[j]]) # X, Y
		check = 0
	#Diagonal \
	for i in range(len(brd)):
		if brd[i][i] == char:
			check += 1
	if check == lenght:
		for i in range(len(brd)):
			if brd[i][i] == filler:
				result.append([[i],[i]])
	check = 0
	#Diagonal /
	for i in range(len(brd)):
		if brd[i][len(brd) - 1 - i] == char:
			check += 1
	if check == lenght:
		for i in range(len(brd)):
			if brd[i][len(brd) - 1 - i] == filler:
				result.append([[len(brd) - 1 - i],[i]])
	return result
def is_full(brd, empty='-'):
	"""Return True when no cell of *brd* equals *empty*.

	*empty* defaults to '-', the value of the module's ``filler`` constant,
	so the function no longer depends on a global and stays backward
	compatible with existing ``is_full(board)`` call sites.  An empty board
	list is considered full, matching the original behaviour.
	"""
	for row in brd:
		for cell in row:
			if cell == empty:
				return False
	return True
# Show the empty board once before the game loop starts.
print_board(board)
def player_1(x, y):
	"""Place the human's 'X' at 1-based coordinates (*x*, *y*).

	Out-of-range input is silently coerced to (1, 1); attempts to reuse an
	occupied cell only print a message, so the human loses that turn.
	Mutates the global ``board``.
	"""
	if (x < 1 or x > size) or (y < 1 or y > size):
		x = 1
		y = 1
	if board[y - 1][x - 1] == 'O':
		print("You can't write over the others player's movements")
	elif board[y - 1][x - 1] != 'X':
		board[y - 1][x - 1] = 'X'
	else:
		print("You already input that")
def player_2():
	"""Computer move ('O'): win if possible, else block 'X', else random.

	Mutates the global ``board``.  The random fallback loops until it hits
	an empty cell, bailing out via is_full() if the board fills up.
	"""
	# Check whether either side is one move away from completing a line.
	estatus = about_to_win(board,'X')
	to_win = about_to_win(board,'O')
	safe = True  # unused; apparent leftover from an earlier version
	moved = False
	if to_win != []: #Take the winning move if one exists
		for pos in to_win:
			x = int(pos[0][0])
			y = int(pos[1][0])
			if board[y][x] == filler:
				board[y][x] = 'O'
				moved = True
				break
	if estatus != [] and not moved: #Defense: block the human's line
		for pos in estatus:
			x = int(pos[0][0])
			y = int(pos[1][0])
			if board[y][x] == filler:
				board[y][x] = 'O'
				moved = True
				break
	if not moved: #No tactical move was available
		if True: #Random fallback
			#print("Random")
			x = randint(0,len(board) - 1)
			y = randint(0,len(board[0]) - 1)
			while (board[y][x] != filler):
				x = randint(0,len(board) - 1)
				y = randint(0,len(board[0]) - 1)
				if is_full(board):
					break
			if board[y][x] == filler:
				board[y][x] = 'O'
while True: #Main loop: human turn, then computer turn, forever
	print("Your turn: ")
	player_1(int(input("Enter X: ")),int(input("Enter Y: ")))
	print_board(board)
	if check('X'):
		print("You win!")
		player1_wins += 1
		reboot();
	# NOTE(review): after a human win the loop does not restart, so the
	# computer immediately plays the first move of the fresh board.
	print("Computer's turn:")
	player_2()
	print_board(board)
	if check('O'):
		print("You loose!")
		player2_wins += 1
		reboot();
	if is_full(board):
		print("Board is full. Rebooting.")
		reboot();
| [
"noreply@github.com"
] | noreply@github.com |
efb3c8ea22e6329e1b8538f88338e4ce2b5f5d97 | 2d6d5424e881252898b898fbfbc47fe1487371cf | /examples_trame/basic/file_viewer.py | 6348b5f4cd7f6b8e4acd8f0fea37f1a25034e094 | [
"MIT"
] | permissive | pyvista/pyvista | 333e55bfaa6b8bcdb47e2df04c823d35f05db364 | 1b450b23340f367315fc914075d551e0a4df8cc3 | refs/heads/main | 2023-08-20T08:04:27.146062 | 2023-08-20T01:14:03 | 2023-08-20T01:14:03 | 92,974,124 | 1,885 | 389 | MIT | 2023-09-14T21:09:28 | 2017-05-31T18:01:42 | Python | UTF-8 | Python | false | false | 2,308 | py | import tempfile
from trame.app import get_server
from trame.app.file_upload import ClientFile
from trame.ui.vuetify import SinglePageLayout
from trame.widgets import vuetify
import pyvista as pv
from pyvista.trame.ui import plotter_ui
# -----------------------------------------------------------------------------
# Trame setup
# -----------------------------------------------------------------------------
pv.OFF_SCREEN = True  # render server-side, without opening a native window
server = get_server()
state, ctrl = server.state, server.controller
state.trame__title = "File Viewer"  # browser tab title
ctrl.on_server_ready.add(ctrl.view_update)  # push the first render once up
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
pl = pv.Plotter()  # single shared plotter; each upload adds/replaces a mesh


@server.state.change("file_exchange")
def handle(file_exchange, **kwargs):
    """State-change callback: load an uploaded file as a mesh and show it.

    When the upload is cleared (no content) the plotter is emptied instead.
    """
    file = ClientFile(file_exchange)
    if file.content:
        print(file.info)
        # Raw upload payload; renamed from `bytes`, which shadowed the builtin.
        data = file.content
        # The temp file keeps the original filename as its suffix, presumably
        # so pv.read can pick a reader from the extension — TODO confirm.
        with tempfile.NamedTemporaryFile(suffix=file.name) as path:
            with open(path.name, 'wb') as f:
                f.write(data)
            ds = pv.read(path.name)
        pl.add_mesh(ds, name=file.name)
        pl.reset_camera()
    else:
        pl.clear_actors()
        pl.reset_camera()
# -----------------------------------------------------------------------------
# Web App setup
# -----------------------------------------------------------------------------
state.trame__title = "File Viewer"
with SinglePageLayout(server) as layout:
    layout.title.set_text("File Viewer")
    with layout.toolbar:
        vuetify.VSpacer()
        # Upload widget bound to the `file_exchange` state key that the
        # handle() callback above reacts to.
        vuetify.VFileInput(
            show_size=True,
            small_chips=True,
            truncate_length=25,
            v_model=("file_exchange", None),
            dense=True,
            hide_details=True,
            style="max-width: 300px;",
        )
        # Busy indicator driven by trame's built-in busy flag.
        vuetify.VProgressLinear(
            indeterminate=True, absolute=True, bottom=True, active=("trame__busy",)
        )
    with layout.content:
        with vuetify.VContainer(
            fluid=True, classes="pa-0 fill-height", style="position: relative;"
        ):
            # Use PyVista UI template for Plotters
            view = plotter_ui(pl)
            ctrl.view_update = view.update
server.start()
| [
"noreply@github.com"
] | noreply@github.com |
08291ce93cc41331a4e39141383462859213da84 | d2befabf29be811f95a2aa7c5460f47248b3bb38 | /code/YOLO101_1Resolusi_60x40_TrainCDV2_OK2.py | 9566e84671c4812757106f4ea0875851a9b92c9f | [
"Apache-2.0"
] | permissive | mlcv-lab/mr3dcapsnet | ed466b86d682c2d36b50698402e384527cb6bce4 | d0b37ca085073257b0c485210ec92a5c6e7d9bb6 | refs/heads/master | 2022-11-14T01:57:52.255660 | 2020-07-02T00:35:32 | 2020-07-02T00:35:32 | 276,502,074 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,938 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 13:24:34 2017
@author: user
"""
#from keras.preprocessing.image import ImageDataGenerator
#from keras.models import Sequential
#from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D, Merge
#from keras.layers.convolutional import Convolution3D, MaxPooling3D
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils import np_utils, generic_utils
import os
import pandas as pd
import matplotlib
from keras.callbacks import ModelCheckpoint
import keras.callbacks
import matplotlib.pyplot as plt
import numpy as np
import cv2
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation
import csv
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D
from keras.layers.convolutional import Convolution3D, MaxPooling3D
from sklearn.metrics import classification_report,confusion_matrix,cohen_kappa_score,roc_auc_score
#from keras.regularizers import l2, l1, WeightRegularizer
from keras.layers.normalization import BatchNormalization
import gc
# Pin CUDA device enumeration to PCI bus order and expose only two GPUs.
# NOTE(review): the space in "0, 1" is unusual; CUDA_VISIBLE_DEVICES is
# normally a comma-separated list without spaces — confirm it is honored.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
# UCF-101 action class names; tuple index i-1 corresponds to class id i.
# Built once at import time instead of rebuilding a 101-entry dict literal
# on every call, as the original `{...}.get(x, "----")` version did.
_UCF101_LABELS = (
    'ApplyEyeMakeup', 'ApplyLipstick', 'Archery', 'BabyCrawling',
    'BalanceBeam', 'BandMarching', 'BaseballPitch', 'Basketball',
    'BasketballDunk', 'BenchPress', 'Biking', 'Billiards',
    'BlowDryHair', 'BlowingCandles', 'BodyWeightSquats', 'Bowling',
    'BoxingPunchingBag', 'BoxingSpeedBag', 'BreastStroke', 'BrushingTeeth',
    'CleanAndJerk', 'CliffDiving', 'CricketBowling', 'CricketShot',
    'CuttingInKitchen', 'Diving', 'Drumming', 'Fencing',
    'FieldHockeyPenalty', 'FloorGymnastics', 'FrisbeeCatch', 'FrontCrawl',
    'GolfSwing', 'Haircut', 'Hammering', 'HammerThrow',
    'HandstandPushups', 'HandstandWalking', 'HeadMassage', 'HighJump',
    'HorseRace', 'HorseRiding', 'HulaHoop', 'IceDancing',
    'JavelinThrow', 'JugglingBalls', 'JumpingJack', 'JumpRope',
    'Kayaking', 'Knitting', 'LongJump', 'Lunges',
    'MilitaryParade', 'Mixing', 'MoppingFloor', 'Nunchucks',
    'ParallelBars', 'PizzaTossing', 'PlayingCello', 'PlayingDaf',
    'PlayingDhol', 'PlayingFlute', 'PlayingGuitar', 'PlayingPiano',
    'PlayingSitar', 'PlayingTabla', 'PlayingViolin', 'PoleVault',
    'PommelHorse', 'PullUps', 'Punch', 'PushUps',
    'Rafting', 'RockClimbingIndoor', 'RopeClimbing', 'Rowing',
    'SalsaSpin', 'ShavingBeard', 'Shotput', 'SkateBoarding',
    'Skiing', 'Skijet', 'SkyDiving', 'SoccerJuggling',
    'SoccerPenalty', 'StillRings', 'SumoWrestling', 'Surfing',
    'Swing', 'TableTennisShot', 'TaiChi', 'TennisSwing',
    'ThrowDiscus', 'TrampolineJumping', 'Typing', 'UnevenBars',
    'VolleyballSpiking', 'WalkingWithDog', 'WallPushups', 'WritingOnBoard',
    'YoYo',
)


def getLabelFromIdx(x):
    """Return the UCF-101 class name for 1-based index *x*.

    Returns "----" for any index outside 1..101, matching the original
    dict .get() default.
    """
    if 1 <= x <= len(_UCF101_LABELS):
        return _UCF101_LABELS[x - 1]
    return "----"
# --- Experiment configuration ---
testing = False  # True = quick smoke run (few videos, 2 epochs)
R1x = 60  # primary resolution, width
R1y = 40  # primary resolution, height
R2x = 2
R2y = 3
R3x = 2
R3y = 3
RDepth = 13  # frames kept per clip (52 frames read, every 4th retained)
kcv = 1  # fold/block id written to the results CSV
vartuning = '1Resolusi_60x40'
filenya = 'YOLO_U_V1_' + vartuning + '.csv'
# Write the CSV header row.
# NOTE(review): `writer` outlives this `with` block and is used again at the
# very end of the script, after the file has been closed — that late write
# will fail; keep the file open (or reopen in append mode) for the results.
with open(filenya, 'w') as out_file:
	writer = csv.writer(out_file, lineterminator = '\n')
	grup = []
	grup.append('Blok ke-')
	grup.append('Skor Akurasi')
	grup.append('Skor Kappa')
	writer.writerows([grup])
	grup = []
# Per-resolution accumulators for clips and labels.
X_train_R1 = []
X_train_R2 = []
X_train_R3 = []
labels_train = []
count_train = 0
X_test_R1 = []
X_test_R2 = []
X_test_R3 = []
labels_test = []
count_test = 0
# Load videos for every class: first the TestData split, then TrainData.
# Each clip contributes 13 frames (every 4th of the first 52), resized to
# the three resolutions; clips shorter than 52 frames are skipped.
for labelIdx in range(1, 101):
	print labelIdx
	listing = os.listdir('TestData/' + getLabelFromIdx(labelIdx) + '/')
	count_pretesting = 0
	for vid in listing:
		count_pretesting += 1
		#
		if (count_pretesting > 5) and testing:
			break
		vid = 'TestData/' + getLabelFromIdx(labelIdx) + '/' +vid
		framesR1 = []
		framesR2 = []
		framesR3 = []
		cap = cv2.VideoCapture(vid)
		fps = cap.get(5)
		#print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
		#test
		frame = []
		ret, frame = cap.read()
		#print frame.shape
		if frame is None:
			print "image not readable"
			break
		count = 0
		kondisi = True
		while kondisi == True:
			ret, frame = cap.read()
			if frame is None:
				print "skipping vid"
				break
			count += 1
			# Keep only every 4th frame (temporal subsampling).
			if not((count)%4 == 0):
				continue
			frameR1 = cv2.resize(frame, (R1x, R1y), interpolation=cv2.INTER_AREA)
			framesR1.append(frameR1)
			frameR2 = cv2.resize(frame, (R2x, R2y), interpolation=cv2.INTER_AREA)
			framesR2.append(frameR2)
			frameR3 = cv2.resize(frame, (R3x, R3y), interpolation=cv2.INTER_AREA)
			framesR3.append(frameR3)
			#plt.imshow(gray, cmap = plt.get_cmap('gray'))
			#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
			#plt.show()
			#cv2.imshow('frame',gray)
			if count == 52:
				kondisi = False
			if cv2.waitKey(1) & 0xFF == ord('q'):
				break
		# Clips that ended before 52 frames are discarded.
		if not(count == 52):
			print "vid not saved"
			continue
		count_test += 1
		label = labelIdx-1
		labels_test.append(label)
		cap.release()
		cv2.destroyAllWindows()
		inputR1=np.array(framesR1)
		inputR2=np.array(framesR2)
		inputR3=np.array(framesR3)
		#print input.shape
		iptR1=inputR1
		iptR2=inputR2
		iptR3=inputR3
		#print ipt.shape
		X_test_R1.append(iptR1)
		X_test_R2.append(iptR2)
		X_test_R3.append(iptR3)
	# Same procedure for the training split of this class.
	listing = os.listdir('TrainData/' + getLabelFromIdx(labelIdx) + '/')
	count_pretesting = 0
	for vid in listing:
		count_pretesting += 1
		if (count_pretesting > 5) and testing:
			break
		vid = 'TrainData/' + getLabelFromIdx(labelIdx) + '/' +vid
		framesR1 = []
		framesR2 = []
		framesR3 = []
		cap = cv2.VideoCapture(vid)
		fps = cap.get(5)
		#print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
		#test
		frame = []
		ret, frame = cap.read()
		#print frame.shape
		if frame is None:
			print "image not readable"
			break
		count = 0
		kondisi = True
		while kondisi == True:
			ret, frame = cap.read()
			if frame is None:
				print "skipping vid"
				break
			count += 1
			if not((count)%4 == 0):
				continue
			frameR1 = cv2.resize(frame, (R1x, R1y), interpolation=cv2.INTER_AREA)
			framesR1.append(frameR1)
			frameR2 = cv2.resize(frame, (R2x, R2y), interpolation=cv2.INTER_AREA)
			framesR2.append(frameR2)
			frameR3 = cv2.resize(frame, (R3x, R3y), interpolation=cv2.INTER_AREA)
			framesR3.append(frameR3)
			#plt.imshow(gray, cmap = plt.get_cmap('gray'))
			#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
			#plt.show()
			#cv2.imshow('frame',gray)
			if count == 52:
				kondisi = False
			if cv2.waitKey(1) & 0xFF == ord('q'):
				break
		if not(count == 52):
			print "vid not saved"
			continue
		count_train += 1
		label = labelIdx-1
		labels_train.append(label)
		cap.release()
		cv2.destroyAllWindows()
		inputR1=np.array(framesR1)
		inputR2=np.array(framesR2)
		inputR3=np.array(framesR3)
		#print input.shape
		iptR1=inputR1
		iptR2=inputR2
		iptR3=inputR3
		#print ipt.shape
		X_train_R1.append(iptR1)
		X_train_R2.append(iptR2)
		X_train_R3.append(iptR3)
# Formatting: stack the per-clip frame lists into fixed-shape 5-D arrays
# (clips, depth, height, width, channels), one-hot encode labels, and scale
# pixel values from [0, 255] to [-1, 1].
X_train_R1_array = (X_train_R1)
X_train_R2_array = (X_train_R2)
X_train_R3_array = (X_train_R3)
labels_train_array = np.array(labels_train)
Y_train = np_utils.to_categorical(labels_train_array, 101)
del X_train_R1
del X_train_R2
del X_train_R3
gc.collect()
X_test_R1_array = (X_test_R1)
X_test_R2_array = (X_test_R2)
X_test_R3_array = (X_test_R3)
labels_test_array = np.array(labels_test)
Y_test = np_utils.to_categorical(labels_test_array, 101)
del X_test_R1
del X_test_R2
del X_test_R3
gc.collect()
test_set_R1 = np.zeros((count_test, RDepth, R1y,R1x,3))
test_set_R2 = np.zeros((count_test, RDepth, R2y,R2x,3))
test_set_R3 = np.zeros((count_test, RDepth, R3y,R3x,3))
for h in xrange(count_test):
	test_set_R1[h][:][:][:][:]=X_test_R1_array[h]
	test_set_R2[h][:][:][:][:]=X_test_R2_array[h]
	test_set_R3[h][:][:][:][:]=X_test_R3_array[h]
train_set_R1 = np.zeros((count_train, RDepth, R1y,R1x,3))
train_set_R2 = np.zeros((count_train, RDepth, R2y,R2x,3))
train_set_R3 = np.zeros((count_train, RDepth, R3y,R3x,3))
for h in xrange(count_train):
	train_set_R1[h][:][:][:][:]=X_train_R1_array[h]
	train_set_R2[h][:][:][:][:]=X_train_R2_array[h]
	train_set_R3[h][:][:][:][:]=X_train_R3_array[h]
# Free the Python-list copies before normalisation doubles memory use.
del X_test_R1_array
del X_test_R2_array
del X_test_R3_array
gc.collect()
del X_train_R1_array
del X_train_R2_array
del X_train_R3_array
gc.collect()
train_set_R1 = train_set_R1.astype('float32')
train_set_R1 -= 127.5
train_set_R1 /=127.5
train_set_R2 = train_set_R2.astype('float32')
train_set_R2 -= 127.5
train_set_R2 /=127.5
train_set_R3 = train_set_R3.astype('float32')
train_set_R3 -= 127.5
train_set_R3 /=127.5
test_set_R1 = test_set_R1.astype('float32')
test_set_R1 -= 127.5
test_set_R1 /=127.5
test_set_R2 = test_set_R2.astype('float32')
test_set_R2 -= 127.5
test_set_R2 /=127.5
test_set_R3 = test_set_R3.astype('float32')
test_set_R3 -= 127.5
test_set_R3 /=127.5
#%% define a model (3-D ConvNet over the primary 60x40 resolution only)
# # Parameter tuning
# jumEpoch = 25
# nb_classes = 8
# #Lengan A
# filterNumL1 = 16 # jumlah filter L1
# filterSizeXYL1 = 5 #ukuran filter dimensi spasial
# filterSizeTL1 = 3#ukuran filter dimensi spasial
#
# poolingSizeXYL1 = 3
# poolingSizeTL1 = 1
# poolingStrideXYL1 = 1
# poolingStrideTL1 = 1 #parameter pooling L1
# #Lengan B
# filterNumL1B = 32 # jumlah filter L1
# filterSizeXYL1B = 3 #ukuran filter dimensi spasial
# filterSizeTL1B = 3 #ukuran filter dimensi spasial
#
# poolingSizeXYL1B = 3
# poolingSizeTL1B = 1
# poolingStrideXYL1B = 1
# poolingStrideTL1B = 1 #parameter pooling L1
# Define model
# modelA = Sequential()
# modelA.add(Convolution3D(filterNumL1,kernel_dim1=filterSizeXYL1, kernel_dim2=filterSizeXYL1, kernel_dim3=filterSizeTL1, input_shape=(10, 20, 30, 3), activation='relu', dim_ordering='tf'))
# modelA.add(MaxPooling3D(pool_size=(poolingSizeXYL1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
# modelA.add(SpatialDropout3D(0.4))
# modelA.add(Flatten())
#
# modelB = Sequential()
# modelB.add(Convolution3D(filterNumL1B,kernel_dim1=filterSizeXYL1B, kernel_dim2=filterSizeXYL1B, kernel_dim3=filterSizeTL1B, input_shape=(10, 20, 30, 3), activation='relu', dim_ordering='tf'))
# modelB.add(MaxPooling3D(pool_size=(poolingSizeXYL1B, poolingSizeXYL1B, poolingSizeTL1B), dim_ordering='tf'))
# modelB.add(SpatialDropout3D(0.4))
# modelB.add(Flatten())
#
#
# model = Sequential()
# model.add(Merge([modelA, modelB], mode='concat'))
# model.add(Dense(paramuji, init='normal', activation='relu'))
#
# model.add(Dropout(0.4))
#
# model.add(Dense(nb_classes,init='normal'))
#
# model.add(Activation('softmax'))
# model.summary()
# model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics = ["accuracy"])
#
#
# # Train the model
#
# hist = model.fit([train_set, train_set], Y_train, validation_data=([test_set, test_set], Y_test),
# batch_size=15, nb_epoch = jumEpoch, show_accuracy=True, shuffle=True, verbose = 0)
#
# # Evaluate the model
# score = model.evaluate([test_set, test_set], Y_test, batch_size=15, show_accuracy=True)
#
# Define model
# Parameter tuning
if testing:
	jumEpoch = 2
else:
	jumEpoch = 250
nb_classes = 101
filterNumL1 = 64 # number of L1 filters
filterSizeXYL1 = 5 #filter size, spatial dimensions
filterSizeTL1 = 3#filter size, temporal dimension
poolingSizeXYL1 = 2
poolingSizeTL1 = 2
poolingStrideXYL1 = 1
poolingStrideTL1 = 1 #L1 pooling parameters
filterNumL2 = 64 # number of L2 filters
filterSizeXYL2 = 5 #filter size, spatial dimensions
filterSizeTL2 = 5#filter size, temporal dimension
model = Sequential()
model.add(Convolution3D(filterNumL1,kernel_dim1=filterSizeTL1, kernel_dim2=filterSizeXYL1, kernel_dim3=filterSizeXYL1, input_shape=(RDepth, R1y, R1x,3), activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(2, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Convolution3D(filterNumL2,kernel_dim1=3, kernel_dim2=filterSizeXYL2, kernel_dim3=filterSizeXYL2, activation='relu', dim_ordering='tf'))
# model.add(Convolution3D(filterNumL2,kernel_dim1=1, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Convolution3D(128,kernel_dim1=3, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
# model.add(Convolution3D(128,kernel_dim1=1, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.5))
model.add(Flatten())
model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(Dense(nb_classes,init='normal'))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics = ['acc'])
# Train the model, checkpointing the best validation-accuracy weights.
nama_filenya = "weights_" + vartuning +"_.hdf5"
checkpointer = ModelCheckpoint(filepath=nama_filenya, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
hist = model.fit(train_set_R1, Y_train, validation_data=(test_set_R1, Y_test),
          batch_size=16, nb_epoch = jumEpoch, shuffle=True, verbose = 1, callbacks = [checkpointer])
# Evaluate the model
# load best model
model.load_weights(nama_filenya)
score = model.evaluate(test_set_R1, Y_test, batch_size=8)
print "Skor Model:"
print score[1]
Y_pred = model.predict_classes(test_set_R1, batch_size = 8)
# NOTE(review): `writer` was created inside a `with open(...)` block near the
# top of the script; that file is closed by now, so these writerows() calls
# will raise "I/O operation on closed file" when reached.
grup.append(kcv)
grup.append(score[1])
cohennya = cohen_kappa_score(np.argmax(Y_test,axis=1), Y_pred)
print "kohen kappa:"
print cohennya
grup.append(cohennya)
writer.writerows([grup])
| [
"marketing@delligence.ai"
] | marketing@delligence.ai |
8403858c03fc916e54f0d1a2a88ecc413393b9ef | eeeceb85a215522ef412fddc44cc471c894f28af | /src/python/Problem077.py | 71302c21716d25cb49e1104a98582f5b9f3d89e3 | [
"MIT"
] | permissive | mchrzanowski/ProjectEuler | 3874ad141ca0bf633039899807a6afc1cca67134 | 06a24cadbd2c38fb42c3935779fc7ffb6de4e1b5 | refs/heads/master | 2021-01-25T05:34:32.548331 | 2015-02-08T20:31:10 | 2015-02-08T20:31:10 | 3,176,325 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | '''
Created on Feb 24, 2012
@author: mchrzanowski
http://projecteuler.net/problem=77
'''
from ProjectEulerPrime import ProjectEulerPrime
from time import time
LIMIT = 10 ** 2 # this number can't be that huge; the sequence is very non-linear.
SUM_LIMIT = 5 * 10 ** 3 # by 20, it's at 26. by 50, it's already at 819. so 100 seems like a very safe ceiling.
def main():
    """Coin-change style DP over primes: numberOfWays[k] counts the ways to
    write k as a sum of primes.  Stops at the first number whose count
    reaches SUM_LIMIT.

    NOTE(review): the threshold check only runs inside the `isPrime(i)`
    branch and only for index i, so a qualifying composite number would be
    missed; also `>=` means "at least 5000", not "over 5000" as Project
    Euler 77 phrases it.  The published answer (71) happens to be prime, so
    the result is correct in practice — confirm if reusing this pattern.
    """
    primeObject = ProjectEulerPrime()
    solution = 0
    numberOfWays = [0 for i in xrange(LIMIT + 1)]
    numberOfWays[0] = 1 # start it off
    for i in xrange(2, LIMIT + 1):
        if primeObject.isPrime(i):
            # Standard unbounded-knapsack update for the new prime i.
            for j in xrange(i, LIMIT + 1): numberOfWays[j] += numberOfWays[j - i]
            if numberOfWays[i] >= SUM_LIMIT:
                solution = i
                break
    print "First number to be produced", SUM_LIMIT, "different ways using sums of primes:", solution
if __name__ == '__main__':
    start = time()
    main()
    end = time()
    print "Runtime:", end - start, "seconds."
print "Runtime:", end - start, "seconds." | [
"mike.chrzanowski0@gmail.com"
] | mike.chrzanowski0@gmail.com |
7730f23c1fe157a139eaf71edadb4982a38877c1 | 0d39e91482abe7f40523e9e225ede5464295888f | /mitogen/unix.py | 1af1c0ec6b66522ccdaa603778a48f45502f81cc | [
"BSD-3-Clause"
] | permissive | eamanu/python-mitogen | bdccdd7ceca4f1b114bf3e28556eb0d959b008e8 | e93c7aae83b130abe1ef2dcf829d32e40f9fe8b1 | refs/heads/master | 2022-04-29T17:01:32.451975 | 2019-10-24T00:30:20 | 2019-10-24T00:45:18 | 217,181,829 | 1 | 0 | BSD-3-Clause | 2022-03-29T21:58:20 | 2019-10-24T01:02:03 | Python | UTF-8 | Python | false | false | 7,133 | py | # Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# !mitogen: minify_safe
"""
Permit connection of additional contexts that may act with the authority of
this context. For now, the UNIX socket is always mode 0600, i.e. can only be
accessed by root or the same UID. Therefore we can always trust connections to
have the same privilege (auth_id) as the current process.
"""
import errno
import logging
import os
import socket
import struct
import sys
import tempfile
import mitogen.core
import mitogen.master
# Module-level logger shared by the listener/client helpers below.
LOG = logging.getLogger(__name__)
class Error(mitogen.core.Error):
    """
    Base for errors raised by :mod:`mitogen.unix`.  Subclasses
    :class:`mitogen.core.Error` so callers can catch either level.
    """
    pass
class ConnectError(Error):
    """
    Raised when :func:`mitogen.unix.connect` fails to connect to the listening
    socket.
    """
    #: UNIX error number reported by underlying exception (``errno`` value
    #: from the original :class:`socket.error`).
    errno = None
def is_path_dead(path):
    """Return True when connecting to *path* fails with ECONNREFUSED or
    ENOENT, i.e. the socket file is absent or no listener is behind it."""
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        try:
            client.connect(path)
        except socket.error:
            # sys.exc_info() keeps compatibility with very old Pythons,
            # mirroring the idiom used throughout this module.
            err = sys.exc_info()[1]
            return err.args[0] in (errno.ECONNREFUSED, errno.ENOENT)
        return False
    finally:
        client.close()
def make_socket_path():
    """Return a fresh temporary filesystem path for the listener socket.

    mktemp() is race-prone in general; here the path is presumably safe
    because the caller binds it with mode 0600 and stale paths are checked
    via is_path_dead() — confirm before reusing elsewhere.
    """
    return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock')
class ListenerStream(mitogen.core.Stream):
    """Stream whose read-readiness callback accepts new UNIX socket clients
    instead of reading data."""
    def on_receive(self, broker):
        """Accept one pending connection and hand it to the protocol."""
        sock, _ = self.receive_side.fp.accept()
        try:
            self.protocol.on_accept_client(sock)
        except:
            # Bare except on purpose: the new fd must be closed on ANY
            # failure before the exception propagates.
            sock.close()
            raise
class Listener(mitogen.core.Protocol):
    """Protocol for the listening UNIX socket: accepts same-UID clients and
    registers each as a new context with the router."""
    stream_class = ListenerStream
    keep_alive = True
    @classmethod
    def build_stream(cls, router, path=None, backlog=100):
        """Bind a mode-0600 UNIX socket at *path* (auto-generated when
        omitted), removing any stale socket file first, and start
        receiving on the broker.  Returns the listening stream."""
        if not path:
            path = make_socket_path()
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        if os.path.exists(path) and is_path_dead(path):
            LOG.debug('%r: deleting stale %r', cls.__name__, path)
            os.unlink(path)
        sock.bind(path)
        # 0600: only root or the same UID may connect, so every connection
        # can be trusted with this context's authority (see module docstring).
        os.chmod(path, int('0600', 8))
        sock.listen(backlog)
        stream = super(Listener, cls).build_stream(router, path)
        stream.accept(sock, sock)
        router.broker.start_receive(stream)
        return stream
    def __repr__(self):
        return '%s.%s(%r)' % (
            __name__,
            self.__class__.__name__,
            self.path,
        )
    def __init__(self, router, path):
        self._router = router
        self.path = path
    def _unlink_socket(self):
        """Best-effort removal of the socket file; ENOENT is tolerated."""
        try:
            os.unlink(self.path)
        except OSError:
            e = sys.exc_info()[1]
            # Prevent a shutdown race with the parent process.
            if e.args[0] != errno.ENOENT:
                raise
    def on_shutdown(self, broker):
        """Stop accepting, remove the socket file and close the listen fd."""
        broker.stop_receive(self.stream)
        self._unlink_socket()
        self.stream.receive_side.close()
    def on_accept_client(self, sock):
        """Handshake with a freshly accepted client.

        Protocol: client sends its PID (>L); we reply with the allocated
        context ID, our own context ID and our PID (>LLL), then register a
        stream acting with this context's authority (auth_id).
        """
        sock.setblocking(True)
        try:
            pid, = struct.unpack('>L', sock.recv(4))
        except (struct.error, socket.error):
            LOG.error('listener: failed to read remote identity: %s',
                      sys.exc_info()[1])
            return
        context_id = self._router.id_allocator.allocate()
        try:
            sock.send(struct.pack('>LLL', context_id, mitogen.context_id,
                                  os.getpid()))
        except socket.error:
            LOG.error('listener: failed to assign identity to PID %d: %s',
                      pid, sys.exc_info()[1])
            return
        context = mitogen.parent.Context(self._router, context_id)
        stream = mitogen.core.MitogenProtocol.build_stream(
            router=self._router,
            remote_id=context_id,
            auth_id=mitogen.context_id,
        )
        stream.name = u'unix_client.%d' % (pid,)
        stream.accept(sock, sock)
        LOG.debug('listener: accepted connection from PID %d: %s',
                  pid, stream.name)
        self._router.register(context, stream)
def _connect(path, broker, sock):
    """Client side of the handshake over an already-created *sock*.

    Sends our PID, receives (our new context ID, the listener's context ID,
    the listener's PID), then builds a Router/stream pair wired to the
    listener.  Returns ``(router, context)``.  Raises :class:`ConnectError`
    (with ``errno`` set) on any socket failure.

    Side effects: assigns the module-globals ``mitogen.context_id``,
    ``mitogen.parent_id`` and ``mitogen.parent_ids``.
    """
    try:
        # ENOENT, ECONNREFUSED
        sock.connect(path)
        # ECONNRESET
        sock.send(struct.pack('>L', os.getpid()))
        mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12))
    except socket.error:
        e = sys.exc_info()[1]
        ce = ConnectError('could not connect to %s: %s', path, e.args[1])
        ce.errno = e.args[0]
        raise ce
    mitogen.parent_id = remote_id
    mitogen.parent_ids = [remote_id]
    LOG.debug('client: local ID is %r, remote is %r',
              mitogen.context_id, remote_id)
    router = mitogen.master.Router(broker=broker)
    stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id)
    stream.accept(sock, sock)
    stream.name = u'unix_listener.%d' % (pid,)
    # Reset the identity globals if the listener disappears, and tear the
    # stream down when the broker shuts down.
    mitogen.core.listen(stream, 'disconnect', _cleanup)
    mitogen.core.listen(router.broker, 'shutdown',
                        lambda: router.disconnect_stream(stream))
    context = mitogen.parent.Context(router, remote_id)
    router.register(context, stream)
    return router, context
def connect(path, broker=None):
    """Connect to the Listener socket at *path* and return ``(router,
    context)``.  The socket is closed if the handshake fails for any
    reason (bare except is deliberate: no fd may leak)."""
    LOG.debug('client: connecting to %s', path)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        return _connect(path, broker, sock)
    except:
        sock.close()
        raise
def _cleanup():
    """
    Reset mitogen.context_id and friends when our connection to the parent is
    lost. Per comments on #91, these globals need to move to the Router so
    fix-ups like this become unnecessary.
    """
    # Restore the "I am the root context" defaults set at import time.
    mitogen.context_id = 0
    mitogen.parent_id = None
    mitogen.parent_ids = []
mitogen.parent_ids = []
| [
"eamanu@eamanu.com"
] | eamanu@eamanu.com |
7385b6bf70487969f3751ad0362a364bf63af7b5 | af06f557d31fdfb6ef948dd5acebf74aa8144e2a | /chan/views.py | 8d03e15eb8a9c996e105fea8da01381537ae771d | [] | no_license | junkwon-dev/NET_Comm_web | 1d7c6921460830973b972c24536252dd9f591890 | c1549efaa2cf2288ca5b9f0f1fa4a4af2131069e | refs/heads/master | 2022-12-17T02:58:18.058715 | 2021-06-09T14:18:34 | 2021-06-09T14:18:34 | 225,137,248 | 0 | 0 | null | 2022-12-08T03:15:06 | 2019-12-01T09:39:11 | JavaScript | UTF-8 | Python | false | false | 1,677 | py | from django.shortcuts import render, get_object_or_404,redirect
from django.utils import timezone
from django.core.paginator import Paginator
from .models import Blog_chan
from django.http import HttpResponse
def chan(request):
    """Board list view: newest posts first, paginated three per page."""
    # Passes the raw model manager to the template alongside the page.
    blogs = Blog_chan.objects
    blog_list = Blog_chan.objects.order_by('pub_date').reverse()
    # Paginate all blog posts, three objects per page.
    paginator = Paginator(blog_list,3)
    # Read which page was requested (None falls back to page 1).
    page = request.GET.get('page')
    # Fetch the requested page object and hand it to the template.
    posts = paginator.get_page(page)
    return render(request,'board_chan.html',{'blogs':blogs,'posts':posts})
def detail(request, blog_id):
    """Render a single post, 404ing when *blog_id* does not exist."""
    blog_detail = get_object_or_404(Blog_chan, pk=blog_id)
    return render(request, 'detail_chan.html', {'blog': blog_detail})
def new(request):
    """Render the empty "write a post" form."""
    return render(request, 'new_chan.html')
def create(request):
    """Create a post from query-string parameters and redirect to it.

    NOTE(review): mutating state from GET bypasses CSRF protection and can
    be triggered by a simple link; this should be a POST form.  Also
    ``timezone.datetime.now()`` returns a *naive* datetime — with USE_TZ
    enabled, ``timezone.now()`` is the aware equivalent.  Missing keys
    raise KeyError (500) rather than a validation error.
    """
    blog = Blog_chan()
    blog.title = request.GET['title']
    blog.author = request.GET['author']
    blog.body = request.GET['body']
    blog.pub_date = timezone.datetime.now()
    blog.save()
    return redirect('/chan/' + str(blog.id))
def delete(request,blog_id):
    """Delete the post and return to the site root.

    NOTE(review): destructive action reachable via GET with no
    authentication/CSRF check visible here — confirm URL routing guards it.
    """
    blog = get_object_or_404(Blog_chan,pk=blog_id)
    blog.delete()
    return redirect('/')
def file(request, blog_id):
    """Serve the post's attachment as a plain-text file download."""
    post = get_object_or_404(Blog_chan, pk=blog_id)
    # Last path segment of the stored file name becomes the download name.
    attachment_name = post.files.name.rsplit('/', 1)[-1]
    response = HttpResponse(post.files, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=' + attachment_name
    return response
"jun@junui-MacBookPro.local"
] | jun@junui-MacBookPro.local |
d29987a91f08fec4c9898f78a315160145f2d1ad | f0a45ed9cf352a2cfc8deef130b51a004ccbcac0 | /EstruturaSequencial/Exer10.py | b3444980abda5ab780b0c86ce388cd84b907b4e5 | [] | no_license | AlexBastosMorais/Exercicios-Python | e843f4399a6a5a4b7d29a33776e6c29feaff6140 | 2e3476ceed86c3d71e39dc852636540434357fa1 | refs/heads/master | 2023-07-01T01:16:50.879599 | 2020-02-07T18:01:13 | 2020-02-07T18:01:13 | 236,744,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | print ("---2 inteiros e um Real----")
# Menu choice: "a", "b" or "c" selects which of the three formulas to run.
fun = input ()
if fun == "a":
    # a) double of the first integer plus half of the second.
    n1 = int(input("Informe um valor inteiro: "))
    n2 = int(input("Informe outro valor inteiro: "))
    a = (n1 * 2) + (n2 / 2)
    print("O dobro do primeiro com a metade do segundo é igual: ", a)
elif fun == "b":
    # b) triple of the integer plus the real number.
    n1 = int(input("Informe um valor inteiro: "))
    n3 = float(input("Informe um valor real: "))
    b = (n1 * 3) + n3
    print("A soma do triplo do primeiro com o terceiro: ", b)
elif fun == "c":
    # c) cube of the real number.
    n3 = float(input("Informe um valor real: "))
    c = n3 * n3 * n3
    print("O terceiro elevado ao cubo: ", c)
| [
"alexbastos0901@gmail.com"
] | alexbastos0901@gmail.com |
ff539eb27fb795776cd04c93a55f929e87dec27c | 78cc5105c579c44e93baebf6ddc64a59f05d394e | /codes/FTSEanalysis.py | c3fdc682784f5cf30eb9677a0dfaeb7b1a986bf0 | [] | no_license | ankyriakide/FakeNewsIdentifier-ThesisRepository | 4a13e7e0680b52a3675be62f3cd4870f56d0a45a | debf0dd70fbbb37a14b65adfd1105f53c041a477 | refs/heads/master | 2020-07-24T11:26:19.800325 | 2019-09-13T21:18:10 | 2019-09-13T21:18:10 | 207,908,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 23:14:55 2019
@author: ankyriakide
"""
#FTSE analysis
import numpy as np
import pandas as pd
from pydoc import help
from scipy.stats.stats import pearsonr
import statsmodels.api as sm
from statsmodels.tsa.stattools import grangercausalitytests
### 2Label
#FTSE100 Closing Price VS Proportion
data1 = pd.read_csv('FTSEcloseprice_FN_2label.csv')
#Granger Causality Test https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.grangercausalitytests.html
granger_test_result_1 = grangercausalitytests(data1, maxlag=7, verbose=True)
#Pearson Correlation Coefficient https://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
X1 = data1.iloc[:,0].values
Y1 = data1.iloc[:,1].values
pearsonr(X1,Y1)
#FTSE Volume VS Proportion
data2 = pd.read_csv('FTSE%volume_FN_2label.csv')
#Granger Causality Test https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.grangercausalitytests.html
granger_test_result_2 = grangercausalitytests(data2, maxlag=7, verbose=True)
#Pearson Correlation Coefficient https://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
X2 = data1.iloc[:,0].values
Y2 = data1.iloc[:,1].values
pearsonr(X2,Y2)
### 3Label
#FTSE100 Closing Price VS Proportion
data11 = pd.read_csv('FTSEcloseprice_FN_3label.csv')
#Granger Causality Test https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.grangercausalitytests.html
granger_test_result_11 = grangercausalitytests(data11, maxlag=7, verbose=True)
#Pearson Correlation Coefficient https://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
X11 = data1.iloc[:,0].values
Y11 = data1.iloc[:,1].values
pearsonr(X11,Y11)
#FTSE Volume VS Proportion
data22 = pd.read_csv('FTSE%volume_FN_3label.csv')
#Granger Causality Test https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.grangercausalitytests.html
granger_test_result_22 = grangercausalitytests(data22, maxlag=7, verbose=True)
#Pearson Correlation Coefficient https://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
X22 = data1.iloc[:,0].values
Y22 = data1.iloc[:,1].values
pearsonr(X22,Y22)
| [
"noreply@github.com"
] | noreply@github.com |
67ded6b02fa0369ac11a141e0092a08af523d819 | 6849a4911767221b29fd4f455a1eb774c37e91a6 | /NFSIM-master/Revised/controller.py | c471c8adfdf2ed27b89e54a671cd2200dbe33928 | [] | no_license | bmwynne/university | bc81a7352a6650c45ccc031a22e2692db410b0de | 6e7e156fdf739fce4bef6fcfe080bd2fa4d1d62b | refs/heads/master | 2021-08-14T14:04:27.526910 | 2017-11-15T23:19:42 | 2017-11-15T23:19:42 | 110,896,360 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | #controller for creating the nodes using os.system
import os
import sys
import time
#from myhdl import *
#import pcap
import socket
import select
import Queue
import string
import subprocess
topoType = sys.argv[1] #don't read from sys.argv, use cmd line options
numNodes = sys.argv[2]
counter = 1
numNodes = int(numNodes)
rank = 0
nodes = 1
size = numNodes
#use python command line options instead of if statements if showing to people
#print sys.argv
if topoType == "ring": # test case for controller , supplying Ring topology type
while counter < numNodes:
# os.system("python node.py ./" + str(nodes) +" ./" + str(nodes + 1) + " " + str(head))
# head = 0
# nodes = nodes + 1
# counter = counter + 1
# if counter == numNodes:
# headNode = 1
# os.system("python node.py ./" + str(nodes) + " ./"+ str(headNode) + " " + str(head))
subprocess.Popen("python node.py ./" + str(nodes) +" ./" + str(nodes + 1) + " " + str(rank) + " " + str(size), shell=True)
rank = rank + 1
nodes = nodes + 1
counter = counter + 1
if counter == numNodes:
headNode = 1
os.system("python node.py ./" + str(nodes) + " ./"+ str(headNode) + " " + str(rank) + " " + str(size))
# counter = 1
# head = 1
# nodes = 1
#tree topo - simult or individually
| [
"bmwynne@Brandons-MacBook-Pro.local"
] | bmwynne@Brandons-MacBook-Pro.local |
76f6231eb4c58cf12ede8318e3fc5e3477d7a2df | d783eb1f2368cb09a3a57e2f6a6a2ffa12b953d0 | /backend/Database/Controllers/Fluxo.py | 27fadd74a91cdf3d027a238b6b3195e3b254c536 | [
"MIT"
] | permissive | AEDA-Solutions/matweb | 8c1c843e55d86b6ab61adc830d5a2c150f11fe9e | 577170f060fbf7d077efc4ae3ac73b98e46c49b8 | refs/heads/master | 2021-01-22T19:32:00.940622 | 2017-11-30T17:20:46 | 2017-11-30T17:20:46 | 85,203,392 | 1 | 3 | null | 2017-06-30T10:48:54 | 2017-03-16T14:12:05 | HTML | UTF-8 | Python | false | false | 1,140 | py | from Framework.BancoDeDados import BancoDeDados
from Database.Models.Fluxo import Fluxo as ModelFluxo
class Fluxo(object):
	"""Data-access controller for the `fluxo` table."""

	def pegarFluxos(self, condicao, valores):
		"""Return every row matching `condicao` as a list of ModelFluxo.

		Renamed from `pegarFluxo`: the original definition was silently
		shadowed by the single-row `pegarFluxo` declared just below it.
		"""
		fluxos = []
		for fluxo in BancoDeDados().consultarMultiplos("SELECT * FROM fluxo %s" % (condicao), valores):
			fluxos.append(ModelFluxo(fluxo))
		return fluxos

	def pegarFluxo(self, condicao, valores):
		"""Return the single row matching `condicao`, wrapped in ModelFluxo."""
		return ModelFluxo(BancoDeDados().consultarUnico("SELECT * FROM fluxo %s" % (condicao), valores))

	def inserirFluxo(self, fluxo):
		"""Insert `fluxo` and populate its generated primary key."""
		BancoDeDados().executar("INSERT INTO fluxo (periodo_inicio, periodo_fim, id_curso, id_opcao) VALUES (%s,%s,%s,%s) RETURNING id", (fluxo.periodo_inicio,fluxo.periodo_fim,fluxo.id_curso,fluxo.id_opcao))
		fluxo.id = BancoDeDados().pegarUltimoIDInserido()
		return fluxo

	def removerFluxo(self, fluxo):
		"""Delete the row whose id matches `fluxo.id`.

		Fix: parameters must be a sequence; `(str(fluxo.id))` was a bare
		string, so the driver iterated its characters as parameters.
		"""
		BancoDeDados().executar("DELETE FROM fluxo WHERE id = %s", (fluxo.id,))

	def alterarFluxo(self, fluxo):
		"""Update all editable columns of the row identified by `fluxo.id`.

		Fix: the `WHERE id = %s` placeholder previously had no matching
		value in the parameter tuple.
		"""
		SQL = "UPDATE fluxo SET periodo_inicio = %s, periodo_fim = %s, id_curso = %s, id_opcao = %s WHERE id = %s"
		BancoDeDados().executar(SQL, (fluxo.periodo_inicio, fluxo.periodo_fim, fluxo.id_curso, fluxo.id_opcao, fluxo.id))
| [
"andrey@localhost.localdomain"
] | andrey@localhost.localdomain |
f6dc05455cd47ae55195c50ba74336f3c0fbbd8c | 9a5505ebc6a4a9f7d710e1ef8ce488b578b63c6e | /pycon/sponsorship/migrations/0008_remove_obsolete_benefit_records.py | e8122e5fb061226ee878cdeaa3743c783bc26e75 | [
"BSD-3-Clause"
] | permissive | arpitjainn189/pycon | 9dabbfd6119a1b2a957469d40e223d063bb91494 | 492c47820d6dc546e79c707180b3c7b3925e8e72 | refs/heads/master | 2022-12-23T15:53:53.365038 | 2020-10-01T09:57:08 | 2020-10-01T09:57:08 | 300,229,565 | 0 | 0 | BSD-3-Clause | 2020-10-01T09:54:30 | 2020-10-01T09:54:29 | null | UTF-8 | Python | false | false | 1,390 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
# Benefit rows deleted by `forward`; `back` recreates them verbatim from here.
OBSOLETE_BENEFITS = [
    {'name': 'Company URL',
     'type': 'simple',
     },
    {'name': 'Company Description',
     'type': 'text',
     },
    {'name': 'Web logo',
     'type': 'weblogo',
     }
]
def forward(apps, schema_editor):
    """Delete the obsolete Benefit rows plus everything referencing them."""
    Benefit = apps.get_model('sponsorship', 'Benefit')
    BenefitLevel = apps.get_model('sponsorship', 'BenefitLevel')
    SponsorBenefit = apps.get_model('sponsorship', 'SponsorBenefit')
    alias = schema_editor.connection.alias
    obsolete_names = [entry['name'] for entry in OBSOLETE_BENEFITS]
    # Dependent rows first, then the Benefit records themselves.
    for referencing_model in (BenefitLevel, SponsorBenefit):
        referencing_model.objects.using(alias).filter(benefit__name__in=obsolete_names).delete()
    Benefit.objects.using(alias).filter(name__in=obsolete_names).delete()
def back(apps, schema_editor):
    """Reverse migration: recreate each obsolete Benefit row."""
    Benefit = apps.get_model('sponsorship', 'Benefit')
    alias = schema_editor.connection.alias
    for entry in OBSOLETE_BENEFITS:
        Benefit.objects.using(alias).get_or_create(**entry)
class Migration(migrations.Migration):
    # Applies after sponsorship migration 0007; RunPython pairs `forward`
    # with its explicit inverse `back` so the migration is reversible.
    dependencies = [
        ('sponsorship', '0007_auto_20150721_1533'),
    ]
    operations = [
        migrations.RunPython(forward, back),
    ]
| [
"dpoirier@caktusgroup.com"
] | dpoirier@caktusgroup.com |
0c44e067fdd0166bb18f52e65b3576b3eda557cb | 744963edc345c12696ab3048c008769c9955160c | /第十七章_GUI_Tkinter库/venv/Scripts/pip3-script.py | 3350fdf92359d4a1144cca54cb39ac50adeefbbc | [] | no_license | qixujun/Learn-Python-with-a-zero-foundation-primer | 958ab020131bfd44765b71917b6ad2efb2237609 | fcfd9dc69fbcc5f0456188cab35b05793dd2f98e | refs/heads/master | 2022-11-06T06:29:02.090266 | 2020-06-19T00:44:31 | 2020-06-19T00:46:05 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 449 | py | #!F:\python代码文件\小甲鱼入门学习\第十七章_GUI_Tkinter库\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools wrapper: strip the "-script.py"/".exe" suffix
    # from argv[0] so pip sees its canonical program name, then hand control
    # to pip's console entry point and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"2685764101@qq.com"
] | 2685764101@qq.com |
0d568e564026f66f38a7a55aeaa4e39b6c3b6cff | 80ea4c1ce04ee8e0ecd85ee71f8bffdbcbd368aa | /iupick/settings/testing.py | 12021d836718a468cf6d733409edc755763667f5 | [
"MIT"
] | permissive | Oswaldinho24k/geo-csv | 659ad24f5e8bcecc869143a61e58b38260cc1901 | 0100435c5d5a5fd12133b376b305e8fa79ddb8f0 | refs/heads/master | 2020-03-15T21:20:34.095967 | 2018-05-06T15:45:34 | 2018-05-06T15:45:34 | 132,353,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | # -*- coding: utf-8 -*-
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Prefix expected on secret keys in test mode (the 'sk_test_' vs live-key
# convention); presumably the production settings override it — TODO confirm.
AUTH_SECRET_PREFIX = 'sk_test_'
"oswalfut_96@hotmail.com"
] | oswalfut_96@hotmail.com |
7425d166b12e42e337dca761b662a1053349afe7 | 7dde8293d4ce030e4817783e3bcc144669e411a8 | /PS2/PS2.py | 754b1aaa22209e1ae58f176f451aed3e9918a913 | [] | no_license | mikeonator/MathModeling | 861143ad6c95879d38b01b7dbb4782dea9a5e096 | 0e51886f5926dca1a11a68499b78ea8e94c7fded | refs/heads/master | 2021-01-08T09:00:28.616275 | 2020-05-04T22:12:41 | 2020-05-04T22:12:41 | 241,979,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | # Michael Audi - Collegiate School Class of 2020
## Math Modeling Problem Set Two Answers
import matplotlib.pyplot as plt
def problemOne():
    """Print the hand-written answers to Problem One, parts a-e."""
    # The numeric values are transcribed answers — presumably read off the
    # course's SIR graph, which is not part of this file.
    print("Problem One\n")
    print("a) 12 to 13 days and 13,000 people")
    print("b) Initial: 42,000 people and 16 days")
    print("c) About 33 days to reach 25000 recovered and eventually everyone recovers")
    print("d) Max Increase: ~5 days and Max Decrease: ~16 days")
    print("e) Area under the curve from t = range(1,21) << (t=1-20)\n or also doable with (R@t=20 + I@t=20)\n")
def problemTwo():
    """Print S/I/R values for t = 0..10 from the rates at t = 0.

    NOTE(review): the rates are computed once from the initial state and the
    state variables are never advanced (the original's newS/newI/newR copies
    were no-ops), so every line is a linear extrapolation from t = 0 rather
    than a step-by-step Euler integration.
    """
    print("\nProblem Two\n")
    # Initial population split.
    susceptible, infected, recovered = 44000, 500, 0
    print("S=" + str(susceptible))
    print("I=" + str(infected))
    print("R=" + str(recovered))
    trans_coef = 0.00001
    # Fixed derivatives evaluated at t = 0.
    s_rate = (-1*trans_coef) * susceptible * infected
    i_rate = trans_coef * susceptible * infected - (infected/14)
    r_rate = infected/14
    for t in range(0, 11):
        print("time = " + str(t))  # Print current t value
        print("\tS = " + str(susceptible + s_rate*t)
              + "\n\tI = " + str(infected + i_rate*t)
              + "\n\tR = " + str(recovered + r_rate*t))  # Print S, I, and R for value t
        print(5*"_" + "\n")  # Print a line
def problemThree():
    # Stub: only the section header is printed; the problem was left unanswered.
    print("\nProblem Three\n")
def main():
    # Print the assignment header, then run each problem in order.
    print("Problem Set 2\n" + 10*"_" + "\n")
    problemOne()
    problemTwo()
    problemThree()
if __name__ == '__main__':
    main()
"maudinyc@gmail.com"
] | maudinyc@gmail.com |
aa759baf95d44890c8a1a6198aaec3e27386890a | 0202337b29f6467d56030dde647314b89bb33079 | /Video Downloader/video_downloader.py | d73d4ef24c99305d539ad6c5947894fa31557477 | [
"Apache-2.0"
] | permissive | Tess314/python-projects | 03bae98866bbbb04ad1c3691c819026a20caef82 | 01d2e85d717c8b31b78bc21219baf5c0cfd48e91 | refs/heads/main | 2023-05-03T01:51:23.106336 | 2021-05-09T16:33:34 | 2021-05-09T16:33:34 | 365,798,866 | 0 | 0 | Apache-2.0 | 2021-05-09T16:32:11 | 2021-05-09T16:32:10 | null | UTF-8 | Python | false | false | 805 | py | from tkinter import *
from pytube import YouTube
# Fixed-size 500x300 main window.
root = Tk()
root.geometry('500x300')
root.resizable(0,0)
root.title("DataFlair-youtube video downloader")
Label(root,text = 'Youtube Video Downloader', font ='arial 20 bold').pack()
##enter link
# `link` holds the URL pasted into the entry widget below.
link = StringVar()
Label(root, text = 'Paste Link Here:', font = 'arial 15 bold').place(x= 160 , y = 60)
# NOTE(review): .place() returns None, so link_enter is None, not the Entry
# widget; harmless here because it is never used again.
link_enter = Entry(root, width = 70,textvariable = link).place(x = 32, y = 90)
#function to download video
def Downloader():
    # Fetch the pasted URL and download the first available stream (pytube),
    # then show a confirmation label. Blocks the UI while downloading.
    url =YouTube(str(link.get()))
    video = url.streams.first()
    video.download()
    Label(root, text = 'DOWNLOADED', font = 'arial 15').place(x= 180 , y = 210)
Button(root,text = 'DOWNLOAD', font = 'arial 15 bold' ,bg = 'pale violet red', padx = 2, command = Downloader).place(x=180 ,y = 150)
root.mainloop()
"chandwadkar28@gmail.com"
] | chandwadkar28@gmail.com |
f30face88618b2e53b4b5aed2c70c8cffcfda98a | e2b2d81d1ea5beeb79d498dd4962fb5ed40e4678 | /settings.py | 322bb0ae98666366fece731a9d367f8abb04e868 | [] | no_license | palewire/dorling-cartogram-example | ddd70d3c310d323f3c896a473d032ccf67da182c | 4892546b9a97aa6de0c1f3f0fe6e130319ce8378 | refs/heads/master | 2021-01-19T20:16:26.152106 | 2011-10-04T04:26:30 | 2011-10-04T04:26:30 | 2,502,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,923 | py | # Django settings for project project.
import os
ROOT_PATH = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# SECURITY(review): default postgres/postgres credentials are committed here;
# fine for a local demo, but load them from the environment for any shared deploy.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'dorling', # Or path to database file if using sqlite3.
        'USER': 'postgres', # Not used with sqlite3.
        'PASSWORD': 'postgres', # Not used with sqlite3.
        'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = 'http://palewire.s3.amazonaws.com/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY(review): the secret key is committed to version control — rotate it
# and read it from the environment before deploying anywhere public.
SECRET_KEY = '5um6b5gjouo_#2ymj1+_&y&pfm6aje8+mpg5%#=z&=1q31awgl'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.gis',
'us_states',
'dorling',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"ben.welsh@gmail.com"
] | ben.welsh@gmail.com |
ccd17e1ca3cb5d28d045a860d29d9f14a1ad9b88 | 27827a35fab762a5823c6d779dd7bc23cd9a4d1a | /python/3danimate.py | 7fbf5b91775ff3e919ad174db84c12225cd08193 | [] | no_license | LucaPaganin/SolarSystem | ea45061e512c1723d9e094bac3c875203a50fc41 | 65c1e5617e931ac2a5562bdc818aebb79b0a2d20 | refs/heads/master | 2023-04-01T09:24:00.782010 | 2023-03-17T21:54:33 | 2023-03-17T21:54:33 | 230,935,437 | 2 | 0 | null | 2020-01-30T10:34:56 | 2019-12-30T15:08:32 | Python | UTF-8 | Python | false | false | 2,897 | py | from matplotlib import pyplot as plt
import matplotlib
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import animation
from pathlib import Path
import re
import json
import functions as fnc
import argparse
#Parameters parsing
parser = argparse.ArgumentParser(description="A python script to animate the planets motion starting from a txt file containing the simulated data.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--filepath", required=False, default="../c++/output/temporal_evolution.txt", help="The name of the file containing time evolution data.")
parser.add_argument("-p", "--planets", nargs="+", default=None, help="The list of the planets to be plotted.")
args = parser.parse_args()
filepath = Path(args.filepath)
planets = args.planets
#Setting up paths and load data into dictionary of numpy arrays
solar_system = fnc.load_from_file(filepath=filepath)
if planets is None:
planets = list(solar_system.keys())
"""
Choose the planets to animate and put data into right form for animation
The data matrix for the given planet must have the following shape:
3 x Ntimesteps
"""
#Check if the chosen planets are in the list loaded from the file:
if not all([p in solar_system.keys() for p in planets]):
print("Some of the planets you have chosen are not in the full list of available planets. The available planets are:")
print(*list(solar_system.keys()), sep="\n")
quit()
planets_data = [solar_system[k] for k in planets]
fig = plt.figure()
ax = p3.Axes3D(fig)
colors = matplotlib.cm.rainbow(np.linspace(0, 1, len(planets)))
lines = []
for p,c in zip(planets, colors):
line, = ax.plot(solar_system[p][0, 0:1],
solar_system[p][1, 0:1],
solar_system[p][2, 0:1],
label=p, markersize=5,
marker='o',
color=c)
lines.append(line)
limits = fnc.get_plot_limits(planets_data)
# Setting the axes properties
ax.set_xlabel('X [A.U.]')
ax.set_ylabel('Y [A.U.]')
ax.set_zlabel('Z [A.U.]')
ax.set_xlim3d([limits[0,0], limits[0,1]])
ax.set_ylim3d([limits[1,0], limits[1,1]])
ax.set_zlim3d([limits[2,0], limits[2,1]])
ax.legend(loc="lower left")
ani = animation.FuncAnimation(fig,
fnc.update_solar_system,
planets_data[0].shape[1],
fargs=(planets_data, lines),
interval=fnc.get_animation_interval(planets_data[0], 2.5),
blit=False)
#ani.save('animation.gif')
if args.moviefilename is not None:
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=90.7, metadata=dict(artist='Me'), bitrate=3600)
ani.save(args.moviefilename, writer=writer)
plt.show()
| [
"luca.paganin@ge.infn.it"
] | luca.paganin@ge.infn.it |
c8ae0bea34c2fbf2d7548c7bab833e44ef96f134 | 544e70c552e29dd0e4944ca97fd326c2e8d9e575 | /python2-CLC2.py | dd7f154f38604574626e2de548e7e7f071e9d95a | [] | no_license | st8skillz/Python | 01336d7df5baf292cc3369ec63ee98c3b24edff1 | 5448893aa6a02363ad0c2e3bbd48798bb0f8c166 | refs/heads/master | 2022-06-12T08:05:23.188051 | 2020-05-04T22:50:48 | 2020-05-04T22:50:48 | 261,310,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,278 | py | #!/usr/bin/python3
"""
Name: Spencer Goldwin
Date: 04/22/2020
Course: CIT 29900
Description: Python homework 2 -- covering types, operators, loops, collections, and classes
Points: ?/44
Instructions: Complete each numbered task below. Use the space below each task to complete your work.
Most of the tasks will require multiple lines of code so adjust your spacing as needed.
Please be sure to leave at least 1 empty line above and below each numbered task.
This assignment is worth 44 points broken down as 2 points per task and 10 points simply
by submitting a completed script that runs without needing to be modified/corrected.
"""
#--------------------------------------------------------------------------------------------------------------
import nmap
import pprint
import nmap
import pprint
# Fixture collections for the numbered tasks below: a tuple, a set, a list
# and a dict respectively.
video_games = ("Fallout", "Skyrim", "Zelda", "Fortnite", "WoW", "Final Fantasy", "Halo", "Animal Crossing")
view_askew = {"Jay", "Silent Bob", "Randal", "Dante", "Holden", "Banky", "Schooner"}
villians = ["Darth Vader", "Hans Gruber", "Voldemort", "Sauron", "Thanos", "Rick Astley"]
best_sellers = {"Douglas Adams": "The Hitchhiker's Guide to the Galaxy",
                "Robert Jordan": "The Wheel of Time",
                "J. R. R. Toklien": "The Hobbit",
                "Brent Weeks": "The Black Prism",
                "Brandon Sanderson": "The Way Of Kings",
                "Winnie the Pooh": "How To Get Away With Murder"}
# Scanner and pretty-printer used by tasks 15-17; presumably requires the
# nmap binary to be installed alongside the python-nmap package.
nm = nmap.PortScanner()
pp = pprint.PrettyPrinter(indent=4)
#--------------------------------------------------------------------------------------------------------------
### 1. Create 2 variables called fnamdfe and lname, then assign them your first and last names (strings).
fname = "Spencer"
lname = "Goldwin"
### 2. Write a print statement that concatenates and prints your first and last names with a space between them.
# Fix: print(fname, "", lname) emitted TWO spaces (the default separator
# surrounds the empty string); printing the two names directly gives one.
print(fname, lname)
### 3. Create a function that accepts 2 arguments. Inside the function, use a print statement to print the
### sum of the 2 arguments.
def add(x = 0,y = 0):
    # NOTE(review): task 3 asks the function to *print* the sum; this version
    # returns it instead. It is also shadowed by the redefinition below.
    result = x + y
    return result
print(add())
### 4. Call the function above and pass it 2 numbers -- 1 whole number (integer) and 1 decimal number (float).
def add(x,y):
    # Redefines `add` (no defaults) for task 4; replaces the version above.
    result = x + y
    return result
print(add(5,2.5))
### 5. Create another function, but this time make it accept 1 argument. Have this function square the value
### that passed into it and then return it (not print it).
def square(x):
    # Task 5: return (not print) the square of the argument.
    result = x * x
    return result
### 6. Call the function above, pass it an integer or float, and then print its the returned value.
def square(x):
    # NOTE(review): identical redefinition — shadows the version above.
    result = x * x
    return result
print(square(6))
### 7. Create a new variable and cast its value, the integer 42, into a string. Print the variable contents.
# Fix: task 7 asks to *cast* the integer 42 to a string, not write a literal.
num1 = str(42)
print(num1)
### 8. Prove that the variable above was correctly cast to a string by using the type() function. Print this
###    result to the screen in the same statement.
print('Type of num1 is :', type(num1))
### 9. Create a function that accepts 1 argument. Inside this function write an if statement that compares
### the length of the string "Python is a lot of fun" against the argument. If the argument is larger,
### return the boolean true. Otherwise, return false.
def compare (narg):
    # Task 9: True when narg exceeds the 22-character reference string.
    # NOTE(review): shadowed by the identical redefinition a few lines below.
    str_leng = len("python is a lot of fun")
    if narg > str_leng:
        return True
    else:
        return False
### 10. Call the function above and pass it an integer. Print the result.
def compare(narg):
    """Return True when narg is greater than the reference string's length (22)."""
    return narg > len("python is a lot of fun")
print(compare(32))
### 11. Use a single print statement to print the data types of the 4 collection objects at the top of this
### script. Those objects are videos_games, view_askew, villians, and best_sellers.
# Expected output: tuple, set, list, dict (see the literals at the top of the file).
print(type(video_games), type(view_askew), type(villians), type(best_sellers))
### 12. Create a while loop that prints each item inside the object video_games so long as the length of
### the object is less than or equal to a new counter that you initialize outside the loop (which you
### must decrement inside the loop).
### 13. Use one of the methods to remove "Rick Astley" from the villians object. Then use a different method
### to remove "Schooner" from the view_askew object. In separate print statements, print each of the
### objects to show that the items have been removed.
villians.remove("Rick Astley")
# Completes task 13: a *different* method (set.discard) removes "Schooner",
# and each object is printed in its own statement as instructed.
view_askew.discard("Schooner")
print(villians)
print(view_askew)
### 14. Use a for loop to loop through each item in the best_sellers object. Print both the key and
### the value for each item.
# Task 14: print each author/title pair from the best_sellers dict.
for author, title in best_sellers.items():
    print(author, title)
### 15. Near the top of this script, nmap and pprint have been imported. Several lines below, a class from each
### module has been instantiated (PortScanner and PrettyPrinter) resulting in the nm and pp objects. These
### items can work together, but must be used in a specific order. First, use the nmap scan method to scan
### your localhost and port 22 by passing the arguments 'localhost' and '22' (both strings) into the scan
### method (i.e., call the nm object and use the scan method that's part of this object).
# Task 15: runs at import time and probes TCP port 22 on localhost; presumably
# requires the nmap binary to be installed for python-nmap to work.
nm.scan('localhost','22')
### 16. As a point of reference, you're now going to print the results of the scan. The nmap scan results are
### nested Python objects. For this example, you're going to print the results of the single host and port
### scan you initiated above. To do that, you must print the nm object at the index of '127.0.0.1' which
### is the IP address for localhost (the loopback address). You'll do this using [] to reference the index.
# Fix: the original line called nm(...) with an unbalanced parenthesis — a
# syntax error — instead of indexing with [] as task 16 instructs.
print(nm['127.0.0.1'])
### 17. Lastly, the printed scan results above come out as a heap of information that's not very attractive. Use
###     the pprint method of the pp object to prettify the scan results. (Hint: replace your print method with
###     the pp.pprint() method. This will give you a much cleaner view of the nest object types inside the nmap
###     scan results so that you can figure out how to work with that data.
# Fix: same unbalanced-paren/call error as task 16; use [] indexing here too.
pp.pprint(nm['127.0.0.1'])
"noreply@github.com"
] | noreply@github.com |
ed11b6da848a6c6df195ea9ce18fe93d0da14a14 | a6913660d858eeec8e3ab80045efec7de77b0302 | /testing/modelsTest/main/apps/dataApp/views.py | 712526602b2305b8b90afc3be62ca9b6fb9ff243 | [] | no_license | alexanderhunter80/recipe-project | 6a8d5f80a7fa8bf78edca9f70392ac77608fbd87 | 9e35b1163dddf63c97bcc8ee934b2658909b4eac | refs/heads/master | 2022-12-11T00:53:03.770940 | 2018-05-25T15:54:22 | 2018-05-25T15:54:22 | 134,305,991 | 0 | 0 | null | 2021-06-10T20:19:12 | 2018-05-21T18:01:41 | HTML | UTF-8 | Python | false | false | 797 | py | from django.shortcuts import render, HttpResponse
from .models import *
from django.contrib import messages
from django.contrib.auth.models import User
def index(request):
    """Render the index page after poking at a fixed Profile row.

    NOTE(review): the hardcoded id=2 lookup and the "GrizzleyBear" rename look
    like leftover debug/test code — confirm before shipping.
    """
    # Single fetch instead of the original two identical queries; the unused
    # `response = "testing"` local was removed.
    profile_info = Profile.objects.get(id=2).OAuth_info
    print(profile_info.first_name)
    profile_info.username = "GrizzleyBear"
    profile_info.save()
    print(profile_info.username)
    return render(request, "dataApp/index.html")
def create(request):
    """Validate the submitted recipe; flash errors and return home on failure,
    otherwise redirect to the recipe page."""
    # Fix: `redirect` was never imported at module level (only render and
    # HttpResponse are), so both return statements raised NameError.
    from django.shortcuts import redirect
    response = Recipe.objects.recipe_validator(request.POST)
    if not response['status']:
        for error in response['errors']:
            messages.error(request, error)
        # Fix: the stray `messages` positional argument was dropped — redirect
        # resolves '/' directly and silently ignored it anyway.
        return redirect('/')
    return redirect('/recipe')
def recipe(request):
    """Render the recipe page."""
    return render(request, "dataApp/recipe.html")
"jghtch@gmail.com"
] | jghtch@gmail.com |
2bb29f3a8f5b1b7fbebbe190a039627e34f71d57 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_325/ch30_2019_08_26_19_33_38_456658.py | 856f9fb13cced922caa07303d99b2963d8c3cf61 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | import math
def jaca(v, o):
    """Classify a projectile throw: speed `v` (m/s), launch angle `o`.

    Uses the range formula d = v^2 * sin(2*o) / g with g = 9.8; the target
    sits at 100 m with a +/-2 m tolerance.
    NOTE(review): math.sin expects radians, yet the sample call below passes
    45.0, which looks like degrees — confirm the intended unit.
    """
    distance = v ** 2.0 * math.sin(2.0 * o) / 9.8
    if distance < 98.0:
        return "Muito perto"
    if distance <= 102.0:
        return "Acertou!"
    return "Muito longe"
print(jaca(5.0, 45.0))
"you@example.com"
] | you@example.com |
3f2965e0c1071535736a0f8cee0a336628ca67e0 | 1385cf00f550ad38378227f62c49bb0cd05e1b04 | /leecode/easy/207/1365.py | 18f68a18946e06d936942586dac1d72c48b110fa | [] | no_license | liucheng2912/py | 4a09652fa52a1f92e8d8dd1239f9c128248fc10e | d40f73450fa65b8dd4d59d8d92088382fc573d2a | refs/heads/master | 2023-03-15T21:07:03.456017 | 2021-03-11T09:15:30 | 2021-03-11T09:15:30 | 334,900,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | '''
思路:
双重遍历
'''
def f(nums):
a=[]
for x in nums:
temp=0
nums1=nums[:]
nums1.remove(x)
for y in nums1:
if x>y:
temp+=1
a.append(temp)
return a
nums = [6,5,4,8]
print(f(nums))
| [
"liucheng@fanruan.com"
] | liucheng@fanruan.com |
f66d1d8056bf5b8d52a3656800069e30e540d206 | 56a66c1e758e73c31e26e26c87ebe53bec68f8f3 | /graphBFS.py | f9be7330a0fc903e2358bd577b8c67683a29a13b | [] | no_license | rohanJa/LCM-LeetCodeMaychallenge- | 181ed33b3b02f49948234231c73291007470a19c | fd99026760cc9e0a8b696c1cd866792d8ded2186 | refs/heads/master | 2022-12-14T20:04:59.406723 | 2020-09-05T16:25:13 | 2020-09-05T16:25:13 | 260,500,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | graph = {
'A' : ['B','C'],
'B' : ['D', 'E'],
'C' : ['F'],
'D' : [],
'E' : ['F'],
'F' : []
}
visited = []
queue = []
queue.append('A')
visited.append('A')
while queue:
s=queue.pop(0)
for vertex in graph[s]:
if vertex not in visited:
queue.append(vertex)
visited.append(vertex)
print(visited) | [
"rohan.jain150300@gmail.com"
] | rohan.jain150300@gmail.com |
6dc0ac7d042b1950915b2898b7c5223a44ba9af5 | 86d884eb096ed599c6069e2844985aa6ec30cb6b | /finite_difference/diffusion_coefficient/analyse_AHL.py | 90363c09ea189ea4f7c21ba98bc5b006d7a2c5cf | [] | no_license | zcqsntr/synbiobrain | 46e770471dcfbc5082f271c4e1e5d8b694155780 | 66758554774c087b8c19c6d50fca5ea733b607f4 | refs/heads/master | 2022-11-10T16:28:45.888929 | 2022-10-11T09:07:53 | 2022-10-11T09:07:53 | 183,600,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py |
import sys
import matplotlib.backends.backend_pdf
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
sys.path.append('/home/neythen/Desktop/Projects/synbiobrain/')
from diffusion_sim import *
os.environ['PYOPENCL_CTX'] = '0'
nx = 300
ny = 300
node_radius = 20/40
node_dim = np.array([10, 10])
grid_corners = np.array([[-10, 10], [-10, 10]])
grid = SynBioBrainFD(grid_corners, nx, ny, 'float32')
vertex_positions = np.array([grid.get_node_position(i) for i in range(grid.n_nodes)])
barriers = ['1', '0.8', '0.6', '0.4', '0.2', '0.15', '0.1', '0.05', '0.01']
all_cohesive_ts = []
for barrier in barriers:
print(barrier)
activated_ts = np.load('/home/neythen/Desktop/Projects/synbiobrain/finite_difference/results/diffusion_factor/'+ barrier +'_barrier/output/GFP_ts.npy')
cohesive_ts = count_cohesive_nodes_FD(activated_ts, vertex_positions, node_dim, node_radius, grid_corners)
all_cohesive_ts.append(cohesive_ts)
all_cohesive_ts = np.array(all_cohesive_ts)
np.save('all_cohesive_ts.npy', all_cohesive_ts)
| [
"zcqsntr@ucl.ac.uk"
] | zcqsntr@ucl.ac.uk |
ca98618d119bd1de5693c8e54b3d7ffabdd9ab79 | d273eaa6846f6a99c7e18f101e9ca517ccab1bc3 | /test/test_ansiblest/domain/service/test/test_discovery.py | a6bc5f99ee0728f4aada74eae48b4a1f2b37f00a | [
"Apache-2.0"
] | permissive | xabierlaiseca/ansiblest | 2e55eba23e430907901d73902da80820c75ad526 | 399e571c4bf5617f41e7c741f7e90aa0fd678333 | refs/heads/master | 2021-01-09T20:45:27.316899 | 2016-07-17T17:25:47 | 2016-07-17T17:25:47 | 63,495,536 | 0 | 0 | null | 2016-07-17T17:25:47 | 2016-07-16T17:53:24 | Python | UTF-8 | Python | false | false | 11,588 | py | from ansiblest.domain.model.test import Test
from ansiblest.domain.service.test.discovery import find_tests
from mock import patch
from unittest import TestCase
class TestFindTestsFunction(TestCase):
TESTS_DIR = "tests"
@patch("ansiblest.domain.service.test.discovery.walk")
def test__extra_files__not_yaml_file(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "inventory" ], [ "setup.yaml", "teardown.yml", "test-data.txt" ] ]
]
expected_tests = []
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__extra_files__no_test_prefix(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "inventory" ], [ "setup.yaml", "teardown.yml", "other.yaml" ] ]
]
expected_tests = []
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__flat_tests_directory__only_tests(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "other-dir" ], [ "test-something-1.yaml", "test-something-2.yml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/test-something-1.yaml"),
Test(test_playbook=self.TESTS_DIR + "/test-something-2.yml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__flat_tests_directory__tests_and_inventory(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "inventory" ], [ "test-something-1.yaml", "test-something-2.yml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/test-something-1.yaml", inventory=self.TESTS_DIR + "/inventory"),
Test(test_playbook=self.TESTS_DIR + "/test-something-2.yml", inventory=self.TESTS_DIR + "/inventory")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__flat_tests_directory__tests_and_setup(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "test-something-1.yaml", "test-something-2.yml", "setup.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/test-something-1.yaml", setup_playbook=self.TESTS_DIR + "/setup.yaml"),
Test(test_playbook=self.TESTS_DIR + "/test-something-2.yml", setup_playbook=self.TESTS_DIR + "/setup.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__flat_tests_directory__tests_and_teardown(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "test-something-1.yaml", "test-something-2.yml", "teardown.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/test-something-1.yaml",
teardown_playbook=self.TESTS_DIR + "/teardown.yaml"),
Test(test_playbook=self.TESTS_DIR + "/test-something-2.yml",
teardown_playbook=self.TESTS_DIR + "/teardown.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__flat_tests_directories__tests_in_subdirectories(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "dir-1", "dir-2" ], [ "test-data.txt" ] ],
[ self.TESTS_DIR + "/dir-1", [ "inventory" ], [ "test-something-1.yaml", "setup.yaml", "teardown.yaml" ] ],
[ self.TESTS_DIR + "/dir-2", [ "inventory" ], [ "test-something-2.yaml", "setup.yaml", "teardown.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
inventory=self.TESTS_DIR + "/dir-1/inventory",
setup_playbook=self.TESTS_DIR + "/dir-1/setup.yaml",
teardown_playbook=self.TESTS_DIR + "/dir-1/teardown.yaml"),
Test(test_playbook=self.TESTS_DIR + "/dir-2/test-something-2.yaml",
inventory=self.TESTS_DIR + "/dir-2/inventory",
setup_playbook=self.TESTS_DIR + "/dir-2/setup.yaml",
teardown_playbook=self.TESTS_DIR + "/dir-2/teardown.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__hierarchy_tests_directories__inventory_from_parent(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "inventory" ], [] ],
[ self.TESTS_DIR + "/dir-1", [], [ "test-something-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
inventory=self.TESTS_DIR + "/inventory")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__hierarchy_tests_directories__setup_playbook_from_parent(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "setup.yaml" ] ],
[ self.TESTS_DIR + "/dir-1", [], [ "test-something-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
setup_playbook=self.TESTS_DIR + "/setup.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__hierarchy_tests_directories__teardown_playbook_from_parent(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "teardown.yaml" ] ],
[ self.TESTS_DIR + "/dir-1", [], [ "test-something-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
teardown_playbook=self.TESTS_DIR + "/teardown.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__hierarchy_tests_directories__preference_current_dir_inventory(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "inventory" ], [] ],
[ self.TESTS_DIR + "/dir-1", [ "inventory" ], [ "test-something-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
inventory=self.TESTS_DIR + "/dir-1/inventory")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__hierarchy_tests_directories__preference_current_dir_setup_playbook(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "setup.yaml" ] ],
[ self.TESTS_DIR + "/dir-1", [], [ "test-something-1.yaml", "setup.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
setup_playbook=self.TESTS_DIR + "/dir-1/setup.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__hierarchy_tests_directories__preference_current_dir_teardown_playbook(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "teardown.yaml" ] ],
[ self.TESTS_DIR + "/dir-1", [], [ "test-something-1.yaml", "teardown.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-1/test-something-1.yaml",
teardown_playbook=self.TESTS_DIR + "/dir-1/teardown.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__multiple_hierarchies_tests_directories__inventory_from_sibling_discarded(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [ "inventory" ], [] ],
[ self.TESTS_DIR + "/dir-1", [ "inventory" ], [] ],
[ self.TESTS_DIR + "/dir-2", [], [ "test-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-2/test-1.yaml",
inventory=self.TESTS_DIR + "/inventory")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__multiple_hierarchies_tests_directories__setup_playbook_from_sibling_discarded(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "setup.yaml" ] ],
[ self.TESTS_DIR + "/dir-1", [], [ "setup.yaml" ] ],
[ self.TESTS_DIR + "/dir-2", [], [ "test-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-2/test-1.yaml",
setup_playbook=self.TESTS_DIR + "/setup.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
@patch("ansiblest.domain.service.test.discovery.walk")
def test__multiple_hierarchies_tests_directories__teardown_playbook_from_sibling_discarded(self, walk_mock):
walk_mock.return_value = [
[ self.TESTS_DIR, [], [ "teardown.yaml" ] ],
[ self.TESTS_DIR + "/dir-1", [], [ "teardown.yaml" ] ],
[ self.TESTS_DIR + "/dir-2", [], [ "test-1.yaml" ] ]
]
expected_tests = [
Test(test_playbook=self.TESTS_DIR + "/dir-2/test-1.yaml",
teardown_playbook=self.TESTS_DIR + "/teardown.yaml")
]
actual_tests = find_tests(self.TESTS_DIR)
self.assertEqual(expected_tests, actual_tests)
walk_mock.assert_called_once_with(self.TESTS_DIR)
| [
"xabier@laiseca.me"
] | xabier@laiseca.me |
08a9c4bc87a98509952767645a5c9c4caf333fc4 | 0874031cb652dfe841e06a00b95d740601c6e828 | /Jose Guadarrama ch3 practice test.py | 026f434ed0227854fb4dd5a9fa6b88620fdca361 | [] | no_license | SolidDarrama/Python | 589cba95f9508479522147373d8a92b78cdc97ec | 60e512eb9ad61de8baff59b506fdabe14d06bc46 | refs/heads/master | 2021-06-30T16:53:38.981561 | 2017-09-19T20:16:58 | 2017-09-19T20:16:58 | 104,123,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | #Jose Guadarrama
#9/15/2014
#salary_amount.py
hourly_rate=0
ot_rate=0
reg_hours_worked=0
ot_hours_worked=0
weekly_pay=0
monthly_pay=0
yearly_pay=0
fname=0
lname=0
totalhrs=0
#variables added #2
overtimehrs=0
#user name input
fname=str(input('Enter Your First Name:'))
lname=str(input('Enter Your Last Name:'))
#user hourly rate input
hourly_rate=float(input('Enter Your Hourly Rate:'))
#user work hours
reg_hours_worked=int(input('Enter Your Hours of Work:'))
#if else statement
if reg_hours_worked > 40:
overtimehrs=reg_hours_worked - 40
ot_rate=overtimehrs*(hourly_rate*1.5)
weekly_pay=ot_rate + (40 *hourly_rate)
else:
weekly_pay=reg_hours_worked*hourly_rate
totalhrs=reg_hours_worked+ot_hours_worked
#user pay input
monthly_pay=weekly_pay*4
yearly_pay=monthly_pay*12
#print statements
print('\nSalary Summary for',lname,', ',fname)
print('\nTotal Hours Worked:\t\tHourly Rate:\tOvertime Rate:\t\tWeekly Salary:\t\tMonthly Salary:\t\tYearly Salary:')
print(totalhrs,'\t\t\t\t\t\t$',(format(hourly_rate, ',.2f')),'\t\t\t$',(format(ot_rate, ',.2f')),'\t\t\t\t$',(format(weekly_pay, ',.2f')),'\t\t\t$',(format(monthly_pay, ',.2f')),'\t\t\t$',(format(yearly_pay, ',.2f')))
| [
"noreply@github.com"
] | noreply@github.com |
94927c22d3d83709f7071e9ff9c1bc737fbde555 | 7b59b1468725ef20017d447f7aeaeee81d9934c0 | /venv/Scripts/pip3.7-script.py | d5be34d787e0347d5544a0ae03c25834408818e4 | [] | no_license | isaaclee12/DataCleaningLesson | f1faf9c04a0d38d3c32b374bb0e2dafe0c159583 | 26f47d070610d78e9f8a5544623af6649593244b | refs/heads/master | 2022-08-22T23:13:26.719677 | 2020-05-20T22:57:52 | 2020-05-20T22:57:52 | 265,703,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!C:\Users\isaac\Documents\DataCleaningLesson\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"isaac.lee@uvm.edu"
] | isaac.lee@uvm.edu |
bd3c614d453ba44555d79e289a0d4d923e611a74 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901050013/d11/main.py | 62fffbca9611008650d6ac289947ccf42f4a8d45 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 718 | py | import yagmail
import requests
import getpass
from pyquery import PyQuery
from mymodule.stats_word import stats_text_cn
response = requests.get('https://mp.weixin.qq.com/s/pLmuGoc4bZrMNl7MSoWgiA') # acquire the article link.
document = PyQuery(response.text)
content = document('#js_content').text() #acquire the article content.
result = str(stats_text_cn(content)) #convert the list type into string type.
sender = input('plese input your email address:')
password = getpass.getpass('please input your password:')
recipients = input('plese input the recipients:')
subject = input('please input the subject:')
yag = yagmail.SMTP(sender,password,'smtp.qq.com')
yag.send(to=recipients,subject=subject,contents=result) | [
"40155646+seven-tears@users.noreply.github.com"
] | 40155646+seven-tears@users.noreply.github.com |
2db42dee1688750e9f9b5361e4af2c9f36d228c3 | 5785d7ed431b024dd910b642f10a6781df50e4aa | /revise-daily/june_2021/walmart/10_triplet_sum_to_zero.py | e4e1d155945f4f6d79319f6ba48f01df9e967c5b | [] | no_license | kashyapa/interview-prep | 45d77324446da34d99bf8efedb3544b367b5523e | 7060c090c40602fb9c4778eace2078e1b51e235b | refs/heads/master | 2023-07-28T13:12:49.515299 | 2021-09-06T14:33:25 | 2021-09-06T14:33:25 | 403,706,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | def triplet_sum_to_zero(nums, target):
def find_target_pair_sum(t, left):
first = left - 1
right = len(nums)-1
while left < right:
if nums[left] + nums[right] == t:
res.append((nums[first], nums[left], nums[right]))
left += 1
right -= 1
while left < right and nums[left] == nums[left-1]:
left += 1
while left < right and nums[right] == nums[right+1]:
right -= 1
if nums[left] + nums[right] > t:
right -= 1
else:
left+=1
nums.sort()
res = []
for i in range(len(nums)-1):
if i == 0 or nums[i] != nums[i-1]:
find_target_pair_sum(target-nums[i], i+1)
| [
"schandra2@godaddy.com"
] | schandra2@godaddy.com |
537ff5660a06711e1738ebf1b6cfdb1f3c9ea47d | 87bf8ea26f6c28bce82ccdd9515c68d6341bd8c5 | /trading/celery.py | 6b455e28d01b1dde8036483d661a75eddc8dd195 | [] | no_license | aisamuel/real-time-forex-api | e9ac21f28f77aadae526df9a275487737d8d1155 | 08b1d0d129659a3b8735b21d7195cb756fdd6b47 | refs/heads/master | 2022-04-06T08:13:51.749351 | 2020-03-04T12:12:55 | 2020-03-04T12:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trading.settings')
app = Celery('trading')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
| [
"charliescene512@gmail.com"
] | charliescene512@gmail.com |
3020a1bd60c26eed6fd24abd50180f5b836aea2d | f78efc2e07269595f6f20ec1a312a4c1f2983190 | /check.py | c4a170e6cad28ef9bf0227d31c95e4a1efe43761 | [] | no_license | catthanh/StudyJam2021 | 8fafafd79919f13d3871c218b62e1753f756f1be | 408ebe4d5122688afc0564e3d5142178307f8e7f | refs/heads/main | 2023-07-17T15:57:55.165624 | 2021-09-01T14:58:17 | 2021-09-01T14:58:17 | 402,075,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,458 | py | import urllib.request
import urllib.parse
import urllib.error
from bs4 import BeautifulSoup
import ssl
import csv
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
inputFile = "in.csv"
outputFile = "out.csv"
invalidFile = "invalid.csv"
acceptedCourses = ["Serverless Firebase Development",
"Deploy to Kubernetes in Google Cloud",
"Implement DevOps in Google Cloud",
"Serverless Cloud Run Development",
"Monitor and Log with Google Cloud Operations Suite",
"Perform Foundational Infrastructure Tasks in Google Cloud",
"Create and Manage Cloud Resources",
"Secure Workloads in Google Kubernetes Engine",
"Set Up and Configure a Cloud Environment in Google Cloud"]
fields = []
rows = []
# import data
with open(inputFile, encoding='utf-8') as inp:
csvreader = csv.reader(inp)
fields = next(csvreader)
for row in csvreader:
rows.append(row)
print("Total no. of rows: %d" % (len(rows)-1))
newRows = []
newFields = ['name', 'email']
invalidRows = []
for row in rows:
url = row[2]
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
acceptedCoursesCount = 0
# Retrieve all of the badges
badgesList = soup.find_all("div", "profile-badge")
for course in badgesList:
a = course.find("span", "ql-subhead-1 l-mts")
courseName = a.get_text().strip()
if courseName in acceptedCourses:
# print(courseName)
acceptedCoursesCount += 1
print('%s have completed %d course%s' % (
row[1], acceptedCoursesCount, 's' if acceptedCoursesCount > 1 else ''))
newRow = []
if acceptedCoursesCount >= 6:
newRow.append(row[1])
newRow.append(row[0])
newRows.append(newRow)
else:
invalidRows.append(row)
# export accepted data
with open(outputFile, 'w', newline='', encoding='utf-8') as out:
csvwriter = csv.writer(out)
csvwriter.writerow(newFields)
csvwriter.writerows(newRows)
# export invalid data
with open(invalidFile, 'w', newline='', encoding='utf-8') as out2:
csvwriter = csv.writer(out2)
csvwriter.writerow(fields)
csvwriter.writerows(invalidRows)
print("Total no. of accepted rows: %d/%d" % (len(newRows), len(rows)))
| [
"catthanh26@gmail.com"
] | catthanh26@gmail.com |
bcc06d990379d76f8eef19db4573367d4912761c | 4331ccb3e584eb1d9312277333039adc7e850d10 | /duqu/readexcel.py | 6ba2b8be374c4313729de41547ca0d84ce88af12 | [] | no_license | setupsizhe/autotest | 9274c2826c022e28baecc0b31442dbdae04d420b | 083c469ec8968e972d70de3ec3ea12c2ca83eb34 | refs/heads/master | 2023-02-05T03:17:19.168914 | 2020-12-22T07:57:00 | 2020-12-22T07:57:00 | 319,576,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | # coding:utf-8
import os
import xlrd
import sys
class ExcelUtil():
def __init__(self, excelPath='test.xlsx', sheetName="Sheet1"):
self.data = xlrd.open_workbook(excelPath)
self.table = self.data.sheet_by_name(sheetName)
# 获取第一行作为key值
self.keys = self.table.row_values(0)
# 获取总行数
self.rowNum = self.table.nrows
# 获取总列数
self.colNum = self.table.ncols
def dict_data(self):
if self.rowNum <= 1:
print("总行数小于1")
else:
r = []
j = 1
for i in list(range(self.rowNum-1)):
s = {}
# 从第二行取对应values值
s['rowNum'] = i+2
values = self.table.row_values(j)
for x in list(range(self.colNum)):
s[self.keys[x]] = values[x]
r.append(s)
j += 1
return r
if __name__ == "__main__":
data = ExcelUtil(excelPath='test.xlsx', sheetName="Sheet1")
print(data.dict_data())
| [
"sizhe.liu@mushiny.com"
] | sizhe.liu@mushiny.com |
5e5c4e2de7fe0b92d59fe49e8ed41262a8b5854a | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/22Jul/down/emb/DoubleMuParked/StoreResults-Run2012B_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374500587/HTT_22Jul_manzoni_Down_Jobs/Job_104/run_cfg.py | af465597ac04797342d557667927aeff1a2019b3 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,041 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/22Jul/down/emb/DoubleMuParked/StoreResults-Run2012B_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374500587/HTT_22Jul_manzoni_Down_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012B_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_378.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012B_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_379.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012B_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_38.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
017bf3eb32db4c72181608ca25cb4c17651a56bd | 569438e55096aef55bdc23e13192ff769e011467 | /mysql-utilities-1.6.0/mysql/fabric/services/sharding.py | 9cc3b05dcec26cc7ba00af378bf690b1b70bb7b5 | [
"LicenseRef-scancode-python-cwi",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"bzip2-1.0.6",
"LicenseRef-scancode-free-unknown",
"GPL-2.0-only",
"LicenseRef-scancode-other-copyleft",
"Sleepycat",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-lat... | permissive | scavarda/mysql-dbcompare | b5ef50ae852d0f9b82e3b81e7a200fe12a7d251e | 1e912fd87282be3b3bed48487e6beb0ecb1de339 | refs/heads/master | 2021-05-01T22:24:03.738213 | 2016-12-23T14:22:44 | 2016-12-23T14:22:44 | 77,213,895 | 2 | 1 | Apache-2.0 | 2020-07-23T23:07:18 | 2016-12-23T09:17:46 | Python | UTF-8 | Python | false | false | 34,942 | py | #
# Copyright (c) 2013,2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""This module provides the necessary interfaces for working with the shards
in FABRIC.
"""
import logging
from mysql.fabric import (
errors as _errors,
events as _events,
group_replication as _group_replication,
utils as _utils,
)
from mysql.fabric.server import (
Group,
)
from mysql.fabric.sharding import (
ShardMapping,
RangeShardingSpecification,
HashShardingSpecification,
Shards,
SHARDING_DATATYPE_HANDLER,
SHARDING_SPECIFICATION_HANDLER,
)
from mysql.fabric.command import (
ProcedureShard,
Command,
ResultSet,
CommandResult,
)
from mysql.fabric.services.server import (
ServerLookups,
)
# Module-level logger for the sharding service.
_LOGGER = logging.getLogger(__name__)
# Error messages raised as ShardingError payloads by the command
# implementations and event handlers below.  Keep the %s placeholders in
# sync with the call sites that format them.
INVALID_SHARDING_TYPE = "Invalid Sharding Type %s"
TABLE_NAME_NOT_FOUND = "Table name %s not found"
CANNOT_REMOVE_SHARD_MAPPING = "Cannot remove mapping, while, " \
                              "shards still exist"
INVALID_SHARD_STATE = "Invalid Shard State %s"
INVALID_SHARDING_RANGE = "Invalid sharding range"
SHARD_MAPPING_NOT_FOUND = "Shard Mapping with shard_mapping_id %s not found"
SHARD_MAPPING_DEFN_NOT_FOUND = "Shard Mapping Definition with "\
    "shard_mapping_id %s not found"
SHARD_NOT_DISABLED = "Shard not disabled"
SHARD_NOT_ENABLED = "Shard not enabled"
INVALID_SHARDING_KEY = "Invalid Key %s"
SHARD_NOT_FOUND = "Shard %s not found"
SHARD_LOCATION_NOT_FOUND = "Shard location not found"
INVALID_SHARDING_HINT = "Unknown lookup hint"
SHARD_GROUP_NOT_FOUND = "Shard group %s not found"
SHARD_GROUP_MASTER_NOT_FOUND = "Shard group master not found"
SHARD_MOVE_DESTINATION_NOT_EMPTY = "Shard move destination %s already "\
    "hosts a shard"
INVALID_SHARD_SPLIT_VALUE = "The chosen split value must be between the " \
    "lower bound and upper bound of the shard"
INVALID_LOWER_BOUND_VALUE = "Invalid lower_bound value for RANGE sharding " \
    "specification %s"
SHARDS_ALREADY_EXIST = "Shards are already present in the definition, "\
    "use split_shard to create further shards."
LOWER_BOUND_GROUP_ID_COUNT_MISMATCH = "Lower Bound, Group ID pair mismatch "\
    "format should be group-id/lower_bound, "\
    "group-id/lower_bound...."
LOWER_BOUND_AUTO_GENERATED = "Lower Bounds are auto-generated in hash "\
    "based sharding"
SPLIT_VALUE_NOT_DEFINED = "Splitting a RANGE shard definition requires a split"\
    " value to be defined"
INVALID_SPLIT_VALUE = "Invalid value given for shard splitting"
NO_LOWER_BOUND_FOR_HASH_SHARDING = "Lower bound should not be specified "\
    "for hash based sharding"
MYSQLDUMP_NOT_FOUND = "Unable to find MySQLDump in location %s"
MYSQLCLIENT_NOT_FOUND = "Unable to find MySQL Client in location %s"
DEFINE_SHARD_MAPPING = _events.Event("DEFINE_SHARD_MAPPING")
class DefineShardMapping(ProcedureShard):
    """Service command that registers a new shard mapping definition.
    """
    group_name = "sharding"
    command_name = "create_definition"
    def execute(self, type_name, group_id, synchronous=True):
        """Create a shard mapping definition.

        :param type_name: The sharding scheme type - RANGE, HASH, LIST etc.
        :param group_id: Global group that stores the global updates and
                         schema changes for this mapping and dissipates
                         them to the shards.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            DEFINE_SHARD_MAPPING,
            self.get_lockable_objects(),
            type_name,
            group_id,
        )
        return self.wait_for_procedures(procs, synchronous)
ADD_SHARD_MAPPING = _events.Event("ADD_SHARD_MAPPING")
class AddShardMapping(ProcedureShard):
    """Service command that attaches a table to an existing shard mapping.
    """
    group_name = "sharding"
    command_name = "add_table"
    def execute(self, shard_mapping_id, table_name, column_name,
                synchronous=True):
        """Attach a table to a shard mapping.

        :param shard_mapping_id: Mapping the table is attached to.
        :param table_name: Name of the table being sharded.
        :param column_name: Column whose value drives the sharding scheme.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            ADD_SHARD_MAPPING,
            self.get_lockable_objects(),
            shard_mapping_id,
            table_name,
            column_name,
        )
        return self.wait_for_procedures(procs, synchronous)
REMOVE_SHARD_MAPPING = _events.Event("REMOVE_SHARD_MAPPING")
class RemoveShardMapping(ProcedureShard):
    """Service command that detaches a table from its shard mapping.
    """
    group_name = "sharding"
    command_name = "remove_table"
    def execute(self, table_name, synchronous=True):
        """Remove the shard mapping entry of the given table.

        Exposed through the XML-RPC framework; creates a job and enqueues
        it in the executor.

        :param table_name: Table whose sharding specification is removed.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            REMOVE_SHARD_MAPPING,
            self.get_lockable_objects(),
            table_name,
        )
        return self.wait_for_procedures(procs, synchronous)
REMOVE_SHARD_MAPPING_DEFN = _events.Event("REMOVE_SHARD_MAPPING_DEFN")
class RemoveShardMappingDefn(ProcedureShard):
    """Service command that deletes a whole shard mapping definition.
    """
    group_name = "sharding"
    command_name = "remove_definition"
    def execute(self, shard_mapping_id, synchronous=True):
        """Delete the shard mapping definition with the given ID.

        Exposed through the XML-RPC framework; creates a job and enqueues
        it in the executor.

        :param shard_mapping_id: ID of the definition to remove.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            REMOVE_SHARD_MAPPING_DEFN,
            self.get_lockable_objects(),
            shard_mapping_id,
        )
        return self.wait_for_procedures(procs, synchronous)
class LookupShardMapping(Command):
    """Service command that fetches the shard mapping of a table.
    """
    group_name = "sharding"
    command_name = "lookup_table"
    def execute(self, table_name):
        """Fetch the shard specification mapping for the given table.

        :param table_name: Table whose sharding specification is queried.
        :return: Dictionary with the shard mapping information for the
                 given table.
        """
        return Command.generate_output_pattern(
            _lookup_shard_mapping, table_name)
class ListShardMappings(Command):
    """Service command that lists shard mappings of one sharding type.
    """
    group_name = "sharding"
    command_name = "list_tables"
    def execute(self, sharding_type):
        """Return every shard mapping of the given sharding type.

        Calling with 'range', for instance, returns all sharding
        specifications of type range.

        :param sharding_type: Sharding type to filter by.
        :return: List of shard mapping dictionaries of that type; an empty
                 list when the type is valid but has no definitions; an
                 error when the type is invalid.
        """
        return Command.generate_output_pattern(_list, sharding_type)
class ListShardMappingDefinitions(Command):
    """Service command that lists every shard mapping definition.
    """
    group_name = "sharding"
    command_name = "list_definitions"
    def execute(self):
        """Return all shard mapping definitions.

        :return: A result set of definitions; empty when none exist.
        """
        result = ResultSet(
            names=('mapping_id', 'type_name', 'global_group_id'),
            types=(int, str, str),
        )
        for defn in ShardMapping.list_shard_mapping_defn():
            result.append_row(defn)
        return CommandResult(None, results=result)
ADD_SHARD = _events.Event("ADD_SHARD")
class AddShard(ProcedureShard):
    """Service command that creates the shards of a mapping.
    """
    group_name = "sharding"
    command_name = "add_shard"
    def execute(self, shard_mapping_id, groupid_lb_list, state="DISABLED",
                synchronous=True):
        """Add the RANGE shard specification: a single instance of a shard
        specification that maps a key RANGE to a server.

        :param shard_mapping_id: Unique identification of a shard mapping.
        :param groupid_lb_list: group_id/lower_bound pairs in the format
                                group_id/lower_bound, group_id/lower_bound...
        :param state: Whether the shard starts ENABLED or DISABLED.
        :param synchronous: Whether to block until execution finishes.
        :return: A dictionary representing the current Range specification.
        """
        procs = _events.trigger(
            ADD_SHARD,
            self.get_lockable_objects(),
            shard_mapping_id,
            groupid_lb_list,
            state,
        )
        return self.wait_for_procedures(procs, synchronous)
REMOVE_SHARD = _events.Event("REMOVE_SHARD")
class RemoveShard(ProcedureShard):
    """Service command that deletes a shard.
    """
    group_name = "sharding"
    command_name = "remove_shard"
    def execute(self, shard_id, synchronous=True):
        """Remove the RANGE specification mapping of the given shard.

        :param shard_id: ID of the shard to remove.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            REMOVE_SHARD,
            self.get_lockable_objects(),
            shard_id,
        )
        return self.wait_for_procedures(procs, synchronous)
SHARD_ENABLE = _events.Event("SHARD_ENABLE")
class EnableShard(ProcedureShard):
    """Service command that enables a shard.
    """
    group_name = "sharding"
    command_name = "enable_shard"
    def execute(self, shard_id, synchronous=True):
        """Enable the shard identified by shard_id.

        :param shard_id: ID of the shard to enable.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            SHARD_ENABLE,
            self.get_lockable_objects(),
            shard_id,
        )
        return self.wait_for_procedures(procs, synchronous)
SHARD_DISABLE = _events.Event("SHARD_DISABLE")
class DisableShard(ProcedureShard):
    """Service command that disables a shard.
    """
    group_name = "sharding"
    command_name = "disable_shard"
    def execute(self, shard_id, synchronous=True):
        """Disable the shard identified by shard_id.

        :param shard_id: ID of the shard to disable.
        :param synchronous: Whether to block until execution finishes.
        """
        procs = _events.trigger(
            SHARD_DISABLE,
            self.get_lockable_objects(),
            shard_id,
        )
        return self.wait_for_procedures(procs, synchronous)
class LookupShardServers(Command):
    """Service command that resolves a sharding key to a group's servers.
    """
    group_name = "sharding"
    command_name = "lookup_servers"
    def execute(self, table_name, key, hint="LOCAL"):
        """Return the server(s) hosting the shard of the given table for
        the given key.

        :param table_name: Table whose sharding specification is consulted.
        :param key: Key value to look up.
        :param hint: Whether the query is LOCAL or GLOBAL.
        :return: The Group UUID that contains the range the key belongs to.
        """
        return _lookup(table_name, key, hint)
class DumpShardTables(Command):
    """Dump the tables of every shard mapping whose name matches any of
    the given patterns; with no patterns, dump all tables.
    """
    group_name = "dump"
    command_name = "shard_tables"
    def execute(self, connector_version=None, patterns=""):
        """Return table information for matching shard mappings.

        :param connector_version: Connector version of the data.
        :param patterns: Shard mapping name patterns.
        """
        result = ResultSet(
            names=('schema_name', 'table_name', 'column_name', 'mapping_id'),
            types=(str, str, str, int),
        )
        rows = ShardMapping.dump_shard_tables(connector_version, patterns)
        for row in rows:
            result.append_row(row)
        return CommandResult(None, results=result)
class DumpShardingInformation(Command):
    """Dump the full sharding information for the tables matching the
    given patterns; with no patterns, dump it for all tables.
    """
    group_name = "dump"
    command_name = "sharding_information"
    def execute(self, connector_version=None, patterns=""):
        """Return all sharding information for matching tables.

        :param connector_version: Connector version of the data.
        :param patterns: Shard table name patterns.
        """
        result = ResultSet(
            names=('schema_name', 'table_name', 'column_name', 'lower_bound',
                   'shard_id', 'type_name', 'group_id', 'global_group'),
            types=(str, str, str, str, int, str, str, str),
        )
        rows = ShardMapping.dump_sharding_info(connector_version, patterns)
        for row in rows:
            result.append_row(row)
        return CommandResult(None, results=result)
class DumpShardMappings(Command):
    """Dump every shard mapping matching any of the given patterns; with
    no patterns, dump all shard mappings.
    """
    group_name = "dump"
    command_name = "shard_maps"
    def execute(self, connector_version=None, patterns=""):
        """Return information about matching shard mappings.

        :param connector_version: Connector version of the data.
        :param patterns: Shard mapping name patterns.
        """
        result = ResultSet(
            names=('mapping_id', 'type_name', 'global_group_id'),
            types=(int, str, str),
        )
        rows = ShardMapping.dump_shard_maps(connector_version, patterns)
        for row in rows:
            result.append_row(row)
        return CommandResult(None, results=result)
class DumpShardIndex(Command):
    """Dump the shard index for every mapping matching any of the given
    patterns; with no patterns, dump the entire index.  The returned
    lower_bound is a string (an md-5 hash of the group-id in which the
    data is stored).
    """
    group_name = "dump"
    command_name = "shard_index"
    def execute(self, connector_version=None, patterns=""):
        """Return index information for matching mappings.

        :param connector_version: Connector version of the data.
        :param patterns: Group name patterns.
        """
        result = ResultSet(
            names=('lower_bound', 'mapping_id', 'shard_id', 'group_id'),
            types=(str, int, int, str),
        )
        rows = Shards.dump_shard_indexes(connector_version, patterns)
        for row in rows:
            result.append_row(row)
        return CommandResult(None, results=result)
@_events.on_event(DEFINE_SHARD_MAPPING)
def _define_shard_mapping(type_name, global_group_id):
    """Create a shard mapping definition.

    :param type_name: Sharding scheme type - RANGE, HASH, LIST etc.
    :param global_group_id: Global group that stores the global updates
                            and schema changes for this mapping and
                            dissipates them to the shards.
    :return: The shard_mapping_id generated for the mapping.
    :raises: ShardingError if the sharding type is invalid.
    """
    normalized_type = type_name.upper()
    if normalized_type not in Shards.VALID_SHARDING_TYPES:
        raise _errors.ShardingError(INVALID_SHARDING_TYPE % (normalized_type, ))
    return ShardMapping.define(normalized_type, global_group_id)
@_events.on_event(ADD_SHARD_MAPPING)
def _add_shard_mapping(shard_mapping_id, table_name, column_name):
    """Add a table to a shard mapping.

    :param shard_mapping_id: The shard mapping id to which the input
                             table is attached.
    :param table_name: The table being sharded.
    :param column_name: The column whose value is used in the sharding
                        scheme being applied

    :return: None.  NOTE(review): the original docstring claimed a
             True/False return, but the function has no return statement;
             errors surface as exceptions from ShardMapping.add.
    """
    ShardMapping.add(shard_mapping_id, table_name, column_name)
@_events.on_event(REMOVE_SHARD_MAPPING)
def _remove_shard_mapping(table_name):
    """Remove the shard mapping of the given table.

    :param table_name: Name of the table whose shard mapping is removed.
    :raises: ShardingError if the table name is not found.
    """
    mapping = ShardMapping.fetch(table_name)
    if mapping is None:
        raise _errors.ShardingError(TABLE_NAME_NOT_FOUND % (table_name, ))
    mapping.remove()
@_events.on_event(REMOVE_SHARD_MAPPING_DEFN)
def _remove_shard_mapping_defn(shard_mapping_id):
    """Remove the shard mapping definition with the given ID.

    :param shard_mapping_id: The shard mapping ID of the shard mapping
                             definition that needs to be removed.
    """
    ShardMapping.remove_sharding_definition(shard_mapping_id)
def _lookup_shard_mapping(table_name):
    """Fetch the shard specification mapping for the given table.

    :param table_name: Table whose sharding specification is queried.
    :return: A one-element list with the shard mapping information; the
             fields are empty strings when the table has no mapping.
    """
    mapping = ShardMapping.fetch(table_name)
    if mapping is None:
        # Return an empty mapping instead of raising: an error would make
        # the executor roll back, which is unnecessary for a lookup miss.
        # Informing the caller that nothing was found is enough.
        return [{"mapping_id": "",
                 "table_name": "",
                 "column_name": "",
                 "type_name": "",
                 "global_group": ""}]
    return [{"mapping_id": mapping.shard_mapping_id,
             "table_name": mapping.table_name,
             "column_name": mapping.column_name,
             "type_name": mapping.type_name,
             "global_group": mapping.global_group}]
def _list(sharding_type):
    """Return all shard mappings of the given sharding type.

    Calling with 'range', for instance, returns every sharding
    specification of type range.

    :param sharding_type: Sharding type to filter by.
    :return: List of shard mapping dictionaries of that type; empty when
             the type is valid but no definitions exist.
    :raises: ShardingError if the sharding type is invalid.
    """
    if sharding_type not in Shards.VALID_SHARDING_TYPES:
        raise _errors.ShardingError(INVALID_SHARDING_TYPE % (sharding_type,))
    return [
        {
            "mapping_id": mapping.shard_mapping_id,
            "table_name": mapping.table_name,
            "column_name": mapping.column_name,
            "type_name": mapping.type_name,
            "global_group": mapping.global_group,
        }
        for mapping in ShardMapping.list(sharding_type)
    ]
@_events.on_event(ADD_SHARD)
def _add_shard(shard_mapping_id, groupid_lb_list, state):
    """Add the RANGE shard specification. This represents a single instance
    of a shard specification that maps a key RANGE to a server.

    One shard is created per group_id in groupid_lb_list.  For RANGE-style
    mappings each entry supplies an explicit lower bound; for HASH the
    lower bounds are auto-generated and must not be supplied.

    :param shard_mapping_id: The unique identification for a shard mapping.
    :param groupid_lb_list: The list of group_id, lower_bounds pairs in the
                    format, group_id/lower_bound, group_id/lower_bound... .
    :param state: Indicates whether a given shard is ENABLED or DISABLED

    :return: True if the add succeeded.
            False otherwise.
    :raises: ShardingError If the group on which the shard is being
                           created does not exist,
                           If the shard_mapping_id is not found,
                           If adding the shard definition fails,
                           If the state of the shard is an invalid
                           value,
                           If the range definition is invalid.
    """
    shard_mapping = ShardMapping.fetch_shard_mapping_defn(shard_mapping_id)
    if shard_mapping is None:
        raise _errors.ShardingError(SHARD_MAPPING_NOT_FOUND % \
                                                    (shard_mapping_id, ))
    # Definition row layout is (mapping_id, type_name, global_group_id),
    # as produced by ListShardMappingDefinitions.
    schema_type = shard_mapping[1]
    # Shards may only be created through this call once; additional shards
    # must come from split_shard.
    if len(RangeShardingSpecification.list(shard_mapping_id)) != 0:
        raise _errors.ShardingError(SHARDS_ALREADY_EXIST)
    group_id_list, lower_bound_list = \
        _utils.get_group_lower_bound_list(groupid_lb_list)
    if (len(group_id_list) != len(lower_bound_list)) and\
        schema_type == "RANGE":
        raise _errors.ShardingError(LOWER_BOUND_GROUP_ID_COUNT_MISMATCH)
    if len(lower_bound_list) != 0 and schema_type == "HASH":
        raise _errors.ShardingError(LOWER_BOUND_AUTO_GENERATED)
    if schema_type in Shards.VALID_RANGE_SHARDING_TYPES:
        # Validate every lower bound before creating any shard.
        for lower_bound in lower_bound_list:
            if not SHARDING_DATATYPE_HANDLER[schema_type].\
                is_valid_lower_bound(lower_bound):
                raise _errors.ShardingError(
                    INVALID_LOWER_BOUND_VALUE % (lower_bound, ))
    state = state.upper()
    if state not in Shards.VALID_SHARD_STATES:
        raise _errors.ShardingError(INVALID_SHARD_STATE % (state, ))
    for index, group_id in enumerate(group_id_list):
        shard = Shards.add(group_id, state)
        shard_id = shard.shard_id
        if schema_type == "HASH":
            HashShardingSpecification.add(
                shard_mapping_id,
                shard_id
            )
            _LOGGER.debug(
                "Added Shard (map id = %s, id = %s).",
                shard_mapping_id,
                shard_id
            )
        else:
            range_sharding_specification = \
                SHARDING_SPECIFICATION_HANDLER[schema_type].add(
                    shard_mapping_id,
                    lower_bound_list[index],
                    shard_id
                )
            _LOGGER.debug(
                "Added Shard (map id = %s, lower bound = %s, id = %s).",
                range_sharding_specification.shard_mapping_id,
                range_sharding_specification.lower_bound,
                range_sharding_specification.shard_id
            )
        #If the shard is added in a DISABLED state do not setup replication
        #with the primary of the global group. Basically setup replication only
        #if the shard is ENABLED.
        if state == "ENABLED":
            _setup_shard_group_replication(shard_id)
@_events.on_event(REMOVE_SHARD)
def _remove_shard(shard_id):
    """Remove the RANGE specification mapping represented by the current
    RANGE shard specification object.

    The shard must already be DISABLED; removal only clears the remaining
    replication references before deleting the specification and the shard.

    :param shard_id: The shard ID of the shard that needs to be removed.

    :return: True if the remove succeeded
            False if the query failed
    :raises: ShardingError if the shard id is not found,
        : ShardingError if the shard is not disabled.
    """
    range_sharding_specification, shard, _, _ = \
        verify_and_fetch_shard(shard_id)
    if shard.state == "ENABLED":
        raise _errors.ShardingError(SHARD_NOT_DISABLED)
    #Stop the replication of the shard group with the global
    #group. Also clear the references of the master and the
    #slave group from the current group.
    #NOTE: When we do the stopping of the shard group
    #replication in shard remove we are actually just clearing
    #the references, since a shard cannot be removed unless
    #it is disabled and when it is disabled the replication is
    #stopped but the references are not cleared.
    _stop_shard_group_replication(shard_id, True)
    range_sharding_specification.remove()
    shard.remove()
    _LOGGER.debug("Removed Shard (%s).", shard_id)
def _lookup(lookup_arg, key, hint):
    """Given a table name and a key return the servers of the Group where the
    shard of this table can be found

    :param lookup_arg: table name for "LOCAL" lookups
                       Shard Mapping ID for "GLOBAL" lookups.
    :param key: The key value that needs to be looked up
    :param hint: A hint indicates if the query is LOCAL or GLOBAL

    :return: The servers of the Group that contains the range in which the
             key belongs.
    :raises: ShardingError for an unknown hint, an unknown mapping/table,
             an invalid key, or a shard that is not enabled.
    """
    VALID_HINTS = ('LOCAL', 'GLOBAL')
    hint = hint.upper()
    if hint not in VALID_HINTS:
        raise _errors.ShardingError(INVALID_SHARDING_HINT)
    group = None
    #Perform the lookup for the group containing the lookup data.
    if hint == "GLOBAL":
        #Fetch the shard mapping object. In the case of GLOBAL lookups
        #the shard mapping ID is passed directly. In the case of "LOCAL"
        #lookups it is the table name that is passed.
        shard_mapping = ShardMapping.fetch_by_id(lookup_arg)
        if shard_mapping is None:
            raise _errors.ShardingError(
                SHARD_MAPPING_NOT_FOUND % (lookup_arg,  )
            )
        #GLOBAL lookups. There can be only one global group, hence using
        #shard_mapping[0] is safe.
        group_id = shard_mapping[0].global_group
    else:
        shard_mapping = ShardMapping.fetch(lookup_arg)
        if shard_mapping is None:
            raise _errors.ShardingError(TABLE_NAME_NOT_FOUND % (lookup_arg, ))
        sharding_specification = \
            SHARDING_SPECIFICATION_HANDLER[shard_mapping.type_name].\
            lookup(key, shard_mapping.shard_mapping_id, shard_mapping.type_name)
        if sharding_specification is None:
            raise _errors.ShardingError(INVALID_SHARDING_KEY % (key, ))
        shard = Shards.fetch(str(sharding_specification.shard_id))
        if shard.state == "DISABLED":
            raise _errors.ShardingError(SHARD_NOT_ENABLED)
        #group cannot be None since there is a foreign key on the group_id.
        #An exception will be thrown nevertheless.
        group_id = shard.group_id
    return ServerLookups().execute(group_id=group_id)
@_events.on_event(SHARD_ENABLE)
def _enable_shard(shard_id):
    """Enable the shard identified by shard_id.

    :param shard_id: ID of the shard to enable.
    :return: True Placeholder return value
    :raises: ShardingError if the shard_id is not found.
    """
    shard = verify_and_fetch_shard(shard_id)[1]
    # Replication with the global server of the associated shard mapping
    # must be running before the shard is marked enabled.
    _setup_shard_group_replication(shard_id)
    shard.enable()
@_events.on_event(SHARD_DISABLE)
def _disable_shard(shard_id):
    """Disable the shard identified by shard_id.

    :param shard_id: ID of the shard to disable.
    :return: True Placeholder return value
    :raises: ShardingError if the shard_id is not found.
    """
    shard = verify_and_fetch_shard(shard_id)[1]
    # Stop (but keep references for) replication with the global server of
    # the associated shard mapping before marking the shard disabled.
    _stop_shard_group_replication(shard_id, False)
    shard.disable()
def verify_and_fetch_shard(shard_id):
    """Find out if the shard_id exists and return the sharding specification for
    it. If it does not exist throw an exception.

    :param shard_id: The ID for the shard whose specification needs to be
                     fetched.

    :return: A 4-tuple (sharding specification, shard, shard mappings,
             shard mapping definition) for the given shard ID.
    :raises: ShardingError if the shard ID, its mapping, or the mapping
             definition is not found.
    """
    #Here the underlying sharding specification might be a RANGE
    #or a HASH. The type of sharding specification is obtained from the
    #shard mapping.
    range_sharding_spec = RangeShardingSpecification.fetch(shard_id)
    if range_sharding_spec is None:
        raise _errors.ShardingError(SHARD_NOT_FOUND % (shard_id,  ))
    #Fetch the shard mappings and use them to find the type of sharding
    #scheme.
    shard_mappings = ShardMapping.fetch_by_id(
        range_sharding_spec.shard_mapping_id
    )
    if shard_mappings is None:
        raise _errors.ShardingError(
            SHARD_MAPPING_NOT_FOUND % (
                range_sharding_spec.shard_mapping_id,
            )
        )
    #Fetch the shard mapping definition. There is only one shard mapping
    #definition associated with all of the shard mappings.
    shard_mapping_defn =  ShardMapping.fetch_shard_mapping_defn(
        range_sharding_spec.shard_mapping_id
    )
    if shard_mapping_defn is None:
        raise _errors.ShardingError(
            SHARD_MAPPING_DEFN_NOT_FOUND % (
                range_sharding_spec.shard_mapping_id,
            )
        )
    shard = Shards.fetch(shard_id)
    if shard is None:
        raise _errors.ShardingError(SHARD_NOT_FOUND % (shard_id,  ))
    #Both of the shard_mappings retrieved will be of the same sharding
    #type. Hence it is safe to use one of them to retrieve the sharding type.
    if shard_mappings[0].type_name == "HASH":
        return HashShardingSpecification.fetch(shard_id), \
            shard, shard_mappings, shard_mapping_defn
    else:
        return range_sharding_spec, shard, shard_mappings, shard_mapping_defn
def _setup_shard_group_replication(shard_id):
    """Make the group of the given shard replicate from the global group.

    Utility that looks up the group associated with the shard and sets up
    replication between that group and the global group of the mapping.

    :param shard_id: ID of the shard whose group becomes a slave.
    """
    # When heterogeneous sharding schemes arrive, the scheme type should
    # determine which sharding implementation performs this lookup.
    _, shard, _, shard_mapping_defn = verify_and_fetch_shard(shard_id)
    # Definition row layout is (mapping_id, type_name, global_group_id);
    # index 2 is the global group the shard group replicates from.
    _group_replication.setup_group_replication(
        shard_mapping_defn[2], shard.group_id)
def _stop_shard_group_replication(shard_id,  clear_ref):
    """Stop the replication between the master group and the shard group.

    :param shard_id: The ID of the shard, whose group needs to
                     be stopped as a slave.
    :param clear_ref: Indicates whether removing the shard should result
                      in the shard group losing all its slave group
                      references.
    """
    #Fetch the Range sharding specification. When we start implementing
    #heterogenous sharding schemes, we need to find out the type of
    #sharding scheme and we should use that to find out the sharding
    #implementation.
    _, shard, _, shard_mapping_defn =  \
        verify_and_fetch_shard(shard_id)
    #Stop the replication between the shard group and the global group. Also
    #based on the clear_ref flag decide if you want to clear the references
    #associated with the group.
    _group_replication.stop_group_slave(shard_mapping_defn[2], shard.group_id,
                                        clear_ref)
| [
"marcos@planetsite.it"
] | marcos@planetsite.it |
6143aeec23420e381500225a7b77c7106f4afe75 | 3d852809edef71da5ce6cc3df63ae7e77c1c01f7 | /day24/file_io/kbo2021_r.py | ad38063a36414c1ec7cb18dac48c726e2ac64b40 | [] | no_license | yeomye/pyworks | f85f30e3592215efdbdd830c40a54586f636c8cd | b741068264f4a09dd2dcbde1c66617dcc1aa7f37 | refs/heads/master | 2023-08-03T03:18:37.789783 | 2021-09-24T04:19:50 | 2021-09-24T04:19:50 | 402,286,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # 리스트 읽기
# Read the KBO 2021 list file and print its contents.
try:
    # `with` guarantees the file handle is closed even if read() raises,
    # unlike the original explicit open()/close() pair which leaked the
    # handle on any error between open and close.
    with open("c:/pyfile/kbo2021.txt", "r") as f:
        data = f.read()
        print(data)
except FileNotFoundError:
    print('파일을 찾을 수 없습니다.')
"yyz2999@naver.com"
] | yyz2999@naver.com |
71b257d53cfa0b1ff1ea40c6dbceb81a4faab0c1 | d9d7f841e1a7c53344000634320db78c5743eba5 | /lib/python/make-csv.py | 8d60b40f8633289728adc8229a567ec4aa777534 | [] | no_license | hellais/ooni-analyst | 5bb7030734319ad0bafec267ec30a7c8d0696b03 | 7e81b812581e36e26951bbfa48fea770ec09c061 | refs/heads/master | 2020-03-22T10:05:37.383835 | 2018-07-09T11:07:10 | 2018-07-09T11:07:10 | 139,880,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | import os
import sys
import argparse
from datetime import datetime
import pandas as pd
import psycopg2
def query(q, params, pg_dsn):
    # Run a parameterized SQL query against the metadb and return the
    # result as a pandas DataFrame.
    #
    # :param q: SQL text with %s placeholders.
    # :param params: sequence of parameter values bound to the placeholders.
    # :param pg_dsn: libpq connection string (DSN).
    # XXX this is useful for debugging
    """
    import yaml
    from sshtunnel import SSHTunnelForwarder
    with open('private/secrets.yml') as in_file:
        secrets = yaml.load(in_file)
    with SSHTunnelForwarder(
        ('hkgmetadb.infra.ooni.io', 22),
        ssh_username='art',
        ssh_private_key=secrets['ssh_private_key_path'],
        remote_bind_address=('localhost', 5432)
    ) as server:
        conn = psycopg2.connect(
            host='localhost',
            port=server.local_bind_port,
            user='shovel',
            password=secrets['shovel_password'],
            dbname='metadb')
        return pd.read_sql_query(q, conn, params=params)
    """
    conn = psycopg2.connect(pg_dsn)
    return pd.read_sql_query(q, conn, params=params)
def make_csv(output_path, urls, probe_cc, start_date, end_date, pg_dsn):
    # Query HTTP measurement verdicts for the given country, date interval
    # and URLs, and write the result set to output_path as CSV.
    #
    # NOTE(review): the order of `params` must match the placeholder order
    # in `where_clause` below: (start_date, end_date), then one %s per
    # country, then one %s per URL.
    countries = [probe_cc]
    params = [start_date, end_date, probe_cc]
    for url in urls:
        params.append(url)
    base_query = """SELECT measurement.test_runtime,
       input.input,
       measurement.measurement_start_time,
       report.probe_cc,
       report.probe_asn,
       report.probe_ip,
       report.report_id,
       http_verdict.http_experiment_failure,
       http_verdict.blocking
FROM measurement
JOIN input ON input.input_no = measurement.input_no
JOIN report ON report.report_no = measurement.report_no
JOIN http_verdict ON http_verdict.msm_no = measurement.msm_no
"""
    # Build the WHERE clause with one placeholder per country and per URL;
    # the values themselves are bound via params (no string interpolation
    # of user input into SQL).
    where_clause = "WHERE ({}) AND ({}) AND ({})".format(
        " measurement.measurement_start_time BETWEEN %s AND %s",
        " OR ".join(["report.probe_cc = %s" for _ in countries]),
        " OR ".join(["input = %s" for _ in urls]),
    )
    q = base_query + where_clause
    print(q)
    print(params)
    res = query(q, params, pg_dsn)
    print(res)
    res.to_csv(output_path)
def parse_args():
    """Parse and return the command-line options for make-csv."""
    parser = argparse.ArgumentParser(
        description='make-csv: creates a csv file for the specified inputs')
    parser.add_argument('--output', metavar='PATH',
                        help='Where to write to', required=True)
    parser.add_argument('--country', metavar='PROBE_CC',
                        help='Country code to target', required=True)
    parser.add_argument('--start-date', metavar='START_DATE',
                        help='Start date interval', required=True)
    parser.add_argument('--end-date', metavar='END_DATE',
                        help='End date interval', required=True)
    parser.add_argument('--urls', metavar='URL', nargs='*',
                        help='URLs to test')
    parser.add_argument('--postgres', metavar='DSN',
                        help='libpq data source name')
    ## XXX add urls
    return parser.parse_args()
def main():
    """Entry point: parse options, build the CSV, echo the output path."""
    opt = parse_args()
    make_csv(
        output_path=opt.output,
        urls=opt.urls,
        probe_cc=opt.country,
        start_date=opt.start_date,
        end_date=opt.end_date,
        pg_dsn=opt.postgres,
    )
    print(opt.output)
if __name__ == "__main__":
    main()
| [
"arturo@filasto.net"
] | arturo@filasto.net |
a88fe823b14d9c288341aa84511f4fe893b1a1d5 | 2b6e2bc133a64be1a26c83b8097c983b5b794989 | /greatMmsWeb/greatMmsWeb/urls.py | cd4ab337d2923c7a7c783cc0fbce402a19967213 | [] | no_license | mms-data/mms_data_hunt | 377d500fd974e5a328bfdea5fad502fa97e02b6a | 6ec465f65a40004868337dc07246d21c30f499eb | refs/heads/master | 2022-12-04T08:10:10.454050 | 2020-08-27T21:57:44 | 2020-08-27T21:57:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """greatMmsWeb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route table: delegate /searchApp/ URLs to the searchApp app's own
# URLconf, and expose the Django admin at /admin/.
urlpatterns = [
    path('searchApp/', include('searchApp.urls')),
    path('admin/', admin.site.urls),
]
| [
"bhaskarbishnoi@Bhaskars-MBP.fios-router.home"
] | bhaskarbishnoi@Bhaskars-MBP.fios-router.home |
40c0d6fcf7bb9304c361fdaaca659ff158f4207e | c5dd4cdbcc5c3edccacf02d1550013e530e70d2e | /Screenshots/ImageRenamer.py | f2aead5c653f1a0372e2dcb86c15b70f75969fe7 | [] | no_license | dsouzarc/bmfiOS | 4506fcbff1b601182f272e827296684c95a1c8c6 | 0f675df00b5afe85f4d6cf035aae0245145025cd | refs/heads/master | 2016-09-16T04:29:05.659131 | 2015-05-26T02:22:03 | 2015-05-26T02:22:03 | 31,240,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import os;
#Written by Ryan Dsouza
#Quickly renames all the photos in a directory with the format
#'Screenshot_X.png', where X is that file's place in the directory
#Also prints out the markdown code to display these many images
#Run instructions: 'python ImageRenamer.py'

files = os.listdir('.')
index = 0
# Base URL the markdown image tags point at.
prefix = "https://github.com/dsouzarc/bmfiOS/blob/master/Screenshots/"
for fileName in files:
    # Never rename this script itself.
    if fileName != "ImageRenamer.py":
        newFileName = "Screenshot_" + str(index) + ".png"
        # NOTE(review): if a file named Screenshot_N.png already exists,
        # os.rename clobbers it -- run only on a folder of raw screenshots.
        os.rename(fileName, newFileName)
        # Ready-to-paste markdown tag for this screenshot (uses `prefix`).
        print("![Screenshot_" + str(index) + "](" + prefix + newFileName + ")")
        index += 1
| [
"dsouzarc@gmail.com"
] | dsouzarc@gmail.com |
af6b3f137d875061e788546266ab073b1b555f47 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /examples/v1/synthetics/DeletePrivateLocation.py | e1445b48a8b1ceaab7b068d50ef37b6e45fd3c5f | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 386 | py | """
Delete a private location returns "OK" response
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.synthetics_api import SyntheticsApi
# Default client configuration -- credentials presumably come from the
# environment (DD_API_KEY / DD_APP_KEY); confirm against the
# datadog_api_client documentation.
configuration = Configuration()
with ApiClient(configuration) as api_client:
    api_instance = SyntheticsApi(api_client)
    # Delete the Synthetics private location with this ID.
    api_instance.delete_private_location(
        location_id="location_id",
    )
| [
"noreply@github.com"
] | noreply@github.com |
53411dfa34e5dcffe4e75154fc53a3b3114f157b | 11f4dd74872c73781a8975698e7cf1e3df2a40af | /Chapter 9 - Organizing Files/findLargeFile.py | dab52b3722813fce1035f9413997e88737ddd764 | [] | no_license | lonewolfcub/Automate-the-Boring-Stuff-with-Python | ca65e9fcbd61c94776ac1a0346b5372e975569db | da90ead498a0597ae5a4f88449a9774887c7d5e6 | refs/heads/master | 2021-01-18T17:03:17.600375 | 2017-03-31T05:58:56 | 2017-03-31T05:58:56 | 86,783,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | #! /usr/bin/env python3
import os
def findLargeFiles(folder, min_size=13107200):
    """Recursively print files under *folder* larger than *min_size* bytes.

    The default threshold is 13107200 bytes (12.5 MiB); the threshold is
    now a parameter so callers can choose their own cutoff. Each match
    is printed as "<path> <size> bytes".
    """
    # walk the whole tree under the search folder
    for dirpath, dirnames, filenames in os.walk(folder):
        # report each file strictly larger than the threshold
        for file in filenames:
            filepath = os.path.join(dirpath, file)
            filesize = os.path.getsize(filepath)
            if filesize > min_size:
                print(filepath + ' ' + str(filesize) + ' bytes')
# define search folder
# Interactive driver: ask the user for a directory, then report its
# large files. NOTE: runs at import time (no __main__ guard).
print('Please enter the folder you wish to search:')
folder = input()
findLargeFiles(folder)
| [
"lonewolfcub020@gmail.com"
] | lonewolfcub020@gmail.com |
7af77bd8017df4a03b730d463efae17dd0d9ffb0 | 447914f0f7a6e1b432f6811aacb0f274fbdbe3c5 | /Jerry_Padilla_Py2Assignments-master/assignments/practice_models/apps/ninjaDojo/migrations/0001_initial.py | af5ff3d138e5e38072d2a746df99076ec1ab3a08 | [] | no_license | jsterling23/Python_Prac | 965ab83e6f34191a1ebbc2e3605f71ace07a0b6d | dc41030be125337099ddbc8af8e2598b844e11a4 | refs/heads/master | 2020-03-18T10:49:23.521218 | 2018-05-23T23:19:22 | 2018-05-23T23:19:22 | 134,635,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 21:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: creates the Dojo and Ninja tables;
    each Ninja carries a cascading foreign key to its Dojo."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Dojo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                # two-letter state code
                ('state', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='Ninja',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                # deleting a Dojo deletes its ninjas; reverse accessor is dojo.ninjas
                ('dojo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ninjas', to='ninjaDojo.Dojo')),
            ],
        ),
    ]
| [
"jerrypadilla23@gmail.com"
] | jerrypadilla23@gmail.com |
4aea674ba9ea55e99e9247ffb88c1838a645ac6e | 7fdd73e4e8526d636c69fa976fdce07d8aba203b | /PyBank/main.py | c29993d65c04c22392c43ec371411656bfd68fc8 | [] | no_license | hopandskip/python-challenge | bc1667cc3eca4ad83170715ab2fb47066392cf01 | 7eecd3acd950390369cde0c664228ed14b877eec | refs/heads/master | 2020-04-22T07:56:40.818467 | 2019-02-13T03:16:44 | 2019-02-13T03:16:44 | 170,228,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | #Establish dependencies
import os
import csv
#Import the file
pybank_path = os.path.join("Resources", "budget_data.csv")

# Parsed columns: month labels, monthly profit/loss values, and the
# month-over-month change in profit/loss.
date = []
profit_loss = []
change_in_profit_loss = []

with open(pybank_path, newline="") as pybank_data:
    pybank_reader = csv.reader(pybank_data, delimiter=",")
    next(pybank_reader)  # skip the header row
    for row in pybank_reader:
        date.append(row[0])
        profit_loss.append(int(row[1]))

# Month-over-month deltas (one fewer entry than months).
for i in range(1, len(profit_loss)):
    change_in_profit_loss.append(profit_loss[i] - profit_loss[i - 1])

max_profit = max(change_in_profit_loss)  # greatest monthly increase
max_loss = min(change_in_profit_loss)    # greatest monthly decrease
# The delta at index i describes the transition INTO month i+1.
max_profit_loss_date = str(date[change_in_profit_loss.index(max_profit) + 1])
min_profit_loss_date = str(date[change_in_profit_loss.index(max_loss) + 1])
# Average of the changes in "Profit/Losses" over the entire period.
average_change = sum(change_in_profit_loss) / (len(date) - 1)

# Build the report once, then emit it to the console and to PyBank.txt.
# Opening the log file a single time (instead of once per printed line)
# avoids leaking a dozen never-closed file handles.
report_lines = [
    "Financial Analysis",
    "----------------------------",
    f"Total Months: {len(date)}",
    f"Total: ${sum(profit_loss)}",
    f"Average change: ${average_change:.2f}",
    f"Greatest Increase in Profits: {max_profit_loss_date} ${max_profit}",
    f"Greatest Decrease in Profits: {min_profit_loss_date} ${max_loss}",
]
for line in report_lines:
    print(line)
with open("PyBank.txt", "a") as report_file:
    for line in report_lines:
        print(line, file=report_file)
| [
"18711721+hopandskip@users.noreply.github.com"
] | 18711721+hopandskip@users.noreply.github.com |
2b720f042ff4ef1edfcb81e6b3105b44a434b8cf | f172de95e4aa77eacebf21639450dc4ac153cfaf | /w/_a.py | a117f36aa81b745f9faac867975b519c1809f02b | [] | no_license | teamWSIZ/python-async-parallel-2019 | 3132403ba49e20eff28ec5eb2a1a9d82a1f629fd | 977bcd5ff848881be68fa5e52b26c2a74b160cd2 | refs/heads/master | 2020-08-11T16:00:08.817047 | 2020-05-31T09:26:11 | 2020-05-31T09:26:11 | 214,592,009 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | import sys
class A:
    """Demo class: one class-level attribute plus one set per instance."""

    u = 77  # shared class attribute

    def __init__(self):
        # per-instance attribute
        self.g = 12
a = A()
a.x = 111  # attach a brand-new attribute dynamically
print(a.u)  # class attribute resolved through the instance -> 77
print(a.x)  # the dynamically added attribute -> 111
# Reference count of the object bound to `a`; per the sys docs the
# count shown is one higher because getrefcount's argument holds a
# temporary reference.
print(f'->{sys.getrefcount(a)}')
"pm@aimachina.io"
] | pm@aimachina.io |
39ba4254009e2a19d141b2c3bde4677bcf87a301 | 5121a0c80c3f20a0ad296b384f8d814386132c9f | /main.py | 788f8ce876e3e9705264b0ce7dbe0b02fcc336ad | [] | no_license | scxr/oghost_new | 35112168b69db1e8f6f4e389027df8a6ff7a0f9d | 815de8a6f3172bbe88b14ad90c75ff4dbae91eac | refs/heads/master | 2023-01-01T10:39:01.652219 | 2020-10-14T15:17:21 | 2020-10-14T15:17:21 | 296,744,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from app import app
from app.main.config.settings import Config
# Load the application's settings object into the Flask config.
app.config.from_object(Config)

# Entry point; debug mode disabled.
if __name__ == "__main__":
    app.run(debug=False)
"cswilson@zoho.com"
] | cswilson@zoho.com |
0a4cafcca4c0dfb9516022fa338370e21580a3aa | 3ed906cc2a51b26b6efbe4ed9a76ab61dae66bcd | /aperoll/widgets/star_plot.py | 5fbfaaa9822e97a7c2995f50734ae7c3270f319a | [] | no_license | sot/aperoll | 3c77d39f1cab87d3681228551e651eaebd52222f | 433637e15720294c867f0c1cac847fc15b3d45b4 | refs/heads/main | 2023-06-22T03:19:12.592351 | 2021-07-22T18:57:28 | 2021-07-22T18:57:28 | 388,531,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,430 | py |
import numpy as np
from astropy.table import Table
from PyQt5 import QtCore as QtC, QtWidgets as QtW, QtGui as QtG
from Quaternion import Quat
from cxotime import CxoTime
from chandra_aca.transform import yagzag_to_pixels, pixels_to_yagzag, yagzag_to_radec
def symsize(mag):
    """Map a star magnitude to a plot symbol size.

    Linear ramp from 16.0 at magnitude 6 down to 4.0 at magnitude 11;
    np.interp clamps inputs outside [6, 11] to the endpoint sizes.
    """
    mag_endpoints = [6.0, 11.0]
    size_endpoints = [16.0, 4.0]
    return np.interp(mag, mag_endpoints, size_endpoints)
def get_stars(starcat_time, quaternion, radius=3):
    """Fetch AGASC stars within *radius* degrees of the attitude's RA/dec.

    Ensures the returned table has 'yang'/'zang' columns (arcsec),
    deriving them from proper-motion-corrected RA/dec when absent.
    """
    import agasc
    from Ska.quatutil import radec2yagzag

    stars = agasc.get_agasc_cone(
        quaternion.ra, quaternion.dec, radius=radius, date=starcat_time
    )
    have_angles = 'yang' in stars.colnames and 'zang' in stars.colnames
    if not have_angles:
        # radec2yagzag returns degrees; store Y/Z angles in arcsec.
        yags, zags = radec2yagzag(stars['RA_PMCORR'], stars['DEC_PMCORR'], quaternion)
        stars['yang'] = yags * 3600
        stars['zang'] = zags * 3600
    return stars
class StarView(QtW.QGraphicsView):
    """Interactive view of the star scene.

    Plain dragging pans (by shifting the scene rect), shift-dragging
    rotates the viewport, and the mouse wheel zooms.  Emits
    ``roll_changed`` with the new roll offset (degrees) while rotating.
    """

    # Emitted with the current roll offset (deg) during a rotation drag.
    roll_changed = QtC.pyqtSignal(float)

    def __init__(self, scene=None):
        super().__init__(scene)
        self._start = None    # last mouse position while a button is held
        self._moving = False  # True while a pan drag is in progress
        b1hw = 512.  # half-size of the inner 1024x1024 box
        self.fov = self.scene().addRect(-b1hw, -b1hw, 2 * b1hw, 2 * b1hw)

    def mouseMoveEvent(self, event):
        """Pan (plain drag) or rotate (shift-drag) the view."""
        pos = event.pos()
        if self._start is None:
            # No press recorded -- ignore stray move events.
            return
        if pos != self._start:
            # Decide the drag mode on the first actual movement.
            if event.modifiers() == QtC.Qt.ShiftModifier:
                self._rotating = True
            else:
                self._moving = True
        if self._moving or self._rotating:
            end_pos = self.mapToScene(pos)
            start_pos = self.mapToScene(self._start)
            if self._moving:
                # Pan: translate the scene rect by the drag delta.
                dx, dy = end_pos.x() - start_pos.x(), end_pos.y() - start_pos.y()
                scene_rect = self.scene().sceneRect()
                new_scene_rect = QtC.QRectF(scene_rect.x() - dx, scene_rect.y() - dy,
                                            scene_rect.width(), scene_rect.height())
                self.scene().setSceneRect(new_scene_rect)
            elif self._rotating:
                # Rotate about the viewport center by the angle swept
                # between the two drag points (cross product / arcsin).
                center = self.mapToScene(self.viewport().rect().center())
                x1 = start_pos.x() - center.x()
                y1 = start_pos.y() - center.y()
                x2 = end_pos.x() - center.x()
                y2 = end_pos.y() - center.y()
                r1 = np.sqrt(x1**2 + y1**2)
                r2 = np.sqrt(x2**2 + y2**2)
                angle = np.rad2deg(np.arcsin((x1 * y2 - x2 * y1) / (r1 * r2)))
                transform = self.viewportTransform().rotate(angle)
                self.setTransform(transform)
                self.roll_changed.emit(self.get_roll_offset())
        # Update the drag anchor for the next move event.
        self._start = pos

    def mouseReleaseEvent(self, event):
        # End of drag: forget the anchor point.
        self._start = None

    def mousePressEvent(self, event):
        # Start of a potential drag; the mode is chosen on first move.
        self._moving = False
        self._rotating = False
        self._start = event.pos()

    def wheelEvent(self, event):
        # Zoom proportionally to the wheel rotation.
        scale = 1 + 0.5 * event.angleDelta().y() / 360
        self.scale(scale, scale)

    def drawForeground(self, painter, rect):
        """Draw field-outline boxes and magenta cross-hairs, counter-
        rotated so the overlay stays aligned with the screen axes."""
        black_pen = QtG.QPen()
        black_pen.setWidth(2)
        b1hw = 512.
        center = QtC.QPoint(self.viewport().width() / 2, self.viewport().height() / 2)
        center = self.mapToScene(center)
        # Extract the view's rotation angle from the transform matrix.
        transform = self.viewportTransform()
        t11 = transform.m11()
        t12 = transform.m12()
        angle = np.rad2deg(np.arctan2(t12, t11))
        painter.translate(center)
        painter.rotate(-angle)
        painter.translate(-center)
        painter.drawRect(center.x() - b1hw, center.y() - b1hw, 2 * b1hw, 2 * b1hw)
        b2w = 520
        painter.drawRect(center.x() - b2w, center.y() - b1hw, 2 * b2w, 2 * b1hw)
        painter.setPen(QtG.QPen(QtG.QColor('magenta')))
        painter.drawLine(center.x() - 511, center.y(), center.x() + 511, center.y())
        painter.drawLine(center.x(), center.y() - 511, center.x(), center.y() + 511)

    def get_origin_offset(self):
        """
        Get the translation offset (in pixels) of the current view from (511, 511)
        """
        center = QtC.QPoint(self.viewport().width() / 2, self.viewport().height() / 2)
        center = self.mapToScene(center)
        return center.x(), center.y()

    def get_roll_offset(self):
        # Rotation angle (deg) encoded in the viewport transform.
        transform = self.viewportTransform()
        return np.rad2deg(np.arctan2(transform.m12(), transform.m11()))

    def re_center(self):
        """Re-center the scene rect on the origin and undo any rotation."""
        scene_rect = self.scene().sceneRect()
        w, h = scene_rect.width(), scene_rect.height()
        new_scene_rect = QtC.QRectF(-w / 2, -h / 2, w, h)
        # print(f'recentering {w}, {h}')
        self.scene().setSceneRect(new_scene_rect)
        transform = self.viewportTransform().rotate(-self.get_roll_offset())
        self.setTransform(transform)
class StarPlot(QtW.QWidget):
    """Widget that renders an AGASC star field plus an optional star
    catalog, and tracks the attitude implied by user pan/rotation."""

    # Emitted as (ra, dec, roll) whenever pan/rotation changes the
    # current attitude.
    attitude_changed = QtC.pyqtSignal(float, float, float)

    def __init__(self, parent=None):
        super().__init__(parent)
        layout = QtW.QVBoxLayout(self)
        self.setLayout(layout)
        # Reference pixel position used when converting view offsets to
        # yag/zag -- presumably the aimpoint; TODO confirm.
        self._origin = [6.08840495576943, 4.92618563916467]
        self.scene = QtW.QGraphicsScene(self)
        self.scene.setSceneRect(-100, -100, 200, 200)
        self.view = StarView(self.scene)
        scale = 1
        self.view.scale(scale, scale)  # I should not need this but...
        self.layout().addWidget(self.view)
        self.stars = None
        # "base attitude" refers to the attitude when the viewport is at the origin and not rotated
        # the actual attitude takes the base attitude and applies a displacement and a rotation
        self._base_attitude = None
        # "current attitude" refers to the attitude taking into account the viewport's position
        self._current_attitude = None
        self._time = None
        self._highlight = None
        self._catalog = None
        # React to pans (scene rect moves) and rotations from the view.
        self.scene.sceneRectChanged.connect(self._radec_changed)
        self.view.roll_changed.connect(self._roll_changed)

    def _radec_changed(self):
        """Recompute RA/dec (roll unchanged) after the scene rect moved."""
        # RA/dec change when the scene rectangle changes, and its given by the rectangle's center
        # the base attitude corresponds to RA/dec at the origin, se we take the displacement
        # of the view offset, apply it from the origin, and get ra/dec for the offset origin
        if self._base_attitude is None:
            return
        x, y = self.view.get_origin_offset()
        yag, zag = pixels_to_yagzag(self._origin[0] + x, self._origin[1] - y, allow_bad=True)
        ra, dec = yagzag_to_radec(yag, zag, self._base_attitude)
        # print('RA/dec changed', ra, dec)
        self._current_attitude = Quat(
            equatorial=[ra, dec, self._current_attitude.roll]
        )
        # print(f'Attitude changed. RA: {ra}, dec: {dec}, roll: {roll} ({x}, {y})')
        self.attitude_changed.emit(
            self._current_attitude.ra, self._current_attitude.dec, self._current_attitude.roll
        )

    def _roll_changed(self, roll_offset):
        """Recompute roll (RA/dec unchanged) after the viewport rotated."""
        if self._current_attitude is None:
            return
        # roll changes when the viewport is rotated.
        # the view class keeps track of this.
        # print('roll changed', roll_offset)
        self._current_attitude = Quat(
            equatorial=[self._current_attitude.ra,
                        self._current_attitude.dec,
                        self._base_attitude.roll - roll_offset]
        )
        self.attitude_changed.emit(
            self._current_attitude.ra, self._current_attitude.dec, self._current_attitude.roll
        )

    def set_base_attitude(self, q, update=True):
        """
        Sets the base attitude

        The base attitude is the attitude corresponding to the origin of the scene.

        When the base attitude changes, the star positions must be updated. Not doing so will
        leave the display in an inconsistent state. The "update" argument is there as a convenience
        to delay the update in case one wants to call several setters.
        """
        self._base_attitude = Quat(q)
        self._current_attitude = Quat(self._base_attitude)
        if update:
            self.show_stars()

    def set_time(self, t, update=True):
        # Set the observation time; optionally refresh the star field.
        self._time = CxoTime(t)
        if update:
            self.show_stars()

    def highlight(self, agasc_ids, update=True):
        # Mark the given AGASC IDs for red highlighting on next draw.
        self._highlight = agasc_ids
        if update:
            self.show_stars()

    def set_catalog(self, catalog, update=True):
        # Adopt a star catalog plus its attitude/date in one call.
        self.set_base_attitude(catalog.att, update=False)
        self.set_time(catalog.date, update=False)
        self._catalog = catalog
        if update:
            self.show_stars()

    def show_stars(self):
        """Redraw everything: field stars, optional catalog markers
        (GUI/ACQ/BOT/MON/FID), then re-center the view."""
        self.scene.clear()
        if self._base_attitude is None or self._time is None:
            return
        self.stars = get_stars(self._time, self._base_attitude)
        # Update table to include row/col values corresponding to yag/zag
        self.stars['row'], self.stars['col'] = yagzag_to_pixels(
            self.stars['yang'], self.stars['zang'], allow_bad=True
        )
        b1hw = 512.
        self.fov = self.scene.addRect(-b1hw, -b1hw, 2 * b1hw, 2 * b1hw)
        black_pen = QtG.QPen()
        black_pen.setWidth(2)
        black_brush = QtG.QBrush(QtG.QColor("black"))
        red_pen = QtG.QPen(QtG.QColor("red"))
        red_brush = QtG.QBrush(QtG.QColor("red"))
        # Field stars: filled circles sized by magnitude; highlighted
        # AGASC IDs drawn in red.
        for star in self.stars:
            s = symsize(star['MAG'])
            rect = QtC.QRectF(star['row'] - s / 2, -star['col'] - s / 2, s, s)
            if self._highlight is not None and star['AGASC_ID'] in self._highlight:
                self.scene.addEllipse(rect, red_pen, red_brush)
            else:
                self.scene.addEllipse(rect, black_pen, black_brush)
        if self._catalog is not None:
            cat = Table(self._catalog)
            cat['row'], cat['col'] = yagzag_to_pixels(cat['yang'], cat['zang'], allow_bad=True)
            # BOT entries count as both guide and acquisition stars.
            gui_stars = cat[(cat['type'] == 'GUI') | (cat['type'] == 'BOT')]
            acq_stars = cat[(cat['type'] == 'ACQ') | (cat['type'] == 'BOT')]
            fids = cat[cat['type'] == 'FID']
            mon_wins = cat[cat['type'] == 'MON']
            # Guide stars: green circles.
            for gui_star in gui_stars:
                w = 20
                rect = QtC.QRectF(
                    gui_star['row'] - w,
                    -gui_star['col'] - w,
                    w * 2,
                    w * 2
                )
                self.scene.addEllipse(
                    rect,
                    QtG.QPen(QtG.QColor("green"), 3)
                )
            # Acquisition stars: blue boxes sized by search half-width.
            for acq_star in acq_stars:
                self.scene.addRect(
                    acq_star['row'] - acq_star['halfw'] / 5,
                    -acq_star['col'] - acq_star['halfw'] / 5,
                    acq_star['halfw'] * 2 / 5,
                    acq_star['halfw'] * 2 / 5,
                    QtG.QPen(QtG.QColor("blue"), 3)
                )
            # Monitor windows: orange boxes.
            for mon_box in mon_wins:
                # starcheck convention was to plot monitor boxes at 2X halfw
                self.scene.addRect(
                    mon_box['row'] - (mon_box['halfw'] * 2 / 5),
                    -mon_box['col'] - (mon_box['halfw'] * 2 / 5),
                    mon_box['halfw'] * 4 / 5,
                    mon_box['halfw'] * 4 / 5,
                    QtG.QPen(QtG.QColor(255, 165, 0), 3)
                )
            # Fid lights: red circle with a centered cross.
            for fid in fids:
                w = 25
                rect = QtC.QRectF(
                    fid['row'] - w,
                    -fid['col'] - w,
                    w * 2,
                    w * 2
                )
                self.scene.addEllipse(
                    rect,
                    QtG.QPen(QtG.QColor("red"), 3)
                )
                self.scene.addLine(
                    fid['row'] - w, -fid['col'],
                    fid['row'] + w, -fid['col'],
                    QtG.QPen(QtG.QColor("red"), 3)
                )
                self.scene.addLine(
                    fid['row'], -fid['col'] - w,
                    fid['row'], -fid['col'] + w,
                    QtG.QPen(QtG.QColor("red"), 3)
                )
        # self.view.centerOn(QtC.QPointF(self._origin[0], self._origin[1]))
        self.view.re_center()
| [
"javierggt@yahoo.com"
] | javierggt@yahoo.com |
089d2fbd735e86518961b5f6f385cd22e1d8c136 | 7345f03494fa8b06ea8215770305718618af1d41 | /nenv/bin/wheel | 1519777ccc4cf6bd72d97e5bd0f7a51fe7d8902d | [] | no_license | mr-kaveh/simplerestapi | 328fd839b74afcfa2a41ff71d4bb556457535519 | a5c68c55018c938211c2f25e19bf4e43b5d99f36 | refs/heads/master | 2022-10-29T11:55:56.033385 | 2022-10-18T15:39:56 | 2022-10-18T15:39:56 | 169,194,555 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/home/hossein/myScripts/apiRepo/nenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
# Console-script shim (the usual pip/setuptools wrapper): normalize
# argv[0] by stripping a "-script.py(w)" or ".exe" suffix, then exit
# with the return code of wheel's CLI main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"mr.hdavoodi@gmail.com"
] | mr.hdavoodi@gmail.com | |
7e05768d55a4ec7a0a86e24d4b7c3bf8ed0954d0 | b67f4c5e1be7d0b2926b71e0673a2b05e6143421 | /myspider/myspider/spiders/detail.py | 5de5ffc906f5b4e03440239ad5fa6bf54332fe2d | [] | no_license | Master-Wu/DJ | 81234e27fdb6980db5630c4ec7970c194c7295c1 | c859d0084f5065bdcd0f0902de95fa8126e739aa | refs/heads/master | 2020-03-25T23:08:03.393589 | 2018-08-16T11:57:47 | 2018-08-16T11:57:47 | 144,260,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,900 | py | # -*- coding: utf-8 -*-
# __author__ = "吴道子mr.worth@qq.com"
import pymysql
import scrapy
import json
from argparse import Namespace
from myspider.items import MyspiderItem
# Force the site locale cookie to en_GB (not Chinese) so that city and
# airport names come back in English only -- with the Chinese locale
# the mixed Chinese/English names could not be extracted reliably.
cookies2 = {
    "w_locale": "en_GB"
}
def json_to_object(data):
    """Deserialize a JSON string into a namespace-style Python object.

    Every JSON object (dict) in *data* becomes an argparse.Namespace,
    so nested fields are reachable as attributes
    (e.g. ``obj.origin.friendlyName``).

    :param data: JSON text (str)
    :return: Namespace tree mirroring the JSON structure
    """
    def as_namespace(mapping):
        return Namespace(**mapping)

    return json.loads(data, object_hook=as_namespace)
class DetailSpider(scrapy.Spider):
    """Scrape flight-detail pages on flightaware.com.

    Seed URLs come from the MySQL table `test_flight` (column
    ``dateHref``); each detail page embeds a JSON blob in its 40th
    <script> tag, which is parsed into a :class:`MyspiderItem`.
    """

    name = 'detail'
    allowed_domains = ['flightaware.com']
    start_urls = []

    # Override start_requests(): seed the crawl from the database
    # rather than from start_urls.
    def start_requests(self):
        # Collect the flight-detail URLs gathered by the earlier
        # segmented crawl into detailUrlList.
        # NOTE(review): blank host/user/passwd look like placeholders
        # that must be configured before this spider can run.
        conn = pymysql.connect(host="", port=3306, user="", passwd="", db="flight", charset="utf8")
        cur = conn.cursor()
        print("详细信息爬虫"*6)
        sql_select_all_dateHref = "select dateHref from `test_flight` WHERE inserted=5 and idid<101"
        cur.execute(sql_select_all_dateHref)
        rows = cur.fetchall()  # all dateHref values matching the SELECT above
        detailUrlList = []
        iii = 0  # progress counter for non-empty URLs
        for dr in rows:
            if dr[0]:
                detailUrlList.append(dr[0])
                print(iii)
                iii += 1
            else:
                # Empty dateHref: emit a blank separator, as before.
                print("""
""")
        conn.close()
        # Request every collected detail page with the en_GB locale
        # cookie so the site answers in English.
        for url in detailUrlList:
            yield scrapy.Request(url, cookies=cookies2)

    count = 1  # running count of parsed pages (debug aid)

    def parse(self, response):
        """Extract flight fields from the page's embedded JSON and
        yield one populated MyspiderItem."""

        def grab(getter, default="unknown"):
            # Evaluate *getter*; fall back to *default* when any part of
            # the attribute path is missing.  Replaces the previous
            # bare ``except:`` blocks -- ``except Exception`` no longer
            # swallows KeyboardInterrupt/SystemExit.
            try:
                return getter()
            except Exception:
                return default

        # The JSON lives in the 40th <script> tag: strip the
        # 25-character assignment prefix and the trailing ";".
        script_40_splited_selector = response.xpath('/html/body/script[40]/text()').extract_first()
        script_40_splited = script_40_splited_selector[25:-1]
        json_to_dict_script_40_splited = json.loads(script_40_splited)
        # The single key under "flights" identifies this flight.
        key = list(json_to_dict_script_40_splited['flights'].keys())[0]
        flight_value_str = json.dumps(json.loads(script_40_splited)["flights"])
        # Drop the '{"<key>": ' wrapper and the trailing "}" so only the
        # flight object itself is converted to attribute access.
        py_object = json_to_object(flight_value_str[len(key) + 5:-1])

        # This page's own URL -- needed later to update the DB row;
        # strip a possible "uk." subdomain prefix.
        self_url = grab(lambda: response.xpath(
            '//*[@id="popupLogin"]/div/div[2]/div/form/input[1]/@value'
        ).extract_first().replace("uk.", ""))

        airline = grab(lambda: py_object.airline.fullName)                               # carrier
        home_name = grab(lambda: py_object.origin.friendlyLocation.split(',')[0])        # origin city
        home_airport = grab(lambda: py_object.origin.friendlyName)                       # origin airport
        point_name = grab(lambda: py_object.destination.friendlyLocation.split(',')[0])  # destination city
        point_airport = grab(lambda: py_object.destination.friendlyName)                 # destination airport
        plan_off_time = grab(lambda: py_object.takeoffTimes.scheduled)                   # scheduled takeoff
        act_off_time = grab(lambda: py_object.takeoffTimes.estimated)                    # actual/estimated takeoff
        plan_gate_off_time = grab(lambda: py_object.gateDepartureTimes.scheduled)        # scheduled gate departure
        plan_land_time = grab(lambda: py_object.landingTimes.scheduled)                  # scheduled landing
        act_land_time = grab(lambda: py_object.landingTimes.estimated)                   # actual/estimated landing
        plan_gate_land_time = grab(lambda: py_object.gateArrivalTimes.scheduled)         # scheduled gate arrival

        item = MyspiderItem()
        item['selfUrl'] = self_url
        item['airline'] = airline
        item['homeName'] = home_name
        item['homeAirport'] = home_airport
        item['pointName'] = point_name
        item['pointAirport'] = point_airport
        item['planOffTime'] = plan_off_time
        item['actOffTime'] = act_off_time
        item['planGateOffTime'] = plan_gate_off_time
        item['planLandTime'] = plan_land_time
        item['planGateLandTime'] = plan_gate_land_time
        item['actLandTime'] = act_land_time
        print("你看到的是:", self.count)
        self.count += 1
        yield item
| [
"mr_worth@163.com"
] | mr_worth@163.com |
b7ed28d5198ac65f3ff3ae432d41130615dab2c3 | c5695fdb2d97511d069a33b0c0468943b04ddf0a | /practice/exercise33.py | f83964f94fb9fa79e1b51ef221e8c5790bb2bcfb | [] | no_license | nehahooda/python_zed_shaw | dfab32d8b1470f4c5dd50d5e383c501d4418e5f4 | d0f830f84f2f493c748f025ef710be45c3165f0b | refs/heads/master | 2021-01-20T09:49:11.631056 | 2017-08-26T15:08:33 | 2017-08-26T15:08:33 | 90,289,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | i=0
numbers = []
# Python 2 script: build the list [0..5] with a while loop, echoing
# progress on every iteration.
while i<6:
    print "at the top i is %d" %i
    numbers.append(i)
    i=i+1
    print "numbers now",numbers
    print "at the bottom i is %d" % i

# Dump the collected numbers one per line.
print "the numbers:"
for num in numbers:
    print num
"nehahooda2012@gmail.com"
] | nehahooda2012@gmail.com |
8644dd39ff5043271d890631919efb36f3019572 | c3eec4400793de4c1ce3bc4b98acec2992e3c3b8 | /PythonBasics/UdacityRefresher/Class_Practice.py | 269bea3a2b7fee902cfbce6a9c756dcf69f117e7 | [] | no_license | paragshah7/Python-CodingChallenges-LeetCode | f1bef486eb145da0a69ae57287106432317b1078 | 85e64f106715e0bdd5ae3ba13729a51406e1f5df | refs/heads/main | 2023-07-12T11:04:26.748696 | 2021-09-06T22:29:28 | 2021-09-06T22:29:28 | 301,836,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,752 | py | # Udacity
class Person:
    """Simple record of a person's name, age and birthday month."""

    def __init__(self, name, age, month):
        self.name = name
        self.age = age
        self.birthday_month = month

    def birthday(self):
        """Advance this person's age by one year."""
        self.age += 1
def create_person_objects(names, ages, months):
    """Build one Person per (name, age, month) triple.

    The three sequences are zipped positionally; the result list is
    truncated to the shortest input, as before.
    """
    return [Person(name, age, month)
            for name, age, month in zip(names, ages, months)]
def get_april_birthdays(people):
    """Bump the age of everyone born in April and report them.

    Mutates matching Person objects in place (age += 1) and returns a
    dict mapping each April-born person's name to the updated age.
    """
    april_birthdays = {}
    april_people = (p for p in people if p.birthday_month == 'April')
    for p in april_people:
        p.age = p.age + 1
        april_birthdays[p.name] = p.age
    return april_birthdays
def get_most_common_month(people):
    """Return the birthday month occurring most often in *people*.

    Counts each person's ``birthday_month``; ties resolve to the
    earliest month in calendar order (same as the previous manual
    scan). Returns ``None`` when *people* is empty.
    """
    months = {'January': 0, 'February': 0, 'March': 0, 'April': 0, 'May': 0,
              'June': 0, 'July': 0, 'August': 0, 'September': 0, 'October': 0,
              'November': 0, 'December': 0}
    for person in people:
        months[person.birthday_month] += 1
    # Idiomatic replacement for the manual max-tracking loop: max() over
    # the month keys with the count as key function. Guard the
    # all-zero case so empty input still yields None.
    if not any(months.values()):
        return None
    return max(months, key=months.get)
def test():
    """Exercise driver: build 100 Person objects from fixed data and
    print the results of the two functions above."""
    # Here is the data for the test. Assume there is a single most common month.
    names = ['Howard', 'Richard', 'Jules', 'Trula', 'Michael', 'Elizabeth', 'Richard', 'Shirley', 'Mark', 'Brianna',
             'Kenneth', 'Gwen', 'William', 'Rosa', 'Denver', 'Shelly', 'Sammy', 'Maryann', 'Kathleen', 'Andrew',
             'Joseph', 'Kathleen', 'Lisa', 'Viola', 'George', 'Bonnie', 'Robert', 'William', 'Sabrina', 'John',
             'Robert', 'Gil', 'Calvin', 'Robert', 'Dusty', 'Dario', 'Joeann', 'Terry', 'Alan', 'Rosa', 'Jeane', 'James',
             'Rachel', 'Tu', 'Chelsea', 'Andrea', 'Ernest', 'Erica', 'Priscilla', 'Carol', 'Michael', 'Dale', 'Arthur',
             'Helen', 'James', 'Donna', 'Patricia', 'Betty', 'Patricia', 'Mollie', 'Nicole', 'Ernest', 'Wendy',
             'Graciela', 'Teresa', 'Nicole', 'Trang', 'Caleb', 'Robert', 'Paul', 'Nieves', 'Arleen', 'Milton', 'James',
             'Lawrence', 'Edward', 'Susan', 'Patricia', 'Tana', 'Jessica', 'Suzanne', 'Darren', 'Arthur', 'Holly',
             'Mary', 'Randal', 'John', 'Laura', 'Betty', 'Chelsea', 'Margaret', 'Angel', 'Jeffrey', 'Mary', 'Donald',
             'David', 'Roger', 'Evan', 'Danny', 'William']
    ages = [17, 58, 79, 8, 10, 57, 4, 98, 19, 47, 81, 68, 48, 13, 39, 21, 98, 51, 49, 12, 24, 78, 36, 59, 3, 87, 94, 85,
            43, 69, 15, 52, 57, 36, 52, 5, 52, 5, 33, 10, 71, 28, 70, 9, 25, 28, 76, 71, 22, 35, 35, 100, 9, 95, 69, 52,
            66, 91, 39, 84, 65, 29, 20, 98, 30, 83, 30, 15, 88, 89, 24, 98, 62, 94, 86, 63, 34, 23, 23, 19, 10, 80, 88,
            67, 17, 91, 85, 97, 29, 7, 34, 38, 92, 29, 14, 52, 94, 62, 70, 22]
    months = ['January', 'March', 'January', 'October', 'April', 'February', 'August', 'January', 'June', 'August',
              'February', 'May', 'March', 'June', 'February', 'August', 'June', 'March', 'August', 'April', 'April',
              'June', 'April', 'June', 'February', 'September', 'March', 'July', 'September', 'December', 'June',
              'June', 'August', 'November', 'April', 'November', 'August', 'June', 'January', 'August', 'May', 'March',
              'March', 'March', 'May', 'September', 'August', 'April', 'February', 'April', 'May', 'March', 'March',
              'January', 'August', 'October', 'February', 'November', 'August', 'June', 'September', 'September',
              'January', 'September', 'July', 'July', 'December', 'June', 'April', 'February', 'August', 'September',
              'August', 'February', 'April', 'July', 'May', 'November', 'December', 'February', 'August', 'August',
              'September', 'December', 'February', 'March', 'June', 'December', 'February', 'May', 'April', 'July',
              'March', 'June', 'December', 'March', 'July', 'May', 'September', 'November']
    people = create_person_objects(names, ages, months)
    # Calls to the two functions you have completed.
    print(get_april_birthdays(people))
    print(get_most_common_month(people))
# Run the exercise when the module is loaded.
test()
# Expected result:
# {'Michael': 11, 'Erica': 72, 'Carol': 36, 'Lisa': 37, 'Lawrence': 87, 'Joseph': 25, 'Margaret': 35, 'Andrew': 13, 'Dusty': 53, 'Robert': 89}
# August
| [
"paragshah367@gmail.com"
] | paragshah367@gmail.com |
a55b550c5b6a6595f4a214f8d53c881dd8b42f67 | 4de5291314b071a85aeb2ec35a66403ebe32cb3d | /oops/class.py | 7122612e6ff43adb4513e08651f87dfc3af1696e | [] | no_license | sudhatumuluru/PythonPractise | d5e71cdac6d354921bc43f65b983b994feba8214 | 484b55e8d22ef34c9ae63b46afa0afae0918d5a2 | refs/heads/master | 2021-01-17T19:52:06.015581 | 2016-06-16T21:20:20 | 2016-06-16T21:20:20 | 61,326,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | class mycla():
'This gives the declaration of the class'
def dis(self):
print "class name: myclass"
obj=mycla()
obj.dis()
| [
"sreesudha.tumuluru@gmail.com"
] | sreesudha.tumuluru@gmail.com |
f655d6f7453d9e2d72506d187a52cd29ee2b45e0 | 40844ef4ab8a8a0945d3c9db53002310d9fbeb72 | /legacy/pythonCrashCourse/tips/hello.py | c81dd366849aab2a2ff7829a389d83a4b962ee0a | [] | no_license | dky/cb | e2f0937e262c24fe040530c462e84bbe83ab60e3 | a69e0a12f0e91fdd36398a16f373d207a2d95b9c | refs/heads/master | 2023-03-23T23:37:53.046644 | 2021-03-10T04:48:57 | 2021-03-10T04:48:57 | 224,289,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | print("This will be printed.")
| [
"don.d.ky@gmail.com"
] | don.d.ky@gmail.com |
ec072e7f784d6f0a00b93ae94bd00133cbbc7aa1 | caa43b83e6fdabbd63fecb6b3e9aa14b875596f1 | /webservice-upload/app.py | 51de2700b9c9b7ce12e0552f54a431b84532ad50 | [] | no_license | thanakarn57/nerwork2 | 243acfa66b8565d7d8919edf6adfa6fd24fe9239 | aa96054c26871d683717d396f90ff735ce5bb65b | refs/heads/master | 2021-05-04T20:45:47.292885 | 2018-02-01T16:04:40 | 2018-02-01T16:04:40 | 119,861,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from flask import Flask , request
from flask_restful import Resource , Api,reqparse
import json ,time
# Flask application and its Flask-RESTful API wrapper.
app = Flask(__name__)
api = Api(app)
# NOTE(review): this parser is never used below -- left as-is.
parser = reqparse.RequestParser()
class Upload(Resource):
    """REST resource accepting multipart file uploads (POST /upload)."""

    def post(self):
        # Reject requests without a multipart "file" part.
        if 'file' not in request.files:
            return {"code": 404, "desc": "upload unsuccessful"}
        upload = request.files['file']
        # The filename is client-controlled (untrusted): keep only the
        # final path component so a crafted name like "../../etc/passwd"
        # cannot escape the working directory (path-traversal hardening).
        import os
        safe_name = os.path.basename(upload.filename)
        upload.save(safe_name)
        return {"code": 200, "desc": "upload success"}
# Expose the upload endpoint at /upload.
api.add_resource(Upload, '/upload')

if __name__ == '__main__':
    # Listen on all interfaces, port 5500 (development server).
    app.run(host='0.0.0.0', port=5500)
| [
"thanakarno57@email.nu.ac.th"
] | thanakarno57@email.nu.ac.th |
b068a33104b190dfe987923899df18b4fb43123f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_ports.py | 0fffb49ed97cda99bbb0989d662e80ae11e7425e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#class header
class _PORTS():
def __init__(self,):
self.name = "PORTS"
self.definitions = port
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['port']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7f9c10e48071518561b79cb0525890d87591fae1 | 98d6335b369165ee6d3058ae242c671273c703d0 | /day1/day1_part2.py | 4933c4600ca5a22b9941d60c96a9a59771fe5cd6 | [] | no_license | elsenorbw/advent-of-code-2020 | 6830be42fa5251b7654b33db4643cd1b8473c25b | cd4cc4ddf884de0a7d8a7fa95633525fa87793a0 | refs/heads/main | 2023-08-22T07:22:02.967748 | 2021-10-08T11:18:29 | 2021-10-08T11:18:29 | 317,477,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,903 | py | # --- Day 1: Report Repair ---
# After saving Christmas five years in a row, you've decided to take a
# vacation at a nice resort on a tropical island. Surely, Christmas will go
# on without you.
#
# The tropical island has its own currency and is entirely cash-only. The gold
# coins used there have a little picture of a starfish; the locals just call
# them stars. None of the currency exchanges seem to have heard of them, but
# somehow, you'll need to find fifty of these coins by the time you arrive
# so you can pay the deposit on your room.
#
# To save your vacation, you need to get all fifty stars by December 25th.
#
# Collect stars by solving puzzles. Two puzzles will be made available on each
# day in the Advent calendar; the second puzzle is unlocked when you complete
# the first. Each puzzle grants one star. Good luck!
#
# Before you leave, the Elves in accounting just need you to fix your expense
# report (your puzzle input); apparently, something isn't quite adding up.
#
# Specifically, they need you to find the two entries that sum to 2020 and
# then multiply those two numbers together.
#
# For example, suppose your expense report contained the following:
#
# 1721
# 979
# 366
# 299
# 675
# 1456
# In this list, the two entries that sum to 2020 are 1721 and 299. Multiplying
# them together produces 1721 * 299 = 514579, so the correct answer is 514579.
#
# Of course, your expense report is much larger. Find the two entries that sum
# to 2020; what do you get if you multiply them together?
#
# To begin, get your puzzle input.
# Your puzzle answer was 138379.
#
# The first half of this puzzle is complete! It provides one gold star: *
#
# --- Part Two ---
# The Elves in accounting are thankful for your help; one of them even offers you
# a starfish coin they had left over from a past vacation. They offer you a second
# one if you can find three numbers in your expense report that meet the same criteria.
#
# Using the above example again, the three entries that sum to 2020 are 979,
# 366, and 675. Multiplying them together produces the answer, 241861950.
#
# In your expense report, what is the product of the three entries that sum to 2020?
#
from typing import Set, Tuple
filename = "input.txt"
def load_list_to_set(filename: str) -> Set[int]:
    """
    Read the file at *filename* line by line, echo each stripped line, and
    collect every line's integer value into the returned set.
    """
    values = set()
    with open(filename, "r") as report:
        for raw_line in report:
            stripped = raw_line.strip()
            print(stripped)
            values.add(int(stripped))
    return values
def locate_2020_pair(
    possibilities: Set[int], target_value: int = 2020
) -> Tuple[int, int]:
    """
    Return the first (x, y) drawn from the set with x + y == target_value,
    or None when no such pair exists.
    """
    for candidate in possibilities:
        complement = target_value - candidate
        if complement in possibilities:
            return (candidate, complement)
    return None
def locate_2020_triplet(
    possibilities: Set[int], target_value: int = 2020
) -> Tuple[int, int, int]:
    """
    Return the first (x, y, z) drawn from the set with x + y + z ==
    target_value, or None when no such triplet exists.  Delegates the
    two-element search to locate_2020_pair on the remaining values.
    """
    for candidate in possibilities:
        others = possibilities - {candidate}
        pair = locate_2020_pair(others, target_value=target_value - candidate)
        if pair is not None:
            return (candidate, *pair)
    return None
def main() -> None:
    """Solve both parts: print the 2020 pair product and the 2020 triplet product."""
    # load the list
    the_list = load_list_to_set(filename)
    # find the 2020 pair (part 1)
    the_pair = locate_2020_pair(the_list)
    # print the pair and the result
    print(f"{the_pair} -> {the_pair[0] * the_pair[1]}")
    # and the triplet (part 2)
    the_triplet = locate_2020_triplet(the_list)
    print(
        f"{the_triplet} -> {sum(the_triplet)} -> {the_triplet[0] * the_triplet[1] * the_triplet[2]}"
    )


if __name__ == "__main__":
    # Guard so importing this module for its helpers no longer runs the solve.
    main()
| [
"bry@home.com"
] | bry@home.com |
ca26735ce0125a5ae0f72ab61952099184dd5f2f | b33890fa8ca3239fdf6d8c06a3032f8b30991a79 | /cameron/p047/p47.py | e8d3196a7c3a5b7b4ea14d2026de2161203efbe1 | [] | no_license | Big-Theta/EulerProblems | 9443eb6a7f29bfb631ffd5e2439c182f37de70c4 | adc3ed1c3a57ab50d41b30bddc9678db5dd456d4 | refs/heads/master | 2021-01-23T13:17:00.802583 | 2013-10-06T03:06:56 | 2013-10-06T03:06:56 | 13,356,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | #!/usr/bin/python
#answer is 134043
from num_tools import *
def test_gray_upto(x):
    """Print the binary Gray code of every i < x whose Gray code has exactly
    four set bits.  (Python 2; bin2gray comes from num_tools.)"""
    for i in range(x):
        gray = bin2gray(i)
        if bit_count(gray) == 4:
            print bin(gray)
#print gray2bin("1010101010101")
def mult_total(arr):
    """Return the product of every element of arr (1 for an empty sequence)."""
    product = 1
    for value in arr:
        product = product * value
    return product
def get_all_factors(primes):
    """Return, sorted ascending, every prime power p**k (p from primes)
    that is strictly below the largest prime in the list."""
    limit = primes[-1]
    factors = []
    for p in primes:
        exponent = 1
        candidate = p
        while candidate < limit:
            factors.append(candidate)
            exponent += 1
            candidate = p ** exponent
    factors.sort()
    return factors
def bit_count(num):
    """Return the number of set bits in num, using Brian Kernighan's trick:
    n & (n - 1) clears the lowest set bit each pass."""
    set_bits = 0
    while num:
        num &= num - 1
        set_bits += 1
    return set_bits
def factor_indexes(which_f):
    """Return the bit positions set in the mask which_f, lowest bit first.

    Bits are consumed with divmod; when the quotient reaches zero the top
    bit has just been divided out, so its position i is appended after the
    loop.  Also prints the list for tracing.
    """
    a, b = divmod(which_f, 2)
    i = 0
    indexes = []
    while a:
        if b:
            indexes += [i]
        i += 1
        a, b = divmod(a, 2)
    indexes += [i]
    print indexes
    return indexes
def valid_ans(factors, which_f):
    # Placeholder validity check: every candidate mask is currently accepted.
    return True
def get_next_ans(factors, which_f):
    """Advance which_f to the next mask with exactly four set bits that
    passes valid_ans, then return [product of the selected factors, mask]."""
    which_f += 1
    while bit_count(which_f) != 4 or not valid_ans(factors, which_f):
        which_f += 1
    product = 1
    for index in factor_indexes(which_f):
        product *= factors[index]
    return [product, which_f]
def p47():
    """Search for four consecutive products with four distinct prime-power
    factors by walking 4-bit masks over the prime-power factor list.

    NOTE(review): the guard `which_f != 0b1111` can only stop the loop if
    get_next_ans ever yields that mask -- confirm the intended termination.
    """
    primes = get_n_primes(10000)
    factors = get_all_factors(primes)
    ans = [0, 0, 0, 0]
    which_f = 0b10111
    print "num_bits = %i" % bit_count(which_f)
    done = False
    while not done and which_f != 0b1111:
        # Slide a window of the last four candidate products.
        next_ans, which_f = get_next_ans(factors, which_f)
        ans = ans[1:] + [next_ans]
        if (ans[0] + 1 == ans[1] and ans[1] + 1 == ans[2] and ans[2] + 1 == ans[3]):
            done = True
    print ans[0]
def get_factors_47(num, primes):
    """Collect up to five distinct prime factors of num from the primes list.

    NOTE(review): `primes / 3` divides a list by an int and raises TypeError
    as written (unless get_n_primes returns an array type) -- presumably
    `primes[-1] / 3` was intended; confirm.  The "extending primes list"
    loop also never updates max_num, so it would not terminate if entered.
    """
    i = 0
    ans = []
    max_num = primes / 3
    if len(primes) == 0:
        primes += get_n_primes(1)
    while num > max_num:
        print "extending primes list"
        primes = get_n_primes(len(primes) * 2)[len(primes):]
    while num > 0 and i < len(primes) and len(ans) < 5:
        test_num, r = divmod(num, primes[i])
        if r:
            i += 1
        else:
            num = test_num
            if not primes[i] in ans:
                ans += [primes[i]]
    return ans
def p47_easy():
    """Brute-force Project Euler 47: find the first of four consecutive
    integers that each have exactly four distinct prime factors."""
    primes = get_n_primes(100000)
    ans = 0
    in_a_row = 0
    while in_a_row < 4:
        if len(get_factors_47(ans, primes)) == 4:
            in_a_row += 1
            print ans
        else:
            in_a_row = 0
        ans += 1
    # ans has advanced four past the start of the qualifying run.
    print ans - 4
if __name__ == "__main__":
    # Run the brute-force solver (the mask-based p47() variant is disabled).
    p47_easy()
    #print get_all_factors(get_n_primes(40))
    #p47()
| [
"c.d.evans87@gmail.com"
] | c.d.evans87@gmail.com |
6afe8571e5efb5c4c6ebcec6460e3eff20f3c450 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2939/60688/289152.py | 06318fcf6a295848bf438a55ad0e226977a27ba4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | strings=input()
# Tokenise the input line: first number = how many values to generate,
# second = how many digits to delete afterwards.
numslist = (strings.split(" "));
# NOTE(review): leftover debug trace for one specific judge input.
if "5 4" in strings:
    print(numslist)
numslist=list(int(x) for x in numslist);
times=numslist[0];
delnums=numslist[1];
# Generate the `times` smallest members of the set closed under n -> 2n+1
# and n -> 4n+5 starting from 1 (mynumlist acts as a BFS queue).
mynumlist=[1];
finalnums=[];
for i in range(times):
    num=mynumlist.pop(0);
    mynumlist.append(2*num+1)
    mynumlist.append(4*num+5)
    finalnums.append(num);
finalnums.extend(mynumlist)
finalnums=sorted(finalnums);
finalnums=finalnums[0:times]
finalnums=list(str(x) for x in finalnums);
first="".join(finalnums);
secondlist=list(first);
secondlist=list([int(x)for x in secondlist]);
# (translated) Choose N-M digits out of N, keeping their original order, to
# form the maximum number.  Greedy: scan front to back, delete the left digit
# of the first pair with l < r, and restart the scan after every deletion.
# NOTE(review): if the remaining digits are non-increasing while deletions
# are still owed, the inner scan pops nothing and this loop never ends --
# confirm against the judge's inputs.
allnums=delnums;
while (allnums!=0):
    for i in range(len(secondlist)-1):
        if secondlist[i]<secondlist[i+1]:
            secondlist.pop(i);
            allnums-=1;
            break
secondlist=[str(x)for x in secondlist];
res="".join(secondlist)
print(first)
print(res,end="") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7d3d8008514d7911025c4b7264c1e28decb92aeb | 63a0ef9a51aeea42904e5a861b16c57bd78c4c13 | /linked_list.py | 962a1dd424dc6235ff1b7ed7d511cb0eae66efa1 | [] | no_license | Nicolas1st/DataStructures | dd2938f8a8632bcd63049c3d2fef928e2ef70e20 | 434f458863f64156225fa38fb601d11bfa605ecc | refs/heads/main | 2023-06-08T06:24:00.396573 | 2021-07-01T11:04:25 | 2021-07-01T11:04:25 | 334,188,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | class Node:
    def __init__(self, value, next_node=None):
        """Store the payload and an optional reference to the next node."""
        self.value = value  # payload held by this node
        self.next_node = next_node  # next Node in the chain, or None at the tail
class SinglyLinkedList:
    """A singly linked list built from an initial sequence of values.

    Mutating methods report success/failure as booleans and print
    diagnostics, matching the original interface.
    NOTE(review): last_node is set by the constructor but not maintained by
    delete()/insert() -- kept that way for compatibility.
    """

    def __init__(self, array):
        """Build the list from *array*; an empty sequence yields an empty list.

        (Fix: the original crashed with IndexError on an empty array.)
        """
        if not array:
            self.first_node = None
            self.last_node = None
            return
        self.first_node = Node(array[0])
        previous_node = self.first_node
        for value in array[1:]:
            new_node = Node(value)
            previous_node.next_node = new_node
            previous_node = new_node
        self.last_node = previous_node

    def delete(self, node_index):
        """Removes an element from a singly linked list
        Input: index of the node to be removed
        Return: True if removed, False if there is no node with the index given"""
        if node_index < 0 and type(node_index) is int:
            print("Index is smaller than 0")
            return False
        if self.first_node is None:
            print("The list is empty")
            return False
        if node_index == 0:
            # Attribute access cannot fail here, so no try/except is needed
            # (the original used a bare `except:`).
            self.first_node = self.first_node.next_node
            return True
        previous_node = self.first_node
        index = 1
        while True:
            current_node = previous_node.next_node
            if current_node is None:
                # Walked off the end: nothing was removed.  (Fix: the
                # original wrongly returned True when index == node_index
                # here, despite deleting nothing.)
                print(f"The element at index {node_index} does not exist")
                print(f"The list length is equal to {index}")
                return False
            if index == node_index:
                # Works for tail nodes too: next_node is then None.
                previous_node.next_node = current_node.next_node
                return True
            previous_node = current_node
            index += 1

    def insert(self, value, node_index):
        """Insert *value* before position node_index (0 = new head).

        Return True on success, False when node_index is negative or more
        than one past the current end of the list.
        """
        if node_index < 0:
            print(f"Can not insert the {value} at index {node_index}")
            return False
        if node_index == 0:
            self.first_node = Node(value, self.first_node)
            return True
        previous_node = self.first_node
        index = 1
        while previous_node is not None:
            if node_index == index:
                previous_node.next_node = Node(value, previous_node.next_node)
                return True
            previous_node = previous_node.next_node
            index += 1
        return False

    def read(self, node_index):
        """Return the value stored at node_index, or None (with a message)
        when the index is out of range.

        (Fix: the original raised AttributeError when reading from an empty
        list; the out-of-range message also had a "lenght" typo.)
        """
        current_node = self.first_node
        index = 0
        while current_node is not None:
            if node_index == index:
                return current_node.value
            current_node = current_node.next_node
            index += 1
        print(f"The node at index {node_index} does not exist")
        print(f"The length of the list is equal to {index}")
        return None

    def __repr__(self):
        """Render the list as the str() of its values, e.g. "[1, 2, 3]"."""
        values = []
        node = self.first_node
        while node is not None:
            values.append(node.value)
            node = node.next_node
        return str(values)
if __name__ == "__main__":
    # Smoke-test/demo: build a list of 0..9 and exercise each operation.
    array = list(range(10))
    linked_list = SinglyLinkedList(array)
    print(linked_list)
    linked_list.delete(5)
    print(linked_list)
    print(linked_list.read(5))
    linked_list.insert(11, 1)
    linked_list.insert(12, 0)
    linked_list.insert("last element", 11)
    print(linked_list)
| [
"nicolashereigo@gmail.com"
] | nicolashereigo@gmail.com |
1aa210b0d4aa1209e309316273c6a81b2deaf6f3 | dde43756d6dc56e38f81a097fa02082eaa787cfd | /OCR/lib/ocrdetect/valid.py | 372f420a11c60f29f8a74a7a523ed55becb20473 | [] | no_license | linshaoxin-maker/myproject | 36bde684bfe57d1a52bf87b3919d29eb703e3f90 | 606623bf41fd1741541f2ef4a6aa75404663e353 | refs/heads/master | 2023-04-24T02:08:03.193303 | 2021-05-14T01:58:27 | 2021-05-14T01:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,449 | py | from __future__ import division
from ssd import build_ssd
import os
from init import init_args
import time
import logging
import datetime
from utils import helpers
from config import *
import cv2
import numpy as np
import torch
from torchvision import transforms
from matplotlib import pyplot as plt
import matplotlib
# Use the Tk backend so figures can be displayed from a plain script.
matplotlib.use('TkAgg')
IMAGE_WINDOW = 100  # padded canvas size in pixels (an earlier value was 200)
IMAGE_SIZE = 100  # side length images are resized to before entering the network
toTensor = transforms.ToTensor()  # converts an HxWxC image to a CxHxW tensor
def alhpa_detect(model, image):
    """Run the SSD character detector on a BGR image and return the detected
    regions cropped out of the original image.

    The image is grayscaled (kept 3-channel), scaled down to fit inside an
    IMAGE_WINDOW square, padded with white, resized to IMAGE_SIZE and fed to
    the model; box coordinates are then mapped back to original pixels.

    NOTE: the function name ("alhpa") is a typo kept for compatibility.
    """
    # (translated) Large images should have their size adjusted here; that
    # handling still needs to be added later.
    image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    image = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
    origin_image = image.copy()
    height, width, _ = image.shape
    use_scale = 1.
    if width > IMAGE_WINDOW or height > IMAGE_WINDOW:
        # (translated) Remember this scale: detected coordinates must later be
        # divided by it to recover original-image positions.
        use_scale = min(IMAGE_WINDOW/width,IMAGE_WINDOW/height)
        image = cv2.resize(image,(0,0),fx=use_scale, fy=use_scale, interpolation=cv2.INTER_NEAREST)
    # Paste the (possibly scaled) image onto a white IMAGE_WINDOW canvas.
    cropped_image = np.full((IMAGE_WINDOW, IMAGE_WINDOW, image.shape[2]), 255)
    cropped_image[0:image.shape[0], 0:image.shape[1],:] = image
    # (translated) Everything is converted to grayscale, matching the training data.
    img = cropped_image.astype(np.float32)
    img = cv2.resize(img, (IMAGE_SIZE,IMAGE_SIZE), interpolation=cv2.INTER_NEAREST)
    img = toTensor(img)
    img = img.unsqueeze_(0)  # add the batch dimension
    y, boxes, scores = model(img)
    detections = y.data
    detect_type = ['alpha_location',]
    recognized_boxes = []
    scores_lists = []
    image_lists = []
    try:
        for i, dtype in enumerate(detect_type):
            i += 1  # shift past class index 0 (presumably background -- confirm)
            j = 0
            # Collect boxes until the confidence drops below the threshold.
            while j < detections.size(2) and detections[0, i, j, 0] >= 0.01:
                # Scale by IMAGE_WINDOW and undo the resize factor to map the
                # box back to original-image pixel coordinates.
                pt = (detections[0, i, j, 1:] * IMAGE_WINDOW / use_scale).cpu().numpy()
                coords = (int(pt[0]), int(pt[1]), int(pt[2]), int(pt[3]))
                recognized_boxes.append(coords)
                scores_lists.append(detections[0, i, j, 0])
                j += 1
        print('scores :',scores_lists)
        for box in recognized_boxes:
            print('detect boxes :', box)
            x0,y0,x1,y1 = box
            if min(x0, y0, x1, y1) < 0:
                print('detect boxes apper error: ', recognized_boxes)
            else:
                aimage = origin_image[y0:y1:,x0:x1,:]
                image_lists.append(aimage)
    except Exception as e:
        print('detect error:', e)
    return image_lists
def detect_char_area(image, min_area = 80,min_y_diff=5):
    """Find candidate character regions in a BGR image.

    Pipeline: grayscale -> Gaussian blur -> adaptive inverse threshold ->
    dilation -> contour detection; contours are filtered by area, position
    and aspect ratio, and the surviving boxes are cropped from the original.

    :param image: BGR image as a numpy array
    :param min_area: minimum contour area for the first filtering pass
    :param min_y_diff: unused; kept for interface compatibility
    :return: list of cropped sub-images, one per accepted box
    """
    origin_image = image.copy()
    image_gray_data = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    img = image_gray_data.copy()
    blur = cv2.GaussianBlur(img, (3,3), 0)
    thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,51,10)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
    dilate = cv2.dilate(thresh, kernel, iterations=1)
    # Debug visualisation of the binarised/dilated image.
    plt.imshow(dilate)
    plt.show()
    contours, hierarchy = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = []
    for cnt in contours:
        if cv2.contourArea(cnt) > min_area:
            rect = cv2.boundingRect(cnt)
            x,y,w,h = rect
            # Keep boxes whose vertical midpoint is below the top 5% of the
            # image, that are not too narrow, and that are large enough.
            if (y+h)/2 > img.shape[0]*0.05 and w/h > 0.4 and cv2.contourArea(cnt) > 90:
                cnts.append([x,y,x+w,y+h, cv2.contourArea(cnt)])
    image_lists = []
    for box in cnts:
        print('detect boxes :', box)
        x0,y0,x1,y1,_ = box
        if min(x0, y0, x1, y1) < 0:
            # BUG FIX: the original printed the undefined name
            # `recognized_boxes` here, raising NameError whenever a negative
            # box occurred; report the in-scope candidate list instead.
            print('detect boxes apper error: ', cnts)
        else:
            aimage = origin_image[y0:y1:,x0:x1,:]
            image_lists.append(aimage)
    return image_lists
def valid(args):
    """Load the trained SSD OCR detector and run character-area detection on
    one hard-coded sample image, showing each detected crop.

    :param args: parsed command-line arguments (uses args.cuda; args.cfg is
        forced to 'ssd100' below)
    """
    # Force the 100x100 OCR configuration regardless of the CLI value.
    args.cfg = 'ssd100'
    weights_path = 'D:\\PROJECT_TW\\git\\data\\ocr\\weights\\ocr_best_ssd100.pth'
    cfg = exp_cfg[args.cfg]
    gpu_id = 0
    if args.cuda:
        gpu_id = helpers.get_freer_gpu()
        logging.debug('Using GPU with id ' + str(gpu_id))
        torch.cuda.set_device(gpu_id)
    print('cfg :', cfg)
    net = build_ssd(args, 'use', cfg, gpu_id, cfg['min_dim'], cfg['num_classes'])
    # Weights are loaded onto the CPU first, then moved to the GPU if requested.
    mod = torch.load(weights_path,map_location=torch.device('cpu'))
    net.load_state_dict(mod)
    net.eval()
    if args.cuda:
        net = net.cuda()
    # (Unused sliding-window locals -- mean/window/stride/stepx/stepy/size --
    # left over from the math-detector variant were removed.)
    image_path = r'D:\PROJECT_TW\git\data\ocr\images\wrong\10517.png'
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    img_lists = detect_char_area(image)
    print('img lists len :', len(img_lists))
    for item in img_lists:
        item = item.astype(np.uint8)
        plt.imshow(item)
        plt.show()
if __name__ == '__main__':
    args = init_args()
    start = time.time()
    try:
        # Per-run log file name; currently only printed, console logging is used.
        filepath=os.path.join(args.log_dir, args.exp_name + "_" + str(round(time.time())) + ".log")
        print('Logging to ' + filepath)
        logging.basicConfig(format='%(process)d - %(asctime)s - %(message)s',
                            datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
        valid(args)
    except Exception as e:
        # Log the full traceback instead of crashing the script.
        logging.error("Exception occurred", exc_info=True)
    end = time.time()
    logging.debug('Total time taken ' + str(datetime.timedelta(seconds=end - start)))
    logging.debug("Training done!")
| [
"hecong@talkweb.com.cn"
] | hecong@talkweb.com.cn |
8b8810a06129d028538ccc83325f5af48057c89f | b7708fb5f8c9e1e41378b52d35b23b9003d242c8 | /visual/topo.py | 71750e764ff7fd1dad76ed1a3b895c1820a9913c | [] | no_license | drelatgithub/MembraneSimulation | 410772aeb6cf6cb3c2a7bb3397a64b57de9fba85 | 62e1f8368366bfcded4c1673315a811dc3868084 | refs/heads/master | 2020-04-05T12:12:30.079421 | 2017-12-15T23:39:40 | 2017-12-15T23:39:40 | 81,012,571 | 0 | 1 | null | 2017-09-12T19:43:57 | 2017-02-05T18:28:56 | C++ | UTF-8 | Python | false | false | 2,476 | py | import numpy as np
class Vertex(object):
    """A mesh vertex; holds only the indices of its neighbouring vertices."""
    def __init__(self):
        self.nIndices = None # Neighbor indices, in counter-clockwise direction
class Facet(object):
    """A triangular facet of the mesh."""

    def __init__(self, vIndices):
        # Indices of vertices in counter-clockwise direction (must have length 3)
        self.vIndices = vIndices

    def __eq__(self, obj):
        """Return whether two facets are identical.

        Two triangles are identical when they contain the same indices in
        the same loop sequence: (1, 4, 5) == (4, 5, 1), but
        (1, 4, 5) != (1, 5, 4).
        """
        # Locate our first index in the other facet (indices are distinct).
        offset = None
        for pos in range(3):
            if obj.vIndices[pos] == self.vIndices[0]:
                offset = pos
                break
        if offset is None:
            return False
        # Compare the two loops aligned at that offset.
        for pos in range(3):
            if self.vIndices[pos] != obj.vIndices[(pos + offset) % 3]:
                return False
        return True
class Meshwork(object):
    """Container for a loaded mesh: a list of Vertex and a list of Facet."""
    def __init__(self):
        self.vertices = []  # Vertex objects, in file order
        self.facets = []  # Facet objects (triangles)
class MeshworkLoader(object):
    """Loads mesh topology (vertex neighbours and triangles) from two
    tab-separated text files into a Meshwork object."""

    def __init__(self, neighborFileName, triangleFileName):
        self.neighborFileName = neighborFileName  # one line of neighbour indices per vertex
        self.triangleFileName = triangleFileName  # one line of three vertex indices per triangle

    def loadTo(self, meshworkObj):
        """Populate meshworkObj.vertices and meshworkObj.facets from the files.

        Re-raises IOError (after printing a diagnostic) if a file is missing.
        """
        print("Loading topology...")
        # Vertices
        try:
            with open(self.neighborFileName) as f:
                for eachLine in f:
                    # np.fromstring(..., sep=...) is deprecated in NumPy;
                    # split() handles the same tab-separated integers.
                    neighborInfo = np.array(eachLine.split(), dtype=int)
                    self._parseNeighborInfo(neighborInfo, meshworkObj)
        except IOError as e:
            print("Cannot open the file containing neighboring vertices.")
            raise
        print("Total vertices: %d" % len(meshworkObj.vertices))
        # Triangles
        try:
            with open(self.triangleFileName) as f:
                for eachLine in f:
                    triangleInfo = np.array(eachLine.split(), dtype=int)
                    self._parseTriangleInfo(triangleInfo, meshworkObj)
        except IOError as e:
            print("Cannot open the file containing triangles.")
            raise
        print("Total triangles: %d" % len(meshworkObj.facets))

    def _parseNeighborInfo(self, neighborInfo, meshworkObj):
        """Wrap one line of neighbour indices in a Vertex and store it."""
        newVertex = Vertex()
        newVertex.nIndices = neighborInfo
        meshworkObj.vertices.append(newVertex)

    def _parseTriangleInfo(self, triangleInfo, meshworkObj):
        """Wrap one line of triangle vertex indices in a Facet and store it."""
        meshworkObj.facets.append(Facet(triangleInfo))
| [
"drel@sohu.com"
] | drel@sohu.com |
7b0dc52e0ab9e95fc6de99bb478dd63c4efa4c46 | 0ecb596f31a29b45840081e01e92dee6aed80db3 | /dist/encodedpolylineexporter/encoded_polyline_exporter_csv.py | f41378f770767a108f5281724cc88f5a43772506 | [] | no_license | pvinton/QGIS-EncodedPolylineExporter-plugin | 1a1d12a2f472a2fe9b97dadab692dea1f2fbe1d9 | 106f55faad11807ada820dd90246df5be093ba60 | refs/heads/master | 2021-07-12T09:39:17.038001 | 2020-07-26T00:35:46 | 2020-07-26T00:35:46 | 16,705,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,369 | py | # -*- coding: utf-8 -*-
"""
/***************************************************************************
EncodedPolylineExporterDialog
A QGIS plugin
Export a vector layer to a .csv file in Encoded Polyline format, with optional geometry simplification
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-04-26
git sha : $Format:%H$
copyright : (C) 2018 by Patrick Vinton
email : patrickvinton@hotmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import csv
import os.path
from qgis import core, gui
from qgis.core import QgsMessageLog, QgsWkbTypes
def encodedPolylineExport( selfIface, layername, node_filename, outputFieldPrefix, field_delimiter, line_terminator ):
    """Export a QGIS vector layer to a CSV whose rows carry each feature's
    attributes plus its geometry as a Google Encoded Polyline string
    (rings/parts separated by "<br>") and its centroid lat/lng.

    NOTE(review): `line_terminator` is accepted but the csv.writer below is
    hard-coded to lineterminator '\n' -- confirm the intended behaviour.
    NOTE(review): the point-layer branch appends to an undefined
    `node_header` and would raise NameError for point layers.
    """
    layer = find_layer(layername)
    # Locate the last path separator (either kind) by searching the
    # reversed path; slashIndex is counted from the end of the string.
    forwardSlashIndex = node_filename[::-1].find("/")
    backSlashIndex = node_filename[::-1].find("\\")
    if forwardSlashIndex < 0:
        forwardSlashIndex = 9999999
    if backSlashIndex < 0:
        backSlashIndex = 9999999
    if forwardSlashIndex < backSlashIndex:
        slashIndex = forwardSlashIndex+1
    else:
        slashIndex = backSlashIndex+1
    if (layer == None) or (layer.type() != core.QgsMapLayer.VectorLayer):
        return "Invalid Vector Layer " + layername
    attribute_header = []
    for index, field in enumerate(layer.dataProvider().fields()):
        if (layer.geometryType() == core.QgsPoint):
            node_header.append(field.name())
        else:
            attribute_header.append(field.name())
    nodefile = open(node_filename, 'w')
    # Remember the output directory so the dialog can default to it next time.
    paramsFile = os.path.dirname(__file__) + "/LastOutputFileLocation.txt"
    with open(paramsFile, 'w') as f:
        f.write(node_filename[0:-slashIndex])
    attribute_header.append(outputFieldPrefix + "Boundary")
    attribute_header.append(outputFieldPrefix + "CenterLat")
    attribute_header.append(outputFieldPrefix + "CenterLng")
    node_writer = csv.writer(nodefile, delimiter = field_delimiter, lineterminator = '\n', quoting=csv.QUOTE_NONNUMERIC)
    node_writer.writerow(attribute_header)
    QgsMessageLog.logMessage("Your plugin code has been executed correctly", 'EPE', level=0)
    QgsMessageLog.logMessage("LineString: " + str(QgsWkbTypes.LineString), 'EPE', level=0)
    feature_type = ""
    feature_count = layer.dataProvider().featureCount()
    for feature_index, feature in enumerate(layer.dataProvider().getFeatures()):
        QgsMessageLog.logMessage("Feature type is: " + str(feature.geometry().wkbType()), 'EPE', level=0)
        if (feature_index % 10) == 0:
            message = "Exporting feature " + str(feature_index) + " of " + str(feature_count)
            selfIface.statusBarIface().showMessage( message, 1000 )
        if (feature.geometry() == None):
            QgsMessageLog.logMessage("Feature has no geometry", 'EPE', level=0)
            nodefile.close()
            del nodefile
            return "Cannot export layer with no shape data"
        elif (feature.geometry().wkbType() == QgsWkbTypes.LineString) or \
            (feature.geometry().wkbType() == QgsWkbTypes.LineString25D):
            # --- Single polyline: one output row per feature. ---
            QgsMessageLog.logMessage("Feature is LineString", 'EPE', level=0)
            ring_number = 0
            polyline = feature.geometry().asPolyline()
            centroidLat = str(feature.geometry().centroid().asPoint().y())
            centroidLng = str(feature.geometry().centroid().asPoint().x())
            shape_id = str(feature_index)
            row = [ ]
            for attindex, attribute in enumerate(feature.attributes()):
                # Whole-valued floats are written as integers.
                if type(attribute) == float:
                    if attribute - round(attribute) == 0:
                        attribute = int(round(attribute))
                row.append(str(attribute).encode("utf-8"))
            encodedPolyline = ""
            if ring_number > 0:
                shape_id = shape_id + ".ring" + str(ring_number)
            ring_number = ring_number + 1
            plat = 0
            plng = 0
            # Encode each vertex as a delta from the previous one, in 1e-5
            # degree units (Google Encoded Polyline Algorithm Format).
            for point in polyline:
                lat = float(point.y())
                lng = float(point.x())
                plate5 = round(plat * 100000)
                plnge5 = round(plng * 100000)
                late5 = round(lat * 100000)
                lnge5 = round(lng * 100000)
                dlat = late5 - plate5
                dlng = lnge5 - plnge5
                encodedLat = encodeCoord(dlat)
                encodedLng = encodeCoord(dlng)
                encodedPolyline += encodedLat
                encodedPolyline += encodedLng
                plat = lat
                plng = lng
            encodedPolyline += '<br>'
            # Strip the trailing 4-character '<br>' separator.
            encodedPolyline = encodedPolyline[0:-4]
            row.append(encodedPolyline)
            row.append(centroidLat)
            row.append(centroidLng)
            node_writer.writerow(row)
        elif (feature.geometry().wkbType() == QgsWkbTypes.MultiLineString) or \
            (feature.geometry().wkbType() == QgsWkbTypes.MultiLineString25D):
            # --- Multi-part polyline: a row is written per part.
            # NOTE(review): encodedPolyline accumulates across parts and is
            # truncated by 4 characters before every row -- confirm this
            # cumulative output is intended. ---
            QgsMessageLog.logMessage("Feature is MultiLineString", 'EPE', level=0)
            polylines = feature.geometry().asMultiPolyline()
            centroidLat = str(feature.geometry().centroid().asPoint().y())
            centroidLng = str(feature.geometry().centroid().asPoint().x())
            encodedPolyline = ""
            for polyline_index, polyline in enumerate(polylines):
                ring_number = 0
                shape_id = str(feature_index) + "." + str(polyline_index)
                if ring_number > 0:
                    shape_id = shape_id + ".ring" + str(ring_number)
                ring_number = ring_number + 1
                plat = 0
                plng = 0
                for point in polyline:
                    lat = float(point.y())
                    lng = float(point.x())
                    plate5 = round(plat * 100000)
                    plnge5 = round(plng * 100000)
                    late5 = round(lat * 100000)
                    lnge5 = round(lng * 100000)
                    dlat = late5 - plate5
                    dlng = lnge5 - plnge5
                    encodedLat = encodeCoord(dlat)
                    encodedLng = encodeCoord(dlng)
                    encodedPolyline += encodedLat
                    encodedPolyline += encodedLng
                    plat = lat
                    plng = lng
                encodedPolyline += '<br>'
                row = [ ]
                for attindex, attribute in enumerate(feature.attributes()):
                    if type(attribute) == float:
                        if attribute - round(attribute) == 0:
                            attribute = int(round(attribute))
                    row.append(str(attribute).encode("utf-8"))
                encodedPolyline = encodedPolyline[0:-4]
                row.append(encodedPolyline)
                row.append(centroidLat)
                row.append(centroidLng)
                node_writer.writerow(row)
        elif (feature.geometry().wkbType() == QgsWkbTypes.Polygon) or \
            (feature.geometry().wkbType() == QgsWkbTypes.Polygon25D):
            QgsMessageLog.logMessage("Feature is Polygon", 'EPE', level=0)
            # The first polyline in the polygon is the outer ring
            # Subsequent polylines (if any) are inner rings (holes)
            ring_number = 0
            polygon = feature.geometry().asPolygon()
            centroidLat = str(feature.geometry().centroid().asPoint().y())
            centroidLng = str(feature.geometry().centroid().asPoint().x())
            shape_id = str(feature_index)
            row = [ ]
            for attindex, attribute in enumerate(feature.attributes()):
                if type(attribute) == float:
                    if attribute - round(attribute) == 0:
                        attribute = int(round(attribute))
                row.append(str(attribute).encode("utf-8"))
            encodedPolyline = ""
            for polyline in polygon:
                if ring_number > 0:
                    shape_id = shape_id + ".ring" + str(ring_number)
                ring_number = ring_number + 1
                plat = 0
                plng = 0
                for point in polyline:
                    lat = float(point.y())
                    lng = float(point.x())
                    plate5 = round(plat * 100000)
                    plnge5 = round(plng * 100000)
                    late5 = round(lat * 100000)
                    lnge5 = round(lng * 100000)
                    dlat = late5 - plate5
                    dlng = lnge5 - plnge5
                    encodedLat = encodeCoord(dlat)
                    encodedLng = encodeCoord(dlng)
                    encodedPolyline += encodedLat
                    encodedPolyline += encodedLng
                    plat = lat
                    plng = lng
                encodedPolyline += '<br>'
            encodedPolyline = encodedPolyline[0:-4]
            row.append(encodedPolyline)
            row.append(centroidLat)
            row.append(centroidLng)
            node_writer.writerow(row)
        elif (feature.geometry().wkbType() == QgsWkbTypes.MultiPolygon) or \
            (feature.geometry().wkbType() == QgsWkbTypes.MultiPolygon25D):
            # --- Multi-part polygon: a row is written per polygon.
            # NOTE(review): unlike the other branches, attributes here are
            # appended raw (no str()/encode) -- confirm the inconsistency. ---
            QgsMessageLog.logMessage("Feature is MultiPolygon", 'EPE', level=0)
            multipolygon = feature.geometry().asMultiPolygon()
            centroidLat = str(feature.geometry().centroid().asPoint().y())
            centroidLng = str(feature.geometry().centroid().asPoint().x())
            encodedPolyline = ""
            for polygon_index, polygon in enumerate(multipolygon):
                ring_number = 0
                for polyline in polygon:
                    shape_id = str(feature_index) + "." + str(polygon_index)
                    if ring_number > 0:
                        shape_id = shape_id + ".ring" + str(ring_number)
                    ring_number = ring_number + 1
                    plat = 0
                    plng = 0
                    for point in polyline:
                        lat = float(point.y())
                        lng = float(point.x())
                        plate5 = round(plat * 100000)
                        plnge5 = round(plng * 100000)
                        late5 = round(lat * 100000)
                        lnge5 = round(lng * 100000)
                        dlat = late5 - plate5
                        dlng = lnge5 - plnge5
                        encodedLat = encodeCoord(dlat)
                        encodedLng = encodeCoord(dlng)
                        encodedPolyline += encodedLat
                        encodedPolyline += encodedLng
                        plat = lat
                        plng = lng
                    encodedPolyline += '<br>'
                row = [ ]
                for attindex, attribute in enumerate(feature.attributes()):
                    if type(attribute) == float:
                        if attribute - round(attribute) == 0:
                            attribute = int(round(attribute))
                    row.append(attribute)
                encodedPolyline = encodedPolyline[0:-4]
                row.append(encodedPolyline)
                row.append(centroidLat)
                row.append(centroidLng)
                node_writer.writerow(row)
        else:
            # Unsupported geometry types are skipped rather than aborting.
            QgsMessageLog.logMessage("Feature has unsupported geometry", 'EPE', level=0)
            # nodefile.close()
            # del nodefile
            # return "Unsupported geometry"
            continue
    QgsMessageLog.logMessage("Closing nodefile", 'EPE', level=0)
    nodefile.close()
    del nodefile
    message = str(feature_count) + " records exported"
    selfIface.statusBarIface().showMessage( message, 5000 )
    # qgis.messageBar().pushMessage(message, 0, 3)
    return None
def encodeCoord(x):
    """Encode one coordinate delta (in 1e-5 degree units) as a chunk of the
    Google Encoded Polyline Algorithm Format.

    The value is left-shifted one bit, with the sign folded in via one's
    complement for negatives, then emitted 5 bits per character (low bits
    first), each character offset by 63, with bit 0x20 marking that more
    chunks follow.
    """
    value = int(round(x)) << 1
    if value < 0:
        value = ~value
    chunks = []
    while value >= 0x20:
        chunks.append(chr(((value & 0x1f) | 0x20) + 63))
        value >>= 5
    chunks.append(chr(value + 63))
    return "".join(chunks)
def find_layer(layer_name):
    """Return the first project map layer whose display name equals
    layer_name, or None when no layer matches."""
    # print "find_layer(" + str(layer_name) + ")"
    for name, layer in list(core.QgsProject.instance().mapLayers().items()):
        if layer.name() == layer_name:
            return layer
return None | [
"ttakahashi@analytics8.com"
] | ttakahashi@analytics8.com |
74c2347b9150e15dbbe69fe6dce4493a8258841f | b424c3262c9eacf8dd4230019eba7e05a9b95461 | /.history/ndn_hello_sender_20200530012537.py | a9866ad3bbc9dcd7f10b5fa74fed00e9084ad214 | [] | no_license | leonerii/aer_tp | 30e47f29bcda69512718a6279a7cad32e9a01b14 | d8f46b188b5be9f315dd155ed147880ce7dce169 | refs/heads/master | 2022-09-30T03:27:24.375971 | 2020-06-04T14:23:16 | 2020-06-04T14:23:16 | 245,219,806 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | from threading import Thread, RLock
from time import sleep
from json import dumps
from uuid import uuid4
import socket
class HelloSender(Thread):
    """Thread that periodically multicasts an NDN "HELLO" message advertising
    the local Content Store and the locally-originated FIB entries."""

    def __init__(self, lock, hello_interval, fib, cs, localhost, mcast_group, mcast_port):
        Thread.__init__(self)
        self.lock = lock  # guards shared access to fib/cs
        self.hello_interval = hello_interval  # seconds between HELLOs
        self.localhost = localhost
        self.mcast_group = mcast_group  # IPv6 multicast group address
        self.mcast_port = mcast_port
        self.fib = fib  # Forwarding Information Base
        self.cs = cs  # Content Store

    def run(self):
        """Send a HELLO every hello_interval seconds, forever."""
        while True:
            try:
                self.lock.acquire()
                self.ndn_hello_sender()
            except Exception as e:
                # BUG FIX: e.with_traceback() requires an argument and raised
                # TypeError in the handler itself; just report the exception.
                print('Failed: {}'.format(e))
            finally:
                self.lock.release()
            sleep(self.hello_interval)

    def _build_hello_message(self):
        """Build the HELLO payload: advertised content names plus the
        timestamps of FIB entries that originate locally (next_hop None)."""
        msg = {
            "type": "HELLO",
            # "source": self.localhost
            # BUG FIX: dict_keys is not JSON-serialisable; send a list.
            "data": list(self.cs.keys())
        }
        for key, value in self.fib.items():
            if value['next_hop'] is None:
                msg[key] = value['timestamp']
        return msg

    def ndn_hello_sender(self):
        """Send one "HELLO" message (JSON over IPv6 UDP) to the multicast
        group, advertising the CS contents so peers can build their FIBs.

        (A dangling `csdata =` assignment left by an unfinished edit was a
        SyntaxError here and has been removed.)
        """
        client_sock = None
        try:
            client_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            self.msg = self._build_hello_message()
            client_sock.sendto(dumps(self.msg).encode('utf-8'), (self.mcast_group, self.mcast_port))
        except socket.gaierror as socket_error:
            print('Sending error: {}'.format(socket_error))
        finally:
            # BUG FIX: if socket() itself failed, client_sock was unbound and
            # close() raised NameError; guard against that.
            if client_sock is not None:
                client_sock.close()
| [
"aseie@Adrianos-MBP.lan"
] | aseie@Adrianos-MBP.lan |
d3de760c391304be61217622b6ce2f11f6e7752b | 2060a33ce9386f3805179fee3823e604d3c621a8 | /test.py | 6629e5c1af114b347b4d9d09c0ac2b64f1d139cd | [
"MIT"
] | permissive | BasilaryGroup/docker-selenium-lambda | 0efeb70352099a567d91442c81466eeccb5063fc | 32ed94b4a2750f3b419b2c64b0b2d2c2d9270c89 | refs/heads/main | 2023-09-05T12:57:13.633022 | 2021-11-10T22:19:11 | 2021-11-10T22:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | from selenium import webdriver
def handler(event=None, context=None):
    """Lambda entry point: run headless Chrome and return example.com's text."""
    # Flags required to run Chrome inside a Lambda container, applied in order.
    chrome_flags = (
        '--headless',
        '--no-sandbox',
        "--disable-gpu",
        "--window-size=1280x1696",
        "--single-process",
        "--disable-dev-shm-usage",
        "--disable-dev-tools",
        "--no-zygote",
        "--user-data-dir=/tmp/chrome-user-data",
        "--remote-debugging-port=9222",
    )
    options = webdriver.ChromeOptions()
    options.binary_location = '/opt/chrome/chrome'
    for flag in chrome_flags:
        options.add_argument(flag)
    chrome = webdriver.Chrome("/opt/chromedriver",
                              options=options)
    chrome.get("https://example.com/")
    return chrome.find_element_by_xpath("//html").text
| [
"umihico@users.noreply.github.com"
] | umihico@users.noreply.github.com |
8b2f4a9a7d045651e33b7d78a5efc9722b082e18 | 0ab3b5dedb8ebef15682d5abb0cb7910c4d9a4b0 | /reactive/streams/base_objects/priority_subscription_pool.py | e620d40992cf965171b028005e66ccd7a5b5f6ad | [
"Apache-2.0"
] | permissive | xyicheng/ReactiveThespian | cd8023df7a07518a376c0e813a73c1fdfcde3861 | db93ea9acf58b0da12bcc78ab267e83f3c3c473b | refs/heads/master | 2021-08-07T20:10:59.735700 | 2017-11-07T21:02:31 | 2017-11-07T21:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,648 | py | '''
Subscriptions in this pool are called on a priority basis, enabling some jobs
to take priority.
Created on Nov 5, 2017
@author: aevans
'''
from reactive.streams.base_objects.subscription_pool import SubscriptionPool
from reactive.error.handler import handle_actor_system_fail
from reactive.message.stream_messages import Pull, Push, Cancel,\
SubscribeWithPriority
from reactive.message.router_messages import DeSubscribe
class SubscriptionPriority:
    """Pairs a subscription with a mutable scheduling priority.

    ``default_priority`` remembers the value supplied at construction so the
    effective ``priority`` can later be restored to it.
    """

    def __init__(self, subscription, priority):
        """
        :param subscription: The subsription
        :type subscription: Subscription
        :param priority: The priority
        :type priority: int()
        """
        self.subscription = subscription
        # Chained assignment: both start out equal to the supplied priority.
        self.priority = self.default_priority = priority
class PrioritySubscriptionPool(SubscriptionPool):
    """Subscription pool whose subscriptions are pulled on a priority basis,
    enabling some jobs to take priority over others."""

    def __init__(self):
        """Constructor."""
        super().__init__()
        # Bug fix: handle_push read ``self.__drop_policy`` but nothing in this
        # class ever set it (the parent's attribute, if any, is name-mangled
        # differently), so the first full result queue raised AttributeError.
        # "pop" drops the oldest queued result to make room.
        self.__drop_policy = "pop"
        self.__priority_queue = []
        self.__waiting_queue = []

    def remake_priority_queue(self):
        """Re-create the priority queue from the waiting subscriptions,
        ordered by ascending priority."""
        self.__priority_queue = sorted(self.__waiting_queue,
                                       key=lambda x: x.priority)
        self.__waiting_queue = []

    def remake_priority_make(self):
        """Backward-compatible alias for :meth:`remake_priority_queue`
        (the original, misspelled method name)."""
        self.remake_priority_queue()

    def next(self, msg, sender):
        """
        Get the next n elements in the batch and refill from the
        highest-priority subscription.

        :param msg: The message to handle (payload = requested batch size)
        :type msg: Message
        :param sender: The sender
        :type sender: BaseActor
        """
        if msg.sender:
            sender = msg.sender
        batch_size = msg.payload
        batch = []
        rq = super().get_result_q()
        pull_size = 0
        if batch_size > 0:
            if rq.empty() is False:
                i = 0
                while rq.empty() is False and i < batch_size:
                    try:
                        pull_size += 1
                        val = rq.get_nowait()
                        batch.append(val)
                    except Exception:
                        handle_actor_system_fail()
                    finally:
                        i += 1
            msg = Push(batch, sender, self)
            self.send(sender, msg)
        subs = self.get_subscriptions()
        if pull_size > 0:
            # Bug fix: this used to call the then-nonexistent
            # ``remake_priority_queue`` (the method was defined as
            # ``remake_priority_make``), raising AttributeError.
            if not self.__priority_queue:
                self.remake_priority_queue()
            # Robustness: skip the refill entirely when no subscription is
            # available instead of popping from an empty list.
            if self.__priority_queue:
                sub = self.__priority_queue.pop(0)
                outsub = sub.subscription
                self.__waiting_queue.append(sub)
                msg = Pull(pull_size, self)
                self.send(outsub, msg)
        elif rq.empty() and len(subs) > 0:
            pull_size = int(self.get_default_queue_size() / len(subs))
            for sub in subs:
                outsub = sub.subscription
                msg = Pull(pull_size, outsub, self.myAddress)
                self.send(outsub, msg)

    def handle_push(self, msg, sender):
        """
        Handle a push: append the batched results to the result queue,
        honouring the drop policy when the queue is full.

        :param msg: The message
        :type msg: Message
        :param sender: The sender of the message
        :type sender: BaseActor
        """
        payload = msg.payload
        if isinstance(payload, list):
            rq = self.get_result_q()
            for result in payload:
                if rq.full():
                    if self.__drop_policy == "pop":
                        # Best-effort: the queue may have drained concurrently.
                        try:
                            rq.get_nowait()
                        except Exception:
                            pass
                if rq.full() is False:
                    rq.put_nowait(result)

    def subscribe(self, msg, sender):
        """
        Subscribe. If the subscription exists, reset its priority.

        :param msg: The message to handle
        :type msg: Message
        :param sender: The sender
        :type sender: BaseActor
        """
        subscription = msg.payload
        sp = None
        for psp in self.get_subscriptions():
            if psp.subscription == subscription:
                sp = psp
                break
        if sp:
            sp.priority = msg.default_priority
        else:
            # New subscriptions start at priority 0 and wait to be scheduled.
            sp = SubscriptionPriority(subscription, 0)
            self.get_subscriptions().append(sp)
            self.__waiting_queue.append(sp)

    def desubscribe(self, msg, sender):
        """
        Remove the subscription carried in ``msg`` from the pool.

        Bug fix: the original loop dereferenced the nonexistent attribute
        ``self.__subscriptions`` and never advanced its index on a mismatch,
        producing an infinite loop.

        :param msg: The message to handle
        :type msg: Message
        :param sender: The sender
        :type sender: BaseActor
        """
        subscription = msg.payload
        for sp in list(self.get_subscriptions()):
            if subscription == sp.subscription:
                self.get_subscriptions().remove(sp)
                break

    def receiveMessage(self, msg, sender):
        """
        Handle message on receipt.

        :param msg: The message to handle
        :type msg: Message
        :param sender: The sender
        :tpye sender: BaseActor
        """
        try:
            if isinstance(msg, SubscribeWithPriority):
                self.subscribe(msg, sender)
            elif isinstance(msg, DeSubscribe):
                # Bug fix: the original dispatched to ``remove_subscription``,
                # which this class does not define; ``desubscribe`` was
                # defined but never wired up.
                self.desubscribe(msg, sender)
            elif isinstance(msg, Pull):
                self.next(msg, sender)
            elif isinstance(msg, Push):
                self.handle_push(msg, sender)
            elif isinstance(msg, Cancel):
                sub = msg.payload
                self.cancel(sub)
        except Exception:
            handle_actor_system_fail()
| [
"asevans48@gmail.com"
] | asevans48@gmail.com |
6027504c6f62aa980d9f96cc77dfda2d0ed618e0 | 75f65baea3857acef1f54a83da82db18eede7fd4 | /maskrcnn_benchmark/data/datasets/kidney.py | 6069248fa87c587d9ba92febaab66783083a8658 | [] | no_license | yijaein/KidneyMRC | d04128f18c928e2be0f8c30fc5af801cd7211509 | ed6a39a319468b0e018d70305712d8b47af56389 | refs/heads/master | 2020-04-22T09:01:19.786768 | 2019-04-11T05:55:27 | 2019-04-11T05:55:27 | 170,257,433 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,202 | py | import os
import cv2
import numpy as np
import torch
import torch.utils.data
from PIL import Image
from imgaug import augmenters as iaa
from imgaug import HooksImages
from maskrcnn_benchmark.data.augmentation.data_augmentation import img_and_mask_augmentation
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, ImageMask
import matplotlib.pyplot as plt
# 이 파일과 같은 새로운 데이터로더 클래스 추가후에는
# ~/lib/robin_mrcnn/maskrcnn_benchmark/data/datasets/__init__.py 파일에 등록을 해야만
# ~/lib/robin_mrcnn/maskrcnn_benchmark/config/paths_catalog.py 에 지정한 Factory로 참조가 가능해짐
class KidneyDataset(torch.utils.data.Dataset):
"""Mask R-CNN dataset of kidney ultrasound images and segmentation masks.

Images are matched to mask files by shared file stem; each sample yields the
augmented image, a BoxList target (boxes, labels, polygon masks) and the index.
"""
# Class indices: 1.aki = acute, 2.ckd = chronic, 3.normal = normal
CLASSES = (
"__background__ ",
"AKI",
"CKD",
"normal"
)
def __init__(self, mask_dir=None, root=None, mask_type=None, transforms=None, is_train=True):
"""
:param mask_dir: directory containing mask images (matched to images by stem)
:param root: directory containing the ultrasound images
:param mask_type: NOTE(review): accepted but ignored -- 'polygon' is forced below
:param transforms: optional (image, target) transform applied per sample
:param is_train: selects the train (heavy) vs. val (no-op) augmentation pipeline
"""
# "mask_type" = "polygon" or "image"
# for debug
print('data loader init args')
init_list = [mask_dir, root, mask_type, transforms, is_train]
print('\t' + '\n\t'.join([str(arg) if arg else 'None' for arg in init_list]))
# end
# norm path
root = norm_path(root)
mask_dir = norm_path(mask_dir)
# NOTE(review): the mask_type argument is deliberately overridden here.
self.mask_type = 'polygon' # mask_type
self.transforms = transforms
# Every image/mask is letterboxed to a 512x512 square (see resize helper).
self.image_size = 512
self.is_train = is_train
self.img_key_list = list()
self.img_dict = dict()
self.ann_info = dict()
cls = KidneyDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
# img_dict maps the image file name (stem) to the file's full path
self.img_dict = image_dict(root, exts=['.png', '.jpg', '.jpeg'], recursive=True, followlinks=True)
# for cls_num, cls_name in enumerate(self.CLASSES[1:], 1):
# cls_mask_path = os.path.join(mask_dir, cls_name)
# assert os.path.exists(cls_mask_path), "Not found class({}) path: {}".format(cls, cls_mask_path)
mask_dict = image_dict(mask_dir)
# Build ann_info: stem -> list of [class index, mask file path].
for mask_key, mask_file in mask_dict.items():
if mask_key not in self.ann_info:
self.ann_info[mask_key] = list()
if mask_key not in self.img_dict:
continue
us_img_path = self.img_dict[mask_key]
us_img_path = os.path.split(us_img_path)[0]
# Directory layout assumption: .../<class_name>/<accession_no>/<image>.
# NOTE(review): splitting on '/' assumes POSIX paths -- confirm on Windows.
cls_name, acc_no = us_img_path.split('/')[-2:]
cls_num = self.CLASSES.index(cls_name)
self.ann_info[mask_key].append([cls_num, mask_file])
# Only stems present in BOTH the image set and the mask set are usable.
self.img_key_list = list(set(self.img_dict) & set(self.ann_info))
print('found images', len(self.img_dict))
print('found masks', len(self.ann_info))
print('using image&mask', len(self.img_key_list))
# Training pipeline: rotation plus a random subset of flip/brightness/blur.
self.train_augmentation = iaa.Sequential([
# iaa.PiecewiseAffine(scale=(0.00, 0.05), nb_cols=3, nb_rows=3),
iaa.Affine(rotate=(-20, 20)),
iaa.SomeOf((0, None), [
iaa.Fliplr(0.5),
iaa.Multiply((0.5, 1.5)),
iaa.Add((-10, 10)),
iaa.GaussianBlur(sigma=(0, 1.0))
], random_order=False)
])
# Validation pipeline: identity (no augmentation).
self.val_augmentation = iaa.Sequential([], random_order=False)
# for i in range(1000):
# self.__getitem__(i)
# exit()
def __getitem__(self, idx):
"""Return (augmented image, BoxList target, idx) for sample ``idx``."""
filename = self.img_key_list[idx]
img = cv2.imread(self.img_dict[filename], cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = self.resize_keep_aspect_ratio_with_padding(img)
# to_deterministic() freezes the random parameters so the exact same
# geometric transform can later be applied to the masks.
if self.is_train:
aug_det = self.train_augmentation.to_deterministic()
else:
aug_det = self.val_augmentation.to_deterministic()
img = aug_det.augment_image(img)
# aug_det = None
img = Image.fromarray(img, mode="RGB")
width, height = img.size
target = self.get_groundtruth(filename, width, height, aug_det)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target = self.transforms(img, target)
# mask = target.get_field('masks').masks[0].masks[0].numpy()
# plt.imshow(mask)
# plt.show()
# exit()
return img, target, idx
def __len__(self):
"""Number of usable (image, mask) pairs."""
return len(self.img_key_list)
def get_groundtruth(self, filename, width, height, aug_det):
"""Build the BoxList target (boxes, labels, polygon masks) for ``filename``."""
anno = self._preprocess_annotation(self.ann_info[filename], aug_det)
target = BoxList(anno["boxes"], (width, height), mode="xywh").convert("xyxy")
target.add_field("labels", anno["labels"])
masks = SegmentationMask(anno["masks"], (width, height), type=self.mask_type)
target.add_field("masks", masks)
return target
def _preprocess_annotation(self, target, aug_det):
"""Load each mask, replay the image augmentation on it, and extract
bounding boxes, contour polygons and class labels."""
boxes = []
masks = []
gt_classes = []
# Only geometric augmenters may touch masks -- photometric ones
# (brightness, blur, ...) would corrupt the binary labels.
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad", "Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
for ann_info in target:
# x, y, w, h, cls_num, mask_file = ann_info
cls_num, mask_file = ann_info
mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
mask = self.resize_keep_aspect_ratio_with_padding(mask)
# plt.imshow(mask, cmap='gray')
# plt.show()
# mask = aug_det.augment_image(mask)
if aug_det:
mask = aug_det.augment_image(mask.astype(np.uint8), hooks=HooksImages(activator=hook))
# plt.imshow(mask, cmap='gray')
# plt.show()
x, y, w, h, mask_points= self.find_bounding_square(mask)
bbox = [x, y, w, h]
boxes.append(bbox)
# masks.append([mask])
masks.append(mask_points)
gt_classes.append(cls_num)
res = {
"boxes": torch.tensor(boxes, dtype=torch.float32),
"masks": masks,
"labels": torch.tensor(gt_classes),
}
return res
def find_bounding_square(self, mask):
"""Return (x, y, w, h, contour point lists) for the mask's foreground.
NOTE(review): the bounding box uses only contours[0]; multi-blob masks
may get a box covering just one blob -- confirm this is intended."""
mask = mask.astype(np.uint8)
_, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(contours[0])
mask_points = []
for cont in contours:
cont = np.array(cont)
# Flatten Nx1x2 contour points to [x0, y0, x1, y1, ...] polygon form.
cont = cont.reshape((-1))
# print(cont.shape)
mask_points.append(cont.tolist())
# print(x, y, w, h, len(contours), np.min(mask), np.max(mask))
return x, y, w, h, mask_points
def resize_keep_aspect_ratio_with_padding(self, im):
"""Resize ``im`` so its longer side equals ``self.image_size``, then
pad with black to a square (letterboxing, aspect ratio preserved)."""
size = self.image_size
old_size = im.shape[:2] # old_size is in (height, width) format
ratio = float(size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = size - new_size[1]
delta_h = size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return new_im
# def get_image_polygons(self, mask):
# _, contours, hierarchy = cv2.findContours(
# mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
# )
# return 0
#
# def get_img_info(self, index):
# return {"height": 512, "width": 512}
#
# def map_class_id_to_class_name(self, class_id):
# return KidneyDataset.CLASSES[class_id]
# return KidneyDataset.CLASSES[class_id]
# util functions
def norm_path(path, makedirs=False):
    """Return ``path`` normalised (case, separators, ``~`` expansion, absolute).

    :param path: path to normalise
    :param makedirs: when True, create the directory (and parents) if missing
    :return: the normalised absolute path
    """
    path = os.path.normcase(path)
    path = os.path.normpath(path)
    path = os.path.expanduser(path)
    path = os.path.abspath(path)
    if makedirs and not os.path.exists(path):
        os.makedirs(path)
        # Bug fix: the original printed the literal string 'makedirs:, path'
        # instead of the created path.
        print('makedirs: {}'.format(path))
    return path
def image_list(path, exts=['.png', '.jpg'], recursive=True, followlinks=True):
    """Collect paths of files under ``path`` whose extension is in ``exts``.

    :param path: directory to scan (normalised via :func:`norm_path`)
    :param exts: accepted extensions (compared lower-case, dot included)
    :param recursive: descend into sub-directories when True
    :param followlinks: follow directory symlinks (recursive mode only)
    :return: list of absolute file paths
    """
    path = norm_path(path)

    def wanted(filename):
        # Accept only files whose case-insensitive extension is listed.
        return os.path.splitext(filename)[1].lower() in exts

    found = []
    if recursive:
        for root, dirs, files in os.walk(path, followlinks=followlinks):
            found.extend(os.path.join(root, f) for f in files if wanted(f))
    else:
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if os.path.isfile(full) and wanted(entry):
                found.append(full)
    return found
def image_dict(path, exts=['.png', '.jpg'], recursive=True, key=None, followlinks=True):
    """Map a key derived from each image path to that path.

    :param path: directory to scan (normalised via :func:`norm_path`)
    :param exts: accepted extensions (compared lower-case, dot included)
    :param recursive: descend into sub-directories when True
    :param key: callable(full_path) -> dict key; defaults to the file stem.
                Later files sharing a key silently overwrite earlier ones.
    :param followlinks: follow directory symlinks (recursive mode only)
    :return: dict of key -> absolute file path
    """
    path = norm_path(path)
    if key is None:  # idiom fix: was ``key == None`` (identity, not equality)
        key = lambda p: os.path.splitext(os.path.split(p)[-1])[0]
    d = dict()
    if recursive:
        for root, dirs, files in os.walk(path, followlinks=followlinks):
            for file in files:
                if os.path.splitext(file)[1].lower() not in exts:
                    continue
                full_path = os.path.join(root, file)
                d[key(full_path)] = full_path
    else:
        for entry in os.listdir(path):
            full_path = os.path.join(path, entry)
            # Skip sub-directories in the flat scan.
            if not os.path.isfile(full_path):
                continue
            if os.path.splitext(entry)[1].lower() not in exts:
                continue
            d[key(full_path)] = full_path
    return d
| [
"jiyi.nexys@gmail.com"
] | jiyi.nexys@gmail.com |
978ff86fae607a0d5114a3d0192a240b45694f16 | b1b06ae1dfbad7178445cb0dd93d294e98be767b | /eigen_printers.py | d2ff8c387c2b06ac4e8d141b174acc79735033f8 | [] | no_license | nspo/drake_gdb | f7f0ae37bfdf7909708b350e4fa1feeadc14c474 | f762e51371aa1b6921462b2075425bbac4bd720b | refs/heads/master | 2020-05-05T03:23:22.713155 | 2019-04-05T11:41:54 | 2019-04-05T11:41:54 | 179,670,350 | 2 | 0 | null | 2019-04-05T11:36:17 | 2019-04-05T11:36:17 | null | UTF-8 | Python | false | false | 15,936 | py | # -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Pretty printers for Eigen::Matrix
# This is still pretty basic as the python extension to gdb is still pretty basic.
# It cannot handle complex eigen types and it doesn't support any of the other eigen types
# Such as quaternion or some other type.
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * Create a directory and put the file as well as an empty __init__.py in
# that directory.
# * Create a ~/.gdbinit file, that contains the following:
# python
# import sys
# sys.path.insert(0, '/path/to/eigen/printer/directory')
# from printers import register_eigen_printers
# register_eigen_printers (None)
# end
import gdb
import re
import itertools
class EigenAutoDiffScalarPrinter:
"""gdb pretty-printer for Eigen::AutoDiffScalar: shows the scalar value and
exposes the derivatives vector as a child."""
def __init__(self, val, for_clion):
"""
:param val: the gdb.Value being printed (an AutoDiffScalar instance)
:param for_clion: True when printing for CLion's debugger front end
"""
# TODO: extract enough arguments for the autodiff scalar that it's declaration is clear.
self.val = val
self.for_clion = for_clion
# Cache the plain scalar part (the m_value member) as a Python float.
self.scalarValue = float(self.val['m_value'])
class _iterator:
"""Two-step iterator yielding ('value', ...) then ('derivatives', ...)."""
# Stage constants controlling which field is emitted next.
VALUE = 0
DERIVS = 1
MAX_FIELDS = 2
def __init__(self, value, derivs):
self.value = value
self.derivs = derivs
self.stage = self.VALUE
def __next__(self):
result = None
if (self.stage == self.MAX_FIELDS):
raise StopIteration
elif (self.stage == self.VALUE):
result = ('value', self.value)
elif (self.stage == self.DERIVS):
result = ("derivatives", self.derivs)
self.stage += 1
return result
def next(self):
# Python 2.x compatibility shim.
return self.__next__()
def __iter__(self):
return self
def children(self):
"""gdb hook: yield the scalar value and the raw derivatives member."""
return self._iterator(self.scalarValue, self.val['m_derivatives'])
def to_string(self):
"""gdb hook: one-line summary of the scalar part."""
# TODO: Confirm that this is actually double -- although it's a safe assumption
# TODO: Report the *size* of the derivatives vector.
return "AutoDiffScalar<double>: {0:<14g}".format(self.scalarValue)
class EigenMatrixPrinter:
"Print Eigen Matrix or Array of some kind"
def __init__(self, variety, val, for_clion):
"Extract all the necessary information"
# :param variety: "Matrix" or "Array" (used only for display)
# :param val: the gdb.Value being printed
# :param for_clion: expose per-element children only for CLion
self.for_clion = for_clion
if (for_clion):
self.children = lambda: self._iterator(self.rows, self.cols, self.data, self.rowMajor)
# Save the variety (presumably "Matrix" or "Array") for later usage
self.variety = variety
# The gdb extension does not support value template arguments - need to extract them by hand
template_params = self.get_template_parameters(val)
# -1 (in its various textual encodings) means Eigen::Dynamic: read the
# runtime dimension from the storage instead of the template argument.
if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1':
self.rows = int(val['m_storage']['m_rows'])
else:
self.rows = int(template_params[1])
if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001' or template_params[2] == '-1':
self.cols = int(val['m_storage']['m_cols'])
else:
self.cols = int(template_params[2])
self.options = 0 # default value
if len(template_params) > 3:
self.options = template_params[3];
# Bit 0 of the Options template argument selects row-major storage.
self.rowMajor = (int(self.options) & 0x1)
self.innerType = self.type.template_argument(0)
self.val = val
# Fixed size matrices have a struct as their storage, so we need to walk through this
self.data = self.val['m_storage']['m_data']
if self.data.type.code == gdb.TYPE_CODE_STRUCT:
self.data = self.data['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
"""Walks the flat data pointer in storage order, yielding one
('[r,c]', value) pair per element."""
def __init__ (self, rows, cols, dataPtr, rowMajor):
self.rows = rows
self.cols = cols
self.dataPtr = dataPtr
self.currentRow = 0
self.currentCol = 0
self.rowMajor = rowMajor
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
# Remember the coordinates of the element being emitted, then
# advance in storage order (column-major vs. row-major).
row = self.currentRow
col = self.currentCol
if self.rowMajor == 0:
if self.currentCol >= self.cols:
raise StopIteration
self.currentRow = self.currentRow + 1
if self.currentRow >= self.rows:
self.currentRow = 0
self.currentCol = self.currentCol + 1
else:
if self.currentRow >= self.rows:
raise StopIteration
self.currentCol = self.currentCol + 1
if self.currentCol >= self.cols:
self.currentCol = 0
self.currentRow = self.currentRow + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
# Vectors get a single index label; matrices get a [row,col] label.
if (self.cols == 1): #if it's a column vector
return ('[%d]' % (row,), item)
elif (self.rows == 1): #if it's a row vector
return ('[%d]' % (col,), item)
return ('[%d,%d]' % (row, col), item)
def get_template_parameters(self, val):
'''Handles the special case where the template parameters have nested template parameters.
e.g., Eigen::Matrix<Eigen::AutoDifScalar<...>, 4, 4, 0, 4, 4>'''
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
tag = self.type.tag
# Grab the full <...> argument list from the type tag, then split it on
# top-level commas while keeping nested <...> groups intact.
parm_list_re = re.compile('<.*\>')
parm_str = parm_list_re.findall(tag)[0][1:-1]
# this *should* handle nested template types for the first parameter (the scalar value).
param_re = re.compile('(?:[^<>]+<.*>\s*,)|(?:[^<>]+?(?:,|$))')
template_params = []
m = param_re.search(parm_str)
while (m):
template_params.append(parm_str[m.pos:m.end()].strip(' ,'))
m = param_re.search(parm_str, m.end())
return template_params
def matString( self ):
'''Produces a tab-indented, RXC printout of the matrix data.'''
mat = ''
ptr = self.data
getFloat = float
if (ptr.dereference().type.code != gdb.TYPE_CODE_FLT):
# assume autodiff
auto_diff_val = ptr.dereference()
getFloat = lambda x: float(x['m_value'])
# Collect every element as text first so each column can be padded to
# the width of its widest entry.
rows = [ [] for r in range(self.rows) ]
widths = [0 for c in range(self.cols) ]
if (self.rowMajor == 0 ):
for c in range(self.cols):
for r in range(self.rows):
s = '{:.14g}'.format(getFloat(ptr.dereference()))
widths[c] = max(widths[c], len(s))
rows[r].append(s)
ptr += 1
else:
for r in range(self.rows):
for c in range(self.cols):
s = '{:.14g}'.format(getFloat(ptr.dereference()))
widths[c] = max(widths[c], len(s))
rows[r].append(s)
ptr += 1
# compute column widths independently
return '\n'.join(map(lambda row: '\t' + ''.join(map(lambda c: '{0:{1}}'.format(row[c], widths[c] + 1), range(len(row)))), rows))
def get_major_label(self):
'''Maps the row major boolean to a string for display'''
if self.rowMajor:
return "RowMajor"
else:
return "ColMajor"
def get_prefix(self):
'''Defines the display prefix -- can be overridden by derived classes'''
return 'Eigen::%s<%s, %d, %d, %s>' % (self.variety, self.innerType, self.rows, self.cols, self.get_major_label())
def to_string(self):
'''Produces the string representation -- prefix, pointer, and matrix string representation.'''
return self.get_prefix() + " (data ptr: %s)\n%s" % (self.data, self.matString())
class EigenTransformPrinter(EigenMatrixPrinter):
    """Pretty-printer for Eigen::Transform; delegates the underlying matrix
    storage to EigenMatrixPrinter and adds the Mode template argument."""

    def __init__(self, val, for_clion):
        # The transform's data lives in its m_matrix member.
        EigenMatrixPrinter.__init__(self, "Transform", val["m_matrix"], for_clion)
        # The gdb extension does not support value template arguments - need to extract them by hand
        type = val.type
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        type = type.unqualified().strip_typedefs()
        # Pull the <...> argument list out of the type tag; the third
        # argument is the Mode enumerator.
        arg_list = re.compile('\<.*\>').findall(type.tag)[0][1:-1]
        self.mode = int(arg_list.split(',')[2])

    def get_mode_string(self):
        """Map the Mode template argument to its Eigen enumerator name."""
        names = {0: "Affine", 1: "AffineCompact"}
        return names.get(self.mode, "Projective")

    def get_prefix(self):
        return 'Eigen::Transform<%s, %d, %s, %s>' % (
            self.innerType, self.rows - 1, self.get_mode_string(), self.get_major_label())
class EigenQuaternionPrinter:
"Print an Eigen Quaternion"
# The quaternion is four scalar values: this is the interpretation of the *order* of those values.
elementNames = ['x', 'y', 'z', 'w']
def __init__(self, val, for_clion):
"Extract all the necessary information"
# :param val: the gdb.Value being printed (an Eigen::Quaternion)
# :param for_clion: expose per-element children only for CLion
# The gdb extension does not support value template arguments - need to extract them by hand
if (for_clion):
self.children = lambda: self._iterator(self.data)
# I expect this will fail with AutoDiff
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
self.innerType = self.type.template_argument(0)
self.val = val
# Quaternions have a struct as their storage, so we need to walk through this
self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
"""Yields the four coefficients as ('[x]', v), ('[y]', v), ..."""
def __init__ (self, dataPtr):
self.dataPtr = dataPtr
self.currentElement = 0
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
element = self.currentElement
if self.currentElement >= 4: #there are 4 elements in a quanternion
raise StopIteration
self.currentElement = self.currentElement + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
return ('[%s]' % (EigenQuaternionPrinter.elementNames[element],), item)
def quat_string(self):
'''Produces a quaternion string of the form "value, <value, value, value>'''
to_float = float
ptr = self.data
if (ptr.dereference().type.code != gdb.TYPE_CODE_FLT):
# assume autodiff
to_float = lambda x: float(x['m_value'])
def getNextFloat(pointer):
# Read one coefficient and advance the pointer (local copy only).
val = to_float(pointer.dereference())
pointer += 1
return val
values = [ getNextFloat(ptr) for x in range(4) ]
# Storage order is x, y, z, w; displayed as "w, <x, y, z>".
q_values = dict(zip(self.elementNames, values))
return '{w:.14g}, <{x:.14g}, {y:.14g}, {z:.14g}>'.format(**q_values)
def to_string(self):
"""gdb hook: one-line summary with the data pointer and coefficients."""
return "Eigen::Quaternion<%s> (data ptr: %s)\n\t%s" % (self.innerType, self.data, self.quat_string())
def register_printers(for_clion):
    "Register eigen pretty-printers with objfile Obj"
    global pretty_printers_dict

    def register(pattern, factory):
        # Key the dispatch table on the compiled type-name pattern.
        pretty_printers_dict[re.compile(pattern)] = factory

    register('^Eigen::AutoDiffScalar<.*>$', lambda val: EigenAutoDiffScalarPrinter(val, for_clion))
    register('^Eigen::Quaternion<.*>$', lambda val: EigenQuaternionPrinter(val, for_clion))
    register('^Eigen::Transform<.*>$', lambda val: EigenTransformPrinter(val, for_clion))
    register('^Eigen::Matrix<.*>$', lambda val: EigenMatrixPrinter("Matrix", val, for_clion))
    register('^Eigen::Array<.*>$', lambda val: EigenMatrixPrinter("Array", val, for_clion))
    gdb.pretty_printers.append(lambda val: lookup_function(val, for_clion))
def lookup_function(val, for_clion):
    """Return a pretty-printer instance for *val*, or None if no registered
    type pattern matches its (dereferenced, canonical) type tag."""
    type = val.type
    if type.code == gdb.TYPE_CODE_REF:
        type = type.target()
    typename = type.unqualified().strip_typedefs().tag
    if typename is None:
        return None
    for pattern in pretty_printers_dict:
        if pattern.search(typename):
            return pretty_printers_dict[pattern](val)
    return None
# Maps compiled type-name regexes to printer factory callables; populated by
# register_printers() and consulted by lookup_function().
pretty_printers_dict = {}
| [
"sean.curtis@tri.global"
] | sean.curtis@tri.global |
cfd9d072ccc62a10c789c10715c8b0e675bb55b1 | afafa418e1ba7d1c06249993ca0b33db7ab3ab2e | /movo_common/si_utils/scripts/gripper_action_test.py | fa70d0c88db58fe967834d35095193b50b658130 | [
"BSD-3-Clause"
] | permissive | Kinovarobotics/kinova-movo | 7c493c7d53ea902f19c24544718457269706bf61 | 9f515fa476f6c761829a4b5b19e769f869e1cfce | refs/heads/master | 2023-07-20T20:11:34.621763 | 2023-07-14T19:54:37 | 2023-07-14T19:54:37 | 114,134,532 | 44 | 39 | BSD-3-Clause | 2023-02-22T19:09:06 | 2017-12-13T15:02:35 | Python | UTF-8 | Python | false | false | 4,133 | py | #!/usr/bin/env python
"""--------------------------------------------------------------------
Copyright (c) 2017, Kinova Robotics inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file gripper_action_test.py
\brief ROS Driver for interfacing with the Kinova Jaco integrated
with the Stanley Innovation Vector platform
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
import sys
from copy import copy
import rospy
import actionlib
import math
import random
from control_msgs.msg import (
GripperCommandAction,
GripperCommandGoal,
)
from sensor_msgs.msg import JointState
class GripperActionTest(object):
    """Thin wrapper around a MOVO GripperCommand action client for one side
    ("right" or "left") of the robot."""

    def __init__(self, prefix="right"):
        self._prefix = prefix
        self._client = actionlib.SimpleActionClient(
            '/movo/%s_gripper_controller/gripper_cmd' % self._prefix,
            GripperCommandAction,
        )
        self._goal = GripperCommandGoal()
        # Abort the whole node if the action server never comes up.
        if not self._client.wait_for_server(timeout=rospy.Duration(10.0)):
            rospy.logerr("Timed out waiting for Gripper Command"
                         " Action Server to connect. Start the action server"
                         " before running example.")
            rospy.signal_shutdown("Timed out waiting for Action Server")
            sys.exit(1)
        self.clear()

    def command(self, position, block=False, timeout=15.0):
        """Send a position goal; optionally block until it completes."""
        self._goal.command.position = position
        self._goal.command.max_effort = -1.0
        self._client.send_goal(self._goal)
        if block:
            self.wait(timeout)

    def stop(self):
        """Cancel the goal currently being executed."""
        self._client.cancel_goal()

    def wait(self, timeout=15.0):
        """Block until the current goal finishes or *timeout* seconds elapse."""
        self._client.wait_for_result(timeout=rospy.Duration(timeout))

    def result(self):
        """Return the result of the most recent goal."""
        return self._client.get_result()

    def clear(self):
        """Reset the cached goal message to a fresh, empty goal."""
        self._goal = GripperCommandGoal()
def main():
    """Run both grippers through a fixed close/half-open/open/close sequence."""
    rospy.init_node('gripper_action_test')
    rg_test = GripperActionTest("right")
    lg_test = GripperActionTest("left")

    def _move_both(position):
        # Command both grippers concurrently, then wait for both to finish.
        lg_test.command(position)
        rg_test.command(position)
        lg_test.wait()
        rg_test.wait()

    # Same sequence as before: fully closed, half open, fully open, closed.
    for position in (0.0, 0.085, 0.165, 0.0):
        _move_both(position)

    print("Gripper Action Test Example Complete")
main()
| [
"sparadis@kinova.ca"
] | sparadis@kinova.ca |
7666cc4322342c7631b027db798897a2881c9e5e | 9f764af298f3ee07dcea1977e93fb2e15b5c88ea | /ml-unsupervised/functions.py | e69da6a1c33658f98af71070aa176387d80de3e6 | [] | no_license | nvg/ml_sandbox | de691596398476cdd4a4fbb6b3e385791c2a6ed7 | 6b1cf398678e85fd66b3d9c5a8d3e48058ae0d73 | refs/heads/master | 2022-09-20T17:41:56.627003 | 2022-09-04T20:09:46 | 2022-09-04T20:09:46 | 204,779,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,220 | py | import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import dendrogram
def display_circles(pcs, n_comp, pca, axis_ranks, labels=None, label_rotation=0, lims=None):
"""Draw a PCA correlation circle for each requested pair of components.

:param pcs: component loadings, one row per component, one column per variable
:param n_comp: number of fitted components (pairs beyond it are skipped)
:param pca: fitted sklearn PCA (used for explained_variance_ratio_ labels)
:param axis_ranks: iterable of (d1, d2) component-index pairs to plot
:param labels: optional per-variable labels drawn at each arrow tip
:param label_rotation: rotation (degrees) applied to the labels
:param lims: optional (xmin, xmax, ymin, ymax) axis limits override
"""
for d1, d2 in axis_ranks:
if d2 < n_comp:
fig, ax = plt.subplots(figsize=(7,6))
# Axis limits: explicit override, else the unit circle for few
# variables, else tight bounds around the loadings.
if lims is not None :
xmin, xmax, ymin, ymax = lims
elif pcs.shape[1] < 30 :
xmin, xmax, ymin, ymax = -1, 1, -1, 1
else :
xmin, xmax, ymin, ymax = min(pcs[d1,:]), max(pcs[d1,:]), min(pcs[d2,:]), max(pcs[d2,:])
# Few variables: individual arrows; many: a faint line bundle.
if pcs.shape[1] < 30 :
plt.quiver(np.zeros(pcs.shape[1]), np.zeros(pcs.shape[1]),
pcs[d1,:], pcs[d2,:],
angles='xy', scale_units='xy', scale=1, color="grey")
else:
lines = [[[0,0],[x,y]] for x,y in pcs[[d1,d2]].T]
ax.add_collection(LineCollection(lines, axes=ax, alpha=.1, color='black'))
# Label each variable, skipping those outside the visible window.
if labels is not None:
for i,(x, y) in enumerate(pcs[[d1,d2]].T):
if x >= xmin and x <= xmax and y >= ymin and y <= ymax :
plt.text(x, y, labels[i], fontsize='14', ha='center', va='center', rotation=label_rotation, color="blue", alpha=0.5)
# Unit correlation circle plus dashed axes through the origin.
circle = plt.Circle((0,0), 1, facecolor='none', edgecolor='b')
plt.gca().add_artist(circle)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.plot([-1, 1], [0, 0], color='grey', ls='--')
plt.plot([0, 0], [-1, 1], color='grey', ls='--')
plt.xlabel('F{} ({}%)'.format(d1+1, round(100*pca.explained_variance_ratio_[d1],1)))
plt.ylabel('F{} ({}%)'.format(d2+1, round(100*pca.explained_variance_ratio_[d2],1)))
plt.title("Correlation circle (F{} and F{})".format(d1+1, d2+1))
plt.show(block=False)
def display_factorial_planes(X_projected, n_comp, pca, axis_ranks, labels=None, alpha=1, illustrative_var=None):
"""Scatter the projected individuals on each requested factorial plane.

:param X_projected: samples projected onto the principal components
:param n_comp: number of fitted components (pairs beyond it are skipped)
:param pca: fitted sklearn PCA (used for explained_variance_ratio_ labels)
:param axis_ranks: iterable of (d1, d2) component-index pairs to plot
:param labels: optional per-sample text labels
:param alpha: marker transparency
:param illustrative_var: optional per-sample category used to colour-code points
"""
for d1,d2 in axis_ranks:
if d2 < n_comp:
fig = plt.figure(figsize=(7,6))
# One scatter for all points, or one per category when an
# illustrative variable is supplied.
if illustrative_var is None:
plt.scatter(X_projected[:, d1], X_projected[:, d2], alpha=alpha)
else:
illustrative_var = np.array(illustrative_var)
for value in np.unique(illustrative_var):
selected = np.where(illustrative_var == value)
plt.scatter(X_projected[selected, d1], X_projected[selected, d2], alpha=alpha, label=value)
plt.legend()
if labels is not None:
for i,(x,y) in enumerate(X_projected[:,[d1,d2]]):
plt.text(x, y, labels[i],
fontsize='14', ha='center',va='center')
# Symmetric limits with a 10% margin around the farthest point.
boundary = np.max(np.abs(X_projected[:, [d1,d2]])) * 1.1
plt.xlim([-boundary,boundary])
plt.ylim([-boundary,boundary])
plt.plot([-100, 100], [0, 0], color='grey', ls='--')
plt.plot([0, 0], [-100, 100], color='grey', ls='--')
# axis names, with the percentage of explained inertia
plt.xlabel('F{} ({}%)'.format(d1+1, round(100*pca.explained_variance_ratio_[d1],1)))
plt.ylabel('F{} ({}%)'.format(d2+1, round(100*pca.explained_variance_ratio_[d2],1)))
plt.title("Individual projections (on F{} and F{})".format(d1+1, d2+1))
plt.show(block=False)
def display_scree_plot(pca):
    """Plot the scree of a fitted PCA: per-axis inertia as bars, cumulative inertia as a red line."""
    scree = pca.explained_variance_ratio_ * 100
    ranks = np.arange(len(scree)) + 1  # axes are numbered from 1
    plt.bar(ranks, scree)
    plt.plot(ranks, scree.cumsum(), c="red", marker='o')
    plt.xlabel("rank of the inertia axis")
    plt.ylabel("percentage of inertia")
    plt.title("Scree of eigenvalues")
    plt.show(block=False)
def plot_dendrogram(Z, names):
    """Draw a horizontal hierarchical-clustering dendrogram.

    Z     : linkage matrix as returned by scipy.cluster.hierarchy.linkage.
    names : leaf labels, one per original observation.
    """
    plt.figure(figsize=(10, 25))
    plt.title('Hierarchical Clustering Dendrogram')
    plt.xlabel('distance')
    dendrogram(
        Z,
        labels=names,
        orientation="left",
    )
    # Fix: removed a stray extraction artifact (" | [") that was fused onto this line.
    plt.show()
"nick.goupinets@gmail.com"
] | nick.goupinets@gmail.com |
59a6d91e7d4d02f0fe7af2523e694328ca0317f0 | c4353343c9fd8a1b904e64d96dd85ec09ff153fe | /dongu.py | 61eda8b6137b8cc54f154e708f73af850936dbb8 | [] | no_license | kadertarlan/python_py | 77aee3111c758aa9f73184bb1c81e37e6af6e94c | 178b45bbdc877ed955da45046e6a67f03caf2207 | refs/heads/master | 2021-01-20T05:08:20.746598 | 2014-02-16T18:57:47 | 2014-02-16T18:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
a=0;
while a<3:
a=a+1
print a
print "\n"
for i in range(1,10):
print i
for i in "kelime":
print i
print "\n"
for i in range(0,11,2):
print i
print "\n"
print range(10,15)
print range(0,20,3)
a=" kader tarlan "
print len(a)
a=1234567
b=str(a)
c=len(b)
print c
a="kader"
print 'k' in a
kul_ad="kader"
kul_soyad="tarlan"
while True:
ad=raw_input(" kullanıcı adı:")
soyad=raw_input( "kullanıcı soyad:")
if ad==" " and soyad==" ":
continue
if ad==kul_ad and soyad==kul_soyad:
print " programa hosgeldınız."
break
else:
print " yanlıs ad soyad"
break
cevap=raw_input(" sistemden cıkmak mı istiyorsunuz(e/E)")
if 'e' in cevap or 'E' in cevap:
print "gule gule"
| [
"sinemusul42@gmail.com"
] | sinemusul42@gmail.com |
d6101a03676385d1cab0549536ac13e065be7204 | 40043e5a5daf7817cbac766dfaede265a8b9a29c | /setup.py | d65b78460c756943bd495ead90e175877bb9f82c | [] | no_license | juniuszhou/substrate-python-api | 166246266aa9f96954125cbb600caf854774a6da | 98d538aa3e13f57f02758656ffa7977463977e5a | refs/heads/master | 2022-12-16T07:13:11.767383 | 2020-09-17T14:07:37 | 2020-09-17T14:07:37 | 197,921,346 | 6 | 3 | null | 2020-05-25T01:26:51 | 2019-07-20T11:29:55 | Python | UTF-8 | Python | false | false | 748 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################
# Packaging script for substrate-python-api.
# Author: junius <junius.zhou@gmail.com>
# Created: 2019-07-20
#############################################
from setuptools import setup, find_packages

# Short and long descriptions are identical for this package.
_DESCRIPTION = "python api for substrate"

setup(
    name="substrate-python-api",
    version="0.0.2",
    description=_DESCRIPTION,
    long_description=_DESCRIPTION,
    keywords=("pip", "substrate", "api"),
    license="MIT Licence",
    url="https://github.com/juniuszhou/substrate-pyton-api",
    author="junius",
    author_email="junius.zhou@gmail.com",
    packages=find_packages(),
    include_package_data=True,
    platforms="any",
    install_requires=[],
)
"junius.zhou@gmail.com"
] | junius.zhou@gmail.com |
66293c35631b9c820a51f20977c34a270203973b | 85b6f7782108bede2838c95adc89067e0ead70c7 | /PythonAssignment5/dividebyzero.py | bd096464b2bc3a4182a160f0f5d585bdb5faa151 | [] | no_license | RaunakJalan/iNeuronDLCVNLP | 2fa005df34a712e078a8736578ad2808cd28826c | 58e440f906530c8834df0a030c155fa480a6400a | refs/heads/main | 2023-02-09T19:32:22.901752 | 2021-01-09T20:13:58 | 2021-01-09T20:13:58 | 314,672,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | a=5
# Demonstrate exception handling around integer division (a is set above).
b = 0
test = 0
try:
    # Raises ZeroDivisionError when b == 0, so print(test) is skipped.
    test = a / b
    print(test)
except ZeroDivisionError as e:
    # Fix: catch only the error this division can raise; the original broad
    # `except Exception` would also have hidden unrelated bugs.
    print(e)
finally:
    # Runs whether or not the division succeeded.
    print("Done Execution.")
"ronakjalan98@gmail.com"
] | ronakjalan98@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.