hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c944fb70978ce339d17a2b5185083d51125ce9b | 1,442 | py | Python | src/entities/user.py | clayz/crazy-quiz-web | 7601809ad521d95ae251a026f171b9ec6939c55f | [
"Apache-2.0"
] | null | null | null | src/entities/user.py | clayz/crazy-quiz-web | 7601809ad521d95ae251a026f171b9ec6939c55f | [
"Apache-2.0"
] | null | null | null | src/entities/user.py | clayz/crazy-quiz-web | 7601809ad521d95ae251a026f171b9ec6939c55f | [
"Apache-2.0"
] | null | null | null | from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from entities import BaseEntity
from constants import Gender, UserStatus, Device, APIStatus
from errors import DataError
class User(BaseEntity):
    name = ndb.StringProperty()
    mail = ndb.StringProperty()
    gender = msgprop.EnumProperty(Gender)
    birthday = ndb.DateProperty()
    avatar = ndb.BlobProperty(compressed=True)
    status = msgprop.EnumProperty(UserStatus, required=True, default=UserStatus.INACTIVE)
    device = msgprop.EnumProperty(Device, required=True)
    continue_got_count = ndb.IntegerProperty(required=True, default=0)  # daily bonus
    last_got_datetime = ndb.DateTimeProperty()  # daily bonus
    push_token = ndb.StringProperty()
    update_date = ndb.DateTimeProperty(required=True, auto_now=True)

    @classmethod
    def get(cls, uuid):
        user = cls.get_by_id(uuid)
        if user:
            return user
        else:
            raise DataError(APIStatus.DATA_NOT_FOUND, 'User not found, uuid: %s' % uuid)


class Currency(BaseEntity):
    gem = ndb.IntegerProperty(required=True, default=0)
    coin = ndb.IntegerProperty(required=True, default=0)
    total_spend = ndb.IntegerProperty(required=True, default=0)
    update_date = ndb.DateTimeProperty(required=True, auto_now=True)


class StartupHistory(BaseEntity):
    version = ndb.StringProperty(required=True)
    ip = ndb.StringProperty(required=True)
| 36.05 | 89 | 0.73301 | 170 | 1,442 | 6.135294 | 0.417647 | 0.115053 | 0.091083 | 0.115053 | 0.245446 | 0.245446 | 0.099712 | 0.099712 | 0.099712 | 0 | 0 | 0.003353 | 0.172677 | 1,442 | 39 | 90 | 36.974359 | 0.870914 | 0.01595 | 0 | 0.0625 | 0 | 0 | 0.016949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.15625 | 0 | 0.84375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9c983e06b9771f99c2823cbf81e31f60d912dcdd | 3,327 | py | Python | paytm/models.py | Faisal-Manzer/django-paytm-checkout | bc796fca160f2664d85d0a09acf558532cc74442 | [
"MIT"
] | 10 | 2019-11-08T12:10:19.000Z | 2019-11-16T09:59:45.000Z | paytm/models.py | Faisal-Manzer/django-paytm-checkout | bc796fca160f2664d85d0a09acf558532cc74442 | [
"MIT"
] | 6 | 2019-11-09T09:16:46.000Z | 2022-02-10T11:30:55.000Z | paytm/models.py | Faisal-Manzer/django-paytm-checkout | bc796fca160f2664d85d0a09acf558532cc74442 | [
"MIT"
] | 1 | 2020-06-02T15:56:56.000Z | 2020-06-02T15:56:56.000Z | __all__ = ['Order', 'Item']
from django.db import models
from django.contrib.auth import get_user_model
from paytm import conf as paytm_conf
from paytm.helpers import sha256
class Item(models.Model):
    price = models.FloatField()
    name = models.CharField(max_length=255)
    tag = models.CharField(max_length=255, null=True, blank=True)

    def __str__(self):
        return self.name


class Order(models.Model):
    class Channel:
        WEB = paytm_conf.CHANNEL_WEBSITE
        APP = paytm_conf.CHANNEL_MOBILE_APP

        choices = (
            (WEB, 'Web'),
            (APP, 'App')
        )

    class Status:
        SUCCESS = 'S'
        FAILURE = 'F'
        UNKNOWN = 'U'
        PENDING = 'P'
        FRAUD = 'E'

        choices = (
            (SUCCESS, 'Success'),
            (FAILURE, 'Failure'),
            (PENDING, 'Pending'),
            (UNKNOWN, 'Unknown'),
            (FRAUD, 'Fraud')
        )

    user = models.ForeignKey(get_user_model(), null=True, blank=True, on_delete=models.DO_NOTHING)

    # ------------------------------------ Pay load sent to paytm --------------------------------
    # ORDER_ID* String(50)
    order_id = models.CharField(max_length=50)
    # CUST_ID* String(64)
    customer_id = models.CharField(max_length=64)
    # TXN_AMOUNT* String(10)
    amount = models.FloatField()
    real_amount = models.FloatField()  # amount aimed to capture
    # CHANNEL_ID* String(3)
    channel = models.CharField(max_length=3, choices=Channel.choices, default=Channel.WEB)
    # MOBILE_NO String(15)
    mobile = models.CharField(max_length=15, null=True, blank=True)
    # EMAIL String(50)
    email = models.EmailField(null=True, blank=True)
    # MERC_UNQ_REF String(50)
    notes = models.CharField(max_length=50, null=True, blank=True)

    # ---------------------------------- Response sent by paytm ---------------------------------
    # TXNID* String(64)
    txn_id = models.CharField(max_length=64, null=True, blank=True)
    # BANKTXNID* String
    bank_txn_id = models.TextField(null=True, blank=True)
    # STATUS* String(20)
    status = models.CharField(max_length=20, choices=Status.choices, default=Status.UNKNOWN)
    # CURRENCY* String(3)
    currency = models.CharField(max_length=3, null=True, blank=True)
    # RESPCODE* String(10)
    resp_code = models.CharField(max_length=10, null=True, blank=True)
    # RESPMSG* String
    resp_message = models.TextField(null=True, blank=True)
    # TXNDATE* DateTime
    transaction_date = models.DateTimeField(null=True, blank=True)
    # GATEWAYNAME String(15)
    gateway = models.CharField(max_length=15, null=True, blank=True)
    # BANKNAME* String
    bank = models.TextField(null=True, blank=True)
    # PAYMENTMODE* String(15)
    mode = models.CharField(max_length=15, null=True, blank=True)

    initiated = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        is_new_order = False
        if self.id is None:
            is_new_order = True
            self.order_id = ''

        super(Order, self).save(*args, **kwargs)

        if is_new_order:
            self.order_id = sha256(str(self.id).rjust(50, '0'))[:50]
            self.save()

    def __str__(self):
        return f'{self.order_id}'
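# Illustrative note on the two-phase save above (added for clarity; not part of
# the original module): the first save() obtains the auto-assigned primary key,
# and the second persists an order_id derived from it. Assuming
# paytm.helpers.sha256 returns a hex digest string, an order with id=1 gets:
#
#   sha256('1'.rjust(50, '0'))[:50]  # first 50 hex chars of sha256('0' * 49 + '1')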
| 28.681034 | 98 | 0.612564 | 405 | 3,327 | 4.871605 | 0.298765 | 0.056766 | 0.092245 | 0.120628 | 0.222504 | 0.142423 | 0.065383 | 0.065383 | 0.065383 | 0 | 0 | 0.023875 | 0.232041 | 3,327 | 115 | 99 | 28.930435 | 0.748337 | 0.165014 | 0 | 0.0625 | 0 | 0 | 0.025027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.0625 | 0.03125 | 0.578125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9c9dc2b48466cf77d6827d16eafaafb0b544703d | 1,046 | py | Python | tests/algorithms/sets/test_cartesian.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | 2 | 2018-11-05T17:12:40.000Z | 2019-09-05T17:10:01.000Z | tests/algorithms/sets/test_cartesian.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | null | null | null | tests/algorithms/sets/test_cartesian.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | 1 | 2019-01-02T19:07:51.000Z | 2019-01-02T19:07:51.000Z | #!/usr/bin/env python
import unittest
from algorithms.sets.cartesian_product import cartesian
class TestFatorial(unittest.TestCase):

    def setUp(self):
        self.set_a = [1, 2]
        self.set_b = [4, 5]

    def test_cartesian_product(self):
        self.assertEqual(cartesian.product(self.set_a, self.set_b), [[1, 4], [1, 5], [2, 4], [2, 5]])

    def test_cartesian_product_by_list_comprehension(self):
        self.assertEqual(cartesian.list_comprehension(self.set_a, self.set_b), [[1, 4], [1, 5], [2, 4], [2, 5]])

    def test_cartesian_product_recursive_two_sets(self):
        result = [i for i in cartesian.product_n(self.set_a, self.set_b)]
        self.assertEqual(result, [[1, 4], [1, 5], [2, 4], [2, 5]])

    def test_cartesian_product_recursive_three_sets(self):
        result = [i for i in cartesian.product_n(self.set_a, self.set_b, self.set_a)]
        self.assertEqual(result, [[1, 4, 1], [1, 4, 2], [1, 5, 1], [1, 5, 2], [2, 4, 1], [2, 4, 2], [2, 5, 1], [2, 5, 2]])


if __name__ == '__main__':
    unittest.main()
| 34.866667 | 122 | 0.630975 | 170 | 1,046 | 3.647059 | 0.235294 | 0.124194 | 0.077419 | 0.096774 | 0.522581 | 0.483871 | 0.417742 | 0.417742 | 0.417742 | 0.417742 | 0 | 0.061611 | 0.193117 | 1,046 | 29 | 123 | 36.068966 | 0.672986 | 0.01912 | 0 | 0 | 0 | 0 | 0.007805 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.277778 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9c9e9f62b13ea8939d24495e7e75ae788410b53b | 5,520 | py | Python | tests/test_models.py | thmslmr/timebomb-client | a57fdbb8bfc0157d2c3d713496ab4819fb33f1fd | [
"MIT"
] | 1 | 2020-03-31T17:17:40.000Z | 2020-03-31T17:17:40.000Z | tests/test_models.py | thmslmr/timebomb-client | a57fdbb8bfc0157d2c3d713496ab4819fb33f1fd | [
"MIT"
] | 2 | 2020-03-31T17:18:38.000Z | 2020-03-31T17:21:13.000Z | tests/test_models.py | thmslmr/timebomb-client | a57fdbb8bfc0157d2c3d713496ab4819fb33f1fd | [
"MIT"
] | null | null | null | from datetime import datetime
import timebomb.models as models
def test_Notification():
    notif = models.Notification("message")
    assert notif.content == "message"
    assert notif.read is False
    assert str(notif) == "message"


def test_Player():
    player = models.Player("name", "id")
    assert player.name == "name"
    assert player.id == "id"
    assert player.team is None
    assert player.hand is None

    player = models.Player("name", "id", "team", ("A", "B"), "roomid")
    assert player.name == "name"
    assert player.id == "id"
    assert player.team == "team"
    assert player.hand == ("A", "B")
    assert player.roomId == "roomid"


def test_Message():
    now = datetime.now()
    message = models.Message("player", "message")
    assert message.player_name == "player"
    assert message.content == "message"
    assert message.timestamp and isinstance(message.timestamp, datetime)
    assert str(message) == f"[{now:%H:%M}] player: message"


def test_Room():
    player = models.Player("player", "player_id")
    room = models.Room("room", "room_id", (player,))
    assert room.name == "room" and room.id == "room_id"
    assert len(room.players) == 1 and room.players[0] is player
    assert room.cutter is None and room.winning_team is None and room.status == ""
    assert isinstance(room.cards_found, dict) and isinstance(room.cards_left, dict)
    assert not room.cards_found and not room.cards_left


def test_GameState():
    state = models.GameState()
    assert isinstance(state.messages, list) and not state.messages
    assert state.room is None and state.me is None and state.notification is None


def test_GameState_new_message():
    state = models.GameState()
    assert isinstance(state.messages, list) and not state.messages

    message = state.new_message({"player": "player", "message": "test_message"})
    assert len(state.messages) == 1 and state.messages[0] is message
    assert message.player_name == "player" and message.content == "test_message"

    for i in range(99):
        last = state.new_message(
            {"player": f"player{i}", "message": f"test_message{i}"}
        )
    assert len(state.messages) == 100
    assert state.messages[0] is message and state.messages[99] is last
    assert last.player_name == "player98" and last.content == "test_message98"

    last = state.new_message({"player": "player99", "message": "test_message99"})
    assert len(state.messages) == 100
    assert state.messages[0] is not message and state.messages[99] is last
    assert (
        state.messages[0].player_name == "player0"
        and state.messages[0].content == "test_message0"
    )
    assert last.player_name == "player99" and last.content == "test_message99"

    res = state.new_message({"message": "test_message100"})
    assert res is None
    assert state.messages[99] is last


def test_GameState_new_notification():
    state = models.GameState()
    assert state.notification is None

    notif1 = state.new_notification({"message": "notif1"})
    assert state.notification is notif1 and notif1.content == "notif1"

    notif2 = state.new_notification({"message": "notif2"})
    assert state.notification is notif2 and notif2.content == "notif2"

    notif3 = state.new_notification({"unknown": "notif2"})
    assert notif3 is None and state.notification is notif2


def test_GameState_update_room():
    state = models.GameState()
    assert state.room is None

    players_data = [{"name": "player1", "id": "id1"}]
    room_data = {"name": "roomname", "id": "roomid", "players": players_data}
    room = state.update_room(room_data)
    assert state.room is room and room.name == "roomname" and room.id == "roomid"
    assert len(room.players) == 1
    assert room.players[0].name == "player1" and room.players[0].id == "id1"

    new_data = {"name": "newname", "cutter": {"name": "cutter", "id": "cutterid"}}
    room = state.update_room(new_data)
    assert state.room is room and room.name == "newname" and room.id == "roomid"
    assert len(room.players) == 1
    assert room.players[0].name == "player1" and room.players[0].id == "id1"
    assert (
        isinstance(room.cutter, models.Player)
        and room.cutter.id == "cutterid"
        and room.cutter.name == "cutter"
    )

    new_data = {
        "players": [{"name": "player1", "id": "id1"}, {"name": "player2", "id": "id2"}]
    }
    room = state.update_room(new_data)
    assert state.room is room and room.name == "newname" and room.id == "roomid"
    assert len(room.players) == 2


def test_GameState_update_me():
    state = models.GameState()
    assert state.me is None

    player = state.update_me({"name": "player1", "id": "id1"})
    assert state.me is player and player.name == "player1" and player.id == "id1"
    assert player.hand is None

    player = state.update_me({"hand": ("A", "A", "B", "A")})
    assert state.me is player and player.name == "player1" and player.id == "id1"
    assert player.hand == ("A", "A", "B", "A")


def test_GameState_reset():
    state = models.GameState()
    assert isinstance(state.messages, list) and not state.messages
    assert state.room is None and state.me is None and state.notification is None

    state.messages = ["m1", "m2"]
    state.room = "Room"
    state.me = "Me"
    state.notification = "Notification"

    state.reset()
    assert isinstance(state.messages, list) and not state.messages
    assert state.room is None and state.me is None and state.notification is None
| 32.470588 | 87 | 0.661051 | 740 | 5,520 | 4.85 | 0.113514 | 0.072444 | 0.022569 | 0.033157 | 0.46949 | 0.395653 | 0.346057 | 0.346057 | 0.325439 | 0.315408 | 0 | 0.017603 | 0.197283 | 5,520 | 169 | 88 | 32.662722 | 0.792372 | 0 | 0 | 0.284483 | 0 | 0 | 0.123551 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.086207 | false | 0 | 0.017241 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ca437236746459609c5690cd68a04412ca8726f | 2,034 | py | Python | python/tests/test_nessie_cli.py | ryantse/nessie | c347d61ec7358fdc5759147cc6207143927246f3 | [
"Apache-2.0"
] | null | null | null | python/tests/test_nessie_cli.py | ryantse/nessie | c347d61ec7358fdc5759147cc6207143927246f3 | [
"Apache-2.0"
] | null | null | null | python/tests/test_nessie_cli.py | ryantse/nessie | c347d61ec7358fdc5759147cc6207143927246f3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pynessie` package."""
import pytest
import requests_mock
import simplejson as json
from click.testing import CliRunner
from pynessie import __version__
from pynessie import cli
from pynessie.model import ReferenceSchema
def test_command_line_interface(requests_mock: requests_mock) -> None:
    """Test the CLI."""
    runner = CliRunner()
    result = runner.invoke(cli.cli)
    assert result.exit_code == 0
    assert "Usage: cli" in result.output
    help_result = runner.invoke(cli.cli, ["--help"])
    assert help_result.exit_code == 0
    assert "Usage: cli" in help_result.output
    help_result = runner.invoke(cli.cli, ["--version"])
    assert help_result.exit_code == 0
    assert __version__ in help_result.output
    requests_mock.get(
        "http://localhost:19120/api/v1/trees",
        text=json.dumps([{"name": "main", "type": "BRANCH", "hash": "1234567890abcdef"}]),
    )
    help_result = runner.invoke(cli.cli, ["list-references"])
    assert help_result.exit_code == 0
    references = ReferenceSchema().loads(help_result.output, many=True)
    assert len(references) == 1
    assert references[0].name == "main"
    assert references[0].kind == "BRANCH"
    assert references[0].hash_ == "1234567890abcdef"


@pytest.mark.e2e
def test_command_line_interface_e2e() -> None:
    """Test the CLI."""
    runner = CliRunner()
    result = runner.invoke(cli.cli)
    assert result.exit_code == 0
    assert "Usage: cli" in result.output
    help_result = runner.invoke(cli.cli, ["--help"])
    assert help_result.exit_code == 0
    assert "Usage: cli" in help_result.output
    help_result = runner.invoke(cli.cli, ["--version"])
    assert help_result.exit_code == 0
    assert __version__ in help_result.output
    help_result = runner.invoke(cli.cli, ["list-references"])
    assert help_result.exit_code == 0
    branches = ReferenceSchema().loads(help_result.output, many=True)
    assert len(branches) == 1
    assert branches[0].name == "main"
| 35.684211 | 90 | 0.69174 | 268 | 2,034 | 5.063433 | 0.261194 | 0.132646 | 0.106116 | 0.123803 | 0.632277 | 0.592483 | 0.592483 | 0.592483 | 0.592483 | 0.51437 | 0 | 0.02558 | 0.17355 | 2,034 | 56 | 91 | 36.321429 | 0.781678 | 0.049164 | 0 | 0.521739 | 0 | 0 | 0.105895 | 0 | 0 | 0 | 0 | 0 | 0.434783 | 1 | 0.043478 | false | 0 | 0.152174 | 0 | 0.195652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cade016723908e48e753e2ca24979b97411963e | 1,970 | py | Python | chooks/commands/add.py | thegedge/chooks | 1a5115d16f67d716206da456ee405a562a6df042 | [
"MIT"
] | null | null | null | chooks/commands/add.py | thegedge/chooks | 1a5115d16f67d716206da456ee405a562a6df042 | [
"MIT"
] | 1 | 2015-05-11T18:25:49.000Z | 2015-05-11T18:29:11.000Z | chooks/commands/add.py | thegedge/chooks | 1a5115d16f67d716206da456ee405a562a6df042 | [
"MIT"
] | null | null | null | """Adds a chook to a git repository.

Usage:
    chooks add [--stdin | --argument] [--once | --filter=FILTER...] [--global]
               [--fatal] [--hook=NAME...] [--name=NAME] [--disabled]
               [--] <command> [<args>...]

Options:
    --stdin            Supply input files to this chook via stdin.
    --filter=<filter>  Only execute this chook for files whose names match the
                       given filter.
    --global           Execute this chook for all repositories.
    --hook=<name>      Name of the git hooks this chook will be executed for
                       (if not specified, default to all git hooks).
    --fatal            Return a nonzero status to the git hook if this chook
                       returns a nonzero status.
    --once             If specified, this chook is only applied once for the git
                       hook. If not specified, this chook is applied against all
                       files echoed by 'git status' (excluding ignored/untracked)
    --name=<name>      Custom hook name (defaults to the command name).
    --disabled         Default the hook to a disabled state.
"""
from chooks import constants
from chooks import git
# TODO interactive mode?
def run(args):
    full_cmd = '%s %s' % (args['<command>'], ' '.join(args['<args>']))
    filters = args.get('--filter') and ','.join(args['--filter'])

    # TODO validate hook names, making sure they're actually git hooks
    hooks = args.get('--hook') and ','.join(args['--hook'])

    values = {
        constants.KEY_COMMAND: full_cmd,
        constants.KEY_STDIN: args.get('--stdin'),
        constants.KEY_FILTERS: filters,
        constants.KEY_HOOKS: hooks,
        constants.KEY_FATAL: args.get('--fatal'),
        constants.KEY_DISABLED: args.get('--disabled'),
    }

    hook_name = args.get('--name') or args['<command>']
    is_global = args.get('--global', False)
    if git.set_hook_values(hook_name, values, is_global=is_global):
        return 0
    return 1
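# Illustrative invocation (a hypothetical example based on the usage string
# above, not taken from the original source): running
#
#   chooks add --fatal --hook=pre-commit --filter=*.py -- flake8 --select=E501
#
# would call git.set_hook_values('flake8', values, is_global=False) with
# KEY_COMMAND='flake8 --select=E501', KEY_HOOKS='pre-commit',
# KEY_FILTERS='*.py' and KEY_FATAL=True, returning 0 on success, 1 otherwise.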
| 38.627451 | 79 | 0.601015 | 252 | 1,970 | 4.638889 | 0.349206 | 0.053892 | 0.02053 | 0.032506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001389 | 0.269036 | 1,970 | 50 | 80 | 39.4 | 0.810417 | 0.605076 | 0 | 0 | 0 | 0 | 0.127273 | 0 | 0 | 0 | 0 | 0.02 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9caf4f6433bdc8722933884a9170248eea827c22 | 3,432 | py | Python | MakeConects.py | Gimba/AmberUtils | f10ccc474a4fa4ea3aa2d93f85b99e2bb8b5d3b0 | [
"MIT"
] | null | null | null | MakeConects.py | Gimba/AmberUtils | f10ccc474a4fa4ea3aa2d93f85b99e2bb8b5d3b0 | [
"MIT"
] | null | null | null | MakeConects.py | Gimba/AmberUtils | f10ccc474a4fa4ea3aa2d93f85b99e2bb8b5d3b0 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# Copyright (c) 2015 William Lees
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Read SSBOND directives from a PDB, and generate CONECT records at the end. This can be used to fix up a PDB file
# after residues and atom numbers have been modified.
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
import sys
import argparse
def main(argv):
    parser = argparse.ArgumentParser(description='Read SSBOND directives from a PDB, and generate corresponding CONECT records')
    parser.add_argument('infile', help='input file (PDB format)')
    parser.add_argument('outfile', help='output file (PDB format)')
    args = parser.parse_args()

    ssbonds = []
    atoms = {}
    written = False

    with open(args.infile, "r") as f, open(args.outfile, "w") as of:
        for line in f:
            line = line.strip()
            if line[0:6] == "SSBOND":
                res1 = line[15:22]
                res2 = line[29:36]
                ssbonds.append((res1, res2))
            elif line[0:6] == "ATOM  ":
                res = line[21] + ' ' + line[22:27]
                atom = line[12:16]
                number = line[6:11]
                if 'SG' in atom:
                    atoms[res] = number
            elif line[0:6] == "CONECT":
                continue
            elif line[0:3] == "END":
                if len(line) == 3 or line[3] == ' ':
                    for r1, r2 in ssbonds:
                        if r1 in atoms and r2 in atoms:
                            of.write("CONECT%s%s\n" % (atoms[r1], atoms[r2]))
                            of.write("CONECT%s%s\n" % (atoms[r2], atoms[r1]))
                        else:
                            print 'Warning: atoms corresponding to SSBOND(%s,%s) were not found.' % (r1, r2)
                    written = True
            of.write(line + '\n')

        if not written:
            print 'Warning: END record was not found. CONECTS will be written at the end of the file.'
            for r1, r2 in ssbonds:
                if r1 in atoms and r2 in atoms:
                    of.write("CONECT%s%s\n" % (atoms[r1], atoms[r2]))
                    of.write("CONECT%s%s\n" % (atoms[r2], atoms[r1]))
                else:
                    print 'Warning: atoms corresponding to SSBOND(%s,%s) were not found.' % (r1, r2)


if __name__ == "__main__":
    main(sys.argv)
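# Illustrative output sketch (not from the original file): for an SSBOND pair
# whose cysteine SG atoms are found among the ATOM records, with atom serial
# fields (line[6:11]) of '  170' and '  700', two reciprocal records are
# written before END:
#
#   CONECT  170  700
#   CONECT  700  170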
| 41.349398 | 128 | 0.592657 | 464 | 3,432 | 4.342672 | 0.413793 | 0.043672 | 0.025806 | 0.027792 | 0.196526 | 0.196526 | 0.196526 | 0.196526 | 0.157816 | 0.157816 | 0 | 0.025 | 0.312354 | 3,432 | 82 | 129 | 41.853659 | 0.828814 | 0.361014 | 0 | 0.255319 | 0 | 0 | 0.20927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.042553 | null | null | 0.06383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cb06ce3367f1f618bf6ef97ee8c9a3b4e22a068 | 481 | py | Python | supersalon/professionals/models.py | dogukantufekci/supersalon | 6ce552712d9d91fb493043030a09386544ed8b81 | [
"BSD-3-Clause"
] | null | null | null | supersalon/professionals/models.py | dogukantufekci/supersalon | 6ce552712d9d91fb493043030a09386544ed8b81 | [
"BSD-3-Clause"
] | null | null | null | supersalon/professionals/models.py | dogukantufekci/supersalon | 6ce552712d9d91fb493043030a09386544ed8b81 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Professional(models.Model):
    # User
    user = models.OneToOneField('users.User', primary_key=True, related_name='professional', verbose_name=_("User"))

    class Meta:
        ordering = ('user__first_name', 'user__last_name',)
        verbose_name = _("Professional")
        verbose_name_plural = _("Professionals")

    def __str__(self):
        return self.user.get_full_name()
| 30.0625 | 116 | 0.704782 | 57 | 481 | 5.54386 | 0.596491 | 0.10443 | 0.14557 | 0.170886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.182952 | 481 | 16 | 117 | 30.0625 | 0.804071 | 0.008316 | 0 | 0 | 0 | 0 | 0.172269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0.1 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cb7bc1d4808741ef60b11b2ef895ca36d1c2b75 | 7,687 | py | Python | datageneration/dbmake_h264hits.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | 1 | 2022-02-21T02:45:25.000Z | 2022-02-21T02:45:25.000Z | datageneration/dbmake_h264hits.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | null | null | null | datageneration/dbmake_h264hits.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | null | null | null | import skimage.io
import skvideo.io
import os
import h5py
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import f1_score
import scipy.misc
import scipy.signal
import numpy as np
from sporco import util
import matplotlib.pyplot as plt
import pylab as py
import glob
from PIL import Image
import cv2
import sys
def get_postrainpatches(hdf5_im, hdf5_lab, hdf5_trainset, offsets, idx=0, traintest=0):
    return genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, offsets, idx=idx, traintest=traintest)


def genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, offsets, idx=0, traintest=0):
    width = 256
    height = 256

    lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
    lst1080p = np.array(glob.glob("/mnt/hd3/scenes/1080p/*avi"))
    lst480p = np.sort(lst480p)
    lst1080p = np.sort(lst1080p)

    if traintest == 0:
        lst = np.hstack((lst480p[:575], lst1080p[:215]))
    else:
        lst = np.hstack((lst480p[575:], lst1080p[215:]))

    for repeates in [1, 2]:
        n_samples = len(lst)
        for fidx, fname in enumerate(lst):
            print fidx, n_samples, fname
            #vid = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
            cmd = "ffmpeg -y -nostats -loglevel 0 -i %s -codec:v libx264 -g 50 -mpv_flags +strict_gop -bsf noise=2000000 -b:v 40000k /tmp/test_distorted.mp4" % (fname,)
            os.system(cmd)
            cmd = "ffmpeg -y -nostats -loglevel 0 -ec 0 -i /tmp/test_distorted.mp4 -vcodec rawvideo -pix_fmt yuv420p /tmp/test_distorted.avi"
            os.system(cmd)
            cmd = "ffmpeg -y -nostats -loglevel 0 -i %s -codec:v mpeg2video -b:v 40000k /tmp/test_pristine.mp4" % (fname,)
            os.system(cmd)
            cmd = "ffmpeg -y -nostats -loglevel 0 -ec 0 -i /tmp/test_pristine.mp4 -vcodec rawvideo -pix_fmt yuv420p /tmp/test_pristine.avi"
            os.system(cmd)

            vid_dis = skvideo.io.vread("/tmp/test_distorted.avi", as_grey=True).astype(np.float32)
            vid_pris = skvideo.io.vread("/tmp/test_pristine.avi", as_grey=True).astype(np.float32)

            os.remove("/tmp/test_distorted.mp4")
            os.remove("/tmp/test_pristine.mp4")
            os.remove("/tmp/test_distorted.avi")
            os.remove("/tmp/test_pristine.avi")

            T, H, W, C = vid_dis.shape
            adj_h = H - height
            adj_w = W - width

            iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
            iv = iv.reshape(-1)
            jv = jv.reshape(-1)
            jdx = np.random.permutation(adj_h*adj_w)
            iv = iv[jdx]
            jv = jv[jdx]
            tv = np.arange(1, T-1)

            limit = 0
            for (y, x) in zip(iv, jv):
                np.random.shuffle(tv)
                t = tv[0]
                goodpatch = vid_pris[t-1:t+2, y:y+height, x:x+width, 0]
                badpatch = vid_dis[t-1:t+2, y:y+height, x:x+width, 0]
                # difference the magnitudes, so we don't worry about phase shifts
                if badpatch.shape[0] == goodpatch.shape[0]:
                    diff = np.mean(np.abs(badpatch[1, 30:-30, 30:-30] - goodpatch[1, 30:-30, 30:-30])**2)
                    if diff < 50:
                        continue
                    # check that either the previous frame or next frame match exactly, except where the middle frame doesn't match
                    # this ensures that the difference measured is not because of frame mis-alignment
                    error1 = np.sum((goodpatch[0] - badpatch[0])**2)
                    error2 = np.sum((goodpatch[2] - badpatch[2])**2)
                    print error1, error2
                else:
                    continue

                # check for no variance img
                if np.std(badpatch[0, 30:-30, 30:-30]) < 10:
                    continue
                if np.std(badpatch[1, 30:-30, 30:-30]) < 10:
                    continue
                if np.std(badpatch[2, 30:-30, 30:-30]) < 10:
                    continue

                #goodpatch = goodpatch.astype(np.uint8)
                #badpatch = badpatch.astype(np.uint8)
                #badimg = badpatch[0].astype(np.uint8)
                #skimage.io.imsave("dump/patch_%d.png" % (idx,), badimg)
                #print diff
                #skimage.io.imsave("/tmp/test_%d.png" % (limit,), np.hstack((goodpatch.astype(np.uint8), badpatch.astype(np.uint8))))
                #preprocess = preprocess[:, 5:-5, 5:-5]

                hdf5_im[idx] = badpatch
                hdf5_lab[idx] = 1
                hdf5_trainset[idx] = traintest
                offsets[idx] = [y, x]
                #skimage.io.imsave("extract/%d.png" % (idx,), patch)
                limit += 1
                idx += 1
                if limit >= 10:
                    break
    return idx


def get_negtrainpatches(image_patches, labels, hdf5_traintest, offsets, idx=0, traintest=0):
    return genericnegpatcher(image_patches, labels, hdf5_traintest, offsets, idx=idx, traintest=traintest)


def genericnegpatcher(hdf5_im, hdf5_lab, hdf5_traintest, offsets, idx=0, traintest=0):
    width = 256
    height = 256

    #lst = glob.glob("/mnt/hd3/databases/video/film_pristine/480p/*/*mpg")
    lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
    lst1080p = np.array(glob.glob("/mnt/hd3/scenes/1080p/*avi"))
    lst480p = np.sort(lst480p)
    lst1080p = np.sort(lst1080p)

    if traintest == 0:
        lst = np.hstack((lst480p[:575], lst1080p[:215]))
    else:
        lst = np.hstack((lst480p[575:], lst1080p[215:]))

    n_samples = len(lst)
    for fidx, fname in enumerate(lst):
        print fidx, n_samples, fname
        #vid = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
        cmd = "ffmpeg -y -nostats -loglevel 0 -i %s -codec:v h264 -b:v 40000k /tmp/test_pristine.mp4" % (fname,)
        os.system(cmd)

        vid_pris = skvideo.io.vread("/tmp/test_pristine.mp4", inputdict={'-ec': '0'}, as_grey=True).astype(np.float32)

        T, H, W, C = vid_pris.shape
        adj_h = H - height
        adj_w = W - width

        iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
        iv = iv.reshape(-1)
        jv = jv.reshape(-1)
        jdx = np.random.permutation(adj_h*adj_w)
        iv = iv[jdx]
        jv = jv[jdx]
        tv = np.arange(1, T-1)

        limit = 0
        for (y, x) in zip(iv, jv):
            np.random.shuffle(tv)
            t = tv[0]
            goodpatch = vid_pris[t-1:t+2, y:y+height, x:x+width, 0]
            #print diff
            #skimage.io.imsave("/tmp/test_%d.png" % (limit,), np.hstack((goodpatch.astype(np.uint8), badpatch.astype(np.uint8))))
            hdf5_im[idx] = goodpatch
            hdf5_lab[idx] = 0
            hdf5_traintest[idx] = traintest
            offsets[idx] = [y, x]
            #skimage.io.imsave("extract/%d.png" % (idx,), patch)
            limit += 1
            idx += 1
            if limit >= 20:
                break
    return idx


# get the number of patches
np.random.seed(12345)

n_total_images = 62417
patch_height = 256
patch_width = 256
n_channels = 3

# sf = single frame
# fd = frame diff
f = h5py.File('/mnt/hd2/hitsdataset_sf_h264_2.hdf5', mode='w')
image_patches = f.create_dataset('image_patches', (n_total_images, n_channels, patch_height, patch_width), dtype='float')
image_patches.dims[0].label = 'batch'
image_patches.dims[1].label = 'channel'
image_patches.dims[2].label = 'height'
image_patches.dims[3].label = 'width'
labels = f.create_dataset('labels', (n_total_images,), dtype='uint8')
trainset = f.create_dataset('set', (n_total_images,), dtype='uint8')
offsets = f.create_dataset('offsets', (n_total_images, 2), dtype='int32')

n_idx = 0
n_idx = get_postrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=0)
n_idx = get_negtrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=0)
n_idx = get_postrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=1)
n_idx = get_negtrainpatches(image_patches, labels, trainset, offsets, n_idx, traintest=1)
print n_idx, n_total_images
#n_idx = get_negtestpatches(image_patches, labels, trainset, n_idx)
#n_idx = get_postestpatches(image_patches, labels, trainset, n_idx)
f.flush()
f.close()
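# Reading the result back (a minimal sketch added for illustration; the
# original script only writes the file). Note the 'channel' axis of
# image_patches holds three consecutive grayscale frames (t-1, t, t+1):
#
#   import h5py
#   with h5py.File('/mnt/hd2/hitsdataset_sf_h264_2.hdf5', 'r') as g:
#       patches = g['image_patches']  # shape (62417, 3, 256, 256)
#       labels = g['labels'][:]       # 1 = H.264 hit (distorted), 0 = pristine
#       split = g['set'][:]           # 0 = train split, 1 = test split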
| 34.013274 | 162 | 0.655392 | 1,168 | 7,687 | 4.19863 | 0.20976 | 0.022839 | 0.012235 | 0.031811 | 0.629078 | 0.572186 | 0.535277 | 0.502243 | 0.472471 | 0.444535 | 0 | 0.060631 | 0.199688 | 7,687 | 225 | 163 | 34.164444 | 0.736508 | 0.15897 | 0 | 0.487013 | 0 | 0.032468 | 0.144055 | 0.070009 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.116883 | null | null | 0.025974 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cc0a99af59fe409930941e63a1459a695a01a4f | 2,258 | py | Python | scripts/spark.py | Sapphirine/Reducing_Manufacturing_Failures | ee6fd6dba345997431cae30380150e3513cab58e | [
"Apache-2.0"
] | 4 | 2019-04-02T21:20:19.000Z | 2021-09-01T06:08:47.000Z | scripts/spark.py | Sapphirine/Reducing_Manufacturing_Failures | ee6fd6dba345997431cae30380150e3513cab58e | [
"Apache-2.0"
] | null | null | null | scripts/spark.py | Sapphirine/Reducing_Manufacturing_Failures | ee6fd6dba345997431cae30380150e3513cab58e | [
"Apache-2.0"
] | 3 | 2019-08-02T12:13:04.000Z | 2020-03-24T12:16:33.000Z |
# coding: utf-8
# ### Open using Databricks Platform/Py-spark. It holds the code for developing the RandomForest Classifier on the chosen subset of important features.
# In[1]:
import os, sys
import pandas as pd
import numpy as np
from sklearn.metrics import matthews_corrcoef
import pyspark
from numpy import array
import numpy as np
import pandas as pd
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import StringIndexer, VectorAssembler, VectorIndexer
import gc
from pyspark.sql.functions import col, count, sum
from sklearn.metrics import matthews_corrcoef
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.functions import rand
REPLACE_YOUR_FILE = "/FileStore/tables/e9svdv4y1482386357547/test_numeric.csv"
df0 = sqlContext.read.format("csv").load(REPLACE_YOUR_FILE, header="true", inferSchema="true")
df = df0.na.fill(99999)
df = df.na.drop()
df.printSchema()
# In[2]:
feature = ['L3_S31_F3846', 'L1_S24_F1578', 'L3_S33_F3857', 'L1_S24_F1406', 'L3_S29_F3348', 'L3_S33_F3863',
           'L3_S29_F3427', 'L3_S37_F3950', 'L0_S9_F170', 'L3_S29_F3321', 'L1_S24_F1346', 'L3_S32_F3850',
           'L3_S30_F3514', 'L1_S24_F1366', 'L2_S26_F3036']

assembler = VectorAssembler(
    inputCols=feature,
    outputCol='features')
data = (assembler.transform(df).select("features", df.Response.astype('double')))
(trainingData, testData) = data.randomSplit([0.8, 0.2], seed=451)
data.printSchema()
# In[3]:
cls = RandomForestClassifier(numTrees=60, seed=1111, maxDepth=15, labelCol="Response", featuresCol="features")
pipeline = Pipeline(stages=[cls])
evaluator = MulticlassClassificationEvaluator(
    labelCol="Response", predictionCol="prediction", metricName="accuracy")
trainingData=trainingData.na.drop()
trainingData.printSchema()
# In[4]:
gc.collect()
model = pipeline.fit(trainingData)
# In[5]:
# making predictions
predictions = model.transform(testData)
response = predictions.select("Response").rdd.map(lambda r: r[0]).collect()
predictedValue = predictions.select("probability").rdd.map(lambda r: int(r[0][1] > 0.5)).collect()  # threshold P(class=1) at 0.5
mcc = matthews_corrcoef(response, predictedValue)
print (mcc)
| 27.876543 | 152 | 0.767493 | 299 | 2,258 | 5.668896 | 0.521739 | 0.045428 | 0.038348 | 0.018879 | 0.112094 | 0.047198 | 0 | 0 | 0 | 0 | 0 | 0.074092 | 0.109389 | 2,258 | 80 | 153 | 28.225 | 0.768772 | 0.095217 | 0 | 0.136364 | 0 | 0 | 0.161656 | 0.0276 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.386364 | 0 | 0.386364 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9ccb29cee770a8f04a2031d1320f314942e54690 | 13,400 | py | Python | sondages/sondages_wiki_scrap.py | verycourt/Elections | b954c2bb23422e85d10074d41f7d2adc537a1766 | [
"MIT"
] | null | null | null | sondages/sondages_wiki_scrap.py | verycourt/Elections | b954c2bb23422e85d10074d41f7d2adc537a1766 | [
"MIT"
] | null | null | null | sondages/sondages_wiki_scrap.py | verycourt/Elections | b954c2bb23422e85d10074d41f7d2adc537a1766 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import warnings
import dateparser
import datetime
import time
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.1f')
warnings.filterwarnings('ignore')
URL = "https://fr.wikipedia.org/wiki/Liste_de_sondages_sur_l'%C3%A9lection_pr%C3%A9sidentielle_fran%C3%A7aise_de_2017#2016"
path1 = "/var/www/html/1ertour/"
path2 = "/var/www/html/2ndtour/"
'''dicoTableMois = {4:"Janvier 2016", 5:"Février 2016", 6:"Mars 2016", 7:"Avril 2016", 8:"Mai 2016", 9:"Juin 2016",\
10:"Juillet 2016", 11:"Septembre 2016", 12:"Octobre 2016", 13:"Novembre 2016", 14:"Décembre 2016", \
15:"Janvier 2017", 16:"Février 2017"}
'''
dicoTableMois = {0:"Mars 2017", 1:"Février 2017", 2:"Janvier 2017"}
dico_couleurs_candidats = {u"Arnaud Montebourg":"#CC0066", u"Benoît Hamon":"#CC3399",u"Cécile Duflot":"#008000", u"Emmanuel Macron":"#A9A9A9",
u"François Bayrou":"#FF6600", u"François Fillon":"#3399FF", u"François Hollande":"#FF9999", u"Jacques Cheminade":"#CC0000",
u"Jean-Luc Mélenchon":"#FF0000", u"Manuel Valls":"#FF6699", u"Marine Le Pen":"#000080", u"Nathalie Arthaud":"#CC0033",
u"Nicolas Dupont-Aignan":"#0000CC", u"Nicolas Hulot":"#66CC00", u"Philippe Poutou":"#990033",
u"Sylvia Pinel":"#FF0066", u"Yannick Jadot":"#339900"}
dico_candidat_parti = {u"Arnaud Montebourg":"PS",u"Benoît Hamon":"PS",u"Cécile Duflot":"eelv",
u"Emmanuel Macron" : "En Marche",
u"François Bayrou" : "MoDem", u"François Fillon":"Les Républicains",
u"François Hollande" : "PS", u"Jacques Cheminade" : "sp",
u"Jean-Luc Mélenchon" : "Parti de Gauche", u"Manuel Valls":"PS",u"Marine Le Pen":"FN",
u"Nathalie Arthaud":"lutte ouvriere",
u"Nicolas Dupont-Aignan":"Debout La France", u"Nicolas Hulot":"empty", u"Philippe Poutou":"NPA",
u"Sylvia Pinel":"ps", u"Yannick Jadot":"eelv"}
def loadHTML(URL):
    resultats = requests.get(URL)
    return BeautifulSoup(resultats.text, 'html.parser')


def loadPandas(URL):
    tables = loadHTML(URL).findAll("table")
    dfF = pd.DataFrame()
    dfFs = pd.DataFrame()

    # For each table on the Wikipedia page:
    for idx, table in enumerate(tables):
        lignes = table.findAll("tr")

        # Collect the name of each column:
        colonnes = []
        for elem in lignes[0].findAll("th"):
            if elem.find("a") is None:
                if elem.text != u'Autres candidats':
                    colonnes.append(elem.text)
            else:
                if(elem.find("a").text != ""):
                    colonnes.append(elem.find("a").text)
        for elem in lignes[1].findAll("th"):
            if elem.find("a") is not None:
                colonnes.append(elem.find("a").text)
        if len(colonnes) < 7:
            for elem in lignes[2].findAll("th"):
                a = 3
                colonnes.append(elem.text)

        # Create a pandas DataFrame to store the table:
        df = pd.DataFrame(columns = colonnes)
        #print(len(colonnes))
        nbRowspan = 0
        rowspan = []
        rowspanMil = []

        # For each row of the table:
        for j, ligne in enumerate(lignes[2:]):
            line = list(np.zeros(len(colonnes)))
            line = ["/" for item in line]
            # When some cells of the table span several rows
            for i, item in enumerate(rowspanMil):
                if item[0] > 1:
                    line[item[1]] = item[2]
                    item[0] -= 1
            for i, elem in enumerate(ligne.findAll("td")):
                try:
                    while line[i] != "/":
                        i += 1
                except:
                    continue
                if elem.has_attr("rowspan"):
                    nbRowspan = int(elem["rowspan"])
                    if nbRowspan > 1:
                        try:
                            rowspanMil.append([nbRowspan, i, float(elem.text.replace("%", "").replace(",",".").replace("<",""))])
                        except Exception as e:
                            rowspanMil.append([nbRowspan, i, (elem.text.replace("%", "").replace(",",".").replace("<",""))])
                try:
                    line[i] = (float(elem.text.replace("%", "").replace(",",".").replace("<","")))
                except Exception as e:
                    line[i] = (elem.text.replace("%", "").replace(",",".").replace("<",""))
            if len(line) > len(colonnes) - 3:
                df.loc[j] = line
        #print(df)

        try:
            df = df[df["Date"] != "/"]
        except:
            continue

        if idx >= 0 and idx <= 2:
            df["Date"] = df["Date"].map(lambda x : x.lower().replace(dicoTableMois[idx].lower()[:-5],""))
            df["Date"] = df["Date"].map(lambda x : x+" "+dicoTableMois[idx])

        # Second round:
        if len(colonnes) < 7:
            dfFs = dfFs.append(df)
        # First round:
        elif idx >= 0 and idx <= 2:
            dfF = dfF.append(df.ix[1:])

    return (dfF, dfFs)
dfF, dfFs = loadPandas(URL)
#######################################################################
########################### First round ###############################
#######################################################################
dfF = dfF.replace(to_replace=["-", "–"], value=" ")
dfF = dfF[dfF["Pourrait changer d'avis"]!="/"]
dfF["Pourrait changer d'avis"] = dfF["Pourrait changer d'avis"].map(lambda x : (str(x).split("[")[0].strip()))
dfF["Pourrait changer d'avis"] = dfF["Pourrait changer d'avis"].map(lambda x : 0 if x == "nan" or x == "" else float(x[:2]))
notCandidats = [u"Date", u"Sondeur", u"Échantillon"]
anciensCandidats = [u"Alain Juppé", u"Bruno Le Maire", u"Jean-François Copé", u"Nicolas Sarkozy", u"Eva Joly", u"Sylvia Pinel", u"Vincent Peillon", u"Arnaud Montebourg"]
for col in dfF.columns:
    if col not in notCandidats:
        dfF[col] = dfF[col].map(lambda x: x if isinstance(x, float) else np.nan)

dfF2 = dfF
for col in anciensCandidats:
    if col in dfF2.columns:
        dfF2 = dfF2[dfF2[col].isnull()]
        dfF2 = dfF2.drop(col, axis=1)
dfF2["Pourrait changer d'avis"] = dfF2["Pourrait changer d'avis"].map(lambda x : np.nan if x==0 else x)
#print(dfF)
dfF3 = dfF2
dfF3["Date"] = dfF3["Date"].map(lambda x : x.replace("1er", "1").replace("fév.", ""))
dfF3["Date"] = dfF3["Date"].map(lambda x : ' '.join(x.split()))
dfF3["Date"] = dfF3["Date"].map(lambda x : x if len(x.split(" ")) < 4 else " ".join(x.split(" ")[-3:]))
dfF3["Date"] = dfF3["Date"].map(lambda x : dateparser.parse(x).date())
dfF3 = dfF3.groupby(["Date"]).mean().reset_index()
dfF3 = dfF3.sort_values('Date', ascending=1)
def dateToString(date):
    if len(str(date.month)) == 1:
        month = "0" + str(date.month)
    else:
        month = str(date.month)
    if len(str(date.day)) == 1:
        day = "0" + str(date.day)
    else:
        day = str(date.day)
    return str(date.year) + month + day
dfF3 = dfF3.round(2)
dfF3 = dfF3[dfF3["Date"] > datetime.date(year=2017,month=01,day=01)]
dfF4 = dfF3
#dfF4 = dfF4.drop([u"Cécile Duflot", u"François Hollande", u"Nicolas Hulot", u"Rama Yade"], axis=1)
for col in dfF4.columns:
    if col not in [u"Benoît Hamon", u"Emmanuel Macron", u"Date", u"François Fillon",
                   u"Jean-Luc Mélenchon", u"Marine Le Pen", u"Nicolas Dupont-Aignan"]:
        dfF4 = dfF4.drop(col, axis=1)
dfF5 = dfF4
dfF4["date"] = dfF4["Date"].map(lambda x: dateToString(x))
dfF4 = dfF4.drop("Date", axis=1)
dfF4 = dfF4.set_index("date")
dfF4 = dfF4.dropna(axis=1, how='all')
dfF4 = dfF4.dropna(axis=0, how='all')
# --- To json --- #
dfF5 = dfF5.dropna(axis=1, how='all')
dfF5 = dfF5.dropna(axis=0, how='all')
dfF5 = dfF5.set_index("Date")
#dfF5.to_csv("table_agrege.csv")
dfF5 = pd.read_csv("table_agrege.csv", encoding="utf-8")
dfF5["Date"] = pd.to_datetime(dfF5["Date"])
dfF5 = dfF5.groupby(["Date", "date"]).mean().reset_index()
dfF5.set_index("Date", inplace=True)
print(dfF5)
idx = pd.date_range(min(dfF5.index), max(dfF5.index))
dfF5 = dfF5.reindex(idx, fill_value="null")
########################
#  6-day aggregates    #
########################
dfF5 = dfF5.drop("date", axis=1)
dfF5 = dfF5.replace(to_replace=["null"], value=np.nan)
diffDaysLast = (datetime.datetime.now()-max(dfF5.index).to_datetime()).days
#dfF5.index = dfF5.index.map(lambda x : x.to_datetime() + datetime.timedelta(days=diffDaysLast))
#dfF5 = dfF5.map(lambda x : )
lastsondages = max(dfF5.index)
to_add = (max(dfF5.index) - (max(dfF5.groupby(pd.TimeGrouper('6D')).mean().index))).days
dfF5.index = dfF5.index.map(lambda x : (x + datetime.timedelta(days=to_add)) )
dfF5 = dfF5.groupby(pd.TimeGrouper('6D')).mean()
#dfF5 = dfF5.index.map(lambda x : x.to_datetime() + datetime.timedelta(days=6))
for col in dfF5.columns:
    dfF5[col] = np.round(dfF5[col], 1)
print(dfF5)
to_json = []
dico_sondage = {}
dico_sondage["id"] = 1
dico_sondage["refresh"] = {}
dfF5 = dfF5.fillna("null")
dico_sondage["refresh"]["last"] = time.mktime((lastsondages.to_datetime()).timetuple())
dico_sondage["refresh"]["dayInterval"] = 6
dico_sondage["title"] = "Agrégation des sondages pour le 1er tour de 11 instituts*"
dico_sondage["legende"] = "* Les données de ce graphique sont les moyennes des sondages d'intentions de vote de 11 instituts sur six jours. \
Plus précisément, pour chaque jour affiché, il fait la moyenne sur les six derniers jours. \
Les instituts sont : Ifop-Fiducial, OpinionWay, CSA, Future Thinking - SSI, BVA, Odoxa, Harris Interactive, TNS Sofres, Cevipof Ipsos-Sopra Steria, Elabe, Dedicated Research."
dico_sondage["unit"] = "%"
dico_sondage["dataset"] = []
for col in dfF5.columns:
    # Keep the requested candidates:
    dico_temp = {}
    dico_temp["title"] = col
    if col in dico_candidat_parti.keys():
        dico_temp["subtitle"] = dico_candidat_parti[col]
    else:
        dico_temp["subtitle"] = ""
    if col in dico_couleurs_candidats.keys():
        dico_temp["color"] = dico_couleurs_candidats[col]
    else:
        dico_temp["color"] = "#ffffff"
    dico_temp["data"] = list(dfF5[col])
    dico_sondage["dataset"].append(dico_temp)
to_json.append(dico_sondage)
#dfF4.to_csv(path+"sondages1er.csv", sep="\t", encoding='utf-8')
#dfF4.to_json(path1+"pollster1.json", force_ascii=False)
dfF4.to_csv(path1+"sondages1er.csv", sep="\t", encoding='utf-8')
dfF4.to_csv(path1+"data.tsv", sep="\t", encoding='utf-8')
dfF4.to_csv("data.tsv", sep="\t", encoding='utf-8')
#print(dfF3[["Manuel Valls", "Date"]])
#######################################################################
########################### Second round ##############################
#######################################################################
dfFs2 = dfFs
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x if len(x)>5 else np.nan)
dfFs2 = dfFs2[dfFs2["Date"].notnull()]
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x.replace(u"-", " ").replace(u"–", " "))
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x if len(x.split(" ")) < 4 else " ".join(x.split(" ")[-3:]))
dfFs2["Date"] = dfFs2["Date"].map(lambda x : dateparser.parse(x).date())
#dfFs2 = dfFs2.set_index(["Date"])
#dfFs2.index = pd.to_datetime(dfFs2.index)
notCandidats = [u"Date", u"Sondeur", u"Échantillon"]
def dateToString2(date):
    if len(str(date.month)) == 1:
        month = "0" + str(date.month)
    else:
        month = str(date.month)
    if len(str(date.day)) == 1:
        day = "0" + str(date.day)
    else:
        day = str(date.day)
    return day + "/" + month + "/" + str(date.year)


def getDuel(df, nom1, nom2):
    return df[[nom1, nom2, "date"]].set_index("date").dropna(axis=0, how='any')


for col in dfFs2.columns:
    if col not in notCandidats:
        if col != "Abstention, blanc ou nul":
            dfFs2[col] = dfFs2[col].map(lambda x: x if isinstance(x, float) else np.nan)
        else:
            dfFs2[col] = dfFs2[col].map(lambda x: x if isinstance(x, float) else 0)
#dfFs2["Date"] = pd.to_datetime(dfFs2["Date"])
#dfFs2 = dfFs2.groupby(dfFs2["Date"].dt.month).mean()
#dfFs2 = dfFs2.reset_index()
dfFs2["date"] = dfFs2["Date"].map(lambda x: dateToString2(x))
dfFs2 = dfFs2.drop("Date", axis=1)
getDuel(dfFs2, u"Marine Le Pen", u"François Fillon").to_csv(path2+"mlpVSff.tsv", sep="\t", encoding="utf-8")
getDuel(dfFs2, u"Marine Le Pen", u"Manuel Valls").to_csv(path2+"mlpVSmv.tsv", sep="\t", encoding='utf-8')
getDuel(dfFs2, u"Marine Le Pen", u"Emmanuel Macron").to_csv(path2+"mlpVSem.tsv", sep="\t", encoding='utf-8')
getDuel(dfFs2, u"Emmanuel Macron", u"François Fillon").to_csv(path2+"emvsff.tsv", sep="\t", encoding="utf-8")
'''
getDuel(dfFs2, u"Marine Le Pen", u"Manuel Valls").to_json(path2+"mlpVSmv.json", force_ascii=False)
getDuel(dfFs2, u"Marine Le Pen", u"François Fillon").to_json(path2+"mlpVSff.json", force_ascii=False)
getDuel(dfFs2, u"Marine Le Pen", u"Emmanuel Macron").to_json(path2+"mlpVSem.json", force_ascii=False)
getDuel(dfFs2, u"Emmanuel Macron", u"François Fillon").to_json(path2+"emvsff.json", force_ascii=False)
'''
dfFs2.to_csv(path2+"sondages2e.csv", encoding='utf-8')
#dfFs2.to_json(path2+"sondages2e.json")
print("Done")
| 35.356201 | 175 | 0.589552 | 1,822 | 13,400 | 4.288145 | 0.226674 | 0.025342 | 0.028158 | 0.018303 | 0.302189 | 0.278382 | 0.242544 | 0.207091 | 0.180212 | 0.151414 | 0 | 0.040738 | 0.199478 | 13,400 | 378 | 176 | 35.449735 | 0.687424 | 0.08403 | 0 | 0.193833 | 0 | 0.013216 | 0.192079 | 0.00397 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.048458 | null | null | 0.013216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cd0d98d0b63b4259d4d0c7661566ba636ce4420 | 20,644 | py | Python | src/mvdef/import_util.py | lmmx/mvdef | c1e5002fbbcb3382025f2c6410767fee3ffe2500 | [
"MIT"
] | null | null | null | src/mvdef/import_util.py | lmmx/mvdef | c1e5002fbbcb3382025f2c6410767fee3ffe2500 | [
"MIT"
] | 42 | 2019-12-31T12:22:10.000Z | 2021-07-11T10:57:06.000Z | src/mvdef/import_util.py | lmmx/mvdef | c1e5002fbbcb3382025f2c6410767fee3ffe2500 | [
"MIT"
] | null | null | null | import ast
from ast import Import as IType, ImportFrom as IFType
from astor import to_source
from asttokens import ASTTokens
from .colours import colour_str as colour
from os import linesep as nl
from sys import stderr
__all__ = [
    "get_import_stmt_str",
    "multilinify_import_stmt_str",
    "colour_imp_stmt",
    "get_imported_name_sources",
    "get_module_srcs",
    "count_imported_names",
    "annotate_imports",
    "imp_def_subsets",
]
def get_import_stmt_str(alias_list, import_src=None, max_linechars=88):
    """
    Construct an import statement by building an AST, convert it to source using
    astor.to_source, and then return the string.

      alias_list:     List of strings to use as ast.alias `name`, and optionally also
                      `asname` entries. If only one name is listed per item in the
                      alias_list, the `asname` will be instantiated as None.
      import_src:     If provided, the import statement will use the
                      `ast.ImportFrom` class, otherwise it will use `ast.Import`.
                      Relative imports are permitted for "import from" statements
                      (such as `from ..foo import bar`) however absolute imports
                      (such as `from foo import bar`) are recommended in PEP8.
      max_linechars:  Maximum linewidth, beyond which the import statement string will
                      be multilined with `multilinify_import_stmt_str`.
    """
    alias_obj_list = []
    assert type(alias_list) is list, "alias_list must be a list"
    for alias_pair in alias_list:
        if type(alias_pair) is str:
            alias_pair = [alias_pair]
        assert len(alias_pair) > 0, "Cannot import using an empty string"
        assert type(alias_pair[0]) is str, "Import alias name must be a string"
        if len(alias_pair) < 2:
            alias_pair.append(None)
        al = ast.alias(*alias_pair[0:2])
        alias_obj_list.append(al)
    if import_src is None:
        ast_imp_stmt = ast.Import(alias_obj_list)
    else:
        import_level = len(import_src) - len(import_src.lstrip("."))
        import_src = import_src.lstrip(".")
        ast_imp_stmt = ast.ImportFrom(import_src, alias_obj_list, level=import_level)
    import_stmt_str = to_source(ast.Module([ast_imp_stmt]))
    if len(import_stmt_str.rstrip(nl)) > max_linechars:
        return multilinify_import_stmt_str(import_stmt_str)
    else:
        return import_stmt_str
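# A minimal usage sketch (illustrative; not part of the original module):
#
#   get_import_stmt_str([["numpy", "np"], ["os"]])
#   # --> "import numpy as np, os\n"
#   get_import_stmt_str([["path", "p"], ["sep"]], import_src="os")
#   # --> "from os import path as p, sep\n"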
def multilinify_import_stmt_str(import_stmt_str, indent_spaces=4, trailing_comma=True):
"""
Takes a single line import statement and turns it into a multiline string.
Will raise a `ValueError` if given a multiline string (a newline at the end
of the string is permitted).
This function is written in expectation of the output of `get_import_stmt_str`,
and is not intended to process all potential ways of writing an import statement.
import_stmt_str: String of Python code carrying out an import statement.
indent_spaces: Number of spaces to indent by in multiline format.
trailing_comma: Whether to add a trailing comma to the final alias in a
multiline list of import aliases (default: True)
"""
import_stmt_str = import_stmt_str.rstrip(nl)
n_nl = import_stmt_str.count(nl)
if n_nl > 0:
raise ValueError(f"{import_stmt_str} is not a single line string")
imp_ast = ast.parse(import_stmt_str)
assert type(imp_ast.body[0]) in [IType, IFType], "Not a valid import statement"
tko = ASTTokens(import_stmt_str)
first_tok = tko.tokens[0]
import_tok = tko.find_token(first_tok, tok_type=1, tok_str="import")
    assert import_tok.type > 0, "Unable to find import token in the given string"
imp_preamble_str = import_stmt_str[: import_tok.endpos]
post_import_tok = tko.tokens[import_tok.index + 1]
imp_names_str = import_stmt_str[post_import_tok.startpos :]
aliases = [(x.name, x.asname) for x in imp_ast.body[0].names]
seen_comma_tok = None
multiline_import_stmt_str = imp_preamble_str
multiline_import_stmt_str += " (" + nl
for al_i, (a_n, a_as) in enumerate(aliases):
is_final_alias = al_i + 1 == len(aliases)
if seen_comma_tok is None:
# Get start of alias by either full name or first part of .-separated name
al_n_tok = tko.find_token(import_tok, 1, tok_str=a_n.split(".")[0])
assert al_n_tok.type > 0, f"Unable to find the token for {a_n}"
else:
al_n_tok = tko.find_token(seen_comma_tok, 1, tok_str=a_n.split(".")[0])
assert al_n_tok.type > 0, f"Unable to find the token for {a_n}"
al_startpos = al_n_tok.startpos
if a_as is None:
if is_final_alias:
# There won't be a comma after this (it is the last import name token)
al_endpos = al_n_tok.endpos
else:
comma_tok = tko.find_token(al_n_tok, tok_type=53, tok_str=",")
if comma_tok.type == 0:
                    # The OP token type is 53 on some Python versions and 54 on
                    # others, so retry with 54 (find_token signals failure by
                    # returning a token whose type is 0)
                    comma_tok = tko.find_token(al_n_tok, tok_type=54, tok_str=",")
                assert comma_tok.type > 0, "Unable to find comma token"
al_endpos = comma_tok.endpos
else:
al_as_tok = tko.find_token(al_n_tok, tok_type=1, tok_str=a_as)
assert al_as_tok.type > 0, f"Unable to find the token for {a_as}"
if is_final_alias:
# There won't be a comma after this (it's the last import asname token)
al_endpos = al_as_tok.endpos
else:
comma_tok = tko.find_token(al_as_tok, tok_type=53, tok_str=",")
if comma_tok.type == 0:
                    # Same version-dependent OP token type as above; note the
                    # retry must search from the asname token, not the name token
                    comma_tok = tko.find_token(al_as_tok, tok_type=54, tok_str=",")
                assert comma_tok.type > 0, "Unable to find comma token"
al_endpos = comma_tok.endpos
alias_chunk = import_stmt_str[al_startpos:al_endpos]
if is_final_alias:
if trailing_comma:
alias_chunk += ","
else:
seen_comma_tok = comma_tok
multiline_import_stmt_str += (" " * indent_spaces) + alias_chunk + nl
# Finally, verify that the end of the tokenised string was reached
assert al_endpos == tko.tokens[-1].endpos, "Did not tokenise to the end of string"
# No need to further slice the input string, return the final result
multiline_import_stmt_str += ")" + nl
return multiline_import_stmt_str
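# Illustrative sketch: multiline a short single-line statement by hand. With the
# default settings the result should resemble the commented string, assuming the
# current asttokens tokenisation behaviour.
def _example_multilinify_import_stmt_str():
    return multilinify_import_stmt_str("from os import path, sep, linesep")
    # ~= "from os import (\n    path,\n    sep,\n    linesep,\n)\n"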
def colour_imp_stmt(imp_stmt, lines):
"""
Summary: get a string which when printed will show the separate parts of an
import statement in different colours (preamble in blue, alias names in red,
alias asnames in purple, the word "as" itself in yellow, commas between import
    aliases in light green, and post-matter, such as a closing bracket, in light red).
For an import statement within an asttokens-annotated AST, which comes with
all subnodes annotated with first and last token start/end positional information,
access all the tokens corresponding to the import statement name(s) and asname(s).
Do this using a list of lines (i.e. a list of strings, each of which is a line),
the subset of which corresponding to the import statement `imp_stmt` are given
by its `first_token.start` and `last_token.end` attributes (in each case, the
attribute is a tuple of `(line, column)` numbers, and it is conventional to store
these as a 1-based index, so to cross-reference to a 0-based index of the list
of lines we decrement this value and store as `imp_startln` and `imp_endln`).
The subset of lines corresponding to `imp_stmt` is then assigned as `nodelines`,
and we join this into a single string as `nodestring`.
Then a new ASTTokens object, `tko`, can be made by parsing `nodestring`, on which
the `find_tokens` method provides access to each name/asname one at a time, when
matched to the name/asname string. These name/asname strings are available
within the `imp_stmt` object via its `names` attribute, which is a list of
`ast.alias` class instances, each of which has both a `name` and `asname` attribute
(the latter of which is `None` if no asname is given for the import name).
`find_tokens` returns a token with attribute `type` of value `1` for a name (1 is
the index of "NAME" in the `token.tok_name` dictionary), and `startpos`/`endpos`
attributes (integers which indicate the string offsets within `nodestring`).
These `startpos` integers are an efficient way to store this list of tokens
(the "NAME" tokens corresponding to import statement alias names and asnames),
and so even though it would be possible to store all tokens, I choose to simply
re-access them with the `tko.get_token_from_offset(startpos)` method.
At the moment, I only re-access these tokens to retrieve their `endpos` (end
position offset), which is also an integer and could also be stored easily
without much problem, however for the sake of clarity I prefer to re-access
the entire token and not have to construct an arbitrary data structure for
storing the start and end positions (which could get confusing).
Lastly, I construct a colourful string representation of the import statement
by using these start positions and re-retrieved end positions to pull out
and modify (using the `mvdef.colours`⠶`colour_str` function) the names and asnames
(names are coloured red, asnames are coloured purple), and use string slicing
    to swap the ranges that the names and asnames occupied in the original
`nodestring` for these colourful replacements.
The end result, `modified_nodestring` is then returned, which will then
display in colour on Linux and OSX (I don't think Windows supports ANSI codes,
so I made `colour_str` only apply on these platforms).
"""
assert "first_token" in imp_stmt.__dir__(), "Not an asttokens-annotated AST node"
assert type(imp_stmt) in [IType, IFType], "Not an import statement"
is_from = type(imp_stmt) is IFType
imp_startln = imp_stmt.first_token.start[0] - 1 # Use 0-based line index
imp_endln = imp_stmt.last_token.end[0] - 1 # to match list of lines
nodelines = lines[imp_startln : (imp_endln + 1)]
n_implines = len(nodelines)
nodestring = "".join(nodelines)
tko = ASTTokens(nodestring)
new_nodelines = [list() for _ in range(n_implines)]
# Subtract the import statement start position from the name or asname
# token start position to get the offset, then use the offset to extract
# a range of text from the re-parsed ASTTokens object for the nodestring
# corresponding to the import name or asname in question.
imp_startpos = imp_stmt.first_token.startpos
alias_starts = []
for alias in imp_stmt.names:
al_n, al_as = alias.name, alias.asname
# 1 is the key for "NAME" in Python's tokens.tok_name
s = [tko.find_token(tko.tokens[0], 1, tok_str=al_n).startpos]
if al_as is not None:
s.append(tko.find_token(tko.tokens[0], 1, tok_str=al_as).startpos)
alias_starts.append(s)
assert len(alias_starts) > 0, "An import statement cannot import no names!"
assert alias_starts[0][0] > 0, "An import statement cannot begin with a name!"
modified_nodestring = ""
# -------------------------------------------------------------------------
# Now set up colour definitions for the modified import statement string
name_colour, asname_colour = ["red", "purple"]
pre_colour, post_colour = ["light_blue", "light_red"]
as_string_colour = "yellow"
comma_colour = "light_green"
# -------------------------------------------------------------------------
first_import_name_startpos = alias_starts[0][0]
pre_str = nodestring[:first_import_name_startpos]
modified_nodestring += colour(pre_colour, pre_str)
seen_endpos = first_import_name_startpos
# (Could add a try/except here to verify colours are in colour dict if modifiable)
for al_i, alias_start_list in enumerate(alias_starts):
for al_j, al_start in enumerate(alias_start_list):
if seen_endpos < al_start:
# There is an intervening string, append it to modified_nodestring
intervening_str = nodestring[seen_endpos:al_start]
if al_j > 0:
# This is the word "as", which comes between a name and an asname
modified_nodestring += colour(as_string_colour, intervening_str)
else:
if al_i > 0:
assert "," in intervening_str, "Import aliases not comma-sep.?"
modified_nodestring += colour(comma_colour, intervening_str)
else:
modified_nodestring += intervening_str
# Possible here to distinguish between names and asnames by al_j if needed
is_asname = bool(al_j) # al_j is 0 if name, 1 if asname
name_tok = tko.get_token_from_offset(al_start)
assert name_tok.type > 0, f"No import name at {al_start} in {nodestring}"
al_endpos = name_tok.endpos
imp_name = nodestring[al_start:al_endpos]
cstr_colour = [name_colour, asname_colour][al_j]
cstr = colour(cstr_colour, imp_name)
modified_nodestring += cstr
seen_endpos = al_endpos
end_str = nodestring[seen_endpos:]
modified_nodestring += colour(post_colour, end_str)
return modified_nodestring
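# Illustrative sketch: colour a freshly parsed import statement. This assumes an
# asttokens-annotated tree (parse=True provides the first_token annotations);
# the ANSI colours only render on Linux/macOS terminals.
def _example_colour_imp_stmt():
    src = "from json import loads as jl, dumps\n"
    atok = ASTTokens(src, parse=True)
    return colour_imp_stmt(atok.tree.body[0], src.splitlines(keepends=True))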
def get_imported_name_sources(trunk, report=True):
import_types = [IType, IFType]
imports = [n for n in trunk if type(n) in import_types]
imp_name_lines, imp_name_dict_list = annotate_imports(imports, report=report)
imported_names = {}
for ld in imp_name_lines:
ld_n = imp_name_lines.get(ld).get("n")
line_n = imp_name_dict_list[ld_n]
imp_src = next(x for x in [*line_n.items()] if x[1] == ld)
imported_names[ld] = imp_src
return imported_names
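# Illustrative sketch: each bound name maps to a (full import path, name) pair.
def _example_get_imported_name_sources():
    trunk = ast.parse("import os\nfrom json import loads as jl").body
    return get_imported_name_sources(trunk, report=False)
    # ~= {'os': ('os', 'os'), 'jl': ('json.loads', 'jl')}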
def get_module_srcs(imports):
ifr_srcs = []
for imp in imports:
if type(imp) == IFType:
ifr_srcs.append(imp.module)
else:
ifr_srcs.append(None)
return ifr_srcs
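# e.g. get_module_srcs(ast.parse("import os\nfrom json import loads").body)
# gives [None, 'json']: None per plain import, the module per "import from".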
def count_imported_names(nodes):
"""
Return an integer for a single node (0 if not an import statement),
else return a list of integers for a list of AST nodes.
"""
if type(nodes) is not list:
if type(nodes) in [IType, IFType]:
return len(nodes.names)
else:
assert ast.stmt in type(nodes).mro(), f"{nodes} is not an AST statement"
return 0
counts = []
for node in nodes:
if type(node) in [IType, IFType]:
counts.append(len(node.names))
else:
            assert ast.stmt in type(node).mro(), f"{node} is not an AST statement"
counts.append(0)
return counts
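# Illustrative sketch over a small parsed snippet (mixing import and non-import
# statements to show the per-node counts):
def _example_count_imported_names():
    tree = ast.parse("import os, sys\nx = 1\nfrom json import loads")
    return count_imported_names(tree.body)  # -> [2, 0, 1]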
def annotate_imports(imports, report=True):
"""
Produce two data structures from the list of import statements (the statements
of type ast.Import and ast.ImportFrom in the source program's AST),
imp_name_linedict: A dictionary whose keys are all the names imported by the
program (i.e. the names which they are imported as: the
asname if one is used), and whose value for each name
is a dictionary of keys (`n`, `line`):
n: [0-based] index of the import statement importing
the name, over the set of all import statements.
line: [1-based] line number of the file of the import
statement importing the name. Note that it may
not correspond to the line number on which the
                                name is given, only to the first line of the
                                import statement itself.
imp_name_dict_list: List of one dict per import statement, whose keys
are the full import path (with multi-part paths conjoined
by a period `.`) and the values of which are the names
that these import paths are imported as (either the asname
or else just the terminal part of the import path). The
dict preserves the per-line order of the imported
names.
"""
report_VERBOSE = False # Silencing debug print statements
# This dictionary gives the import line it's on for cross-ref with either
# the imports list above or the per-line imported_name_dict
imp_name_linedict = dict() # Stores all names and their asnames
imp_name_dict_list = [] # Stores one dict per AST import statement
for imp_no, imp_line in enumerate(imports):
imp_name_dict = dict()
for imported_names in imp_line.names:
name, asname = imported_names.name, imported_names.asname
if type(imp_line) == IFType:
assert imp_line.level == 0, "I've only encountered level 0 imports"
fullname = ".".join([imp_line.module, name])
else:
fullname = name
if asname is None:
imp_name_dict[fullname] = name
# Store both which import in the list of imports it's in
# and the line number it's found on in the parsed file
imp_name_linedict[name] = {"n": imp_no, "line": imp_line.lineno}
else:
imp_name_dict[fullname] = asname
imp_name_linedict[asname] = {"n": imp_no, "line": imp_line.lineno}
imp_name_dict_list.append(imp_name_dict)
# Ensure that they each got all the names
assert len(imp_name_dict_list) == len(imports)
assert sum([len(d) for d in imp_name_dict_list]) == len(imp_name_linedict)
if report_VERBOSE:
print("The import name line dict is:", file=stderr)
        for ld in imp_name_linedict:
            print(f"  {ld}: {imp_name_linedict.get(ld)}", file=stderr)
print("The import name dict list is:", file=stderr)
for ln in imp_name_dict_list:
print(ln, file=stderr)
return imp_name_linedict, imp_name_dict_list
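# Illustrative sketch of the two structures documented above:
def _example_annotate_imports():
    imports = ast.parse("import os\nfrom json import loads as jl").body
    linedict, dict_list = annotate_imports(imports, report=False)
    # linedict  ~= {'os': {'n': 0, 'line': 1}, 'jl': {'n': 1, 'line': 2}}
    # dict_list ~= [{'os': 'os'}, {'json.loads': 'jl'}]
    return linedict, dict_list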
def imp_def_subsets(linkfile):
"""
Given the list of mvdef_names and nonmvdef_names, construct the subsets:
mv_imports: imported names used by the functions to move,
nonmv_imports: imported names used by the functions not to move,
mutual_imports: imported names used by both the functions to move and
the functions not to move
"""
# report = linkfile.report
report_VERBOSE = False # Silencing debug print statements
mvdef_dicts = linkfile.mvdef_names # rename to emphasise that these are dicts
mvdef_names = set().union(
*[list(mvdef_dicts[x]) for x in mvdef_dicts]
) # funcdef names
nonmvdef_dicts = linkfile.nonmvdef_names # (as for mvdef_dicts)
nonmvdef_names = set().union(*[list(nonmvdef_dicts[x]) for x in nonmvdef_dicts])
linkfile.mv_imports = mvdef_names - nonmvdef_names
linkfile.nonmv_imports = nonmvdef_names - mvdef_names
linkfile.mutual_imports = mvdef_names.intersection(nonmvdef_names)
assert linkfile.mv_imports.isdisjoint(
linkfile.nonmv_imports
), "mv/nonmv_imports intersect!"
assert linkfile.mv_imports.isdisjoint(
linkfile.mutual_imports
), "mv/mutual imports intersect!"
assert linkfile.nonmv_imports.isdisjoint(
linkfile.mutual_imports
), "nonmv/mutual imports intersect!"
if report_VERBOSE:
print(
f"mv_imports: {linkfile.mv_imports}",
f", nonmv_imports: {linkfile.nonmv_imports}",
f", mutual_imports: {linkfile.mutual_imports}",
sep="",
file=stderr,
)
all_defnames = set().union(*[mvdef_names, nonmvdef_names])
all_def_imports = set().union(
*[linkfile.mv_imports, linkfile.nonmv_imports, linkfile.mutual_imports]
)
assert sorted(all_defnames) == sorted(all_def_imports), "Defnames =/= import names"
return
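# Illustrative sketch: a linkfile is normally an mvdef object carrying the two
# name dictionaries; a SimpleNamespace with the same attribute shape stands in
# here purely for demonstration.
def _example_imp_def_subsets():
    from types import SimpleNamespace
    lf = SimpleNamespace(
        mvdef_names={"func_a": {"os": None, "loads": None}},
        nonmvdef_names={"func_b": {"loads": None}},
    )
    imp_def_subsets(lf)
    return lf.mv_imports, lf.nonmv_imports, lf.mutual_imports
    # -> ({'os'}, set(), {'loads'})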
| 50.722359 | 87 | 0.653749 | 2,980 | 20,644 | 4.344295 | 0.151007 | 0.020856 | 0.027113 | 0.010428 | 0.162058 | 0.122586 | 0.10621 | 0.076626 | 0.076626 | 0.070755 | 0 | 0.005156 | 0.267245 | 20,644 | 406 | 88 | 50.847291 | 0.850598 | 0.417506 | 0 | 0.148594 | 0 | 0 | 0.108171 | 0.010549 | 0 | 0 | 0 | 0 | 0.104418 | 1 | 0.032129 | false | 0.004016 | 0.349398 | 0 | 0.425703 | 0.016064 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9cd8ab1b3376a9a6e91e80aece04a51923c049ca | 10,351 | py | Python | nrgpy/convert/convert_rld.py | kyarazhan/nrgpy | 6a81af79ee4f666892d99a5f0713495bff89214b | [
"MIT"
] | null | null | null | nrgpy/convert/convert_rld.py | kyarazhan/nrgpy | 6a81af79ee4f666892d99a5f0713495bff89214b | [
"MIT"
] | null | null | null | nrgpy/convert/convert_rld.py | kyarazhan/nrgpy | 6a81af79ee4f666892d99a5f0713495bff89214b | [
"MIT"
] | null | null | null | try:
from nrgpy import logger
except ImportError:
pass
from datetime import datetime
import os
import subprocess
import time
import traceback
from nrgpy.api.convert import nrg_api_convert
from nrgpy.utils.utilities import check_platform, windows_folder_path, affirm_directory, count_files
class local(object):
"""For handling NRG SymphoniePRO Data Logger raw data files in the *.rld format.
    This class uses the locally installed SymphoniePRO Desktop software to convert *.rld files to txt format (tab-delimited text).
Parameters
----------
rld_dir : str, optional
specify directory. Note for unc values, you
will need to escape all forward slashes, e.g.
rld_dir = "\\\\sol\\techsupport\\data\\"
or use the r'\\path\to\dir' approach
out_dir : str, optional
see note for rld_dir.
encryption_pass : str
specify data encryption password if logger is set up for that.
hex_key : str
specify if using hex data encryption key
sympro_path : str
default is "C:\Program Files (x86)\Renewable NRG Systems\SymPRO Desktop\SymPRODesktop.exe"
process_type : str
[convert], or import
convert_type : str
'meas', alternately specify 'comm', 'diag', 'sample', or 'events'
nec : str
path to nec file
site_filter : str
specify part or all of the file you'd like to filter on, like site_filter='123456_2018-09'
would filter on site 123456 and only the month of September in 2018.
site_file : bool or str
set to True to use local ndb site file, or set to path to an alternate ndb site file
Examples
--------
Convert a folder of RLD files to Text with SymphoniePRO Desktop Software
>>> from nrgpy.convert_rld import local
>>> converter = local(
rld_dir='/path/to/rld/files',
            out_dir='/path/to/txt/outputs',
file_filter='123456_2020-01', # for files from January 2020
)
>>> converter.convert()
"""
def __init__(self, rld_dir='', out_dir='', encryption_pass='', hex_key='', filename='',
sympro_path=r'"C:/Program Files (x86)/Renewable NRG Systems/SymPRO Desktop/SymPRODesktop.exe"',
process_type='convert', convert_type='meas', nec='', site_filter='', site_file='', **kwargs):
self.rld_dir = windows_folder_path(rld_dir)
self.out_dir = windows_folder_path(out_dir)
self.encryption_pass = encryption_pass
self.hex_key = hex_key
self.sympro_path = sympro_path
self.process_type = process_type
self.convert_type = convert_type
self.nec = nec
self.site_filter = site_filter
self.site_file = site_file
if 'file_filter' in kwargs and site_filter == '':
self.file_filter = kwargs.get('file_filter')
self.site_filter = self.file_filter
if check_platform() == 'win32':
if filename:
affirm_directory(self.out_dir)
self.single_file(filepath=filename)
else:
print("""
convert_rld.local() method ONLY compatible with Windows OS.
Please use nrgpy.cloud_convert() method instead.
Alternately, follow the instructions for using SymphoniePRO Desktop
with wine here:
https://github.com/nrgpy/nrgpy/blob/master/SymPRODeskop_Linux_README.md
""")
def directory(self):
"""processes all rld files in self.rld_dir, outputs to txt files to out_dir"""
affirm_directory(self.out_dir)
try:
if self.encryption_pass:
encryption = '/pass "{0}"'.format(self.encryption_pass)
else:
encryption = ''
except:
print('could not parse encryption_pass')
try:
if self.hex_key:
encryption_key = '/key "{0}"'.format(self.hex_key)
else:
encryption_key = ''
except:
print('could not parse hex_key')
try:
if self.nec:
nec = '/config "{0}"'.format(self.nec)
else:
nec = ''
except:
            print('could not parse nec')
try:
            if self.site_file is True:
                # True (the bool) means "use the local ndb site file"
                site_file = '/site '
            elif self.site_file:
                # a non-empty string is a path to an alternate ndb site file
                site_file = '/site "{0}"'.format(self.site_file)
            else:
                site_file = ''
        except:
            print('could not parse site_file')
try:
rld_count = count_files(self.rld_dir, self.site_filter, 'rld')
self.start_time = time.time()
logger.info('converting {0} files from {1}'.format(rld_count, self.rld_dir))
print('\nConverting {0} files from {1}\n'.format(rld_count, self.rld_dir))
print('Saving outputs to {0}'.format(self.out_dir))
cmd = [
self.sympro_path,
"/cmd", self.process_type,
"/file", '"'+"\\".join([self.rld_dir, '*'+self.site_filter])+'*.rld"',
encryption,
encryption_key,
nec,
site_file,
"/type", '"'+self.convert_type+'"',
"/outputdir", '"'+self.out_dir[:-1]+'"'
]
# print('\nUsing command line script:\n{}'.format(" ".join(cmd)))
self.cmd = cmd
self.start = datetime.now()
subprocess.run(" ".join(cmd), stdout=subprocess.PIPE)
self.end = datetime.now()
self.convert_time = str(self.end - self.start)
logger.info('TXT files saved in {0}'.format(self.out_dir))
print('\nTXT files saved in {0}\n'.format(self.out_dir))
txt_count = count_files(self.out_dir, self.site_filter, 'txt', start_time=self.start_time)
log_count, log_files = count_files(self.out_dir, self.site_filter, 'log', show_files=True, start_time=self.start_time)
print('RLDs in : {}'.format(rld_count))
print('TXTs out : {}'.format(txt_count))
print('LOGs out : {}'.format(log_count))
if len(log_files) > 0:
print('Log files created:')
for _filename in log_files:
print('\t{}'.format(_filename))
print('----------------\nDifference : {}'.format(rld_count - (txt_count + log_count)))
except FileNotFoundError:
logger.error('SymphoniePRO Desktop Application not found: {0}'.format(self.sympro_path))
print("""
No instance of SymphoniePRO Desktop Application found.
Please follow the link below to download and install this software:
https://www.nrgsystems.com/support/product-support/software/symphoniepro-desktop-application
""")
except:
logger.error("unable to process files in {0}".format(self.rld_dir))
logger.debug(traceback.format_exc())
print('Unable to process files in directory')
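    # For reference, the command line assembled above resembles the following
    # (paths and filter are hypothetical):
    #   "C:/Program Files (x86)/.../SymPRODesktop.exe" /cmd convert
    #       /file "C:\data\rld\*000110*.rld" /type "meas" /outputdir "C:\data\txt"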
def convert(self):
self.directory()
def process(self):
self.directory()
def rename_rlds(self, **kwargs):
"""uses SymPRO utility NrgRldSiteSerialRename.exe to rename files with site number and logger serial number.
This function is only compatible with Windows>=7 AND
a local installation of SymphoniePRO Desktop software
"""
try:
renamer_path = kwargs.get('renamer_path', r"C:/Program Files (x86)/Renewable NRG Systems/SymPRO Desktop/Default Application Files/Utilities/NrgRldSiteSerialRename.exe")
for f in os.listdir(self.rld_dir):
filepath = self.rld_dir + f
                if f[-4:].lower() == ".rld" and self.site_filter in f:
rename_cmd = [renamer_path, '"'+filepath+'"']
try:
subprocess.run(" ".join(rename_cmd), stdout=subprocess.PIPE)
except:
logger.error("unable to rename {0}".format(f))
print("Unable to rename {0}".format(f))
pass
else:
pass
except:
logger.error('Could not rename files')
logger.debug(traceback.format_exc())
print('Could not rename files')
def single_file(self, filepath=''):
        self.filepath = filepath.replace('/', '\\')
try:
if self.encryption_pass:
encryption = '/pass "{0}"'.format(self.encryption_pass)
else:
encryption = ''
except:
print('could not parse encryption_pass')
try:
if self.hex_key:
encryption_key = '/key "{0}"'.format(self.hex_key)
else:
encryption_key = ''
except:
print('could not parse hex_key')
try:
if self.nec:
nec = '/config "{0}"'.format(self.nec)
else:
nec = ''
except:
print('could not get nec file')
try:
if self.site_file:
site_file = '/site "{0}"'.format(self.site_file)
else:
site_file = ''
except:
print('could not get site file')
cmd = [
self.sympro_path,
"/cmd", "convert",
"/file", '"'+self.filepath+'"',
encryption,
encryption_key,
nec,
site_file,
"/type", '"'+self.convert_type+'"',
"/outputdir", '"'+self.out_dir[:-1]+'"'
]
self.cmd = cmd
try:
print("{0} ... \t\t".format(filepath), end="", flush=True)
subprocess.run(" ".join(cmd), stdout=subprocess.PIPE)
print("[DONE]")
except:
logger.error("processing {0} FAILED".format(filepath))
logger.debug(traceback.format_exc())
print("\n\t processing {0} [FAILED]".format(filepath))
pass
logger.info("files in {0} processed OK".format(self.rld_dir))
logger.info("TXT files saved to {0}".format(self.out_dir))
print("\nQueue processed\n")
nrg_convert_api = nrg_api_convert
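# Illustrative usage sketch (paths and filter are hypothetical; requires Windows
# and a local SymphoniePRO Desktop installation):
#
#   converter = local(rld_dir=r'C:\data\rld', out_dir=r'C:\data\txt',
#                     file_filter='000110_2020-05')
#   converter.convert()       # batch-convert the filtered folder to TXT
#   converter.rename_rlds()   # optionally rename RLDs by site/serial number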
| 35.088136 | 180 | 0.562651 | 1,201 | 10,351 | 4.701915 | 0.196503 | 0.026917 | 0.025323 | 0.026917 | 0.362316 | 0.281211 | 0.243846 | 0.209846 | 0.189481 | 0.189481 | 0 | 0.010693 | 0.322384 | 10,351 | 294 | 181 | 35.207483 | 0.794411 | 0.182301 | 0 | 0.512821 | 0 | 0.010256 | 0.217949 | 0.014151 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030769 | false | 0.071795 | 0.046154 | 0 | 0.082051 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9cd8dd0d174f6ca548c62ed627de0eb3a16baa55 | 379 | py | Python | events/migrations/0040_event_team_size.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 166 | 2019-05-16T23:46:08.000Z | 2022-03-31T05:20:23.000Z | events/migrations/0040_event_team_size.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 92 | 2020-01-18T22:51:53.000Z | 2022-03-12T01:23:57.000Z | events/migrations/0040_event_team_size.py | VadVergasov/clist | 4afcdfe88250d224043b28efa511749347cec71c | [
"Apache-2.0"
] | 23 | 2020-02-09T17:38:43.000Z | 2021-12-09T14:39:07.000Z | # Generated by Django 2.2.10 on 2020-04-03 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0039_event_limits'),
]
operations = [
migrations.AddField(
model_name='event',
name='team_size',
field=models.IntegerField(default=3),
),
]
| 19.947368 | 49 | 0.591029 | 41 | 379 | 5.365854 | 0.780488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078358 | 0.292876 | 379 | 18 | 50 | 21.055556 | 0.742537 | 0.121372 | 0 | 0 | 1 | 0 | 0.111782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cd9c5db682a634d72e6f3ad900937179bb19d00 | 1,303 | py | Python | src/origin_ledger_sdk/batch.py | project-origin/ledger-sdk-python | 46212e17e19f1d534c1717d3f3714f791731adea | [
"MIT"
] | null | null | null | src/origin_ledger_sdk/batch.py | project-origin/ledger-sdk-python | 46212e17e19f1d534c1717d3f3714f791731adea | [
"MIT"
] | null | null | null | src/origin_ledger_sdk/batch.py | project-origin/ledger-sdk-python | 46212e17e19f1d534c1717d3f3714f791731adea | [
"MIT"
] | null | null | null | from enum import Enum
from typing import List
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch as SignedBatch
from .requests import AbstractRequest
from .requests.helpers import get_signer
class BatchStatus(Enum):
UNKNOWN = 'UNKNOWN'
PENDING = 'PENDING'
COMMITTED = 'COMMITTED'
INVALID = 'INVALID'
class Batch():
def __init__(self, signer_private_key: bytes):
self._requests: List[AbstractRequest] = []
self._signer_private_key = signer_private_key
def add_request(self, request: AbstractRequest):
self._requests.append(request)
def get_signed_batch(self) -> SignedBatch:
signer = get_signer(self._signer_private_key)
        signed_transactions = [t for r in self._requests for t in r.get_signed_transactions(signer)]
batch_header_bytes = BatchHeader(
signer_public_key=signer.get_public_key().as_hex(),
transaction_ids=[txn.header_signature for txn in signed_transactions],
).SerializeToString()
signature = signer.sign(batch_header_bytes)
batch = SignedBatch(
header=batch_header_bytes,
header_signature=signature,
transactions=signed_transactions
)
return batch
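# Illustrative usage sketch (the key bytes and request object are hypothetical;
# any AbstractRequest subclass providing get_signed_transactions() will do):
#
#   batch = Batch(signer_private_key=private_key_bytes)
#   batch.add_request(publish_request)
#   signed = batch.get_signed_batch()  # a sawtooth_sdk Batch protobuf, ready to submit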
| 30.302326 | 101 | 0.707598 | 150 | 1,303 | 5.846667 | 0.326667 | 0.059293 | 0.072976 | 0.068415 | 0.084379 | 0.084379 | 0.084379 | 0 | 0 | 0 | 0 | 0.001967 | 0.219493 | 1,303 | 42 | 102 | 31.02381 | 0.860374 | 0 | 0 | 0 | 0 | 0 | 0.023024 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.193548 | 0 | 0.516129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9cdb60e1aaf4a83afb78be721e50868f4655f336 | 7,548 | py | Python | localization/pipeline/InstLoc.py | cameronwp/MMGIS | 5e6277bd4f0c8ff77cd12dce51d41abffe0ac394 | [
"Apache-2.0",
"Unlicense"
] | 63 | 2019-03-28T18:46:55.000Z | 2022-03-25T02:49:24.000Z | localization/pipeline/InstLoc.py | cameronwp/MMGIS | 5e6277bd4f0c8ff77cd12dce51d41abffe0ac394 | [
"Apache-2.0",
"Unlicense"
] | 75 | 2019-10-14T08:54:18.000Z | 2022-03-30T02:33:29.000Z | localization/pipeline/InstLoc.py | cameronwp/MMGIS | 5e6277bd4f0c8ff77cd12dce51d41abffe0ac394 | [
"Apache-2.0",
"Unlicense"
] | 15 | 2019-12-15T11:26:01.000Z | 2022-03-25T02:49:27.000Z | #! /usr/local/msl/bin/python
#******************************************************************************
# InstLoc.py <image.IMG/VIC>
#
# Project: Instrument Loco String for a given file
# Purpose: Localizations stored in python dictionary
#
# Author: Hallie Gengl
#
# Updated: 8/24/18 Corrine Rojas (crojas6@asu.edu)
# Updated: 6/26/19 Hallie Gengl
#
#******************************************************************************
import os
import sys
import parseVicarLabel
import FixedInstLoc
import MastInstLoc
import ArmInstLoc
import msl.instruments as instruments
import msl.PdsToVic as PdsToVic
import msl.placesTranslation as places
LocArray = {'Data_Product' : '', 'Instrument' : '', 'Spacecraft_Clock(sec)' : '',
'Rover_Motion_Counter': '', 'Mission': '', 'Site_Frame_Origin_Offset_Vector': '',
'Spacecraft_Quaternion': '', 'Sol_Number': '','Sequence_ID' : '',
'Instrument_Azimuth(deg)': '','Instrument_Elevation(deg)': '',
'Global_Northing(m)': '','Global_Easting(m)': '','Global_Elevation(m)': '',
'Stereo': '','ObsType': '', 'LocType': '' ,'Frame': '','Method': '', 'APID' : '',
'APID_Name' : '', 'Local_True_Solar_Time' : '', 'Local_Mean_Solar_Time' : '',
'Planetary_Radius': '', 'Surface_Intersection_DEM': '', 'Rover_Global_Northing(m)': '',
'Rover_Global_Easting(m)':'', 'Rover_Global_Elevation(m)': ''}
def runLoco(locType,filen,oLBL,oDAT):
print "Entering runLoco.InstLoc.py"
print "Here is str(locType) from " + __name__ + ".InstLoc.py: ", str(locType)
print "Here is str(filen) from " + __name__ + ".InstLoc.py: ", str(filen)
print "Here is str(oLBL) from " + __name__ + ".InstLoc.py: ",str(oLBL)
print "Here is str(oDAT) from " + __name__ + ".InstLoc.py: ",str(oDAT)
if locType == 'fixed':
x,y,z,sid,p2xyz_status = FixedInstLoc.allLoc(filen)
elif locType == 'mast':
x,y,z,sid,p2xyz_status = MastInstLoc.allLoc(filen)
elif locType == 'contact':
x,y,z,sid,p2xyz_status = ArmInstLoc.ArmInstLoc(filen,oLBL,oDAT)
print "Leaving " + __name__ + ".Instloc.py and returning: ", x, y, z
print "Stereo Intersection DEM: " + sid
print "p2xyz_status: " + str(p2xyz_status)
return x,y,z,sid,p2xyz_status
def InstLocDB(filen):
print "Entering InstLocDB.InstLoc.py"
try:
os.environ['R2LIB']
except KeyError as e:
print "%s is not set, run select" % (e)
raise SystemExit
print "Here is the filen from " + __name__ + ".InstLoc.py: ", filen
original = filen
#print "Split: ", os.path.splitext(filen)[0]
#print os.path.splitext(filen)[1]
filen,oDAT,oLBL = getNewProduct(filen)
inst = parseVicarLabel.getInstrumentId(filen)
    if inst == 'CHEMCAM_SOH' or inst == 'CHEMCAM_PARMS':
        raise SystemExit
print "filename: ", filen
print " creating array [" + __name__ + ".InstLoc.py]"
print "instrument parsing of dictionary [" + __name__ + ".InstLoc.py]"
#print "file: ",filen
#print "instrument: ", inst
rover = parseVicarLabel.getSpacecraftId(filen)
sol = parseVicarLabel.getPlanetDayNumber(filen)
sclk = parseVicarLabel.getSclk(filen)
oov = parseVicarLabel.getOriginOffsetVector(filen)
q = parseVicarLabel.getOriginRotationQuaternion(filen)
rmc = parseVicarLabel.getRoverMotionCounter(filen)
az = parseVicarLabel.getAz(filen)
el = parseVicarLabel.getEl(filen)
#c = parseVicarLabel.getCameraCPoint(filen)
#pId = parseVicarLabel.getProcessID(filen)
ltst = parseVicarLabel.getLTST(filen)
print "ltst :" + ltst
lmst = parseVicarLabel.getLMST(filen)
print "lmst :" + lmst
# to do: add APP ID, Planetary Radius, Pointing Vector, ...
# currently empty dictionaries
seqID = parseVicarLabel.getSeqId(filen)
apid = parseVicarLabel.getApId(filen)
if parseVicarLabel.getApIdName(filen) == "McamLRecoveredProduct":
return
elif parseVicarLabel.getApIdName(filen) == "McamRRecoveredProduct":
return
elif parseVicarLabel.getApIdName(filen) == "RADSendData":
return
else:
apidName = parseVicarLabel.getApIdName(filen)
print rmc
loc_x,loc_y,loc_z = places.getLocoRover('ops',rmc[0],rmc[1],'rover2orbital')
locType = instruments.InstDic[inst][1]
print "Here is locType from " + __name__ + ".InstLoc.py: ",locType
print "Here is filen from " + __name__ + ".InstLoc.py: ",filen
print "Here is oLBL from " + __name__ + ".InstLoc.py: ",oLBL
print "Here is oDAT from " + __name__ + ".InstLoc.py: ",oDAT
x,y,z,sid,p2xyz_status = runLoco(locType,filen,oLBL,oDAT)
stereo = parseVicarLabel.frameType(filen)
LocArray['Stereo'] = stereo
LocArray['Data_Product'] = original
LocArray['Instrument'] = inst
LocArray['Spacecraft_Clock(sec)'] = sclk
LocArray['Rover_Global_Northing(m)'] = loc_x
LocArray['Rover_Global_Easting(m)'] = loc_y
LocArray['Rover_Global_Elevation(m)'] = loc_z
LocArray['Global_Northing(m)'] = x
LocArray['Global_Easting(m)'] = y
LocArray['Global_Elevation(m)'] = z
LocArray['LocType'] = locType
LocArray['Rover_Motion_Counter'] = rmc
    LocArray['Site_Frame_Origin_Offset_Vector'] = oov
    LocArray['Spacecraft_Quaternion'] = q
LocArray['Instrument_Elevation(deg)'] = el
LocArray['Instrument_Azimuth(deg)'] = az
LocArray['Mission'] = rover
LocArray['Sol_Number'] = sol
#LocArray['Cpnt'] = c
LocArray['Sequence_ID'] = seqID
LocArray['Frame'] = 'orbital'
LocArray['Local_Mean_Solar_Time'] = str(lmst)
LocArray['Local_True_Solar_Time'] = str(ltst)
LocArray['APID'] = apid
LocArray['APID_Name'] = apidName
LocArray['Surface_Intersection_DEM'] = sid
LocArray['p2xyz_status_code'] = p2xyz_status
#Print out the dictionary entry
print "Here is dict.items(LocArray) from " + __name__ + ".InstLoc.py: ",dict.items(LocArray)
print "Leaving " + __name__ + ".InstLoc.py returning: ", LocArray
return LocArray
#InstLocDB(filename) #filename
def getNewProduct(filen):
if os.path.splitext(filen)[1] == '.VIC' or os.path.splitext(filen)[1] == '.IMG':
oDAT = filen
oLBL = filen
if os.path.splitext(filen)[1] == '.DAT' or os.path.splitext(filen)[1] == '.LBL':
if 'ODL' not in open(filen).readline():
oDAT = os.path.splitext(filen)[0] + '.DAT'
oLBL = os.path.splitext(filen)[0] + '.LBL'
filen = os.path.splitext(filen)[0] + '.LBL'
else:
oDAT = os.path.splitext(filen)[0] + '.DAT'
oLBL = os.path.splitext(filen)[0] + '.DAT'
filen = os.path.splitext(filen)[0] + '.DAT'
print "Creating associated VICAR text file [" + __name__ + ".InstLoc.py]"
PdsToVic.PdsToVic(filen)
base = os.path.basename(filen)
print "Base: ", base
core = os.path.splitext(base)[0]
print "Core: ",core
filen = core + '.VIC'
print "oDAT :", oDAT
print "oLBL :", oLBL
print "filename:", filen
return filen,oDAT,oLBL
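# Illustrative mapping (hypothetical filenames; the .DAT case shown assumes the
# file does not start with an ODL label):
#   getNewProduct('FOO.IMG') -> ('FOO.IMG', 'FOO.IMG', 'FOO.IMG')
#   getNewProduct('FOO.DAT') -> ('FOO.VIC', 'FOO.DAT', 'FOO.LBL')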
def main():
InstLocDB(sys.argv[1]) #filename
if (__name__ == "__main__"):
print
main()
print
#print type(LocArray)
#print json.dumps(LocArray, indent=2)
#print dict.header(LocArray)
#print dict.values(LocArray)
#print dict.header(LocArray),dict.values(LocArray)
#print ""
| 36.640777 | 96 | 0.631028 | 854 | 7,548 | 5.405152 | 0.262295 | 0.035095 | 0.042244 | 0.049393 | 0.190858 | 0.086872 | 0.033795 | 0.022097 | 0.022097 | 0.022097 | 0 | 0.006682 | 0.206942 | 7,548 | 205 | 97 | 36.819512 | 0.76445 | 0.131293 | 0 | 0.098592 | 0 | 0 | 0.278006 | 0.084335 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.06338 | null | null | 0.21831 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9cdd6d64010e9c068a5d368c4a2af2b0acb6a55f | 628 | py | Python | isValidParentheses.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | isValidParentheses.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | isValidParentheses.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | class Solution:
# @param {string} s A string
# @return {boolean} whether the string is a valid parentheses
def isValidParentheses(self, s):
        stack = []
        pairs = {"]": "[", "}": "{", ")": "("}  # closing -> matching opening bracket
        for char in s:
            if char in pairs.values():
                stack.append(char)
            elif char in pairs.keys():
                if stack == [] or pairs[char] != stack.pop():
                    return False
            # Any other character (e.g. "ABCDEF") makes the string invalid;
            # drop this branch to accept non-bracket characters instead
            else:
                return False
        return stack == []
test = Solution()
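# Quick checks (only bracket characters are accepted as valid input):
print(test.isValidParentheses("()[]{}"))  # True
print(test.isValidParentheses("([)]"))    # False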
print(test.isValidParentheses("(A)[]{}")) | 33.052632 | 65 | 0.488854 | 64 | 628 | 4.796875 | 0.5625 | 0.058632 | 0.065147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.371019 | 628 | 19 | 66 | 33.052632 | 0.777215 | 0.194268 | 0 | 0.133333 | 0 | 0 | 0.025845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ce11bf87b067840337b0d13348e056b624caea2 | 602 | py | Python | studygroups/migrations/0122_auto_20190710_0605.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 10 | 2016-05-03T20:41:25.000Z | 2021-09-17T18:42:01.000Z | studygroups/migrations/0122_auto_20190710_0605.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 655 | 2016-05-04T19:00:35.000Z | 2022-03-28T13:09:20.000Z | studygroups/migrations/0122_auto_20190710_0605.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 8 | 2016-05-06T10:24:27.000Z | 2020-10-21T00:56:59.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-07-10 06:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0121_auto_20190708_2246'),
]
operations = [
migrations.RemoveField(
model_name='teammembership',
name='communication_opt_in',
),
migrations.AddField(
model_name='teammembership',
name='weekly_update_opt_in',
field=models.BooleanField(default=True),
),
]
| 24.08 | 52 | 0.619601 | 61 | 602 | 5.868852 | 0.754098 | 0.050279 | 0.128492 | 0.150838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075342 | 0.272425 | 602 | 24 | 53 | 25.083333 | 0.742009 | 0.112957 | 0 | 0.235294 | 1 | 0 | 0.19209 | 0.043315 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ce7bfb4241f59939bdcb06aabfa03be7b8bc555 | 4,688 | py | Python | gaussian/__init__.py | mattaustin/gaussian | c22ab36fd83e5f0b24587861ef34b872e268c97e | [
"Apache-2.0"
] | 1 | 2017-06-04T23:56:54.000Z | 2017-06-04T23:56:54.000Z | gaussian/__init__.py | mattaustin/gaussian | c22ab36fd83e5f0b24587861ef34b872e268c97e | [
"Apache-2.0"
] | null | null | null | gaussian/__init__.py | mattaustin/gaussian | c22ab36fd83e5f0b24587861ef34b872e268c97e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Matt Austin
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from .feeds import Feed
import logging
import requests
__title__ = 'gaussian'
__version__ = '0.1.0'
__url__ = 'https://github.com/mattaustin/gaussian'
__author__ = 'Matt Austin <mail@mattaustin.me.uk>'
__copyright__ = 'Copyright 2014 Matt Austin'
__license__ = 'Apache 2.0'
class NewsBlur(object):
"""NewsBlur API client.
http://www.newsblur.com/api
"""
_user_agent = '{name}/{version} ({name}; +{url})'.format(
name=__title__, version=__version__, url=__url__)
endpoint = 'https://www.newsblur.com/'
logged_in = False
def __init__(self, endpoint=None, username=None, password=None):
"""
:param str endpoint: API endpoint URL. Defaults to 'www.newsblur.com'.
Specify this if you have your own NewsBlur server.
:param str username: Your NewsBlur account username.
:param str password: Your NewsBlur account password.
"""
self._logger = self._get_logger()
self._set_endpoint(endpoint)
self.username = username
self.password = password
self.session = self._create_session()
if self.username and self.password:
self.login()
def __repr__(self):
username = self.username.encode('utf-8') if self.username else b''
return b'<{0}: {1}>'.format(self.__class__.__name__, username)
def _get_logger(self):
return logging.getLogger(__name__)
def _create_session(self):
session = requests.Session()
session.headers = {'Accept': 'application/json',
'User-Agent': self._user_agent}
return session
def _set_endpoint(self, endpoint):
self.endpoint = endpoint or self.endpoint
self._logger.debug('API endpoint set to: {0}'.format(self.endpoint))
def _construct_url(self, path):
return '{endpoint}{path}'.format(endpoint=self.endpoint, path=path)
def get_feeds(self, refresh=False):
"""Get the feeds for this account.
:param bool refresh: If True, any cached data is ignored and data is
fetched from the API. Default: False.
:returns: List of :py:class:`~gaussian.feeds.Feed` instances.
:rtype: list
"""
if not hasattr(self, '_feeds') or refresh:
response = self.session.get(self._construct_url('/reader/feeds'))
# TODO: properly check for success, it appears server always
# returns 200.
assert response.json()['result'] == 'ok'
items = response.json()['feeds'].items()
self._feeds = [
Feed(id=id, api_client=self, data=data) for id, data in items]
return self._feeds
def login(self):
"""Login to NewsBlur, using session (cookie) authentication."""
response = self.session.post(self._construct_url('/api/login'),
data={'username': self.username,
'password': self.password})
# TODO: properly check for success, it appears server always returns
# 200.
self._logger.debug(response.content)
assert response.json()['result'] == 'ok'
self.logged_in = True
return True
def mark_as_read(self, days=0):
"""Mark all stories from all feeds as read.
:param int days: Number of days back to mark as read. Default: 0 (all).
"""
response = self.session.post(
self._construct_url('/reader/mark_all_as_read'),
data={'days': days})
return response.json()['result'] == 'ok'
def mark_stories_as_read(self, stories):
"""Mark provided stories as read.
:param list stories: List of :py:class:`~gaussian.stories.Story`
instances.
"""
response = self.session.post(
self._construct_url('/reader/mark_story_hashes_as_read'),
data={'story_hash': [story.hash for story in stories]})
return response.json()['result'] == 'ok'
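# Illustrative usage sketch (credentials are hypothetical):
#
#   client = NewsBlur(username='alice', password='secret')  # logs in on init
#   feeds = client.get_feeds()     # cached list of Feed instances
#   client.mark_as_read(days=7)    # mark the last week of stories as read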
| 32.331034 | 79 | 0.62884 | 574 | 4,688 | 4.942509 | 0.315331 | 0.014804 | 0.026789 | 0.028199 | 0.140642 | 0.089179 | 0.089179 | 0.075432 | 0.075432 | 0.040888 | 0 | 0.008628 | 0.258319 | 4,688 | 144 | 80 | 32.555556 | 0.807305 | 0.319753 | 0 | 0.09375 | 0 | 0 | 0.139814 | 0.026631 | 0 | 0 | 0 | 0.006944 | 0.03125 | 1 | 0.15625 | false | 0.0625 | 0.0625 | 0.03125 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9ce7d60aa1fddaa6c915b248970c026594e52b7a | 614 | py | Python | Exercise_07_RGB_LED.py | NaimFuad/hibiscus-sense-micropython-1 | 7475b6099315b4a12ab00f7ae5d62a2eb8ce5e0c | [
"MIT"
] | 3 | 2021-03-04T08:20:24.000Z | 2022-02-11T10:04:03.000Z | Exercise_07_RGB_LED.py | NaimFuad/hibiscus-sense-micropython-1 | 7475b6099315b4a12ab00f7ae5d62a2eb8ce5e0c | [
"MIT"
] | null | null | null | Exercise_07_RGB_LED.py | NaimFuad/hibiscus-sense-micropython-1 | 7475b6099315b4a12ab00f7ae5d62a2eb8ce5e0c | [
"MIT"
] | 2 | 2021-02-26T10:15:57.000Z | 2021-03-04T08:20:36.000Z | # Hibiscus Sense - Exercise 07 RGB LED
#
# There is 1x RGB LED.
# This RGB LED is connected to GPIO16 and integrated with WS2812.
# WS2812 is an LED controller, which use single-wire control protocol to control the LEDs.
from machine import Pin
from neopixel import NeoPixel
from time import sleep
pin = Pin(16, Pin.OUT) # set GPIO16 to output to drive the NeoPixel data line
RGB = NeoPixel(pin, 1) # create NeoPixel driver on GPIO16 for 1 pixel
while True:
# set the first pixel to green
RGB[0] = (0, 255, 0)
# write data to all pixels
RGB.write()
sleep(0.5)
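# A variant sketch: cycle red -> green -> blue, half a second per colour.
# Same NeoPixel API as above; colours are (R, G, B) tuples in the 0-255 range.
# (Commented out so it does not clash with the loop above.)
#
# while True:
#     for colour in ((255, 0, 0), (0, 255, 0), (0, 0, 255)):
#         RGB[0] = colour
#         RGB.write()
#         sleep(0.5)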
| 27.909091 | 93 | 0.664495 | 96 | 614 | 4.25 | 0.604167 | 0.044118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06278 | 0.273616 | 614 | 22 | 94 | 27.909091 | 0.852018 | 0.583062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9cf660cb297e4f4275f9be8ec5a0164060234d14 | 1,207 | py | Python | src/education/urls.py | compressore/moc | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | [
"MIT"
] | 4 | 2020-10-14T15:35:07.000Z | 2022-01-13T15:31:16.000Z | src/education/urls.py | compressore/moc | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | [
"MIT"
] | null | null | null | src/education/urls.py | compressore/moc | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | [
"MIT"
] | 2 | 2021-01-07T14:39:05.000Z | 2022-01-18T12:31:50.000Z | from django.urls import path
from . import views
from core import views as core
from community import views as community
from library import views as library
from ie.urls_baseline import baseline_urlpatterns
from ie.urls_education_baseline import baseline_education_urlpatterns
app_name = "education"
urlpatterns = baseline_urlpatterns + baseline_education_urlpatterns + [
path("", views.index, name="index"),
path("theses/", views.theses, name="theses"),
path("controlpanel/students/", views.controlpanel_students, name="controlpanel_students"),
path("controlpanel/students/<int:id>/", views.controlpanel_student, name="controlpanel_student"),
path("controlpanel/courses/", views.controlpanel_courses, name="controlpanel_courses"),
path("controlpanel/courses/<int:id>/", views.controlpanel_course, name="controlpanel_course"),
path("controlpanel/courses/<int:id>/edit/", views.controlpanel_course_form, name="controlpanel_course_form"),
path("controlpanel/courses/create/", views.controlpanel_course_form, name="controlpanel_course_form"),
path("controlpanel/courses/<int:id>/<int:content>/", views.controlpanel_course_content, name="controlpanel_course_content"),
]
| 52.478261 | 128 | 0.785418 | 143 | 1,207 | 6.41958 | 0.202797 | 0.156863 | 0.125272 | 0.084967 | 0.232026 | 0.165577 | 0.165577 | 0.165577 | 0.165577 | 0.165577 | 0 | 0 | 0.091964 | 1,207 | 22 | 129 | 54.863636 | 0.837591 | 0 | 0 | 0 | 0 | 0 | 0.325601 | 0.25435 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.368421 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9cf88f9c3c66dc987145112c9d9843d8bd9f0a49 | 849 | py | Python | todolist/todos/migrations/0002_sharetodolist.py | abdu1aziz/todo-list-app | 698cc046ea4cf9259f8e9830a33166dc6d222abe | [
"MIT"
] | null | null | null | todolist/todos/migrations/0002_sharetodolist.py | abdu1aziz/todo-list-app | 698cc046ea4cf9259f8e9830a33166dc6d222abe | [
"MIT"
] | null | null | null | todolist/todos/migrations/0002_sharetodolist.py | abdu1aziz/todo-list-app | 698cc046ea4cf9259f8e9830a33166dc6d222abe | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-23 06:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('todos', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='shareTodoList',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('date', models.DateTimeField(auto_now_add=True)),
('userInfo', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
('workList', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='todos.worklist')),
],
),
]
| 32.653846 | 125 | 0.640754 | 92 | 849 | 5.782609 | 0.597826 | 0.06015 | 0.078947 | 0.12406 | 0.172932 | 0.172932 | 0.172932 | 0.172932 | 0 | 0 | 0 | 0.027778 | 0.236749 | 849 | 25 | 126 | 33.96 | 0.79321 | 0.050648 | 0 | 0 | 1 | 0 | 0.08209 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
140ab3e208fd8520fd27c0b61a95fd2da405a3b2 | 428 | py | Python | core/templatetags/extras.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | core/templatetags/extras.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | core/templatetags/extras.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from docutils.core import publish_parts
register = template.Library()
@register.filter(name='rst')
@stringfilter
def rst_to_html5(text):
parts = publish_parts(text, writer_name='html5', settings_overrides={'initial_header_level': 2})
return mark_safe(parts['html_title'] + parts['body'])
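# Illustrative template usage sketch (template and context names are hypothetical):
#
#   {% load extras %}
#   {{ article.body|rst }}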
| 32.923077 | 100 | 0.794393 | 57 | 428 | 5.77193 | 0.596491 | 0.091185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007792 | 0.100467 | 428 | 12 | 101 | 35.666667 | 0.846753 | 0 | 0 | 0 | 0 | 0 | 0.098131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.4 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
140dfac64786934c620c3477a54ffeec5d9625f8 | 1,083 | py | Python | ballet/__init__.py | HDI-Project/fhub_core | 9667a47fbd8b4caf2e92118dc5357f34aae2098b | [
"MIT"
] | 19 | 2021-04-06T18:56:39.000Z | 2022-03-15T00:23:00.000Z | ballet/__init__.py | HDI-Project/ballet | 9667a47fbd8b4caf2e92118dc5357f34aae2098b | [
"MIT"
] | 52 | 2018-09-27T01:11:58.000Z | 2021-03-24T19:11:18.000Z | ballet/__init__.py | HDI-Project/ballet | 9667a47fbd8b4caf2e92118dc5357f34aae2098b | [
"MIT"
] | 3 | 2019-12-07T17:55:34.000Z | 2021-02-02T17:58:39.000Z | # -*- coding: utf-8 -*-
"""Top-level package for ballet."""
__author__ = 'Micah Smith'
__email__ = 'micahs@mit.edu'
__version__ = '0.19.5'
# filter warnings
import warnings # noqa E402
warnings.filterwarnings(
action='ignore', module='scipy', message='^internal gelsd')
# silence sklearn deprecation warnings
import logging # noqa E402
logging.captureWarnings(True)
import sklearn # noqa E402
logging.captureWarnings(False)
warnings.filterwarnings(
action='ignore', module='sklearn', category=DeprecationWarning)
warnings.filterwarnings(
action='ignore', module='sklearn', category=FutureWarning)
# configure module-level logging
from ballet.util.log import logger # noqa E402
logger.addHandler(logging.NullHandler())
# re-export some names
from ballet.client import b # noqa E402
from ballet.contrib import collect_contrib_features # noqa E402
from ballet.feature import Feature # noqa E402
from ballet.project import load_config, Project # noqa E402
# for feature development, you really only need these two members
__all__ = (
'b',
'Feature',
)
| 27.769231 | 67 | 0.753463 | 133 | 1,083 | 5.992481 | 0.533835 | 0.080301 | 0.105395 | 0.12798 | 0.188206 | 0.138018 | 0.138018 | 0 | 0 | 0 | 0 | 0.031216 | 0.142198 | 1,083 | 38 | 68 | 28.5 | 0.826695 | 0.277932 | 0 | 0.125 | 0 | 0 | 0.119266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
140e681121ae14e453921573cd7aea566206c1e5 | 8,816 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/apigateway/models/cors_policy.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/apigateway/models/cors_policy.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/apigateway/models/cors_policy.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CorsPolicy(object):
"""
Enable CORS (Cross-Origin-Resource-Sharing) request handling.
"""
def __init__(self, **kwargs):
"""
Initializes a new CorsPolicy object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param allowed_origins:
The value to assign to the allowed_origins property of this CorsPolicy.
:type allowed_origins: list[str]
:param allowed_methods:
The value to assign to the allowed_methods property of this CorsPolicy.
:type allowed_methods: list[str]
:param allowed_headers:
The value to assign to the allowed_headers property of this CorsPolicy.
:type allowed_headers: list[str]
:param exposed_headers:
The value to assign to the exposed_headers property of this CorsPolicy.
:type exposed_headers: list[str]
:param is_allow_credentials_enabled:
The value to assign to the is_allow_credentials_enabled property of this CorsPolicy.
:type is_allow_credentials_enabled: bool
:param max_age_in_seconds:
The value to assign to the max_age_in_seconds property of this CorsPolicy.
:type max_age_in_seconds: int
"""
self.swagger_types = {
'allowed_origins': 'list[str]',
'allowed_methods': 'list[str]',
'allowed_headers': 'list[str]',
'exposed_headers': 'list[str]',
'is_allow_credentials_enabled': 'bool',
'max_age_in_seconds': 'int'
}
self.attribute_map = {
'allowed_origins': 'allowedOrigins',
'allowed_methods': 'allowedMethods',
'allowed_headers': 'allowedHeaders',
'exposed_headers': 'exposedHeaders',
'is_allow_credentials_enabled': 'isAllowCredentialsEnabled',
'max_age_in_seconds': 'maxAgeInSeconds'
}
self._allowed_origins = None
self._allowed_methods = None
self._allowed_headers = None
self._exposed_headers = None
self._is_allow_credentials_enabled = None
self._max_age_in_seconds = None
@property
def allowed_origins(self):
"""
**[Required]** Gets the allowed_origins of this CorsPolicy.
The list of allowed origins that the CORS handler will use to respond to CORS requests. The gateway will
send the Access-Control-Allow-Origin header with the best origin match for the circumstances. '*' will match
any origins, and 'null' will match queries from 'file:' origins. All other origins must be qualified with the
scheme, full hostname, and port if necessary.
:return: The allowed_origins of this CorsPolicy.
:rtype: list[str]
"""
return self._allowed_origins
@allowed_origins.setter
def allowed_origins(self, allowed_origins):
"""
Sets the allowed_origins of this CorsPolicy.
The list of allowed origins that the CORS handler will use to respond to CORS requests. The gateway will
send the Access-Control-Allow-Origin header with the best origin match for the circumstances. '*' will match
any origins, and 'null' will match queries from 'file:' origins. All other origins must be qualified with the
scheme, full hostname, and port if necessary.
:param allowed_origins: The allowed_origins of this CorsPolicy.
:type: list[str]
"""
self._allowed_origins = allowed_origins
@property
def allowed_methods(self):
"""
Gets the allowed_methods of this CorsPolicy.
The list of allowed HTTP methods that will be returned for the preflight OPTIONS request in the
Access-Control-Allow-Methods header. '*' will allow all methods.
:return: The allowed_methods of this CorsPolicy.
:rtype: list[str]
"""
return self._allowed_methods
@allowed_methods.setter
def allowed_methods(self, allowed_methods):
"""
Sets the allowed_methods of this CorsPolicy.
The list of allowed HTTP methods that will be returned for the preflight OPTIONS request in the
Access-Control-Allow-Methods header. '*' will allow all methods.
:param allowed_methods: The allowed_methods of this CorsPolicy.
:type: list[str]
"""
self._allowed_methods = allowed_methods
@property
def allowed_headers(self):
"""
Gets the allowed_headers of this CorsPolicy.
The list of headers that will be allowed from the client via the Access-Control-Allow-Headers header.
'*' will allow all headers.
:return: The allowed_headers of this CorsPolicy.
:rtype: list[str]
"""
return self._allowed_headers
@allowed_headers.setter
def allowed_headers(self, allowed_headers):
"""
Sets the allowed_headers of this CorsPolicy.
The list of headers that will be allowed from the client via the Access-Control-Allow-Headers header.
'*' will allow all headers.
:param allowed_headers: The allowed_headers of this CorsPolicy.
:type: list[str]
"""
self._allowed_headers = allowed_headers
@property
def exposed_headers(self):
"""
Gets the exposed_headers of this CorsPolicy.
The list of headers that the client will be allowed to see from the response as indicated by the
Access-Control-Expose-Headers header. '*' will expose all headers.
:return: The exposed_headers of this CorsPolicy.
:rtype: list[str]
"""
return self._exposed_headers
@exposed_headers.setter
def exposed_headers(self, exposed_headers):
"""
Sets the exposed_headers of this CorsPolicy.
The list of headers that the client will be allowed to see from the response as indicated by the
Access-Control-Expose-Headers header. '*' will expose all headers.
:param exposed_headers: The exposed_headers of this CorsPolicy.
:type: list[str]
"""
self._exposed_headers = exposed_headers
@property
def is_allow_credentials_enabled(self):
"""
Gets the is_allow_credentials_enabled of this CorsPolicy.
Whether to send the Access-Control-Allow-Credentials header to allow CORS requests with cookies.
:return: The is_allow_credentials_enabled of this CorsPolicy.
:rtype: bool
"""
return self._is_allow_credentials_enabled
@is_allow_credentials_enabled.setter
def is_allow_credentials_enabled(self, is_allow_credentials_enabled):
"""
Sets the is_allow_credentials_enabled of this CorsPolicy.
Whether to send the Access-Control-Allow-Credentials header to allow CORS requests with cookies.
:param is_allow_credentials_enabled: The is_allow_credentials_enabled of this CorsPolicy.
:type: bool
"""
self._is_allow_credentials_enabled = is_allow_credentials_enabled
@property
def max_age_in_seconds(self):
"""
Gets the max_age_in_seconds of this CorsPolicy.
The time in seconds for the client to cache preflight responses. This is sent as the Access-Control-Max-Age
if greater than 0.
:return: The max_age_in_seconds of this CorsPolicy.
:rtype: int
"""
return self._max_age_in_seconds
@max_age_in_seconds.setter
def max_age_in_seconds(self, max_age_in_seconds):
"""
Sets the max_age_in_seconds of this CorsPolicy.
The time in seconds for the client to cache preflight responses. This is sent as the Access-Control-Max-Age
if greater than 0.
:param max_age_in_seconds: The max_age_in_seconds of this CorsPolicy.
:type: int
"""
self._max_age_in_seconds = max_age_in_seconds
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
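# Hedged usage sketch (not part of the generated SDK file): per the
# __init__ docstring, the init_model_state_from_kwargs decorator maps
# these keyword arguments onto the corresponding properties.
if __name__ == '__main__':
    demo = CorsPolicy(
        allowed_origins=['https://example.com'],
        allowed_methods=['GET', 'POST'],
        max_age_in_seconds=600,
    )
    print(demo)  # __repr__ renders a formatted flat dict of the state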
| 36.733333 | 245 | 0.674342 | 1,124 | 8,816 | 5.073843 | 0.157473 | 0.032614 | 0.084166 | 0.078906 | 0.687007 | 0.602841 | 0.506575 | 0.485709 | 0.430475 | 0.366824 | 0 | 0.003067 | 0.260322 | 8,816 | 239 | 246 | 36.887029 | 0.871492 | 0.572255 | 0 | 0.084507 | 0 | 0 | 0.121034 | 0.027931 | 0 | 0 | 0 | 0 | 0 | 1 | 0.225352 | false | 0 | 0.028169 | 0.028169 | 0.408451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
141ff955c77ded0e08d843f987f0b62eb3d4713d | 562 | py | Python | Fusion/tests/cwin.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | Fusion/tests/cwin.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | Fusion/tests/cwin.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | def cwinstart(callobj, *args, **kwargs):
print('cwinstart')
print(' args', repr(args))
for arg in args:
print(' ', arg)
print(' kwargs', len(kwargs))
for k, v in kwargs.items():
print(' ', k, v)
w = callobj(*args, **kwargs)
print(' callobj()->', w)
return w
def cwincall(req1, req2, *args, **kwargs):
print('cwincall')
print(' req1=', req1, 'req2=', req2)
print(' args', repr(args))
for arg in args:
print(' ', arg)
print('kwargs')
for k, v in kwargs.items():
print(' ', k, v)
return 'tomorrow'
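# Hedged demo (not in the original file): 'dict' stands in for any window
# constructor callable; the argument values are arbitrary.
if __name__ == '__main__':
    w = cwinstart(dict, [('pos', 1)], color='red')
    cwincall('first', 'second', 3, mode='test')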
| 21.615385 | 42 | 0.55694 | 75 | 562 | 4.173333 | 0.266667 | 0.025559 | 0.14377 | 0.140575 | 0.485623 | 0.485623 | 0.485623 | 0.485623 | 0.485623 | 0.485623 | 0 | 0.014085 | 0.241993 | 562 | 25 | 43 | 22.48 | 0.720657 | 0 | 0 | 0.47619 | 0 | 0 | 0.163701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0 | 0 | 0.190476 | 0.571429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
1420f29df1fa70eb2e3757666daa61e2e39f87c0 | 358 | py | Python | setup.py | rchurch4/emoji_map | f924e6d8d059c4705d3360a690eaac2825de3e49 | [
"MIT"
] | null | null | null | setup.py | rchurch4/emoji_map | f924e6d8d059c4705d3360a690eaac2825de3e49 | [
"MIT"
] | null | null | null | setup.py | rchurch4/emoji_map | f924e6d8d059c4705d3360a690eaac2825de3e49 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='emoji_map',
version='0.1',
description='Maps unicode emoji to its description',
url='http://github.com/rchurch4/emoji_map',
author='Rob Churchill',
author_email='rchurch4@bu.edu',
license='MIT',
packages=['emoji_map'],
include_package_data=True,
zip_safe=False)
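# Hedged usage note (standard setuptools workflow, nothing project-specific):
#   pip install .            # install emoji_map from this checkout
#   python setup.py sdist    # build a source distribution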
| 27.538462 | 58 | 0.656425 | 45 | 358 | 5.066667 | 0.8 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014134 | 0.209497 | 358 | 12 | 59 | 29.833333 | 0.791519 | 0 | 0 | 0 | 0 | 0 | 0.349162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1423487aa9ea63d89b16cc57da5de0d83c612a84 | 7,560 | py | Python | tests/gdata_tests/analytics/data_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | tests/gdata_tests/analytics/data_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | tests/gdata_tests/analytics/data_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for Google Analytics Account Feed and Data Feed.
AccountFeedTest: All unit tests for AccountFeed class.
DataFeedTest: All unit tests for DataFeed class.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import unittest
from gdata import test_data
import gdata.analytics.data
import atom.core
import gdata.test_config as conf
class AccountFeedTest(unittest.TestCase):
"""Unit test for all custom elements in the Account Feed."""
def setUp(self):
"""Retrieves the test XML feed into a AccountFeed object."""
self.feed = atom.core.parse(test_data.ANALYTICS_ACCOUNT_FEED,
gdata.analytics.data.AccountFeed)
def testAccountEntryTableId(self):
"""Tests custom classes in Google Analytics Account Feed."""
entry = self.feed.entry[0]
self.assertEquals(entry.table_id.text, 'ga:1174')
def testAccountEntryProperty(self):
"""Tests the property classes in Google Analytics Account Feed."""
property = self.feed.entry[0].property
self.assertEquals(property[0].name, 'ga:accountId')
self.assertEquals(property[0].value, '30481')
self.assertEquals(property[1].name, 'ga:accountName')
self.assertEquals(property[1].value, 'Google Store')
self.assertEquals(property[2].name, 'ga:profileId')
self.assertEquals(property[2].value, '1174')
self.assertEquals(property[3].name, 'ga:webPropertyId')
self.assertEquals(property[3].value, 'UA-30481-1')
self.assertEquals(property[4].name, 'ga:currency')
self.assertEquals(property[4].value, 'USD')
self.assertEquals(property[5].name, 'ga:timezone')
self.assertEquals(property[5].value, 'America/Los_Angeles')
def testAccountEntryGetProperty(self):
"""Tests GetProperty inherited class in the AccountEntry class."""
entry = self.feed.entry[0]
self.assertEquals(entry.GetProperty('ga:accountId').value, '30481')
self.assertEquals(entry.GetProperty('ga:accountName').value, 'Google Store')
self.assertEquals(entry.GetProperty('ga:profileId').value, '1174')
self.assertEquals(entry.GetProperty('ga:webPropertyId').value, 'UA-30481-1')
self.assertEquals(entry.GetProperty('ga:currency').value, 'USD')
self.assertEquals(entry.GetProperty('ga:timezone').value, 'America/Los_Angeles')
class DataFeedTest(unittest.TestCase):
"""Unit test for all custom elements in the Data Feed."""
def setUp(self):
"""Retrieves the test XML feed into a DataFeed object."""
self.feed = atom.core.parse(test_data.ANALYTICS_DATA_FEED,
gdata.analytics.data.DataFeed)
def testDataFeed(self):
"""Tests custom classes in Google Analytics Data Feed."""
self.assertEquals(self.feed.start_date.text, '2008-10-01')
self.assertEquals(self.feed.end_date.text, '2008-10-31')
def testAggregates(self):
"""Tests Aggregates class in Google Analytics Data Feed."""
self.assert_(self.feed.aggregates is not None)
def testAggregatesElements(self):
"""Tests Metrics class in Aggregates class."""
metric = self.feed.aggregates.metric[0]
self.assertEquals(metric.confidence_interval, '0.0')
self.assertEquals(metric.name, 'ga:visits')
self.assertEquals(metric.type, 'integer')
self.assertEquals(metric.value, '136540')
metric = self.feed.aggregates.GetMetric('ga:visits')
self.assertEquals(metric.confidence_interval, '0.0')
self.assertEquals(metric.name, 'ga:visits')
self.assertEquals(metric.type, 'integer')
self.assertEquals(metric.value, '136540')
def testDataSource(self):
"""Tests DataSources class in Google Analytics Data Feed."""
self.assert_(self.feed.data_source[0] is not None)
def testDataSourceTableId(self):
"""Tests TableId class in the DataSource class."""
table_id = self.feed.data_source[0].table_id
self.assertEquals(table_id.text, 'ga:1174')
def testDataSourceTableName(self):
"""Tests TableName class in the DataSource class."""
table_name = self.feed.data_source[0].table_name
self.assertEquals(table_name.text, 'www.googlestore.com')
def testDataSourceProperty(self):
"""Tests Property clas in the DataSource class."""
property = self.feed.data_source[0].property
self.assertEquals(property[0].name, 'ga:profileId')
self.assertEquals(property[0].value, '1174')
self.assertEquals(property[1].name, 'ga:webPropertyId')
self.assertEquals(property[1].value, 'UA-30481-1')
self.assertEquals(property[2].name, 'ga:accountName')
self.assertEquals(property[2].value, 'Google Store')
def testDataSourceGetProperty(self):
"""Tests GetProperty utility method in the DataSource class."""
ds = self.feed.data_source[0]
self.assertEquals(ds.GetProperty('ga:profileId').value, '1174')
self.assertEquals(ds.GetProperty('ga:webPropertyId').value, 'UA-30481-1')
self.assertEquals(ds.GetProperty('ga:accountName').value, 'Google Store')
def testEntryDimension(self):
"""Tests Dimension class in Entry class."""
dim = self.feed.entry[0].dimension[0]
self.assertEquals(dim.name, 'ga:source')
self.assertEquals(dim.value, 'blogger.com')
def testEntryGetDimension(self):
"""Tests GetDimension utility method in the Entry class."""
dim = self.feed.entry[0].GetDimension('ga:source')
self.assertEquals(dim.name, 'ga:source')
self.assertEquals(dim.value, 'blogger.com')
error = self.feed.entry[0].GetDimension('foo')
self.assertEquals(error, None)
def testEntryMetric(self):
"""Tests Metric class in Entry class."""
met = self.feed.entry[0].metric[0]
self.assertEquals(met.confidence_interval, '0.0')
self.assertEquals(met.name, 'ga:visits')
self.assertEquals(met.type, 'integer')
self.assertEquals(met.value, '68140')
def testEntryGetMetric(self):
"""Tests GetMetric utility method in the Entry class."""
met = self.feed.entry[0].GetMetric('ga:visits')
self.assertEquals(met.confidence_interval, '0.0')
self.assertEquals(met.name, 'ga:visits')
self.assertEquals(met.type, 'integer')
self.assertEquals(met.value, '68140')
error = self.feed.entry[0].GetMetric('foo')
self.assertEquals(error, None)
def testEntryGetObject(self):
"""Tests GetObjectOf utility method in Entry class."""
entry = self.feed.entry[0]
dimension = entry.GetObject('ga:source')
self.assertEquals(dimension.name, 'ga:source')
self.assertEquals(dimension.value, 'blogger.com')
metric = entry.GetObject('ga:visits')
self.assertEquals(metric.name, 'ga:visits')
self.assertEquals(metric.value, '68140')
self.assertEquals(metric.type, 'integer')
self.assertEquals(metric.confidence_interval, '0.0')
error = entry.GetObject('foo')
self.assertEquals(error, None)
def suite():
"""Test Account Feed and Data Feed."""
return conf.build_suite([AccountFeedTest, DataFeedTest])
if __name__ == '__main__':
unittest.main()
| 34.678899 | 84 | 0.714153 | 964 | 7,560 | 5.554979 | 0.204357 | 0.18226 | 0.080672 | 0.026144 | 0.577031 | 0.477498 | 0.335574 | 0.265733 | 0.225584 | 0.178151 | 0 | 0.02414 | 0.150661 | 7,560 | 217 | 85 | 34.83871 | 0.809843 | 0.236376 | 0 | 0.267241 | 0 | 0 | 0.129272 | 0 | 0 | 0 | 0 | 0 | 0.543103 | 1 | 0.163793 | false | 0 | 0.043103 | 0 | 0.232759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1425326030a36f7088c87f42da842ce1d53ade94 | 7,974 | py | Python | tripleo_ansible/ansible_plugins/modules/tripleo_baremetal_expand_roles.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 22 | 2018-08-29T12:33:15.000Z | 2022-03-30T00:17:25.000Z | tripleo_ansible/ansible_plugins/modules/tripleo_baremetal_expand_roles.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 1 | 2020-02-07T20:54:34.000Z | 2020-02-07T20:54:34.000Z | tripleo_ansible/ansible_plugins/modules/tripleo_baremetal_expand_roles.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 19 | 2019-07-16T04:42:00.000Z | 2022-03-30T00:17:29.000Z | #!/usr/bin/python
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__metaclass__ = type
from ansible.module_utils import baremetal_deploy as bd
from ansible.module_utils.basic import AnsibleModule
import yaml
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tripleo_baremetal_expand_roles
short_description: Manage baremetal nodes with metalsmith
version_added: "2.9"
author: "Steve Baker (@stevebaker)"
description:
- Takes a baremetal deployment description of roles and node instances
and transforms that into an instance list and a heat environment file
for deployed-server.
options:
stack_name:
description:
- Name of the overcloud stack which will be deployed on these instances
default: overcloud
state:
description:
- Build instance list for the desired provision state, "present" to
provision, "absent" to unprovision, "all" for a combination of
"present" and "absent".
default: present
choices:
- present
- absent
- all
baremetal_deployment:
description:
- Data describing roles and baremetal node instances to provision for
those roles
type: list
elements: dict
suboptions:
name:
description:
- Mandatory role name
type: str
required: True
hostname_format:
description:
- Overrides the default hostname format for this role.
The default format uses the lower case role name.
For example, the default format for the Controller role is
%stackname%-controller-%index%. Only the Compute role does not
follow the role name rule. The Compute default format is
%stackname%-novacompute-%index%
type: str
count:
description:
- Number of instances to create for this role.
type: int
default: 1
defaults:
description:
- A dictionary of default values for instances entry properties.
An instances entry property overrides any defaults that you specify
in the defaults parameter.
type: dict
instances:
description:
- Values that you can use to specify attributes for specific nodes.
The length of this list must not be greater than the value of the
count parameter.
type: list
elements: dict
default_network:
description:
- Default nics entry when none are specified
type: list
suboptions: dict
default:
- network: ctlplane
vif: true
default_image:
description:
- Default image
type: dict
default:
href: overcloud-full
ssh_public_keys:
description:
- SSH public keys to load
type: str
user_name:
description:
- Name of the admin user to create
type: str
'''
RETURN = '''
instances:
description: Expanded list of instances to perform actions on
returned: changed
type: list
sample: [
{
"hostname": "overcloud-controller-0",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-controller-1",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-controller-2",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-novacompute-0",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-novacompute-1",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-novacompute-2",
"image": {
"href": "overcloud-full"
}
}
]
environment:
description: Heat environment data to be used with the overcloud deploy.
This is only a partial environment, further changes are
required once instance changes have been made.
returned: changed
type: dict
sample: {
"parameter_defaults": {
"ComputeDeployedServerCount": 3,
"ComputeDeployedServerHostnameFormat": "%stackname%-novacompute-%index%",
"ControllerDeployedServerCount": 3,
"ControllerDeployedServerHostnameFormat": "%stackname%-controller-%index%",
"HostnameMap": {
"overcloud-controller-0": "overcloud-controller-0",
"overcloud-controller-1": "overcloud-controller-1",
"overcloud-controller-2": "overcloud-controller-2",
"overcloud-novacompute-0": "overcloud-novacompute-0",
"overcloud-novacompute-1": "overcloud-novacompute-1",
"overcloud-novacompute-2": "overcloud-novacompute-2"
}
}
}
''' # noqa
EXAMPLES = '''
- name: Expand roles
tripleo_baremetal_expand_roles:
baremetal_deployment:
- name: Controller
count: 3
defaults:
image:
href: overcloud-full
networks: []
- name: Compute
count: 3
defaults:
image:
href: overcloud-full
networks: []
state: present
stack_name: overcloud
register: tripleo_baremetal_instances
'''
def main():
argument_spec = yaml.safe_load(DOCUMENTATION)['options']
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
)
state = module.params['state']
try:
if state in ('present', 'all'):
present, env, role_net_map, hostname_role_map = bd.expand(
roles=module.params['baremetal_deployment'],
stack_name=module.params['stack_name'],
expand_provisioned=True,
default_image=module.params['default_image'],
default_network=module.params['default_network'],
user_name=module.params['user_name'],
ssh_public_keys=module.params['ssh_public_keys'],
)
if state in ('absent', 'all'):
absent, _, _, _ = bd.expand(
roles=module.params['baremetal_deployment'],
stack_name=module.params['stack_name'],
expand_provisioned=False,
default_image=module.params['default_image'],
)
env = {}
role_net_map = {}
hostname_role_map = {}
if state == 'present':
instances = present
elif state == 'absent':
instances = absent
elif state == 'all':
instances = present + absent
module.exit_json(
changed=True,
msg='Expanded to %d instances' % len(instances),
instances=instances,
environment=env,
role_net_map=role_net_map,
hostname_role_map=hostname_role_map,
)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| 30.787645 | 91 | 0.577878 | 793 | 7,974 | 5.702396 | 0.311475 | 0.026537 | 0.033835 | 0.038921 | 0.210084 | 0.14927 | 0.12782 | 0.059708 | 0.040248 | 0.040248 | 0 | 0.006623 | 0.337221 | 7,974 | 258 | 92 | 30.906977 | 0.849007 | 0.074367 | 0 | 0.29386 | 0 | 0 | 0.753836 | 0.119212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004386 | false | 0 | 0.013158 | 0 | 0.017544 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1425809d2b5fe657e70492cd714e8e6e266db850 | 2,207 | py | Python | hexa/user_management/migrations/0003_feature_flags.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/user_management/migrations/0003_feature_flags.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/user_management/migrations/0003_feature_flags.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | # Generated by Django 3.2.6 on 2021-09-10 11:56
import uuid
import django.db.models.deletion
import django.db.models.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("user_management", "0002_remove_username"),
]
operations = [
migrations.CreateModel(
name="Feature",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("code", models.CharField(max_length=200)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="FeatureFlag",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("config", models.JSONField(blank=True, null=True)),
(
"feature",
models.ForeignKey(
on_delete=django.db.models.fields.CharField,
to="user_management.feature",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
]
| 30.232877 | 72 | 0.421386 | 160 | 2,207 | 5.68125 | 0.43125 | 0.044004 | 0.061606 | 0.110011 | 0.437844 | 0.437844 | 0.437844 | 0.354235 | 0.354235 | 0.354235 | 0 | 0.020997 | 0.482102 | 2,207 | 72 | 73 | 30.652778 | 0.774278 | 0.02039 | 0 | 0.584615 | 1 | 0 | 0.072685 | 0.010648 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.123077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1428fc857bf985d08474b17410858d44bd518123 | 1,009 | py | Python | Lettuce_Tests/old_tests/testFeatureCategoricalGridCoverage.py | jread-usgs/pyGDP | 72c8464934fec6643f111bf326b5c12bf47925fb | [
"CC0-1.0"
] | null | null | null | Lettuce_Tests/old_tests/testFeatureCategoricalGridCoverage.py | jread-usgs/pyGDP | 72c8464934fec6643f111bf326b5c12bf47925fb | [
"CC0-1.0"
] | null | null | null | Lettuce_Tests/old_tests/testFeatureCategoricalGridCoverage.py | jread-usgs/pyGDP | 72c8464934fec6643f111bf326b5c12bf47925fb | [
"CC0-1.0"
] | null | null | null | import pyGDP
import os
from nose.tools import assert_equal
from nose.tools import assert_not_equal
class TestFeatureCategoricalGridCoverage(object):
def test_submit_FCGC(self):
pyGDP.WPS_URL='http://cida.usgs.gov/gdp/process/WebProcessingService'
testPyGDP = pyGDP.pyGDPwebProcessing()
shapefile = 'sample:CONUS_states'
attribute = 'STATE'
value = 'Rhode Island'
dataSetURI = 'http://cida.usgs.gov/ArcGIS/services/statsgo_muid/MapServer/WCSServer'
dataType = '1'
outputFile_handle = testPyGDP.submitFeatureCategoricalGridCoverage(shapefile, dataSetURI, dataType, attribute, value, verbose=True)
# This test is not currently working because what comes from
# testPyGDP.submitFeatureCategoricalGridCoverage() is a NoneType
# even though I've verified that it consistently writes a file
# of the size below. I expect a string to come back from this
# function
assert_equal(os.path.getsize(outputFile_handle), 650)
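        # Hedged sketch (hypothetical, not in the original test): asserting on
        # the return value first would surface the NoneType problem directly,
        # using the assert_not_equal already imported at the top of this file:
        #   assert_not_equal(outputFile_handle, None)
        #   assert_equal(os.path.getsize(outputFile_handle), 650)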
| 38.807692 | 136 | 0.739346 | 119 | 1,009 | 6.176471 | 0.697479 | 0.021769 | 0.035374 | 0.051701 | 0.068027 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00486 | 0.184341 | 1,009 | 25 | 137 | 40.36 | 0.888214 | 0.249752 | 0 | 0 | 0 | 0 | 0.211718 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0 | null | null | 0 | 0.266667 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
143877dd2c390f5b0dc8e5729d0cf5ded4ef1c91 | 2,154 | py | Python | ldapauth/main/main.py | trutty/python-ldap | d656548641d2d3e45e868bb6ce1585cffe3c6848 | [
"CC0-1.0"
] | null | null | null | ldapauth/main/main.py | trutty/python-ldap | d656548641d2d3e45e868bb6ce1585cffe3c6848 | [
"CC0-1.0"
] | null | null | null | ldapauth/main/main.py | trutty/python-ldap | d656548641d2d3e45e868bb6ce1585cffe3c6848 | [
"CC0-1.0"
] | null | null | null | from flask import Flask, request
from flask_httpauth import HTTPBasicAuth
from auth_handler import AuthHandler
from cache import Cache
from os import environ
from yaml import safe_load
import logging
from connection_provider import ConnectionProvider
# init logging
logging.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s', level=logging.DEBUG)
# Init flask app
app = Flask(__name__)
auth = HTTPBasicAuth()
# Basic cache
CACHE_KEY_EXPIRATION_SECONDS = 60 * 60 * 8 # 8 hours
cache = Cache(CACHE_KEY_EXPIRATION_SECONDS)
# Init LDAP config
logging.info("Reading config.yaml")
with open("/config/config.yaml", 'r') as stream:
config = safe_load(stream)
# Create the AuthHandler instance
logging.info("Initializing authentication handler")
authHandler = AuthHandler(
environ['LDAP_MANAGER_BINDDN'],
environ["LDAP_MANAGER_PASSWORD"],
ConnectionProvider(config['ldapServers'])
)
@auth.verify_password
def login(username, password):
# Check if username or password is empty
if not username or not password:
return False
# Get lookup key for config
ldap_config_key = request.headers['Ldap-Config-Key']
# Check if authentication was cached
if cache.validate(username, ldap_config_key, password):
logging.info("[user=%s, config=%s] authenticated from cache", username, ldap_config_key)
return True
# Lookup LDAP config
ldapParameters = config[ldap_config_key]
# Validate user
if authHandler.validate(username, password, ldap_config_key, ldapParameters):
# Add successful authentication to cache
cache.set(username, ldap_config_key, password)
return True
return False
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
@auth.login_required
def index(path):
code = 200
msg = "LDAP Authentication"
headers = []
return msg, code, headers
# health endpoint
@app.route('/healthz')
def healthz():
if cache is None or authHandler is None:
return "not healthy", 503
else:
return "healthy", 200
# Main
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000, debug=True)
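# Hedged usage sketch (header name, port and endpoints come from the code
# above; the username, password and config key are placeholders):
#   curl -u alice:secret -H 'Ldap-Config-Key: corp' http://localhost:9000/
#   curl http://localhost:9000/healthz
# A 200 on '/' means the basic-auth credentials validated against LDAP
# (or were still present in the in-process cache).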
| 26.268293 | 96 | 0.716806 | 276 | 2,154 | 5.445652 | 0.373188 | 0.05988 | 0.060546 | 0.041916 | 0.07851 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013046 | 0.181523 | 2,154 | 81 | 97 | 26.592593 | 0.839478 | 0.134169 | 0 | 0.078431 | 0 | 0 | 0.163784 | 0.011351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.137255 | 0.156863 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
1444b1b92ae47cbc60d016724b5d44bf1122493f | 901 | py | Python | 2012/AU_Danny.py | Valchris/IEEEXtreme_WorkingAsIntended | c3e04633ce6d9c1a1582081767e8f2090adffa28 | [
"MIT"
] | null | null | null | 2012/AU_Danny.py | Valchris/IEEEXtreme_WorkingAsIntended | c3e04633ce6d9c1a1582081767e8f2090adffa28 | [
"MIT"
] | null | null | null | 2012/AU_Danny.py | Valchris/IEEEXtreme_WorkingAsIntended | c3e04633ce6d9c1a1582081767e8f2090adffa28 | [
"MIT"
] | null | null | null | import sys
import re
line = sys.stdin.readline().strip()
commands = line.split()
if len(commands) > 15: #Too Long
print 'ERROR'
exit(0)
for i in range(0,len(line)): #Missing Spaces
if i % 2 == 1 and line[i] != ' ':
print 'ERROR'
exit(0)
for i in range(0,len(commands) - 2,2): #Repeated Symbol
if commands[i] == commands[i + 2]:
print 'REJECT'
exit(0)
regex_test = re.search("[^RYGPCX ]", line) #Invalid Symbol
if regex_test is not None:
print 'ERROR'
exit(0)
flashing_test1 = re.search("[^R] [PC]", line)
flashing_test2 = re.search("[^ ][PC][^ ]", line)
flashing_test3 = re.search("[PC] [^R]", line)
if flashing_test1 is not None or flashing_test2 is not None or flashing_test3 is not None: #Flashing not surrounded
print 'REJECT'
exit(0)
if line[0] != 'R': #Doesn't start with R
print 'REJECT'
exit(0)
print 'ACCEPT' | 21.97561 | 115 | 0.619312 | 141 | 901 | 3.900709 | 0.361702 | 0.054545 | 0.065455 | 0.081818 | 0.178182 | 0.109091 | 0.109091 | 0.109091 | 0.109091 | 0.109091 | 0 | 0.031564 | 0.226415 | 901 | 41 | 116 | 21.97561 | 0.757532 | 0.104329 | 0 | 0.413793 | 0 | 0 | 0.100998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.068966 | null | null | 0.241379 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1446b8c5fba47ab7e1297f04805cc27c8254a77e | 544 | py | Python | dogdog045.py | chikochiko76/anime2021 | 0a7e3dcf6b0ee47a824d4fae6f061154930ce587 | [
"CC0-1.0"
] | null | null | null | dogdog045.py | chikochiko76/anime2021 | 0a7e3dcf6b0ee47a824d4fae6f061154930ce587 | [
"CC0-1.0"
] | null | null | null | dogdog045.py | chikochiko76/anime2021 | 0a7e3dcf6b0ee47a824d4fae6f061154930ce587 | [
"CC0-1.0"
] | null | null | null | import io
import requests
from PIL import Image  # Pillow; AShape and ACanvas come from this project's animation library
class AImage(AShape):
color: any
def __init__(self, width=100, height=None, cx=None, cy=None, image='pet_darui_dog.png'):
AShape.__init__(self, width, height, cx, cy)
if image.startswith('http'):
self.pic = Image.open(io.BytesIO(requests.get(image).content))
else:
self.pic = Image.open(image)
def render(self, canvas: ACanvas, frame: int):
ox, oy, w, h = self.bounds()
pic = self.pic.resize((int(w), int(h)))
canvas.image.paste(pic, (int(ox), int(oy)), pic)
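# Hedged usage sketch (AShape and ACanvas come from this project's animation
# library; the PNG path is the constructor's default):
#   img = AImage(width=120, cx=60, cy=60)
#   img.render(canvas, frame=0)  # pastes the resized image onto canvas.image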
| 36.266667 | 92 | 0.595588 | 78 | 544 | 4.025641 | 0.538462 | 0.066879 | 0.082803 | 0.101911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007264 | 0.240809 | 544 | 14 | 93 | 38.857143 | 0.753027 | 0 | 0 | 0 | 0 | 0 | 0.038603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1446e34643a51f72b5218098028b06210bd3c768 | 1,898 | py | Python | tests/contrib/aiohttp/conftest.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/aiohttp/conftest.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/aiohttp/conftest.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import aiohttp
import aiohttp_jinja2
import pytest
from ddtrace.contrib.aiohttp.middlewares import trace_app
from ddtrace.contrib.aiohttp_jinja2.patch import patch as patch_jinja2
from ddtrace.internal.utils import version
from ddtrace.pin import Pin
from .app.web import setup_app
if version.parse_version(aiohttp.__version__) < (3, 0, 0):
@pytest.fixture
def aiohttp_client(test_client):
return test_client
@pytest.fixture
def app_tracer(tracer, loop):
app = setup_app()
trace_app(app, tracer)
return app, tracer
@pytest.fixture
def patched_app_tracer(app_tracer):
patch_jinja2()
app, tracer = app_tracer
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
@pytest.fixture
def untraced_app_tracer(tracer, loop):
patch_jinja2()
app = setup_app()
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
else:
@pytest.fixture
async def app_tracer(tracer, loop):
app = setup_app()
trace_app(app, tracer)
return app, tracer
@pytest.fixture
async def patched_app_tracer(app_tracer):
patch_jinja2()
app, tracer = app_tracer
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
@pytest.fixture
async def untraced_app_tracer(tracer, loop):
patch_jinja2()
app = setup_app()
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
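# Hedged usage sketch (hypothetical test, not part of this conftest):
# the fixtures above would combine with pytest-aiohttp's client fixture
# along these lines:
#
#   async def test_homepage_is_traced(patched_app_tracer, aiohttp_client):
#       app, tracer = patched_app_tracer
#       client = await aiohttp_client(app)
#       resp = await client.get('/')
#       assert resp.status == 200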
| 26 | 70 | 0.643309 | 238 | 1,898 | 4.957983 | 0.180672 | 0.183051 | 0.076271 | 0.10678 | 0.681356 | 0.681356 | 0.681356 | 0.681356 | 0.681356 | 0.681356 | 0 | 0.016012 | 0.27608 | 1,898 | 72 | 71 | 26.361111 | 0.842795 | 0.140674 | 0 | 0.617021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.170213 | 0.021277 | 0.404255 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1446f15e2cee0b0f3b21fb8bde4f8852bd30e37b | 1,074 | py | Python | douban.movie_requests.py | willcod/requests_example | 572826c4d7081f21e99e4661b273e8ad0e67e046 | [
"MIT"
] | null | null | null | douban.movie_requests.py | willcod/requests_example | 572826c4d7081f21e99e4661b273e8ad0e67e046 | [
"MIT"
] | null | null | null | douban.movie_requests.py | willcod/requests_example | 572826c4d7081f21e99e4661b273e8ad0e67e046 | [
"MIT"
] | null | null | null | import requests, json
from bs4 import BeautifulSoup
hds=[{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},\
{'User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},\
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}]
url_hot='https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&page_limit=50&page_start=0'
url_highrate = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E8%B1%86%E7%93%A3%E9%AB%98%E5%88%86&page_limit=50&page_start=0'
response = requests.get(url_highrate, headers=hds[1])
# print(response.text)
data = json.loads(response.text)
items = data['subjects']
for item in items[:]:
print(item["title"] + ' ' + item['url'])
if False:
movie_resp = requests.get(item['url'], headers=hds[0])
#print(movie_resp.text);
soup = BeautifulSoup(movie_resp.text, 'html.parser')
div = soup.select('strong.ll.rating_num')[0]
print(div.text) | 51.142857 | 138 | 0.68622 | 186 | 1,074 | 3.892473 | 0.510753 | 0.037293 | 0.066298 | 0.070442 | 0.301105 | 0.254144 | 0.127072 | 0.127072 | 0.127072 | 0.127072 | 0 | 0.087553 | 0.117318 | 1,074 | 21 | 139 | 51.142857 | 0.67616 | 0.040037 | 0 | 0 | 0 | 0.294118 | 0.541302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14471fb559051a475c6f693ab9f75659647a5b99 | 1,323 | py | Python | app/serialmonitor/forms.py | fretchen/ArduinoMagnetometerWeb | 1d8bee848e488b7136dbae20172bd8785931dee0 | [
"MIT"
] | null | null | null | app/serialmonitor/forms.py | fretchen/ArduinoMagnetometerWeb | 1d8bee848e488b7136dbae20172bd8785931dee0 | [
"MIT"
] | null | null | null | app/serialmonitor/forms.py | fretchen/ArduinoMagnetometerWeb | 1d8bee848e488b7136dbae20172bd8785931dee0 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, HiddenField, IntegerField
from wtforms.validators import DataRequired, NumberRange
class ConnectForm(FlaskForm):
'''
The form for connecting to the Arduino
'''
id = HiddenField('A hidden field')
serial_port = StringField('Connect on port:', validators=[DataRequired()], description = 'Serial port')
name = StringField('Name of the Arduino:', description = 'Name', default = 'Arduino')
submit = SubmitField('Connect')
class UpdateForm(FlaskForm):
'''
The form for updating the serial connection to the Arduino
'''
id = HiddenField('A hidden field')
serial_port = StringField('Update to port:', validators=[DataRequired()])
baud_rate = IntegerField('Baudrate:', validators=[ NumberRange(4800,1000000)])
submit = SubmitField('Update connection')
class SerialWaitForm(FlaskForm):
'''
The form for setting the waiting time between serial measurements
'''
id = HiddenField('A hidden field')
serial_time = IntegerField('Time between measurements (s):', [DataRequired(), NumberRange(2,300)])
submit = SubmitField('Update waiting time.')
class DisconnectForm(FlaskForm):
'''
The form for disconnecting from the Arduino
'''
id = HiddenField('A hidden field')
submit = SubmitField('Disconnect')
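# Hedged usage sketch (the route, serial helper and template names are
# hypothetical; the form fields are the ones defined above):
#
#   @app.route('/connect', methods=['GET', 'POST'])
#   def connect():
#       form = ConnectForm()
#       if form.validate_on_submit():
#           open_serial(form.serial_port.data, form.name.data)
#       return render_template('connect.html', form=form)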
| 34.815789 | 107 | 0.699169 | 141 | 1,323 | 6.524823 | 0.375887 | 0.054348 | 0.069565 | 0.082609 | 0.305435 | 0.305435 | 0.305435 | 0.267391 | 0.267391 | 0.267391 | 0 | 0.01385 | 0.181406 | 1,323 | 37 | 108 | 35.756757 | 0.835642 | 0.120937 | 0 | 0.2 | 0 | 0 | 0.201452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
14479cf21718a5d4475f47cd6e178f58e9dd330a | 421 | py | Python | degvabank/degvabank/apps/user/signals.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | null | null | null | degvabank/degvabank/apps/user/signals.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | null | null | null | degvabank/degvabank/apps/user/signals.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | 1 | 2022-02-03T03:18:43.000Z | 2022-02-03T03:18:43.000Z | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import User, EmailDevice
@receiver(post_save, sender=User)
def creating_user_settings(sender, instance, created, raw, **kwargs):
"""Creating the user device for a new User"""
if created and not raw:
EmailDevice.objects.create(user=instance, name=f"personal device for user {instance.pk}", confirmed=True)
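# Hedged note: Django only registers this receiver once the module is
# imported, conventionally from the app config's ready() hook
# (the class name below is hypothetical):
#
#   class UserConfig(AppConfig):
#       def ready(self):
#           from . import signals  # noqa: F401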
| 35.083333 | 113 | 0.760095 | 60 | 421 | 5.266667 | 0.6 | 0.063291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142518 | 421 | 11 | 114 | 38.272727 | 0.875346 | 0.092637 | 0 | 0 | 0 | 0 | 0.101333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.428571 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
144e75f0af24909b2f53fb50c3580fc3294a54f6 | 2,171 | py | Python | S4/S4 Library/simulation/situations/visiting/stay_the_night_situation.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/situations/visiting/stay_the_night_situation.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/situations/visiting/stay_the_night_situation.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from role.role_state import RoleState
from situations.situation_complex import SituationState, SituationStateData
from situations.situation_job import SituationJob
from situations.visiting.visiting_situation_common import VisitingNPCSituation
import services
import sims4.tuning.instances
import sims4.tuning.tunable
import situations.bouncer
import tunable_time
class StayTheNightSituation(VisitingNPCSituation):
INSTANCE_TUNABLES = {'invited_job': sims4.tuning.tunable.TunableTuple(situation_job=SituationJob.TunableReference(description='\n The situation job for the sim spending the night.\n '), staying_role_state=RoleState.TunableReference(description='\n The role state for the sim spending the night.\n ')), 'when_to_leave': tunable_time.TunableTimeOfDay(description='\n The time of day for the invited sim to leave.\n ', default_hour=7)}
@classmethod
def _states(cls):
return (SituationStateData(1, _StayState),)
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.invited_job.situation_job, cls.invited_job.staying_role_state)]
@classmethod
def default_job(cls):
return cls.invited_job.situation_job
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._start_time = None
def start_situation(self):
super().start_situation()
self._start_time = services.time_service().sim_now
self._change_state(_StayState())
def _get_duration(self):
if self._seed.duration_override is not None:
return self._seed.duration_override
time_span = self._start_time.time_till_next_day_time(self.when_to_leave)
return time_span.in_minutes()
sims4.tuning.instances.lock_instance_tunables(StayTheNightSituation, exclusivity=situations.bouncer.bouncer_types.BouncerExclusivityCategory.VISIT, creation_ui_option=situations.situation_types.SituationCreationUIOption.NOT_AVAILABLE, duration=0, _implies_greeted_status=True)
class _StayState(SituationState):
pass
| 48.244444 | 558 | 0.739291 | 252 | 2,171 | 6.051587 | 0.373016 | 0.029508 | 0.029508 | 0.040656 | 0.078689 | 0.078689 | 0.078689 | 0 | 0 | 0 | 0 | 0.003948 | 0.183326 | 2,171 | 44 | 559 | 49.340909 | 0.856176 | 0 | 0 | 0.085714 | 0 | 0 | 0.140028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0.028571 | 0.257143 | 0.085714 | 0.657143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
14560849915561883889885a3454e4e8c7afc839 | 286 | py | Python | example.py | davehenton/tf-lyrics | 6c2a397df22acdf41a63012aacfcdc7b7eaf1302 | [
"MIT"
] | null | null | null | example.py | davehenton/tf-lyrics | 6c2a397df22acdf41a63012aacfcdc7b7eaf1302 | [
"MIT"
] | null | null | null | example.py | davehenton/tf-lyrics | 6c2a397df22acdf41a63012aacfcdc7b7eaf1302 | [
"MIT"
] | null | null | null | from tflyrics import Poet, LyricsGenerator
artists = ['Bob Dylan', 'Tim Buckley', 'The Beatles']
gen = LyricsGenerator(artists, per_artist=5)
ds = gen.as_dataset(batch_size=4)
p = Poet()
p.train_on(ds, n_epochs=10)
poem = p.generate(start_string='Hey ', n_gen_chars=1000)
print(poem)
| 26 | 56 | 0.741259 | 46 | 286 | 4.434783 | 0.782609 | 0.215686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031621 | 0.115385 | 286 | 10 | 57 | 28.6 | 0.774704 | 0 | 0 | 0 | 0 | 0 | 0.122378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1458c5f022bcbdb49cfdd4c7518d98dccf9754e4 | 1,372 | py | Python | provider/facebook.py | marinewater/pyramid-social-auth | 926f230294ec6b0fdf02a5ed4113073d82a9d18c | [
"MIT"
] | 2 | 2015-02-10T01:19:21.000Z | 2016-07-24T14:40:59.000Z | provider/facebook.py | marinewater/pyramid-social-auth | 926f230294ec6b0fdf02a5ed4113073d82a9d18c | [
"MIT"
] | null | null | null | provider/facebook.py | marinewater/pyramid-social-auth | 926f230294ec6b0fdf02a5ed4113073d82a9d18c | [
"MIT"
] | null | null | null | from provider.base import BaseProvider
class FacebookProvider(BaseProvider):
def __init__(self, client_id, client_secret, name, redirect_uri, state=None):
"""
:param client_id:
:param client_secret:
:param name:
:param redirect_uri:
:param state:
:return:
"""
authorize_url = 'https://www.facebook.com/dialog/oauth'
access_token_url = 'https://graph.facebook.com/oauth/access_token'
base_url = 'https://graph.facebook.com/'
super().__init__(client_id, client_secret, authorize_url, access_token_url, base_url, name, redirect_uri,
state=state)
def auth(self, scope=None):
if scope is None:
scope = 'email public_profile'
return super().auth(scope)
def get_user_info(self, info):
"""
retrieve basic user info
"""
if self.info[info] is None:
self.info[info] = self.get_info(info)
def get_user(self):
"""
retrieve username
:return: :rtype: string
"""
self.get_user_info('me')
return self.info['me']['name']
def get_email(self):
"""
retrieve users email address
:return: :rtype: string
"""
self.get_user_info('me')
return self.info['me']['email'] | 28 | 113 | 0.569971 | 156 | 1,372 | 4.782051 | 0.320513 | 0.053619 | 0.044236 | 0.053619 | 0.198391 | 0.134048 | 0.134048 | 0.134048 | 0.134048 | 0.134048 | 0 | 0 | 0.311953 | 1,372 | 49 | 114 | 28 | 0.790254 | 0.161079 | 0 | 0.095238 | 0 | 0 | 0.147029 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.238095 | false | 0 | 0.047619 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
145a2aac257a81ff3e9b25e0317ce229eb2a70b3 | 15,109 | py | Python | test_vs_model_DEMs.py | drewleonard42/CoronaTemps | 210642175e063b39dd1878c2731f15bf34e4225f | [
"BSD-2-Clause"
] | 1 | 2019-07-31T17:27:12.000Z | 2019-07-31T17:27:12.000Z | test_vs_model_DEMs.py | SolarDrew/CoronaTemps | 210642175e063b39dd1878c2731f15bf34e4225f | [
"BSD-2-Clause"
] | null | null | null | test_vs_model_DEMs.py | SolarDrew/CoronaTemps | 210642175e063b39dd1878c2731f15bf34e4225f | [
"BSD-2-Clause"
] | 2 | 2015-07-07T10:31:03.000Z | 2015-10-19T15:42:47.000Z | # -*- coding: utf-8 -*-
"""
Script to produce synthetic AIA data based on arbitrary model DEMs and test the
results of the tempmap code against the model.
Created on Mon Jul 28 16:34:28 2014
@author: Drew Leonard
"""
import numpy as np
from matplotlib import use, rc
use('agg')
rc('savefig', bbox='tight', pad_inches=0.5)
import matplotlib.pyplot as plt
from matplotlib import patches
import sunpy
from sunpy.map import Map
from temperature import TemperatureMap
from utils import gaussian, load_temp_responses
from os import path, makedirs
import subprocess32 as subp
from itertools import product
from skimage import measure
from sys import argv
# Decide whether to assess single-parameter or full-Gaussian method
n_pars = int(argv[1])
# Define which wavelength to use for EM estimation with 1-parameter TMaps
emwlen = str(argv[2])
# Define CoronaTemps home folder and output folder
CThome = path.join(path.expanduser('~'), 'CoronaTemps')
outdir = path.join(CThome, 'validation', '{}pars'.format(n_pars))
tmap_script = path.join(CThome, 'create_tempmap.py')
if not path.exists(outdir): makedirs(outdir)
# Define parameter ranges
#temps = np.arange(4.6, 7.405, 0.01)#0.005)
temps = np.arange(5.6, 7.005, 0.01)
widths = np.array([0.01, 0.1, 0.5])#np.arange(0.01, 0.605, 0.005) # Just copying Aschwanden's range here
#heights = 10 ** np.arange(18, 37, 0.1)#0.05)
heights = 10 ** np.arange(20, 35, 0.1)
#print heights
n_temps = len(temps)
n_widths = len(widths)
n_heights = len(heights)
parvals = np.array([i for i in product(temps, widths, heights)])
#print parvals.shape
n_vals = n_temps * n_widths * n_heights
#print n_temps, n_widths, n_heights, n_vals, n_vals * 6
# Create model DEMs and synthetic emission
emission = np.zeros((6, n_temps, n_widths, n_heights))
#print emission.shape
logt = np.arange(0, 15.05, 0.05)
resp = load_temp_responses()
delta_t = logt[1] - logt[0]
for p, params in enumerate(parvals):
dem = gaussian(logt, *params)
f = resp * dem
t = np.where(temps == params[0])[0][0]
w = np.where(widths == params[1])[0][0]
h = np.where(heights == params[2])[0][0]
emission[:, t, w, h] = np.sum(f, axis=1) * delta_t
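# For each channel c the loop above evaluates the discrete integral
#   I_c = sum_T R_c(logT) * DEM(logT) * dlogT
# i.e. each synthetic intensity is the channel's temperature response
# folded with the Gaussian model DEM over the logT grid defined above.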
#emission = emission / emission[2, :, :, :]
#print '----', emission[2, :, :, :].min(), emission[2, :, :, :].max()
# Load AIA response functions
resp = load_temp_responses()
# Load an arbitrary map just for its metadata
voidmap = Map(sunpy.AIA_171_IMAGE)
mapmeta = voidmap.meta
#rect = patches.Rectangle([25.0, 5.6], 1.0, 1.0, color='black', fill=True, clip_on=False)
# Run synthetic data through 1param tempmap method
for w, wid in enumerate(widths):#heights):
print '\nWidth:', wid
fig = plt.figure(figsize=(30, 12))
for wl, wlength in enumerate(['94', '131', '171', '193', '211', '335']):
#emiss = Map(emission[wl, :, :, w], mapmeta)
emiss = Map(emission[wl, :, w, :].copy(), mapmeta)
emiss.cmap = sunpy.cm.get_cmap('sdoaia{}'.format(wlength))
emiss.meta['naxis1'] = emiss.shape[1]
emiss.meta['naxis2'] = emiss.shape[0]
#emiss.meta['cdelt1'] = widths[1] - widths[0]
emiss.meta['cdelt1'] = heights[1] - heights[0] #np.log10(heights[1]) - np.log10(heights[0])
emiss.meta['cdelt2'] = temps[1] - temps[0]
#emiss.meta['crval1'] = widths[0]
emiss.meta['crval1'] = heights[0] #np.log10(heights[0])
emiss.meta['crval2'] = temps[0]
emiss.meta['crpix1'] = 0.5
emiss.meta['crpix2'] = 0.5
if wlength == '94': wlength = '094'
fits_dir = path.join(CThome, 'data', 'synthetic', wlength)
if not path.exists(fits_dir): makedirs(fits_dir)
emiss.save(path.join(fits_dir, 'model.fits'), clobber=True)
#print '----', emission[2, :, :, :].min(), emission[2, :, :, :].max()
#print '------', emission[2, :, w, :].min(), emission[2, :, w, :].max()
emiss.data /= emission[2, :, w, :]
#print '--------', emiss.min(), emiss.max()
ax = fig.add_subplot(1, 6, wl+1)
emiss.plot(aspect='auto', vmin=emiss.min(), vmax=emiss.max())
plt.title('{}'.format(wlength))
plt.xlabel('Input EM')
plt.ylabel('Input log(T)')
plt.colorbar()
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
#plt.savefig(path.join(outdir, 'model_emission_h={}'.format(np.log10(wid)).replace('.', '_')))
plt.savefig(path.join(outdir, 'model_emission_w={}'.format(wid).replace('.', '_')))
plt.close()
#images = [Map(emission[i, :, :, w], mapmeta) for i in range(6)]
images = [Map(emission[i, :, w, :], mapmeta) for i in range(6)]
if n_pars == 3:
cmdargs = "mpiexec -n 10 python {} model {} {} {} {} {} {}".format(
tmap_script, n_pars, path.join(CThome, 'data'),
None, None, True, True).split()
else:
cmdargs = "python {} model {} {} {} {} {} {}".format(
tmap_script, n_pars, path.join(CThome, 'data'),
None, None, True, True).split()
status = subp.call(cmdargs)
newmap = TemperatureMap(fname=path.join(CThome, 'temporary.fits'))
subp.call(["rm", path.join(CThome, 'temporary.fits')])
data, meta = newmap.data, newmap.meta
fitsmap = Map(np.log10(newmap.goodness_of_fit), newmap.meta.copy())
#print fitsmap.max()
newmap.data = data
print '-------------MINMAX:-------------'#, newmap.min(), newmap.max(), newmap.shape,
#print newmap.data[newmap.data == 0].shape, '----------\n'
print 'GoF', fitsmap.min(), fitsmap.mean(), fitsmap.max()
print 'T_out', newmap.min(), newmap.mean(), newmap.max()
#truetemp = np.array(list(temps)*n_widths).reshape((n_widths, n_temps)).T
truetemp = np.array(list(temps)*n_heights).reshape((n_heights, n_temps)).T
#print truetemp.shape, data.shape
diff = Map((abs(truetemp - data) / truetemp) * 100, newmap.meta.copy())
print 'T_diff', diff.min(), diff.mean(), diff.max()
if n_pars == 3:
wdata = Map(newmap.dem_width, newmap.meta.copy())
truew = np.ones(shape=(n_temps, n_heights)) * wid
#print 'truew', truew.min(), truew.mean(), truew.max()
diffw = Map((abs(truew - wdata.data) / truew) * 100, newmap.meta.copy())
print 'w_out', wdata.min(), wdata.mean(), wdata.max()
print 'w_diff', diffw.min(), diffw.mean(), diffw.max()
#print wid, newmap.xrange, newmap.yrange, newmap.scale
#print wid, diff.xrange, diff.yrange, diff.scale
fig = plt.figure(figsize=(24, 12))
fig.add_subplot(1, 3, 1)
newmap.plot(cmap='coolwarm', vmin=5.6, vmax=7.0, aspect='auto')
plt.colorbar()
plt.title('Solution log(T)', fontsize=28)
plt.ylabel('Input log(T)', fontsize=24)
plt.xlabel('Input EM', fontsize=24)#width', fontsize=24)
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
ax = fig.add_subplot(1, 3, 2)
#print 'diff', diff.min(), diff.max()
#print np.nanmin(diff.data), np.nanmax(diff.data)
diff.plot(cmap='RdYlGn_r', aspect='auto')#, vmin=diff.min(), vmax=diff.max())
plt.colorbar()
plt.title('Difference from input (%)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
ax = fig.add_subplot(1, 3, 3)
#print 'fits', fitsmap.min(), fitsmap.max()
#print np.nanmin(fitsmap.data), np.nanmax(fitsmap.data)
fitsmap.plot(cmap='cubehelix', aspect='auto')#,
# vmin=np.nanmin(fitsmap.data)-(2.0*(np.nanstd(fitsmap.data))),
# vmax=np.nanmax(fitsmap.data)+(2.0*(np.nanstd(fitsmap.data))))
plt.colorbar()
plt.title('log(Goodness-of-fit)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
#plt.savefig(path.join(outdir, 'tempsolutions_em={}'.format(np.log10(wid)).replace('.', '_')))
plt.savefig(path.join(outdir, 'tempsolutions_wid={:.3f}'.format(wid).replace('.', '_')))
plt.close()
if n_pars == 3:
emdata = Map(newmap.emission_measure, newmap.meta.copy())
else:
if emwlen == 'three':
total = np.zeros(newmap.shape)
for wlen in ['171', '193', '211']:  # renamed to avoid shadowing the outer width index w
    emdata = newmap.calculate_em(wlen, model=True)
total += emdata.data
emdata.data = total/3.0
elif emwlen == 'all':
total = np.zeros(newmap.shape)
for wlen in ['94', '131', '171', '193', '211', '335']:  # same rename as above
    emdata = newmap.calculate_em(wlen, model=True)
total += emdata.data
emdata.data = total/6.0
else:
emdata = newmap.calculate_em(emwlen, model=True)
trueem = np.array(list(heights)*n_temps).reshape(n_temps, n_heights)
diffem = Map((abs(trueem - emdata.data) / trueem) * 100, newmap.meta.copy())
#print wid, emdata.xrange, emdata.yrange, emdata.scale
#print wid, diffem.xrange, diffem.yrange, diffem.scale
#print wid, fitsmap.xrange, fitsmap.yrange, fitsmap.scale
fig = plt.figure(figsize=(24, 12))
ax = fig.add_subplot(1, 3, 1)
print 'em_out', emdata.min(), emdata.mean(), emdata.max()
print 'em_diff', diffem.min(), diffem.mean(), diffem.max()
#print np.nanmin(emdata.data), np.nanmax(emdata.data)
emdata.plot(cmap='coolwarm', aspect='auto',
vmin=emdata.min(), vmax=emdata.max())
# vmin=np.log10(heights[0]), vmax=np.log10(heights[-1]))
contours = measure.find_contours(emdata.data, heights[0])
for contour in contours:
contour[:, 0] *= emdata.scale['y']
contour[:, 1] *= emdata.scale['x']
contour[:, 0] += emdata.yrange[0]
contour[:, 1] += emdata.xrange[0]
plt.plot(contour[:, 1], contour[:, 0], color='blue')
plt.xlim(*emdata.xrange)
plt.ylim(*emdata.yrange)
contours = measure.find_contours(emdata.data, heights[-1])
for contour in contours:
contour[:, 0] *= emdata.scale['y']
contour[:, 1] *= emdata.scale['x']
contour[:, 0] += emdata.yrange[0]
contour[:, 1] += emdata.xrange[0]
plt.plot(contour[:, 1], contour[:, 0], color='black')
plt.xlim(*emdata.xrange)
plt.ylim(*emdata.yrange)
if n_pars == 3:
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Solution EM', fontsize=28)
plt.ylabel('Input log(T)', fontsize=24)
plt.xlabel('Input EM', fontsize=24)#width', fontsize=24)
ax = fig.add_subplot(1, 3, 2)
#print 'diffem', diffem.min(), diffem.max()
#print np.nanmin(diffem.data), np.nanmax(diffem.data)
diffem.plot(cmap='RdYlGn_r', aspect='auto',
# vmin=0, vmax=50)
vmin=diffem.min(), vmax=diffem.max())
#fig.gca().add_artist(rect)
if n_pars == 3:
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Difference from input (%)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
ax = fig.add_subplot(1, 3, 3)
fitsmap.plot(cmap='cubehelix', aspect='auto')
if n_pars == 3:
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('log(Goodness-of-fit)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
if n_pars == 3:
plt.savefig(path.join(outdir, 'emsolutions_wid={:.3f}'.format(wid).replace('.', '_')))
else:
plt.savefig(path.join(outdir, 'emsolutions_wid={:.3f}_wlen={}'.format(wid, emwlen).replace('.', '_')))
plt.close()
if n_pars == 3:
#print wid, wdata.xrange, wdata.yrange, wdata.scale
#print wid, diffw.xrange, diffw.yrange, diffw.scale
fig = plt.figure(figsize=(24, 12))
ax = fig.add_subplot(1, 3, 1)
wdata.plot(cmap='coolwarm', vmin=widths[0], vmax=widths[-1], aspect='auto')
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Solution width', fontsize=28)
plt.ylabel('Input log(T)', fontsize=24)
plt.xlabel('Input EM', fontsize=24)#width', fontsize=24)
ax = fig.add_subplot(1, 3, 2)
#print 'diffw', diffw.min(), diffw.max()
#print np.nanmin(diffw.data), np.nanmax(diffw.data)
diffw.plot(cmap='RdYlGn_r', vmin=diffw.min(), vmax=diffw.max(), aspect='auto')
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Difference from input (%)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
ax = fig.add_subplot(1, 3, 3)
fitsmap.plot(cmap='cubehelix', aspect='auto')
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('log(Goodness-of-fit)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
plt.savefig(path.join(outdir, 'widsolutions_wid={:.3f}'.format(wid).replace('.', '_')))
plt.close()
"""w = np.where((widths > 0.097)*(widths < 0.103))
dataslice = data[:, w].reshape(len(temps))
diffslice = diff[:, w].reshape(len(temps))
fitslice = fits[:, w].reshape(len(temps))
fig = plt.figure(figsize=(16, 12))
plt.plot(temps, dataslice)
plt.title('Solution log(T) at width=0.1', fontsize=28)
plt.xlabel('Input log(T)', fontsize=24)
plt.ylabel('Solution log(T)', fontsize=24)
plt.savefig('/home/drew/Dropbox/euroscipy/dataslice')
plt.close()
fig = plt.figure(figsize=(16, 12))
plt.plot(temps, diffslice)
plt.title('Difference from input at width=0.1', fontsize=28)
plt.xlabel('Input log(T)', fontsize=24)
plt.ylabel('Difference (%)', fontsize=24)
plt.savefig('/home/drew/Dropbox/euroscipy/diffslice')
plt.close()
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(1, 1, 1)
plt.plot(temps, np.log10(fitslice))
plt.title('Goodness-of-fit at width=0.1', fontsize=28)
plt.xlabel('Input log(T)', fontsize=24)
plt.ylabel('log(Goodness-of-fit)', fontsize=24)
plt.savefig('/home/drew/Dropbox/euroscipy/fitslice')
plt.close()"""
# Run synthetic data through 3param tempmap method
# Somehow display the results.
| 41.057065 | 110 | 0.613608 | 2,174 | 15,109 | 4.212512 | 0.146274 | 0.043678 | 0.053942 | 0.042804 | 0.504259 | 0.454575 | 0.427823 | 0.384145 | 0.334462 | 0.326818 | 0 | 0.042867 | 0.195579 | 15,109 | 367 | 111 | 41.168937 | 0.71063 | 0.207889 | 0 | 0.512397 | 0 | 0 | 0.10808 | 0.012373 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.053719 | null | null | 0.03719 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
145d2ba73cbb15562b18fa340ca1117a5d2135a1 | 2,459 | py | Python | rubicon_reminders_cli.py | ZG34/StratNotes | ec8e0f50740a558d14b5f790aa67b26dc3a85d90 | [
"MIT"
] | 6 | 2022-01-21T07:16:12.000Z | 2022-01-23T22:01:16.000Z | rubicon_reminders_cli.py | ZG34/StratNotes | ec8e0f50740a558d14b5f790aa67b26dc3a85d90 | [
"MIT"
] | null | null | null | rubicon_reminders_cli.py | ZG34/StratNotes | ec8e0f50740a558d14b5f790aa67b26dc3a85d90 | [
"MIT"
] | 4 | 2022-01-21T07:17:03.000Z | 2022-01-23T03:37:12.000Z | # the comments in this file were made while learning, as reminders
# to RUN APP IN CMD PROMPT: cd to this directory, or place in default CMD directory:
# then run 'python rubicon_reminders_cli.py'
from os import listdir
from datetime import datetime
# this assigns dt variable as date + timestamp
dt = (datetime.now())
# TODO: number note items per entry
# open existing or create a new file prompt:
p1 = input("(V)iew or (N)ew [V/N]: ").upper()
if p1 == "V":
# this views file directory of existing notes if first input is (view)
for file in listdir():
if file.endswith(".txt"):
print(file)
# below opens existing file, allows multiple note lines, and dates it when finished with session.
old_file = (input("which file would you like to open: "))
hdl = open(old_file + ".txt", "r+") # using r+ by default places text at beginning, overwriting.
for line in hdl: # as long as you first READ the file, then r+ becomes APPEND TO END.
print(line.strip())
of_note = input("Add Note: ")
if of_note == "done": # FIXME add accept on any 'done' check for upper and lowercase
# specifies notes were reviewed if first note entry is "done"
hdl.write(" REVIEWED: ")
hdl.write(str(dt))
# if first entered note is not 'done', continue asking for more notes until entry == 'done'
else:
hdl.write('\n')
hdl.write(of_note)
hdl.write('\n')
while of_note != "done":
of_note = input("Add more notes: ")
            while of_note != "done":
                hdl.write(of_note)
                hdl.write('\n')
                break  # without this break the same note would be rewritten forever
else:
hdl.write("SESSION END: ")
hdl.write(str(dt))
hdl.write('\n')
hdl.close()
# below is the block for generating and noting in a new file, if line 15 == 'N'
elif p1 == "N":
new_file = input("new file name: ")
    hdl = open(new_file + ".txt", "a")  # add .txt so the file appears in the (V)iew listing
nf_note = input("Add Note: ")
if nf_note == "done":
print("finished")
else:
hdl.write(nf_note)
hdl.write('\n')
while nf_note != "done":
nf_note = input("Add more notes: ")
while nf_note != "done":
hdl.write(nf_note)
hdl.write('\n')
break
else:
hdl.write("SESSION END: ")
hdl.write(str(dt))
hdl.write('\n')
hdl.close()
else:
print("Error: please enter V or N")
| 34.633803 | 101 | 0.581537 | 353 | 2,459 | 3.994334 | 0.368272 | 0.096454 | 0.044681 | 0.036879 | 0.208511 | 0.175887 | 0.139007 | 0.073759 | 0.073759 | 0.073759 | 0 | 0.002904 | 0.299715 | 2,459 | 70 | 102 | 35.128571 | 0.815912 | 0.363156 | 0 | 0.519231 | 0 | 0 | 0.159355 | 0 | 0 | 0 | 0 | 0.014286 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
145d4d73e5688c8c92783fdd2c96db07fb334f0c | 352 | py | Python | script/generate_user.py | MTDzi/data_nanodegree_project_5 | a436b34d4a1aa030357d1db82d1e1a7439061c1a | [
"Apache-2.0"
] | null | null | null | script/generate_user.py | MTDzi/data_nanodegree_project_5 | a436b34d4a1aa030357d1db82d1e1a7439061c1a | [
"Apache-2.0"
] | 6 | 2020-04-19T10:18:05.000Z | 2020-04-28T16:26:07.000Z | script/generate_user.py | MTDzi/data_nanodegree_project_5 | a436b34d4a1aa030357d1db82d1e1a7439061c1a | [
"Apache-2.0"
] | null | null | null | import os
from airflow import models, settings
from airflow.contrib.auth.backends.password_auth import PasswordUser
user = PasswordUser(models.User())
user.username = os.environ['AIRFLOW_UI_USER']
user.password = os.environ['AIRFLOW_UI_PASSWORD']
user.superuser = True
session = settings.Session()
session.add(user)
session.commit()
session.close()
| 23.466667 | 68 | 0.795455 | 47 | 352 | 5.851064 | 0.446809 | 0.08 | 0.116364 | 0.130909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 352 | 14 | 69 | 25.142857 | 0.859375 | 0 | 0 | 0 | 0 | 0 | 0.096591 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.272727 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
1466e6f9e9390c64c5ae93f8f73e9d6b213aba54 | 4,536 | py | Python | cs15211/PalindromePermutation.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/PalindromePermutation.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/PalindromePermutation.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://leetcode.com/problems/palindrome-permutation/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/palindrome-permutation.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 266. Palindrome Permutation
#
# Given a string, determine if a permutation of the string could form a palindrome.
#
# For example,
# "code" -> False, "aab" -> True, "carerac" -> True.
#
# Hint:
#
# Consider the palindromes of odd vs even length. What difference do you notice?
# Count the frequency of each character.
# If each character occurs even number of times,
# then it must be a palindrome. How about character which occurs odd number of times?
#
# #count of odd number char < 2
# Companies
# Google Uber Bloomberg
# Related Topics
# Hash Table
# Similar Questions
# Longest Palindromic Substring Valid Anagram Palindrome Permutation II Longest Palindrome
#
import unittest
import collections
# 20ms 99.07%
class Solution(object):
def canPermutePalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
# print collections.Counter(s).values()
return sum(v % 2 for v in collections.Counter(s).values()) < 2
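
# Not from the original file: a hedged Python sketch of the set-toggle idea
# described in the Java "Thought" section below. Characters are toggled in and
# out of a set; a palindrome permutation allows at most one leftover character.
class SolutionSetToggle(object):
    def canPermutePalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        odd_chars = set()
        for ch in s:
            # A character seen an even number of times leaves the set again
            if ch in odd_chars:
                odd_chars.remove(ch)
            else:
                odd_chars.add(ch)
        return len(odd_chars) <= 1
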
# 20ms 99.07%
from collections import defaultdict
class Solution2(object):
def canPermutePalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
dict = defaultdict(int)
for char in s:
dict[char] = dict[char] + 1
odd = 0
for cnt in dict.values():
if cnt % 2 == 1:
odd += 1
if odd > 1:
return False
return True
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
# Java solution
Java = '''
# Thought: https://leetcode.com/problems/palindrome-permutation/solution/
#
Time complexity : O(n). We traverse over the string s of length n once only.
Space complexity : O(n). The set can grow up to a maximum size of n in case of all distinct elements.
The idea is to iterate over the string, adding the current character to the set if the set doesn't
contain it, or removing it from the set if it does.
When the iteration is finished, just return set.size()==0 || set.size()==1.
set.size()==0 corresponds to the situation where every character occurs an even number of times, and
set.size()==1 corresponds to every character occurring an even number of times except one.
# 1ms 65.75%
class Solution {
public boolean canPermutePalindrome(String s) {
Set<Character> set=new HashSet<Character>();
for(int i=0; i<s.length(); ++i){
if (!set.contains(s.charAt(i)))
set.add(s.charAt(i));
else
set.remove(s.charAt(i));
}
return set.size()==0 || set.size()==1;
}
}
# same as above
class Solution {
public boolean canPermutePalindrome(String s) {
Set<Character> set = new HashSet();
for (int i = 0; i < s.length(); i++) {
if (!set.add(s.charAt(i))) {
set.remove(s.charAt(i));
}
}
return set.size() <= 1;
}
}
# 1ms 65.75%
class Solution {
public boolean canPermutePalindrome(String s) {
BitSet bs = new BitSet();
for (byte b : s.getBytes())
bs.flip(b);
return bs.cardinality() < 2;
}
}
# count char with boolean[128]
# 0ms 100%
class Solution {
public boolean canPermutePalindrome(String s) {
boolean[] arr = new boolean[128];
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
arr[c] = !arr[c];
}
boolean odd = false;
for (int i = 0; i < 128; i++) {
if (arr[i]) {
                if (odd) { // a second character already had an odd count
return false;
} else {
odd = true;
}
}
}
return true;
}
}
# 0ms 100%
class Solution {
public boolean canPermutePalindrome(String s) {
if (s == null || s.length() == 0) return true;
int[] map = new int[128]; //or use 256 depending on encoding
int count = 0;
for (int i = 0; i < s.length(); i++) {
map[s.charAt(i)]++;
if ( (map[s.charAt(i)] & 1) == 0) { //%2 ==0
count--;
} else {
count++;
}
}
return count <= 1;
}
}
'''
| 28.173913 | 106 | 0.573633 | 582 | 4,536 | 4.448454 | 0.347079 | 0.02163 | 0.02472 | 0.050212 | 0.309 | 0.295867 | 0.261105 | 0.224411 | 0.188104 | 0.142526 | 0 | 0.027044 | 0.307099 | 4,536 | 160 | 107 | 28.35 | 0.796691 | 0.195988 | 0 | 0.212963 | 0 | 0.037037 | 0.784065 | 0.077703 | 0 | 0 | 0 | 0 | 0.009259 | 1 | 0.027778 | false | 0 | 0.027778 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
147035cef75b4248a1685bdc47735841a296c61d | 2,407 | py | Python | data_pipeline/db/file_query_results.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 16 | 2017-10-31T21:43:26.000Z | 2019-08-11T08:49:06.000Z | data_pipeline/db/file_query_results.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 1 | 2017-11-01T06:25:56.000Z | 2017-11-01T06:25:56.000Z | data_pipeline/db/file_query_results.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 9 | 2017-10-30T05:23:15.000Z | 2022-02-17T03:53:09.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: file_query_results
# Purpose: Represents the query result object returned from a file query
#
# Notes:
###############################################################################
import itertools
from .query_results import QueryResults
from data_pipeline.stream.file_reader import FileReader
def default_post_process_func(line):
return line
class FileQueryResults(QueryResults):
def __init__(self, filename, post_process_func):
super(FileQueryResults, self).__init__()
self._handle = FileReader(filename)
if post_process_func is None:
self._post_process_func = default_post_process_func
else:
self._post_process_func = post_process_func
def __iter__(self):
return self
def next(self):
line = self._handle.readline().strip('\n')
if not line:
self._handle.close()
raise StopIteration
return self._post_process_func(line)
def fetchone(self):
line = None
try:
line = self.next()
        except StopIteration:
pass
return line
def fetchall(self):
return [self._post_process_func(l.strip('\n'))
for l in self._handle]
def fetchmany(self, arraysize=None):
if arraysize > 0:
return [self._post_process_func(l.strip('\n'))
for l in itertools.islice(self._handle, arraysize)]
return self.fetchall()
def __del__(self):
self._handle.close()
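
# Hedged usage sketch (not from the original module; the file path and the
# default identity post-processing are assumptions):
#
#     results = FileQueryResults('/tmp/records.txt', None)
#     first = results.fetchone()   # first line, or None for an empty file
#     rest = results.fetchall()    # remaining lines, newline-stripped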
| 31.671053 | 79 | 0.636061 | 291 | 2,407 | 5.075601 | 0.429553 | 0.074475 | 0.101557 | 0.06432 | 0.068382 | 0.051456 | 0.051456 | 0.051456 | 0.051456 | 0.051456 | 0 | 0.002695 | 0.229331 | 2,407 | 75 | 80 | 32.093333 | 0.793531 | 0.359369 | 0 | 0.157895 | 0 | 0 | 0.004405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.026316 | 0.078947 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14728ecbe8e51b8719ee9b289419e71b4c078ce0 | 1,542 | py | Python | FIST.py | IGN-Styly/FIST | 69f0b11b863f3440d716ad302e7fc2acddfb145f | [
"MIT"
] | 2 | 2022-01-20T18:57:53.000Z | 2022-03-16T23:23:47.000Z | FIST.py | IGN-Styly/FIST | 69f0b11b863f3440d716ad302e7fc2acddfb145f | [
"MIT"
] | 2 | 2022-01-20T18:41:46.000Z | 2022-01-24T18:15:53.000Z | FIST.py | IGN-Styly/FIST | 69f0b11b863f3440d716ad302e7fc2acddfb145f | [
"MIT"
] | 1 | 2022-01-20T01:29:03.000Z | 2022-01-20T01:29:03.000Z | import json
def is_valid(smc_type): # checks if smc_type is valid
if smc_type == 'vmt':
return True
elif smc_type == 'flt':
return True
elif smc_type == 'nfl':
return True
else:
return False
def parse_vmt(contract):
    try:
        # Indexing (unlike .get) raises KeyError when a field is missing,
        # so a missing field actually fails validation
        contract['value']
        contract['seller']
        contract['product']
    except (KeyError, TypeError):
        return False
    return True


def parse_flt(contract):
    try:
        contract['value']
        contract['seller']
        contract['product']
        contract['land-id']['1']
    except (KeyError, TypeError):
        return False
    return True


def parse_nfl(contract):
    try:
        contract['value']
        contract['seller']
        contract['product']
        contract['land-id']['1']
    except (KeyError, TypeError):
        return False
    return True
def parse_type(smc_type, contract):
    if smc_type == 'vmt':
        return parse_vmt(contract)
    elif smc_type == 'flt':
        return parse_flt(contract)
    elif smc_type == 'nfl':
        return parse_nfl(contract)
def parse_contract(raw_json):
    try:
        contract = json.loads(raw_json)  # tries to load the contract from its JSON string
        smc_type = contract.get('type')
    except (ValueError, TypeError):
        return False
    if not is_valid(smc_type):  # validate that the contract type is valid
        return False
if parse_type(smc_type=smc_type, contract=contract) is True:
return contract
else:
return False
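
# Hedged usage sketch (the contract below is a made-up example based only on
# the fields parse_vmt() checks; it is not taken from the original project):
#
#     raw = '{"type": "vmt", "value": 10, "seller": "alice", "product": "apples"}'
#     contract = parse_contract(raw)   # returns the dict, or False when invalid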
| 22.028571 | 78 | 0.592088 | 195 | 1,542 | 4.558974 | 0.169231 | 0.173228 | 0.049494 | 0.07649 | 0.552306 | 0.412823 | 0.412823 | 0.373453 | 0.373453 | 0.373453 | 0 | 0.00185 | 0.298962 | 1,542 | 69 | 79 | 22.347826 | 0.820537 | 0.060311 | 0 | 0.736842 | 0 | 0 | 0.073356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.017544 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14770d630f638d640c9e37a29b68e980bdb4f14d | 2,897 | py | Python | tests/list_tests.py | kzawisto/mc | ce37f0bbfd73e0767b4edc905ce2a75777b33f42 | [
"MIT"
] | null | null | null | tests/list_tests.py | kzawisto/mc | ce37f0bbfd73e0767b4edc905ce2a75777b33f42 | [
"MIT"
] | null | null | null | tests/list_tests.py | kzawisto/mc | ce37f0bbfd73e0767b4edc905ce2a75777b33f42 | [
"MIT"
] | null | null | null | from hamcrest import *
from nose.tools import eq_
from mc import List, Some, Nothing, add
def test_list_map():
eq_(List([1, 2, 3]).map(lambda x: x * 2), [2, 4, 6])
def test_list_flat_map():
eq_(List([1, 3]).flat_map(lambda x: (x * 2, x * 4)), [2, 4, 6, 12])
def test_list_filter():
eq_(List([1, 2, 3]).filter(lambda x: x < 2), [1])
def test_list_fold():
eq_(List([1, 2, 3]).fold(lambda x, y: x * y, 1), 6)
def test_list_group_by():
eq_(
List([1, 2, 3, 4, 5, 6]).group_by(lambda x: x % 2),
{1: [1, 3, 5], 0: [2, 4, 6]}
)
def test_list_mk_string():
eq_(List([5, 6, 7]).mk_string("_", "<", ">"), "<5_6_7>")
def test_list_to_dict():
eq_(List([(5, 6), (7, 8)]).to_dict(), {5: 6, 7: 8})
def test_list_to_set():
eq_(List([5, 6, 7]).to_set().to_list(), List([5, 6, 7]))
def test_list_multiproc_map():
def process_el(x):
return x * 2
eq_(List([1, 2, 3]).multiproc_map(process_el), [2, 4, 6])
def test_list_foreach():
dictionary = {}
def add_to_dict(value):
dictionary[value] = value
List([9, 8, 7]).foreach(add_to_dict)
actual = set(dictionary.keys())
eq_(actual, {9, 8, 7})
def test_list_should_flat_map_iterables():
assert_that(
List([1, 2]).flat_map(lambda x: {
x, x * 2, x * 3}), contains_inanyorder(1, 2, 3, 2, 4, 6)
)
def test_list_reduce_should_return_nothing_for_empty_list():
assert_that(
List([]).reduce(lambda x, y: x), equal_to(Nothing())
)
def test_list_reduce_should_aggregate_values():
assert_that(
List([1, 2, 3]).reduce(lambda x, y: x + y), equal_to(Some(6))
)
def test_list_addition():
assert_that(
List([1, 2]) + List(["3", 4]), equal_to(List([1, 2, "3", 4]))
)
def test_zip_with_idx():
assert_that(
List(["A","C","D"]).zip_with_idx(), equal_to(List([(0,"A"),(1,"C"),(2,"D")]))
)
def test_list_pick_one():
assert_that(
calling(List(['1','2']).pick_one), raises(AssertionError)
)
assert_that(
calling(List([]).pick_one), raises(AssertionError)
)
assert_that(
List([1]).pick_one(), equal_to(1)
)
def test_accumulate():
assert_that(
List([1,2,3]).accumulate(add, 2), equal_to(8)
)
def test_count():
assert_that(
List([1,2,3]).count(), equal_to(3)
)
def test_zip():
assert_that(
List([1,2]).zip([3,4]) , equal_to([(1,3),(2,4)])
)
def test_zip_shift():
assert_that(
List([1,2]).zip_shift() , equal_to([(1,2)])
)
assert_that(
List([1,2]).zip_shift(2) , equal_to([])
)
assert_that(
List([1,2,3]).zip_shift(2) , equal_to([(1,3)])
)
assert_that(
List([1,2,3]).zip_shift(1) , equal_to([(1,2),(2,3)])
)
| 21.145985 | 85 | 0.556438 | 476 | 2,897 | 3.12395 | 0.155462 | 0.098857 | 0.072629 | 0.05649 | 0.468056 | 0.287828 | 0.145931 | 0.113652 | 0.080027 | 0.080027 | 0 | 0.065914 | 0.235416 | 2,897 | 136 | 86 | 21.301471 | 0.605418 | 0 | 0 | 0.215054 | 0 | 0 | 0.006904 | 0 | 0 | 0 | 0 | 0 | 0.193548 | 1 | 0.247312 | false | 0 | 0.032258 | 0.010753 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
147a5096d0d2e1067ea8f8785a929ae310227d3e | 386 | py | Python | Darlington/phase1/python Basic 2/day 21 solution/qtn1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Darlington/phase1/python Basic 2/day 21 solution/qtn1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Darlington/phase1/python Basic 2/day 21 solution/qtn1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | #program to compute and print sum of two given integers (more than or equal to zero).
# If given integers or the sum have more than 80 digits, print "overflow".
print("Input first integer:")
x = int(input())
print("Input second integer:")
y = int(input())
if x >= 10 ** 80 or y >= 10 ** 80 or x + y >= 10 ** 80:
print("Overflow!")
else:
print("Sum of the two integers: ",x + y) | 38.6 | 86 | 0.647668 | 67 | 386 | 3.731343 | 0.447761 | 0.048 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045752 | 0.207254 | 386 | 10 | 87 | 38.6 | 0.771242 | 0.409326 | 0 | 0 | 0 | 0 | 0.330396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
147d0d54209eeeaab12be3279f9d110b874ff369 | 5,136 | py | Python | controllers/mainController.py | jersobh/zfs-resty | 2a71d89df6b90992723a87ee8bf5eeb2ac27cc8a | [
"MIT"
] | 11 | 2019-01-03T04:05:31.000Z | 2021-09-22T23:47:05.000Z | controllers/mainController.py | jersobh/zfs-resty | 2a71d89df6b90992723a87ee8bf5eeb2ac27cc8a | [
"MIT"
] | 11 | 2020-10-28T08:02:06.000Z | 2022-03-18T08:02:51.000Z | controllers/mainController.py | jersobh/zfs-resty | 2a71d89df6b90992723a87ee8bf5eeb2ac27cc8a | [
"MIT"
] | null | null | null | import uuid
from datetime import datetime, timedelta
from controllers import zfsController
import jwt
import pam
import render
JWT_SECRET = "7wXJ4kxCRWJpMQNqRVTVR3Qbc"
JWT_ALGORITHM = "HS256"
JWT_EXP_DELTA_SECONDS = 4300
async def index(request):
    return await render.json({'error': 'nothing to see here...'}, 200)
async def auth(request):
try:
data = await request.json()
user = data['username']
password = data['password']
if pam.authenticate(user, password):
payload = {
'user': user,
'session_id': str(uuid.uuid4()),
'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)
}
jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)
return await render.json({'token': jwt_token.decode('utf-8')}, 200)
        else:
            return await render.json({'error': 'Invalid credentials'}, 401)
except Exception as e:
return await render.json({'error': str(e)}, 200)
async def check_token(request):
try:
jwt_token = request.headers.get('Authorization', None)
payload = jwt.decode(jwt_token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
return payload['session_id']
except (jwt.DecodeError, jwt.ExpiredSignatureError):
return False
async def create_pool(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.create_pool(data['name'], data['raid'], data['devices'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def delete_pool(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.delete_pool(data['name'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def check_status(request):
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_status()
            return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)


async def get_storage_info(request):
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_disk_info()
            return await render.json(res, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)


async def get_io_status(request):
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_IO_stats()
            return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.add_new_disk(data['pool'], data['device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 500)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_spare_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.add_spare_disk(data['pool'], data['device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def replace_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.replace_disk(data['pool'], data['old_device'], data['new_device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def set_mountpoint(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.set_mountpoint(data['mountpoint'], data['pool'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
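
# Hedged wiring sketch (not from the original file): each handler takes a
# request object with an async .json() method and a headers mapping, so with
# an aiohttp-style application they could be registered roughly like this:
#
#     app.router.add_post('/auth', auth)
#     app.router.add_post('/pool/create', create_pool)
#     app.router.add_post('/pool/delete', delete_pool)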
| 31.900621 | 104 | 0.596379 | 623 | 5,136 | 4.82825 | 0.150883 | 0.091423 | 0.14129 | 0.118684 | 0.664894 | 0.648604 | 0.648604 | 0.648604 | 0.648604 | 0.648604 | 0 | 0.024496 | 0.284657 | 5,136 | 160 | 105 | 32.1 | 0.79423 | 0 | 0 | 0.631579 | 0 | 0 | 0.090945 | 0.004869 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.015038 | 0.045113 | 0 | 0.263158 | 0.067669 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
147e734bd09bed751a8e4643798609eb58918fad | 539 | py | Python | updater/models.py | h4ck3rm1k3/srtracker | 2ef54b1854bbc082c871ce08ea22f6c9ce03eb37 | [
"Ruby"
] | 1 | 2019-04-27T20:15:11.000Z | 2019-04-27T20:15:11.000Z | updater/models.py | h4ck3rm1k3/srtracker | 2ef54b1854bbc082c871ce08ea22f6c9ce03eb37 | [
"Ruby"
] | null | null | null | updater/models.py | h4ck3rm1k3/srtracker | 2ef54b1854bbc082c871ce08ea22f6c9ce03eb37 | [
"Ruby"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Sequence
Base = declarative_base()
class Subscription(Base):
__tablename__ = 'subscriptions'
id = Column(Integer, Sequence('subscription_id_seq'), primary_key=True)
sr_id = Column(String, index=True)
method = Column(String)
contact = Column(String)
class UpdateInfoItem(Base):
__tablename__ = 'updateinfo'
key = Column(String, primary_key=True)
value = Column(String)
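
# Hedged setup sketch (not from the original module; the database URL is an
# assumption):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#     engine = create_engine('sqlite:///srtracker.db')
#     Base.metadata.create_all(engine)
#     Session = sessionmaker(bind=engine)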
| 25.666667 | 80 | 0.714286 | 59 | 539 | 6.271186 | 0.457627 | 0.162162 | 0.075676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19295 | 539 | 20 | 81 | 26.95 | 0.850575 | 0 | 0 | 0 | 0 | 0 | 0.078067 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.923077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
14817026f39bd06023f2b84c932bb54cab1a147b | 690 | py | Python | example/schema.py | devind-team/devind-django-dictionaries | 6b2086a15590c968450a5c6fa2a81b4734ee1a81 | [
"MIT"
] | null | null | null | example/schema.py | devind-team/devind-django-dictionaries | 6b2086a15590c968450a5c6fa2a81b4734ee1a81 | [
"MIT"
] | 1 | 2022-03-30T02:44:05.000Z | 2022-03-30T02:44:05.000Z | example/schema.py | devind-team/devind-django-dictionaries | 6b2086a15590c968450a5c6fa2a81b4734ee1a81 | [
"MIT"
] | null | null | null |
import graphene
from typing import cast
from graphene_django import DjangoObjectType
from graphene_django.debug import DjangoDebug
from django.contrib.auth import get_user_model
import devind_dictionaries.schema
class UserType(DjangoObjectType):
class Meta:
model = get_user_model()
fields = ('id', 'username', 'last_name', 'email', 'is_active',)
class Query(
devind_dictionaries.schema.Query,
graphene.ObjectType
):
debug = graphene.Field(DjangoDebug, name='__debug')
class Mutation(
devind_dictionaries.schema.Mutation,
graphene.ObjectType
):
pass
schema = graphene.Schema(query=cast(graphene.ObjectType, Query), mutation=Mutation)
| 20.294118 | 83 | 0.750725 | 79 | 690 | 6.392405 | 0.417722 | 0.106931 | 0.142574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15942 | 690 | 33 | 84 | 20.909091 | 0.87069 | 0 | 0 | 0.190476 | 0 | 0 | 0.05814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.047619 | 0.285714 | 0 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
1486390989e81040ce5dfa57404584bdd0aad30e | 462 | py | Python | bedrock/app/utils/jinja.py | ronbeltran/webapp2-bedrock | 42909fd6eb99ffe19ff941f9c66c9c84548139c6 | [
"MIT"
] | 1 | 2019-01-09T10:14:38.000Z | 2019-01-09T10:14:38.000Z | bedrock/app/utils/jinja.py | ronbeltran/webapp2-bedrock | 42909fd6eb99ffe19ff941f9c66c9c84548139c6 | [
"MIT"
] | null | null | null | bedrock/app/utils/jinja.py | ronbeltran/webapp2-bedrock | 42909fd6eb99ffe19ff941f9c66c9c84548139c6 | [
"MIT"
] | null | null | null | import os
import webapp2
import jinja2
import config
from app.utils.compressor import WEBASSETS_ENV
JINJA_ENV = jinja2.Environment(
autoescape=lambda x: True,
extensions=['jinja2.ext.autoescape',
'webassets.ext.jinja2.AssetsExtension'],
loader=jinja2.FileSystemLoader(
os.path.join(config.PROJECT_ROOT, 'templates')),
)
JINJA_ENV.globals.update({'uri_for': webapp2.uri_for})
JINJA_ENV.assets_environment = WEBASSETS_ENV
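
# Hedged usage sketch (the template name is an assumption, not from the project):
#
#     template = JINJA_ENV.get_template('index.html')
#     html = template.render({'title': 'Home'})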
| 22 | 56 | 0.742424 | 56 | 462 | 5.964286 | 0.571429 | 0.071856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017903 | 0.15368 | 462 | 20 | 57 | 23.1 | 0.836317 | 0 | 0 | 0 | 0 | 0 | 0.158009 | 0.123377 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.357143 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
14878c677e10667944d3fa541d7f4f2e7bfbbbc0 | 1,479 | py | Python | Mundo 3/ex094.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | Mundo 3/ex094.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | Mundo 3/ex094.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | # Crie um programa que leia nome, sexo e idade de várias pessoas, guardando os dados
# de cada pessoa em um dicionário e todos os dicionários em uma lista. No final, mostre:
# A) Quantas pessoas foram cadastradas B) A média de idade C) Uma lista com as mulheres
# D) Uma lista de pessoas com idade acima da média
dados = dict()
lista = list()
somaIdade = media = 0
while True:
dados['nome'] = str(input('Nome: '))
while True:
dados['sexo'] = str(input('Sexo [M/F]: ')).upper()
if dados['sexo'] in 'MF':
break
print('ERRO! Por favor, Digite apenas M ou F.')
if dados['sexo'] in 'N':
break
dados['idade'] = int(input('Idade: '))
somaIdade += dados['idade']
lista.append(dados.copy())
while True:
resp = str(input('Quer continuar? [S/N] ')).upper()
if resp in 'SN':
break
print('ERRO! Responda apenas S ou N.')
if resp in 'N':
break
media = somaIdade / len(lista)
print('-=' * 30)
print(f'A) A quantidade de pessoas cadastradas foi: {len(lista)}')
print(f'B) A média de idade é: {media:5.2f} anos.')
print('C) As mulheres cadastradas são: ', end='')
for p in lista:
if p['sexo'] == 'F':
print(f'{p["nome"]}; ', end='')
print()
print('D) As pessoas que estão acima da média de idade são: ')
for p in lista:
if p['idade'] >= media:
for k, v in p.items():
print(f'{k} = {v}; ', end='')
print()
print('<< ENCERRADO >>')
| 33.613636 | 88 | 0.584178 | 225 | 1,479 | 3.84 | 0.395556 | 0.027778 | 0.041667 | 0.020833 | 0.064815 | 0.032407 | 0 | 0 | 0 | 0 | 0 | 0.004541 | 0.255578 | 1,479 | 43 | 89 | 34.395349 | 0.7802 | 0.20622 | 0 | 0.289474 | 0 | 0 | 0.324209 | 0 | 0 | 0 | 0 | 0.023256 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.315789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
148a037daf2a31f1b0747c7a72dcdff26d1106d3 | 959 | gyp | Python | allofw.node/binding.gyp | donghaoren/AllofwModule | 4367327cda0605aad53469294ed8751f8befbdc3 | [
"Unlicense"
] | 3 | 2016-05-04T23:23:48.000Z | 2021-08-03T21:48:07.000Z | allofw.node/binding.gyp | donghaoren/AllofwModule | 4367327cda0605aad53469294ed8751f8befbdc3 | [
"Unlicense"
] | null | null | null | allofw.node/binding.gyp | donghaoren/AllofwModule | 4367327cda0605aad53469294ed8751f8befbdc3 | [
"Unlicense"
] | 2 | 2016-01-31T04:06:51.000Z | 2016-09-30T16:38:36.000Z | {
"targets": [
{
"target_name": "allofw",
"include_dirs": [
"<!@(pkg-config liballofw --cflags-only-I | sed s/-I//g)",
"<!(node -e \"require('nan')\")"
],
"libraries": [
"<!@(pkg-config liballofw --libs)",
"<!@(pkg-config glew --libs)",
],
"cflags!": [ "-fno-exceptions", "-fno-rtti" ],
"cflags_cc!": [ "-fno-exceptions", "-fno-rtti" ],
"cflags_cc": [
"-std=c++11"
],
'conditions': [
[ 'OS=="mac"', {
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS' : ['-std=c++11'],
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES'
},
} ],
],
"sources": [
"src/allofw.cpp",
"src/node_graphics.cpp",
"src/node_sharedmemory.cpp",
"src/node_opengl.cpp",
"src/node_omnistereo.cpp",
"src/gl3binding/glbind.cpp"
]
}
]
}
| 25.236842 | 66 | 0.448384 | 89 | 959 | 4.651685 | 0.539326 | 0.072464 | 0.096618 | 0.096618 | 0.135266 | 0.135266 | 0 | 0 | 0 | 0 | 0 | 0.007837 | 0.334724 | 959 | 37 | 67 | 25.918919 | 0.641066 | 0 | 0 | 0.108108 | 0 | 0 | 0.527633 | 0.124088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1499e05f9e701f51c62ae9adf1ada191425c6e1e | 326 | py | Python | mindpile/Utility/memo.py | MelbourneHighSchoolRobotics/Mindpile | 9dd0a14ee336810c2b62826afff4da8719455ba0 | [
"BSD-3-Clause"
] | 2 | 2021-02-16T22:21:36.000Z | 2021-02-17T03:16:30.000Z | mindpile/Utility/memo.py | MelbourneHighSchoolRobotics/Mindpile | 9dd0a14ee336810c2b62826afff4da8719455ba0 | [
"BSD-3-Clause"
] | 2 | 2021-02-17T03:20:24.000Z | 2021-04-30T06:46:02.000Z | mindpile/Utility/memo.py | MelbourneHighSchoolRobotics/mindpile | 9dd0a14ee336810c2b62826afff4da8719455ba0 | [
"BSD-3-Clause"
] | null | null | null | import functools
def memoise(func):
    """Run func once and cache the result; every later call returns the
    cached value (call arguments are not part of the cache key)."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.hasResult:
wrapper.result = func(*args, **kwargs)
wrapper.hasResult = True
return wrapper.result
wrapper.result = None
wrapper.hasResult = False
return wrapper
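

# Hedged usage sketch (not from the original module). Note that arguments are
# ignored after the first call: the wrapped function runs exactly once.
@memoise
def expensive_setup():
    # Computed on the first call only; later calls reuse the stored result
    return sum(range(1000))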
| 23.285714 | 50 | 0.628834 | 35 | 326 | 5.857143 | 0.485714 | 0.234146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.276074 | 326 | 13 | 51 | 25.076923 | 0.868644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
149df9e3f7439d9013fa722d9aa4c7ae4e678566 | 1,432 | py | Python | dataloaders.py | mrubio-chavarria/project_2 | c78d5e4048af193770d52efb2c5a132f6eb6370c | [
"MIT"
] | null | null | null | dataloaders.py | mrubio-chavarria/project_2 | c78d5e4048af193770d52efb2c5a132f6eb6370c | [
"MIT"
] | null | null | null | dataloaders.py | mrubio-chavarria/project_2 | c78d5e4048af193770d52efb2c5a132f6eb6370c | [
"MIT"
] | null | null | null | #!/venv/bin python
"""
DESCRIPTION:
This file contains wrappers and variations on DataLoader.
"""
# Libraries
import os
from random import shuffle
import torch
import numpy as np
from torch.utils.data import Dataset
from resquiggle_utils import parse_resquiggle, window_resquiggle
from torch import nn
class CombinedDataLoader:
"""
DESCRIPTION:
"""
# Methods
def __init__(self, *args):
"""
DESCRIPTION:
"""
self.current_dataloader = 0
self.dataloaders = args
def __next__(self):
"""
DESCRIPTION:
"""
next_batch = next(iter(self.dataloaders[self.current_dataloader]))
self.current_dataloader = (self.current_dataloader + 1) % len(self.dataloaders)
return next_batch
class CustomisedDataLoader:
"""
DESCRIPTION:
"""
# Methods
def __init__(self, dataset, batch_size, sampler, collate_fn, shuffle):
self.dataset = dataset
self.batch_size = batch_size
self.sampler = sampler
self.collate_fn = collate_fn
self.shuffle = shuffle
self.sampled_data = self.sampler(self.dataset, self.batch_size, shuffle=self.shuffle)
def __iter__(self):
for batch in self.sampled_data:
if not batch:
raise StopIteration
yield self.collate_fn(batch)
def __next__(self):
return next(iter(self))
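
# Hedged usage sketch (not from the original file): chunk_sampler below is a
# made-up minimal sampler matching the sampler(dataset, batch_size, shuffle=...)
# call signature used above.
#
#     def chunk_sampler(dataset, batch_size, shuffle=False):
#         return [dataset[i:i + batch_size]
#                 for i in range(0, len(dataset), batch_size)]
#
#     loader = CustomisedDataLoader([1, 2, 3, 4], 2, chunk_sampler,
#                                   collate_fn=list, shuffle=False)
#     for batch in loader:
#         print(batch)   # [1, 2] then [3, 4]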
| 22.730159 | 93 | 0.639665 | 157 | 1,432 | 5.585987 | 0.369427 | 0.050171 | 0.095781 | 0.057013 | 0.13797 | 0.071836 | 0 | 0 | 0 | 0 | 0 | 0.001923 | 0.273743 | 1,432 | 62 | 94 | 23.096774 | 0.841346 | 0.115922 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.233333 | 0.033333 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
14b1286bb9090e5e7de52578dcf3d83c33bdb3b1 | 2,781 | py | Python | src/py_dss_interface/models/Sensors/SensorsV.py | davilamds/py_dss_interface | a447c97787aeac962381db88dd622ccb235eef4b | [
"MIT"
] | 8 | 2020-08-15T12:56:03.000Z | 2022-01-04T15:51:14.000Z | src/py_dss_interface/models/Sensors/SensorsV.py | rodolfoplondero/py_dss_interface | cb6771b34ed322a5df7ef1cc194611e794f26441 | [
"MIT"
] | 24 | 2021-04-24T18:33:19.000Z | 2021-11-13T14:59:54.000Z | src/py_dss_interface/models/Sensors/SensorsV.py | rodolfoplondero/py_dss_interface | cb6771b34ed322a5df7ef1cc194611e794f26441 | [
"MIT"
] | 7 | 2020-08-15T12:56:04.000Z | 2021-10-04T16:14:30.000Z | # -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
import ctypes
from py_dss_interface.models import Bridge
from py_dss_interface.models.Base import Base
from py_dss_interface.models.Sensors.SensorsS import SensorsS
from py_dss_interface.models.Text.Text import Text
class SensorsV(Base):
"""
This interface can be used to read/write certain properties of the active DSS object.
The structure of the interface is as follows:
void SensorsV(int32_t Parameter, VARIANT *Argument);
This interface returns a Variant with the result of the query according to the value of the variable Parameter,
    which is set internally by each of the wrapper methods below.
"""
def sensors_all_names(self):
"""Returns a variant array of sensor names."""
return Bridge.var_array_function(self.dss_obj.SensorsV, ctypes.c_int(0), ctypes.c_int(0), None)
def sensors_read_currents(self):
"""Gets an array of doubles for the line current measurements; don't use with KWS and KVARS."""
return Bridge.var_array_function(self.dss_obj.SensorsV, ctypes.c_int(1), ctypes.c_int(0), None)
def sensors_write_currents(self, argument):
"""Sets an array of doubles for the line current measurements; don't use with KWS and KVARS."""
argument = Base.check_string_param(argument)
t = Text(self.dss_obj)
sen = SensorsS(self.dss_obj)
sen_name = sen.sensors_read_name()
return t.text(f'edit Sensor.{sen_name} currents = {argument}')
def sensors_read_kvars(self):
"""Gets an array of doubles for Q measurements; overwrites currents with a new estimate using KWS."""
return Bridge.var_array_function(self.dss_obj.SensorsV, ctypes.c_int(3), ctypes.c_int(0), None)
def sensors_write_kvars(self, argument):
"""Sets an array of doubles for Q measurements; overwrites currents with a new estimate using KWS."""
argument = Base.check_string_param(argument)
t = Text(self.dss_obj)
sen = SensorsS(self.dss_obj)
sen_name = sen.sensors_read_name()
return t.text(f'edit Sensor.{sen_name} kvars = {argument}')
def sensors_read_kws(self):
"""Gets an array of doubles for P measurements; overwrites currents with a new estimate using KVARS."""
return Bridge.var_array_function(self.dss_obj.SensorsV, ctypes.c_int(5), ctypes.c_int(0), None)
def sensors_write_kws(self, argument):
"""Sets an array of doubles for P measurements; overwrites currents with a new estimate using KVARS."""
argument = Base.check_string_param(argument)
t = Text(self.dss_obj)
sen = SensorsS(self.dss_obj)
sen_name = sen.sensors_read_name()
return t.text(f'edit Sensor.{sen_name} kws = {argument}')
| 44.142857 | 115 | 0.703344 | 415 | 2,781 | 4.551807 | 0.245783 | 0.037057 | 0.052938 | 0.050821 | 0.698253 | 0.647433 | 0.647433 | 0.621493 | 0.548438 | 0.548438 | 0 | 0.008562 | 0.202086 | 2,781 | 62 | 116 | 44.854839 | 0.842722 | 0.360662 | 0 | 0.375 | 0 | 0 | 0.072813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.21875 | false | 0 | 0.15625 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14b3ea58e56ed94f8934f892735450dec9e7e14d | 629 | py | Python | config/wsgi.py | e2718281/template_test | 3d47741e657138b1ccfee7af19476a796a099b2b | [
"MIT"
] | null | null | null | config/wsgi.py | e2718281/template_test | 3d47741e657138b1ccfee7af19476a796a099b2b | [
"MIT"
] | null | null | null | config/wsgi.py | e2718281/template_test | 3d47741e657138b1ccfee7af19476a796a099b2b | [
"MIT"
] | null | null | null | """
WSGI config for test_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# test_project directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'test_project'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
application = get_wsgi_application()
| 27.347826 | 78 | 0.779014 | 93 | 629 | 5.107527 | 0.612903 | 0.069474 | 0.075789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005319 | 0.103339 | 629 | 22 | 79 | 28.590909 | 0.836879 | 0.475358 | 0 | 0 | 0 | 0 | 0.190031 | 0.068536 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
14b53da5ae7dc9a0ddd6d2cbcafa9f97c4f9a304 | 747 | py | Python | app/app/migrations/0001_initial.py | poornachandrakashi/covid-cough-prediction | 3466d21c1e9e9931484db486116afe8f591e6ab8 | [
"MIT"
] | 3 | 2020-04-05T21:09:07.000Z | 2022-02-15T15:23:37.000Z | app/app/migrations/0001_initial.py | poornachandrakashi/covid-cough-prediction | 3466d21c1e9e9931484db486116afe8f591e6ab8 | [
"MIT"
] | 2 | 2020-06-06T01:42:31.000Z | 2021-06-10T22:43:54.000Z | app/app/migrations/0001_initial.py | poornachandrakashi/covid-cough-prediction | 3466d21c1e9e9931484db486116afe8f591e6ab8 | [
"MIT"
] | 3 | 2020-04-08T12:53:47.000Z | 2021-08-10T11:10:32.000Z | # Generated by Django 2.1.1 on 2020-04-05 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('location', models.CharField(max_length=200)),
('email', models.EmailField(max_length=200)),
('cough', models.FileField(upload_to='')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| 28.730769 | 114 | 0.570281 | 76 | 747 | 5.473684 | 0.671053 | 0.064904 | 0.086538 | 0.115385 | 0.129808 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045541 | 0.294511 | 747 | 25 | 115 | 29.88 | 0.743833 | 0.060241 | 0 | 0 | 1 | 0 | 0.064286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14b6e3bd8ac3a7eb4e7e8620a55ce89ce7b5721c | 1,534 | py | Python | apps/brew/settings.py | martync/zython | e008bbb33e212f0856e85b8594003402e0a635c0 | [
"Beerware"
] | null | null | null | apps/brew/settings.py | martync/zython | e008bbb33e212f0856e85b8594003402e0a635c0 | [
"Beerware"
] | 5 | 2020-06-05T21:26:16.000Z | 2022-01-13T01:21:27.000Z | apps/brew/settings.py | martync/zython | e008bbb33e212f0856e85b8594003402e0a635c0 | [
"Beerware"
] | null | null | null | SRM_TO_HEX = {
"0": "#FFFFFF",
"1": "#F3F993",
"2": "#F5F75C",
"3": "#F6F513",
"4": "#EAE615",
"5": "#E0D01B",
"6": "#D5BC26",
"7": "#CDAA37",
"8": "#C1963C",
"9": "#BE8C3A",
"10": "#BE823A",
"11": "#C17A37",
"12": "#BF7138",
"13": "#BC6733",
"14": "#B26033",
"15": "#A85839",
"16": "#985336",
"17": "#8D4C32",
"18": "#7C452D",
"19": "#6B3A1E",
"20": "#5D341A",
"21": "#4E2A0C",
"22": "#4A2727",
"23": "#361F1B",
"24": "#261716",
"25": "#231716",
"26": "#19100F",
"27": "#16100F",
"28": "#120D0C",
"29": "#100B0A",
"30": "#050B0A"
}
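
# Hedged helper sketch (not part of the original module): clamp an SRM value
# into the table's 0-30 range and look up its hex colour.
def srm_to_hex(srm):
    key = str(max(0, min(30, int(round(srm)))))
    return SRM_TO_HEX[key]
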
WATER_L_PER_GRAIN_KG = 2.5
MAIN_STYLES = {
"1": "LIGHT LAGER",
"2": "PILSNER",
"3": "EUROPEAN AMBER LAGER",
"4": "DARK LAGER",
"5": "BOCK",
"6": "LIGHT HYBRID BEER",
"7": "AMBER HYBRID BEER",
"8": "ENGLISH PALE ALE",
"9": "SCOTTISH AND IRISH ALE",
"10": "AMERICAN ALE",
"11": "ENGLISH BROWN ALE",
"12": "PORTER",
"13": "STOUT",
"14": "INDIA PALE ALE (IPA)",
"15": "GERMAN WHEAT AND RYE BEER",
"16": "BELGIAN AND FRENCH ALE",
"17": "SOUR ALE",
"18": "BELGIAN STRONG ALE",
"19": "STRONG ALE",
"20": "FRUIT BEER",
"21": "SPICE / HERB / VEGETABLE BEER",
"22": "SMOKE-FLAVORED AND WOOD-AGED BEER",
"23": "SPECIALTY BEER",
"24": "TRADITIONAL MEAD",
"25": "MELOMEL (FRUIT MEAD)",
"26": "OTHER MEAD",
"27": "STANDARD CIDER AND PERRY",
"28": "SPECIALTY CIDER AND PERRY"
}
| 22.895522 | 46 | 0.468057 | 180 | 1,534 | 3.95 | 0.644444 | 0.028129 | 0.036568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197851 | 0.271838 | 1,534 | 66 | 47 | 23.242424 | 0.438675 | 0 | 0 | 0 | 0 | 0 | 0.498044 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14b9e8b4dcb1e20a307af26c71e8f867c0782a0d | 285 | py | Python | tdd/run.py | LarsAsplund/vunit_tdd | db16c32968675abdacc9939e3573f37a2cbaf431 | [
"MIT"
] | 10 | 2020-08-12T22:27:15.000Z | 2022-03-31T13:34:12.000Z | tdd/run.py | LarsAsplund/vunit_tdd | db16c32968675abdacc9939e3573f37a2cbaf431 | [
"MIT"
] | 7 | 2021-09-06T05:30:07.000Z | 2021-09-08T02:25:41.000Z | tdd/run.py | LarsAsplund/vunit_tdd | db16c32968675abdacc9939e3573f37a2cbaf431 | [
"MIT"
] | 3 | 2021-05-27T11:31:45.000Z | 2021-05-28T07:22:08.000Z | #!/usr/bin/env python3
"""VUnit run script."""
from pathlib import Path
from vunit import VUnit
prj = VUnit.from_argv()
lib = prj.add_library("lib")
root = Path(__file__).parent
lib.add_source_files(root / "src" / "*.vhd")
lib.add_source_files(root / "test" / "*.vhd")
prj.main()
| 17.8125 | 45 | 0.687719 | 44 | 285 | 4.227273 | 0.568182 | 0.064516 | 0.129032 | 0.182796 | 0.225806 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004049 | 0.133333 | 285 | 15 | 46 | 19 | 0.748988 | 0.136842 | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
14bd6078e39ef46714d7bb697f11da50734262f2 | 1,810 | py | Python | tests/test_env_var.py | sfelix-martins/laradock-up-env | 7e7e3e513083afedf724a9b4e2dd8c6ff0b9eb71 | [
"MIT"
] | 2 | 2020-10-06T15:40:43.000Z | 2020-11-27T12:13:10.000Z | tests/test_env_var.py | sfelix-martins/laradock-up-env | 7e7e3e513083afedf724a9b4e2dd8c6ff0b9eb71 | [
"MIT"
] | 5 | 2019-11-10T12:08:35.000Z | 2019-11-10T13:34:54.000Z | tests/test_env_var.py | sfelix-martins/laradock-multiple-env | 7e7e3e513083afedf724a9b4e2dd8c6ff0b9eb71 | [
"MIT"
] | 1 | 2020-11-27T12:13:13.000Z | 2020-11-27T12:13:13.000Z | import unittest
from multienv.config import Config
from multienv.env_var import EnvVar
from multienv.exceptions import InvalidYamlFileException, \
EnvVarContainerBuildNotFoundException
class EnvVarTestCase(unittest.TestCase):
def test_get_containers_to_rebuild_with_existent_env_var(self):
config = Config(
env_var_container_build='tests/fixtures/'
'env_var_container_build.yml')
env_var = EnvVar('PHP_VERSION', 7.1, config)
self.assertEqual(
env_var.get_containers_to_rebuild(),
['php-fpm', 'workspace']
)
def test_get_containers_to_rebuild_with_not_exists_env_var(self):
config = Config(
env_var_container_build='tests/fixtures/'
'env_var_container_build.yml')
env_var = EnvVar('MYSQL_VERSION', 5.7, config)
self.assertEqual(env_var.get_containers_to_rebuild(), [])
def test_get_containers_to_rebuild_with_invalid_config(self):
with self.assertRaises(InvalidYamlFileException):
config = Config(
env_var_container_build='tests/fixtures'
'/invalid_env_var_container_build.yml')
env_var = EnvVar('MYSQL_VERSION', 5.7, config)
env_var.get_containers_to_rebuild()
def test_get_containers_to_rebuild_with_not_existent_config(self):
with self.assertRaises(EnvVarContainerBuildNotFoundException):
config = Config(
env_var_container_build='not_found/'
'env_var_container_build.yml')
env_var = EnvVar('MYSQL_VERSION', 5.7, config)
env_var.get_containers_to_rebuild()
if __name__ == '__main__':
unittest.main()
| 39.347826 | 79 | 0.649724 | 196 | 1,810 | 5.535714 | 0.229592 | 0.105069 | 0.110599 | 0.162212 | 0.675576 | 0.620277 | 0.590783 | 0.560369 | 0.482949 | 0.423963 | 0 | 0.00607 | 0.271823 | 1,810 | 45 | 80 | 40.222222 | 0.817147 | 0 | 0 | 0.378378 | 0 | 0 | 0.135359 | 0.064641 | 0 | 0 | 0 | 0 | 0.108108 | 1 | 0.108108 | false | 0 | 0.108108 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1ad2c6a11db3d3af3b1a5dc0792859101e67bf90 | 1,964 | py | Python | tools/tflitefile_tool/parser/tflite_parser.py | YongseopKim/ONE | 65d4a582621deb0a594343d9cc40ec777ad77e57 | [
"Apache-2.0"
] | null | null | null | tools/tflitefile_tool/parser/tflite_parser.py | YongseopKim/ONE | 65d4a582621deb0a594343d9cc40ec777ad77e57 | [
"Apache-2.0"
] | 36 | 2020-06-17T04:48:55.000Z | 2022-02-07T12:04:10.000Z | tools/tflitefile_tool/parser/tflite_parser.py | YongseopKim/ONE | 65d4a582621deb0a594343d9cc40ec777ad77e57 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Do not use this module
import tflite.Model
import tflite.SubGraph
from ir import graph_stats
from .subgraph_parser import SubgraphParser
class TFLiteParser(object):
def __init__(self, model_file):
self.model_file = model_file
def Parse(self):
# Generate Model: top structure of tflite model file
buf = self.model_file.read()
buf = bytearray(buf)
tf_model = tflite.Model.Model.GetRootAsModel(buf, 0)
stats = graph_stats.GraphStats()
        # A model file can contain several subgraphs
subg_list = list()
for subgraph_index in range(tf_model.SubgraphsLength()):
tf_subgraph = tf_model.Subgraphs(subgraph_index)
model_name = "#{0} {1}".format(subgraph_index, tf_subgraph.Name())
# 0th subgraph is main subgraph
if (subgraph_index == 0):
model_name += " (MAIN)"
# Parse Subgraphs
subg_parser = SubgraphParser(tf_model, tf_subgraph)
subg_parser.Parse()
stats += graph_stats.CalcGraphStats(subg_parser)
subg = (model_name, subg_parser)
subg_list.append(subg)
# Validate
assert subg_list is not None
assert len(subg_list) > 0
assert stats is not None
return (subg_list, stats)
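
# Hedged usage sketch (not from the original file; the model path is an
# assumption). Parse() calls read() on model_file, so it expects an open
# binary file object:
#
#     with open('model.tflite', 'rb') as f:
#         subg_list, stats = TFLiteParser(f).Parse()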
| 33.862069 | 78 | 0.67057 | 260 | 1,964 | 4.930769 | 0.480769 | 0.046802 | 0.030421 | 0.024961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009524 | 0.251527 | 1,964 | 57 | 79 | 34.45614 | 0.862585 | 0.395621 | 0 | 0 | 0 | 0 | 0.012853 | 0 | 0 | 0 | 0 | 0.017544 | 0.111111 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1ad93b581be550c1b778274bfd4d391d94cbf882 | 1,953 | py | Python | web_Project/Data_Predict/predict_lead.py | mscenter1/pigpriceML | d51f645a590cebd65126e867d6ef0d3d437e9bc7 | [
"MIT"
] | 8 | 2019-02-02T11:41:28.000Z | 2022-03-10T14:15:09.000Z | web_Project/Data_Predict/predict_lead.py | mscenter1/pigpriceML | d51f645a590cebd65126e867d6ef0d3d437e9bc7 | [
"MIT"
] | 2 | 2019-02-01T07:57:57.000Z | 2021-03-01T06:16:35.000Z | web_Project/Data_Predict/predict_lead.py | mscenter1/pigpriceML | d51f645a590cebd65126e867d6ef0d3d437e9bc7 | [
"MIT"
] | 6 | 2019-02-01T07:17:38.000Z | 2021-12-28T02:37:29.000Z | # -*- coding: utf-8 -*-
# System modules
import sys
# Data processing modules
import pandas as pd
# Import external modules
# Data preparation
from predict_prepare import Predict_Prepare as Prepare
# Get price prediction results
from predict_predict import Predict_Predict as Predict
class Predict_Lead:
def __init__(self):
pass
    # Function called by other packages
def predict_result(self):
        # The model makes predictions over two time periods
period = [1, 2]
        # Instantiate the data-preparation module and the prediction module
PrePare_Data = Prepare()
Predict_Data = Predict()
        # Get predictions for the first time period
        # Prepare the sample dataset for model prediction
        # History_Model11, Predict_Model11: independent and dependent variables used by the hog price model
        # Last_data_model11: timestamp of the last hog price record in the raw dataset
        # History_Model21, Predict_Model21: independent and dependent variables used by the corn price model
        # Last_data_model21: timestamp of the last corn price record in the raw dataset
History_Model11, Predict_Model11, Last_data_model11, History_Model21, Predict_Model21, Last_data_model21 = PrePare_Data.variables_prepar(period[0])
        # Get prediction results
        # predict_result1: predicted hog and corn prices
        # y_test_compare11: hog model training results vs. actual prices for the first period
        # y_test_compare12: corn model training results vs. actual prices for the first period
predict_result1, y_test_compare11, y_test_compare12 = Predict_Data.predict_result(History_Model11, Last_data_model11, Predict_Model11, History_Model21, Last_data_model21, Predict_Model21, period[0])
        # Get predictions for the second time period
        # Prepare the sample dataset for model prediction
History_Model12, Predict_Model12, Last_data_model12, History_Model22, Predict_Model22, Last_data_model22 = PrePare_Data.variables_prepar(period[1])
        # Get prediction results
predict_result2, y_test_compare21, y_test_compare22 = Predict_Data.predict_result(History_Model12, Last_data_model12, Predict_Model12, History_Model22, Last_data_model22, Predict_Model22, period[1])
        # Combine the predictions from both time periods
predict_result = pd.concat([predict_result1, predict_result2])
predict_result = predict_result.reset_index(drop=True)
return predict_result, Last_data_model11, y_test_compare11, y_test_compare12
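# Illustrative usage sketch (assumed caller, not part of this module):
#   predict_result, last_date, hog_compare, corn_compare = Predict_Lead().predict_result()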
| 35.509091 | 206 | 0.729647 | 215 | 1,953 | 6.218605 | 0.306977 | 0.065819 | 0.044877 | 0.041885 | 0.136126 | 0.041885 | 0 | 0 | 0 | 0 | 0 | 0.058102 | 0.206861 | 1,953 | 55 | 207 | 35.509091 | 0.805036 | 0.240143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0.055556 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
1ada20acc9ce4a88cc954468b6dae92540d23e52 | 4,255 | py | Python | aerforge/text.py | Aermoss/AerForge | 1f57ff69f3b2f8052a2a266d3e5c04cfa4ec0e99 | [
"MIT"
] | 2 | 2021-09-24T12:57:07.000Z | 2022-01-14T00:47:43.000Z | aerforge/text.py | Aermoss/AerForge | 1f57ff69f3b2f8052a2a266d3e5c04cfa4ec0e99 | [
"MIT"
] | null | null | null | aerforge/text.py | Aermoss/AerForge | 1f57ff69f3b2f8052a2a266d3e5c04cfa4ec0e99 | [
"MIT"
] | null | null | null | import pygame
from aerforge.color import *
from aerforge.error import *
class Text:
def __init__(self, window, text, font_size = 24, font_file = None, font_name = "arial", bold = False, italic = False, underline = False, color = Color(240, 240, 240), x = 0, y = 0, parent = None, add_to_objects = True):
self.window = window
self.parent = parent
self.x = x
self.y = y
self.font_file = font_file
self.font_name = font_name
self.font_size = font_size
self.bold = bold
self.italic = italic
self.underline = underline
self.load_font(self.font_file, self.font_name)
self.set_bold(self.bold)
self.set_italic(self.italic)
self.set_underline(self.underline)
self.color = color
self.text = text
self.scripts = []
self.destroyed = False
self.visible = True
self.add_to_objects = add_to_objects
if self.add_to_objects:
self.window.objects.append(self)
def update(self):
pass
def draw(self):
if not self.destroyed:
if self.visible:
if self.parent != None:
self.x += self.parent.x
self.y += self.parent.y
rendered_text = self.font.render(self.text, True, self.color.get())
self.window.window.blit(rendered_text, (self.x, self.y))
if self.parent != None:
self.x -= self.parent.x
self.y -= self.parent.y
def set_color(self, color):
self.color = color
def get_color(self):
return self.color
def set_text(self, text):
self.text = text
def get_text(self):
return self.text
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_x(self, x):
self.x = x
def set_y(self, y):
self.y = y
def get_font_size(self):
return self.font_size
def set_font_size(self, font_size):
self.font_size = font_size
self.load_font(self.font_file, self.font_name)
def get_font_file(self):
return self.font_file
def get_font_name(self):
return self.font_name
def set_bold(self, bold):
self.bold = bold
self.font.set_bold(self.bold)
def set_italic(self, italic):
self.italic = italic
self.font.set_italic(self.italic)
def set_underline(self, underline):
self.underline = underline
self.font.set_underline(self.underline)
def get_bold(self):
return self.bold
def get_italic(self):
return self.italic
def get_underline(self):
return self.underline
def load_font(self, font_file = None, font_name = "arial"):
self.font_file = font_file
self.font_name = font_name
if self.font_file != None:
self.font = pygame.font.Font(self.font_file, self.font_size)
else:
self.font = pygame.font.SysFont(self.font_name, self.font_size)
def get_width(self):
rendered_text = self.font.render(self.text, True, self.color.get())
return rendered_text.get_width()
def get_height(self):
rendered_text = self.font.render(self.text, True, self.color.get())
return rendered_text.get_height()
def center(self):
self.x = self.window.width / 2 - self.get_width() / 2
self.y = self.window.height / 2 - self.get_height() / 2
def center_x(self):
self.x = self.window.width / 2 - self.get_width() / 2
def center_y(self):
self.y = self.window.height / 2 - self.get_height() / 2
def destroy(self):
self.destroyed = True
if self.add_to_objects:
try:
self.window.objects.pop(self.window.objects.index(self))
            except ValueError:
pass
def add_script(self, script):
self.scripts.append(script)
def remove_script(self, script):
self.scripts.pop(self.scripts.index(script)) | 26.93038 | 224 | 0.568038 | 555 | 4,255 | 4.187387 | 0.10991 | 0.096386 | 0.060241 | 0.034423 | 0.432014 | 0.309811 | 0.259036 | 0.259036 | 0.259036 | 0.228055 | 0 | 0.007384 | 0.33161 | 4,255 | 158 | 225 | 26.93038 | 0.809775 | 0 | 0 | 0.315315 | 0 | 0 | 0.00244 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.27027 | false | 0.018018 | 0.027027 | 0.09009 | 0.414414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1adbac124bbaf8f82229656776f6cf0f6360b65e | 500 | py | Python | setup.py | ruivieira/python-als | c98914991a0812084c85e0ded621334c24866b54 | [
"Apache-2.0"
] | 2 | 2021-03-02T04:44:08.000Z | 2021-08-25T09:42:06.000Z | setup.py | ruivieira/python-als | c98914991a0812084c85e0ded621334c24866b54 | [
"Apache-2.0"
] | null | null | null | setup.py | ruivieira/python-als | c98914991a0812084c85e0ded621334c24866b54 | [
"Apache-2.0"
] | 1 | 2019-05-19T10:51:53.000Z | 2019-05-19T10:51:53.000Z | from distutils.core import setup
setup(
name='als',
packages=['als'],
version='0.0.2',
description='Python library for Alternating Least Squares (ALS)',
author='Rui Vieira',
author_email='ruidevieira@googlemail.com',
url='https://github.com/ruivieira/python-als',
download_url='https://github.com/'
'ruivieira/python-als/archive/0.0.2.tar.gz',
keywords=['als', 'recommendation', 'scientific', 'machine-learning', 'models'],
classifiers=[],
)
| 31.25 | 83 | 0.652 | 59 | 500 | 5.491525 | 0.677966 | 0.012346 | 0.018519 | 0.104938 | 0.216049 | 0.216049 | 0.216049 | 0 | 0 | 0 | 0 | 0.014493 | 0.172 | 500 | 15 | 84 | 33.333333 | 0.768116 | 0 | 0 | 0 | 0 | 0 | 0.49 | 0.134 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1adbdfb4bc66d95866bcc5ed925f8780b4dac055 | 318 | py | Python | npy2f32.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | null | null | null | npy2f32.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | 1 | 2020-06-17T12:07:27.000Z | 2020-06-17T12:07:27.000Z | npy2f32.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import sys
import os
dir_name = os.path.dirname(os.path.realpath(__file__))
npy_data = np.load(os.path.join(dir_name, sys.argv[1]))
npy_data = npy_data.astype(np.float32)
npy_data = npy_data.reshape((-1,))
npy_data.tofile(os.path.join(dir_name, sys.argv[1].split(".")[0] + ".f32"))
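# Usage sketch (illustrative): convert a .npy array to a raw float32 file
#   python npy2f32.py features.npy   ->   writes features.f32 next to this script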
| 28.909091 | 75 | 0.732704 | 58 | 318 | 3.793103 | 0.448276 | 0.190909 | 0.090909 | 0.118182 | 0.227273 | 0.227273 | 0.227273 | 0.227273 | 0 | 0 | 0 | 0.027586 | 0.08805 | 318 | 10 | 76 | 31.8 | 0.731034 | 0 | 0 | 0 | 0 | 0 | 0.015723 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.444444 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
1ae5744d76fd5fe30712a3dceb1ec7d3ea37f9e1 | 1,394 | py | Python | labour/helpers.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | labour/helpers.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | labour/helpers.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | from functools import wraps
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from access.cbac import default_cbac_required
from core.models import Event
from .views.admin_menu_items import labour_admin_menu_items
def labour_admin_required(view_func):
@wraps(view_func)
@default_cbac_required
def wrapper(request, *args, **kwargs):
kwargs.pop('event_slug')
event = request.event
meta = event.labour_event_meta
if not meta:
            messages.error(request, "Tämä tapahtuma ei käytä Kompassia työvoiman hallintaan.")  # "This event does not use Kompassi for labour management."
return redirect('core_event_view', event.slug)
vars = dict(
event=event,
admin_menu_items=labour_admin_menu_items(request, event),
            admin_title='Työvoiman hallinta'  # "Labour management"
)
return view_func(request, vars, event, *args, **kwargs)
return wrapper
def labour_event_required(view_func):
@wraps(view_func)
def wrapper(request, event_slug, *args, **kwargs):
event = get_object_or_404(Event, slug=event_slug)
meta = event.labour_event_meta
if not meta:
            messages.error(request, "Tämä tapahtuma ei käytä Kompassia työvoiman hallintaan.")  # "This event does not use Kompassi for labour management."
return redirect('core_event_view', event.slug)
return view_func(request, event, *args, **kwargs)
return wrapper
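# Illustrative usage sketch (the view names below are assumptions, not from this module):
#   @labour_event_required
#   def labour_qualifications_view(request, event):
#       ...
#   @labour_admin_required
#   def labour_admin_dashboard_view(request, vars, event):
#       ...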
| 30.304348 | 94 | 0.691535 | 175 | 1,394 | 5.268571 | 0.285714 | 0.052061 | 0.060738 | 0.030369 | 0.420824 | 0.360087 | 0.29718 | 0.29718 | 0.29718 | 0.29718 | 0 | 0.005581 | 0.228838 | 1,394 | 45 | 95 | 30.977778 | 0.852093 | 0 | 0 | 0.363636 | 0 | 0 | 0.120517 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0.060606 | 0.181818 | 0 | 0.484848 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
1ae6b0af984b4e774a2ea4fe2177c6d38cd7328b | 411 | py | Python | mnist/knows.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | mnist/knows.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | mnist/knows.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
a = np.arange(15)
out = a.reshape(5, 3)
c = np.arange(15) / 2
y_onehot = c.reshape(5, 3)
out_tensor = tf.convert_to_tensor(out, dtype=tf.float32)
y_onehot_tensor = tf.convert_to_tensor(y_onehot, dtype=tf.float32)
# y_onehot = tf.one_hot(y_onehot_tensor, depth=3) # one-hot encoding
loss1 = tf.square(out_tensor - y_onehot_tensor)
loss2 = tf.reduce_sum(loss1) / 32
pass | 22.833333 | 66 | 0.737226 | 77 | 411 | 3.714286 | 0.428571 | 0.146853 | 0.136364 | 0.118881 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053521 | 0.136253 | 411 | 18 | 67 | 22.833333 | 0.752113 | 0.143552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.090909 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
1aec3c2b4298503556aef1b8d4f0b2abb934f5fa | 2,003 | py | Python | DeBERTa/data/data_sampler.py | tirkarthi/DeBERTa | c558ad99373dac695128c9ec45f39869aafd374e | [
"MIT"
] | 7 | 2021-02-04T01:26:55.000Z | 2021-11-23T00:38:47.000Z | DeBERTa/data/data_sampler.py | tirkarthi/DeBERTa | c558ad99373dac695128c9ec45f39869aafd374e | [
"MIT"
] | 1 | 2021-03-18T00:23:17.000Z | 2022-01-05T15:36:48.000Z | src/LASER/data/data_sampler.py | BigBird01/LASER | 57143200814583410acdd0c5ac0a0f8bab8a1f7e | [
"MIT"
] | null | null | null | #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Author: Pengcheng He (penhe@microsoft.com)
# Date: 05/15/2019
#
import os
import numpy as np
import math
import sys
from torch.utils.data import Sampler
__all__=['BatchSampler', 'DistributedBatchSampler', 'RandomSampler', 'SequentialSampler']
class BatchSampler(Sampler):
def __init__(self, sampler, batch_size):
self.sampler = sampler
self.batch_size = batch_size
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch)==self.batch_size:
yield batch
batch = []
if len(batch)>0:
yield batch
def __len__(self):
return (len(self.sampler) + self.batch_size - 1)//self.batch_size
class DistributedBatchSampler(Sampler):
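  """
  Splits each batch from the wrapped sampler across distributed ranks.
  A batch that does not divide evenly by world_size is dropped when
  drop_last is set, otherwise it is padded by repeating its first element.
  """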
def __init__(self, sampler, rank=0, world_size = 1, drop_last = False):
self.sampler = sampler
self.rank = rank
self.world_size = world_size
self.drop_last = drop_last
def __iter__(self):
for b in self.sampler:
if len(b)%self.world_size != 0:
if self.drop_last:
break
else:
b.extend([b[0] for _ in range(self.world_size-len(b)%self.world_size)])
chunk_size = len(b)//self.world_size
yield b[self.rank*chunk_size:(self.rank+1)*chunk_size]
def __len__(self):
return len(self.sampler)
class RandomSampler(Sampler):
def __init__(self, total_samples:int, data_seed:int = 0):
self.indices = np.array(np.arange(total_samples))
self.rng = np.random.RandomState(data_seed)
def __iter__(self):
self.rng.shuffle(self.indices)
for i in self.indices:
yield i
def __len__(self):
return len(self.indices)
class SequentialSampler(Sampler):
def __init__(self, total_samples:int):
self.indices = np.array(np.arange(total_samples))
def __iter__(self):
for i in self.indices:
yield i
def __len__(self):
return len(self.indices)
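# Minimal usage sketch (illustrative, not part of the original module):
#   sampler = BatchSampler(RandomSampler(total_samples=1000), batch_size=32)
#   dist_sampler = DistributedBatchSampler(sampler, rank=0, world_size=2)
#   for batch_indices in dist_sampler:
#       ...  # each rank receives its slice of every batch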
| 26.012987 | 89 | 0.683974 | 287 | 2,003 | 4.498258 | 0.285714 | 0.068164 | 0.050349 | 0.055771 | 0.323005 | 0.271108 | 0.238575 | 0.140976 | 0.082107 | 0.082107 | 0 | 0.010044 | 0.204693 | 2,003 | 76 | 90 | 26.355263 | 0.800377 | 0.089366 | 0 | 0.392857 | 0 | 0 | 0.035793 | 0.012665 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.089286 | 0.071429 | 0.446429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1af1ba1f73d50b880d12d443416c0d5ee955fd4e | 1,160 | bzl | Python | moq4/repo.bzl | tomaszstrejczek/rules_dotnet_3rd_party | 09f29f062d5250fe7cdc45be872ce9bd1562c60b | [
"Apache-2.0"
] | 1 | 2021-10-10T17:17:27.000Z | 2021-10-10T17:17:27.000Z | moq4/repo.bzl | tomaszstrejczek/rules_dotnet_3rd_party | 09f29f062d5250fe7cdc45be872ce9bd1562c60b | [
"Apache-2.0"
] | null | null | null | moq4/repo.bzl | tomaszstrejczek/rules_dotnet_3rd_party | 09f29f062d5250fe7cdc45be872ce9bd1562c60b | [
"Apache-2.0"
] | null | null | null | load("@io_bazel_rules_dotnet//dotnet:defs.bzl", "core_library", "core_resx", "core_xunit_test")
core_resx(
name = "core_resource",
src = ":src/Moq/Properties/Resources.resx",
identifier = "Moq.Properties.Resources.resources",
)
core_library(
name = "Moq.dll",
srcs = glob(["src/Moq/**/*.cs"]),
defines = [
"NETCORE",
],
keyfile = ":Moq.snk",
resources = [":core_resource"],
visibility = ["//visibility:public"],
nowarn = ["CS3027"],
deps = [
"@//ifluentinterface:IFluentInterface.dll",
"@TypeNameFormatter//:TypeNameFormatter.dll",
"@castle.core//:Castle.Core.dll",
"@core_sdk_stdlib//:libraryset",
],
)
core_xunit_test(
name = "Moq.Tests.dll",
srcs = glob(
["tests/Moq.Tests/**/*.cs"],
exclude = ["**/FSharpCompatibilityFixture.cs"],
),
defines = [
"NETCORE",
],
keyfile = ":Moq.snk",
nowarn = ["CS1701"],
visibility = ["//visibility:public"],
deps = [
":Moq.dll",
"@xunit.assert//:lib",
"@xunit.extensibility.core//:lib",
"@xunit.extensibility.execution//:lib",
],
)
| 25.217391 | 95 | 0.563793 | 110 | 1,160 | 5.809091 | 0.418182 | 0.034429 | 0.040689 | 0.071987 | 0.090767 | 0.090767 | 0 | 0 | 0 | 0 | 0 | 0.009029 | 0.236207 | 1,160 | 45 | 96 | 25.777778 | 0.71219 | 0 | 0 | 0.333333 | 0 | 0 | 0.49569 | 0.318966 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
1afe05c194caa5c442bb47f534efb7a249603873 | 3,846 | py | Python | pvcontrol/__main__.py | stephanme/pv-control | f6aab9800c154492f3b9e5b2cd21c7a87cf92e16 | [
"Apache-2.0"
] | null | null | null | pvcontrol/__main__.py | stephanme/pv-control | f6aab9800c154492f3b9e5b2cd21c7a87cf92e16 | [
"Apache-2.0"
] | null | null | null | pvcontrol/__main__.py | stephanme/pv-control | f6aab9800c154492f3b9e5b2cd21c7a87cf92e16 | [
"Apache-2.0"
] | null | null | null | import logging
# configure logging before initializing further modules
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(name)s - %(message)s")
logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
import argparse
import json
import flask
import flask_compress
from werkzeug.middleware.dispatcher import DispatcherMiddleware
import prometheus_client
from pvcontrol import views, relay
from pvcontrol.meter import MeterFactory
from pvcontrol.chargecontroller import ChargeControllerFactory
from pvcontrol.wallbox import WallboxFactory
from pvcontrol.car import CarFactory
from pvcontrol.scheduler import Scheduler
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="PV Control")
parser.add_argument("-m", "--meter", default="SimulatedMeter")
parser.add_argument("-w", "--wallbox", default="SimulatedWallbox")
parser.add_argument("-a", "--car", default="SimulatedCar")
parser.add_argument("-c", "--config", default="{}")
args = parser.parse_args()
logger.info("Starting pvcontrol")
logger.info(f"Meter: {args.meter}")
logger.info(f"Wallbox: {args.wallbox}")
logger.info(f"Car: {args.car}")
logger.info(f"config: {args.config}")
config = json.loads(args.config)
for c in ["wallbox", "meter", "car", "controller"]:
if c not in config:
config[c] = {}
wallbox = WallboxFactory.newWallbox(args.wallbox, **config["wallbox"])
meter = MeterFactory.newMeter(args.meter, wallbox, **config["meter"])
car = CarFactory.newCar(args.car, **config["car"])
controller = ChargeControllerFactory.newController(meter, wallbox, **config["controller"])
controller_scheduler = Scheduler(controller.get_config().cycle_time, controller.run)
controller_scheduler.start()
car_scheduler = Scheduler(car.get_config().cycle_time, car.read_data)
car_scheduler.start()
app = flask.Flask(__name__)
app.json_encoder = views.JSONEncoder
app.after_request(views.add_no_cache_header)
app.config["COMPRESS_MIN_SIZE"] = 2048
app.config["COMPRESS_MIMETYPES"] = ["text/html", "text/css", "application/json", "application/javascript", "image/vnd.microsoft.icon"]
compress = flask_compress.Compress()
compress.init_app(app)
app.add_url_rule("/", view_func=views.StaticResourcesView.as_view("get_index"), defaults={"path": "index.html"})
app.add_url_rule("/<path:path>", view_func=views.StaticResourcesView.as_view("get_static"))
app.add_url_rule("/api/pvcontrol", view_func=views.PvControlView.as_view("get_pvcontrol", meter, wallbox, controller, car))
app.add_url_rule("/api/pvcontrol/controller", view_func=views.PvControlConfigDataView.as_view("get_controller", controller))
app.add_url_rule("/api/pvcontrol/controller/desired_mode", view_func=views.PvControlChargeModeView.as_view("put_desired_mode", controller))
app.add_url_rule("/api/pvcontrol/controller/phase_mode", view_func=views.PvControlPhaseModeView.as_view("put_phase_mode", controller))
app.add_url_rule("/api/pvcontrol/meter", view_func=views.PvControlConfigDataView.as_view("get_meter", meter))
app.add_url_rule("/api/pvcontrol/wallbox", view_func=views.PvControlConfigDataView.as_view("get_wallbox", wallbox))
app.add_url_rule("/api/pvcontrol/car", view_func=views.PvControlConfigDataView.as_view("get_car", car))
# for testing only
app.add_url_rule("/api/pvcontrol/wallbox/car_status", view_func=views.PvControlCarStatusView.as_view("put_car_status", wallbox))
# Add prometheus wsgi middleware to route /metrics requests
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {"/metrics": prometheus_client.make_wsgi_app()})
app.run(host="0.0.0.0", port=8080)
controller_scheduler.stop()
car_scheduler.stop()
# disable charging to play it safe
# TODO: see ChargeMode.INIT handling
logger.info("Set wallbox.allow_charging=False on shutdown.")
wallbox.allow_charging(False)
relay.cleanup()
logger.info("Stopped pvcontrol")
| 46.337349 | 139 | 0.788612 | 506 | 3,846 | 5.798419 | 0.304348 | 0.02045 | 0.030675 | 0.044308 | 0.185412 | 0.185412 | 0.168371 | 0.045331 | 0 | 0 | 0 | 0.003626 | 0.067863 | 3,846 | 82 | 140 | 46.902439 | 0.814784 | 0.050962 | 0 | 0 | 0 | 0 | 0.235181 | 0.068606 | 0 | 0 | 0 | 0.012195 | 0 | 1 | 0 | false | 0 | 0.203125 | 0 | 0.203125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
210025e5881047a75dc28e56284192add56bd13d | 9,466 | py | Python | src/account/models.py | opnfv/laas | 35b9f39178cc502a5283a1b37a65f7dd0838ae05 | [
"Apache-2.0"
] | 2 | 2020-10-31T15:03:20.000Z | 2021-03-22T16:29:15.000Z | src/account/models.py | opnfv/laas | 35b9f39178cc502a5283a1b37a65f7dd0838ae05 | [
"Apache-2.0"
] | 13 | 2019-12-04T23:29:42.000Z | 2022-03-02T04:53:53.000Z | src/account/models.py | opnfv/laas | 35b9f39178cc502a5283a1b37a65f7dd0838ae05 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2016 Max Breitenfeldt and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from django.contrib.auth.models import User
from django.db import models
from django.apps import apps
import json
import random
from collections import Counter
from dashboard.exceptions import ResourceAvailabilityException
class LabStatus(object):
"""
    A poor man's enum for the status of a lab.
    If everything is working fine at a lab, it is UP.
    If it is down temporarily, e.g. for maintenance, it is TEMP_DOWN.
    If it is broken, it is DOWN.
"""
UP = 0
TEMP_DOWN = 100
DOWN = 200
def upload_to(instance, filename):
    return instance.user.username + '/' + filename
class UserProfile(models.Model):
"""Extend the Django User model."""
user = models.OneToOneField(User, on_delete=models.CASCADE)
timezone = models.CharField(max_length=100, blank=False, default='UTC')
ssh_public_key = models.FileField(upload_to=upload_to, null=True, blank=True)
pgp_public_key = models.FileField(upload_to=upload_to, null=True, blank=True)
email_addr = models.CharField(max_length=300, blank=False, default='email@mail.com')
company = models.CharField(max_length=200, blank=False)
oauth_token = models.CharField(max_length=1024, blank=False)
oauth_secret = models.CharField(max_length=1024, blank=False)
jira_url = models.CharField(max_length=100, null=True, blank=True, default='')
full_name = models.CharField(max_length=100, null=True, blank=True, default='')
booking_privledge = models.BooleanField(default=False)
public_user = models.BooleanField(default=False)
class Meta:
db_table = 'user_profile'
def __str__(self):
return self.user.username
class VlanManager(models.Model):
"""
Keeps track of the vlans for a lab.
Vlans are represented as indexes into a 4096 element list.
This list is serialized to JSON for storing in the DB.
"""
# list of length 4096 containing either 0 (not available) or 1 (available)
vlans = models.TextField()
# list of length 4096 containing either 0 (not reserved) or 1 (reserved)
reserved_vlans = models.TextField()
block_size = models.IntegerField()
# True if the lab allows two different users to have the same private vlans
# if they use QinQ or a vxlan overlay, for example
allow_overlapping = models.BooleanField()
def get_vlans(self, count=1):
"""
Return the IDs of available vlans as a list[int], but does not reserve them.
Will throw index exception if not enough vlans are available.
Always returns a list of ints
"""
allocated = []
vlans = json.loads(self.vlans)
reserved = json.loads(self.reserved_vlans)
        for i in range(0, len(vlans) - 1):  # note: the last index (4095, a reserved VLAN ID in 802.1Q) is never allocated
if len(allocated) >= count:
break
if vlans[i] == 0 and self.allow_overlapping is False:
continue
if reserved[i] == 1:
continue
# vlan is available and not reserved, so safe to add
allocated.append(i)
continue
if len(allocated) != count:
raise ResourceAvailabilityException("can't allocate the vlans requested")
return allocated
def get_public_vlan(self):
"""Return reference to an available public network without reserving it."""
return PublicNetwork.objects.filter(lab=self.lab_set.first(), in_use=False).first()
def reserve_public_vlan(self, vlan):
"""Reserves the Public Network that has the given vlan."""
net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan, in_use=False)
net.in_use = True
net.save()
def release_public_vlan(self, vlan):
"""Un-reserves a public network with the given vlan."""
net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan, in_use=True)
net.in_use = False
net.save()
def public_vlan_is_available(self, vlan):
"""
Whether the public vlan is available.
returns true if the network with the given vlan is free to use,
False otherwise
"""
net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan)
return not net.in_use
def is_available(self, vlans):
"""
If the vlans are available.
'vlans' is either a single vlan id integer or a list of integers
will return true (available) or false
"""
if self.allow_overlapping:
return True
reserved = json.loads(self.reserved_vlans)
vlan_master_list = json.loads(self.vlans)
try:
iter(vlans)
except Exception:
vlans = [vlans]
for vlan in vlans:
if not vlan_master_list[vlan] or reserved[vlan]:
return False
return True
def release_vlans(self, vlans):
"""
Make the vlans available for another booking.
'vlans' is either a single vlan id integer or a list of integers
will make the vlans available
        doesn't return a value
"""
my_vlans = json.loads(self.vlans)
try:
iter(vlans)
except Exception:
vlans = [vlans]
for vlan in vlans:
my_vlans[vlan] = 1
self.vlans = json.dumps(my_vlans)
self.save()
def reserve_vlans(self, vlans):
"""
Reserves all given vlans or throws a ValueError.
vlans can be an integer or a list of integers.
"""
my_vlans = json.loads(self.vlans)
reserved = json.loads(self.reserved_vlans)
try:
iter(vlans)
except Exception:
vlans = [vlans]
vlans = set(vlans)
for vlan in vlans:
if my_vlans[vlan] == 0 or reserved[vlan] == 1:
raise ValueError("vlan " + str(vlan) + " is not available")
my_vlans[vlan] = 0
self.vlans = json.dumps(my_vlans)
self.save()
class Lab(models.Model):
"""
Model representing a Hosting Lab.
Anybody that wants to host resources for LaaS needs to have a Lab model
We associate hardware with Labs so we know what is available and where.
"""
lab_user = models.OneToOneField(User, on_delete=models.CASCADE)
name = models.CharField(max_length=200, primary_key=True, unique=True, null=False, blank=False)
contact_email = models.EmailField(max_length=200, null=True, blank=True)
contact_phone = models.CharField(max_length=20, null=True, blank=True)
status = models.IntegerField(default=LabStatus.UP)
vlan_manager = models.ForeignKey(VlanManager, on_delete=models.CASCADE, null=True)
location = models.TextField(default="unknown")
    # This token must appear in API requests from this lab
api_token = models.CharField(max_length=50)
description = models.CharField(max_length=240)
lab_info_link = models.URLField(null=True)
project = models.CharField(default='LaaS', max_length=100)
@staticmethod
def make_api_token():
"""Generate random 45 character string for API token."""
alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
key = ""
for i in range(45):
key += random.choice(alphabet)
return key
def get_available_resources(self):
        # Cannot import model normally due to circular import
Server = apps.get_model('resource_inventory', 'Server') # TODO: Find way to import ResourceQuery
resources = [str(resource.profile) for resource in Server.objects.filter(lab=self, working=True, booked=False)]
return dict(Counter(resources))
def __str__(self):
return self.name
class PublicNetwork(models.Model):
"""L2/L3 network that can reach the internet."""
vlan = models.IntegerField()
lab = models.ForeignKey(Lab, on_delete=models.CASCADE)
in_use = models.BooleanField(default=False)
cidr = models.CharField(max_length=50, default="0.0.0.0/0")
gateway = models.CharField(max_length=50, default="0.0.0.0")
class Downtime(models.Model):
"""
A Downtime event.
Labs can create Downtime objects so the dashboard can
alert users that the lab is down, etc
"""
start = models.DateTimeField()
end = models.DateTimeField()
lab = models.ForeignKey(Lab, on_delete=models.CASCADE)
description = models.TextField(default="This lab will be down for maintenance")
def save(self, *args, **kwargs):
if self.start >= self.end:
raise ValueError('Start date is after end date')
# check for overlapping downtimes
overlap_start = Downtime.objects.filter(lab=self.lab, start__gt=self.start, start__lt=self.end).exists()
overlap_end = Downtime.objects.filter(lab=self.lab, end__lt=self.end, end__gt=self.start).exists()
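        # note: these two filters do not catch a pre-existing downtime that
        # fully contains (or exactly matches) the new window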
if overlap_start or overlap_end:
raise ValueError('Overlapping Downtime')
return super(Downtime, self).save(*args, **kwargs)
| 33.214035 | 119 | 0.648109 | 1,228 | 9,466 | 4.900651 | 0.259772 | 0.022433 | 0.038883 | 0.051844 | 0.275008 | 0.23679 | 0.211532 | 0.198903 | 0.139581 | 0.139581 | 0 | 0.015111 | 0.244982 | 9,466 | 284 | 120 | 33.330986 | 0.82692 | 0.251215 | 0 | 0.241379 | 0 | 0 | 0.043056 | 0.0094 | 0 | 0 | 0 | 0.003521 | 0 | 1 | 0.096552 | false | 0 | 0.048276 | 0.02069 | 0.544828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
21052d838f3c6f1bc317c7615a2db829dddf4cec | 299 | py | Python | Segundo nivel - condicionales/4.py | OscarPalominoC/RetosPlatziProgramaci-n | cd0c32254e8dd0dc35dda91ad50f5d8e6f013c08 | [
"MIT"
] | null | null | null | Segundo nivel - condicionales/4.py | OscarPalominoC/RetosPlatziProgramaci-n | cd0c32254e8dd0dc35dda91ad50f5d8e6f013c08 | [
"MIT"
] | null | null | null | Segundo nivel - condicionales/4.py | OscarPalominoC/RetosPlatziProgramaci-n | cd0c32254e8dd0dc35dda91ad50f5d8e6f013c08 | [
"MIT"
] | null | null | null | def run():
    animal = input('¿Cuál es tu animal favorito? ')  # "What is your favorite animal?"
    if animal.lower() == 'tortuga' or animal.lower() == 'tortugas':
        print('También me gustan las tortugas.')  # "I like turtles too."
    else:
        print('Ese animal es genial, pero prefiero las tortugas.')  # "That animal is great, but I prefer turtles."
if __name__ == '__main__':
run() | 33.222222 | 67 | 0.615385 | 39 | 299 | 4.538462 | 0.692308 | 0.124294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 299 | 9 | 68 | 33.222222 | 0.765217 | 0 | 0 | 0 | 0 | 0 | 0.44 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2108113aa55ab74979a849198e4a5b50f03a9738 | 1,153 | py | Python | data-structure/registration_with_binary_tree/src/Registration.py | gvalves/unifesp | f0572419f963fe063be56ae34b572a0130246d2f | [
"MIT"
] | null | null | null | data-structure/registration_with_binary_tree/src/Registration.py | gvalves/unifesp | f0572419f963fe063be56ae34b572a0130246d2f | [
"MIT"
] | null | null | null | data-structure/registration_with_binary_tree/src/Registration.py | gvalves/unifesp | f0572419f963fe063be56ae34b572a0130246d2f | [
"MIT"
] | null | null | null | from enum import Enum
class Registration(Enum):
ID_SERVIDOR_PORTAL = 1
NOME = 2
CPF = 3
MATRICULA = 4
DESCRICAO_CARGO = 5
CLASSE_CARGO = 6
REFERENCIA_CARGO = 7
PADRAO_CARGO = 8
NIVEL_CARGO = 9
SIGLA_FUNCAO = 10
NIVEL_FUNCAO = 11
FUNCAO = 12
CODIGO_ATIVIDADE = 13
ATIVIDADE = 14
OPCAO_PARCIAL = 15
COD_UORG_LOTACAO = 16
UORG_LOTACAO = 17
COD_ORG_LOTACAO = 18
ORG_LOTACAO = 19
COD_ORGSUP_LOTACAO = 20
ORGSUP_LOTACAO = 21
COD_UORG_EXERCICIO = 22
UORG_EXERCICIO = 23
COD_ORG_EXERCICIO = 24
ORG_EXERCICIO = 25
COD_ORGSUP_EXERCICIO = 26
ORGSUP_EXERCICIO = 27
TIPO_VINCULO = 28
SITUACAO_VINCULO = 29
DATA_INICIO_AFASTAMENTO = 30
DATA_TERMINO_AFASTAMENTO = 31
REGIME_JURIDICO = 32
JORNADA_DE_TRABALHO = 33
DATA_INGRESSO_CARGOFUNCAO = 34
DATA_NOMEACAO_CARGOFUNCAO = 35
DATA_INGRESSO_ORGAO = 36
DOCUMENTO_INGRESSO_SERVICOPUBLICO = 37
DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO = 38
DIPLOMA_INGRESSO_CARGOFUNCAO = 39
DIPLOMA_INGRESSO_ORGAO = 40
DIPLOMA_INGRESSO_SERVICOPUBLICO = 41
UF_EXERCICIO = 42
| 24.531915 | 45 | 0.699046 | 147 | 1,153 | 5.102041 | 0.619048 | 0.08 | 0.077333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088132 | 0.261925 | 1,153 | 46 | 46 | 25.065217 | 0.793184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022727 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
21086d29a3c48a77d90c32973849ccb037435414 | 1,269 | py | Python | repos/system_upgrade/el7toel8/actors/checkacpid/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 21 | 2018-11-20T15:58:39.000Z | 2022-03-15T19:57:24.000Z | repos/system_upgrade/el7toel8/actors/checkacpid/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 732 | 2018-11-21T18:33:26.000Z | 2022-03-31T16:16:24.000Z | repos/system_upgrade/el7toel8/actors/checkacpid/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 85 | 2018-11-20T17:55:00.000Z | 2022-03-29T09:40:31.000Z | from leapp.actors import Actor
from leapp.models import InstalledRedHatSignedRPM
from leapp.libraries.common.rpms import has_package
from leapp.reporting import Report, create_report
from leapp import reporting
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckAcpid(Actor):
"""
    Check if acpid is installed. If yes, report information about incompatible changes.
"""
name = 'checkacpid'
consumes = (InstalledRedHatSignedRPM,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
if has_package(InstalledRedHatSignedRPM, 'acpid'):
create_report([
reporting.Title('Acpid incompatible changes in the next major version'),
reporting.Summary('The option -d (debug) no longer implies -f (foreground).'),
reporting.Severity(reporting.Severity.LOW),
reporting.Remediation(
hint='You must now use both options (\'-df\') for the same behavior. Please update '
'your scripts to be compatible with the changes.'),
reporting.Tags([reporting.Tags.KERNEL, reporting.Tags.SERVICES]),
reporting.RelatedResource('package', 'acpid')
])
| 40.935484 | 104 | 0.662727 | 132 | 1,269 | 6.340909 | 0.590909 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.249015 | 1,269 | 30 | 105 | 42.3 | 0.878279 | 0.066194 | 0 | 0 | 0 | 0 | 0.216424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.26087 | 0 | 0.521739 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
210ce5a109662e3af414b660e816005c84a91241 | 1,091 | py | Python | machine-learning/QiWei-Python-Chinese/function/function_02.py | yw-fang/MLreadingnotes | 3522497e6fb97427c54f4267d9c410064818c357 | [
"Apache-2.0"
] | 2 | 2020-07-09T22:21:57.000Z | 2021-03-20T15:30:31.000Z | machine-learning/QiWei-Python-Chinese/function/function_02.py | yw-fang/MLreadingnotes | 3522497e6fb97427c54f4267d9c410064818c357 | [
"Apache-2.0"
] | 37 | 2018-04-17T06:40:54.000Z | 2022-03-22T09:06:01.000Z | machine-learning/QiWei-Python-Chinese/function/function_02.py | yw-fang/MLreadingnotes | 3522497e6fb97427c54f4267d9c410064818c357 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Yue-Wen FANG'
__maintainer__ = "Yue-Wen FANG"
__email__ = 'fyuewen@gmail.com'
__license__ = 'Apache License 2.0'
__creation_date__= 'Dec. 25, 2018'
"""
This example shows how *args and **kwargs work.
The extra positional arguments are collected into a tuple (*args);
the extra keyword arguments are collected into a dict (**kwargs).
"""
def add_function_01(x, *args): # you can use any other proper names instead of using args
""" positional arguments"""
print('x is', x)
for i in args:
        print(i)
def add_function_02(x, *args, **kwargs): # you can use any other proper names instead of using args
""" positional arguments and keyword specific arguments """
print('x is', x)
print(args)
print('the type of args is', type(args))
print(kwargs.values())
print(kwargs.keys())
print('the type or kwargs is', type(kwargs))
if __name__ == "__main__":
add_function_01(1,2,3,45)
print("*************")
add_function_02(3, 1, 2, 3, 45, c=3, d=4)
print("*************")
| 27.974359 | 100 | 0.656279 | 159 | 1,091 | 4.27044 | 0.477987 | 0.111929 | 0.029455 | 0.08542 | 0.231222 | 0.191458 | 0.191458 | 0.191458 | 0.191458 | 0.191458 | 0 | 0.034014 | 0.191567 | 1,091 | 38 | 101 | 28.710526 | 0.735828 | 0.210816 | 0 | 0.190476 | 0 | 0 | 0.235115 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0 | 0 | 0.095238 | 0.47619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
21166bc527a261fd10a8f75ff6962975085966f9 | 780 | py | Python | start.py | Wrdle/BertieBotV3 | 8bc5f762ec8de7192a8635e1bebd9dc1bba6ab0f | [
"MIT"
] | 3 | 2019-02-10T14:42:58.000Z | 2020-04-14T08:32:12.000Z | start.py | Wrdle/BertieBotV3 | 8bc5f762ec8de7192a8635e1bebd9dc1bba6ab0f | [
"MIT"
] | 1 | 2019-03-16T11:22:07.000Z | 2019-03-16T11:22:07.000Z | start.py | Wrdle/BertieBotV3 | 8bc5f762ec8de7192a8635e1bebd9dc1bba6ab0f | [
"MIT"
] | null | null | null | import threading
from discord.ext import commands
import discord
from website import create_app
from cogs import generalCommands, events, leaderboard
from settings import configFunctions
client = commands.Bot(command_prefix=commands.when_mentioned_or(configFunctions.getCommandPrefix()), description='Your local BertieBot')
def startWebsite():
napp=create_app(client)
napp.run('0.0.0.0', port=configFunctions.getPortNumber())
if __name__=='__main__':
websiteThread = threading.Thread(target=startWebsite)
websiteThread.start()
client.add_cog(generalCommands.GeneralCommands(client))
client.add_cog(events.Events(client))
client.add_cog(leaderboard.Leaderboard(client))
client.run(configFunctions.getBotToken())
| 30 | 137 | 0.794872 | 90 | 780 | 6.711111 | 0.511111 | 0.07947 | 0.059603 | 0.059603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005764 | 0.110256 | 780 | 25 | 138 | 31.2 | 0.864553 | 0 | 0 | 0 | 0 | 0 | 0.044929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.315789 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
211cc816802d2504b9662cbd05920bf956accf92 | 607 | py | Python | src/demo.py | wenksi/pren-robo-cube-ipcv | e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | [
"MIT"
] | null | null | null | src/demo.py | wenksi/pren-robo-cube-ipcv | e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | [
"MIT"
] | null | null | null | src/demo.py | wenksi/pren-robo-cube-ipcv | e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | [
"MIT"
] | null | null | null | import logging
import pyttsx3
from src.a_detect_pictogram import course_detect_pictogram
from src.common.communication.serial_handler import *
from src.common.movement.drive import *
from src.common.movement.direction import *
from src.common.movement.climb import *
ts = pyttsx3.init()
ts.say("Welcome to our Demonstration. I will show you some moves")
ts.runAndWait()
sh = SerialHandler()
climb = Climb(sh)
drive = Drive(sh)
drive.forward(20)
drive.rotate_body_left(20)
drive.backward(20)
drive.rotate_body_right(20)
ts.say("please show me a pictogram")
ts.runAndWait()
course_detect_pictogram.run() | 23.346154 | 66 | 0.794069 | 92 | 607 | 5.119565 | 0.467391 | 0.07431 | 0.110403 | 0.121019 | 0.171975 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018382 | 0.103789 | 607 | 26 | 67 | 23.346154 | 0.847426 | 0 | 0 | 0.1 | 0 | 0 | 0.134868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.35 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
211ce10aeead46c0c7f51e5abafc02877fb2cf97 | 704 | py | Python | saleor/product/migrations/0168_fulfil_digitalcontenturl_orderline_token.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 1,392 | 2021-10-06T15:54:28.000Z | 2022-03-31T20:50:55.000Z | saleor/product/migrations/0168_fulfil_digitalcontenturl_orderline_token.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 888 | 2021-10-06T10:48:54.000Z | 2022-03-31T11:00:30.000Z | saleor/product/migrations/0168_fulfil_digitalcontenturl_orderline_token.py | gustavoarmoa/saleor | f81b2f347e4c7a624cd68a1eca3b0a5611498f6e | [
"CC-BY-4.0"
] | 538 | 2021-10-07T16:21:27.000Z | 2022-03-31T22:58:57.000Z | # Generated by Django 3.2.12 on 2022-04-12 14:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("product", "0167_digitalcontenturl_order_line_token"),
("order", "0140_alter_orderline_old_id_and_created_at"),
]
operations = [
migrations.RunSQL(
"""
UPDATE product_digitalcontenturl
SET order_line_token = (
SELECT token
FROM order_orderline
WHERE product_digitalcontenturl.line_id = order_orderline.id
)
WHERE line_id IS NOT NULL;
""",
reverse_sql=migrations.RunSQL.noop,
),
]
| 26.074074 | 76 | 0.586648 | 71 | 704 | 5.535211 | 0.619718 | 0.045802 | 0.071247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051282 | 0.335227 | 704 | 26 | 77 | 27.076923 | 0.788462 | 0.065341 | 0 | 0 | 1 | 0 | 0.259053 | 0.225627 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21226fcd5be7a149c71e6cd157a3b6472337737a | 1,430 | py | Python | dashboard/app.py | katabaticwind/learn-vue | c83fc1b7ef3edfdc0abd06ba39be58f5a1ca5a59 | [
"MIT"
] | null | null | null | dashboard/app.py | katabaticwind/learn-vue | c83fc1b7ef3edfdc0abd06ba39be58f5a1ca5a59 | [
"MIT"
] | null | null | null | dashboard/app.py | katabaticwind/learn-vue | c83fc1b7ef3edfdc0abd06ba39be58f5a1ca5a59 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, url_for
import os
app = Flask(__name__)
# Homepage
@app.route('/')
def index():
return render_template('index.html')
# Domestic Dashboards
@app.route('/domestic/')
@app.route('/domestic/<model>')
def domestic(model=None):
return render_template('domestic.html', model=model)
# JSON API (allows users to "download" JSON files)
@app.route('/json/')
def json():
return str(os.listdir('static/json/'))
@app.route('/json/meta/')
@app.route('/json/meta/<file>')
def json_meta(file=None):
if file is not None:
with app.open_resource('static/json/meta/{}'.format(file)) as f:
return f.read()
else:
return str(os.listdir('static/json/meta/'))
@app.route('/json/models/')
@app.route('/json/models/<file>')
def json_models(file=None):
if file is not None:
with app.open_resource('static/json/models/{}'.format(file)) as f:
return f.read()
else:
return str(os.listdir('static/json/models/'))
# Trick (converting a Python variable to a Javascript variable)
# @app.route('/some/route/<url>')
# def convert_variable(url=None):
#     x = some data pull (e.g. SQL query), dict or list
#     if x is not None:
#         return render_template('page.html', x=x)  # then {{ x | tojson }} in page.html
#     else:
#         return render_template('page.html')
| 27.5 | 89 | 0.645455 | 202 | 1,430 | 4.490099 | 0.326733 | 0.079383 | 0.110254 | 0.059537 | 0.401323 | 0.269019 | 0.238148 | 0.238148 | 0.238148 | 0.238148 | 0 | 0 | 0.183217 | 1,430 | 51 | 90 | 28.039216 | 0.776541 | 0.313287 | 0 | 0.206897 | 0 | 0 | 0.211777 | 0.021694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.068966 | 0.103448 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
212c7789af89131c2bbfe4f6722164ee35269ced | 486 | py | Python | src/softforest/routing.py | Zayn484/softForest-rest-api | 6ba80f738f2f00e938bc472b900c467d67dc4bc4 | [
"bzip2-1.0.6"
] | null | null | null | src/softforest/routing.py | Zayn484/softForest-rest-api | 6ba80f738f2f00e938bc472b900c467d67dc4bc4 | [
"bzip2-1.0.6"
] | 5 | 2021-03-19T00:39:57.000Z | 2022-01-13T01:14:11.000Z | src/softforest/routing.py | Zayn484/softforest-rest-api | 6ba80f738f2f00e938bc472b900c467d67dc4bc4 | [
"bzip2-1.0.6"
] | null | null | null | from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from chat.consumers import ChatConsumer
from teams.consumers import InvitationConsumer
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter([
re_path(r'^ws/chat/(?P<room_name>[^/]+)/$', ChatConsumer),
re_path(r'^ws/invitation/(?P<id>[^/]+)/$', InvitationConsumer)
])
),
}) | 30.375 | 74 | 0.699588 | 49 | 486 | 6.857143 | 0.530612 | 0.053571 | 0.041667 | 0.053571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17284 | 486 | 16 | 75 | 30.375 | 0.835821 | 0 | 0 | 0 | 0 | 0 | 0.143737 | 0.125257 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.384615 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
212d2ae3f3860ba8035eb54ebdfbfb256b22bb6e | 973 | py | Python | src/spaceone/inventory/manager/pricing_manager.py | jihyungSong/plugin-aws-price-info | f97f4edff4f022cc9e7c43185ecc98fe9a92fb48 | [
"Apache-2.0"
] | 1 | 2021-02-18T07:56:14.000Z | 2021-02-18T07:56:14.000Z | src/spaceone/inventory/manager/pricing_manager.py | jihyungSong/plugin-aws-price-info | f97f4edff4f022cc9e7c43185ecc98fe9a92fb48 | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/pricing_manager.py | jihyungSong/plugin-aws-price-info | f97f4edff4f022cc9e7c43185ecc98fe9a92fb48 | [
"Apache-2.0"
] | 2 | 2021-02-18T07:56:17.000Z | 2021-03-23T01:53:40.000Z | import json
from spaceone.inventory.libs.manager import AWSManager
from spaceone.inventory.connector.pricing import PricingConnector
from spaceone.inventory.model.pricing.cloud_service_type import CLOUD_SERVICE_TYPES
class PricingManager(AWSManager):
conn = None
def __init__(self, transaction=None, **kwargs):
super().__init__(transaction=transaction)
self.conn: PricingConnector = self.locator.get_connector('PricingConnector', **kwargs)
self.conn.set_client()
def list_service_codes(self):
services = self.conn.describe_services()
return [service.get('ServiceCode') for service in services if service.get('ServiceCode')]
def list_products(self, service_code):
for product in self.conn.get_products(service_code):
yield json.loads(product)
@staticmethod
def collect_cloud_service_types():
for cloud_service_type in CLOUD_SERVICE_TYPES:
yield cloud_service_type
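# Illustrative usage sketch (the transaction wiring is an assumption, not from this module):
#   mgr = PricingManager(transaction=None)
#   for service_code in mgr.list_service_codes():
#       for product in mgr.list_products(service_code):
#           ...  # each product is a parsed AWS Price List entry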
| 36.037037 | 97 | 0.741007 | 115 | 973 | 6.008696 | 0.4 | 0.104197 | 0.091172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175745 | 973 | 26 | 98 | 37.423077 | 0.861596 | 0 | 0 | 0 | 0 | 0 | 0.039054 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.55 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
21302d3ba8091fd04babc7e6b1cac595bcb3588f | 816 | py | Python | django_drf_filepond/migrations/0003_add_storedupload.py | alarivas/django-drf-filepond | 30098c186cd489728c227de0f85004039a1f72fc | [
"BSD-3-Clause"
] | 81 | 2018-12-03T22:37:40.000Z | 2022-03-31T15:33:06.000Z | django_drf_filepond/migrations/0003_add_storedupload.py | alarivas/django-drf-filepond | 30098c186cd489728c227de0f85004039a1f72fc | [
"BSD-3-Clause"
] | 56 | 2019-01-02T16:04:12.000Z | 2022-03-15T02:45:29.000Z | django_drf_filepond/migrations/0003_add_storedupload.py | alarivas/django-drf-filepond | 30098c186cd489728c227de0f85004039a1f72fc | [
"BSD-3-Clause"
] | 29 | 2018-12-30T22:45:19.000Z | 2022-03-12T02:48:51.000Z | # Generated by Django 2.1.3 on 2019-02-13 13:00
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_drf_filepond', '0002_add_upload_dir'),
]
operations = [
migrations.CreateModel(
name='StoredUpload',
fields=[
('upload_id',
models.CharField(
max_length=22, primary_key=True, serialize=False,
validators=[
django.core.validators.MinLengthValidator(22)])),
('file_path', models.CharField(max_length=2048)),
('uploaded', models.DateTimeField()),
('stored', models.DateTimeField(auto_now_add=True)),
],
),
]
| 30.222222 | 74 | 0.552696 | 75 | 816 | 5.853333 | 0.68 | 0.045558 | 0.091116 | 0.109339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049724 | 0.334559 | 816 | 26 | 75 | 31.384615 | 0.758748 | 0.055147 | 0 | 0 | 1 | 0 | 0.106632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21367cf6bd5bc02d6a029a160d178f8fc6a45ea5 | 24,632 | py | Python | MixPanel_daily_DataPipeline/bin/mixpanel_daily_datapipeline.py | ThiagoBarsante/DataEngineer_projects | bbed3653cc8e7bf6aec096d11c78566e3a3f8a2b | [
"MIT"
] | null | null | null | MixPanel_daily_DataPipeline/bin/mixpanel_daily_datapipeline.py | ThiagoBarsante/DataEngineer_projects | bbed3653cc8e7bf6aec096d11c78566e3a3f8a2b | [
"MIT"
] | null | null | null | MixPanel_daily_DataPipeline/bin/mixpanel_daily_datapipeline.py | ThiagoBarsante/DataEngineer_projects | bbed3653cc8e7bf6aec096d11c78566e3a3f8a2b | [
"MIT"
] | null | null | null |
""" This program run one complete datapipeline with raw data from mixpanel (json files API)
and generate one structured file format to be used in a Data Science project
Resume
- validate startup process
- check if the configuration and variables are setup
- run mixpanel json api to download 5 events from specific day (daily execution)
- merge all events and do feature engineering (Label Encode, One Hot Encode...)
- export the results to .csv (local)
- export the result to a cloud provider (GCP) and provide the logic to AWS
- cleanup old processed files(.csv, .log, .json and .zip)
- some exceptions are generated intentionaly to be catched by scheduler
tools/platforms when executed
Basic execution info and setup
- Directory structure requirements
./bin
./log
./data_dir => configuration file
./json_dir => temp directory to download the json files
Configuration file wiht additional parameters
- the configuration file must have the same name of .py file with extension .cfg
"""
import os
import sys
import subprocess as prc
import datetime
import pandas as pd
import boto3
## Auxiliary functions from python module utils...py
from utils_support_files import (f_short_name,
f_rename_properties,
f_path_rename,
labelEncode_value_ab,
delta_days_hours,
f_hours_since_start,
f_convert_str_to_datetime,
f_workaround_local_json,
f_workaround_default_event_df_5,
f_include_default_path_df,
f_update_date)
#### ------------------------------------------- DATA PIPELINE
print('print - Process start...')
# events to be downloaded
V_EVENTS = ['Company First Start', 'Conversation Started',
'Conversation Completed', 'A/B - Onboard - Voice',
'Subscription Confirmed']
## Initial setup (log files)
V_BOL_EXCEPTION = False
V_BOL_LOG_FILE = False
PYTHON_FILEN_NAME = os.path.basename(__file__)
V_LOG_FILENAME = PYTHON_FILEN_NAME[:-3] + '.log'
V_CONFIG_FILENAME = PYTHON_FILEN_NAME[:-3] + '.cfg'
V_STG_JSON_FILENAME = PYTHON_FILEN_NAME[:-3] + '_STG.json'
V_LOG_DIR = '../log/'
if not os.path.isdir(V_LOG_DIR):
V_BOL_EXCEPTION = True
print('EXCEPTION : Invalid log dir - ', V_LOG_DIR)
    raise ValueError('Invalid log directory. Create the log dir first')
### --------------------------------------------------------------
## FUNCTIONS
def f_log(msg='', time=0):
""" Aux function to print msg and generate log to file"""
if time == 1:
now = datetime.datetime.now()
v_time = now.strftime("%Y-%m-%d %H:%M:%S")
else:
v_time = ''
v_log_msg = v_time + ' | ' + msg
print(v_log_msg)
if V_BOL_LOG_FILE:
F.write('\n' + v_log_msg)
def f_setup_config(v_config_filename):
"""This function read the configuration file"""
df_conf_file = pd.read_csv(v_config_filename, delimiter="|", header=0)
api_key = df_conf_file[df_conf_file.CONFIG_VAR == 'API_KEY']['VALUE'].values[0]
data_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'DATA_DIR']['VALUE'].values[0]
json_log_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'JSON_DIR']['VALUE'].values[0]
gcs_bucket = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_BUCKET']['VALUE'].values[0]
# gcs_service_account_key = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_SERVICE_ACOUNT_KEY']['VALUE'].values[0]
# aws_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_ACCESS_KEY']['VALUE'].values[0]
# aws_secret_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_SECRET_ASSES_KEY']['VALUE'].values[0]
aws_s3 = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_S3_BUCKET']['VALUE'].values[0]
export_csv = df_conf_file[df_conf_file.CONFIG_VAR == 'EXPORT_CSV']['VALUE'].values[0]
cleanup_days = df_conf_file[df_conf_file.CONFIG_VAR == 'CLEANUP_DAYS']['VALUE'].values[0]
# return api_key, gcs_bucket, gcs_service_account_key, aws_key, aws_secret_key, \
# aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
return api_key, gcs_bucket, aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
def f_cleanup_process(days, dir_cleanup):
"""This function will delete all files older than x days (minmal is 5) in the directory
Only .zip, .json and .csv and .log files will be deleted
"""
try:
cleanup_days_aux = int(days)
    except (TypeError, ValueError):
cleanup_days_aux = 5
if cleanup_days_aux < 5:
cleanup_days_aux = 5
file_type = ['*.csv', '*.zip', '*.json', '*.log']
for file_remove in file_type:
v_files_deleted = dir_cleanup + file_remove
msg_shell = 'find ' + v_files_deleted + ' -mtime +' +str(cleanup_days_aux) + ' -exec rm {} \;'
# print(msg_shell)
try:
            f_log('Delete files older than ' + str(cleanup_days_aux) + ' days. FILES: ' + v_files_deleted, 1)
shell_result = prc.check_output(msg_shell, shell=True)
print(shell_result)
        except Exception:
            msg = 'Problem deleting files: ' + v_files_deleted
        else:
            msg = 'OK'
return msg
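## Example of a cleanup command generated by the loop above
## (illustrative directory; the real dirs come from the configuration file):
##   find ../log/*.log -mtime +5 -exec rm {} \;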
def f_message_start_error(msg=''):
""" Function to raise a msg if do not start the data pipeline correctly """
f_log('')
f_log('--------------- SETUP ALL INFORMATION IN THE CONFIG FILE')
f_log('')
f_log('---- Exception..')
f_log('')
f_log('----------- Run the program with parameter local, gcp or aws')
f_log('-------------- local = local / on-premise execution')
f_log('-------------- gcp - Google cloud shell')
f_log('-------------- aws - Amazon Web Services')
f_log('')
f_log('------------- EXAMPLE')
f_log('')
f_log('-- Local daily execution')
f_log('python company_mixpanel_daily_datapilene.py local')
f_log('')
f_log('-- GCP daily execution')
f_log('python company_mixpanel_daily_datapilene.py gcp')
f_log('')
f_log('-- AWS daily execution')
f_log('python company_mixpanel_daily_datapilene.py aws')
f_log('')
f_log('')
    f_log('-- Run the program (local, gcp or aws) for a specific day. ' + \
'Re-execution date: Format DD-MM-YYYY')
f_log('python company_mixpanel_daily_datapilene.py local 31-10-2019')
f_log('')
f_log('EXCEPTION : problem running the program', 1)
if V_BOL_LOG_FILE:
F.close()
raise ValueError(msg)
def def_aux_json_api_download(api_key_cfg, events, download_date, dir_json_data, json_filename):
""" This function use mixpanel json API to download all 5 events from a specific day
download json files
"""
v_filename = dir_json_data + V_DT_LOG_STR + json_filename
fullstr_events = '","'.join(events)
curl_1_aux = "curl https://data.mixpanel.com/api/2.0/export/ \
-u " + api_key_cfg + " \
-d from_date=\"" + download_date + "\" \
-d to_date=\"" + download_date + "\" \
-d event='[\"" + fullstr_events + "\"]' >> " + v_filename
f_log(curl_1_aux, 1)
    ###### - Note about the code below
    ## INFO - all curl commands were downloaded from 31-10-2019 until 04-11-2019
    ## into ./bin/support_datafiles/ for local execution - workaround to run on
    ## Linux and Windows (curl execution)
    ## FOR A PRODUCTION ENVIRONMENT, JUST COMMENT THE LINE RELATED TO f_workaround_local_json
curl_1_aux = f_workaround_local_json('../data/', V_DT_MIXPANEL_STR, json_filename, V_DT_LOG_STR)
curl_shell = prc.check_output(curl_1_aux, shell=True)
## Keep this line to see the result of the subprocess
print(curl_shell)
## check if the file had downloaded events
if os.stat(v_filename).st_size == 0:
f_log('EXCEPTION : JSON API DOWNLOAD - NO DATA to process - check internet \
connection or the execution date parameter', 1)
        raise ValueError('No data to process - check internet connection or the execution date parameter')
else:
f_log('JSON API DOWNLOAD - OK', 1)
df_aux_json = pd.read_json(v_filename, lines=True)
return df_aux_json
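## Example of the curl command assembled above (API key, date and file name
## are placeholders; the real values come from the config file and CLI args):
##   curl https://data.mixpanel.com/api/2.0/export/ -u <API_KEY> \
##       -d from_date="2019-10-31" -d to_date="2019-10-31" \
##       -d event='["Company First Start","Conversation Started",...]' >> <json_dir>/<ts>_mixpanel_ALL_events_STG.json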
def f_dataframe_jsonfile(event_desc, event_list, df_json):
""" This function read on json file and convert it to a pandas dataframe format """
v_aux_event_list = 'EVENT_XXX'
    ## comment: indexes in Python, as in C, start at 0, so event 1 is index 0, event 2 is index 1, ...
if event_desc == 'event_1_company_start':
v_aux_event_list = event_list[0]
elif event_desc == 'event_2_conversation_start':
v_aux_event_list = event_list[1]
elif event_desc == 'event_3_conversation_completed':
v_aux_event_list = event_list[2]
elif event_desc == 'event_4_AB_OnBoard_Voice':
v_aux_event_list = event_list[3]
elif event_desc == 'event_5_subscription_confirmed':
v_aux_event_list = event_list[4]
df_default_aux = f_workaround_default_event_df_5()
    else:
        f_log('EXCEPTION : Event not mapped - ' + event_desc, 1)
        raise ValueError('Event not mapped: ' + event_desc)
## filter data frame by event
df_aux = df_json
df_aux = df_aux.query("event == @v_aux_event_list")
## validate total records in the dataframe created
total_records = df_aux.shape[0]
    if total_records == 0:
        ## NOTE: df_default_aux is only defined for event 5 (workaround above);
        ## the other events are expected to always have records.
        df_result = df_default_aux
else:
df_result = pd.io.json.json_normalize(df_aux['properties'])
df_result['event'] = v_aux_event_list
### SHORT NAME
short_colname = f_short_name(df_result['event'][0])
### rename properties name
df_result.columns = f_rename_properties(df_result.columns, short_colname)
return df_result
## ----------------------------------------------------------------------------- PROGRAM START
## ------------------------------ Log file creation
try:
f_log('Creation of log file ...')
V_DT_LOG = datetime.datetime.now()
    V_DT_LOG_STR = V_DT_LOG.strftime("%Y%m%d_%H%M%S")  ## e.g. 20200216_161825
V_LOG_FILENAME = V_LOG_DIR + V_DT_LOG_STR + '_' + V_LOG_FILENAME
F = open(V_LOG_FILENAME, "w+")
V_BOL_LOG_FILE = True
except:
print('Exception running the program...')
V_BOL_EXCEPTION = True
f_log('EXCEPTION : Problem to create log file - ' + V_LOG_FILENAME)
#### ----------------------------------------- START VALIDATION
## VALIDATE THE OPTIONS AND GIVE JUST ONE MESSAGE IF ANY EXCEPTION OCCURS
if (len(sys.argv) not in [2, 3]):
f_message_start_error(msg='Re-start the process with correct parameter. local or gcp or aws')
if len(sys.argv) == 2:
if sys.argv[1] not in ['local', 'gcp', 'aws']:
V_BOL_EXCEPTION = True
f_log('EXCEPTION : Wrong parameter local/gcp/aws -' + sys.argv[1])
V_DT_MIXPANEL = datetime.date.today() - datetime.timedelta(days=1) ## yesterday
V_DT_MIXPANEL_STR = V_DT_MIXPANEL.strftime("%Y-%m-%d")
elif len(sys.argv) == 3:
    ## VALIDATE THE DATE PARAMETER
try:
V_DT_MIXPANEL = datetime.datetime.strptime(sys.argv[2], '%d-%m-%Y').date()
V_DT_MIXPANEL_STR = V_DT_MIXPANEL.strftime("%Y-%m-%d")
except:
V_BOL_EXCEPTION = True
f_log('EXCEPTION : Invalid date - ' + sys.argv[2])
#### ----------------------------------------- VALIDATION OF CONFIG FILES AND DIR
try:
# V_API_KEY, V_GCS_BUCKET, V_GCS_SERVICE_ACCOUNT_KEY, V_AWS_KEY, \
# V_AWS_SECRET_KEY, V_AWS_S3, V_DATA_DIR, V_JSON_DIR, V_EXPORT_CSV, \
# V_CLEANUP_DAYS = f_setup_config(V_CONFIG_FILENAME)
V_API_KEY, V_GCS_BUCKET, V_AWS_S3, V_DATA_DIR, V_JSON_DIR, V_EXPORT_CSV, \
V_CLEANUP_DAYS = f_setup_config(V_CONFIG_FILENAME)
except:
V_BOL_EXCEPTION = True
print('EXCEPTION : Invalid configuration file - ', V_CONFIG_FILENAME)
# VALIDATE THAT THE DATA AND JSON DIRECTORIES EXIST
if not os.path.isdir(V_DATA_DIR):
V_BOL_EXCEPTION = True
f_log('EXCEPTION : Invalid data directory - ' + V_DATA_DIR)
if not os.path.isdir(V_JSON_DIR):
V_BOL_EXCEPTION = True
f_log('EXCEPTION : Invalid directory - ' + V_JSON_DIR)
#### ------------ Check the workaround file for EVENT 5 (missing for day 01-11-2019)
V_FILE_EVENT_5 = './support_datafiles/EVENT_5__DEFAULT_DF_Subscription_Confirmed.CSV'
if not os.path.isfile(V_FILE_EVENT_5):
V_BOL_EXCEPTION = True
f_log('EXCEPTION : File not found. Pending file (setup it manually) - \
workaround DEFAULT_DF_EVENT_5 01-11-2019: - ' + V_FILE_EVENT_5)
#### ----------------------- Raise a problem to re-start the process
if V_BOL_EXCEPTION:
f_message_start_error('EXCEPTION : Re-start the program with correct parameter \
and config file. Read all messages above...')
######################### MAIN FUNCTION
f_log('-------------------------------------- PROGRAM EXECUTION')
f_log('------------------------------------------------ Data pipeline start')
f_log('Python program: ' + sys.argv[0])
f_log('local/cloud parameter: ' + sys.argv[1])
f_log('Execution date: ' + V_DT_MIXPANEL_STR)
f_log('Data dir: '+ V_DATA_DIR)
f_log('Log dir: '+ V_LOG_DIR)
f_log('Json download file dir: '+ V_JSON_DIR)
f_log('Log file name: ' + V_LOG_FILENAME)
f_log(' ', 0)
f_log('-------------------- Starting execution -------------------------- ', 1)
### --------------------------------------------------------------
## Download JSON FILE
f_log('Download JSON file and create one dataframe for each EVENT', 1)
DF_ALL_EVENTS = def_aux_json_api_download(V_API_KEY, V_EVENTS,
V_DT_MIXPANEL_STR,
V_JSON_DIR,
json_filename='_mixpanel_ALL_events_STG.json')
DF_COMPANY_START = f_dataframe_jsonfile('event_1_company_start',
V_EVENTS, df_json=DF_ALL_EVENTS)
DF_CONVERSATION_START = f_dataframe_jsonfile('event_2_conversation_start',
V_EVENTS, df_json=DF_ALL_EVENTS)
DF_CONVERSATION_COMPLETED = f_dataframe_jsonfile('event_3_conversation_completed',
V_EVENTS, df_json=DF_ALL_EVENTS)
DF_AB_ONBOARD_VOICE = f_dataframe_jsonfile('event_4_AB_OnBoard_Voice',
V_EVENTS, df_json=DF_ALL_EVENTS)
DF_SUBSCRIPTION_CONFIRMED = f_dataframe_jsonfile('event_5_subscription_confirmed',
V_EVENTS, df_json=DF_ALL_EVENTS)
### --------------------------------------------------------------
f_log('Merge all Data frames and filter rows and columns ... ', 1)
## Merge ALL 5 Data Frames - item 5.
## Obs. Remove all duplicates if necessary in a production environment before applying the merge
## example: DF_COMPANY_START = DF_COMPANY_START.drop_duplicates() ...
DF_ALL_EVENTS = DF_COMPANY_START
## Merge Data frames
DF_ALL_EVENTS = DF_ALL_EVENTS.merge(DF_CONVERSATION_START, left_on='distinct_id',
right_on='distinct_id', how='left')
DF_ALL_EVENTS = DF_ALL_EVENTS.merge(DF_CONVERSATION_COMPLETED,
left_on=['distinct_id', 'conversationid'],
right_on=['distinct_id', 'conversationid'], how='left')
DF_ALL_EVENTS = DF_ALL_EVENTS.merge(DF_AB_ONBOARD_VOICE,
left_on=['distinct_id', 'conversationid'],
right_on=['distinct_id', 'conversationid'], how='left')
DF_ALL_EVENTS = DF_ALL_EVENTS.merge(DF_SUBSCRIPTION_CONFIRMED,
left_on=['distinct_id', 'conversationid'],
right_on=['distinct_id', 'conversationid'], how='left')
# ''' validation export results -- full execution (flow 1 + flow 2) vs daily (just one flow)
# ## DF_ALL_EVENTS.to_csv('../data/DF_ALL_EVENTS__DAILY_FULL.CSV')
# VALIDATION OK!
# divergence...
# MISSING CONVERSATION vISvgqOIowcQn54xDyfIFrswYYr9Tonz - Event start on day 01-11
# On day 04-11 only the events Conversation Started and A/B - Onboard - Voice
# occurred, and because of this there is no information for day 04-11
# '''
### --------------------------------------------------------------
## Filter columns - item 6.
FILTER_COLUMNS = ['distinct_id',
'conversationid',
'app_versionconsta',
'conversationindexconsta',
'conversationstartedatconsta',
'valueabonbvoi',
'pathscompletedconcom',
'datelocaltimecomfirsta',
'frequencysubcon']
for coluna in FILTER_COLUMNS:
if coluna not in DF_ALL_EVENTS.columns:
print(coluna)
DF_ALL_EVENTS = DF_ALL_EVENTS[FILTER_COLUMNS]
## Filter out rows without conversation dates: needed to calc hours_since_start (drop na)
DF_ALL_EVENTS = DF_ALL_EVENTS.dropna(subset=['conversationstartedatconsta',
'datelocaltimecomfirsta'])
# """ Comments
# ## Filter records - item 7. however the only record with
# ##### frequencysubcon == 'monthly do not have distinct_id and conversationid
# ##### correlated. Code code just for the requirement without any change in the data
# """
DF_ALL_EVENTS = DF_ALL_EVENTS.query("frequencysubcon != 'monthly'", inplace=False)
f_log('Label Encode valueabonbvoi ...', 1)
DF_ALL_EVENTS['valueabonbvoi'] = DF_ALL_EVENTS['valueabonbvoi'].apply(labelEncode_value_ab)
f_log('One Hot Encode paths ... ', 1)
### --------------------------------------------------------------
## ONE HOT ENCODE pathscompletedconcom - fill NA = N for NoPath
DF_ALL_EVENTS['pathscompletedconcom'] = \
DF_ALL_EVENTS['pathscompletedconcom'].fillna(value='N')
DF_OHE = pd.Series(DF_ALL_EVENTS['pathscompletedconcom']). \
apply(frozenset).to_frame(name='pathscompletedconcom')
for pathscompletedconcom in frozenset.union(*DF_OHE.pathscompletedconcom):
DF_OHE[pathscompletedconcom] = DF_OHE.apply(lambda _: int(pathscompletedconcom in _.pathscompletedconcom), axis=1)
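## Illustration of the one-hot step above (hypothetical paths P1/P2):
##   a row with pathscompletedconcom = frozenset({'P1', 'P2'}) gets the
##   columns P1=1 and P2=1, and 0 for every other path seen in the column.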
## remove_columns pathscompletedconcom_V2, sort dataframe index and
#### rename Paths columns path1..., path2... path3... and merge all events
DF_OHE = DF_OHE.drop(columns=['pathscompletedconcom'])
DF_OHE.columns = f_path_rename(DF_OHE.columns)
DF_OHE = f_include_default_path_df(DF_OHE)
DF_OHE.fillna(0, inplace=True)
INT_COLUMNS = DF_OHE.columns
DF_OHE = DF_OHE[INT_COLUMNS].astype(int)
DF_ALL_EVENTS = DF_ALL_EVENTS.join(DF_OHE)
DF_ALL_EVENTS = DF_ALL_EVENTS.drop(columns=['pathscompletedconcom'])
f_log('Calculate number of hours...', 1)
DF_ALL_EVENTS['conversationstartedatconsta'] = \
DF_ALL_EVENTS['conversationstartedatconsta'].apply(f_convert_str_to_datetime)
DF_ALL_EVENTS['datelocaltimecomfirsta'] = \
DF_ALL_EVENTS['datelocaltimecomfirsta'].apply(f_convert_str_to_datetime)
## SIMULATE EXECUTION FOR YESTERDAY - UPDATE all_events to yesterday date INSTEAD OF 04-11-2019
EXECUTION_DATE = datetime.date.today() - datetime.timedelta(days=1) ## yesterday
EXECUTION_DATE_STR = EXECUTION_DATE.strftime("%Y-%m-%d")
if V_DT_MIXPANEL_STR == EXECUTION_DATE_STR:
OLD_DATE = datetime.date(2019, 11, 4)## SAME DAY OF WORKAROUND TO COPY FILES
ADD_DAYS = EXECUTION_DATE - OLD_DATE
DF_ALL_EVENTS['conversationstartedatconsta'] = f_update_date(
DF_ALL_EVENTS.conversationstartedatconsta, ADD_DAYS.days)
DF_ALL_EVENTS['datelocaltimecomfirsta'] = f_update_date(
DF_ALL_EVENTS.datelocaltimecomfirsta, ADD_DAYS.days)
## OLD CALC REPLACED BY THE LAMBDA FUNCTION BELOW (WITH TRY/EXCEPT)
DF_ALL_EVENTS['hours_since_start'] = DF_ALL_EVENTS.apply(lambda x: f_hours_since_start(
x['datelocaltimecomfirsta'], x['conversationstartedatconsta']), axis=1)
### --------------------------------------------------------------
## Label Target info : Number of conversations started
f_log('Processing target - number of conversations ...', 1)
DF_GROUP = DF_CONVERSATION_START.groupby('distinct_id').conversationid.nunique()
DF_GROUP = DF_GROUP.to_frame(name='num_convo').reset_index()
### --------------------------------------------------------------
## Data wrangling 2 - Amount of time in hours between Conversations
f_log('Processing additional feature - amount of hours...', 1)
COL_HOURS = ['distinct_id', 'conversationid', 'conversationstartedatconsta']
DF_AMOUNT_TIME = DF_CONVERSATION_START.copy()
DF_AMOUNT_TIME = DF_AMOUNT_TIME[COL_HOURS]
DF_AMOUNT_TIME['conversationstartedatconsta'] = DF_AMOUNT_TIME['conversationstartedatconsta'].apply(f_convert_str_to_datetime)
DF_AMOUNT_TIME = DF_AMOUNT_TIME.sort_values(by=['distinct_id', 'conversationstartedatconsta'], ascending=True)
TIME_LIST = []
DELTA_LIST = []
V_OLD_USER = ''
V_OLD_DATETIME = ''
## index labels are shuffled by the sort_values above, so track the first
## row with an explicit flag instead of testing index == 0
V_FIRST_ROW = True
for index, row in DF_AMOUNT_TIME.iterrows():
    v_current_user = row['distinct_id']
    v_current_datetime = row['conversationstartedatconsta']
    if V_FIRST_ROW:
        V_FIRST_ROW = False
        V_OLD_USER = row['distinct_id']
        V_OLD_DATETIME = row['conversationstartedatconsta']
        v_hour = 0
        v_delta = 0
else:
if V_OLD_USER != v_current_user:
v_hour = 0
v_delta = 0
V_OLD_DATETIME = v_current_datetime
V_OLD_USER = v_current_user
else:
v_hour = delta_days_hours(v_current_datetime - V_OLD_DATETIME)
v_delta = v_current_datetime - V_OLD_DATETIME
V_OLD_DATETIME = v_current_datetime
V_OLD_USER = v_current_user
TIME_LIST.append(v_hour)
DELTA_LIST.append(v_delta)
DF_AMOUNT_TIME['hours_since_last'] = TIME_LIST
DF_AMOUNT_TIME['delta_hours_since_last'] = DELTA_LIST
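## Worked example (assuming delta_days_hours returns fractional hours):
## a user with conversations at 10:00 and 13:30 on the same day yields
## hours_since_last = [0, 3.5]; the first conversation has no predecessor.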
DF_ALL_EVENTS = DF_ALL_EVENTS.merge(DF_GROUP, left_on=['distinct_id'],
right_on=['distinct_id'], how='left')
COLUMNS_DELTA_HOURS = ['distinct_id', 'conversationid', 'hours_since_last']
DF_AMOUNT_TIME = DF_AMOUNT_TIME[COLUMNS_DELTA_HOURS]
DF_ALL_EVENTS = DF_ALL_EVENTS.merge(DF_AMOUNT_TIME, left_on=['distinct_id', 'conversationid'],
right_on=['distinct_id', 'conversationid'], how='left')
### --------------------------------------------------------------
## Export the results to CSV - final output
V_EXPORT_FILENAME = V_DATA_DIR + V_DT_MIXPANEL_STR + '-' + V_EXPORT_CSV
f_log('Export results to csv (Linux storage): ' + V_EXPORT_FILENAME, 1)
DF_ALL_EVENTS.to_csv(V_EXPORT_FILENAME, index=False)
### --------------------------------------------------------------
### GCP EXECUTION
if sys.argv[1] == 'gcp':
try:
f_log('GCP EXECUTION ... ',1)
# TODO: Setup GCP service account and GS BUCKET to run the command gsutil cp ...
## AND UNCOMMENT GCP LINES BELOW
f_log('GCP : exporting results (gs bucket):' + V_GCS_BUCKET + '/' + V_EXPORT_FILENAME, 1)
# GSUTIL_MSG = 'gsutil cp ' + V_EXPORT_FILENAME + ' ' + V_GCS_BUCKET
# f_log(GSUTIL_MSG, 1)
# CP_GCS = prc.check_output(GSUTIL_MSG, shell=True)
# print(CP_GCS)
except:
f_log('EXCEPTION : problem to save file to google cloud storage')
### --------------------------------------------------------------
### AWS EXECUTION
if sys.argv[1] == 'aws':
f_log('AWS EXECUTION : access key (credentials local file) ', 1)
SHORT_FILENAME = V_DT_MIXPANEL_STR + '-' + V_EXPORT_CSV
## TODO: setup aws credentials to run the upload file using boto3
### AND UNCOMMENT AWS LINES BELOW
print(V_AWS_S3 + '/datapipeline_tmp/')
f_log('Exporting results to AWS S3 : ' + V_AWS_S3 + '/datapipeline_tmp/' + SHORT_FILENAME, 1)
# S3_FILE_UPLOAD = boto3.client('s3', region_name='us-east-1')
# ## dir_bucket = 'datapipeline_tmp'
# S3_FILE_UPLOAD.upload_file(V_EXPORT_FILENAME,
#                            V_AWS_S3,
# 'datapipeline_tmp/{}'.format(SHORT_FILENAME))
### --------------------------------------------------------------
### Cleanup process
###
f_log('Cleanup process in days... ' + V_CLEANUP_DAYS, 1)
if os.name != 'nt':  ## the find-based cleanup only works on POSIX systems
f_cleanup_process(V_CLEANUP_DAYS, V_LOG_DIR)
f_cleanup_process(V_CLEANUP_DAYS, V_DATA_DIR)
f_cleanup_process(V_CLEANUP_DAYS, V_JSON_DIR)
f_log('Cleanup done in Linux ', 1)
f_log('-------------------- Process end -------------------------- ', 1)
### -------------------------------------------------------------- Close file
if V_BOL_LOG_FILE:
F.close()
V_BOL_LOG_FILE = False
print('print - Process End')
| 42.987784 | 126 | 0.631942 | 3,248 | 24,632 | 4.462746 | 0.142549 | 0.018489 | 0.036426 | 0.01435 | 0.306657 | 0.223801 | 0.186133 | 0.147637 | 0.115005 | 0.088789 | 0 | 0.011553 | 0.216345 | 24,632 | 572 | 127 | 43.062937 | 0.739367 | 0.266767 | 0 | 0.191977 | 1 | 0 | 0.249062 | 0.06478 | 0 | 0 | 0 | 0.001748 | 0 | 1 | 0.017192 | false | 0 | 0.020057 | 0 | 0.048711 | 0.028653 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
213e1a7b560720288d3f4be6cc46d4f9de5f1850 | 863 | py | Python | backend/home/migrations/0003_auto_20220225_1628.py | crowdbotics-apps/myclub-33848 | 3f02f80851419194beb68f6b0caf7630ff10593f | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0003_auto_20220225_1628.py | crowdbotics-apps/myclub-33848 | 3f02f80851419194beb68f6b0caf7630ff10593f | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0003_auto_20220225_1628.py | crowdbotics-apps/myclub-33848 | 3f02f80851419194beb68f6b0caf7630ff10593f | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | # Generated by Django 2.2.26 on 2022-02-25 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_listecategorie_utilisateurs"),
]
operations = [
migrations.CreateModel(
name="Listerole",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("nom", models.CharField(max_length=16)),
],
),
migrations.AddField(
model_name="utilisateurs",
name="photo",
field=models.URLField(blank=True, null=True),
),
]
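
# This file is applied like any Django migration, e.g. with the standard
# manage.py command (app label taken from the dependency above):
#   python manage.py migrate home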
| 25.382353 | 57 | 0.453071 | 67 | 863 | 5.731343 | 0.731343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045929 | 0.444959 | 863 | 33 | 58 | 26.151515 | 0.755741 | 0.053302 | 0 | 0.148148 | 1 | 0 | 0.084663 | 0.039264 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037037 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21494c69ffef8b679fa92b163bcdf892d0556e01 | 466 | py | Python | application/__init__.py | thec0sm0s/Quick-Notes | 09940a1dc7780b16fadb1e43d7734b101dd989de | [
"MIT"
] | 1 | 2020-10-18T02:34:26.000Z | 2020-10-18T02:34:26.000Z | application/__init__.py | thec0sm0s/Quick-Notes | 09940a1dc7780b16fadb1e43d7734b101dd989de | [
"MIT"
] | 8 | 2020-09-28T10:01:31.000Z | 2020-10-12T04:51:25.000Z | application/__init__.py | thec0sm0s/cosnote | 09940a1dc7780b16fadb1e43d7734b101dd989de | [
"MIT"
] | 4 | 2020-09-28T11:47:27.000Z | 2020-10-12T06:54:06.000Z | from flask import Flask
from flask_cors import CORS
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt()
def get_app(configs=None):
from . import api
from . import resource
app = Flask(__name__)
app.config.from_object(configs)
_cors = CORS(app, supports_credentials=True, origins=["http://localhost:3000", ])
bcrypt.init_app(app)
resource.initialize_mongo_connection(configs)
app.register_blueprint(api.blueprint)
return app
| 20.26087 | 85 | 0.729614 | 61 | 466 | 5.344262 | 0.47541 | 0.082822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010471 | 0.180258 | 466 | 22 | 86 | 21.181818 | 0.842932 | 0 | 0 | 0 | 0 | 0 | 0.045064 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.357143 | 0 | 0.5 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
2156d626cca5172c098911f8f87121b702a8db66 | 1,455 | py | Python | api/routes/user_router.py | cgiroux86/TeamInterview | c59285479298f84543f80bb9f00a01bc4cef3eb1 | [
"MIT"
] | 1 | 2020-08-31T16:19:11.000Z | 2020-08-31T16:19:11.000Z | api/routes/user_router.py | cgiroux86/TeamInterview | c59285479298f84543f80bb9f00a01bc4cef3eb1 | [
"MIT"
] | null | null | null | api/routes/user_router.py | cgiroux86/TeamInterview | c59285479298f84543f80bb9f00a01bc4cef3eb1 | [
"MIT"
] | null | null | null | from flask import Blueprint, request, jsonify
from api.models.user_model import User, UserPasswords, db
from flask_bcrypt import Bcrypt
## Module-level Bcrypt instance used to hash incoming passwords before storage.
bcrypt = Bcrypt()

user_bp = Blueprint('user_bp', __name__)
def validate_register_fields(req):
    data = req.get_json(silent=True)
    if not data:
        ## get_json(silent=True) returns None on a missing/invalid JSON body
        return False
    fields = ['first_name', 'last_name', 'email', 'password']
    for f in fields:
        if f not in data:
            return False
    return True
@user_bp.route('/test', methods=['POST'])
def test():
return f'{validate_register_fields(request)}'
@user_bp.route('/register', methods=['POST'])
def register():
if validate_register_fields(request):
data = request.get_json(silent=True)
first_name = data['first_name']
last_name = data['last_name']
email = data['email']
        ## hash the plaintext password before storing it (assumption: the
        ## UserPasswords.digest column is meant to hold a bcrypt hash)
        digest = bcrypt.generate_password_hash(data['password']).decode('utf-8')
user = User(
first_name=first_name,
last_name=last_name,
email=email,
)
try:
db.session.add(user)
db.session.commit()
except Exception as e:
return(str(e))
try:
user_pw = UserPasswords(
user_id=user.id,
digest=digest
)
db.session.add(user_pw)
db.session.commit()
return jsonify(user.serialize()), 201
except Exception as e:
return str(e)
else:
return jsonify(error='missing required fields with a hot reload!'), 400
| 26.944444 | 79 | 0.586942 | 175 | 1,455 | 4.702857 | 0.36 | 0.054678 | 0.058323 | 0.061968 | 0.068044 | 0.068044 | 0.068044 | 0 | 0 | 0 | 0 | 0.005929 | 0.304467 | 1,455 | 53 | 80 | 27.45283 | 0.807312 | 0 | 0 | 0.136364 | 0 | 0 | 0.116838 | 0.024055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0.090909 | 0.068182 | 0.022727 | 0.272727 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
215b643496a981463dbb6c2efc37c1fc0cfbec6d | 132,394 | py | Python | pywikibot/page.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | [
"MIT"
] | null | null | null | pywikibot/page.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | [
"MIT"
] | null | null | null | pywikibot/page.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Objects representing various types of MediaWiki pages.
"""
#
# (C) Pywikipedia bot team, 2008-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: 215b643496a981463dbb6c2efc37c1fc0cfbec6d $'
import pywikibot
from pywikibot import deprecate_arg
from pywikibot import deprecated
from pywikibot import config
import pywikibot.site
import htmlentitydefs
import logging
import re
import threading
import unicodedata
import urllib
logger = logging.getLogger("pywiki.wiki.page")
# Pre-compile re expressions
reNamespace = re.compile("^(.+?) *: *(.*)$")
# Note: Link objects (defined later on) represent a wiki-page's title, while
# Page objects (defined here) represent the page itself, including its contents.
class Page(object):
"""Page: A MediaWiki page
This object only implements internally methods that do not require
reading from or writing to the wiki. All other methods are delegated
to the Site object.
"""
@deprecate_arg("insite", None)
@deprecate_arg("defaultNamespace", "ns")
def __init__(self, source, title=u"", ns=0):
"""Instantiate a Page object.
Three calling formats are supported:
- If the first argument is a Page, create a copy of that object.
This can be used to convert an existing Page into a subclass
object, such as Category or ImagePage. (If the title is also
given as the second argument, creates a copy with that title;
this is used when pages are moved.)
- If the first argument is a Site, create a Page on that Site
using the second argument as the title (may include a section),
and the third as the namespace number. The namespace number is
mandatory, even if the title includes the namespace prefix. This
is the preferred syntax when using an already-normalized title
obtained from api.php or a database dump. WARNING: may produce
invalid objects if page title isn't in normal form!
- If the first argument is a Link, create a Page from that link.
This is the preferred syntax when using a title scraped from
wikitext, URLs, or another non-normalized source.
@param source: the source of the page
@type source: Link, Page (or subclass), or Site
@param title: normalized title of the page; required if source is a
Site, ignored otherwise
@type title: unicode
@param ns: namespace number; required if source is a Site, ignored
otherwise
@type ns: int
"""
if isinstance(source, pywikibot.site.BaseSite):
self._link = Link(title, source=source, defaultNamespace=ns)
self._revisions = {}
elif isinstance(source, Page):
# copy all of source's attributes to this object
self.__dict__ = source.__dict__
if title:
# overwrite title
self._link = Link(title, source=source.site, defaultNamespace=ns)
elif isinstance(source, Link):
self._link = source
self._revisions = {}
else:
raise pywikibot.Error(
"Invalid argument type '%s' in Page constructor: %s"
% (type(source), source))
@property
def site(self):
"""Return the Site object for the wiki on which this Page resides."""
return self._link.site
@property
def image_repository(self):
"""Return the Site object for the image repository."""
return self.site.image_repository()
@property
def data_repository(self):
"""Return the Site object for the data repository."""
return self.site.data_repository()
def namespace(self):
"""Return the number of the namespace of the page.
"""
return self._link.namespace
@deprecate_arg("decode", None)
@deprecate_arg("savetitle", "asUrl")
def title(self, underscore=False, savetitle=False, withNamespace=True,
withSection=True, asUrl=False, asLink=False,
allowInterwiki=True, forceInterwiki=False, textlink=False,
as_filename=False):
"""Return the title of this Page, as a Unicode string.
@param underscore: if true, replace all ' ' characters with '_'
@param withNamespace: if false, omit the namespace prefix
@param withSection: if false, omit the section
@param asUrl: if true, quote title as if in an URL
@param asLink: if true, return the title in the form of a wikilink
@param allowInterwiki: (only used if asLink is true) if true, format
the link as an interwiki link if necessary
@param forceInterwiki: (only used if asLink is true) if true, always
format the link as an interwiki link
@param textlink: (only used if asLink is true) if true, place a ':'
before Category: and Image: links
@param as_filename: if true, replace any characters that are unsafe
in filenames
"""
title = self._link.canonical_title()
if withSection and self._link.section:
title = title + "#" + self._link.section
if asLink:
if forceInterwiki or (allowInterwiki and
(self.site.family.name != config.family
or self.site.code != config.mylang)):
if self.site.family.name != config.family \
and self.site.family.name != self.site.code:
return u'[[%s:%s:%s]]' % (self.site.family.name,
self.site.code,
title)
else:
# use this form for sites like commons, where the
# code is the same as the family name
return u'[[%s:%s]]' % (self.site.code,
title)
elif textlink and (self.isImage() or self.isCategory()):
return u'[[:%s]]' % title
else:
return u'[[%s]]' % title
if not withNamespace and self.namespace() != 0:
title = self._link.title
if withSection and self._link.section:
title = title + "#" + self._link.section
if underscore or asUrl:
title = title.replace(u' ', u'_')
if asUrl:
encodedTitle = title.encode(self.site.encoding())
title = urllib.quote(encodedTitle)
if as_filename:
# Replace characters that are not possible in file names on some
# systems.
# Spaces are possible on most systems, but are bad for URLs.
for forbidden in ':*?/\\ ':
title = title.replace(forbidden, '_')
return title
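    # Illustrative calls (hypothetical page; exact output depends on the site):
    #   Page(site, u'Talk:Foo bar#Baz').title()                    -> u'Talk:Foo bar#Baz'
    #   Page(site, u'Talk:Foo bar#Baz').title(withNamespace=False) -> u'Foo bar#Baz'
    #   Page(site, u'Talk:Foo bar#Baz').title(asLink=True)         -> u'[[Talk:Foo bar#Baz]]'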
@deprecate_arg("decode", None)
@deprecate_arg("underscore", None)
def section(self):
"""Return the name of the section this Page refers to.
The section is the part of the title following a '#' character, if
any. If no section is present, return None.
"""
return self._link.section
def __str__(self):
"""Return a console representation of the pagelink."""
return self.title(asLink=True, forceInterwiki=True
).encode(config.console_encoding,
"xmlcharrefreplace")
def __unicode__(self):
return self.title(asLink=True, forceInterwiki=True)
def __repr__(self):
"""Return a more complete string representation."""
return "%s(%s)" % (self.__class__.__name__,
self.title().encode(config.console_encoding))
def __cmp__(self, other):
"""Test for equality and inequality of Page objects.
Page objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Page objects are sortable by namespace first, then by title.
"""
if not isinstance(other, Page):
# especially, return -1 if other is None
return -1
if self.site != other.site:
return cmp(self.site, other.site)
if self.namespace() != other.namespace():
return cmp(self.namespace(), other.namespace())
return cmp(self._link.title, other._link.title)
def __hash__(self):
# Pseudo method that makes it possible to store Page objects as keys
# in hash-tables. This relies on the fact that the string
# representation of an instance can not change after the construction.
return hash(unicode(self))
def autoFormat(self):
"""Return L{date.autoFormat} dictName and value, if any.
Value can be a year, date, etc., and dictName is 'YearBC',
'Year_December', or another dictionary name. Please note that two
entries may have exactly the same autoFormat, but be in two
different namespaces, as some sites have categories with the
same names. Regular titles return (None, None).
"""
if not hasattr(self, '_autoFormat'):
from pywikibot import date
self._autoFormat = date.getAutoFormat(
self.site.code,
self.title(withNamespace=False)
)
return self._autoFormat
def isAutoTitle(self):
"""Return True if title of this Page is in the autoFormat dictionary."""
return self.autoFormat()[0] is not None
@deprecate_arg("throttle", None)
@deprecate_arg("change_edit_time", None)
def get(self, force=False, get_redirect=False, sysop=False):
"""Return the wiki-text of the page.
This will retrieve the page from the server if it has not been
retrieved yet, or if force is True. This can raise the following
exceptions that should be caught by the calling code:
@exception NoPage The page does not exist
@exception IsRedirectPage The page is a redirect. The argument of the
exception is the title of the page it
redirects to.
@exception SectionError The section does not exist on a page with
a # link
@param force reload all page attributes, including errors.
@param get_redirect return the redirect text, do not follow the
redirect, do not raise an exception.
@param sysop if the user has a sysop account, use it to
retrieve this page
"""
if force:
# When forcing, we retry the page no matter what:
# * Old exceptions do not apply any more
# * Deleting _revid to force reload
# * Deleting _redirtarget, that info is now obsolete.
for attr in ['_redirtarget', '_getexception', '_revid']:
if hasattr(self, attr):
delattr(self, attr)
try:
self._getInternals(sysop)
except pywikibot.IsRedirectPage:
if not get_redirect:
raise
return self._revisions[self._revid].text
def _getInternals(self, sysop):
"""Helper function for get().
        Stores the latest revision in self if it doesn't already contain it.
* Raises exceptions from previous runs.
* Stores new exceptions in _getexception and raises them.
"""
# Raise exceptions from previous runs
if hasattr(self, '_getexception'):
raise self._getexception
# If not already stored, fetch revision
if not hasattr(self, "_revid") \
or not self._revid in self._revisions \
or self._revisions[self._revid].text is None:
try:
self.site.loadrevisions(self, getText=True, sysop=sysop)
except (pywikibot.NoPage, pywikibot.SectionError), e:
self._getexception = e
raise
# self._isredir is set by loadrevisions
if self._isredir:
self._getexception = pywikibot.IsRedirectPage(self)
raise self._getexception
@deprecate_arg("throttle", None)
@deprecate_arg("change_edit_time", None)
def getOldVersion(self, oldid, force=False, get_redirect=False,
sysop=False):
"""Return text of an old revision of this page; same options as get().
@param oldid: The revid of the revision desired.
"""
if force or not oldid in self._revisions \
or self._revisions[oldid].text is None:
self.site.loadrevisions(self, getText=True, revids=oldid,
sysop=sysop)
# TODO: what about redirects, errors?
return self._revisions[oldid].text
def permalink(self, oldid=None):
"""Return the permalink URL of an old revision of this page.
@param oldid: The revid of the revision desired.
"""
return "//%s%s/index.php?title=%s&oldid=%s" \
% (self.site.hostname(),
self.site.scriptpath(),
self.title(asUrl=True),
(oldid if oldid is not None else self.latestRevision()))
def latestRevision(self):
"""Return the current revision id for this page."""
if not hasattr(self, '_revid'):
self.site.loadrevisions(self)
return self._revid
@property
def text(self):
"""Return the current (edited) wikitext, loading it if necessary."""
if not hasattr(self, '_text') or self._text is None:
try:
self._text = self.get(get_redirect=True)
except pywikibot.NoPage:
# TODO: what other exceptions might be returned?
self._text = u""
return self._text
@text.setter
def text(self, value):
"""Update the edited wikitext"""
self._text = None if value is None else unicode(value)
@text.deleter
def text(self):
"""Delete the edited wikitext"""
if hasattr(self, "_text"):
del self._text
def expand_text(self, refresh=False):
"""Return the page text with all templates expanded."""
if not hasattr(self, "_expanded_text") or (self._expanded_text is None) or refresh:
req = pywikibot.data.api.Request(action="expandtemplates",
text=self.text,
title=self.title(withSection=False),
site=self.site)
self._expanded_text = req.submit()["expandtemplates"]["*"]
return self._expanded_text
def userName(self):
"""Return name or IP address of last user to edit page.
"""
rev = self.latestRevision()
if rev not in self._revisions:
self.site.loadrevisions(self)
return self._revisions[rev].user
def isIpEdit(self):
"""Return True if last editor was unregistered.
"""
rev = self.latestRevision()
if rev not in self._revisions:
self.site.loadrevisions(self)
return self._revisions[rev].anon
def editTime(self):
"""Return timestamp of last revision to page.
"""
rev = self.latestRevision()
if rev not in self._revisions:
self.site.loadrevisions(self)
return self._revisions[rev].timestamp
def previousRevision(self):
"""Return the revision id for the previous revision of this Page."""
vh = self.getVersionHistory(total=2)
revkey = sorted(self._revisions, reverse=True)[1]
return revkey
def exists(self):
"""Return True if page exists on the wiki, even if it's a redirect.
If the title includes a section, return False if this section isn't
found.
"""
return self.site.page_exists(self)
def isRedirectPage(self):
"""Return True if this is a redirect, False if not or not existing."""
return self.site.page_isredirect(self)
def isStaticRedirect(self, force=False):
"""Return True if this is a redirect containing the magic word
__STATICREDIRECT__, False if not or not existing.
"""
found = False
if self.isRedirectPage():
staticKeys = self.site.getmagicwords('staticredirect')
text = self.get(get_redirect=True, force=force)
if staticKeys:
for key in staticKeys:
if key in text:
found = True
break
return found
def isCategoryRedirect(self):
"""Return True if this is a category redirect page, False otherwise."""
if not self.isCategory():
return False
if not hasattr(self, "_catredirect"):
catredirs = self.site.category_redirects()
for (template, args) in self.templatesWithParams():
if template.title(withNamespace=False) in catredirs:
# Get target (first template argument)
try:
self._catredirect = self.site.namespace(14) \
+ ":" + args[0].strip()
break
except IndexError:
pywikibot.warning(
u"No target for category redirect on %s"
% self.title(asLink=True))
self._catredirect = False
break
else:
self._catredirect = False
return bool(self._catredirect)
def getCategoryRedirectTarget(self):
"""If this is a category redirect, return the target category title."""
if self.isCategoryRedirect():
return Category(Link(self._catredirect, self.site))
raise pywikibot.IsNotRedirectPage(self.title())
def isEmpty(self):
"""Return True if the page text has less than 4 characters.
Character count ignores language links and category links.
Can raise the same exceptions as get().
"""
txt = self.get()
txt = pywikibot.removeLanguageLinks(txt, site=self.site)
txt = pywikibot.removeCategoryLinks(txt, site=self.site)
if len(txt) < 4:
return True
else:
return False
def isTalkPage(self):
"""Return True if this page is in any talk namespace."""
ns = self.namespace()
return ns >= 0 and ns % 2 == 1
def toggleTalkPage(self):
"""Return other member of the article-talk page pair for this Page.
If self is a talk page, returns the associated content page;
otherwise, returns the associated talk page. The returned page need
not actually exist on the wiki.
Returns None if self is a special page.
"""
ns = self.namespace()
if ns < 0: # Special page
return None
if self.isTalkPage():
if self.namespace() == 1:
return Page(self.site, self.title(withNamespace=False))
else:
return Page(self.site,
self.site.namespace(ns - 1) + ':'
+ self.title(withNamespace=False))
else:
return Page(self.site,
self.site.namespace(ns + 1) + ':'
+ self.title(withNamespace=False))
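    # Illustrative round trip (hypothetical titles):
    #   Page(site, u'Foo').toggleTalkPage()       -> Page(u'Talk:Foo')
    #   Page(site, u'Talk:Foo').toggleTalkPage()  -> Page(u'Foo')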
def isCategory(self):
"""Return True if the page is a Category, False otherwise."""
return self.namespace() == 14
def isImage(self):
"""Return True if this is an image description page, False otherwise."""
return self.namespace() == 6
def isDisambig(self, get_Index=True):
"""Return True if this is a disambiguation page, False otherwise.
Relies on the presence of specific templates, identified in
the Family file or on a wiki page, to identify disambiguation
pages.
By default, loads a list of template names from the Family file;
if the value in the Family file is None no entry was made, looks for
the list on [[MediaWiki:Disambiguationspage]]. If this page does not
exist, take the mediawiki message.
If get_Index is True then also load the templates for index articles
which are given on en-wiki
Template:Disambig is always assumed to be default, and will be
appended regardless of its existence.
"""
if not hasattr(self.site, "_disambigtemplates"):
try:
default = set(self.site.family.disambig('_default'))
except KeyError:
default = set([u'Disambig'])
try:
distl = self.site.family.disambig(self.site.code,
fallback=False)
except KeyError:
distl = None
if distl is None:
disambigpages = Page(self.site,
"MediaWiki:Disambiguationspage")
indexes = set()
if disambigpages.exists():
disambigs = set(link.title(withNamespace=False)
for link in disambigpages.linkedPages()
if link.namespace() == 10)
# cache index article templates separately
if self.site.sitename() == 'wikipedia:en':
regex = re.compile('\(\((.+?)\)\)')
content = disambigpages.get()
for index in regex.findall(content):
indexes.add(index[:1].upper() + index[1:])
self.site._indextemplates = indexes
else:
message = self.site.mediawiki_message(
'disambiguationspage').split(':', 1)[1]
# add the default template(s) for default mw message
# only
disambigs = set([message[:1].upper() +
message[1:]]) | default
self.site._disambigtemplates = disambigs
else:
# Normalize template capitalization
self.site._disambigtemplates = set(
t[:1].upper() + t[1:] for t in distl
)
templates = set(tl.title(withNamespace=False)
for tl in self.templates())
disambigs = set()
# always use cached disambig templates
disambigs.update(self.site._disambigtemplates)
# if get_Index is True, also use cached index templates
if get_Index and hasattr(self.site, '_indextemplates'):
disambigs.update(self.site._indextemplates)
# see if any template on this page is in the set of disambigs
disambigInPage = disambigs.intersection(templates)
return self.namespace() != 10 and len(disambigInPage) > 0
def getReferences(self, follow_redirects=True, withTemplateInclusion=True,
onlyTemplateInclusion=False, redirectsOnly=False,
namespaces=None, step=None, total=None, content=False):
"""Return an iterator all pages that refer to or embed the page.
If you need a full list of referring pages, use
C{pages = list(s.getReferences())}
@param follow_redirects: if True, also iterate pages that link to a
redirect pointing to the page.
@param withTemplateInclusion: if True, also iterate pages where self
is used as a template.
@param onlyTemplateInclusion: if True, only iterate pages where self
is used as a template.
@param redirectsOnly: if True, only iterate redirects to self.
@param namespaces: only iterate pages in these namespaces
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each referring page (default False)
"""
# N.B.: this method intentionally overlaps with backlinks() and
# embeddedin(). Depending on the interface, it may be more efficient
# to implement those methods in the site interface and then combine
# the results for this method, or to implement this method and then
# split up the results for the others.
return self.site.pagereferences(
self,
followRedirects=follow_redirects,
filterRedirects=redirectsOnly,
withTemplateInclusion=withTemplateInclusion,
onlyTemplateInclusion=onlyTemplateInclusion,
namespaces=namespaces, step=step,
total=total, content=content)
def backlinks(self, followRedirects=True, filterRedirects=None,
namespaces=None, step=None, total=None, content=False):
"""Return an iterator for pages that link to this page.
@param followRedirects: if True, also iterate pages that link to a
redirect pointing to the page.
@param filterRedirects: if True, only iterate redirects; if False,
omit redirects; if None, do not filter
@param namespaces: only iterate pages in these namespaces
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each referring page (default False)
"""
return self.site.pagebacklinks(self,
followRedirects=followRedirects,
filterRedirects=filterRedirects,
namespaces=namespaces, step=step,
total=total, content=content)
def embeddedin(self, filter_redirects=None, namespaces=None, step=None,
total=None, content=False):
"""Return an iterator for pages that embed this page as a template.
@param filterRedirects: if True, only iterate redirects; if False,
omit redirects; if None, do not filter
@param namespaces: only iterate pages in these namespaces
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each embedding page (default False)
"""
return self.site.page_embeddedin(self,
filterRedirects=filter_redirects,
namespaces=namespaces,
step=step, total=total,
content=content)
def canBeEdited(self):
"""Return bool indicating whether this page can be edited.
This returns True if and only if:
- page is unprotected, and bot has an account for this site, or
- page is protected, and bot has a sysop account for this site.
"""
return self.site.page_can_be_edited(self)
def botMayEdit(self):
"""Return True if this page allows bots to edit it.
This will be True if the page doesn't contain {{bots}} or
{{nobots}}, or it contains them and the active bot is allowed to
edit this page. (This method is only useful on those sites that
recognize the bot-exclusion protocol; on other sites, it will always
return True.)
The framework enforces this restriction by default. It is possible
to override this by setting ignore_bot_templates=True in
user-config.py, or using page.put(force=True).
"""
# TODO: move this to Site object?
        if config.ignore_bot_templates:  # Check the "master ignore switch"
return True
username = self.site.user()
try:
templates = self.templatesWithParams()
except (pywikibot.NoPage,
pywikibot.IsRedirectPage,
pywikibot.SectionError):
return True
# go through all templates and look for any restriction
# multiple bots/nobots templates are allowed
for template in templates:
title = template[0].title(withNamespace=False)
if title == 'Nobots':
if len(template[1]) == 0:
return False
else:
bots = template[1][0].split(',')
                    if 'all' in bots or pywikibot.calledModuleName() in bots \
or username in bots:
return False
elif title == 'Bots':
if len(template[1]) == 0:
return True
else:
(ttype, bots) = template[1][0].split('=', 1)
bots = bots.split(',')
if ttype == 'allow':
return 'all' in bots or username in bots
if ttype == 'deny':
return not ('all' in bots or username in bots)
                    if ttype == 'allowscript':
                        return 'all' in bots or pywikibot.calledModuleName() in bots
                    if ttype == 'denyscript':
                        return not ('all' in bots
                                    or pywikibot.calledModuleName() in bots)
# no restricting template found
return True
def save(self, comment=None, watch=None, minor=True, botflag=None,
force=False, async=False, callback=None, **kwargs):
"""Save the current contents of page's text to the wiki.
@param comment: The edit summary for the modification (optional, but
most wikis strongly encourage its use)
@type comment: unicode
@param watch: if True, add or if False, remove this Page to/from bot
user's watchlist; if None (default), follow bot account's default
settings
@type watch: bool or None
@param minor: if True, mark this edit as minor
@type minor: bool
@param botflag: if True, mark this edit as made by a bot (default:
True if user has bot status, False if not)
@param force: if True, ignore botMayEdit() setting
@type force: bool
@param async: if True, launch a separate thread to save
asynchronously
@param callback: a callable object that will be called after the
page put operation. This object must take two arguments: (1) a
Page object, and (2) an exception instance, which will be None
if the page was saved successfully. The callback is intended for
use by bots that need to keep track of which saves were
successful.
"""
if not comment:
comment = config.default_edit_summary
if watch is None:
watchval = None
elif watch:
watchval = "watch"
else:
watchval = "unwatch"
if not force and not self.botMayEdit():
raise pywikibot.PageNotSaved(
"Page %s not saved; editing restricted by {{bots}} template"
% self.title(asLink=True))
if botflag is None:
botflag = ("bot" in self.site.userinfo["rights"])
if async:
pywikibot.async_request(self._save, comment=comment, minor=minor,
watchval=watchval, botflag=botflag,
async=async, callback=callback, **kwargs)
else:
self._save(comment=comment, minor=minor, watchval=watchval,
botflag=botflag, async=async, callback=callback,
**kwargs)
def _save(self, comment, minor, watchval, botflag, async, callback, **kwargs):
err = None
link = self.title(asLink=True)
if config.cosmetic_changes:
comment = self._cosmetic_changes_hook(comment) or comment
try:
done = self.site.editpage(self, summary=comment, minor=minor,
watch=watchval, bot=botflag, **kwargs)
if not done:
pywikibot.warning(u"Page %s not saved" % link)
raise pywikibot.PageNotSaved(link)
else:
pywikibot.output(u"Page %s saved" % link)
except pywikibot.LockedPage, err:
# re-raise the LockedPage exception so that calling program
# can re-try if appropriate
if not callback and not async:
raise
# TODO: other "expected" error types to catch?
except pywikibot.Error, err:
pywikibot.log(u"Error saving page %s (%s)\n" % (link, err),
exc_info=True)
if not callback and not async:
                raise pywikibot.PageNotSaved("%s: %s" % (link, err))
if callback:
callback(self, err)
def _cosmetic_changes_hook(self, comment):
if self.isTalkPage() or \
pywikibot.calledModuleName() in config.cosmetic_changes_deny_script:
return
family = self.site.family.name
config.cosmetic_changes_disable.update({'wikidata': ('repo', )})
if config.cosmetic_changes_mylang_only:
cc = (family == config.family and \
self.site.lang == config.mylang) or \
family in config.cosmetic_changes_enable.keys() and \
self.site.lang in config.cosmetic_changes_enable[family]
else:
cc = True
cc = cc and not \
(family in config.cosmetic_changes_disable.keys() and \
self.site.lang in config.cosmetic_changes_disable[family])
if not cc:
return
old = self.text
pywikibot.log(u'Cosmetic changes for %s-%s enabled.'
% (family, self.site.lang))
from scripts.cosmetic_changes import CosmeticChangesToolkit
from pywikibot import i18n
ccToolkit = CosmeticChangesToolkit(self.site,
redirect=self.isRedirectPage(),
namespace=self.namespace(),
pageTitle=self.title())
self.text = ccToolkit.change(old)
if comment and \
old.strip().replace('\r\n', '\n') != self.text.strip().replace('\r\n', '\n'):
comment += i18n.twtranslate(self.site, 'cosmetic_changes-append')
return comment
def put(self, newtext, comment=u'', watchArticle=None, minorEdit=True,
botflag=None, force=False, async=False, callback=None, **kwargs):
"""Save the page with the contents of the first argument as the text.
This method is maintained primarily for backwards-compatibility.
For new code, using Page.save() is preferred. See save() method
docs for all parameters not listed here.
@param newtext: The complete text of the revised page.
@type newtext: unicode
"""
self.text = newtext
return self.save(comment=comment, watch=watchArticle,
minor=minorEdit, botflag=botflag, force=force,
async=async, callback=callback, **kwargs)
def put_async(self, newtext, comment=u'', watchArticle=None,
minorEdit=True, botflag=None, force=False, callback=None,
**kwargs):
"""Put page on queue to be saved to wiki asynchronously.
Asynchronous version of put (takes the same arguments), which places
pages on a queue to be saved by a daemon thread. All arguments are
the same as for .put(). This version is maintained solely for
backwards-compatibility.
"""
return self.put(newtext, comment=comment, watchArticle=watchArticle,
minorEdit=minorEdit, botflag=botflag, force=force,
async=True, callback=callback, **kwargs)
def watch(self, unwatch=False):
"""Add or remove this page to/from bot account's watchlist.
@param unwatch: True to unwatch, False (default) to watch.
@return: True if successful, False otherwise.
"""
return self.site.watchpage(self, unwatch)
def linkedPages(self, namespaces=None, step=None, total=None,
content=False):
"""Iterate Pages that this Page links to.
Only returns pages from "normal" internal links. Image and category
links are omitted unless prefixed with ":". Embedded templates are
omitted (but links within them are returned). All interwiki and
external links are omitted.
@param namespaces: only iterate links in these namespaces
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each linked page (default False)
@return: a generator that yields Page objects.
"""
return self.site.pagelinks(self, namespaces=namespaces, step=step,
total=total, content=content)
def interwiki(self, expand=True):
"""Iterate interwiki links in the page text, excluding language links.
@param expand: if True (default), include interwiki links found in
templates transcluded onto this page; if False, only iterate
interwiki links found in this page's own wikitext
@return: a generator that yields Link objects
"""
# This function does not exist in the API, so it has to be
# implemented by screen-scraping
if expand:
text = self.expand_text()
else:
text = self.text
for linkmatch in pywikibot.link_regex.finditer(
pywikibot.removeDisabledParts(text)):
linktitle = linkmatch.group("title")
link = Link(linktitle, self.site)
# only yield links that are to a different site and that
# are not language links
try:
if link.site != self.site:
if linktitle.lstrip().startswith(":"):
# initial ":" indicates not a language link
yield link
elif link.site.family != self.site.family:
# link to a different family is not a language link
yield link
except pywikibot.Error:
# ignore any links with invalid contents
continue
def langlinks(self):
"""Return a list of all interlanguage Links on this page.
"""
# Data might have been preloaded
if not hasattr(self, '_langlinks'):
self._langlinks = list(self.iterlanglinks())
return self._langlinks
def iterlanglinks(self, step=None, total=None):
"""Iterate all interlanguage links on this page.
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@return: a generator that yields Link objects.
"""
if hasattr(self, '_langlinks'):
return iter(self._langlinks)
# XXX We might want to fill _langlinks when the Site
# method is called. If we do this, we'll have to think
# about what will happen if the generator is not completely
# iterated upon.
return self.site.pagelanglinks(self, step=step, total=total)
def templates(self, content=False):
"""Return a list of Page objects for templates used on this Page.
Template parameters are ignored. This method only returns embedded
templates, not template pages that happen to be referenced through
a normal link.
@param content: if True, retrieve the content of the current version
of each template (default False)
"""
# Data might have been preloaded
if not hasattr(self, '_templates'):
self._templates = list(self.itertemplates(content=content))
return self._templates
def itertemplates(self, step=None, total=None, content=False):
"""Iterate Page objects for templates used on this Page.
Template parameters are ignored. This method only returns embedded
templates, not template pages that happen to be referenced through
a normal link.
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each template (default False)
"""
if hasattr(self, '_templates'):
return iter(self._templates)
return self.site.pagetemplates(self, step=step, total=total,
content=content)
@deprecate_arg("followRedirects", None)
@deprecate_arg("loose", None)
def imagelinks(self, step=None, total=None, content=False):
"""Iterate ImagePage objects for images displayed on this Page.
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each image description page (default False)
@return: a generator that yields ImagePage objects.
"""
return self.site.pageimages(self, step=step, total=total,
content=content)
@deprecate_arg("get_redirect", None)
def templatesWithParams(self):
"""Iterate templates used on this Page.
@return: a generator that yields a tuple for each use of a template
in the page, with the template Page as the first entry and a list of
parameters as the second entry.
"""
# WARNING: may not return all templates used in particularly
# intricate cases such as template substitution
titles = list(t.title() for t in self.templates())
templates = pywikibot.extract_templates_and_params(self.text)
# backwards-compatibility: convert the dict returned as the second
# element into a list in the format used by old scripts
result = []
for template in templates:
link = pywikibot.Link(template[0], self.site,
defaultNamespace=10)
try:
if link.canonical_title() not in titles:
continue
except pywikibot.Error:
# this is a parser function or magic word, not template name
continue
args = template[1]
intkeys = {}
named = {}
positional = []
for key in sorted(args):
try:
intkeys[int(key)] = args[key]
except ValueError:
named[key] = args[key]
for i in xrange(1, len(intkeys)+1):
# only those args with consecutive integer keys can be
# treated as positional; an integer could also be used
# (out of order) as the key for a named argument
# example: {{tmp|one|two|5=five|three}}
if i in intkeys:
positional.append(intkeys[i])
else:
for k in intkeys:
if k < 1 or k >= i:
named[str(k)] = intkeys[k]
break
for name in named:
positional.append("%s=%s" % (name, named[name]))
result.append((pywikibot.Page(link, self.site), positional))
return result
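    # Worked example of the returned format (hypothetical wikitext): for a
    # page containing {{tmp|one|two|5=five|three}}, this method yields
    # (Page(u'Template:Tmp'), [u'one', u'two', u'three', u'5=five']):
    # consecutive integer keys become positional strings, everything else
    # is rendered as "name=value".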
@deprecate_arg("nofollow_redirects", None)
@deprecate_arg("get_redirect", None)
def categories(self, withSortKey=False, step=None, total=None,
content=False):
"""Iterate categories that the article is in.
@param withSortKey: if True, include the sort key in each Category.
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, retrieve the content of the current version
of each category description page (default False)
@return: a generator that yields Category objects.
"""
return self.site.pagecategories(self, withSortKey=withSortKey,
step=step, total=total, content=content)
def extlinks(self, step=None, total=None):
"""Iterate all external URLs (not interwiki links) from this page.
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@return: a generator that yields unicode objects containing URLs.
"""
return self.site.page_extlinks(self, step=step, total=total)
def coordinates(self, primary_only=False):
"""Return a list of Coordinate objects for points
on the page using [[mw:Extension:GeoData]]
@param primary_only: Only return the coordinate indicated to be primary
@return: A list of Coordinate objects
"""
if not hasattr(self, '_coords'):
self.site.loadcoordinfo(self)
if primary_only:
return self._coords[0]
else:
return self._coords
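    # A minimal usage sketch (illustrative; assumes the wiki has
    # Extension:GeoData enabled):
    #     coords = page.coordinates()                    # list of Coordinate
    #     primary = page.coordinates(primary_only=True)  # one Coordinate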
def getRedirectTarget(self):
"""Return a Page object for the target this Page redirects to.
If this page is not a redirect page, will raise an IsNotRedirectPage
exception. This method also can raise a NoPage exception.
"""
return self.site.getredirtarget(self)
# BREAKING CHANGE: in old framework, default value for getVersionHistory
# returned no more than 500 revisions; now, it iterates
# all revisions unless 'total' argument is used
@deprecate_arg("forceReload", None)
@deprecate_arg("revCount", "total")
@deprecate_arg("getAll", None)
def getVersionHistory(self, reverseOrder=False, step=None,
total=None):
"""Load the version history page and return history information.
Return value is a list of tuples, where each tuple represents one
edit and is built of revision id, edit date/time, user name, and
edit summary. Starts with the most current revision, unless
reverseOrder is True.
@param step: limit each API call to this number of revisions
@param total: iterate no more than this number of revisions in total
"""
self.site.loadrevisions(self, getText=False, rvdir=reverseOrder,
step=step, total=total)
        return [(self._revisions[rev].revid,
                 self._revisions[rev].timestamp,
                 self._revisions[rev].user,
                 self._revisions[rev].comment)
                for rev in sorted(self._revisions,
                                  reverse=not reverseOrder)]
def getVersionHistoryTable(self, forceReload=False, reverseOrder=False,
step=None, total=None):
"""Return the version history as a wiki table."""
result = '{| class="wikitable"\n'
result += '! oldid || date/time || username || edit summary\n'
        # forceReload is a deprecated no-op in getVersionHistory, so it is
        # not forwarded
        for oldid, time, username, summary \
                in self.getVersionHistory(reverseOrder=reverseOrder,
                                          step=step, total=total):
result += '|----\n'
result += '| %s || %s || %s || <nowiki>%s</nowiki>\n'\
% (oldid, time, username, summary)
result += '|}\n'
return result
def fullVersionHistory(self, reverseOrder=False, step=None,
total=None):
"""Iterate previous versions including wikitext.
Takes same arguments as getVersionHistory.
@return: A generator that yields tuples consisting of revision ID,
edit date/time, user name and content
"""
self.site.loadrevisions(self, getText=True,
rvdir=reverseOrder,
step=step, total=total)
        return [(self._revisions[rev].revid,
                 self._revisions[rev].timestamp,
                 self._revisions[rev].user,
                 self._revisions[rev].text)
                for rev in sorted(self._revisions,
                                  reverse=not reverseOrder)]
def contributingUsers(self, step=None, total=None):
"""Return a set of usernames (or IPs) of users who edited this page.
@param step: limit each API call to this number of revisions
@param total: iterate no more than this number of revisions in total
"""
edits = self.getVersionHistory(step=step, total=total)
users = set([edit[2] for edit in edits])
return users
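    # A minimal usage sketch (illustrative): the returned set mixes
    # registered usernames and IP addresses of anonymous editors.
    #     for user in page.contributingUsers(total=100):
    #         pywikibot.output(user)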
@deprecate_arg("throttle", None)
def move(self, newtitle, reason=None, movetalkpage=True, sysop=False,
deleteAndMove=False, safe=True):
"""Move this page to a new title.
@param newtitle: The new page title.
@param reason: The edit summary for the move.
@param movetalkpage: If true, move this page's talk page (if it exists)
@param sysop: Try to move using sysop account, if available
@param deleteAndMove: if move succeeds, delete the old page
(usually requires sysop privileges, depending on wiki settings)
@param safe: If false, attempt to delete existing page at newtitle
(if there is one) and then move this page to that title
"""
if reason is None:
pywikibot.output(u'Moving %s to [[%s]].'
% (self.title(asLink=True), newtitle))
reason = pywikibot.input(u'Please enter a reason for the move:')
# TODO: implement "safe" parameter (Is this necessary ?)
# TODO: implement "sysop" parameter
return self.site.movepage(self, newtitle, reason,
movetalk=movetalkpage,
noredirect=deleteAndMove)
@deprecate_arg("throttle", None)
def delete(self, reason=None, prompt=True, throttle=None, mark=False):
"""Deletes the page from the wiki. Requires administrator status.
@param reason: The edit summary for the deletion. If None, ask for it.
@param prompt: If true, prompt user for confirmation before deleting.
@param mark: if true, and user does not have sysop rights, place a
speedy-deletion request on the page instead.
"""
# TODO: add support for mark
if reason is None:
pywikibot.output(u'Deleting %s.' % (self.title(asLink=True)))
reason = pywikibot.input(u'Please enter a reason for the deletion:')
answer = u'y'
if prompt and not hasattr(self.site, '_noDeletePrompt'):
            answer = pywikibot.inputChoice(
                u'Do you want to delete %s?'
                % self.title(asLink=True, forceInterwiki=True),
                ['Yes', 'No', 'All'], ['Y', 'N', 'A'], 'N')
if answer in ['a', 'A']:
answer = 'y'
self.site._noDeletePrompt = True
if answer in ['y', 'Y']:
try:
return self.site.deletepage(self, reason)
            except pywikibot.NoUsername:
                if mark:
                    raise NotImplementedError(
                        "Marking pages for deletion is not yet available.")
                raise
# all these DeletedRevisions methods need to be reviewed and harmonized
# with the new framework; they do not appear functional
def loadDeletedRevisions(self, step=None, total=None):
"""Retrieve all deleted revisions for this Page from Special/Undelete.
Stores all revisions' timestamps, dates, editors and comments in
self._deletedRevs attribute.
@return: iterator of timestamps (which can be used to retrieve
revisions later on).
"""
if not hasattr(self, "_deletedRevs"):
self._deletedRevs = {}
for item in self.site.deletedrevs(self, step=step, total=total):
for rev in item.get("revisions", []):
self._deletedRevs[rev['timestamp']] = rev
yield rev['timestamp']
def getDeletedRevision(self, timestamp, retrieveText=False):
"""Return a particular deleted revision by timestamp.
@return: a list of [date, editor, comment, text, restoration
marker]. text will be None, unless retrieveText is True (or has
been retrieved earlier). If timestamp is not found, returns
None.
"""
if hasattr(self, "_deletedRevs"):
if timestamp in self._deletedRevs and (
(not retrieveText)
or "content" in self._deletedRevs["timestamp"]):
return self._deletedRevs["timestamp"]
for item in self.site.deletedrevs(self, start=timestamp,
get_text=retrieveText, total=1):
# should only be one item with one revision
            if item['title'] == self.title():
if "revisions" in item:
return item["revisions"][0]
def markDeletedRevision(self, timestamp, undelete=True):
"""Mark the revision identified by timestamp for undeletion.
@param undelete: if False, mark the revision to remain deleted.
"""
if not hasattr(self, "_deletedRevs"):
self.loadDeletedRevisions()
if timestamp not in self._deletedRevs:
#TODO: Throw an exception?
return None
self._deletedRevs[timestamp][4] = undelete
self._deletedRevsModified = True
@deprecate_arg("throttle", None)
def undelete(self, comment=None):
"""Undelete revisions based on the markers set by previous calls.
If no calls have been made since loadDeletedRevisions(), everything
will be restored.
        Simplest case:
            Page(...).undelete('This will restore all revisions')
        More complex:
            pg = Page(...)
            revs = pg.loadDeletedRevisions()
            for rev in revs:
                if ...:  # decide whether to undelete a revision
                    pg.markDeletedRevision(rev)  # mark for undeletion
            pg.undelete('This will restore only selected revisions.')
@param comment: The undeletion edit summary.
"""
if comment is None:
pywikibot.output(u'Preparing to undelete %s.'
% (self.title(asLink=True)))
comment = pywikibot.input(
u'Please enter a reason for the undeletion:')
return self.site.undelete(self, comment)
@deprecate_arg("throttle", None)
def protect(self, edit='sysop', move='sysop', unprotect=False,
reason=None, prompt=True):
"""(Un)protect a wiki page. Requires administrator status.
Valid protection levels (in MediaWiki 1.12) are '' (equivalent to
'none'), 'autoconfirmed', and 'sysop'.
@param edit: Level of edit protection
@param move: Level of move protection
@param unprotect: If true, unprotect the page (equivalent to setting
all protection levels to '')
@param reason: Edit summary.
@param prompt: If true, ask user for confirmation.
"""
if reason is None:
if unprotect:
un = u'un'
else:
un = u''
pywikibot.output(u'Preparing to %sprotect %s.'
% (un, self.title(asLink=True)))
reason = pywikibot.input(u'Please enter a reason for the action:')
if unprotect:
edit = move = ""
answer = 'y'
if prompt and not hasattr(self.site, '_noProtectPrompt'):
            answer = pywikibot.inputChoice(
                u'Do you want to change the protection level of %s?'
                % self.title(asLink=True, forceInterwiki=True),
                ['Yes', 'No', 'All'], ['Y', 'N', 'A'], 'N')
if answer in ['a', 'A']:
answer = 'y'
self.site._noProtectPrompt = True
if answer in ['y', 'Y']:
return self.site.protect(self, edit, move, reason)
def change_category(self, oldCat, newCat, comment=None, sortKey=None,
inPlace=True):
"""Remove page from oldCat and add it to newCat.
oldCat and newCat should be Category objects.
If newCat is None, the category will be removed.
comment: string to use as an edit summary
sortKey: sortKey to use for the added category.
Unused if newCat is None, or if inPlace=True
"""
#TODO: is inPlace necessary?
site = self.site
changesMade = False
if not self.canBeEdited():
pywikibot.output(u"Can't edit %s, skipping it..."
% self.title(asLink=True))
return False
        if inPlace:
            newtext = pywikibot.replaceCategoryInPlace(self.text,
                                                       oldCat, newCat)
            if newtext == self.text:
                pywikibot.output(
                    u'No changes made to page %s.'
                    % self.title(asLink=True))
                return False
try:
self.put(newtext, comment)
return True
except pywikibot.EditConflict:
pywikibot.output(
u'Skipping %s because of edit conflict'
% self.title(asLink=True))
except pywikibot.LockedPage:
pywikibot.output(u'Skipping locked page %s'
% self.title(asLink=True))
except pywikibot.SpamfilterError, error:
pywikibot.output(
u'Changing page %s blocked by spam filter (URL=%s)'
% (self.title(asLink=True), error.url))
except pywikibot.NoUsername:
pywikibot.output(
u"Page %s not saved; sysop privileges required."
% self.title(asLink=True))
except pywikibot.PageNotSaved, error:
pywikibot.output(u"Saving page %s failed: %s"
% (self.title(asLink=True), error.message))
return False
# This loop will replace all occurrences of the category to be changed,
# and remove duplicates.
newCatList = []
newCatSet = set()
        # get_redirect is a deprecated no-op argument of categories()
        cats = list(self.categories())
        for cat in cats:
if cat == oldCat:
changesMade = True
if not sortKey:
sortKey = cat.sortKey
if newCat:
if newCat.title() not in newCatSet:
newCategory = Category(site, newCat.title(),
sortKey=sortKey)
newCatSet.add(newCat.title())
newCatList.append(newCategory)
elif cat.title() not in newCatSet:
newCatSet.add(cat.title())
newCatList.append(cat)
if not changesMade:
pywikibot.output(u'ERROR: %s is not in category %s!'
% (self.title(asLink=True), oldCat.title()))
else:
try:
text = pywikibot.replaceCategoryLinks(self.text, newCatList)
            except ValueError:
                # replaceCategoryLinks() raises ValueError when the page
                # contains an interwiki link to itself; without returning
                # here, 'text' would be undefined below
                pywikibot.output(
                    u'Skipping %s because of interwiki link to self' % self)
                return False
            try:
                self.put(text, comment)
                return True
except pywikibot.EditConflict:
pywikibot.output(
u'Skipping %s because of edit conflict' % self.title())
except pywikibot.SpamfilterError, e:
pywikibot.output(
u'Skipping %s because of blacklist entry %s'
% (self.title(), e.url))
except pywikibot.LockedPage:
pywikibot.output(
u'Skipping %s because page is locked' % self.title())
            except pywikibot.PageNotSaved, error:
                pywikibot.output(u"Saving page %s failed: %s"
                                 % (self.title(asLink=True), error.message))
        return False
@property
def categoryinfo(self):
"""If supported, return a dict containing category content values:
Numbers of pages, subcategories, files, and total contents.
"""
if not self.isCategory():
return None # should this raise an exception??
try:
return self.site.categoryinfo(self)
except NotImplementedError:
return None
######## DEPRECATED METHODS ########
@deprecated("Site.encoding()")
def encoding(self):
"""DEPRECATED: use Site.encoding() instead"""
return self.site.encoding()
@deprecated("Page.title(withNamespace=False)")
def titleWithoutNamespace(self, underscore=False):
"""DEPRECATED: use self.title(withNamespace=False) instead."""
return self.title(underscore=underscore, withNamespace=False,
withSection=False)
@deprecated("Page.title(as_filename=True)")
def titleForFilename(self):
"""DEPRECATED: use self.title(as_filename=True) instead."""
return self.title(as_filename=True)
@deprecated("Page.title(withSection=False)")
def sectionFreeTitle(self, underscore=False):
"""DEPRECATED: use self.title(withSection=False) instead."""
return self.title(underscore=underscore, withSection=False)
@deprecated("Page.title(asLink=True)")
def aslink(self, forceInterwiki=False, textlink=False, noInterwiki=False):
"""DEPRECATED: use self.title(asLink=True) instead."""
return self.title(asLink=True, forceInterwiki=forceInterwiki,
allowInterwiki=not noInterwiki, textlink=textlink)
@deprecated("Page.title(asUrl=True)")
def urlname(self):
"""Return the Page title encoded for use in an URL.
DEPRECATED: use self.title(asUrl=True) instead.
"""
return self.title(asUrl=True)
####### DISABLED METHODS (warnings provided) ######
# these methods are easily replaced by editing the page's text using
# textlib methods and then using put() on the result.
def removeImage(self, image, put=False, summary=None, safe=True):
"""Old method to remove all instances of an image from page."""
pywikibot.warning(u"Page.removeImage() is no longer supported.")
def replaceImage(self, image, replacement=None, put=False, summary=None,
safe=True):
"""Old method to replace all instances of an image with another."""
pywikibot.warning(u"Page.replaceImage() is no longer supported.")
class ImagePage(Page):
"""A subclass of Page representing an image descriptor wiki page.
Supports the same interface as Page, with the following added methods:
getImagePageHtml : Download image page and return raw HTML text.
fileURL : Return the URL for the image described on this
page.
    fileIsShared : Return True if the image is stored on a shared
    repository like Wikimedia Commons or Wikitravel.
getFileMd5Sum : Return image file's MD5 checksum.
getFileVersionHistory : Return the image file's version history.
getFileVersionHistoryTable: Return the version history in the form of a
wiki table.
usingPages : Iterate Pages on which the image is displayed.
"""
def __init__(self, source, title=u"", insite=None):
Page.__init__(self, source, title, 6)
if self.namespace() != 6:
raise ValueError(u"'%s' is not in the image namespace!" % title)
def getImagePageHtml(self):
"""
Download the image page, and return the HTML, as a unicode string.
Caches the HTML code, so that if you run this method twice on the
same ImagePage object, the page will only be downloaded once.
"""
if not hasattr(self, '_imagePageHtml'):
from pywikibot.comms import http
path = "%s/index.php?title=%s" \
% (self.site.scriptpath(), self.title(asUrl=True))
self._imagePageHtml = http.request(self.site, path)
return self._imagePageHtml
def fileUrl(self):
"""Return the URL for the image described on this page."""
# TODO add scaling option?
if not hasattr(self, '_imageinfo'):
self._imageinfo = self.site.loadimageinfo(self)
return self._imageinfo['url']
@deprecated("fileIsShared")
def fileIsOnCommons(self):
"""Return True if the image is stored on Wikimedia Commons"""
return self.fileIsShared()
def fileIsShared(self):
"""Return True if image is stored on any known shared repository."""
# as of now, the only known repositories are commons and wikitravel
# TODO: put the URLs to family file
if not self.site.has_image_repository:
return False
elif 'wikitravel_shared' in self.site.shared_image_repository():
return self.fileUrl().startswith(
u'http://wikitravel.org/upload/shared/')
else:
return self.fileUrl().startswith(
'http://upload.wikimedia.org/wikipedia/commons/')
@deprecated("ImagePage.getFileSHA1Sum()")
def getFileMd5Sum(self):
"""Return image file's MD5 checksum."""
# FIXME: MD5 might be performed on incomplete file due to server disconnection
# (see bug #1795683).
        # the md5 module is deprecated; hashlib provides the same digest
        import hashlib
        f = urllib.urlopen(self.fileUrl())
        # TODO: check whether this needs a User-Agent header added
        md5Checksum = hashlib.md5(f.read()).hexdigest()
        f.close()
        return md5Checksum
def getFileSHA1Sum(self):
"""Return image file's SHA1 checksum."""
if not hasattr(self, '_imageinfo'):
self._imageinfo = self.site.loadimageinfo(self)
return self._imageinfo['sha1']
def getFileVersionHistory(self):
"""Return the image file's version history.
@return: An iterator yielding tuples containing (timestamp,
username, resolution, filesize, comment).
"""
        # TODO: return value may need to change
return self.site.loadimageinfo(self, history=True)
def getFileVersionHistoryTable(self):
"""Return the version history in the form of a wiki table."""
lines = []
#TODO: if getFileVersionHistory changes, make sure this follows it
for (datetime, username, resolution, size, comment) \
in self.getFileVersionHistory():
lines.append(u'| %s || %s || %s || %s || <nowiki>%s</nowiki>'
% (datetime, username, resolution, size, comment))
return u'{| border="1"\n! date/time || username || resolution || size || edit summary\n|----\n' + u'\n|----\n'.join(lines) + '\n|}'
def usingPages(self, step=None, total=None, content=False):
"""Yield Pages on which the image is displayed.
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in total
@param content: if True, load the current content of each iterated page
(default False)
"""
return self.site.imageusage(self,
step=step, total=total, content=content)
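    # A minimal usage sketch (illustrative; the file title is an
    # assumption):
    #     image = ImagePage(site, u'File:Example.jpg')
    #     for page in image.usingPages(total=5):
    #         pywikibot.output(page.title())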
class Category(Page):
"""A page in the Category: namespace"""
@deprecate_arg("insite", None)
def __init__(self, source, title=u"", sortKey=None):
"""All parameters are the same as for Page() constructor.
"""
Page.__init__(self, source, title, ns=14)
if self.namespace() != 14:
raise ValueError(u"'%s' is not in the category namespace!"
% title)
self.sortKey = sortKey
@deprecate_arg("forceInterwiki", None)
@deprecate_arg("textlink", None)
@deprecate_arg("noInterwiki", None)
def aslink(self, sortKey=None):
"""Return a link to place a page in this Category.
Use this only to generate a "true" category link, not for interwikis
or text links to category pages.
@param sortKey: The sort key for the article to be placed in this
Category; if omitted, default sort key is used.
@type sortKey: (optional) unicode
"""
key = sortKey or self.sortKey
if key:
titleWithSortKey = '%s|%s' % (self.title(withSection=False),
key)
else:
titleWithSortKey = self.title(withSection=False)
return '[[%s]]' % titleWithSortKey
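    # Worked example (hypothetical category and sort key):
    #     Category(site, u'Category:Lakes').aslink(u'Geneva')
    # returns u'[[Category:Lakes|Geneva]]', while aslink() without a sort
    # key returns u'[[Category:Lakes]]'.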
@deprecate_arg("startFrom", None)
@deprecate_arg("cacheResults", None)
def subcategories(self, recurse=False, step=None, total=None,
content=False):
"""Iterate all subcategories of the current category.
@param recurse: if not False or 0, also iterate subcategories of
subcategories. If an int, limit recursion to this number of
levels. (Example: recurse=1 will iterate direct subcats and
first-level sub-sub-cats, but no deeper.)
@type recurse: int or bool
@param step: limit each API call to this number of categories
@param total: iterate no more than this number of
subcategories in total (at all levels)
@param content: if True, retrieve the content of the current version
of each category description page (default False)
"""
if not isinstance(recurse, bool) and recurse:
recurse = recurse - 1
if not hasattr(self, "_subcats"):
self._subcats = []
for member in self.site.categorymembers(self,
namespaces=[14], step=step, total=total,
content=content):
subcat = Category(self.site, member.title())
self._subcats.append(subcat)
yield subcat
if total is not None:
total -= 1
if not total:
return
if recurse:
for item in subcat.subcategories(
recurse, step=step, total=total,
content=content):
yield item
if total is not None:
total -= 1
if not total:
return
else:
for subcat in self._subcats:
yield subcat
if total is not None:
total -= 1
if not total:
return
if recurse:
for item in subcat.subcategories(
recurse, step=step, total=total,
content=content):
yield item
if total is not None:
total -= 1
if not total:
return
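    # A minimal usage sketch (illustrative category): recurse=1 descends
    # one extra level, total caps the overall count across levels.
    #     cat = Category(site, u'Category:Sciences')
    #     for subcat in cat.subcategories(recurse=1, total=20):
    #         pywikibot.output(subcat.title())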
@deprecate_arg("startFrom", None)
def articles(self, recurse=False, step=None, total=None,
content=False, namespaces=None, sortby="",
                 starttime=None, endtime=None, startsort=None,
endsort=None):
"""
Yields all articles in the current category.
By default, yields all *pages* in the category that are not
subcategories!
@param recurse: if not False or 0, also iterate articles in
subcategories. If an int, limit recursion to this number of
levels. (Example: recurse=1 will iterate articles in first-level
subcats, but no deeper.)
@type recurse: int or bool
@param step: limit each API call to this number of pages
@param total: iterate no more than this number of pages in
total (at all levels)
        @param namespaces: only yield pages in the specified namespaces
        @type namespaces: int or list of ints
@param content: if True, retrieve the content of the current version
of each page (default False)
@param sortby: determines the order in which results are generated,
valid values are "sortkey" (default, results ordered by category
sort key) or "timestamp" (results ordered by time page was
added to the category). This applies recursively.
@type sortby: str
@param starttime: if provided, only generate pages added after this
time; not valid unless sortby="timestamp"
@type starttime: pywikibot.Timestamp
@param endtime: if provided, only generate pages added before this
time; not valid unless sortby="timestamp"
@type endtime: pywikibot.Timestamp
@param startsort: if provided, only generate pages >= this title
lexically; not valid if sortby="timestamp"
@type startsort: str
@param endsort: if provided, only generate pages <= this title
lexically; not valid if sortby="timestamp"
@type endsort: str
"""
        if namespaces is None:
            namespaces = [x for x in self.site.namespaces()
                          if x >= 0 and x != 14]
for member in self.site.categorymembers(self,
namespaces=namespaces,
step=step, total=total,
content=content, sortby=sortby,
starttime=starttime, endtime=endtime,
startsort=startsort, endsort=endsort,
):
yield member
if total is not None:
total -= 1
if not total:
return
if recurse:
if not isinstance(recurse, bool) and recurse:
recurse = recurse - 1
for subcat in self.subcategories(step=step):
for article in subcat.articles(recurse, step=step, total=total,
content=content,
namespaces=namespaces, sortby=sortby,
starttime=starttime, endtime=endtime,
startsort=startsort, endsort=endsort,
):
yield article
if total is not None:
total -= 1
if not total:
return
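    # A minimal usage sketch (illustrative): iterate pages ordered by the
    # time they were added to the category.
    #     for page in cat.articles(sortby='timestamp', total=10):
    #         pywikibot.output(page.title())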
def members(self, recurse=False, namespaces=None, step=None, total=None,
content=False):
"""Yield all category contents (subcats, pages, and files)."""
for member in self.site.categorymembers(self,
namespaces, step=step, total=total,
content=content):
yield member
if total is not None:
total -= 1
if not total:
return
if recurse:
if not isinstance(recurse, bool) and recurse:
recurse = recurse - 1
for subcat in self.subcategories(step=step):
for article in subcat.members(
recurse, namespaces, step=step,
total=total, content=content):
yield article
if total is not None:
total -= 1
if not total:
return
def isEmptyCategory(self):
"""Return True if category has no members (including subcategories)."""
for member in self.site.categorymembers(self, total=1):
return False
return True
def isHiddenCategory(self):
"""Return True if the category is hidden."""
# FIXME
# This should use action=query&list=allcategories
# setting acfrom and acto to the category title and adding
# acprop=hidden but currently fails in some cases
# (see bug 48824)
return '__HIDDENCAT__' in self.expand_text()
def copyTo(self, cat, message):
"""
Copy text of category page to a new page. Does not move contents.
@param cat: New category title (without namespace) or Category object
@type cat: unicode or Category
@param message: message to use for category creation message
If two %s are provided in message, will be replaced
by (self.title, authorsList)
@type message: unicode
@return: True if copying was successful, False if target page
already existed.
"""
# This seems far too specialized to be in the top-level framework
# move to category.py? (Although it doesn't seem to be used there,
# either)
if not isinstance(cat, Category):
cat = self.site.category_namespace() + ':' + cat
targetCat = Category(self.site, cat)
else:
targetCat = cat
if targetCat.exists():
            pywikibot.output(u'Target page %s already exists!'
                             % targetCat.title(),
                             level=pywikibot.WARNING)
return False
else:
pywikibot.output('Moving text from %s to %s.'
% (self.title(), targetCat.title()))
authors = ', '.join(self.contributingUsers())
try:
creationSummary = message % (self.title(), authors)
except TypeError:
creationSummary = message
targetCat.put(self.get(), creationSummary)
return True
def copyAndKeep(self, catname, cfdTemplates, message):
"""Copy partial category page text (not contents) to a new title.
Like copyTo above, except this removes a list of templates (like
deletion templates) that appear in the old category text. It also
removes all text between the two HTML comments BEGIN CFD TEMPLATE
and END CFD TEMPLATE. (This is to deal with CFD templates that are
substituted.)
Returns true if copying was successful, false if target page already
existed.
@param catname: New category title (without namespace)
@param cfdTemplates: A list (or iterator) of templates to be removed
from the page text
@return: True if copying was successful, False if target page
already existed.
"""
# I don't see why we need this as part of the framework either
# move to scripts/category.py?
catname = self.site.category_namespace() + ':' + catname
targetCat = Category(self.site, catname)
if targetCat.exists():
pywikibot.warning(u'Target page %s already exists!'
% targetCat.title())
return False
else:
            pywikibot.output('Moving text from %s to %s.'
                             % (self.title(), targetCat.title()))
authors = ', '.join(self.contributingUsers())
creationSummary = message % (self.title(), authors)
newtext = self.get()
for regexName in cfdTemplates:
matchcfd = re.compile(r"{{%s.*?}}" % regexName, re.IGNORECASE)
newtext = matchcfd.sub('', newtext)
matchcomment = re.compile(
r"<!--BEGIN CFD TEMPLATE-->.*?<!--END CFD TEMPLATE-->",
re.IGNORECASE | re.MULTILINE | re.DOTALL)
newtext = matchcomment.sub('', newtext)
            # strip leading newlines left behind by the removals
            newtext = newtext.lstrip('\n')
targetCat.put(newtext, creationSummary)
return True
#### DEPRECATED METHODS ####
@deprecated("list(Category.subcategories(...))")
def subcategoriesList(self, recurse=False):
"""DEPRECATED: Equivalent to list(self.subcategories(...))"""
return sorted(list(set(self.subcategories(recurse))))
@deprecated("list(Category.articles(...))")
def articlesList(self, recurse=False):
"""DEPRECATED: equivalent to list(self.articles(...))"""
return sorted(list(set(self.articles(recurse))))
@deprecated("Category.categories()")
def supercategories(self):
"""DEPRECATED: equivalent to self.categories()"""
return self.categories()
@deprecated("list(Category.categories(...))")
def supercategoriesList(self):
"""DEPRECATED: equivalent to list(self.categories(...))"""
return sorted(list(set(self.categories())))
ip_regexp = re.compile(r'^(?:(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|'
r'(((?=(?=(.*?(::)))\3(?!.+\4)))\4?|[\dA-F]{1,4}:)'
r'([\dA-F]{1,4}(\4|:\b)|\2){5}'
r'(([\dA-F]{1,4}(\4|:\b|$)|\2){2}|'
r'(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4}))\Z',
re.IGNORECASE)
class User(Page):
"""A class that represents a Wiki user.
"""
@deprecate_arg("site", "source")
@deprecate_arg("name", "title")
def __init__(self, source, title=u''):
"""Initializer for a User object.
All parameters are the same as for Page() constructor.
"""
if len(title) > 1 and title[0] == u'#':
self._isAutoblock = True
title = title[1:]
else:
self._isAutoblock = False
Page.__init__(self, source, title, ns=2)
if self.namespace() != 2:
raise ValueError(u"'%s' is not in the user namespace!"
% title)
if self._isAutoblock:
# This user is probably being queried for purpose of lifting
# an autoblock.
            pywikibot.output(
                "This is an autoblock ID; it can only be used to unblock.")
def name(self):
return self.username
@property
def username(self):
""" Convenience method that returns the title of the page with
namespace prefix omitted, aka the username, as a Unicode string.
"""
if self._isAutoblock:
return u'#' + self.title(withNamespace=False)
else:
return self.title(withNamespace=False)
def isRegistered(self, force=False):
""" Return True if a user with this name is registered on this site,
False otherwise.
@param force: if True, forces reloading the data from API
@type force: bool
"""
if self.isAnonymous():
return False
else:
return self.getprops(force).get('missing') is None
def isAnonymous(self):
return ip_regexp.match(self.username) is not None
def getprops(self, force=False):
""" Return a Dictionnary that contains user's properties. Use cached
values if already called before, otherwise fetch data from the API.
@param force: if True, forces reloading the data from API
@type force: bool
"""
if force:
del self._userprops
if not hasattr(self, '_userprops'):
self._userprops = list(self.site.users([self.username,]))[0]
if self.isAnonymous():
r = list(self.site.blocks(users=self.username))
if r:
self._userprops['blockedby'] = r[0]['by']
self._userprops['blockreason'] = r[0]['reason']
return self._userprops
@deprecated('User.registration()')
def registrationTime(self, force=False):
""" Return registration date for this user, as a long in
        MediaWiki's internal timestamp format, or 0 if the date is unknown.
@param force: if True, forces reloading the data from API
@type force: bool
"""
if self.registration():
return long(self.registration().strftime('%Y%m%d%H%M%S'))
else:
return 0
def registration(self, force=False):
""" Return registration date for this user as a pywikibot.Timestamp
object, or None if the date is unknown.
@param force: if True, forces reloading the data from API
@type force: bool
"""
reg = self.getprops(force).get('registration')
if reg:
return pywikibot.Timestamp.fromISOformat(reg)
def editCount(self, force=False):
""" Return edit count for this user as int. This is always 0 for
'anonymous' users.
@param force: if True, forces reloading the data from API
@type force: bool
"""
if 'editcount' in self.getprops(force):
return self.getprops()['editcount']
else:
return 0
def isBlocked(self, force=False):
""" Return True if this user is currently blocked, False otherwise.
@param force: if True, forces reloading the data from API
@type force: bool
"""
return 'blockedby' in self.getprops(force)
def isEmailable(self, force=False):
""" Return True if emails can be send to this user through mediawiki,
False otherwise.
@param force: if True, forces reloading the data from API
@type force: bool
"""
return 'emailable' in self.getprops(force)
def groups(self, force=False):
""" Return a list of groups to wich this user belongs. The return value
is guaranteed to be a list object, possibly empty.
@param force: if True, forces reloading the data from API
@type force: bool
"""
if 'groups' in self.getprops(force):
return self.getprops()['groups']
else:
return []
def getUserPage(self, subpage=u''):
""" Return a pywikibot.Page object corresponding to this user's main
page, or a subpage of it if subpage is set.
@param subpage: subpage part to be appended to the main
page title (optional)
@type subpage: unicode
"""
if self._isAutoblock:
#This user is probably being queried for purpose of lifting
#an autoblock, so has no user pages per se.
raise AutoblockUser("This is an autoblock ID, you can only use to unblock it.")
if subpage:
subpage = u'/' + subpage
return Page(Link(self.title() + subpage, self.site))
def getUserTalkPage(self, subpage=u''):
""" Return a pywikibot.Page object corresponding to this user's main
talk page, or a subpage of it if subpage is set.
@param subpage: subpage part to be appended to the main
talk page title (optional)
@type subpage: unicode
"""
if self._isAutoblock:
#This user is probably being queried for purpose of lifting
#an autoblock, so has no user talk pages per se.
raise AutoblockUser("This is an autoblock ID, you can only use to unblock it.")
if subpage:
subpage = u'/' + subpage
return Page(Link(self.title(withNamespace=False) + subpage,
self.site, defaultNamespace=3))
    def sendMail(self, subject, text, ccme=False):
""" Send an email to this user via mediawiki's email interface.
Return True on success, False otherwise.
        This method can raise a UserActionRefuse exception if this user
        does not accept email, or if the currently logged-in bot does not
        have the right to send emails.
@param subject: the subject header of the mail
@type subject: unicode
@param text: mail body
@type text: unicode
@param ccme: if True, sends a copy of this email to the bot
@type ccme: bool
"""
if not self.isEmailable():
raise UserActionRefuse('This user is not mailable')
if not self.site.has_right('sendemail'):
raise UserActionRefuse('You don\'t have permission to send mail')
params = {
'action': 'emailuser',
'target': self.username,
'token': self.site.token(self, 'email'),
'subject': subject,
'text': text,
}
if ccme:
params['ccme'] = 1
mailrequest = pywikibot.data.api.Request(**params)
maildata = mailrequest.submit()
if 'error' in maildata:
            code = maildata['error']['code']
            if code == u'usermaildisabled':
                pywikibot.output(u'User mail has been disabled')
elif 'emailuser' in maildata:
if maildata['emailuser']['result'] == u'Success':
pywikibot.output(u'Email sent.')
return True
return False
@deprecated("contributions")
@deprecate_arg("limit", "total") # To be consistent with rest of framework
def editedPages(self, total=500):
""" Deprecated function that wraps 'contributions' for backwards
compatibility. Yields pywikibot.Page objects that this user has
edited, with an upper bound of 'total'. Pages returned are not
guaranteed to be unique.
@param total: limit result to this number of pages.
@type total: int.
"""
for item in self.contributions(total=total):
yield item[0]
@deprecate_arg("limit", "total") # To be consistent with rest of framework
@deprecate_arg("namespace", "namespaces")
def contributions(self, total=500, namespaces=[]):
""" Yield tuples describing this user edits with an upper bound of
'limit'. Each tuple is composed of a pywikibot.Page object,
the revision id (int), the edit timestamp (as a pywikibot.Timestamp
object), and the comment (unicode).
Pages returned are not guaranteed to be unique.
@param total: limit result to this number of pages
@type total: int
@param namespaces: only iterate links in these namespaces
@type namespaces: list
"""
for contrib in self.site.usercontribs(user=self.username,
namespaces=namespaces, total=total):
ts = pywikibot.Timestamp.fromISOformat(contrib['timestamp'])
yield Page(self.site, contrib['title'], contrib['ns']), \
contrib['revid'], ts, contrib.get('comment', None)
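    # A minimal usage sketch (illustrative username):
    #     user = User(site, u'Example')
    #     for page, revid, ts, comment in user.contributions(total=5):
    #         pywikibot.output(u'%s %s (%s)' % (ts, page.title(), revid))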
@deprecate_arg("number", "total")
def uploadedImages(self, total=10):
""" Yield tuples describing files uploaded by this user.
Each tuple is composed of a pywikibot.Page, the timestamp (str in
ISO8601 format), comment (unicode) and a bool for pageid > 0.
Pages returned are not guaranteed to be unique.
@param total: limit result to this number of pages
@type total: int
"""
        if not self.isRegistered():
            return
for item in self.site.logevents(logtype='upload', user=self.username,
total=total):
yield ImagePage(self.site, item.title().title()), \
unicode(item.timestamp()), item.comment(), item.pageid() > 0
class WikibasePage(Page):
"""
The base page for the Wikibase extension.
There really should be no need to call this directly
"""
def __init__(self, site, title=u"", **kwargs):
Page.__init__(self, site, title, **kwargs)
self.repo = self.site
self._isredir = False # Wikibase pages cannot be a redirect
def __cmp__(self, other):
"""Test for equality and inequality of WikibasePage objects.
Page objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Page objects are sortable by namespace first, then by title.
This is basically the same as Page.__cmp__ but slightly different.
"""
if not isinstance(other, Page):
# especially, return -1 if other is None
return -1
if self.site != other.site:
return cmp(self.site, other.site)
if self.namespace() != other.namespace():
return cmp(self.namespace(), other.namespace())
return cmp(self.title(), other.title())
def title(self, **kwargs):
if self.namespace() == 0:
self._link._text = self.getID()
del self._link._title
return Page(self).title(**kwargs)
def __defined_by(self, singular=False):
"""
        Return the parameters needed by the API to identify an item.
        Once an item's "p/q##" is looked up, that will be used for all
        future requests.
        @param singular: Whether the parameter names should use the
            singular form
        @type singular: bool
"""
params = {}
if singular:
id = 'id'
site = 'site'
title = 'title'
else:
id = 'ids'
site = 'sites'
title = 'titles'
#id overrides all
if hasattr(self, 'id'):
params[id] = self.id
return params
        # the rest only applies to ItemPages, but is still needed here
        if hasattr(self, '_site') and hasattr(self, '_title'):
            params[site] = self._site.dbName()
            params[title] = self._title
        else:
            # raising is friendlier to callers than the old quit(), which
            # killed the whole interpreter; note that setting params[id]
            # via self.getID() here would recurse through get()
            raise pywikibot.Error(
                u'Cannot identify entity: no id and no site/title pair.')
        return params
def exists(self):
if not hasattr(self, '_content'):
try:
self.get()
return True
except pywikibot.NoPage:
return False
return 'lastrevid' in self._content
def get(self, force=False, *args):
"""
        Fetch all page data and cache it.
        @param force: if True, override the cache and reload from the API
        @param args: may be used to specify custom props
"""
if force or not hasattr(self, '_content'):
data = self.repo.loadcontent(self.__defined_by(), *args)
self.id = data.keys()[0]
self._content = data[self.id]
if 'lastrevid' in self._content:
self.lastrevid = self._content['lastrevid']
else:
raise pywikibot.NoPage(self)
#aliases
self.aliases = {}
if 'aliases' in self._content:
for lang in self._content['aliases']:
self.aliases[lang] = list()
for value in self._content['aliases'][lang]:
self.aliases[lang].append(value['value'])
#labels
self.labels = {}
if 'labels' in self._content:
for lang in self._content['labels']:
self.labels[lang] = self._content['labels'][lang]['value']
#descriptions
self.descriptions = {}
if 'descriptions' in self._content:
for lang in self._content['descriptions']:
self.descriptions[lang] = self._content['descriptions'][lang]['value']
        return {'aliases': self.aliases,
                'labels': self.labels,
                'descriptions': self.descriptions,
                }
def getID(self, numeric=False, force=False):
"""
        @param numeric: Strip the first letter and return an int
        @type numeric: bool
        @param force: Force an update of new data
        @type force: bool
"""
if not hasattr(self, 'id') or force:
self.get(force=force)
if numeric:
return int(self.id[1:])
return self.id
def latestRevision(self):
if not hasattr(self, 'lastrevid'):
self.get()
return self.lastrevid
def __normalizeLanguages(self, data):
"""
        Helper function to convert any site objects into the language
        codes they represent.
        @param data: The dict to check
        @type data: dict
        """
        # iterate over a copy of the keys (a list in Python 2), because
        # the dict is mutated inside the loop
        for key in data.keys():
            if isinstance(key, pywikibot.site.BaseSite):
                data[key.language()] = data[key]
                del data[key]
        return data
def getdbName(self, site):
"""
Helper function to normalize site
objects into dbnames
"""
if isinstance(site, pywikibot.site.BaseSite):
return site.dbName()
return site
def editEntity(self, data, **kwargs):
"""
        Enables updating of entities through wbeditentity.
        This function is wrapped by:
        * editLabels
        * editDescriptions
        * editAliases
        * ItemPage.setSitelinks
        @param data: Data to be saved
        @type data: dict
"""
if hasattr(self, 'lastrevid'):
baserevid = self.lastrevid
else:
baserevid = None
updates = self.repo.editEntity(self.__defined_by(singular=True), data,
baserevid=baserevid, **kwargs)
self.lastrevid = updates['entity']['lastrevid']
def editLabels(self, labels, **kwargs):
"""
Labels should be a dict, with the key
as a language or a site object. The
value should be the string to set it to.
You can set it to '' to remove the label.
"""
labels = self.__normalizeLanguages(labels)
for key in labels:
labels[key] = {'language': key, 'value': labels[key]}
data = {'labels': labels}
self.editEntity(data, **kwargs)
def editDescriptions(self, descriptions, **kwargs):
"""
Descriptions should be a dict, with the key
as a language or a site object. The
value should be the string to set it to.
You can set it to '' to remove the description.
"""
descriptions = self.__normalizeLanguages(descriptions)
for key in descriptions:
descriptions[key] = {'language': key, 'value': descriptions[key]}
data = {'descriptions': descriptions}
self.editEntity(data, **kwargs)
def editAliases(self, aliases, **kwargs):
"""
Aliases should be a dict, with the key
as a language or a site object. The
value should be a list of strings.
"""
aliases = self.__normalizeLanguages(aliases)
for (key, strings) in aliases.items():
aliases[key] = [{'language': key, 'value': i} for i in strings]
data = {'aliases': aliases}
self.editEntity(data, **kwargs)
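    # A minimal usage sketch (illustrative item and values); keys may be
    # language codes or Site objects, normalized by __normalizeLanguages:
    #     item.editLabels({'en': u'Douglas Adams'})
    #     item.editDescriptions({'en': u'English writer'})
    #     item.editAliases({'en': [u'Douglas Noel Adams']})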
class ItemPage(WikibasePage):
def __init__(self, site, title=None):
"""
defined by qid XOR site AND title
options:
site=pywikibot.DataSite & title=Q42
site=pywikibot.Site & title=Main Page
"""
super(ItemPage, self).__init__(site, title, ns=0)
self.id = title.lower()
@classmethod
def fromPage(cls, page):
"""
Get the ItemPage based on a Page that links to it
"""
repo = page.site.data_repository()
i = cls(repo, 'null')
del i.id
i._site = page.site
i._title = page.title()
return i
def __make_site(self, dbname):
"""
Converts a Site.dbName() into a Site object.
Rather hackish method that only works for WMF sites
"""
lang = dbname.replace('wiki','')
lang = lang.replace('_','-')
return pywikibot.Site(lang, 'wikipedia')
def get(self, force=False, *args):
"""
        Fetch all item data and cache it.
        @param force: if True, override the cache and reload from the API
        @param args: the values of props
"""
if force or not hasattr(self, '_content'):
super(ItemPage, self).get(force=force, *args)
#claims
self.claims = {}
if 'claims' in self._content:
for pid in self._content['claims']:
self.claims[pid] = list()
for claim in self._content['claims'][pid]:
c = Claim.fromJSON(self.repo, claim)
c.on_item = self
self.claims[pid].append(c)
#sitelinks
self.sitelinks = {}
if 'sitelinks' in self._content:
for dbname in self._content['sitelinks']:
#Due to issues with locked/obsolete sites
#this part is commented out
#site = self.__make_site(dbname)
#self.sitelinks[site] = pywikibot.Page(site, self._content['sitelinks'][dbname]['title'])
self.sitelinks[dbname] = self._content['sitelinks'][dbname]['title']
return {'aliases': self.aliases,
'labels': self.labels,
'descriptions': self.descriptions,
'sitelinks': self.sitelinks,
'claims': self.claims
}
def getSitelink(self, site, force=False):
"""
        Return the title (unicode string) for the specific site.
        @param site: a pywikibot.Site object or a database name
        @param force: if True, override the cache and reload from the API
        If the item doesn't have a sitelink to that site, raise NoPage.
"""
if force or not hasattr(self, '_content'):
self.get(force=force)
dbname = self.getdbName(site)
        if dbname not in self.sitelinks:
            raise pywikibot.NoPage(self)
        else:
            return self.sitelinks[dbname]
def setSitelink(self, sitelink, **kwargs):
"""
A sitelink can either be a Page object,
or a {'site':dbname,'title':title} dictionary.
"""
self.setSitelinks([sitelink], **kwargs)
def removeSitelink(self, site, **kwargs):
"""
A site can either be a Site object,
or it can be a dbName.
"""
self.removeSitelinks([site], **kwargs)
def removeSitelinks(self, sites, **kwargs):
"""
Sites should be a list, with values either
being Site objects, or dbNames.
"""
data = list()
for site in sites:
site = self.getdbName(site)
data.append({'site': site, 'title': ''})
self.setSitelinks(data, **kwargs)
def setSitelinks(self, sitelinks, **kwargs):
"""
Sitelinks should be a list. Each item in the
list can either be a Page object, or a dict
with a value for 'site' and 'title'.
"""
data = {}
for obj in sitelinks:
if isinstance(obj, Page):
dbName = self.getdbName(obj.site)
data[dbName] = {'site': dbName, 'title': obj.title()}
else:
#TODO: Do some verification here
dbName = obj['site']
data[dbName] = obj
data = {'sitelinks': data}
self.editEntity(data, **kwargs)
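    # A minimal usage sketch (illustrative): a Page object or a site/title
    # dict both work; removeSitelinks() uses an empty title internally.
    #     item.setSitelink(pywikibot.Page(site, u'Main Page'))
    #     item.setSitelink({'site': 'enwiki', 'title': u'Main Page'})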
def addClaim(self, claim, bot=True):
"""
Adds the claim to the item
        @param claim: The claim to add
        @type claim: Claim
        @param bot: Whether to flag as bot (if possible)
        @type bot: bool
"""
self.repo.addClaim(self, claim, bot=bot)
claim.on_item = self
def removeClaims(self, claims, **kwargs):
"""
Removes the claims from the item
@type claims: list
"""
self.repo.removeClaims(claims, **kwargs)
class PropertyPage(WikibasePage):
"""
Any page in the property namespace
Should be created as:
PropertyPage(DataSite, 'Property:P21')
"""
def __init__(self, source, title=u""):
WikibasePage.__init__(self, source, title, ns=120)
self.id = self.title(withNamespace=False).lower()
if not self.id.startswith(u'p'):
raise ValueError(u"'%s' is not a property page!" % self.title())
def get(self, force=False, *args):
if force or not hasattr(self, '_content'):
WikibasePage.get(self, force=force, *args)
self.type = self._content['datatype']
def getType(self):
"""
        Return the datatype that this property uses.
Examples: item, commons media file, StringValue, NumericalValue
"""
if not hasattr(self, 'type'):
self.type = self.repo.getPropertyType(self)
return self.type
class QueryPage(WikibasePage):
"""
For future usage, not implemented yet
"""
def __init__(self, site, title):
WikibasePage.__init__(self, site, title, ns=122)
self.id = self.title(withNamespace=False).lower()
if not self.id.startswith(u'u'):
raise ValueError(u"'%s' is not a query page!" % self.title())
class Claim(PropertyPage):
"""
Claims are standard claims as well as references.
"""
def __init__(self, site, pid, snak=None, hash=None, isReference=False,
isQualifier=False):
"""
Defined by the "snak" value, supplemented by site + pid
"""
PropertyPage.__init__(self, site, 'Property:' + pid)
self.snak = snak
self.hash = hash
self.isReference = isReference
self.isQualifier = isQualifier
if self.isQualifier and self.isReference:
raise ValueError(u'Claim cannot be both a qualifier and reference.')
self.sources = []
self.qualifiers = {}
self.target = None
self.snaktype = 'value'
self.on_item = None # The item it's on
@staticmethod
def fromJSON(site, data):
"""
Creates the claim object from JSON returned
in the API call.
"""
claim = Claim(site, data['mainsnak']['property'])
if 'id' in data:
claim.snak = data['id']
elif 'hash' in data:
claim.isReference = True
claim.hash = data['hash']
else:
claim.isQualifier = True
claim.snaktype = data['mainsnak']['snaktype']
if claim.getSnakType() == 'value':
if claim.getType() == 'wikibase-item':
claim.target = ItemPage(site, 'Q' +
str(data['mainsnak']['datavalue']['value']['numeric-id']))
elif claim.getType() == 'commonsMedia':
claim.target = ImagePage(site.image_repository(), 'File:' +
data['mainsnak']['datavalue']['value'])
elif claim.getType() == 'globecoordinate':
claim.target = pywikibot.Coordinate.fromWikibase(data['mainsnak']['datavalue']['value'], site)
else:
#This covers string type
claim.target = data['mainsnak']['datavalue']['value']
if 'references' in data:
for source in data['references']:
claim.sources.append(Claim.referenceFromJSON(site, source))
if 'qualifiers' in data:
for prop in data['qualifiers']:
for qualifier in data['qualifiers'][prop]:
qual = Claim.qualifierFromJSON(site, qualifier)
if prop in claim.qualifiers:
claim.qualifiers[prop].append(qual)
else:
claim.qualifiers[prop] = [qual]
return claim
@staticmethod
def referenceFromJSON(site, data):
"""
Reference objects are represented a
bit differently, and require some
more handling.
"""
mainsnak = data['snaks'].values()[0][0]
wrap = {'mainsnak': mainsnak, 'hash': data['hash']}
return Claim.fromJSON(site, wrap)
@staticmethod
def qualifierFromJSON(site, data):
"""
        Qualifier objects are represented a bit differently, like
        references, though it is unclear whether this even requires
        its own function.
"""
wrap = {'mainsnak': data}
return Claim.fromJSON(site, wrap)
def setTarget(self, value):
"""
Sets the target to the passed value.
There should be checks to ensure type compliance
"""
types = {'wikibase-item': ItemPage,
'string': basestring,
'commonsMedia': ImagePage,
'globecoordinate': pywikibot.Coordinate,
}
if self.getType() in types:
if not isinstance(value, types[self.getType()]):
raise ValueError("%s is not type %s."
% (value, str(types[self.getType()])))
self.target = value
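    # A minimal usage sketch (illustrative property and item ids): build a
    # claim, set its target, then attach it via ItemPage.addClaim().
    #     claim = Claim(repo, u'P31')
    #     claim.setTarget(ItemPage(repo, u'Q5'))
    #     item.addClaim(claim)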
def changeTarget(self, value=None, snaktype='value', **kwargs):
"""
This actually saves the new target.
"""
if value:
self.setTarget(value)
data = self.repo.changeClaimTarget(self, snaktype=snaktype,
**kwargs)
#TODO: Re-create the entire item from JSON, not just id
self.snak = data['claim']['id']
def getTarget(self):
"""
Returns object that the property is associated with.
None is returned if no target is set
"""
return self.target
def getSnakType(self):
"""
Returns the "snaktype"
Can be "value", "somevalue" or "novalue"
"""
return self.snaktype
def setSnakType(self, value):
if value in ['value', 'somevalue', 'novalue']:
self.snaktype = value
else:
raise ValueError("snaktype must be 'value', 'somevalue', or 'novalue'.")
def changeSnakType(self, value=None, **kwargs):
"""
This actually saves the new snakvalue.
TODO: Is this function really needed?
"""
if value:
self.setSnakType(value)
self.changeTarget(snaktype=self.getSnakType(), **kwargs)
def getSources(self):
"""
Returns a list of Claims
"""
return self.sources
def addSource(self, source, **kwargs):
"""
source is a Claim.
adds it as a reference.
"""
data = self.repo.editSource(self, source, new=True, **kwargs)
source.hash = data['reference']['hash']
self.on_item.lastrevid = data['pageinfo']['lastrevid']
self.sources.append(source)
def _formatDataValue(self):
"""
Format the target into the proper JSON datavalue that Wikibase wants
"""
if self.getType() == 'wikibase-item':
value = {'entity-type': 'item',
'numeric-id': self.getTarget().getID(numeric=True)}
elif self.getType() == 'string':
value = self.getTarget()
elif self.getType() == 'commonsMedia':
value = self.getTarget().title(withNamespace=False)
elif self.getType() == 'globecoordinate':
value = self.getTarget().toWikibase()
else:
            raise NotImplementedError(
                '%s datatype is not supported yet.' % self.getType())
return value
class Revision(object):
"""A structure holding information about a single revision of a Page."""
def __init__(self, revid, timestamp, user, anon=False, comment=u"",
text=None, minor=False):
"""All parameters correspond to object attributes (e.g., revid
parameter is stored as self.revid)
@param revid: Revision id number
@type revid: int
@param text: Revision wikitext.
@type text: unicode, or None if text not yet retrieved
@param timestamp: Revision time stamp
@type timestamp: pywikibot.Timestamp
@param user: user who edited this revision
@type user: unicode
@param anon: user is unregistered
@type anon: bool
@param comment: edit comment text
@type comment: unicode
@param minor: edit flagged as minor
@type minor: bool
"""
self.revid = revid
self.text = text
self.timestamp = timestamp
self.user = user
self.anon = anon
self.comment = comment
self.minor = minor
class Link(object):
"""A Mediawiki link (local or interwiki)
Has the following attributes:
- site: The Site object for the wiki linked to
- namespace: The namespace of the page linked to (int)
- title: The title of the page linked to (unicode); does not include
namespace or section
- section: The section of the page linked to (unicode or None); this
contains any text following a '#' character in the title
- anchor: The anchor text (unicode or None); this contains any text
following a '|' character inside the link
"""
illegal_titles_pattern = re.compile(
# Matching titles will be held as illegal.
u'''[^ %!\"$&'()*,\\-.\\/0-9:;=?@A-Z\\\\^_`a-z~\u0080-\uFFFF+]'''
# URL percent encoding sequences interfere with the ability
# to round-trip titles -- you can't link to them consistently.
u'|%[0-9A-Fa-f]{2}'
# XML/HTML character references produce similar issues.
u'|&[A-Za-z0-9\x80-\xff]+;'
u'|&#[0-9]+;'
u'|&#x[0-9A-Fa-f]+;'
)
def __init__(self, text, source=None, defaultNamespace=0):
"""Constructor
@param text: the link text (everything appearing between [[ and ]]
on a wiki page)
@type text: unicode
@param source: the Site on which the link was found (not necessarily
the site to which the link refers)
@type source: Site
@param defaultNamespace: a namespace to use if the link does not
contain one (defaults to 0)
@type defaultNamespace: int
"""
assert source is None or isinstance(source, pywikibot.site.BaseSite), \
"source parameter should be a Site object"
self._text = text
self._source = source or pywikibot.Site()
self._defaultns = defaultNamespace
# preprocess text (these changes aren't site-dependent)
# First remove anchor, which is stored unchanged, if there is one
if u"|" in self._text:
self._text, self._anchor = self._text.split(u"|", 1)
else:
self._anchor = None
# Clean up the name, it can come from anywhere.
# Convert HTML entities to unicode
t = html2unicode(self._text)
# Convert URL-encoded characters to unicode
t = url2unicode(t, site=self._source)
# Normalize unicode string to a NFC (composed) format to allow
# proper string comparisons. According to
# http://svn.wikimedia.org/viewvc/mediawiki/branches/REL1_6/phase3/includes/normal/UtfNormal.php?view=markup
# the mediawiki code normalizes everything to NFC, not NFKC
# (which might result in information loss).
t = unicodedata.normalize('NFC', t)
# This code was adapted from Title.php : secureAndSplit()
#
if u'\ufffd' in t:
raise pywikibot.Error("Title contains illegal char (\\uFFFD)")
# Replace underscores by spaces
t = t.replace(u"_", u" ")
        # replace runs of multiple spaces with a single space
        while u"  " in t:
            t = t.replace(u"  ", u" ")
# Strip spaces at both ends
t = t.strip()
# Remove left-to-right and right-to-left markers.
t = t.replace(u"\u200e", u"").replace(u"\u200f", u"")
self._text = t
def __repr__(self):
return "pywikibot.page.Link(%r, %r)" % (self.title, self.site)
def parse_site(self):
"""Parse only enough text to determine which site the link points to.
This method does not parse anything after the first ":"; links
with multiple interwiki prefixes (such as "wikt:fr:Parlais") need
to be re-parsed on the first linked wiki to get the actual site.
@return: tuple of (familyname, languagecode) for the linked site.
"""
t = self._text
fam = self._source.family
code = self._source.code
while u":" in t:
# Initial colon
if t.startswith(u":"):
# remove the colon but continue processing
# remove any subsequent whitespace
t = t.lstrip(u":").lstrip(u" ")
continue
prefix = t[ :t.index(u":")].lower() # part of text before :
ns = self._source.ns_index(prefix)
if ns:
# The prefix is a namespace in the source wiki
return (fam.name, code)
if prefix in fam.langs:
# prefix is a language code within the source wiki family
return (fam.name, prefix)
known = fam.get_known_families(site=self._source)
if prefix in known:
if known[prefix] == fam.name:
# interwiki prefix links back to source family
t = t[t.index(u":")+1: ].lstrip(u" ")
# strip off the prefix and retry
continue
# prefix is a different wiki family
return (known[prefix], code)
break
return (fam.name, code) # text before : doesn't match any known prefix
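    # Hypothetical example (editor's addition; names assumed, not from the
    # original source): with English Wikipedia as the source site,
    #     Link(u'fr:Chien').parse_site()   ->  ('wikipedia', 'fr')
    #     Link(u'Chien').parse_site()      ->  ('wikipedia', 'en')
    # Multi-prefix links such as u'wikt:fr:Chien' resolve only one hop here,
    # per the docstring above.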
def parse(self):
"""Parse text; called internally when accessing attributes"""
self._site = self._source
self._namespace = self._defaultns
t = self._text
# This code was adapted from Title.php : secureAndSplit()
#
firstPass = True
while u":" in t:
# Initial colon indicates main namespace rather than default
if t.startswith(u":"):
self._namespace = 0
# remove the colon but continue processing
# remove any subsequent whitespace
t = t.lstrip(u":").lstrip(u" ")
continue
fam = self._site.family
prefix = t[ :t.index(u":")].lower()
ns = self._site.ns_index(prefix)
if ns:
# Ordinary namespace
t = t[t.index(u":"): ].lstrip(u":").lstrip(u" ")
self._namespace = ns
break
if prefix in fam.langs.keys()\
or prefix in fam.get_known_families(site=self._site):
# looks like an interwiki link
if not firstPass:
# Can't make a local interwiki link to an interwiki link.
raise pywikibot.Error(
"Improperly formatted interwiki link '%s'"
% self._text)
t = t[t.index(u":"): ].lstrip(u":").lstrip(u" ")
if prefix in fam.langs.keys():
newsite = pywikibot.Site(prefix, fam)
else:
otherlang = self._site.code
familyName = fam.get_known_families(site=self._site)[prefix]
if familyName in ['commons', 'meta']:
otherlang = familyName
try:
newsite = pywikibot.Site(otherlang, familyName)
except ValueError:
raise pywikibot.Error("""\
%s is not a local page on %s, and the %s family is
not supported by PyWikiBot!"""
                              % (self._text, self._site, familyName))
# Redundant interwiki prefix to the local wiki
if newsite == self._site:
if not t:
# Can't have an empty self-link
raise pywikibot.Error(
"Invalid link title: '%s'" % self._text)
firstPass = False
continue
self._site = newsite
else:
break # text before : doesn't match any known prefix
if u"#" in t:
t, sec = t.split(u'#', 1)
t, self._section = t.rstrip(), sec.lstrip()
else:
self._section = None
# Reject illegal characters.
m = Link.illegal_titles_pattern.search(t)
if m:
raise pywikibot.InvalidTitle(
u"contains illegal char(s) '%s'" % m.group(0))
# Pages with "/./" or "/../" appearing in the URLs will
# often be unreachable due to the way web browsers deal
        # with 'relative' URLs. Forbid them explicitly.
if u'.' in t and (
t == u'.' or t == u'..'
or t.startswith(u"./")
or t.startswith(u"../")
or u"/./" in t
or u"/../" in t
or t.endswith(u"/.")
or t.endswith(u"/..")
):
raise pywikibot.InvalidTitle(
"(contains . / combinations): '%s'"
% self._text)
# Magic tilde sequences? Nu-uh!
if u"~~~" in t:
raise pywikibot.InvalidTitle("(contains ~~~): '%s'" % self._text)
if self._namespace != -1 and len(t) > 255:
raise pywikibot.InvalidTitle("(over 255 bytes): '%s'" % t)
if self._site.case() == 'first-letter':
t = t[:1].upper() + t[1:]
# Can't make a link to a namespace alone...
# "empty" local links can only be self-links
# with a fragment identifier.
if not t and self._site == self._source and self._namespace != 0:
raise pywikibot.Error("Invalid link (no page title): '%s'"
% self._text)
self._title = t
# define attributes, to be evaluated lazily
@property
def site(self):
if not hasattr(self, "_site"):
self.parse()
return self._site
@property
def namespace(self):
if not hasattr(self, "_namespace"):
self.parse()
return self._namespace
@property
def title(self):
if not hasattr(self, "_title"):
self.parse()
return self._title
@property
def section(self):
if not hasattr(self, "_section"):
self.parse()
return self._section
@property
def anchor(self):
if not hasattr(self, "_anchor"):
self.parse()
return self._anchor
def canonical_title(self):
"""Return full page title, including localized namespace."""
if self.namespace:
return "%s:%s" % (self.site.namespace(self.namespace),
self.title)
else:
return self.title
def astext(self, onsite=None):
"""Return a text representation of the link.
@param onsite: if specified, present as a (possibly interwiki) link
from the given site; otherwise, present as an internal link on
the source site.
"""
if onsite is None:
onsite = self._source
title = self.title
if self.namespace:
title = onsite.namespace(self.namespace) + ":" + title
if self.section:
title = title + "#" + self.section
if onsite == self.site:
return u'[[%s]]' % title
if onsite.family == self.site.family:
return u'[[%s:%s]]' % (self.site.code, title)
if self.site.family.name == self.site.code:
# use this form for sites like commons, where the
# code is the same as the family name
return u'[[%s:%s]]' % (self.site.code,
title)
return u'[[%s:%s:%s]]' % (self.site.family.name,
self.site.code,
title)
def __str__(self):
return self.astext().encode("ascii", "backslashreplace")
def __cmp__(self, other):
"""Test for equality and inequality of Link objects.
Link objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Link objects are sortable by site, then namespace, then title.
"""
if not isinstance(other, Link):
# especially, return -1 if other is None
return -1
if not self.site == other.site:
return cmp(self.site, other.site)
if self.namespace != other.namespace:
return cmp(self.namespace, other.namespace)
return cmp(self.title, other.title)
def __unicode__(self):
return self.astext()
def __hash__(self):
return hash(u'%s:%s:%s' % (self.site.family.name,
self.site.code,
self.title))
@staticmethod
def fromPage(page, source=None):
"""
Create a Link to a Page.
@param source: Link from site source
"""
link = Link.__new__(Link)
link._site = page.site
link._section = page.section()
link._namespace = page.namespace()
link._title = page.title(withNamespace=False,
allowInterwiki=False,
withSection=False)
link._anchor = None
link._source = source or pywikibot.Site()
return link
@staticmethod
def langlinkUnsafe(lang, title, source):
"""
Create a "lang:title" Link linked from source.
        Assumes that lang and title are already clean; no checks are made.
"""
link = Link.__new__(Link)
link._site = pywikibot.Site(lang, source.family.name)
link._section = None
link._source = source
link._namespace = 0
if ':' in title:
ns, t = title.split(':', 1)
ns = link._site.ns_index(ns.lower())
if ns:
link._namespace = ns
title = t
if u"#" in title:
t, sec = title.split(u'#', 1)
title, link._section = t.rstrip(), sec.lstrip()
else:
link._section = None
link._title = title
return link
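# Editor's usage sketch (assumption: a configured user-config.py so that
# pywikibot.Site() resolves to, say, English Wikipedia). Attributes are
# parsed lazily on first access:
#
#     link = Link(u"fr:Chien#Etymology|the dog article")
#     link.anchor     # u'the dog article'  (text after '|')
#     link.section    # u'Etymology'        (text after '#')
#     link.title      # u'Chien'
#     link.site       # the French Wikipedia Site object
#     link.namespace  # 0 (main namespace)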
# Utility functions for parsing page titles
def html2unicode(text, ignore = []):
"""Return text, replacing HTML entities by equivalent unicode characters."""
# This regular expression will match any decimal and hexadecimal entity and
# also entities that might be named entities.
entityR = re.compile(
r'&(?:amp;)?(#(?P<decimal>\d+)|#x(?P<hex>[0-9a-fA-F]+)|(?P<name>[A-Za-z]+));')
    # These characters are HTML-illegal, but sadly you *can* find some of
    # these, and converting them to unichr(decimal) is unsuitable
convertIllegalHtmlEntities = {
128 : 8364, # €
130 : 8218, # ‚
131 : 402, # ƒ
132 : 8222, # „
133 : 8230, # …
134 : 8224, # †
135 : 8225, # ‡
136 : 710, # ˆ
137 : 8240, # ‰
138 : 352, # Š
139 : 8249, # ‹
140 : 338, # Œ
142 : 381, # Ž
145 : 8216, # ‘
146 : 8217, # ’
147 : 8220, # “
148 : 8221, # ”
149 : 8226, # •
150 : 8211, # –
151 : 8212, # —
152 : 732, # ˜
153 : 8482, # ™
154 : 353, # š
155 : 8250, # ›
156 : 339, # œ
158 : 382, # ž
159 : 376 # Ÿ
}
    # ensuring that the illegal entities &#129;, &#141; and &#157;, which
    # have no known values, don't get converted to unichr(129), unichr(141)
    # or unichr(157)
ignore = set(ignore) | set([129, 141, 157])
result = u''
i = 0
found = True
while found:
text = text[i:]
match = entityR.search(text)
if match:
unicodeCodepoint = None
if match.group('decimal'):
unicodeCodepoint = int(match.group('decimal'))
elif match.group('hex'):
unicodeCodepoint = int(match.group('hex'), 16)
elif match.group('name'):
name = match.group('name')
if name in htmlentitydefs.name2codepoint:
# We found a known HTML entity.
unicodeCodepoint = htmlentitydefs.name2codepoint[name]
result += text[:match.start()]
try:
unicodeCodepoint = convertIllegalHtmlEntities[unicodeCodepoint]
except KeyError:
pass
if unicodeCodepoint and unicodeCodepoint not in ignore:
result += unichr(unicodeCodepoint)
else:
# Leave the entity unchanged
result += text[match.start():match.end()]
i = match.end()
else:
result += text
found = False
return result
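# Editor's illustrative examples (assumed behavior, not in the original):
#     html2unicode(u'caf&eacute;')        ->  u'café'
#     html2unicode(u'&#8364; = &#x20AC;') ->  u'€ = €'
# Codepoints listed in `ignore` (always including 129, 141 and 157) are
# left as their literal entity text.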
def url2unicode(title, site, site2 = None):
"""Convert url-encoded text to unicode using site's encoding.
If site2 is provided, try its encodings as well. Uses the first encoding
that doesn't cause an error.
"""
# create a list of all possible encodings for both hint sites
encList = [site.encoding()] + list(site.encodings())
if site2 and site2 != site:
encList.append(site2.encoding())
encList += list(site2.encodings())
firstException = None
# try to handle all encodings (will probably retry utf-8)
for enc in encList:
try:
t = title.encode(enc)
t = urllib.unquote(t)
return unicode(t, enc)
except UnicodeError, ex:
if not firstException:
firstException = ex
pass
# Couldn't convert, raise the original exception
raise firstException
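# Editor's sketch of expected behavior (assumes a site whose primary
# encoding is utf-8):
#     url2unicode(u'Caf%C3%A9', site)  ->  u'Café'
# Candidate encodings are tried in order; the first one that decodes the
# unquoted bytes without a UnicodeError wins, otherwise the first error
# seen is re-raised.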
| 39.865703 | 139 | 0.564376 | 14,901 | 132,394 | 4.974633 | 0.108718 | 0.017484 | 0.005342 | 0.005612 | 0.306461 | 0.263467 | 0.226679 | 0.199873 | 0.175887 | 0.157756 | 0 | 0.006152 | 0.345605 | 132,394 | 3,320 | 140 | 39.877711 | 0.849246 | 0.065524 | 0 | 0.283425 | 0 | 0.003315 | 0.074176 | 0.010088 | 0 | 0 | 0 | 0.004217 | 0.000552 | 0 | null | null | 0.002762 | 0.00884 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21629f0708dae873e8a5adb69fcdb728a21e4126 | 315 | py | Python | app/gql/__init__.py | yoshiya0503/flask-graphQL-example | 469bc38d183ebb0ed7e9a14427d813cb1fdb3b3e | [
"MIT"
] | null | null | null | app/gql/__init__.py | yoshiya0503/flask-graphQL-example | 469bc38d183ebb0ed7e9a14427d813cb1fdb3b3e | [
"MIT"
] | null | null | null | app/gql/__init__.py | yoshiya0503/flask-graphQL-example | 469bc38d183ebb0ed7e9a14427d813cb1fdb3b3e | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
schema object
schema
"""
__author__ = 'Yoshiya Ito <myon53@gmail.com>'
__version__ = '1.0.0'
__date__ = '2019-12-02'
import graphene
from app.gql.query import Query
from app.gql.mutation import Mutation
schema = graphene.Schema(query=Query, mutation=Mutation)
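# Editor's hypothetical usage sketch (the field name `version` is assumed,
# not taken from the actual Query class):
#     result = schema.execute('{ version }')
#     print(result.data)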
| 21 | 56 | 0.72381 | 45 | 315 | 4.8 | 0.644444 | 0.064815 | 0.092593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054348 | 0.12381 | 315 | 14 | 57 | 22.5 | 0.728261 | 0.212698 | 0 | 0 | 0 | 0 | 0.188285 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
dcbe975027f4df9e2601fd2aaf93136cc3a06d31 | 1,181 | py | Python | examples/src/main/python/python_example.py | bensenberner/spline-spark-agent | b391bae118233f88f9f2f235d35d11d93055507e | [
"Apache-2.0"
] | 65 | 2020-01-09T08:30:35.000Z | 2022-03-29T11:02:01.000Z | examples/src/main/python/python_example.py | bensenberner/spline-spark-agent | b391bae118233f88f9f2f235d35d11d93055507e | [
"Apache-2.0"
] | 359 | 2020-01-03T15:47:38.000Z | 2022-03-21T14:22:30.000Z | examples/src/main/python/python_example.py | bensenberner/spline-spark-agent | b391bae118233f88f9f2f235d35d11d93055507e | [
"Apache-2.0"
] | 57 | 2020-03-30T09:16:47.000Z | 2022-03-18T19:55:18.000Z | #
# Copyright 2017 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Enable Spline tracking.
# For Spark 2.3+ we recommend the codeless approach to enable Spline - by setting spark.sql.queryExecutionListeners
# (See: examples/README.md)
# Otherwise execute the following method to enable Spline manually.
sc._jvm.za.co.absa.spline.harvester \
.SparkLineageInitializer.enableLineageTracking(spark._jsparkSession)
# Execute a Spark job as usual:
spark.read \
.option("header", "true") \
.option("inferschema", "true") \
.csv("data/input/batch/wikidata.csv") \
.write \
.mode('overwrite') \
.csv("data/output/batch/python-sample.csv")
| 36.90625 | 115 | 0.740051 | 165 | 1,181 | 5.284848 | 0.666667 | 0.068807 | 0.029817 | 0.036697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010091 | 0.160881 | 1,181 | 31 | 116 | 38.096774 | 0.869828 | 0.690093 | 0 | 0 | 0 | 0 | 0.284058 | 0.185507 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dcbed17561796433a09061693546b7d9c9b7c335 | 314 | py | Python | scribedb/__init__.py | Decathlon/scribedb | 153ae8854bc77c33a799f2757a2d4629f3909f8c | [
"Apache-2.0"
] | 3 | 2020-03-17T09:13:50.000Z | 2020-07-17T08:30:07.000Z | scribedb/__init__.py | doc22940/scribedb | 153ae8854bc77c33a799f2757a2d4629f3909f8c | [
"Apache-2.0"
] | 5 | 2020-03-09T15:11:20.000Z | 2020-09-17T20:50:08.000Z | scribedb/__init__.py | doc22940/scribedb | 153ae8854bc77c33a799f2757a2d4629f3909f8c | [
"Apache-2.0"
] | 1 | 2020-04-08T01:02:20.000Z | 2020-04-08T01:02:20.000Z |
"""
scribeDB is a lightweight tool that compares data at the schema level,
for example between two schemas deployed in PostgreSQL and Oracle
databases.
A minimal usage example:
#
"""
from . import oracle, postgres, rdbms, scribedb
__version__ = '0.1.0'
__all__ = ['__version__', 'oracle', 'postgres', 'scribedb','rdbms']
| 20.933333 | 67 | 0.72293 | 44 | 314 | 4.886364 | 0.795455 | 0.130233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011407 | 0.16242 | 314 | 14 | 68 | 22.428571 | 0.806084 | 0.519108 | 0 | 0 | 0 | 0 | 0.302817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
dcc701ccb8967fd5ddcba24fee8eaeca6fa55888 | 900 | py | Python | 57 - Square root convergents/converg.py | jamtot/PyProjectEuler | 98c2cf5dbfa3d38a2727f2ad204e41f01b624dda | [
"MIT"
] | null | null | null | 57 - Square root convergents/converg.py | jamtot/PyProjectEuler | 98c2cf5dbfa3d38a2727f2ad204e41f01b624dda | [
"MIT"
] | null | null | null | 57 - Square root convergents/converg.py | jamtot/PyProjectEuler | 98c2cf5dbfa3d38a2727f2ad204e41f01b624dda | [
"MIT"
] | null | null | null | """
3  7  17  41  99  239  577  1393      n(i) = 2*n(i-1) + n(i-2)
-  -  --  --  --  ---  ---  ----      (for numerators and denominators
2  5  12  29  70  169  408  985       alike: each term is twice the previous
                                      term plus the term before that)
"""
def nextexp(expansions):
expansions.append(expansions[-1]*2+expansions[-2])
def thousandexps(expans):
while len(expans) < 1000:
nextexp(expans)
if __name__ == "__main__":
numerator_expansions = [3, 7, 17, 41, 99, 239, 577, 1393]
denominator_expansions = [2, 5, 12, 29, 70, 169, 408, 985]
thousandexps(numerator_expansions)
thousandexps(denominator_expansions)
numerators_with_more_digits = 0
for i in xrange(len(numerator_expansions)):
if (len(str(numerator_expansions[i])) > len(str(denominator_expansions[i]))):
numerators_with_more_digits+=1
print numerators_with_more_digits
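# Editor's sanity check (assumption about intent): these fractions
# approximate sqrt(2), e.g. 1393/985 = 1.41421..., and a numerator "wins"
# whenever its digit count exceeds the denominator's, as counted above.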
| 32.142857 | 85 | 0.645556 | 122 | 900 | 4.565574 | 0.467213 | 0.136445 | 0.096948 | 0.129264 | 0.125673 | 0.125673 | 0.125673 | 0.125673 | 0 | 0 | 0 | 0.119253 | 0.226667 | 900 | 27 | 86 | 33.333333 | 0.681034 | 0 | 0 | 0 | 0 | 0 | 0.012232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dcc80839aa9d4bea48c925ed65c8c0a36c832df3 | 255 | py | Python | swarmdjango/core/models/Heading.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 081d1930cc9283ee299d373f91f7c127f466c104 | [
"MIT"
] | null | null | null | swarmdjango/core/models/Heading.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 081d1930cc9283ee299d373f91f7c127f466c104 | [
"MIT"
] | 51 | 2020-08-31T16:50:09.000Z | 2021-05-10T03:04:18.000Z | swarmdjango/core/models/Heading.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 081d1930cc9283ee299d373f91f7c127f466c104 | [
"MIT"
] | null | null | null | from django.db import models
from core.models import Change
class Heading(models.Model):
title = models.TextField()
text = models.TextField()
subHeadings = models.ManyToManyField('self', blank=True)
log = models.ManyToManyField('Change') | 28.333333 | 60 | 0.733333 | 30 | 255 | 6.233333 | 0.633333 | 0.160428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156863 | 255 | 9 | 61 | 28.333333 | 0.869767 | 0 | 0 | 0 | 0 | 0 | 0.039063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dcccd3460841aa56fdd3f12e2c4b74b5af2f3969 | 1,708 | py | Python | progress.py | zzeleznick/config | f7d519004573968aa47fff850480618fd57dc0d1 | [
"MIT"
] | null | null | null | progress.py | zzeleznick/config | f7d519004573968aa47fff850480618fd57dc0d1 | [
"MIT"
] | null | null | null | progress.py | zzeleznick/config | f7d519004573968aa47fff850480618fd57dc0d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=UTF-8
import sys
from blessings import Terminal
import time
import numpy as np
import argparse
# import subprocess;
# subprocess.call(["printf", "\033c"]);
def process_input():
out = None
args = sys.argv[1:]
if args:
out = args[0]
return out
def main():
sys.stderr.write("\x1b[2J\x1b[H")
term = Terminal()
task = process_input()
if not task: task = 'Important stuff'
print term.bold(task)
h = 1
i = 0
limit = 10**5
vals = np.random.random(limit) * 10
prompt = "Progress:"
sep = '|'
units = 'bits'
padding = ' ' * max(3, int(term.width * 0.025))
rhs = len(str(limit)) * 2 + len(sep) + len(units)
lhs = len(prompt) + 5
maxbarsize = max(0, int(term.width * .9) - rhs - lhs - len(padding))
def makeLine(idx = None):
if not idx: idx = i
n = ('%0.0f' % (float(idx)/limit * 100)).zfill(2)
pct = '(%s%s)' % (n, '%')
right = ('%d %s %d %s' % (idx, sep, limit, units)).rjust(rhs)
bar = '=' * int(float(idx) / limit * maxbarsize)
space = ' ' * (maxbarsize - len(bar))
mid = '%s[%s>%s]%s' % (' ', bar, space, padding)
return ' '.join([prompt, pct, mid, right])
print makeLine()
last = time.time()
while i < limit:
if time.time() - last > 0.3:
time.sleep(.05)
print term.move(h, 0) + makeLine()
last = time.time()
if i < len(vals):
time.sleep(vals[i]/10**5)
i += vals[i]
else:
i += np.random.randint(10)
print term.move(h, 0) + makeLine(limit)
print term.bold('Success!')
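# Editor's note (hypothetical invocation): run as e.g.
#     ./progress.py "Reticulating splines"
# which clears the terminal, prints the task in bold, and animates the bar
# until the simulated counter reaches 10**5.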
if __name__ == '__main__':
main() | 25.878788 | 72 | 0.522834 | 234 | 1,708 | 3.773504 | 0.401709 | 0.04077 | 0.029445 | 0.0453 | 0.052095 | 0.052095 | 0 | 0 | 0 | 0 | 0 | 0.035146 | 0.300351 | 1,708 | 66 | 73 | 25.878788 | 0.703766 | 0.052693 | 0 | 0.037736 | 0 | 0 | 0.060062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.113208 | null | null | 0.09434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dccfb668a1df9c3316a2a14e3756a083e3ab3839 | 3,734 | py | Python | google/ads/google_ads/v4/proto/services/campaign_bid_modifier_service_pb2_grpc.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/services/campaign_bid_modifier_service_pb2_grpc.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/services/campaign_bid_modifier_service_pb2_grpc.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v4.proto.resources import campaign_bid_modifier_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2
from google.ads.google_ads.v4.proto.services import campaign_bid_modifier_service_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2
class CampaignBidModifierServiceStub(object):
"""Proto file describing the Campaign Bid Modifier service.
Service to manage campaign bid modifiers.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignBidModifier = channel.unary_unary(
'/google.ads.googleads.v4.services.CampaignBidModifierService/GetCampaignBidModifier',
request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.GetCampaignBidModifierRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2.CampaignBidModifier.FromString,
)
self.MutateCampaignBidModifiers = channel.unary_unary(
'/google.ads.googleads.v4.services.CampaignBidModifierService/MutateCampaignBidModifiers',
request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersResponse.FromString,
)
class CampaignBidModifierServiceServicer(object):
"""Proto file describing the Campaign Bid Modifier service.
Service to manage campaign bid modifiers.
"""
def GetCampaignBidModifier(self, request, context):
"""Returns the requested campaign bid modifier in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaignBidModifiers(self, request, context):
"""Creates, updates, or removes campaign bid modifiers.
Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignBidModifierServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignBidModifier': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignBidModifier,
request_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.GetCampaignBidModifierRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2.CampaignBidModifier.SerializeToString,
),
'MutateCampaignBidModifiers': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaignBidModifiers,
request_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v4.services.CampaignBidModifierService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 53.342857 | 186 | 0.831012 | 414 | 3,734 | 6.905797 | 0.210145 | 0.069255 | 0.099685 | 0.052466 | 0.693949 | 0.684855 | 0.665967 | 0.619098 | 0.619098 | 0.56943 | 0 | 0.00814 | 0.111676 | 3,734 | 69 | 187 | 54.115942 | 0.853784 | 0.124799 | 0 | 0.2 | 1 | 0 | 0.114953 | 0.086293 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.075 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dcd1979ad4bab59c6389ed6cdcd41efd03611f23 | 8,840 | py | Python | hg_agent_forwarder/forwarder.py | hostedgraphite/hg-agent-forwarder | b5bc9a69f1cbd49ba925dd33fa4f28617f1e33dc | [
"MIT"
] | null | null | null | hg_agent_forwarder/forwarder.py | hostedgraphite/hg-agent-forwarder | b5bc9a69f1cbd49ba925dd33fa4f28617f1e33dc | [
"MIT"
] | null | null | null | hg_agent_forwarder/forwarder.py | hostedgraphite/hg-agent-forwarder | b5bc9a69f1cbd49ba925dd33fa4f28617f1e33dc | [
"MIT"
] | 2 | 2018-07-17T15:11:38.000Z | 2018-08-19T12:16:55.000Z | import threading
import Queue
import os
import logging
import json
import time
import random
import requests
import multitail2
import errno
import tempfile
from requests.auth import HTTPBasicAuth
from utils import Datapoint
class MetricForwarder(threading.Thread):
'''
Simple metric data forwarder.
Forwards data over http, has a simple exponential
backoff in case of connectivity issues.
'''
def __init__(self, config, shutdown_e, *args, **kwargs):
super(MetricForwarder, self).__init__(*args, **kwargs)
self.config = config
self.name = "Metric Forwarder"
# Because multitail2 blocks on read, if there is no data being written
# to spool, we can end up with the spool reader blocking (and thus not
# noticing "shutdown"). However, it's fine for this thread to exit with
# the interpreter.
self.daemon = True
self.url = config.get('endpoint_url',
'https://agentapi.hostedgraphite.com/api/v1/sink')
self.api_key = self.config.get('api_key')
self.progress = self.load_progress_file()
self.shutdown_e = shutdown_e
self.spool_reader = SpoolReader('/var/opt/hg-agent/spool/*.spool.*',
progresses=self.progress,
shutdown=self.shutdown_e)
self.progress_writer = ProgressWriter(self.config,
self.spool_reader,
self.shutdown_e)
self.progress_writer.start()
self.retry_interval = random.randrange(200, 400)
self.request_session = requests.Session()
self.request_session.auth = HTTPBasicAuth(self.api_key, '')
self.request_timeout = config.get('request_timeout', 10)
self.batch = ""
self.batch_size = 0
self.batch_time = time.time()
self.batch_timeout = config.get('batch_timeout', 0.5)
self.max_batch_size = config.get('max_batch_size', 250)
def run(self):
while not self.shutdown_e.is_set():
try:
for line in self.spool_reader.read():
if self.shutdown_e.is_set():
break
datapoint = Datapoint(line, self.api_key)
if datapoint.validate():
self.extend_batch(datapoint)
else:
logging.error("Invalid line in spool.")
# invalid somehow, pass
continue
if self.should_send_batch():
self.forward()
except Exception as e:
continue
def extend_batch(self, data):
'''
Add a metric to the current metric batch.
'''
if not self.batch_time:
self.batch_time = time.time()
try:
metric = data.metric
value = data.value
ts = data.timestamp
except AttributeError:
# somehow, this dp is invalid, pass it by.
pass
else:
metric_str = "%s %s %s" % (metric, value, ts)
if self.batch_size == 0:
self.batch = metric_str
else:
self.batch = "%s\n%s" % (self.batch, metric_str)
self.batch_size += 1
def should_send_batch(self):
'''
Check to see if we should send the
current batch.
True if timeout is > 10 or batch
size is reached.
'''
now = time.time()
if (now - self.batch_time) > self.batch_timeout and self.batch_size != 0:
return True
elif self.batch_size > self.max_batch_size:
return True
return False
def forward(self):
not_processed = True
backoff = 0
while not_processed and not self.shutdown_e.is_set():
if self.send_data():
not_processed = False
else:
# Back off exponentially up to 6 times before levelling
# out. E.g. for a retry_interval of 300, that'll result
# in retries at 300, 600, 1200, 2400, 4800, 9600, 9600, ...
interval = (2**backoff) * self.retry_interval
if backoff < 5:
backoff += 1
logging.error('Metric sending failed, retry in %s ms',
interval)
time.sleep(interval / 1000.0)
def send_data(self):
try:
req = self.request_session.post(self.url,
data=self.batch,
stream=False,
timeout=self.request_timeout)
if req.status_code == 429:
logging.info("Metric forwarding limits hit \
please contact support.")
# Ensure exception info is logged for HTTP errors.
req.raise_for_status()
except Exception as e:
logging.error("Metric forwarding exception: %s", e)
return False
else:
# reset batch info now that send has succeeded.
self.batch = ""
self.batch_size = 0
self.batch_time = time.time()
return True
def shutdown(self):
'''Shut down this forwarder.
Deals with the forwarder's progress thread: we want to be certain that
the progress thread has a chance to finish what it's doing if it is
mid-write, so we wait on it. As the forwarder itself is a daemon thread
(which *may* block reading spools via multitail2), it will exit once
everything else is done anyway.
NB: called from outside the forwarder's thread of execution.
'''
while self.progress_writer.is_alive():
self.progress_writer.join(timeout=0.1)
time.sleep(0.1)
def load_progress_file(self):
progress_cfg = self.config.get('progress', {})
progress = {}
try:
progressfile = progress_cfg.get('path',
'/var/opt/hg-agent/spool/progress')
if progressfile is not None:
progress = json.load(file(progressfile))
except (ValueError, IOError, OSError) as e:
logging.error(
'Error loading progress file on startup; '
'spool files will be read from end: %s', e
)
return progress
class SpoolReader(object):
'''
Tails files matching a glob. yields lines from them.
'''
def __init__(self, spoolglob, progresses=None, shutdown=None):
self.progresses = progresses or {}
self.shutdown_e = shutdown
self.data_reader = multitail2.MultiTail(spoolglob,
skip_to_end=False,
offsets=progresses)
def read(self):
for (filename, byteoffset), line in self.data_reader:
if self.shutdown_e.is_set():
break
line_byte_len = len(bytes(line))
# + 1 for newline '\n'
self.progresses[filename] = byteoffset + line_byte_len + 1
try:
if len(line) > 1:
yield line
except ValueError:
logging.error('Could not parse line: %s', line)
continue
class ProgressWriter(threading.Thread):
'''
'''
def __init__(self, config, spool_reader, shutdown_e, *args, **kwargs):
super(ProgressWriter, self).__init__(*args, **kwargs)
self.shutdown_e = shutdown_e
self._config = config
self.interval = self._config.get('interval', 10)
self.spool_reader = spool_reader
self.final_path = '/var/opt/hg-agent/spool/'
def run(self):
while not self.shutdown_e.is_set():
try:
self.atomicwrite()
except Exception as e:
logging.error("Unhandled exception while writing progress: %s",
e)
time.sleep(self.interval)
def atomicwrite(self):
try:
content = json.dumps(self.spool_reader.progresses)
except:
content = {}
try:
os.makedirs('/var/opt/hg-agent/spool/', 0755)
except OSError as err:
if err.errno != errno.EEXIST:
raise
fd, temp_path = tempfile.mkstemp(dir='/var/opt/hg-agent/spool/')
with os.fdopen(fd, 'w') as fh:
fh.write(content)
os.chmod(temp_path, 0644)
os.rename(temp_path, "%s/%s" % (self.final_path, "progress"))
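# Editor's minimal wiring sketch (assumes a valid api_key and spool files
# under /var/opt/hg-agent/spool/):
#
#     shutdown = threading.Event()
#     forwarder = MetricForwarder({'api_key': 'YOUR-KEY'}, shutdown)
#     forwarder.start()
#     ...
#     shutdown.set()
#     forwarder.shutdown()   # lets the progress writer finish its write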
| 36.378601 | 81 | 0.541176 | 989 | 8,840 | 4.712841 | 0.28817 | 0.036687 | 0.027891 | 0.013946 | 0.145033 | 0.102553 | 0.046342 | 0.035615 | 0.035615 | 0.035615 | 0 | 0.01513 | 0.371946 | 8,840 | 242 | 82 | 36.528926 | 0.824388 | 0.064367 | 0 | 0.230337 | 0 | 0 | 0.074185 | 0.018614 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.005618 | 0.073034 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dcd91413a4ee5c2dc186ccc8e249ed33ec01e5cc | 4,145 | py | Python | datawin-parse.py | BlackLotus/ogme | c163d51d495e61b96d8123cbb0efe8eae0e4fb66 | [
"MIT"
] | null | null | null | datawin-parse.py | BlackLotus/ogme | c163d51d495e61b96d8123cbb0efe8eae0e4fb66 | [
"MIT"
] | null | null | null | datawin-parse.py | BlackLotus/ogme | c163d51d495e61b96d8123cbb0efe8eae0e4fb66 | [
"MIT"
] | null | null | null | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import os, sys
import binascii
import struct
import StringIO
# chunk names are 4 bytes long
# not quite sure how long the header is but I guess 8 bytes for now :>
# actually it's not really a header but the first chunk
# 4 bytes for the name (FORM) and 4 bytes for the length (filesize-8)
# so this is not needed anymore
# dwheader=binascii.unhexlify("464F524DE6EF5A03")
datawin = open("data.win", "r+")
# datawin.seek(0,2)
# dwsize=datawin.tell()
# datawin.seek(0,0)
# header=datawin.read(8)
chunks = {}
# if header==dwheader:
# print "Doogie doogie Dodger"
# else:
# print "Wrong magic bytes"
# quit()
def read_chunk(data):
# data has to be a StringIO/file... not!
if type(data) != type("foo"):
print "Somehow the data is not a string"
quit()
else:
dsize = len(binascii.hexlify(data)) / 2
data = StringIO.StringIO(data)
data.seek(0, 0)
chunkname = data.read(4)
if chunkname.isupper():
print "Reading " + chunkname
else:
print "Reading " + binascii.hexlify(chunkname)
chunksize = data.read(4)
if len(chunksize) != 4:
data.seek(0, 0)
return [data.read()]
# TODO: _THIS_ is stupid! ... let's correct this with a nice struct or somethin
# later...
foo = binascii.hexlify(chunksize)
chunksize = foo[-2:] + foo[4:6] + foo[2:4] + foo[:2]
chunksize = int(chunksize, 16)
if chunksize + 8 == dsize:
chunk = data.read(chunksize)
return [chunkname, chunk]
elif chunksize + 8 > dsize:
data.seek(0, 0)
return [data.read()]
else:
chunk = data.read(chunksize)
rest = data.read()
if len(rest) == 0:
print "Something went terrible, terrible WRONG :( WTF IS HAPPENING?????"
return [chunkname, chunk]
else:
return [chunkname, chunk, rest]
def extract_chunks(data):
if type(data) == type("Foo"):
realdatasize = len(data)
data = StringIO.StringIO(data)
else:
realdatasize = len(data.read())
data.seek(0, 2)
datasize = data.tell()
if datasize != realdatasize:
print "OK WHY ISN'T THIS WORKING??"
quit()
data.seek(0, 0)
while data.tell() != datasize:
chunk = read_chunk(data.read())
if len(chunk) == 1:
return chunk[0]
elif len(chunk) == 2:
print "One big chunk you chump"
if type(chunk[1]) == type("foo"):
return {chunk[0]: extract_chunks(chunk[1])}
else:
print "OMG LIKE THAT'S TOTALLY LIKE A DICTIONARY THAT SOMEHOW GOT LIKE RETURNED! WHAT THE EFF"
return {chunk[0]: chunk[1]}
elif len(chunk) == 3:
if type(chunk[1]) == type("foo"):
newchunk = {chunk[0]: extract_chunks(chunk[1])}
else:
newchunk = {chunk[0]: chunk[1]}
if type(chunk[2]) == type("foo"):
newdict = extract_chunks(chunk[2])
if type(newdict) == type({}):
newchunk.update(newdict)
else:
print "Ok this is a defect... this shouldn't happen.I mean _never_. It means I split a chunk that wasn't a chunk.This is a fail.... correct it... somehow"
newchunk.update({"DEFECT": newdict})
else:
newchunk.update(chunk[2])
return newchunk
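# Editor's note on the assumed chunk layout (IFF-style): each chunk is a
# 4-byte ASCII tag followed by a little-endian uint32 length and that many
# payload bytes; the outer FORM chunk spans the file minus its own 8-byte
# header. The hand-rolled hex shuffle in read_chunk is equivalent to
#     chunksize = struct.unpack('<I', chunksize)[0]
# using the `struct` module already imported at the top.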
final_dict = extract_chunks(datawin)
# print type(final_dict)
print len(final_dict["FORM"]["STRG"])
print type(final_dict["FORM"]["STRG"])
# for foo in final_dict["FORM"]:
# print foo
# while datawin.tell()!=dsize:
# extract chunk from data
# pass the chunk back to the while loop like this
# chunk=read_chunk(datawin)
#
# chunks[chunk[0]]=StringIO.StringIO(chunk[1])
#
# chunk=read_chunk(chunks["FORM"])
#
#
# if check_chunk(StringIO.StringIO(chunk[1]))==0:
# chunk=read_chunk(StringIO.StringIO(chunk[1]))
# while dwsize!=datawin.tell():
# chunk=read_chunk()
# chunks[chunk[0]]=chunk[1]
# check if chunk
# add chunk to dictionary
# check if chunk
# add to dictionary
| 30.255474 | 174 | 0.587696 | 550 | 4,145 | 4.390909 | 0.269091 | 0.027329 | 0.012422 | 0.016563 | 0.096066 | 0.059627 | 0.043892 | 0 | 0 | 0 | 0 | 0.02369 | 0.27696 | 4,145 | 136 | 175 | 30.477941 | 0.782115 | 0.277443 | 0 | 0.308642 | 0 | 0.012346 | 0.14934 | 0 | 0 | 0 | 0 | 0.007353 | 0 | 0 | null | null | 0 | 0.049383 | null | null | 0.123457 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dcdb3ea3ab1893474827b46227655af67fd0f6e0 | 1,701 | py | Python | arrhenuis/termodyn.py | xtotdam/leipzig-report | fe4146f9201a8453683ef1221fc768f135677153 | [
"MIT"
] | null | null | null | arrhenuis/termodyn.py | xtotdam/leipzig-report | fe4146f9201a8453683ef1221fc768f135677153 | [
"MIT"
] | null | null | null | arrhenuis/termodyn.py | xtotdam/leipzig-report | fe4146f9201a8453683ef1221fc768f135677153 | [
"MIT"
] | null | null | null | from arrhenius import stringify
"""
This script generates a LaTeX table with thermodynamic parameters, taken from 'termodyn-acid.data.txt' and 'termodyn-anion.data.txt'
"""
head = '''
\\begin{center}
\\begin{tabular}{ x{1cm} x{2cm} x{2cm} x{2cm} x{2cm} x{2cm} } \\hline
Acid & E$_A$ \\newline kJ/mol & A \\newline M$^{-1}$s$^{-1}$ & \
$\\Delta \\text{S} \\ddag$ \\newline J / mol ${\\cdot}$ K & $\\Delta \\text{H} \\ddag$ \\newline kJ / mol & $\\Delta \\text{G} \\ddag$ \\newline kJ / mol \\\\ \\hline
'''
feet = '''
\\hline
\\end{tabular}
\\end{center}
'''
acidstrings, anionstrings = list(), list()
data = open('termodyn-acid.data.txt').readlines()
for line in data:
acro = line.split()[0]
ea, a, s, h, g = map(float, line.split()[1:])
string = '{} & ${:.1f}$ & ${:.2e}$ & ${:.1f}$ & ${:.1f}$ & ${:.1f}$ \\\\'.format(acro, ea, a, s, h, g)
acidstrings.append(string)
data = open('termodyn-anion.data.txt').readlines()
for line in data:
acro = line.split()[0]
ea, a, s, h, g = map(float, line.split()[1:])
string = '{} & ${:.1f}$ & ${:.2e}$ & ${:.1f}$ & ${:.1f}$ & ${:.1f}$ \\\\'.format(acro, ea, a, s, h, g)
anionstrings.append(string)
latex = 'Reactions of OH radicals with halogenated acids \n\n' + \
head + ' \n '.join(acidstrings) + '\n' + feet + '\n\n' + \
'Reactions of OH radicals with haloacetate and halopropiate anions \n\n' + \
head + ' \n '.join(anionstrings) + '\n' + feet
for i in range(5, 16):
latex = latex.replace('e+{:02d}'.format(i), ' \\cdot 10^{{{0}}} '.format(i))
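# Editor's worked example of the rewrite above (input format follows the
# '{:.2e}' formatting used earlier): '$3.21e+09$' becomes
# '$3.21 \cdot 10^{9} $'.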
latex = latex.replace('$nan$', '---')
with open('termodyn.tex', 'w') as f:
f.write(latex)
| 36.191489 | 167 | 0.542034 | 236 | 1,701 | 3.902542 | 0.377119 | 0.021716 | 0.021716 | 0.034745 | 0.314875 | 0.236699 | 0.236699 | 0.236699 | 0.214984 | 0.214984 | 0 | 0.022288 | 0.208701 | 1,701 | 46 | 168 | 36.978261 | 0.661961 | 0 | 0 | 0.294118 | 1 | 0.147059 | 0.472021 | 0.029625 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.029412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |