blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f1f35cd3e6246382f9d1003ac1d2188b27d3137 | 14a913fce4b538b22f28409645cd6abe3455808f | /bigquery_storage/to_dataframe/main_test.py | 8335b437063c827cd6d43c4af690752455ae19dd | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iamLoi/Python-Random-Number-Generator | 8da7dbd37cb13a01232c8ed49b9df35a99c63d73 | 7579e8b15130802aaf519979e475c6c75c403eda | refs/heads/master | 2022-08-29T19:05:32.649931 | 2019-09-14T14:48:58 | 2019-09-14T14:48:58 | 208,454,877 | 2 | 1 | Apache-2.0 | 2022-08-05T21:57:49 | 2019-09-14T14:51:05 | Python | UTF-8 | Python | false | false | 5,502 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.fixture
def clients():
    """Return a ``(bigquery_client, bigquery_storage_client)`` pair.

    Both clients share one explicitly-created credentials object so the
    tests below do not fetch duplicate authentication tokens.  The
    ``[START]``/``[END]`` markers delimit regions extracted into the
    published tutorial — keep them intact.
    """
    # [START bigquerystorage_pandas_tutorial_all]
    # [START bigquerystorage_pandas_tutorial_create_client]
    import google.auth
    from google.cloud import bigquery
    from google.cloud import bigquery_storage_v1beta1
    # Explicitly create a credentials object. This allows you to use the same
    # credentials for both the BigQuery and BigQuery Storage clients, avoiding
    # unnecessary API calls to fetch duplicate authentication tokens.
    credentials, your_project_id = google.auth.default(
        scopes=["https://www.googleapis.com/auth/cloud-platform"]
    )
    # Make clients.
    bqclient = bigquery.Client(
        credentials=credentials,
        project=your_project_id,
    )
    bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient(
        credentials=credentials
    )
    # [END bigquerystorage_pandas_tutorial_create_client]
    # [END bigquerystorage_pandas_tutorial_all]
    return bqclient, bqstorageclient
def test_table_to_dataframe(capsys, clients):
    """Download two columns of a public table with ``list_rows`` and convert
    the result to a pandas DataFrame via the BigQuery Storage API, then
    assert a known column name appears in the printed preview."""
    from google.cloud import bigquery
    bqclient, bqstorageclient = clients
    # [START bigquerystorage_pandas_tutorial_all]
    # [START bigquerystorage_pandas_tutorial_read_table]
    # Download a table.
    table = bigquery.TableReference.from_string(
        "bigquery-public-data.utility_us.country_code_iso"
    )
    rows = bqclient.list_rows(
        table,
        selected_fields=[
            bigquery.SchemaField("country_name", "STRING"),
            bigquery.SchemaField("fips_code", "STRING"),
        ],
    )
    dataframe = rows.to_dataframe(bqstorage_client=bqstorageclient)
    print(dataframe.head())
    # [END bigquerystorage_pandas_tutorial_read_table]
    # [END bigquerystorage_pandas_tutorial_all]
    out, _ = capsys.readouterr()
    assert "country_name" in out
def test_query_to_dataframe(capsys, clients):
    """Run a SQL query and stream its result set into a pandas DataFrame
    through the BigQuery Storage API, then assert the printed preview
    contains the expected URL fragment."""
    bqclient, bqstorageclient = clients
    # [START bigquerystorage_pandas_tutorial_all]
    # [START bigquerystorage_pandas_tutorial_read_query_results]
    # Download query results.
    query_string = """
    SELECT
    CONCAT(
        'https://stackoverflow.com/questions/',
        CAST(id as STRING)) as url,
    view_count
    FROM `bigquery-public-data.stackoverflow.posts_questions`
    WHERE tags like '%google-bigquery%'
    ORDER BY view_count DESC
    """
    dataframe = (
        bqclient.query(query_string)
        .result()
        .to_dataframe(bqstorage_client=bqstorageclient)
    )
    print(dataframe.head())
    # [END bigquerystorage_pandas_tutorial_read_query_results]
    # [END bigquerystorage_pandas_tutorial_all]
    out, _ = capsys.readouterr()
    assert "stackoverflow" in out
def test_session_to_dataframe(capsys, clients):
    """Create a BigQuery Storage read session over a public table (Arrow
    format, two selected columns), read one stream into a DataFrame, and
    assert a selected column name appears in the printed preview."""
    from google.cloud import bigquery_storage_v1beta1
    bqclient, bqstorageclient = clients
    your_project_id = bqclient.project
    # [START bigquerystorage_pandas_tutorial_all]
    # [START bigquerystorage_pandas_tutorial_read_session]
    table = bigquery_storage_v1beta1.types.TableReference()
    table.project_id = "bigquery-public-data"
    table.dataset_id = "new_york_trees"
    table.table_id = "tree_species"
    # Select columns to read with read options. If no read options are
    # specified, the whole table is read.
    read_options = bigquery_storage_v1beta1.types.TableReadOptions()
    read_options.selected_fields.append("species_common_name")
    read_options.selected_fields.append("fall_color")
    parent = "projects/{}".format(your_project_id)
    session = bqstorageclient.create_read_session(
        table,
        parent,
        read_options=read_options,
        # This API can also deliver data serialized in Apache Avro format.
        # This example leverages Apache Arrow.
        format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,
        # We use a LIQUID strategy in this example because we only read from a
        # single stream. Consider BALANCED if you're consuming multiple streams
        # concurrently and want more consistent stream sizes.
        sharding_strategy=(
            bigquery_storage_v1beta1.enums.ShardingStrategy.LIQUID
        ),
    )
    # This example reads from only a single stream. Read from multiple streams
    # to fetch data faster. Note that the session may not contain any streams
    # if there are no rows to read.
    stream = session.streams[0]
    position = bigquery_storage_v1beta1.types.StreamPosition(stream=stream)
    reader = bqstorageclient.read_rows(position)
    # Parse all Avro blocks and create a dataframe. This call requires a
    # session, because the session contains the schema for the row blocks.
    dataframe = reader.to_dataframe(session)
    print(dataframe.head())
    # [END bigquerystorage_pandas_tutorial_read_session]
    # [END bigquerystorage_pandas_tutorial_all]
    out, _ = capsys.readouterr()
    assert "species_common_name" in out
| [
"noreply@github.com"
] | noreply@github.com |
d8147b401cc194546eaec87496a9399bd71110d4 | 972a0fe8628b4df5c92f0fde2b675e922366d65e | /word_game/ps3.py | f26fa5bb6dc2d2be386115aa0c30a0116e84da02 | [
"Giftware"
] | permissive | kaminskykasuga/Practice | daee224965ac40e217917f7a149c4a09146b2abc | 2dcdd1bb5706fe727179e534a752a1a6ab7a5d73 | refs/heads/master | 2023-02-09T08:50:39.640768 | 2020-12-25T00:45:30 | 2020-12-25T00:45:30 | 300,264,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,246 | py | # 6.0001 Problem Set 3
#
# The 6.0001 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
#
# Name : <your name>
# Collaborators : <your collaborators>
# Time spent : <total time>
import math
import random
import string
# Letter pools used when dealing a hand.
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Number of tiles dealt per hand (one vowel slot is replaced by the '*' wildcard).
HAND_SIZE = 7
# Scrabble point value of each lowercase letter.
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,
    'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print("Loading word list from file...")
    # ``with`` guarantees the file handle is closed even if reading fails;
    # the original implementation opened the file and never closed it.
    with open(WORDLIST_FILENAME, 'r') as in_file:
        # wordlist: list of strings, one normalized word per input line
        wordlist = [line.strip().lower() for line in in_file]
    print(" ", len(wordlist), "words loaded.")
    return wordlist
def get_frequency_dict(sequence):
    """
    Returns a dictionary mapping each element of ``sequence`` to the
    number of times it occurs.

    sequence: string or list
    return: dictionary (element_type -> int)
    """
    counts = {}
    for item in sequence:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def get_word_score(word, n):
    """
    Returns the score for a word (assumed valid).

    The score is the product of two components:
      * the sum of the Scrabble points of the letters (the '*' wildcard
        is worth nothing; case is ignored), and
      * max(1, 7*wordlen - 3*(n - wordlen)), where wordlen is the length
        of the word and n is the hand length when the word was played.

    word: string (may be mixed case, may contain '*')
    n: int >= 0
    returns: int >= 0
    """
    letter_points = sum(
        SCRABBLE_LETTER_VALUES[ch.lower()] for ch in word if ch != '*'
    )
    length_bonus = max(1, 7 * len(word) - 3 * (n - len(word)))
    return letter_points * length_bonus
#
# Make sure you understand how this function works and what it does!
#
def display_hand(hand):
    """
    Print the letters currently in the hand on a single line, each copy
    followed by a space, then end the line.  For example
    ``display_hand({'a':1, 'x':2, 'l':3, 'e':1})`` prints something like
    ``a x x l l l e``.  The order of the letters is unimportant.

    hand: dictionary (string -> int)
    """
    for letter, count in hand.items():
        # emit "<letter> " once per copy held
        print((letter + ' ') * count, end='')
    print()  # finish the line
#
# Make sure you understand how this function works and what it does!
# You will need to modify this for Problem #4.
#
def deal_hand(n):
    """
    Returns a random hand containing n lowercase letters.
    ceil(n/3) of the slots are vowel slots, one of which is taken by the
    '*' wildcard; the rest are random consonants.

    Hands are dictionaries mapping each letter to the number of times it
    appears in the hand.

    n: int >= 0
    returns: dictionary (string -> int)
    """
    hand = {}
    num_vowels = int(math.ceil(n / 3))
    # one vowel slot is reserved for the wildcard, so draw one fewer vowel
    for _ in range(num_vowels - 1):
        vowel = random.choice(VOWELS)
        hand[vowel] = hand.get(vowel, 0) + 1
    # fill the remaining n - num_vowels slots with consonants
    for _ in range(n - num_vowels):
        consonant = random.choice(CONSONANTS)
        hand[consonant] = hand.get(consonant, 0) + 1
    hand['*'] = 1
    return hand
#
# Problem #2: Update a hand by removing letters
#
def update_hand(hand, word):
    """
    Uses up the letters of ``word`` and returns the new hand without them.

    Does NOT assume the hand holds every letter of the word: letters not
    in the hand are ignored, and letters appearing in the word more times
    than in the hand are removed (never driven negative).  Has no side
    effects: ``hand`` is not modified.

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    new_hand = hand.copy()
    for ch in word.lower():
        # Bug fix: membership must be tested against the REMAINING hand
        # (new_hand), not the original -- otherwise a word that reuses a
        # letter more times than held raised KeyError on the second use.
        if (ch.isalpha() or ch == '*') and ch in new_hand:
            if new_hand[ch] > 1:
                new_hand[ch] -= 1
            else:
                del new_hand[ch]
    return new_hand
#
# Problem #3: Test word validity
#
def is_valid_word(word, hand, word_list):
    """
    Returns True if ``word`` is in ``word_list`` and is entirely composed
    of letters in ``hand``; otherwise returns False.  A '*' wildcard in
    the word matches any vowel.  Does not mutate hand or word_list.

    word: string
    hand: dictionary (string -> int)
    word_list: list of lowercase strings
    returns: boolean
    """
    lowered = word.lower()
    if '*' in lowered:
        # The word is valid if ANY vowel substitution yields a listed word.
        # Bug fix: the original returned False whenever the matching vowel
        # happened to be 'u' (it re-tested the loop variable after break);
        # for/else only rejects when NO vowel matched.
        for vowel in VOWELS:
            if lowered.replace('*', vowel) in word_list:
                break
        else:
            return False
    else:
        if lowered not in word_list:
            return False
    # every character (including '*') must be available often enough in hand
    for ch in lowered:
        if ch not in hand or hand[ch] < lowered.count(ch):
            return False
    return True
#
# Problem #5: Playing a hand
#
def calculate_handlen(hand):
    """
    Returns the length (total number of letters) in the current hand.

    hand: dictionary (string -> int)
    returns: integer
    """
    return sum(hand.values())
def play_hand(hand, word_list):
    """
    Allows the user to play the given hand: the hand is displayed, the user
    enters words (valid or not, each uses up its letters), invalid words are
    rejected with a message, and the hand ends when no letters remain or the
    user enters '!!'.  The running and final scores are printed.

    hand: dictionary (string -> int)
    word_list: list of lowercase strings
    returns: the total score for the hand
    """
    score = 0
    while hand != {}:
        print('\nCurrent hand: ', end='')
        display_hand(hand)
        word = input('Enter word, or “!!” to indicate that you are finished: ')
        if word == '!!':
            break
        # compute the post-play hand once (the original called update_hand twice)
        remaining = update_hand(hand, word)
        if is_valid_word(word, hand, word_list):
            # Bug fix: score against the CURRENT hand length, not the starting
            # HAND_SIZE -- get_word_score documents n as "the hand length when
            # the word was played" (this is why calculate_handlen exists).
            word_score = get_word_score(word, calculate_handlen(hand))
            score += word_score
            print('"' + word + '" earned', word_score, 'points. Total:', score, 'points')
        else:
            print('This is not a valid word.', end='')
            if remaining != {}:
                print(' Please choose another word.')
            else:
                print()
        hand = remaining
    if hand == {}:
        print('Ran out of letters')
    print('Total score for this hand:', score, 'points')
    return score
# Keep track of the total score
# As long as there are still letters left in the hand:
# Display the hand
# Ask user for input
# If the input is two exclamation points:
# End the game (break out of the loop)
# Otherwise (the input is not two exclamation points):
# If the word is valid:
# Tell the user how many points the word earned,
# and the updated total score
# Otherwise (the word is not valid):
# Reject invalid word (print a message)
# update the user's hand by removing the letters of their inputted word
# Game is over (user entered '!!' or ran out of letters),
# so tell user the total score
# Return the total score as result of function
#
# Problem #6: Playing a game
#
#
# procedure you will use to substitute a letter in a hand
#
def substitute_hand(hand, letter):
    """
    Return a new hand with every copy of ``letter`` replaced by one random
    new letter drawn from VOWELS and CONSONANTS.  The replacement is never
    a letter already in the hand (so never ``letter`` itself).  If
    ``letter`` is not in the hand, the returned hand has the same contents.
    Has no side effects: does not mutate ``hand``.

    For example, substitute_hand({'h':1, 'e':1, 'l':2, 'o':1}, 'l') might
    return {'h':1, 'e':1, 'o':1, 'x':2} if the new letter is 'x'.

    hand: dictionary (string -> int)
    letter: string
    returns: dictionary (string -> int)
    """
    # candidate pool: every letter NOT already held
    pool = CONSONANTS + VOWELS
    for held in hand:
        pool = pool.replace(held, '')
    replacement = random.choice(pool)
    new_hand = {}
    for held, count in hand.items():
        if held == letter:
            new_hand[replacement] = count
        else:
            new_hand[held] = count
    return new_hand
def play_game(word_list):
    """
    Allow the user to play a series of hands.

    * Asks the user for a total number of hands and accumulates each
      hand's score into a series total.
    * Before each hand (until used once), offers to substitute one letter
      of the hand for a random new one.
    * After each hand (until used once), offers to replay the same hand,
      keeping the better of the two scores; a replayed hand does not get
      the substitution option and does not count as an extra hand.
    * Prints the total score over all hands at the end.

    word_list: list of lowercase strings
    """
    total = 0
    replay = False
    substitute = False
    # keep asking until the user supplies a positive integer
    while True:
        try:
            number_of_hands = int(input('Enter total number of hands: '))
            assert number_of_hands > 0
            break
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; only the two errors this block
        # can legitimately raise are treated as "invalid number".
        except (ValueError, AssertionError):
            print('This is not a valid number. Please try again')
    for _ in range(number_of_hands):
        hand = deal_hand(HAND_SIZE)
        # one-time letter substitution, offered until it has been used
        if substitute is False:
            print('Current hand: ', end='')
            display_hand(hand)
            while True:
                answer = input('Would you like to substitute a letter? ')
                if (answer.lower() != 'yes') and (answer.lower() != 'no'):
                    print('This is not a valid answer. Please answer "yes" or "no"')
                else:
                    break
            if answer.lower() == 'yes':
                hand = substitute_hand(hand, input('Which letter would you like to replace: ').lower())
                substitute = True
        score = play_hand(hand, word_list)
        print('--------')
        # one-time replay of the same hand, keeping the better score
        if replay is False:
            while True:
                answer = input('Would you like to replay the hand? ')
                if (answer.lower() != 'yes') and (answer.lower() != 'no'):
                    print('This is not a valid answer. Please answer "yes" or "no"')
                else:
                    break
            if answer.lower() == 'yes':
                new_score = play_hand(hand, word_list)
                replay = True
                if new_score > score:
                    score = new_score
                print('--------')
        total += score
    # Bug fix: this is the series total printed once after all hands,
    # not a single hand's score.
    print('Total score over all hands:', total)
#
# Build data structures used for entire session and play game
# Do not remove the "if __name__ == '__main__':" line - this code is executed
# when the program is run directly, instead of through an import statement
#
if __name__ == '__main__':
word_list = load_words()
play_game(word_list) | [
"nelsireni@gmail.com"
] | nelsireni@gmail.com |
f691f0aaea24767b63e9cf391127da7194c7efe7 | 9cb61215c23ac8727d840fc723edc4f64133becf | /ex13.py | 61854a2eb61aaaed563193be3f50faa10a1331d7 | [] | no_license | dkuldeep11/learnpythonhardway | d35bb67ec5116f03eb365345f1f7e3317d665040 | 9063226e7a39a00cb2cf3a3b38fee046b7a5efe5 | refs/heads/master | 2021-01-23T00:07:26.743403 | 2014-07-13T23:24:47 | 2014-07-13T23:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from sys import argv
#a1, a2, a3 = argv
# The whole argv list (script name included) is bound to a1 instead of
# being unpacked into three variables as the commented line above intended.
a1 = argv
#print a1, a2, a3
# NOTE: Python 2 syntax -- ``print`` is used here as a statement.
print a1
| [
"dholek@usxxzeldim1m1.corp.emc.com"
] | dholek@usxxzeldim1m1.corp.emc.com |
7732740d9942842ec90fefc669ae808d4df1cc43 | e850b0799c80238cee7525086045aabc95bb9a5e | /CRUD/crudoperation/migrations/0002_auto_20201029_0851.py | 15222e8778d000367178ebc868586aea3a7f14f7 | [] | no_license | anushapanta/Django-CRUD-with-forms | 3c0642074496010e1b88ae219e736c1c5c3f764e | 067928e9b5b4a1c5f3a070520a460432d6319d15 | refs/heads/master | 2023-01-08T10:50:38.800394 | 2020-10-29T03:12:27 | 2020-10-29T03:12:27 | 307,575,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # Generated by Django 3.0.6 on 2020-10-29 03:06
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration (see header): renames the ``Student`` model
    # of the ``crudoperation`` app to ``StudentDetails``.
    dependencies = [
        ('crudoperation', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Student',
            new_name='StudentDetails',
        ),
    ]
| [
"anusha474232@gmail.com"
] | anusha474232@gmail.com |
1df461cffc8c702acf4cbc9c3e8fb0a00e1bdaa8 | eafde321dbe5f4dd30c87ddaad2431523a69adc3 | /todo/migrations/0001_initial.py | a7788b043c2308985fc16fd0fe041c3ea65b87a6 | [] | no_license | nandibula03/TODO_Project | e1acac35e3be3bd0b30444c937947e6f47414e0e | f72afedc224d0d2e0016e7930185ae4584de650d | refs/heads/main | 2023-01-22T20:34:03.098574 | 2020-11-22T13:13:12 | 2020-11-22T13:13:12 | 315,040,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Generated by Django 3.1.2 on 2020-11-20 12:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the ``todo`` app: creates the
    # ``Todo`` table with a foreign key to the project's swappable user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('memo', models.TextField(blank=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                # null until the task is marked done
                ('datecompleted', models.DateTimeField(null=True)),
                ('important', models.BooleanField(default=False)),
                # deleting the user cascades to their todos
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"bulanandi2015@gmail.com"
] | bulanandi2015@gmail.com |
e5d3b218fb8e020f6f1d02c888dba5e26b8db438 | 0b6fbd1b8982d2bd50b0cbe36ec2ca5fe410ef0f | /mysite/chat/routing.py | bb83c83861e3f60389b29c26089a67ff15e8f7ae | [] | no_license | sohamnavadiya/django-channel-example | 17908419256ad6df4d166b17197964b550a89f44 | 8044d294402d9232e70ec99b0ef15993d16510d7 | refs/heads/master | 2020-03-19T04:49:02.093856 | 2018-06-03T10:02:08 | 2018-06-03T10:02:08 | 135,870,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from django.conf.urls import url
from django.urls import path
from . import consumers
# Route websocket connections for a named chat room to ChatConsumer.
websocket_urlpatterns = [
    url(r'^ws/chat/(?P<room_name>[^/]+)/$', consumers.ChatConsumer),
] | [
"som.insomniacs@gmail.com"
] | som.insomniacs@gmail.com |
54dead29b1debe23d37ef787e60717e6ac88e662 | d0ca36093b690328d199be84f23c660f0b9eabf3 | /platformio/stima_v4/test/nucleo_uavcan/uavcan-slave-th/nnvg_compile.py | a4b2f11d5ecc653c0d792aefe45bd14ae43398dc | [] | no_license | r-map/rmap | 9bb001b7680463d9d6a1dfefb554453f722fbcf2 | 88e3135ef981a418bb3c7ab652bfe381b6361e05 | refs/heads/master | 2023-09-04T00:00:01.294693 | 2023-09-01T18:14:25 | 2023-09-01T18:14:25 | 39,561,369 | 57 | 61 | null | 2023-06-07T09:45:06 | 2015-07-23T10:34:48 | C | UTF-8 | Python | false | false | 1,143 | py | #Import("env")
import subprocess
print("execute","nnvg --target-language c -O include --target-endianness little --generate-support only")
subprocess.check_output(["nnvg", "--target-language", "c", "-O", "include", "--target-endianness", "little", "--generate-support", "only"])
print("execute","nnvg -I data_types/uavcan -I data_types/rmap --allow-unregulated-fixed-port-id --target-language c -O include data_types/unreg --target-endianness little --generate-support never")
subprocess.check_output(["nnvg","-I","data_types/uavcan", "-I", "data_types/uavcan", "--target-language", "c", "-O", "include","data_types/uavcan", "--target-endianness", "little", "--generate-support", "never"])
print("execute","nnvg -I data_types/uavcan -I data_types/rmap --allow-unregulated-fixed-port-id --target-language c -O include data_types/unreg --target-endianness little --generate-support never")
subprocess.check_output(["nnvg","-I","data_types/uavcan", "-I", "data_types/rmap", "--allow-unregulated-fixed-port-id", "--target-language", "c", "-O", "include","data_types/rmap", "--target-endianness", "little", "--generate-support", "never"]) | [
"m.gasperini@digiteco.it"
] | m.gasperini@digiteco.it |
46658515c8bdc9f7b818c3afffe82a2d8af6297c | 88a7bf3fc30d4b9c309a82729cd60acc63d13721 | /matriz_cont.py | be33c4cd78b2ffb04e0dcc03453c0dd5cb44d66c | [] | no_license | AbidelLux/matrAbdi | 4cbd2964e97ade283e15e5da6b8b4fca4ba3688b | 3e5024bd1005e842ad632535f2fb13e0dd14e6df | refs/heads/master | 2023-06-04T12:35:56.769199 | 2021-06-24T04:40:48 | 2021-06-24T04:40:48 | 379,804,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,904 | py | import os
class matriz_cont():
    def __init__(self)-> None:
        # Sparse-matrix container.  ``head`` is never read by the methods
        # below, which operate on an external header structure passed in
        # as ``listaCabeza`` (with ``cabX`` column and ``cabY`` row heads).
        self.head = None
def filaNoExiste_ColNoExites(self,posx,posy,listaCabeza,nuevoNodo):
listax = listaCabeza.cabX
while listax is not None:
if(listax.fil_x == posx):
break
listax=listax.siguiente
listay = listaCabeza.cabY
while listay is not None:
if(listay.fil_y == posy):
break
listay = listay.abajo
self.insertar_en_columna_final(listax, nuevoNodo)
self.insertar_en_fila_final(listay, nuevoNodo)
def Insertar(self, posx, posy, listaCabeza, nuevoNodo):
listax = listaCabeza.cabX
while listax is not None:
if (listax.fil_x == posx):
break
listax = listax.siguiente
listay = listaCabeza.cabY
while listay is not None:
if (listay.fil_y == posy):
break
listay = listay.abajo
self.validar_insertar_columna(listax,nuevoNodo,posy)
self.validar_insertar_fila(listay, nuevoNodo, posx)
#validar inserciones y dejar fila en 0,0 1,0 2,0 3,0 4,0
    def validar_insertar_fila(self, node, nuevoNodo, nuevoX):
        """Walk the horizontal list starting at ``node`` and link ``nuevoNodo``
        into it via the insert helpers.  The min/max comparisons pick the first
        position where node.fil_x <= nuevoX <= sig.fil_x, i.e. the list appears
        intended to stay ordered by ``fil_x`` -- inserting before the first
        larger neighbour, or appending at the end otherwise."""
        aux = node  # keep the list head: the insert helpers re-scan from it
        while node is not None:
            sig = node.siguiente
            ant = node.anterior
            # single-node list (no neighbours on either side): append at end
            if sig is None:
                if ant is None:
                    self.insertar_en_fila_final(aux, nuevoNodo)
                    break
            # at the head with a successor: insert before sig if nuevoX fits between
            if ant is None:
                if sig is not None:
                    auxMin = min(node.fil_x, nuevoX, sig.fil_x)
                    auxMax = max(node.fil_x, nuevoX, sig.fil_x)
                    if auxMax == sig.fil_x and auxMin == node.fil_x:
                        self.insertar_en_fila_antes(aux, sig.fil_x, nuevoNodo)
                        break
            # at the tail with a predecessor: append if nuevoX is the largest
            if sig is None:
                if ant is not None:
                    auxMin = min(node.fil_x, nuevoX, ant.fil_x)
                    auxMax = max(node.fil_x, nuevoX, ant.fil_x)
                    if auxMax == nuevoX and auxMin == ant.fil_x:
                        self.insertar_en_fila_final(aux, nuevoNodo)
                        break
            # interior node: insert before sig if nuevoX fits between node and sig
            if sig is not None:
                if ant is not None:
                    auxMin = min(node.fil_x, nuevoX, sig.fil_x)
                    auxMax = max(node.fil_x, nuevoX, sig.fil_x)
                    if auxMax == sig.fil_x and auxMin == node.fil_x:
                        self.insertar_en_fila_antes(aux, sig.fil_x, nuevoNodo)
                        break
            node = node.siguiente
#validar inserciones por columna y dejar en (0,0) (0,1) (0,2) (0,3) (0,4)
    def validar_insertar_columna(self, node, nuevoNodo, nuevoy):
        """Vertical counterpart of ``validar_insertar_fila``: walk the column
        list starting at ``node`` and link ``nuevoNodo`` so the column appears
        to stay ordered by ``fil_y`` (insert above the first larger neighbour,
        or append at the bottom otherwise)."""
        aux = node  # keep the column head: the insert helpers re-scan from it
        while node is not None:
            sig = node.abajo
            ant = node.arriba
            # single-node column: append at the bottom
            if sig is None:
                if ant is None:
                    self.insertar_en_columna_final(aux, nuevoNodo)
                    break
            # at the top with a node below: insert before it if nuevoy fits between
            if ant is None:
                if sig is not None:
                    auxMin = min(node.fil_y, nuevoy, sig.fil_y)
                    auxMax = max(node.fil_y, nuevoy, sig.fil_y)
                    if auxMax == sig.fil_y and auxMin == node.fil_y:
                        self.insertar_en_columna_antes(aux, sig.fil_y, nuevoNodo)
                        break
            # at the bottom with a node above: append if nuevoy is the largest
            if sig is None:
                if ant is not None:
                    auxMin = min(node.fil_y, nuevoy, ant.fil_y)
                    auxMax = max(node.fil_y, nuevoy, ant.fil_y)
                    if auxMax == nuevoy and auxMin == ant.fil_y:
                        self.insertar_en_columna_final(aux, nuevoNodo)
                        break
            # interior node: insert above sig if nuevoy fits between node and sig
            if sig is not None:
                if ant is not None:
                    auxMin = min(node.fil_y, nuevoy, sig.fil_y)
                    auxMax = max(node.fil_y, nuevoy, sig.fil_y)
                    if auxMax == sig.fil_y and auxMin == node.fil_y:
                        self.insertar_en_columna_antes(aux, sig.fil_y, nuevoNodo)
                        break
            node = node.abajo
#************************************************************************************
#mostrar pos filas y columnas
    def mostrar_columna(self, posx, listaCabeza):
        """Print every column, one per line, as ``x y cont ->`` triples.

        NOTE(review): ``posx`` is currently ignored -- the filter below is
        commented out, so ALL columns are printed, not just column posx."""
        listax = listaCabeza.cabX
        while listax is not None:
            #if (listax.fil_x == posx):
            #break
            aux = listax
            while aux is not None:
                print(aux.fil_x, aux.fil_y, aux.cont, "->", end="")
                aux = aux.abajo
            print("")
            listax = listax.siguiente
    def mostrar_fila(self, posy, listaCabeza):
        """Print every row, one per line, as ``x y cont ->`` triples.

        NOTE(review): ``posy`` is currently ignored -- the filter below is
        commented out, so ALL rows are printed, not just row posy."""
        listay = listaCabeza.cabY
        while listay is not None:
            #if (listay.fil_y == posy):
            #break
            aux = listay
            while aux is not None:
                print(aux.fil_x, aux.fil_y, aux.cont, "->", end="")
                aux = aux.siguiente
            print("")
            listay = listay.abajo
        # earlier single-row variant kept for reference:
        #while listay is not None:
        #    print(listay.fil_x,listay.fil_y,listay.cont,"->",end="")
        #    listay =listay.siguiente
        #print("")
#********************Inserciones******************************
def insertar_en_fila_final(self, node, nuevo_nodo_cont):
while node.siguiente is not None:
node = node.siguiente
node.siguiente = nuevo_nodo_cont
nuevo_nodo_cont.anterior = node
print("anterior<-[]->Null")
def insertar_en_fila_antes(self, node, x, nuevo_nodo_cont):
while node is not None:
if node.fil_x == x:
break
node = node.siguiente
nuevo_nodo_cont.siguiente = node
nuevo_nodo_cont.anterior = node.anterior
if node.anterior is not None:
node.anterior.siguiente = nuevo_nodo_cont
node.anterior = nuevo_nodo_cont
print('caby<->[]<->[]<->[]->Null')
def insertar_en_columna_final(self, node, nuevo_nodo_cont):
while node.abajo is not None:
node = node.abajo
node.abajo = nuevo_nodo_cont
nuevo_nodo_cont.arriba = node
print("cabx")
print(" | ")
print(" [] ")
print(" | ")
print("Null")
def insertar_en_columna_antes(self, node, y, nuevo_nodo_cont):
while node is not None:
if node.fil_y == y:
break
node = node.abajo
nuevo_nodo_cont.abajo = node
nuevo_nodo_cont.arriba = node.arriba
if node.arriba is not None:
node.arriba.abajo = nuevo_nodo_cont
node.arriba = nuevo_nodo_cont
print("cabx")
print(" | ")
print(" [] ")
print(" | ")
print(" [] ")
print(" | ")
print(" Null")
    def report_graphiz(self,listaCabeza):
        """Serialize the matrix as a Graphviz digraph: node labels are
        ``(x,y)=cont`` strings, vertical links become edges, and rank=same
        groups pin each row onto one rank.  Writes ``Matriz.dot`` and shells
        out to ``dot`` to render ``Matriz.png``.

        NOTE(review): ``nuevo_arch`` is never closed -- the .dot file may be
        incompletely flushed when ``dot`` runs; use a ``with`` block."""
        Graph = "digraph L {" + "\n"
        Graph = Graph +"node [shape=box, color=cornflowerblue ];"+"\n"
        listax = listaCabeza.cabX
        listay = listaCabeza.cabY
        # edges down the row-header (cabY) chain
        while listay is not None:
            if listay.abajo is None:
                break
            Graph = Graph + '\"' + "(" + str(listay.fil_x) + "," + str(listay.fil_y) + ")" + "=" + listay.cont + '\"' + ";\n"
            Graph = Graph + '\"' + "(" + str(listay.fil_x) + "," + str(listay.fil_y) + ")" + "=" + listay.cont + '\"' + "->" + '\"' + "(" + str(listay.abajo.fil_x) + "," + str(listay.abajo.fil_y) + ")" + "=" + listay.abajo.cont + '\"' + ";\n"
            listay = listay.abajo
        # vertical edges inside every column
        while listax is not None:
            aux = listax
            while aux is not None:
                if aux.abajo is not None:
                    Graph = Graph +'\"'+"("+str(aux.fil_x)+","+str(aux.fil_y)+")"+"="+aux.cont+'\"'+";\n"
                    Graph = Graph + '\"'+"(" +str(aux.fil_x)+ "," +str(aux.fil_y)+ ")" + "=" + aux.cont + '\"' + "->" +'\"' + "(" +str(aux.abajo.fil_x)+","+str(aux.abajo.fil_y)+ ")" + "=" + aux.abajo.cont + '\"'+ ";\n"
                aux = aux.abajo
            listax = listax.siguiente
        # rank=same groups along the row-header chain
        listarankSameCaby = listaCabeza.cabY
        while listarankSameCaby is not None:
            if listarankSameCaby.siguiente is not None:
                Graph = Graph + "rank=same{\n"
                Graph = Graph + '\"' + "(" + str(listarankSameCaby.fil_x) + "," + str(listarankSameCaby.fil_y) + ")" + "=" + listarankSameCaby.cont + '\"' + "->" + '\"' + "(" + str(listarankSameCaby.siguiente.fil_x) + "," + str(listarankSameCaby.siguiente.fil_y) + ")" + "=" + listarankSameCaby.siguiente.cont + '\"' + ";\n"
                Graph = Graph + "} \n"
            #Graph = Graph + '\"' + "(" + str(listaCabeza.cabY.fil_x) + "," + str(listaCabeza.cabY.fil_y) + ")" + "=" + listaCabeza.cabY.cont + '\"' + "->" + '\"' + "(" + str(listaCabeza.cabY.siguiente.fil_x) + "," + str(listaCabeza.cabY.siguiente.fil_y) + ")" + "=" + listaCabeza.cabY.siguiente.cont + '\"' + ";\n"
            listarankSameCaby= listarankSameCaby.abajo
        # rank=same groups tying each data node to its row neighbours
        listarankSameCab0 = listaCabeza.cabX
        while listarankSameCab0 is not None:
            auxRankSame = listarankSameCab0
            while auxRankSame is not None:
                if auxRankSame.siguiente is not None:
                    if auxRankSame.anterior is not None:
                        Graph = Graph + "rank=same{\n"
                        Graph = Graph + '\"'+"("+ str(auxRankSame.fil_x)+","+str(auxRankSame.fil_y)+")" + "=" +auxRankSame.cont+'\"'+"->"+'\"'+"("+str(auxRankSame.siguiente.fil_x)+","+str(auxRankSame.siguiente.fil_y)+")"+"="+auxRankSame.siguiente.cont + '\"' + ";\n"
                        Graph = Graph + '\"' + "(" + str(auxRankSame.fil_x) + "," + str(auxRankSame.fil_y) + ")" + "=" + auxRankSame.cont + '\"' + "->" + '\"' + "(" + str(auxRankSame.anterior.fil_x) + "," + str(auxRankSame.anterior.fil_y) + ")" + "=" + auxRankSame.anterior.cont + '\"' + ";\n"
                        Graph = Graph + "} \n"
                if auxRankSame.anterior is not None:
                    if auxRankSame.siguiente is None:
                        Graph = Graph + "rank=same{\n"
                        Graph = Graph + '\"' + "(" + str(auxRankSame.fil_x) + "," + str(auxRankSame.fil_y) + ")" + "=" + auxRankSame.cont + '\"' + "->" + '\"' + "(" + str(auxRankSame.anterior.fil_x) + "," + str(auxRankSame.anterior.fil_y) + ")" + "=" + auxRankSame.anterior.cont + '\"' + ";\n"
                        Graph = Graph + "} \n"
                auxRankSame = auxRankSame.abajo
            listarankSameCab0 = listarankSameCab0.siguiente
        Graph = Graph + "\n " + "}"
        print(Graph)
        nuevo_arch = open('Matriz.dot', 'w')
        nuevo_arch.write(Graph)
        nuevo_arch.seek(0)
        comando = "dot -Tpng Matriz.dot -o Matriz.png"
        os.system(comando)
os.system("Matriz.png") | [
"3031591380108@ingenieria.usac.edu.gt"
] | 3031591380108@ingenieria.usac.edu.gt |
e8a4561139ac67ec9ff6b9e1ef9435bd1a4287c1 | ba09263d1a059cc431710b261d09c26770a3faa4 | /start.py | 04affd579457a4cada2497c1aed70f99661fa700 | [] | no_license | makarowdmitry/sss | f71413b2985ac1f081f301415f437fdd247cede0 | 6d4e1e3c07ccbc87bcd1dab3a6fe389f852623ab | refs/heads/master | 2021-01-02T08:22:03.639518 | 2015-07-09T23:57:15 | 2015-07-09T23:57:15 | 38,263,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,884 | py | # coding: utf8
#! /usr/bin/env python
# import paramiko #, base64
# 1. формируем файл скрипта
# 2. заливаем его на сервер и исполняем
# 3. Чекаем все прокси
import re
import os
import paramiko
PROXY_USER = "goemailgo"
PROXY_PASS ="q8uir"
def create_script_install_proxy(ip, pr_user, pr_pass):
    """Write ``install_socks.py``, a self-contained provisioning script.

    When executed on the remote CentOS host, the generated script disables
    iptables, builds 3proxy 0.6.1 from source, writes a 3proxy config that
    serves SOCKS5 on port 3128 bound to *ip* with the given credentials,
    and finally reboots the machine.

    :param ip: public IP of the VPS (the SOCKS listener binds to it)
    :param pr_user: SOCKS username written into the 3proxy config
    :param pr_pass: SOCKS password written into the 3proxy config
    """
    python_file = '''#! /usr/bin/env python
import os
a = os.system("""
service iptables stop
service iptables save
chkconfig iptables off
cd /usr/local/src
yum update -y
yum install -y mc nano gcc make wget
wget http://3proxy.ru/0.6.1/3proxy-0.6.1.tgz
tar -xvzf 3proxy-0.6.1.tgz
cd 3proxy-0.6.1
make -f Makefile.Linux
mkdir /usr/local/etc/3proxy
mkdir /usr/local/etc/3proxy/bin
mkdir /usr/local/etc/3proxy/logs
mkdir /usr/local/etc/3proxy/stat
cp src/3proxy /usr/local/etc/3proxy/bin
cp ./scripts/rc.d/proxy.sh /etc/init.d/3proxy
chkconfig 3proxy on
""")
def create_conf_proxy(ip_serv, login_proxy, pass_proxy):
    os.chdir("/usr/local/etc/3proxy")
    proxy_conf = """daemon
auth strong
users """ + login_proxy + ":CL:" + pass_proxy + """
socks -n -a -p3128 -i""" + ip_serv + " -e" + ip_serv + """
flush
allow """ + login_proxy + """
"""
    conf_proxy_create = open("3proxy.cfg", "w")
    conf_proxy_create.write(proxy_conf)
    conf_proxy_create.close()
    return "ok"
create_conf_proxy("''' + str(ip) + '","' + str(pr_user) + '","' + str(pr_pass) + '''")
b = os.system("""
reboot
""")
'''
    # context manager: the handle is closed even if the write fails
    # (the original leaked it on error); write() since this is one string
    with open('install_socks.py', 'w') as create_file:
        create_file.write(python_file)
# --- provision every VPS listed in data_vps.txt -------------------------
# Each line of data_vps.txt is "ip,root_user,root_password".  For every
# host: make sure a python interpreter exists, upload install_socks.py
# over SFTP, run it (the script reboots the box), and append the new
# SOCKS5 endpoint to ams.txt.  Failures on one host must not stop the
# others, so each host is handled best-effort.
data_vps = open('data_vps.txt', 'r').readlines()
for vps in data_vps:
    host_remote = '?'  # defined up-front so the except-branch can always report it
    try:
        vps_lst = vps.split(',')
        create_script_install_proxy(vps_lst[0], PROXY_USER, PROXY_PASS)
        host_remote = re.sub(r"^\s+|\n|\r|\s+$", '', vps_lst[0])
        username_serv = re.sub(r"^\s+|\n|\r|\s+$", '', vps_lst[1])
        pass_serv = re.sub(r"^\s+|\n|\r|\s+$", '', vps_lst[2])
        print(host_remote + ' - ok')
        # 1) make sure a python interpreter exists on the target
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host_remote, username=username_serv, password=pass_serv, port=22)
        client.exec_command('yum install python')
        client.close()
        # 2) upload the generated provisioning script
        transport = paramiko.Transport((host_remote, 22))
        transport.connect(username=username_serv, password=pass_serv)
        sftp = paramiko.SFTPClient.from_transport(transport)
        sftp.put(os.path.join(os.getcwd(), 'install_socks.py'), '/root/install_socks.py')
        sftp.close()
        transport.close()
        # 3) execute it remotely (ends with a reboot of the host)
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host_remote, username=username_serv, password=pass_serv, port=22)
        client.exec_command('python install_socks.py')
        client.close()
        os.remove('install_socks.py')
        # 4) remember the freshly provisioned SOCKS endpoint
        with open('ams.txt', 'a') as add_ams_socks:
            add_ams_socks.write(host_remote + ',3128,SOCKS5,' + PROXY_USER + ',' + PROXY_PASS + '\n')
    except Exception:  # was a bare "except:"; keep best-effort per-host semantics
        print(host_remote + ' - error')
        continue
# a = os.system("""
# service iptables stop
# service iptables save
# chkconfig iptables off
# cd /usr/local/src
# yum update -y
# yum install -y mc nano gcc make
# wget http://3proxy.ru/0.6.1/3proxy-0.6.1.tgz
# tar -xvzf 3proxy-0.6.1.tgz
# cd 3proxy-0.6.1
# make -f Makefile.Linux
# mkdir /usr/local/etc/3proxy
# mkdir /usr/local/etc/3proxy/bin
# mkdir /usr/local/etc/3proxy/logs
# mkdir /usr/local/etc/3proxy/stat
# cp src/3proxy /usr/local/etc/3proxy/bin
# cp ./scripts/rc.d/proxy.sh /etc/init.d/3proxy
# chkconfig 3proxy on
# """)
# def create_conf_proxy(ip_serv,login_proxy,pass_proxy):
# os.chdir("/home/tp")
# proxy_conf = """daemon
# auth strong
# users """+login_proxy+":CL:"+pass_proxy+"""
# socks -n -a -p3128 -i"""+ip_serv+" -e"+ip_serv+"""
# flush
# allow """+login_proxy+"""
# """
# conf_proxy_create = open('3proxy.cfg','w')
# conf_proxy_create.writelines(proxy_conf)
# conf_proxy_create.close()
# return "ok"
# create_conf_proxy(ip,"goemailgo",)
# # print proxy_conf
# key = paramiko.RSAKey(data=base64.decodestring('AAA...'))
# client = paramiko.SSHClient()
# client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# client.connect(vps_lst[0], username=vps_lst[1], password=vps_lst[2])
# chan = client.invoke_shell()
# chan.send('ls -l')
# print chan.recv(1024)
# now, connect and use paramiko Transport to negotiate SSH2 across the connection
# try:
# t = paramiko.Transport((hostname, Port))
# t.connect(hostkey, username, password, gss_host=socket.getfqdn(hostname),
# gss_auth=UseGSSAPI, gss_kex=DoGSSAPIKeyExchange)
# sftp = paramiko.SFTPClient.from_transport(t)
# # dirlist on remote host
# dirlist = sftp.listdir('.')
# print("Dirlist: %s" % dirlist)
# # copy this demo onto the server
# try:
# sftp.mkdir("demo_sftp_folder")
# except IOError:
# print('(assuming demo_sftp_folder/ already exists)')
# with sftp.open('demo_sftp_folder/README', 'w') as f:
# f.write('This was created by demo_sftp.py.\n')
# with open('demo_sftp.py', 'r') as f:
# data = f.read()
# sftp.open('demo_sftp_folder/demo_sftp.py', 'w').write(data)
# print('created demo_sftp_folder/ on the server')
# # copy the README back here
# with sftp.open('demo_sftp_folder/README', 'r') as f:
# data = f.read()
# with open('README_demo_sftp', 'w') as f:
# f.write(data)
# print('copied README back here')
# # BETTER: use the get() and put() methods
# sftp.put('demo_sftp.py', 'demo_sftp_folder/demo_sftp.py')
# sftp.get('demo_sftp_folder/README', 'README_demo_sftp')
# t.close()
# stdin, stdout, stderr = client.exec_command('cd /usr','pwd')
# stdin, stdout, stderr = client.exec_command('pwd')
# for line in stdout:
# print '... ' + line.strip('\n')
# client.close()
| [
"makarow.dmitry@gmail.com"
] | makarow.dmitry@gmail.com |
945243effbaa22c0f12880fd4f48299da9340a75 | 624497d346efc953f201c6906be74b9c29ee17d6 | /Des/python/Des/main.py | 7f98e3f87ef70dae831de4e4d524bf58caa3a679 | [] | no_license | ChebukinMatvey/Cryptography | ff7e427b07d19c47eb38f5b33756a98bc0f5a718 | b5fb921bad4d19aabcca788a4ba3e527748ae4c9 | refs/heads/master | 2020-05-17T16:01:58.914509 | 2019-05-02T09:18:24 | 2019-05-02T09:18:24 | 183,806,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | #!/usr/bin/python3
from functions import *
from encrypt import encrypt
from decrypt import decrypt
if __name__ == "__main__":
    # Round-trip self-test: run one 64-bit block through the DES pipeline
    # twice; the printed value should equal the original test block.
    # "with" guarantees the table file is closed (the original leaked it).
    with open("./info", "rb") as info_file:
        ip = read_ip(info_file)          # initial permutation table (per helper name — verify in functions.py)
        r_ip = reverse_ip(ip)            # inverse of ip, i.e. the final permutation
        keys = read_keys(info_file)      # round keys
        matrix = read_matrix(info_file)  # remaining DES tables
    test_block = 17179869187  # arbitrary sample block
    ci = process_block(test_block, ip, r_ip, matrix, keys)
    print(process_block(ci, ip, r_ip, matrix, keys))
    input()  # keep the console window open until the user confirms
    # encrypt("./text", "./info", "./encrypted")
    # decrypt("./encrypted", "./decrypted")
| [
"matveynanaren@gmail.com"
] | matveynanaren@gmail.com |
9defdb8cd613a9d3058622b33439b19d0cc2b924 | d45bc9b5429d5cc6eb56b921097f9b2546a88bf2 | /src/commands/__init__.py | 8aa591d8a1103edd9a314542e01367d6e061948e | [] | no_license | DACUS1995/Voice2Command | d8e6291e0bc91e68b668f429eccf912d452a3443 | 312516d6e1c2d9aace0e8be7d2c4a44f9bb6d202 | refs/heads/main | 2023-03-03T23:02:48.699071 | 2021-02-15T20:57:09 | 2021-02-15T20:57:09 | 302,987,955 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from .simple_commands import SimpleCommand
SIMPLE_COMMAND_TYPE = "SimpleCommand" | [
"studormarian@gmail.com"
] | studormarian@gmail.com |
8b702529300a28ebc932d16a695f5311094c469d | 48ffde5f19dce603afb5caffe2e71d752c526a52 | /tests/PyroTests/test_serialize.py | c740011d7425daf3732284e6fc5e82dcd0d82f30 | [
"MIT"
] | permissive | pevogam/Pyro4 | 704b5aec18e1ade7457830d1c7fcc406c4d464f5 | 4d009f6a111c071d22c21e1ab7ba43c5e9310b56 | refs/heads/master | 2020-03-25T07:46:31.978715 | 2018-08-04T12:24:51 | 2018-08-04T12:24:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,946 | py | """
Tests for the data serializer.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import array
import sys
import collections
import copy
import pprint
import pickle
import base64
import unittest
import serpent
import math
import uuid
import Pyro4.util
import Pyro4.errors
import Pyro4.core
import Pyro4.futures
from Pyro4.configuration import config
from testsupport import *
class SerializeTests_pickle(unittest.TestCase):
SERIALIZER = "pickle"
    def setUp(self):
        """Activate this test class's serializer and remember the previous one."""
        # Save the globally configured serializer so tearDown can restore it.
        self.previous_serializer = config.SERIALIZER
        config.SERIALIZER = self.SERIALIZER
        self.ser = Pyro4.util.get_serializer(config.SERIALIZER)
        # The tests in this class assume the default exposure rules.
        config.REQUIRE_EXPOSE = True
def tearDown(self):
config.SERIALIZER = self.previous_serializer
def testSerItself(self):
s = Pyro4.util.get_serializer(config.SERIALIZER)
p, _ = self.ser.serializeData(s)
s2 = self.ser.deserializeData(p)
self.assertEqual(s, s2)
self.assertTrue(s == s2)
self.assertFalse(s != s2)
def testSerUnicode(self):
data = unicode("x")
self.ser.serializeData(data)
self.ser.serializeCall(data, unicode("method"), [], {})
def testSerCompression(self):
d1, c1 = self.ser.serializeData("small data", compress=True)
d2, c2 = self.ser.serializeData("small data", compress=False)
self.assertFalse(c1)
self.assertEqual(d1, d2)
bigdata = "x" * 1000
d1, c1 = self.ser.serializeData(bigdata, compress=False)
d2, c2 = self.ser.serializeData(bigdata, compress=True)
self.assertFalse(c1)
self.assertTrue(c2)
self.assertTrue(len(d2) < len(d1))
self.assertEqual(bigdata, self.ser.deserializeData(d1, compressed=False))
self.assertEqual(bigdata, self.ser.deserializeData(d2, compressed=True))
def testSerErrors(self):
e1 = Pyro4.errors.NamingError(unicode("x"))
e1._pyroTraceback = ["this is the remote traceback"]
orig_e = copy.copy(e1)
e2 = Pyro4.errors.PyroError(unicode("x"))
e3 = Pyro4.errors.ProtocolError(unicode("x"))
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e1, True)
p, _ = self.ser.serializeData(e1)
e = self.ser.deserializeData(p)
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e, False)
self.assertIsInstance(e, Pyro4.errors.NamingError)
self.assertEqual(repr(orig_e), repr(e))
self.assertEqual(["this is the remote traceback"], e._pyroTraceback, "remote traceback info should be present")
p, _ = self.ser.serializeData(e2)
e = self.ser.deserializeData(p)
self.assertIsInstance(e, Pyro4.errors.PyroError)
self.assertEqual(repr(e2), repr(e))
p, _ = self.ser.serializeData(e3)
e = self.ser.deserializeData(p)
self.assertIsInstance(e, Pyro4.errors.ProtocolError)
self.assertEqual(repr(e3), repr(e))
def testSerializeExceptionWithAttr(self):
ex = ZeroDivisionError("test error")
ex._pyroTraceback = ["test traceback payload"]
Pyro4.util.fixIronPythonExceptionForPickle(ex, True) # hack for ironpython
data, compressed = self.ser.serializeData(ex)
ex2 = self.ser.deserializeData(data, compressed)
Pyro4.util.fixIronPythonExceptionForPickle(ex2, False) # hack for ironpython
self.assertEqual(ZeroDivisionError, type(ex2))
self.assertTrue(hasattr(ex2, "_pyroTraceback"))
self.assertEqual(["test traceback payload"], ex2._pyroTraceback)
def testSerCoreOffline(self):
uri = Pyro4.core.URI("PYRO:9999@host.com:4444")
p, _ = self.ser.serializeData(uri)
uri2 = self.ser.deserializeData(p)
self.assertEqual(uri, uri2)
self.assertEqual("PYRO", uri2.protocol)
self.assertEqual("9999", uri2.object)
self.assertEqual("host.com:4444", uri2.location)
self.assertEqual(4444, uri2.port)
self.assertIsNone(uri2.sockname)
uri = Pyro4.core.URI("PYRO:12345@./u:/tmp/socketname")
p, _ = self.ser.serializeData(uri)
uri2 = self.ser.deserializeData(p)
self.assertEqual(uri, uri2)
self.assertEqual("PYRO", uri2.protocol)
self.assertEqual("12345", uri2.object)
self.assertEqual("./u:/tmp/socketname", uri2.location)
self.assertIsNone(uri2.port)
self.assertEqual("/tmp/socketname", uri2.sockname)
proxy = Pyro4.core.Proxy("PYRO:9999@host.com:4444")
proxy._pyroTimeout = 42
proxy._pyroMaxRetries = 78
self.assertIsNone(proxy._pyroConnection)
p, _ = self.ser.serializeData(proxy)
proxy2 = self.ser.deserializeData(p)
self.assertIsNone(proxy._pyroConnection)
self.assertIsNone(proxy2._pyroConnection)
self.assertEqual(proxy2._pyroUri, proxy._pyroUri)
self.assertEqual(0, proxy2._pyroTimeout, "must be reset to defaults")
self.assertEqual(0, proxy2._pyroMaxRetries, "must be reset to defaults")
def testNested(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
uri1 = Pyro4.core.URI("PYRO:1111@host.com:111")
uri2 = Pyro4.core.URI("PYRO:2222@host.com:222")
_ = self.ser.serializeData(uri1)
data = [uri1, uri2]
p, _ = self.ser.serializeData(data)
[u1, u2] = self.ser.deserializeData(p)
self.assertEqual(uri1, u1)
self.assertEqual(uri2, u2)
def testSerDaemonHack(self):
# This tests the hack that a Daemon should be serializable,
# but only to support serializing Pyro objects.
# The serialized form of a Daemon should be empty (and thus, useless)
with Pyro4.core.Daemon(port=0) as daemon:
d, _ = self.ser.serializeData(daemon)
d2 = self.ser.deserializeData(d)
self.assertTrue(len(d2.__dict__) == 0, "deserialized daemon should be empty")
self.assertTrue("Pyro4.core.Daemon" in repr(d2))
self.assertTrue("unusable" in repr(d2))
try:
config.AUTOPROXY = False
obj = pprint.PrettyPrinter(stream="dummy", width=42)
obj.name = "hello"
daemon.register(obj)
o, _ = self.ser.serializeData(obj)
if self.SERIALIZER in ("pickle", "cloudpickle", "dill"):
# only pickle, cloudpickle and dill can deserialize the PrettyPrinter class without the need of explicit deserialization function
o2 = self.ser.deserializeData(o)
self.assertEqual("hello", o2.name)
self.assertEqual(42, o2._width)
finally:
config.AUTOPROXY = True
def testPyroClasses(self):
uri = Pyro4.core.URI("PYRO:object@host:4444")
s, c = self.ser.serializeData(uri)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.URI)
self.assertEqual(uri, x)
self.assertTrue("Pyro4.core.URI" in repr(uri))
self.assertEqual("PYRO:object@host:4444", str(uri))
uri = Pyro4.core.URI("PYRO:12345@./u:/tmp/socketname")
s, c = self.ser.serializeData(uri)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.URI)
self.assertEqual(uri, x)
proxy = Pyro4.core.Proxy(uri)
proxy._pyroAttrs = set("abc")
proxy._pyroMethods = set("def")
proxy._pyroOneway = set("ghi")
proxy._pyroTimeout = 42
proxy._pyroHmacKey = b"secret"
proxy._pyroHandshake = "apples"
proxy._pyroMaxRetries = 78
proxy._pyroSerializer = "serializer"
s, c = self.ser.serializeData(proxy)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.Proxy)
self.assertEqual(proxy._pyroUri, x._pyroUri)
self.assertEqual(set("abc"), x._pyroAttrs)
self.assertEqual(set("def"), x._pyroMethods)
self.assertEqual(set("ghi"), x._pyroOneway)
self.assertEqual(b"secret", x._pyroHmacKey)
self.assertEqual("apples", x._pyroHandshake)
self.assertEqual("serializer", x._pyroSerializer)
self.assertEqual(0, x._pyroTimeout, "must be reset to defaults")
self.assertEqual(0, x._pyroMaxRetries, "must be reset to defaults")
self.assertTrue("Pyro4.core.Proxy" in repr(x))
self.assertTrue("Pyro4.core.Proxy" in str(x))
daemon = Pyro4.core.Daemon()
s, c = self.ser.serializeData(daemon)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.Daemon)
self.assertTrue("Pyro4.core.Daemon" in repr(x))
self.assertTrue("unusable" in repr(x))
self.assertTrue("Pyro4.core.Daemon" in str(x))
self.assertTrue("unusable" in str(x))
wrapper = Pyro4.futures._ExceptionWrapper(ZeroDivisionError("divided by zero"))
s, c = self.ser.serializeData(wrapper)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.futures._ExceptionWrapper)
self.assertEqual("divided by zero", str(x.exception))
self.assertTrue("ExceptionWrapper" in repr(x))
self.assertTrue("ExceptionWrapper" in str(x))
def testPyroClassesForDict(self):
uri = Pyro4.core.URI("PYRO:object@host:4444")
state = uri.__getstate_for_dict__()
self.assertEqual(('PYRO', 'object', None, 'host', 4444), state)
uri2 = Pyro4.core.URI("PYRONAME:xxx")
uri2.__setstate_from_dict__(state)
self.assertEqual(uri, uri2)
proxy = Pyro4.core.Proxy(uri)
proxy._pyroAttrs = set("abc")
proxy._pyroMethods = set("def")
proxy._pyroOneway = set("ghi")
proxy._pyroTimeout = 42
proxy._pyroHmacKey = b"secret"
proxy._pyroHandshake = "apples"
proxy._pyroMaxRetries = 78
proxy._pyroSerializer = "serializer"
state = proxy.__getstate_for_dict__()
b64_secret = "b64:"+base64.b64encode(b"secret").decode("utf-8")
self.assertEqual(('PYRO:object@host:4444', tuple(set("ghi")), tuple(set("def")), tuple(set("abc")), 42, b64_secret, "apples", 78, "serializer"), state)
proxy2 = Pyro4.core.Proxy("PYRONAME:xxx")
proxy2.__setstate_from_dict__(state)
self.assertEqual(proxy, proxy2)
self.assertEqual(proxy._pyroUri, proxy2._pyroUri)
self.assertEqual(proxy._pyroAttrs, proxy2._pyroAttrs)
self.assertEqual(proxy._pyroMethods, proxy2._pyroMethods)
self.assertEqual(proxy._pyroOneway, proxy2._pyroOneway)
self.assertEqual(proxy._pyroHmacKey, proxy2._pyroHmacKey)
self.assertEqual(proxy._pyroHandshake, proxy2._pyroHandshake)
self.assertEqual(proxy._pyroSerializer, proxy2._pyroSerializer)
self.assertEqual(0, proxy2._pyroTimeout, "must be reset to defaults")
self.assertEqual(0, proxy2._pyroMaxRetries, "must be reset to defaults")
daemon = Pyro4.core.Daemon()
state = daemon.__getstate_for_dict__()
self.assertEqual(tuple(), state)
daemon2 = Pyro4.core.Daemon()
daemon2.__setstate_from_dict__(state)
def testProxySerializationCompat(self):
proxy = Pyro4.core.Proxy("PYRO:object@host:4444")
proxy._pyroSerializer = "serializer"
pickle_state = proxy.__getstate__()
self.assertEqual(9, len(pickle_state))
pickle_state = pickle_state[:8]
proxy.__setstate__(pickle_state)
self.assertIsNone(proxy._pyroSerializer)
proxy._pyroSerializer = "serializer"
serpent_state = proxy.__getstate_for_dict__()
self.assertEqual(9, len(serpent_state))
serpent_state = serpent_state[:8]
proxy.__setstate_from_dict__(serpent_state)
self.assertIsNone(proxy._pyroSerializer)
def testAutoProxyPartlyExposed(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
self.ser.register_type_replacement(MyThingPartlyExposed, Pyro4.core.pyroObjectToAutoProxy)
t1 = MyThingPartlyExposed("1")
t2 = MyThingPartlyExposed("2")
with Pyro4.core.Daemon() as d:
d.register(t1, "thingy1")
d.register(t2, "thingy2")
data = [t1, ["apple", t2]]
s, c = self.ser.serializeData(data)
data = self.ser.deserializeData(s, c)
self.assertEqual("apple", data[1][0])
p1 = data[0]
p2 = data[1][1]
self.assertIsInstance(p1, Pyro4.core.Proxy)
self.assertIsInstance(p2, Pyro4.core.Proxy)
self.assertEqual("thingy1", p1._pyroUri.object)
self.assertEqual("thingy2", p2._pyroUri.object)
self.assertEqual({"prop1", "readonly_prop1"}, p1._pyroAttrs)
self.assertEqual({"exposed", "oneway"}, p1._pyroMethods)
self.assertEqual({'oneway'}, p1._pyroOneway)
def testAutoProxyFullExposed(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
self.ser.register_type_replacement(MyThingPartlyExposed, Pyro4.core.pyroObjectToAutoProxy)
t1 = MyThingFullExposed("1")
t2 = MyThingFullExposed("2")
with Pyro4.core.Daemon() as d:
d.register(t1, "thingy1")
d.register(t2, "thingy2")
data = [t1, ["apple", t2]]
s, c = self.ser.serializeData(data)
data = self.ser.deserializeData(s, c)
self.assertEqual("apple", data[1][0])
p1 = data[0]
p2 = data[1][1]
self.assertIsInstance(p1, Pyro4.core.Proxy)
self.assertIsInstance(p2, Pyro4.core.Proxy)
self.assertEqual("thingy1", p1._pyroUri.object)
self.assertEqual("thingy2", p2._pyroUri.object)
self.assertEqual({"prop1", "prop2", "readonly_prop1"}, p1._pyroAttrs)
self.assertEqual({'classmethod', 'method', 'oneway', 'staticmethod', 'exposed', "__dunder__"}, p1._pyroMethods)
self.assertEqual({'oneway'}, p1._pyroOneway)
def testRegisterTypeReplacementSanity(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
self.ser.register_type_replacement(int, lambda: None)
with self.assertRaises(ValueError):
self.ser.register_type_replacement(type, lambda: None)
with self.assertRaises(ValueError):
self.ser.register_type_replacement(42, lambda: None)
def testCustomClassFail(self):
if self.SERIALIZER in ("pickle", "cloudpickle", "dill"):
self.skipTest("pickle, cloudpickle and dill simply serialize custom classes")
o = pprint.PrettyPrinter(stream="dummy", width=42)
s, c = self.ser.serializeData(o)
try:
_ = self.ser.deserializeData(s, c)
self.fail("error expected, shouldn't deserialize unknown class")
except Pyro4.errors.ProtocolError:
pass
def testCustomClassOk(self):
if self.SERIALIZER in ("pickle", "cloudpickle", "dill"):
self.skipTest("pickle, cloudpickle and dill simply serialize custom classes just fine")
o = MyThingPartlyExposed("test")
Pyro4.util.SerializerBase.register_class_to_dict(MyThingPartlyExposed, mything_dict)
Pyro4.util.SerializerBase.register_dict_to_class("CUSTOM-Mythingymabob", mything_creator)
s, c = self.ser.serializeData(o)
o2 = self.ser.deserializeData(s, c)
self.assertIsInstance(o2, MyThingPartlyExposed)
self.assertEqual("test", o2.name)
# unregister the deserializer
Pyro4.util.SerializerBase.unregister_dict_to_class("CUSTOM-Mythingymabob")
try:
self.ser.deserializeData(s, c)
self.fail("must fail")
except Pyro4.errors.ProtocolError:
pass # ok
# unregister the serializer
Pyro4.util.SerializerBase.unregister_class_to_dict(MyThingPartlyExposed)
s, c = self.ser.serializeData(o)
try:
self.ser.deserializeData(s, c)
self.fail("must fail")
except Pyro4.errors.SerializeError as x:
msg = str(x)
self.assertIn(msg, ["unsupported serialized class: testsupport.MyThingPartlyExposed",
"unsupported serialized class: PyroTests.testsupport.MyThingPartlyExposed"])
def testData(self):
data = [42, "hello"]
ser, compressed = self.ser.serializeData(data)
self.assertFalse(compressed)
data2 = self.ser.deserializeData(ser, compressed=False)
self.assertEqual(data, data2)
def testUnicodeData(self):
data = u"euro\u20aclowbytes\u0000\u0001\u007f\u0080\u00ff"
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(data, data2)
def testUUID(self):
data = uuid.uuid1()
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
uuid_as_str = str(data)
self.assertTrue(data2==data or data2==uuid_as_str)
def testSet(self):
data = {111, 222, 333}
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(data, data2)
def testCircular(self):
data = [42, "hello", Pyro4.core.Proxy("PYRO:dummy@dummy:4444")]
data.append(data)
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed)
self.assertIs(data2, data2[3])
self.assertEqual(42, data2[0])
def testCallPlain(self):
ser, compressed = self.ser.serializeCall("object", "method", ("vargs1", "vargs2"), {"kwargs": 999})
self.assertFalse(compressed)
obj, method, vargs, kwargs = self.ser.deserializeCall(ser, compressed=False)
self.assertEqual("object", obj)
self.assertEqual("method", method)
self.assertTrue(len(vargs) == 2)
self.assertTrue(vargs[0] == "vargs1")
self.assertTrue(vargs[1] == "vargs2")
self.assertDictEqual({"kwargs": 999}, kwargs)
def testCallPyroObjAsArg(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
uri = Pyro4.core.URI("PYRO:555@localhost:80")
ser, compressed = self.ser.serializeCall("object", "method", [uri], {"thing": uri})
self.assertFalse(compressed)
obj, method, vargs, kwargs = self.ser.deserializeCall(ser, compressed=False)
self.assertEqual("object", obj)
self.assertEqual("method", method)
self.assertEqual([uri], vargs)
self.assertEqual({"thing": uri}, kwargs)
def testCallCustomObjAsArg(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
e = ZeroDivisionError("hello")
ser, compressed = self.ser.serializeCall("object", "method", [e], {"thing": e})
self.assertFalse(compressed)
obj, method, vargs, kwargs = self.ser.deserializeCall(ser, compressed=False)
self.assertEqual("object", obj)
self.assertEqual("method", method)
self.assertIsInstance(vargs, list)
self.assertIsInstance(vargs[0], ZeroDivisionError)
self.assertEqual("hello", str(vargs[0]))
self.assertIsInstance(kwargs["thing"], ZeroDivisionError)
self.assertEqual("hello", str(kwargs["thing"]))
def testSerializeException(self):
e = ZeroDivisionError()
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertEqual("", str(e2))
e = ZeroDivisionError("hello")
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertEqual("hello", str(e2))
e = ZeroDivisionError("hello", 42)
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertIn(str(e2), ("('hello', 42)", "(u'hello', 42)"))
e.custom_attribute = 999
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e, True)
ser, compressed = self.ser.serializeData(e)
e2 = self.ser.deserializeData(ser, compressed)
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e2, False)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertIn(str(e2), ("('hello', 42)", "(u'hello', 42)"))
self.assertEqual(999, e2.custom_attribute)
def testSerializeSpecialException(self):
self.assertIn("GeneratorExit", Pyro4.util.all_exceptions)
e = GeneratorExit()
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, GeneratorExit)
def testRecreateClasses(self):
self.assertEqual([1, 2, 3], self.ser.recreate_classes([1, 2, 3]))
d = {"__class__": "invalid"}
try:
self.ser.recreate_classes(d)
self.fail("error expected")
except Pyro4.errors.ProtocolError:
pass # ok
d = {"__class__": "Pyro4.core.URI", "state": ['PYRO', '555', None, 'localhost', 80]}
uri = self.ser.recreate_classes(d)
self.assertEqual(Pyro4.core.URI("PYRO:555@localhost:80"), uri)
number, uri = self.ser.recreate_classes([1, {"uri": d}])
self.assertEqual(1, number)
self.assertEqual(Pyro4.core.URI("PYRO:555@localhost:80"), uri["uri"])
def testProtocolVersion(self):
self.assertGreaterEqual(config.PICKLE_PROTOCOL_VERSION, 2)
self.assertEqual(pickle.HIGHEST_PROTOCOL, config.PICKLE_PROTOCOL_VERSION)
def testUriSerializationWithoutSlots(self):
orig_protocol = config.PICKLE_PROTOCOL_VERSION
config.PICKLE_PROTOCOL_VERSION = 2
try:
u = Pyro4.core.URI("PYRO:obj@localhost:1234")
d, compr = self.ser.serializeData(u)
self.assertFalse(compr)
import pickletools
d = pickletools.optimize(d)
result1 = b'\x80\x02cPyro4.core\nURI\n)\x81(U\x04PYROU\x03objNU\tlocalhostM\xd2\x04tb.'
result2 = b'\x80\x02cPyro4.core\nURI\n)\x81(X\x04\x00\x00\x00PYROX\x03\x00\x00\x00objNX\t\x00\x00\x00localhostM\xd2\x04tb.'
self.assertTrue(d in (result1, result2))
finally:
config.PICKLE_PROTOCOL_VERSION = orig_protocol
def testFloatPrecision(self):
f1 = 1482514078.54635912345
f2 = 9876543212345.12345678987654321
f3 = 11223344.556677889988776655e33
floats = [f1, f2, f3]
d, compr = self.ser.serializeData(floats)
v = self.ser.deserializeData(d, compr)
self.assertEqual(floats, v, "float precision must not be compromised in any serializer")
def testSourceByteTypes_deserialize(self):
# uncompressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.deserializeCall(bytearray(call_ser), False)
self.assertEqual([1, 2, 3], vargs)
d = self.ser.deserializeData(bytearray(ser), False)
self.assertEqual([4, 5, 6], d)
if sys.version_info < (3, 0):
_, _, vargs, _ = self.ser.deserializeCall(buffer(call_ser), False)
self.assertEqual([1, 2, 3], vargs)
d = self.ser.deserializeData(buffer(ser), False)
self.assertEqual([4, 5, 6], d)
# compressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3]*100, {"kwarg": 42}, True)
ser, _ = self.ser.serializeData([4, 5, 6]*100, True)
_, _, vargs, _ = self.ser.deserializeCall(bytearray(call_ser), True)
self.assertEqual(300, len(vargs))
d = self.ser.deserializeData(bytearray(ser), True)
self.assertEqual(300, len(d))
if sys.version_info < (3, 0):
_, _, vargs, _ = self.ser.deserializeCall(buffer(call_ser), True)
self.assertEqual(300, len(vargs))
d = self.ser.deserializeData(buffer(ser), True)
self.assertEqual(300, len(d))
@unittest.skipIf(sys.platform == "cli", "ironpython can't properly create memoryviews from serialized data")
def testSourceByteTypes_deserialize_memoryview(self):
# uncompressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.deserializeCall(memoryview(call_ser), False)
self.assertEqual([1, 2, 3], vargs)
d = self.ser.deserializeData(memoryview(ser), False)
self.assertEqual([4, 5, 6], d)
# compressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3]*100, {"kwarg": 42}, True)
ser, _ = self.ser.serializeData([4, 5, 6]*100, True)
_, _, vargs, _ = self.ser.deserializeCall(memoryview(call_ser), True)
self.assertEqual(300, len(vargs))
d = self.ser.deserializeData(memoryview(ser), True)
self.assertEqual(300, len(d))
def testSourceByteTypes_loads(self):
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.loadsCall(bytearray(call_ser))
self.assertEqual([1, 2, 3], vargs)
d = self.ser.loads(bytearray(ser))
self.assertEqual([4, 5, 6], d)
if sys.version_info < (3, 0):
_, _, vargs, _ = self.ser.loadsCall(buffer(call_ser))
self.assertEqual([1, 2, 3], vargs)
d = self.ser.loads(buffer(ser))
self.assertEqual([4, 5, 6], d)
@unittest.skipIf(sys.platform == "cli", "ironpython can't properly create memoryviews from serialized data")
def testSourceByteTypes_loads_memoryview(self):
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.loadsCall(memoryview(call_ser))
self.assertEqual([1, 2, 3], vargs)
d = self.ser.loads(memoryview(ser))
self.assertEqual([4, 5, 6], d)
def testSerializeDumpsAndDumpsCall(self):
self.ser.dumps(uuid.uuid4())
self.ser.dumps(Pyro4.URI("PYRO:test@test:4444"))
self.ser.dumps(Pyro4.Proxy("PYRONAME:foobar"))
self.ser.dumpsCall("obj", "method", (1, 2, 3), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, array.array('i', [1, 2, 3])), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, array.array('i', [1, 2, 3])), {"arg1": array.array('i', [1, 2, 3])})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.URI("PYRO:test@test:4444")), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.URI("PYRO:test@test:4444")), {"arg1": Pyro4.URI("PYRO:test@test:4444")})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.Proxy("PYRONAME:foobar")), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.Proxy("PYRONAME:foobar")), {"arg1": Pyro4.Proxy("PYRONAME:foobar")})
class SerializeTests_cloudpickle(SerializeTests_pickle):
    """Re-runs the whole pickle test suite with the cloudpickle serializer."""
    SERIALIZER = "cloudpickle"

    @unittest.skip('not implemented')
    def testUriSerializationWithoutSlots(self):
        pass

    def testSerializeLambda(self):
        # cloudpickle (unlike plain pickle) can serialize lambdas
        l = lambda x: x * x
        ser, compressed = self.ser.serializeData(l)
        l2 = self.ser.deserializeData(ser, compressed=compressed)
        self.assertEqual(l2(3.), 9.)

    def testSerializeLocalFunction(self):
        # ...and locally defined (non-module-level) functions
        def f(x):
            return x * x
        ser, compressed = self.ser.serializeData(f)
        f2 = self.ser.deserializeData(ser, compressed=compressed)
        self.assertEqual(f2(3.), 9.)
# Probe for the optional "dill" serializer; dill is known to have issues on
# IronPython, so remember that combination to skip the dill test class below.
is_ironpython_without_dill = False
try:
    import dill
except ImportError:
    if sys.platform == "cli":
        is_ironpython_without_dill = True
@unittest.skipIf(is_ironpython_without_dill, "dill with ironpython has issues so it's fine if we don't test this")
class SerializeTests_dill(SerializeTests_pickle):
    """Re-runs the whole pickle test suite with the dill serializer."""
    SERIALIZER = "dill"

    def testProtocolVersion(self):
        # dill has its own protocol version, configured separately from pickle's
        import dill
        self.assertEqual(dill.HIGHEST_PROTOCOL, config.DILL_PROTOCOL_VERSION)

    @unittest.skip('not implemented')
    def testUriSerializationWithoutSlots(self):
        pass

    def testSerializeLambda(self):
        # dill, like cloudpickle, can serialize lambdas
        l = lambda x: x * x
        ser, compressed = self.ser.serializeData(l)
        l2 = self.ser.deserializeData(ser, compressed=compressed)
        self.assertEqual(l2(3.), 9.)

    def testSerializeLocalFunction(self):
        # ...and locally defined (non-module-level) functions
        def f(x):
            return x * x
        ser, compressed = self.ser.serializeData(f)
        f2 = self.ser.deserializeData(ser, compressed=compressed)
        self.assertEqual(f2(3.), 9.)
class SerializeTests_serpent(SerializeTests_pickle):
    """Runs the serializer test suite against the serpent serializer.

    Overrides the cases where serpent's output legitimately differs from
    pickle's (no object graphs; sets and deques become other containers).
    """
    SERIALIZER = "serpent"
    def testCircular(self):
        # serpent refuses circular object graphs instead of serializing them
        with self.assertRaises(ValueError): # serpent doesn't support object graphs (since serpent 1.7 reports ValueError instead of crashing)
            super(SerializeTests_serpent, self).testCircular()
    def testSet(self):
        # serpent serializes a set into a tuple on older python versions, so we override this
        data = {111, 222, 333}
        ser, compressed = self.ser.serializeData(data)
        data2 = self.ser.deserializeData(ser, compressed=compressed)
        if serpent.can_use_set_literals:
            self.assertEqual(data, data2)
        else:
            self.assertEqual(tuple(data), data2)
    def testDeque(self):
        # serpent converts a deque into a primitive list
        deq = collections.deque([1, 2, 3, 4])
        ser, compressed = self.ser.serializeData(deq)
        data2 = self.ser.deserializeData(ser, compressed=compressed)
        self.assertEqual([1, 2, 3, 4], data2)
    @unittest.skipIf(sys.version_info < (2, 7), "ordereddict is in Python 2.7+")
    def testOrderedDict(self):
        # round-trip an OrderedDict through a custom dict<->class conversion hook
        od = collections.OrderedDict()
        od["a"] = 1
        od["b"] = 2
        od["c"] = 3
        def recreate_OrderedDict(name, values):
            # conversion hook invoked by the serializer during deserialization
            self.assertEqual("collections.OrderedDict", name)
            return collections.OrderedDict(values["items"])
        Pyro4.util.SerializerBase.register_dict_to_class("collections.OrderedDict", recreate_OrderedDict)
        ser, compressed = self.ser.serializeData(od)
        self.assertIn(b"collections.OrderedDict", ser)
        self.assertIn(b"[('a',1),('b',2),('c',3)]", ser)
        data2 = self.ser.deserializeData(ser, compressed=compressed)
        self.assertEqual(od, data2)
    def testUriSerializationWithoutSlots(self):
        # the exact serialized bytes depend on the python version and on dict
        # ordering, so accept any of the known-valid representations
        u = Pyro4.core.URI("PYRO:obj@localhost:1234")
        d, compr = self.ser.serializeData(u)
        self.assertFalse(compr)
        result1 = b"# serpent utf-8 python3.2\n{'__class__':'Pyro4.core.URI','state':('PYRO','obj',None,'localhost',1234)}"
        result2 = b"# serpent utf-8 python3.2\n{'state':('PYRO','obj',None,'localhost',1234),'__class__':'Pyro4.core.URI'}"
        result3 = b"# serpent utf-8 python2.6\n{'state':('PYRO','obj',None,'localhost',1234),'__class__':'Pyro4.core.URI'}"
        result4 = b"# serpent utf-8 python2.6\n{'__class__':'Pyro4.core.URI','state':('PYRO','obj',None,'localhost',1234)}"
        self.assertTrue(d in (result1, result2, result3, result4))
class SerializeTests_json(SerializeTests_pickle):
    """Runs the serializer test suite against the json serializer."""
    SERIALIZER = "json"

    def testCircular(self):
        # json cannot represent circular object graphs at all
        with self.assertRaises(ValueError):
            super(SerializeTests_json, self).testCircular()

    def testSet(self):
        # a set comes back as a list from the json serializer
        original = {111, 222, 333}
        payload, was_compressed = self.ser.serializeData(original)
        roundtripped = self.ser.deserializeData(payload, compressed=was_compressed)
        self.assertEqual(list(original), roundtripped)

    def testUriSerializationWithoutSlots(self):
        # key order in the json output is not guaranteed, so accept both forms
        uri = Pyro4.core.URI("PYRO:obj@localhost:1234")
        serialized, was_compressed = self.ser.serializeData(uri)
        self.assertFalse(was_compressed)
        expected = (
            b'{"__class__": "Pyro4.core.URI", "state": ["PYRO", "obj", null, "localhost", 1234]}',
            b'{"state": ["PYRO", "obj", null, "localhost", 1234], "__class__": "Pyro4.core.URI"}',
        )
        self.assertTrue(serialized in expected)
class SerializeTests_marshal(SerializeTests_pickle):
    """Runs the serializer test suite against the marshal serializer."""
    SERIALIZER = "marshal"

    def testCircular(self):
        # circular object graphs cannot be marshaled
        with self.assertRaises(ValueError):
            super(SerializeTests_marshal, self).testCircular()

    @unittest.skip("marshaling is implementation dependent")
    def testUriSerializationWithoutSlots(self):
        pass
class SerializeTests_msgpack(SerializeTests_pickle):
    """Runs the serializer test suite against the msgpack serializer."""
    SERIALIZER = "msgpack"

    @unittest.skip("circular will crash msgpack")
    def testCircular(self):
        pass

    def testSet(self):
        # a set comes back as a list from the msgpack serializer
        original = {111, 222, 333}
        payload, was_compressed = self.ser.serializeData(original)
        roundtripped = self.ser.deserializeData(payload, compressed=was_compressed)
        self.assertEqual(list(original), roundtripped)

    @unittest.skip("msgpack is implementation dependent")
    def testUriSerializationWithoutSlots(self):
        pass
class GenericTests(unittest.TestCase):
    """Serializer-independent tests: serializer registration/lookup and the
    dict<->class conversion machinery of Pyro4.util.SerializerBase."""
    def testSerializersAvailable(self):
        # pickle and marshal are always present; the rest only when installed
        Pyro4.util.get_serializer("pickle")
        Pyro4.util.get_serializer("marshal")
        try:
            import json
            Pyro4.util.get_serializer("json")
        except ImportError:
            pass
        try:
            import serpent
            Pyro4.util.get_serializer("serpent")
        except ImportError:
            pass
        try:
            import cloudpickle
            Pyro4.util.get_serializer("cloudpickle")
        except ImportError:
            pass
        try:
            import dill
            Pyro4.util.get_serializer("dill")
        except ImportError:
            pass
    def testAssignedSerializerIds(self):
        # the numeric wire-protocol ids of the serializers must never change
        self.assertEqual(1, Pyro4.util.SerpentSerializer.serializer_id)
        self.assertEqual(2, Pyro4.util.JsonSerializer.serializer_id)
        self.assertEqual(3, Pyro4.util.MarshalSerializer.serializer_id)
        self.assertEqual(4, Pyro4.util.PickleSerializer.serializer_id)
        self.assertEqual(5, Pyro4.util.DillSerializer.serializer_id)
        self.assertEqual(6, Pyro4.util.MsgpackSerializer.serializer_id)
        self.assertEqual(7, Pyro4.util.CloudpickleSerializer.serializer_id)
    def testSerializersAvailableById(self):
        Pyro4.util.get_serializer_by_id(1) # serpent
        Pyro4.util.get_serializer_by_id(2) # json
        Pyro4.util.get_serializer_by_id(3) # marshal
        Pyro4.util.get_serializer_by_id(4) # pickle
        # ids 5, 6 and 7 (dill, msgpack, cloudpickle) are not always available, so we skip those.
        self.assertRaises(Pyro4.errors.SerializeError, lambda: Pyro4.util.get_serializer_by_id(0))
        self.assertRaises(Pyro4.errors.SerializeError, lambda: Pyro4.util.get_serializer_by_id(8))
    def testDictClassFail(self):
        # class_to_dict works on arbitrary objects, but dict_to_class must
        # refuse classes that were never registered for deserialization
        o = pprint.PrettyPrinter(stream="dummy", width=42)
        d = Pyro4.util.SerializerBase.class_to_dict(o)
        self.assertEqual(42, d["_width"])
        self.assertEqual("pprint.PrettyPrinter", d["__class__"])
        try:
            _ = Pyro4.util.SerializerBase.dict_to_class(d)
            self.fail("error expected")
        except Pyro4.errors.ProtocolError:
            pass
    def testDictException(self):
        # exceptions serialize to a tagged dict carrying args and attributes
        x = ZeroDivisionError("hello", 42)
        expected = {
            "__class__": None,
            "__exception__": True,
            "args": ("hello", 42),
            "attributes": {}
        }
        if sys.version_info < (3, 0):
            expected["__class__"] = "exceptions.ZeroDivisionError"
        else:
            expected["__class__"] = "builtins.ZeroDivisionError"
        d = Pyro4.util.SerializerBase.class_to_dict(x)
        self.assertEqual(expected, d)
        x.custom_attribute = 999
        expected["attributes"] = {"custom_attribute": 999}
        d = Pyro4.util.SerializerBase.class_to_dict(x)
        self.assertEqual(expected, d)
    def testDictClassOk(self):
        # URI objects round-trip through dict form, for both tcp and unix sockets
        uri = Pyro4.core.URI("PYRO:object@host:4444")
        d = Pyro4.util.SerializerBase.class_to_dict(uri)
        self.assertEqual("Pyro4.core.URI", d["__class__"])
        self.assertIn("state", d)
        x = Pyro4.util.SerializerBase.dict_to_class(d)
        self.assertIsInstance(x, Pyro4.core.URI)
        self.assertEqual(uri, x)
        self.assertEqual(4444, x.port)
        uri = Pyro4.core.URI("PYRO:12345@./u:/tmp/socketname")
        d = Pyro4.util.SerializerBase.class_to_dict(uri)
        self.assertEqual("Pyro4.core.URI", d["__class__"])
        self.assertIn("state", d)
        x = Pyro4.util.SerializerBase.dict_to_class(d)
        self.assertIsInstance(x, Pyro4.core.URI)
        self.assertEqual(uri, x)
        self.assertEqual("/tmp/socketname", x.sockname)
    def testCustomDictClass(self):
        # register the module-level mything_dict/mything_creator hooks, then
        # verify conversion both while registered and after unregistering
        o = MyThingPartlyExposed("test")
        Pyro4.util.SerializerBase.register_class_to_dict(MyThingPartlyExposed, mything_dict)
        Pyro4.util.SerializerBase.register_dict_to_class("CUSTOM-Mythingymabob", mything_creator)
        d = Pyro4.util.SerializerBase.class_to_dict(o)
        self.assertEqual("CUSTOM-Mythingymabob", d["__class__"])
        self.assertEqual("test", d["name"])
        x = Pyro4.util.SerializerBase.dict_to_class(d)
        self.assertIsInstance(x, MyThingPartlyExposed)
        self.assertEqual("test", x.name)
        # unregister the conversion functions and try again
        Pyro4.util.SerializerBase.unregister_class_to_dict(MyThingPartlyExposed)
        Pyro4.util.SerializerBase.unregister_dict_to_class("CUSTOM-Mythingymabob")
        d_orig = Pyro4.util.SerializerBase.class_to_dict(o)
        clsname = d_orig["__class__"]
        self.assertTrue(clsname.endswith("testsupport.MyThingPartlyExposed"))
        try:
            _ = Pyro4.util.SerializerBase.dict_to_class(d)
            self.fail("should crash")
        except Pyro4.errors.ProtocolError:
            pass # ok
    def testExceptionNamespacePy2(self):
        # a dict tagged with the python 2 exception namespace must still deserialize
        data = {'__class__': 'exceptions.ZeroDivisionError',
                '__exception__': True,
                'args': ('hello', 42),
                'attributes': {"test_attribute": 99}}
        exc = Pyro4.util.SerializerBase.dict_to_class(data)
        self.assertIsInstance(exc, ZeroDivisionError)
        self.assertEqual("ZeroDivisionError('hello', 42)", repr(exc))
        self.assertEqual(99, exc.test_attribute)
    def testExceptionNamespacePy3(self):
        # same as above, with the python 3 builtins namespace
        data = {'__class__': 'builtins.ZeroDivisionError',
                '__exception__': True,
                'args': ('hello', 42),
                'attributes': {"test_attribute": 99}}
        exc = Pyro4.util.SerializerBase.dict_to_class(data)
        self.assertIsInstance(exc, ZeroDivisionError)
        self.assertEqual("ZeroDivisionError('hello', 42)", repr(exc))
        self.assertEqual(99, exc.test_attribute)
    def testExceptionNotTagged(self):
        # without the __exception__ tag the class must be rejected
        data = {'__class__': 'builtins.ZeroDivisionError',
                'args': ('hello', 42),
                'attributes': {}}
        with self.assertRaises(Pyro4.errors.SerializeError) as cm:
            _ = Pyro4.util.SerializerBase.dict_to_class(data)
        self.assertEqual("unsupported serialized class: builtins.ZeroDivisionError", str(cm.exception))
    def testWeirdFloats(self):
        # +inf, -inf and nan must survive a round trip (sign of inf included)
        ser = Pyro4.util.get_serializer(config.SERIALIZER)
        p, _ = ser.serializeData([float("+inf"), float("-inf"), float("nan")])
        s2 = ser.deserializeData(p)
        self.assertTrue(math.isinf(s2[0]))
        self.assertEqual(1.0, math.copysign(1, s2[0]))
        self.assertTrue(math.isinf(s2[1]))
        self.assertEqual(-1.0, math.copysign(1, s2[1]))
        self.assertTrue(math.isnan(s2[2]))
def mything_dict(obj):
    # serialization hook: convert a MyThingPartlyExposed-like object into the
    # custom tagged-dict form used by testCustomDictClass
    serialized = dict()
    serialized["__class__"] = "CUSTOM-Mythingymabob"
    serialized["name"] = obj.name
    return serialized
def mything_creator(classname, d):
    # deserialization hook: rebuild a MyThingPartlyExposed from its dict form
    expected = "CUSTOM-Mythingymabob"
    assert classname == expected
    assert d["__class__"] == expected
    return MyThingPartlyExposed(d["name"])
if __name__ == "__main__":
    # Run the whole serializer test suite when this module is executed directly.
    # (To run a single test, set sys.argv = ['', 'Test.testName'] beforehand.)
    unittest.main()
| [
"irmen@razorvine.net"
] | irmen@razorvine.net |
959b36ffc39fe17b4ec4cb1d925ad67bca447215 | 0452408a98e03408508b4889ed68a8d0f2d9f8cf | /alphatwirl/roottree/Events.py | dcd99a451aa2f0d07fe62f0b79f023eb3c2325ed | [
"BSD-3-Clause"
] | permissive | benkrikler/alphatwirl | 3318e79b89ce0e79c4a4399c7a40c789531f0e60 | cda7d12fec21291ea33af23234fc08be19430934 | refs/heads/master | 2021-01-23T12:54:05.101466 | 2018-09-26T13:13:18 | 2018-09-26T13:13:18 | 93,210,643 | 0 | 0 | BSD-3-Clause | 2018-03-19T12:27:16 | 2017-06-02T23:18:59 | Python | UTF-8 | Python | false | false | 2,629 | py | # Tai Sakuma <tai.sakuma@gmail.com>
##__________________________________________________________________||
class Events(object):
    """Iterate over the entries of a ROOT TTree.

    Wraps a TTree so that a ``for`` loop visits each entry in turn::

        events = Events(tree)
        for event in events:
            event.jet_pt  # branches are readable as attributes

    ``event`` and ``events`` are the same object; each iteration (or an
    index access such as ``events[10]``) simply loads another tree entry
    into it. An optional window is selected with ``start`` (first entry
    to visit) and ``maxEvents`` (upper bound; negative means no limit).
    """

    def __init__(self, tree, maxEvents=-1, start=0):
        if start < 0:
            raise ValueError("start must be greater than or equal to zero: {} is given".format(start))
        self.tree = tree
        entries_in_tree = self.tree.GetEntries()
        start = min(entries_in_tree, start)
        remaining = entries_in_tree - start
        # a negative maxEvents means "iterate to the end of the tree"
        self.nEvents = remaining if maxEvents <= -1 else min(remaining, maxEvents)
        self.maxEvents = maxEvents
        self.start = start
        self.iEvent = -1

    def __len__(self):
        return self.nEvents

    def __repr__(self):
        return '{}({})'.format(
            self.__class__.__name__,
            self._repr_contents()
        )

    def _repr_contents(self):
        return 'tree = {!r}, maxEvents={!r}, start={!r}, nEvents={!r}, iEvent={!r}'.format(
            self.tree,
            self.maxEvents,
            self.start,
            self.nEvents,
            self.iEvent
        )

    def __getitem__(self, i):
        if i >= self.nEvents:
            self.iEvent = -1
            raise IndexError("the index is out of range: " + str(i))
        self.iEvent = i
        self.tree.GetEntry(self.start + self.iEvent)
        return self

    def __iter__(self):
        total = self.nEvents
        index = 0
        while index < total:
            self.iEvent = index
            self.tree.GetEntry(self.start + self.iEvent)
            yield self
            index += 1
        self.iEvent = -1

    def __getattr__(self, name):
        # anything not found on Events itself is looked up on the tree,
        # which makes tree branches readable as attributes of the event
        return getattr(self.tree, name)
##__________________________________________________________________||
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
cf363c988b6badef51b74389c99ca6acff643e5a | 97543ae8e1ad7bf3d17dd87171aaac04f6737b5f | /bibliopixel/drivers/ledtype.py | b1a962f06ec533a9bbfeac352c4d4ccbe0cf78b5 | [
"MIT"
] | permissive | dr-aryone/BiblioPixel | a3c630bf1cd5db2b014b86775d283c61565a193e | fd97e6c651a4bbcade64733847f4eec8f7704b7c | refs/heads/master | 2020-05-27T16:19:15.043592 | 2019-03-23T08:52:37 | 2019-03-25T11:10:39 | 188,698,414 | 2 | 1 | MIT | 2019-05-26T15:12:38 | 2019-05-26T15:12:37 | null | UTF-8 | Python | false | false | 588 | py | from enum import IntEnum
class LEDTYPE(IntEnum):
    """Enumerated LED type names to be used with
    :py:mod:`bibliopixel.drivers.serial` and
    :py:mod:`bibliopixel.drivers.SPI`

    Several chipsets share a wire protocol; those members are defined with
    the same integer value and therefore act as aliases of one another.
    """
    GENERIC = 0 # Use if the serial device only supports one chipset
    LPD8806 = 1
    WS2801 = 2
    # These are all the same
    WS2811 = 3
    WS2812 = 3
    WS2812B = 3
    NEOPIXEL = 3
    APA104 = 3
    # 400khz variant of above
    WS2811_400 = 4
    # TM1809 and TM1804 share a protocol
    TM1809 = 5
    TM1804 = 5
    TM1803 = 6
    UCS1903 = 7
    SM16716 = 8
    # APA102 and SK9822 share a protocol
    APA102 = 9
    SK9822 = 9
    LPD1886 = 10
    P9813 = 11
| [
"adammhaile@gmail.com"
] | adammhaile@gmail.com |
b792f2d676652222eaf549ca8143d596860efca3 | e39e6bf58db7ba160e42fdf2b2cc484a05b37be1 | /maths2/maxgcd-delete-one.py | 5968326cd7c2fc417448a9d26241c44817204da9 | [] | no_license | mohits1005/DSAlgo | fee93e3eb96c41dd573f679d6b67b0215f887451 | bc82cc2de7ddbe7d31786a4f3e5826eddbd0e05e | refs/heads/main | 2023-04-10T03:58:38.487125 | 2021-04-09T06:36:50 | 2021-04-09T06:36:50 | 320,060,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | '''
Delete one
Problem Description
Given an integer array A of size N. You have to delete one element such that the GCD(Greatest common divisor) of the remaining array is maximum.
Find the maximum value of GCD.
Problem Constraints
2 <= N <= 105
1 <= A[i] <= 109
Input Format
First argument is an integer array A.
Output Format
Return an integer denoting the maximum value of GCD.
Example Input
Input 1:
A = [12, 15, 18]
Input 2:
A = [5, 15, 30]
Example Output
Output 1:
6
Output 2:
15
Example Explanation
Explanation 1:
If you delete 12, gcd will be 3.
If you delete 15, gcd will be 6.
If you delete 18, gcd will be 3.
Maximum value of gcd is 6.
Explanation 2:
If you delete 5, gcd will be 15.
If you delete 15, gcd will be 5.
If you delete 30, gcd will be 5.
'''
class Solution:
    # @param A : list of integers
    # @return an integer
    def gcd(self, A, B):
        """Return the greatest common divisor of A and B (Euclid's algorithm).

        Fix: gcd(A, 0) is A, not 0 as the previous version returned. The
        Euclidean loop also handles A < B automatically, because the first
        iteration simply swaps the operands.
        """
        while B:
            A, B = B, A % B
        return A
    def solve(self, A):
        """Return the maximum GCD obtainable by deleting exactly one element of A.

        Uses prefix/suffix GCD arrays so each candidate deletion is evaluated
        in O(1), giving O(n log(max(A))) overall.

        Fixes over the previous version:
        - deleting the LAST element compared ``leftgcd`` but assigned
          ``rightgcd``, producing a wrong candidate value;
        - the function returned the deleted element ``A[ans]`` instead of the
          maximum GCD that the problem statement asks for.
        """
        n = len(A)
        # prefix[i] = gcd(A[0..i]), suffix[i] = gcd(A[i..n-1])
        prefix = [0] * n
        suffix = [0] * n
        prefix[0] = A[0]
        for i in range(1, n):
            prefix[i] = self.gcd(prefix[i - 1], A[i])
        suffix[n - 1] = A[n - 1]
        for i in range(n - 2, -1, -1):
            suffix[i] = self.gcd(suffix[i + 1], A[i])
        # Deleting the first (or last) element leaves a pure suffix (or prefix).
        best = max(suffix[1], prefix[n - 2])
        # Deleting an interior element combines the parts on either side.
        for i in range(1, n - 1):
            best = max(best, self.gcd(prefix[i - 1], suffix[i + 1]))
        return best
| [
"mohit@chalkstreet.com"
] | mohit@chalkstreet.com |
c45d4e7adee31fd52a7bbce6e5be6f33014affe9 | b4d116e844419a641ef36a595d0477871072665e | /optimaldispatch/problem/optimal_dispatch.py | c2e88ecbcb8bc8cc5d1d377765f882d1559b09ee | [
"MIT"
] | permissive | andremaravilha/optimal-dispatch | fe40aea30237e8688736a4d414f187d90b5e4e04 | eb77dd694496c2c30e239475ea800d9062c47575 | refs/heads/main | 2023-06-22T14:53:22.782203 | 2023-06-15T00:10:47 | 2023-06-15T00:10:47 | 327,088,058 | 8 | 1 | MIT | 2021-02-04T14:56:27 | 2021-01-05T18:41:54 | Python | UTF-8 | Python | false | false | 29,970 | py | import copy
import math
import json
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.lines as mlines
from scipy.stats import norm
# Exports
__all__ = ["OptimalDispatch"]
class OptimalDispatch(object):
"""
Class that defines the Optimal Dispatch problem.
"""
    def __init__(self, instance_file, cvar_epsilon=None):
        """
        Constructor.
        :param instance_file: JSON file with instance data
        :param cvar_epsilon: optional epsilon value for the CVaR e-constraint;
            when None, CVaR handling is disabled (see ``enable_cvar``).
        """
        # Parse JSON file
        # NOTE(review): no schema validation is performed -- every key read
        # below is assumed to be present, and a missing key raises KeyError.
        data = json.loads(instance_file.read())
        # Time horizon (24 hours divided into intervals)
        self.n_intervals = data["n_intervals"]
        self.business_hours = data["business_hours"]
        # Demand
        self.demand = data["demand"]
        # Energy price
        self.selling_price = data["selling_price"]
        self.buying_price = data["buying_price"]
        # Generators
        self.n_generators = data["n_generators"]
        self.generators = data["generators"]
        # Battery
        self.battery = data["battery"]
        # Fuel
        self.biogas = data["biogas"]
        self.ethanol = data["ethanol"]
        self.biomethane = data["biomethane"]
        self.gnv = data["gnv"]
        # Bus fleet
        self.buses_demand = data["buses_demand"]
        # Solar energy
        self.solar_energy = data["solar_energy"]
        # CVaR e-constraint (enabled only when an epsilon value is supplied)
        self.enable_cvar = cvar_epsilon is not None
        self.epsilon = cvar_epsilon
def evaluate(self, solution):
"""
Calculate objective function of the given solution.
:param solution: Solution to evaluate.
:return: Objective function value.
"""
# Expand solution
solution = self.__expand_solution(solution)
# Return total cost and constraints
return solution["data"]["total_cost"], solution["constraints"]["inequality"], solution["constraints"]["equality"]
def calculate_infeasibility(self, g, h, eps=1e-5):
"""
Calculate infeasibility for all constraints.
:param g: inequality constraints.
:param h: equality constraints.
:param eps: precision.
:return: two lists: the first regarding to the inequality constraints and the
second regarding to the equality constraints.
"""
inf_g = [max(0, value - eps) for value in g]
inf_h = [abs(value) > eps for value in h]
return inf_g, inf_h
def is_feasible(self, solution, eps=1e-5):
"""
Check if a give solution is feasible.
:param solution: Solution to check feasibility.
:return: True if the solution is feasible, or False otherwise.
"""
_, g, h = self.evaluate(solution)
inf_g, inf_h = self.calculate_infeasibility(g, h, eps)
return (sum(inf_g) + sum(inf_h)) > 0
    def draw_solution(self, solution, label="optimaldispatch", block=False, interactive=False, show=True):
        """
        Draw a solution as a stacked bar chart over the 24h time horizon.
        :param solution: solution to draw.
        :param label: matplotlib figure label (reusing a label reuses the window).
        :param block: if True, plt.show blocks until the window is closed.
        :param interactive: if True, enable matplotlib interactive mode.
        :param show: if True, display the figure.
        """
        # Expand solution
        solution = self.__expand_solution(solution)
        data = solution["data"]
        # Computes infeasibility
        inf_g, inf_h = self.calculate_infeasibility(solution["constraints"]["inequality"],
                                                    solution["constraints"]["equality"])
        infeasibility = sum(inf_g) + sum(inf_h)
        # Set interactivity on plot
        if interactive:
            plt.ion()
        else:
            plt.ioff()
        # Intervals (timeline: x-axis)
        intervals = list(range(0, self.n_intervals))
        xtick_label = [dates.num2date(1 + (i * (1.0 / self.n_intervals))).strftime('%H:%M') for i in intervals]
        plt.rc('xtick', labelsize=12)
        plt.rc('ytick', labelsize=12)
        # Create a figure (or set it as the current one if already exists) and set its size
        plt.figure(label, figsize=(15, 10), clear=True)
        # Plot electrical demands
        plt_demands = plt.plot(intervals, data["electric_energy_demand"], color='black')
        # 'previous' accumulates the running top of the stacked bars
        previous = [0] * self.n_intervals
        # Plot solar energy
        plt_solar_energy = plt.bar(intervals, self.solar_energy, color='tab:orange', bottom=previous)
        previous = [previous[j] + self.solar_energy[j] for j in range(0, self.n_intervals)]
        # Plot generators (biogas and ethanol), summed over all generators
        rate_biogas = [0] * self.n_intervals
        rate_ethanol = [0] * self.n_intervals
        for i in range(0, self.n_generators):
            biogas = data["generators"]["biogas_rate"][i]
            ethanol = data["generators"]["ethanol_rate"][i]
            for j in range(0, self.n_intervals):
                rate_biogas[j] += biogas[j]
                rate_ethanol[j] += ethanol[j]
        plt_engine_biogas = plt.bar(intervals, rate_biogas, color='limegreen', bottom=previous)
        previous = [previous[j] + rate_biogas[j] for j in range(0, self.n_intervals)]
        plt_engine_ethanol = plt.bar(intervals, rate_ethanol, color='tab:green', bottom=previous)
        previous = [previous[j] + rate_ethanol[j] for j in range(0, self.n_intervals)]
        # Plot battery energy (negative battery energy = discharging into the grid)
        battery_energy_use = [max(0, -x) for x in data["battery"]["energy"]]
        plt_battery_energy = plt.bar(intervals, battery_energy_use, color='tab:blue', bottom=previous)
        previous = [previous[j] + battery_energy_use[j] for j in range(0, self.n_intervals)]
        # Purchased electricity
        purchased_electric_energy = data["commercialization"]["electric_energy_buying"]
        plt_purchased = plt.bar(intervals, purchased_electric_energy, color='gold', bottom=previous)
        previous = [previous[j] + purchased_electric_energy[j] for j in range(0, self.n_intervals)]
        # Defines the basis for plotting energy use beyond demand
        previous_excess = data["electric_energy_demand"].copy()
        # Battery charging (positive battery energy), drawn hatched above the demand line
        battery_charging_energy = [max(0, x) for x in data["battery"]["energy"]]
        battery_charging_values = [battery_charging_energy[j] for j in range(0, self.n_intervals) if battery_charging_energy[j] > 0]
        battery_charging_intervals = [intervals[j] for j in range(0, self.n_intervals) if battery_charging_energy[j] > 0]
        battery_charging_previous = [previous_excess[j] for j in range(0, self.n_intervals) if battery_charging_energy[j] > 0]
        plt_battery_charging = plt.bar(battery_charging_intervals, battery_charging_values, facecolor=None,
                                       edgecolor="tab:red", hatch="///", fill=False, bottom=battery_charging_previous)
        previous_excess = [previous_excess[j] + battery_charging_energy[j] for j in range(0, self.n_intervals)]
        # Sold electricity
        sold_electric_energy = data["commercialization"]["electric_energy_selling"]
        sold_electric_values = [sold_electric_energy[j] for j in range(0, self.n_intervals) if sold_electric_energy[j] > 0]
        sold_electric_intervals = [intervals[j] for j in range(0, self.n_intervals) if sold_electric_energy[j] > 0]
        sold_electric_previous = [previous_excess[j] for j in range(0, self.n_intervals) if sold_electric_energy[j] > 0]
        plt_sold = plt.bar(sold_electric_intervals, sold_electric_values, facecolor=None, edgecolor="black",
                           hatch="...", fill=False, bottom=sold_electric_previous)
        previous_excess = [previous_excess[j] + sold_electric_energy[j] for j in range(0, self.n_intervals)]
        # Legend (a proxy line artist stands in for the demand curve)
        plt_demands_proxy = mlines.Line2D([], [], color='black', marker=None)
        plt_objects = [plt_demands_proxy, plt_solar_energy, plt_engine_biogas, plt_engine_ethanol, plt_battery_energy,
                       plt_purchased, plt_sold, plt_battery_charging]
        legends = ["Electrical demands", "PV", "Engine (biogas)", "Engine (ethanol)", "Battery use",
                   "Purchased electricity", "Sold electricity", "Battery charging"]
        plt.legend(plt_objects, legends, loc="upper left", ncol=2, fontsize=14)
        # Other attributes
        plt.xlabel("Time (HH:MM)", fontsize=14)
        plt.ylabel("Energy (kWh/2)", fontsize=14)
        #plt.xticks(intervals, xtick_label, rotation='vertical')
        plt.xticks(intervals, xtick_label, rotation=45)
        # Solution details (text summary below the chart)
        battery_final_load = data["battery"]["load"][-1]
        biogas_produced = data["biogas"]["production"]
        biogas_for_engine = data["biogas"]["used_for_engines"]
        biomethane_produced = data["biomethane"]["production"]
        details = ""
        details += "Total cost: {:.2f} {}\n".format(data["total_cost"], "(infeasible solution)" if infeasibility > 1E-6 else "")
        details += "Biogas production: {:.2f} kWh\n".format(biogas_produced)
        details += "Engine biogas consumption: {:.2f} kWh ({:.2f}%)\n".format(biogas_for_engine, (biogas_for_engine / biogas_produced) * 100)
        details += "Bus fleet biomethane: {:.2f} kWh\n".format(biomethane_produced)
        details += "Purchased VNG: {:.2f} m³\n".format(data["bus_fleet"]["gnv_m3"])
        details += "Battery final load: {:.2f} kWh\n".format(battery_final_load)
        if self.enable_cvar:
            details += "CVaR: {:.2f} (Probability level: {:.2f})".format(data["cvar"]["cvar"], data["cvar"]["alpha"])
        #plt.figtext(0.0, 0.0, details, horizontalalignment='left', color='black', fontsize=16)
        #plt.text(0.0, -100, details, ha="left", fontsize=16, wrap=True)
        plt.gcf().text(0.05, 0.005, details, fontsize=14)
        plt.subplots_adjust(bottom=0.26, top=0.99, left=0.05, right=0.99)
        # Show
        if show:
            plt.show(block=block)
        # Allow time to draw
        if not block:
            plt.pause(1e-3)
def solution_to_json(self, solution, file):
# Get solution attributes
generators_status, generators_rate, generators_fuel_composition, battery_energy, biogas_production = solution
cost, g, h = self.evaluate(solution)
inf_g, inf_h = self.calculate_infeasibility(g, h)
infeasibility = sum(inf_g) + sum(inf_h)
data = dict()
# cost and infeasibility
data["cost"] = cost
data["infeasibility"] = infeasibility
# CVaR data
data["cvar"] = {
"enabled": self.enable_cvar,
"epsilon": self.epsilon
}
# Generators' data
data["generators"] = []
for i in range(0, self.n_generators):
data_generator = dict()
data_generator["status"] = generators_status[i]
data_generator["rate"] = generators_rate[i]
data_generator["fuel_composition"] = generators_fuel_composition[i]
data["generators"].append(data_generator)
# Battery and biogas data
data["battery_energy"] = battery_energy
data["biogas_production"] = biogas_production
json.dump(data, file, indent=2)
def __expand_solution(self, solution):
"""
Computes a set of auxiliary data from a given solution, returning them into a dictionary. Besides the auxiliary
data computed from the solution, the dictionary contains the attributes of the solution itself. An expanded
solution is used to evaluates its cost, evaluates its constraints, and draw it.
The dictionary returned has the following structure:
TODO
:param solution: Solution to compute auxiliary data.
"""
# Dictionary to store the all solution attributes and its auxiliary data
expanded_solution = {
"data": {
"total_cost": None,
"electric_energy_demand": [],
"battery": {
"energy": [],
"load": [],
"cost": None
},
"generators": {
"status": [[] for _ in range(0, self.n_generators)],
"rate": [[] for _ in range(0, self.n_generators)],
"fuel_composition": [[] for _ in range(0, self.n_generators)],
"efficiency": [[] for _ in range(0, self.n_generators)],
"biogas_consumption": [[] for _ in range(0, self.n_generators)],
"ethanol_consumption": [[] for _ in range(0, self.n_generators)],
"biogas_rate": [[] for _ in range(0, self.n_generators)],
"ethanol_rate": [[] for _ in range(0, self.n_generators)],
"status_changed": [[] for _ in range(0, self.n_generators)],
"fuel_cost": [None for _ in range(0, self.n_generators)],
"up_down_cost": [None for _ in range(0, self.n_generators)],
},
"bus_fleet": {
"biomethane_m3": None,
"gnv_m3": None,
"cost": None
},
"commercialization": {
"electric_energy_buying": [],
"electric_energy_selling": [],
"cost": None
},
"biogas": {
"production": None,
"production_cost": None,
"used_for_engines": None,
"used_for_biomethane": None
},
"biomethane": {
"production": None,
"production_cost": None
},
"cvar": {
"cvar": None,
"alpha": None
}
},
"constraints": {
"equality": [],
"inequality": []
}
}
# Get solution attributes
generators_status, generators_rate, generators_fuel_composition, battery_energy, biogas_production = solution
# Interval slots per hour
intervals_per_hour = int(self.n_intervals / 24)
# Calculate biogas cost per KW
biogas_cost = ((self.biogas["maintenance_cost"][0] * biogas_production + self.biogas["maintenance_cost"][1] +
self.biogas["input_cost"] + self.biogas["transport_cost"]) / biogas_production)
# Battery
battery_energy = battery_energy.copy()
battery_load = ([0.0] * self.n_intervals) + [self.battery["initial_load"]]
battery_cost = [0.0] * self.n_intervals
for j in range(0, self.n_intervals):
# Battery load
if battery_energy[j] > 0:
battery_load[j] = battery_load[j - 1] + battery_energy[j] * self.battery["eff_charge"]
else:
battery_load[j] = battery_load[j - 1] + battery_energy[j] / self.battery["eff_discharge"]
# Battery use cost
battery_cost[j] = abs(battery_energy[j] * self.battery["cost"])
expanded_solution["data"]["battery"]["energy"] = battery_energy
expanded_solution["data"]["battery"]["load"] = battery_load[0:(len(battery_load) - 1)]
expanded_solution["data"]["battery"]["cost"] = sum(battery_cost)
# end: battery
# Generators
generators_status = copy.deepcopy(generators_status)
generators_rate = copy.deepcopy(generators_rate)
generators_fuel_composition = copy.deepcopy(generators_fuel_composition)
generators_status_changed = [[] for _ in range(0, self.n_generators)]
generators_efficiency = [[] for _ in range(0, self.n_generators)]
generators_consumption = [[] for _ in range(0, self.n_generators)]
generators_biogas_consumption = [[] for _ in range(0, self.n_generators)]
generators_ethanol_consumption = [[] for _ in range(0, self.n_generators)]
generators_biogas_rate = [[] for _ in range(0, self.n_generators)]
generators_ethanol_rate = [[] for _ in range(0, self.n_generators)]
generators_fuel_cost = [0 for _ in range(0, self.n_generators)]
generators_up_down_cost = [0 for _ in range(0, self.n_generators)]
for i in range(0, self.n_generators):
# Status changing (Up / Down)
status_aux = generators_status[i] + [self.generators[i]["initial_state"]]
generators_status_changed[i] = [status_aux[j] != status_aux[j - 1] for j in range(0, self.n_intervals)]
# Makes sure generator rate is zero if it is down (status equal to zero)
generators_rate[i] = [generators_rate[i][j] * generators_status[i][j] for j in range(0, self.n_intervals)]
# Efficiency
label = [None, "efficiency_1", "efficiency_2", "efficiency_3"]
a = [self.generators[i][label[generators_fuel_composition[i][j]]][0] for j in range(0, self.n_intervals)]
b = [self.generators[i][label[generators_fuel_composition[i][j]]][1] for j in range(0, self.n_intervals)]
generators_efficiency[i] = [a[j] * (generators_rate[i][j] / self.generators[i]["upper_rate"]) + b[j]
for j in range(0, self.n_intervals)]
# Fuel consumption
generators_consumption[i] = [(generators_rate[i][j] / generators_efficiency[i][j])
for j in range(0, self.n_intervals)]
# Biogas consumption
generators_biogas_consumption[i] = [(generators_consumption[i][j] *
(0.20 * 2 ** (generators_fuel_composition[i][j] - 1) +
0.10 * (generators_fuel_composition[i][j] - 1)))
for j in range(0, self.n_intervals)]
# Ethanol rate
generators_ethanol_consumption[i] = [(generators_consumption[i][j] - generators_biogas_consumption[i][j])
for j in range(0, self.n_intervals)]
# Biogas rate
generators_biogas_rate[i] = [(generators_rate[i][j] *
(0.20 * 2 ** (generators_fuel_composition[i][j] - 1) +
0.10 * (generators_fuel_composition[i][j] - 1)))
for j in range(0, self.n_intervals)]
# Ethanol consumption
generators_ethanol_rate[i] = [(generators_rate[i][j] - generators_biogas_rate[i][j])
for j in range(0, self.n_intervals)]
# Calculates fuel cost and up/down cost
for j in range(0, self.n_intervals):
# Fuel cost
generators_fuel_cost[i] += (generators_biogas_consumption[i][j] * biogas_cost +
generators_ethanol_consumption[i][j] * self.ethanol["cost"])
# Up/Down cost
if generators_status_changed[i][j] and generators_status[i][j] == 1:
generators_up_down_cost[i] += self.generators[i]["up_cost"]
elif generators_status_changed[i][j] and generators_status[i][j] == 0:
generators_up_down_cost[i] += self.generators[i]["down_cost"]
# end: generators cost
expanded_solution["data"]["generators"]["stats"] = generators_status
expanded_solution["data"]["generators"]["rate"] = generators_rate
expanded_solution["data"]["generators"]["fuel_composition"] = generators_fuel_composition
expanded_solution["data"]["generators"]["efficiency"] = generators_efficiency
expanded_solution["data"]["generators"]["biogas_consumption"] = generators_biogas_consumption
expanded_solution["data"]["generators"]["ethanol_consumption"] = generators_ethanol_consumption
expanded_solution["data"]["generators"]["biogas_rate"] = generators_biogas_rate
expanded_solution["data"]["generators"]["ethanol_rate"] = generators_ethanol_rate
expanded_solution["data"]["generators"]["status_changed"] = generators_status_changed
expanded_solution["data"]["generators"]["fuel_cost"] = generators_fuel_cost
expanded_solution["data"]["generators"]["up_down_cost"] = generators_up_down_cost
#end: generators
# Calculates the amount of biogas remaining
used_biogas = sum([sum(generators_biogas_consumption[i]) for i in range(0, self.n_generators)])
remaining_biogas = max(0, biogas_production - used_biogas)
expanded_solution["data"]["biogas"]["production"] = biogas_production
expanded_solution["data"]["biogas"]["production_cost"] = biogas_cost
expanded_solution["data"]["biogas"]["used_for_engines"] = used_biogas
expanded_solution["data"]["biogas"]["used_for_biomethane"] = remaining_biogas
# Calculates available biomethane and its cost
biomethane_available = remaining_biogas * self.biomethane["efficiency"]
biomethane_cost = (biogas_cost * remaining_biogas +
self.biomethane["maintenance_cost"][0] * biomethane_available +
self.biomethane["maintenance_cost"][1])
expanded_solution["data"]["biomethane"]["production"] = biomethane_available
expanded_solution["data"]["biomethane"]["production_cost"] = biomethane_cost
# Convert available biomethane from KWh to m^3
biomethane_available = biomethane_available / 10.92
# Updates demand with energy required by biogas production and biomethane production
demand = self.demand.copy()
# Business hours (in hours and intervals of the time horizon)
working_hours = self.business_hours["end"] - self.business_hours["start"]
start_interval = self.business_hours["start"] * intervals_per_hour
end_interval = self.business_hours["end"] * intervals_per_hour
# Biogas energy consumption
biogas_energy_consumption = ((self.biogas["consumption"][0] * biogas_production + self.biogas["consumption"][1]) /
(end_interval - start_interval))
for j in range(start_interval, end_interval):
demand[j] = demand[j] + biogas_energy_consumption
# Biomethane energy consumption
total_compression_capacity = self.biomethane["compression_capacity"] * working_hours
if biomethane_available <= total_compression_capacity:
compression_time_start = start_interval
compression_time_end = end_interval
compression_intervals = compression_time_end - compression_time_start
else:
compression_intervals = math.ceil((biomethane_available / self.biomethane["compression_capacity"]) *
intervals_per_hour)
compression_time_start = start_interval
compression_time_end = compression_time_start + compression_intervals
if compression_time_end > self.n_intervals:
compression_time_start = max(0, compression_time_start - (compression_time_end - self.n_intervals))
compression_time_end = self.n_intervals
biomethane_energy_consumption = ((self.biomethane["energy_consumption"] * biomethane_available) /
compression_intervals)
for j in range(compression_time_start, compression_time_end):
demand[j] = demand[j] + biomethane_energy_consumption
# Bus fleet
buses_gnv_required = max(0, self.buses_demand - biomethane_available)
buses_fuel_cost = biomethane_cost + (self.gnv["cost"] * buses_gnv_required)
expanded_solution["data"]["bus_fleet"]["biomethane_m3"] = biomethane_available
expanded_solution["data"]["bus_fleet"]["gnv_m3"] = buses_gnv_required
expanded_solution["data"]["bus_fleet"]["cost"] = buses_fuel_cost
# Electrical Energy Commercialization (buy / sell)
commercialization_electric_energy = demand.copy()
commercialization_electric_energy_buying = [0.0] * self.n_intervals
commercialization_electric_energy_selling = [0.0] * self.n_intervals
commercialization_electric_energy_cost = [0.0] * self.n_intervals
for j in range(0, self.n_intervals):
# Energy commercialized
commercialization_electric_energy[j] -= self.solar_energy[j]
commercialization_electric_energy[j] += battery_energy[j]
for i in range(0, self.n_generators):
commercialization_electric_energy[j] -= generators_status[i][j] * generators_rate[i][j]
# Cost of energy commercialized
if commercialization_electric_energy[j] > 0:
commercialization_electric_energy_buying[j] = abs(commercialization_electric_energy[j])
commercialization_electric_energy_cost[j] = commercialization_electric_energy[j] * self.buying_price[j]
else:
commercialization_electric_energy_selling[j] = abs(commercialization_electric_energy[j])
commercialization_electric_energy_cost[j] = commercialization_electric_energy[j] * self.selling_price[j]
expanded_solution["data"]["commercialization"]["electric_energy_buying"] = commercialization_electric_energy_buying
expanded_solution["data"]["commercialization"]["electric_energy_selling"] = commercialization_electric_energy_selling
expanded_solution["data"]["commercialization"]["cost"] = sum(commercialization_electric_energy_cost)
# end: electrical energy commercialization
# Calculates total cost
total_cost = (sum(commercialization_electric_energy_cost) + sum(battery_cost) + buses_fuel_cost +
sum(generators_up_down_cost) + sum(generators_fuel_cost))
expanded_solution["data"]["total_cost"] = total_cost
expanded_solution["data"]["electric_energy_demand"] = demand
# -----------------------------------------------------------
# CONSTRAINTS
expanded_solution["constraints"]["equality"] = []
expanded_solution["constraints"]["inequality"] = []
# Constraint: ramp rate limit
for i in range(0, self.n_generators):
rate = generators_rate[i] + [self.generators[i]["initial_state"] * self.generators[i]["lower_rate"]]
status = generators_status[i] + [self.generators[i]["initial_state"]]
diff = [abs((rate[j] * status[j]) - (rate[j - 1] * status[j - 1])) for j in range(0, self.n_intervals)]
# Ramp rate constraints: (diff - limit) * <= 0, if generator is up
constraints = [(diff[j] - self.generators[i]["ramp_rate_limit"]) * int(status[j])
for j in range(0, self.n_intervals)]
expanded_solution["constraints"]["inequality"] += constraints
# end: ramp rate limit
# Constraint: minimum window length without generator status changing
constraints_generator_window = []
for i in range(0, self.n_generators):
status_changed = generators_status_changed[i]
for j in range(0, (len(status_changed) - self.generators[i]["up_down_window"] + 1)):
start_window = j
end_window = j + self.generators[i]["up_down_window"]
# Window constraint: changes - 1 <= 0
constraint = sum(status_changed[start_window:end_window]) - 1
expanded_solution["constraints"]["inequality"].append(constraint)
# end: windows minimum length
# Constraint: fuel available (used - available <= 0)
total_used_ethanol = sum([sum(generators_ethanol_consumption[i]) for i in range(0, self.n_generators)])
expanded_solution["constraints"]["inequality"].append(used_biogas - biogas_production)
expanded_solution["constraints"]["inequality"].append(total_used_ethanol - self.ethanol["disponibility"])
# end: fuel available
# Constraint: maximum and minimum battery load
battery_free_load = [(self.battery["max_load"] - battery_load[i - 1]) / self.battery["eff_charge"]
for i in range(0, self.n_intervals)]
battery_available_load = [battery_load[i - 1] * self.battery["eff_discharge"]
for i in range(0, self.n_intervals)]
constraint_battery1 = 0
constraint_battery2 = 0
for i in range(0, self.n_intervals):
if battery_energy[i] < 0:
constraint_battery1 += max((abs(battery_energy[i]) - battery_available_load[i]), 0)
else:
constraint_battery1 += max((battery_energy[i] - battery_free_load[i]), 0)
constraint_battery2 += sum(
[(abs(battery_load[i]) if (battery_load[i] < 0 or battery_load[i] > self.battery["max_load"]) else 0)
for i in range(0, self.n_intervals)])
expanded_solution["constraints"]["inequality"].append(constraint_battery1)
expanded_solution["constraints"]["inequality"].append(constraint_battery2)
# end: battery limit
# Constraints: CVaR (if enabled)
if self.enable_cvar:
z = (biogas_production - self.biogas["mean_production"]) / self.biogas["deviation_production"]
alpha = norm.cdf(z)
biogas_cvar = abs(alpha ** -1 * norm.pdf(z) * self.biogas["deviation_production"] - self.biogas["mean_production"])
diff_biogas = self.biogas["mean_production"] - biogas_cvar
cvar = diff_biogas * 0.27 * self.buying_price[40]
expanded_solution["data"]["cvar"]["cvar"] = cvar
expanded_solution["data"]["cvar"]["alpha"] = alpha
constraint_cvar = min((cvar - self.epsilon * 82), 0)
expanded_solution["constraints"]["inequality"].append(constraint_cvar)
# end: CVaR
# Return the expanded solution
return expanded_solution
| [
"andre.maravilha@outlook.com"
] | andre.maravilha@outlook.com |
78ac77bbaba347ba0643688428339f03ef0ddee3 | 02b04b202550248a2b78ed069d94b7607640c866 | /DataTypes/Numbers/max.py | 5c3f7d5ee91951b547605f887089c82a0ca3b66a | [] | no_license | keshavkummari/python-nit-7am | c391fe96783c224b44419a258738168230e182cd | 0bc867ad673e40ad401d7473aab4791f21ee1945 | refs/heads/master | 2020-03-30T15:05:18.376222 | 2018-11-05T02:30:44 | 2018-11-05T02:30:44 | 151,347,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | #!/usr/bin/python
"""9. max(x1, x2,...) : The largest of its arguments: the value closest to positive infinity
Python Number max() Method:"""
print "max(80, 100, 1000) : ", max(80, 100, 1000)
print "max(-20, 100, 400) : ", max(-20, 100, 400)
print "max(-80, -20, -10) : ", max(-80, -20, -10)
print "max(0, 100, -400) : ", max(0, 100, -400)
#max() Method; min() Method
print "max method", max(30, -30, 40, 50)
print "min method", min(30, -30, 40, 50)
#!/usr/bin/python
#10. min(x1, x2,...): The smallest of its arguments: the value closest to negative infinity
#Python Number min() Method:
print "min(-20, 100, 400) : ", min(-20, 100, 400)
print "min(80, 100, 1000) : ", min(80, 100, 1000)
print "min(-80, -20, -10) : ", min(-80, -20, -10)
print "min(0, 100, -400) : ", min(0, 100, -400)
| [
"keshav.kummari@gmail.com"
] | keshav.kummari@gmail.com |
4356ecc0edf85ae785239d3ca4e92b8805d2cea1 | 4336835f308f8ca161fe13deb4a4e1a43bab5710 | /TwitterBot/modules/DayOfTheWeek.py | 8cfc4531d142f7e0b3c2553cfd6f8477d9d651ff | [
"MIT"
] | permissive | shogo82148/JO_RI_bot | da32b615f49f344a6ff7b64b7da92e330ed03a84 | 653008faf8356a6c6e2b44f0154f646774aff79b | refs/heads/master | 2016-09-05T15:10:23.401670 | 2016-02-17T11:38:29 | 2016-02-17T11:38:29 | 3,275,117 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
曜日計算機能
"""
import re
import datetime
import unicodedata
from TwitterBot.lib.jholiday import holiday_name
# Matches questions like "<era?><year>年<month>月<day>日は何曜日" in
# NFKC-normalized text.  Era names: 西暦 (Western calendar), 平成 (Heisei),
# 昭和 (Showa), 大正 (Taisho), 明治 (Meiji); 元 means "first year of the era".
_re_date = re.compile(ur'(西暦|平成|昭和|大正|明治)?(元|\d+)[/年](\d+)[/月](\d+)日?は何曜日')
# Japanese weekday names indexed by datetime.date.weekday() (0 = Monday).
_week_name = [u'月', u'火', u'水', u'木', u'金', u'土', u'日']

def hook(bot, status):
    """Reply to a tweet asking what day of the week a given date falls on.

    Converts Japanese era years to Western years, validates the date, and
    replies with the weekday name (plus the holiday name, if any).  Replies
    with an error message for impossible dates.  Returns True when the
    status was handled, False when the regex did not match.
    (Python 2 source: ur'' literals and `except Exception, e` syntax.)
    """
    m = _re_date.search(unicodedata.normalize('NFKC',status.text))
    if not m:
        return False
    nengo = m.group(1)
    if m.group(2)==u'元':
        # 元 = year 1 of the era.
        year = 1
    else:
        year = int(m.group(2))
    month = int(m.group(3))
    day = int(m.group(4))
    try:
        if year==0:
            raise Exception()
        if nengo==u'平成':
            # Heisei era: from 1989-01-08 onward.
            year += 1988
            if year==1989 and month==1 and day<8:
                raise Exception()
        elif nengo==u'昭和':
            # Showa era: 1926-12-25 through 1989-01-07.
            year += 1925
            if year==1926 and (month<12 or day<25):
                raise Exception()
            if year==1989 and (month>1 or day>7):
                raise Exception()
        elif nengo==u'大正':
            # Taisho era: 1912-07-30 through 1926-12-25.
            year += 1911
            if year==1912 and (month<7 or (month==7 and day<30)):
                raise Exception()
            if year==1926 and month==12 and day>25:
                raise Exception()
        elif nengo==u'明治':
            # Meiji era: 1868-01-25 through 1912-07-30.
            year += 1867
            if year==1868 and month==1 and day<25:
                raise Exception()
            if year==1912 and (month>7 or (month==7 and day>30)):
                raise Exception()
        # datetime.date raises ValueError for impossible dates, which the
        # broad except below turns into the "no such date" reply.
        date = datetime.date(year, month, day)
        hname = holiday_name(year, month, day)
        weekday = date.weekday()
        if hname:
            text = u'%d年%d月%d日は%s曜日、%sです。' % (year, month, day, _week_name[weekday], hname)
        else:
            text = u'%d年%d月%d日は%s曜日です。' % (year, month, day, _week_name[weekday])
    except Exception, e:
        print e
        text = u'そんな日付は存在しません。'
    text += ' [%s]' % bot.get_timestamp()
    bot.reply_to(text, status)
    return True
| [
"ichinose@stn.nagaokaut.ac.jp"
] | ichinose@stn.nagaokaut.ac.jp |
9bf2a6ca5b589024158f74c33601bb177949e1ab | 0a6923222175b160ddb5d2341c755dbcfdd5f8db | /scripts/exploration_noise/exploration_noise_generatorV1.py | 51e4ddbaaa1b34a7d1d6ac2efc013f711a53170c | [] | no_license | Osiron007/AgentSmith | 4e48d8cb252f440daf3babdc6b6ef78d9b643f52 | 2076ea69b12dd96c73e55dd1ce0de87bca0680ad | refs/heads/master | 2020-04-01T10:45:33.444836 | 2019-04-10T09:01:34 | 2019-04-10T09:01:34 | 153,130,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | import numpy as np
from random import randint
from OU import OU
OU = OU() #Ornstein-Uhlenbeck Process
class exploration_noise_generator(object):
    """Produces exploration noise for the left/right wheel commands of an agent.

    Two schemes are provided:
      * V1 -- Ornstein-Uhlenbeck noise (via the project's ``OU`` helper)
        centred on a per-wheel "working point".
      * V2 -- periodic bursts of uniform random kicks opposing the current
        command, separated by quiet phases.
    """

    def __init__(self, static_params, OU_mu, OU_theta, OU_sigma):
        """Store the OU parameters and reset working points / burst counter.

        static_params -- whether working points stay fixed (True) or should be
                         regenerated via generate_working_points().
        OU_mu    -- OU equilibrium value (used while static).
        OU_theta -- OU mean-reversion ("attraction") strength.
        OU_sigma -- OU diffusion (randomness) term.
        """
        print("Init exploration noise generator")
        # Defines if different working points should be generated or not.
        self.static = static_params
        # Ornstein-Uhlenbeck parameters (inline comments translated from German).
        self.mu = OU_mu        # equilibrium value
        self.theta = OU_theta  # attraction / mean-reversion strength
        self.sigma = OU_sigma  # diffusion term
        self.working_point_left = 0.0
        self.working_point_right = 0.0
        # Call counter scheduling the quiet/noisy phases of the V2 scheme.
        self.noise_cnt = 0

    def get_exploration_noiseV1(self, current_value, wheel_side):
        """Return OU noise for one wheel (wheel_side: 1 = left, 2 = right).

        The OU mean is re-targeted at the wheel's current working point, so
        the process pulls current_value towards that working point.
        """
        if wheel_side == 1:
            self.mu = self.working_point_left
        if wheel_side == 2:
            self.mu = self.working_point_right
        return OU.function(current_value, self.mu, self.theta, self.sigma)

    def _draw_working_point(self):
        """Draw one working point from {0.0, +-0.25, +-0.5, +-0.75, +-1.0}.

        Category 0 maps to 0.0, categories 1-4 to +0.25..+1.0, and 5-8 to
        -0.25..-1.0 -- the same mapping the original duplicated for each
        wheel, now shared by both.
        """
        category = randint(0, 8)
        if category == 0:
            return 0.0
        if category < 5:
            return category * 0.25
        return -(category - 4) * 0.25

    def generate_working_points(self):
        """Randomly re-draw the working points for both wheels."""
        self.working_point_left = self._draw_working_point()
        self.working_point_right = self._draw_working_point()
        print("Current Working Points: left: " + str(self.working_point_left))
        print("Current Working Points: right: " + str(self.working_point_right))

    def get_exploration_noiseV2(self, current_value, wheel_side):
        """Burst noise: quiet calls, then a stretch of random kicks.

        NOTE(review): the original comment claimed "6 cycles without noise
        and 12 cycles with noise", but the counter actually gives 7 quiet
        calls (cnt 0..6) followed by 18 noisy ones (cnt 7..24) -- behavior
        kept as coded.  During the noisy phase the return value lies in
        [0.01, 1.80] with the sign opposite to current_value (pushing the
        wheel away from saturation); in the quiet phase it is 0.0.
        wheel_side is accepted for interface symmetry with V1 but unused.
        """
        if self.noise_cnt > 6:
            # Reset the counter at the end of a noisy stretch (this call
            # still emits noise, as in the original).
            if self.noise_cnt > 24:
                self.noise_cnt = 0
            else:
                self.noise_cnt = self.noise_cnt + 1
            # Kick in the direction opposite to the current command.
            if current_value > 0:
                noise = randint(-180, -1)
            else:
                noise = randint(1, 180)
            # Fix: np.float was a plain alias of the builtin float and has
            # been removed from NumPy (>= 1.24); use float directly -- the
            # numeric result is identical.
            noise = float(noise) / 100
            return noise
        else:
            self.noise_cnt = self.noise_cnt + 1
            return 0.0
| [
"nieland@neobotix.de"
] | nieland@neobotix.de |
f776bc6f0532dcf9d21e83a48bec4ef05b6a364d | ddda1fd239dfa5b2000dfa6acafb876e246ae039 | /main_1.py | 06aa9d5d1a9cc2d0915a5b6473d54f0d5c179db8 | [] | no_license | 7dy/rewaq | e321f9b635653746a775b204fca2537b378a2b67 | 0874eaad583e5fb2af4a93b33f23b5522b292154 | refs/heads/main | 2023-06-04T03:15:37.247861 | 2021-07-08T19:29:42 | 2021-07-08T19:29:42 | 382,566,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | import requests, os
url = "https://spclient.wg.spotify.com/signup/public/v1/account/"
while True:
os.system("cls")
email = input("Enter email: ")
password = input("Enter password: ")
name = input("Enter name: ")
payload = {
'creation_point': 'https://login.app.spotify.com/?utm_source=spotify&utm_medium=desktop-win32-store&utm_campaign=msft_1&referral=msft_1&referrer=msft_1',
'key': '4c7a36d5260abca4af282779720cf631',
'platform': 'desktop',
'birth_month': '4',
'displayname': f'{name}',
'birth_day': '12',
'password_repeat': f'{password}',
'creation_flow': 'desktop',
'referrer': 'msft_1',
'iagree': '1',
'password': f'{password}',
'birth_year': '1998',
'gender': 'male',
'email': f'{email}',
'username': f'{name}'
}
headers = {
'Host': 'spclient.wg.spotify.com',
'client-token': 'AAD0FIQS2mUNeTalJyIQyiswa7WFnTRCkYIQDTrCdLThJVxhGeUWswLkEm1wlKHHd0TGI1uWz8mSFCwGVYNjeZDF+p4LsknfroAOmwMyuwvOtAxXpSgcp0JY8VNo2ATJPZXhuh/ckNEFSdbDErsFuXTcXf2QFAmIkbizztsDEue0r7z2BY21zNzpJbje4GYBvmBRIlg6eYmz1yNDAneetd+RAMdDWrQmlK4TMD8YRsIkD+UxIY4P7Q==',
'Cookie': '__gads=ID=b2928c2f22f3c8b2:T=1615518277:S=ALNI_MbruMuMNn1y7OYJUcnjwcxn6ZEzVg'
}
r = requests.post(url, headers=headers, data=payload)
print(r.text, r.status_code)
if r.json()['status'] == 1:
try:
with open('claimed.txt', "a") as prime:
prime.write(f'{email} : {password} : {name}\n')
except:
print(f'{email} : {password} : {name}\n')
input() | [
"noreply@github.com"
] | noreply@github.com |
dad057624e2f581bdeb4c651cff96a0694f620db | e18f23521733d09aa09c8b36ec92e11f9a9e9773 | /distinct_substring.py | 7ac4ab2e2e48402141c0fb6dbc8b69e460ea11bb | [] | no_license | architkithania/coding-challenges | 73b1fce9cdb4641aa86b64d7d8af5bf870c8d49d | 54b1c7149af57b6ee37bb0494561bdea33c38df5 | refs/heads/master | 2020-08-22T07:17:37.486776 | 2020-01-15T16:43:19 | 2020-01-15T16:43:19 | 216,345,966 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # Problem 13
def sub_string(string, k):
    """Best score returned by `helper` over every starting index of *string*.

    Each candidate suffix is scored with a fresh distinct-character list.
    The final character is never used as a starting index (mirroring the
    original loop bound of len(string) - 1), and an empty or one-character
    input yields 0.
    """
    best = 0
    for start in range(len(string) - 1):
        best = max(best, helper(string[start:], k, 0, []))
    return best
def helper(string, k, length, dist):
    """Scan *string* left to right, extending *length* one character at a
    time and decrementing *k* whenever the upcoming character has not been
    seen before; stop when *k* reaches 0 or fewer than two characters remain.

    Iterative rewrite of the original tail recursion.  *dist* (the list of
    seen characters) is mutated in place exactly as before.
    """
    while True:
        if k == 0:
            return length
        if len(string) <= 1:
            return length
        # Record the current head character the first time it is seen.
        if string[0] not in dist:
            dist.append(string[0])
        already_seen = string[1] in dist
        string = string[1:]
        length += 1
        if not already_seen:
            k -= 1
"archit.kithania@gmail.com"
] | archit.kithania@gmail.com |
e60100860f11115391c31f76f415de05c4338c12 | 51a43ec71ee214c3c995cd56d5e4a316334c4a65 | /myProfiles/migrations/0012_auto__add_usersupport__chg_field_userproject_description.py | bd3dd1591d4c89d7427f875e57f8e936750ec0e5 | [] | no_license | jaredpark/zparkweb | ba305305903626c29bf5c6ca784716f67be6a71a | 4ec3aa2182c018313e2f528cef6e1c92cc7a3647 | refs/heads/master | 2018-12-28T19:24:26.692215 | 2015-04-22T23:47:37 | 2015-04-22T23:47:37 | 32,129,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,524 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    forwards(): creates the ``myProfiles_usersupport`` table (model
    ``UserSupport``) and widens ``UserProject.description`` to 400 chars.
    backwards(): drops that table and restores the 100-char column.
    ``models`` is South's frozen snapshot of the ORM at migration time.
    """

    def forwards(self, orm):
        # Adding model 'UserSupport'
        db.create_table('myProfiles_usersupport', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['myProfiles.UserProfile'], related_name='support')),
            ('has_basic_support', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('has_monthly_support', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('monthly_hours', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, blank=True, default=0, decimal_places=2)),
        ))
        db.send_create_signal('myProfiles', ['UserSupport'])

        # Changing field 'UserProject.description'
        db.alter_column('myProfiles_userproject', 'description', self.gf('django.db.models.fields.CharField')(max_length=400))

    def backwards(self, orm):
        # Deleting model 'UserSupport'
        db.delete_table('myProfiles_usersupport')

        # Changing field 'UserProject.description'
        db.alter_column('myProfiles_userproject', 'description', self.gf('django.db.models.fields.CharField')(max_length=100))

    # Frozen ORM state below is auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Permission']"})
        },
        'auth.permission': {
            'Meta': {'object_name': 'Permission', 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'related_name': "'user_set'", 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'related_name': "'user_set'", 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'object_name': 'ContentType', 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'myProfiles.userimages': {
            'Meta': {'object_name': 'UserImages'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['myProfiles.UserProfile']", 'related_name': "'images'", 'unique': 'True'}),
            'user_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True', 'null': 'True'})
        },
        'myProfiles.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True', 'default': "''"}),
            'balance_due_date': ('django.db.models.fields.DateField', [], {'blank': 'True', 'null': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True', 'default': "''"}),
            'company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True', 'default': "''"}),
            'current_balance': ('django.db.models.fields.DecimalField', [], {'decimal_places': '2', 'max_digits': '6', 'blank': 'True', 'default': '0.0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '60', 'blank': 'True', 'default': "''"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'default': "''"}),
            'notes': ('django.db.models.fields.TextField', [], {'max_length': '400', 'blank': 'True', 'default': "''"}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True', 'default': "''"}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'total_balance': ('django.db.models.fields.DecimalField', [], {'decimal_places': '2', 'max_digits': '6', 'blank': 'True', 'default': '0.0'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True', 'default': "'http://www.none.com'"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'zipcode': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'blank': 'True', 'default': '1'})
        },
        'myProfiles.userproject': {
            'Meta': {'object_name': 'UserProject'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'default': "'Project'"}),
            'design_url': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'progress_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'default': "'http://www.none.com'"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['myProfiles.UserProfile']", 'related_name': "'project'"})
        },
        'myProfiles.usersupport': {
            'Meta': {'object_name': 'UserSupport'},
            'has_basic_support': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'has_monthly_support': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'monthly_hours': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'blank': 'True', 'default': '0', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['myProfiles.UserProfile']", 'related_name': "'support'"})
        }
    }

    complete_apps = ['myProfiles']
"jaredjamespark@gmail.com"
] | jaredjamespark@gmail.com |
665415ef3bd75fb530c985c7c930cd48611c48cf | a0a92f864201cb60db71121ef74bc2ead0e14494 | /pets/config/database.py | dccd39eec749eddba0a1ec32f096c4b266d0e5a2 | [] | no_license | pedrorio/PetsApi | cae962e053b0425d83a138ca4e7b457784fea6c4 | a7a5dea20bd6ba2442e5ce278812a4103255c323 | refs/heads/main | 2023-04-18T07:07:46.447571 | 2021-04-24T03:48:21 | 2021-04-24T03:48:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import databases
import sqlalchemy
DATABASE_URL = "mysql://localhost/example"
Database = databases.Database(DATABASE_URL)
Engine = sqlalchemy.create_engine(str(Database.url)) | [
"pedrocacaisrio@gmail.com"
] | pedrocacaisrio@gmail.com |
1d050f98b864400e6e443fe081aa9e0fb9b350ef | 06ef40ecee930b7854bc333a9c8fecc5afe289fb | /tutorial/03_Django_and_React/tic_tac_toe/tic_tac_toe/settings.py | 79b603087402800d2f54796435d477f6ae65e643 | [] | no_license | shu-ramen/webdev | 10cfbdca47044c379212ecfe31de2a1adc64f116 | cbca9d32a5a699f5f4060a905965ab8e0139af2e | refs/heads/master | 2020-05-04T21:36:29.111693 | 2019-04-28T10:17:33 | 2019-04-28T10:17:33 | 179,482,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | """
Django settings for tic_tac_toe project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control; move to an environment
# variable before any non-local deployment.
SECRET_KEY = 'wr62=jf1(7ylrh8o$x!sd@#kevlo5maydr)qi=)i==ytcyv2kp'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG is True; must be populated for production.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'game'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'tic_tac_toe.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'tic_tac_toe.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
) | [
"shu.h7.research@gmail.com"
] | shu.h7.research@gmail.com |
fc50cebae1eba552d9f6fcd70e43f8e3211fb392 | 3bbf0336963d4aa7a64bbf52bb10f17bfc7a1753 | /ft_nm/compare_dir.py | a2735dbf7edab5000eaca806397e64a1f210dcb3 | [] | no_license | Ngoguey42/proj19_unix_nm-otool | e5bf4b177b9004a4d861b399aa3cffdf7f77cd7e | 812a2bd693a18b841450a22a53f619b79106b157 | refs/heads/master | 2021-01-10T15:52:49.592782 | 2016-02-23T10:24:58 | 2016-02-23T10:24:58 | 51,018,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | # **************************************************************************** #
# #
# ::: :::::::: #
# compare_dir.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: ngoguey <ngoguey@student.42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2016/02/17 11:25:41 by ngoguey #+# #+# #
# Updated: 2016/02/17 12:09:33 by ngoguey ### ########.fr #
# #
# **************************************************************************** #
import os
import re
import subprocess
from sys import argv
#python compare_dir.py /bin /sbin /usr/lib /usr/sbin /usr/bin
#python compare_dir.py ~/.brew/lib ~/.brew/bin
#python compare_dir.py /Applications/Unity/Unity.app/Contents/PlaybackEngines/iossupport/Trampoline/Libraries/
if __name__ == "__main__":
for dirpath in argv[1:]:
# print "Reading directory: %s" %(dirpath)
# outFile = open("diffs_" + dirpath.replace('/', '-') + ".txt", "w")
for root, dirs, files in os.walk(dirpath):
for file in files:
fpath = "%s/%s" %(root, file)
ref = subprocess.Popen(['nm', '-mp', fpath]
, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
refout, referr = ref.communicate()
mine = subprocess.Popen(['./ft_nm', '-mp', fpath]
, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
mineout, mineerr = mine.communicate()
if mineout != refout:
print "diff over %s" %(fpath)
# outFile.write("diff '%s;\n" %(fpath))
# outFile.flush()
# outFile.close()
| [
"ngoguey@student.42.fr"
] | ngoguey@student.42.fr |
584017b8c603df166692fd584d0144e09d4a261b | 42a0befb594a6c081f3e788016c53889cfa2a9fb | /Codechef/factorial_easy_problem.py | e88719072ec1628291baff54eafc1b59d0b7f57f | [] | no_license | Laksh8/competitive-programming | f436e8c525220ad95ef1c7a9d3aa98b4689d4f92 | 14c20e5cc32263c89a73524ab596efbbba2cc85a | refs/heads/master | 2022-12-24T23:54:16.313515 | 2020-09-08T06:59:11 | 2020-09-08T06:59:11 | 293,727,288 | 2 | 1 | null | 2020-10-04T14:15:38 | 2020-09-08T06:57:35 | Python | UTF-8 | Python | false | false | 207 | py | testcase = int(input())
while testcase > 0:
num = int(input())
sum=0
divisor=5
while (num)>=5:
num = num // divisor
sum = sum + num
print(sum)
testcase = testcase - 1
| [
"lakshitkhanna311@gmail.com"
] | lakshitkhanna311@gmail.com |
edd2690a5b80bee5f27437cef21e1a4995e9a870 | 9a4755588bbe924270e0d92e04d3409281fbaf5b | /main/displayer.py | a0d3b2201298b0f2f77b759cc72098e188f45c3e | [] | no_license | chaobai-li/authenticated-info-displayer | 209488a8229d17b9d67371435e4aa576ef0bb0b3 | c19c6d477a3b96cda3d65f1833d28ade07aff7ba | refs/heads/master | 2021-01-25T11:48:49.936003 | 2018-03-03T11:59:48 | 2018-03-03T11:59:48 | 123,431,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | __pragma__("alias", "S", "$")
class Displayer:
    """Binds database values to DOM elements and toggles them with auth state.

    Transcrypt code: ``S`` is the jQuery ``$`` alias declared via
    ``__pragma__("alias", "S", "$")`` at the top of this file, and
    ``firebase`` is the global Firebase SDK object in the browser.
    """
    def __init__(self, authenticator):
        # Wire auth callbacks: the first login initializes the data bindings,
        # and login/logout show/hide the elements marked for toggling.
        self.authenticator = authenticator
        self.authenticator.eventLogin.append(self.__initialize)
        self.authenticator.eventLogin.append(lambda: self.__toggle(True))
        self.authenticator.eventLogout.append(lambda: self.__toggle(False))
        self.initialized = False
    def __toggle(self, v):
        # Show (v=True) or hide (v=False) every [data-auth-display-toggle] element.
        S('[data-auth-display-toggle]').toggle(v)
    def __initialize(self):
        # Bind each [data-auth-display] element to its database path.
        # Guarded so the bindings are only set up once per page load.
        if self.initialized: return
        self.database = firebase.database()
        interests = list(S('[data-auth-display]'))
        for each in interests:
            path = S(each).attr("data-auth-display")  # database path to observe
            template = S(each).attr("data-auth-display-template")  # optional str.format template
            targetAttr = S(each).attr("data-auth-display-attribute")  # optional attribute to write instead of content
            useHtml = S(each).attr("data-auth-display-html")  # render as HTML rather than text
            self.__bindListener(each, path, template, targetAttr, useHtml)
        self.initialized = True
    def __bindListener(self, domObj, path, template, targetAttr, useHtml):
        # Subscribe to `path`; on every value change, format the value and
        # write it into domObj (attribute, HTML, or plain text).
        if not template:
            template = "{}"  # default template: the raw value, unformatted
        def updater(dbValue):
            text = template.format(dbValue.val())
            if targetAttr:
                S(domObj).attr(targetAttr, text)
            else:
                if useHtml:
                    S(domObj).html(text)
                else:
                    S(domObj).text(text)
        self.database.ref(path).on("value", updater)
| [
"contact@chaobai.li"
] | contact@chaobai.li |
b9554abc231f4c37bde663fe622fb5a85107a16d | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/compute/v20190701/virtual_machine_scale_set_extension.py | ee871d7ddaf93e66afd1b6cd43de3dd831e08f3e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,493 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['VirtualMachineScaleSetExtensionArgs', 'VirtualMachineScaleSetExtension']
@pulumi.input_type
class VirtualMachineScaleSetExtensionArgs:
    """The set of input arguments accepted when constructing a VirtualMachineScaleSetExtension resource.

    Auto-generated by the Pulumi SDK Generator (see the file header): do not edit by hand.
    Each property below delegates storage to pulumi.get/pulumi.set.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 vm_scale_set_name: pulumi.Input[str],
                 auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
                 force_update_tag: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 protected_settings: Optional[Any] = None,
                 provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 publisher: Optional[pulumi.Input[str]] = None,
                 settings: Optional[Any] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 type_handler_version: Optional[pulumi.Input[str]] = None,
                 vmss_extension_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a VirtualMachineScaleSetExtension resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] vm_scale_set_name: The name of the VM scale set where the extension should be create or updated.
        :param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
        :param pulumi.Input[str] force_update_tag: If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
        :param pulumi.Input[str] name: The name of the extension.
        :param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] provision_after_extensions: Collection of extension names after which this extension needs to be provisioned.
        :param pulumi.Input[str] publisher: The name of the extension handler publisher.
        :param Any settings: Json formatted public settings for the extension.
        :param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
        :param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
        :param pulumi.Input[str] vmss_extension_name: The name of the VM scale set extension.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "vm_scale_set_name", vm_scale_set_name)
        # Optional arguments are only recorded when the caller supplied them explicitly.
        if auto_upgrade_minor_version is not None:
            pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
        if force_update_tag is not None:
            pulumi.set(__self__, "force_update_tag", force_update_tag)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if protected_settings is not None:
            pulumi.set(__self__, "protected_settings", protected_settings)
        if provision_after_extensions is not None:
            pulumi.set(__self__, "provision_after_extensions", provision_after_extensions)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if settings is not None:
            pulumi.set(__self__, "settings", settings)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if type_handler_version is not None:
            pulumi.set(__self__, "type_handler_version", type_handler_version)
        if vmss_extension_name is not None:
            pulumi.set(__self__, "vmss_extension_name", vmss_extension_name)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="vmScaleSetName")
    def vm_scale_set_name(self) -> pulumi.Input[str]:
        """
        The name of the VM scale set where the extension should be create or updated.
        """
        return pulumi.get(self, "vm_scale_set_name")
    @vm_scale_set_name.setter
    def vm_scale_set_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_scale_set_name", value)
    @property
    @pulumi.getter(name="autoUpgradeMinorVersion")
    def auto_upgrade_minor_version(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
        """
        return pulumi.get(self, "auto_upgrade_minor_version")
    @auto_upgrade_minor_version.setter
    def auto_upgrade_minor_version(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_upgrade_minor_version", value)
    @property
    @pulumi.getter(name="forceUpdateTag")
    def force_update_tag(self) -> Optional[pulumi.Input[str]]:
        """
        If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
        """
        return pulumi.get(self, "force_update_tag")
    @force_update_tag.setter
    def force_update_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "force_update_tag", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the extension.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="protectedSettings")
    def protected_settings(self) -> Optional[Any]:
        """
        The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
        """
        return pulumi.get(self, "protected_settings")
    @protected_settings.setter
    def protected_settings(self, value: Optional[Any]):
        pulumi.set(self, "protected_settings", value)
    @property
    @pulumi.getter(name="provisionAfterExtensions")
    def provision_after_extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Collection of extension names after which this extension needs to be provisioned.
        """
        return pulumi.get(self, "provision_after_extensions")
    @provision_after_extensions.setter
    def provision_after_extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "provision_after_extensions", value)
    @property
    @pulumi.getter
    def publisher(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the extension handler publisher.
        """
        return pulumi.get(self, "publisher")
    @publisher.setter
    def publisher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "publisher", value)
    @property
    @pulumi.getter
    def settings(self) -> Optional[Any]:
        """
        Json formatted public settings for the extension.
        """
        return pulumi.get(self, "settings")
    @settings.setter
    def settings(self, value: Optional[Any]):
        pulumi.set(self, "settings", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the type of the extension; an example is "CustomScriptExtension".
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="typeHandlerVersion")
    def type_handler_version(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the version of the script handler.
        """
        return pulumi.get(self, "type_handler_version")
    @type_handler_version.setter
    def type_handler_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type_handler_version", value)
    @property
    @pulumi.getter(name="vmssExtensionName")
    def vmss_extension_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the VM scale set extension.
        """
        return pulumi.get(self, "vmss_extension_name")
    @vmss_extension_name.setter
    def vmss_extension_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vmss_extension_name", value)
class VirtualMachineScaleSetExtension(pulumi.CustomResource):
    """Describes a Virtual Machine Scale Set Extension (auto-generated by the Pulumi SDK Generator; do not edit by hand)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
                 force_update_tag: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 protected_settings: Optional[Any] = None,
                 provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 publisher: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 settings: Optional[Any] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 type_handler_version: Optional[pulumi.Input[str]] = None,
                 vm_scale_set_name: Optional[pulumi.Input[str]] = None,
                 vmss_extension_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Describes a Virtual Machine Scale Set Extension.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
        :param pulumi.Input[str] force_update_tag: If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
        :param pulumi.Input[str] name: The name of the extension.
        :param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] provision_after_extensions: Collection of extension names after which this extension needs to be provisioned.
        :param pulumi.Input[str] publisher: The name of the extension handler publisher.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param Any settings: Json formatted public settings for the extension.
        :param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
        :param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
        :param pulumi.Input[str] vm_scale_set_name: The name of the VM scale set where the extension should be create or updated.
        :param pulumi.Input[str] vmss_extension_name: The name of the VM scale set extension.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VirtualMachineScaleSetExtensionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Describes a Virtual Machine Scale Set Extension.
        :param str resource_name: The name of the resource.
        :param VirtualMachineScaleSetExtensionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which overload was used: a typed args object or plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(VirtualMachineScaleSetExtensionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
                 force_update_tag: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 protected_settings: Optional[Any] = None,
                 provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 publisher: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 settings: Optional[Any] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 type_handler_version: Optional[pulumi.Input[str]] = None,
                 vm_scale_set_name: Optional[pulumi.Input[str]] = None,
                 vmss_extension_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VirtualMachineScaleSetExtensionArgs.__new__(VirtualMachineScaleSetExtensionArgs)
            __props__.__dict__["auto_upgrade_minor_version"] = auto_upgrade_minor_version
            __props__.__dict__["force_update_tag"] = force_update_tag
            __props__.__dict__["name"] = name
            __props__.__dict__["protected_settings"] = protected_settings
            __props__.__dict__["provision_after_extensions"] = provision_after_extensions
            __props__.__dict__["publisher"] = publisher
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["settings"] = settings
            __props__.__dict__["type"] = type
            __props__.__dict__["type_handler_version"] = type_handler_version
            if vm_scale_set_name is None and not opts.urn:
                raise TypeError("Missing required property 'vm_scale_set_name'")
            __props__.__dict__["vm_scale_set_name"] = vm_scale_set_name
            __props__.__dict__["vmss_extension_name"] = vmss_extension_name
            __props__.__dict__["provisioning_state"] = None
        # Type-token aliases covering the other API versions of this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20190701:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20170330:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20171201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20171201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20180401:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20180601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20181001:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20190301:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20191201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20200601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20201201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20210301:VirtualMachineScaleSetExtension"), 
pulumi.Alias(type_="azure-nextgen:compute/v20210301:VirtualMachineScaleSetExtension")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(VirtualMachineScaleSetExtension, __self__).__init__(
            'azure-native:compute/v20190701:VirtualMachineScaleSetExtension',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachineScaleSetExtension':
        """
        Get an existing VirtualMachineScaleSetExtension resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = VirtualMachineScaleSetExtensionArgs.__new__(VirtualMachineScaleSetExtensionArgs)
        # All properties start out as None placeholders; they are populated from
        # the looked-up resource's existing state.
        __props__.__dict__["auto_upgrade_minor_version"] = None
        __props__.__dict__["force_update_tag"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["protected_settings"] = None
        __props__.__dict__["provision_after_extensions"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["publisher"] = None
        __props__.__dict__["settings"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["type_handler_version"] = None
        return VirtualMachineScaleSetExtension(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="autoUpgradeMinorVersion")
    def auto_upgrade_minor_version(self) -> pulumi.Output[Optional[bool]]:
        """
        Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
        """
        return pulumi.get(self, "auto_upgrade_minor_version")
    @property
    @pulumi.getter(name="forceUpdateTag")
    def force_update_tag(self) -> pulumi.Output[Optional[str]]:
        """
        If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
        """
        return pulumi.get(self, "force_update_tag")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the extension.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="protectedSettings")
    def protected_settings(self) -> pulumi.Output[Optional[Any]]:
        """
        The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
        """
        return pulumi.get(self, "protected_settings")
    @property
    @pulumi.getter(name="provisionAfterExtensions")
    def provision_after_extensions(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Collection of extension names after which this extension needs to be provisioned.
        """
        return pulumi.get(self, "provision_after_extensions")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def publisher(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the extension handler publisher.
        """
        return pulumi.get(self, "publisher")
    @property
    @pulumi.getter
    def settings(self) -> pulumi.Output[Optional[Any]]:
        """
        Json formatted public settings for the extension.
        """
        return pulumi.get(self, "settings")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="typeHandlerVersion")
    def type_handler_version(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the version of the script handler.
        """
        return pulumi.get(self, "type_handler_version")
| [
"noreply@github.com"
] | noreply@github.com |
ad8af855ab3fd1b05aaba8ed4a163b845ecdce4f | f1f4c01cf737acc0e17680942f52dcfdf6064862 | /lib/sayStream.py | 6a17494b0059ea8211f5a16964b611c605fbddc2 | [] | no_license | ksk001100/legacy-toyotter | 7e9386c0f6275fef753a45c18dceff85d0b50af8 | 2de80d4579f4755ab5f40e568ed9dfb5c1c18e8b | refs/heads/master | 2021-08-20T03:29:24.082716 | 2017-11-28T04:23:48 | 2017-11-28T04:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from oauth import *
import os
import re
import subprocess
from datetime import timedelta
class Listener(StreamListener):
    """Tweepy stream listener that reads suitable incoming tweets aloud via speech.py."""

    def on_status(self, status):
        """Handle one incoming tweet; returns True to keep the stream running."""
        # Shift the timestamp from UTC to JST (UTC+9).
        status.created_at += timedelta(hours=9)
        tweet = status.text
        m = re.search(r'(?<=@)\w+', tweet)
        if m is not None:
            # Highlight the first @mention in magenta (ANSI escape codes).
            tweet = tweet.replace(
                '@' + m.group(0), "\033[35m" + "@" + m.group(0) + "\033[0m")
        tweet = tweet.replace("\n", " ")
        # Bug fix: the original condition `('http' or '@') not in tweet` evaluated
        # `('http' or '@')` to just 'http', so '@' was never checked. Skip tweets
        # containing either links or mentions.
        if 'http' not in tweet and '@' not in tweet:
            if status.retweet_count <= 0:
                # Use an argument list instead of os.system() so untrusted tweet
                # text can never be interpreted by the shell (command injection).
                script = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'speech.py')
                subprocess.call(['python3', script, '%s %s' % (status.author.name, tweet)])
        return True

    def on_error(self, status_code):
        """Log stream errors; returning True keeps the stream alive."""
        print('Got an error with status code: ' + str(status_code))
        return True

    def on_timeout(self):
        """Log stream timeouts; returning True keeps the stream alive."""
        print('Timeout...')
        return True
def main():
    """Open the Twitter user stream with a speech-enabled Listener attached."""
    stream = Stream(getOauth(), Listener())
    stream.userstream()


if __name__ == '__main__':
    main()
| [
"hm.pudding0715@gmail.com"
] | hm.pudding0715@gmail.com |
a86f4e04fd293b02902b13f84e13a6a1da39451e | 8b2af3cff75ba2a6f8557cdea0d852b9076ff6a3 | /day014/venv/Scripts/easy_install-script.py | 765b7c6bbfc60d30e4d3962d2ae52fb465a43cb6 | [] | no_license | AlexYangLong/Foundations-of-Python | 98e5eaf7e7348120049f1ff4bb3d31393ad05592 | bcf3a1fe526140fd2b05283c104488698ebc99fd | refs/heads/master | 2020-03-16T21:45:34.232670 | 2018-05-11T10:19:21 | 2018-05-11T10:19:21 | 133,013,526 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | #!"D:\for python\0413\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Generated setuptools launcher: strip any "-script.py(w)"/".exe" suffix from
# argv[0] so the entry point sees its canonical program name, then invoke the
# 'easy_install' console-script entry point and exit with its return code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
    )
| [
"alex@alex.com"
] | alex@alex.com |
1fd9ca593a5596cd0a5a8ea27932ff3692214f11 | 38b6ca67e47e184197bb6595549b3bdd39533f81 | /web/models.py | e435ca422c39fd2b1e32efb33f84ab7993cf96be | [] | no_license | kirarenctaon/xai_orininal | 763c55c0d1a19f2a178da53bab7ff1d458ce1c72 | f66e5f59b817be321a049afcfc53a73ee2bc1a95 | refs/heads/master | 2021-09-08T01:26:23.778507 | 2018-03-05T05:36:03 | 2018-03-05T05:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,819 | py | from django.db import models
#from django_summernote import models as summer_model
#from django_summernote import fields as summer_fields
# Create your models here.
# - Home
# - About
# - Greeting(page)
# - Member(list)
# - Lab(list)
# - Project(page)
# - Research
# - LectureNote(imagelist,video)
# - LectureVideo(imagelist)
# - DemoResource(imagelist)
# - Publication(textlist)
# - Patent(textlist)
# - Report(textlist)
# - News&Info
# - Notice(textlist)
# - News(imagelist)
# - Gallery(imagelist)
# - Community(board)
# - OPEN SOURCE
# - Github(textlist)
# - Related Project(imagelist)
# - Contact(page)
class TopMenu(models.Model):
    """Top-level navigation menu entry."""
    title = models.CharField(max_length=100)

    # Bug fix: the original declared lowercase `class meta`, which Django
    # ignores entirely; it must be `Meta` for the default ordering to apply.
    class Meta:
        ordering = ['title']

    def __str__(self):
        return self.title
class SubMenu(models.Model):
    # Second-level navigation entry; each SubMenu belongs to exactly one TopMenu.
    title = models.CharField(max_length=100)
    topmenu_id = models.ForeignKey('TopMenu', on_delete=models.PROTECT)  # NOTE(review): a ForeignKey named *_id makes Django expose the raw column as `topmenu_id_id`
    def __str__(self):
        return self.title
# About
class Greeting(models.Model):
    # Greeting page content (site map above: About > Greeting(page)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content= models.TextField()
    image = models.CharField(max_length=100)  # NOTE(review): plain CharField -- presumably an image path/URL, not an uploaded file
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class Member(models.Model):
    # Member profile (site map above: About > Member(list)).
    name = models.CharField(max_length=100)
    position = models.CharField(max_length=100)
    department = models.TextField()
    education = models.TextField()
    career = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    # IMAGE UPLOAD
    testImage = models.ImageField(upload_to="member", default='noImage')  # NOTE(review): camelCase name and 'noImage' default look provisional
    def __str__(self):
        return self.name
class Lab(models.Model):
    # Research lab entry (site map above: About > Lab(list)).
    name = models.CharField(max_length=100, null=True)  # NOTE(review): nullable, so __str__ below can return None
    professor = models.CharField(max_length=45)
    research_on = models.TextField()
    link = models.CharField(max_length=100)
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.name
class Project(models.Model):
    # Project page content (site map above: About > Project(page)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class DemoResource(models.Model):
    # Demo/resource item (site map above: Research > DemoResource(imagelist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class Publication(models.Model):
    # Publication entry (site map above: Research > Publication(textlist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class Patent(models.Model):
    # Patent entry (site map above: Research > Patent(textlist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
# News&Info
class Notice(models.Model):
    # Notice entry (site map above: News&Info > Notice(textlist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    contentk = models.TextField()  # NOTE(review): `contentk` looks like a typo for `content` (every sibling model uses `content`); renaming requires a migration
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class News(models.Model):
    # News entry (site map above: News&Info > News(imagelist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class Gallery(models.Model):
    # Gallery entry (site map above: News&Info > Gallery(imagelist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class Community(models.Model):
    # Community board post (site map above: Community(board)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class Github(models.Model):
    # GitHub link entry (site map above: OPEN SOURCE > Github(textlist)).
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=45)
    date = models.DateField()
    content = models.TextField()
    image = models.CharField(max_length=100)
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class RelatedProject(models.Model):
    # Related external project (site map above: OPEN SOURCE > Related Project(imagelist)).
    title = models.CharField(max_length=200, null=True)  # NOTE(review): nullable, so __str__ below can return None
    Institutions = models.CharField(max_length=45)  # NOTE(review): CapWords field names break the snake_case convention; renaming requires a migration
    Authors = models.CharField(max_length=200, null=True)
    Publication_title = models.CharField(max_length=200, null=True)
    Publication_link = models.CharField(max_length=200, null=True)
    Sourcecode =models.CharField(max_length=200, null=True)
    image = models.ImageField(upload_to='RelatedProject/')
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
    def __str__(self):
        return self.title
class AutoNews(models.Model):
    # Automatically generated news article with raw/predicted images and a PDF report.
    company = models.CharField(max_length=50)
    title = models.CharField(max_length=100)
    writer = models.CharField(max_length=50, default='설명가능인공지능')  # default author name (Korean: "explainable AI")
    datetime = models.DateTimeField()  # NOTE(review): field name shadows the stdlib `datetime` module name
    content = models.TextField()
    image_raw = models.ImageField(upload_to='AutomaticNews/%Y/%m/%d')  # uploads are date-bucketed
    image_predict = models.ImageField(upload_to='AutomaticNews/%Y/%m/%d')
    report_pdf = models.FileField(upload_to='AutomaticNews/%Y/%m/%d')
    submenu_id = models.ForeignKey('SubMenu', on_delete=models.PROTECT)
def __str__(self):
return self.title | [
"kirarenctaon@gmail.com"
] | kirarenctaon@gmail.com |
8c4c74718986652edcd39437112ef420294e692a | 3c197f583a507ebeed662504a1ac0ce79842e4e6 | /jpx_scoreboard/mic_test.py | 26252b9baa48a34123aa7813196e93f15652cb6e | [] | no_license | SamuelFicke/jpx_scoreboard | 02e469e2fed592bdc46e131b0310eb6fbb69b9f5 | 976f953d73b32441faf43cbc3d9f9a1887bec6b8 | refs/heads/master | 2020-03-25T08:52:16.590637 | 2018-08-05T17:59:21 | 2018-08-05T17:59:21 | 143,635,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py |
import speech_recognition as sr
for index, name in enumerate(sr.Microphone.list_microphone_names()):
print("Microphone with name \"{1}\" found for `Microphone(device_index={0})`".format(index, name))
| [
"noreply@github.com"
] | noreply@github.com |
36fccbf050fa7ee2780c63073c2ce69a3bf45a94 | d21c434ee00bb7855923fa85c53e1f6b905f8beb | /common/schedculer.py | 64046f19400b59cd1d0d07d93de1aa13477a7e7b | [] | no_license | lfsoftware13/SequenceGraph | 32263b5095a3e4502326ca84e9e1a6f09c08333f | b44d5a942855a1ffcd7b5bbd37ebbe1577ca0737 | refs/heads/master | 2020-03-21T08:52:18.811299 | 2018-07-08T07:29:40 | 2018-07-08T07:29:40 | 138,370,359 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from torch.optim.lr_scheduler import LambdaLR
class LinearScheduler(LambdaLR):
def __init__(self, optimizer, total_epoch, warm_up, last_epoch=-1):
def rate(epoch):
x = epoch/total_epoch
s = (x <= warm_up)
return (s*(x/warm_up)+(1-s))*(1-x)
super().__init__(optimizer, rate, last_epoch)
| [
"liweiwuhome@hotmail.com"
] | liweiwuhome@hotmail.com |
cf550ea8b5a7f3638b9bec4ef4e8ec1e243f0ce3 | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /functions_legacy/FitVAR1.py | 348409cfa3620731799498087218091ba4892c20 | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import matplotlib.pyplot as plt
from numpy import ones, diff, eye
from RobustLassoFPReg import RobustLassoFPReg
def FitVAR1(X, p=None, nu=10**9, lambda_beta=0, lambda_phi=0, flag_rescale=0):
    # Estimate the one-step parameters of a VAR(1) process via robust lasso
    # regression on the first differences:  dX_t = alpha + beta * X_{t-1} + U_t,
    # which is returned in levels form       X_t = alpha + (I + beta) * X_{t-1} + U_t.
    # INPUTS
    #  X            : [matrix] (n_ x t_end) historical series of the variables
    #  p            : [vector] (1 x t_end-1) flexible probabilities (uniform when None)
    #  nu           : [scalar] degrees of freedom of the multivariate Student t
    #  lambda_beta  : [scalar] lasso penalty on the loadings
    #  lambda_phi   : [scalar] glasso penalty on the covariance matrix
    #  flag_rescale : [boolean flag] if 0 (default), the series is not rescaled before estimation
    # OPS
    #  output1 : [vector] (n_ x 1)  drift alpha
    #  output2 : [matrix] (n_ x n_) transition matrix (I + beta)
    #  output3 : [matrix] (n_ x n_) residual dispersion sig2_U

    ## Code
    dX = diff(X,1,1)
    n_, t_ = dX.shape

    if p is None:
        p = ones((1,t_))/t_

    # robust lasso + glasso regression of dX on the lagged levels X[:, :-1].
    # NOTE(review): RobustLassoFPReg is project-local; argument order assumed
    # from this call site (tolerance hard-coded to 1e-6) -- confirm against its signature.
    alpha, beta, sig2_U = RobustLassoFPReg(dX, X[:,:-1], p, nu, 10**-6, lambda_beta, lambda_phi, flag_rescale)

    output1 = alpha
    output2 = (eye(n_)+beta)
    output3 = sig2_U
    return output1, output2, output3
| [
"dario.popadic@yahoo.com"
] | dario.popadic@yahoo.com |
d3a5d9b1a751b322b7ebd80ac2137a9f3c33a870 | d25b5b9524832c51d2da91939cc46336650173bf | /Code_stage/pixel_fusion_neuro.py | 5761da0391c31f9ba0176d0c9fce4d85f00012c3 | [] | no_license | Dimitri78000/Code_perso | 177b1b5f91b29c51ab0145d7f9bdaa38118b6f56 | a19f1928ddad457abfdea261bf8c248a73f42d0d | refs/heads/master | 2021-05-09T09:41:11.406365 | 2018-07-30T09:29:27 | 2018-07-30T09:29:27 | 119,448,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,843 | py | #
# To make it work, change the variable path line ~340, and install module if necessary
## Import
# install numpy scipy pyqt matplotlib pandas sympy opencv
import numpy as np
import matplotlib.pyplot as plt
import time
import random as rd
import cv2
import os
import sys

# Print full arrays instead of NumPy's "..." summarized form.
# BUG FIX: the original passed threshold=np.nan, which NumPy >= 1.14 rejects
# ("threshold must be numeric and non-NAN"); sys.maxsize is the documented
# replacement for disabling summarization entirely.
np.set_printoptions(threshold=sys.maxsize)
## Required functions
def sigmoid(Z):
    """
    Numpy sigmoid activation.

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- sigmoid(Z) applied element-wise, same shape as Z
    cache -- Z itself, kept so the backward pass can recompute the local gradient
    """
    cache = Z
    A = 1 / (1 + np.exp(-Z))
    return A, cache
def relu(Z):
    """
    Numpy RELU activation.

    Arguments:
    Z -- output of the linear layer, any shape

    Returns:
    A -- max(0, Z) element-wise, same shape as Z
    cache -- Z itself, kept for the backward pass
    """
    cache = Z
    A = np.maximum(0, Z)
    assert A.shape == Z.shape
    return A, cache
def relu_backward(dA, cache):
    """
    Backward propagation for a single RELU unit.

    Arguments:
    dA -- post-activation gradient, any shape
    cache -- Z stored during the forward pass

    Returns:
    dZ -- gradient of the cost with respect to Z (dA where Z > 0, else 0)
    """
    Z = cache
    # The ReLU derivative is 1 for Z > 0 and 0 elsewhere; multiplying by the
    # boolean mask zeroes the gradient exactly where the unit was inactive.
    dZ = dA * (Z > 0)
    assert dZ.shape == Z.shape
    return dZ
def sigmoid_backward(dA, cache):
    """
    Backward propagation for a single SIGMOID unit.

    Arguments:
    dA -- post-activation gradient, any shape
    cache -- Z stored during the forward pass

    Returns:
    dZ -- gradient of the cost with respect to Z, i.e. dA * s * (1 - s)
          where s = sigmoid(Z)
    """
    Z = cache
    sig = 1 / (1 + np.exp(-Z))
    dZ = dA * sig * (1 - sig)
    assert dZ.shape == Z.shape
    return dZ
## Core functions
def initialize_parameters_he(layers_dims, init_multiplier=1):
    """
    He initialization: weights are drawn from N(0, 1) and scaled by
    init_multiplier * sqrt(2 / fan_in); biases start at zero.

    Arguments:
    layers_dims -- list of layer sizes, input layer first
    init_multiplier -- extra scalar applied on top of the He factor

    Returns:
    parameters -- dict with "W1".."WL" of shape (layers_dims[l], layers_dims[l-1])
                  and "b1".."bL" of shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed: identical weights on every call
    parameters = {}
    num_layers = len(layers_dims) - 1

    for layer in range(1, num_layers + 1):
        rows, cols = layers_dims[layer], layers_dims[layer - 1]
        parameters['W' + str(layer)] = np.random.randn(rows, cols) * init_multiplier * (np.sqrt(2. / cols))
        parameters['b' + str(layer)] = np.zeros((rows, 1))

    return parameters
def initialize_parameters_deep(layer_dims, init_multiplier):
    """
    Plain scaled-random initialization: weights ~ N(0, 1) * init_multiplier,
    biases zero.

    Arguments:
    layer_dims -- list of layer sizes, input layer first
    init_multiplier -- scalar applied to the random weights

    Returns:
    parameters -- dict with "Wl" of shape (layer_dims[l], layer_dims[l-1])
                  and "bl" of shape (layer_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed: identical weights on every call
    parameters = {}

    for layer in range(1, len(layer_dims)):
        rows, cols = layer_dims[layer], layer_dims[layer - 1]
        parameters['W' + str(layer)] = np.random.randn(rows, cols) * init_multiplier
        parameters['b' + str(layer)] = np.zeros((rows, 1))
        assert parameters['W' + str(layer)].shape == (rows, cols)
        assert parameters['b' + str(layer)].shape == (rows, 1)

    return parameters
def linear_forward(A, W, b):
    """
    Linear part of a layer's forward propagation: Z = W.A + b.

    Arguments:
    A -- activations of the previous layer, shape (prev layer size, m examples)
    W -- weight matrix, shape (current layer size, prev layer size)
    b -- bias column vector, shape (current layer size, 1)

    Returns:
    Z -- pre-activation input, shape (current layer size, m examples)
    cache -- the tuple (A, W, b), stored for the backward pass
    """
    Z = W @ A + b
    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Forward pass LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID
    (fixed 3-layer network).

    Arguments:
    X -- input data, shape (input size, number of examples)
    parameters -- dict with W1/b1, W2/b2, W3/b3
    keep_prob -- probability of keeping a neuron active (inverted dropout)

    Returns:
    A3 -- output activations of the sigmoid layer
    cache -- tuple (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
             kept for the matching backward pass
    """
    np.random.seed(1)  # fixed seed: deterministic dropout masks

    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # BUG FIX: the original wrote `A1 = relu(Z1)` / `A3 = sigmoid(Z3)`, but in
    # this file relu()/sigmoid() return an (A, cache) tuple, so A1/A3 became
    # tuples and the subsequent `.shape` access crashed.  The activations are
    # computed directly here, keeping the block self-contained.
    Z1 = np.dot(W1, X) + b1
    A1 = np.maximum(0, Z1)
    # Inverted dropout: 0/1 mask (vectorized; the original filled the mask with
    # an O(n^2) Python loop), then rescale so the expected activation is unchanged.
    D1 = (np.random.rand(A1.shape[0], A1.shape[1]) < keep_prob).astype(float)
    A1 = A1 * D1
    A1 = A1 / keep_prob

    Z2 = np.dot(W2, A1) + b2
    A2 = np.maximum(0, Z2)
    D2 = (np.random.rand(A2.shape[0], A2.shape[1]) < keep_prob).astype(float)
    A2 = A2 * D2
    A2 = A2 / keep_prob

    # No dropout on the output layer.
    Z3 = np.dot(W3, A2) + b3
    A3 = 1 / (1 + np.exp(-Z3))

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache
def linear_activation_forward(A_prev, W, b, activation):
    """
    Forward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations of the previous layer, shape (prev size, m examples)
    W -- weight matrix, shape (current size, prev size)
    b -- bias vector, shape (current size, 1)
    activation -- "sigmoid" or "relu"

    Returns:
    A -- post-activation value
    cache -- (linear_cache, activation_cache) for the backward pass
    """
    # Both branches share the linear step; only the non-linearity differs.
    Z, linear_cache = linear_forward(A_prev, W, b)

    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)

    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, activation_cache)
def L_model_forward(X, parameters):
    """
    Forward propagation through [LINEAR->RELU]*(L-1) -> LINEAR->SIGMOID.

    Arguments:
    X -- input data, shape (input size, number of examples)
    parameters -- weights/biases dict produced by an initializer

    Returns:
    AL -- activations of the final sigmoid layer
    caches -- per-layer caches, hidden (relu) layers first, output layer last
    """
    caches = []
    A = X
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    # Hidden layers: LINEAR -> RELU.
    for layer in range(1, num_layers):
        A, layer_cache = linear_activation_forward(
            A, parameters["W" + str(layer)], parameters["b" + str(layer)], activation='relu')
        caches.append(layer_cache)

    # Output layer: LINEAR -> SIGMOID.
    AL, layer_cache = linear_activation_forward(
        A, parameters["W" + str(num_layers)], parameters["b" + str(num_layers)], activation='sigmoid')
    caches.append(layer_cache)

    return AL, caches
def compute_cost(AL, Y):
    """
    Cross-entropy cost averaged over the mini-batch.

    Arguments:
    AL -- predicted probabilities, shape (1, number of examples)
    Y -- ground-truth 0/1 labels, same shape as AL

    Returns:
    cost -- scalar (0-d) cross-entropy cost
    """
    # change_zeros_and_ones_y is defined elsewhere in this file; presumably it
    # nudges exact 0/1 predictions off the boundary so log() stays finite -- TODO confirm.
    AL = change_zeros_and_ones_y(AL)
    m = Y.shape[1]

    per_example = np.multiply(Y, np.log(AL)) + np.multiply((1-Y), np.log( 1-AL))
    cost = np.squeeze((-1./ m) * np.sum(per_example))
    assert cost.shape == ()
    return cost
def compute_cost_2(a3, Y):
    """
    Cross-entropy cost (no clipping of a3, unlike compute_cost).

    Arguments:
    a3 -- post-activation output of the forward pass
    Y -- ground-truth labels, same shape as a3

    Returns:
    cost -- scalar cross-entropy cost
    """
    m = Y.shape[1]
    logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)
    return 1./m * np.sum(logprobs)
def compute_cost_with_regularization(A3, Y, parameters, lambd=0.1):
    """
    Cross-entropy cost plus L2 weight penalty (fixed 3-layer network).

    Arguments:
    A3 -- output activations, shape (output size, number of examples)
    Y -- ground-truth labels, same shape as A3
    parameters -- dict holding W1, W2, W3 (and biases, which are not penalized)
    lambd -- L2 regularization strength

    Returns:
    cost -- cross-entropy cost + (lambd / 2m) * sum of squared weights
    """
    m = Y.shape[1]

    # Data-fit term delegated to the unregularized cost.
    cross_entropy_cost = compute_cost(A3, Y)

    # Penalty over the three weight matrices only (biases excluded).
    squared_norms = sum(np.sum(np.square(parameters["W" + str(k)])) for k in (1, 2, 3))
    L2_regularization_cost = (1. / m) * (lambd / 2) * squared_norms

    return cross_entropy_cost + L2_regularization_cost
def linear_backward(dZ, cache):
    """
    Linear portion of backward propagation for a single layer.

    Arguments:
    dZ -- gradient of the cost w.r.t. the linear output of this layer
    cache -- (A_prev, W, b) from the forward pass of this layer

    Returns:
    dA_prev -- gradient w.r.t. the previous layer's activations, same shape as A_prev
    dW -- gradient w.r.t. W, same shape as W
    db -- gradient w.r.t. b, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    dW = (1. / m) * np.dot(dZ, A_prev.T)
    db = (1. / m) * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    """
    Backward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient of the current layer
    cache -- (linear_cache, activation_cache) from the forward pass
    activation -- "sigmoid" or "relu"

    Returns:
    dA_prev -- gradient w.r.t. the previous layer's activations
    dW -- gradient w.r.t. this layer's weights
    db -- gradient w.r.t. this layer's biases
    """
    linear_cache, activation_cache = cache

    # Undo the non-linearity first, then the shared linear step.
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)

    return linear_backward(dZ, linear_cache)
def L_model_backward(AL, Y, caches):
    """
    Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group

    Arguments:
    AL -- probability vector, output of the forward propagation (L_model_forward())
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
    caches -- list of caches containing:
                every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
                the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])

    Returns:
    grads -- A dictionary with the gradients
             grads["dA" + str(l)] = ...
             grads["dW" + str(l)] = ...
             grads["db" + str(l)] = ...
             Indexing convention: dW/db use the layer number (1..L);
             grads["dA" + str(l)] holds the gradient flowing INTO layer l,
             i.e. the gradient w.r.t. the activations of layer l-1.
    """
    grads = {}
    L = len(caches) # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL

    # Initializing the backpropagation: derivative of the cross-entropy cost
    # w.r.t. AL.  NOTE(review): divides by AL and (1 - AL), so an output of
    # exactly 0 or 1 produces inf/nan here -- upstream clipping assumed.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
    current_cache = caches[-1]
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation="sigmoid")

    # Walk the hidden (RELU) layers from L-1 down to 1; grads["dA" + str(l + 2)]
    # is the gradient produced by the layer above on the previous iteration.
    for l in reversed(range(L-1)):
        # lth layer: (RELU -> LINEAR) gradients.
        # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation="relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads
def update_parameters(parameters, grads, learning_rate):
    """
    One plain gradient-descent step.

    Arguments:
    parameters -- dict of weights/biases ("W1", "b1", ...)
    grads -- matching gradients ("dW1", "db1", ...) from L_model_backward
    learning_rate -- step size

    Returns:
    parameters -- the same dict with every Wl and bl stepped along -gradient
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        # Rebind (rather than mutate in place) to match the original's semantics.
        parameters[w_key] = parameters[w_key] - learning_rate * grads["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]

    return parameters
def L_layer_model(X, Y, layers_dims, init_multiplier=0.01, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
    """
    Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    Arguments:
    X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    init_multiplier -- scale factor passed to initialize_parameters_deep
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    np.random.seed(1)
    costs = []                         # keep track of cost

    # Parameters initialization (plain scaled-random init, not He).
    #parameters = initialize_parameters_deep(layers_dims)
    parameters = initialize_parameters_deep(layers_dims,init_multiplier)

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        AL, caches = L_model_forward(X, parameters)
        # Debug aid: dump the raw output activations of the very last iteration.
        if i==(num_iterations-1):
            print(AL)
        # Compute cost.
        cost = compute_cost(AL, Y)

        # Backward propagation.
        grads = L_model_backward(AL, Y, caches)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate=learning_rate)

        # Print the cost every 100 training example
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
        # NOTE(review): costs is only populated when print_cost is True, so the
        # plot below is empty for the default print_cost=False.
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
def predict(X, y, parameters):
    """
    Label a dataset with a trained L-layer network.

    Arguments:
    X -- examples to label, shape (input size, number of examples)
    y -- ground-truth labels, used only to print the accuracy
    parameters -- trained weights/biases

    Returns:
    p -- 0/1 predictions, shape (1, number of examples)
    """
    m = X.shape[1]
    n = len(parameters) // 2  # network depth (kept for parity with the original)
    p = np.zeros((1, m))

    # Forward pass, then threshold the output probabilities at 0.5.
    probas, caches = L_model_forward(X, parameters)
    p[0, probas[0, :] > 0.5] = 1

    print("Accuracy: " + str(np.sum((p == y)/m)))
    return p
##
def create_x_and_y_for_test_version2(parameters,video_num,image_num,steps=1):
    """Run the trained network on one (video, image) pair and display the result.

    Loads the four small ("3s_") conspicuity maps (intensity, color, motion,
    orientation), displays them alongside the eye-tracker ground-truth images,
    flattens the sampled pixels into one feature row per map, feeds the whole
    image as a single example through the network, and rebuilds an 8x10
    predicted saliency map.

    parameters -- trained network weights (dict W1/b1 ... as trained elsewhere)
    video_num  -- video index, 1..7
    image_num  -- frame index, 1..30
    steps      -- sampling stride over the source pixels

    Returns (y_neuro, y_neuro_visuel): (8,10) arrays of 0/1 and 0/255 values.
    """
    assert(video_num>= 1 and video_num<= 7)
    assert(image_num>= 1 and image_num<= 30)
    flag_create_array_x=True
    # Load each feature map and show it upscaled to 360x288 for visual inspection.
    img_i = cv2.imread(path+"/v"+str(video_num)+"/intensity/3s_i"+str(image_num)+".jpg", 0)
    resized_img_i = cv2.resize(img_i, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_i",resized_img_i)
    img_c = cv2.imread(path+"/v"+str(video_num)+"/color/3s_c"+str(image_num)+".jpg", 0)
    resized_img_c = cv2.resize(img_c, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_c",resized_img_c)
    img_m = cv2.imread(path+"/v"+str(video_num)+"/motion/3s_m"+str(image_num)+".jpg", 0)
    resized_img_m = cv2.resize(img_m, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_m",resized_img_m)
    img_o = cv2.imread(path+"/v"+str(video_num)+"/orientation/3s_o"+str(image_num)+".jpg", 0)
    resized_img_o = cv2.resize(img_o, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_o",resized_img_o)
    # Ground-truth eye-tracker images, shown for side-by-side comparison only.
    img_ssxs = cv2.imread(path+"/v"+str(video_num)+"/eye_tracker/ss_xs_visual_"+str(image_num)+".jpg", 0)
    resized_img_ssxs = cv2.resize(img_ssxs, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_ssxs",resized_img_ssxs)
    img_eye_tracker = cv2.imread(path+"/v"+str(video_num)+"/eye_tracker/x"+str(image_num)+".jpg", 0)
    resized_img_eye_tracker = cv2.resize(img_eye_tracker, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_eye_tracker",resized_img_eye_tracker)
    img_eye_tracker_ss = cv2.imread(path+"/v"+str(video_num)+"/eye_tracker/ss_x"+str(image_num)+".jpg", 0)
    resized_img_eye_tracker_ss = cv2.resize(img_eye_tracker_ss, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_eye_tracker_ss",resized_img_eye_tracker_ss)
    img_eye_tracker_ss_real = cv2.imread(path+"/v"+str(video_num)+"/eye_tracker/ss_xs"+str(image_num)+".jpg", 0)
    resized_img_eye_tracker_ssxs = cv2.resize(img_eye_tracker_ss_real, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow("img_eye_tracker_ssxs",resized_img_eye_tracker_ssxs)
    # Build a single (1, 4 * n_sampled_pixels) feature row by concatenating the
    # 4-tuple of map values for every sampled pixel.  O(n^2) due to repeated
    # concatenation, and the pixel reads below are duplicated inside both branches.
    for i in range (0, img_i.shape[0],steps): # lign all images have the same shape, images["v1i1"] or images["v5m19"]
        for j in range(0, img_i.shape[1],steps): # colomn
            pixel_i = img_i[i][j]
            pixel_c = img_c[i][j]
            pixel_m = img_m[i][j]
            pixel_o = img_o[i][j]
            if flag_create_array_x:
                pixel_i = img_i[i][j]
                pixel_c = img_c[i][j]
                pixel_m = img_m[i][j]
                pixel_o = img_o[i][j]
                array_x=np.array([[pixel_i,pixel_c,pixel_m,pixel_o]])
                flag_create_array_x=False
            else:
                pixel_i = img_i[i][j]
                pixel_c = img_c[i][j]
                pixel_m = img_m[i][j]
                pixel_o = img_o[i][j]
                array_x=np.concatenate(( array_x , np.array([[pixel_i,pixel_c,pixel_m,pixel_o]]) ), axis=1)
    # array_x.T is a single column -- the whole frame is one example.
    probas, caches = L_model_forward(array_x.T, parameters)
    print("Construction de y_neuro et y_neuro_visuel :")
    y_neuro=np.zeros((8,10))
    y_neuro_visuel=np.zeros((8,10))
    # NOTE(review): with a single example, probas has shape (n_outputs, 1), so
    # probas.shape[1] is 1 here rather than the 80 the comment below expects;
    # iterating probas.shape[0] / indexing probas[k][0] may have been intended.
    for k in range (0,probas.shape[1]): # probas.shape[1] = 10 * 8 = 80
        i=k//10
        j=k%10
        if round(probas[0][k])==0:
            y_neuro[i][j] = 0
            y_neuro_visuel[i][j] =0
        else:
            y_neuro[i][j] = 1
            y_neuro_visuel[i][j] = 255
    resized_img_y = cv2.resize(y_neuro_visuel, (360, 288),interpolation = cv2.INTER_AREA)
    cv2.imshow('image_created_by_neuro',resized_img_y)
    return y_neuro,y_neuro_visuel
def create_x_and_y_for_test(images,parameters,video_num,image_num,steps=1):
    """Per-pixel evaluation of the trained network on one (video, image) pair.

    Each sampled pixel becomes one 6-feature example (i, j, intensity, color,
    motion, orientation); the network labels every pixel and the predictions
    are reassembled into a 288x360 binary saliency image that is displayed.

    images     -- unused here (kept for interface compatibility with callers)
    parameters -- trained network weights
    video_num  -- video index, 1..7
    image_num  -- frame index, 1..30
    steps      -- pixel sampling stride

    Returns (y_neuro, y_neuro_visuel): (288,360) arrays of 0/1 and 0/255 values.
    """
    assert(video_num>= 1 and video_num<= 7)
    assert(image_num>= 1 and image_num<= 30)
    flag_create_x_y=True
    img_i = cv2.imread(path+"/v"+str(video_num)+"/intensity/s_i"+str(image_num)+".jpg", 0)
    img_c = cv2.imread(path+"/v"+str(video_num)+"/color/s_c"+str(image_num)+".jpg", 0)
    img_m = cv2.imread(path+"/v"+str(video_num)+"/motion/s_m"+str(image_num)+".jpg", 0)
    img_o = cv2.imread(path+"/v"+str(video_num)+"/orientation/s_o"+str(image_num)+".jpg", 0)
    img_xs= cv2.imread(path+"/v"+str(video_num)+"/eye_tracker/s_xs"+str(image_num)+".jpg", 0)
    # One column per pixel; repeated np.concatenate makes this O(n^2).
    for i in range (0, img_i.shape[0],steps): # lign all images have the same shape, images["v1i1"] or images["v5m19"]
        for j in range(0, img_i.shape[1],steps): # colomn
            pixel_i = img_i[i][j]
            pixel_c = img_c[i][j]
            pixel_m = img_m[i][j]
            pixel_o = img_o[i][j]
            pixel_xs = img_xs[i][j]
            array_x=np.array([[i,j,pixel_i,pixel_c,pixel_m,pixel_o]]).T #two [[]] against weird "rank one" array with numpy librairy
            #array_x=np.array([[pixel_i,pixel_c,pixel_m,pixel_o]]) #two [[]] against weird "rank one" array with numpy librairy
            array_y=np.array([[pixel_xs]])
            if (flag_create_x_y):
                x,y=array_x,array_y
                flag_create_x_y=False
            else:
                x = np.concatenate((x, array_x), axis=1) # We can't use np.append because it create weird "rank 1 shape = (6,)" array
                y = np.concatenate((y, array_y), axis=1)
    probas, caches = L_model_forward(x, parameters)
    print("Construction de y_neuro et y_neuro_visuel :")
    y_neuro=np.zeros((288,360))
    y_neuro_visuel=np.zeros((288,360))
    # Rebuild the image row-major; assumes steps=1 and a 360-pixel-wide source,
    # otherwise k//360 mis-addresses the output -- TODO confirm callers use steps=1.
    for k in range (0,probas.shape[1]): # probas.shape[1] = 360 * 288 = 103680
        i=k//360
        j=k%360
        if round(probas[0][k])==0:
            y_neuro[i][j] = 0
            y_neuro_visuel[i][j] =0
        else:
            y_neuro[i][j] = 1
            y_neuro_visuel[i][j] = 255
    cv2.imshow('image',y_neuro_visuel)
    cv2.resizeWindow('image', 720,576)
    return y_neuro,y_neuro_visuel
def create_eye_tracker_with_neuro(x, parameters):
    # NOTE(review): unfinished stub -- it computes the forward pass but never
    # uses `img` nor returns anything.  Also, images["blank"] holds a numpy
    # array (see create_librairy_images) while cv2.imread expects a file path,
    # so the first line likely returns None -- confirm intent before using.
    img = cv2.imread(images["blank"],0)
    probas, caches = L_model_forward(x, parameters)
#def print_mislabeled_images(classes, X, y, p):
## Path !! YOU HAVE TO CHANGE IT DO YOUR CONFIGURATION !!
# Absolute path to the dataset root (folders v1..v7 with intensity/color/motion/
# orientation/eye_tracker subfolders); every I/O helper below builds on it.
path="C:/Users/dimit/Documents/GitHub/Code_perso/Code_stage/stage_saillance_fusion_neuro" # Use : '/', Don't use : '\'
## Resize in smaller picture
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
    """
    Resize an image while preserving its aspect ratio.

    Arguments:
    image -- source image (numpy array, as read by cv2)
    width -- target width in pixels; height is derived from the aspect ratio
    height -- target height, used only when width is None
    inter -- cv2 interpolation flag

    Returns:
    the resized image, or the original image when neither dimension is given
    """
    (h, w) = image.shape[:2]

    # Nothing requested: hand the original back untouched.
    if width is None and height is None:
        return image

    if width is None:
        # Height drives the scale; derive the width.
        ratio = height / float(h)
        dim = (int(w * ratio), height)
    else:
        # Width drives the scale (also wins when both are supplied).
        ratio = width / float(w)
        dim = (width, int(h * ratio))

    return cv2.resize(image, dim, interpolation = inter)
def resize_data_set():
    """Batch-generate downscaled eye-tracker images for the whole dataset.

    For every video (1..7) and frame (1..30), reads eye_tracker/x<i>.jpg and
    writes a 10-pixel-wide copy as eye_tracker/ss_x<i>.jpg.  The triple-quoted
    string below is the earlier variant (all four feature maps at width 90,
    eye tracker at 45), kept by the author as dead reference code.
    """
    for k in range(1,7+1): #Number of video
        for i in range (1, 30+1): #Number of picture in each video, limited by eye_tracker
            """
            img_i = cv2.imread(path+"/v"+str(k)+"/intensity/i"+str(i)+".jpg", 0)
            cv2.imwrite(path+"/v"+str(k)+"/intensity/3s_i"+str(i)+".jpg",image_resize(img_i,width=90) )
            img_c = cv2.imread(path+"/v"+str(k)+"/color/c"+str(i)+".jpg", 0)
            cv2.imwrite(path+"/v"+str(k)+"/color/3s_c"+str(i)+".jpg",image_resize(img_c,width=90) )
            img_m = cv2.imread(path+"/v"+str(k)+"/motion/m"+str(i)+".jpg", 0)
            cv2.imwrite(path+"/v"+str(k)+"/motion/3s_m"+str(i)+".jpg",image_resize(img_m,width=90) )
            img_o = cv2.imread(path+"/v"+str(k)+"/orientation/o"+str(i)+".jpg", 0)
            cv2.imwrite(path+"/v"+str(k)+"/orientation/3s_o"+str(i)+".jpg",image_resize(img_o,width=90) )
            img_x = cv2.imread(path+"/v"+str(k)+"/eye_tracker/x"+str(i)+".jpg", 0)
            cv2.imwrite(path+"/v"+str(k)+"/eye_tracker/4s_x"+str(i)+".jpg",image_resize(img_x,width=45) )
            """
            img_x = cv2.imread(path+"/v"+str(k)+"/eye_tracker/x"+str(i)+".jpg", 0)
            copy_img_x=np.copy(img_x)
            cv2.imwrite(path+"/v"+str(k)+"/eye_tracker/ss_x"+str(i)+".jpg",image_resize(copy_img_x,width=10) )
## Creating eye_tracker_simplified in 0 and 1
def clear_eye_tracker_data_base():
    """Delete the generated eye-tracker images for every video/frame.

    Currently removes only the ss_xs_visual_<i>.jpg and ss_xs<i>.jpg files;
    the other os.remove lines are kept commented out for the variants that
    were generated in earlier runs.  Raises FileNotFoundError if a target
    file does not exist (os.remove behavior).
    """
    for k in range(1,7+1): #Number of video
        for i in range (1, 30+1): #Number of picture in each video, limited by eye_tracker
            #os.remove(path+"/v"+str(k)+"/eye_tracker/4s_x"+str(i)+".jpg")
            #os.remove(path+"/v"+str(k)+"/eye_tracker/4s_xs"+str(i)+".jpg")
           # os.remove(path+"/v"+str(k)+"/eye_tracker/s_xs_visual"+str(i)+".jpg")
            os.remove(path+"/v"+str(k)+"/eye_tracker/ss_xs_visual_"+str(i)+".jpg")
           # os.remove(path+"/v"+str(k)+"/eye_tracker/ss_x"+str(i)+".jpg")
            os.remove(path+"/v"+str(k)+"/eye_tracker/ss_xs"+str(i)+".jpg")
            #os.remove(path+"/v"+str(k)+"/eye_tracker/xs_visual_"+str(i)+".jpg")
            #os.remove(path+"/v"+str(k)+"/eye_tracker/xs"+str(i)+".jpg")
def create_eye_tracker_simplified(seuil_div):
    """Binarize every small eye-tracker image of the dataset.

    For each video/frame, thresholds ss_x<i>.jpg at (max pixel / seuil_div)
    via simplify_img and writes two outputs: ss_xs<i>.jpg with 0/1 values
    (training labels) and ss_xs_visual_<i>.jpg with 0/255 values (viewable).
    Two copies are thresholded because simplify_img mutates its input in place.
    """
    for k in range(1,7+1): #Number of video
        for i in range (1, 30+1): #Number of picture in each video, limited by eye_tracker
            img = cv2.imread(path+"/v"+str(k)+"/eye_tracker/ss_x"+str(i)+".jpg", 0) # 0 means gray_scale
            img_copy_1=np.copy(img)
            img_copy_2=np.copy(img)
            cv2.imwrite(path+"/v"+str(k)+"/eye_tracker/ss_xs"+str(i)+".jpg",simplify_img(img_copy_1,seuil_div,1))
            cv2.imwrite(path+"/v"+str(k)+"/eye_tracker/ss_xs_visual_"+str(i)+".jpg",simplify_img(img_copy_2,seuil_div,255))
def simplify_img(img, seuil_division, high=1): # high = 1 or 255
    """
    Binarize *img* in place against the threshold (max pixel / seuil_division).

    Arguments:
    img -- numpy image (modified in place)
    seuil_division -- divisor applied to the maximum pixel to get the threshold
    high -- value written to pixels at/above the threshold (1 for labels,
            255 for a viewable image)

    Returns:
    img -- the same array, every pixel set to `high` or 0
    """
    # BUG FIX: the old code re-read img[i_max][j_max] inside the write loop,
    # so as soon as the maximum pixel itself was overwritten with `high` the
    # threshold collapsed (e.g. to 1/seuil_division when high == 1) and nearly
    # every subsequent pixel passed.  Capture the threshold BEFORE any write.
    threshold = img.max() / seuil_division

    # Vectorized replacement of the original O(n^2) double loop.
    above = img >= threshold
    img[above] = high
    img[~above] = 0
    return img
## Create the library "images" with all image
# Launch this code one time to create the librairy images
def create_librairy_images():
    """Load the whole dataset into one dict of grayscale numpy images.

    Keys follow the pattern "v<k><tag><i>" where k is the video (1..7),
    i the frame (1..30) and tag one of: i/c/m/o (full-size feature maps),
    3si/3sc/3sm/3so (width-90 versions), ssxs_visual_ (0/255 label image),
    ssxs (0/1 label image), ssx (small raw eye tracker).  "blank" maps to a
    blank reference image.  cv2.imread returns None for missing files, so
    absent images end up as None values -- no error is raised here.
    """
    images={}
    images["blank"]= cv2.imread(path+"/blank.jpg", 0)
    for k in range(1,7+1): #Number of video
        for i in range (1, 30+1): #Number of picture in each video, limited by eye_tracker
            img_i = cv2.imread(path+"/v"+str(k)+"/intensity/i"+str(i)+".jpg", 0)
            images["v"+str(k)+"i"+str(i)] = img_i
            img_i = cv2.imread(path+"/v"+str(k)+"/intensity/3s_i"+str(i)+".jpg", 0)
            images["v"+str(k)+"3si"+str(i)] = img_i
            img_c = cv2.imread(path+"/v"+str(k)+"/color/c"+str(i)+".jpg", 0)
            images["v"+str(k)+"c"+str(i)] = img_c
            img_c = cv2.imread(path+"/v"+str(k)+"/color/3s_c"+str(i)+".jpg", 0)
            images["v"+str(k)+"3sc"+str(i)] = img_c
            img_m = cv2.imread(path+"/v"+str(k)+"/motion/m"+str(i)+".jpg", 0)
            images["v"+str(k)+"m"+str(i)] = img_m
            img_m = cv2.imread(path+"/v"+str(k)+"/motion/3s_m"+str(i)+".jpg", 0)
            images["v"+str(k)+"3sm"+str(i)] = img_m
            img_o = cv2.imread(path+"/v"+str(k)+"/orientation/o"+str(i)+".jpg", 0)
            images["v"+str(k)+"o"+str(i)] = img_o
            img_o = cv2.imread(path+"/v"+str(k)+"/orientation/3s_o"+str(i)+".jpg", 0)
            images["v"+str(k)+"3so"+str(i)] = img_o
            img_xs = cv2.imread(path+"/v"+str(k)+"/eye_tracker/ss_xs_visual_"+str(i)+".jpg", 0)
            images["v"+str(k)+"ssxs_visual_"+str(i)] = img_xs
            img_ssxs = cv2.imread(path+"/v"+str(k)+"/eye_tracker/ss_xs"+str(i)+".jpg", 0)
            images["v"+str(k)+"ssxs"+str(i)] = img_ssxs
            img_ssx = cv2.imread(path+"/v"+str(k)+"/eye_tracker/ss_x"+str(i)+".jpg", 0)
            images["v"+str(k)+"ssx"+str(i)] = img_ssx
    return images
## Create librairy x and y
def create_x_and_y(images, steps ,shuffle,video,image):
    """Build per-pixel training dictionaries from the preloaded images dict.

    Each sampled pixel becomes one example: x[str(n)] is a (1,6) array
    (i, j, intensity, color, motion, orientation) and y[str(n)] a (1,1) label
    taken from the eye-tracker image.  Optionally shuffles examples by
    permuting the dictionary keys.

    NOTE(review): reads images["v<k>xs<l>"], a key that create_librairy_images
    does not create (it produces "ssxs"/"ssxs_visual_") -- confirm which
    loader is expected to fill that key before using this function.
    """
    assert(video>=1 and video<=7)
    assert(image>=1 and image<=30)
    x,y={},{}
    #First, we create x and y librairy thanks to index and a group of pixels
    compt=0
    index=[]
    for k in range(1,video+1): # Number of video
        for l in range (1, image+1): # Number of picture in each video, limited by eye_tracker
            for i in range (0, images["v1i1"].shape[0],steps): # lign all images have the same shape, images["v1i1"] or images["v5m19"]
                for j in range(0, images["v1i1"].shape[1],steps): # colomn
                    pixel_i = images["v"+str(k)+"i"+str(l)][i][j]
                    pixel_c = images["v"+str(k)+"c"+str(l)][i][j]
                    pixel_m = images["v"+str(k)+"m"+str(l)][i][j]
                    pixel_o = images["v"+str(k)+"o"+str(l)][i][j]
                    pixel_xs = images["v"+str(k)+"xs"+str(l)][i][j]
                    array_x=np.array([[i,j,pixel_i,pixel_c,pixel_m,pixel_o]]) #two [[]] against weird "rank one" array with numpy librairy
                    #array_x=np.array([[pixel_i,pixel_c,pixel_m,pixel_o]]) #two [[]] against weird "rank one" array with numpy librairy
                    array_y=np.array([[pixel_xs]])
                    x[str(compt)]=array_x
                    y[str(compt)]=array_y
                    index.append(compt)
                    compt+=1
    #Then, we shuffle the index: example compt is re-filed under the random key index[compt]
    if shuffle:
        rd.shuffle(index)
        x_shuffle,y_shuffle={},{}
        compt=0
        for k in range(1,video+1): # Number of video
            for l in range (1, image+1): # Number of picture in each video, limited by eye_tracker
                for i in range (0, images["v1i1"].shape[0],steps): # lign
                    for j in range(0, images["v1i1"].shape[1],steps): # colomn
                        x_shuffle[str(index[compt])] = x[str(compt)]
                        y_shuffle[str(index[compt])] = y[str(compt)]
                        compt+=1
        assert( len(x)==len(y)==len(x_shuffle)==len(y_shuffle) )
        return x_shuffle,y_shuffle
    else:
        assert(len(x)==len(y))
        return x,y
def create_x_and_y_version2(shuffle, steps=1,video=7,image=30):
    """Build per-image training dictionaries directly from the files on disk.

    One example per frame: x[str(n)] is a (1, 4*n_pixels) row concatenating
    (intensity, color, motion, orientation) for every sampled pixel of the
    small "3s_" maps, and y[str(n)] a (1, n_label_pixels) row from the 0/1
    "ss_xs" eye-tracker image.

    NOTE(review): in the build loop `compt+=1` happens BEFORE
    `index.append(compt)`, so index holds 1..N while x/y keys are "0".."N-1";
    after shuffling, key "0" is never written and key str(N) has no source --
    the shuffled dictionaries are shifted by one.  Confirm and fix upstream.
    """
    assert(video>=1 and video<=7)
    assert(image>=1 and image<=30)
    x,y={},{}
    #First, we create x and y librairy thanks to index and a group of pixels
    compt=0
    index=[]
    for k in range(1,video+1): # Number of video
        for l in range (1, image+1): # Number of picture in each video, limited by eye_tracker
            # Reference image, only used for its shape (re-read every iteration).
            img=cv2.imread(path+"/v1/intensity/3s_i1.jpg", 0)# lign all 3s images have the same shape, 90*72 ,images["v13si1"] or images["v53sm19"]
            flag_create_array_x,flag_create_array_y=True,True
            img_i = cv2.imread(path+"/v"+str(k)+"/intensity/3s_i"+str(l)+".jpg", 0)
            img_c = cv2.imread(path+"/v"+str(k)+"/color/3s_c"+str(l)+".jpg", 0)
            img_m = cv2.imread(path+"/v"+str(k)+"/motion/3s_m"+str(l)+".jpg", 0)
            img_o = cv2.imread(path+"/v"+str(k)+"/orientation/3s_o"+str(l)+".jpg", 0)
            img_ssxs = cv2.imread(path+"/v"+str(k)+"/eye_tracker/ss_xs"+str(l)+".jpg", 0)
            # Feature row: O(n^2) due to the repeated np.concatenate.
            for i in range (0, img.shape[0],steps):
                for j in range(0, img.shape[1],steps): # colomn
                    if flag_create_array_x:
                        pixel_i = img_i[i][j]
                        pixel_c = img_c[i][j]
                        pixel_m = img_m[i][j]
                        pixel_o = img_o[i][j]
                        array_x=np.array([[pixel_i,pixel_c,pixel_m,pixel_o]])
                        flag_create_array_x=False
                    else:
                        pixel_i = img_i[i][j]
                        pixel_c = img_c[i][j]
                        pixel_m = img_m[i][j]
                        pixel_o = img_o[i][j]
                        array_x=np.concatenate(( array_x , np.array([[pixel_i,pixel_c,pixel_m,pixel_o]]) ), axis=1)
            x[str(compt)]=array_x
            # Label row, built from the small 0/1 eye-tracker image.
            img_2 = cv2.imread(path+"/v1/eye_tracker/ss_xs1.jpg", 0)
            for i in range (0, img_2.shape[0],steps): # lign all ssxs images have the same shape, 10*8 , images["v14sxs1"] or images["v54sxs19"]
                for j in range(0, img_2.shape[1],steps):
                    if flag_create_array_y:
                        pixel_xs = img_ssxs[i][j]
                        array_y=np.array([[pixel_xs]])
                        flag_create_array_y=False
                    else:
                        pixel_xs = img_ssxs[i][j]
                        array_y=np.concatenate(( array_y , np.array([[pixel_xs]]) ), axis=1)
            y[str(compt)]=array_y
            compt+=1
            index.append(compt)
    #Then, we shuffle the index
    if shuffle:
        rd.shuffle(index)
        x_shuffle,y_shuffle={},{}
        compt=0
        for k in range(1,video+1): # Number of video
            for l in range (1, image+1): # Number of picture in each video, limited by eye_tracker
                x_shuffle[str(index[compt])] = x[str(compt)]
                y_shuffle[str(index[compt])] = y[str(compt)]
                compt+=1
        assert( len(x)==len(y)==len(x_shuffle)==len(y_shuffle) )
        return x_shuffle,y_shuffle
    else:
        assert(len(x)==len(y))
        return x,y
## Balance 1 and 0
def taux_de_1(y):
    """Count entries of the dict ``y`` (keys "0".."n-1") equal to 1.

    Returns (count, count / n). Fixes an off-by-one in the original,
    whose loop started at 1 and silently skipped the entry keyed "0".
    """
    n = len(y)
    compt = 0
    for k in range(n):  # was range(1, n): key "0" was never counted
        if y[str(k)] == 1:
            compt += 1
    return compt, compt / n
def taux_de_0_version2(images):
    """Apparently intended to count zero pixels across the saliency images.

    NOTE(review): this function looks like an unfinished copy/paste and
    cannot run as written -- ``video``, ``image``, ``steps``,
    ``flag_create_array_x``, ``y`` and ``n`` are all undefined here, so
    calling it raises NameError. The trailing loop duplicates taux_de_1.
    """
    compt_all,compt_zero=0,0
    for k in range(1,video+1): # Number of video
        for l in range (1, image+1): # Number of picture in each video, limited by eye_tracker
            image_ssxs = images["v"+str(k)+"3si"+str(l)]
            for i in range (0, images["v13si1"].shape[0],steps): # lign all 3s images have the same shape, 90*72 ,images["v13si1"] or images["v53sm19"]
                for j in range(0, images["v13si1"].shape[1],steps): # colomn
                    if flag_create_array_x:
                        pixel_i = images["v"+str(k)+"3si"+str(l)][i][j]
                        pixel_c = images["v"+str(k)+"3sc"+str(l)][i][j]
                        pixel_m = images["v"+str(k)+"3sm"+str(l)][i][j]
                        pixel_o = images["v"+str(k)+"3so"+str(l)][i][j]
                        compt=0
    # NOTE(review): from here on the code references ``n`` and ``y`` that do
    # not exist in this scope -- likely pasted from taux_de_1 by mistake.
    for k in range(1,n):
        if(y[str(k)]==1):
            compt+=1
    return compt, compt/n
def balance_x_y(x,y, shuffle):
    """Downsample x/y (dicts keyed "0".."n-1") to equal counts per class.

    Keeps at most ``nbre_de_1`` samples whose label is 0 and the same number
    with a non-zero label, re-keys the kept pairs consecutively from "0",
    and optionally shuffles the key assignment (pairs stay aligned).
    NOTE(review): the selection loop starts at 1, so the sample keyed "0"
    is never considered -- confirm whether that is intentional.
    """
    x_balanced,y_balanced={},{}
    nbre_de_1,taux = taux_de_1(y)
    compt_x,compt_y=0,0
    n=len(x)
    assert(n==len(y))
    compt=0
    index=[]
    for k in range(1,n):
        if(y[str(k)]==0):
            # Keep a 0-labelled sample only while the 0-quota is unfilled.
            if(compt_x<nbre_de_1):
                x_balanced[str(compt)]=x[str(k)]
                y_balanced[str(compt)]=y[str(k)]
                index.append(compt)
                compt+=1
                compt_x+=1
        else:
            # Non-zero label: keep while the 1-quota is unfilled.
            if(compt_y<nbre_de_1):
                x_balanced[str(compt)]=x[str(k)]
                y_balanced[str(compt)]=y[str(k)]
                index.append(compt)
                compt+=1
                compt_y+=1
    if shuffle:
        # Permute the new keys; x and y receive the same permutation.
        rd.shuffle(index)
        x_balanced_shuffle,y_balanced_shuffle={},{}
        for k in range(0,len(index)):
            x_balanced_shuffle[str(index[k])] = x_balanced[str(k)]
            y_balanced_shuffle[str(index[k])] = y_balanced[str(k)]
        return x_balanced_shuffle,y_balanced_shuffle
    else:
        return x_balanced,y_balanced
## Create train_x, train_y, test_x, test_y
def create_train_test(x, y, pourcentage_of_test=0.8):
    """Split x/y (dicts keyed "0".."n-1", values 1xD rows) into train/test.

    Despite its name, ``pourcentage_of_test`` is the *train* fraction
    (0.8 keeps 80% of the samples for training), matching the original.
    Samples are column-stacked: each sample becomes one column of the
    returned arrays. Fixes off-by-one bugs in the original, which silently
    dropped samples "0", "train_size" and the last key of each split.

    Returns (train_x, train_y, test_x, test_y).
    """
    assert len(x) == len(y)
    train_size = int(len(x) * pourcentage_of_test)
    test_size = len(x) - train_size
    print("Train size : " + str(train_size) + ", and test size : " + str(test_size))
    # Transpose each 1xD row into a Dx1 column, then stack columns.
    train_x = np.concatenate([x[str(k)].T for k in range(train_size)], axis=1)
    train_y = np.concatenate([y[str(k)].T for k in range(train_size)], axis=1)
    test_x = np.concatenate([x[str(k)].T for k in range(train_size, len(x))], axis=1)
    test_y = np.concatenate([y[str(k)].T for k in range(train_size, len(y))], axis=1)
    return train_x, train_y, test_x, test_y
##
def change_zeros_and_ones_y(AL):
    """Clamp exact 0 and 1 entries of AL away from the interval bounds.

    Replaces 0 with 1e-6 and 1 with 1 - 1e-6 so downstream log/odds
    computations never evaluate log(0). Mutates AL in place and returns it.
    Vectorized replacement for the original element-wise double loop
    (same result: entries set to 1e-6 can no longer match the ``== 1`` mask).
    """
    eps = 10 ** (-6)
    AL[AL == 0] = eps
    AL[AL == 1] = 1 - eps
    return AL
## Time to have result !
# NOT USED ANYMORE create_eye_tracker_simplified(seuil_div=10)
# NOT USED ANYMORE images = create_librairy_images()
#print("Etape 1 : Done")
#x,y=create_x_and_y(images, 100, shuffle=True, video=7,image=30)
#x,y=create_x_and_y_version2(shuffle=True)
#x,y=balance_x_y(x,y, True)
print("Etape 1 : Done --- Size of the data set :" +str(len(x)) )
train_x, train_y, test_x, test_y = create_train_test(x,y,pourcentage_of_test=0.8)
print("Etape 2 : Done")
layers_dims=[25920,120,80] #25920 = 90*72*4, 80=10*8
#layers_dims=[4,2,1]
parameters = L_layer_model(train_x, train_y,layers_dims, init_multiplier=0.001, learning_rate = 0.0075, num_iterations = 300, print_cost = True)
print("Train accuracy :")
pred_train = predict(train_x, train_y, parameters)
print("Test accuracy :")
pred_testpred_tes = predict(test_x, test_y, parameters)
y_neuro,y_neuro_visuel = create_x_and_y_for_test_version2(parameters,1,1,steps=1) | [
"dimi_du_24@hotmail.fr"
] | dimi_du_24@hotmail.fr |
a706d9d72e96133ce2aa017e8108a5f0aaf7493e | 71e038ed3ff0d204734e51adec21e3b2ae3d3bc1 | /app/api/follower_routes.py | 39874d476d7c9399c90e9c04f7a53f33be0f373b | [] | no_license | mimike/hangIn | dc24f44d4e553cd1d43d9287cd175afa367335c1 | 376a62bf2de36563fd40ce54ee8c63fb6c5f43d6 | refs/heads/main | 2023-06-24T02:20:21.548670 | 2021-07-30T02:19:09 | 2021-07-30T02:19:09 | 364,103,801 | 0 | 0 | null | 2021-05-17T00:53:42 | 2021-05-04T01:05:39 | Python | UTF-8 | Python | false | false | 5,257 | py |
from flask import Blueprint, jsonify, redirect, request
from flask_login import login_required, current_user
from app.models import db, User
import json
follower_routes = Blueprint('follower', __name__)
#get all followers for user id don't need
# @follower_routes.route('/follower/<int:user_id>')
# def get_followers(id):
# @login_required
# id = User.query.get(id)
# followers = user.followers
# for follower in followers:
# return {"user": user.to_dict()}
#post follow
@follower_routes.route('/', methods=['POST'])
@login_required
def followUser():
    """POST /: make the current user follow the user named in the JSON body.

    Expects a JSON payload {"userId": <id>} and returns the followed
    user's summary dict (id, name, avatar).
    """
    user_id = request.json["userId"]
    user = User.query.filter_by(id = user_id).first() #friend's id
    # Record the relationship: current_user joins the target's follows list.
    user.follows.append(current_user)
    db.session.add(current_user)
    db.session.commit()
    # return {"user": user.to_user_name_to_dic() } # {"user": {"user": Jerry Wright}}
    return user.get_follower_info() #{{id: 4"name": Jerry Wr, avatar: asdf.url}}
#GET follow ALLLLLLLL of the people im following (and followers) works
@follower_routes.route('/follows/<int:id>', methods=['GET'])
@login_required
def getAllFollowingFollower(id):
    """GET /follows/<id>: return follower and following maps for a user.

    Each map is keyed by user id and holds that user's summary info.
    """
    profile_user = User.query.filter_by(id=id).first()
    followers = {}
    for person in profile_user.follows:
        followers[person.id] = person.get_follower_info()
    following = {}
    for person in profile_user.followers:
        following[person.id] = person.get_follower_info()
    return {"followers": followers, "following": following}
@follower_routes.route('/', methods=['DELETE'])
@login_required
def deleteFollower():
    """DELETE /: make the current user unfollow the user named in the JSON body.

    Expects a JSON payload {"userId": <id>} and returns the unfollowed
    user's summary dict.
    """
    user_id = request.json["userId"]
    user = User.query.filter_by(id = user_id).first()
    # Remove current_user from the target user's follows list.
    user.follows.remove(current_user)
    db.session.commit()
    return user.get_follower_info()
# GETwe want to see who the users following GET ALL of my followers fklter follower_id ==
# @follower_routes.route('/')
# @login_required
# def getAllFollowers(id):
# users = User.follows.query.filter_by(follower_id == current_user.id).all() #friend's id
# return users.get_follower_info()
#post unfollow
@follower_routes.route('/unfollows/<int:user_id>', methods=['POST'])
@login_required
def unfollowUser(user_id):
    """POST /unfollows/<user_id>: current user stops following the given user.

    Fixes two bugs: the view argument now matches the route placeholder
    ``user_id`` (the old name ``id`` made Flask raise a TypeError when the
    route was hit), and the commit goes through ``db.session.commit()``
    (the bare ``db.commit()`` does not exist on the Flask-SQLAlchemy object).
    """
    user = User.query.filter_by(id = user_id).first() #friend's id
    current_user.follows.remove(user)
    db.session.add(current_user)
    db.session.commit()
    return {"user": user.to_user_name_to_dic() }# from flask import Blueprint, jsonify, redirect, request
# from flask_login import login_required, current_user
# from app.models import db, User
# import json
# follower_routes = Blueprint('follower', __name__)
# #get all followers for user id don't need
# # @follower_routes.route('/follower/<int:user_id>')
# # def get_followers(id):
# # @login_required
# # id = User.query.get(id)
# # followers = user.followers
# # for follower in followers:
# # return {"user": user.to_dict()}
# #post follow
# @follower_routes.route('/', methods=['POST'])
# @login_required
# def followUser():
# user_id = request.json["userId"]
# user = User.query.filter_by(id = user_id).first() #friend's id
# user.follows.append(current_user)
# db.session.add(current_user)
# db.session.commit()
# # return {"user": user.to_user_name_to_dic() } # {"user": {"user": Jerry Wright}}
# return user.get_follower_info() #{{id: 4"name": Jerry Wr, avatar: asdf.url}}
# #GET follow ALLLLLLLL of the people im following (and followers) works
# @follower_routes.route('/follows/<int:id>', methods=['GET'])
# @login_required
# def getAllFollowingFollower(id):
# #user_id == current user id
# # how do u query FOLLOWS?? its a joins table!!!
# profileUser = User.query.filter_by(id = id).first()
# #users = User.follows.query.filter_by(user_id == current_user.id).all() #friend's id
# followers = {
# user.id: user.get_follower_info() for user in profileUser.follows
# }
# following = {
# follower.id: follower.get_follower_info() for follower in profileUser.followers
# }
# print("!!follow!", {"following": following, "followers": followers})
# return {"followers": followers, "following": following}
# @follower_routes.route('/', methods=['DELETE'])
# @login_required
# def deleteFollower():
# user_id = request.json["userId"]
# user = User.query.filter_by(id = user_id).first()
# user.follows.remove(current_user)
# db.session.commit()
# return user.get_follower_info()
# # GETwe want to see who the users following GET ALL of my followers fklter follower_id ==
# # @follower_routes.route('/')
# # @login_required
# # def getAllFollowers(id):
# # users = User.follows.query.filter_by(follower_id == current_user.id).all() #friend's id
# # return users.get_follower_info()
# #post unfollow
# @follower_routes.route('/unfollows/<int:user_id>', methods=['POST'])
# @login_required
# def unfollowUser(id):
# print("ID!", id)
# user = User.query.filter_by(id = id).first() #friend's id
# current_user.follows.remove(user)
# db.session.add(current_user)
# db.commit()
# return {"user": user.to_user_name_to_dic() }
| [
"mimi.g.ke@gmail.com"
] | mimi.g.ke@gmail.com |
acf4a8ba77d495634ff0faee2a75f6a1bdff7442 | 3feef041e930580fc5cfbba35a4d3cd914cbff71 | /code/utils/write_tfrecord.py | 8711d4c0f320939fde5ec17d833eb0cb5d7bb9ab | [] | no_license | huwenjie333/cnn-population-mapping | 1645d2e879647858198ca8cd8934f497510d6636 | 8cecb5b85642d9a47bbc92cb60616f96c8ea81f4 | refs/heads/master | 2021-04-12T04:38:38.008524 | 2018-09-17T04:27:53 | 2018-09-17T04:27:53 | 125,925,055 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,066 | py | import tensorflow as tf
import pandas as pd
import numpy as np
import random
import gdal
from PIL import Image
import os,io,time
os.environ['CUDA_VISIBLE_DEVICES'] = ''
###################################### parameters #############################################
# the diretory of images where the paths in ANNOS_CSV relative to
IMG_DIR = '/home/timhu/all_tif'
# central crop width and height
Wl8 = Hl8 = 150 # Landsat 30m resolution per pixel
Ws1 = Hs1 = 450 # Sentinel-1 10m resolution per pixel
# annotation csv for image relative path and labels
ANNOS_CSV = '/home/timhu/dfd-pop/data/annos_csv/state24_paths_density_labels_13k_Feb10.csv'
# read csv files
df_annos = pd.read_csv(ANNOS_CSV, index_col=0)
len_train = len(df_annos[df_annos.partition == 'train'])
len_val = len(df_annos[df_annos.partition == 'val'])
len_test = len(df_annos[df_annos.partition == 'test'])
# set the output path for new TFRecord file, and
record_train_path = '/home/timhu/dfd-pop/data/TFrecord/state24_l8s1_density_train_'+str(len_train)+'.tfrecord'
record_val_path = '/home/timhu/dfd-pop/data/TFrecord/state24_l8s1_density_val_'+str(len_val)+'.tfrecord'
record_test_path = '/home/timhu/dfd-pop/data/TFrecord/state24_l8s1_density_test_'+str(len_test)+'.tfrecord'
#############################################################################################
# helper functions to convert Python values into instances of Protobuf "Messages"
def _int64_feature(value):
    # Wrap a single int in a tf.train.Feature (Int64List of length 1).
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    # Wrap a single bytes object in a tf.train.Feature (BytesList of length 1).
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
    # Wrap a single float in a tf.train.Feature (FloatList of length 1).
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
# helper functions to load TIF image path as numpy array
def load_tif_from_file(file, crop_width, crop_height):
    """Read a TIF with GDAL and return a centered crop as a numpy array.

    The crop of size (crop_width, crop_height) is taken from the center of
    the raster. Raises Exception when the requested crop exceeds the raster.
    Returns a channels-last array of shape (H, W, C).
    """
    img_file = gdal.Open(file)
    width, height = img_file.RasterXSize, img_file.RasterYSize
    if crop_width > width:
        raise Exception("Requested width exceeds tif width.")
    if crop_height > height:
        raise Exception("Requested height exceeds tif height.")
    # returns np array of shape (C, H, W)
    img_data = img_file.ReadAsArray((width - crop_width)//2, (height - crop_height)//2, crop_width, crop_height)
    # Reorder channels-first (C, H, W) to channels-last (H, W, C).
    img_data = np.moveaxis(img_data, 0, -1)
    return img_data
# compress the the image numpy array to JPEG bytes, which saves file sizes of TFrecord
def convert_jpeg_bytes(image_path, crop_width, crop_height):
    """Center-crop a TIF and return the crop re-encoded as JPEG bytes.

    JPEG re-encoding shrinks the bytes stored in the TFRecord considerably
    compared to raw pixel data.
    """
    im_array = load_tif_from_file(image_path, crop_width, crop_height)
    im = Image.fromarray(im_array)
    im_bytes = io.BytesIO()
    im.save(im_bytes, format='JPEG')
    return im_bytes.getvalue()
if __name__ == '__main__':
sess = tf.Session()
# create a TFRecordWriter for each writer
train_writer = tf.python_io.TFRecordWriter(record_train_path)
val_writer = tf.python_io.TFRecordWriter(record_val_path)
test_writer = tf.python_io.TFRecordWriter(record_test_path)
# shuffle the whole datasets
df_annos = df_annos.sample(frac=1).reset_index(drop=True)
len_df = len(df_annos)
start_time = time.time()
# loop through each row in CSV and write images and labels to TFrecord
for (i, row) in df_annos.iterrows():
if i % 100 == 0:
duration = time.time() - start_time
start_time = time.time()
print ('finish writing: %d/%d (%.3f sec)' % (i, len_df, duration))
# read in each row and take the image and labels needed for TFrecord
l8_path = os.path.join(IMG_DIR, row.l8_vis_path)
s1_path = os.path.join(IMG_DIR, row.s1_vis_path)
# l8_bytes = convert_jpeg_bytes(l8_path, Wl8, Hl8)
# s1_bytes = convert_jpeg_bytes(s1_path, Ws1, Hs1)
l8_array = load_tif_from_file(l8_path, Wl8, Hl8)
s1_array = load_tif_from_file(s1_path, Ws1, Hs1)
# covert python values to the format of Protobuf "Messages"
next_features = {'image_l8': _bytes_feature(l8_array.tostring()),
'image_s1': _bytes_feature(s1_array.tostring()),
'density_class': _int64_feature(row.pop_density_class),
'density_val': _float_feature(row.pop_density),
'density_log2': _float_feature(row.pop_density_log2),
'longitude': _float_feature(row.longitude),
'latitude': _float_feature(row.latitude)}
# Create an instance of an Example protocol buffer
next_example = tf.train.Example(features=tf.train.Features(feature=next_features))
if row.partition == 'train':
# Serialize to string and write to the file
train_writer.write(next_example.SerializeToString())
if row.partition == 'val':
val_writer.write(next_example.SerializeToString())
if row.partition == 'test':
test_writer.write(next_example.SerializeToString())
train_writer.close()
val_writer.close()
test_writer.close() | [
"wenjie.hu2013@gmail.com"
] | wenjie.hu2013@gmail.com |
d8c80ee0a2954ef4a10f0ebfbf034248dcc2d365 | a8fb5d37de019221e5897a98bd176c566037f813 | /Playground/objgraph_/obj_graph.py | 0890b1611b57af3cdb6b08c6f9339df38174a04e | [] | no_license | tdworowy/PythonPlayground | b743dc2b870d1681b24e654e2af3fe5957710265 | ff090fb44046c9c37501f5dbbcb08d56552540d4 | refs/heads/master | 2023-01-05T00:28:55.725894 | 2022-12-27T10:06:40 | 2022-12-27T10:06:40 | 72,983,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | import objgraph
class Staff:
    # Minimal wrapper object used to build reference chains for objgraph demos.
    def __init__(self, ele):
        # ele: the wrapped value (any object).
        self.ele = ele
    def get(self):
        # Return the wrapped value unchanged.
        return self.ele
def example(count):
    """Return a Staff wrapping a list of three Staff-wrapped ints (0, 1, 2).

    The recursion only counts down; each level rebuilds the same structure
    and the value actually returned comes from the count == 0 call.
    """
    x = range(3)
    y = [Staff(i) for i in x]
    if count == 0:
        return Staff(y)
    else:
        return example(count - 1)
def example2():
    """Return a 10-deep chain of Staff wrappers around the integer 1."""
    chain = 1
    for _ in range(10):
        chain = Staff(chain)
    return chain
def example3():
    """Build a deliberately tangled object graph for objgraph visualisation.

    ``l`` holds Staff chains, tuples, and finally ``l1`` -- while every
    element of ``l1`` is ``l`` itself, creating reference cycles.
    """
    l = []
    l1 = []
    for x in range(7):
        z = example(5)
        q = example2()
        l.append(z)
        l.append(q)
        l.append((z, q))
        l1.append(l)
    # Close the cycle: l contains l1, and l1's entries are all l.
    l.append(l1)
    return Staff(l)
def test1():
objgraph.show_refs(example(3), filename="obj.png", refcounts=True)
def test2():
x = range(100)
y = map(example, x)
objgraph.show_refs(y, filename="obj2.png", refcounts=True)
def test3():
objgraph.show_refs(example2(), filename="obj3.png", refcounts=True, max_depth=5, too_many=10)
def test4():
"""Take lot of time"""
objgraph.show_refs(example3(), filename="obj4.png", refcounts=True, max_depth=10, too_many=100)
def test5():
objgraph.show_refs(example3(), filename="obj5.png", refcounts=True, max_depth=10, too_many=20)
if __name__ == "__main__":
test5()
| [
"dworowytomasz@gmail.com"
] | dworowytomasz@gmail.com |
55d2099c22b2ef1df7eed3cdac7b86d9e3c15d97 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_230/ch33_2020_03_30_20_03_55_797460.py | 27b68ff699defdc00de4fdca5d880421d1e22da1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def eh_primo(n):
div=3
if n%2==0 and n!=2 or n==1 or n==0:
return False
while n > div:
if n%div==0:
return False
div +=2
return True
def primos_entre(a, b):
    """Return how many primes lie in the inclusive range [a, b].

    Fixes the original loop increment, which bumped an undefined variable
    ``x`` (raising NameError instead of advancing ``a``).
    """
    n_primos = 0
    while a <= b:
        if eh_primo(a):
            n_primos += 1
        a += 1  # was ``x += 1``: the counter never advanced
    return n_primos
"you@example.com"
] | you@example.com |
030cb9969471b2b7ec33c9976d6cd6225da8fbc6 | 2e46785208e4e547ebe8d8ad8b7069043d5b9c4d | /part1/main.py | d2029ae0c0f94f6fc25b7545d2cd2da3d9976717 | [] | no_license | EelaiWind/dl_hw2_vgg19net | c44371db42fe71c2ad2fd62bf2043d1e5e72a766 | bb88bd247c082015f901d7f05835d581ba67c2b9 | refs/heads/master | 2021-01-19T00:32:49.949577 | 2017-04-04T11:44:54 | 2017-04-04T12:15:50 | 87,181,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py |
from __future__ import print_function
import sys
import os
import utils
from vgg19_model import Vgg19Model
import tensorflow as tf
import numpy as np
import cv2
if len(sys.argv) < 2:
print('usage: %s <image file>' % sys.argv[0], file=sys.stderr)
sys.exit()
model = Vgg19Model(os.path.join('..', 'vggnet', 'vgg19.npy'))
input_tensor = tf.placeholder("float", shape=[None, utils.IMAGE_SIZE, utils.IMAGE_SIZE, 3])
output_tensor = model.build_model(input_tensor)
image = utils.load_image(sys.argv[1])
image = image.reshape([1, utils.IMAGE_SIZE, utils.IMAGE_SIZE, 3])
cv2.imwrite('tmp.jpg', image[0])
for tensor in tf.trainable_variables():
print(tensor.name)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
tf.global_variables_initializer().run()
output = sess.run(output_tensor, feed_dict={input_tensor:image})
utils.print_top_5(output, os.path.join('..', 'vggnet', 'image_labels.txt'))
| [
"tony333ts@gmail.com"
] | tony333ts@gmail.com |
3f04ca17a88dccb12cfee866808b2859f10daab5 | bc6af72eaca8448f617e730180ffada534b0094f | /first_test.py | f9217a6a9475aba06e20c7b488ecb0b0f02efedf | [] | no_license | RiteshDave87/nse_python_angularjs | 09d8eefe10dda612bbc43337a8fc48ed01ae36b7 | 1421ea6bd95de0eb213e58340cb63399f8e60dd4 | refs/heads/master | 2020-04-08T12:02:11.325518 | 2018-11-27T12:31:58 | 2018-11-27T12:31:58 | 159,331,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from flask import Flask, render_template
from nsetools import Nse
import os
app = Flask(__name__)
nse = Nse()
port = int(os.getenv('PORT', 64781))
@app.route('/getquote/<name>')
def getQuote(name):
    # Return the NSE quote for the given stock symbol as JSON.
    quote = nse.get_quote(name, as_json=True)
    return quote
@app.route('/topgainers')
def fetchTopGainers():
    # Return the NSE top gainers list as JSON.
    top_gainers = nse.get_top_gainers(as_json=True)
    return top_gainers
@app.route('/toplosers')
def fetchTopLosers():
    # Return the NSE top losers list as JSON.
    top_losers = nse.get_top_losers(as_json=True)
    return top_losers
@app.route('/')
def main():
    # Serve the single-page front end.
    return render_template('index.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port) | [
"noreply@github.com"
] | noreply@github.com |
7a99a3690efa5e02a155ba1eb1590c783ffc6acd | cc9b47814fdf9796d1ca6f2c2470c7cc8ec2e6e3 | /MoviesToJson.py | dd07cb915bf5c746e88301a43dd8a2ffdb7aa476 | [] | no_license | Md-Shahnawaz/elasticsearch_workshop | d985bf98291a39331ce14c18ff9339f3c3898505 | d9181ac4f7bea63a5ce02bdf2cadc260a80b0a51 | refs/heads/master | 2022-10-20T07:50:26.309256 | 2020-01-31T07:23:02 | 2020-01-31T07:23:02 | 237,380,142 | 0 | 0 | null | 2022-10-04T23:56:48 | 2020-01-31T07:21:49 | Java | UTF-8 | Python | false | false | 1,051 | py | import csv
import re
from collections import deque
import elasticsearch
from elasticsearch import helpers
csvfile = open('/Users/1022590/Shahnawaz/Personal/elasticsearch/ml-latest-small/movies.csv', 'r')
reader = csv.DictReader( csvfile )
def movies():
    """Yield one dict per row of the module-level CSV ``reader``.

    Each dict has keys id/title/year/genre. The year is sliced from the
    trailing "(YYYY)" of the title and defaults to "2016" when the slice
    is not numeric; genres are split on "|".
    """
    for movie in reader:
        movie_dict = {}
        # Strip embedded quotes, then drop the trailing " (...)" suffix.
        title = re.sub(" \(.*\)$", "", re.sub('"','', movie['title']))
        # Title format is "Name (YYYY)": the year is the last 4 chars before ")".
        year = movie['title'][-5:-1]
        if (not year.isdigit()):
            year = "2016"
        genres = movie['genres'].split('|')
        movie_dict["id"] = movie["movieId"]
        movie_dict["title"] = title
        movie_dict["year"] = year
        movie_dict["genre"] = genres
        yield movie_dict
es.indices.delete(index="movies", ignore=404)
es.indices.create(index='movies', body={
'settings' : {
'index' : {
'number_of_shards':1 ,
'number_of_replicas':1
}
}
})
deque(helpers.parallel_bulk(es, movies(), index="movies", doc_type="doc"), maxlen=0)
es.indices.refresh() | [
"mohammad.shahnawaz@jda.com"
] | mohammad.shahnawaz@jda.com |
7adda36eb4f8400d8a204de7f1eeb194d5a056cd | f5e4cde32540aad6f7c5a589f77e34f7a4dc040c | /assets/levels_maps/__init__.py | 9d0c6ea3f76d6335cd95f6eb9a78d1075a8c4468 | [
"MIT"
] | permissive | Barbapapazes/dungeons-dragons | 559259c456b86d04ec4a71d322500aa25041efff | 7e3dfdb1e7506ff95819d21fa93c1f56ae1dba96 | refs/heads/dev | 2023-03-06T09:42:28.888333 | 2021-01-27T14:34:45 | 2021-01-27T14:34:45 | 299,598,533 | 0 | 1 | MIT | 2021-02-19T11:09:15 | 2020-09-29T11:40:40 | Python | UTF-8 | Python | false | false | 29 | py | """Used to saved the maps"""
| [
"esteban.soubiran@insa-cvl.fr"
] | esteban.soubiran@insa-cvl.fr |
78f16863cdeaac8e02056fb27103caf1469e5b38 | 53823a4a2c67fb4dd70f10e49da4f5370c9fa620 | /translator.py | 2b7d4711ae3c5c92892257468ba92db1ecc9c13d | [
"MIT"
] | permissive | dignissimus/Universal-Translator | d52b42b6f481a59cf17354385c66931a64573c31 | 40239a579ae10b6b9cb46995e87c6a2cde8b3109 | refs/heads/master | 2023-07-02T23:15:52.708991 | 2021-08-16T19:06:46 | 2021-08-16T19:06:46 | 396,930,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from arpeggio import ParserPython, visit_parse_tree
from parser import document
from visitor import DocumentVisitor
def debug():
parser = ParserPython(document)
while True:
tree = parser.parse(input(">> "))
print(tree)
for node in tree:
print(node)
def main():
parser = ParserPython(document, skipws=False)
with open("Sound Changes/English/PIE to Common Germanic") as file:
content = file.read()
tree = parser.parse(content)
entries = visit_parse_tree(tree, DocumentVisitor())
for entry in entries:
print(entry)
if __name__ == '__main__':
main()
| [
"sam.z.ezeh@gmail.com"
] | sam.z.ezeh@gmail.com |
8c5bc5f4bd79e3341f8f0e73ae8eb742781ec259 | 4d05be863b63a56a90b4c46b15069827b33ecaae | /Algorithms/leetcode_charlie/001_two_sum.py | 620d566dbd384fec815ccab50c6a4b01c5519abe | [] | no_license | leeo1116/PyCharm | e532fa9754056019508cc454214ee1a8ad9b26a9 | b6942c05c27556e5fe47879e8b823845c84c5430 | refs/heads/master | 2022-11-06T00:43:14.882453 | 2017-07-13T04:50:00 | 2017-07-13T04:50:00 | 36,851,636 | 0 | 1 | null | 2022-10-20T10:44:39 | 2015-06-04T06:09:09 | Python | UTF-8 | Python | false | false | 946 | py | __doc__ = """
Given an array of integers, find two numbers such that they add up to a specific target number.The function twoSum
should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Please note that your returned answers (both index1 and index2) are not zero-based.You may assume that each input would
have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
class Solution(object):
    """Two-sum solver; ``index`` labels the solution when printing."""

    def __init__(self, index):
        self.index = index

    def two_sum(self, nums, target):
        """Return 1-based (index1, index2) of the two entries summing to target.

        Single pass with a value -> position map; assumes exactly one answer
        exists. Returns None when no pair is found.
        """
        print('#{0} Solution:\n'.format(self.index))
        seen = {}
        for pos, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return seen[complement] + 1, pos + 1
            seen[value] = pos
s = Solution(1)
solution = s.two_sum([0, 4, 3, 0], 0)
print(solution)
| [
"leeo1116@gmail.com"
] | leeo1116@gmail.com |
39a4fa8f6cba79c9385a9038846830b5d4b3a494 | 361a11aabeaf7512db16177331ddb46d844b8c3b | /Examples/coin_bayes.py | 0f8dcfabc32c8a36c8334aae945e105cfe663d2d | [] | no_license | rickyfernandez/hartnel-summer-course | 5d506ddce1149784fae64514619daad706f35189 | 0f6b616f4cf0d2dd9fd93224085965a072221494 | refs/heads/master | 2016-09-06T18:06:57.080590 | 2014-06-16T16:48:51 | 2014-06-16T16:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | import matplotlib.pyplot as plt
from random import choice
import numpy as np
import scipy.integrate
def LogLikelihood(N, R, H):
    """Log-likelihood of R heads in N flips for each candidate bias in H.

    Entries where H is exactly 0 or 1 (log undefined) are left at zero.
    Returns (log-likelihood array, boolean mask of the valid biases).
    """
    valid = (H > 0) & ((1 - H) > 0)
    logp = np.zeros(H.size)
    heads_bias = H[valid]
    # Binomial log-likelihood (dropping the constant binomial coefficient).
    logp[valid] = R * np.log(heads_bias) + (N - R) * np.log(1.0 - heads_bias)
    return logp, valid
# unfair coin - heads 0.25 of the time
coin = [0, 1, 1, 1]
data = np.array([choice(coin) for i in xrange(1, 4097)])
# our initial ignorance of the coin - equal
# probabilty for any bias
prior = np.ones(1000)
bias = np.linspace(0, 1, len(prior))
posterior = []; posterior.append(prior)
events = [1, 2, 3, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
for flips in events:
heads = sum(data[0:flips] == 1)
post, i = LogLikelihood(flips, heads, bias)
# subtract max value
j = np.argmax(post[i])
post[i] -= post[j]
# take anti-log
post[i] = np.exp(post[i])
# normalize the probability
norm = scipy.integrate.simps(post, bias)
post /= norm
posterior.append(post)
# calculate mean and standard deviation
mu = scipy.integrate.simps(posterior[-1]*bias, bias)
vr = scipy.integrate.simps(posterior[-1]*(mu-bias)**2, bias)
sd = np.sqrt(vr)
print "Bayes-The coin bias is", mu
print "Bayes-limits:", mu+2.0*sd, mu-2.0*sd
print "Frequency-The coin bias is", sum(data==1)/float(len(data))
# plot the evolution of the bias
fig, axes = plt.subplots(nrows=5, ncols=3)
for i, ax in zip(posterior, axes.flat):
ax.plot(bias, i)
ax.axvline(.75, color = "r", ls="--")
ax.set_xticks([0, 0.5, 1])
ax.set_yticks([])
plt.show()
| [
"rafernandezjr@gmail.com"
] | rafernandezjr@gmail.com |
39dedc189e92631f1e8d8f27f939130d747cac6c | 441f12165b12038253b519d39ea04c52bdd49d38 | /logo/ReportINC_GUI.spec | f9b4b02c36eee3c849d6d21db01f0979ef48888c | [] | no_license | mirzalijafarov/reportinc | 621588d94c37f90129818f2249da8b5f13b4467a | 86ef7b86e8f6ba0c5860aeb8df63b624a66db625 | refs/heads/master | 2022-12-03T12:07:44.278550 | 2020-08-11T07:51:21 | 2020-08-11T07:51:21 | 273,668,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['ReportINC_GUI.py'],
pathex=['E:\\MOOCs\\My Projects\\ReportINC'],
binaries=[],
datas=[("E:\\MOOCs\\My Projects\\ReportINC\\venv\\Lib\\site-packages\\tinycss2\\VERSION", "tinycss2"),
("E:\\MOOCs\\My Projects\\ReportINC\\venv\\Lib\\site-packages\\cssselect2\\VERSION", "cssselect2")],
hiddenimports=["tinycss2", "cssselect2"],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='ReportINC_GUI',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
| [
"mirzali.jafarov@gmail.com"
] | mirzali.jafarov@gmail.com |
1aacbb0b5513e41ca28c746d059a2643a867134c | 8b44b30e6baf2d140ca475c1320b485e3a1d0570 | /tseries1_batch.py | dcf29f33230e570f53dc31793d19612ab04346e7 | [] | no_license | cbegeman/e3sm-cryo-analysis-scripts | b1e39f6874c3bbdd21c51b7c7cc3378d163a5447 | 6d18967bf8a83156229a93243211541c3b36a75d | refs/heads/master | 2023-09-01T12:41:05.742681 | 2023-08-18T22:50:43 | 2023-08-18T22:50:43 | 184,796,628 | 0 | 0 | null | 2019-05-03T17:41:38 | 2019-05-03T17:41:38 | null | UTF-8 | Python | false | false | 2,531 | py | #!/usr/bin/env python
'''
Script to compare some scalar values from different runs of Thwaites melt variability experiment.
'''
import sys
import os
import netCDF4
import datetime
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
from matplotlib import cm
from math import pi
import weddell_mod as wed
run_incr = ['ISMF','ISMF-noEAIS','ISMF-3dGM','ISMF-noDIB']
#placename = 'wed_pyc_Ryan_shallow'
#placename = 'gyre_interior'
placename = 'wed_pyc_Ryan_shelf'
#var_incr = ['taux','tauy']
#var_incr = ['T','S','rho','u','v']
#var_incr = ['T','S']
#var_incr = ['unormal']
var_incr = ['rho','rho']
rholim = [1027.2,1027.9]
lat = -70
lon = 340
#filename = 'ISMF_ISMF-noEAIS_rho_wed_pyc_Ryan_shallow_abovepyc_t070-101'
year_range = [10,180]
#wed.tseries1(run_incr,['mean'],year_range=year_range,
# placename = 'wed_pyc_Ryan',
# apply_filter = True, cutoff = 1/4,
# print_to_file=True,create_figure=True,
# input_filename = 'ISMF_zpyc_wed_pyc_Ryan_70-101_zlim-4500--3500')
#wed.tseries1(run_incr,['mean'],year_range=year_range,
# placename = 'wed_pyc_Ryan',
# apply_filter = True, cutoff = 1/4,
# print_to_file=True,create_figure=True,
# input_filename = 'ISMF_zpyc_wed_pyc_Ryan_70-101_zlim-2000--500')
#wed.tseries1(run_incr,var_incr,year_range=year_range,
# placename = 'gyre_interior',
# print_to_file=True,create_figure=True,
# apply_filter = True, cutoff = 1/4,
# ztop_pyc = [True], zbottom_pyc = [False],
# year_overlay=False,overwrite=True)
#wed.tseries1(run_incr,var_incr,year_range=year_range,
# placename = placename,
# print_to_file=True,create_figure=True,
# varlim = rholim,
# #apply_filter = True, cutoff = 1/4,
# ztop_pyc = [False], zbottom_pyc = [True],
# year_overlay=False,overwrite=True)
wed.tseries1(run_incr,var_incr,year_range=year_range,
placename = placename,
print_to_file=True,create_figure=False,
varlim = rholim,
#input_filename = filename,
#apply_filter = True, cutoff = 1/4,
ztop_pyc = [True,False], zbottom_pyc = [False,True],
year_overlay=False,overwrite=True)
#zeval = [-100,-400],
#zrange = [-100,-500],
#lat=lat, lon=lon,
#option = 'coord',placename = 'S4E',#'M31W',
#zrange=[0,20],zab=True,
| [
"cmbranecky@gmail.com"
] | cmbranecky@gmail.com |
6701845b6d646ab2746bd48246a9031546292106 | eec6967b2c794270bebd43916bac8eee92f17b10 | /Long program.py | cff5df2e43c371b886e2db9447afa29b75f18e3d | [] | no_license | oliviachang29/python-elementary-school | 1dd5f4f7f98a4a4529666d7b19ab1bc9be3e7323 | 1f6c27894990860bf7e86cb0ea93e6bd5d05ae73 | refs/heads/master | 2020-03-23T15:18:21.468984 | 2018-07-20T16:54:03 | 2018-07-20T16:54:03 | 141,736,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #Annoying Program
#Annoys the user by repeatedly printing() things.
long = input("Put in something long: ")
number = 0
for letter in long:
print("Umm...hi?")
number += 1
if number > 5:
print("You put in something quite long! You did it", number, "whole times!")
else:
print("Luckily your input wasn't very long.")
print("\a")
input("\n\nPress enter and quit.")
| [
"hello@oliviachang.me"
] | hello@oliviachang.me |
a64e6b4374c6eb913bd70fe5a1c26df57803bb93 | d8b8ff78310d874434d58dc36b452149c0ffcf09 | /class7.py | a7a237bf574d6c0162560786090acb99ddd8f01f | [] | no_license | Manas2909/Python-Stuff | 6133a14cc1539feac6e5f6bb67d2184ad2c0f0e8 | c222c76b977e4499d9d906e15e56143a1b967574 | refs/heads/master | 2020-09-15T04:01:17.208827 | 2019-12-03T07:27:25 | 2019-12-03T07:27:25 | 223,343,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 13:23:58 2019
@author: Manas
"""
class sample():
    # Class-level attributes, shared by every instance until rebound.
    x=5
    y=10
    @classmethod
    def modifyx(cls):
        # Rebinds the *class* attribute x, so the change is visible to all instances.
        cls.x=10
    @classmethod
    def modifyy(cls):
        # Rebinds the *class* attribute y, so the change is visible to all instances.
        cls.y=5
s=sample()
s2=sample()
#print(s.x," ",s.y)
#print(s2.x," ",s2.y)
s.modifyx()
s2.modifyy()
print(s.x," ",s.y)
print(s2.x," ",s2.y) | [
"noreply@github.com"
] | noreply@github.com |
9c665678062fbdc64fbc43c40dc5e6b95d684db0 | a3782034a943037e9ce94c2370ab974e5a14cb08 | /featureC.py | 298d5519a2641baaf66a5a4064487796f287c13f | [] | no_license | Issa900/keuleumPhD | 5f0584adaa600334134c80a523ad5d8926b8c52a | 7dd12fe326b1a299b72767b417d51cc66b64bbde | refs/heads/master | 2021-06-16T04:31:37.704438 | 2021-01-12T14:04:45 | 2021-01-12T14:04:45 | 129,396,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script is for parsing the output files of the program FeatureCount; it counts the number of reads mapped on sex chromosome versus autosomes in Marchantia polymorpha
#*** takes as input files i) output file of FeatureCount (count.txt), and ii) a list of specific genes_id
##Usage:
# $ python featureC.py count.txt list_of_Xgenes.txt
import os, sys, argparse, re
import itertools
from itertools import islice
import operator
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#import seaborn as sns
import statistics, math
import scipy.stats as stats
def match_X_scaffold (list_X_specificGene, input_file):
" Match genes on X chromosomes file found in the gtf file and extract the gene_id of the corresponding by storig it in the dict geneMatch "
print('\n Analyses of the number of reads mapped on sex chromosome versus autosomes \n \n')
with open (input_file, 'r') as f, open (list_X_specificGene) as xGene:
next(xGene)
geneXlist=[]
for line in xGene:
line=line.strip()
if line:
gene_X=line.split()[0]
geneXlist.append(gene_X)
all_reads=[]
chr_dict={}
gene_dict={}
geneMatch={}
Y_reads=[]
X_reads=[]
A_reads=[]
#nb=0
genenb=0
for line in islice(f, 2, None):
if line.startswith('Geneid'):
pass
genenb+=1
line = line.strip()
tmp=line.split()
Gene_id=tmp[0]
Chr=tmp[1]
chr_name=Chr.split(';')[0]
exonNum=len(Chr.split(';'))
matchLen=int(tmp[5])
coverage=int(tmp[-1])+1
all_reads.append(int(coverage))
if Gene_id in gene_dict:
gene_dict[Gene_id]+=coverage
else:
gene_dict[Gene_id]=coverage
if chr_name in chr_dict:
chr_dict[chr_name]+=coverage
else:
chr_dict[chr_name]=coverage
#Count the coverage of reads matched with eacht X genes in the list
if Gene_id in geneXlist:
#nb+=1
X_reads.append(coverage)
if Gene_id not in geneMatch:
geneMatch[Gene_id]=coverage
else:
geneMatch[Gene_id]+=coverage
elif chr_name=='Chr_Y_A' or chr_name=='Chr_Y_B':
Y_reads.append(coverage)
else:
A_reads.append(coverage)
# To count the number of features (exons), grouped into Meta-(gene)
#if chr_name in MYdict and int(matchLen)>=100:
#MYdict[chr_name]+=exonNum
#else:
#MYdict[chr_name]=exonNum
ChrY_count = tmp_autosomal_count = ChrX_count = 0
#Count total number of reads on Y and autosome only
for key in chr_dict:
if key=='Chr_Y_B' or key=='Chr_Y_A':
#Y_reads.append(int(gene_dict[key]))
ChrY_count+=int(chr_dict[key])
else:
tmp_autosomal_count+=int(chr_dict[key])
for gene_name in geneMatch:
i=int(geneMatch[gene_name])
ChrX_count+=i
autosomal_count = int(tmp_autosomal_count) - int(ChrX_count)
print ('# Total number of genes annotated =\n', genenb)
print('\n# Total number of reads mapped on:', '\n Y Chromosome =', ChrY_count, '\n X Chromosome =', ChrX_count, '\n Autosomes =', autosomal_count, '\nTotal =', ChrY_count + autosomal_count+ChrX_count, '\n\n')
# Can have this value by grep -c 'gene'feature_output.count
# Count the number of reads for the chromosome Y and autosomes.
#readLen=sorted(gene_dict.items(), key=operator.itemgetter(1))
#print(readLen[0])
#Min, Max and sdt of reads mapped
print('# Difference in the number of reads mapped\n')
minY=sorted(Y_reads)[0]
maxY=sorted(Y_reads)[-1]
sd_Y=statistics.stdev(Y_reads)
moy_Y=sum(Y_reads)/len(Y_reads)
print('# Y Chromosome \n Minimum =', minY, '\n Maximum =', maxY, '\n SD =', sd_Y)
minX=sorted(X_reads)[0]
maxX=sorted(X_reads)[-1]
sd_X=statistics.stdev(X_reads)
moy_X=sum(X_reads)/len(X_reads)
print('# X Chromosome \n Minimum =', minX, '\n Maximum =', maxX, '\n SD =', sd_X)
minA=sorted(A_reads)[0]
maxA=sorted(A_reads)[-1]
sd_A=statistics.stdev(A_reads)
moy_A=sum(A_reads)/len(A_reads)
print('# Autosomes \n Minimum =', minA, '\n Maximum =', maxA, '\n SD =', sd_A)
# Plot dist of coverage
#fig, ax = plt.subplots()
#ax.plot(axes, sdval)
all_reads.sort()
##Normalization
#mean_r = np.mean(all_reads)
#std_r=np.std(all_reads)
#s = stats.norm.pdf(all_reads, mean_r, std_r)
#plt.plot(all_reads, s)
plt.plot(range(len(all_reads)), (all_reads), color='red', label= 'sorted data')
plt.grid(True)
plt.title(u"Distribution of scaffold coverage")
plt.xlabel('Number of genes annotated', fontsize=12)
plt.ylabel('Number of read mapped', fontsize=12)
#plt.show()
plt.savefig(count_file+'.sorted.pdf')
#read gtf_file
#with open (gtf_file) as gtf:
#for gene in gtf:
#tmp=gene.split()
#gene_id=tmp[11].strip('";')
#gene_name=str(tmp[13].strip('";'))
#if gene_id in dico_gene.keys():
#list(dico_gene[gene_id]).append(gene_name)
#else:
#dico_gene[gene_id]=gene_name
#for x in dico_gene:
#val=dico_gene[x]
#for gene in geneXlist:
#if str(val)==str(gene): #[str(i) for i in geneXlist]:
#geneMatch[x]=gene
##print((geneMatch))
count_file=sys.argv[1] # Output file of the program FeatureCount(.txt)
#gtf=sys.argv[2] # GTF file (Dowload the GFF3 file from Marchantia.info and convert into GTF by gffread package
x=sys.argv[2] # List of genes identified on the X chromosome
#f1=read_count(count_file)
ID_Xgenes=match_X_scaffold(x, count_file)
| [
"diop.seydina@laposte.net"
] | diop.seydina@laposte.net |
97ea993e3168ee900ba2082c994b3b1006f89dd2 | 774cba8ddd1b616f788144174a2d12d58840d702 | /flaskProject/app/app.py | a4ae09e1d0166a0d223593e075cde883bfe56df4 | [] | no_license | BirolAYDIN/dockerhub_python | cef94a7168dfc9625660bae4271a30409bfff943 | 1dd238fac7df0970a84edd58c7ab2009fd0ffb00 | refs/heads/master | 2022-12-03T16:47:28.481303 | 2020-08-16T15:34:20 | 2020-08-16T15:34:20 | 287,054,669 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | from flask import Flask,render_template
import pandas as pd
import re
df = pd.read_json('data.json')
## Akademik personel sayıları ########################################################
prof = df.loc[df['unvan_adSoyad'].str.contains('Prof. Dr.',flags=re.I,regex=True)]
doc = df.loc[df['unvan_adSoyad'].str.contains('Doç. Dr.',flags=re.I,regex=True)]
dr = df.loc[df['unvan_adSoyad'].str.contains('Dr. Öğr. Üyesi',flags=re.I,regex=True)]
ogr_gor = df.loc[df['unvan_adSoyad'].str.contains('Öğr. Gör.',flags=re.I,regex=True)]
ar_gor = df.loc[df['unvan_adSoyad'].str.contains('Ara*ş.',flags=re.I,regex=True)]
diger = len(df) - len(prof + doc + dr + ogr_gor + ar_gor)
## Akademik personel sayıları ########################################################
## AKADEMİK PERSONEL GENEL DAĞILIMI ##############################################################################
tip_fak = df.loc[df['birim'].str.contains('Tıp Fakültesi',flags=re.I,regex=True)]
Teknik= df.loc[df['birim'].str.contains('Teknik Bilimler Meslek Yüksekokulu',flags=re.I,regex=True)]
Iktisadi = df.loc[df['birim'].str.contains('İktisadi ve İdari Bilimler Fakültesi',flags=re.I,regex=True)]
Corlu_Muh = df.loc[df['birim'].str.contains('Çorlu Mühendislik Fakültesi',flags=re.I,regex=True)]
Veteriner= df.loc[df['birim'].str.contains('Veteriner Fakültesi',flags=re.I,regex=True)]
Ilahiyat = df.loc[df['birim'].str.contains('İlahiyat Fakültesi',flags=re.I,regex=True)]
Guz_San= df.loc[df['birim'].str.contains('Güzel Sanatlar, Tasarım ve Mimarlık Fakültesi',flags=re.I,regex=True)]
Ziraat = df.loc[df['birim'].str.contains('Ziraat Fakültesi',flags=re.I,regex=True)]
Fen_Edeb= df.loc[df['birim'].str.contains('Fen - Edebiyat Fakültesi',flags=re.I,regex=True)]
Corlu_Meslek= df.loc[df['birim'].str.contains('Çorlu Meslek Yüksekokulu',flags=re.I,regex=True)]
## AKADEMİK PERSONEL GENEL DAĞILIMI ##############################################################################
data = [ len(prof) ,len(doc) ,len(dr) ,len(ogr_gor) ,len(ar_gor) , diger , len(Teknik), len(Iktisadi), len(Corlu_Meslek), len(Veteriner), len(Ilahiyat), len(Guz_San),
len(Ziraat),len(Fen_Edeb),len(Corlu_Muh),len(tip_fak) ]
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html',data = data)
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5000)
| [
"57332230+BirolAYDIN@users.noreply.github.com"
] | 57332230+BirolAYDIN@users.noreply.github.com |
04f671f1dd831bc8813fd1d8b1a5d464a43d2ca1 | ce56da8cddb04370d61e43aac156da85a3c4b707 | /src/process_clang.py | aa02fc41a58917282ef1ffe314a5b9eed6246958 | [
"MIT"
] | permissive | mitchute/clang-format-postprocess | 5966da47e478ace188ff84c7eddf7b7c175da0b4 | 337c72e63318ded5cada2c34906fc644d7d1323e | refs/heads/master | 2020-08-30T10:06:20.032123 | 2019-11-01T01:51:22 | 2019-11-01T01:51:22 | 218,343,970 | 0 | 0 | null | 2019-10-29T17:27:54 | 2019-10-29T17:25:02 | Python | UTF-8 | Python | false | false | 10,232 | py | import argparse
import logging
import os
import subprocess
from collections import defaultdict
from src.base import Base
class ClangPostProcess(Base):
def __init__(self, source_dir, list_file=None, output_dir=None):
Base.__init__(self, source_dir=source_dir, list_file=list_file, output_dir=output_dir)
self.exe_dir = os.path.normpath(os.path.join(os.path.abspath(__file__), "..", "..", "bin"))
class Method(object):
def __init__(self):
self.name = None
self.exe = None
self.global_storage = Method()
self.has_local_qualifiers = Method()
self.has_local_storage = Method()
self.is_static_storage_cls = Method()
# check hasGlobalStorage
self.global_storage.name = "global-detect-hasGlobalStorage"
self.global_storage.exe = os.path.join(self.exe_dir, self.global_storage.name)
if not os.path.exists(self.global_storage.exe):
raise SystemExit("'{}' exe does not exist".format(self.global_storage.name))
else:
logging.info("'{}' exe found".format(self.global_storage.name))
# check hasLocalQualifiers
self.has_local_qualifiers.name = "global-detect-hasLocalQualifiers"
self.has_local_qualifiers.exe = os.path.join(self.exe_dir, self.has_local_qualifiers.name)
if not os.path.exists(self.has_local_qualifiers.exe):
raise SystemExit("'{}' exe does not exist".format(self.has_local_qualifiers.name))
else:
logging.info("'{}' exe found".format(self.has_local_qualifiers.name))
# check hasLocalStorage
self.has_local_storage.name = "global-detect-hasLocalStorage"
self.has_local_storage.exe = os.path.join(self.exe_dir, self.has_local_storage.name)
if not os.path.exists(self.has_local_storage.exe):
raise SystemExit("'{}' exe does not exist".format(self.has_local_storage.name))
else:
logging.info("'{}' exe found".format(self.has_local_storage.name))
# check isStaticStorageClass
self.is_static_storage_cls.name = "global-detect-isStaticStorageClass"
self.is_static_storage_cls.exe = os.path.join(self.exe_dir, self.is_static_storage_cls.name)
if not os.path.exists(self.is_static_storage_cls.exe):
raise SystemExit("'{}' exe does not exist".format(self.is_static_storage_cls.name))
else:
logging.info("'{}' exe found".format(self.is_static_storage_cls.name))
self.prev_line_no = 0
@staticmethod
def run_exe(caller, exe_path, f_path):
args = ["-p",
"/Users/mmitchel/Projects/EnergyPlus/dev/develop/cmake-build-debug",
"-extra-arg=-I/usr/local/opt/llvm@7/include/c++/",
"-extra-arg=-I/usr/local/opt/llvm@7/include/c++/v1",
"-extra-arg=-I/usr/local/opt/llvm@7//lib/clang/7.0.0/include/",
"-extra-arg=-I/usr/local/opt/llvm@7/lib/clang/7.1.0/include"]
logging.debug(" ".join([exe_path,
args[0],
args[1],
args[2],
args[3],
args[4],
args[5]]))
try:
return subprocess.check_output([exe_path,
args[0],
args[1],
args[2],
args[3],
args[4],
args[5],
f_path],
shell=False)
except:
f_name = f_path.split("/")[-1]
logging.error("Failed on caller: '{}' for file: {}".format(caller, f_name))
def run_has_global_storage(self, f_path):
logging.info("Running: {}".format(self.global_storage.name))
return self.run_exe(self.global_storage.name, self.global_storage.exe, f_path)
def run_has_local_qualifiers(self, f_path):
logging.info("Running: {}".format(self.has_local_qualifiers.name))
return self.run_exe(self.has_local_qualifiers.name, self.has_local_qualifiers.exe, f_path)
def run_has_local_storage(self, f_path):
logging.info("Running: {}".format(self.has_local_storage.name))
return self.run_exe(self.has_local_storage.name, self.has_local_storage.exe, f_path)
def run_is_static_storage_class(self, f_path):
logging.info("Running: {}".format(self.is_static_storage_cls.name))
return self.run_exe(self.is_static_storage_cls.name, self.is_static_storage_cls.exe, f_path)
def process_output(self, stream, f_path, method):
f_name = f_path.split('/')[-1]
lines = stream.decode('utf-8').split('\n')
start_idx = 0
for line in lines:
if f_name in line:
break
start_idx += 1
var_lst = []
for idx in range(start_idx, len(lines) - 1):
line = lines[idx]
var_lst.append(self.process_line(line, method))
return var_lst
def process_line(self, line, method):
line = line.replace("\'", "\"")
name = line.split("\"")[1]
namespace = ""
if "::" in name:
tokens = name.split("::")
name = tokens[-1]
namespace = tokens[-2]
position_str = line.split("<")[1]
position_str = position_str.split(">")[0]
if ".cc" in position_str:
line_no = position_str.split(":")[1]
self.prev_line_no = line_no
elif "line" in position_str:
line_no = position_str.split(":")[1]
self.prev_line_no = line_no
else:
line_no = self.prev_line_no
d = {"name": name, "namespace": namespace, "line-no": line_no, method: True}
if "is static local:" in line:
static_local = int(line.split(":")[-1].strip())
if static_local == 1:
d["is-static"] = True
return d
def process_single_file(self, f_path):
glob_store_lst = self.process_output(self.run_has_global_storage(f_path),
f_path, 'has-global-storage')
local_quals_lst = self.process_output(self.run_has_local_qualifiers(f_path),
f_path, 'has-local-qualifier')
local_storage_lst = self.process_output(self.run_has_local_storage(f_path),
f_path, 'has-local-storage')
static_storage_lst = self.process_output(self.run_is_static_storage_class(f_path),
f_path, 'is-static-storage-cls')
return self.merge_lists(glob_store_lst, local_quals_lst, local_storage_lst, static_storage_lst)
@staticmethod
def merge_lists(*args):
main_lst = []
for lst_num, var_lst in enumerate(args):
for d_var in var_lst:
var_merged = False
if lst_num == 0:
pass
else:
for main_idx, d_var_main in enumerate(main_lst):
if (d_var_main["name"] == d_var["name"]) and (d_var_main["line-no"] == d_var["line-no"]):
main_lst[main_idx] = {**d_var_main, **d_var}
var_merged = True
if not var_merged:
main_lst.append(d_var)
for idx, d in enumerate(main_lst):
main_lst[idx] = defaultdict(str, d)
return main_lst
def process(self):
with open(self.output_dir, "a+") as f:
f.write("file,"
"name,"
"namespace,"
"line-no,"
"is-static,"
"has-global-storage,"
"has-local-qualifier,"
"has-local-storage,"
"is-static-storage-cls"
"\n")
for file in self.files:
f_name = file.split('/')[-1]
try:
logging.info("{} : started".format(file))
single_file_output = self.process_single_file(file)
for d in single_file_output:
f.write("{},{},{},{},{},{},{},{},{}\n".format(f_name,
d["name"],
d["namespace"],
d["line-no"],
d['is-static'],
d['has-global-storage'],
d['has-local-qualifier'],
d['has-local-storage'],
d['is-static-storage-cls']))
logging.info("{} : completed".format(file))
except:
logging.error("{} : failed".format(file))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', dest="source_dir", help="path to the EnergyPlus /src/EnergyPlus dir")
parser.add_argument('-o', dest="output_dir", help="path to the output directory")
parser.add_argument('-l', dest="list_file",
help="(optional) path to list file with .cc file names to process. If list file not given, "
"all .cc files in /src/EnergyPlus will be processed.")
results = parser.parse_args()
if results.source_dir is None:
raise SystemExit("source_dir '-s' argument required")
else:
P = ClangPostProcess(source_dir=results.source_dir, list_file=results.list_file, output_dir=results.output_dir)
P.process()
| [
"matt.mitchell@nrel.gov"
] | matt.mitchell@nrel.gov |
9c7bc9a98256be1927a4268b0ffcccb8e9551e69 | 72744aac49c4a9e9f687175c3f19fbc53a3e822c | /09/solution.py | 9cfcd0c06cb7e3b5cf65bbef5ed5d69ddd9c4f40 | [] | no_license | LukasNickel/aoc | 71d97c51159252b00221f0b0b55e921e8dd22119 | 4501f239fd5ea9b5a1a0576f2abb9914d4b8b334 | refs/heads/main | 2023-02-02T05:48:14.597328 | 2020-12-18T19:19:07 | 2020-12-18T19:19:07 | 317,523,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | from itertools import combinations
def part1(puzzle, plength=25, n=25):
indices_to_test = range(plength, len(puzzle) - 1)
for i in indices_to_test:
if not valid(puzzle[i], puzzle[i-n:i]):
return puzzle[i]
return -1
def valid(number, previous):
for c in combinations(previous, 2):
if sum(c) == number:
return True
return False
def part2(puzzle, weak_number=-1):
for i in range(0, len(puzzle)):
sum_seq = 0
for j in range(i, len(puzzle)):
sum_seq += puzzle[j]
if sum_seq == weak_number:
seq = puzzle[i:j+1]
return min(seq) + max(seq)
elif sum_seq > weak_number:
break
return -1
def main():
with open('input') as f:
puzzle = [int(l) for l in f.readlines()]
with open('example') as f:
example = [int(l) for l in f.readlines()]
assert part1(example, plength=5, n=5) == 127
print('Solution Part 1:', part1(puzzle))
assert part2(example, 127) == 62
print('Solution Part 2:', part2(puzzle, part1(puzzle)))
if __name__ == '__main__':
main()
| [
"lukas.nickel@tu-dortmund.de"
] | lukas.nickel@tu-dortmund.de |
7f90a5762637be673cd26114939a10fbf0ebfd25 | 2ac1419480f7177ce7cbce2ea51734ac2f14d810 | /Algorithm_problems/Python/level1/정수_제곱근_판별.py | 5216c6a7cf3572aac00149223edc99890e5a8980 | [] | no_license | swarthyPig/Algorithm-training-and-SQL-query | d0e854c8118dfa0565848f0a98facaa68034fb99 | 562888ee5a6386f4ae52d60d3012de66931159cc | refs/heads/master | 2020-07-07T16:05:04.045583 | 2020-03-03T09:38:53 | 2020-03-03T09:38:53 | 203,399,615 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 245 | py | def solution(n):
length = n ** 0.5 # 판단할 수의 제곱근을 구함
if length % 1 == 0: # 구한 제곱근을 1로 나누었을때 0으로 떨어지면 특정수의 제곱임
return (length+1) ** 2
return -1 | [
"noreply@github.com"
] | noreply@github.com |
05aa2d5a5e47841960192b0a7cb15acfc232b161 | 40f0ba4f9cc695e7fa37cdc0cdfdeb18cf7f04b6 | /conftest.py | 01032a2c0bbb166b7e1288a848d1765e8f2f69ea | [] | no_license | stmirage/exre3Stepicautomata | 0c303f051886e1529ca1414048f24a85896870a4 | a6ae1af30e0f26148ea7257c1040b1862b677324 | refs/heads/master | 2022-04-22T17:23:33.394747 | 2020-04-13T14:53:46 | 2020-04-13T14:53:46 | 255,343,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def pytest_addoption(parser):
parser.addoption('--language', action='store', default="en",
help="Choose language:")
@pytest.fixture(scope="function")
def browser(request):
options = Options()
language = request.config.getoption("language")
options.add_experimental_option('prefs', {'intl.accept_languages': language})
browser = webdriver.Chrome(options=options)
yield browser
print("\nquit browser..")
browser.quit()
| [
"noreply@github.com"
] | noreply@github.com |
f8940de643087082e5912d2288535fcea3c528d7 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1073585/snippet.py | 3b5fdb9cca782aeebdcb2fd67a5527bed28bd730 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 306 | py | def proc_starttime(pid):
p = re.compile(r"^btime (\d+)$", re.MULTILINE)
m = p.search(open("/proc/stat").read())
btime = int(m.groups()[0])
clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
stime = int(open("/proc/%d/stat" % pid).read().split()[21]) / clk_tck
return btime + stime | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
adcee77f722346395c212f4a7c3a39d85b093444 | d19de77a7ef09e4ecc92ca018267c3973115533a | /Home/views.py | 5ed4eb3a19789cc5498652f6381cce090587d556 | [] | no_license | NisheshDangol/django_vrs | 3d9e64438f99338227048cc0dd4a5d8908f3a1b9 | add0362443803ec9a6f976c4be508cbae4fad4ab | refs/heads/master | 2021-01-16T05:28:09.475834 | 2020-03-29T01:05:17 | 2020-03-29T01:05:17 | 242,991,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
# Create your views here.
from account.models import Cars, Bikes, Order, CustomerProfile
def index(request):
car = Cars.objects.all()
bike = Bikes.objects.all()
return render(request, 'Home/index.html', {'car':car, 'bike':bike})
def contact(request):
return render(request, 'Home/contact.html')
def services(request):
return render(request, 'Home/services.html')
def cars(request):
car = Cars.objects.all()
return render(request, 'Home/cars.html', {'car': car})
def bikes(request):
bike = Bikes.objects.all()
return render(request, 'Home/bikes.html', {'bike':bike})
def index(request):
car = Cars.objects.all()
return render(request, 'Home/index.html', {'car': car})
@login_required()
def booking_detail(request):
order=Order.objects.filter(customer=request.user)
return render(request, 'Home/booking_detail.html', {'order':order})
@login_required()
def cancel_booking(request,id):
order = Order.objects.get(id=id)
if request.method == 'POST':
if order.status == 'Pending':
order.status = "Canceled"
order.save()
return redirect('booking_detail_home')
return render(request, 'Home/cancel_booking.html', {'order':order})
| [
"dangolnaruto@gmail.com"
] | dangolnaruto@gmail.com |
82f8213abcbc8b1efceb4816d3795db9393c7af8 | 5fccc1b044d8fa9e3916f1039af76cf394013c8d | /TestDjango1/myproject/urls.py | e4977e2db916584eb5a9dc8615d0d8f5d5c598e4 | [] | no_license | MohamedRagabAnas/DjangoExamples | 79288973188740264271b86619945d5c2c4e5dbc | f31a15da8c21fc235a3c221ef6b67731ca397939 | refs/heads/master | 2020-03-25T07:29:34.157396 | 2018-08-05T22:32:06 | 2018-08-05T22:32:06 | 143,564,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from myapp.views import hello
from django.conf.urls import url, include
from myapp.views import BSTemplate
from myapp.views import hello_template
from myapp.views import hello_template_simple
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', admin.site.urls),
url(r'^hello/', hello, name = 'hello'),
url(r'^hello_template/', hello_template, name = 'hello_template'),
url(r'^BSTemplate/', BSTemplate, name = 'BSTemplate'),
url(r'^hello_template_simple/', hello_template_simple, name = 'hello_template_simple'),
url(r'^articles/', include('article.url')),
]
| [
"midoragab353@gmail.com"
] | midoragab353@gmail.com |
71873a2b0afbe8912deb5770f42b94cecc5ae5d1 | b9918acb388cb2ff29c8ffa33a1fdb07996d8782 | /returns.py | 2edc3e320b31a349d7d79d365cc46dc8f5748074 | [] | no_license | NoahHobbs/Store-Returns | 75c83a24a5d5e6555f9b1694726d6fbd54fe6f6a | e573acb82af63b50be921de0c6c0b6405f080ddc | refs/heads/master | 2023-01-28T16:32:06.572691 | 2020-12-07T23:03:09 | 2020-12-07T23:03:09 | 319,166,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,165 | py | """
Noah Hobbs
11/12/2020
Kohls/Amazon return program with GUI
"""
import PySimpleGUI as sg
class ItemReturn:
# Item return is acting like my node class for a linked list
def __init__(self, items, company):
self.items = items # items to be returned
self.company = company # name of the company that the return is from
self.nxt = None # used to get the next item in the queue
self.size = len(self.items) # size of the returns
class ReturnQueue:
def __init__(self):
self.queue_size = 0 # the size of the queue
self.amazon_front = None # front of the amazon returns
self.kohls_front = None # front of the kohls returns
def make_return(self, item_return):
"""
Basically is my enqueue method
:param item_return: the item to be enqueued
:return: no return but the method increments the queue size and
enqueues what item is passed
"""
company = "amazon" if item_return.company.lower() == "amazon" else \
"kohls"
front = getattr(self, f'{company}_front')
if front is None:
setattr(self, f'{company}_front', item_return)
else:
setattr(self, f'{company}_front',
self.set_front(front, item_return))
self.queue_size += 1
def set_front(self, one, two):
"""
Sets the front of my queue
"""
if one is None:
return two
if two is None:
return one
temp = None
if len(one.items) < len(two.items):
temp = one
one.nxt = self.set_front(one.nxt, two)
else:
temp = two
two.nxt = self.set_front(one, two.nxt)
return temp
def return_all(self, company=None):
"""
:param company: If the user passes a company it will return the
returns of that company (Used heavily in the GUI implementation)
:return: Returns all the items that have been enqueued
"""
if not company:
kohls_return = self.return_all("kohls")
amazon_return = self.return_all("amazon")
return kohls_return + amazon_return
my_list = []
company = "amazon" if company.lower() == "amazon" else "kohls"
while getattr(self, f'{company}_front'):
my_list.append(self.next_return(company))
return my_list
def next_return(self, company=None):
"""
Gets the next item in the queue
:param company: Either amazon or kohls
:return: returns the current front of the queue
"""
if not company:
kohls_return = self.next_return("kohls")
amazon_return = self.next_return("amazon")
return [kohls_return, amazon_return]
company = "amazon" if company.lower() == "amazon" else "kohls"
front = getattr(self, f'{company}_front')
curr = front
nx = getattr(self, f'{company}_front').nxt
curr.nxt = None
setattr(self, f'{company}_front', nx)
return curr.items
def amt_of_returns(self):
# this method was mainly for testing because the attributes take the
# place of it later in the program
# I left it just to give an idea of how the program morphed over time
return f'{self.queue_size} Customers have made returns'
if __name__ == '__main__':
# making my return queue object
return_queue = ReturnQueue()
# Creating my Theme
sg.theme('Topanga')
# Creating my layout
layout = [[sg.Text('Enter your returns (Separate values by comma)')],
[sg.InputText(key="returned")],
[sg.Radio('Kohl\'s', "RADIO1", default=False, key="kohls")],
[sg.Radio(
'Amazon', "RADIO1", default=False, key="amazon",)],
[sg.Button('Enter')],
[sg.Text('Amazon to process'), sg.Text('Kohl\'s to process')],
[sg.Text('', key='amazon_text', size=(16, 12)),
sg.Text('', key='kohls_text', size=(10, 12))],
[sg.Button('Process all'), sg.Button('Process Kohl\'s',
key="return_kohls"),
sg.Button('Process Amazon', key="return_amazon")],
[sg.Button('Exit')]]
# Create the Window
window = sg.Window('Returns', layout, size=(300, 350))
# reads the user events or if the user closes
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Exit': # if the user closes
# the window
break
# if the user enters their returns get the company that they select
# and update the text box to be empty
if event == 'Enter':
if values['returned'] == '' or values['returned'] == ' ':
sg.Popup('Please make at least one return', title='ERROR')
continue
if values["kohls"]:
# setting the company to Kohls if the radio button is selected
company = "Kohls"
set_window = window.FindElement('kohls_text')
# this line ensures that I won't have the list of returns and
# with the text showing how many returns were made
if 'process' in set_window.get():
set_window.Update('')
set_window.Update(f'{set_window.get()}\n{values["returned"]}')
else:
# setting the company to Amazon if the radio button is selected
company = "Amazon"
set_window = window.FindElement('amazon_text')
# this line ensures that I won't have the list of returns and
# with the text showing how many returns were made
if 'process' in set_window.get():
set_window.Update('')
set_window.Update(f'{set_window.get()}\n{values["returned"]}')
# change variable after deleting other main
my_item4 = ItemReturn(values["returned"].split(','), company)
return_queue.make_return(my_item4)
print(f'You entered {values["returned"]}')
# clearing my text field after the user enters a return
window.FindElement('returned').Update('')
if event == 'Process all':
# will return both Kohl's and Amazon returns regardless of their
# company
x = return_queue.return_all("Kohls")
total_kohls = sum([len(r) for r in x])
window.FindElement('kohls_text').Update(f'{total_kohls} '
f'Kohl\'s returns '
'processed')
y = return_queue.return_all("Amazon")
total_amazon = sum([len(r) for r in y])
window.FindElement('amazon_text').Update(f'{total_amazon} '
f'Amazon returns '
'processed')
if event == "return_kohls":
# Returns specifically Kohl's items
x = return_queue.return_all("Kohls")
total_num_returns = sum([len(r) for r in x])
window.FindElement('kohls_text').Update(f'{total_num_returns} '
f'Kohl\'s returns '
'processed')
if event == "return_amazon":
# Returns specifically Amazon items
x = return_queue.return_all("Amazon")
total_num_returns = sum([len(r) for r in x])
window.FindElement('amazon_text').Update(f'{total_num_returns} '
f'Amazon returns '
'processed')
# this line resolved some issues with the GUI I was having
window.Refresh()
# closing my window
window.close()
# once the user exits the GUI this line executes
input('Press any key to continue')
| [
"mlzgphoenix@gmail.com"
] | mlzgphoenix@gmail.com |
a5897403872d864b1847a0425bdab46f4f20fda5 | a58b82ad1c0e09af9aa154330583284fb9703d59 | /temp.py | 8169c04585f70c3a384ff3902a892de10aa041fa | [] | no_license | et75/StarGazer | ae6800b1a08d69b02a5ec69bad54b29f29b5760b | da11b7847b30497d9af8a91b7eb7060446e22b34 | refs/heads/master | 2023-06-26T18:38:50.083227 | 2021-07-23T08:07:18 | 2021-07-23T08:07:18 | 388,609,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from datetime import datetime
from datetime import timedelta
today = datetime.today
date = today - timedelta(days=1) | [
"43972987+et75@users.noreply.github.com"
] | 43972987+et75@users.noreply.github.com |
60f6265eaeb4048480fe49adcdba9dce4d85bf06 | d2b02e1aa8639f6ddcb5d01558e61455b66ad988 | /app_blink/demo_kivy/demo_1.py | 9097b78c7c10dd2c97b2c404603de474727ee698 | [] | no_license | Wanchatpookhuntod/Detect-factors-CVS | 076d37048d49d4a7811172d5add36bf5f8a17fad | ca30f0e75094e9310bc971edf1677e0ef97023d9 | refs/heads/master | 2022-03-22T04:07:21.416254 | 2019-12-06T07:06:10 | 2019-12-06T07:06:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | # from kivy.app import App
# from kivy.uix.label import Label
# from kivy.lang import Builder
# from kivy.properties import ObjectProperty
# from kivy.lang.builder import Builder
# from kivy.uix.widget import Widget
# from kivy.uix.gridlayout import GridLayout
# from kivy.uix.boxlayout import BoxLayout
# from kivy.uix.button import Button
#
#
# class Outline_App(BoxLayout):
# def __init__(self, **kwargs):
# super(Outline_App, self).__init__(**kwargs)
# self.orientation="vertical"
# self.add_widget(Button(text = 'Hello 1',size=(200, 50), size_hint=(None, None)))
# self.add_widget(Button(text = 'Hello 1'))
# self.add_widget(Button(text = 'Hello 1'))
# self.add_widget(Button(text = 'Hello 1'))
# self.row_force_default =False
# self.row_default_height=50
#
#
#
#
# class MyApp(App):
# def build(self):
# return Outline_App()
#
# if __name__ == '__main__':
# MyApp().run()
'''
Camera Example
==============
This example demonstrates a simple use of the camera. It shows a window with
a buttoned labelled 'play' to turn the camera on and off. Note that
not finding a camera, perhaps because gstreamer is not installed, will
throw an exception during the kv language processing.
'''
# Uncomment these lines to see all the messages
# from kivy.logger import Logger
# import logging
# Logger.setLevel(logging.TRACE)
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
import time
Builder.load_string('''
<CameraClick>:
orientation: 'vertical'
Camera:
id: camera
resolution: (640, 480)
play: False
ToggleButton:
text: 'Play'
on_press: camera.play = not camera.play
on_press: test_p
size_hint_y: None
height: '48dp'
Button:
text: 'Capture'
size_hint_y: None
height: '48dp'
on_press: root.capture()
''')
class CameraClick(BoxLayout):
    # Root widget declared in the kv string loaded above: a Camera widget,
    # a Play toggle, and a Capture button wired to capture().
    def capture(self):
        '''
        Capture the current camera frame and save it to the working
        directory as a PNG named after the capture time
        (IMG_YYYYmmdd_HHMMSS.png).
        '''
        camera = self.ids['camera']
        timestr = time.strftime("%Y%m%d_%H%M%S")
        camera.export_to_png("IMG_{}.png".format(timestr))
        print("Captured")
    def test_p(self):
        # NOTE(review): the kv ToggleButton references `test_p` without
        # calling it (`on_press: test_p`), so this never runs — confirm intent.
        print("hello")
class TestCamera(App):
    # Minimal kivy App whose root widget is the CameraClick layout above.
    def build(self):
        return CameraClick()
TestCamera().run()  # runs at import time; this module doubles as the entry point
"exandwhy666@yahoo.com"
] | exandwhy666@yahoo.com |
3c84cf1aa382ae73435312ccf759eef54d752f84 | 845f627d3b28f88e7a5367ba8bf3b669cf5a6eae | /script/report/report.py | 36a843d34ee338301436cb85b89184b33530581b | [] | no_license | quanrd/nf-reseq-om | 42e5066c99326c30e6aa650acbdc0ab2d4e52683 | 1ed90fff58fba5095f3454be07b803e82ced98b6 | refs/heads/master | 2022-11-18T22:03:49.556357 | 2020-01-06T06:40:13 | 2020-01-06T06:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,136 | py | import os
import glob
import sys
import jinja2
import fire
import pandas as pd
from io import StringIO
from pathlib import Path, PurePath
pd.set_option('precision', 3)
script_dir, _ = os.path.split(os.path.abspath(__file__))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(
searchpath='{}/template'.format(script_dir)
)
)
template = env.get_template('index.html')
# code that fills in display_dictionary with the values to send to the template
def format_map_df(map_df):
    """Reshape the mapping-summary frame for HTML display.

    Transposes the frame so items become columns, strips trailing ':' from
    the metric names, renders columns 0, 1, 4, 3 as thousands-separated
    integers and keeps the last three (ratio) columns unchanged.
    """
    transposed = map_df.T
    transposed.columns = [col.rstrip(':') for col in transposed.columns]
    counts = transposed.iloc[:, [0, 1, 4, 3]].astype('int').applymap(lambda v: f'{v:,}')
    ratios = transposed.iloc[:, -3:]
    formatted = pd.concat([counts, ratios], axis=1)
    formatted.index.name = 'Item'
    return formatted
def format_reads_df(reads_df):
    """Format the per-sample read statistics for HTML display.

    The first four columns are rendered as thousands-separated integers,
    the last five as fixed three-decimal floats.
    """
    counts = reads_df.iloc[:, 0:4].astype('int').applymap(lambda v: f'{v:,}')
    ratios = reads_df.iloc[:, -5:].applymap(lambda v: f'{v:.3f}')
    formatted = pd.concat([counts, ratios], axis=1)
    formatted.index.name = 'Item'
    return formatted
def table2dict(table_file, name, sep='\t', format_func=None):
    """Load *table_file* into header/body lists keyed for the template.

    Returns {} when the file does not exist.  Otherwise the result holds
    f'{name}_header' (column names) and, when the table has rows,
    f'{name}_body' (row value lists); tables whose name contains 'snp'
    additionally get a truthy 'snp' flag.
    """
    result = {}
    if not table_file.is_file():
        return result
    frame = pd.read_csv(table_file, sep=sep, index_col=0)
    if format_func is not None:
        frame = format_func(frame)
    frame = frame.sort_index().reset_index()
    body = [list(frame.loc[row]) for row in frame.index]
    if body:  # mirror the original: key only created when rows exist
        result[f'{name}_body'] = body
    result[f'{name}_header'] = list(frame.columns)
    if 'snp' in name:
        result['snp'] = True
    return result
def plot2report(plot_path, outpath, plot_flag, plot_name=None):
    """Copy the first file matching glob pattern *plot_path* into *outpath*.

    The copy keeps its suffix; *plot_name*, when given, overrides the stem.
    Returns {plot_flag: True} when something was copied, otherwise {}.
    """
    matches = glob.glob(str(plot_path))
    if not matches:
        return {}
    src = PurePath(matches[0])
    stem = src.stem if plot_name is None else plot_name
    dest = PurePath(outpath) / f'{stem}{src.suffix}'
    os.system(f'cp {src} {dest}')
    return {plot_flag: True}
def plotlist2report(plot_list, outpath, plot_flag):
    """Copy every file in *plot_list* into *outpath*.

    Returns {plot_flag: [basename, ...]} preserving order, or {} when the
    list is empty.
    """
    if not plot_list:
        return {}
    for src in plot_list:
        os.system(f'cp {src} {outpath}')
    return {plot_flag: [PurePath(src).name for src in plot_list]}
def exom_report(result_dir, proj_name, report_dir=None):
    """Assemble the exome-analysis HTML report.

    Collects QC/alignment/SNP tables and plot images from *result_dir*,
    renders them through the Jinja2 `index.html` template into
    *report_dir* (defaults to ``result_dir/report``) and packs the report
    directory into a ``.tar.gz`` next to it.

    NOTE(review): uses shell ``rm -r`` / ``cp`` / ``tar`` via os.system, so
    this is POSIX-only and failures are silently ignored.
    """
    result_dir = Path(result_dir)
    if report_dir is None:
        report_dir = result_dir / 'report'
    else:
        report_dir = Path(report_dir)
    # Start from a clean report directory on re-runs.
    if report_dir.is_dir():
        os.system(f'rm -r {report_dir}')
    display_dictionary = {}
    display_dictionary['project_name'] = proj_name
    # add fastqc table
    qc_table = result_dir / 'qc/data.summary.csv'
    display_dictionary.update(
        table2dict(qc_table, 'seq', sep=',', format_func=format_reads_df))
    # add aligment table
    align_table = result_dir / 'alignment/mapping.summary.csv'
    display_dictionary.update(
        table2dict(
            align_table, 'align', sep=',', format_func=format_map_df))
    # snp stats
    # summary
    snp_summary_table = result_dir / 'snp/overall.varSummary.txt'
    display_dictionary.update(
        table2dict(snp_summary_table, 'snp_summary'))
    snp_number_table = result_dir / 'snp/overall.varNum.txt'
    display_dictionary.update(
        table2dict(snp_number_table, 'snp_number'))
    snp_impact_table = result_dir / 'snp/overall.varImpact.txt'
    display_dictionary.update(
        table2dict(snp_impact_table, 'snp_impact'))
    snp_effect_table = result_dir / 'snp/overall.varEffects.txt'
    display_dictionary.update(
        table2dict(snp_effect_table, 'snp_effect'))
    snp_region_table = result_dir / 'snp/overall.varRegion.txt'
    display_dictionary.update(
        table2dict(snp_region_table, 'snp_region'))
    # Copy the static template assets (css/js/imgs) into the report dir.
    report_dir.mkdir(parents=True, exist_ok=True)
    os.system('cp -r {script_dir}/template/* {report_dir}'.format(
        script_dir=script_dir,
        report_dir=report_dir
    ))
    # plots
    report_plot_path = report_dir / 'imgs'
    mapping_plot = result_dir / 'plot/alignment/Mapping_stats.png'
    display_dictionary.update(
        plot2report(mapping_plot, report_plot_path, 'mapping_plot'))
    # genome_cov_plot = result_dir / 'plot/alignment/Reads_coverage_genome.png'
    # display_dictionary.update(
    #     plot2report(genome_cov_plot, report_plot_path, 'genome_cov_plot')
    # )
    exon_cov_plot = result_dir / 'plot/alignment/Reads_coverage_exon.png'
    display_dictionary.update(
        plot2report(exon_cov_plot, report_plot_path, 'exon_cov_plot')
    )
    variant_summary_plot = result_dir / \
        'plot/variants/Variant_stats_summary.png'
    if variant_summary_plot.exists():
        display_dictionary.update(
            plot2report(variant_summary_plot,
                        report_plot_path, 'variant_summary')
        )
    # NOTE(review): example_sample is only consumed by the commented-out
    # per-sample varEffects section below; the loop is currently dead code.
    variant_summary_plot_dir = result_dir / 'plot/variants/'
    for dir_i in variant_summary_plot_dir.iterdir():
        if dir_i.is_dir():
            example_sample = dir_i.name
    varType_plot = glob.glob(f'{result_dir}/plot/variants/*/*_varType.png')
    display_dictionary.update(
        plotlist2report(varType_plot, report_plot_path,
                        'varType_plots'))
    varRegion_plot = glob.glob(
        f'{result_dir}/plot/variants/*/*_varRegion.png')
    display_dictionary.update(
        plotlist2report(varRegion_plot, report_plot_path,
                        'varRegion_plots'))
    varImpact_plot = glob.glob(
        f'{result_dir}/plot/variants/*/*_varImpact.png')
    display_dictionary.update(
        plotlist2report(varImpact_plot,
                        report_plot_path, 'varImpact_plots'))
    # varEffects_high_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-HIGH.png'
    # display_dictionary.update(
    #     plot2report(varEffects_high_plot, report_plot_path,
    #                 'variant_effect_high', 'varEffects-HIGH'))
    # varEffects_moderate_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-MODERATE.png'
    # display_dictionary.update(
    #     plot2report(varEffects_moderate_plot,
    #                 report_plot_path,
    #                 'variant_effect_moderate', 'varEffects-MODERATE'))
    # varEffects_low_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-LOW.png'
    # display_dictionary.update(
    #     plot2report(varEffects_low_plot, report_plot_path,
    #                 'variant_effect_low', 'varEffects-LOW'))
    # varEffects_modifier_plot = result_dir / \
    #     f'plot/variants/{example_sample}/{example_sample}_varEffects-MODIFIER.png'
    # display_dictionary.update(
    #     plot2report(varEffects_modifier_plot,
    #                 report_plot_path,
    #                 'variant_effect_modifier', 'varEffects-MODIFIER'))
    # deltaSNP_plot = result_dir / 'mapping/*deltaSNP.png'
    # Gprime_plot = result_dir / 'mapping/*Gprime.png'
    # negLog10Pval_plot = result_dir / 'mapping/*negLog10Pval.png'
    # plot2report(deltaSNP_plot, report_plot_path, 'deltaSNP')
    # plot2report(Gprime_plot, report_plot_path, 'Gprime')
    # plot2report(negLog10Pval_plot, report_plot_path, 'negLog10Pval')
    # display_dictionary.update({'pca': True, 'snp_index': True})
    # Render the template and write the report entry page.
    display_html = template.render(display_dictionary)
    report_html = report_dir / 'index.html'
    with open(report_html, 'w') as out_inf:
        out_inf.write(display_html)
    os.system(f'tar -zcf {report_dir}.tar.gz -C {report_dir.parent} {report_dir.name}')
if __name__ == '__main__':
    # Expose exom_report as a CLI: exom_report RESULT_DIR PROJ_NAME [REPORT_DIR]
    fire.Fire(exom_report)
| [
"ricekent@163.com"
] | ricekent@163.com |
31787e02aedd1f148d6b4c403a741f5e498ef610 | 6acdc703e8c244f4345fddc0a1d40be2610b2a97 | /doc/source/conf.py | d2f1eb1f127fae1483deda14ceb99051f292ab05 | [
"MIT"
] | permissive | constrict0r/sysconfig | 9fe0530b2ead5766c080fe1e5352ac72878a646f | 609700aee8dd133920037d2fabd0c72faf9d1c8b | refs/heads/master | 2020-12-20T14:49:36.915408 | 2020-08-06T19:54:46 | 2020-08-06T19:54:46 | 183,437,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,936 | py | # Configuration file for the Sphinx documentation builder.
import os
import sys
project = "sysconfig"
copyright = "2019, constrict0r"
author = "constrict0r"
version = "0.0.1"
release = "0.0.1"
sys.path.insert(0, os.path.abspath("../.."))
extensions = [
"sphinxcontrib.restbuilder",
"sphinxcontrib.globalsubs",
"sphinx-prompt",
"sphinx_substitution_extensions"
]
templates_path = ["_templates"]
exclude_patterns = []
html_static_path = ["_static"]
html_theme = "sphinx_rtd_theme"
master_doc = "index"
img_base_url = "https://gitlab.com/" + author + "/img/raw/master/"
img_url = img_base_url + project + "/"
author_img = ".. image:: " + img_url + "author.png\n :alt: author"
author_slogan = "The Travelling Vaudeville Villain."
github_base_url = "https://github.com/"
github_url = github_base_url + author + "/" + project
github_link = "`Github <" + github_url + ">`_."
gitlab_base_url = "https://gitlab.com/"
gitlab_url = gitlab_base_url + author + "/" + project
gitlab_badge = gitlab_url + "/badges/master/pipeline.svg\n :alt: pipeline"
gitlab_ci_url = gitlab_url + "/pipelines"
gitlab_ci_link = "`Gitlab CI <" + gitlab_ci_url + ">`_."
gitlab_link = "`Gitlab <" + gitlab_url + ">`_."
travis_base_url = "https://travis-ci.com/"
travis_url = travis_base_url + author + "/" + project
travis_badge = ".. image:: " + travis_url + ".svg\n :alt: travis"
travis_ci_url = travis_url
travis_link = "`Travis CI <" + travis_url + ">`_."
readthedocs_url = "https://" + project + ".readthedocs.io"
readthedocs_badge = "/projects/" + project + "/badge\n :alt: readthedocs"
readthedocs_link = "`Readthedocs <" + readthedocs_url + ">`_."
global_substitutions = {
"AUTHOR_IMG": author_img,
"AUTHOR_SLOGAN": author_slogan,
"AVATAR_IMG": ".. image:: " + img_url + "avatar.png\n :alt: avatar",
"DEFAULT_VAR_NAME": 'system_skeleton',
"DEPLOY_IMG": ".. image:: " + img_url + "/deploy.png\n :alt: deploy",
"DOOMBOT_IMG": ".. image:: " + img_url + "/doombot.png\n :alt: doombot",
"ENJOY_IMG": ".. image:: " + img_url + "/enjoy.png\n :alt: enjoy",
"GITLAB_BADGE": ".. image:: " + gitlab_badge,
"GITLAB_CI_LINK": gitlab_ci_link,
"GITHUB_LINK": github_link,
"GITLAB_LINK": gitlab_link,
"INGREDIENT_IMG": ".. image:: " + img_url +
"/ingredient.png\n :alt: ingredient",
"MAIN_IMG": ".. image:: " + img_url + "/main.png\n :alt: main",
"PROJECT": project,
"READTHEDOCS_BADGE": ".. image:: https://rtfd.io" + readthedocs_badge,
"READTHEDOCS_LINK": readthedocs_link,
"TRAVIS_BADGE": travis_badge,
"TRAVIS_LINK": travis_link
}
role_var = '-e "{system_skeleton: [\'https://gitlab.com/huertico/server\']}"'
substitutions = [
("|AUTHOR|", author),
("|DEFAULT_ROLE_VARS|", role_var),
("|DEFAULT_VAR_NAME|", 'system_skeleton'),
("|DEFAULT_VAR_VALUE|", "[\'https://gitlab.com/huertico/server\']"),
("|PROJECT|", project)
]
| [
"constrict0r@protonmail.com"
] | constrict0r@protonmail.com |
b76a1d0b2a99dc6e31c8d2d4e463e660bcb932fa | bc4496fa0aa4f4f3062acf0ed8c0717e9e53a00a | /Lesson2Folder-Pygame/Ex5/pyCam.py | 5a7e5120cae0313ab2d4882d45161bdc1d5ec9c8 | [] | no_license | EarthenSky/Python-Practice | 9c44c9385688ce1b8d067b3d5f788fdea034c9df | fd53c5ec6da5bb61b6a82de72c828db9813dd2a4 | refs/heads/master | 2018-07-15T15:21:21.547954 | 2018-06-11T21:03:08 | 2018-06-11T21:03:08 | 119,651,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py | # This module acts as a camera and interfaces the pygame draw functions.
import pygame
# Sets up the camera dictionary.
_camera_dict = {}
# This is the camera class.
class camera:
    """A simple 2D camera with an id and a world-space position offset.

    The id may be an int or a string (see create_camera); using the camera
    like a string returns its id.
    """

    def __init__(self, id, pos):
        self._id = id      # unique identifier (int or str)
        self._pos = pos    # (x, y) world-space offset

    def __str__(self):
        # Fix: ids are documented as "int or string", but __str__ must
        # return a str — returning a raw int raised TypeError. Coerce.
        return str(self._id)

    def get_id(self):
        """Return the camera's id (may be str or int)."""
        return self._id

    def get_position(self):
        """Return the camera's (x, y) position."""
        return self._pos

    def set_position(self, pos):
        """Move the camera to *pos*."""
        self._pos = pos
# Instantiates a camera object.
def create_camera(id, pos):
    """Create and register a camera object. Please pass a unique id to
    specify a camera to draw from. Returns the passed id."""
    # Stored in the module-level registry; an existing camera under the
    # same id is silently replaced.
    _camera_dict[id] = camera(id, pos)
    return id
def get_camera(id):
    """Return the camera object registered under *id*.

    Raises KeyError if no camera was created with this id.
    """
    return _camera_dict[id]
def draw_rect(camera_id, surface, colour, rect, width=0):
    """Draw *rect* on *surface* shifted into camera space. Width 0 draws it filled."""
    cam_x, cam_y = _camera_dict[camera_id].get_position()
    shifted = (rect[0] - cam_x, rect[1] - cam_y, rect[2], rect[3])
    pygame.draw.rect(surface, colour, shifted, width)
def draw_img(camera_id, surface, img, scale_rect, cut_rect=None):
    """Blit *img* at the camera-relative top-left of *scale_rect*.

    *scale_rect* is (x, y, width, height); *cut_rect* optionally selects a
    sub-area of the image.
    """
    cam_x, cam_y = _camera_dict[camera_id].get_position()
    surface.blit(img, (scale_rect[0] - cam_x, scale_rect[1] - cam_y), cut_rect)
"""
# Just in case...
# Set position.
#position = (scale_rect[0] - _camera_dict[camera_id].get_position()[0], scale_rect[1] - _camera_dict[camera_id].get_position()[1])
# Set camera rotation.
#blit_img = pygame.transform.rotate(img, _camera_dict[camera_id].get_rotation())
#blit_img_rect = blit_img.get_rect(center=position)
# Blit
#surface.blit(blit_img, blit_img_rect, cut_rect)
# Some Basic camera controlls...
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
pyCam.get_camera(main_cam).set_position( (pyCam.get_camera(main_cam).get_position()[0], pyCam.get_camera(main_cam).get_position()[1] + 10) )
elif event.key == pygame.K_s:
pyCam.get_camera(main_cam).set_position( (pyCam.get_camera(main_cam).get_position()[0], pyCam.get_camera(main_cam).get_position()[1] - 10) )
elif event.key == pygame.K_a:
pyCam.get_camera(main_cam).set_position( (pyCam.get_camera(main_cam).get_position()[0] - 10, pyCam.get_camera(main_cam).get_position()[1]) )
elif event.key == pygame.K_d:
pyCam.get_camera(main_cam).set_position( (pyCam.get_camera(main_cam).get_position()[0] + 10, pyCam.get_camera(main_cam).get_position()[1]) )
"""
| [
"gabegames27@gmail.com"
] | gabegames27@gmail.com |
1805853c8730f9367bcee41c2c7faf88dfd129bd | ee10c52fcad29619f742176766e5dcaa052b50a9 | /internrank.py | 3087e872de080453b6f906cc10c7f81adee19553 | [] | no_license | nicholson2208/InternSlackBot | 24adda7bd41bf4e6a8af4bfe697257583cc33ccf | 7140ef829a916a2a330ff04e8f15ca3e5ef2f1cd | refs/heads/master | 2021-04-19T00:11:06.569427 | 2017-06-30T16:54:28 | 2017-06-30T16:54:28 | 94,591,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,144 | py | import os
import time
from slackclient import SlackClient
from pymongo import MongoClient
from intern_utils import *
# make mongo client
mongo_client = MongoClient('localhost', 34567)
db = mongo_client.pymongo_test
interns = db.interns
"""
interns_data = {
'name': 'matt',
'points': 1000
}
result = interns.insert_one(interns_data)
print('One post: {0}'.format(result.inserted_id))
interns_data = {
'name': 'jimmy',
'points': 100
}
result1 = interns.insert_one(interns_data)
print('One post: {0}'.format(result1.inserted_id))
"""
# starterbot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
# constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "points"
# instantiate Slack & Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def get_about():
    """Return a one-line description of the bot and how to get help."""
    # print() with a single argument behaves identically on Python 2 and 3,
    # unlike the bare print statements used elsewhere in this file.
    print("about")
    return ("intern.rank is a bot created by the interns to haze themselves. "
            "Try typing '@intern.rank' help to get started ")
def get_status():
    """Return a sentence naming the intern currently in first place."""
    print "status called"
    #get top spot
    # NOTE(review): sort("points", -1) sorts descending, so [0] is the leader.
    top=interns.find().sort("points", -1)[0]
    name = top["name"]
    points = top["points"]
    return "The current leader is "+name+", with "+str(points)+"."
def update_rank():
    """Build the leaderboard as a 1-indexed, newline-separated string."""
    print "update rank called"
    iter=0
    response=""
    #sort in ascending order or something
    # NOTE(review): the sort is actually descending (-1), so the comment
    # above is stale; also `iter` shadows the builtin.
    for intern in interns.find().sort("points", -1):
        iter+=1
        response+=str(iter)+". "+intern["name"] +"\t"+str(intern["points"]) + "\n"
        print intern["points"]
    return response
def get_rankings():
    """Return the formatted leaderboard (delegates to update_rank)."""
    print "rankings called"
    response= update_rank()
    return response #"Who even cares, Matt is going to win."
def add_points(command, sender):
    """Award (or deduct, with a negative number) points to an intern.

    *command* is the tokenized message: ["points", <intern_name>, <amount>].
    *sender* is the Slack user id of the awarder; the amount is debited
    from their weekly awarding budget.
    Returns a human-readable status string.
    """
    # TODO: change this to be not 3 calls
    print "points called"
    assert command[0] == "points"
    name, points_to_add = command[1],float(command[2])
    # NOTE(review): check_awarding_privileges / employees come from the
    # star import of intern_utils — confirm their contracts there.
    if check_awarding_privileges(sender):
        awarder = employees.find_one({"userID":sender})
        awarders_points = float(awarder["awarding_points"])
        awarder_name = awarder["name"]
        # NOTE(review): `<=` means an awarder can never spend their exact
        # remaining balance — possibly intended to be `<`; confirm.
        if awarders_points <= points_to_add:
            return "Sorry, you can\'t award {0} points, you only have {1}. Points replenish on Monday.".format(
                str(points_to_add), str(awarders_points))
        if name == "matt" and points_to_add<0:
            return "You can't take points away from Matt!"
        # Apply the transfer: credit the intern, debit the awarder.
        old_points=interns.find_one({"name" : name})["points"]
        new_points= old_points+points_to_add
        interns.find_one_and_update( {"name" : name}, {"$set":{"points": new_points}})
        new_awarders_points = awarders_points - points_to_add
        employees.find_one_and_update( {"userID" : sender}, {"$set":{"awarding_points": new_awarders_points}})
        response = str(points_to_add) + " points to "+name + "! " + name + " now has " + str(new_points) + " points. \n"
        response += awarder_name + " has " + str(new_awarders_points) + " left to award for the week."
        return response
    else:
        return "Sorry, you can't award points."
def get_intern():
    """Pick the 'super special intern' from the parity of the current time."""
    # print() form is valid on both Python 2 and 3.
    print("intern called")
    if round(time.time()) % 2 == 0:
        return "The super special intern is Matt right now."
    return "The super special intern is Jimmy right now."
def get_help():
    """Return usage text listing every supported command."""
    print("help called")
    # Fix: user-facing message read "Use the the format".
    response = "Use the format '@intern.rank + <<command>>' and try one of the following commands:\n"
    response += "about\nintern\nJimmy\nMatt\n"
    response += "points <<intern_name>> <<number of points to add>> \n"
    response += "rankings\nstatus"
    return response
def get_Matt():
    """Return a message that @-mentions Matt in the channel."""
    print("Matt called")
    return "@matt.nicholson I think someone wants to talk to you"
def get_Jimmy():
    """Return a message that @-mentions Jimmy (with a side effect)."""
    # these functions are dumb
    print "Jimmy called"
    # NOTE(review): bankrupt() presumably comes from the intern_utils star
    # import and penalizes Jimmy as a joke — confirm its effect there.
    bankrupt("jimmy") # lol
    return "@jimmycarlson I think someone wants to talk to you"
def handle_command(command, channel, sender):
    """
        Receives commands directed at the bot and determines if they
        are valid commands. If so, then acts on the commands. If not,
        returns back what it needs for clarification.

        The response is posted back to *channel* via the Slack Web API;
        nothing is returned to the caller.
    """
    # Dispatch on the first word of the message.
    command = command.split(" ")
    if command[0] == "status":
        response = get_status()
    elif command[0] == "rankings":
        response = get_rankings()
    elif command[0] == "points":
        response = add_points(command, sender)
    elif command[0] == "intern":
        response = get_intern()
    elif command[0] == "help":
        response = get_help()
    elif command[0] == "matt":
        response = get_Matt()
    elif command[0] == "jimmy":
        response = get_Jimmy()
    elif command[0] == "about":
        response = get_about()
    else:
        response="Sorry, I didn't quite catch that. Type @intern.rank help for more options"
    print response
    slack_client.api_call("chat.postMessage", channel=channel, text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
    """
        The Slack Real Time Messaging API is an events firehose.
        this parsing function returns None unless a message is
        directed at the Bot, based on its ID.

        Returns (command_text, channel, sender) for the first event that
        mentions the bot, else (None, None, None).
    """
    output_list = slack_rtm_output
    if output_list and len(output_list) > 0:
        for output in output_list:
            if output and 'text' in output and AT_BOT in output['text']:
                # return text after the @ mention, whitespace removed.
                # BUG FIX: take the sender from the event that matched,
                # not unconditionally from output_list[0] — with several
                # buffered events the wrong user was charged for "points".
                return output['text'].split(AT_BOT)[1].strip().lower(), output['channel'], output['user']
    return None, None, None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
command, channel, sender = parse_slack_output(slack_client.rtm_read())
if command and channel:
print "command detected " + str(command) + "\n"
handle_command(command, channel, sender)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
| [
"matthew.n.nicholson1@gmail.com"
] | matthew.n.nicholson1@gmail.com |
17b37f4a03a4049d3afd2397497d08fa832d5305 | dcc62f725e8d1fdebc3be5192960584198d19813 | /meiduo_mall/meiduo_mall/utils/category.py | 535a51cb88960d742c97a2c71d02a628b6f21fb7 | [] | no_license | 1923488289/youprojects | e51cbb7958963fb8a3a82405f5df18e9a066b1ee | ebd1856dab02e45db69d2d5307473f0f22855988 | refs/heads/master | 2022-12-11T12:40:55.832289 | 2019-09-24T15:31:34 | 2019-09-24T15:31:34 | 210,625,080 | 0 | 0 | null | 2022-12-08T01:49:05 | 2019-09-24T14:36:24 | HTML | UTF-8 | Python | false | false | 1,745 | py | from goods.models import GoodsChannel
def get_category():
    """Build the three-level category menu grouped by goods channel.

    Returns a dict keyed by channel group id; each value holds 'channels'
    (first-level categories with their channel urls) and 'sub_cats'
    (second-level categories, each carrying its third-level children).
    """
    # 1. Query the channels, ordered by group then in-group sequence.
    channels = GoodsChannel.objects.order_by('group_id', 'sequence')
    categories = {}
    # 2. Walk the channels, collecting first- and second-level categories.
    for channel in channels:
        # 3. Create the group's dict on first sight of its group_id.
        if channel.group_id not in categories:
            categories[channel.group_id] = {
                'channels': [],  # first-level categories
                'sub_cats': []  # second-level categories
            }
        # 3.1 Fetch the group dict for this channel.
        channel_dict = categories[channel.group_id]
        # 4. Append the channel's first-level category.
        channel_dict['channels'].append({
            'name': channel.category.name,  # first-level category name
            'url': channel.url  # channel link
        })
        # 5. Collect the second-level categories under this channel.
        catetory2s = channel.category.subs.all()
        # 6. Append each second-level category with its third-level children.
        for catetory2 in catetory2s:
            channel_dict['sub_cats'].append({
                'name': catetory2.name,  # second-level category name
                'sub_cats': catetory2.subs.all()  # third-level categories
            })
    '''
    {
        1:{
            'channels':[手机,相机,数码],
            'sub_cats':[
                {
                    'name':'手机通讯',
                    'sub_cats':[手机,游戏手机,..]
                },
                {
                    。。。。
                }
            ]
        },
        2:{
            'channels':[电脑,办公],
            'sub_cats':[]
        }
    }
    '''
    return categories
| [
"1923488289@qq.com"
] | 1923488289@qq.com |
bf4936b858155a0f2665e2be1e5c5640a411d52f | a0b5bfca977dc232a1ec3c1b29d56bbdfc838b97 | /Chetan_Portfolio/home/views.py | 613058ad1b33ba9b23ea0d5a9b8b735623166ad7 | [] | no_license | Chetucity/Portfolio | 2d2003433ae2726200db56004e892bda27e3db48 | 2402571dcf6bec1770fb55bae648661cebbabefe | refs/heads/master | 2022-12-28T14:05:26.666668 | 2020-10-15T16:18:56 | 2020-10-15T16:18:56 | 304,234,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | from django.shortcuts import render , HttpResponse
from home.models import Contacts
# Create your views here.
def home(request):
    """Render the portfolio landing page with a small template context."""
    context = {'name': 'chetan','course':'django'}
    return render(request,'home.html',context)
def about(request):
    """Render the static about page."""
    return render(request,'about.html')
def projects(request):
    """Render the static projects page."""
    return render(request,'projects.html')
def contacts(request):
    """Render the contact page; on POST, persist the submitted message.

    NOTE(review): form fields are saved unvalidated — any of them may be
    None if missing from the POST body.
    """
    if request.method == "POST":
        name = request.POST.get('name')
        email = request.POST.get('email')
        phone = request.POST.get('phone')
        desc = request.POST.get('desc')
        # print(name,email,phone,desc)
        ins = Contacts(name=name, email=email, phone=phone, desc=desc)
        ins.save()
        print("data has been printed on db")
    return render(request,'contacts.html')
| [
"Chetucity@gmail.com"
] | Chetucity@gmail.com |
a0da2e217526a741286b67b863b952903fd7a915 | ed7a1e546311cfa8223107ee1dfae194784e23ab | /dice.py | 876984520728043bc740d24d6237347ea529f0f0 | [
"Apache-2.0"
] | permissive | WojciechMalicki/dice_analyzer | ab35c075b7c05917ebae10a775808ab94e4aa7f6 | 4e975cf872470b30873c743c3122b446d5806bcf | refs/heads/master | 2022-05-03T09:23:18.560858 | 2022-04-16T21:25:57 | 2022-04-16T21:25:57 | 176,592,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from random import randint
class Dice:
    """A single die with a configurable number of sides."""

    def __init__(self, num_sides=6):
        """Create a die; defaults to a six-sided die (D6)."""
        self.num_sides = num_sides

    def roll(self):
        """Return a uniformly random face value between 1 and num_sides."""
        return randint(1, self.num_sides)
"wojciechmalicki@gmail.com"
] | wojciechmalicki@gmail.com |
b2a9690624550e9775989e34f20ec89e4d5e21d6 | d81ec837695f61f6ec05d2c85606195f5ebe2e28 | /chatgui.py | 3f887134387ecd07872e9e384a3487094ffdf983 | [] | no_license | shubh2710/jassicaAI | 229e529dd690142c299cd861344f08230b238503 | e06f50f3684953291bd1de960c38f706bacfedf5 | refs/heads/master | 2023-02-07T23:24:27.039652 | 2021-01-03T10:22:27 | 2021-01-03T10:22:27 | 326,377,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,468 | py |
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('chatbot_model.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))
def clean_up_sentence(sentence):
    """Tokenize *sentence* and lemmatize each token in lower case."""
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
    """Encode *sentence* as a 0/1 bag-of-words vector over the vocabulary *words*."""
    tokens = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for token in tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word == token:
                # mark the vocabulary slot for this word as present
                bag[idx] = 1
                if show_details:
                    print("found in bag: %s" % vocab_word)
    return np.array(bag)
def predict_class(sentence, model):
    """Return the intents *model* predicts for *sentence*, filtered by a
    probability threshold and sorted strongest-first."""
    features = bow(sentence, words, show_details=False)
    probabilities = model.predict(np.array([features]))[0]
    threshold = 0.25
    candidates = [[idx, prob] for idx, prob in enumerate(probabilities) if prob > threshold]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return [{"intent": classes[idx], "probability": str(prob)} for idx, prob in candidates]
def getResponse(ints, intents_json):
    """Pick a random response template for the top predicted intent.

    NOTE(review): assumes the top intent's tag exists in *intents_json*;
    otherwise 'result'/'slots' are unbound and a NameError is raised.
    """
    top_tag = ints[0]['intent']
    for entry in intents_json['intents']:
        if entry['tag'] == top_tag:
            result = random.choice(entry['responses'])
            slots = entry['slots']
            break
    return [{'tag': top_tag, 'result': result, 'slots': slots}]
def fillSlots(msg, res):
    """Fill the matched intent's response template with slot values found in *msg*.

    *res* is the single-element list produced by getResponse. For every slot
    the intent declares, the first known value appearing as a substring of
    *msg* is substituted for '@<slot>' in the template. If any slot has no
    match, a "Some slots are missing[...]" message is returned instead.
    """
    # Known values per slot name. NOTE(review): slots absent from this table
    # raise TypeError (iterating None) — same as the original behavior.
    allSlotsList = {'slotAction': ['on', 'off'], 'slotRoomName': ['kitchen', 'myroom']}
    responce = res[0]['result']
    slots = res[0]['slots']
    filledslots = {}
    for slot in slots:
        filledslots[slot] = None  # reset once, not on every candidate value
        for value in allSlotsList.get(slot):
            if value in msg:  # substring match, as before (was msg.find(value) > -1)
                print("found slot: " + value)
                filledslots[slot] = value
                break
    missing = [slot for slot, value in filledslots.items() if value is None]
    if missing:
        return 'Some slots are missing' + str(missing)
    for slot, value in filledslots.items():
        responce = responce.replace('@' + slot, value)
    return responce
def chatbot_response(msg):
    """Full pipeline: predict the intent for *msg*, pick a response
    template for it, then fill the template's slots from *msg*."""
    ints = predict_class(msg, model)
    resList = getResponse(ints, intents)
    res=fillSlots(msg,resList);
    return res
#Creating GUI with tkinter
import tkinter
from tkinter import *
def send():
    """Read the user's message from the entry box, echo it into the chat
    log, and append the bot's reply."""
    msg = EntryBox.get("1.0",'end-1c').strip()
    EntryBox.delete("0.0",END)
    if msg != '':
        # Temporarily unlock the read-only log widget to append the exchange.
        ChatLog.config(state=NORMAL)
        ChatLog.insert(END, "You: " + msg + '\n\n')
        ChatLog.config(foreground="#442265", font=("Verdana", 12 ))
        res = chatbot_response(msg)
        ChatLog.insert(END, "Bot: " + res + '\n\n')
        ChatLog.config(state=DISABLED)
        # Scroll to the newest message.
        ChatLog.yview(END)
base.title("Hello")
base.geometry("400x500")
base.resizable(width=FALSE, height=FALSE)
#Create Chat window
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial",)
ChatLog.config(state=DISABLED)
#Bind scrollbar to Chat window
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set
#Create Button to send message
SendButton = Button(base, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
command= send )
#Create the box to enter message
EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial")
#EntryBox.bind("<Return>", send)
#Place all components on the screen
scrollbar.place(x=376,y=6, height=386)
ChatLog.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)
base.mainloop()
| [
"shubhamgulati81@mail.com"
] | shubhamgulati81@mail.com |
c219c1a1baa2f8661a41a52ad93bf781da4901c3 | ae74758ff0cae45cfd0943332c30a56852085f6a | /bot.py | 9eb44721bd8106c9fd1c797488a41c5220dca5c2 | [] | no_license | aleksey1593/gygysikbot | d86ec8cac74b2158c0b4fd6548995fe163380449 | 1af60db1aea09e6fc49cd5c1c30a309acd0e2894 | refs/heads/master | 2023-03-01T08:10:21.384366 | 2021-01-25T12:42:00 | 2021-01-25T12:42:00 | 318,996,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,025 | py | import asyncio
import logging
import aioschedule
import os
from sql import Dbase
from aiogram.dispatcher.filters import Text
from aiogram import Bot, Dispatcher, executor, types
from aiogram.types import KeyboardButton
from aiogram.types import ReplyKeyboardRemove
from aiogram.types import ReplyKeyboardMarkup
from parsing import uznat, banki_kursi
API_TOKEN = os.environ['TOKEN']
logging.basicConfig(level=logging.INFO)
bot = Bot(token=API_TOKEN, parse_mode="HTML")
dp = Dispatcher(bot)
# клавиатура
button_USD_prodaza = KeyboardButton('USD продаю')
button_USD_pokupka = KeyboardButton('USD покупаю')
button_EUR_prodaza = KeyboardButton('EUR продаю')
button_EUR_pokupka = KeyboardButton('EUR покупаю')
board = ReplyKeyboardMarkup(resize_keyboard=True).row(button_USD_prodaza, button_USD_pokupka).row(button_EUR_prodaza, button_EUR_pokupka)
bot_db = Dbase()
bot_db.create_table()
# Builds the text used both for /kurs replies and the daily broadcast.
def course_act():
    """Return the current NB RB USD/EUR rates as a single message string."""
    rates = uznat()
    return f"курс USD: {rates['USD']}, курс EURO:{rates['EUR']}"
@dp.message_handler(commands='kurs')
async def welcome(message: types.Message):
    # /kurs — reply with the current NB RB exchange rates.
    await message.answer(course_act())
@dp.message_handler(commands='podpiska')
async def welcome(message: types.Message):
    # /podpiska — subscribe the user to the daily rate broadcast.
    idtel = int(message.from_user.id)
    user_info = (message.from_user.first_name, message.from_user.last_name, idtel, 1)
    if not bot_db.user_exists((message.from_user.first_name, message.from_user.last_name, idtel)):
        bot_db.add_user(user_info)
    else:
        # Already known: just re-enable their subscription flag.
        bot_db.obnovit_podpisky((idtel,))
    # NOTE(review): this message promises 13:00, but scheduler() below runs
    # the broadcast at "10:00" — one of the two should be corrected.
    text = f'Спасибо! Рассылка курсов будет каждый день в 13:00'
    await message.answer(text)
@dp.message_handler(commands='otpiska')
async def welcome(message: types.Message):
    # /otpiska — unsubscribe the user from the daily broadcast.
    idtel = int(message.from_user.id)
    if not bot_db.user_exists((message.from_user.first_name, message.from_user.last_name, idtel)):
        text = f'Вы и не подписывались:)'
    else:
        bot_db.otpiska((idtel,))
        text = f'Вы отписаны.'
    await message.answer(text)
@dp.message_handler(commands='start')
async def welcome(message: types.Message):
text = f'Привет, {message.from_user.full_name}! Напиши /help чтобы узнать, что я могу)'
await message.answer(text)
@dp.message_handler(commands='help')
async def help(message: types.Message):
text = f"""Вызовите команду "/kurs" чтобы узнать курсы НБ РБ,
вызовите команду "/podpiska" чтобы подписаться на рассылку курсов НБ РБ,
вызовите команду "/otpiska" чтобы отказаться от рассылки,
вызовите команду "/komkurs"чтобы узнать лучшие коммерческие курсы на данный момент."""
await message.answer(text)
@dp.message_handler(commands='komkurs')
async def menu(message: types.Message):
await message.answer('Выберите операцию', reply_markup=board)
@dp.message_handler(Text(equals='USD продаю'))
async def privet(message: types.Message):
otvet = ''
banki = banki_kursi('продажа', 'USD')
for i in range(len(banki)):
otvet += f'{i+1}) {banki[i]} \n'
await message.answer(otvet, reply_markup=ReplyKeyboardRemove())
@dp.message_handler(Text(equals='USD покупаю'))
async def privet(message: types.Message):
otvet = ''
banki = banki_kursi('покупка', 'USD')
for i in range(len(banki)):
otvet += f'{i+1}) {banki[i]} \n'
await message.answer(otvet, reply_markup=ReplyKeyboardRemove())
@dp.message_handler(Text(equals='EUR продаю'))
async def privet(message: types.Message):
otvet = ''
banki = banki_kursi('продажа', 'EUR')
for i in range(len(banki)):
otvet += f'{i+1}) {banki[i]} \n'
await message.answer(otvet, reply_markup=ReplyKeyboardRemove())
@dp.message_handler(Text(equals='EUR покупаю'))
async def privet(message: types.Message):
otvet = ''
banki = banki_kursi('покупка', 'EUR')
for i in range(len(banki)):
otvet += f'{i+1}) {banki[i]} \n'
await message.answer(otvet, reply_markup=ReplyKeyboardRemove())
@dp.message_handler()
async def vozvrat_k_help(message: types.Message):
await message.answer("Не понял тебя, воспользуйся командой /help чтобы узнать что я умею")
async def rassylka():
for id in bot_db.all_users():
await bot.send_message(id[0], course_act())
async def scheduler():
aioschedule.every().day.at("10:00").do(rassylka)
while True:
await aioschedule.run_pending()
await asyncio.sleep(1)
async def on_startup(x):
asyncio.create_task(scheduler())
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True, on_startup=on_startup)
| [
"gukovaleksey15@gmail.com"
] | gukovaleksey15@gmail.com |
f40bfb50a366403674bf3c6751708ec7b6546f97 | 352bcead4e760ca3a974f9b8d243c06026624eff | /job/migrations/0006_job_imag.py | 99368ec515d7931cb8cc520f763966c593eaad67 | [] | no_license | yousef-alheiba/django-jobs-Bord | d6a47e5564b2bf3a50ceefeeac5a914aba9bcb55 | 3a6bdc715e20373cb178a49c53ce789620db2754 | refs/heads/main | 2023-03-11T16:23:59.539593 | 2021-03-03T13:48:07 | 2021-03-03T13:48:07 | 343,372,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # Generated by Django 3.1.7 on 2021-03-02 10:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0005_job_category'),
]
operations = [
migrations.AddField(
model_name='job',
name='imag',
field=models.ImageField(default='', upload_to='jobs/'),
preserve_default=False,
),
]
| [
"yalheba@gmail.com"
] | yalheba@gmail.com |
47e1abb7c064d826b443bf59799f8cd064dbe34c | 60d8e7310c0bda4729238098c027f309692e417d | /blog/config.py | 42780be85eb6db9172593935795de47b4b27f808 | [] | no_license | winfred82/blog | a59747240039c281c9f2cc81e932fefb6f1a5eab | 0c12a9cfc00f568c7315221c0b164d21ceac9705 | refs/heads/master | 2021-01-10T02:20:06.753785 | 2016-01-12T06:02:44 | 2016-01-12T06:02:44 | 48,244,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import os
class DevelopmentConfig(object):
SQLALCHEMY_DATABASE_URI = "postgresql://ubuntu:thinkful@localhost:5432/blogful"
DEBUG = True
SECRET_KEY = os.environ.get("BLOGFUL_SECRET_KEY", "")
class TestingConfig(object):
SQLALCHEMY_DATABASE_URI = "postgresql://ubuntu:thinkful@localhost:5432/blogful-test"
DEBUG = False
SECRET_KEY = "Not secret" | [
"winfredzhu@outlook.com"
] | winfredzhu@outlook.com |
0fe5c7c8aafdfa5fddacc726712fe3884b50102b | a2040149f327bfd027011b942ee4d9a04a5c150b | /tkbuilder/example_apps/geotiff_viewer/geotiff_viewer.py | 1a787a9aeb83b4cb7ed103d19264dbcb3c0a1910 | [
"MIT"
] | permissive | BeamIO-Inc/tkbuilder | 728a1f848926cbd4cd2ce0759c7989cab09296ab | 8f4b458cf1bc16703c80a0fc5c8e44f9067311c3 | refs/heads/master | 2022-10-18T05:09:42.922744 | 2020-06-11T21:26:16 | 2020-06-11T21:26:16 | 269,422,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | import os
import tkinter
from tkinter import Menu
from tkbuilder.panel_templates.widget_panel.widget_panel import AbstractWidgetPanel
from tkbuilder.panel_templates.image_canvas_panel.image_canvas_panel import ImageCanvasPanel
from tkbuilder.image_readers.geotiff_reader import GeotiffImageReader
from tkinter import filedialog
from tkbuilder.example_apps.geotiff_viewer.panels.band_selection import BandSelection
class GeotiffViewer(AbstractWidgetPanel):
geotiff_image_panel = ImageCanvasPanel # type: ImageCanvasPanel
band_selection_panel = BandSelection # type: BandSelection
image_reader = None # type: GeotiffImageReader
def __init__(self, master):
self.master = master
master_frame = tkinter.Frame(master)
AbstractWidgetPanel.__init__(self, master_frame)
widgets_list = ["geotiff_image_panel", "band_selection_panel"]
self.init_w_vertical_layout(widgets_list)
self.geotiff_image_panel.set_canvas_size(800, 1080)
self.geotiff_image_panel.canvas.set_current_tool_to_pan()
menubar = Menu()
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=self.select_file)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.exit)
# create more pulldown menus
popups_menu = Menu(menubar, tearoff=0)
popups_menu.add_command(label="Main Controls", command=self.exit)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Popups", menu=popups_menu)
master.config(menu=menubar)
master_frame.pack()
self.pack()
self.band_selection_panel.red_selection.on_selection(self.callback_update_red_band)
self.band_selection_panel.green_selection.on_selection(self.callback_update_green_band)
self.band_selection_panel.blue_selection.on_selection(self.callback_update_blue_band)
self.band_selection_panel.alpha_selection.on_selection(self.callback_update_alpha_band)
def exit(self):
self.quit()
def select_file(self, fname=None):
if fname is None:
fname = filedialog.askopenfilename(initialdir=os.path.expanduser("~"),
title="Select file",
filetypes=(("tiff files", ("*.tif", "*.tiff", "*.TIF", "*.TIFF")),
("all files", "*.*"))
)
self.image_reader = GeotiffImageReader(fname)
self.geotiff_image_panel.canvas.set_image_reader(self.image_reader)
self.populate_band_selections()
def populate_band_selections(self):
bands = self.image_reader.n_bands
band_selections = [str(band) for band in range(bands)]
band_selections.append("None")
self.band_selection_panel.red_selection.update_combobox_values(band_selections)
self.band_selection_panel.green_selection.update_combobox_values(band_selections)
self.band_selection_panel.blue_selection.update_combobox_values(band_selections)
self.band_selection_panel.alpha_selection.update_combobox_values(band_selections)
self.band_selection_panel.red_selection.set(str(self.image_reader.display_bands[0]))
self.band_selection_panel.green_selection.set(str(self.image_reader.display_bands[1]))
self.band_selection_panel.blue_selection.set(str(self.image_reader.display_bands[2]))
if len(self.image_reader.display_bands) > 3:
self.band_selection_panel.alpha_selection.set(str(self.image_reader.display_bands[3]))
else:
self.band_selection_panel.alpha_selection.set("None")
def callback_update_red_band(self, event):
red_band = self.band_selection_panel.red_selection.get()
band_num = 0
if red_band == "None":
if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(band_num)
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)
self.image_reader.display_bands[band_num] = int(red_band)
self.geotiff_image_panel.canvas.update_current_image()
def callback_update_green_band(self, event):
green_band = self.band_selection_panel.green_selection.get()
band_num = 1
if green_band == "None":
if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(1)
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(1)
self.image_reader.display_bands[1] = int(green_band)
self.geotiff_image_panel.canvas.update_current_image()
def callback_update_blue_band(self, event):
band_num = 2
blue_band = self.band_selection_panel.blue_selection.get()
if blue_band == "None":
if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(band_num)
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)
self.image_reader.display_bands[band_num] = int(blue_band)
self.geotiff_image_panel.canvas.update_current_image()
def callback_update_alpha_band(self, event):
alpha_band = self.band_selection_panel.alpha_selection.get()
band_num = 3
if len(self.image_reader.display_bands) == 3:
self.image_reader.display_bands.append(band_num)
if alpha_band == "None":
self.image_reader.display_bands = self.image_reader.display_bands[0:3]
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)
self.image_reader.display_bands[band_num] = int(alpha_band)
self.geotiff_image_panel.canvas.update_current_image()
if __name__ == '__main__':
root = tkinter.Tk()
app = GeotiffViewer(root)
root.mainloop()
| [
"j.casey@beamio.net"
] | j.casey@beamio.net |
de693df1430585e4e82e8e60b7a7241ff863692c | 20c979fc8a88dc893692c3d83c9907c928c78074 | /prog9.py | 0b026f3b1e73d7694c124def464e67c57bac49f8 | [] | no_license | ParulProgrammingHub/assignment-1-kheniparth1998 | 57edba326325af3b6dfbc6aea59e701ff5634d6c | 8c277dfb8c4a4cdf25ad7f1851d1247a6a3dc86d | refs/heads/master | 2021-01-19T09:14:42.309237 | 2017-02-15T17:24:07 | 2017-02-15T17:24:07 | 82,086,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | a=input("enter maximum marks for a subject: ")
total_marks=a*5.0
sub1=input("enter marks of subject 1: ")
sub2=input("enter marks of subject 2: ")
sub3=input("enter marks of subject 3: ")
sub4=input("enter marks of subject 4: ")
sub5=input("enter marks of subject 5: ")
obtain_marks=sub1+sub2+sub3+sub4+sub5
avg_marks=obtain_marks/5.0
percent=(obtain_marks*100)/total_marks
print "average is :",avg_marks
print "percentage is :",percent
if percent<35:
print "FAIL"
else:
print "PASS"
| [
"noreply@github.com"
] | noreply@github.com |
32648ab53d1309ef15e359a795f00a6b6da3d0cb | 651ff871b1568464f0f64f0b50da5c7e888c01b0 | /release_parser/etaj.py | 2ee43eb34d204eb7345ab456833e6afc8b732f95 | [] | no_license | kinoafisharu/kinoinfo | 1d42e5b736af2c37d1a1b42d9b60979ff38e4145 | 210477ff01d90a2bde25a9715cf7af4972dd8c21 | refs/heads/master | 2021-07-07T01:19:27.651052 | 2020-08-19T12:39:50 | 2020-08-19T12:39:50 | 146,737,229 | 0 | 4 | null | 2020-01-06T05:47:31 | 2018-08-30T11:02:22 | Python | UTF-8 | Python | false | false | 6,786 | py | #-*- coding: utf-8 -*-
import urllib
import urllib2
import re
import datetime
import time
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template.context import RequestContext
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.views.decorators.cache import never_cache
from django.conf import settings
from django.db.models import Q
from bs4 import BeautifulSoup
from base.models import *
from api.views import create_dump_file
from kinoinfo_folder.func import get_month, del_separator, del_screen_type, low
from release_parser.views import film_identification, xml_noffilm, get_ignored_films
from release_parser.kinobit_cmc import get_source_data, create_sfilm, get_all_source_films, unique_func, checking_obj, sfilm_clean
from decors import timer
from release_parser.func import cron_success
# ~1 min
@timer
def get_etaj_schedules():
ignored = get_ignored_films()
data_nof_film = ''
noffilms = []
city_name = 'Челябинск'
cinema_name = 'Этаж'
city_slug = low(del_separator(city_name))
cinema_slug = low(del_separator(cinema_name))
source = ImportSources.objects.get(url='http://etaj.mega74.ru/')
sfilm_clean(source)
films = {}
source_films = SourceFilms.objects.filter(source_obj=source)
for i in source_films:
films[i.source_id] = i
fdict = get_all_source_films(source, source_films)
schedules = get_source_data(source, 'schedule', 'list')
city = City.objects.get(name__name=city_name, name__status=1)
cinema = Cinema.objects.get(name__name=cinema_name, name__status=1, city=city)
city_obj, city_created = SourceCities.objects.get_or_create(
source_id = city_slug,
source_obj = source,
defaults = {
'source_id': city_slug,
'source_obj': source,
'city': city,
'name': city_name,
})
cinema_obj, cinema_created = SourceCinemas.objects.get_or_create(
source_id = cinema_slug,
source_obj = source,
defaults = {
'source_id': cinema_slug,
'source_obj': source,
'city': city_obj,
'cinema': cinema,
'name': cinema_name,
})
today = datetime.datetime.now().date().strftime("%Y%m%d")
dates = [today]
url = '%skino/list/' % source.url
req = urllib.urlopen(url)
if req.getcode() == 200:
data = BeautifulSoup(req.read())
show_days = data.find('div', id='kino-flags')
for a in show_days.findAll('a'):
day = a.get('href').replace('/kino/list/?day=', '')
dates.append(day)
for d in dates:
url = '%skino/list/?day=%s' % (source.url, d)
req = urllib.urlopen(url)
if req.getcode() == 200:
data = BeautifulSoup(req.read())
for div in data.findAll('div', {'class': 'film'}):
title = div.find('h3')
div_info = div.find('div', {'class': 'complete_info_title'})
film_id = div_info.findAll('a', limit=1)[0].get('href')
film_id = film_id.replace('#trailer_film_', '').encode('utf-8')
film_name = title.string.encode('utf-8').strip().replace('«','').replace('»','')
film_slug = del_screen_type(low(del_separator(film_name)))
if film_id not in noffilms and film_slug.decode('utf-8') not in ignored:
obj = films.get(film_id)
next_step = checking_obj(obj)
if next_step:
if obj:
kid = obj.kid
else:
kid, info = film_identification(film_slug, None, {}, {}, source=source)
objt = None
if kid:
create_new, objt = unique_func(fdict, kid, obj)
if create_new:
objt = create_sfilm(film_id, kid, source, film_name)
films[film_id] = objt
if not fdict.get(kid):
fdict[kid] = {'editor_rel': [], 'script_rel': []}
fdict[kid]['script_rel'].append(objt)
elif not obj:
data_nof_film += xml_noffilm(film_name, film_slug, None, None, film_id, info, None, source.id)
noffilms.append(film_id)
if objt:
div_sess = div.find('div', {'class': 'film_sessions_new gradient-091100EEE'})
for t in div_sess.findAll('span', {'class': 'time'}):
hours, minutes = t.string.split(':')
year = int(d[:4])
month = int(d[4:6])
day = int(d[6:8])
dtime = datetime.datetime(year, month, day, int(hours), int(minutes))
sch_id = '%s%s%s%s' % (dtime, cinema_slug, city_slug, film_id)
sch_id = sch_id.replace(' ', '').decode('utf-8')
if sch_id not in schedules:
SourceSchedules.objects.create(
source_id = sch_id,
source_obj = source,
film = objt,
cinema = cinema_obj,
dtime = dtime,
)
schedules.append(sch_id)
create_dump_file('%s_nof_film' % source.dump, settings.NOF_DUMP_PATH, '<data>%s</data>' % data_nof_film)
cron_success('html', source.dump, 'schedules', 'Сеансы')
@timer
def etaj_schedules_export_to_kinoafisha():
from release_parser.views import schedules_export
source = ImportSources.objects.get(url='http://etaj.mega74.ru/')
autors = (source.code, 0)
log = schedules_export(source, autors, False)
# запись лога в xml файл
create_dump_file('%s_export_to_kinoafisha_log' % source.dump, settings.LOG_DUMP_PATH, '<data>%s</data>' % log)
cron_success('export', source.dump, 'schedules', 'Сеансы')
| [
"root@mail.kinoinfo.ru"
] | root@mail.kinoinfo.ru |
9e9bad4bc3e7dda2714a737d0825c060b870f001 | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/Darwin/python/lib/python3.7/pickle.py | bfa3c0361b73c6ba406beff72276e2433c78dfc2 | [
"Apache-2.0",
"Python-2.0"
] | permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 57,994 | py | """Create portable serialized representations of Python objects.
See module copyreg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
from types import FunctionType
from copyreg import dispatch_table
from copyreg import _extension_registry, _inverted_registry, _extension_cache
from itertools import islice
from functools import partial
import sys
from sys import maxsize
from struct import pack, unpack
import re
import io
import codecs
import _compat_pickle
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# Shortcut for use in isinstance testing
bytes_types = (bytes, bytearray)
# These are purely informational; no code uses these.
format_version = "4.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
"3.0", # Protocol 3
"4.0", # Protocol 4
] # Old format versions we can read
# This is the highest protocol number we know how to read.
HIGHEST_PROTOCOL = 4
# The protocol we write by default. May be less than HIGHEST_PROTOCOL.
# We intentionally write a protocol that Python 2.x cannot read;
# there are too many issues with that.
DEFAULT_PROTOCOL = 3
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = b'(' # push special markobject on stack
STOP = b'.' # every pickle ends with STOP
POP = b'0' # discard topmost stack item
POP_MARK = b'1' # discard stack top through topmost markobject
DUP = b'2' # duplicate top stack item
FLOAT = b'F' # push float object; decimal string argument
INT = b'I' # push integer or bool; decimal string argument
BININT = b'J' # push four-byte signed int
BININT1 = b'K' # push 1-byte unsigned int
LONG = b'L' # push long; decimal string argument
BININT2 = b'M' # push 2-byte unsigned int
NONE = b'N' # push None
PERSID = b'P' # push persistent object; id is taken from string arg
BINPERSID = b'Q' # " " " ; " " " " stack
REDUCE = b'R' # apply callable to argtuple, both on stack
STRING = b'S' # push string; NL-terminated string argument
BINSTRING = b'T' # push string; counted binary string argument
SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes
UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = b'X' # " " " ; counted UTF-8 string argument
APPEND = b'a' # append stack top to list below it
BUILD = b'b' # call __setstate__ or __dict__.update()
GLOBAL = b'c' # push self.find_class(modname, name); 2 string args
DICT = b'd' # build a dict from stack items
EMPTY_DICT = b'}' # push empty dict
APPENDS = b'e' # extend list on stack by topmost stack slice
GET = b'g' # push item from memo on stack; index is string arg
BINGET = b'h' # " " " " " " ; " " 1-byte arg
INST = b'i' # build & push class instance
LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg
LIST = b'l' # build list from topmost stack items
EMPTY_LIST = b']' # push empty list
OBJ = b'o' # build & push class instance
PUT = b'p' # store stack top in memo; index is string arg
BINPUT = b'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg
SETITEM = b's' # add key+value pair to dict
TUPLE = b't' # build tuple from topmost stack items
EMPTY_TUPLE = b')' # push empty tuple
SETITEMS = b'u' # modify dict by adding topmost key+value pairs
BINFLOAT = b'G' # push float; arg is 8-byte float encoding
TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = b'\x80' # identify pickle protocol
NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple
EXT1 = b'\x82' # push object from extension registry; 1-byte index
EXT2 = b'\x83' # ditto, but 2-byte index
EXT4 = b'\x84' # ditto, but 4-byte index
TUPLE1 = b'\x85' # build 1-tuple from stack top
TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items
TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items
NEWTRUE = b'\x88' # push True
NEWFALSE = b'\x89' # push False
LONG1 = b'\x8a' # push long from < 256 bytes
LONG4 = b'\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
# Protocol 3 (Python 3.x)
BINBYTES = b'B' # push bytes; counted binary string argument
SHORT_BINBYTES = b'C' # " " ; " " " " < 256 bytes
# Protocol 4
SHORT_BINUNICODE = b'\x8c' # push short string; UTF-8 length < 256 bytes
BINUNICODE8 = b'\x8d' # push very long string
BINBYTES8 = b'\x8e' # push very long bytes string
EMPTY_SET = b'\x8f' # push empty set on the stack
ADDITEMS = b'\x90' # modify set by adding topmost stack items
FROZENSET = b'\x91' # build frozenset from topmost stack items
NEWOBJ_EX = b'\x92' # like NEWOBJ but work with keyword only arguments
STACK_GLOBAL = b'\x93' # same as GLOBAL but using names on the stacks
MEMOIZE = b'\x94' # store top of the stack in memo
FRAME = b'\x95' # indicate the beginning of a new frame
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)])
class _Framer:
_FRAME_SIZE_MIN = 4
_FRAME_SIZE_TARGET = 64 * 1024
def __init__(self, file_write):
self.file_write = file_write
self.current_frame = None
def start_framing(self):
self.current_frame = io.BytesIO()
def end_framing(self):
if self.current_frame and self.current_frame.tell() > 0:
self.commit_frame(force=True)
self.current_frame = None
def commit_frame(self, force=False):
if self.current_frame:
f = self.current_frame
if f.tell() >= self._FRAME_SIZE_TARGET or force:
data = f.getbuffer()
write = self.file_write
if len(data) >= self._FRAME_SIZE_MIN:
# Issue a single call to the write method of the underlying
# file object for the frame opcode with the size of the
# frame. The concatenation is expected to be less expensive
# than issuing an additional call to write.
write(FRAME + pack("<Q", len(data)))
# Issue a separate call to write to append the frame
# contents without concatenation to the above to avoid a
# memory copy.
write(data)
# Start the new frame with a new io.BytesIO instance so that
# the file object can have delayed access to the previous frame
# contents via an unreleased memoryview of the previous
# io.BytesIO instance.
self.current_frame = io.BytesIO()
def write(self, data):
if self.current_frame:
return self.current_frame.write(data)
else:
return self.file_write(data)
def write_large_bytes(self, header, payload):
write = self.file_write
if self.current_frame:
# Terminate the current frame and flush it to the file.
self.commit_frame(force=True)
# Perform direct write of the header and payload of the large binary
# object. Be careful not to concatenate the header and the payload
# prior to calling 'write' as we do not want to allocate a large
# temporary bytes object.
# We intentionally do not insert a protocol 4 frame opcode to make
# it possible to optimize file.read calls in the loader.
write(header)
write(payload)
class _Unframer:
def __init__(self, file_read, file_readline, file_tell=None):
self.file_read = file_read
self.file_readline = file_readline
self.current_frame = None
def read(self, n):
if self.current_frame:
data = self.current_frame.read(n)
if not data and n != 0:
self.current_frame = None
return self.file_read(n)
if len(data) < n:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_read(n)
def readline(self):
if self.current_frame:
data = self.current_frame.readline()
if not data:
self.current_frame = None
return self.file_readline()
if data[-1] != b'\n'[0]:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_readline()
def load_frame(self, frame_size):
if self.current_frame and self.current_frame.read() != b'':
raise UnpicklingError(
"beginning of a new frame before end of current frame")
self.current_frame = io.BytesIO(self.file_read(frame_size))
# Tools used for pickling.
def _getattribute(obj, name):
for subpath in name.split('.'):
if subpath == '<locals>':
raise AttributeError("Can't get local attribute {!r} on {!r}"
.format(name, obj))
try:
parent = obj
obj = getattr(obj, subpath)
except AttributeError:
raise AttributeError("Can't get attribute {!r} on {!r}"
.format(name, obj)) from None
return obj, parent
def whichmodule(obj, name):
"""Find the module an object belong to."""
module_name = getattr(obj, '__module__', None)
if module_name is not None:
return module_name
# Protect the iteration by using a list copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
for module_name, module in list(sys.modules.items()):
if module_name == '__main__' or module is None:
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except AttributeError:
pass
return '__main__'
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0 is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0)
b''
>>> encode_long(255)
b'\xff\x00'
>>> encode_long(32767)
b'\xff\x7f'
>>> encode_long(-256)
b'\x00\xff'
>>> encode_long(-32768)
b'\x00\x80'
>>> encode_long(-128)
b'\x80'
>>> encode_long(127)
b'\x7f'
>>>
"""
if x == 0:
return b''
nbytes = (x.bit_length() >> 3) + 1
result = x.to_bytes(nbytes, byteorder='little', signed=True)
if x < 0 and nbytes > 1:
if result[-1] == 0xff and (result[-2] & 0x80) != 0:
result = result[:-1]
return result
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long(b'')
0
>>> decode_long(b"\xff\x00")
255
>>> decode_long(b"\xff\x7f")
32767
>>> decode_long(b"\x00\xff")
-256
>>> decode_long(b"\x00\x80")
-32768
>>> decode_long(b"\x80")
-128
>>> decode_long(b"\x7f")
127
"""
return int.from_bytes(data, byteorder='little', signed=True)
# Pickling machinery
class _Pickler:
def __init__(self, file, protocol=None, *, fix_imports=True):
"""This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2, 3 and 4. The
default protocol is 3; a backward-incompatible protocol designed
for Python 3.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The *file* argument must have a write() method that accepts a
single bytes argument. It can thus be a file object opened for
binary writing, an io.BytesIO instance, or any other custom
object that meets this interface.
If *fix_imports* is True and *protocol* is less than 3, pickle
will try to map the new Python 3 names to the old module names
used in Python 2, so that the pickle data stream is readable
with Python 2.
"""
if protocol is None:
protocol = DEFAULT_PROTOCOL
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
try:
self._file_write = file.write
except AttributeError:
raise TypeError("file must have a 'write' attribute")
self.framer = _Framer(self._file_write)
self.write = self.framer.write
self._write_large_bytes = self.framer.write_large_bytes
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
self.fix_imports = fix_imports and protocol < 3
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects
are pickled by reference and not by value. This method is
useful when re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
# Check whether Pickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Pickler.dump().
if not hasattr(self, "_file_write"):
raise PicklingError("Pickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
if self.proto >= 2:
self.write(PROTO + pack("<B", self.proto))
if self.proto >= 4:
self.framer.start_framing()
self.save(obj)
self.write(STOP)
self.framer.end_framing()
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
idx = len(self.memo)
self.write(self.put(idx))
self.memo[id(obj)] = idx, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, idx):
if self.proto >= 4:
return MEMOIZE
elif self.bin:
if idx < 256:
return BINPUT + pack("<B", idx)
else:
return LONG_BINPUT + pack("<I", idx)
else:
return PUT + repr(idx).encode("ascii") + b'\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i):
if self.bin:
if i < 256:
return BINGET + pack("<B", i)
else:
return LONG_BINGET + pack("<I", i)
return GET + repr(i).encode("ascii") + b'\n'
def save(self, obj, save_persistent_id=True):
self.framer.commit_frame()
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid is not None and save_persistent_id:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x is not None:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f is not None:
f(self, obj) # Call unbound method with explicit self
return
# Check private dispatch table if any, or else copyreg.dispatch_table
reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
if reduce is not None:
rv = reduce(obj)
else:
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = False
if issc:
self.save_global(obj)
return
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce is not None:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce is not None:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if isinstance(rv, str):
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if not isinstance(rv, tuple):
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid, save_persistent_id=False)
self.write(BINPERSID)
else:
try:
self.write(PERSID + str(pid).encode("ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
def save_reduce(self, func, args, state=None, listitems=None,
dictitems=None, obj=None):
# This API is called by some subclasses
if not isinstance(args, tuple):
raise PicklingError("args from save_reduce() must be a tuple")
if not callable(func):
raise PicklingError("func from save_reduce() must be callable")
save = self.save
write = self.write
func_name = getattr(func, "__name__", "")
if self.proto >= 2 and func_name == "__newobj_ex__":
cls, args, kwargs = args
if not hasattr(cls, "__new__"):
raise PicklingError("args[0] from {} args has no __new__"
.format(func_name))
if obj is not None and cls is not obj.__class__:
raise PicklingError("args[0] from {} args has the wrong class"
.format(func_name))
if self.proto >= 4:
save(cls)
save(args)
save(kwargs)
write(NEWOBJ_EX)
else:
func = partial(cls.__new__, cls, *args, **kwargs)
save(func)
save(())
write(REDUCE)
elif self.proto >= 2 and func_name == "__newobj__":
# A __reduce__ implementation can direct protocol 2 or newer to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
if id(obj) in self.memo:
write(POP + self.get(self.memo[id(obj)][0]))
else:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[type(None)] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(NEWTRUE if obj else NEWFALSE)
else:
self.write(TRUE if obj else FALSE)
dispatch[bool] = save_bool
def save_long(self, obj):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + pack("<B", obj))
return
if obj <= 0xffff:
self.write(BININT2 + pack("<H", obj))
return
# Next check for 4-byte signed ints:
if -0x80000000 <= obj <= 0x7fffffff:
self.write(BININT + pack("<i", obj))
return
if self.proto >= 2:
encoded = encode_long(obj)
n = len(encoded)
if n < 256:
self.write(LONG1 + pack("<B", n) + encoded)
else:
self.write(LONG4 + pack("<i", n) + encoded)
return
if -0x80000000 <= obj <= 0x7fffffff:
self.write(INT + repr(obj).encode("ascii") + b'\n')
else:
self.write(LONG + repr(obj).encode("ascii") + b'L\n')
dispatch[int] = save_long
def save_float(self, obj):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
dispatch[float] = save_float
def save_bytes(self, obj):
if self.proto < 3:
if not obj: # bytes object is empty
self.save_reduce(bytes, (), obj=obj)
else:
self.save_reduce(codecs.encode,
(str(obj, 'latin1'), 'latin1'), obj=obj)
return
n = len(obj)
if n <= 0xff:
self.write(SHORT_BINBYTES + pack("<B", n) + obj)
elif n > 0xffffffff and self.proto >= 4:
self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
elif n >= self.framer._FRAME_SIZE_TARGET:
self._write_large_bytes(BINBYTES + pack("<I", n), obj)
else:
self.write(BINBYTES + pack("<I", n) + obj)
self.memoize(obj)
dispatch[bytes] = save_bytes
def save_str(self, obj):
if self.bin:
encoded = obj.encode('utf-8', 'surrogatepass')
n = len(encoded)
if n <= 0xff and self.proto >= 4:
self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
elif n > 0xffffffff and self.proto >= 4:
self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
elif n >= self.framer._FRAME_SIZE_TARGET:
self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
else:
self.write(BINUNICODE + pack("<I", n) + encoded)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\0", "\\u0000")
obj = obj.replace("\n", "\\u000a")
obj = obj.replace("\r", "\\u000d")
obj = obj.replace("\x1a", "\\u001a") # EOF on DOS
self.write(UNICODE + obj.encode('raw-unicode-escape') +
b'\n')
self.memoize(obj)
dispatch[str] = save_str
def save_tuple(self, obj):
if not obj: # tuple is empty
if self.bin:
self.write(EMPTY_TUPLE)
else:
self.write(MARK + TUPLE)
return
n = len(obj)
save = self.save
memo = self.memo
if n <= 3 and self.proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
self.write(POP * n + get)
else:
self.write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write = self.write
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if self.bin:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
write(TUPLE)
self.memoize(obj)
dispatch[tuple] = save_tuple
def save_list(self, obj):
if self.bin:
self.write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
self.write(MARK + LIST)
self.memoize(obj)
self._batch_appends(obj)
dispatch[list] = save_list
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_dict(self, obj):
if self.bin:
self.write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
self.write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.items())
dispatch[dict] = save_dict
if PyStringMap is not None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_set(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(set, (list(obj),), obj=obj)
return
write(EMPTY_SET)
self.memoize(obj)
it = iter(obj)
while True:
batch = list(islice(it, self._BATCHSIZE))
n = len(batch)
if n > 0:
write(MARK)
for item in batch:
save(item)
write(ADDITEMS)
if n < self._BATCHSIZE:
return
dispatch[set] = save_set
def save_frozenset(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(frozenset, (list(obj),), obj=obj)
return
write(MARK)
for item in obj:
save(item)
if id(obj) in self.memo:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
write(POP_MARK + self.get(self.memo[id(obj)][0]))
return
write(FROZENSET)
self.memoize(obj)
dispatch[frozenset] = save_frozenset
def save_global(self, obj, name=None):
write = self.write
memo = self.memo
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None:
name = obj.__name__
module_name = whichmodule(obj, name)
try:
__import__(module_name, level=0)
module = sys.modules[module_name]
obj2, parent = _getattribute(module, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module_name, name)) from None
else:
if obj2 is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module_name, name))
if self.proto >= 2:
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + pack("<B", code))
elif code <= 0xffff:
write(EXT2 + pack("<H", code))
else:
write(EXT4 + pack("<i", code))
return
lastname = name.rpartition('.')[2]
if parent is module:
name = lastname
# Non-ASCII identifiers are supported only with protocols >= 3.
if self.proto >= 4:
self.save(module_name)
self.save(name)
write(STACK_GLOBAL)
elif parent is not module:
self.save_reduce(getattr, (parent, lastname))
elif self.proto >= 3:
write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
bytes(name, "utf-8") + b'\n')
else:
if self.fix_imports:
r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
if (module_name, name) in r_name_mapping:
module_name, name = r_name_mapping[(module_name, name)]
elif module_name in r_import_mapping:
module_name = r_import_mapping[module_name]
try:
write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
bytes(name, "ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"can't pickle global identifier '%s.%s' using "
"pickle protocol %i" % (module, name, self.proto)) from None
self.memoize(obj)
def save_type(self, obj):
if obj is type(None):
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj is type(...):
return self.save_reduce(type, (...,), obj=obj)
return self.save_global(obj)
dispatch[FunctionType] = save_global
dispatch[type] = save_type
# Unpickling machinery
class _Unpickler:
def __init__(self, file, *, fix_imports=True,
encoding="ASCII", errors="strict"):
"""This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so
no proto argument is needed.
The argument *file* must have two methods, a read() method that
takes an integer argument, and a readline() method that requires
no arguments. Both methods should return bytes. Thus *file*
can be a binary file object opened for reading, an io.BytesIO
object, or any other custom object that meets this interface.
The file-like object must have two methods, a read() method
that takes an integer argument, and a readline() method that
requires no arguments. Both methods should return bytes.
Thus file-like object can be a binary file object opened for
reading, a BytesIO object, or any other custom object that
meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and
*errors*, which are used to control compatibility support for
pickle stream generated by Python 2. If *fix_imports* is True,
pickle will try to map the old Python 2 names to the new names
used in Python 3. The *encoding* and *errors* tell pickle how
to decode 8-bit string instances pickled by Python 2; these
default to 'ASCII' and 'strict', respectively. *encoding* can be
'bytes' to read theses 8-bit string instances as bytes objects.
"""
self._file_readline = file.readline
self._file_read = file.read
self.memo = {}
self.encoding = encoding
self.errors = errors
self.proto = 0
self.fix_imports = fix_imports
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
# Check whether Unpickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Unpickler.dump().
if not hasattr(self, "_file_read"):
raise UnpicklingError("Unpickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
self._unframer = _Unframer(self._file_read, self._file_readline)
self.read = self._unframer.read
self.readline = self._unframer.readline
self.metastack = []
self.stack = []
self.append = self.stack.append
self.proto = 0
read = self.read
dispatch = self.dispatch
try:
while True:
key = read(1)
if not key:
raise EOFError
assert isinstance(key, bytes_types)
dispatch[key[0]](self)
except _Stop as stopinst:
return stopinst.value
# Return a list of items pushed in the stack after last MARK instruction.
def pop_mark(self):
items = self.stack
self.stack = self.metastack.pop()
self.append = self.stack.append
return items
def persistent_load(self, pid):
raise UnpicklingError("unsupported persistent id encountered")
dispatch = {}
def load_proto(self):
proto = self.read(1)[0]
if not 0 <= proto <= HIGHEST_PROTOCOL:
raise ValueError("unsupported pickle protocol: %d" % proto)
self.proto = proto
dispatch[PROTO[0]] = load_proto
def load_frame(self):
frame_size, = unpack('<Q', self.read(8))
if frame_size > sys.maxsize:
raise ValueError("frame size > sys.maxsize: %d" % frame_size)
self._unframer.load_frame(frame_size)
dispatch[FRAME[0]] = load_frame
def load_persid(self):
try:
pid = self.readline()[:-1].decode("ascii")
except UnicodeDecodeError:
raise UnpicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
self.append(self.persistent_load(pid))
dispatch[PERSID[0]] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID[0]] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE[0]] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE[0]] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE[0]] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
val = int(data, 0)
self.append(val)
dispatch[INT[0]] = load_int
def load_binint(self):
self.append(unpack('<i', self.read(4))[0])
dispatch[BININT[0]] = load_binint
def load_binint1(self):
self.append(self.read(1)[0])
dispatch[BININT1[0]] = load_binint1
def load_binint2(self):
self.append(unpack('<H', self.read(2))[0])
dispatch[BININT2[0]] = load_binint2
def load_long(self):
val = self.readline()[:-1]
if val and val[-1] == b'L'[0]:
val = val[:-1]
self.append(int(val, 0))
dispatch[LONG[0]] = load_long
def load_long1(self):
n = self.read(1)[0]
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG1[0]] = load_long1
def load_long4(self):
n, = unpack('<i', self.read(4))
if n < 0:
# Corrupt or hostile pickle -- we never write one like this
raise UnpicklingError("LONG pickle has negative byte count")
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG4[0]] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT[0]] = load_float
def load_binfloat(self):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT[0]] = load_binfloat
def _decode_string(self, value):
# Used to allow strings from Python 2 to be decoded either as
# bytes or Unicode strings. This should be used only with the
# STRING, BINSTRING and SHORT_BINSTRING opcodes.
if self.encoding == "bytes":
return value
else:
return value.decode(self.encoding, self.errors)
def load_string(self):
data = self.readline()[:-1]
# Strip outermost quotes
if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
data = data[1:-1]
else:
raise UnpicklingError("the STRING opcode argument must be quoted")
self.append(self._decode_string(codecs.escape_decode(data)[0]))
dispatch[STRING[0]] = load_string
def load_binstring(self):
# Deprecated BINSTRING uses signed 32-bit length
len, = unpack('<i', self.read(4))
if len < 0:
raise UnpicklingError("BINSTRING pickle has negative byte count")
data = self.read(len)
self.append(self._decode_string(data))
dispatch[BINSTRING[0]] = load_binstring
def load_binbytes(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINBYTES exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES[0]] = load_binbytes
def load_unicode(self):
self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
dispatch[UNICODE[0]] = load_unicode
def load_binunicode(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINUNICODE exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE[0]] = load_binunicode
def load_binunicode8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE8[0]] = load_binunicode8
def load_binbytes8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES8[0]] = load_binbytes8
def load_short_binstring(self):
len = self.read(1)[0]
data = self.read(len)
self.append(self._decode_string(data))
dispatch[SHORT_BINSTRING[0]] = load_short_binstring
def load_short_binbytes(self):
len = self.read(1)[0]
self.append(self.read(len))
dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
def load_short_binunicode(self):
len = self.read(1)[0]
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
def load_tuple(self):
items = self.pop_mark()
self.append(tuple(items))
dispatch[TUPLE[0]] = load_tuple
def load_empty_tuple(self):
self.append(())
dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1[0]] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2[0]] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3[0]] = load_tuple3
def load_empty_list(self):
self.append([])
dispatch[EMPTY_LIST[0]] = load_empty_list
def load_empty_dictionary(self):
self.append({})
dispatch[EMPTY_DICT[0]] = load_empty_dictionary
def load_empty_set(self):
self.append(set())
dispatch[EMPTY_SET[0]] = load_empty_set
def load_frozenset(self):
items = self.pop_mark()
self.append(frozenset(items))
dispatch[FROZENSET[0]] = load_frozenset
def load_list(self):
items = self.pop_mark()
self.append(items)
dispatch[LIST[0]] = load_list
def load_dict(self):
items = self.pop_mark()
d = {items[i]: items[i+1]
for i in range(0, len(items), 2)}
self.append(d)
dispatch[DICT[0]] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, args):
if (args or not isinstance(klass, type) or
hasattr(klass, "__getinitargs__")):
try:
value = klass(*args)
except TypeError as err:
raise TypeError("in constructor for %s: %s" %
(klass.__name__, str(err)), sys.exc_info()[2])
else:
value = klass.__new__(klass)
self.append(value)
def load_inst(self):
module = self.readline()[:-1].decode("ascii")
name = self.readline()[:-1].decode("ascii")
klass = self.find_class(module, name)
self._instantiate(klass, self.pop_mark())
dispatch[INST[0]] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
args = self.pop_mark()
cls = args.pop(0)
self._instantiate(cls, args)
dispatch[OBJ[0]] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args)
self.append(obj)
dispatch[NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
dispatch[NEWOBJ_EX[0]] = load_newobj_ex
def load_global(self):
module = self.readline()[:-1].decode("utf-8")
name = self.readline()[:-1].decode("utf-8")
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL[0]] = load_global
def load_stack_global(self):
name = self.stack.pop()
module = self.stack.pop()
if type(name) is not str or type(module) is not str:
raise UnpicklingError("STACK_GLOBAL requires str")
self.append(self.find_class(module, name))
dispatch[STACK_GLOBAL[0]] = load_stack_global
def load_ext1(self):
code = self.read(1)[0]
self.get_extension(code)
dispatch[EXT1[0]] = load_ext1
def load_ext2(self):
code, = unpack('<H', self.read(2))
self.get_extension(code)
dispatch[EXT2[0]] = load_ext2
def load_ext4(self):
code, = unpack('<i', self.read(4))
self.get_extension(code)
dispatch[EXT4[0]] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
if code <= 0: # note that 0 is forbidden
# Corrupt or hostile pickle.
raise UnpicklingError("EXT specifies code <= 0")
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this.
if self.proto < 3 and self.fix_imports:
if (module, name) in _compat_pickle.NAME_MAPPING:
module, name = _compat_pickle.NAME_MAPPING[(module, name)]
elif module in _compat_pickle.IMPORT_MAPPING:
module = _compat_pickle.IMPORT_MAPPING[module]
__import__(module, level=0)
if self.proto >= 4:
return _getattribute(sys.modules[module], name)[0]
else:
return getattr(sys.modules[module], name)
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
stack[-1] = func(*args)
dispatch[REDUCE[0]] = load_reduce
def load_pop(self):
if self.stack:
del self.stack[-1]
else:
self.pop_mark()
dispatch[POP[0]] = load_pop
def load_pop_mark(self):
self.pop_mark()
dispatch[POP_MARK[0]] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP[0]] = load_dup
def load_get(self):
i = int(self.readline()[:-1])
self.append(self.memo[i])
dispatch[GET[0]] = load_get
def load_binget(self):
i = self.read(1)[0]
self.append(self.memo[i])
dispatch[BINGET[0]] = load_binget
def load_long_binget(self):
i, = unpack('<I', self.read(4))
self.append(self.memo[i])
dispatch[LONG_BINGET[0]] = load_long_binget
def load_put(self):
i = int(self.readline()[:-1])
if i < 0:
raise ValueError("negative PUT argument")
self.memo[i] = self.stack[-1]
dispatch[PUT[0]] = load_put
def load_binput(self):
i = self.read(1)[0]
if i < 0:
raise ValueError("negative BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[BINPUT[0]] = load_binput
def load_long_binput(self):
i, = unpack('<I', self.read(4))
if i > maxsize:
raise ValueError("negative LONG_BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[LONG_BINPUT[0]] = load_long_binput
def load_memoize(self):
memo = self.memo
memo[len(memo)] = self.stack[-1]
dispatch[MEMOIZE[0]] = load_memoize
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND[0]] = load_append
def load_appends(self):
items = self.pop_mark()
list_obj = self.stack[-1]
try:
extend = list_obj.extend
except AttributeError:
pass
else:
extend(items)
return
# Even if the PEP 307 requires extend() and append() methods,
# fall back on append() if the object has no extend() method
# for backward compatibility.
append = list_obj.append
for item in items:
append(item)
dispatch[APPENDS[0]] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM[0]] = load_setitem
def load_setitems(self):
items = self.pop_mark()
dict = self.stack[-1]
for i in range(0, len(items), 2):
dict[items[i]] = items[i + 1]
dispatch[SETITEMS[0]] = load_setitems
def load_additems(self):
items = self.pop_mark()
set_obj = self.stack[-1]
if isinstance(set_obj, set):
set_obj.update(items)
else:
add = set_obj.add
for item in items:
add(item)
dispatch[ADDITEMS[0]] = load_additems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate is not None:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
inst_dict = inst.__dict__
intern = sys.intern
for k, v in state.items():
if type(k) is str:
inst_dict[intern(k)] = v
else:
inst_dict[k] = v
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD[0]] = load_build
def load_mark(self):
self.metastack.append(self.stack)
self.stack = []
self.append = self.stack.append
dispatch[MARK[0]] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP[0]] = load_stop
# Shorthands
def _dump(obj, file, protocol=None, *, fix_imports=True):
_Pickler(file, protocol, fix_imports=fix_imports).dump(obj)
def _dumps(obj, protocol=None, *, fix_imports=True):
f = io.BytesIO()
_Pickler(f, protocol, fix_imports=fix_imports).dump(obj)
res = f.getvalue()
assert isinstance(res, bytes_types)
return res
def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict"):
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
def _loads(s, *, fix_imports=True, encoding="ASCII", errors="strict"):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(s)
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
# Use the faster _pickle if possible
try:
from _pickle import (
PickleError,
PicklingError,
UnpicklingError,
Pickler,
Unpickler,
dump,
dumps,
load,
loads
)
except ImportError:
Pickler, Unpickler = _Pickler, _Unpickler
dump, dumps, load, loads = _dump, _dumps, _load, _loads
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='display contents of the pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
if not args.pickle_file:
parser.print_help()
else:
import pprint
for f in args.pickle_file:
obj = load(f)
pprint.pprint(obj)
| [
"jczhang@bouffalolab.com"
] | jczhang@bouffalolab.com |
708b31742d5ab2a45e7e77695a4833376b7ccbe7 | e74c6dd0063baa70756c4243e18ffc848059fc29 | /scripts/po_summator.py | 0e68b0c6a04b0dff8785d1060932365189783bc6 | [] | no_license | Alakeyska/ros_polynom_pkg | a89ff475798a5d727ddeaeb1f7583eef803e2069 | 7e7ec371856e1017e9dd72b9c8fd182e9763077b | refs/heads/master | 2023-08-16T00:58:16.292603 | 2021-10-08T14:23:29 | 2021-10-08T14:23:29 | 414,976,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python3
from study_pkg.srv import PolyArray, PolyArrayResponse
import rospy
from std_msgs.msg import String
from study_pkg.msg import Array_int64
#тело программы
def start_summator(req):
msg = String()
result = 0
for i in range(len(req.elements)):
result = result + (req.elements[i] ** (len(req.elements) - i))
rospy.loginfo('summator is summing: %s' % result)
msg.data = str(result)
pub.publish(msg)
rospy.init_node('summator')
pub = rospy.Publisher('from_summator', String, queue_size=10)
rospy.Subscriber('to_summator', Array_int64, start_summator, queue_size=10)
rospy.spin()
| [
"Salamakhin67@gmail.com"
] | Salamakhin67@gmail.com |
63e1be1fd76f8448e267454e3deb19a10457c6f3 | b13deada3a24488424fd144a85912b70cf9db2b8 | /Homework 4/srravi_HW4_423.py | 77e697e89318b2457278006631ab7a1213022569 | [] | no_license | sravi97/I210_Information_Infrastructure1 | 33dfc14b3776ca8521098fa687aa959a2b84af9a | 2035fa78eec647c573d96c6f17e15b5233d93b88 | refs/heads/main | 2023-07-04T17:55:26.442289 | 2021-08-24T02:19:34 | 2021-08-24T02:19:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | #get input from user
sentence = input("Enter a sentence: ")
total = 0
def average():
#splits sentence
words = sentence.split()
for word in words:
total += len(word)
return (total*1/len(list))
print(total/len(list))
| [
"noreply@github.com"
] | noreply@github.com |
d7570072e9d14e0be8339fba58342db421aac083 | 5a18e2da3a1449a18221fa5bac3222344c91b7f5 | /blog/models.py | fa722fe200b60df1591be762c1769c284eb4080a | [] | no_license | duonghao314/dh314blog | d88c88d8edc70f885e5416430a5b9901678b2e97 | c1ebf35c4f1f2bf47fb3b927f7063ee1ca3c682e | refs/heads/master | 2020-05-27T09:57:19.849920 | 2019-06-03T09:40:36 | 2019-06-03T09:40:36 | 188,573,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import os
from uuid import uuid4
import time
from django.db import models
from django.contrib.auth.models import User
from Controller import Functions
from tinymce import HTMLField
# Create your models here.
class Category(models.Model):
catName = models.CharField(max_length=20, null=False)
catKeyWord = models.CharField(max_length=10, null=False)
catViews = models.IntegerField(default=0)
def __str__(self):
return self.catName
class Article(models.Model):
artName = models.CharField(max_length=200, null=False)
artAuthor = models.ForeignKey(User, on_delete=models.CASCADE)
artContent = HTMLField('Content')
artViews = models.IntegerField(default=0)
artDate = models.DateTimeField(auto_now=True)
artCat = models.ForeignKey(Category, verbose_name='catName', on_delete=models.CASCADE)
artImg = models.ImageField(upload_to=Functions.path_and_rename('upload/here/{}'.format(time.strftime("%Y/%m/%d"))))
artUrl = models.CharField(max_length=255, null=False, default='null')
def save(self, *args, **kwargs):
if self.artUrl == 'null':
self.artUrl = Functions.splitTitle(self.artName)
super().save(*args, **kwargs)
else:
super().save(*args, **kwargs)
| [
"48150245+duonghao314@users.noreply.github.com"
] | 48150245+duonghao314@users.noreply.github.com |
f9ee022b9c5c737fd15560ef5f84ed35241375fa | 443a107abf3b808b573e68d3262f0f221c0de26f | /models/bot_dialog_api/user.py | 6ca4bd41e9c03c7a93fcc85c59023b9b24387e7c | [] | no_license | leileixiao/PaddlePaddle-Knover | 844a5879ea328a2030b24e921c3017e9455c62cf | f13313ed29db36645e366ef5f326470c1fc61640 | refs/heads/main | 2023-01-23T15:15:40.590966 | 2020-12-10T07:24:56 | 2020-12-10T07:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | """
定义用户资料
"""
class User:
def __init__(self, user_id):
self.user_id = user_id
self.session_id = ''
self._history = []
self.history = ''
self.MAX_TURN = 7
def get_service_id(self, session_id):
self.session_id = session_id
def update_history(self, text):
self._history.append(text)
self._history = self._history[-self.MAX_TURN*2-1:]
self.history = ','.join(["\""+sent+"\"" for sent in self._history])
def start_new_dialog(self):
self.session_id = ''
self._history = []
self.history = ''
def change_max_turn(self, max_turn):
self.MAX_TURN = max_turn
| [
"623320480@qq.com"
] | 623320480@qq.com |
436a889abd6ef47d828bd057761c08b866dde2ec | 0f8362783581e55d46c91755eabc88ff7fd3d66e | /src/test/ingestDataFromKgs.py | 0d358a74a6595b396dae732faa0d7f2994b0cf67 | [] | no_license | mervynn/CS523-BDT | 9464c403905931590015aba8910677c3dfaf2dc4 | b83ccff8f3dd350a038ba727718e272d98ead223 | refs/heads/master | 2021-04-15T13:05:12.572654 | 2018-03-24T08:29:33 | 2018-03-24T08:29:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | import sgf
import os
for fn in os.listdir('.'):
if os.path.isfile(fn):
print (fn)
with open("/Users/hemingwei/Documents/MUM/BDT/Project/kgs-19-2018-02-new/2018-02-01-1.sgf") as f:
collection = sgf.parse(f.read())
for gameTree in collection:
cnt = 0
for node in gameTree:
print(cnt)
for key, values in sorted(node.properties.items()):
print key
for value in values:
print("[%s]" % value)
cnt += 1
| [
"Mingwei@hemingweideMacBook-Pro-6.local"
] | Mingwei@hemingweideMacBook-Pro-6.local |
5fe3ccaa919261885943ed8056e9b804f3b502cf | d007f1d42ecef761c250a4490f2e7a5ec274dba2 | /main.py | 29c11d4627f6a748f2a5bb621205703dd5947bef | [] | no_license | sam2403/vHackAPI-Python | d6b5c7b8d53f2bdef5acae67e203550bb1fb7ad0 | 9eb58c9a0ce44c2762dd0e825df0f99e2e48bebb | refs/heads/master | 2020-05-20T17:44:09.502191 | 2017-03-09T21:09:25 | 2017-03-09T21:09:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,877 | py | #!/usr/bin/python2.7
#-*- coding: utf-8
from classes import API
from classes import IP
from console import Console
from update import Update
from utils import Utils
from botnet import Botnet
from random import randrange, uniform
from collections import Counter
import time
import json
def count_keys(mydict):
for key, value in mydict:
if isinstance(value, Mapping):
for item in count_keys(value):
yield 1
yield 1
# Enter username and password
api = API("username","password")
# Enter Max Antivir to attack in normal mode
maxanti_normal = 1500
# Enter Max Antivir to attack tournament
maxanti_tournament = 1900
# Enter Amount of Attacks normal
attacks_normal = 30
# Enter Amount of Attacks in tournament
attacks_tournament = 100
# Enter Updates (inet, hdd, cpu, ram, fw, av, sdk, ipsp, spam, scan, adw)
#updates = ["ipsp", "adw", "fw", "scan", "sdk", "av"]
updates = ["ipsp", "scan", "sdk", "av"]
#updates = ["ipsp", "sdk"]
#Do you want to attack during tournament [True, False]
joinTournament = True
#Time to wait between each cycle in seconds
wait = round(uniform(0,1), 2)
wait_load = round(uniform(1,5), 2)
c = Console(api)
u = Update(api)
b = Botnet(api)
updatecount = 0
attackneeded = False
while True:
attackneeded = False
stat = "0"
while "0" in stat:
stat = u.startTask(updates[updatecount])
if "0" in stat:
print "updating " + updates[updatecount] + " level +1"
#print "Started Update
print "Waiting... in update"
#u.useBooster()
time.sleep(wait_load)
updatecount += 1
if updatecount == 14:
while updatecount > 0:
print(u.getTasks())
#u.useBooster()
if updatecount:
pass
#u.finishAll()
if updatecount >= len(updates):
updatecount = 0
elif "1" in stat:
attackneeded = True
if joinTournament:
if c.getTournament():
attackneeded = True
if attackneeded == False:
wait_load = round(uniform(1,5), 2)
try:
usebooster = u.getTasks()
json_data = json.loads(usebooster)
except ValueError:
print "Connexion Error try again..."
pass
try:
while len(json_data["data"]) > 1:
if int(json_data["boost"]) > 5:
u.useBooster()
print "Use the booster in rest " + str(int(json_data["boost"])-1)
# UPDATE Value
else:
print "you are < 5 boost."
break
usebooster = u.getTasks()
json_data = json.loads(usebooster)
except KeyError:
pass
except TypeError:
pass
if b.attackable():
print "Attacking with Botnet"
attackbot = b.attackall()
print attackbot
if attackneeded:
c.attack(attacks_tournament, maxanti_tournament, wait)
wait = round(uniform(0,1), 2)
else:
print "Waiting... in normal " + str(wait) + "s"
attackneeded = True
if attackneeded:
c.attack(attacks_normal, maxanti_normal, wait)
attackneeded = False
#wait_load = round(uniform(1,5), 2)
| [
"noreply@github.com"
] | noreply@github.com |
6c32637f146447fc95c3941386d8534c7c68f874 | 73d9e70adfbc6043ecdb8de2ea1b2339007ea5e9 | /tests/features/stdin_input_steps.py | 3256a36e7707594433db8cc6b255a844a6491819 | [
"Apache-2.0"
] | permissive | cheesinglee/bigmler | e147df8d98bcc0624b325fccf381577e74e62b1e | cda58f6149e211897c931300083c6b1b3686ff11 | refs/heads/master | 2020-04-06T07:01:11.195760 | 2015-02-12T23:14:31 | 2015-02-12T23:14:31 | 20,578,762 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | import os
import time
import csv
import json
from lettuce import step, world
from subprocess import check_call, CalledProcessError
from bigml.api import check_resource
from bigmler.checkpoint import file_number_of_lines
from common_steps import check_debug
from basic_test_prediction_steps import shell_execute
@step(r'I create BigML resources uploading train "(.*)" file to test "(.*)" read from stdin and log predictions in "(.*)"$')
def i_create_all_resources_to_test_from_stdin(step, data=None, test=None, output=None):
if data is None or test is None or output is None:
assert False
command = ("cat " + test + "|bigmler --train " + data +
" --test --store --output " + output + " --max-batch-models 1")
shell_execute(command, output, test=test)
@step(r'I create a BigML source from stdin using train "(.*)" file and logging in "(.*)"$')
def i_create_source_from_stdin(step, data=None, output_dir=None):
if data is None or output_dir is None:
assert False
command = ("cat " + data + "|bigmler --train " +
"--store --no-dataset --no-model --output-dir " +
output_dir + " --max-batch-models 1")
shell_execute(command, output_dir + "/test", test=None)
| [
"merce@bigml.com"
] | merce@bigml.com |
6cc57c66fb302e40e515a5ccdd279bb4d4c7d0e1 | 06324b7f931f3393e30e93900eb4a3b74ac19f70 | /src/utils/migrations/0001_create_superuser.py | ce61fa7f77c6f6748492ec390c785ec620ec95ce | [] | no_license | kliyes/django_project_template_docker | 277b3f22a63f11ea7108bcee77207174e457475d | 81b2e3f09f5b317f14e780949d14fea394534061 | refs/heads/master | 2021-01-20T09:02:09.003957 | 2018-07-19T02:39:37 | 2018-07-19T02:39:37 | 90,213,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.hashers import make_password
from django.db import migrations
def add_admin(apps, schema_editor):
"""
Add a default superuser: admin/admin when migrate
"""
User = apps.get_model("auth", "User")
User.objects.update_or_create(
username="admin", defaults={
"password": make_password("admin"),
"is_superuser": True,
"is_staff": True
})
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.RunPython(add_admin)
]
| [
"tom@kliyes.com"
] | tom@kliyes.com |
5e77a1ffb92b68337f40c5a963671ef0ed55ef31 | a0b9361f4a17d5b5fdbacd45d05bef9a8f35463a | /Deber/progra1.py | bef79cd9e86161edb59027f6c245fdea03342960 | [] | no_license | marbelyveroy/Sumativa4 | a589aa66799683b04ce13edad15e5e9e21b41804 | 847060781969beb2e0df14be1cfea7f8cdda16e1 | refs/heads/main | 2022-12-31T03:27:32.278091 | 2020-10-17T04:20:05 | 2020-10-17T04:20:05 | 304,794,698 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,949 | py | print('\t\t ESCUELA POLITECNCA NACIONAL')
print('INTEGRANTES:')
print('\t\t Edison Osorio')
print('\t\t Micha Cardenas')
print("\t\t Stalin Maza")
import sys
import math
#FUNCIONES DE LAS FIGURAS
#crear txt triangulo, cuadrado
def crear_E():
archT=open('TRIANGULO.txt','w')
archC=open('CUADRADO.txt','w')
archO=open('OCTAGONO.txt','w')
archO.close()
archT.close()
archC.close()
def triangulo():
archT=open('TRIANGULO.txt','a')
print('\tTRIANGULO')
lado=int(input('Ingrese la longitud del lado (base):\n'))
altura=int(input('Ingrese la altura:\n'))
area=(lado*altura)/2
perimetro=lado*3
print('El area del triangulo es: ',area)
print('El perímetro del triángulo es: ',perimetro)
#grabar los resultados en el txt
archT.write(str(area))
archT.write('\n')
archT.write(str(perimetro))
archT.close()
def cuadrado():
archC=open('CUADRADO.txt','a')
print('\tCUADRADO')
lado=int(input('Ingrese la longitud del lado: \n'))
area=lado*lado
perimetro=lado*4
print('El area del cuadrado es: ',area)
print('El perímetro del cuadrado es: ',perimetro)
#grabar los resultaos en txt
archC.write(str(area))
archC.write('\n')
archC.write(str(perimetro))
archC.close()
def pentagonoR():
print("\tPENTAGONO REGULAR")
lado = int(input('Ingrese la longitud del lado: \n'))
temp = perimetroF(5,lado,"Pentagono")
areaF(temp,apotemaF(lado,5),"Pentagono")#calcula el area
creartxt("PENTAGONO_REGULAR")
grabartxt(temp,areaF,"PENTAGONO_REGULAR")
def hexagonoR():
print("\tHEXAGONO REGULAR")
lado = int(input('Ingrese la longitud del lado: \n'))
temp = perimetroF(6,lado,"Hexagono")
areaF(temp,apotemaF(lado,6),"Hexagono")#calcula el area
creartxt("hexagono_regular")
grabartxt(temp,areaF,"hexagono_regular")
def heptagonoR():
print("\tHEPTAGONO REGULAR")
lado = int(input('Ingrese la longitud del lado: \n'))
temp = perimetroF(7,lado,"Heptagono")
areaF(temp,apotemaF(lado,7),"Heptagono")#calcula el area
#crear el txt de la figura
creartxt("HEPTAGONO")
#guardamos en el txt
grabartxt(temp,areaF,"HEPTAGONO")
def octagono():
archO=open('OCTAGONO.txt','a')
print("\t OCTAGONO REGULAR")
#se puede calcular mediante el ángulo centra lo cual no sirve para sacar el apotema
angulo_central=360/8
#ingresamos el lado
lado=int(input ("Ingrese el lado del octagono regular"))
#calculo del perimetro n *l
perimetro=8*lado
apotem=lado/(2*math.tan(angulo_central/2))
area=lado= 4*lado*apotem
print("el perimetro del octagono regular es",perimetro)
print (" el apotema es ",apotem)
print("el area es ",area)
archO.write(str(area))
archO.write('\n')
archO.write(str(perimetro))
archO.close()
def eneagono():
lado=int(input ("Ingrese el lado del eneagono regular"))
perimetro=lado*9
#Formula sacar el area de una figura de 9 lados
area=9*(lado*lado)/(4 * math.tan(180/2))
print("el perimetro del eneagono regular es",perimetro)
print("el area es ",area)
#crear el txt de la figura
creartxt("ENEAGONO")
#guardamos en el txt
grabartxt(perimetro,area,"ENEAGONO")
def decagono ():
lado=int(input ("Ingrese el lado del decagono regular"))
perimetro=lado*10
#Formula sacar el area del decagono
area=10*(lado*lado)/(4 * math.tan(180/10))
print("el perimetro del decagonno regular es",perimetro)
print("el area es ",area)
#crear el txt de la figura
creartxt("DECAGONO")
#guardamos en el txt
grabartxt(perimetro,area,"DECAGONO")
#OPERACIONES MATEMATICAS
def perimetroF(NumL,LongL,nombre):
per = NumL * LongL #calcula el perimetro
print("El perímetro del ",nombre," es: ",per)
return per
def areaF(per,apotema,nombre):
areaT = (per*apotema)/2 #calcula el area
print("El area del ",nombre," es: ",areaT)
return areaT
def apotemaF(lado,n):
conversion = math.radians(360/n)
tangente = math.tan(conversion/2)
apot = lado/(2*tangente)
return apot
#FUNCIONES PARA CREAR Y GRABAR TXTS
def creartxt(nombre):
name = nombre + ".txt"
archi = open(name,"w")
archi.close()
def grabartxt(perimetro,area,nombre):
name = nombre + ".txt"
print(perimetro)
print(area)
a = str(perimetro)
b = str(area)
archi=open(name,"a")
archi.write("El perimetro es: " + a +"\n")
archi.write("El area es: " + b + "\n")
archi.close()
#MENU Y SWITCH
def switch(NumLados):
if NumLados=='3':
triangulo()
repetir()
elif NumLados=='4':
cuadrado()
repetir()
elif NumLados == '5': #aqui realizamos las opciones del switch de acuerdo a lo que escoga
pentagonoR() #a lo que escoga el usuario.
repetir()
elif NumLados == '6':
hexagonoR()
repetir()
elif NumLados == '7':
heptagonoR()
repetir()
elif NumLados == '8':
octagono()
repetir()
elif NumLados == '9' :
eneagono()
repetir()
elif NumLados =='10':
decagono()
repetir()
else:
print("¡¡ERROR!!..NUMERO DE LADOS DEBE ESTAR EN RANGO DE 3 A 10")
menu()
def menu():
NumLados =input('INGRESE EL # DE LADOS\n')
switch(NumLados) #Este es el que recibe el numero de lados
def repetir():
escoger = input("Ingrese S si desea continuar o N si desea salir\n")
while escoger == "S" or escoger == "s":
menu() #aqui damos la opcion al usuario de si desea continuar en el programa
print ("Programa Terminado")
sys.exit()
def main():
#llamar a la funcion crear txt
crear_E()
#creartxt("juanito")
menu() #llamamos a la funcion del menu
main()
| [
"36525306+marbelyveroy@users.noreply.github.com"
] | 36525306+marbelyveroy@users.noreply.github.com |
7c3a5292dbdf6072cb25a109cfdd13c7983d7548 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/common/types/click_location.py | 89822bb622bd68c49742d2af6245686e64e28b2b | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.common',
marshal='google.ads.googleads.v7',
manifest={
'ClickLocation',
},
)
class ClickLocation(proto.Message):
r"""Location criteria associated with a click.
Attributes:
city (str):
The city location criterion associated with
the impression.
country (str):
The country location criterion associated
with the impression.
metro (str):
The metro location criterion associated with
the impression.
most_specific (str):
The most specific location criterion
associated with the impression.
region (str):
The region location criterion associated with
the impression.
"""
city = proto.Field(
proto.STRING,
number=6,
optional=True,
)
country = proto.Field(
proto.STRING,
number=7,
optional=True,
)
metro = proto.Field(
proto.STRING,
number=8,
optional=True,
)
most_specific = proto.Field(
proto.STRING,
number=9,
optional=True,
)
region = proto.Field(
proto.STRING,
number=10,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
60be40282c9e9385dc8ff890f71905497b7339d9 | 81bfeed1f323557ef1f5d6f283fd943f53de9f85 | /Python/Array/array_rotation.py | 478b22cacd82f1615226849db7230a57a32ca9fe | [] | no_license | simarpreet96/python_basic | f89d4db4e0a52ef61c79cd680888364e9fd736ac | a3512880d3a1e45376345a6fe5ac8afb0da063b1 | refs/heads/master | 2020-11-26T22:05:35.053591 | 2019-12-20T07:38:35 | 2019-12-20T07:38:35 | 229,214,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | #Function to left rotate arr[] of size n by d*/
def leftRotate(arr, d, n):
for i in range(d):
leftRotatebyOne(arr, n)
#Function to left Rotate arr[] of size n by 1*/
def leftRotatebyOne(arr, n):
temp = arr[0]
for i in range(n-1):
arr[i] = arr[i+1]
arr[n-1] = temp
# utility function to print an array */
def printArray(arr,size):
for i in range(size):
print ("%d"% arr[i],end=" ")
# Driver program to test above functions */
arr = [1, 2, 3, 4, 5, 6, 7]
leftRotate(arr, 2, 7)
printArray(arr, 7)
| [
"noreply@github.com"
] | noreply@github.com |
83bb0248c83c6338ef2713a3a5daf256ea2797db | e979202a53a7f14bb2d9c6b9b4752ffd78021d69 | /scripts/obj2gif.py | f2559d9b644ccf472b215bf8de85028a0d2959b5 | [] | no_license | dysdsyd/GCNN-A | 39198ced28bc3040a4514b936999ee89f62043e9 | 54c9973015c72c822390dbd3795bd71cea41cd70 | refs/heads/master | 2022-09-05T09:07:20.842792 | 2020-05-26T02:08:47 | 2020-05-26T02:08:47 | 253,641,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,939 | py | import argparse
import logging
import os,sys
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from typing import Type
import random
from tqdm import tqdm
import pdb
import torch
import pickle
import pandas as pd
import numpy as np
import os
import yaml
import re
from torch import nn, optim
from torch.utils.data import DataLoader
#from pytorch3d.structures import Textures
from pytorch3d.utils import ico_sphere
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.io import load_obj, save_obj
# from pytorch3d.io import load_objs_as_meshes
from pytorch3d.loss import mesh_laplacian_smoothing
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.animation #import FuncAnimation
from matplotlib.animation import FuncAnimation
from PIL import Image
import numpy as np
# Data structures and functions for mesh rendering
from pytorch3d.structures import Meshes
from pytorch3d.structures import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
TexturedSoftPhongShader,
HardPhongShader, HardFlatShader
)
from pytorch3d.renderer.mesh.texturing import interpolate_texture_map, interpolate_vertex_colors
from pytorch3d.renderer.blending import (
BlendParams,
hard_rgb_blend,
sigmoid_alpha_blend,
softmax_rgb_blend,
)
mpl.rcParams['savefig.dpi'] = 80
mpl.rcParams['figure.dpi'] = 80
import warnings
warnings.filterwarnings("ignore")
class SimpleShader(nn.Module):
def __init__(self, device="cpu"):
super().__init__()
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
#pdb.set_trace()
pixel_colors = interpolate_vertex_colors(fragments, meshes)
images = hard_rgb_blend(pixel_colors, fragments)
return images # (N, H, W, 3) RGBA image
def render_mesh(mesh, elevation, dist_, batch_size, device, imageSize):
# Initialize an OpenGL perspective camera.
# With world coordinates +Y up, +X left and +Z in, the front of the cow is facing the -Z direction.
# So we move the camera by 180 in the azimuth direction so it is facing the front of the cow.
#R, T = look_at_view_transform(150.0, 1.0, 180)
#dd = camera_loc[0]
#el = camera_loc[1]
#az = camera_loc[2]
#batch_size=50
#
meshes = mesh.extend(batch_size)
#
#el = torch.linspace(0, 180, batch_size)
az = torch.linspace(-180, 180, batch_size)
R, T = look_at_view_transform(dist=dist_, elev=elevation, azim=az)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
#
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
# and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None which ensure that
# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=imageSize,
blur_radius=0.0,
faces_per_pixel=1,
bin_size = None, # this setting controls whether naive or coarse-to-fine rasterization is used
max_faces_per_bin = None # this setting is for coarse rasterization
)
#
# Place a point light in front of the object. As mentioned above, the front of the cow is facing the -z direction.
lights = PointLights(device=device, location=[[0.0, 0.0, -5.0]])#, [0.0, 0.0, 5.0], [0.0, -5.0, 0.0], [0.0, 5.0, 0.0]])
# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will
# interpolate the texture uv coordinates for each vertex, sample from a texture image and
# apply the Phong lighting model
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings),
shader=HardPhongShader(
device=device,
cameras=cameras,
lights=lights
))
## render images
images = renderer(meshes)
return images
def images2gif(image_list, filepath, descr):
image_list[0].save(filepath+descr+'.gif', save_all=True, append_images=image_list, optimize=False, duration=400, loop=0)
def render_main(starting_mesh_path, camera_elevation, camera_rdistance, batch_size, image_size, output_filename):
## Set the device
device = torch.device("cuda:0")
#device = torch.device("cpu")
#
verts, faces, aux=load_obj(starting_mesh_path)
faces_idx = faces.verts_idx.to(device)
#pdb.set_trace()
gverts = verts.to(device)
gverts.requires_grad=True
src_mesh = Meshes(verts=[gverts], faces=[faces_idx])
# print('\n ***************** Rendering Mesh as gif *****************')
## render as mesh
num_verts = verts.shape[0]
verts_rgb_colors = 128*torch.ones([1, num_verts, 3]).to(device)
textured_mesh = Meshes(verts=[verts.to(device)], faces=[faces_idx.to(device)], textures=Textures(verts_rgb=verts_rgb_colors))
#pdb.set_trace()
all_images = render_mesh(textured_mesh, camera_elevation, camera_rdistance, batch_size, device, image_size)
all_images_ = [Image.fromarray(np.uint8(img.detach().cpu().squeeze().numpy())) for img in all_images]
#pdb.set_trace()
filepath = os.path.join(output_filename, os.path.splitext(os.path.split(starting_mesh_path)[1])[0])
descr=''
images2gif(all_images_, filepath, descr)
if __name__ == "__main__":
## settings
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_filename', type=str)
parser.add_argument('-wm', '--which_starting_mesh', type=str, default='sphere')
parser.add_argument('-bs', '--batch_size', type=float, default=30)
parser.add_argument('-ims', '--image_size', type=int, default=512)
parser.add_argument('-camD', '--camera_rdistance', type=float, default=10, help='Radial distance of camera from origin')
parser.add_argument('-camEl', '--camera_elevation', type=float, default=45, help='degree Elevation of camera from origin')
#parser.add_argument('-g', '--gpu', type=int, default=0)
args = parser.parse_args()
starting_mesh_path = args.which_starting_mesh
camera_elevation = args.camera_elevation
camera_rdistance = args. camera_rdistance
batch_size = args.batch_size
image_size = args.image_size
output_filename = args.output_filename
render_main(starting_mesh_path, camera_elevation, camera_rdistance, batch_size, image_size, output_filename)
| [
"dasyed@umich.edu"
] | dasyed@umich.edu |
750e266256ed3262b2f2e784c4d1fb360f035fff | bfd75153048a243b763614cf01f29f5c43f7e8c9 | /1906101115-江汪霖/day0303/text01.py | cb50f07ed61df13fd4cf475b6b316412349dfbbf | [] | no_license | gschen/sctu-ds-2020 | d2c75c78f620c9246d35df262529aa4258ef5787 | e1fd0226b856537ec653c468c0fbfc46f43980bf | refs/heads/master | 2021-01-01T11:06:06.170475 | 2020-07-16T03:12:13 | 2020-07-16T03:12:13 | 239,245,834 | 17 | 10 | null | 2020-04-18T13:46:24 | 2020-02-09T04:22:05 | Python | UTF-8 | Python | false | false | 116 | py | class Myclass:
i = 123456
def f(self,ostr):
return(ostr)
x = Myclass()
print(x.i,x.f("hello,word")) | [
"917563885@qq.com"
] | 917563885@qq.com |
87599bfddfe13da2e25c0413d1cfdb18b0155c2a | eae6eabfaa42015059b732670c3e56763273b05f | /ПідключеннядодатковихмодулівЗадача3.py | 617d1d9fd84ef7f2423294c28566da5f8bd0e2aa | [] | no_license | lazarch/LessonsMarichka | e8180adf259e16012568cd76c0534d87560f096e | 466b6c87c0919de64084c90529896ab6d3f974e2 | refs/heads/main | 2023-03-06T03:19:55.251054 | 2021-02-23T10:11:58 | 2021-02-23T10:11:58 | 341,508,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | c='Програма для визначення площі круга'
d=c.center(70)
print(d)
import math
d=float(input('Введіть діаметр круга = '))
s=math.pi*math.pow(d,2)/2
print('Площа круга діаметром ',d,' дорівнює ',s)
| [
"noreply@github.com"
] | noreply@github.com |
eb10bd4ea2467d5d09f6e12e3ae29cbdeb1501f3 | 667a4c36df7b9fcf45b26a99cdb5551f9e4f0ba7 | /OrelliaSource/PandaCore/Debug.py | 633286b68a3346d206e57dbe8c5d40e5cfc1ca5b | [] | no_license | wpesetti/Orellia-Source | a129ea56cd38109f70294936875b968efcb94fcd | 810cb36e27f2da6cf980bb6aa24a24d0d24bd5fa | refs/heads/master | 2020-06-02T19:16:29.417969 | 2011-08-03T04:04:33 | 2011-08-03T04:04:33 | 2,120,112 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | #for debugging
G_DEBUG = False
def debug(module="?", text=""):
if(G_DEBUG):
print "DEBUG in ",module, ": ", text | [
"wesley_1@comcast.net"
] | wesley_1@comcast.net |
721575c39af22e75db58a6041bfea183daae1d3c | 67ae56e872b0bbd63ab4c1bbff3f41fd7f059407 | /polls/migrations/0001_initial.py | 5e4c20ee50fb682bd9ab2610bf2de270ad676c1c | [] | no_license | HieuNT-Sun-Asterisk/mysite | 5d8129884acc43ec7ebaa5796b6bc1c680d78f52 | 0da42d60f86d8da7fc68dfe6378295b96b257501 | refs/heads/master | 2022-11-28T12:56:42.636386 | 2020-08-05T09:18:36 | 2020-08-05T09:18:36 | 284,654,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # Generated by Django 3.0.3 on 2020-08-03 09:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
]
| [
"trihieua2@gmail.com"
] | trihieua2@gmail.com |
f45b111df5cd21fc8cd6ffdaa88243d99792b959 | 8aed4ec937edf04b100f373cd46cee606699d74a | /captain_america/spider/migrations/0002_control.py | 0e9fa00972cbb190d49383f0517ba359a88e807e | [] | no_license | xiaguangting/hero | 089f33398276f7ff7df31ae7ab7d36ad787c9e94 | 5feed7de9b404bf5887124e041a5c610894a9995 | refs/heads/master | 2020-05-04T21:43:19.086775 | 2019-10-12T08:19:12 | 2019-10-12T08:19:12 | 179,486,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-08 13:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('spider', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Control',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_disabled', models.BooleanField(default=False, verbose_name='\u662f\u5426\u7981\u7528')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('code', models.CharField(max_length=32, verbose_name='\u722c\u866bCODE')),
('num', models.IntegerField(default=1, verbose_name='\u6570\u91cf')),
('minute', models.CharField(default='*', max_length=16, verbose_name='\u5206\u949f')),
('hour', models.CharField(default='*', max_length=16, verbose_name='\u5c0f\u65f6')),
('day', models.CharField(default='*', max_length=16, verbose_name='\u5929')),
('month', models.CharField(default='*', max_length=16, verbose_name='\u6708')),
('week', models.CharField(default='*', max_length=16, verbose_name='\u5468')),
('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spider.Site', verbose_name='\u7ad9\u70b9')),
],
options={
'verbose_name': '\u63a7\u5236',
'verbose_name_plural': '\u63a7\u5236',
},
),
]
| [
"xiakang@fun.tv"
] | xiakang@fun.tv |
9c199557fc04f712720b6e33c2a4ed90717998aa | 2779be59343d231855bbeafd087fc571d0f867dc | /Solo Competition/Google Codejam 2017/Round 3/A/A.py | 76b1324982a2840cf717f1ac6be20adeb01e9b47 | [] | no_license | frankbozar/Competitive-Programming | 4d7dbdf3db414718e77cc5afd69398b1f7f01eb8 | bcb1c363d11ada1f05ccd59bc8c93238c2503b23 | refs/heads/master | 2020-06-15T07:34:24.117330 | 2017-09-25T07:09:47 | 2017-09-25T07:09:47 | 75,315,592 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from itertools import permutations as N
C=lambda n, m: 1 if n==m or m==0 else C(n-1, m)+C(n-1, m-1)
P=lambda n, x: 1 if x==0 else P(n-x%10, x//10)*C(n, x%10)
F=lambda s: 0 if s=='' else F(s[:-1])+ord(s[-1])-ord('0')
G=lambda x: 0 if x==0 else G(x//10)+x%10
def S(n, x):
if G(x)>n:
return 1
s='0'*(n-G(x))
for i in range(n):
s+=chr(ord('0')+n-i)*(x//10**i%10)
if F(s)>n:
return P(n, x)+1
ans=1
for t in set(N(s)):
y=int(''.join(t))
if y!=x:
ans+=S(n, y)
return ans
for c in range(int(input())):
s=input()
print('Case #{0}: {1}'.format(c+1, S(len(s), int(s))))
| [
"frankbozar@frank.local"
] | frankbozar@frank.local |
7f19ef2799af9962a32f9250ed7aacdf2f8f7122 | 6c244ccae728b0acdd6e2668ab6caa140b1c094a | /UI (1).py | de60ba6c4a6db9950a4b5d35eaf451f3d763f687 | [] | no_license | Shruti-49/project | 52f3ae32f3f41b1a9945baa3d044811782a04fa2 | b3e9eeaddb8f024b39868be6df0787c06f308852 | refs/heads/main | 2023-06-04T03:48:22.784227 | 2021-06-15T10:02:11 | 2021-06-15T10:02:11 | 377,115,668 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,126 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 22:01:25 2021
@author: Admin
"""
import tkinter as Tk
from tkinter import *
from tkinter import messagebox
import os
import webbrowser
from motiondetector import motion
def Ok():
uname = e1.get()
password = e2.get()
if(uname == "" and password == "") :
messagebox.showinfo("", "Blank Not allowed")
elif(uname == "Admin" and password == "Minorpro6"):
messagebox.showinfo("","Login Success")
path=r'C:\Users\HP\Desktop\projectfinal\recordings'
webbrowser.open(os.path.realpath(path))
root.destroy()
return
else :
messagebox.showinfo("","Incorrect Username or Password")
def openfolder():
root = Tk()
root.title("Login")
root.geometry("300x200")
global e1
global e2
Label(root, text="User ID", font=('Verdana',12)).place(x=10, y=10)
Label(root, text="Password", font=('Verdana',12)).place(x=10, y=40)
e1 = Entry(root)
e1.place(x=140, y=10)
e2 = Entry(root)
e2.place(x=140, y=40)
e2.config(show="*")
Button(root, text="Login", command=Ok, bg="navy blue", fg="white", font=('Verdana',12), height = 2, width = 20).place(x=10, y=100)
class App:
    """Main-window contents: a title banner plus the two action buttons."""

    def __init__(self, master):
        global passg  # kept from the original; never assigned in this method
        heading = Message(master, text="Always Vigilant")
        heading.config(font=('Verdana', 20), width=400)
        heading.pack()
        heading.pack(padx=5, pady=30)  # repack with padding, as the original did
        self.encrypt = Button(master, text="Start Surveillance", bg="navy blue",
                              fg="white", command=motion, font=('Verdana', 12),
                              width=25, height=2)
        self.encrypt.pack(side=LEFT, padx=5, pady=5)
        self.decrypt = Button(master, text="View Previous recordings",
                              bg="navy blue", fg="white", command=openfolder,
                              font=('Verdana', 12), width=25, height=2)
        self.decrypt.pack(side=RIGHT, padx=5, pady=5)
# Entry point: build the main window and hand control to Tk's event loop.
# `root` must stay module-level: Ok() destroys it after a successful login.
root = Tk()
root.wm_title("Suspicious Activity Detection")
app = App(root)
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
9f1d1285b9c6eb45019b5edb842195eeb9390923 | 76016723a17b5a76d3383f401829649ebcd2b6df | /Ques6a.py | b83108891ccf0a420697df71a931d46560f100cc | [] | no_license | PriyankaGoenka/Task_Two | 3bc8456f6861eb13d8dd0bed585759ee1cd72776 | e8e2c73b3b0f5c67c0047da32298d6edfc85cea7 | refs/heads/main | 2023-03-20T19:11:39.006771 | 2021-03-12T19:43:35 | 2021-03-12T19:43:35 | 346,945,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34 | py | x=[1,2,3]
for i in x:
print(i) | [
"goenkapr@msu.edu"
] | goenkapr@msu.edu |
28d39c6dea81506b8dd1e1f53230c70c25166d80 | 6536e42c9a336c80d370d7f07cc4260e4055f683 | /wsgitest.py | f50c5fd0888492f0b48b4b2933233864a3b2cb8a | [
"BSD-2-Clause"
] | permissive | jonashaag/WSGITest | 4ca01144b6217b4769020c0597d075dd03d4549a | fb6f6981f8cc8192b2207a803c078a03bab31a84 | refs/heads/master | 2020-05-20T09:41:52.598794 | 2011-01-17T18:09:21 | 2011-01-17T18:09:21 | 818,200 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | import optparse
# Command-line interface: `-d` opts into running the bundled default test
# suite; the None default means "not specified on the command line".
parser = optparse.OptionParser()
parser.add_option(
    '-d', dest='default_tests', action='store_true', default=None)
if __name__ == '__main__':
    from wsgitest.run import run_tests
    options, files = parser.parse_args()
    if not files:
        # No explicit test files given: fall back to the bundled defaults
        # unless the user explicitly disabled them.
        if options.default_tests is None:
            options.default_tests = True
        if options.default_tests:
            from wsgitest import DEFAULT_TESTS_DIR
            files.append(DEFAULT_TESTS_DIR)
    result = run_tests(files)
    # Fix: parenthesized so this line is valid under both Python 2 (where it
    # was a print statement) and Python 3; output is identical.
    print(result.summary())
| [
"jonas@lophus.org"
] | jonas@lophus.org |
048d1ae4c6f96f43b3fb93737ab9bb6df35d22f6 | 2d1a07b949114c023c22bbfa8a91e24572bb61c4 | /plox/tools/generate_ast.py | 14eb1f120b4268447756f8e2780c1c03e5283eac | [] | no_license | mrkbryn/pylox | 96e9d64bd7d4bbccb59ecf6ea8fd4a09136031ef | 4692601105544b7d4d0544d9ffcd7c3c040470f1 | refs/heads/master | 2021-09-22T04:19:46.940072 | 2021-08-08T00:34:20 | 2021-08-08T00:34:20 | 244,215,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | def generate_class(name, super_class, props):
def generate_class(name, super_class, props):
    """Print the source of a simple AST node class.

    Emits a class `name` extending `super_class` whose __init__ stores each
    entry of `props` as an attribute, plus a matching __repr__.

    NOTE(review): leading whitespace was lost in transit (the `def` line is
    fused onto the preceding metadata row); the function form and the
    4/8-space indentation inside the emitted strings are reconstructed here
    so the generated code is valid Python.
    """
    print("class {}({}):".format(name, super_class))
    print("    def __init__(self, {}):".format(', '.join(props)))
    for prop in props:
        print("        self.{} = {}".format(prop, prop))
    print("")
    print("    def __repr__(self):")
    # Build e.g. 'self.a, self.b' and '{}, {}' with joins; this also fixes
    # the zero-prop case (the original always emitted one stray '{}').
    prop_list = ", ".join("self.{}".format(prop) for prop in props)
    placeholders = ", ".join("{}" for _ in props)
    print('        return "{}({})".format({})'.format(name, placeholders,
                                                      prop_list))
    print("\n")
# Visitor-pattern base classes emitted ahead of the generated node classes:
# every generated Expr/Stmt subclass inherits an accept() that dispatches to
# the visitor.
# NOTE(review): the literal's internal indentation was lost in transit and is
# restored here so the emitted code is valid Python.
expression_template = """
class Expr(object):
    def accept(self, visitor):
        return visitor.visit(self)
class Stmt(object):
    def accept(self, visitor):
        return visitor.visit(self)
"""
print(expression_template)
# Emit every concrete node class: (class name, base class, constructor fields),
# in the same order as the original call sequence.
for node_name, base, fields in [
    ("Assign", "Expr", ["name", "value"]),
    ("BinaryExpr", "Expr", ["left", "operator", "right"]),
    ("UnaryExpr", "Expr", ["operator", "right"]),
    ("LiteralExpr", "Expr", ["value"]),
    ("GroupingExpr", "Expr", ["expression"]),
    ("Variable", "Expr", ["name"]),
    ("Expression", "Stmt", ["expression"]),
    ("Print", "Stmt", ["expression"]),
    ("Var", "Stmt", ["name", "initializer"]),
]:
    generate_class(node_name, base, fields)
| [
"mab539@cornell.edu"
] | mab539@cornell.edu |
2d91424ff4bd4f35ff4625aaa988a18765ffa52c | 65c9bedbf4f11a69384128eab9621863d325f227 | /po11.py | 08d7d46a23bc1d8d8105a36103ba96e37cef522b | [] | no_license | Gowt859/guvi | d57c2bc7656f9e414c24db78975dc514a12aa6b5 | 339de4e858121d7d6c4ffdb5fc7409e6804b3dbf | refs/heads/master | 2020-06-14T13:13:55.989938 | 2019-07-17T09:11:16 | 2019-07-17T09:11:16 | 195,013,791 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | g1,g2=map(int,input().split())
# Raise the first input number to the power of the second and print it
# (g1 and g2 are read from stdin on the file's first line, outside this span).
result = g1 ** g2
print(result)
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.