blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d5dd56a68ce66bf5a90c6a088734b7314a052b9a | 5786d339acf7c7bb578c569eb8f257b5b1b4c393 | /code/torch/models/pfreezing/args.py | 0cd2249eb6016d6bf114ffd3bb95c200681e83f9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | afogarty85/BERTVision | fe65b5b9a43d7ac1a6c7e5618138e5d1ca3b7710 | 3e94122d60ad5e9911ea49adbdffb4e3ebc66f08 | refs/heads/master | 2023-04-11T04:33:39.559446 | 2021-04-27T19:37:15 | 2021-04-27T19:37:15 | 362,228,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | import os, sys
sys.path.append("C:\\BERTVision\\code\\torch")
import torch
import models.args
def get_args():
    """Assemble the CLI argument parser for the parameter-freezing task.

    Starts from the shared ``models.args`` parser and attaches the
    task-specific options below. Returns the parsed ``argparse.Namespace``.
    """
    # retreive the general models.args and attach them here
    parser = models.args.get_args()
    # set some task specific args
    parser.add_argument('--model',
                        type=str,
                        default='RTE',
                        required=True)
    parser.add_argument('--checkpoint',
                        type=str,
                        default='bert-base-uncased',
                        required=True,
                        help='A HuggingFace checkpoint e.g., bert-base-uncased')
    parser.add_argument('--num-labels',
                        default=2,
                        type=int,
                        required=True,
                        help='Number of labels for the task, default=2')
    parser.add_argument('--max-seq-length',
                        default=219,
                        type=int,
                        help='Tokenization max length')
    parser.add_argument('--save-path',
                        type=str,
                        default=os.path.join('model_checkpoints'))
    parser.add_argument('--log-path',
                        type=str,
                        default=os.path.join('model_logs'))
    parser.add_argument('--pickle-path',
                        type=str,
                        default=os.path.join('pfreezing_trials'))
    parser.add_argument('--warmup-proportion',
                        default=0.1,
                        type=float,
                        help='Proportion of training to perform linear learning rate warmup for')
    parser.add_argument('--batch-size',
                        type=int,
                        default=16,
                        help='Input batch size for training (default: 16)')
    parser.add_argument('--lr',
                        type=float,
                        default=2e-5,
                        help='Learning rate (default: 2e-5)')
    parser.add_argument('--num-workers',
                        type=int,
                        default=0,
                        help='Number of CPU cores (default: 0)')
    parser.add_argument('--n-trials',
                        type=int,
                        default=1000)
    parser.add_argument('--shard',
                        type=float,
                        default=0.30,
                        help='Percentage of training set to sample from')
    # Fix: the exclusive group was created but its return value discarded and
    # both flags were added to the parser itself, so --error/--no-error were
    # never actually mutually exclusive. Attach them to the group object.
    error_group = parser.add_mutually_exclusive_group(required=False)
    error_group.add_argument('--error', dest='error', action='store_true')
    error_group.add_argument('--no-error', dest='error', action='store_false')
    # Fix: the copy-pasted argparse recipe set an unused `feature` attribute;
    # set the default for the real `error` dest instead. False preserves the
    # previous effective default (the store_true action registered first).
    parser.set_defaults(error=False)
    args = parser.parse_args()
    return args
#
| [
"apf2bb@virginia.edu"
] | apf2bb@virginia.edu |
a64d29dd9bc1758aa12c482836558f36caa6d738 | 7d6f2bd14823b3bc4e6143a9385b3c5d590f7ead | /qa/pull-tester/tests_config.py | f6888b709c45e4c97ef99c9f78a1eedb65dfd4fd | [
"MIT"
] | permissive | AmirAbrams/uniqredit | 9b6b2f4872398c8d384394ccb65c7dfff6bdbeeb | de763bccb482f71d5d69755caf65652e3dbd907d | refs/heads/master | 2021-01-21T09:39:17.642223 | 2016-07-26T07:18:42 | 2016-07-26T07:18:42 | 65,700,650 | 0 | 0 | null | 2016-08-15T04:48:19 | 2016-08-15T03:10:00 | C++ | UTF-8 | Python | false | false | 431 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2016 The Uniqredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Absolute paths to the source and build trees used by the RPC test runner.
SRCDIR="/home/minato/uniqredit"
BUILDDIR="/home/minato/uniqredit"
# Executable suffix appended to binary names ("" on Unix, ".exe" on Windows).
EXEEXT=""
# These will turn into comments if they were disabled when configuring.
ENABLE_WALLET=1
ENABLE_UTILS=1
ENABLE_UNIQREDITD=1
#ENABLE_ZMQ=1
| [
"bitcreditscc@gmail.com"
] | bitcreditscc@gmail.com |
d712d5e72fd8bc200f8343491edb19f221fccdfb | 9f6dac6c9c53f4e1a05aff73da76499178bacd44 | /demographic_data_analyzer.py | 433bc19cf41291895866574e7e5499d71c4e59fa | [] | no_license | staceyland/demographic-data-analyzer | fff2278b79e1f07d8ca1f5349fd83199038ee953 | 5dbe30d4c7b972625c870163fda49fd0b22f4444 | refs/heads/main | 2023-07-05T18:01:02.881512 | 2021-08-31T04:51:52 | 2021-08-31T04:51:52 | 401,576,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,803 | py | import pandas as pd
def calculate_demographic_data(print_data=True):
    """Compute demographic statistics from the census "adult" dataset.

    Reads ``adult.data.csv`` from the current working directory, optionally
    prints a human-readable summary, and returns the computed figures in a
    dict. All percentages/averages are rounded to one decimal place.

    :param print_data: when True, print each statistic to stdout as well.
    :return: dict keyed by statistic name (see the return statement below).
    """
    # Read data from file
    df = pd.read_csv("adult.data.csv")
    # How many of each race are represented in this dataset? This should be a Pandas series with race names as the index labels.
    race_count = df['race'].value_counts()
    # What is the average age of men?
    average_age_men = round(df.loc[df['sex'] == 'Male', 'age'].mean(), 1)
    # What is the percentage of people who have a Bachelor's degree?
    percentage_bachelors = round(float(((df['education'] == 'Bachelors').sum()) / len(df))*100, 1)
    # What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?
    # What percentage of people without advanced education make more than 50K?
    # with and without `Bachelors`, `Masters`, or `Doctorate`
    higher_education = df.loc[df['education'].isin(['Bachelors', 'Masters', 'Doctorate'])]
    lower_education = df.loc[~df['education'].isin(['Bachelors', 'Masters', 'Doctorate'])]
    # percentage with salary >50K
    higher_education_rich = round((higher_education['salary'] == '>50K').sum() / len(higher_education) * 100, 1)
    lower_education_rich = round((lower_education['salary'] == '>50K').sum() / len(lower_education) * 100, 1)
    # What is the minimum number of hours a person works per week (hours-per-week feature)?
    min_work_hours = df['hours-per-week'].min()
    # What percentage of the people who work the minimum number of hours per week have a salary of >50K?
    num_min_workers = df[df['hours-per-week'] == min_work_hours].shape[0]
    rich_percentage = round((float(df[(df['hours-per-week'] == min_work_hours) & (df['salary'] == '>50K')].shape[0]) / num_min_workers) * 100, 1)
    # What country has the highest percentage of people that earn >50K?
    # Ratio of >50K earners to all respondents per country; fillna(0) covers
    # countries with no >50K earners at all.
    highest_earning_country = (df.loc[df["salary"] == ">50K",
                                      "native-country"].value_counts() / df["native-country"].value_counts()).fillna(0).sort_values(ascending=False).index[0]
    highest_earning_country_percentage = round(len(df[(df["native-country"] == highest_earning_country) & (
        df["salary"] == ">50K")]) / len(df[df["native-country"] == highest_earning_country]) * 100, 1)
    # Identify the most popular occupation for those who earn >50K in India.
    top_IN_occupation = df[(df['salary'] == '>50K') & (df['native-country'] == 'India')]['occupation'].value_counts().keys()[0]
    # DO NOT MODIFY BELOW THIS LINE
    if print_data:
        print("Number of each race:\n", race_count)
        print("Average age of men:", average_age_men)
        print(f"Percentage with Bachelors degrees: {percentage_bachelors}%")
        print(f"Percentage with higher education that earn >50K: {higher_education_rich}%")
        print(f"Percentage without higher education that earn >50K: {lower_education_rich}%")
        print(f"Min work time: {min_work_hours} hours/week")
        print(f"Percentage of rich among those who work fewest hours: {rich_percentage}%")
        print("Country with highest percentage of rich:", highest_earning_country)
        print(f"Highest percentage of rich people in country: {highest_earning_country_percentage}%")
        print("Top occupations in India:", top_IN_occupation)
    return {
        'race_count': race_count,
        'average_age_men': average_age_men,
        'percentage_bachelors': percentage_bachelors,
        'higher_education_rich': higher_education_rich,
        'lower_education_rich': lower_education_rich,
        'min_work_hours': min_work_hours,
        'rich_percentage': rich_percentage,
        'highest_earning_country': highest_earning_country,
        'highest_earning_country_percentage':
        highest_earning_country_percentage,
        'top_IN_occupation': top_IN_occupation
    }
| [
"noreply@github.com"
] | staceyland.noreply@github.com |
28e11970a757421df8a3c2d034a2856bde5b414f | 93582aa46c835b66a2117bf24178fd80236af89d | /setup.py | e2eaee39d2b4d1cd674afe84307252167e1f9eba | [] | no_license | collective/collective.leadmedia | 0fbe4e03421fcec6f026a80de80c4af28d2f218e | 5fb3749861fd21859ae84686dc29f877859de45b | refs/heads/master | 2023-08-24T01:19:19.470625 | 2019-07-23T13:30:53 | 2019-07-23T13:30:53 | 26,549,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='collective.leadmedia',
version=version,
description="Adds a slideshow to any dexterity folderish type.",
long_description=open("README.rst").read(),
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='Plone',
author='Andre Goncalves',
author_email='andre@intk.com',
url='https://github.com/collective/collective.leadmedia',
download_url='https://github.com/collective/collective.leadmedia/tarball/0.1',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
setup_requires=["PasteScript"],
paster_plugins=["ZopeSkel"],
)
| [
"andreslb1@gmail.com"
] | andreslb1@gmail.com |
326168d8de06212813ef98b555650a25305f7aab | fff561e0e4f351d85d038cf87569c23280622157 | /cmsplugin_cascade/generic/cms_plugins.py | 3eaaf072f99d2b3a564045fc617a550d4bb910eb | [
"MIT"
] | permissive | schacki/djangocms-cascade | 9d3e9176e54c7cca619fdc6917c38b1588bc7c88 | 2809f701a1cfa17a53539fac4d9dadaa5ebe40b7 | refs/heads/master | 2021-01-20T22:02:42.959467 | 2015-12-23T19:31:07 | 2015-12-23T19:31:07 | 42,931,185 | 0 | 0 | null | 2015-09-22T12:02:53 | 2015-09-22T12:02:52 | null | UTF-8 | Python | false | false | 1,881 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import PartialFormField
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.mixins import TransparentMixin
class SimpleWrapperPlugin(TransparentMixin, CascadePluginBase):
    """Cascade plugin wrapping child plugins in a configurable HTML element.

    The editor picks the tag type ('div', 'span', 'section', 'article') or
    the special 'naked' value, which renders the children without any
    surrounding element.
    """
    name = _("Simple Wrapper")
    parent_classes = None
    require_parent = False
    allow_children = True
    alien_child_classes = True
    # (value, label) choices; 'naked' is a sentinel handled in
    # get_render_template() below rather than a real tag name.
    TAG_CHOICES = tuple((cls, _("<{}> – Element").format(cls))
        for cls in ('div', 'span', 'section', 'article',)) + (('naked', _("Naked Wrapper")),)
    glossary_fields = (
        PartialFormField('tag_type',
            widgets.Select(choices=TAG_CHOICES),
            label=_("HTML element tag"),
            help_text=_('Choose a tag type for this HTML element.')
        ),
    )

    @classmethod
    def get_identifier(cls, instance):
        """Append the chosen tag label to the base identifier, when set."""
        identifier = super(SimpleWrapperPlugin, cls).get_identifier(instance)
        tag_name = dict(cls.TAG_CHOICES).get(instance.glossary.get('tag_type'))
        if tag_name:
            return format_html('{0}{1}', identifier, tag_name)
        return identifier

    def get_render_template(self, context, instance, placeholder):
        """Use the bare children template for 'naked', else the wrapper."""
        if instance.glossary.get('tag_type') == 'naked':
            return 'cascade/generic/naked.html'
        return 'cascade/generic/wrapper.html'

plugin_pool.register_plugin(SimpleWrapperPlugin)
class HorizontalRulePlugin(CascadePluginBase):
    """Cascade plugin rendering a single childless <hr> element."""
    name = _("Horizontal Rule")
    parent_classes = None
    allow_children = False
    tag_type = 'hr'
    # 'single.html' renders one self-closing tag of `tag_type`.
    render_template = 'cascade/generic/single.html'
    glossary_fields = ()

plugin_pool.register_plugin(HorizontalRulePlugin)
| [
"jacob.rief@gmail.com"
] | jacob.rief@gmail.com |
0f60ebe14d9d2799e58cc2c5c412340c48ead03d | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/seqex/bundle_to_seqex_test.runfiles/pypi__nose_1_3_7/nose/case.py | cd5661e50fd58f7d63994967189c34fdb7209d2c | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 115 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__nose_1_3_7/nose/case.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
a12197afa53535d77f1899ed8fde7ad5811dbb49 | a01c0fcae93c2e37bd8d11fd8b6f987fb0642753 | /0221/D2/2007. 패턴 마디의 길이.py | 2280f3715cb8b8ce388c8c8bf4343d68a4f8f060 | [] | no_license | nocturne9no1/prepare-cote | 5619c3d186530c558231ce45a9142400894e356f | d95e31a14c0753baa607ef667e1b3e13d8fadbca | refs/heads/master | 2023-03-12T04:37:04.696446 | 2021-02-21T14:29:33 | 2021-02-21T14:29:33 | 335,323,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # 문자열 길이 30
# Problem constraints: string length <= 30, pattern period <= 10 characters.
# For each test case, find the length of the repeating unit of the input
# string and print "#<tc> <length>".
for tc in range(1, int(input()) + 1):
    arr = list(input())
    # Try candidate period lengths from 1 upward; the first b for which the
    # leading b characters repeat immediately (arr[i] == arr[b + i] for every
    # i < b) is the pattern length. Terminates because the period is <= 10.
    b = 1
    while any(arr[i] != arr[b + i] for i in range(b)):
        b += 1
    print('#{} {}'.format(tc, b))
| [
"nocturne9no1@gmail.com"
] | nocturne9no1@gmail.com |
282108c32f9aeea25e523e99267b87f63f7f8b16 | 06849a53382339044b32d877bcd24c48fb1cea29 | /作业20170926C.py | 0a4ca1f9f8ecbcea058c5fe543a34f639ab25367 | [] | no_license | 971226654/python | 0a5f7fd0a0d8eacb255072add52961c20fcfb983 | ebd2b6f1c7499ded1e78e80e5cfbc1a81a4e10ee | refs/heads/master | 2021-08-16T13:46:18.558259 | 2017-11-20T00:53:26 | 2017-11-20T00:53:26 | 111,345,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | a = float(input())
# Convert the amount `a` (read by the preceding input() call) using fixed
# quotes. 661.47 and 784.45 are presumably the CNY cost of 100 USD and
# 100 EUR respectively — TODO confirm the units and refresh these rates.
b = 100/661.47*a
c = 100/784.45*a
print("转换为美元:{:.2f}元".format(b))
print("转换为欧元:{:.2f}元".format(c)) | [
"971226654@qq.com"
] | 971226654@qq.com |
9f7fedda697e0d2e9744c1be8b113f750066e6bf | 8e5f20dfc569852b0228c7fad5dc6e07071ba8da | /Атрибуты и методы/Лабораторные задания/task2_LinkedList__setitem__/main.py | c707cb7486e2326328ecabcc8e05e62bf829b3ce | [] | no_license | Tatyana312/PythonPY200 | 5ff28b0496147b61510cb4afa33f08a1b0a8046f | baf4e6c1157f5c73bd67d343d334ecb59ed291a0 | refs/heads/master | 2023-09-04T11:37:15.722008 | 2021-09-30T18:24:38 | 2021-09-30T18:24:38 | 402,518,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,900 | py | from typing import Iterable, Optional, Any
from node import Node
class LinkedList:
    """A minimal singly linked list built from project ``Node`` objects."""

    def __init__(self, data: Iterable = None):
        """Create a list, optionally appending every value from *data*."""
        self.len = 0                       # number of stored nodes
        self.head: Optional[Node] = None   # first node, or None when empty
        if data is not None:
            for item in data:
                self.append(item)

    def append(self, value: Any):
        """Append *value* as a new node at the tail of the list."""
        node = Node(value)
        if self.head is None:
            self.head = node
        else:
            tail = self.step_by_step_on_nodes(self.len - 1)
            self.linked_nodes(tail, node)
        self.len += 1

    def __len__(self):
        return self.len

    @staticmethod
    def linked_nodes(left_node: Node, right_node: Optional[Node] = None) -> None:
        """Link two nodes so that *right_node* follows *left_node*."""
        left_node.set_next(right_node)

    def step_by_step_on_nodes(self, index: int) -> Node:
        """Walk from the head and return the node at *index*.

        Raises TypeError for non-int indices and IndexError when the index
        is out of range (which also ends old-style __getitem__ iteration).
        """
        if not isinstance(index, int):
            raise TypeError()
        if not 0 <= index < self.len:
            raise IndexError()
        node = self.head
        for _ in range(index):
            node = node.next
        return node

    def __getitem__(self, index: int) -> Any:
        """Return the value stored at *index*."""
        return self.step_by_step_on_nodes(index).value

    def __setitem__(self, index: int, value: Any) -> None:
        """Replace the value stored at *index* with *value*."""
        self.step_by_step_on_nodes(index).value = value

    def to_list(self) -> list:
        """Return all stored values as a plain Python list."""
        return list(self)

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.to_list()})"

    def __str__(self) -> str:
        return str(self.to_list())
if __name__ == '__main__':
    # Quick manual demo of the list API.
    list_ = [1, 2, 3]
    linked_list = LinkedList(list_)
    print(linked_list)
    print(linked_list[1])
    # TODO: set the element at index 1 to the value 100
    linked_list[1] = 100
    print(linked_list[1])
| [
"avrorik5@yandex.ru"
] | avrorik5@yandex.ru |
96eba1f27866edfaed208bbc5b525ee4e2b9da42 | 9a51a707e7a562d3898e85dd002a0ee484df8f8d | /template/include/litw-dump2csv.py | e47dc79ab2c6aa18f581635b75fdaa35dd2bfec3 | [] | no_license | nigini/LITW_RuralComputing | 4c955788ac146e613d1493b6f5be816732813b33 | f40a34b80e46435ded6cdcbcec091b44ce13b0c3 | refs/heads/master | 2023-01-24T09:19:52.712230 | 2020-11-17T00:28:57 | 2020-11-17T00:28:57 | 276,216,094 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | import csv
import json
from pandas.io.json import json_normalize
import argparse
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
def clean_json_data(json_db_entry):
    """Collapse doubled backslashes so the DB-escaped JSON parses cleanly."""
    return json_db_entry.replace('\\\\', '\\')
def read_litw_data_dump(filename):
    """Read a LITW dump CSV (``id;json;timestamp`` rows) and return the
    decoded JSON payloads, reporting and skipping malformed rows."""
    records = []
    with open(filename, newline='', encoding="latin-1") as csvfile:
        for row in csv.reader(csvfile, delimiter=';', quotechar='"'):
            try:
                db_id, json_data, timestamp = row
                clean_data = clean_json_data(json_data)
                records.append(json.loads(clean_data))
            except json.JSONDecodeError:
                # The JSON payload itself was unparseable.
                print('INFO: Could not process the JSON bellow:')
                print(clean_data)
            except ValueError:
                # The row did not split into exactly three fields.
                print('INFO: The following line has more fields than it should:')
                print(row)
    return records
def litw_data_by_UUID(litw_dump_data, data_types):
    """Aggregate dump entries into one flat dict per participant UUID.

    Only entries whose ``data_type`` is listed in *data_types* contribute
    their fields; every seen UUID still gets a ``{'uuid': ...}`` record.
    NOTE: entries are mutated in place ('uuid'/'data_type' keys are popped).

    Fix: the original bare ``except:`` swallowed every exception (including
    KeyboardInterrupt) and mislabeled them all as missing UUIDs; only the
    KeyError from the 'uuid' lookup is expected here.
    """
    data_by_UUID = {}
    for entry in litw_dump_data:
        try:
            uuid = entry.pop('uuid')
        except KeyError:
            # An entry without a UUID cannot be aggregated; report and skip.
            print('Could not find the UUID in this entry:')
            print(entry)
            continue
        if uuid not in data_by_UUID:
            data_by_UUID[uuid] = {'uuid': uuid}
        data_type = entry.pop('data_type', None)
        if data_type in data_types:
            data_by_UUID[uuid].update(entry)
    return data_by_UUID
def litw_data_by_entry(litw_dump_data, data_types):
    """Return the entries whose ``data_type`` is listed in *data_types*.

    The 'data_type' key is popped from every entry (entries are mutated in
    place); matching entries are returned in their original order.

    Fix: the original bare ``except:`` swallowed every exception; the only
    failures expected here are entries that are not dicts, which raise
    AttributeError/TypeError on ``.pop``.
    """
    data_by_line = []
    for entry in litw_dump_data:
        try:
            data_type = entry.pop('data_type', None)
            if data_type in data_types:
                data_by_line.append(entry)
        except (AttributeError, TypeError):
            # Entry was not a dict-like object; report it and keep going.
            print('Problem with this entry:')
            print(entry)
    return data_by_line
def main():
    """CLI entry point: convert a LITW dump CSV into a per-UUID (FLAT) or
    per-entry (TIDY) CSV, keeping only the requested data types."""
    parser = argparse.ArgumentParser(description='Converts LITW data dumps containing JSON data to a CSV file formed by attributed per UUID.')
    parser.add_argument('input', help='the LITW data dump CSV file')
    parser.add_argument('output', help='the output file name')
    parser.add_argument('data_type', help='a comma separated list of data type to be written to the output file, e.g. litw:initialize,study:demographics,study:data')
    parser.add_argument('--format', help='either TIDY (one entry per line) or FLAT (DEFAULT: aggregate all data by UUID - works only if all data is saved under a unique name.)')
    args = parser.parse_args()
    print(args)
    DATA_TO_GET = args.data_type.split(',')
    INPUT_FILE = args.input
    OUTPUT_FILE = args.output
    data = read_litw_data_dump(INPUT_FILE)
    if args.format == 'TIDY':
        # One CSV row per dump entry.
        data_output = json_normalize(litw_data_by_entry(data, DATA_TO_GET))
    else:
        # Default FLAT layout: one CSV row per participant UUID.
        data_flat = litw_data_by_UUID(data, DATA_TO_GET)
        data_output = json_normalize(list(data_flat.values()))
    data_output.to_csv(OUTPUT_FILE, index=False, quoting=csv.QUOTE_ALL)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"nigini@gmail.com"
] | nigini@gmail.com |
8530fbd9d063ff0ac162ee77b6fdab28ec983282 | b3deaac42f88b5b03c6589f3f083acb57e233981 | /tictactoe.py | 29476fe22d27988283df10eaba5ec8a30846cc8e | [] | no_license | ShllokRawat/Misc | be719276f1c831cecb00df358409e42aea1c1a3f | b9dc821a9e5ba4a57cb7c11beec501d8ca413ccb | refs/heads/main | 2023-06-14T01:21:46.278459 | 2021-07-08T14:38:46 | 2021-07-08T14:38:46 | 384,153,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,327 | py | import random
import copy
# 3x3 game board, mutated in place by the functions below; '-' marks a free
# cell. Cells are addressed by a linear index 0-8 (row-major).
board = [['-','-','-'],['-','-','-'],['-','-','-']]
'''
- - -
- - -
- - -
'''
# Player names and the game mode persist across rounds, so a restart skips
# the corresponding prompts.
name1 = ""
name2 = ""
gametype = ""  # "Y" = versus computer, "N" = two human players
# Each player is a mutable [name, piece] pair shared by reference.
player1 = ["", ""]
player2 = ["", ""]
def game():
    """Play one full round of tic-tac-toe, prompting via stdin.

    Mutates the module-level board/name/player state; the board is reset
    when the round ends so a restart begins cleanly.
    """
    global gametype
    if gametype == "":
        gametype = input("Do you want to play against the computer (Y/N)")
    counter = 0  # moves played; 9 moves with no winner means a tie
    global name1
    global name2
    if name1 == "":
        name1 = input("Enter player 1's name: ")
        player1[0] = name1
    if gametype.upper() == "N" and name2 == "":
        name2 = input("Enter player 2's name: ")
        player2[0] = name2
    if gametype.upper() == "Y":
        player2[0] = "Computer"
    # Randomly hand out the X/O pieces, then pick who starts.
    player1[1], player2[1] = random_move()
    print(player1[0] + " is " + player1[1])
    print(player2[0] + " is " + player2[1])
    current_player = choose_random_player()
    print(current_player[0] + " starts!")
    print_board()
    while True:
        if current_player[0] != "Computer":
            # Re-prompt until the human picks a free, in-range square.
            move = int(input(current_player[0] + " choose a number between 0 and 8: "))
            while not possible(move):
                move = int(input("Illegal Move. Choose another number between 0 and 8: "))
        else:
            move = computer_move(current_player[1])
            print("Computer plays move: ", move)
        make_move(board, current_player[1], move)
        counter += 1
        if is_winner(board, current_player[1]):
            print_board()
            print("Winner:", current_player[0])
            reset_board()
            break
        print_board()
        if counter == 9:
            print("The Game ended in a Tie!")
            reset_board()
            break
        current_player = other(current_player)
def choose_random_player():
    """Pick the starting player: a fair coin flip between the two globals."""
    return player1 if random.randint(0, 1) == 0 else player2
def random_move():
    """Randomly assign the pieces, returning (player1_piece, player2_piece)."""
    if random.randint(0, 1) == 0:
        return "X", "O"
    return "O", "X"
def other(player):
    """Return the opponent of *player* (one of the two global players)."""
    return player2 if player == player1 else player1
def make_move(b, piece, position):
    """Place *piece* ('X' or 'O') on board *b* at linear index *position* (0-8)."""
    row, col = divmod(position, 3)
    b[row][col] = piece
def print_board():
    """Print the global board, one row per line, cells separated by spaces."""
    for row in board:
        # Matches the original output: every cell is followed by one space.
        print(" ".join(row) + " ")
def is_winner(board, piece):
    """Return True if *piece* holds any complete row, column, or diagonal.

    Replaces eight hand-written condition chains with generated win lines,
    which is easier to audit for index typos. Behavior is unchanged.
    """
    lines = [[board[r][c] for c in range(3)] for r in range(3)]   # rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]  # columns
    lines.append([board[i][i] for i in range(3)])                 # main diagonal
    lines.append([board[i][2 - i] for i in range(3)])             # anti-diagonal
    return any(all(cell == piece for cell in line) for line in lines)
def possible(m):
    """Return True when *m* is a legal, currently-free square on the global board."""
    # Keep the exact type() comparison: it also rejects bools, which
    # isinstance(m, int) would accept.
    if type(m) != int:
        return False
    if not 0 <= m <= 8:
        return False
    row, col = divmod(m, 3)
    return board[row][col] == '-'
def reset_board():
    """Blank every cell of the global board in place."""
    for row in board:
        for j in range(len(row)):
            row[j] = '-'
def duplicate_board(b):
    """Return a fully independent deep copy of board *b*."""
    return copy.deepcopy(b)
def choose_random_move(bo, moves):
    """Pick a random legal move from *moves*, or None when none are open.

    NOTE(review): *bo* is accepted but unused — legality is decided against
    the global board via possible(); verify callers before removing it.
    """
    open_moves = [m for m in moves if possible(m)]
    return random.choice(open_moves) if open_moves else None
def computer_move(c_piece):
    """Choose a move for the computer playing *c_piece* on the global board.

    Strategy, in priority order: win immediately, block the opponent's
    immediate win, take a corner, take the center, take a side.
    Returns a board index 0-8 (some square is always free when called).
    """
    if c_piece == "X":
        player_piece = "O"
    else:
        player_piece = "X"
    #CHECK IF COMPUTER CAN WIN IN THE NEXT MOVE
    #RETURN MOVE
    for i in range(0,9):
        # Simulate each candidate move on a deep copy of the live board.
        copyboard = duplicate_board(board)
        if possible(i):
            make_move(copyboard, c_piece, i)
            if is_winner(copyboard, c_piece):
                return i
    #CHECK IF PLAYER CAN WIN STOP HIM
    for i in range(0,9):
        copyboard = duplicate_board(board)
        if possible(i):
            make_move(copyboard, player_piece, i)
            if is_winner(copyboard, player_piece):
                return i
    #CORNER PIECE
    m = choose_random_move(board, [0,2,6,8])
    if m != None:
        return m
    #CENTER PIECE
    if possible(4):
        return 4
    #SIDES
    m = choose_random_move(board, [1,3,5,7])
    if m != None:
        return m
# Interactive entry point: keep playing rounds until the user declines.
if __name__ == "__main__":
    print("Welcome to tic tac toe")
    start = input ("Do you want to play? (Y/N)")
    start = start.upper()
    while start == "Y":
        game()
        start = input("Do you want to restart? (Y/N)")
    print("The END")
| [
"noreply@github.com"
] | ShllokRawat.noreply@github.com |
ae4a5e36e26dd8270667a942b13d62f5bdf35802 | 4ff323ddab2923e04388445ac875b0c1d5dbfb87 | /fastrates/tests.py | 5784390e31ae70a9872eabf41ca29544bd46b2fd | [
"MIT"
] | permissive | MelihCelik00/fastrates | 8af0536263b89a267b9e5d5d9fa7f452ffbf39cc | c7e2caaf19a435d4e6bac9088f378fef8b938003 | refs/heads/master | 2022-11-10T19:49:25.031081 | 2020-07-04T09:44:28 | 2020-07-04T09:44:28 | 277,082,224 | 1 | 0 | MIT | 2020-07-04T09:49:47 | 2020-07-04T09:49:47 | null | UTF-8 | Python | false | false | 5,004 | py | from fastapi.testclient import TestClient
from main import app
client = TestClient(app)
def test_main_home():
    """The root endpoint should respond with HTTP 200."""
    res = client.get("/")
    assert res.status_code == 200
def test_latest():
    """GET /latest with no parameters should succeed."""
    res = client.get("/latest")
    assert res.status_code == 200
# Fix: renamed from `test_latest` — the file defined two functions with that
# name, so this second definition shadowed the first and pytest only ever
# collected one of the two tests.
def test_latest_base():
    """GET /latest with an explicit base currency should succeed."""
    response = client.get("/latest?base=USD")
    assert response.status_code == 200
def test_latest_symbols():
    """GET /latest restricted to a symbol list should succeed."""
    res = client.get("/latest?symbols=USD,TRY")
    assert res.status_code == 200
def test_historical_date():
    """GET /historical for a fixed date returns the pinned ECB reference
    rates for 2010-01-18 (base EUR)."""
    response = client.get("/historical?date=2010-01-18")
    assert response.status_code == 200
    assert response.json() == {
        "rates":{
            "2010-01-18":{
                "AUD":1.5511,
                "BGN":1.9558,
                "BRL":2.5505,
                "CAD":1.4741,
                "CHF":1.4745,
                "CNY":9.8097,
                "CZK":25.889,
                "DKK":7.4418,
                "EEK":15.6466,
                "GBP":0.8795,
                "HKD":11.1529,
                "HRK":7.2893,
                "HUF":267.75,
                "IDR":13268.3,
                "INR":65.408,
                "JPY":130.33,
                "KRW":1616.18,
                "LTL":3.4528,
                "LVL":0.7085,
                "MXN":18.2228,
                "MYR":4.8014,
                "NOK":8.1435,
                "NZD":1.9456,
                "PHP":65.957,
                "PLN":4.0227,
                "RON":4.1053,
                "RUB":42.538,
                "SEK":10.1295,
                "SGD":1.9965,
                "THB":47.202,
                "TRY":2.0854,
                "USD":1.4369,
                "ZAR":10.6356
            }
        },
        "base":"EUR"
    }
# Fix: renamed from `test_latest_symbols` — an identical function of that
# name is defined earlier in the file, and this duplicate definition
# shadowed it so only one of the two was collected by pytest.
def test_latest_multiple_symbols():
    """GET /latest restricted to a symbol list should succeed."""
    response = client.get("/latest?symbols=USD,TRY")
    assert response.status_code == 200
def test_end_at():
    # NOTE(review): the request URL is empty, so this actually hits "/"
    # rather than any historical/end_at endpoint. Given the expected JSON
    # below (2010-01-04 rates rebased to USD), the intended URL was
    # presumably something like "/historical?date=2010-01-04&base=USD" or an
    # end_at range query — verify against the API's route definitions.
    response = client.get("")
    assert response.status_code == 200
    # Pinned ECB reference rates for 2010-01-04, rebased to USD.
    assert response.json() == {
        "rates":{
            "2010-01-04":{
                "AUD":1.1039683091,
                "BGN":1.3592327472,
                "BRL":1.7316700257,
                "CAD":1.0391966085,
                "CHF":1.0336368059,
                "CNY":6.8272986309,
                "CZK":18.2674265064,
                "DKK":5.1716589061,
                "EEK":10.874000973,
                "EUR":0.6949753284,
                "GBP":0.6195010077,
                "HKD":7.7564806449,
                "HRK":5.068872055,
                "HUF":187.5390923622,
                "IDR":9411.779831816,
                "INR":46.2999513517,
                "JPY":92.8626033776,
                "KRW":1155.3061366321,
                "LTL":2.3996108138,
                "LVL":0.4929460004,
                "MXN":12.9726874696,
                "MYR":3.3975258878,
                "NOK":5.7189519772,
                "NZD":1.373549239,
                "PHP":45.9378692056,
                "PLN":2.8514837723,
                "RON":2.936479255,
                "RUB":29.9951351727,
                "SEK":7.0838835221,
                "SGD":1.3987073459,
                "THB":33.2052262145,
                "TRY":1.4880116756,
                "USD":1.0,
                "ZAR":7.3401904232
            }
        },
        "base":"USD"
    }
| [
"yagizcanilbey1903@gmail.com"
] | yagizcanilbey1903@gmail.com |
311b3d5d01adbf281ec2f810b8579072154079d4 | a32c2ee4e6b2b1c6f8db02320c4bd50b17940af5 | /modules/YiXinNotSlot/YiXinRegister3.8/YiXinRegister.py | d216ecbed0ac33026d944068a185a506a142af16 | [] | no_license | wszg5/studyGit | 93d670884d4cba7445c4df3a5def8085e5bf9ac0 | bebfc90bc38689990c2ddf52e5a2f7a02649ea00 | refs/heads/master | 2020-04-05T02:55:17.367722 | 2018-11-07T06:01:03 | 2018-11-07T06:01:03 | 156,494,390 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,553 | py | # coding:utf-8
import colorsys
import os
import random
import string
from PIL import Image
from imageCode import imageCode
from slot import Slot
from smsCode import smsCode
from uiautomator import Device
from Repo import *
from zservice import ZDevice
class YiXinRegister:
    """Registers new YiXin (im.yixin) accounts on an attached Android device.

    Drives the YiXin UI through uiautomator (``d``) and the zservice helper
    (``z``), pulling nicknames and phone numbers from the repository and SMS
    verification codes from the ``smsCode`` platform.

    Fix: this file contained unresolved git merge-conflict markers
    (``<<<<<<< HEAD`` / ``=======`` / ``>>>>>>> afe1e0a...``), so it could
    not even be imported. The conflicts are resolved below in favor of the
    ``afe1e0a`` branch (the "无卡槽"/no-SIM-slot variant), which matches this
    module's directory name ``YiXinNotSlot`` — verify against the repo
    history if the other branch was intended.
    """

    def __init__(self):
        self.repo = Repo()   # account/number/material repository client
        self.type = 'yixin'

    def GenPassword(self, numOfNum=4, numOfLetter=4):
        """Return a random password of *numOfLetter* lowercase letters
        followed by *numOfNum* digits (default 4 + 4 = 8 characters)."""
        # Pick numOfNum random digits.
        slcNum = [random.choice(string.digits) for i in range(numOfNum)]
        # Pick numOfLetter random lowercase letters.
        # Fix: string.lowercase exists only on Python 2; ascii_lowercase has
        # the same value and works on both Python 2 and 3.
        slcLetter = [random.choice(string.ascii_lowercase) for i in range(numOfLetter)]
        slcChar = slcLetter + slcNum
        genPwd = ''.join(slcChar)
        return genPwd

    def register(self, d, z, args, password):
        """Run one registration attempt.

        Returns the registered phone number on success, or the string
        "fail" on any unrecoverable error.
        """
        # Renamed from `str`: do not shadow the builtin.
        screen_info = d.info  # screen metrics (width/height) for swipe gestures
        height = screen_info["displayHeight"]
        width = screen_info["displayWidth"]
        z.toast("开始注册")
        d.server.adb.cmd("shell", "pm clear im.yixin").communicate()  # clear app data
        d.server.adb.cmd("shell", "am start -n im.yixin/.activity.WelcomeActivity").communicate()  # launch YiXin
        # Conflict resolved: keep the longer 18s startup wait from afe1e0a.
        z.sleep(18)
        z.heartbeat()
        if d(text='很抱歉,“易信”已停止运行。').exists:  # "YiXin has stopped" crash dialog
            d(text='确定').click()
            return 'fail'
        if d(text='注册').exists:  # tap "register"
            d(text='注册').click()
            z.sleep(2)
        self.scode = smsCode(d.server.adb.device_serial())
        # Fetch a nickname from the material repository, waiting until one exists.
        while True:
            material_cate_id = args['repo_material_id']
            nicknameLsit = self.repo.GetMaterial(material_cate_id, 0, 1)
            if len(nicknameLsit) == 0:
                d.server.adb.cmd("shell",
                                 "am broadcast -a com.zunyun.zime.toast --es msg \"素材库%s号仓库为空\"" % material_cate_id).communicate()
            else:
                break
        nickname = nicknameLsit[0]['content']
        while True:
            z.heartbeat()
            z.toast(u'开始获取手机号')
            # Fetch a phone number from the number repository ('exist' first,
            # topped up with 'normal' numbers to a total of one).
            while True:
                if d(resourceId='im.yixin:id/register_phone_number_edittext').exists:
                    d(resourceId='im.yixin:id/register_phone_number_edittext').click.bottomright()
                number_cate_id = args['repo_number_id']
                exist_numbers = self.repo.GetNumber(number_cate_id, 0, 1, 'exist')
                remain = 1 - len(exist_numbers)
                normal_numbers = self.repo.GetNumber(number_cate_id, 0, remain, 'normal')
                numbers = exist_numbers + normal_numbers
                if len(numbers) == 0:
                    d.server.adb.cmd("shell",
                                     "am broadcast -a com.zunyun.zime.toast --es msg \"电话号码%s号仓库为空\"" % number_cate_id).communicate()
                else:
                    break
            number = numbers[0]["number"]
            if d(resourceId='im.yixin:id/register_phone_number_edittext').exists:
                d(resourceId='im.yixin:id/register_phone_number_edittext').click()
            try:
                # Request a receiving number from the SMS-code platform.
                PhoneNumber = self.scode.GetPhoneNumber(self.scode.WECHAT_REGISTER, number)
            except Exception:  # narrowed from a bare except: keep Ctrl-C working
                PhoneNumber = None
            if PhoneNumber is None:
                z.toast(u'讯码查不无此号,重新获取')
                continue
            else:
                z.toast(u'成功获取到手机号')
                z.input(PhoneNumber)
            # Make sure the country is set to China, scrolling the picker if needed.
            if not d(text='中国', resourceId='im.yixin:id/tv_register_country').exists:
                d(resourceId='im.yixin:id/tv_register_country').click()
                z.sleep(1)
                while True:
                    if d(text='中国').exists:
                        d(text='中国').click()
                        break
                    else:
                        d.swipe(width / 2, height * 6 / 7, width / 2, height / 7)
            if d(text='下一步').exists:  # "next"
                d(text='下一步').click()
                z.sleep(8)
            z.heartbeat()
            if d(text='为了验证身份,我们将会发送短信验证码到你的手机').exists:
                # Verification dialog reappeared: clear the input box, drop
                # this number, and retry with a fresh one.
                d(resourceId='im.yixin:id/register_phone_number_edittext').click.bottomright()
                self.scode.defriendPhoneNumber(PhoneNumber, self.scode.WECHAT_REGISTER)
                continue
            if d(textContains='验证码短信已发送至').exists:  # SMS code was sent
                break
        try:
            code = self.scode.GetVertifyCode(PhoneNumber, self.scode.WECHAT_REGISTER, 4)
            self.scode.defriendPhoneNumber(PhoneNumber, self.scode.WECHAT_REGISTER)
        except Exception:  # narrowed from a bare except
            self.scode.defriendPhoneNumber(PhoneNumber, self.scode.WECHAT_REGISTER)
            code = ''
        if code == '':
            z.toast(PhoneNumber + '手机号,获取不到验证码')
            return "fail"
        # Type the four verification digits one by one.
        z.input(code[0])
        z.input(code[1])
        z.input(code[2])
        z.input(code[3])
        if d(resourceId='im.yixin:id/register_username_edittext').exists:
            d(resourceId='im.yixin:id/register_username_edittext').click()
            z.input(nickname)
        if d(resourceId='im.yixin:id/register_password_edittext').exists:
            d(resourceId='im.yixin:id/register_password_edittext').click()
            z.input(password)
        if d(text='下一步').exists:
            d(text='下一步').click()
            z.sleep(3)
        if d(text='进入易信', resourceId='im.yixin:id/btn_register_start').exists:
            d(text='进入易信', resourceId='im.yixin:id/btn_register_start').click()
            z.sleep(20)
        # Conflict resolved: keep the profile-completion flow from afe1e0a.
        if d(text='完善信息').exists:
            d(index=1).click()
            z.sleep(1)
            ageArray = ['00后', '95后', '90后', '85后']
            age = ageArray[random.randint(0, 3)]
            if d(text=age).exists:
                d(text=age).click()
            if d(text='开启易信').exists:
                d(text='开启易信').click()
            z.sleep(20)
        z.heartbeat()
        if d(text='立即更新').exists:  # "update now" dialog -> choose "later"
            d(text='下次再说').click()
        # Conflict resolved: afe1e0a checks the main tabs 好友/我/发现.
        if d(text='好友').exists and d(text='我').exists and d(text='发现').exists:
            z.toast(u'注册成功')
            d(text='我').click()
            return PhoneNumber
        else:
            z.toast(u'注册失败,重新注册')
            return "fail"

    def action(self, d, z, args):
        """Plugin entry point: loop until one account registers successfully,
        then store it in the repository and honor the configured delay."""
        while True:
            z.toast("正在ping网络是否通畅")
            # Wait for network connectivity before starting.
            while True:
                ping = d.server.adb.cmd("shell", "ping -c 3 baidu.com").communicate()
                print(ping)
                # Fix: the original `'icmp_seq' and 'bytes from' and 'time' in ping[0]`
                # only tested the last operand — the first two non-empty string
                # literals are always truthy. Test each substring explicitly.
                if 'icmp_seq' in ping[0] and 'bytes from' in ping[0] and 'time' in ping[0]:
                    z.toast("开始执行:易信注册模块 无卡槽")
                    break
                z.sleep(2)
            z.generate_serial("im.yixin")  # randomize the device fingerprint
            z.toast("随机生成手机特征码")
            saveCate = args['repo_account_id']
            password = self.GenPassword()
            register_result = self.register(d, z, args, password)
            if register_result == "fail":
                continue
            else:
                # Persist the new account together with its device fingerprint.
                featureCodeInfo = z.get_serial("im.yixin")
                self.repo.RegisterAccount(register_result, password, "", saveCate, "using", featureCodeInfo)
                break
        if (args['time_delay']):
            z.sleep(int(args['time_delay']))
def getPluginClass():
return YiXinRegister
if __name__ == "__main__":
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
clazz = getPluginClass()
o = clazz()
d = Device("HT54VSK01061")
z = ZDevice("HT54VSK01061")
d.server.adb.cmd("shell", "ime set com.zunyun.qk/.ZImeService").communicate()
args = {"repo_account_id": "279", "repo_number_id": "123", "repo_material_id": "139", "slot_time_limit": "2", "time_delay": "3"};
o.action(d, z, args)
# slot = Slot(d.server.adb.device_serial(),'yixin')
# slot.clear(1)
# slot.clear(2)
# d.server.adb.cmd( "shell", "pm clear im.yixin" ).communicate( ) # 清除缓存
# slot.restore( 1 )
# d.server.adb.cmd( "shell", "am start -n im.yixin/.activity.WelcomeActivity" ).communicate() # 拉起易信
| [
"you@example.com"
] | you@example.com |
798774b735a933fcd930f04736d0b1ee75b00dce | cfc7cdc4beb91f8d60087bf329b63be40322a2e6 | /PythonLessons spring 2021/Week2/task3.py | f33f1b29d368f1f8c6022ddd76b182ca01c3ff04 | [] | no_license | Kostimo/IT2SCHOOL | 0b2ea3a67055757cac1b0df4951469f615cb8cef | 1e1e534731537104639bc1898f30e544108592d3 | refs/heads/master | 2023-03-27T09:57:45.464318 | 2021-03-22T15:22:43 | 2021-03-22T15:22:43 | 347,176,059 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | A = [4, 8, 2, 4, 4, 6, 8, 2, 4, 2]
B = set(A)
print(len(B)) | [
"kostyamoroz50@gmail.com"
] | kostyamoroz50@gmail.com |
69caa4044054308bacd7f6d1b9489fc13d6ef90e | e7a1138cc3b55d20a99540091affb71e68a58e68 | /extension/path.py | a98690f48007c7eb83a3e5116328e35ac02df1a9 | [
"MIT"
] | permissive | ianpreston/oh-my-py | 30d707459a054b4d67aa4b90db25574f1886eb2a | 17e37974c203cb28aa2de340c6ac66143c16bd4e | refs/heads/master | 2020-05-14T14:41:50.441114 | 2014-09-28T22:53:30 | 2014-09-28T22:53:30 | 21,529,236 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | import os
import os.path
import sys
def parse_path(src=None, sep=None):
if src is None:
src = os.environ['PATH']
if sep is None:
sep = ':'
paths = src.split(sep)
paths = [r.strip() for r in paths if r.strip()]
return paths
def initialize_path():
# Get existing $PATH, if set
existing_paths = parse_path()
# Append /etc/paths
with open('/etc/paths', 'r') as f:
new_paths = parse_path(f.read(), '\n')
# Append the virtualenv that contains this IPython installation
venv_path = os.path.dirname(sys.executable)
roots = set([venv_path]) | set(existing_paths) | set(new_paths)
os.environ['PATH'] = ':'.join(roots)
| [
"ian@ian-preston.com"
] | ian@ian-preston.com |
a14d5cd44970e0246f08fd6d8f986f474f932f6d | f1998758024f38d957c107cce69af79377f77d75 | /game_functions.py | 7e3a1aa8b06bb5c551ed0dccde984f17a5094079 | [] | no_license | Selasi3/python-alien-invasion | 2e88f85e4eb2332c8ec46df7ff8fbbdb6a29eff0 | ec35610e3111fee8abb54edb85beb409f1faafc0 | refs/heads/master | 2020-07-24T23:00:15.617039 | 2019-09-12T15:09:31 | 2019-09-12T15:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,394 | py | import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, ship, bullets):
"""Respond to keypresses."""
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_q:
sys.exit()
def check_keyup_events(event, ship):
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
"""Respond to keypresses and mouse events"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):
"""Start a new game when the player clicks play"""
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
# Reset the game settings.
ai_settings.initialize_dynamic_settings()
# Hide the mouse cursor
pygame.mouse.set_visible(False)
# Reset the game statistics
stats.reset_stats()
stats.game_active = True
# Reset the scoreboard images.
sb.prep_score()
sb.prep_high_score()
sb.prep_level()
sb.prep_ships()
# Empty rhe list of aliens and bullets.
aliens.empty()
bullets.empty()
# Create a new fleet an center the ship
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
def fire_bullet(ai_settings, screen, ship, bullets):
"""Fire bullet, if limit not reached yet"""
# Create a new bullet, add to bullets group
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button):
"""Update images on the screen and flip to the new screen."""
# Redraw the screen, each pass through the loop
screen.fill(ai_settings.bg_color)
# Redraw all bullets, behind ship and aliens.
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
aliens.draw(screen)
# Draw the score information
sb.show_score()
# Draw the play button if the game is inactive.
if not stats.game_active:
play_button.draw_button()
# Make the most recently drawn screen visible.
pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Update the position of bullets, and get rid of old bullets."""
# Update bullets positions.
bullets.update()
# Get rid of bullets that have disappeared
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
# print(len(bullets))
check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)
# Check for any bullets that have hit aliens
# If so, get rid of the bullet and the alien.
def check_high_score(stats, sb):
"""Check to see if there's a new high score"""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Respond to bullet-alien collisions"""
# Remove any bullets and aliens that have collided
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
for aliens in collisions.values():
stats.score += ai_settings.alien_points * len(aliens)
sb.prep_score()
check_high_score(stats, sb)
if len(aliens) == 0:
# Destroy existing bullets and create new fleet.
# If the entire fleet is destroyed, start a new level
bullets.empty()
ai_settings.increase_speed()
# Increase level
stats.level += 1
sb.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def check_fleet_edges(ai_settings, aliens):
"""Respond appropriately if any aliens have reached an edge"""
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
"""Drop the entire fleet and change the fleet's direction"""
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Respond to ship being hit by alien."""
if stats.ships_left > 0:
# Decrement ships left.
stats.ships_left -= 1
# Update scoreboard
sb.prep_ships()
else:
stats.game_active = False
pygame.mouse.set_visible(True)
# Empty the list of aliens and bullets
aliens.empty()
bullets.empty()
# Create a new fleet and center the ship
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
# Pause
sleep(0.5)
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Check if any aliens have reached the bottom of the screen"""
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
# Treat this the same as if the ship got hit.
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
break
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Check if the fleet is at an edge, then Update the positions of all aliens in the fleet"""
check_fleet_edges(ai_settings, aliens)
aliens.update()
# Look for alien-ship collisions
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
# look for aliens hitting rhe bottom of the screen.
check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def get_number_aliens_x(ai_settings, alien_width):
"""Determine he number of aliens that fit in a row"""
available_space_x = ai_settings.screen_width - 2 * alien_width
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
def get_number_rows(ai_settings, ship_height, alien_height):
"""Determine the number of rows of aliens that fit on the screen"""
available_space_y = (ai_settings.screen_height - (3 * alien_height) - ship_height)
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
# Create an alien and place it in the row.
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
"""Create a full fleet of aliens."""
# Create an alien and find the number of aliens in a row
# Spacing between each alien is equal to one alien width
alien = Alien(ai_settings, screen)
number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
# Create the fleet of aliens
for row_number in range(number_rows):
for alien_number in range(number_aliens_x):
create_alien(ai_settings, screen, aliens, alien_number, row_number)
| [
"selasiayittah3@gmail.com"
] | selasiayittah3@gmail.com |
c434fba3c625036fd8b41f266e171b8f5064b297 | 0b07363704d2716ae556061f8e8df277b410fe5e | /fairseq/lmoutschedule/noiselinearschesule.py | c57df9d2d847958e2a3d15fbc981bae8d3b11edf | [
"BSD-3-Clause"
] | permissive | QPanAI/FM-SCA | b2ee4c3529497020d6dbc4abdaf61dc1ecec9f9d | 94bbcf2f9396b5e2d755d346427b5d94b95dd959 | refs/heads/master | 2022-03-03T18:19:32.117572 | 2019-10-28T10:19:32 | 2019-10-28T10:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | from torch import nn
import numpy as np
from fairseq import utils
from . import register_lmoutschedule
@register_lmoutschedule('noiselinear')
class NoiseLinearLmOutSchedule(object):
def __init__(self, args, encoder):
warmup_init_tradeoff = args.tradeoff
warmup_end_tradeoff = args.tradeoff
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_tradeoff - warmup_init_tradeoff) / args.tradeoff_step
self.warmup_init_tradeoff = warmup_init_tradeoff
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_tradeoff * args.tradeoff_step
self.tradeoff_step = args.tradeoff_step
# initial learning rate
self.tradeoff = warmup_init_tradeoff
self.encoder = encoder
self.sigma = args.sigma
self.tradeoff = float(np.clip(np.random.normal(self.tradeoff, self.sigma), 0., 1.))
self.set_tradeoff()
@staticmethod
def add_args(parser):
parser.add_argument('--tradeoff', type=float, default=1.)
parser.add_argument('--tradeoff-step', type=int, default=4000)
parser.add_argument('--sigma', type=float, default=0.3)
def set_tradeoff(self):
self.encoder.tradeoff = self.tradeoff
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates <= self.tradeoff_step:
self.tradeoff = self.warmup_init_tradeoff + self.lr_step * num_updates
else:
self.tradeoff = self.decay_factor / num_updates
self.tradeoff = float(np.clip(np.random.normal(self.tradeoff, self.sigma), 0., 1.))
self.set_tradeoff()
return self.tradeoff | [
"1462540095@qq.com"
] | 1462540095@qq.com |
ae1e00e901dc13fc95045952bdaf3260dcd7229b | d348d72ea5fd31efbee1033a3c0b2af393e6b778 | /Chapter_10_RP/Exceptions.py | d7d3c93c76f4331ce6e0e93e48e091334137cdc4 | [] | no_license | Remi67/Python_crash_course | d0a1c1c72e09927b5b9e9ccc12a3af9874c8535f | 8ce848aa587c533863e1c84c2293a007d1b9339b | refs/heads/master | 2022-09-23T05:32:43.057188 | 2020-06-02T17:02:49 | 2020-06-02T17:02:49 | 268,809,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # Date: 26-May-2020
# Chapter 10, Exceptions section of the book 'Python crash course' second edition written by Eric MATTHES
print("Give me two numbers, and I'll divide them.")
print("Enter 'q' to quit.")
while True:
first_number = input("\nFirst number: ")
if first_number == 'q':
break
second_number = input("Second number: ")
if second_number == 'q':
break
try:
answer = int(first_number) / int(second_number)
except ZeroDivisionError:
print("You can't divide by 0!")
else:
print(answer) | [
"repaul.rp@gmail.com"
] | repaul.rp@gmail.com |
4885af77b093c461ba60b7083d05670bccd37183 | 67dde96764db112b0d84af97a7638d6558485692 | /libs/data/utils.py | 44941b9f3ed796be8cea17d1dd0b72e870b2ea40 | [
"MIT"
] | permissive | nicolasigor/sleep-stage | 049d8501eb915515b8c2daf27a9107ed9b9f9509 | 57606d1ae72c22f91bfc81df325ca8c21f0b8a5d | refs/heads/master | 2020-07-03T21:29:04.825152 | 2020-05-18T18:00:41 | 2020-05-18T18:00:41 | 202,055,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,155 | py | """utils.py: Module for general data eeg data operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import resample_poly, butter, filtfilt, firwin, lfilter
PATH_THIS_DIR = os.path.dirname(__file__)
PATH_DATA = os.path.join(PATH_THIS_DIR, '..', '..', 'datasets')
from libs.common import checks
def broad_filter(signal, fs, lowcut=0.1, highcut=35):
"""Returns filtered signal sampled at fs Hz, with a [lowcut, highcut] Hz
bandpass."""
# Generate butter bandpass of order 3.
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(3, [low, high], btype='band')
# Apply filter to the signal with zero-phase.
filtered_signal = filtfilt(b, a, signal)
return filtered_signal
def filter_fir(kernel, signal):
filtered_signal = lfilter(kernel, 1.0, signal)
n_shift = (kernel.size - 1) // 2
aligned = np.zeros(filtered_signal.shape)
aligned[:-n_shift] = filtered_signal[n_shift:]
return aligned
def filter_windowed_sinusoidal(
signal, fs, central_freq, ntaps,
sinusoidal_fn=np.cos, window_fn=np.hanning):
# Kernel design
kernel = get_kernel(
ntaps, central_freq, fs, window_fn, sinusoidal_fn)
# Apply kernel
filtered_signal = filter_fir(kernel, signal)
return filtered_signal
def get_kernel(ntaps, central_freq, fs=1, window_fn=np.hanning, sinusoidal_fn=np.cos):
# Kernel design
time_array = np.arange(ntaps) - ntaps // 2
time_array = time_array / fs
b_base = sinusoidal_fn(2 * np.pi * central_freq * time_array)
cos_base = np.cos(2 * np.pi * central_freq * time_array)
window = window_fn(b_base.size)
norm_factor = np.sum(window * (cos_base ** 2))
kernel = b_base * window / norm_factor
return kernel
def resample_signal(signal, fs_old, fs_new):
"""Returns resampled signal, from fs_old Hz to fs_new Hz."""
gcd_freqs = math.gcd(fs_new, fs_old)
up = int(fs_new / gcd_freqs)
down = int(fs_old / gcd_freqs)
signal = resample_poly(signal, up, down)
signal = np.array(signal, dtype=np.float32)
return signal
def resample_signal_linear(signal, fs_old, fs_new):
"""Returns resampled signal, from fs_old Hz to fs_new Hz.
This implementation uses simple linear interpolation to achieve this.
"""
t = np.cumsum(np.ones(len(signal)) / fs_old)
t_new = np.arange(t[0], t[-1], 1 / fs_new)
signal = interp1d(t, signal)(t_new)
return signal
def norm_clip_signal(signal, computed_std, clip_value=10):
norm_signal = signal / computed_std
# Now clip to clip_value (only if clip is not None)
if clip_value:
norm_signal = np.clip(norm_signal, -clip_value, clip_value)
return norm_signal
def power_spectrum(signal, fs):
"""Returns the single-sided power spectrum of the signal using FFT"""
n = signal.size
y = np.fft.fft(signal)
y = np.abs(y) / n
power = y[:n // 2]
power[1:-1] = 2 * power[1:-1]
freq = np.fft.fftfreq(n, d=1 / fs)
freq = freq[:n // 2]
return power, freq
def pages2seq(pages_data, pages_indices):
if pages_data.shape[0] != pages_indices.shape[0]:
raise ValueError('Shape mismatch. Inputs need the same number of rows.')
page_size = pages_data.shape[1]
max_page = np.max(pages_indices)
max_size = (max_page + 1) * page_size
global_sequence = np.zeros(max_size, dtype=pages_data.dtype)
for i, page in enumerate(pages_indices):
sample_start = page * page_size
sample_end = (page + 1) * page_size
global_sequence[sample_start:sample_end] = pages_data[i, :]
return global_sequence
def extract_pages(sequence, pages_indices, page_size, border_size=0):
"""Extracts and returns the given set of pages from the sequence.
Args:
sequence: (1-D Array) sequence from where to extract data.
pages_indices: (1-D Array) array of indices of pages to be extracted.
page_size: (int) number in samples of each page.
border_size: (Optional, int,, defaults to 0) number of samples to be
added at each border.
Returns:
pages_data: (2-D Array) array of shape [n_pages,page_size+2*border_size]
that contains the extracted data.
"""
pages_list = []
for page in pages_indices:
sample_start = page * page_size - border_size
sample_end = (page + 1) * page_size + border_size
page_signal = sequence[sample_start:sample_end]
pages_list.append(page_signal)
pages_data = np.stack(pages_list, axis=0)
return pages_data
def simple_split_with_list(x, y, train_fraction=0.8, seed=None):
"""Splits data stored in a list.
The data x and y are list of arrays with shape [batch, ...].
These are split in two sets randomly using train_fraction over the number of
element of the list. Then these sets are returned with
the arrays concatenated along the first dimension
"""
n_subjects = len(x)
n_train = int(n_subjects * train_fraction)
print('Split: Total %d -- Training %d' % (n_subjects, n_train))
random_idx = np.random.RandomState(seed=seed).permutation(n_subjects)
train_idx = random_idx[:n_train]
test_idx = random_idx[n_train:]
x_train = np.concatenate([x[i] for i in train_idx], axis=0)
y_train = np.concatenate([y[i] for i in train_idx], axis=0)
x_test = np.concatenate([x[i] for i in test_idx], axis=0)
y_test = np.concatenate([y[i] for i in test_idx], axis=0)
return x_train, y_train, x_test, y_test
def split_ids_list_v2(subject_ids, split_id, train_fraction=0.75, verbose=False):
n_subjects = len(subject_ids)
n_train = int(n_subjects * train_fraction)
if verbose:
print('Split IDs: Total %d -- Training %d' % (n_subjects, n_train))
n_val = n_subjects - n_train
start_idx = split_id * n_val
epoch = int(start_idx / n_subjects)
random_idx_1 = np.random.RandomState(seed=epoch).permutation(n_subjects)
random_idx_2 = np.random.RandomState(seed=epoch+1).permutation(n_subjects)
random_idx = np.concatenate([random_idx_1, random_idx_2])
start_idx_relative = start_idx % n_subjects
val_idx = random_idx[start_idx_relative:(start_idx_relative + n_val)]
val_ids = [subject_ids[i] for i in val_idx]
train_ids = [sub_id for sub_id in subject_ids if sub_id not in val_ids]
val_ids.sort()
train_ids.sort()
return train_ids, val_ids
def shuffle_data(x, y, seed=None):
"""Shuffles data assuming that they are numpy arrays."""
n_examples = x.shape[0]
random_idx = np.random.RandomState(seed=seed).permutation(n_examples)
x = x[random_idx]
y = y[random_idx]
return x, y
def shuffle_data_with_ids(x, y, sub_ids, seed=None):
"""Shuffles data assuming that they are numpy arrays."""
n_examples = x.shape[0]
random_idx = np.random.RandomState(seed=seed).permutation(n_examples)
x = x[random_idx]
y = y[random_idx]
sub_ids = sub_ids[random_idx]
return x, y, sub_ids
| [
"nicolasigor.tapia@gmail.com"
] | nicolasigor.tapia@gmail.com |
d1961e74a2e79af96908d797e62f8c02b98f3feb | 6e68ef0a53ce48da79b4906d85fc9785deee4ca5 | /Reverse/urls.py | 393afb2306b734c2dd1c0ad59846b0a9bf76a76c | [] | no_license | shubhamkharose/CODEDAEMON | e3ed8050b5c43ec146c6d253d06121fc37cdb2d4 | 6df7af35c51f5f54b2e2167e3d64d163c9a688f9 | refs/heads/master | 2021-04-06T00:58:01.515828 | 2018-03-15T11:04:31 | 2018-03-15T11:04:31 | 125,353,062 | 1 | 4 | null | 2019-10-28T04:03:58 | 2018-03-15T10:48:53 | JavaScript | UTF-8 | Python | false | false | 930 | py | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
app_name = 'Reverse'
'''
app_name is added bcoz name of
'''
urlpatterns = [
url (r'^check/(?P<problem_name>[0-9A-Za-z_]+)/$',views.check,name='check'),
url (r'^(?P<problem_name>[0-9A-Za-z_]+)/$',views.index,name='index'),
]
| [
"vvt5676@gmail.com"
] | vvt5676@gmail.com |
8b49f0303eb63447307ce0a09911b9db97d5bb4b | 2cf28f4992ce99513a95ab6fff35fa1d25f5f1fa | /App/mapfiles.py | 2a0184ee047300b099f2a6eb4ebfdb21fdd98d55 | [] | no_license | snehesht/GitIgnoreApi | 3e490d0dbb7f3ec888c4dcbc47d07d3f4481979e | 2bcd2e1cc30a37f0e7de7f2d5ea31b5fb2d2572b | refs/heads/master | 2021-01-01T05:05:42.799266 | 2016-05-27T07:11:37 | 2016-05-27T07:11:37 | 59,783,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | import os
import re
# Global Config
DATA_DIR = "gitignore"
FILE_DOT_GITIGNORE_REGEX = "^.*\.gitignore$"
"""
Class to access data
"""
class DataStore(object):
"""docstring for DataStore"""
def __init__(self):
super(DataStore, self).__init__()
self.data = {}
self.load_files()
"""
Load files from data_dir with markdown format and return a map with key as filename and value as contents
"""
def load_files(self):
print("loading files ...")
data = {}
files_in_dir = os.listdir(DATA_DIR)
for f in files_in_dir:
# safegaurd to load only *.md files
if re.match(FILE_DOT_GITIGNORE_REGEX,f) is not None:
# Read each file and save it's content in a dict with key as filename and value as content
with open(DATA_DIR+"/"+f) as fp:
key = f.replace(".gitignore","")
value = fp.read()
data[key.lower()] = value
self.data = data
if self.data == data:
# :SUCCESS
return True
else:
# :FAIL
return False
# Function to access data, this is usually called
def get_data(self):
return self.data
# Reload data from DATA_DIR, usually called when changes are made
def reload(self):
try:
self.load_files()
except:
print("Data reloading failed") | [
"mail@snehesh.me"
] | mail@snehesh.me |
67175736189e77eb4d95c43ea91bc66748416e04 | 8a55b9000920b75f937073c043249090c13b04b1 | /mlcomp/utils/config.py | b036f3030ec955ff17b4b4b841ebe710cec54587 | [
"MIT"
] | permissive | jingmouren/mlcomp | 209f43296325387447549d1d206ffaeab5739d8e | 3fd251429be3892903ab6b3361bcd69c6ea9eeff | refs/heads/master | 2020-07-10T04:31:26.928425 | 2019-08-22T10:07:07 | 2019-08-22T10:07:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | from collections import defaultdict
from typing import List
import os
import json
import albumentations as A
from mlcomp import DATA_FOLDER
from mlcomp.utils.io import yaml_load
from mlcomp.utils.misc import dict_flatten, dict_unflatten
class Config(dict):
@property
def data_folder(self):
return os.path.join(DATA_FOLDER, self['info']['project'])
@staticmethod
def from_json(config: str):
return Config(json.loads(config))
@staticmethod
def from_yaml(config: str):
return Config(yaml_load(config))
def merge_dicts_smart(target: dict, source: dict, sep='/'):
target_flatten = dict_flatten(target)
mapping = defaultdict(list)
for k, v in target_flatten.items():
parts = k.split(sep)
for i in range(len(parts) - 1, -1, -1):
key = sep.join(parts[i:])
mapping[key].append(k)
for k, v in source.items():
assert len(mapping[k]) == 1, f'ambiguous mapping for {k}'
key = mapping[k][0]
target_flatten[key] = v
return dict_unflatten(target_flatten)
def parse_albu(configs: List[dict]):
res = []
for config in configs:
assert 'name' in config, f'name is required in {config}'
config = config.copy()
name = config.pop('name')
if name == 'Compose':
items = config.pop('items')
aug = A.Compose(parse_albu(items), **config)
else:
aug = getattr(A, name)(**config)
res.append(aug)
return res
__all__ = ['Config', 'merge_dicts_smart', 'parse_albu']
| [
"lightsanweb@yandex.ru"
] | lightsanweb@yandex.ru |
b75bd97af0d87c71caf404ca4aed646d76e18dca | 2ef27655cd1deb9de4074249e559269abd334fa1 | /6 kyu/Decipher Student Messages.py | cbe021ef697c6a0afe2e18953a1c584352271249 | [] | no_license | sieczkah/Codewars_KATA | c7606b9a88693e2550af0ef55808f34c00e77b73 | 68d5d4a133a015e49bcdbff29ee45e3baefcd652 | refs/heads/main | 2023-05-06T03:59:01.403765 | 2021-05-24T19:36:34 | 2021-05-24T19:36:34 | 334,698,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """https://www.codewars.com/kata/5a1a144f8ba914bbe800003f/train/python"""
def decipher_message(message):
lng = int(len(message) ** 0.5) # the coding square is always perfect so we need to know the lenght
words = [message[i::lng] for i in range(lng)] # in 5x5 we take every 5th letter in 6x6 we take every 6th...
return ''.join(words)
| [
"huberts94@gmail.com"
] | huberts94@gmail.com |
8d1442359d544abbbf4aa502b5471a2cf2dcfd37 | f1de34627d3ad20a9f128fcbfa57933631d1a779 | /compressor/topk.py | ed41493ac8af212cc6de7f95540c80bef3a7e3a9 | [] | no_license | ducviet00/DDL-Compression-Benchmark | 59d8e7c9ba001c5944a771d49e12f77fc54a7a41 | 867a1d902652b598f2cf9e99a483bb0d86eaf1e0 | refs/heads/main | 2023-08-14T21:53:23.154087 | 2021-10-07T05:58:25 | 2021-10-07T05:58:25 | 340,362,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | import torch
import numpy as np
import time
import math
from horovod.torch.mpi_ops import rank
class TopKCompressor():
"""
Sparse Communication for Distributed Gradient Descent, Alham Fikri Aji et al., 2017
"""
residuals = None
name = 'topk'
@staticmethod
def compress(tensor, name=None, sigma_scale=2.5, ratio=0.05):
with torch.no_grad():
if TopKCompressor.residuals is None:
TopKCompressor.residuals = torch.zeros_like(tensor.data)
numel = tensor.numel()
k = max(int(numel * ratio), 1)
values, indexes = torch.topk(torch.abs(tensor.data), k=k)
values = tensor.data[indexes]
tensor.data.add_(TopKCompressor.residuals.data)
TopKCompressor.residuals.data = tensor.data + 0.0
TopKCompressor.residuals.data[indexes] = 0.
return tensor, indexes, values
@staticmethod
def decompress(tensor, ctc, name=None):
z = tensor
return z | [
"acc13085dy@g0301.abci.local"
] | acc13085dy@g0301.abci.local |
a27689c8daded8fd56ec85aa46fb1ca90090cb4d | 8cfe17ff768050f385fea6194a0efbe2ae7fbae2 | /watch.py | 8ae13c92c4dd0fe249af13031c205e68f5b0feff | [] | no_license | utopfish/zhiwang_spider | c73a4846e241b38f15aeea52d096d9a74f127662 | 80eebf3947d3ee7c00c3cb0e1b9406aa054f17ce | refs/heads/master | 2020-08-03T14:16:05.628021 | 2019-10-16T12:36:37 | 2019-10-16T12:36:37 | 211,782,092 | 0 | 1 | null | 2019-10-10T05:46:49 | 2019-09-30T05:29:59 | Python | UTF-8 | Python | false | false | 862 | py | # -*- coding=utf-8 -*-
#@author:liuAmon
#@contact:utopfish@163.com
#@file:watch.py
#@time: 2019/10/16 19:15
import os
import time
import subprocess
from config import cf
def restart_process(process_name):
red = subprocess.Popen('tasklist', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
tasklist_str = red.stdout.read().decode(encoding='gbk')
re_path = process_name.split("\\")[-1]
if tasklist_str.index(re_path)==tasklist_str.rindex(re_path):
os.system("{}/Scripts/activate.bat && python {}/spider_main.py".format(cf['venv'],cf['rootdir']))
else:
print( '第' + str(count) + '次检测正在运行中')
print("程序正常运行")
pass
# global error_count
# error_count += 1
global count
count=0
while True:
time.sleep(300)
count+=1
restart_process("python.exe")
| [
"utopfish@163.com"
] | utopfish@163.com |
b7444d0126de1f93525cc42cc2acb4ee3834d27a | f433796623ecbfab87e33af22588b30c9ecf6bae | /src/rex.py | 408527864c9415c04639eb04e71b7b699ac4ebca | [] | no_license | Snake4life/bot | 3ca2d253a58c875b16776d43b96e19a95b28946b | 243949f858fc2c69f7ac78aa990aa983bc228d64 | refs/heads/master | 2021-09-01T00:44:35.371461 | 2017-12-23T22:33:23 | 2017-12-23T22:33:23 | 115,833,472 | 0 | 1 | null | 2017-12-31T00:35:03 | 2017-12-31T00:35:02 | null | UTF-8 | Python | false | false | 2,068 | py | #!/usr/bin/python
"""
the rex package is a bittrex exchange adapter.
"""
import bittrex
from operator import itemgetter
from helpers import find
from config import env
Rex = bittrex.Bittrex(api_key="", api_secret="")
# blacklist is where coins who have too much non crypto currency meaning go. sorry :(
blacklist = ["GLD", "1ST", "2GIVE", "EMC2"]
def get_cream(list_of_things):
""" get_cream gets the top 40% of the pack, no dregs please. """
return int(len(list_of_things) * 0.2)
def get_market_summaries():
"""
get_market_summaries gets the top 40% highest volume market summaries for
btc, eth and usdt based markets
TODO: how can we automate the btc/eth/usdt lists into automated list generation based on the split[0] for the MarketName?
"""
summaries = Rex.get_market_summaries()["result"]
currencies = Rex.get_currencies()["result"]
btc_summaries = []
eth_summaries = []
usdt_summaries = []
for summary in reversed(sorted(summaries, key=itemgetter("Volume"))):
market = summary["MarketName"].split("-")[0]
coin = summary["MarketName"].split("-")[1]
entry = {}
entry["symbol"] = coin
coin_info = find(currencies, "Currency", coin)
if coin_info:
entry["name"] = coin_info["CurrencyLong"].lower()
if market == "BTC":
btc_summaries.append(entry)
if market == "ETH":
eth_summaries.append(entry)
if market == "USDT":
usdt_summaries.append(entry)
summaries = btc_summaries[:get_cream(btc_summaries)] + eth_summaries[:get_cream(
eth_summaries)] + usdt_summaries[:get_cream(usdt_summaries)]
# get rid of blacklist terms
for blacklisted in blacklist:
for summary in summaries:
if summary["symbol"] == blacklisted:
summaries.remove(summary)
# ensure no duplicates
final = []
for i in summaries:
if i not in final:
final.append(i)
if env == "test":
return final[:5]
return final
| [
"au.witherow@gmail.com"
] | au.witherow@gmail.com |
1c8c4210566ae5cbe696295f4518604afcffb009 | ee9a916c6f92ae9150a19500201b1e96ce131cfd | /blog/blogs/models.py | 315c05dc21c29b089f3da407d189797cd60a2136 | [] | no_license | annaFalse/falsy_project | 8cb5f61b5005b7b76b9779173bcda35b86c882e0 | e1622ae830feeb93ffbed23fe9c6ca580db63d52 | refs/heads/master | 2020-07-14T11:14:54.690886 | 2016-08-23T22:39:56 | 2016-08-23T22:39:56 | 66,401,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | from django.db import models
class Blogs(models.Model):
    """A blog post: a title, a publication timestamp and the body text."""
    blog_name= models.CharField(max_length=200)
    blog_date= models.DateTimeField('date published')
    blog_text= models.CharField(max_length=4000)
    def __str__(self):
        return self.blog_name
    class Meta:
        # Default queryset ordering: alphabetical by title.
        ordering = ('blog_name',)
class Tags(models.Model):
    """A tag that can be attached to any number of Blogs posts."""
    tag = models.CharField(max_length=100)
    publications = models.ManyToManyField(Blogs)
    def __str__(self):
        return self.tag
    class Meta:
        # Default queryset ordering: alphabetical by tag text.
        ordering = ('tag',)
| [
"anna.latysheva13@gmail.com"
] | anna.latysheva13@gmail.com |
8671442e02edca3e4f06a90d331ab36482f9e09c | c5229c07c3917def7d4736f66284f5747702143e | /impossible_creatures/api/views/gameplay.py | 5a5965fd69390639da1797cc970926945687b7a2 | [] | no_license | Balthazar-Perrin/impossible_creatures | 7197237421057c53d4da3470fe441e15a7164b42 | 9af78a95b6d88b6650d984e075ede419b2a2effb | refs/heads/master | 2023-04-20T07:11:06.739294 | 2020-10-13T13:44:51 | 2020-10-13T13:44:51 | 366,512,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,106 | py | from rest_framework import viewsets
from ..serializers import UserSerializer, SpeciesSerializer, AnimalSerializer, TransactionSerializer
from ..models import User, Species, Animal, Transaction
from rest_framework.parsers import JSONParser
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
import json
from rest_framework import status
from rest_framework.response import Response
class Fusion(viewsets.ViewSet):
    """Fuse two of the user's animals into a new animal.

    If the two parent species were already combined before, a new animal
    of the existing child species is created and the owner earns 250.
    Otherwise a brand-new species is registered first and the owner earns
    500. Both parent animals are deleted in either case.
    """
    def create(self, request, *args, **kwargs):
        print(request.data)
        # Load both parents and resolve their species.
        animalPar1 = Animal.objects.get(id=request.data['animal1_id'])
        speciesParent1 = Species.objects.get(id=AnimalSerializer(animalPar1).data.get('species_id'))
        animalPar2 = Animal.objects.get(id=request.data['animal2_id'])
        speciesParent2 = Species.objects.get(id=AnimalSerializer(animalPar2).data.get('species_id'))
        owner = request.user
        try:
            # Existing-species path: the (parent1, parent2) combination was
            # fused before. NOTE(review): the lookup is order-sensitive, so
            # (A, B) and (B, A) count as different combinations — confirm
            # this is intended.
            newSpecies = Species.objects.get(parent1_id=AnimalSerializer(animalPar1).data.get('species_id'), parent2_id=AnimalSerializer(animalPar2).data.get('species_id'))
            newDict = {}
            newDict['name'] = request.data['nameAnimal']
            newDict['species_id'] = SpeciesSerializer(newSpecies).data.get('id')
            newDict['owner_id'] = UserSerializer(owner).data.get('id')
            animalSerializer = AnimalSerializer(data=newDict)
            if animalSerializer.is_valid():
                animalSerializer.save()
            # Reward the owner with 250 for reusing a known species.
            newDict = {}
            newDict['money'] = UserSerializer(owner).data.get('money') + 250
            userSerializer = UserSerializer(owner, data=newDict)
            if userSerializer.is_valid():
                userSerializer.save()
            # Fusion consumes both parents.
            deletePar1 = animalPar1.delete()
            deletePar2 = animalPar2.delete()
            return Response("A new animal has been created.", status=status.HTTP_201_CREATED)
        except ObjectDoesNotExist :
            # New-species path: register the species, then the animal.
            speciesSerializer = SpeciesSerializer(data=request.data)
            if speciesSerializer.is_valid():
                speciesSerializer.save(name=request.data['name'], parent1_id=speciesParent1, parent2_id=speciesParent2)
                newDict = {}
                newDict['name'] = request.data['nameAnimal']
                newDict['species_id'] = speciesSerializer.data['id']
                animalSerializer = AnimalSerializer(data=newDict)
                if animalSerializer.is_valid():
                    animalSerializer.save()
                # Bigger reward (500) for discovering a new species.
                newDict = {}
                newDict['money'] = UserSerializer(owner).data.get('money') + 500
                userSerializer = UserSerializer(owner, data=newDict)
                if userSerializer.is_valid():
                    userSerializer.save()
                deletePar1 = animalPar1.delete()
                deletePar2 = animalPar2.delete()
                return Response("You discovered a new Species!", status=status.HTTP_201_CREATED)
        # Reached when the new species payload failed validation.
        return Response("Error.", status=status.HTTP_400_BAD_REQUEST)
def up(q0, ser, parents):
    """Recursively collect ancestor species names of *q0* into *parents*.

    Depth-first, parent1 before parent2 (same order as the original
    duplicated branches). Base species — those with no parents of their
    own — are prefixed with "[base]". Returns *parents*, which is also
    mutated in place.
    """
    # Both parent links get identical treatment, so handle them in a loop
    # instead of two copy-pasted branches.
    for parent_id in (ser(q0).data.get('parent1_id'), ser(q0).data.get('parent2_id')):
        if parent_id is None:
            continue
        node = Species.objects.get(id=parent_id)
        node_data = ser(node).data
        if node_data.get('parent1_id') is None and node_data.get('parent2_id') is None:
            parents.append("[base]" + node_data.get('name'))
        else:
            parents.append(node_data.get('name'))
        up(node, ser, parents)
    return parents
class GetParents(viewsets.ViewSet):
    """Return the full ancestor-name list for the animal identified by *pk*."""

    def retrieve(self, request, pk):
        animal = Animal.objects.get(id=pk)
        species_id = AnimalSerializer(animal).data.get('species_id')
        species = Species.objects.get(id=species_id)
        ancestry = up(species, SpeciesSerializer, [])
        return HttpResponse(json.dumps(ancestry))
class GetPercents(viewsets.ViewSet):
    """Return each base ancestor's percentage contribution for species *pk*."""

    def retrieve(self, request, pk):
        ser = SpeciesSerializer
        species = Species.objects.get(id=pk)
        ancestors = up(species, ser, [])
        # Keep only base species and strip the "[base]" marker (6 chars).
        base_names = [name[6:] for name in ancestors if name.startswith('[base]')]
        # Duplicate names collapse to one key; each occurrence recomputes
        # the same percentage, so the result is identical to the original
        # dict()-over-generator construction, just written idiomatically.
        percents = {
            name: str(base_names.count(name) * 100 / len(base_names)) + "%"
            for name in base_names
        }
        return HttpResponse(json.dumps(percents))
class Sell(viewsets.ViewSet):
    """Create a marketplace Transaction offering one of the user's animals."""

    def create(self, request, *args, **kwargs):
        try:
            # Ownership check: raises ObjectDoesNotExist when the animal
            # does not belong to the requesting user.
            animal = Animal.objects.get(id=request.data['animal_id'], owner_id=request.data['user_id'])
            listing = {}
            listing['price'] = request.data['price']
            listing['animal_id'] = request.data['animal_id']
            listing['seller_id'] = request.data['user_id']
            ser = TransactionSerializer(data=listing)
            if ser.is_valid():
                ser.save()
            # NOTE(review): "Success" is returned even when the serializer
            # is invalid (nothing saved) — preserved from the original;
            # confirm whether a 400 is wanted in that case.
            # (Leftover debug prints 'oui'/'anmal' removed.)
            return Response("Success", status=status.HTTP_201_CREATED)
        except ObjectDoesNotExist:
            return Response("You do not own this animal", status=status.HTTP_400_BAD_REQUEST)
"chou_c@etna-alternance.net"
] | chou_c@etna-alternance.net |
b6de1b390044bb2cb3f192890dfbe90b5ed0158d | 7bb5ed00771dc28670301d1637fa6f8a1d6a8ecc | /deprecated/migrations/versions/3f0e2e913bf5_.py | 6dcdc17ab8f80f37dd073e3bf961234c7a450990 | [
"MIT"
] | permissive | andypoorman/honbot-server | 60bce3d3cf752848039ffec670fca2d130aebc4f | 271594f5ea011a9f2f0762c3bfcca7e33a7b8b3a | refs/heads/master | 2021-01-15T09:32:43.646270 | 2015-07-17T03:50:25 | 2015-07-17T03:50:25 | 39,233,626 | 0 | 0 | null | 2015-07-17T03:46:39 | 2015-07-17T03:46:39 | null | UTF-8 | Python | false | false | 506 | py | """empty message
Revision ID: 3f0e2e913bf5
Revises: 544fa414515c
Create Date: 2015-04-12 16:18:11.397154
"""
# revision identifiers, used by Alembic.
revision = '3f0e2e913bf5'
down_revision = '544fa414515c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision's schema changes (currently none)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this revision's schema changes (currently none)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
| [
"scttcper@gmail.com"
] | scttcper@gmail.com |
164de9d1ead4a87cda123b834cceb0a1e399ec7b | c8efdfc8ae26226ea2273d6d2bb875c3d32f0d6d | /PyQtArduinoSerialDemo/ArduinoSerial_PyQt_Demo_BindingUI_TRYING.py | 8f8b6b2dbf1564cdb4407e7359ac252e90fa2e30 | [
"MIT"
] | permissive | zqy950124/ArduinoPython | 945aee8fbfcf2d84ace8c77beef09eebdd57c279 | 60b4f073d1150af648114ab9fe5bda04bf20e9bd | refs/heads/dev | 2021-01-14T10:48:11.739157 | 2016-03-16T09:25:25 | 2016-03-16T09:25:25 | 54,184,697 | 1 | 0 | null | 2016-03-18T08:09:46 | 2016-03-18T08:09:46 | null | UTF-8 | Python | false | false | 1,533 | py | # -*- coding: utf-8 -*-
"""
http://stackoverflow.com/questions/1904351/python-observer-pattern-examples-tips
binding-a-pyqt-pyside-widget-to-a-local-variable-in-python
http://stackoverflow.com/questions/21992849/binding-a-pyqt-pyside-widget-to-a-local-variable-in-python#
"""
import functools
def event(func):
    """Decorator: after *func* runs, notify observers registered under its name."""
    @functools.wraps(func)
    def notifier(obj, *args, **kwargs):
        func(obj, *args, **kwargs)
        # Observed name-mangles __fireCallbacks; call it via the mangled name.
        obj._Observed__fireCallbacks(func.__name__, *args, **kwargs)
    return notifier
class Observed(object):
    """Base class whose @event-decorated methods notify registered observers."""
    def __init__(self):
        # Maps method name -> set of observer callables.
        self.__observers = {}
    def addObserver(self, methodName, observer):
        """Register *observer* to run whenever *methodName* fires."""
        self.__observers.setdefault(methodName, set()).add(observer)
    def __fireCallbacks(self, methodName, *arg, **kw):
        # Unknown names simply fire nothing.
        for callback in self.__observers.get(methodName, ()):
            callback(*arg, **kw)
class receivedmsg(Observed):
    """Demo Observed subclass; receivedmsgok fires registered observers."""
    def __init__(self):
        Observed.__init__(self)
    @event
    def receivedmsgok(self, data):
        # Observers registered under 'receivedmsgok' run after this body.
        print("Something happened with %s" % (data,))
def myCallback(data):
    """Demo observer: report the payload the event fired with."""
    print("callback fired with %s" % (data,))
# Demo: registering the observer and firing the event once.
f = receivedmsg()
f.addObserver('receivedmsgok', myCallback)
f.receivedmsgok('Hello, World')
| [
"hydro@Hydro"
] | hydro@Hydro |
0bde0292dcb9081f83d3a8ec338a6a3656271aed | 669910b7e04eed4bb0a7f175415dcc870cc864f5 | /bot/twttrbot/management/commands/like_tweet.py | d2e20977488efd66d9a44a00a5b4b3db91eea63b | [
"MIT"
] | permissive | frederikblais/Djanger | b9b72c201204d07f4521d76d31e9fa396e4a8ca1 | 1231c0572cecb9645c25523c1219acd6295c45be | refs/heads/main | 2023-08-23T07:10:54.994820 | 2021-10-26T16:20:02 | 2021-10-26T16:20:02 | 409,977,801 | 1 | 0 | MIT | 2021-10-05T15:31:02 | 2021-09-24T13:41:37 | CSS | UTF-8 | Python | false | false | 1,938 | py | from tweepy import api
from tweepy.streaming import StreamListener
from twttrbot.models import TweetLookUpBadWord, TweetLookUpWord, TweetLookUpCoordinates
from django.core.management.base import BaseCommand
from twttrbot.utils import get_auth_api
import tweepy
class MyStreamListner(tweepy.StreamListener):
    """Tweepy stream listener that likes non-retweets free of blacklisted words."""
    def __init__(self):
        # Blacklist is loaded once from the DB at construction time.
        self.bad_words = list(TweetLookUpBadWord.objects.values_list('keyword', flat = True))
        super(MyStreamListner, self).__init__()
    def on_connect(self):
        print('Connected to Twitter API.')
    def on_status(self, status):
        tweet_id = status.id
        # Truncated tweets carry their full body in extended_tweet.
        if status.truncated:
            tweet_text = status.extended_tweet['full_text']
        else:
            tweet_text = status.text
        # Skip retweets; like an original tweet only if no bad word appears.
        if not hasattr(status, 'retweeted_status'):
            for bad_words in self.bad_words:
                if bad_words in tweet_text:
                    break
            else:
                # for/else: runs only when no bad word broke the loop.
                api = get_auth_api()
                resp = api.create_favorite(tweet_id)
                print('Liked tweet: ',tweet_id)
    def on_error(self, status_code):
        # 420 = rate limited; returning False disconnects the stream.
        if status_code == 420:
            return False
class Command(BaseCommand):
    """Management command: stream tweets matching DB-configured keywords/locations."""
    def handle(self, *args, **kwargs):
        try:
            filter_words = TweetLookUpWord.objects.values_list('keyword', flat = True)
            filter_words = ', '.join(filter_words)
            filter_location = TweetLookUpCoordinates.objects.values_list('value', flat = True)
            # Flatten "lat,lon" strings into one flat list of floats, as
            # tweepy's locations parameter expects.
            filter_location = [float(cor) for loc in filter_location for cor in loc.split(',')]
            api = get_auth_api()
            stream_listner = MyStreamListner()
            stream = tweepy.Stream(auth = api.auth, listener = stream_listner, tweet_mode = 'extended')
            # Blocks and processes tweets until disconnected.
            stream.filter(track=[filter_words], locations=filter_location)
        except Exception as e:
            # Best-effort: any failure is reported, not raised.
            print(e)
def Run():
    """Placeholder entry point; only confirms execution."""
    print('Executed')
"frederik.blais22@gmail.com"
] | frederik.blais22@gmail.com |
f6a16e80cbc7755ea169fd92bc4e1faa9c3b2856 | 6aff55d0d6c140770b5fe9d8d9e7b785b9b303a4 | /src/testes_inic1-2.py | dde6f1d4cafd07affd811a78149a6d472dcc3f3a | [] | no_license | cariodaporr/devops-aula07 | f0a2ed6337be3dcc8c852389283007e9e0def73f | 1cc80aed9494cdeee63e1759f3504daef042622b | refs/heads/master | 2020-03-29T19:08:08.833849 | 2018-09-28T12:52:16 | 2018-09-28T12:52:16 | 149,825,588 | 0 | 0 | null | 2018-09-22T00:44:23 | 2018-09-21T22:39:11 | null | UTF-8 | Python | false | false | 417 | py | import jogovelha
import sys
erroInicializar = False
jogovelha.inicializar()
jogo = jogovelha.tabuleiro()
if len(jogo) != 3:
erroInicializar = True
else:
for linha in jogo:
if len(linha) != 3:
erroInicializar = True
else:
for elemento in linha:
if elemento != '.':
erroInicializar = True
if erroInicializar:
print('Erro!')
sys.exit(1)
else:
sys.exit(0)
| [
"noreply@github.com"
] | cariodaporr.noreply@github.com |
959ee0746b95977a7b889b6b12e476719844568f | 7516dfcd3d2e012d98fa3aec45aafe0e2c64ffe1 | /py/utest/utest_fsoci.py | d3c0f7ac8809b0c7282c29600f364a91671f08a5 | [] | no_license | ReiMatsuzaki/naewdy2 | 64e1c06a7eca228811c83e49eed57c9502ba1c2e | 10f0110417b6d2699a688c64cdf39df0ef6d06c2 | refs/heads/master | 2021-03-16T10:12:02.856923 | 2018-03-15T03:30:00 | 2018-03-15T03:30:00 | 115,374,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | import unittest
from numpy import sqrt
from naewdy2.fsoci import *
class TestFsoci(unittest.TestCase):
    """Tests for the sign_ai/aiaj/eij helpers imported from naewdy2.fsoci."""
    def test_sign_ai(self):
        # Index absent from the occupation list yields 0.
        self.assertAlmostEqual(0, sign_ai([1,2,3], 4))
        self.assertAlmostEqual(1, sign_ai([1,2,3], 3))
        self.assertAlmostEqual(-1, sign_ai([1,2,3], 2))
        self.assertAlmostEqual(1, sign_ai([1,2,3], 1))
    def test_aiaj(self):
        self.assertAlmostEqual(1, aiaj([1,2,3], 1, 1, [1,2,3]))
        self.assertAlmostEqual(0, aiaj([1,2,3], 4, 1, [1,2,3]))
        self.assertAlmostEqual(1, aiaj([1,2,4], 4, 3, [1,2,3]))
        self.assertAlmostEqual(-1, aiaj([1,3,4], 4, 2, [1,2,3]))
    def test_eij(self):
        # Diagonal element over identical determinants gives sqrt(2).
        self.assertAlmostEqual(sqrt(2.0),
                               eij([1,2,3], [1,2,3],
                                   1, 1,
                                   [1,2,3], [1,2,3]))
if __name__ == '__main__':
    unittest.main()
| [
"matsuzaki.rei@gmail.com"
] | matsuzaki.rei@gmail.com |
3f9f318ca5771f723ad8e961f4dca57707bd4ba7 | 1c0ec1e6e8cf526ada26b23de181dc0a7a942c23 | /code/utils/tests/test_vol_std.py | 5236002231fba569f02806bf4e78d793f4655ed9 | [] | no_license | berkeley-stat159/project-zeta | 239b6f4548cf662d352c5361f92870aed45a78a5 | 7c35423fbc1407751e1aea6aac99d5d02a82dfdc | refs/heads/master | 2021-01-10T14:07:05.705790 | 2015-12-17T18:42:28 | 2015-12-17T18:42:28 | 43,347,788 | 0 | 19 | null | 2015-12-13T21:31:31 | 2015-09-29T05:52:04 | Jupyter Notebook | UTF-8 | Python | false | false | 655 | py | """ Tests for vol_std function in diagnostics module
Run with:
nosetests test_vol_std.py
"""
import numpy as np
from .. import diagnostics
from numpy.testing import assert_almost_equal, assert_array_equal
def test_vol_std():
    """vol_std on a 4D image must equal np.std across voxels per volume."""
    # We make a fake 4D image
    shape_3d = (2, 3, 4)
    V = np.prod(shape_3d)
    T = 10 # The number of 3D volumes
    # Make a 2D array that we will reshape to 4D
    arr_2d = np.random.normal(size=(V, T))
    expected_stds = np.std(arr_2d, axis=0)
    # Reshape to 4D
    arr_4d = np.reshape(arr_2d, shape_3d + (T,))
    actual_stds = diagnostics.vol_std(arr_4d)
    assert_almost_equal(expected_stds, actual_stds)
| [
"tcchenbtx@gmail.com"
] | tcchenbtx@gmail.com |
c6887a8ffe4aaa0df0666ed9ab5b8c601c225868 | 941ace80571b53f53ab4e1f44d7b3ee9300e6a84 | /chapter02/lxml_example.py | 3bf1a23c01ccdd4989a11da724357915b61829e3 | [
"MIT"
] | permissive | qigezai/python-scrap | 81d3855caba095cab36f204a6b74c55f43cb7f15 | 3a9ad37a94008a8071b84e64d90c46f59580cca0 | refs/heads/master | 2021-10-10T06:26:18.023662 | 2019-01-07T14:46:19 | 2019-01-07T14:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/5 下午4:50
import urllib2
import lxml.html
def scrape(html):
    """Extract one table-cell value from a country page (Python 2 / lxml).

    NOTE(review): the selector targets the 'neighbours' row although the
    variable is named `area` — confirm which field is intended.
    """
    tree = lxml.html.fromstring(html)
    td = tree.cssselect('tr#places_neighbours__row > td.w2p_fw')[0]
    area = td.text_content()
    return area
if __name__ == '__main__':
    # Demo: fetch the UK page and print the scraped value.
    html = urllib2.urlopen('http://example.webscraping.com/view/United-Kingdom-239').read()
    print scrape(html)
"17346503142@163.com"
] | 17346503142@163.com |
10c3a4d5e3d2f35da492858f8236fd8081029116 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/R/robertionita/bookyfy.py | 9069f400e7f3e52096364d8732181bcdb8bb1ad9 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | # bestsellers from the kindle book store
import scraperwiki
import lxml.html
import time
import re
# Scrape pages 1-5 of the Kindle best-seller list (Python 2 / scraperwiki).
for x in range(1,6):
    html = scraperwiki.scrape("http://www.amazon.com/Best-Sellers-Kindle-Store-eBooks/zgbs/digital-text/154606011/ref=zg_bs_154606011_pg_" + str(x) +
                              "?_encoding=UTF8&pg=" + str(x))
    root = lxml.html.fromstring(html)
    pos = 0
    # One div.zg_itemImmersion per listed book.
    for el in root.cssselect("div.zg_itemImmersion"):
        title = el.cssselect("div.zg_title a")[0].text_content()
        link = el.cssselect("div.zg_title a")[0].attrib['href'].rstrip('\n') # Strip newline characters, funky shit happens if you don't
        #rank = el.cssselect("span.zg_rankNumber")[0].text_content()
        price = el.cssselect("strong.price")[0].text_content()
        #release = el.cssselect("div.zg_releaseDate")[0].text_content()
        author = el.cssselect("div.zg_byline")[0].text_content()
        days_in_list = el.cssselect("td.zg_daysInList")[0].text_content()
        pos += 1
        # Fetch the book's detail page to read its sales rank.
        booklink = scraperwiki.scrape(link)
        bookpage = lxml.html.fromstring(booklink)
        def get_rank(bookpage):
            ## For each book detail page, select the body element for scraping wizardy
            for el in bookpage.cssselect("body"):
                ## Scraping rank
                rank = el.cssselect("li#SalesRank b")[0].tail
                ## Extract rank number from book page using regex
                re1='.*?' # Non-greedy match on filler
                re2='(\\d+)' # Integer Number 1
                rg = re.compile(re1+re2,re.IGNORECASE|re.DOTALL)
                m = rg.search(rank)
                if m:
                    rank=m.group(1)
                    #print "("+int1+")"+"\n"
                    print "Rank of book:"
                    print rank
                #print lxml.html.tostring(rank)
                return rank
        rank = get_rank(bookpage)
        print rank
        # NOTE(review): get_rank is called again below, re-parsing the same
        # page and printing twice — presumably unintentional; confirm.
        record = {"Title" : title,
                "Author" : author,
                "Link" : link,
                "Ranking" : get_rank(bookpage),
                "Price" : price,
                "sdate" : time.strftime( "%Y-%m-%d" )
                }
        scraperwiki.sqlite.save(unique_keys=["sdate"], data=record)
"pallih@kaninka.net"
] | pallih@kaninka.net |
dbddf5f34bf33ff7cb4facd928b2c338fa2e36bc | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j20204+0704/sdB_GALEX_J20204+0704_lc.py | 9e2b035b6d7b472d8473d414942cee17af805004 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[305.1135,7.070683], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J20204+0704 /sdB_GALEX_J20204+0704_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
340bb41906ca49e24132a54983fe02c5301b97dd | cb06f12739986599739eaa6b7f4c711d1a596540 | /bureau/middlewares.py | 9fe5230968f94662338de3e2a4fb6a469370fccc | [] | no_license | rusrom/scrapy_5_spiders_in_1_project | 741d1e4bb5b8c9ba8f5e7bc8a7c1ecf572a55ad7 | b831e36017a8c66a75cbce6f65a31cdcb35752f3 | refs/heads/master | 2020-04-10T18:01:48.197440 | 2018-12-10T18:22:25 | 2018-12-10T18:22:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,597 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class BureauSpiderMiddleware(object):
    """Spider middleware for the bureau project.

    Every hook is a pass-through, so Scrapy behaves as if this middleware
    did not modify anything. See
    https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also hooks the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Each response entering the spider; None means "continue".
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider unchanged.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling for spider exceptions.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests unchanged (no response here).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class BureauDownloaderMiddleware(object):
    """Downloader middleware for the bureau project.

    Every hook is a pass-through, so Scrapy behaves as if this middleware
    did not modify anything. See
    https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also hooks the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Each outgoing request; None lets the download proceed normally.
        return None

    def process_response(self, request, response, spider):
        # Each downloaded response; returned unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No special handling for download exceptions.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"rusrom@gmail.com"
] | rusrom@gmail.com |
09df6e58f039ee59279116190dc1f7b1dd20fdb3 | 6fa9b02729c8e9dac82d541ab2e88b9ba2f1d747 | /climate-app.py | b1463b57488617f7518dabaa52b716d7c6a80245 | [] | no_license | jferna73/sql-alc-2nd-attempt | df14573caa45c75bc1e92c48b90224298ab9e328 | ef9f9497b0659f8e669c7dd02d2470100ede261d | refs/heads/master | 2022-06-08T13:08:34.075385 | 2020-04-29T22:15:41 | 2020-04-29T22:15:41 | 260,057,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,800 | py | import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Reflect the SQLite schema into ORM classes.
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={'check_same_thread': False})
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
#weather app
app = Flask(__name__)
# Find the most recent observation date and derive the date one year back;
# both are kept as 'YYYY-MM-DD' strings for the route queries below.
latestDate = (session.query(Measurement.date)
              .order_by(Measurement.date.desc())
              .first())
latestDate = list(np.ravel(latestDate))[0]
latestDate = dt.datetime.strptime(latestDate, '%Y-%m-%d')
latestYear = int(dt.datetime.strftime(latestDate, '%Y'))
latestMonth = int(dt.datetime.strftime(latestDate, '%m'))
latestDay = int(dt.datetime.strftime(latestDate, '%d'))
yearBefore = dt.date(latestYear, latestMonth, latestDay) - dt.timedelta(days=365)
yearBefore = dt.datetime.strftime(yearBefore, '%Y-%m-%d')
# Bug fix: the route was registered as "6909", which is not a valid Flask
# URL rule (rules must start with "/"); "/" restores the intended home page.
@app.route("/")
def home():
    """Landing page listing every available API route."""
    return (f"Welcome to Surf's Up!: Hawai'i Climate API<br/>"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
            f"Available Routes:<br/>"
            f"/api/v1.0/stations ~~~~~ a list of all weather observation stations<br/>"
            f"/api/v1.0/precipitaton ~~ the latest year of preceipitation data<br/>"
            f"/api/v1.0/temperature ~~ the latest year of temperature data<br/>"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
            f"~~~ datesearch (yyyy-mm-dd)<br/>"
            f"/api/v1.0/datesearch/2015-05-30 ~~~~~~~~~~~ low, high, and average temp for date given and each date after<br/>"
            f"/api/v1.0/datesearch/2015-05-30/2016-01-30 ~~ low, high, and average temp for date given and each date up to and including end date<br/>"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br/>"
            f"~ data available from 2010-01-01 to 2017-08-23 ~<br/>"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
@app.route("/api/v1.0/stations")
def stations():
    """Return the names of all weather observation stations as JSON."""
    station_rows = session.query(Station.name).all()
    station_names = list(np.ravel(station_rows))
    return jsonify(station_names)
@app.route("/api/v1.0/precipitaton")
def precipitation():
    """Return the latest year of precipitation observations as JSON."""
    rows = (session.query(Measurement.date, Measurement.prcp, Measurement.station)
            .filter(Measurement.date > yearBefore)
            .order_by(Measurement.date)
            .all())
    # One dict per observation: {date: prcp, "Station": station}.
    payload = [{row.date: row.prcp, "Station": row.station} for row in rows]
    return jsonify(payload)
@app.route("/api/v1.0/temperature")
def temperature():
    """Return the latest year of temperature observations as JSON."""
    rows = (session.query(Measurement.date, Measurement.tobs, Measurement.station)
            .filter(Measurement.date > yearBefore)
            .order_by(Measurement.date)
            .all())
    # One dict per observation: {date: tobs, "Station": station}.
    payload = [{row.date: row.tobs, "Station": row.station} for row in rows]
    return jsonify(payload)
@app.route('/api/v1.0/datesearch/<startDate>')
def start(startDate):
    """Return min/avg/max temperature for each date on or after *startDate*."""
    columns = [Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    rows = (session.query(*columns)
            .filter(func.strftime("%Y-%m-%d", Measurement.date) >= startDate)
            .group_by(Measurement.date)
            .all())
    summary = [
        {"Date": day, "Low Temp": low, "Avg Temp": avg, "High Temp": high}
        for day, low, avg, high in rows
    ]
    return jsonify(summary)
@app.route('/api/v1.0/datesearch/<startDate>/<endDate>')
def startEnd(startDate, endDate):
    """Return min/avg/max temperature for each date in [startDate, endDate]."""
    columns = [Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    rows = (session.query(*columns)
            .filter(func.strftime("%Y-%m-%d", Measurement.date) >= startDate)
            .filter(func.strftime("%Y-%m-%d", Measurement.date) <= endDate)
            .group_by(Measurement.date)
            .all())
    summary = [
        {"Date": day, "Low Temp": low, "Avg Temp": avg, "High Temp": high}
        for day, low, avg, high in rows
    ]
    return jsonify(summary)

if __name__ == "__main__":
    app.run(debug=True)
"noreply@github.com"
] | jferna73.noreply@github.com |
db44601c88fb69a19fbad8e5a78f33fb3e10f72e | b4e9730a19748e8da7c1e2fa90052a5aedbc468b | /data_manager.py | 6bbe7911134f2d3861d00a803243fd22382bd7e5 | [] | no_license | matebalogh11/Application_process_sql_basic | bd086c04a79cc607f89629a99053ca284fa6ec75 | 2163df8fdf07e0b39454ad247e3885b1e066145e | refs/heads/master | 2020-12-30T11:58:48.806222 | 2017-06-08T09:04:21 | 2017-06-08T09:04:21 | 91,439,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | import psycopg2
from login import DB, USER, HOST, PW
# This module is for import only!
def data_handler(SQL):
    """Connect to the configured database, run *SQL*, return (rows, header).

    Returns None (implicitly) when the connection or query fails.
    NOTE: *SQL* is executed verbatim — pass only trusted statements.
    """
    # Bug fix: initialize conn so the finally block cannot raise
    # NameError when psycopg2.connect() itself fails.
    conn = None
    try:
        connection_str = "dbname='{}' user='{}' host='{}' password='{}'".format(DB, USER, HOST, PW)
        conn = psycopg2.connect(connection_str)
        cursor = conn.cursor()
        cursor.execute(SQL)
        result = cursor.fetchall()
        # Column names in result order.
        header = [desc[0] for desc in cursor.description]
        return result, header
    except Exception:
        print("Uh oh.. cannot connect to the database")
    finally:
        if conn:
            conn.close()
| [
"bmate11@bmate11"
] | bmate11@bmate11 |
ebbfb22b0502c984209cde687a2b0e7a7740642b | 3c3e9d6134182174102e6675394bd7cdc0192f21 | /venv/bin/django-admin.py | 56155bbce94deabdd81af4847a6988c8b1f013a4 | [] | no_license | fersoftware/django_resumido | 20ea406e248cc9180148abd6e59de56ebd73e8aa | 04f3f95201ffce604128d4eda0c9d72243badfce | refs/heads/main | 2023-08-24T02:43:47.310209 | 2021-10-19T18:29:34 | 2021-10-19T18:29:34 | 419,040,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | #!/home/br1005/Projetos/django/teste/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"fersoftware@gmail.com"
] | fersoftware@gmail.com |
1bd69b21cade9d9e9b971691a362536e462ed666 | 096548fef4583fa9749679f7d8e32b7768b6d7d1 | /homeassistant/components/advantage_air/__init__.py | cd676728e0db25ef1c719fbc8a9a9389044cc5a6 | [
"Apache-2.0"
] | permissive | Cadair/home-assistant | 7361b38807d54439c4d21cc57a674ea320b820d5 | 30b454ddb7b2c88b2ec26cb305b3ebcba71c0e4f | refs/heads/dev | 2023-01-19T16:58:25.604819 | 2020-10-17T07:40:43 | 2020-10-17T07:40:43 | 79,675,590 | 0 | 0 | Apache-2.0 | 2023-01-13T06:03:41 | 2017-01-21T22:01:19 | Python | UTF-8 | Python | false | false | 3,195 | py | """Advantage Air climate integration."""
from datetime import timedelta
import logging
from advantage_air import ApiError, advantage_air
from homeassistant.const import CONF_IP_ADDRESS, CONF_PORT
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import ADVANTAGE_AIR_RETRY, DOMAIN
ADVANTAGE_AIR_SYNC_INTERVAL = 15
ADVANTAGE_AIR_PLATFORMS = ["binary_sensor", "climate", "cover"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """Set up AdvantageAir."""
    # Only prepares the shared storage; real setup happens per config entry.
    hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass, config_entry):
    """Set up AdvantageAir Config."""
    ip_address = config_entry.data[CONF_IP_ADDRESS]
    port = config_entry.data[CONF_PORT]
    api = advantage_air(
        ip_address,
        port=port,
        session=async_get_clientsession(hass),
        retry=ADVANTAGE_AIR_RETRY,
    )
    async def async_get():
        # Poll callback for the coordinator; API errors become UpdateFailed.
        try:
            return await api.async_get()
        except ApiError as err:
            raise UpdateFailed(err) from err
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="Advantage Air",
        update_method=async_get,
        update_interval=timedelta(seconds=ADVANTAGE_AIR_SYNC_INTERVAL),
    )
    async def async_change(change):
        # Push a change to the unit, then refresh so entities see new state.
        try:
            if await api.async_change(change):
                await coordinator.async_refresh()
        except ApiError as err:
            _LOGGER.warning(err)
    # Initial fetch; abort setup (and retry later) if no data came back.
    await coordinator.async_refresh()
    if not coordinator.data:
        raise ConfigEntryNotReady
    hass.data[DOMAIN][config_entry.entry_id] = {
        "coordinator": coordinator,
        "async_change": async_change,
    }
    # Forward the entry to each entity platform (binary_sensor/climate/cover).
    for platform in ADVANTAGE_AIR_PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )
    return True
class AdvantageAirEntity(CoordinatorEntity):
    """Parent class for Advantage Air Entities."""
    def __init__(self, instance, ac_key, zone_key=None):
        """Initialize common aspects of an Advantage Air sensor."""
        super().__init__(instance["coordinator"])
        self.async_change = instance["async_change"]
        # ac_key selects the air-con unit; zone_key (optional) a zone in it.
        self.ac_key = ac_key
        self.zone_key = zone_key
    @property
    def _ac(self):
        # Info dict for this entity's air-con unit.
        return self.coordinator.data["aircons"][self.ac_key]["info"]
    @property
    def _zone(self):
        # Zone dict, or None for unit-level entities without a zone.
        if not self.zone_key:
            return None
        return self.coordinator.data["aircons"][self.ac_key]["zones"][self.zone_key]
    @property
    def device_info(self):
        """Return parent device information."""
        return {
            "identifiers": {(DOMAIN, self.coordinator.data["system"]["rid"])},
            "name": self.coordinator.data["system"]["name"],
            "manufacturer": "Advantage Air",
            "model": self.coordinator.data["system"]["sysType"],
            "sw_version": self.coordinator.data["system"]["myAppRev"],
        }
| [
"noreply@github.com"
] | Cadair.noreply@github.com |
d68194f84de7e3fd692b64e496acb70bb4ef8fc7 | 0aa50fb70906599514f237dbe050045ede4daaaf | /scripts/deploy.py | 9f65c73f971a1f86af281587b9974edf86ac0c84 | [] | no_license | I-M-F/crowdfund_brownie | 2e9ddae4cc2748968204f6849e281b73582e1998 | 64f3a83b0d32af31c6b1afcbf100a527151cdf15 | refs/heads/main | 2023-08-05T02:08:09.135895 | 2021-09-25T12:45:22 | 2021-09-25T12:45:22 | 410,263,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | from brownie import FundMe, MockV3Aggregator, network, config
from scripts.helpful_scripts import deploy_mock, get_account, LOCAL_BLOCKCHAIN_ENV
def deploy_fund_me():
    """Deploy FundMe, wiring in the ETH/USD price feed for the active network."""
    account = get_account()
    active_network = network.show_active()
    # On a persistent network (e.g. rinkeby) use the configured feed address;
    # on a local development chain deploy a mock aggregator and use that.
    if active_network in LOCAL_BLOCKCHAIN_ENV:
        deploy_mock()
        price_feed_address = MockV3Aggregator[-1].address
    else:
        price_feed_address = config["networks"][active_network]["eth_usd_price_feed"]
    fund_me = FundMe.deploy(
        price_feed_address,
        {"from": account},
        publish_source=config["networks"][active_network].get("verify"),
    )
    print(f"Contract deployed to {fund_me.address}")
    return fund_me
def main():
    # Brownie script entry point: `brownie run deploy` calls main().
    deploy_fund_me()
| [
"matimu.f.ignatius@gmail.com"
] | matimu.f.ignatius@gmail.com |
808ee195b759a16cb41071c38fd23df333d355a7 | c25b4125b76654452fc2d5cc2f0f7a47643df177 | /setup.py | bfee8ef032e3226e132a395cb98d6a4c1d1398ae | [
"MIT"
] | permissive | dfjsdkfj/grparks | 416b7fdd68a533573c5f4bb53dd7bf748a80c221 | 365717804fafb27c6e3d65322b6fd6b2a9315aa7 | refs/heads/master | 2020-12-24T09:02:01.982187 | 2016-02-02T20:40:06 | 2016-02-02T20:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | #!/usr/bin/env python
"""
Setup script for ParkFinder.
"""
import setuptools
from parks import __project__, __version__
import os
# Read the long-description parts with context managers so each file is
# closed promptly (the original left three file handles to the garbage
# collector, which triggers ResourceWarning under `python -W error`).
if os.path.exists('README.rst'):
    with open('README.rst') as readme_file:
        README = readme_file.read()
else:
    README = ""  # a placeholder, readme is generated on release

with open('CHANGES.md') as changes_file:
    CHANGES = changes_file.read()

with open('requirements.txt') as requirements_file:
    REQUIREMENTS = requirements_file.readlines()

setuptools.setup(
    name=__project__,
    version=__version__,
    description="Find and validate park data on OpenStreetMap.",
    url='https://github.com/friendlycode/gr-parks',
    author='Jace Browning',
    author_email='jacebrowning@gmail.com',
    packages=setuptools.find_packages(),
    entry_points={'console_scripts': []},
    long_description=(README + '\n' + CHANGES),
    license='MIT',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=REQUIREMENTS,
)
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
b05928c46dc64a0fb59f4a4f646790f737b40525 | 12abb1f44d47289b9f847b691be62d2c5c09b77b | /devel_cb/lib/python2.7/dist-packages/rosserial_msgs/srv/__init__.py | ebb897a688b185e7414ecbb5c49f40b98d74a7aa | [] | no_license | riplaboratory/kanaloaWS | 51b0195f2ab34de7303d38fa8a7a6365d65c7ad2 | a1049be4c9c6455ea631a031b4723abcfc13cbdd | refs/heads/master | 2020-04-01T19:44:18.672147 | 2018-10-18T08:30:00 | 2018-10-18T08:30:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | /home/rip-acer-vn7-591g-1/catkin_ws/devel_cb/.private/rosserial_msgs/lib/python2.7/dist-packages/rosserial_msgs/srv/__init__.py | [
"brennanyama@gmail.com"
] | brennanyama@gmail.com |
55d4161efc17fe8eaad018ce8a5260fe47830af7 | 7e18e7f54d344e6d785f3021f4c046b53088159f | /src/routers/hero.py | c037a80b7d7711f562cc23a049604d62741753b9 | [] | no_license | raedinurdiansyah/bmg-mini-project | 8242c9961c400b3104598fbe40978c263c8c592a | 550b6b971c90c38c03b0368da71ba4d9abb12e9f | refs/heads/main | 2023-07-30T21:23:09.089243 | 2021-09-20T16:33:22 | 2021-09-20T16:33:22 | 408,513,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | from http import HTTPStatus
from flask import Blueprint, request
from src.schemas.hero import GetHeroSchema
from src.services.hero import HeroServices
from src.utils.handle_response import handle_response
hero_blueprint = Blueprint("hero", __name__, url_prefix="/v1/hero")
@hero_blueprint.route("", methods=("GET",))
def get_hero():
    """GET /v1/hero — return hero details selected by the ``id`` query argument."""
    try:
        query = GetHeroSchema(only=["id"]).load(data=request.args)
        outcome = HeroServices(**query).get_hero_detail()
        payload = GetHeroSchema(many=True).dump(outcome.data)
        return handle_response(
            status=outcome.status,
            message=outcome.message,
            data=payload,
        )
    except Exception as exc:
        # Route-level boundary: surface any failure as a 500 response body.
        return handle_response(
            error=[{"code": 500, "message": str(exc)}],
            status=HTTPStatus.INTERNAL_SERVER_ERROR,
        )
| [
"raedi.nurdiansyah@akseleran.com"
] | raedi.nurdiansyah@akseleran.com |
436421afd17bad489bf8d340575d1e1779242c48 | 01926e4c73945049861b385b9330058187857941 | /data.py | 0ba0c68dfcfdd2d43e6687fe9fe1258a377966c6 | [] | no_license | HassanHeydariNasab/poento | 96f0aa16c2473e31ab1d8ab566c944dee169fa52 | 67660dbf6938dbd94bdbee6f5611c476ec93e735 | refs/heads/master | 2020-04-15T18:14:04.179739 | 2019-01-18T18:13:01 | 2019-01-18T18:13:01 | 164,906,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | COUNTRIES = [
'', 'AF', 'AX', 'AL', 'DZ', 'AS', 'AD', 'AO', 'AI', 'AQ', 'AG', 'AR', 'AM',
'AW', 'AU', 'AT', 'AZ', 'BS', 'BH', 'BD', 'BB', 'BY', 'BE', 'BZ', 'BJ',
'BM', 'BT', 'BO', 'BQ', 'BA', 'BW', 'BV', 'BR', 'IO', 'BN', 'BG', 'BF',
'BI', 'KH', 'CM', 'CA', 'CV', 'KY', 'CF', 'TD', 'CL', 'CN', 'CX', 'CC',
'CO', 'KM', 'CG', 'CD', 'CK', 'CR', 'CI', 'HR', 'CU', 'CW', 'CY', 'CZ',
'DK', 'DJ', 'DM', 'DO', 'EC', 'EG', 'SV', 'GQ', 'ER', 'EE', 'ET', 'FK',
'FO', 'FJ', 'FI', 'FR', 'GF', 'PF', 'TF', 'GA', 'GM', 'GE', 'DE', 'GH',
'GI', 'GR', 'GL', 'GD', 'GP', 'GU', 'GT', 'GG', 'GN', 'GW', 'GY', 'HT',
'HM', 'VA', 'HN', 'HK', 'HU', 'IS', 'IN', 'ID', 'IR', 'IQ', 'IE', 'IM',
'IL', 'IT', 'JM', 'JP', 'JE', 'JO', 'KZ', 'KE', 'KI', 'KP', 'KR', 'KW',
'KG', 'LA', 'LV', 'LB', 'LS', 'LR', 'LY', 'LI', 'LT', 'LU', 'MO', 'MK',
'MG', 'MW', 'MY', 'MV', 'ML', 'MT', 'MH', 'MQ', 'MR', 'MU', 'YT', 'MX',
'FM', 'MD', 'MC', 'MN', 'ME', 'MS', 'MA', 'MZ', 'MM', 'NA', 'NR', 'NP',
'NL', 'NC', 'NZ', 'NI', 'NE', 'NG', 'NU', 'NF', 'MP', 'NO', 'OM', 'PK',
'PW', 'PS', 'PA', 'PG', 'PY', 'PE', 'PH', 'PN', 'PL', 'PT', 'PR', 'QA',
'RE', 'RO', 'RU', 'RW', 'BL', 'SH', 'KN', 'LC', 'MF', 'PM', 'VC', 'WS',
'SM', 'ST', 'SA', 'SN', 'RS', 'SC', 'SL', 'SG', 'SX', 'SK', 'SI', 'SB',
'SO', 'ZA', 'GS', 'SS', 'ES', 'LK', 'SD', 'SR', 'SJ', 'SZ', 'SE', 'CH',
'SY', 'TW', 'TJ', 'TZ', 'TH', 'TL', 'TG', 'TK', 'TO', 'TT', 'TN', 'TR',
'TM', 'TC', 'TV', 'UG', 'UA', 'AE', 'GB', 'US', 'UM', 'UY', 'UZ', 'VU',
'VE', 'VN', 'VG', 'VI', 'WF', 'EH', 'YE', 'ZM', 'ZW'
]
| [
"hsn6@tuta.io"
] | hsn6@tuta.io |
5a4be4a8726e17d57be2935c9640826a09185a18 | 82bc0412fb50bbb49a1565d19f11d6045d52e0e9 | /rr.py | 2efde733adde356be5f960f1a357a53417b0fe09 | [] | no_license | garrett-partenza-us/cpu-simulator | 8c35d36f38d71030cd0d225d87ba89264e799d00 | cbc094ef1bb8defb44b1da54feff25587093a574 | refs/heads/main | 2023-01-22T09:47:45.408236 | 2020-12-06T02:10:17 | 2020-12-06T02:10:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | #SRTN simulator
def RR(data):
    """Simulate round-robin style scheduling with a one-tick quantum.

    Each process is a ``[pid, arrival_time, burst_time]`` list.  Every sweep,
    each process that has arrived by the sweep's start time runs for one tick.
    Returns the per-tick execution log as ``[pid, remaining_burst]`` pairs,
    with ``[None, None]`` recorded for idle ticks.

    Fix: the original comment promised a deep copy of ``data`` but the
    function mutated the caller's rows; rows are now copied so the caller's
    list is left intact.  The returned log is unchanged.
    """
    # Copy every row so the caller's process table is not modified in place.
    data = [list(process) for process in data]
    time, results = 0, []
    while True:
        # Processes that have arrived by the start of this sweep.
        ready = [row[0] for row in data if row[1] <= time]
        if not ready:
            # CPU idle: log an empty tick and advance the clock.
            results.append([None, None])
            time += 1
        else:
            # Give every ready process one time unit, in table order.
            run_queue = [row for row in data if row[0] in ready]
            for process in run_queue:
                for row in data:
                    if row[0] == process[0]:
                        row[2] -= 1
                        results.append([row[0], row[2]])
                        time += 1
            # Drop processes whose burst time is exhausted.
            data = [row for row in data if row[2] > 0]
        # All processes complete (or none were supplied): stop.
        if not data:
            break
    return results
| [
"gparre1@students.towson.edu"
] | gparre1@students.towson.edu |
0f9135810627817d11f44817e880b6272d23f56a | 3b50605ffe45c412ee33de1ad0cadce2c5a25ca2 | /python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py | bd5b2c77983b93933e91520ae3ae0520e160ed9f | [
"Apache-2.0"
] | permissive | Superjomn/Paddle | f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1 | 7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188 | refs/heads/develop | 2023-02-04T20:27:54.244843 | 2023-01-26T15:31:14 | 2023-01-26T15:31:14 | 66,896,049 | 4 | 1 | Apache-2.0 | 2023-04-14T02:29:52 | 2016-08-30T01:45:54 | C++ | UTF-8 | Python | false | false | 8,965 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F
class TestInferencePartialFeed(unittest.TestCase):
    """Check ParallelExecutor inference when the feed covers only some places.

    Feeds either a single batch split across devices (``use_split``) or an
    explicit per-place feed list, and verifies merged vs. unmerged fetch
    results agree and match a reference relu computed with numpy.
    """
    def setUp(self):
        # Number of repetitions per configuration and per-sample feature size.
        self.iterations = 10
        self.size = 10
    def run_network(self, places, use_split, has_persistable):
        """Build a tiny relu network and exercise partial feeding on *places*."""
        startup_prog = fluid.Program()
        main_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name='x', shape=[None, self.size], dtype='float32')
            y = fluid.data(name='y', shape=[None, self.size], dtype='float32')
            # `lr` models a learning-rate-like input; the persistable variant
            # is broadcast rather than split when there are fewer samples
            # than places (see the branches below).
            if has_persistable:
                lr = fluid.data(name='lr', shape=[1], dtype='float32')
                lr.persistable = True
            else:
                lr = fluid.data(name='lr', shape=[None], dtype='float32')
            relu_x = F.relu(x)
            relu_y = F.relu(y)
            relu_lr = F.relu(lr)
        exe = fluid.Executor(places[0])
        exe.run(startup_prog)
        prog = fluid.CompiledProgram(main_prog).with_data_parallel(
            places=places
        )
        # Random float32 tensor in [-1, 1) of the given shape.
        gen_random = lambda shape: np.random.uniform(
            low=-1.0, high=1.0, size=shape
        ).astype('float32')
        # The network is a plain relu, so numpy's maximum(0, feed) is the oracle.
        assert_result = lambda feed, result: np.testing.assert_array_equal(
            np.maximum(0, feed), result
        )
        def assert_merged_unmerged(merged, unmerged):
            # Concatenating the per-place (unmerged) outputs must reproduce
            # the merged fetch result.
            unmerged = np.concatenate(unmerged, axis=0)
            np.testing.assert_array_equal(merged, unmerged)
        def feed_split_test():
            # Single feed dict that the executor splits across places;
            # place_num ranges past len(places) to cover uneven splits.
            for place_num in range(1, len(places) * 3):
                x_np = gen_random([place_num, self.size])
                y_np = gen_random([place_num, self.size])
                if not lr.persistable or place_num <= len(places):
                    lr_np = gen_random([place_num])
                else:
                    lr_np = gen_random([1])
                feed = {x.name: x_np, y.name: y_np, lr.name: lr_np}
                fetch_list = [relu_x, relu_y, relu_lr]
                relu_x_np, relu_y_np, relu_lr_np = exe.run(
                    prog, feed=feed, fetch_list=fetch_list, return_merged=True
                )
                (
                    relu_x_np_unmerged,
                    relu_y_np_unmerged,
                    relu_lr_np_unmerged,
                ) = exe.run(
                    prog, feed=feed, fetch_list=fetch_list, return_merged=False
                )
                assert_merged_unmerged(relu_x_np, relu_x_np_unmerged)
                assert_merged_unmerged(relu_y_np, relu_y_np_unmerged)
                assert_merged_unmerged(relu_lr_np, relu_lr_np_unmerged)
                assert_result(x_np, relu_x_np)
                assert_result(y_np, relu_y_np)
                if not lr.persistable or place_num <= len(places):
                    assert_result(lr_np, relu_lr_np)
                else:
                    # Persistable lr was fed as a single value: every place
                    # should see relu of that same scalar.
                    expected_relu_lr_np = max(lr_np[0], 0)
                    self.assertTrue(np.all(expected_relu_lr_np == relu_lr_np))
        def feed_list_test():
            # Explicit list of per-place feed dicts (one sample per place).
            for place_num in range(1, len(places) + 1):
                x_np_list = []
                y_np_list = []
                lr_np_list = []
                feed_list = []
                for _ in range(place_num):
                    x_np = gen_random([1, self.size])
                    y_np = gen_random([1, self.size])
                    lr_np = gen_random([1])
                    x_np_list.append(x_np)
                    y_np_list.append(y_np)
                    lr_np_list.append(lr_np)
                    feed_list.append(
                        {x.name: x_np, y.name: y_np, lr.name: lr_np}
                    )
                fetch_list = [relu_x, relu_y, relu_lr]
                relu_x_np, relu_y_np, relu_lr_np = exe.run(
                    prog,
                    feed=feed_list,
                    fetch_list=fetch_list,
                    return_merged=True,
                )
                (
                    relu_x_np_unmerged,
                    relu_y_np_unmerged,
                    relu_lr_np_unmerged,
                ) = exe.run(
                    prog,
                    feed=feed_list,
                    fetch_list=fetch_list,
                    return_merged=False,
                )
                assert_merged_unmerged(relu_x_np, relu_x_np_unmerged)
                assert_merged_unmerged(relu_y_np, relu_y_np_unmerged)
                assert_merged_unmerged(relu_lr_np, relu_lr_np_unmerged)
                # Compare against the concatenation of the per-place feeds.
                x_np = np.concatenate(x_np_list)
                y_np = np.concatenate(y_np_list)
                lr_np = np.concatenate(lr_np_list)
                assert_result(x_np, relu_x_np)
                assert_result(y_np, relu_y_np)
                assert_result(lr_np, relu_lr_np)
        for _ in range(self.iterations):
            if use_split:
                feed_split_test()
            else:
                feed_list_test()
    def test_main(self):
        # Run every combination of device set, persistable flag and feed style.
        places = [fluid.cpu_places(4)]
        if fluid.is_compiled_with_cuda():
            places.append(fluid.cuda_places())
        for p in places:
            for has_persistable in [False, True]:
                for use_split in [False, True]:
                    self.run_network(
                        p, use_split=use_split, has_persistable=has_persistable
                    )
class TestInferencePartialFeedUsingDataLoader(unittest.TestCase):
    """Check partial feeding when batches come from a DataLoader.

    With drop_last and multiple places, trailing batches that cannot fill
    every place are dropped, so fewer batches than generated may run.
    """
    def setUp(self):
        self.epoch_num = 3
        self.batch_num = 101  # a prime number, so it never divides evenly
        self.batch_size = 32
    def create_reader(self):
        """Return a generator factory yielding batch_num random batches."""
        def __impl__():
            for _ in range(self.batch_num):
                yield np.random.random([self.batch_size, 1]).astype('float32'),
        return __impl__
    def run_network(self, iterable, use_cuda, drop_last):
        """Run a tiny fc network fed by a DataLoader and count executed batches."""
        x = fluid.data(shape=[None, 1], name='x', dtype='float32')
        places = fluid.cuda_places() if use_cuda else fluid.cpu_places(4)
        loader = fluid.io.DataLoader.from_generator(
            feed_list=[x], capacity=16, iterable=iterable, drop_last=drop_last
        )
        y = paddle.static.nn.fc(x, size=10)
        loss = paddle.mean(y)
        exe = fluid.Executor(places[0])
        exe.run(fluid.default_startup_program())
        prog = fluid.CompiledProgram(
            fluid.default_main_program()
        ).with_data_parallel(places=places, loss_name=loss.name)
        # Iterable loaders need the places up front; non-iterable ones don't.
        loader.set_batch_generator(
            self.create_reader(), places=places if iterable else None
        )
        for _ in range(self.epoch_num):
            actual_batch_num = 0
            if loader.iterable:
                # Iterable mode: the loader yields ready-made feed dicts.
                for feed_data in loader():
                    (x_data,) = exe.run(prog, feed=feed_data, fetch_list=[x])
                    self.assertEqual(x_data.shape[0] % self.batch_size, 0)
                    self.assertTrue(x_data.shape[0] != 0)
                    actual_batch_num += int(x_data.shape[0] / self.batch_size)
            else:
                # Non-iterable mode: start/reset the loader and run until
                # the executor signals end-of-data.
                loader.start()
                try:
                    while True:
                        (x_data,) = exe.run(prog, fetch_list=[x])
                        self.assertEqual(x_data.shape[0] % self.batch_size, 0)
                        self.assertTrue(x_data.shape[0] != 0)
                        actual_batch_num += int(
                            x_data.shape[0] / self.batch_size
                        )
                except fluid.core.EOFException:
                    loader.reset()
            if not drop_last or len(places) == 1:
                # No batch may be dropped: counts must match exactly.
                self.assertEqual(self.batch_num, actual_batch_num)
            else:
                # drop_last with several places discards the ragged tail.
                self.assertGreater(self.batch_num, actual_batch_num)
    def test_main(self):
        # Exercise every combination of loader mode, device and drop_last.
        use_cuda_list = (
            [False, True] if fluid.is_compiled_with_cuda() else [False]
        )
        iterable_list = [False, True]
        drop_last_list = [False, True]
        for iterable in iterable_list:
            for use_cuda in use_cuda_list:
                for drop_last in drop_last_list:
                    with fluid.program_guard(fluid.Program(), fluid.Program()):
                        with fluid.scope_guard(fluid.Scope()):
                            self.run_network(iterable, use_cuda, drop_last)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | Superjomn.noreply@github.com |
ba2090eb36670814f7650ef6ffa2e6fb27f37fb5 | 692654b45228d813c8dc4c9ade0a6836cd2e7f17 | /other_tools/check_token.py | 2904c24fc086dd871cd4ade0faf53c78feebc2da | [] | no_license | sunary/nlp | dd67dce0a2001670efe0e1dc5f5ef7b014845982 | a9fa796118d51dd80cc9525d50247632caa00b7f | refs/heads/master | 2021-01-22T10:02:44.975681 | 2017-05-20T08:43:23 | 2017-05-20T08:43:23 | 43,935,102 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,720 | py | __author__ = 'sunary'
class CheckToken():
    """Maintain a sorted list of token sequences (black list) and scan a
    checker sequence for occurrences of them.

    NOTE(review): this code is Python 2 — `_find` relies on `/` integer
    division for list indexing and the module's driver uses print
    statements.  Under Python 3 the float indices would raise TypeError.
    """
    def __init__(self):
        # checker_token: the sequence to scan; black_token: sorted token lists.
        self.checker_token = []
        self.black_token = []
    def set_checker(self, checker):
        # Replace the sequence that check_token() will scan.
        self.checker_token = checker
    def add_token(self, token):
        '''
        Insert *token* into the sorted black list, skipping exact duplicates.
        Args:
            token: the token (list) to be added
        '''
        if self.black_token:
            position = self._find(token)
            # Insert after the last element that is <= token, unless it is
            # already present at that position.
            if token != self.black_token[position]:
                self.black_token[position + 1:position + 1] = [token]
        else:
            self.black_token.append(token)
        pass
    def _find(self, token):
        # Binary search: returns the index of the last element <= token
        # (-1 when token sorts before everything). Empty token maps to 0.
        if not token:
            return 0
        left_position = 0
        right_position = len(self.black_token) - 1
        mid_position= (left_position + right_position)/2
        mid_value = self.black_token[mid_position]
        while left_position <= right_position:
            if token < mid_value:
                right_position = mid_position - 1
            else:
                left_position = mid_position + 1
            mid_position = (left_position + right_position)/2
            # NOTE(review): when left_position passes the end, this indexes
            # with a stale/overflowing position — presumably relies on
            # Python's negative indexing; verify against edge cases.
            mid_value = self.black_token[mid_position]
        return left_position - 1
    def check_token(self):
        '''
        Scan checker_token for black-listed sub-sequences, deleting each
        matched sequence from black_token as a side effect.
        Returns:
            bool: the docstring's original example claims True on a match,
            but the implementation below always returns False — the real
            observable result is the mutation of black_token.
            NOTE(review): likely a missing `return True` on match; confirm
            intent before changing behavior.
        Examples:
            >>> set_checker([1, 2, 3, 4, 5, 6])
            >>> add_token([2, 3])
            >>> check_token()
            True
            >>> add_token([3, 4, 6])
            False
        '''
        for i in range(len(self.checker_token)):
            # Grow the window starting at i while it keeps matching a
            # prefix of some black-listed token.
            len_token = 1
            while True:
                list_token = self.checker_token[i: i + len_token]
                position = self._find(list_token) + 1
                # Exact match: remove the token from the black list.
                if self.black_token[position - 1] == list_token:
                    del self.black_token[position - 1]
                if position >= len(self.black_token) or len_token > len(self.black_token[position]) or len_token > len(list_token) or\
                    self.black_token[position][len_token - 1] != list_token[len_token - 1]:
                    break
                len_token += 1
        return False
if __name__ == '__main__':
    # Smoke test (Python 2 print syntax): populate the black list, show it,
    # then run check_token(), which prunes matched tokens from black_token.
    check_token = CheckToken()
    check_token.set_checker([1, 2, 3, 2, 2, 4, 45, 46, 4, 45, 52, 1, 21, 4, 5, 3, 4, 5, 1, 2])
    check_token.add_token([1, 2])
    check_token.add_token([5, 2])
    check_token.add_token([3, 4, 1])
    check_token.add_token([3, 4])
    check_token.add_token([2, 2])
    print check_token.black_token
    check_token.check_token()
print check_token.black_token | [
"v2nhat@gmail.com"
] | v2nhat@gmail.com |
cc33910210b5a0f0c332798673e332c4b8cb7eb7 | f8aa7306eeea9d2eafc400392acbdff931306e57 | /tests/test_cli.py | abf9c2f77d6c6a7a256664301c1113bc18566435 | [
"Apache-2.0"
] | permissive | b-jazz/warehouse | 929d1a0e7b4de3fd0596ff8334bda31ab5856bdc | 8c5414d709c6fd04c1b013ded680057a7def0833 | refs/heads/master | 2020-12-26T00:34:54.053900 | 2014-03-08T20:30:25 | 2014-03-08T20:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
import werkzeug.serving
from warehouse.cli import ServeCommand
from warehouse.serving import WSGIRequestHandler
def test_serve(monkeypatch):
run_simple = pretend.call_recorder(
lambda *a, **kw: None,
)
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple)
host, port, app, use_reloader, use_debugger = (
pretend.stub() for x in range(5)
)
ServeCommand()(
app, host, port,
reloader=use_reloader,
debugger=use_debugger,
)
assert run_simple.calls == [
pretend.call(
host, port, app,
use_reloader=use_reloader,
use_debugger=use_debugger,
request_handler=WSGIRequestHandler,
),
]
| [
"donald@stufft.io"
] | donald@stufft.io |
1a7ede2cf172f006375cf5698b2cd7246823fe4b | 06db739facf8fe08f8e0359741021a24e273e7d4 | /venv/Scripts/pip3.7-script.py | ce5591223e151add3cbd0019b47b8a041d6464e1 | [] | no_license | bopopescu/pythonfiles | dbade5a89ad84aaa570a8dade95b32e049540dd9 | 8cd71dda63280aa25b7d4656440ad5fe0c099c67 | refs/heads/master | 2022-11-26T12:21:23.179307 | 2019-08-12T05:58:39 | 2019-08-12T05:58:39 | 281,740,945 | 0 | 0 | null | 2020-07-22T17:25:25 | 2020-07-22T17:25:25 | null | UTF-8 | Python | false | false | 414 | py | #!C:\Users\Genx\PycharmProjects\CETPA\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim for `pip3.7`.
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
| [
"vaibhavvyas32@gmail.com"
] | vaibhavvyas32@gmail.com |
07be8ddb466dafa20a12db5953a268be1080fd5d | bd50354627904aed138b5ccfcb8129fabaf0ef4b | /cython (2)/setup.py | 4439b2caa8c819dd119533376dc375bee49dd11a | [] | no_license | TheAeryan/PTC | 7bb2986b2cebde3be896cab732a880745c3da63d | f5c2332c576f2faf3594d51310d93b6ee1e05b8b | refs/heads/master | 2022-11-24T22:44:43.389809 | 2020-08-04T11:32:26 | 2020-08-04T11:32:26 | 220,327,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 12:10:11 2019
@author: jose
"""
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules=cythonize("ejemplosCython.pyx"),
)
| [
"carlosmarista@gmail.com"
] | carlosmarista@gmail.com |
991478e17f1dbe92609293f4de8459c0aeeb116d | 5166ddcedb2de91dfafd8bc033edd8e702560114 | /testing_new_wav.py | 2a884810ef4b75a62869fada85e79f6cfd5a9c1f | [] | no_license | arr0056/Music-Genre-Classification-and-Recognition | b34016befdbbcc8cbc904fddca1834abcdda8610 | 80f4fe1410157c3164d4bf43598c51fba2d3ec88 | refs/heads/main | 2023-04-16T19:48:49.353643 | 2021-04-24T05:56:30 | 2021-04-24T05:56:30 | 353,123,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | from python_speech_features import mfcc
from tempfile import TemporaryFile
import scipy.io.wavfile as wav
import numpy as np
import os
import pickle
import random
import operator
import math
from collections import defaultdict
def distance(instance1, instance2, k):
    """Asymmetric divergence between two (mean, covariance, label) instances.

    Computes trace(inv(cov2) @ cov1) + Mahalanobis term of the mean
    difference under cov2 + log-det ratio, minus k.
    """
    mean_a, cov_a = instance1[0], instance1[1]
    mean_b, cov_b = instance2[0], instance2[1]
    inv_cov_b = np.linalg.inv(cov_b)
    diff = mean_b - mean_a
    trace_term = np.trace(np.dot(inv_cov_b, cov_a))
    maha_term = np.dot(np.dot(diff.transpose(), inv_cov_b), diff)
    log_det_term = np.log(np.linalg.det(cov_b)) - np.log(np.linalg.det(cov_a))
    return trace_term + maha_term + log_det_term - k
def knn(trainingSet, instance, k):
    """Return the labels of the k training instances closest to *instance*,
    using the symmetrised pairwise distance()."""
    scored = []
    for sample in trainingSet:
        sym_dist = distance(sample, instance, k) + distance(instance, sample, k)
        scored.append((sample[2], sym_dist))
    # Ascending by distance; the first k labels are the nearest neighbors.
    scored.sort(key=operator.itemgetter(1))
    return [label for label, _ in scored[:k]]
def nearestClass(neighbors):
    """Majority vote over neighbor labels.

    Ties break toward the label seen first, matching the original stable
    descending sort over the vote dict's insertion order.
    """
    votes = {}
    for label in neighbors:
        votes[label] = votes.get(label, 0) + 1
    # max() returns the first maximal item, i.e. the earliest-inserted
    # label among those with the top count.
    return max(votes.items(), key=operator.itemgetter(1))[0]
def loadDataset(filename):
    """Append every pickled feature tuple stored in *filename* to the
    module-level ``dataset`` list, stopping cleanly at end of file.

    Fix: the original ignored its *filename* argument and always opened
    the hard-coded "GTZAN.dat"; it now opens the file it was asked for
    (callers already pass "GTZAN.dat", so behavior is unchanged for them).
    """
    # The context manager closes the file even if unpickling raises.
    with open(filename, 'rb') as f:
        while True:
            try:
                dataset.append(pickle.load(f))
            except EOFError:
                break
# Extract MFCC-based features for every track in the first 10 genre folders
# and serialize them to GTZAN.dat as (mean_vector, covariance, genre_index).
directory = "C:/Users/rezaa/OneDrive/Desktop/Auburn Spring 2021/Machine Learning/Final Project/genres/"
f = open("GTZAN.dat" ,'wb')
i = 0
# creates file with information from datasets
for folder in os.listdir(directory):
    i += 1
    # Only the first 10 folders are processed (i is incremented first,
    # so folder 11 and beyond are skipped).
    if i == 11:
        break
    for file in os.listdir(directory+folder):
        (rate,sig) = wav.read(directory + folder + "/" + file)
        mfcc_feat = mfcc(sig, rate, winlen=0.020, appendEnergy = False)
        # Per-track summary: covariance and mean of the MFCC frames.
        covariance = np.cov(np.matrix.transpose(mfcc_feat))
        mean_matrix = mfcc_feat.mean(0)
        feature = (mean_matrix, covariance, i)
        pickle.dump(feature, f)
f.close()
# loads dataset back into memory for classification
dataset = []
loadDataset("GTZAN.dat")
# testing with a new wav file
results=defaultdict(int)
# path to file
directory = "C:/Users/rezaa/OneDrive/Desktop/Auburn Spring 2021/Machine Learning/Final Project/genres/"
# map genre index (1..10) back to its folder name for display
i = 1
for folder in os.listdir(directory):
    results[i] = folder
    i+=1
# Featurize the query track the same way as the training data
# (label 0 is a dummy — it is ignored by the distance function).
(rate,sig) = wav.read("C:/Users/rezaa/OneDrive/Desktop/Auburn Spring 2021/Machine Learning/Final Project/test.wav")
mfcc_feat = mfcc(sig,rate,winlen=0.020,appendEnergy=False)
covariance = np.cov(np.matrix.transpose(mfcc_feat))
mean_matrix = mfcc_feat.mean(0)
feature = (mean_matrix, covariance, 0)
prediction = nearestClass(knn(dataset, feature, 5))
print(results[prediction]) | [
"noreply@github.com"
] | arr0056.noreply@github.com |
b5bb00b62ffdd87f97a11ce0449cc13d354d4a8c | 4cea92930f06b1ce2543b73fec00cdf3d54fe738 | /venv/Lib/site-packages/wrap_py/settings.py | efa59a765df0f1e380325a4c51e05f5ee609a4f3 | [] | no_license | Anna-Tushminskaya/Anna | 6fcf02a9f0f78fb67085ba8fb943d24b4f61d7f4 | 3b8b7332a102fcbb3cb50d44a8ff115aa7e37b77 | refs/heads/main | 2023-08-28T22:09:17.021518 | 2021-10-22T08:18:05 | 2021-10-22T08:18:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | import os, sys
TRANSL_DOMAIN = "wrap_py"
PACK_SOURCE_FOLDER = os.path.split(__file__)[0]
TRANSLATIONS_FOLDER = os.path.join(PACK_SOURCE_FOLDER, "transl", "compiled")
RESOURCE_FOLDER = os.path.join(PACK_SOURCE_FOLDER, "res")
ICON_FILE = os.path.join(RESOURCE_FOLDER, "icon.png") | [
"anna.t.nvkz@gmail.com"
] | anna.t.nvkz@gmail.com |
93250100f4dea25b292e8471b70ae83b71cce42f | e582d60b7996faf7b87c6d857613e63581d415b9 | /elliot/recommender/visual_recommenders/VNPR/visual_neural_personalized_ranking_model.py | 858a318f1ec3594cc6a9eef6e489659da71b7b15 | [] | no_license | Abdel57Grota/Reenvisioning-the-comparison-between-Neural-Collaborative-Filtering-and-Matrix-Factorization | d6e51c32094550789673846acdf9891557b790c1 | 2a2b0148e881cf8ba45c48ad9d42f52421585284 | refs/heads/main | 2023-09-03T09:47:41.894117 | 2021-11-09T09:17:35 | 2021-11-09T09:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,024 | py | """
Module description:
"""
__version__ = '0.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo, Daniele Malitesta, Felice Antonio Merra'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it, daniele.malitesta@poliba.it, felice.merra@poliba.it'
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.random.set_seed(0)
class VNPRModel(keras.Model):
    """Visual Neural Personalized Ranking model (Keras).

    Two item embedding tables feed two MLP towers; each tower scores a
    (user, item) pair concatenated with the item's frozen visual feature.
    Training optimizes a BPR-style pairwise softplus loss.
    """
    def __init__(self,
                 num_users,
                 num_items,
                 embed_mf_size, l_w, mlp_hidden_size, dropout, learning_rate=0.01,
                 emb_image=None,
                 name="VNPR",
                 **kwargs):
        """Build embeddings, the frozen visual-feature matrix and MLP towers.

        l_w is the L2 regularization weight; emb_image is the precomputed
        item visual-feature matrix (rows indexed by item id).
        """
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(42)
        self.num_users = num_users
        self.num_items = num_items
        self.embed_mf_size = embed_mf_size
        self.l_w = l_w
        self.mlp_hidden_size = mlp_hidden_size
        self.dropout = dropout
        self.initializer = tf.initializers.GlorotUniform()
        self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size,
                                                        embeddings_initializer=self.initializer, name='U_MF',
                                                        dtype=tf.float32)
        # Two separate item tables: one per MLP tower (positive / negative side).
        self.item_mf_embedding_1 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size,
                                                          embeddings_initializer=self.initializer, name='I_MF_1',
                                                          dtype=tf.float32)
        self.item_mf_embedding_2 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size,
                                                          embeddings_initializer=self.initializer, name='I_MF_2',
                                                          dtype=tf.float32)
        self.emb_image = emb_image
        # Visual features are fixed: not updated during training.
        self.F = tf.Variable(
            self.emb_image, dtype=tf.float32, trainable=False)
        self.mlp_layers_1 = keras.Sequential()
        for units in mlp_hidden_size:
            # We can have a deeper MLP. In the paper it maps directly to 1.
            self.mlp_layers_1.add(keras.layers.Dropout(dropout))
            self.mlp_layers_1.add(keras.layers.Dense(units, activation='relu'))
        self.mlp_layers_2 = keras.Sequential()
        for units in mlp_hidden_size:
            # We can have a deeper MLP. In the paper it maps directly to 1.
            self.mlp_layers_2.add(keras.layers.Dropout(dropout))
            self.mlp_layers_2.add(keras.layers.Dense(units, activation='relu'))
        self.optimizer = tf.optimizers.Adam(learning_rate)
    @tf.function
    def call(self, inputs, training=None, mask=None):
        """Score (user, item1) and (user, item2); also return the embeddings
        so train_step can regularize them."""
        user, item1, item2 = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e_1 = self.item_mf_embedding_1(item1)
        item_mf_e_2 = self.item_mf_embedding_2(item2)
        feature_e_1 = tf.nn.embedding_lookup(self.F, item1)
        feature_e_2 = tf.nn.embedding_lookup(self.F, item2)
        embedding_input_1 = tf.concat([user_mf_e * item_mf_e_1, feature_e_1], axis=2)  # [batch_size, embedding_size]
        mlp_output_1 = self.mlp_layers_1(embedding_input_1)  # [batch_size, 1]
        embedding_input_2 = tf.concat([user_mf_e * item_mf_e_2, feature_e_2], axis=2)
        mlp_output_2 = self.mlp_layers_2(embedding_input_2)  # [batch_size, 1]
        return tf.squeeze(mlp_output_1), tf.squeeze(mlp_output_2), user_mf_e, item_mf_e_1, item_mf_e_2
    @tf.function
    def train_step(self, batch):
        """One pairwise (user, positive, negative) optimization step; returns the loss."""
        with tf.GradientTape() as tape:
            user, pos, neg = batch
            # Clean Inference
            mlp_output_1, mlp_output_2, user_mf_e, item_mf_e_1, item_mf_e_2 = self.call(inputs=(user, pos, neg),
                                                                                        training=True)
            # Clip to keep softplus numerically stable for very negative margins.
            difference = tf.clip_by_value(mlp_output_1 - mlp_output_2, -80.0, 1e8)
            loss = tf.reduce_sum(tf.nn.softplus(-difference))
            # Regularization Component
            reg_loss = self.l_w * tf.reduce_sum([tf.nn.l2_loss(user_mf_e),
                                                 tf.nn.l2_loss(item_mf_e_1),
                                                 tf.nn.l2_loss(item_mf_e_2)])
            # Loss to be optimized
            loss += reg_loss
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return loss
    @tf.function
    def predict(self, inputs, training=False, **kwargs):
        """
        Score the given user/item pairs as the mean of the two towers.
        Returns:
            The predicted score tensor.
        """
        u, i = inputs
        output_1, output_2, _, _, _ = self.call(inputs=(u, i, i), training=training)
        return (output_1 + output_2) * 0.5
    @tf.function
    def get_recs(self, inputs, training=False, **kwargs):
        """
        Get full predictions on the whole users/items matrix.
        Returns:
            The matrix of predicted values (towers averaged).
        """
        user, item = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e_1 = self.item_mf_embedding_1(item)
        item_mf_e_2 = self.item_mf_embedding_2(item)
        feature_e = tf.nn.embedding_lookup(self.F, item)
        mf_output_1 = tf.concat([user_mf_e * item_mf_e_1, feature_e], axis=2)  # [batch_size, embedding_size]
        mf_output_2 = tf.concat([user_mf_e * item_mf_e_2, feature_e], axis=2)  # [batch_size, embedding_size]
        mlp_output_1 = self.mlp_layers_1(mf_output_1)  # [batch_size, 1]
        mlp_output_2 = self.mlp_layers_2(mf_output_2)  # [batch_size, 1]
        return tf.squeeze((mlp_output_1+mlp_output_2)/2)
    @tf.function
    def get_top_k(self, preds, train_mask, k=100):
        # Mask already-seen (training) items with -inf before taking top-k.
        return tf.nn.top_k(tf.where(train_mask, preds, -np.inf), k=k, sorted=True)
| [
"claudio.pomo@poliba.it"
] | claudio.pomo@poliba.it |
1c02cfffb3421857d71cc9ec45294cfeeeea32bb | 21436aa94247388e910d43a7f682548971599711 | /src/main.py | 7da951e0ffc01e50654daadd639c1a9a7f70df1c | [] | no_license | qenndrimkrasniqi/VirtualLibrary | 8b3f9a943bcb6b40c6bc893fb0739b76850c6976 | 39f749f21fe36e274e42f942426d81dd765ed753 | refs/heads/main | 2023-06-04T12:47:16.064842 | 2021-06-21T18:46:27 | 2021-06-21T18:46:27 | 377,547,635 | 0 | 0 | null | 2021-06-21T18:46:27 | 2021-06-16T15:43:30 | Python | UTF-8 | Python | false | false | 787 | py | import os
from gui import create_window, populate_table, destroy_table_body
from models import Library, Librarian
# The library folder name encodes "<name> - <location>"; parse it to
# construct the Library and its Librarian, then build the GUI widgets.
source_path = '../Data/Hivzi Sylejmani - Prishtinë'
basename = os.path.basename(source_path)
name, location = basename.split('-')
library = Library(source_path, name.strip(), location.strip())
librarian = Librarian('Sherif', library)
window, results_frm, title_entr, author_entr, genre_entr, search_btn = create_window()
def get_data(e, title, author, genre):
    """Button handler: run a book search and refresh the results table."""
    query = (title.get(), author.get(), genre.get())
    matches = librarian.get_books(*query)
    # Clear the previous rows before rendering the new result set.
    destroy_table_body(results_frm)
    populate_table(matches, results_frm)
search_btn.bind("<Button-1>", lambda event, title=title_entr, author=author_entr, genre=genre_entr:get_data(event, title, author, genre))
window.mainloop() | [
"84874190+ritabreznica@users.noreply.github.com"
] | 84874190+ritabreznica@users.noreply.github.com |
fec0fcb625457516ed993ecaffadefeebb4b6a1a | 9a10893b4b0b5dc8ca528baf4dd6e994c2729ad5 | /guess_game.py | e973e87490f0d12e7fbdac3a44f28f38309750ab | [] | no_license | pramathesh18/school-projects | 756e1fb4da947e08e8ff0bc56523cabddfdc701b | 6a5181373fc08288ec0927fe306697571c482403 | refs/heads/master | 2023-04-23T03:09:50.970857 | 2021-05-06T15:21:00 | 2021-05-06T15:21:00 | 325,188,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from random import randint as ran
# Number-guessing game: the player gets 9 attempts at a random number
# in [20, 50] (randint is inclusive on both ends).
n = ran(20, 50)
print("Game starts now. \nYou have to guess the correct no. \nYou"" have only 9 chances")
for i in range(9):
    a = int(input('enter the no._\n'))
    if a == n:
        print('your guess is right \n you have taken', (i + 1), 'guesse to win \ncongrats')
        break
    elif a > n:
        print('the winning number is lesser than your guess\ntry again \nOnly', (8 - i), 'Guesses is left now')
    else:
        print('the winning number is greater than your guess \ntry again \nOnly', (8 - i), 'Guesses is left now')
    if i == 8:
        # Bug fix: report the actual winning number instead of the
        # hard-coded 18 (n is random in [20, 50], so 18 was always wrong).
        print('Game over\nthe no. was', n)
| [
"pramathesh18@gmail.com"
] | pramathesh18@gmail.com |
5cdf01aef834430786ada1bf4a8559f5091d842e | 2cc8463ef8a1933bdf9691c053ce8cf3138b28f8 | /topicMining/things/demo_1.py | 43add7c2b7104f6f1dbf5131ae4a2673fb4ff2d4 | [] | no_license | JackiyWu/textMining | 042c25ffd26b0d793b5b4b2a08a655cab014cb43 | 7932f3d2f208d5f28248bbae9c3e460a50dffa47 | refs/heads/master | 2023-03-29T22:00:13.609926 | 2021-03-26T04:48:59 | 2021-03-26T04:48:59 | 351,449,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import math
import random
import numpy as np
class CONFIG:
    """Constants for the US/S1/S2/T state-transition simulation.

    n_* are the initial population counts per state; *_to_* are transition
    rates (converted to per-step probabilities in `params` below); `iters`
    lists the step counts to simulate; N is the number of Monte-Carlo
    repetitions averaged per entry of `iters`.
    """
    n_us = 4999
    n_s1 = 1
    n_s2 = 0
    n_t = 0
    us_to_s1 = 0.00000371
    s1_to_s2 = 0.00005
    s1_to_t = 0.00009
    s2_to_t = 0.000003
    C = 0.7
    # Bug fix: the original list contained the floats 33654.36985 and
    # 39632.43756 (a comma mistyped as a dot), which made `range(it)` in the
    # simulation loop raise TypeError. All entries must be ints.
    iters = [3000, 6999, 9856, 13331, 16989, 19536, 23698, 26985, 29631,
             33654, 36985, 39632, 43756, 46956, 49632, 53698, 56321, 59632,
             63214, 66987, 69632, 75621, 83698, 89654, 96632, 99965]
    N = 40
    out_path = 'G:\\result.txt'
# Per-step transition probabilities derived from the exponential rates in
# CONFIG: p = 1 - exp(-rate), with the US->S1 rate additionally scaled by
# the contact factor C.
params = {
    "us_to_s1":1-math.exp(-CONFIG.us_to_s1 * CONFIG.C),
    "s1_to_s2":1-math.exp(-CONFIG.s1_to_s2),
    "s1_to_t":1-math.exp(-CONFIG.s1_to_t),
    "s2_to_t":1-math.exp(-CONFIG.s2_to_t)
}
def changeCondition(data, params):
    """Advance the state machine by one simulated time step.

    Each element of `data` is one of the labels 'US', 'S1', 'S2', 'T'.
    A 'US' node may become 'S1' if any other node was 'S1'/'S2' at the
    start of the step; 'S1' may become 'S2' or 'T'; 'S2' may become 'T'.

    :param data: list of state labels; mutated in place.
    :param params: dict with keys 'us_to_s1', 's1_to_s2', 's1_to_t',
        's2_to_t' giving per-step transition probabilities in [0, 1].
    :return: the same (mutated) list.
    """
    _len = len(data)
    # Bug fix: the original `data_copy = data` merely aliased the live list,
    # so nodes converted earlier in this same step influenced later nodes.
    # Take a real snapshot of the start-of-step state instead.
    data_copy = list(data)
    for i in range(0, _len):
        if data[i] == 'US':
            for j in range(0, _len):
                if i == j:
                    continue
                if data_copy[j] == 'S1' or data_copy[j] == 'S2':
                    rd = random.random()
                    if rd < params['us_to_s1']:
                        data[i] = 'S1'
                        # Already converted; no need to test remaining
                        # neighbours (the original kept drawing redundantly).
                        break
        elif data[i] == 'S1':
            rd = random.random()
            # NOTE(review): the same draw `rd` is reused for both thresholds,
            # so the S1->S2 and S1->T events are coupled — preserved from the
            # original; confirm this is intended.
            if rd < params['s1_to_s2']:
                data[i] = 'S2'
            if rd > 1 - params['s1_to_t']:
                data[i] = 'T'
        elif data[i] == 'S2':
            rd = random.random()
            if rd < params['s2_to_t']:
                data[i] = 'T'
        else:
            # 'T' is absorbing: nothing to do.
            continue
    return data
def cal_condition(data):
    """Count the nodes in each state.

    :param data: sequence of state labels.
    :return: [#US, #S1, #S2, #T] as numpy integer counts.
    """
    states = np.array(data)
    return [(states == label).sum() for label in ('US', 'S1', 'S2', 'T')]
# Run the simulation for every step count in CONFIG.iters, averaging the
# final state counts over CONFIG.N repetitions, then dump one CSV line per
# step count to CONFIG.out_path.
rs = []
for it in CONFIG.iters:
    # Fresh initial population for this step count.
    data = ['US'] * CONFIG.n_us + ['S1'] * CONFIG.n_s1 + ['S2'] * CONFIG.n_s2 + ['T'] * CONFIG.n_t
    result = []
    for n in range(CONFIG.N):
        # NOTE(review): `data` is NOT reset between the N repetitions, so
        # each repetition continues from the previous end state rather than
        # restarting — confirm this accumulation is intended.
        for i in range(it):
            data = changeCondition(data, params)
        result.append(cal_condition(data))
    # Mean count per state across the N repetitions.
    result = np.array(result).mean(axis=0)
    rs.append(result)
out = ""
for item in rs:
    item = list(map(str, item))
    out += ",".join(item)
    out += "\n"
with open(CONFIG.out_path, 'w') as ipt:
    ipt.write(out)
"wujie_dlut@163.com"
] | wujie_dlut@163.com |
cc0b257ba2ef769ff59ee246c8b245375a07630f | 0af72e808835b371232e7560ff960aa318d89d81 | /contest/models.py | ecc611c005c7bee8187a86d7ff7506f4d90e1955 | [] | no_license | robgrigor/cybersports | d3715f93fcc53259e03392931ada758cd1259fcb | 6d31f74d1e08115729c0716d52c8e69251ba056e | refs/heads/master | 2020-07-19T02:02:44.134255 | 2019-09-04T15:49:19 | 2019-09-04T15:49:19 | 206,356,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Tournament(models.Model):
    """A single tournament match between two players, posted by a host user."""
    # NOTE(review): a ForeignKey default should be a primary-key value;
    # 'GevorgArtenyan' looks like a username string — verify this default
    # actually resolves when a Tournament is saved without a host.
    host = models.ForeignKey(User, on_delete=models.CASCADE, default='GevorgArtenyan')
    name = models.CharField(max_length=250, default='')
    player1 = models.CharField(max_length=250, default='')
    player2 = models.CharField(max_length=250, default='')
    # Final scores of player1 and player2 respectively.
    score1 = models.IntegerField(default=0)
    score2 = models.IntegerField(default=0)
    date_posted = models.DateTimeField(default=timezone.now)
    def __str__(self):
        """Human-readable representation used by the admin and templates."""
        return self.name
    def get_absolute_url(self):
        """Return the canonical detail-page URL for this tournament."""
        return reverse('tournament-detail', kwargs={'pk': self.pk})
class Result(models.Model):
    """A pair of scores; presumably a recorded match result — TODO confirm
    how this relates to Tournament (no foreign key links them here)."""
    score1 = models.IntegerField(default=0)
    score2 = models.IntegerField(default=0)
"robgrigo@yandex.ru"
] | robgrigo@yandex.ru |
a0e4d0fc0edadaf6b668bd6570f6c2ba34a2fc9e | a09740e643d6277ada23c82d8e87853a1cd1a9e5 | /Z_ALL_FILE/Py/omdf5.py | b033ff3d43a5953248d534cd87fd3b5182354496 | [
"Apache-2.0"
] | permissive | FuckBrains/omEngin | c5fb011887c8b272f9951df3880a879456f202e8 | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | refs/heads/main | 2023-03-20T18:27:53.409976 | 2021-03-14T15:50:11 | 2021-03-14T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import pandas as pd
# Demo: build a small DataFrame of daily incomes and print, for each row,
# a sentence with the summed income via a row-wise apply (axis=1).
dates=['April-10', 'April-11', 'April-12', 'April-13','April-14','April-16']
income1=[10,20,10,15,10,12]
income2=[20,30,10,5,40,13]
df=pd.DataFrame({"Date":dates,
                 "Income_1":income1,
                 "Income_2":income2})
print(df.apply(lambda row: "Total income in "+ row["Date"]+ " is:"+str(row["Income_1"]+row["Income_2"]),axis=1))
| [
"omi.kabirr@gmail.com"
] | omi.kabirr@gmail.com |
f87c09f8764e7c0c3547b5eeb145d9a9c2d6fcc6 | 3240ad763edfd513ca0d9f2033bc17011e38d8ba | /src/mercury-common/tests/unit/task_managers/base/test_worker.py | 494ca7ba27141a0f57d1fa810271e5916bf3d363 | [
"Apache-2.0"
] | permissive | BenjamenMeyer/mercury | d2a732f38665985054f1c3a4fd47330e381a3d8c | 95d82ea4a005740153379bb08eb6d84d72cab251 | refs/heads/master | 2021-09-02T12:57:12.515104 | 2017-11-22T17:58:38 | 2017-11-22T17:58:38 | 109,020,280 | 0 | 0 | null | 2017-10-31T16:07:13 | 2017-10-31T16:07:13 | null | UTF-8 | Python | false | false | 2,633 | py | # Copyright 2017 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from mercury.common.task_managers.base import task
from mercury.common.task_managers.base import worker
from tests.unit.base import MercuryCommonUnitTest
class WorkerTest(MercuryCommonUnitTest):
    """Tests for the mercury.common.task_managers.base.worker"""
    @mock.patch.object(task, 'Task')
    def setUp(self, mock_task):
        """Create a fake worker object"""
        # Task is patched for the duration of setUp, so task.Task() below
        # yields a Mock; the worker therefore never touches a real backend.
        mock_task.create.return_value = None
        task_handler = task.Task()
        max_requests = 1  # worker should stop after handling one task
        max_age = 3600
        self.fake_worker = worker.Worker(task_handler, max_requests, max_age)
    def test_start(self):
        """Test start() executes a new task"""
        self.fake_worker.start()
        # One fetch, one execute, and the handled-task counter advanced.
        self.fake_worker.task.fetch.assert_called_once()
        self.fake_worker.task.execute.assert_called_once()
        self.assertEqual(1, self.fake_worker.handled_tasks)
    def test_start_kill_signal(self):
        """Test start() doesn't execute task if kill_signal is True"""
        self.fake_worker.kill_signal = True
        self.fake_worker.start()
        # The loop must exit before fetching or executing anything.
        self.fake_worker.task.fetch.assert_not_called()
        self.fake_worker.task.execute.assert_not_called()
        self.assertEqual(0, self.fake_worker.handled_tasks)
    def test_start_too_many_requests(self):
        """Test start() doesn't execute more tasks than maximum allowed"""
        # Pretend the worker already reached max_requests (1).
        self.fake_worker.handled_tasks = 1
        self.fake_worker.start()
        self.fake_worker.task.fetch.assert_not_called()
        self.fake_worker.task.execute.assert_not_called()
        self.assertEqual(1, self.fake_worker.handled_tasks)
    def test_start_no_more_task(self):
        """Test start() continue fetching tasks if none found at first"""
        # First fetch returns no work; the worker should poll again and
        # execute the task returned on the second fetch.
        self.fake_worker.task.fetch.side_effect = [None, 'fake_task']
        self.fake_worker.start()
        self.assertEqual(2, self.fake_worker.task.fetch.call_count)
        self.fake_worker.task.execute.assert_called_once()
        self.assertEqual(1, self.fake_worker.handled_tasks)
| [
"albousque@gmail.com"
] | albousque@gmail.com |
cb784c7ad511f446b3e53fb1d2f40c12dc522ecb | 113d30a345d035c980246d6cd04b761d297b846e | /src/model/bsln_kde.py | e366b2784acbf1c4112e1a71844e11e5d734da7c | [] | no_license | JHWu92/public-safety-baltimore | 4855da004c5ff70b378371f31645f0e3fa22d5de | 11b77125d70e151aa1feede1240a30e0d0f46a7a | refs/heads/master | 2021-04-27T12:09:27.285644 | 2019-02-04T14:36:43 | 2019-02-04T14:36:43 | 122,574,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,886 | py | # Author: Jiahui Wu <jeffwu@terpmail.umd.edu>
import datetime
import geopandas as gp
import numpy as np
import pandas as pd
from pyproj import Proj, transform
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity
from src import constants as C
class KDE:
    """Kernel-density-estimation baseline over event coordinates.

    Fits a sklearn KernelDensity on the coordinates of events within a
    trailing time window and predicts a density value per query coordinate.
    """
    def __str__(self):
        return 'KDE(bandwidth={}, timewindow={}, verbose={})'.format(self.bw, self.tw, self.verbose)
    def __init__(self, bw=1, tw=60, verbose=0):
        """
        :param bw: float, default=1
            bandwidth of the KDE kernel (can be tuned via tune())
        :param tw: int or None, default=60
            time window, in days, of data to be considered in estimation;
            None means use all data
        :param verbose: int, default=0
            >0 enables progress printing
        """
        self.verbose = verbose
        self.bw = bw
        self.tw = tw
        # Set by fit(); a fitted sklearn KernelDensity instance.
        self.estimator = None
    def get_last_date(self, coords, last_date):
        """Resolve the inclusive end of the time window.

        None -> last second of the latest day present in coords' index;
        'YYYY-MM-DD' string -> last second of the PREVIOUS day (one second
        before that date's midnight); datetime -> returned unchanged.
        """
        if last_date is None:
            last_date = coords.index.max().normalize() + datetime.timedelta(days=1, seconds=-1)
        elif isinstance(last_date, str):
            last_date = datetime.datetime.strptime(last_date, '%Y-%m-%d') + datetime.timedelta(seconds=-1)
        if self.verbose > 0:
            print('last_date = %s' % last_date)
        return last_date
    def fit(self, x_coords, y_coords=None, last_date=None):
        """
        :param x_coords: pd.Series
            Indexed and sorted by Date, with values = coords
            For compatibility with inputs containing names of coords, such as those for RTM,
            coords can be dict. In this case, only len(coords)=1 (1 key) is allowed.
        :param y_coords: not used in KDE, for compatibility purpose
        :param last_date: string (format='%Y-%m-%d') or DateTime, default None
            the last date of the time window. If None, the last date of coords is used
        """
        # for compatibility
        if isinstance(x_coords, dict):
            if len(x_coords) != 1: raise ValueError('input coords is dict, but len!=1')
            if self.verbose > 0: print('coords is a dictionary, len==1, keep its value only')
            x_coords = list(x_coords.values())[0]
        if self.tw is not None:
            last_date = self.get_last_date(x_coords, last_date)
            # pandas time index slice include both begin and last date,
            # to have a time window=tw, the difference should be tw-1
            begin_date = last_date - datetime.timedelta(days=self.tw - 1)
            x_coords = x_coords.loc[begin_date:last_date]
        kde = KernelDensity(bandwidth=self.bw)
        kde.fit(x_coords.tolist())
        self.estimator = kde
    def predict(self, data, now_date=None):
        """
        :param data: pd.Series of coords to score
        :param now_date: not used in KDE, for compatibility purpose
        :return: pd.Series of density values (exp of log-density), indexed
            like `data`
        """
        # TODO: data could be other spatial unit
        # Now it is assumed as coords
        # score_samples returns log-density, so exponentiate.
        pdf = np.exp(self.estimator.score_samples(data.tolist()))
        pdf = pd.Series(pdf, index=data.index)
        return pdf
    def tune(self, coords, bw_choice=None, cv=20, n_jobs=1):
        """
        Bandwidth is estimated by gridsearchCV and stored in self.bw.
        :param coords: coords for bw estimation
        :param bw_choice: list-like, default np.linspace(10, 1000, 30)
        :param cv: number of cross-validation folds, default 20
        :param n_jobs: parallel jobs for the grid search, default 1
        """
        if isinstance(coords, pd.Series):
            if self.verbose > 0: print('converting pd.Series to list')
            coords = coords.tolist()
        if bw_choice is None:
            if self.verbose > 0: print('use default bw_choice')
            bw_choice = np.linspace(10, 1000, 30)
            if self.verbose > 0: print(str(bw_choice))
        if self.verbose > 0: print('gridsearching bw')
        search = GridSearchCV(KernelDensity(), {'bandwidth': bw_choice}, cv=cv, verbose=self.verbose, n_jobs=n_jobs)
        search.fit(coords)
        if self.verbose > 0: print('best parameters:', search.best_params_)
        self.bw = search.best_params_['bandwidth']
| [
"jhwu92@gmail.com"
] | jhwu92@gmail.com |
9831ad1bea0a96e5597c17006078cb51597ae0b6 | aa77d94f9c8959d58d1619d7f13b12e0c2176418 | /UGATIT_inference.py | 1ca24f595c8cc13bbbe5020f2530ebd836f0a063 | [] | no_license | ginjake/image_converter | 323fffad18a576ca39eb5d9b47093b058c7d1acf | 6b352017c75af6c88cd67cc8b3a05f23e9700043 | refs/heads/main | 2023-01-23T15:11:04.528098 | 2020-12-01T12:26:22 | 2020-12-01T12:26:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | #encoding:utf-8
from module.importer import *
from module.dataloader import *
from module.generator import *
#学習済みモデルの読み込み
generator = Generator()
generator.load_state_dict(torch.load('./trained_model/generator_A2B_trained_model_cpu.pth'))
#推論モードに切り替え
generator.eval()
#変換対象となる画像の読み込み
path_list = make_datapath_list('./conversion/target/**/*')
train_dataset = GAN_Img_Dataset(file_list=path_list,transform=ImageTransform(256))
dataloader = torch.utils.data.DataLoader(train_dataset,batch_size=len(path_list),shuffle=False)
target = next(iter(dataloader))
#generatorへ入力、出力画像を得る
converted,_,_ = generator.forward(target)
#画像出力用にディレクトリを作成
os.makedirs("./conversion/converted",exist_ok=True)
#画像を出力
for i,output_img in enumerate(converted):
origin_filename = os.path.basename(path_list[i])
origin_filename_without_ex = os.path.splitext(origin_filename)[0]
filename = "./conversion/converted/{}_converted{}.png".format(origin_filename_without_ex,i)
#そこへ保存
vutils.save_image(output_img,filename,normalize=True)
print(origin_filename + " : converted")
| [
"gravity@kuf.biglobe.ne.jp"
] | gravity@kuf.biglobe.ne.jp |
68cc890b4bfdc1ac245f6d2ace4cb88f55e11803 | 8f5a570db4c1534a72389eed13b9cc94cf4e18df | /back_end/apiData/ripRecipes.py | d3872293f0ff7f43cc1c13ce3e7854242240925c | [] | no_license | ArnabSagar/recipe-express | e87fb6114f9ba338a538e22c36f1e1a225044850 | c94c67bb206a12cd52874a6549cd6cf9f19d3fc1 | refs/heads/master | 2022-04-03T00:21:10.638783 | 2020-02-16T15:28:39 | 2020-02-16T15:28:39 | 240,648,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # Rip a bunch of recipes from Edamame API to use
import requests
import json
import pprint
import time
# base parameters
BASE_URL = "https://api.edamam.com/search"
APP_ID = "e8650a6b"
APP_KEY = "3cf3080f0b821238dfb1cd90a2346546"
# get the keywords to use in api calls
keywordsFile = open("keywordsToRip.txt", 'r')
keywords = keywordsFile.readlines()
keywordsFile.close()
# prepare output file
outFile = open("recipes.txt", 'w')
# iterate through api calls
count = 0
for key in keywords:
count += 1
if(count % 4 == 0):
time.sleep(60)
FULL_URL = BASE_URL + "?q=" + key + "&app_id=" + APP_ID + "&app_key=" + APP_KEY
response = requests.get(url=FULL_URL)
if not response.ok:
print("keyword: " + key + " resulted in failed request!")
continue
data = response.json()
hits = data["hits"]
for hit in hits:
recipe = hit["recipe"]
outFile.write(recipe['label'] + ", ")
outFile.write(recipe['image'] + ", ")
outFile.write(str(recipe['yield']) + ", ")
outFile.write(str(recipe['calories']) + ", ")
ingredients = recipe['ingredients']
for i in range(0, len(ingredients) - 1):
outFile.write(ingredients[i]['text'] + ", ")
outFile.write(ingredients[-1]['text'] + "\n")
#print(data["hits"])
#pprint.pprint(data)
| [
"anderm28@mcmaster.ca"
] | anderm28@mcmaster.ca |
2068d1710140295cd665f7971b3655a7f2367e15 | f0592d39eaf5f8bcbe46c4b16f6fa631be48887f | /tests/contrib/autoguide/test_hessian.py | f26a124db03826f7d6b1f111d1e4ca602e1d5ab1 | [
"MIT"
] | permissive | wsgharvey/pyro | 0bfc762a20c4bcbbe30e61adbcc2c33e32effdb5 | 5c3ef54050d9ad566e5965174d4ad51bd37e55dd | refs/heads/dev | 2021-05-06T18:57:58.458691 | 2018-10-10T01:48:52 | 2018-10-10T01:48:52 | 111,995,379 | 0 | 0 | null | 2017-11-25T10:33:12 | 2017-11-25T10:33:12 | null | UTF-8 | Python | false | false | 827 | py | from __future__ import absolute_import, division, print_function
import torch
import pyro.distributions as dist
from pyro.contrib.autoguide import _hessian
from tests.common import assert_equal
def test_mvn():
    """The Hessian of a multivariate-normal log-density w.r.t. x must equal
    minus the precision (inverse covariance) matrix."""
    # Build a random symmetric positive-semidefinite covariance.
    tmp = torch.randn(3, 10)
    cov = torch.matmul(tmp, tmp.t())
    mvn = dist.MultivariateNormal(cov.new_zeros(3), cov)
    x = torch.randn(3, requires_grad=True)
    y = mvn.log_prob(x)
    assert_equal(_hessian(y, x), -mvn.precision_matrix)
def test_multi_variables():
    """_hessian over a tuple of variables must match the analytic block
    Hessian of y = sum(x**2 * z + z**3)."""
    x = torch.randn(3, requires_grad=True)
    z = torch.randn(3, requires_grad=True)
    objective = (x ** 2 * z + z ** 3).sum()
    actual = _hessian(objective, (x, z))
    # Analytic second derivatives: d2y/dx2 = 2z, d2y/dxdz = 2x, d2y/dz2 = 6z,
    # each diagonal because the objective is element-wise before the sum.
    block_xx = torch.diag(2 * z)
    block_xz = torch.diag(2 * x)
    block_zz = torch.diag(6 * z)
    top = torch.cat([block_xx, block_xz], dim=1)
    bottom = torch.cat([block_xz, block_zz], dim=1)
    expected = torch.cat([top, bottom], dim=0)
    assert_equal(actual, expected)
| [
"fritz.obermeyer@gmail.com"
] | fritz.obermeyer@gmail.com |
ce5e4735d303172cc390876e1fc7f4ec5ac23b22 | 023fb57c4d822fb30cf7c54fc5643c42d8b80d0a | /EC2may/urls.py | 29b1a59b2bae1315ea1df34d408900ee5f24b3f8 | [] | no_license | localnewtonsoft/mayproj | 15bd504826df535735f665e8383c4f9b6e6baf20 | 2f7fd8a21d37693c8f419aa85e62ce7e7b68e659 | refs/heads/master | 2023-08-17T19:53:13.020928 | 2020-05-13T07:28:44 | 2020-05-13T07:28:44 | 263,543,554 | 0 | 0 | null | 2021-09-22T19:00:29 | 2020-05-13T06:23:37 | JavaScript | UTF-8 | Python | false | false | 826 | py | """EC2may URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from webapp import views
# URL routing table: the Django admin at /admin/ and the site index at /.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.Index_view.as_view(),name='home'),
]
| [
"prabhashswain15@gmail.com"
] | prabhashswain15@gmail.com |
ec1a5b541e8647256089a98a71b1385d482516f3 | 9cc12acd5581a4abc786b3d8a0c993586e3bc428 | /chatter/ui/MainWindow.py | 99708c48ad8a84eba4f17a74ba6943ec57bd8455 | [] | no_license | tisaconundrum2/Aivery | d022b0c787630bc10b86473d8d9a9787b980c21e | 5ae658987ee4af6a41979bc818e09211db75315c | refs/heads/master | 2021-07-02T05:19:20.758160 | 2017-09-22T15:33:16 | 2017-09-22T15:33:16 | 104,304,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,800 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\tisaconundrum\Documents\GitHub\Aivery\ui\MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI scaffold for the Aivery chat window.

    Do not hand-edit the widget setup: regenerate from MainWindow.ui
    instead (see the generator warning at the top of this file).
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(393, 525)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("../images/robot.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        # Central area: chat transcript above a line-edit + two buttons row.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
        self.textBrowser.setEnabled(True)
        self.textBrowser.setObjectName("textBrowser")
        self.verticalLayout.addWidget(self.textBrowser)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout.addWidget(self.lineEdit)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.talkForMePushButton = QtWidgets.QPushButton(self.centralwidget)
        self.talkForMePushButton.setObjectName("talkForMePushButton")
        self.horizontalLayout.addWidget(self.talkForMePushButton)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: File and Help (with a Debug submenu), plus a status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 393, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        self.menuHelp.setObjectName("menuHelp")
        self.menuDebug = QtWidgets.QMenu(self.menuHelp)
        self.menuDebug.setObjectName("menuDebug")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Menu actions (wired to slots elsewhere by connectSlotsByName).
        self.actionNew_Chat = QtWidgets.QAction(MainWindow)
        self.actionNew_Chat.setObjectName("actionNew_Chat")
        self.actionYou_Don_t_Need_Help = QtWidgets.QAction(MainWindow)
        self.actionYou_Don_t_Need_Help.setObjectName("actionYou_Don_t_Need_Help")
        self.actionOn = QtWidgets.QAction(MainWindow)
        self.actionOn.setObjectName("actionOn")
        self.actionOff = QtWidgets.QAction(MainWindow)
        self.actionOff.setObjectName("actionOff")
        self.actionAbout = QtWidgets.QAction(MainWindow)
        self.actionAbout.setObjectName("actionAbout")
        self.actionLoad_Brainz = QtWidgets.QAction(MainWindow)
        self.actionLoad_Brainz.setObjectName("actionLoad_Brainz")
        self.menuFile.addAction(self.actionNew_Chat)
        self.menuFile.addAction(self.actionLoad_Brainz)
        self.menuDebug.addAction(self.actionOn)
        self.menuDebug.addAction(self.actionOff)
        self.menuHelp.addAction(self.actionAbout)
        self.menuHelp.addAction(self.menuDebug.menuAction())
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply all user-visible (translatable) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Aivery"))
        self.textBrowser.setToolTip(_translate("MainWindow", "<html><head/><body><p>The box in which you see your chat</p></body></html>"))
        self.textBrowser.setWhatsThis(_translate("MainWindow", "<html><head/><body><p><br/></p></body></html>"))
        self.lineEdit.setToolTip(_translate("MainWindow", "<html><head/><body><p>Type Here</p></body></html>"))
        self.pushButton.setToolTip(_translate("MainWindow", "<html><head/><body><p>Send your messages to the bot</p></body></html>"))
        self.pushButton.setText(_translate("MainWindow", "Send"))
        self.talkForMePushButton.setToolTip(_translate("MainWindow", "<html><head/><body><p>Make the bot talk to itself</p></body></html>"))
        self.talkForMePushButton.setText(_translate("MainWindow", "Talk for me"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuHelp.setTitle(_translate("MainWindow", "Help"))
        self.menuDebug.setTitle(_translate("MainWindow", "Debug"))
        self.actionNew_Chat.setText(_translate("MainWindow", "New Chat"))
        self.actionYou_Don_t_Need_Help.setText(_translate("MainWindow", "You Don\'t Need Help"))
        self.actionOn.setText(_translate("MainWindow", "On"))
        self.actionOff.setText(_translate("MainWindow", "Off"))
        self.actionAbout.setText(_translate("MainWindow", "About"))
        self.actionLoad_Brainz.setText(_translate("MainWindow", "Load Brainz"))
self.actionLoad_Brainz.setText(_translate("MainWindow", "Load Brainz"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"ngf4@nau.edu"
] | ngf4@nau.edu |
61a8535384775f8ced5087297d3e3c4ac4284c95 | d581f118d331b6f3846650f0ae4ae4b96c5a4aff | /web/login.py | e0e6d1ba5422805909d60a8763071f98235d7bd9 | [] | no_license | Liangjianghao/learnPython | f5b0306e6873cf9fd2dd79577a6af82aa2e0a864 | 86c8780e204ff085293d432ea952fae3b162ce20 | refs/heads/master | 2021-09-01T12:54:39.361906 | 2017-12-27T03:49:31 | 2017-12-27T03:49:31 | 110,513,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | # -- coding: UTF-8 --
import time
import sys
import requests
import os
import os.path
import chargeAlert
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from PIL import Image,ImageEnhance
import pytesseract
import getstring
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def getCode():
    """Return a captcha string from the getstring helper module.

    NOTE(review): appears unused in this file — verify before removing.
    """
    return getstring.getString()
def getNumberOfJingling(zhanghao):
    """Log one account into vip.ipjingling.com via Selenium, solve the
    captcha with OCR, and record the account's points / online counts.

    Retries itself recursively when the captcha OCR fails (a login alert
    appears). Writes its result line to the module-global file `fo`, which
    is opened in the driver code below — NOTE(review): this implicit global
    dependency is fragile.

    :param zhanghao: account name (e-mail) to log in with
    """
    driver = webdriver.Chrome()
    driver.get("http://vip.ipjingling.com/")
    elem = driver.find_element_by_id("Text_Login_EMail")
    elem.send_keys(zhanghao)
    # print zhanghao
    elem = driver.find_element_by_id("Text_Login_Pwd")
    elem.send_keys("123456")
    driver.get_screenshot_as_file('/Users/l/Desktop/screen/image.png')  # easier to understand this way
    im =Image.open('/Users/l/Desktop/screen/image.png')
    box = (915,260,975,290)  # set the region to crop (the captcha area)
    region = im.crop(box)  # region is now a new image object
    region.save("/Users/l/Desktop/img/image_code.png")
    # region.show()  # showing it would keep the file locked, so leave commented out
    # driver.close()
    im=Image.open("/Users/l/Desktop/img/image_code.png")
    imgry = im.convert('L')  # image enhancement: convert to greyscale (binarise)
    sharpness =ImageEnhance.Contrast(imgry)  # contrast enhancement
    sharp_img = sharpness.enhance(2.0)
    sharp_img.save("/Users/l/Desktop/img/image_code.png")
    # time.sleep(3)
    # OCR the captcha image into text.
    code=pytesseract.image_to_string(sharp_img)
    print(code)
    # code=getstring.getString()
    # if code:
    elemCode = driver.find_element_by_id("Text_Login_VerCode")
    # elemCode.send_keys(code)
    elemCode.send_keys(code)
    elemCode.send_keys(Keys.RETURN)
    # time.sleep(3)
    # An alert means the captcha was rejected: accept it, close the browser
    # and retry this account from scratch (recursion).
    result=chargeAlert.alert_is_present()(driver)
    while result:
        print'验证失败'
        result.accept()
        driver.close()
        getNumberOfJingling(zhanghao)
    # else:
    time.sleep(3)
    # Logged in: open the "run" page and scrape points and online counts.
    guajiBtn = driver.find_element_by_id("A_Menu_Run").click()
    jifen = driver.find_element_by_id("TD_Run_Score")
    guajishu=driver.find_element_by_id("Span_Run_OnNum")
    guajishuT=driver.find_element_by_id("Span_Run_MaxNum")
    print zhanghao+'--积分:'+jifen.text+'--在线:'+guajishu.text
    fo.write(zhanghao+'--积分:'+jifen.text+'--在线:'+guajishu.text)
    driver.close()
# Driver: read one account name per line and scrape each in turn.
with open('zh.txt', 'r') as f:
    data = f.readlines() # read every line of the txt into data
    # print data
# Output file lives next to this script.
path=os.path.split( os.path.realpath( sys.argv[0] ) )[0]
fo=open('%s/jl.txt'%(path),'w')
for zhanghao in data:
    # print zhanghao.strip()
    getNumberOfJingling(zhanghao.strip())
fo.close()
# driver.close()
| [
"l@liangjianghaodeMacBook-Pro.local"
] | l@liangjianghaodeMacBook-Pro.local |
cb6ca8478194f822e8d744c64355272bed12b941 | 22243831ccc94ff871720eb0f1a1f4ddeff8c393 | /valid.py | df1b7191a8aae5f5c532053c50313d2e3ae647da | [] | no_license | woozi1122/pytorch-yolov | 63db0c34cee46ec6ee0d30997e9d7ade65df36b1 | 313dd25c297554d114d28727298ffe1386fd4a0a | refs/heads/master | 2023-09-05T21:38:12.921171 | 2021-11-24T08:12:35 | 2021-11-24T08:12:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,107 | py | import sys
sys.path.append("./util")
import numpy as np
from torch import nn
import torch
from util.const import CONST
from torch.utils.data import DataLoader
from tqdm import tqdm
from util.dataload import YOLODataSet, yolo_dataset_collate
from terminaltables import AsciiTable
from util.model import MyYOLO, getWeight,initialParam
from util.utils import handleBox,notMaxSuppression,iou
# Collect which predicted boxes are correct detections
def getTrueBox(outputs, bboxes):
    """Mark each predicted box as correct/incorrect against ground truth.

    :param outputs: per-image tensors of predictions [x,y,x,y,conf,clsConf,cls]
        (None for images with no detections)
    :param bboxes: ground-truth rows [picNumber, cls, x1, y1, x2, y2]
    :return: list of [isCor, preConf, preLabels] triples, one per image with
        detections; isCor is 1 where the box matches an unclaimed target of
        the same class with IoU above CONST.valIOUTher.
    """
    res = []
    for i,output in enumerate(outputs):
        # one image at a time
        if output is None: # skip images with no predicted boxes
            continue
        preBoxes = output[:,:4]
        preLabels = output[:,6]
        # Combined confidence = objectness * class confidence.
        preConf = output[:,4] * output[:,5]
        # Ground-truth rows belonging to this image.
        targetBboxes = bboxes[bboxes[:,0] == i]
        targetBoxes = targetBboxes[:,2:]
        targetLabels = targetBboxes[:,1]
        detectedBox = []
        isCor = torch.zeros_like(preLabels)
        for j, preBox in enumerate(preBoxes):
            # one predicted box at a time
            # Stop once every target has been claimed.
            if (len(detectedBox) == len(targetLabels)):
                break
            # print(iou(preBox, targetBoxes, isleftT2rightD=True))
            iout, maxI = torch.max(iou(preBox, targetBoxes, isleftT2rightD=True), 0)
            # Each target may be claimed by at most one prediction.
            if iout > CONST.valIOUTher and maxI not in detectedBox and preLabels[j] == targetLabels[maxI]:
                isCor[j] = 1
                detectedBox.append(maxI)
        res.append([isCor, preConf, preLabels])
    return res
#==================================#
#   Compute model metrics (mAP)
#==================================#
def calMap(isCor, preConf, preLabels, targetLabels):
    """Compute per-class recall, precision and average precision (VOC-style).

    :param isCor: 1/0 array — whether each prediction is a correct detection
    :param preConf: confidence score of each prediction
    :param preLabels: predicted class of each prediction
    :param targetLabels: class of every ground-truth box
    :return: (R, P, AP, uClasses) — lists of final recall, precision and AP
        per class, plus the sorted unique ground-truth classes.
    """
    # Rank all predictions by descending confidence.
    order = np.argsort(-preConf)
    isCor = isCor[order]
    preConf = preConf[order]
    preLabels = preLabels[order]
    uClasses = np.unique(targetLabels)
    R, P, AP = [], [], []
    for cls in uClasses:
        hits = isCor[preLabels == cls]
        n_target = targetLabels[targetLabels == cls].size  # ground truths of this class
        if hits.size == 0:
            # Class never predicted: recall/precision/AP are all zero.
            R.append(0)
            P.append(0)
            AP.append(0)
            continue
        # Cumulative true/false positives down the ranked list.
        tp = hits.cumsum()
        fp = (1 - hits).cumsum()
        recall = tp / n_target
        precision = tp / (tp + fp)
        R.append(recall[-1])
        P.append(precision[-1])
        # Pad so the curve starts at recall 0 and can reach recall 1,
        # then make precision monotonically non-increasing (right-to-left max).
        recall = np.concatenate(([0.0], recall, [1.0]))
        precision = np.concatenate(([0.0], precision, [0.0]))
        for k in range(precision.size - 1, 0, -1):
            precision[k - 1] = max(precision[k], precision[k - 1])
        # AP = area under the precision-recall step curve.
        steps = np.where(recall[1:] != recall[:-1])[0]
        AP.append(np.sum((recall[steps + 1] - recall[steps]) * precision[steps + 1]))
    return R, P, AP, uClasses
#==================================#
# show MP
#==================================#
def showMap(R,P,AP,uClasses):
    """Print a per-class AP/R/P table plus the overall mAP.

    :param R, P, AP: per-class metrics as returned by calMap
    :param uClasses: class indices (into CONST.classes) matching those lists
    """
    res = [["class","AP", "R", "P"]]
    for i,_ in enumerate(uClasses):
        res.append([CONST.classes[int(uClasses[i])], "%.4f" % AP[i], "%.4f" % R[i], "%.4f" % P[i]])
    # Blank separator row, then the mean AP over all classes.
    res.append([])
    res.append(["MAP", "%.4f" % np.average(AP)])
    print(AsciiTable(res).table)
#==================================#
#   Show mAP table
#==================================#
def valid():
    """Run the YOLO model over the validation set and print mAP metrics.

    Loads the trained weights, decodes and NMS-filters predictions for each
    batch, matches them against ground truth with getTrueBox, then computes
    and displays per-class R/P/AP via calMap/showMap.
    """
    yolo = MyYOLO() # type: nn.Module
    getWeight(yolo)
    yolo.eval()
    yolo.to(CONST.device)
    valDataSet = YOLODataSet(train=False, type="coco")
    valDataLoader = DataLoader(valDataSet, batch_size=CONST.batchSize, num_workers=CONST.num_workers, shuffle=False, pin_memory=True,
                               drop_last=True,collate_fn=yolo_dataset_collate)
    corBox = []
    targetLabels = []
    with torch.no_grad():
        for imgs, bboxes in tqdm(valDataLoader, desc="Validating"):
            imgs = imgs.to(CONST.device)
            bboxes = bboxes.to(CONST.device) # input rows are [picNumber, cls, x, y, w, h]
            output = yolo(imgs)
            output = handleBox(output, yolo) # decode anchor boxes; returned shape is (batchSize, 10647, 85)
            output = notMaxSuppression(output) # non-max suppression; returns per image [x,y,x,y,conf,cls]
            # print(f"after NMS: {len(output), [x.shape if x is not None else None for x in output]}")
            bboxes[:,2:] = torch.cat([bboxes[:,2:4] - bboxes[:,4:6] / 2, bboxes[:,2:4] + bboxes[:,4:6] / 2], 1) # convert center/size to xyxy
            corBox.extend(getTrueBox(output, bboxes))
            targetLabels.append(bboxes[:,1])
    if len(corBox) == 0:
        print("没有任何输出")
        exit()
    # Flatten per-image results into single numpy arrays for calMap.
    isCor, preConf, preLabels = [torch.cat(x, 0).cpu().numpy() for x in zip(*corBox)]
    targetLabels = torch.cat(targetLabels, 0).cpu().numpy()
    R,P,AP,uClasses = calMap(isCor, preConf, preLabels, targetLabels)
    showMap(R,P,AP,uClasses)
#==================================#
#   Main entry point
#==================================#
# Run validation when this file is executed directly.
if __name__=='__main__':
    valid()
| [
"799034552@qq.com"
] | 799034552@qq.com |
9a1ca3c509638d78a79fcd61b074c55fb44366ad | 9a324e30036e9dde5760fa3c38bb06e889a97de7 | /test-product-test.py | d8437b442c3a3dd6bb56931b9b4eaeddf1e221c8 | [] | no_license | limingzhizhong2018/jiankong1 | 77ca55f32cf1c64e0acfde0128be1110223134aa | 1654ef9cc54de83582dddd270c6b052d140f0ba0 | refs/heads/master | 2020-03-16T13:54:32.462165 | 2018-05-16T14:34:59 | 2018-05-16T14:34:59 | 132,702,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | product_list = [('iphone', 5000), ('MacBook', 12000)]
print(product_list)
salary = input("In put your salary:")
if salary.isdigit():
salary = int(salary)
while True:
for index, item in enumerate(product_list):
print(index, item)
user_choice = input("what is your shopping:")
if user_choice.isdigit():
user_choice = int(user_choice)
else:
input("what is your shopping:")
if user_choice.isdigit():
user_choice = int(user_choice)
| [
"limingzhizhong2018@gmail.com"
] | limingzhizhong2018@gmail.com |
31922d28c6739a750d43018da21479a3ecfee609 | 0cc1dcfd5db2533e182f233a16cfacc0423b9e0d | /parserepo.py | d9aeb8df0cf25c9b62624ffff9890b3d41ff73e3 | [] | no_license | omaidf/Github-Searcher | 105148fe9d594874f87aec40ccf4c53742be15f8 | 82bfbb7ce20ce0efd8670a2abfd77ab877fb1497 | refs/heads/master | 2020-12-19T05:58:02.164418 | 2020-01-22T18:39:49 | 2020-01-22T18:39:49 | 235,640,133 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,056 | py | import requests
import json
import colorama
import qprompt
from colorama import Fore, Back, Style
from jira import JIRA
import getpass
from github import Github
import os
import pyminizip
import sys
resulttext = []  # bodies of findings the reviewer confirmed as valid
total_count = 0  # findings left to review; createjira() fires when it hits 0
def searchterm(term,searchrepo):
global total_count
url = "https://github.$ENTERPRISE.com/api/v3/search/code?q=%s:repo:%s" % (term,searchrepo)
headers = {'Accept':'application/vnd.github.v3.text-match+json'}
search = requests.get(url,headers=headers)
searchdata = json.loads(search.content)
pages = int(searchdata["total_count"])/30
total_count = int(searchdata["total_count"])
print "%s possible findings" % total_count
print "%s pages to iterate" % pages
if pages > 35:
pages = 35
if pages == 0:
scrapeterms(term,searchrepo,pages)
else:
for i in range(1,pages):
scrapeterms(term,searchrepo,i)
def scrapeterms(term,searchrepo,page):
    """Fetch one page of code-search results and triage every match.

    Each matched fragment is shown to the reviewer via makeissue();
    when the global countdown reaches zero the JIRA ticket is created.
    """
    global total_count
    url = "https://github.$ENTERPRISE.com/api/v3/search/code?q=%s:repo:%s&page=%s" % (term,searchrepo,page)
    headers = {'Accept':'application/vnd.github.v3.text-match+json'}
    search = requests.get(url,headers=headers)
    searchdata = json.loads(search.content)
    results = searchdata["items"]
    for result in results:
        matches = result["text_matches"]
        repo = result["repository"]["full_name"]
        for match in matches:
            matchedtext = match["matches"]
            for sterm in matchedtext:
                if term in match["fragment"]:
                    fragment = "MATCHED CODE: \n%s" % (match["fragment"])
                    html_url = "CODE LOCATION: \n%s \n" % (result["html_url"])
                    # NOTE(review): only the *last* line info survives this
                    # inner loop; earlier matches on the same fragment are
                    # overwritten -- confirm that is intended.
                    for lines in matchedtext:
                        lineinfo = 'Found "%s" on the lines %s.' % (lines["text"],lines["indices"])
                    # NOTE(review): total_count was initialised to the number
                    # of result *items*, but is decremented once per matched
                    # term, so a file with several matches decrements more
                    # than once. Verify against searchterm()'s accounting.
                    total_count -= 1
                    if total_count == 0:
                        print "All findings analyzed! Creating JIRA...."
                        createjira()
                    else:
                        makeissue(repo,fragment,html_url,lineinfo)
def createjira():
title = "%s Repository Security" % (searchrepo)
auth_jira = JIRA('https://jira.$ENTERPRISE.com',basic_auth=(jirausername,jirapassword))
issue_dict = {
'project': 'CTM',
'summary': title,
'description': 'Please contact MIR for attachment password',
'issuetype': {'name': 'Security'},
'duedate': '2019-09-17'}
new_issue = auth_jira.create_issue(fields=issue_dict)
print "Created: https://jira.$ENTERPRISE.com/browse/%s" % (new_issue.key)
f= open("findings.txt","w+")
f.writelines(["%s\n" % item for item in resulttext])
f.close()
pyminizip.compress("findings.txt", None, "findings.zip", "password1", 0)
auth_jira.add_attachment(issue=new_issue.key, attachment='findings.zip')
os.remove("findings.txt")
os.remove("findings.zip")
print "Uploaded findings to %s" % (new_issue.key)
sys.exit()
def makeissue(repository,fragment,html_url,lineinfo):
    """Show one candidate finding in red and let the reviewer triage it.

    Confirmed findings are appended to the global `resulttext` list that
    createjira() later attaches to the ticket.
    """
    # NOTE(review): `repository` is accepted but never used in this body.
    print Fore.RED + fragment
    print (Style.RESET_ALL)
    body = """
    %s
    %s
    %s
    """ % (fragment,html_url,lineinfo)
    if qprompt.ask_yesno("Is this a valid result?", dft="n"):
        resulttext.append(body)
        qprompt.pause()
        qprompt.clear()
qprompt.clear()
# Collect credentials up front.
jirausername = raw_input("JIRA Username:")
jirapassword = getpass.getpass("JIRA Password:")
# NOTE(review): this client is never used again -- presumably constructed
# only to fail fast on bad credentials; confirm.
testauth = JIRA('https://jira.$ENTERPRISE.com',basic_auth=(jirausername,jirapassword))
searchrepo = raw_input("What repository would you like to search :")
search = raw_input("What is your search term :")
searchterm(search,searchrepo)
"omaid@faizyar.com"
] | omaid@faizyar.com |
c757c0ad4d66c3d6117981c5779b09e5a59d6751 | 88fd4a9ea39b19476a20575376a65643e763713c | /examples/Types.py | 4f9e1e7f56c18b9fb8b44caee914cce42be17826 | [] | no_license | ericsu921/Python | 9bb7b01edfc34e1928686e413b2a01b7aaa7dc35 | 74617291f2120bcb947a4933c4267c47b9a45699 | refs/heads/master | 2016-09-08T02:02:59.866817 | 2015-03-30T07:58:09 | 2015-03-30T07:58:09 | 33,110,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,451 | py | #!/usr/bin/env python3
# --------
# Types.py
# --------
import sys
from types import FunctionType
print("Types.py")

# --- type itself -------------------------------------------------------
# `type` is its own type; every class is a subclass of itself and
# ultimately of `object`.
assert type(type) is type
assert issubclass(type, type)
assert issubclass(type, object)

# --- bool --------------------------------------------------------------
b = True
b = False
assert type(b) is bool
assert type(bool) is type
assert issubclass(bool, bool)
assert issubclass(bool, object)

# --- int: arbitrary-precision, no overflow -----------------------------
i = 2
assert type(i) is int
assert type(int) is type
assert issubclass(int, int)
assert issubclass(int, object)
assert issubclass(bool, int)  # bool is a subclass of int

# --- float -------------------------------------------------------------
f = 2.3
assert type(f) is float
assert type(float) is type
assert issubclass(float, float)
assert issubclass(float, object)

# --- complex: real + imaginary parts -----------------------------------
c = 2 + 3j
assert type(c) is complex
assert type(complex) is type
assert issubclass(complex, complex)
assert issubclass(complex, object)

# --- str: either quote style works -------------------------------------
s = 'abc'
s = "abc"
assert type(s) is str
assert type(str) is type
assert issubclass(str, str)
assert issubclass(str, object)

# --- list: mutable, ordered, heterogeneous -----------------------------
l = [2, "abc", 3.45]
assert type(l) is list
assert type(list) is type
assert issubclass(list, list)
assert issubclass(list, object)

# --- tuple: effectively an immutable list ------------------------------
t = (2, "abc", 3.45)
assert type(t) is tuple
assert type(tuple) is type
assert issubclass(tuple, tuple)
assert issubclass(tuple, object)

# --- set: unordered, unique, items must be hashable --------------------
s = {2, "abc", 3.45}
assert type(s) is set
assert type(set) is type
assert issubclass(set, set)
assert issubclass(set, object)
# dict: heterogeneous key/value pairs; keys must be hashable, values can
# be anything.
d = {2 : "def", 3.45 : 3, "abc" : 6.78}
# BUG FIX: the original line read `assert type(d) is dictionary` --
# `dictionary` is not a defined name, so the script died with NameError.
assert type(d) is dict
assert type(dict) is type
assert issubclass(dict, dict)
assert issubclass(dict, object)
class A:
    """Minimal user-defined type carrying two instance attributes."""
    def __init__(self, i, f):
        # Assigning through `self` creates instance attributes; a bare
        # `i = i` would only bind a local variable.
        self.i = i
        self.f = f

x = A(2, 3.45)
assert type(x) is A
assert type(A) is type
assert issubclass(A, A)
assert issubclass(A, object)

class B(A):
    """Subclass of A adding a third attribute.

    Python uses *replacement* overriding: the child's __init__ replaces
    the parent's, so calling A.__init__ here is a choice, not an
    obligation (unlike Java constructors, which always chain).
    """
    def __init__(self, i, f, s):
        # Delegate to the parent so self.i / self.f exist.
        # BUG FIX: the original explanation comment here spilled onto a
        # second line that lost its leading '#', making the whole file a
        # SyntaxError; the stray text has been folded back into comments.
        A.__init__(self, i, f)
        self.s = s

y = B(2, 3.45, "abc")
assert type(y) is B
assert type(B) is type
assert issubclass(B, B)
assert issubclass(B, object)
assert issubclass(B, A)
def inc(v):
    """Return v + 1; defined only to show functions are ordinary objects."""
    return v + 1

# Plain functions are instances of types.FunctionType -- nothing special.
assert type(inc) is FunctionType
assert type(FunctionType) is type
assert issubclass(FunctionType, FunctionType)
assert issubclass(FunctionType, object)
print("Done.")
| [
"ericsu921@gmail.com"
] | ericsu921@gmail.com |
c48c17fe8c408e0c7accacd946f053e7c3b55ab5 | 61d09942ab4aeefa2cd9d6afb2dc5ab7e8c47e07 | /osmosis_aws_driver/__init__.py | 62329507dca1e2397b92b9929ec8d733ce422a41 | [
"Apache-2.0"
] | permissive | frankenstien-831/osmosis-aws-driver | 4e375a0f7213d5ea9f187aaed1095e5301dd63ac | 24c029d5e58334c2010e163e51e8767bbae85568 | refs/heads/master | 2021-02-05T16:22:31.569140 | 2019-04-02T12:27:04 | 2019-04-02T12:27:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
# Package metadata consumed by packaging tools / setup.py.
__author__ = """OceanProtocol"""
__version__ = '0.0.2'
| [
"enrique@oceanprotocol.com"
] | enrique@oceanprotocol.com |
4785017c39dd0a0b8c0de104d8060f349a553bb0 | 355ba182e360df464293d7365beb548504993bc3 | /Instacart.py | b79985c7efabfec6cdf8190a66b8527791e9138d | [] | no_license | isabr/instacart | 64bfae96cf47a194c5a09cb50a69254667e431b6 | 1fcd6dc07723f56cc228b07ff9d9945fbb2d0ca0 | refs/heads/master | 2021-01-01T15:57:29.118789 | 2017-07-21T17:57:28 | 2017-07-21T17:57:28 | 97,742,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,467 | py |
# coding: utf-8
# # Initial Setup
# In[1]:
# Import
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pylab as plt
import random
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import PCA
# Notebook-export artifact: render matplotlib figures inline.
get_ipython().magic(u'matplotlib inline')
# # Import and Organize Data
# In[ ]:
path = input('Enter the location of the files: ')
# In[ ]:
# NOTE(review): the orders path uses a backslash ('\o') while the others
# use '/'; works on Windows but is fragile elsewhere -- confirm.
orders = pd.read_csv(path + '\orders.csv')
order_products_priors = pd.read_csv(path + '/order_products__prior.csv')
order_products_train = pd.read_csv(path + '/order_products__train.csv')
products = pd.read_csv(path + '/products.csv')
# In[7]:
# Making a sample of the customers
orders_n_smp = random.sample(set(orders.user_id),2500)
orders_smp = orders[orders.user_id.isin(orders_n_smp)]
# In[9]:
# Stack prior and train order/product rows into one frame.
frames = [order_products_priors, order_products_train]
data_01 = pd.concat(frames)
# In[10]:
# Join sampled orders with their products, then attach product metadata.
data = pd.merge(orders_smp, data_01)
data_full_01 = pd.merge(data, products)
# # Dictionary of words
# In[11]:
# Letters allowed in a normalized token; everything else is stripped.
permitted_chars = "abcdefghijklmnopqrstuvwxyz"
# In[13]:
# Count (user_id, word) occurrences across product names. Words are
# lower-cased, stripped to a-z, and kept only if longer than 2 chars.
df = data_full_01
word_count = dict()
# BUG-PRONE PATTERN FIXED: the original walked df['user_id'] while
# indexing df['product_name'] with a manually maintained counter (plus a
# redundant bounds check); zip() iterates the two columns in lockstep
# without the fragile bookkeeping. (Unused `deleted_words`/`i` dropped.)
for user_id, record in zip(df['user_id'], df['product_name']):
    words = record.split()
    for word in words:
        word_an = ''.join(e for e in word.lower() if e in permitted_chars)
        if len(word_an) > 2:
            index = user_id, word_an
            word_count[index] = word_count.get(index,0) + 1
# # Apply TfidfTransformer()
# In[14]:
# Reshape the (user, word) -> count mapping into a users x words matrix.
tf_01 = pd.Series(word_count).reset_index()
tf_01.columns = ['user_id', 'product', 'qtd']
# In[75]:
tf_02 = tf_01.pivot(index='user_id', values='qtd', columns='product')
tf_02 = tf_02.fillna(0)
# In[31]:
# Re-weight raw counts by tf-idf so common words don't dominate.
transformer = TfidfTransformer()
tf_03 = transformer.fit_transform(tf_02)
# In[32]:
tf_04 = pd.DataFrame(tf_03.todense())
tf_04.columns= list(tf_02.columns)
# # Fit, predict and plot clusters
# In[61]:
# Run k-means in reduced data and plot results
# NOTE(review): `data` here shadows the merged orders frame created above;
# a distinct name would be safer.
data = tf_04
n_digits = 5
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(-0.5, 0.5)
plt.ylim(-0.5, 0.5)
plt.xticks(())
plt.yticks(())
plt.show()
# Source: http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html
# In[76]:
# Run k-means using all features
kmeans = KMeans(n_clusters=5, random_state=1).fit(tf_04)
tf_02['kmeans'] = kmeans.predict(tf_04)
# # Plot Most Bought Products per Cluster
# In[79]:
# Attach each purchase row to its user's cluster label.
products_analysis = pd.merge(data_full_01.reset_index(), tf_02['kmeans'].reset_index(),on=['user_id'], how='outer')
# In[85]:
# Choosing cluster 0 to analyse results
df_0 = DataFrame(products_analysis[products_analysis.kmeans == 0]['product_name'].value_counts()[:10])
# In[86]:
# Normalize counts by the number of distinct orders in the cluster.
df_0['% of orders including product']= df_0['product_name']/len(set(products_analysis[products_analysis.kmeans == 0]['order_id']))
# In[87]:
df_0 = df_0['% of orders including product']
df_0.plot(kind='bar', stacked=True, subplots=True, figsize=(6, 5), grid=False, title="Cluster 0 - Most purchased products")
| [
"deamorim.isadora@gmail.com"
] | deamorim.isadora@gmail.com |
745e078af6fac62e8b7d0448a12d31bb21a01a17 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/testenv/dayNight.py | 195043c0675c5a8b4e4d78951c90f5ecf6c3de4e | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 8,266 | py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
# Background music for the two phases of the cycle.
dayMusic = loader.loadMusic("phase_4/audio/bgm/TC_nbrhood.mid")
# dayMusic = loader.loadMusic("phase_8/audio/bgm/TB_nbrhood.mid")
# base.cr.playGame.hood.loader.snow.cleanup()
nightMusic = loader.loadMusic("phase_8/audio/bgm/DL_nbrhood.mid")
# Load up some sfx
birdSfx1 = loader.loadSfx("phase_8/audio/sfx/SZ_DG_bird_01.mp3")
birdSfx2 = loader.loadSfx("phase_8/audio/sfx/SZ_DG_bird_02.mp3")
birdSfx3 = loader.loadSfx("phase_8/audio/sfx/SZ_DG_bird_03.mp3")
cricket1 = loader.loadSfx("/c/soundelux/Estate_Cricket_1.mp3")
cricket2 = loader.loadSfx("/c/soundelux/Estate_Cricket_2.mp3")
rooster = loader.loadSfx("/c/soundelux/Estate_rooster.mp3")
# No more tt birds chirping
taskMgr.remove("TT-birds")
# Get rid of the sky that comes with TT central
taskMgr.remove("skyTrack")
base.cr.playGame.hood.sky.hide()
base.cr.playGame.hood.loader.music.stop()
# Load up our own sky models
# NOTE(review): bins 100/101/102 presumably layer day sky behind its
# clouds behind the fading dawn/night domes -- confirm cull-bin ordering.
nightSky = loader.loadModel("phase_8/models/props/DL_sky")
nightSky.setScale(0.8)
nightSky.setTransparency(1)
nightSky.setBin("background", 102)
daySky = loader.loadModel("phase_3.5/models/props/TT_sky")
daySky.setBin("background", 100)
dayCloud1 = daySky.find("**/cloud1")
dayCloud2 = daySky.find("**/cloud2")
dayCloud1.setBin("background", 101)
dayCloud2.setBin("background", 101)
dawnSky = loader.loadModel("phase_6/models/props/MM_sky")
dawnSky.setScale(0.8)
dawnSky.setTransparency(1)
# BUG FIX: the closing parenthesis was missing on this call, which made
# the entire file a SyntaxError.
dawnSky.setBin("background", 102)
# Polylight effect on the local avatar; its weight is lerped between
# `brightness` (day) and `darkness` (night) during the cycle.
pe = PolylightEffect.make()
brightness = 1.25
darkness = 0.8
pe.setWeight(brightness)
base.localAvatar.node().setEffect(pe)
# Parent every sky dome to the camera so it follows the viewer.
for sky in (nightSky, daySky, dawnSky):
    sky.reparentTo(camera)
    sky.setZ(0.0)
    sky.setHpr(0.0, 0.0, 0.0)
    ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ)
    sky.node().setEffect(ce)
    sky.setDepthTest(0)
    sky.setDepthWrite(0)
# Color scale defines
dawnColor = Vec4(1,0.8,0.4,1)
dayColor = Vec4(1,1,1,1)
duskColor = Vec4(0.8,0.4,0.7,1)
nightColor = Vec4(0.3,0.3,0.5,1)
onAlpha = Vec4(1,1,1,1)     # fully visible sky dome
offAlpha = Vec4(1,1,1,0)    # fully faded-out sky dome
# Geom of the hood
geom = base.cr.playGame.hood.loader.geom
# List of butterflies
butterflies = base.cr.doFindAll("DistributedButterfly")
# List of lamps and glow discs
oneLights = geom.findAllMatches("**/prop_post_one_light_DNARoot")
threeLights = geom.findAllMatches("**/prop_post_three_light_DNARoot")
lamps = oneLights + threeLights
discs = []
# List of NodePaths of PolylightNodes
polylights = []
lightIndex = 0  # running counter to give every PolylightNode a unique name
# Attach a flickering warm polylight plus two glow sprites (one on the
# bulb, one on the ground) to every single-bulb lamp post.
for lamp in oneLights:
    lamp.setColorScale(1,1,1,1,1)
    disc = loader.loadModel("phase_3.5/models/props/glow")
    # Add PolylightNodes
    lightIndex += 1
    plNode = PolylightNode("pl" + str(lightIndex))
    plNode.setRadius(20)
    #plNode.setPos(0,0,2)
    plNode.setColor(1.0,0.8,0.4)
    plNode.setFlickerType(PolylightNode.FSIN)
    plNode.setFreq(6.0)
    plNode.setOffset(-0.5)
    plNodePath = NodePath(plNode)
    polylights.append(plNodePath)
    # Register the new light with the avatar's existing polylight effect.
    base.localAvatar.node().setEffect(base.localAvatar.node().getEffect(PolylightEffect.getClassType()).addLight(plNodePath))
    # A glow around the lamp light bulb
    disc.setBillboardPointEye()
    disc.setPos(0.2,-1,10)
    disc.setScale(8)
    disc.setColorScale(1,1,0.8,0.25,1)
    disc.setTransparency(1)
    disc.reparentTo(lamp.find("**/p13"))
    #disc.node().setEffect(pe)
    discs.append(disc)
    # A glow on the floor
    disc = loader.loadModel("phase_3.5/models/props/glow")
    disc.setPos(0,0,0.025)
    disc.setHpr(0,90,0)
    disc.setScale(14)
    disc.setColorScale(1,1,0.8,0.25,1)
    disc.setTransparency(1)
    disc.reparentTo(lamp.find("**/p13"))
    plNodePath.reparentTo(disc)
    disc.node().setEffect(pe)
    discs.append(disc)
# Same treatment for the three-bulb lamp posts, but with a white,
# randomly flickering, quadratically attenuated light.
for lamp in threeLights:
    lamp.setColorScale(1,1,1,1,1)
    disc = loader.loadModel("phase_3.5/models/props/glow")
    # Add PolylightNodes
    lightIndex += 1
    plNode = PolylightNode("pl" + str(lightIndex))
    plNode.setRadius(20)
    plNode.setColor(1.0,1.0,1.0)
    plNode.setFlickerType(PolylightNode.FRANDOM)
    #plNode.setFreq(6.0)
    plNode.setOffset(-0.5)
    plNode.setScale(0.2)
    plNode.setAttenuation(PolylightNode.AQUADRATIC)
    plNodePath = NodePath(plNode)
    polylights.append(plNodePath)
    base.localAvatar.node().setEffect(base.localAvatar.node().getEffect(PolylightEffect.getClassType()).addLight(plNodePath))
    # A glow around the lamp light bulbs.
    disc.setBillboardPointEye()
    disc.setPos(0,-1,10)
    disc.setScale(10)
    disc.setColorScale(1,1,0.8,0.25,1)
    disc.setTransparency(1)
    disc.reparentTo(lamp.find("**/p23"))
    plNodePath.reparentTo(disc)
    #disc.node().setEffect(pe)
    discs.append(disc)
    # A glow on the floor
    disc = loader.loadModel("phase_3.5/models/props/glow")
    disc.setPos(0,0,0.025)
    disc.setHpr(0,90,0)
    disc.setScale(14)
    disc.setColorScale(1,1,0.8,0.2,1)
    disc.setTransparency(1)
    disc.reparentTo(lamp.find("**/p23"))
    #disc.node().setEffect(pe)
    discs.append(disc)
def makeNight():
    """Switch ambience to night: lamps/glows on, crickets, night music,
    butterflies hidden."""
    for lamp in lamps:
        lamp.setColorScale(1,1,1,1,1)
    for disc in discs:
        disc.show()
    base.playSfx(cricket1, volume=0.3)
    dayMusic.stop()
    base.playMusic(nightMusic, volume=0.5)
    for b in butterflies:
        b.butterflyNode.hide()
def makeDay():
    """Switch ambience to day: lamps/glows off, rooster crow, day music,
    butterflies visible."""
    for lamp in lamps:
        lamp.clearColorScale()
    for disc in discs:
        disc.hide()
    base.playSfx(rooster, volume=0.2)
    nightMusic.stop()
    base.playMusic(dayMusic, volume=0.7)
    for b in butterflies:
        b.butterflyNode.show()
def lerpDaySkyFunc(color):
    """LerpFunctionInterval callback: fade the day sky dome to `color`."""
    daySky.setColorScale(color, 1)
def lerpDawnSkyFunc(color):
    """LerpFunctionInterval callback: fade the dawn sky dome to `color`."""
    dawnSky.setColorScale(color, 1)
def lerpNightSkyFunc(color):
    """LerpFunctionInterval callback: fade the night sky dome to `color`."""
    nightSky.setColorScale(color, 1)
def lerpLightWeightFunc(weight):
    """LerpFunctionInterval callback: blend the avatar's polylight weight."""
    base.localAvatar.node().setEffect(base.localAvatar.node().getEffect(PolylightEffect.getClassType()).setWeight(weight))
# Change this to change the day/night cycle length
t = 120.0
tSeg = t / 10.0
# Start from a known state: silence, night sky visible, night tint.
dayMusic.stop()
nightMusic.stop()
nightSky.setColorScale(onAlpha)
daySky.setColorScale(offAlpha)
dawnSky.setColorScale(offAlpha)
render.setColorScale(nightColor)
# One full cycle: dawn -> day -> dusk -> night, each fade lasting tSeg,
# with bird/cricket stingers and music/lamp switches in between.
i = Parallel(Sequence(Parallel(LerpColorScaleInterval(render, tSeg, dawnColor),
                               LerpFunctionInterval(lerpLightWeightFunc, duration=tSeg, toData=darkness, fromData=brightness),
                               LerpFunctionInterval(lerpNightSkyFunc, duration=tSeg, toData=offAlpha, fromData=onAlpha),
                               LerpFunctionInterval(lerpDawnSkyFunc, duration=tSeg, toData=onAlpha, fromData=offAlpha),
                               ),
                      Func(makeDay),
                      Wait(tSeg),
                      Parallel(LerpFunctionInterval(lerpDawnSkyFunc, duration=tSeg, toData=offAlpha, fromData=onAlpha),
                               LerpFunctionInterval(lerpDaySkyFunc, duration=tSeg, toData=dayColor, fromData=offAlpha),
                               LerpColorScaleInterval(render, tSeg, dayColor),
                               ),
                      Func(base.playSfx, birdSfx1, 0, 1, 0.3),
                      Wait(tSeg),
                      Func(base.playSfx, birdSfx2, 0, 1, 0.3),
                      Parallel(LerpFunctionInterval(lerpDaySkyFunc, duration=tSeg, toData=duskColor, fromData=dayColor),
                               LerpColorScaleInterval(render, tSeg, duskColor),
                               LerpFunctionInterval(lerpLightWeightFunc, duration=tSeg, toData=brightness, fromData=darkness),
                               ),
                      Func(makeNight),
                      Parallel(LerpFunctionInterval(lerpDaySkyFunc, duration=tSeg, toData=offAlpha, fromData=duskColor),
                               LerpFunctionInterval(lerpNightSkyFunc, duration=tSeg, toData=onAlpha, fromData=offAlpha),
                               LerpColorScaleInterval(render, tSeg, nightColor),
                               ),
                      Func(base.playSfx, cricket2, 0, 1, 0.2),
                      Wait(tSeg),
                      Func(base.playSfx, cricket1, 0, 1, 0.2),
                      Wait(tSeg),
                      ),
             )
i.loop()
"""
# To undo
i.finish()
render.clearColorScale()
dayMusic.stop()
nightMusic.stop()
"""
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
ac2f5ecf9b74b78a7017664f93a377a00d39e0a9 | df09d02691822e52d0f9dcca15e60bb39cd95ad4 | /lab_student_draft/labs/migrations/0006_share_groups_picked.py | a85d31bfda70ab89b963415124d428732406e9a0 | [
"BSD-3-Clause"
] | permissive | jericksanjuan/lab-student-draft | 8d36b1e753d87ee9530123c9e31f6bbbfaeb78df | 90b1a224ac8ea30f0654e8b8dbac0015f09179ff | refs/heads/master | 2021-01-15T18:46:38.407035 | 2014-12-12T08:20:45 | 2014-12-12T08:20:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add Share.groups_picked (IntegerField, default 0)."""
    dependencies = [
        ('labs', '0005_lab_active'),
    ]
    operations = [
        migrations.AddField(
            model_name='share',
            name='groups_picked',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
| [
"jerick@icannhas.com"
] | jerick@icannhas.com |
8b186d80975da42b065d8a2e3e2efbe315c611b0 | 3f48802d71eed41bbc88473344b4f1d14331876b | /backend/fureon/models/base.py | c3586c7c69a90d42a519b9c1e69f59c25d0d44ef | [
"Apache-2.0"
] | permissive | nattofriends/fureon | e27526571baa71355d208e21be4b30cc6596b307 | 5e9b5099bace5ba55d9a8e19ea3b392d131da5cf | refs/heads/master | 2020-05-30T06:55:35.691548 | 2015-05-11T05:57:04 | 2015-05-11T05:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | from sqlalchemy.ext.declarative import declarative_base
# Declarative base shared by the ORM models in this package.
Base = declarative_base()
class ModelManager(object):
    """Formats SQLAlchemy query results into plain dicts suitable for
    serialization (Python 2 codebase: keeps `unicode`/`iteritems`)."""
    def __init__(self, session):
        self._session = session
    def format_list_to_numbered_dict(self, input_list, start_count=0):
        """Map each list entry to a stringified index key:
        {u'<start_count>': first, u'<start_count+1>': second, ...}."""
        numbered_dict = {}
        # enumerate() replaces the original hand-maintained counter.
        for offset, entry in enumerate(input_list):
            numbered_dict[unicode(start_count + offset)] = entry
        return numbered_dict
    def format_query_rows_to_dict(self, query):
        """Convert a single row, or a list of rows, into dict(s) keyed
        by column name."""
        if isinstance(query, list):
            return map(self._format_row_to_dict, query)
        else:
            return self._format_row_to_dict(query)
    def remove_columns_from_query_rows(self, column_names, query_rows):
        """Strip the given columns in place from formatted row dicts.

        Accepts either a list of row dicts or a numbered dict of them.
        """
        if isinstance(query_rows, list):
            for row in query_rows:
                self._remove_columns_from_row(column_names, row)
        else:
            for row_number, row_data in query_rows.iteritems():
                self._remove_columns_from_row(column_names, row_data)
    def _format_row_to_dict(self, row):
        # BUG-PRONE NAME FIXED: the accumulator used to be called `dict`,
        # shadowing the builtin of the same name.
        row_dict = {}
        for column in row.__table__.columns:
            row_dict[column.name] = getattr(row, column.name)
        return row_dict
    def _remove_columns_from_row(self, column_names, row):
        for column_name in column_names:
            row.pop(column_name)
| [
"andytran2@gmail.com"
] | andytran2@gmail.com |
999aa35a45dee89653b8d7612146e2ab02bc0866 | ae48812c80218e25282f9e02d165ad06b05e6b6b | /inventory/migrations/0007_auto_20161024_1108.py | aa4f619d8655b2ec19f15ee03f2777c1d8c1b81e | [] | no_license | muryannye/unionminiproj | 023a08ac2281bb605e8fe4d65948db889ab76125 | 746d1f817d7290ecdf13e6333cc7213d7eb362b7 | refs/heads/master | 2021-05-03T22:46:41.924331 | 2016-10-24T19:22:50 | 2016-10-24T19:22:50 | 71,668,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.dev20161021125907 on 2016-10-24 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Computer.comments to a TextField (default '', max 300)."""
    dependencies = [
        ('inventory', '0006_auto_20161023_2325'),
    ]
    operations = [
        migrations.AlterField(
            model_name='computer',
            name='comments',
            field=models.TextField(default='', max_length=300),
        ),
    ]
| [
"Mary@jumping-jack-139.dynamic2.rpi.edu"
] | Mary@jumping-jack-139.dynamic2.rpi.edu |
8d01aff709fff005c6f78d6d965d431c7761e2bb | bf1bcf0e29b3b9d7f63ddde35148f0a581ed127a | /get_emrald.py | e36fe7700f0c257448669ee503e4ca899e2bdaa7 | [
"Apache-2.0"
] | permissive | gurgeh/silly-chess | 7a7cd978b316b6f91a1bc79379b2fe8365dfaadb | 6c2ff766e10184a9b475681bb9945d9bc8f9aa89 | refs/heads/master | 2021-01-20T16:17:32.050220 | 2018-01-24T10:30:10 | 2018-01-24T10:30:10 | 61,201,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import re
import random
import pickle
import progressbar
import requests
# Base URL of the puzzle-solution endpoint; the puzzle id is appended.
URL = "http://chess.emrald.net/psolution.php?Pos="
def get_pos(i):
    """Fetch puzzle *i* and return the first of the three expected
    script blocks, or None on malformed pages / failed requests."""
    try:
        # BUG FIX: the try block used to guard only re.findall (which does
        # not raise here) while the requests.get that actually throws on
        # timeouts/connection errors sat outside it -- one bad request
        # killed the whole crawl. Guard the network call too.
        h = requests.get(URL + str(i))
        xs = re.findall("text/javascript'>([^<]+)<", h.text)
    except Exception as e:
        print('Error %s (%d)' % (e, i))
        return
    if len(xs) != 3:
        print("%d len %d" % (i, len(xs)))
    else:
        return xs[0]
def get_all_pos(start, end):
    """Scrape puzzles [start, end) in random order and pickle the results.

    Returns a list of (puzzle_id, solution) pairs; also writes them to
    'emrald.p' (best effort -- a failed dump only prints the error so the
    scraped data is still returned to the caller).
    """
    bar = progressbar.ProgressBar()
    nrs = list(range(start, end))
    # Visit puzzle ids in random order.
    random.shuffle(nrs)
    rets = []
    for i in bar(nrs):
        ret = get_pos(i)
        if ret:
            rets.append((i, ret))
    try:
        with open('emrald.p', 'wb') as outf:
            pickle.dump(rets, outf)
    except Exception as e:
        print(e)
    return rets
| [
"david@fendrich.se"
] | david@fendrich.se |
81fb67bfbbafced31af6e9a8ec85def9ce72c428 | 4b8b0be0588f9e5249729f165b72a6b38324837d | /glycresoft_ms2_classification/prediction_tools/__init__.py | 52e2cd4a029490a37c2b53ed85f7619bf145d4ca | [] | no_license | GlycReSoft2/embed_tandem_ms_classifier | 5e2f569f2b74f2f14f1c1c0cede32de99c150890 | 0495f2234562a9c5dd02d545800c077df2305387 | refs/heads/master | 2020-06-02T09:32:55.457664 | 2015-06-20T21:30:19 | 2015-06-20T21:30:19 | 22,615,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | try:
    # Best-effort re-exports: lift the public data-processing helpers up
    # to the package level.
    from data_processing import prepare_model_file
    from data_processing import save_model_file
    from data_processing import call_by_coverage
    from data_processing import determine_ambiguity
    from data_processing import PredictionResults
    from data_processing import convert_csv_to_nested
    from classifier_definitions import *
except ImportError:
    # Keep the package importable even when optional pieces are missing.
    print("Unable to import parts of prediction_tools")
from .constants import constants
| [
"mobiusklein@gmail.com"
] | mobiusklein@gmail.com |
f575df6d1062f7cc47d9813b8c01948dc2bbc341 | 03bf031efc1f171f0bb3cf8a565d7199ff073f96 | /apps/authentication/migrations/0026_auto_20171108_1027.py | 9424d221e915e1e5808704ce9bb7901f9db440bd | [
"MIT"
] | permissive | emilps/onlineweb4 | a213175678ac76b1fbede9b0897c538c435a97e2 | 6f4aca2a4522698366ecdc6ab63c807ce5df2a96 | refs/heads/develop | 2020-03-30T01:11:46.941170 | 2019-05-10T19:49:21 | 2019-05-10T19:49:21 | 150,564,330 | 0 | 0 | MIT | 2019-05-10T19:49:22 | 2018-09-27T09:43:32 | Python | UTF-8 | Python | false | false | 769 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-08 09:27
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen OnlineUser.username to the Django 1.11 default
    (150 chars, UnicodeUsernameValidator, unique)."""

    dependencies = [
        ('authentication', '0025_auto_20170529_1823'),
    ]

    operations = [
        migrations.AlterField(
            model_name='onlineuser',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
        ),
    ]
| [
"torjusti@gmail.com"
] | torjusti@gmail.com |
ddced0b87ea73e64b7de8ea9ac382288195ea604 | 5269aeb867df39b74b2649d409ad7167074203d4 | /python_learning/overloading_iter_next_multy.py | c44a5dfe281e425763f354c0ebb7c4b4d5627ed0 | [
"MIT"
] | permissive | KonstantinKlepikov/all-python-ml-learning | fa3afd27b8314d51cce4a3b174071c5fe9421643 | a8a41347b548828bb8531ccdab89c622a0be20e1 | refs/heads/master | 2023-02-26T15:11:38.929089 | 2021-01-29T01:38:43 | 2021-01-29T01:38:43 | 273,067,713 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # example of overloading __iter__ and __next__ - multy iteration
class SkipObject:
    """Iterable wrapper that hands out a fresh iterator on every __iter__() call,
    so nested ``for`` loops each keep independent position state."""

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def __iter__(self):
        # A new SkipIterator per call — this is what enables multiple
        # simultaneous iterations over the same wrapped sequence.
        return SkipIterator(self.wrapped)
class SkipIterator:
    """Iterator yielding every second item (indexes 0, 2, 4, ...) of a sequence."""

    def __init__(self, wrapped):
        self.wrapped = wrapped
        self.offset = 0  # current index into the wrapped sequence

    def __next__(self):
        if self.offset >= len(self.wrapped):
            raise StopIteration
        item = self.wrapped[self.offset]
        self.offset += 2  # advance by two: skip one element each step
        return item
if __name__ == '__main__':
    # Demo: show that SkipObject supports several independent iterations.
    text = 'abcde'
    skipper = SkipObject(text)
    I = iter(skipper)  # indexes 0, 2, 4 -> 'a', 'c', 'e'
    print(next(I), next(I), next(I))
    # Nested loops work because each `for` obtains its own iterator.
    for x in skipper:
        for y in skipper:
            print((x + y)*2, end=' ')
| [
"oformleno@gmail.com"
] | oformleno@gmail.com |
f6f79189fdc7c65f337f59dbd02701294a93b9cd | ad2f2877a4f24f5b78db756e4ea513134bc014db | /introduction_to_data_science_BY_BU/03-134142-041/Lab4/Lab4.py | 35e3238d0853681307e6bd4f830acaf982621fda | [] | no_license | mhamzaaziz1/DataScience | 50e2f89d612d274000086ff49e2cdcd0c3a443fc | 3421321863e9d6070728899dec62791b55f0a174 | refs/heads/master | 2020-03-19T06:41:58.550082 | 2018-06-04T15:57:38 | 2018-06-04T15:57:38 | 136,046,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | #LAB 4 task 1:
count = 0
num = 1
sum = 0
while num != 0:
num=int(input("enter the number:"))
count += 1
sum = sum + num
if count !=0:
print ("sum of values are:",sum)
count -= 1
print("Avg:",sum/count)
else:
print(count)
#LAb 4 task 2:
val1 = int(input(" enter for horizontal values"))
val2 = int(input(" enter for vertical values"))
char1 = '@'
for row in range(val1):
for col in range(val2):
print(char1)
print()
| [
"noreply@github.com"
] | mhamzaaziz1.noreply@github.com |
01a21f3d5eee9b1e8695214f846777b5355c4e67 | 02fa055429a5533b6ccad5e0948931bb53d585ad | /tests/test_expand_config_path.py | 71c0543f5f4cab3838eec900eae63e37fdcdd845 | [
"MIT"
] | permissive | TrendingTechnology/proxmox-pci-switcher | a7d3935131550c9b7095ab26ce8fd6aa9e35d46a | 595f72419bdbc26da1d0cdb07f039cf79eeff086 | refs/heads/master | 2023-07-13T21:12:15.841631 | 2021-08-06T22:47:48 | 2021-08-06T22:47:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | import unittest
from unittest.mock import patch
from proxmox_pci_switcher import expand_config_path
from pathlib import Path
class TestExpandConfigPath(unittest.TestCase):
@patch("proxmox_pci_switcher.os.name", "posix")
def test_expand_config_path_default_linux(self):
_home = Path.home()
self.assertEqual(
f"{_home}/.config/proxmox-pci-switcher/config.yaml",
expand_config_path("~/.config/proxmox-pci-switcher/config.yaml"),
)
@patch("proxmox_pci_switcher.os.name", "nt")
def test_expand_config_path_default_windows(self):
self.assertEqual(
"~\\AppData\\Local\\proxmox-pci-switcher\\config.yaml",
expand_config_path("~/.config/proxmox-pci-switcher/config.yaml"),
)
@patch("proxmox_pci_switcher.os.name", "posix")
def test_expand_config_path_custom_path(self):
self.assertEqual(
"/etc/ppw/config.yaml",
expand_config_path("/etc/ppw/config.yaml"),
)
if __name__ == "__main__":
unittest.main()
| [
"rosiney.gp@gmail.com"
] | rosiney.gp@gmail.com |
97e70d31f12cf6c8e32134cc2ea0a2382e412be0 | 70acf904d89a49d557abb4c460a7aafc08a18c45 | /Questi/urls.py | 151d882e669c870dda6927b1dd88b446445418e0 | [] | no_license | 1r00t/WebPyProject | 7e65177369b9e8ca000bb13abff4990015e65e5f | 8f821854c27b5bca147623f532b2ce052205b900 | refs/heads/master | 2020-09-26T22:06:33.449397 | 2020-01-17T20:47:45 | 2020-01-17T20:47:45 | 226,352,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | """Questi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('', include('Blog.urls')),
]
| [
"root.1drone@gmail.com"
] | root.1drone@gmail.com |
8d2f9bfb463b00cdbbda6c59a9f73a137d4bf980 | 49f0d7d5a9c3fa30e568b544adbee2fd05a37e84 | /dynamic_programming.py | 989fa0c0185083a9415b72ec9bf78e8827c99130 | [] | no_license | dannylicn/codetest | c5c6976fa8fe7f3ca456e005eeb034b38e571ff7 | bce5ee848f8fa9cd1164e82746510c090ce07bb3 | refs/heads/master | 2020-09-11T01:39:47.890845 | 2019-11-20T15:56:23 | 2019-11-20T15:56:23 | 221,898,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,416 | py | # !usr/bin/python3
"""
Different graph algorithms implementation
Arash Tehrani
"""
class DynamicProgramming(object):
def __init__(self):
pass
def longest_increasing_subsequence(self, arr):
"""
longest increasing subsequence
inputs:
- arr: list, input array
outputs:
- length of the maximum increasing subarray
- the maximum increasing subarray
"""
len_arr = [1]*len(arr)
select_arr = list(range(len(arr)))
for j in range(1, len(arr)):
for i in range(j):
if arr[i] < arr[j]:
if len_arr[j] < len_arr[i] + 1:
len_arr[j] = len_arr[i] + 1
select_arr[j] = i
max_tuple = max([v for v in enumerate(len_arr)], key=lambda x:x[1])
idx = max_tuple[0]
n = max_tuple[1]
idx_arr = [idx]
while n > 1:
idx_arr.append(select_arr[idx])
idx = idx_arr[-1]
n -= 1
return max_tuple[1], [arr[i] for i in idx_arr][::-1]
def knapsack01(self, w, val, W):
"""
0/1 Knapsack problem
inputs:
- w: list of ints, weights of the items
- val: list, value of the items
- W: int, max allowable weight (budget)
outputs:
- max_val: max possible value
- items: list of tuples, items picked
"""
assert isinstance(W, int), 'W must be an integer'
arr = [(i, j) for i, j in zip(val, w)]
arr = sorted(arr, key=lambda x: x[1])
score_mat = [[0]*(W+1) for i in range(len(arr))]
for j in range(W+1):
if j >= arr[0][1]:
score_mat[0][j] = arr[0][0]
for i in range(1, len(arr)):
for j in range(1, W+1):
if j < arr[i][1]:
score_mat[i][j] = score_mat[i-1][j]
else:
score_mat[i][j] = max(score_mat[i-1][j],
arr[i][0] + score_mat[i-1][j - arr[i][1]]
)
# pick the elements based on the scores
v = max_val = score_mat[-1][-1]
idx = len(arr) - 1
col = W
items = []
while v != 0:
if score_mat[idx][col] == score_mat[idx-1][col]:
idx -= 1
else:
col -= arr[idx][1]
items.append(arr[idx])
idx -= 1
v = score_mat[idx][col]
return max_val, items
def unbounded_knapsack(self, w, val, W):
"""
Unbounded Knapsack problem which allows repetition of items
inputs:
- w: list of ints, weights of the items
- val: list, value of the items
- W: int, max allowable weight (budget)
outputs:
- max_val: max possible value
- items: list of tuples, items picked
"""
assert isinstance(W, int), 'W must be an integer'
arr = [(i, j) for i, j in zip(val, w)]
arr = sorted(arr, key=lambda x: x[1])
print(arr)
score_mat = [[0]*(W+1) for i in range(len(arr))]
for j in range(W+1):
if j >= arr[0][1]:
score_mat[0][j] = arr[0][0] + score_mat[0][j - arr[0][1]]
for i in range(1, len(arr)):
for j in range(1, W+1):
if j < arr[i][1]:
score_mat[i][j] = score_mat[i-1][j]
else:
score_mat[i][j] = max(score_mat[i-1][j],
arr[i][0] + score_mat[i][j - arr[i][1]]
)
# pick the elements based on the scores
v = max_val = score_mat[-1][-1]
idx = len(arr) - 1
col = W
items = []
while v != 0:
if idx == 0:
col -= arr[idx][1]
items.append(arr[idx])
else:
if score_mat[idx][col] == score_mat[idx-1][col]:
idx -= 1
else:
col -= arr[idx][1]
items.append(arr[idx])
v = score_mat[idx][col]
return max_val, items
def egg_dropping(self, n, k):
"""
egg dropping problem
* O(n*k^2) complexity
inputs:
- n: number of floors
- k: number of eggs
return:
- int, numbner of attempts
"""
T = [[0]*(n+1) for _ in range(k)]
for j in range(n+1):
T[0][j] = j
for i in range(1, k):
for j in range(1, n+1):
if i > j:
T[i][j] = T[i-1][j]
else:
sol = []
for jj in range(1, j+1):
sol.append(max(T[i-1][jj-1], T[i][j-jj]))
print(i, j, sol)
T[i][j] = 1 + min(sol)
return T[-1][-1]
def matrix_chain_multiplication(self, M_list):
"""
Matrix chain multiplication
inputs:
M_list: list of lists of length 2, size of the amtrices
outputs:
int, minimum number of computation
list, order of multiplications
"""
n = len(M_list)
T = [[0]*n for _ in M_list]
order = [[0]*n for _ in M_list]
for v in range(1, n):
for i in range(n - v):
j = i + v
T[i][j] = float('inf')
for k in range(i, j):
w = T[i][k] + T[k+1][j] + M_list[i][0]*M_list[k][1]*M_list[j][1]
if w < T[i][j]:
T[i][j] = w
order[i][j] = k
def order_printer(i, j, order):
if j - i == 1:
return [[i],[j]]
if order[i][j] == i:
return [[i], order_printer(order[i][j]+1, j, order)]
elif order[i][j]+1 == j:
return [order_printer(i, order[i][j], order), [j]]
else:
return [order_printer(i,order[i][j], order),
order_printer(order[i][j]+1, j, order)]
return T[0][-1], order_printer(0, n-1, order)
def optimal_game_strategy(self, arr):
"""
Consider a row of n coins of values v1 . . . vn, where n is even. We play
a game against an opponent by alternating turns. In each turn, a player selects
either the first or last coin from the row, removes it from the row permanently,
and receives the value of the coin. Determine the maximum possible amount of
money we can definitely win if we move first.
inputs:
- arr: list, arr of value of the coins
output:
- scalar, maximum gainable point
"""
n = len(arr)
T = [[[0,0] for _ in range(n)] for _ in range(n)]
for i in range(n):
T[i][i][0] = arr[i]
for L in range(1, n):
for i in range(n - L):
j = i + L
if arr[i] + T[i+1][j][1] > arr[j] + T[i][j-1][1]:
T[i][j][0] = arr[i] + T[i+1][j][1]
T[i][j][1] = T[i+1][j][0]
else:
T[i][j][0] = arr[j] + T[i][j-1][1]
T[i][j][1] = T[i][j-1][0]
return T[0][-1][0]
# -------------------------------------------------------------
if __name__ == '__main__':
dp = DynamicProgramming()
# -----------------------------
# Testing longest increasing subsequence
print('------------------------------------------------')
print('Running longest increasing subsequence')
u, v = dp.longest_increasing_subsequence([0, 2.3, 4, 5, -2, 7, 8])
print(u, v)
# -----------------------------
# Testing 0/1 Knapsack
print('------------------------------------------------')
print('Running 0/1 Knapsack')
max_val, items = dp.knapsack01([5,4,3,1], [7,5,4,1], 7)
print(max_val, items)
# -----------------------------
# -----------------------------
# Testing unbounded Knapsack
print('------------------------------------------------')
print('Running unbounded Knapsack')
max_val, items = dp.unbounded_knapsack([1,3,4,5], [10,40,50,70], 8)
print(max_val, items)
max_val, items = dp.unbounded_knapsack([5,4,3,1], [7,5,4,1], 7)
print(max_val, items)
# -----------------------------
# Testing egg dropping
print('------------------------------------------------')
print('Running egg dropping')
print(dp.egg_dropping(6,2))
# -----------------------------
# Testing matrix chain multiplication
print('------------------------------------------------')
print('Running matrix chain multiplication')
matrices = [[2,3], [3,6], [6,4], [4,5]]
#matrices = [[2,3], [3,2], [1,5], [5,1]]
print(dp.matrix_chain_multiplication(matrices))
# -----------------------------
# Testing Optimal game strategy
print('------------------------------------------------')
print('Running optimal game strategy')
a = [8, 15, 3, 7]
print(dp.optimal_game_strategy(a))
| [
"danny.li@samsung.com"
] | danny.li@samsung.com |
b85a73a1586c716e42c86755109e4360e6d2a396 | b6068ad0383967f40cf338b6005d728edb1b647f | /DeepQNet/RL_brain.py | 1135e8eef781148a2f60be8b5d72d07114b31255 | [] | no_license | WOW5678/ReinforcementLearning | 8d0962232e7f4d9ea88e990f9bca98dad86f0ef0 | 5a8e1624fbecc5d39ca17ab2613a6555fe3d937f | refs/heads/master | 2020-03-30T07:32:02.576419 | 2019-11-22T03:27:03 | 2019-11-22T03:27:03 | 150,946,581 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,352 | py | # -*- coding:utf-8 -*-
'''
Create time: 2018/11/11 16:50
@Author: 大丫头
'''
"""
This part of code is the DQN brain, which is a brain of the agent.
All decisions are made in here.
Using Tensorflow to build the neural network.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.7.3
"""
import numpy as np
import pandas as pd
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.01,
            reward_decay=0.9,  # reward discount factor (gamma)
            e_greedy=0.9,  # epsilon: probability of greedy action; rest is random exploration
            replace_target_iter=300,  # sync target_net from eval_net every N learning steps
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None,
            output_graph=False,
    ):
        """Build the DQN agent: replay memory, eval/target networks, session."""
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        # If epsilon is annealed, start fully exploratory (0); otherwise fixed at max.
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
        # total learning step
        self.learn_step_counter = 0
        # initialize zero memory, one row per transition [s, a, r, s_]
        self.memory = np.zeros((self.memory_size, n_features * 2 + 2))
        # consist of [target_net, evaluate_net]
        self._build_net()
        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        # Op that copies eval_net weights into target_net.
        self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
        self.sess = tf.Session()
        if output_graph:
            # Inspect the graph with: $ tensorboard --logdir=logs
            tf.summary.FileWriter("logs/", self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
        self.cost_his = []  # loss history, consumed by plot_cost()
    def _build_net(self):
        """Create the two Q networks: a trainable eval_net and a frozen target_net
        (same 2-layer architecture; target weights are copied over periodically)."""
        # ------------------ build evaluate_net ------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # for calculating loss
        with tf.variable_scope('eval_net'):
            # c_names(collections_names) are the collections to store variables
            c_names, n_l1, w_initializer, b_initializer = \
                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
                tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)  # config of layers
            # first layer. collections is used later when assign to target net
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
            # second layer. collections is used later when assign to target net
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_eval = tf.matmul(l1, w2) + b2
        with tf.variable_scope('loss'):
            # Mean squared TD error between target and predicted Q values.
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
        with tf.variable_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
        # ------------------ build target_net ------------------
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # input (next state)
        with tf.variable_scope('target_net'):
            # c_names(collections_names) are the collections to store variables
            c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
            # first layer. collections is used later when assign to target net
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
            # second layer. collections is used later when assign to target net
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_next = tf.matmul(l1, w2) + b2
# 保存每个epsoid的原状态,行为,回报,新状态值
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
#hstack 不改变行数,再列数上进行拼接
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
#根据状态值选择action
def choose_action(self, observation):
# to have batch dimension when feed into tf placeholder
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
#选value最大的action
# forward feed the observation and get q value for every actions
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
# 随机策略 进行aciton的随机选择
action = np.random.randint(0, self.n_actions)
return action
    # Sample a batch from replay memory and run one gradient step on eval_net.
    def learn(self):
        # Periodically copy eval_net weights into target_net
        # (every `replace_target_iter` learning steps).
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.sess.run(self.replace_target_op)
            print('\ntarget_params_replaced\n')
        # sample batch memory from all memory (only the filled part when not full yet)
        if self.memory_counter > self.memory_size:
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
        batch_memory = self.memory[sample_index, :]
        q_next, q_eval = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={
                self.s_: batch_memory[:, -self.n_features:],  # next states -> target_net (fixed params)
                self.s: batch_memory[:, :self.n_features],  # current states -> eval_net (newest params)
            })
        # change q_target w.r.t q_eval's action.
        # Start from a copy of q_eval so actions NOT taken contribute a zero
        # error term; only each row's taken action is overwritten below.
        q_target = q_eval.copy()
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        eval_act_index = batch_memory[:, self.n_features].astype(int)  # stored action column
        reward = batch_memory[:, self.n_features + 1]  # stored reward column
        # Bellman update for the taken actions only: r + gamma * max_a' Q_target(s', a')
        q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
        """
        For example in this batch I have 2 samples and 3 actions:
        q_eval =
        [[1, 2, 3],
        [4, 5, 6]]
        q_target = q_eval =
        [[1, 2, 3],
        [4, 5, 6]]
        Then change q_target with the real q_target value w.r.t the q_eval's action.
        For example in:
        sample 0, I took action 0, and the max q_target value is -1;
        sample 1, I took action 2, and the max q_target value is -2:
        q_target =
        [[-1, 2, 3],
        [4, 5, -2]]
        So the (q_target - q_eval) becomes:
        [[(-1)-(1), 0, 0],
        [0, 0, (-2)-(6)]]
        We then backpropagate this error w.r.t the corresponding action to network,
        leave other action as error=0 cause we didn't choose it.
        """
        # train eval network
        _, self.cost = self.sess.run([self._train_op, self.loss],
                                     feed_dict={self.s: batch_memory[:, :self.n_features],
                                                self.q_target: q_target})
        self.cost_his.append(self.cost)
        # increasing epsilon: anneal exploration towards epsilon_max
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his)), self.cost_his)
plt.ylabel('Cost')
plt.xlabel('training steps')
plt.show()
| [
"noreply@github.com"
] | WOW5678.noreply@github.com |
49a442b058c1eb081db28a321b0d5020c9dec449 | a622e8b295d799b7b9125e2b15243b8bdae1dc09 | /1908/190826/미로문제/미로문제.py | 58c26cc2d9d42d035507cc03d22855d8235c45a4 | [] | no_license | otterji/algorithms | 3a36c04bacc22c46f1ee220b3b129cda876db797 | ebd0ade0cd1de8e489d015aa0b2833afeab3898e | refs/heads/master | 2020-11-26T02:48:13.299848 | 2020-04-11T12:09:41 | 2020-04-11T12:09:41 | 228,942,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | # 시작점과 끝점의 위치가 항상 1행은 아님. 중간일수도 있음. 제일 먼저 시작점 위치 찾기
# DFS로 풀어도됨
# 백트래킹으로 풀어도됨
# 도착할 수 있는지 여부 찾기
import sys
sys.stdin = open("input.txt", "r")
T = int(input())
for tc in range(1, T+1):
N = int(input())
miro = [list(map(int,input())) for _ in range(N)]
for i in range(N):
for j in range(N):
if miro[i][j] == 2:
start = (i, j)
break
stack = []
visited = []
dx = [0, 0, -1, 1] # 좌 우
dy = [-1, 1, 0, 0] # 상 하
def DFS(miro, s, g):
x, y = s, g
stack.append((x, y))
while stack:
x = stack[-1][0]
y = stack[-1][1]
for i in range(4):
if 0 <= dx[i] + x <= N-1 and 0 <= dy[i] + y <= N-1: # 범위 안에 있으면
if miro[dx[i] + x][dy[i] + y] == 3:
return 1
if miro[dx[i] + x][dy[i] + y] == 0 and (dx[i] + x, dy[i] + y) not in visited:
x = dx[i] + x
y = dy[i] + y
stack.append((x, y))
visited.append((x, y))
break # 얘를 안하니까 값이 안나오네
else:
stack.pop()
return 0
result = DFS(miro, start[0], start[1])
print('#{} {}'.format(tc, result)) | [
"rkdudwl@naver.com"
] | rkdudwl@naver.com |
f89174dbeba42fbd85b9c3ae35f812440de2c6f7 | ea1c39e7244f85f563eec0f5824763c300dfa800 | /GUI | e54530e7d76872e438bb279f5cfead69e861edd3 | [] | no_license | maciosz/adpb | 95bbd635694b3ad887658a19eddce66ae209b556 | b6187bf62c9bf54ecb313e1382af6e10e42ccb93 | refs/heads/master | 2021-01-21T23:44:30.665159 | 2015-02-10T11:21:18 | 2015-02-10T11:21:18 | 29,132,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,109 | #!/usr/bin/python
import EpickiKonwerter
import pops
import sys
import os
from Tkinter import *
from tkFileDialog import *
if __name__ == "__main__":

    class layout:
        """Builds the EpickiKonwerter converter GUI (Python 2 / Tkinter)
        and wires the browse/convert/help callbacks."""

        def __init__(self, parent):
            # Choose Input format section.
            self.frmIform = Frame(parent, bd=5)
            self.frmIform.pack()
            self.v = IntVar()
            self.v.set(1) # Initializing the input choice
            input_format = [
                ("fasta",1),
                ("fastq",2),
                ("sam",3),
                ("gff2",4),
                ("gff3",5)]
            # Debug callback: echoes the selected radio value to stdout.
            def ShowChoice():
                print self.v.get()
            Label(self.frmIform,
                  text="Choose input file format",
                  justify = LEFT,
                  padx = 20).pack()
            # NOTE(review): the radiobuttons are parented to the global `root`,
            # not to self.frmIform — presumably intentional; verify layout.
            for txt, val in input_format:
                Radiobutton(root,
                            text=txt,
                            padx = 20,
                            variable=self.v,
                            command=ShowChoice,
                            value=val).pack(anchor=W)
            # Separator (start)
            sep = Frame(parent, width=2, bd=2, bg='grey')
            sep.pack(fill=X, expand=1)
            # Separator (end)
            # Choose the output format section.
            self.frmOform = Frame(parent, bd=5)
            self.frmOform.pack()
            self.v2 = IntVar()
            self.v2.set(1) # Initializing the output choice
            output_format = [
                ("fasta",1),
                ("fastq",2),
                ("sam",3),
                ("gff2",4),
                ("gff3",5)]
            # Shadows the earlier ShowChoice on purpose: reports self.v2 instead.
            def ShowChoice():
                print self.v2.get()
            Label(self.frmOform,
                  text="Choose output file format",
                  justify = LEFT,
                  padx = 20).pack()
            for txt, val in output_format:
                Radiobutton(root,
                            text=txt,
                            padx = 20,
                            variable=self.v2,
                            command=ShowChoice,
                            value=val).pack(anchor=W)
            # Separator (start)
            sep = Frame(parent, width=2, bd=2, bg='grey')
            sep.pack(fill=X, expand=1)
            # Separator (end)
            # Input file section.
            self.frmInfile = Frame(parent, bd=5)
            self.frmInfile.pack()
            self.labelIn = Label(self.frmInfile, text='Input File', width=15)
            self.labelIn.pack(side=LEFT)
            self.inFilePath = StringVar()
            self.boxIn = Entry(self.frmInfile, width=22, textvariable=self.inFilePath)
            self.boxIn.pack(side=LEFT)
            self.bInFile = Button(self.frmInfile, text='Browse', command=self.bBrowseInClick)
            self.bInFile.pack(side=LEFT)
            # Output file section.
            self.frmOutfile = Frame(parent, bd=5)
            self.frmOutfile.pack()
            self.labelOut = Label(self.frmOutfile, text='Output File', width=15)
            self.labelOut.pack(side=LEFT)
            self.outFilePath = StringVar()
            self.boxOut = Entry(self.frmOutfile, width=22, textvariable=self.outFilePath)
            self.boxOut.pack(side=LEFT)
            self.bOutFile = Button(self.frmOutfile, text='Browse', command=self.bBrowseOutClick)
            self.bOutFile.pack(side=LEFT)
            # Separator (start)
            sep = Frame(parent, width=2, bd=2, bg='grey')
            sep.pack(fill=X, expand=1)
            # Separator (end)
            # Choose Filter (yes/no) section.
            self.frmFYNform = Frame(parent, bd=5)
            self.frmFYNform.pack()
            self.v3 = IntVar()
            Label(self.frmFYNform,
                  text="Do you want to use filters?",
                  justify = LEFT,
                  padx = 20).pack(anchor=W)
            Radiobutton(self.frmFYNform,
                        text="Yes",
                        padx = 20,
                        variable=self.v3,
                        value=True).pack(anchor=W)
            Radiobutton(self.frmFYNform,
                        text="No",
                        padx = 20,
                        variable=self.v3,
                        value=False).pack(anchor=W)
            # Separator (start)
            sep = Frame(parent, width=2, bd=2, bg='grey')
            sep.pack(fill=X, expand=1)
            # Separator (end)
            # Filter section: free-text expression used by bConverterClick.
            self.frmFilt = Frame(parent, bd=5)
            self.frmFilt.pack()
            self.labelFilt = Label(self.frmFilt, text='Type prefered filter.\n', width=30)
            self.labelFilt.pack(side=LEFT)
            self.userexp = StringVar()
            self.boxExp = Entry(self.frmFilt, width=22, textvariable=self.userexp)
            self.boxExp.pack(side=LEFT)
            # Separator (start)
            sep = Frame(parent, width=2, bd=2, bg='grey')
            sep.pack(fill=X, expand=1)
            # Separator (end)
            # "Convert" and "Help" button section
            self.frmConv = Frame(parent, bd=5)
            self.frmConv.pack()
            self.bConverter = Button(self.frmConv, text='Convert', command=self.bConverterClick)
            self.bHelp = Button(self.frmConv, text='Display Help', command=self.bHelpClick)
            self.bConverter.grid(row=0,column=1, sticky=W)
            self.bHelp.grid(row=0,column=0, sticky=W)
# Browse input_file button
def bBrowseInClick(self):
rFilepath = askopenfilename(
defaultextension='*',
initialdir='.',
initialfile='',
parent=self.frmInfile,
title='select a file')
self.inFilePath.set(rFilepath)
print self.boxIn.get()
# Browse output_file button
def bBrowseOutClick(self):
rFilepath = asksaveasfilename(
defaultextension='*',
initialdir='.',
initialfile='',
parent=self.frmInfile,
title='select a file')
self.outFilePath.set(rFilepath)
print self.boxOut.get()
# "Help" button
def bHelpClick(self):
print 'help'
# "Convert" button
def bConverterClick(self):
inputFilePath = str(self.inFilePath.get())
outputFilePath = str(self.outFilePath.get())
expression = str(self.userexp.get())
e = EpickiKonwerter.EpickiKonwerter()
# Input -> Converter
if self.v.get() == 1:
e.readFASTA(inputFilePath)
elif self.v.get() == 2:
e.readFASTQ(inputFilePath)
elif self.v.get() == 3:
e.readSAM(inputFilePath)
elif self.v.get() == 4:
e.readGFF2_GFF3(inputFilePath)
elif self.v.get() == 5:
e.readGFF2_GFF3(inputFilePath)
# Filter
if self.v3.get() == True:
e.filter(expression)
else:
pass
# Converter -> Output
if self.v2.get() == 1:
e.writeFASTA(outputFilePath)
elif self.v2.get() == 2:
e.writeFASTQ(outputFilePath)
elif self.v2.get() == 3:
e.writeSAM(outputFilePath)
elif self.v2.get() == 4:
if self.v.get() == 1:
pops.format_error()
elif self.v.get() == 2:
pops.format_error()
else:
e.writeGFF2(outputFilePath)
elif self.v2.get() == 5:
if self.v.get() == 1:
pops.format_error()
elif self.v.get() == 2:
pops.format_error()
else:
e.writeGFF3(OutputFilePath)
root = Tk()
root.title("EpickiKonwerter GUI")
root.geometry("400x700+100+40") # Default window's size and position.
gui = layout(root)
root.mainloop()
| [
"gromadka.agnieszka@gmail.com"
] | gromadka.agnieszka@gmail.com | |
0d1d77565dbedf75b5e67c8b721c6224a6f868f2 | 2a0bb7788a24fbdf35505ef58df781a7b5d0825b | /test_lesson_6.py | 85f78eeb9db99f7912398da26ae6fa4fbd35f22d | [
"MIT"
] | permissive | Nadezda-26-S/myfirstone | 5c476c83f4a280b150e71b961ad936a06ddebd7d | 455802b5ee5acea8979ef2b5d387b952e5ff18d9 | refs/heads/master | 2020-11-27T21:57:24.164290 | 2020-02-02T15:37:56 | 2020-02-02T15:37:56 | 229,616,907 | 0 | 0 | MIT | 2020-01-10T17:27:39 | 2019-12-22T19:07:10 | Python | UTF-8 | Python | false | false | 1,124 | py | # --------ТЕСТИРОВАНИЕ ФУНКЦИЙ--------
# проверка на простоту
def is_prime(n):
i = 2
while n > i:
if n % i == 0:
break
i += 1
return i == n
print(is_prime(43))
def test_1_is_prime():
assert is_prime(7) == True
def test_2_is_prime():
assert is_prime(344) == False
# выводит список всех делителей числа;
def all_dividers(n):
result = []
i = 2
while i * i <= n:
if n % i == 0:
n //= i
result.append(i)
else:
i += 1
if n > 1:
result.append(n)
return result
print(all_dividers(24))
def test_all_dividers():
assert all_dividers(7) != 7
# выводит самый большой простой делитель числа.
def greatest_pr_div(n):
prime_num = all_dividers(n)
max_num = 0
for i in prime_num:
if i > max_num:
max_num = i
return(max_num)
print(greatest_pr_div(23244))
def test_greatest_pr_div():
assert greatest_pr_div(149) == 149
assert greatest_pr_div(-149) == 0 | [
"hope_air@mail.ru"
] | hope_air@mail.ru |
7697cfa699f4e95fc9116e3ca315a9a2de1ee232 | 2b3523f8586aeca4a09eae7899e927f6f10bd4f5 | /tf_utils.py | ad564fca1f45b603d41319ca47171b9199e296d8 | [] | no_license | Modorn/radar_image_propagation | 2ad146302a371c51bdf251d15fbca3527c66108f | e21215e0c6c63768c0036197b91dd15eb51bbc7d | refs/heads/master | 2020-06-02T01:48:56.153632 | 2019-03-29T07:44:48 | 2019-03-29T07:44:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,151 | py | import numpy as np
import tensorflow as tf
import config as c
def conv2d(input, name, kshape, strides=(1, 1, 1, 1), dtype=np.float32, padding="SAME"):
    """2-D convolution + bias + leaky ReLU (slope 0.2), xavier-initialized.

    kshape is the full kernel shape (h, w, in_c, out_c); variables are
    namespaced as 'w_<name>' / 'b_<name>'.
    """
    with tf.name_scope(name):
        weights = tf.get_variable(
            name='w_' + name,
            shape=kshape,
            initializer=tf.contrib.layers.xavier_initializer(uniform=False),
            dtype=dtype)
        bias = tf.get_variable(
            name='b_' + name,
            shape=[kshape[3]],
            initializer=tf.contrib.layers.xavier_initializer(uniform=False),
            dtype=dtype)
        conv = tf.nn.conv2d(input, weights, strides=strides, padding=padding)
        conv = tf.nn.bias_add(conv, bias)
        return tf.nn.leaky_relu(conv, alpha=0.2)
def conv2d_act(input, name, kernel, strides, num_filters, dtype=tf.float32, initializer="msar", act_type="leaky", padding="SAME"):
    """2-D conv (square kernel) + bias + activation.

    initializer: "msra" (variance scaling) or "xavier". The historical
    misspelling "msar" is still accepted for backward compatibility — it was
    the original default and a typo of "msra" (cf. deconv2d_act below).
    act_type: "relu" or "leaky" (slope 0.2); any other value returns the
    pre-activation output, matching the original behaviour.
    """
    with tf.name_scope(name):
        in_c = input.shape.as_list()[-1]
        # Accept both spellings so this helper stays consistent with
        # deconv2d_act, which uses the correct "msra".
        if initializer in ("msar", "msra"):
            init = tf.contrib.layers.variance_scaling_initializer()
        elif initializer == "xavier":
            init = tf.contrib.layers.xavier_initializer(uniform=False)
        else:
            raise NotImplementedError
        W = tf.get_variable(name='w_' + name,
                            shape=(kernel, kernel, in_c, num_filters),
                            initializer=init,
                            dtype=dtype)
        b = tf.get_variable(name='b_' + name,
                            shape=[num_filters],
                            initializer=tf.zeros_initializer(),
                            dtype=dtype)
        out = tf.nn.conv2d(input, W, strides=(1, strides, strides, 1), padding=padding)
        out = tf.nn.bias_add(out, b)
        if act_type == "relu":
            out = tf.nn.relu(out)
        elif act_type == "leaky":
            out = tf.nn.leaky_relu(out, alpha=0.2)
        return out
def deconv2d(input, name, kshape, n_outputs, strides=(1, 1)):
    """Transposed 2-D convolution with ReLU, xavier-initialized weights and biases."""
    with tf.name_scope(name):
        return tf.contrib.layers.conv2d_transpose(
            input,
            num_outputs=n_outputs,
            kernel_size=kshape,
            stride=strides,
            padding='SAME',
            weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
            biases_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
            activation_fn=tf.nn.relu)
def deconv2d_act(input, name, kernel, stride, num_filters, act_type="leaky",
                 initializer="msra", dtype=tf.float32, padding="SAME", use_bias=True):
    """Transposed convolution + activation; accepts 4-D or 5-D input.

    A 5-D input (batch, time, H, W, C) is flattened to 4-D by merging the
    first two dimensions before the transposed convolution.  Note that the
    output is NOT reshaped back to 5-D here — up_sampling below does that.

    Args:
        input: 4-D NHWC tensor, or 5-D (batch, time, H, W, C) tensor.
        name: scope name.
        kernel: square kernel size.
        stride: spatial stride.
        num_filters: number of output channels.
        act_type: "relu" or "leaky"; anything else raises.
        initializer: "msra" (variance scaling) or "xavier"; anything else raises.
        dtype: variable dtype (only used by the "msra" initializer here).
        padding: "SAME" or "VALID".
        use_bias: whether the transposed conv adds a bias term.

    Returns:
        The activated 4-D output tensor.

    Raises:
        NotImplementedError: for unknown `initializer` or `act_type`.
    """
    with tf.name_scope(name):
        input_size = input.shape.as_list()
        if len(input_size) == 5:
            # Merge (batch, time) into a single leading dimension.
            input = tf.reshape(input, shape=(input_size[0] * input_size[1],
                                             input_size[2],
                                             input_size[3],
                                             input_size[4]))
        if initializer == "msra":
            init = tf.contrib.layers.variance_scaling_initializer(dtype=dtype)
        elif initializer == "xavier":
            init = tf.contrib.layers.xavier_initializer(uniform=False)
        else:
            raise NotImplementedError
        if act_type == "relu":
            act = tf.nn.relu
        elif act_type == "leaky":
            # NOTE(review): the function reference is passed, so the leaky
            # slope is TF's default (0.2), not configurable here.
            act = tf.nn.leaky_relu
        else:
            raise NotImplementedError
        out = tf.layers.conv2d_transpose(input,
                                         filters=num_filters,
                                         kernel_size=(kernel, kernel),
                                         strides=(stride, stride),
                                         padding=padding,
                                         kernel_initializer=init,
                                         activation=act,
                                         use_bias=use_bias)
        return out
def maxpool2d(x,name,kshape=(1, 2, 2, 1), strides=(1, 2, 2, 1)):
    """2x2 max-pooling (by default) with SAME padding.

    Args:
        x: 4-D NHWC tensor.
        name: scope name.
        kshape: pooling window in NHWC order.
        strides: pooling strides in NHWC order.
    """
    with tf.name_scope(name):
        out = tf.nn.max_pool(x,
                             ksize=kshape, #size of window
                             strides=strides,
                             padding='SAME')
        return out
def upsample(input, name, factor=(2, 2)):
    """Bilinearly resize `input` spatially by `factor` (height, width).

    align_corners=None is treated as False by TF's resize op.
    """
    size = [int(input.shape[1] * factor[0]), int(input.shape[2] * factor[1])]
    with tf.name_scope(name):
        out = tf.image.resize_bilinear(input, size=size, align_corners=None, name=None)
        return out
def fullyConnected(input, name, output_size, dtype=np.float32):
    """Flatten `input` (per sample) and apply a ReLU-activated dense layer.

    Args:
        input: tensor whose non-batch dimensions are flattened.
        name: scope name / variable-name suffix.
        output_size: number of output units.
        dtype: variable dtype.

    Returns:
        ReLU(input @ W + b), shape (batch, output_size).  Because of the
        ReLU, outputs are non-negative — unsuitable as final regression
        logits without a linear head.
    """
    with tf.name_scope(name):
        input_size = input.shape[1:]
        input_size = int(np.prod(input_size))
        W = tf.get_variable(name='w_'+name,
                            shape=[input_size, output_size],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                            dtype=dtype)
        b = tf.get_variable(name='b_'+name,
                            shape=[output_size],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                            dtype=dtype)
        input = tf.reshape(input, [-1, input_size])
        out = tf.nn.relu(tf.add(tf.matmul(input, W), b))
        return out
def dropout(input, name, keep_rate):
    """Apply dropout; `keep_rate` is the keep probability (TF1 semantics)."""
    with tf.name_scope(name):
        out = tf.nn.dropout(input, keep_rate)
        return out
def weighted_l2(pred, gt):
    """Pixel-weighted sum-of-squares loss between prediction and ground truth.

    The per-pixel weight comes from get_loss_weight_symbol (rain-intensity
    balancing); the result is a scalar sum, not a mean.
    """
    weight = get_loss_weight_symbol(gt)
    l2 = weight * tf.square(pred - gt)
    l2 = tf.reduce_sum(l2)
    return l2
def get_loss_weight_symbol(data):
    """Build a per-element loss-weight tensor from config thresholds.

    For each threshold in c.THRESHOLDS, the corresponding balancing weight
    is added to every element of `data` at or above that threshold, so
    higher-intensity values accumulate larger weights.  When
    c.USE_BALANCED_LOSS is False the returned weights are all zero —
    NOTE(review): that makes weighted_l2 identically zero; confirm whether
    ones were intended in the unbalanced case.
    """
    balancing_weights = c.BALANCING_WEIGHTS
    thresholds = c.THRESHOLDS
    weights = tf.zeros_like(data)
    if c.USE_BALANCED_LOSS:
        for i in range(len(thresholds)):
            weights = weights + balancing_weights[i] * tf.to_float(data >= thresholds[i])
    return weights
def down_sampling(input, kshape, stride, num_filters, name, padding="SAME"):
    """Strided convolution that also handles 5-D (batch, time, H, W, C) input.

    A 5-D input is flattened to 4-D by merging (batch, time), convolved via
    conv2d_act (default initializer and leaky-ReLU), then reshaped back to
    5-D with the new spatial/channel sizes.
    """
    input_size = input.shape.as_list()
    if len(input_size) == 5:
        # Merge (batch, time) so the 4-D conv op can be applied.
        input = tf.reshape(input, shape=(input_size[0]*input_size[1],
                                         input_size[2],
                                         input_size[3],
                                         input_size[4]))
    out = conv2d_act(input,
                     kernel=kshape,
                     strides=stride,
                     num_filters=num_filters,
                     padding=padding,
                     name=name)
    if len(input_size) == 5:
        # Restore the (batch, time) leading dimensions.
        out_size = out.shape.as_list()
        out = tf.reshape(out, shape=(input_size[0],
                                     input_size[1],
                                     out_size[-3],
                                     out_size[-2],
                                     out_size[-1]))
    return out
def up_sampling(input, kshape, stride, num_filter, name):
    """Transposed convolution that also handles 5-D (batch, time, H, W, C) input.

    Mirror of down_sampling: flattens (batch, time), applies deconv2d_act
    (default "msra" init, leaky-ReLU), then restores the 5-D shape.
    """
    input_size = input.shape.as_list()
    if len(input_size) == 5:
        input = tf.reshape(input, shape=(input_size[0] * input_size[1],
                                         input_size[2],
                                         input_size[3],
                                         input_size[4]))
    out = deconv2d_act(input,
                       kernel=kshape,
                       stride=stride,
                       num_filters=num_filter,
                       name=name)
    if len(input_size) == 5:
        # Restore the (batch, time) leading dimensions.
        out_size = out.shape.as_list()
        out = tf.reshape(out, shape=(input_size[0],
                                     input_size[1],
                                     out_size[-3],
                                     out_size[-2],
                                     out_size[-1]))
    return out
if __name__ == '__main__':
    # Smoke test: print a random uint8 5x5 "image" and its loss weights.
    # NOTE(review): get_loss_weight_symbol builds a TF tensor; without a
    # session this prints the symbolic tensor, not numeric weights.
    gt = np.random.rand(5,5) * 255
    gt = gt.astype(np.uint8)
    print(gt)
    print(get_loss_weight_symbol(gt))
"645992705xpf@gmail.com"
] | 645992705xpf@gmail.com |
a73a787c2c5060a2c6363b579bc0de40d965b0d2 | 974370079c37335258b97a6fbe901b58d86ea7e7 | /Stacks/Basic/Stack_ADT.py | f70e7b74483a9e2f3ba43d15a68b2e6f01e5866a | [] | no_license | Tejasj77/Data-Structures-and-Algorithms | 5f3279a80bb66613fa6c8701912d0b4eaa25b527 | ae8aa8ce10a0140c5301e1dfaae1539e42a6f94c | refs/heads/main | 2023-01-30T23:54:13.153228 | 2020-12-15T07:08:30 | 2020-12-15T07:08:30 | 321,554,228 | 0 | 0 | null | 2020-12-15T07:08:31 | 2020-12-15T04:38:16 | null | UTF-8 | Python | false | false | 773 | py | class Stack:
    def __init__(self):
        # Backing list; the end of the list (index -1) is the top of the stack.
        self.data = []
    def push(self,element):
        # Append at the end of the list — the stack's top.
        self.data.append(element)
def pop(self):
if self.isEmpty():
return "Stack is empty"
else:
detach = self.data[len(self.data)-1]
self.data = self.data[0:len(self.data)-1]
return detach
def isEmpty(self):
if(len(self.data)==0):
return True
return False
def top(self):
if self.isEmpty():
return "Stack is empty"
else:
return self.data[len(self.data)-1]
    def __len__(self):
        # Number of elements currently on the stack.
        return len(self.data)
    def __iter__(self):
        # Iterate bottom-to-top (list order).
        return iter(self.data)
    def __str__(self):
        # Render as the backing list, bottom element first.
        return str(self.data)
# Tiny manual demo: build a stack and push one element.
S = Stack()
S.push(5)
| [
"tejasjoshitj@gmail.com"
] | tejasjoshitj@gmail.com |
18345cb6c3eca2784bdae0c2c3c727e84b69ce15 | 119779982b417ae21f8084795eca2abec0d364c2 | /Movie_list/modules/movie.py | 569e43d7437b4134166ab9af0b1b02ca44dc671d | [] | no_license | phucnguyenw/Nguye-P | d9ca8db2a4f4d217a22986489d7d41976f464f7c | cd84decf3ea46831019c32511ac9c6e7de29bda6 | refs/heads/master | 2020-03-22T05:59:10.678775 | 2018-08-08T15:19:57 | 2018-08-08T15:19:57 | 139,604,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from mongoengine import Document, StringField
class Movie(Document):
    """MongoEngine document for one scraped movie entry."""
    title = StringField(max_length=200)  # display title, capped at 200 chars
    image_url = StringField()  # poster/thumbnail URL
    link = StringField()  # link to the movie's page
"kjngsorpo@gmail.com"
] | kjngsorpo@gmail.com |
f4ca3e63858078e3b47b64c2df54622b403949c4 | b6f64d50ce3011c17b6a9b1515780ab0f1945e2a | /testCases/test_searchProductByProductName.py | 8dd218634cda80e638dfd5a555c44e7d389448b7 | [] | no_license | amirhach84/nopcommerceApp | 1a61dfc070f602e2b4bff50343233b2ae528c59d | b4827dadb07eb42bde2fcd7e5ce8351927b292a3 | refs/heads/master | 2023-05-31T21:06:54.626004 | 2021-06-27T10:31:25 | 2021-06-27T10:31:25 | 376,508,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,007 | py | import time
import pytest
from pageObjects.LoginPage import LoginPage
from pageObjects.AddcustomerPage import AddCustomer
from pageObjects.SearchCustomerPage import SearchCustomer
from pageObjects.SearchProductPage import SearchProduct
from pageObjects.AddproductPage import AddProduct
from utilities.readProperties import ReadConfig
from utilities.customLogger import LogGen
class Test_SearchProductByProductName_007:
    """nopCommerce admin UI test (Selenium + page objects).

    Flow: log in, open Catalog -> Products, search a product by name,
    rename it, verify the new name is searchable, then rename it back so
    the test leaves the data unchanged.
    """
    baseURL = ReadConfig.getApplicationURL()   # application under test
    username = ReadConfig.getUseremail()       # admin login email
    password = ReadConfig.getPassword()        # admin login password
    logger = LogGen.loggen()  # Logger

    @pytest.mark.sanity
    def test_searchProductByProductName(self,setup):
        """Search/rename/restore round-trip keyed on the product name.

        NOTE(review): fixed time.sleep() calls make this flaky/slow —
        consider Selenium explicit waits instead.
        """
        self.logger.info("************* Test_SearchProductByProductName_007 **********")
        self.driver=setup
        self.driver.get(self.baseURL)
        self.driver.maximize_window()
        # Log into the admin console.
        self.lp = LoginPage(self.driver)
        self.lp.setUserName(self.username)
        self.lp.setPassword(self.password)
        self.lp.clickLogin()
        self.logger.info("************* Login successful **********")
        self.logger.info("******* Open the Products screen **********")
        self.addprod = AddProduct(self.driver)
        self.addprod.clickOnCatalogMenu()
        self.addprod.clickOnProductMenuItem()
        self.logger.info("******* Starting Search Product By Product Name **********")
        searchprod = SearchProduct(self.driver)
        searchprod.setSearchProductName("Apple MacBook Pro 13-inch")
        searchprod.clickSearchProd()
        time.sleep(5)
        status = searchprod.searchByProductName("Apple MacBook Pro 13-inch")
        assert True == status
        self.logger.info("******* Starting Edit Product **********")
        # Change the name of the product
        searchprod.clickEditProd()
        time.sleep(3)
        #searchprod.clickExpandProdInf()
        time.sleep(3)
        self.addprod.clearProductName()
        self.addprod.setProductName("New Version Of Apple MacBook Pro 13-inch")
        self.addcust = AddCustomer(self.driver)
        self.addcust.clickOnSave()
        time.sleep(3)
        # Verify the renamed product is found by its new name.
        searchprod.setSearchProductName("New Version Of Apple MacBook Pro 13-inch")
        searchprod.clickSearchProd()
        time.sleep(5)
        status = searchprod.searchByProductName("New Version Of Apple MacBook Pro 13-inch")
        assert True == status
        # Change the product name again back to the original name
        searchprod.clickEditProd()
        #searchprod.clickExpandProdInf()
        time.sleep(3)
        self.addprod.clearProductName()
        self.addprod.setProductName("Apple MacBook Pro 13-inch")
        time.sleep(3)
        self.addcust.clickOnSave()
        searchprod.clickSearchProd()
        time.sleep(5)
        status = searchprod.searchByProductName("Apple MacBook Pro 13-inch")
        assert True == status
        self.driver.close()
        self.logger.info("*************** TC_SearchProductByProductName_007 Finished *********** ")
"amirhach@gmail.com"
] | amirhach@gmail.com |
4786776afaaecc13021d610b55fe630db7212388 | 974929774f35b13e2a3778ac298339aad3a849c1 | /decorator/decorator.py | 595077f628538bb2e8ae7764796e1d16410d5fdb | [] | no_license | b7wch/python-learning-code-2 | 793f083ee82c926f0347f53f655aa59128aef407 | 392baefe4f0923d5c545ffe29b288e70e442e3b7 | refs/heads/master | 2023-07-20T11:30:33.671233 | 2018-07-06T10:40:21 | 2018-07-06T10:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# 2017/12/20
def wraper(func):
    """Decorator that prints 'in wraper' before and after calling *func*.

    Generalised over the original: arguments are now forwarded and the
    wrapped function's return value is propagated (the original supported
    only zero-argument functions and discarded the result).  functools.wraps
    preserves the wrapped function's name/docstring for introspection.
    """
    from functools import wraps

    @wraps(func)
    def _wraper(*args, **kwargs):
        print('in wraper')
        result = func(*args, **kwargs)
        print('in wraper')
        return result
    return _wraper
def wraper1(level):
    """Parametrised decorator factory.

    Prints *level* once when the factory is invoked; each call of the
    decorated function then prints 1, level * 3, runs the function
    (discarding its return value), and prints 2.
    """
    print(level)

    def decorate(func):
        def wrapped(*args, **kwargs):
            print(1)
            print(level * 3)
            func(*args, **kwargs)
            print(2)
        return wrapped
    return decorate
@wraper
def f():
    # Demo target for the plain `wraper` decorator: prints '123' between
    # the decorator's two 'in wraper' lines.
    print('123')
class Decorator(object):
    """Class-based decorator demo.

    NOTE(review): __call__ prints '123' and returns the *uncalled* original
    function object, so calling the decorated name (e.g. `ff()`) yields the
    function instead of executing it.  Looks like an experiment — confirm
    before relying on it.
    """
    def __init__(self, func):
        # Store the decorated function.
        self.func = func

    def __call__(self):
        print('123')
        return self.func
@Decorator
def ff():
    # After decoration, `ff` is a Decorator instance; `ff()` prints '123'
    # and returns this (undecorated) function without running it.
    print('123')
@wraper1('hello')
def g(name):
    # Wrapped by wraper1: calling g(name) also prints 1, 'hellohellohello',
    # and 2 around this greeting.
    print('hello {0}'.format(name))
def count():
    """Return [f1, f2, f3] where fi() == i * i for i in 1..3.

    Bug fixed: the original closures captured the loop variable `i` by
    reference (late binding), so every returned function computed 3 * 3 == 9.
    Binding `i` as a default argument freezes its value per iteration.
    """
    fs = []
    for i in range(1, 4):
        def f(i=i):
            return i * i
        fs.append(f)
    return fs
if __name__ == '__main__':
    # Only the class-based decorator demo is active: ff() prints '123' and
    # returns the undecorated function object, which is discarded here.
    # f()
    # g('abc')
    # f1, f2, f3 = count()
    # print(f1())
    # print(f2())
    # print(f3())
    ff()
| [
"yangnianqiu@gmail.com"
] | yangnianqiu@gmail.com |
805609f55b0761eb67b8fbbc752ae17f249eed7e | bff91925bd4f99e9495160eefcfbe620b5f673ff | /app.py | f4b865313f35a1d3c98593423b7fad095f151e9e | [] | no_license | mindcompass/telegram_chatbot | 15cc27956cee3cd7d202c4a13de286fa677f7d13 | bb692919c0066bb282bd488f2ad3dcc742df090e | refs/heads/master | 2020-11-26T23:58:56.314107 | 2019-12-20T09:46:17 | 2019-12-20T09:46:17 | 229,172,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | from flask import Flask, render_template, request
from decouple import config
from bs4 import BeautifulSoup
import random
import requests
app = Flask(__name__)
token =config('TELEGRAM_BOT_TOKEN')
chat_id = config('CHAT_ID')
url = "https://api.telegram.org/bot"
#보이지 않는 파일을 만들면 다른 파일에 있는 변수를 사용할 수 있는건가?
@app.route('/')
def hello():
    """Health-check route: plain greeting at the site root."""
    return "Hello World"
@app.route('/write')
def write() :
    """Render the message-composition form (submits to /send)."""
    return render_template('write.html')
@app.route('/send')
def send() :
    """Read `text` from the query string, forward it to the Telegram bot's
    sendMessage API, then render the confirmation page.

    Improvement: the message is passed via `params` so requests URL-encodes
    it; the original interpolated the raw text into the URL, which broke
    for messages containing spaces, '&', '#', etc.
    """
    text = request.args.get('text')
    requests.get(f'{url}{token}/sendMessage',
                 params={'chat_id': chat_id, 'text': text})
    return render_template('send.html')
# @app.route(f'/{token}', methods=["POST"])
# #token으로 복잡하게 사용하기 위해서 넣음
# def telegram():
# data=request.get_json()
# chat_id = data['message']['chat']['id']
# text = data['message']['text']
# # chat_id = request.get_json.[][][]
# #if text =="로또" :
# #보낸 사람에게 보내는 방법 getupdats <- 정보가 쌓이는 곳
# # url1=f'https://kapi.kakao.com/v1/translation/translate?src_lang=kr&target_lang=en&query={text}'
# # Authorization : "7e6ebaf4ae41d1259b2ab492fc603254"
# # if text == "안녕":
# # return_text = "안녕하세요."
# # elif text == "로또":
# # numbers = range(1,46)
# # return_text = sorted(random.sample(numbers, 6))
# # elif text =="코스피":
# # req = requests.get("https://finance.naver.com/sise/sise_index.nhn?code=KOSPI").text
# # soup =BeautifulSoup(req, 'html.parser')
# # kospi = soup.select_one("#now_value").text
# # return_text = kospi
# # else :
# # return_text ="지금 지원하는 채팅은 안녕입니다."
# requests.get(f'{url}{token}/sendMessage?chat_id={chat_id}&text={return_text}')
# return "ok ", 200
@app.route(f'/{token}', methods=["POST"])  # token in the path keeps the webhook URL unguessable; POST only
def telegram():
    """Telegram webhook: dispatch a reply based on the incoming message text.

    - "안녕"   -> greeting
    - "로또"   -> six sorted random lotto numbers (1-45)
    - "코스피" -> current KOSPI index scraped from Naver Finance
    - anything else -> Korean->English translation via the Kakao API
    The reply is sent back to the originating chat id.
    """
    data = request.get_json()
    sender_id = data['message']['chat']['id']
    message_text = data['message']['text']
    if message_text == "안녕":
        return_text = "안녕하세요"
    elif message_text == "로또":
        numbers = range(1, 46)
        # random.sample(pop, k): draw k distinct values.
        return_text = sorted(random.sample(numbers, 6))
    elif message_text == "코스피":
        # Bug fixed: this branch compared the undefined name `text`
        # (NameError at runtime); it now checks `message_text`.
        req = requests.get("https://finance.naver.com/sise/sise_index.nhn?code=KOSPI").text
        soup = BeautifulSoup(req, 'html.parser')
        kospi = soup.select_one("#now_value").text
        return_text = kospi
    else:
        # SECURITY: the Kakao REST key is hard-coded; move it to an
        # environment variable via decouple.config like the Telegram token.
        headers = {
            "Host": "kapi.kakao.com",
            "Authorization": "KakaoAK 7e6ebaf4ae41d1259b2ab492fc603254"
        }
        query = message_text
        # `params` URL-encodes the query text (Korean characters, spaces, ...).
        response = requests.get(
            'https://kapi.kakao.com/v1/translation/translate',
            params={'src_lang': 'kr', 'target_lang': 'en', 'query': query},
            headers=headers)
        return_text = response.json()['translated_text'][0][0]
    requests.get(f'{url}{token}/sendMessage',
                 params={'chat_id': sender_id, 'text': return_text})
    return "ok", 200
#chat_id 단순히 내 텔레그램으로 보는 내용
if __name__ == ("__main__"):
    # Flask dev server; debug mode enables the reloader — not for production.
    app.run(debug=True)
"kangkang119@naver.com"
] | kangkang119@naver.com |
c4a70cad2f51b03286087f4519056953d18d15a0 | 7cac72b9a88ef964d6ce6536195fd323ed079e51 | /annogesiclib/rbs_overlap.py | 6ad3e55d536b2af6a697d6b73759252a2541eb51 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"BSD-2-Clause"
] | permissive | felixgrunberger/ANNOgesic | f853c7845b36a0e3966c6871105519ffb3fb4b53 | 0d67609b63d5d79fc0f57151f79b1a5265c38373 | refs/heads/master | 2021-05-04T22:08:28.872924 | 2018-02-02T15:00:04 | 2018-02-02T15:00:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | import os
import csv
import shutil
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper
def read_gff(gff_file, type_):
    """Collect gene-like GFF entries that could overlap a predicted RNA element.

    For type_ == "riboswitch" every qualifying feature except "riboswitch"
    itself is kept; for "thermometer" the "RNA_thermometer" features are
    skipped.  Entries are returned sorted by (seq_id, start, end, strand).
    """
    cdss = []
    g_h = open(gff_file)
    for entry in Gff3Parser().entries(g_h):
        # Helper().feature_without_notgene presumably filters for gene/CDS
        # style features — behaviour inferred from its name; confirm.
        if (Helper().feature_without_notgene(entry)):
            if (type_ == "riboswitch") and (entry.feature != "riboswitch"):
                cdss.append(entry)
            elif (type_ == "thermometer") and (
                    entry.feature != "RNA_thermometer"):
                cdss.append(entry)
    cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    g_h.close()
    return cdss
def check_repeat(tab, strain, strand, start, end):
    """Return True when `tab` lies on the given strain/strand and its
    [start, end] interval overlaps [start, end] (boundaries inclusive).

    Simplification: the original enumerated four overlap cases (contains,
    contained-in, left-overlap, right-overlap) and implicitly returned None
    otherwise.  Two comparisons cover all four cases, and the result is now
    always a bool (False is truth-equivalent to the old None for callers).
    """
    if tab["strain"] != strain or tab["strand"] != strand:
        return False
    # Closed intervals overlap iff each starts no later than the other ends.
    return tab["start"] <= end and tab["end"] >= start
def rbs_overlap(table_file, gff_file, type_):
    """Filter a riboswitch/RNA-thermometer table in place.

    Rows that overlap a gene feature from `gff_file`, or that duplicate a
    row already written (same strain/strand, overlapping coordinates), are
    dropped.  Survivors are written to a temp file which then replaces
    `table_file`.
    """
    tmp_tab = table_file + "_tmp"
    cdss = read_gff(gff_file, type_)
    out = open(tmp_tab, "w")
    fh = open(table_file, "r")
    tables = []
    for row in csv.reader(fh, delimiter='\t'):
        if not row[0].startswith("#"):  # skip comment/header lines
            tables.append({"strain": row[1], "strand": row[2],
                           "start": int(row[4]), "end": int(row[5]),
                           "info": "\t".join(row)})
    fh.close()
    for tab in tables:
        overlap = False
        # NOTE(review): `overlap` is overwritten on every iteration, so only
        # the status against the *last* cds survives this loop (check_repeat
        # returns a falsy value on non-overlap).  Likely intended:
        # `overlap = overlap or check_repeat(...)`.  Left unchanged because a
        # fix alters the published output — confirm with the authors.
        for cds in cdss:
            overlap = check_repeat(tab, cds.seq_id, cds.strand,
                                   cds.start, cds.end)
        for com in tables:
            if tab != com:
                repeat = check_repeat(tab, com["strain"], com["strand"],
                                      com["start"], com["end"])
                # Suppress the row only if it repeats a row that was already
                # written (rows already marked with the "print" flag below);
                # once `overlap` turns True it can no longer be reset here.
                if (not overlap):
                    if ((repeat) and (
                            "print" not in tab.keys()) and (
                            "print" not in com.keys())) or (
                            not repeat):
                        overlap = False
                    else:
                        overlap = True
        if not overlap:
            tab["print"] = True  # mark as written so later duplicates are dropped
            out.write(tab["info"] + "\n")
    out.close()
    # Atomically-ish replace the original table with the filtered one.
    os.remove(table_file)
    shutil.move(tmp_tab, table_file)
| [
"sung-huan.yu@uni-wuerzburg.de"
] | sung-huan.yu@uni-wuerzburg.de |
b28ff83cdbc7d6c9d586d10b424d05227594e0bb | 48ce447f2a3b5201e36d190818a4c70163a0ce16 | /RotateLeftInt.py | 78545338a71c756dad8792445c982dfc0f69e5c4 | [] | no_license | arpita-ak/APS-2020 | 89d2e2c3601959372eb9f9a982776f0d2c86f55b | b7fe75225d77fcf36b472d177b44af59704024cc | refs/heads/master | 2022-04-24T04:39:46.903613 | 2020-04-27T21:33:41 | 2020-04-27T21:33:41 | 236,293,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | """
Left rotate the array of intergers
"""
def leftrotate(N, arr):
    """Rotate the first N elements of `arr` left by one position, in place.

    Bug fixed: the original read arr[0] unconditionally, raising IndexError
    for N == 0 (empty list); rotation is a no-op for N <= 1.
    """
    if N <= 1:
        return
    temp = arr[0]
    for i in range(N - 1):
        arr[i] = arr[i + 1]
    arr[N - 1] = temp
# Demo: rotate a 6-element list left by one -> [2, 3, 4, 5, 6, 1]
a=[1,2,3,4,5,6]
N=6
leftrotate(N,a)
print(a)
"""
Left rotate for the strings
"""
def leftrotate(k):
    """Return `k` rotated left by one character ("ABCDE" -> "BCDEA").

    Bug fixed: the original evaluated k[0] even for an empty string,
    raising IndexError; empty and 1-character strings are now returned
    unchanged (which matches the rotation semantics).
    """
    if len(k) < 2:
        return k
    return k[1:] + k[0]
# Demo: string rotation -> BCDEA
n='ABCDE'
print(leftrotate(n))
| [
"noreply@github.com"
] | arpita-ak.noreply@github.com |
7ca887133d33000f514b699d46925d3b00acac17 | 762742b3c5cb5706e93e12dbdc3f8c46fc65f0db | /Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py | 7ceb301f00593b3b7e14527d6a35493b28e8f30c | [
"MIT"
] | permissive | EmersonElectricCo/content | 018f95f7fe7de13819e093a3661587a18407e348 | 82c82bbee7d428f0b14991a88c67672e2c02f5af | refs/heads/master | 2021-06-17T04:54:22.938033 | 2021-05-06T16:39:59 | 2021-05-06T16:39:59 | 161,693,191 | 2 | 0 | MIT | 2018-12-18T15:16:49 | 2018-12-13T20:47:26 | Python | UTF-8 | Python | false | false | 10,139 | py | from datetime import datetime
import pytest
import OpenPhish_v2
import demistomock as demisto
from OpenPhish_v2 import (
Client,
_is_reload_needed,
remove_backslash,
reload_command,
status_command,
url_command,
)
from freezegun import freeze_time
from test_data.api_raw import RAW_DATA
MOCK_URL = "http://openphish.com"
MOCK_DELIVERED_MESSAGE = {}
DBOT_KEY = "DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && val.Type == obj.Type)"
RELOADED_DATA = [
(Client(MOCK_URL, True, False, 2), {}, True), # case no data in memory
(
Client(MOCK_URL, True, False, 2),
{"list": []},
True,
), # case no timestamp and list is emtpy
(
Client(MOCK_URL, True, False, 2),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": None,
},
True,
), # case no timestamp
(
Client(MOCK_URL, True, False, 1),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": 1601542800000,
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)
True,
),
(
Client(MOCK_URL, True, False, 2),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": 1601542800000,
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)
False,
),
(
Client(MOCK_URL, True, False, 0.5),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": 1601542800000,
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)
True,
),
]
@pytest.mark.parametrize("client,data,output", RELOADED_DATA)
def test_is_reload_needed(mocker, client, data, output):
    """
    Given:
        - data as IntegrationContext
    When:
        - reload command was required
    Then:
        - Returns False if last reload occurred in the past fetch_interval_hours. True otherwise
    """
    # Freeze "now" so the timestamp-vs-fetch_interval_hours comparison is
    # deterministic.  NOTE(review): the `mocker` fixture is unused here.
    with freeze_time(datetime(2020, 10, 1, 10, 00, 00, 0)):
        assert _is_reload_needed(client, data) == output
LINKS = [("goo.co/", "goo.co"), ("goo.co", "goo.co")]
@pytest.mark.parametrize("url, expected_result", LINKS)
def test_remove_backslash(url: str, expected_result: str):
    """
    Given:
        - string representing url
    When:
        - saving data from to the integration context or checking a specific url
    Then:
        - checks the url format is without a backslash as last character
    """
    assert remove_backslash(url) == expected_result
def test_reload_command(mocker):
    """
    When:
        - reloading data from to the api to integration context
    Then:
        - checks if the reloading finished successfully
    """
    # Mock the feed download and the context write so no network/state is touched.
    mock_data_from_api = RAW_DATA
    mocker.patch.object(Client, "http_request", return_value=mock_data_from_api)
    mocker.patch.object(demisto, "setIntegrationContext")
    client = Client(
        url=MOCK_URL, use_ssl=False, use_proxy=False, fetch_interval_hours=1
    )
    status = reload_command(client)
    # Only the human-readable confirmation is asserted here.
    assert (
        status.readable_output
        == "Database was updated successfully to the integration context."
    )
STANDARD_NOT_LOADED_MSG = "OpenPhish Database Status\nDatabase not loaded.\n"
STANDARD_4_LOADED_MSG = (
"OpenPhish Database Status\n"
"Total **4** URLs loaded.\n"
"Last load time **Thu Oct 01 2020 06:00:00 (UTC)**\n"
)
CONTEXT_MOCK_WITH_STATUS = [
({}, STANDARD_NOT_LOADED_MSG), # case no data in memory
(
{"list": [], "timestamp": "1601532000000"},
STANDARD_NOT_LOADED_MSG,
), # case no timestamp and list is emtpy
(
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)}
STANDARD_4_LOADED_MSG,
),
]
@pytest.mark.parametrize("data,expected_result", CONTEXT_MOCK_WITH_STATUS)
@freeze_time("1993-06-17 11:00:00 GMT")
def test_status_command(mocker, data, expected_result):
    """
    Given:
        - Integration context
    When:
        - After status command
    Then:
        - Returns number of loaded urls if data was loaded.
        - Otherwise, returns Database not loaded.
    """
    client = Client(MOCK_URL, True, False, 1)
    # The status command only reads the stored integration context.
    mocker.patch.object(demisto, "getIntegrationContext", return_value=data)
    status = status_command(client)
    assert status.readable_output == expected_result
CONTEXT_MOCK_WITH_URL = [
(
{"url": "hxxp://lloyds.settlemypayee.uk"},
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
},
[
{
"URL": [
{
"Data": "hxxp://lloyds.settlemypayee.uk",
"Malicious": {
"Vendor": "OpenPhish",
"Description": "Match found in OpenPhish database",
},
}
],
"DBOTSCORE": [
{
"Indicator": "hxxp://lloyds.settlemypayee.uk",
"Type": "url",
"Vendor": "OpenPhish",
"Score": 3,
}
],
}
],
),
(
{"url": "hxxp://goo.co"},
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
},
[
{
"URL": [{"Data": "hxxp://goo.co"}],
"DBOTSCORE": [
{
"Indicator": "hxxp://goo.co",
"Type": "url",
"Vendor": "OpenPhish",
"Score": 0,
}
],
}
],
),
(
{"url": "hxxp://whatsapp-chat02.zzux.com,hxxp://lloyds.settlemypayee.uk"},
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
},
[
{
"URL": [
{
"Data": "hxxp://whatsapp-chat02.zzux.com",
"Malicious": {
"Vendor": "OpenPhish",
"Description": "Match found in OpenPhish database",
},
}
],
"DBOTSCORE": [
{
"Indicator": "hxxp://whatsapp-chat02.zzux.com",
"Score": 3,
"Type": "url",
"Vendor": "OpenPhish",
}
],
},
{
"URL": [
{
"Data": "hxxp://lloyds.settlemypayee.uk",
"Malicious": {
"Vendor": "OpenPhish",
"Description": "Match found in OpenPhish database",
},
}
],
"DBOTSCORE": [
{
"Indicator": "hxxp://lloyds.settlemypayee.uk",
"Score": 3,
"Type": "url",
"Vendor": "OpenPhish",
}
],
},
],
),
]
@pytest.mark.parametrize("url,context,expected_results", CONTEXT_MOCK_WITH_URL)
def test_url_command(mocker, url, context, expected_results):
    """
    Given:
        - a url
    When:
        - mocking the integration context data, running url_command
    Then:
        - validating whether the url is malicious (in integration context)
    """
    mocker.patch.object(
        demisto, "getIntegrationContext", return_value=context,
    )
    # The context is pre-populated, so skip the reload path entirely.
    mocker.patch.object(OpenPhish_v2, "_is_reload_needed", return_value=False)
    client = Client(MOCK_URL, True, False, 1)
    results = url_command(client, **url)
    # Tightened assertion: the original only checked `len(results) >= 1`,
    # which would pass even if expected URLs were silently dropped.
    assert len(results) == len(expected_results)
    for result, expected in zip(results, expected_results):
        output = result.to_context().get("EntryContext", {})
        assert output.get(
            "URL(val.Data && val.Data == obj.Data)", []
        ) == expected.get("URL")
        assert output.get(DBOT_KEY, []) == expected.get("DBOTSCORE")
| [
"noreply@github.com"
] | EmersonElectricCo.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.