blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
142d88ef89ad094b3a40b182b505dcf5443b83c0 | b9b5035d8dbad615d2c4fc9c158ca08d0f78a48b | /analysis.py | f1aec58c541a6a68d27bd0046bdba8c1a506ef36 | [] | no_license | Shyzay/Algorithms-and-Data-Structures | 4ab1806c6a1b9eb402a663e39d0e058af1fecd03 | d5d54690496030d2bf9ae3bfdba44f2adae80afb | refs/heads/basic_data_structures | 2020-03-27T19:43:36.784114 | 2018-09-01T18:33:42 | 2018-09-01T18:33:42 | 147,008,053 | 0 | 1 | null | 2018-09-14T13:45:35 | 2018-09-01T15:04:06 | Python | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 1 15:54:53 2018
@author: KELS
"""
# O(nlogn)
def analysis():
    """Print the minimum of a fixed sample list by sorting it first (O(n log n))."""
    values = [3, 0, 7, 4]  # sample input
    values.sort()          # ascending order puts the minimum at index 0
    print(values[0])       # smallest element

analysis()
"chukwuemekadanchuku@gmail.com"
] | chukwuemekadanchuku@gmail.com |
367e18616152ae54831e9ce196990bc1f4cb2a43 | ec0137c20121884917c60dac94792360ab7aa9a5 | /sql-quiz/quiz.py | 9419bf33a3dfbf33ec48d25d97c63e5eebcd1350 | [] | no_license | anjelicasilva/hackbright-lab-exercises | 6dbfe10939c63e9fb3f1dcd638305061bfe4bded | 021807e832a05a74607bcd0ca34ac2d29096a753 | refs/heads/master | 2022-12-11T00:51:12.458239 | 2019-11-22T23:42:27 | 2019-11-22T23:42:27 | 222,184,490 | 1 | 0 | null | 2022-12-08T02:36:16 | 2019-11-17T02:03:57 | TSQL | UTF-8 | Python | false | false | 16,133 | py | """An interactive, REPL-style quizzer for SQL problems."""
# Author: Joel Burton <joel@hackbrightacademy.com>, based on earlier work by
# Christian Fernandez.
from __future__ import print_function
import os
import pickle
import datetime
import psycopg2
import hashlib
import logging
import readline
log = logging.getLogger(__name__)
MAX_ROWS = 20
PROBLEM_FILE_PATH = "problem_set.pickle"
INTRO = """
Hackbright Academy - Introductory SQL Exercise
----------------------------------------------
You will write a series of SQL queries accomplishing different tasks.
Each problem will include a link to a SQLZoo tutorial that illustrates
the concepts required, as well as a link to syntax reference for the
kind of query you'll be doing.
Type '\\help' without quotes for a list of the available commands.
It will be helpful to refer to the list of tables, found by typing in '\\dt',
or viewing the schema of a given table, (ex: '\\d orders') while formulating
your queries. If you get very stuck each problem includes a hint on how to
formulate your query, accessed by typing '\\hint'.
DON'T FORGET TO END SQL STATEMENTS WITH A SEMICOLON!
"""
HELP = """
The following commands are available:
\\help - Display this message
\\hint - Show a hint about how to formulate the query
\\next - Skip the current problem
\\problem - Show the current problem statement
\\quit - Quit the program
\\dt - List the tables
\\d <tbl> - Show the schema used to create named table
Any other commands will be interpreted as a SQL query and executed against the
problem set database."""
# SQL to get a list of all tables for \dt command
SQL_ALL_TABLES = """
SELECT
n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','m','S','f','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY 1,2;
"""
# SQL to find a table oid, used for \d <tbl>
SQL_FIND_TABLE = """
SELECT c.oid
FROM pg_catalog.pg_class c
WHERE c.relname ~ '^({})$'
AND pg_catalog.pg_table_is_visible(c.oid)
"""
# SQL to get detail on a table, used for \d <tbl>
SQL_DESCRIBE_TABLE = """
SELECT a.attname AS "Column",
pg_catalog.format_type(a.atttypid, a.atttypmod) as "Type",
CASE WHEN a.attnotnull THEN 'not null' ELSE '' END AS "Modifiers"
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = '{}' AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum;
"""
# What's the minimum length a col should show given it's PG type?
# Maps PostgreSQL type OID -> minimum display width (in characters) used by
# Database.result_to_str; OIDs not listed here fall back to a width of 10.
COL_TYPE_TO_LEN = {
    23: 5,  # int
    1043: 2,  # string
    1700: 8,  # numeric
    1082: 10,  # date
    16: 5,  # boolean
    25: 2,  # text
    705: 2,  # text
    1042: 2,  # char
}
class Problem(object):
    """A single SQL quiz problem.

    The reference solution is not shipped to students: before pickling,
    write_pickle() replaces it with an MD5 digest of ``str()`` of the raw
    result rows (``solution_hash``), and student results are compared
    against that digest.
    """

    def __init__(self, num, instruction, task, hint, solution):
        self.num = num                  # 1-based problem number
        self.instruction = instruction  # tutorial text shown before the task
        self.task = task                # the task statement itself
        self.hint = hint                # shown for the \hint command
        self.solution = solution        # reference SQL; cleared before pickling
        self.solution_hash = None       # filled in by write_pickle()

    def check_solution(self, result):
        """Return True if *result* (raw result rows) matches the stored hash."""
        # CONSISTENCY FIX: reuse hash_solution() instead of duplicating the
        # md5-of-str logic inline.
        return self.solution_hash == self.hash_solution(result)

    @staticmethod
    def hash_solution(result):
        """Return the MD5 hex digest of str(result), as stored in solution_hash."""
        return hashlib.md5(str(result).encode('utf-8')).hexdigest()
class StudentAnswer(object):
    """A correct answer given by the student, serializable to and from text."""

    # Separator between the three fields in the marshalled form.
    PARTS_SPLIT = "\n\n-----\n\n"

    def __init__(self, num, task, solution):
        self.num = num
        self.task = task
        self.solution = solution

    @classmethod
    def from_string(cls, s):
        """Parse a marshalled answer produced by to_string()."""
        number, task_text, sql = s.split(cls.PARTS_SPLIT)
        return StudentAnswer(num=int(number), task=task_text, solution=sql)

    def to_string(self):
        """Marshall this answer as a single string."""
        pieces = [str(self.num), self.task, self.solution]
        return self.PARTS_SPLIT.join(pieces)
class StudentProgress(dict):
    """Track student progress and handle reading/writing the answer file.

    A dict of {problem number: StudentAnswer} for every problem already
    solved, persisted to ANSWER_FILE_PATH between runs.
    """

    ANSWER_FILE_PATH = 'answers.sql'
    # Separator between marshalled answers in the file.
    PROBLEM_SPLIT = "\n\n\n==========\n"

    def __init__(self):
        # FIX: the original called dict.__init__(self, self), seeding the
        # dict from itself (a harmless but confusing no-op); start empty.
        super(StudentProgress, self).__init__()
        self.read_answers()

    def read_answers(self):
        """Read previously saved student answers from disk, if any."""
        if not os.path.isfile(self.ANSWER_FILE_PATH):
            return
        with open(self.ANSWER_FILE_PATH, 'r') as f:
            for problem in f.read().split(self.PROBLEM_SPLIT):
                if not problem:
                    continue
                answer = StudentAnswer.from_string(problem)
                self[answer.num] = answer
        log.info("Read {} answers".format(len(self)))

    def save_answers(self):
        """Write all recorded answers back to disk, ordered by problem number."""
        with open(self.ANSWER_FILE_PATH, 'w') as f:
            f.write(self.PROBLEM_SPLIT.join(
                v.to_string() for k, v in sorted(self.items())))
        log.info("Saved {} answers".format(len(self)))

    def mark_solved(self, num, task, solution):
        """Record that problem *num* was solved with *solution* and persist."""
        self[num] = StudentAnswer(num, task, solution)
        self.save_answers()
class Database(object):
    """Database proxy.
    Handles connecting, executing functions, and DB utilities.

    NOTE: the exact text produced by result_to_str() is what gets hashed and
    compared against stored solution hashes, so its formatting is
    load-bearing -- do not change it cosmetically.
    """
    def __init__(self):
        self.cursor, self.conn = self.connect()
    @staticmethod
    def connect():
        """Connect to DB and return cursor."""
        conn = psycopg2.connect("dbname=quiz")
        conn.autocommit = True
        cursor = conn.cursor()
        return (cursor, conn)
    @staticmethod
    def result_to_str(description, results):
        """Return formatted results like psql
        Args:
            description (list): List of column metadata
            results (list): Result tuples
        Returns:
            string: Result text
        """
        results = list(results)
        if not results:
            return ""
        # Make dictionary of column-name, length-of-column
        cols = []
        for col in description:
            cols.append({'name': col.name,
                         'len': max(len(col.name), COL_TYPE_TO_LEN.get(col.type_code, 10))})
        # Figure out the maximum length of the data in a column
        # (only the rows that will actually be displayed are measured)
        for row in results[:MAX_ROWS]:
            for i, col in enumerate(row):
                if isinstance(col, str):
                    cols[i]['len'] = max(cols[i]['len'], len(col))
        out = "\n"
        # Print column names (centered within the column width)
        for i, col in enumerate(cols):
            out += " " + ("{:^" + str(col['len']) + "}").format(col['name']) + " "
            if i == len(cols) - 1:
                out += "\n"
            else:
                out += "|"
        # Print border below column names
        for i, col in enumerate(cols):
            out += "-" * (col['len'] + 2)
            if i == len(cols) - 1:
                out += "\n"
            else:
                out += "+"
        # Print rows of cols, normalizing None/bool/date values to strings
        for row in results[:MAX_ROWS]:
            for i, col in enumerate(row):
                if col is None:
                    col = ""
                if col is True:
                    col = "True"
                if col is False:
                    col = "False"
                if isinstance(col, datetime.date):
                    col = col.strftime("%Y-%m-%d")
                out += " " + ("{:" + str(cols[i]['len']) + "}").format(col) + " "
                if i == len(cols) - 1:
                    out += "\n"
                else:
                    out += "|"
        # Print count of rows (display is capped at MAX_ROWS)
        if len(results) <= MAX_ROWS:
            out += "({} rows)".format(len(results))
        else:
            out += "({} rows, truncated for display at {})".format(len(results), MAX_ROWS)
        return out
    def get_raw_result(self, attempt, error_on_empty=False):
        """Execute SQL and return results.
        Args:
            attempt (str): SQL
            error_on_empty (bool): do we raise error (Else print msg) for errors/empty?
        Returns:
            list: description metadata list
            list: tuples of results
        """
        try:
            self.cursor.execute(attempt)
            results = self.cursor.fetchall()
        except psycopg2.DatabaseError as e:
            err = "There was a problem with your SQL syntax:\n\n\t{}\n".format(e)
            if error_on_empty:
                raise ValueError(err)
            else:
                print(err)
                return [], []
        if results:
            return self.cursor.description, results
        else:
            err = "(your syntax was legal but your query returned no results)"
            if error_on_empty:
                raise ValueError(err)
            else:
                print(err)
                return [], []
    def show_tables(self):
        """Show tables."""
        self.cursor.execute(SQL_ALL_TABLES)
        results = self.cursor.fetchall()
        print(self.result_to_str(self.cursor.description, results))
    def show_schema(self, tokens):
        """Show schema for given table.

        tokens is the user's command split into words; tokens[1] is the
        table name.  With no table name, falls back to listing all tables.
        """
        if len(tokens) < 2:
            return self.show_tables()
        table_name = tokens[1]
        # NOTE(review): table_name is interpolated into SQL via str.format --
        # acceptable for a local teaching tool, but not parameterized.
        self.cursor.execute(SQL_FIND_TABLE.format(table_name))
        results = self.cursor.fetchall()
        if not results:
            print("No such table:", table_name)
            return
        oid = results[0][0]
        self.cursor.execute(SQL_DESCRIBE_TABLE.format(oid))
        results = self.cursor.fetchall()
        output = self.result_to_str(self.cursor.description, results)
        if not output:
            print("No such table:", table_name)
        else:
            print(output)
class SQLQuiz(object):
    """Quiz application object.
    Handles state of play and is controller for application.

    Flow: play() loops over the problems; show_problem() prints the prompt
    and get_solution() runs a small REPL until the student answers
    correctly, skips, or quits.
    """
    def __init__(self):
        self.db = Database()
        self.problems = self.read_problems()
        self.progress = StudentProgress()
        # The Problem currently being worked on (set in play()).
        self.current_problem = None
    @staticmethod
    def read_problems():
        """Read problems off disk."""
        with open(PROBLEM_FILE_PATH, 'rb') as f:
            return pickle.load(f)
    def play(self):
        """Play quiz."""
        if len(self.progress) == len(self.problems):
            # NOTE(review): this concatenation renders as
            # "...questions.Remove answers.sql..." -- a space is missing.
            return self.exit("You've already answered all the questions." +
                             "Remove answers.sql to redo the exercise.")
        print(INTRO)
        input("Press RETURN to start> ")
        print()
        self.current_problem = self.problems[0]
        while True:
            if self.current_problem.num in self.progress:
                print("Already answered question", self.current_problem.num)
            else:
                self.show_problem()
                if not self.get_solution():
                    # True is problem skipped/solved
                    # False is request to quit program.
                    self.exit("Quitting.")
                    return
            if self.current_problem != self.problems[-1]:
                # There are more problems, so go to the next one
                # (this doesn't look like we're advancing, but the problem num is
                # 1-based, whereas in the list, it's zero based, so
                # going to self.problems[6] is our problem numbered 7.)
                self.current_problem = self.problems[self.current_problem.num]
            else:
                self.exit("Exiting")
                return
    def exit(self, msg):
        """Hard exit with message.

        Closes the DB resources and returns None; callers treat this as a
        terminal call.
        """
        self.db.cursor.close()
        self.db.conn.close()
        print(msg, "\nGoodbye.\n")
        return
    def show_problem(self):
        """Show problem description and task."""
        print("\nProblem {}".format(self.current_problem.num))
        print("----------\n")
        print(self.current_problem.instruction)
        print()
        print("Task:", self.current_problem.task)
    def get_solution(self):
        """Get input from user until they quit or are correct.

        Returns True when the problem was solved or skipped, False when the
        student asked to quit (or sent EOF).
        """
        problem = self.current_problem
        # This accumulates the SQL command they are making
        sql = ""
        while True:
            try:
                if not sql:
                    print()
                    line = input("SQL [{}]> ".format(problem.num))
                else:
                    # Continuation prompt while the statement has no ';' yet.
                    line = input("... [{}]> ".format(problem.num))
            except EOFError:
                return False
            if not line:
                continue
            tokens = line.split()
            command = tokens[0]
            if command in ["\\q", "\\quit"]:
                return False
            elif command in ["\\problem"]:
                self.show_problem()
                sql = ""
            elif command in ["\\hint"]:
                print(problem.hint)
                sql = ""
            elif command in ["\\dt"]:
                self.db.show_tables()
                sql = ""
            elif command in ["\\d"]:
                self.db.show_schema(tokens)
                sql = ""
            elif command in ["\\help", "\\h", "\\?"]:
                print(HELP)
                sql = ""
            elif command in ["\\next", "\\skip"]:
                print("Skipping problem {}".format(problem.num))
                return True
            elif command in ["\\goto", "\\jumpto"]:
                num = int(tokens[1])
                # undocumented commands jumps to that numbered problem
                print("Jumping to {}".format(num))
                # -2 to compensate for:
                # 1 gets added for "successful answering this question"
                # we number problems from 1, but the python list starts at 0
                self.current_problem = self.problems[num - 2]
                return True
            else:
                # Not a command: accumulate SQL until a ';' ends the statement.
                sql = sql + "\n" + line
                if sql.strip().endswith(";"):
                    description, result = self.db.get_raw_result(sql)
                    if result:
                        print(self.db.result_to_str(description, result))
                        if problem.check_solution(result) is True:
                            print("\n\tCorrect!")
                            print(sql)
                            print("\n\tMoving on...\n")
                            self.progress.mark_solved(problem.num, problem.task, sql)
                            return True
                        else:
                            print("\n(results do not match answer)\n")
                    # Reset the buffer whether the attempt was right or wrong.
                    sql = ""
def write_pickle():
    """Write out problems file.

    Instructor-only tool (requires the private ``meta.problems`` module):
    builds Problem objects, replaces each reference solution with a hash of
    its query result, and pickles the lot to PROBLEM_FILE_PATH.
    """
    from meta.problems import PROBLEMS

    db = Database()
    prepared = []
    for index, spec in enumerate(PROBLEMS, start=1):
        prob = Problem(num=index, **spec)
        desc, rows = db.get_raw_result(prob.solution, error_on_empty=True)
        prob.solution_hash = prob.hash_solution(rows)
        # Never ship the reference SQL itself to students.
        prob.solution = None
        prepared.append(prob)
    with open(PROBLEM_FILE_PATH, 'bw') as f:
        pickle.dump(prepared, f)
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 2 and sys.argv[1] == "--rebuild":
        # If they passed in --rebuild, we'll make the problems file from the text
        write_pickle()
    else:
        # Normal student mode: load the pickled problems and start the quiz.
        quiz = SQLQuiz()
        quiz.play()
| [
"anjelicacsilva@gmail.com"
] | anjelicacsilva@gmail.com |
305404f8ab4c09bc9106fed4a41e86550e0a0785 | a5964e6aaa6ce85d0d02b4a602779529f78eb829 | /web/greenskiosk/shopping/migrations/0001_initial.py | b3b8d15ad96a1dedabde2b89cac9c89ef30376f8 | [] | no_license | Lily-Mbugua/group | 0b6a462bc8db8f0d3343579a17551dce84f8035b | 8dbf350857d20a47200aa5f64a785bef8d98c73f | refs/heads/master | 2022-12-26T22:58:25.333494 | 2020-10-01T11:39:16 | 2020-10-01T11:39:16 | 300,247,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | # Generated by Django 3.1.1 on 2020-09-24 07:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the shopping app.

    Creates the Payment, Order and Cart tables.  Payment and Order each
    reference a customers.Customer; Order also references the Payment that
    settled it, and Cart holds a many-to-many link to catalogue.Product.
    """

    initial = True

    dependencies = [
        # Customer and Product models must exist before the FKs below.
        ('catalogue', '0001_initial'),
        ('customers', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('payment_method', models.CharField(max_length=30)),
                ('amount', models.DecimalField(decimal_places=10, max_digits=20)),
                ('date_of_payment', models.DateTimeField()),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customers.customer')),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_number', models.IntegerField()),
                ('date_placed', models.DateTimeField()),
                ('status', models.CharField(max_length=30)),
                ('delivery_time', models.DateTimeField()),
                ('order_price', models.DecimalField(decimal_places=10, max_digits=20)),
                ('shipping_cost', models.DecimalField(decimal_places=10, max_digits=20)),
                ('total_price', models.DecimalField(decimal_places=10, max_digits=20)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customers.customer')),
                ('payment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shopping.payment')),
            ],
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateField()),
                ('status', models.CharField(max_length=30)),
                ('products', models.ManyToManyField(to='catalogue.Product')),
            ],
        ),
    ]
| [
"wanjerimbugua07@gmail.com"
] | wanjerimbugua07@gmail.com |
d3611c79ce17bccbd521fc4e41fd2942960664d4 | d990980c902fb0336ad94f862548d35535b8adee | /domain/subject.py | d2a9d54187495d0d3c20e7b4599927479c89bd8c | [] | no_license | qi4589746/FaceAttendanceSystem | e1634fc8f6649ca871c56a34bd00b128a54f8f40 | f19da6cd7a4f1aba364cbcf38d911acab6c5aed3 | refs/heads/master | 2022-12-30T02:53:22.076905 | 2019-03-13T09:46:34 | 2019-03-13T09:46:34 | 305,625,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
class Subject:
def __init__(self, id: str, name: str, createTime: int, updateTime: int):
self.updateTime = updateTime
self.createTime = createTime
self.name = name
self.id = id
| [
"t256413@gmail.com"
] | t256413@gmail.com |
4723657bf7951cdf1ed463860ddaf8014ca0851f | 66a88d500abf6d4fc2674c927993a608f09e02d8 | /delete_mail.py | 14c6bd5a477b2c80022d0d1667b925faa343a8bb | [] | no_license | hogwartsdeveloper/stepic_auto_test_Python_selenium | b755e05dbfa67072fbc904d70dfeb0fcbdb1ca49 | f49ad484d09063765a362d890369fb924a22aa77 | refs/heads/main | 2023-02-14T21:49:34.461650 | 2021-01-13T09:39:59 | 2021-01-13T09:39:59 | 328,563,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import
option = Options()
option.add_argument("--user-data-dir=C:/Users/zhah/AppData/Local/Google/Chrome/User Data/")
option.add_argument("--profile-directory=Default")
browser = webdriver.Chrome(options=option)
link = 'https://mail.google.com/'
browser.get(link)
| [
"jannuraidynuly@gmail.com"
] | jannuraidynuly@gmail.com |
90706fcf26eb7cfab7c2bab41589e627cef714a1 | bd494038f932c714fbb243837ee8584bd8da399f | /myproject/rent/forms.py | 4aa803b092de7746d651e2ddc014cb5dd25a8448 | [] | no_license | chawalya/HW459_1 | c2cedb30b0b3e7d64eae39ef2b5ff0fb01923ec9 | a19c420ddb4ab43995c87b29ae81ac854f1539e7 | refs/heads/master | 2021-01-24T20:13:17.431633 | 2018-02-28T07:21:45 | 2018-02-28T07:21:45 | 123,245,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | from django import forms
from django.forms import ModelForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.contrib.admin import widgets
import datetime
from .models import Car,Rent,Person
class CarForm(ModelForm):
    """Model form for creating/editing a Car, with a crispy-forms submit button."""
    class Meta:
        model = Car
        exclude=[]
    # BUG FIX: this method was misspelled ``__int__`` so it never ran and
    # the form was built without its FormHelper/submit button; renamed to
    # ``__init__`` to match RentForm and PersonForm.
    def __init__(self, *args, **kwargs):
        super(CarForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', 'Submit'))
class RentForm(ModelForm):
    """Model form for creating a Rent.

    user, fee and return_datetime are excluded because they are filled in
    server-side rather than by the renter.
    """
    class Meta:
        model = Rent
        exclude=['user','fee','return_datetime']
    def __init__(self, *args, **kwargs):
        super(RentForm, self).__init__(*args, **kwargs)
        # crispy-forms helper adds a Submit button to the rendered form.
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', 'Submit'))
class PersonForm(ModelForm):
    """Model form exposing every field of Person."""
    class Meta:
        model = Person
        exclude=[]
    def __init__(self, *args, **kwargs):
        super(PersonForm, self).__init__(*args, **kwargs)
        # crispy-forms helper adds a Submit button to the rendered form.
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', 'Submit'))
| [
"chawalyaa@gmail.com"
] | chawalyaa@gmail.com |
3d357e6bd94564dc803059577ed56fe78c0c5615 | 74db139df457fcc6c32464c08b1d7110c9a9ac3b | /venv/Scripts/rst2xetex.py | ddbfc2ddd7664e8c40256db6ca5e40ee479a1576 | [] | no_license | 45tooclose/python-new | cef3cf71c68d5195f5fb45d6918726a6bc050a7a | 98f3514aa9d7c4664459699d824c2086586abe1e | refs/heads/master | 2021-07-06T20:48:11.948302 | 2018-06-01T12:45:19 | 2018-06-01T12:45:19 | 136,414,887 | 2 | 5 | null | 2020-07-23T01:49:19 | 2018-06-07T03:07:07 | Python | UTF-8 | Python | false | false | 925 | py | #!C:\Users\Kamil\PycharmProjects\python-bootcamp\venv\Scripts\python.exe
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
import locale

try:
    # Use the user's default locale settings for string handling.
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # FIX: was a bare ``except:`` that swallowed everything; an unsupported
    # locale is the only expected failure here and is not fatal.
    pass

from docutils.core import publish_cmdline

description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources for compilation with the Unicode-aware TeX variants '
               'XeLaTeX or LuaLaTeX. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

publish_cmdline(writer_name='xetex', description=description)
| [
"kamil.sobczak6@gmail.com"
] | kamil.sobczak6@gmail.com |
5ac6bb85df26912fe0149e5095f2645030e3bea8 | 0de1cae227921e23bade3e21b020331c068ed79d | /practice8.py | 23f73827f32f4ac8db3cfddaebeb259398f6a0b6 | [] | no_license | Lyzzp/practice | 31746dd20295682e44dc3c0479de71c226642355 | 420077771fe5f99857738dbce976b5092ca72c08 | refs/heads/master | 2023-03-26T06:49:46.793144 | 2021-03-29T00:32:57 | 2021-03-29T00:32:57 | 351,705,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # coding=UTF-8
"""
@Time:2021/3/25 14:27
@Author:Administrator
@Project:pythonProject1
@Name:practice8+
"""
# 有一个已经排好序的数组,现在输出一个数,要求按原来的规律将它插入数组中。
# a = [1, 4, 6, 9, 13, 16, 19, 28, 40, 100, 0]
# print 'original list is:'
# for i in range(len(a)):
# print a[i]
# number = int(raw_input('insert a new number:\n'))
# end = a[9]
# if number > end:
# a[10] = number
# else:
# for i in range(10):
# if a[i] > number:
# temp1 = a[i]
# a[i] = number
# for j in range(i + 1, 11):
# temp2 = a[j]
# a[j] = temp2
# temp1 = temp2
# break
# for i in range(11):
# print a[i]
a = [1, 4, 6, 9, 13, 16, 19, 28, 40, 100]
number = int(raw_input('input a number:\n'))
if number > a[len(a) - 1]:
a.append(number)
else:
for i in range(len(a)):
if a[i] > number:
a.insert(i, number)
print a
| [
"1244654868@qq.com"
] | 1244654868@qq.com |
799a2a0bb4222d36f68fa3247969c278ff5189b9 | 6b0310f9ea343b2ca492eaebba1f01fa2b930c44 | /Tesla Stock Price.py | 372f0127e5e7899ab641fe7ca95b13acf3cd8faf | [] | no_license | yagyaton/API-Projects | 4e83f8492fb52450245253cbd87d9718e366cba7 | c0759750ce8a0f1d5be73ebbb76630d743e7b5e6 | refs/heads/master | 2023-07-13T16:01:21.605644 | 2021-08-21T18:39:12 | 2021-08-21T18:39:12 | 398,313,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,399 | py | import requests
from twilio.rest import Client
THRESHPRECENTAGE=2
STOCK = "TSLA"
COMPANY_NAME = "Tesla Inc"
ALPHA_API_KEY=alpha_api_key
FUNCTION="TIME_SERIES_DAILY"
NEWS_API_KEY=my_api_key
account_sid=my_sid
auth_token=my_auth_token
## STEP 1: Use https://www.alphavantage.co
# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print("Get News").
response = requests.get(url=f"https://www.alphavantage.co/query?function={FUNCTION}&symbol={STOCK}&apikey={COMPANY_NAME}")
response.raise_for_status()
stockinfo=response.json()
#---------------------------------WEIRD WAY TO GET ALL INFO ABOUT LAST TWO DAYS IN A DICT-----------------------------------------#
#I know the comprehension below is a bit hard to comprehend. You created a list of dates by type coversion of stockinfo['Time Series (Daily)]
#and then slides it for the first two elements, which are the latest two dates, yesterday and day before
#After that, key is each element in that list, and value is its value in the original dictionary.
#That is all.
# dayandbefore={list(stockinfo['Time Series (Daily)'])[:2][n] : stockinfo['Time Series (Daily)'][list(stockinfo['Time Series (Daily)'])[:2][n]] for n in range(len(list(stockinfo['Time Series (Daily)'])[:2]))}
# print(dayandbefore)
#---------------------------------------------------------------------------------------------------------------------------------#
#---------------------------------FUNCTION TO COMPARE STOCK PRICES OF LAST TWO DAYS-----------------------------------------------#
def lets_compare_stocks(x, y):
    """Compare two closing prices.

    Returns ``(percentage, arrow)`` where ``percentage`` is the absolute
    difference expressed as a percentage of *x* and ``arrow`` is an
    up/down marker depending on the sign of ``x - y``.

    NOTE(review): the denominator is x (the more recent close); percent
    change is conventionally taken relative to the earlier close y --
    confirm which was intended.
    """
    change = x - y
    magnitude = abs(change) / x * 100
    arrow = "🔺" if change >= 0 else "🔻"
    return (magnitude, arrow)
#---------------------------------------------------------------------------------------------------------------------------------#
# ---- Closing prices for the two most recent trading days ----
lasttwodays = list(stockinfo['Time Series (Daily)'])[:2]
yesterdayclose = float(stockinfo['Time Series (Daily)'][lasttwodays[0]]['4. close'])
daybeforeclose = float(stockinfo['Time Series (Daily)'][lasttwodays[1]]['4. close'])
(percentage, symbol) = lets_compare_stocks(yesterdayclose, daybeforeclose)
print(percentage, symbol)

## STEP 2: Use https://newsapi.org
# Get the first 3 news pieces for the COMPANY_NAME when the move is big enough.
if percentage >= THRESHPRECENTAGE:
    # NOTE(review): the "to=" query parameter is given a closing PRICE, but
    # newsapi expects a date here -- confirm what was intended.
    newsresponse = requests.get(url=f"https://newsapi.org/v2/everything?q={COMPANY_NAME}&language=en&to={daybeforeclose}&sortBy=publishedAt&apiKey={NEWS_API_KEY}")
    newsresponse.raise_for_status()
    news = newsresponse.json()
    # NOTE(review): assumes at least 3 articles are returned; fewer would
    # raise a KeyError below.
    newsdict = {n: {'Headline': news['articles'][n]['title'], 'Brief': news['articles'][n]['description']} for n in range(len(news['articles'][:3]))}
    news1 = f"Headline: {newsdict[0]['Headline']}\nBrief: {newsdict[0]['Brief']}\n"
    news2 = f"Headline: {newsdict[1]['Headline']}\nBrief: {newsdict[1]['Brief']}\n"
    news3 = f"Headline: {newsdict[2]['Headline']}\nBrief: {newsdict[2]['Brief']}\n"

    ## STEP 3: Use https://www.twilio.com
    # BUG FIX: the SMS used to be sent unconditionally, which raised a
    # NameError on news1/news2/news3 whenever the move was below the
    # threshold; the send now happens only when there is news to report.
    client = Client(account_sid, auth_token)
    message = client.messages \
        .create(
            body=f"\n{STOCK}: {symbol}{round(percentage,2)}%\n{news1}\n{news2}\n{news3}",
            from_='+16822551650',
            to='+91 9460186060'
        )
    print(message.status)
| [
"noreply@github.com"
] | yagyaton.noreply@github.com |
84eef6cc65ec245e27db562aaabcc91b480142bb | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Hash Table/logger_rate_limiter.py | 2d6dcba5dcff6b6585ced12fe6631fc0e2af2b74 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | class Logger:
def __init__(self):
"""
Initialize your data structure here.
"""
self._msg_dict = {}
def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
"""
Returns true if the message should be printed in the given timestamp, otherwise returns false.
If this method returns false, the message will not be printed.
The timestamp is in seconds granularity.
"""
if not message in self._msg_dict or 10 <= timestamp - self._msg_dict[message]:
self._msg_dict[message] = timestamp
return True
return False
| [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
377e0a1762965418f5d2a4d4871feeed710a71e8 | 565f95f207f49d987bdc372cd80942be95451731 | /python/misc/hlgrep | d76c0a07ff98af04d294df10bef1e8a2e4b4256a | [] | no_license | dustin/snippets | 76724c2131546bddd35a80da16921a44a49e2262 | 5be535890f9e71e298fec601d55c469c542ea330 | refs/heads/master | 2023-08-18T22:01:39.546961 | 2022-12-29T07:39:40 | 2022-12-29T07:39:40 | 18,840 | 18 | 4 | null | 2013-01-06T08:10:39 | 2008-05-22T07:58:19 | C | UTF-8 | Python | false | false | 306 | #!/usr/bin/env python
# Highlight every match of a regex (argv[1]) while copying stdin to stdout,
# using the terminal's standout mode (tput smso/rmso) for the highlighting.
import sys
import os
import posix
import re

# FIX: posix.popen() does not exist on Python 3; os.popen() is the portable
# spelling with the same behavior for reading a command's output.
smso = os.popen("tput smso").read()
rmso = os.popen("tput rmso").read()

# Wrap the whole pattern in a group so \1 reproduces the matched text.
expression = re.compile("(" + sys.argv[1] + ")")

for line in sys.stdin:
    sys.stdout.write(expression.sub(smso + '\\1' + rmso, line))
| [
"dustin@spy.net"
] | dustin@spy.net | |
c107cc01614fcfa3b7bbbb7b5e613c0536701ff1 | 319b6071c2931500de7061e991c6a7667477a138 | /Machine Learning/Super-Mario/mario.py | 4cb0b24eeefa9a2a2e32a6262507c501e48a644b | [] | no_license | deepshikhadk1234/Snippets | 71d7301ca5fb054a8bd22608024f58c40e33d6e2 | 1649cfacc450c67a40ac1b68c765ccb00c123a2a | refs/heads/master | 2023-08-27T01:55:38.474154 | 2021-10-12T09:09:46 | 2021-10-12T09:09:46 | 414,146,345 | 0 | 1 | null | 2021-10-09T07:10:42 | 2021-10-06T09:22:06 | Jupyter Notebook | UTF-8 | Python | false | false | 8,119 | py | import torch
from torch import nn
from torchvision import transforms as T
from PIL import Image
import numpy as np
from pathlib import Path
from collections import deque
import random, datetime, os, copy
#matplotlib imports
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#gym imports
import gym
from gym.spaces import Box
from gym.wrappers import FrameStack
from nes_py.wrappers import JoypadSpace
import gym_super_mario_bros
#device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Mario:
    """Epsilon-greedy DQN agent for Super Mario Bros."""

    def __init__(self, state_dim, action_dim, save_dir):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.save_dir = save_dir
        self.use_cuda = torch.cuda.is_available()
        # Mario's network for predicting action values
        self.net = MarioNet(self.state_dim, self.action_dim).float()
        if self.use_cuda:
            self.net = self.net.to(device="cuda")
        # Epsilon-greedy exploration parameters
        self.exploration_rate = 1
        self.exploration_rate_decay = 0.99999975  # multiplicative decay per step
        self.exploration_rate_min = 0.1
        self.curr_step = 0
        self.save_every = 10
        # Replay buffer of (state, next_state, action, reward, done) tensors
        self.memory = deque(maxlen=100000)
        self.batch_size = 32

    def act(self, state):
        # Given a state , choose an epsilon greedy action and update value of step
        # inputs:
        # state(LazyFrame):A single observation of the current state, dimension = (state_dim)
        # outputs:
        # action_idx(int):an integer representing which action mario will perform
        # EXPLORE
        if np.random.rand() < self.exploration_rate:
            action_idx = np.random.randint(self.action_dim)  # parameter passed is upper bound (exclusive)
        # EXPLOIT
        else:
            state = state.__array__()  # copy-safe view of the LazyFrame
            if self.use_cuda:
                state = torch.tensor(state).cuda()
            else:
                state = torch.tensor(state)
            state = state.unsqueeze(0)  # add batch dimension
            action_values = self.net(state, model="online")
            action_idx = torch.argmax(action_values, axis=1).item()
        # BUG FIX: decrease exploration rate.  The decay (0.99999975) is a
        # multiplier; the original used `+=`, which *increased* epsilon every
        # step so the agent never stopped exploring.
        self.exploration_rate *= self.exploration_rate_decay
        self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate)
        # increment step
        self.curr_step += 1
        return action_idx

    # following two functions are for Mario's memory
    def cache(self, state, next_state, action, reward, done):
        '''
        Each time Mario performs an action, he stores the experience to his memory. His
        experience includes the current state, action performed, reward from the action,
        the next state, and whether the game is done
        saves experience to self.memory
        Inputs:
        state (LazyFrame),
        next_state (LazyFrame),
        action (int),
        reward (float),
        done(boolean)
        '''
        # (removed a leftover debug print of type(state))
        state = state.__array__()
        next_state = next_state.__array__()
        state = torch.tensor([state]).to(device)
        next_state = torch.tensor([next_state]).to(device)
        action = torch.tensor([action]).to(device)
        # BUG FIX: reward was stored as a raw float, which made
        # torch.stack(...) in recall() fail; store it as a tensor like the
        # other fields.
        reward = torch.tensor([reward]).to(device)
        done = torch.tensor([done]).to(device)
        self.memory.append((state, next_state, action, reward, done,))

    def recall(self):
        # retrieve a batch of experience from memory
        batch = random.sample(self.memory, self.batch_size)
        state, next_state, action, reward, done = map(torch.stack, zip(*batch))
        # (fields are already tensors from cache(); no need to re-wrap them)
        return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze()
class MarioNet(nn.Module):
    '''
    Mini CNN used for DDQN, kept in two copies (online / frozen target):

        input -> 3 x (conv2d + relu) -> flatten -> 2 x (dense + relu) -> output
    '''

    def __init__(self, input_dim, output_dim):
        super().__init__()
        c, h, w = input_dim

        if h != 84:
            raise ValueError(f"Expecting input height : 84, got : {h}")
        if w != 84:
            # BUGFIX: message previously said "height" and printed {h}.
            raise ValueError(f"Expecting input width : 84, got : {w}")

        # Spatial sizes: 84 -(8,4)-> 20 -(4,2)-> 9 -(3,1)-> 7, so the
        # flattened feature count is 64 * 7 * 7 = 3136, matching the first
        # Linear layer below.
        self.online = nn.Sequential(
            nn.Conv2d(in_channels=c, out_channels=32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
            nn.ReLU(),
            # BUGFIX: the third conv was a copy-paste of the second
            # (in_channels=32, kernel_size=4, stride=2) although it receives
            # 64 channels — the original raised a shape error on the first
            # forward pass and would not have produced 3136 features anyway.
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, output_dim),
        )

        # Frozen copy used as the DDQN target network; its weights are only
        # refreshed by an explicit sync from `online`.
        self.target = copy.deepcopy(self.online)
        for p in self.target.parameters():
            p.requires_grad = False

    def forward(self, input, model):
        """Run `input` through the requested sub-network ('online' or 'target')."""
        if model == 'online':
            return self.online(input)
        elif model == 'target':
            return self.target(input)
        else:
            raise TypeError("Invalid model argument")
class Mario(Mario):
    """Extends the base agent with the TD estimate / TD target computations."""

    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.gamma = 0.9  # discount factor for future rewards

    def td_estimate(self, state, action):
        """Q_online(s, a) for the actions actually taken in the batch."""
        current_Q = self.net(state, model="online")[
            np.arange(0, self.batch_size), action
        ]  # Q_online(s,a)
        return current_Q

    @torch.no_grad()
    def td_target(self, reward, next_state, done):
        """Double-DQN target: r + gamma * Q_target(s', argmax_a Q_online(s', a))."""
        next_state_Q = self.net(next_state, model="online")
        # BUGFIX: argmax lives on torch, not on the agent instance —
        # `self.argmax(...)` raised AttributeError at the first learn step.
        best_action = torch.argmax(next_state_Q, axis=1)
        next_Q = self.net(next_state, model='target')[
            np.arange(0, self.batch_size), best_action
        ]
        # (1 - done) zeroes the bootstrap term on terminal transitions.
        return (reward + (1 - done.float()) * self.gamma * next_Q).float()
class Mario(Mario):
    """Adds the gradient step, target-network sync and checkpointing."""

    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=0.00025)
        self.loss_fn = torch.nn.SmoothL1Loss()

    def update_Q_online(self, td_estimate, td_target):
        """One gradient step of the online network; returns the scalar loss."""
        self.optimizer.zero_grad()
        batch_loss = self.loss_fn(td_estimate, td_target)
        batch_loss.backward()
        self.optimizer.step()
        return batch_loss.item()

    def sync_Q_target(self):
        """Copy the online network's weights into the frozen target network."""
        self.net.target.load_state_dict(self.net.online.state_dict())

    # save checkpoint
    def save(self):
        """Write a checkpoint holding the weights and the exploration rate."""
        checkpoint_index = int(self.curr_step // self.save_every)
        save_path = self.save_dir / f"mario_net_{checkpoint_index}.chkpt"
        torch.save(
            dict(model=self.net.state_dict(), exploration_rate=self.exploration_rate),
            save_path,
        )
        print(f"MarioNet saved to {save_path} at step {self.curr_step}")
class Mario(Mario):
    """Top-level agent: orchestrates syncing, checkpointing and learning."""

    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.burnin = 1e4  # min. experiences before training
        self.learn_every = 3  # no. of experiences between updates to Q_online
        self.sync_every = 100  # no. of experiences between Q_target & Q_online sync

    def learn(self):
        """One learning tick; returns (mean TD estimate, loss) or (None, None)."""
        step = self.curr_step

        if step % self.sync_every == 0:
            self.sync_Q_target()
        if step % self.save_every == 0:
            self.save()

        # Too early in training, or not a learning step: nothing to report.
        if step < self.burnin or step % self.learn_every != 0:
            return None, None

        # Sample a batch from memory, form the TD estimate/target and
        # take one optimisation step.
        state, next_state, action, reward, done = self.recall()
        td_est = self.td_estimate(state, action)
        td_tgt = self.td_target(reward, next_state, done)
        loss = self.update_Q_online(td_est, td_tgt)

        return (td_est.mean().item(), loss)
| [
"shivanshuman021@gmail.com"
] | shivanshuman021@gmail.com |
15e3c7db846499b785bfb918f0dfec6bcc4444cc | 0ee6b1c26fbb10d230719ca03908a7a128c83463 | /tdmt/utils.py | f5b046b8e720f27efa74666402a2ead705783483 | [] | no_license | noferai/gazprom_hack | b29c0b53eadf07fe574719720c2dffb96226ecee | 6d81b560e0df06c122b108d9f4af675352cb257e | refs/heads/master | 2023-01-12T13:27:15.116716 | 2020-10-18T08:50:38 | 2020-10-18T08:50:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from urllib.parse import unquote_plus, parse_qsl, urlencode, urlparse, urlunparse
def get_clean_next_url(request, fallback_url):
    """Return a sanitised version of the request's `next` URL.

    Falls back to `fallback_url` when no `next` query parameter is present;
    otherwise strips any `next` and `tender` parameters from the target so
    they do not leak into the redirect.
    """
    raw_next = request.GET.get("next")
    if raw_next is None:
        return fallback_url

    parts = urlparse(unquote_plus(raw_next))
    kept_params = {
        key: value
        for key, value in parse_qsl(parts.query)
        if key not in ("next", "tender")
    }
    return urlunparse((
        parts.scheme,
        parts.netloc,
        parts.path,
        parts.params,
        urlencode(kept_params),
        parts.fragment,
    ))
| [
"azovsky777@gmail.com"
] | azovsky777@gmail.com |
b4eb385678815595baaab25c1335efeaeccbc26b | b5b888fd9cfa17c9a3ef6526cd9ffc476f939da4 | /poker3/card.py | 226bb5746cab0660b5db284d4baa5d847cee358b | [] | no_license | javatican/migulu_python | 77fd65754d30da699f35f800cbd5de8e73e35c64 | f1adecf02f7beed8313fb72bf786d292fc733d34 | refs/heads/master | 2020-12-30T14:33:40.239905 | 2017-05-27T02:52:37 | 2017-05-27T02:52:37 | 91,322,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,091 | py | class Card:
IMAGE_SIZE=(2179,1216)
CARD_WIDTH=IMAGE_SIZE[0]/13
CARD_HEIGHT=IMAGE_SIZE[1]/5
cards = None
@classmethod
def create_cards(cls):
if Card.cards == None:
Card.cards=[]
card=Card(0, 0, 0)
Card.cards.append(card)
id=1
for i in range(1,5):
for j in range(1,14):
card=Card(id, i, j)
Card.cards.append(card)
id+=1
card=Card(53, 5, 14)
Card.cards.append(card)
card=Card(54, 5, 14)
Card.cards.append(card)
return Card.cards
def __init__(self, id, suit, number):
# 1-52 FOR NORMAL CARDS, 53 FOR BLACK JOKER, 54 FOR RED JOKER, 0 FOR COVER CARD
self.id = id
#1: CLUB, 2: DIAMOND, 3: HEART, 4:SPACE, 5. JOKER, 0 FOR COVER CARD
self.suit= suit
# 1 TO 13, 14 FOR JOKER, O FOR COVER CARD
self.number=number
# valid is True for the cards that are still valid
self.valid=True
self.rect = None
def invalid(self):
self.valid=False
def get_image_rect(self, ratio):
if self.rect==None:
if self.id>=1 and self.id<=52:
x=(self.number-1)*Card.CARD_WIDTH
y=(self.suit-1)*Card.CARD_HEIGHT
elif self.id==0:
x=2*Card.CARD_WIDTH
y=4*Card.CARD_HEIGHT
elif self.id==53:
x=0
y=4*Card.CARD_HEIGHT
elif self.id==54:
x=Card.CARD_WIDTH
y=4*Card.CARD_HEIGHT
else:
print("Error when calling get_image_rect()")
self.rect = (x,y,Card.CARD_WIDTH,Card.CARD_HEIGHT)
return (self.rect[0]*ratio, self.rect[1]*ratio, self.rect[2]*ratio, self.rect[3]*ratio)
def __str__(self):
return "[%d, %d, %d]" % (self.id, self.suit, self.number)
def __repr__(self):
return "[%d, %d, %d]" % (self.id, self.suit, self.number)
| [
"ryan.nieh@gmail.com"
] | ryan.nieh@gmail.com |
70a1baaa3281364acc80c862139540c66915ff93 | 9030ce268efdd03f62fb1a9c9f6a7ced726e4e42 | /migrations/versions/31c6e29748eb_.py | c3cefade9ed2b8e84d5c7874bcafe2465455f193 | [] | no_license | rleschuk/tvservice-server | cccdc3002d408d5f3e6293f3669ec55e0a710fec | 792ecf201cc2e5709dc7539fd341a7f9c15ac9c4 | refs/heads/master | 2022-12-12T23:22:27.051892 | 2018-05-07T06:41:52 | 2018-05-07T06:41:52 | 131,415,810 | 0 | 0 | null | 2022-12-08T00:56:52 | 2018-04-28T14:19:45 | Python | UTF-8 | Python | false | false | 968 | py | """empty message
Revision ID: 31c6e29748eb
Revises: 5a16c2d4cef1
Create Date: 2018-04-22 04:53:41.421145
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '31c6e29748eb'
down_revision = '5a16c2d4cef1'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a unique index on origins.resource, and recreate the
    origins.name index as unique (uniqueness cannot be altered in place,
    so the old non-unique index is dropped first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_origins_resource'), 'origins', ['resource'], unique=True)
    op.drop_index('ix_origins_name', table_name='origins')
    op.create_index(op.f('ix_origins_name'), 'origins', ['name'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert: restore the non-unique origins.name index and drop the
    unique origins.resource index (exact inverse of upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_origins_name'), table_name='origins')
    op.create_index('ix_origins_name', 'origins', ['name'], unique=False)
    op.drop_index(op.f('ix_origins_resource'), table_name='origins')
    # ### end Alembic commands ###
| [
"r.leschuk@gmail.com"
] | r.leschuk@gmail.com |
780aa0acafef5342d1200eb4af3069a60183016e | 44693097d051939b3b8f731d97292e7000b10744 | /common/main.py | f7923a105f83cbc683d67ad3227940c1b420bc4d | [] | no_license | Vacteria/vpmlib | 34b25e0da60304ef392c989965a8d2d92fe850f6 | 482f3942c999566907ce3fc19f017d795b40f48c | refs/heads/master | 2020-06-08T17:50:04.696677 | 2012-11-12T22:51:01 | 2012-11-12T22:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
# Copyright 2012 Miguel Angel Reynoso Reyes <miguel@vacteria.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
class Settings(object):
    """Global paths and flags for vpm.

    All paths hang off `root`, so the whole tree can be relocated
    (e.g. for chroot installs) by changing a single attribute.
    """

    def __init__(self):
        self.root = "/"
        self.vpmhome = self.root + "/var/vpm"
        # BUGFIX: these two paths previously prepended self.root a second
        # time (self.root + self.vpmhome + ...). Since vpmhome already
        # contains the root, that duplicated the prefix for any root other
        # than "/". Derive them from vpmhome directly.
        self.vpmlocaldb = self.vpmhome + "/data/packages.db.sqlite"
        self.vpmsetupdir = self.vpmhome + "/setup"
        self.selector = None
        self.verbose = False
        self.silent = False
| [
"tallerlinux@gmail.com"
] | tallerlinux@gmail.com |
a35928309c1fa5bf69a6928dedc88f21e8e1bf73 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /examples/chartsheet.py | 3edbd9dbfe920d08f7d3e6d4ecf08d471cba16e0 | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,774 | py | #######################################################################
#
# An example of creating an Excel chart in a chartsheet with Python
# and XlsxWriter.
#
# Copyright 2013-2019, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
# Open the output workbook.
workbook = xlsxwriter.Workbook('chartsheet.xlsx')

# A regular worksheet holds the data; a chartsheet holds only the chart.
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()

# Bold format for the heading row.
bold = workbook.add_format({'bold': 1})

# Sample data the chart will refer to.
headings = ['Number', 'Batch 1', 'Batch 2']
data = [
    [2, 3, 4, 5, 6, 7],
    [10, 40, 50, 20, 10, 50],
    [30, 60, 70, 50, 40, 30],
]

worksheet.write_row('A1', headings, bold)
for column, values in enumerate(data):
    # Columns A, B, C starting at row 2 (row/col indices are zero-based).
    worksheet.write_column(1, column, values)

# Bar chart with two series; the second series uses the
# [sheet, row, col] list form to define its ranges.
chart1 = workbook.add_chart({'type': 'bar'})
chart1.add_series({
    'name': '=Sheet1!$B$1',
    'categories': '=Sheet1!$A$2:$A$7',
    'values': '=Sheet1!$B$2:$B$7',
})
chart1.add_series({
    'name': ['Sheet1', 0, 2],
    'categories': ['Sheet1', 1, 0, 6, 0],
    'values': ['Sheet1', 1, 2, 6, 2],
})

# Title, axis labels and a built-in Excel chart style.
chart1.set_title({'name': 'Results of sample analysis'})
chart1.set_x_axis({'name': 'Test number'})
chart1.set_y_axis({'name': 'Sample length (mm)'})
chart1.set_style(11)

# Put the chart on the chartsheet and make it the active sheet on open.
chartsheet.set_chart(chart1)
chartsheet.activate()

workbook.close()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
436e02aa0565561984a172d513c8601de9a8915d | 31d35db3d1cf374f67f7acacbc4e35a2d91cd751 | /Intermediate_Topics/modules_and_testing.py | 2cc6c51eb3d429255f3ea3e0ab3181da91ce191e | [] | no_license | BlueSquare23/Learning_Python | 0d3ab25247055cea3017947e699029689d1146df | 1d571318533d97da8ed1551d744239856e231bcf | refs/heads/master | 2021-07-15T16:14:59.359645 | 2021-07-06T05:20:57 | 2021-07-06T05:20:57 | 175,972,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | #!/usr/bin/python3
#Modules and Testing
#Section 7.4
#See string_functions.py script which works in conjunction with this one.
import string_functions
# Exercise the helpers from string_functions.py (see companion script).
print(__name__)  # "__main__" when run directly, the module name when imported

# Renamed from `str`, which shadowed the builtin string type.
text = "123456"
print(string_functions.first_half(text))
print(string_functions.last_half(text))
| [
"johnlradford23@gmail.com"
] | johnlradford23@gmail.com |
f485b1fe84144a2e6e02f8c6db683e8241399c64 | 831fe3255ab2dd7abb9fc79a21756012d57cb863 | /projects/nerf/nerf/raymarcher.py | 3be73d32299a15739202136510193efb2809c1ef | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | ksengin/pytorch3d | 3e84365ed2499c11ef5a443c4ab28bda85e71f7e | 1fffa20541c9fa3248e02473bee294724922d989 | refs/heads/master | 2021-05-16T21:24:36.150263 | 2021-03-19T07:07:23 | 2021-03-19T07:07:23 | 250,474,512 | 0 | 0 | NOASSERTION | 2020-03-27T08:00:17 | 2020-03-27T08:00:17 | null | UTF-8 | Python | false | false | 2,796 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from pytorch3d.renderer import EmissionAbsorptionRaymarcher
from pytorch3d.renderer.implicit.raymarching import (
_check_density_bounds,
_check_raymarcher_inputs,
_shifted_cumprod,
)
class EmissionAbsorptionNeRFRaymarcher(EmissionAbsorptionRaymarcher):
    """
    This is essentially the `pytorch3d.renderer.EmissionAbsorptionRaymarcher`
    which additionally returns the rendering weights. It also skips returning
    the computation of the alpha-mask which is, in case of NeRF, equal to 1
    everywhere.

    The weights are later used in the NeRF pipeline to carry out the importance
    ray-sampling for the fine rendering pass.

    For more details about the EmissionAbsorptionRaymarcher please refer to
    the documentation of `pytorch3d.renderer.EmissionAbsorptionRaymarcher`.
    """

    def forward(
        self,
        rays_densities: torch.Tensor,
        rays_features: torch.Tensor,
        eps: float = 1e-10,
        **kwargs,
    ) -> torch.Tensor:
        """
        Args:
            rays_densities: Per-ray density values represented with a tensor
                of shape `(..., n_points_per_ray, 1)` whose values range in [0, 1].
            rays_features: Per-ray feature values represented with a tensor
                of shape `(..., n_points_per_ray, feature_dim)`.
            eps: A lower bound added to `rays_densities` before computing
                the absorbtion function (cumprod of `1-rays_densities` along
                each ray). This prevents the cumprod to yield exact 0
                which would inhibit any gradient-based learning.

        Returns:
            features: A tensor of shape `(..., feature_dim)` containing
                the rendered features for each ray.
            weights: A tensor of shape `(..., n_points_per_ray)` containing
                the ray-specific emission-absorbtion distribution.
                Each ray distribution `(..., :)` is a valid probability
                distribution, i.e. it contains non-negative values that integrate
                to 1, such that `weights.sum(dim=-1)==1).all()` yields `True`.
        """
        # Validate shapes and that densities lie in [0, 1] before any math.
        _check_raymarcher_inputs(
            rays_densities,
            rays_features,
            None,
            z_can_be_none=True,
            features_can_be_none=False,
            density_1d=True,
        )
        _check_density_bounds(rays_densities)
        # Drop the trailing singleton channel:
        # (..., n_points_per_ray, 1) -> (..., n_points_per_ray).
        rays_densities = rays_densities[..., 0]
        # Per-point transmittance: shifted cumulative product of
        # (1 - density); `eps` keeps every factor strictly positive so the
        # cumprod never collapses to an exact 0 (see docstring above).
        absorption = _shifted_cumprod(
            (1.0 + eps) - rays_densities, shift=self.surface_thickness
        )
        # Emission-absorption weights: density at a point times the
        # probability of the ray reaching it.
        weights = rays_densities * absorption
        # Expected feature value under the per-ray weight distribution.
        features = (weights[..., None] * rays_features).sum(dim=-2)

        return features, weights
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b0990cdf0e99c2b2a487505ba308a69b702245e5 | 83ad58baf732e0d37f0e0f678112d61c5de4bb70 | /preprocess_data.py | b336d6660c40fa87bb5754dcd869883eb0e269cd | [] | no_license | Rudra9335/Amazon-Helpfulness-Rating | 9c85edb9eea2562e5d59d02842145bfc1b799d49 | 88876e45e70131f23f33d7ea81c9b32567ef9570 | refs/heads/master | 2021-01-21T19:13:14.825581 | 2017-05-23T04:28:10 | 2017-05-23T04:28:10 | 92,129,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | __author__ = 'Abhishek'
import gzip
import json
def readGz(f):
    """Yield each line of the gzipped file at path `f`, parsed with eval().

    SECURITY: eval() executes arbitrary Python expressions — only use this
    on trusted data files.
    """
    for line in gzip.open(f):
        yield eval(line)
# Stream reviews from the gzipped dump and keep only those with more than
# 10 "out of" helpfulness votes, one JSON object per line.
# NOTE: the original also accumulated *every* record into an unused `data`
# list, holding the whole dump in memory for no reason; that was removed.
with open('processed_data.json', 'w') as outfile:
    for l in readGz("train.json.gz"):
        if l['helpful']['outOf'] > 10:
            json.dump(l, outfile)
            outfile.write('\n')
| [
"noreply@github.com"
] | Rudra9335.noreply@github.com |
295aab0f16ff27ff19e76c106508bfd801852f4d | f820881f55456ee884c039cd565a463f7c4347bb | /lyfeshoppe/blueprints/user/views.py | 468bddaaf7beaf802dde201aa607dee13c8db163 | [] | no_license | himudianda/lyfeshoppe | 1842e9e722331eb9e049ffa7b1fdabc781d28f01 | 5e149f7a970252d37e5a1c4f68da5ecf2ced0d26 | refs/heads/master | 2021-01-18T16:32:10.766663 | 2017-05-13T05:35:19 | 2017-05-13T05:35:19 | 46,232,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,097 | py | from flask import Blueprint, redirect, request, flash, url_for, render_template
from flask_login import login_required, login_user, current_user, logout_user
from flask_babel import gettext as _
from lyfeshoppe.lib.safe_next_url import safe_next_url
from lyfeshoppe.lib.role_redirects import get_dashboard_url
from lyfeshoppe.lib.oauth_providers import OAuthSignIn
from lyfeshoppe.blueprints.user.decorators import anonymous_required
from lyfeshoppe.blueprints.user.models import User
from lyfeshoppe.blueprints.user.forms import LoginForm, BeginPasswordResetForm, PasswordResetForm, SignupForm, \
UpdateLocale
user = Blueprint('user', __name__, template_folder='templates')
@user.route('/authorize/<provider>')
@anonymous_required()
def oauth_authorize(provider):
    """Start the OAuth flow by redirecting to the provider's auth page."""
    return OAuthSignIn.get_provider(provider).authorize()
@user.route('/callback/<provider>')
@anonymous_required()
def oauth_callback(provider):
    """Handle the provider's OAuth callback: validate the returned profile,
    create (or backfill) the local user record, then log the user in.

    Only the 'facebook' provider is handled; anything else flashes an
    error and redirects to signup.
    """
    oauth = OAuthSignIn.get_provider(provider)
    user_data = oauth.callback()
    # NOTE: rebinds `provider` from the URL value to the provider type
    # reported inside the OAuth payload; the two are expected to match.
    provider = user_data.get('type', None)

    # Some validations
    if provider == 'facebook':
        if not user_data.get('id', None):
            flash(_('Facebook Authentication failed.'), 'error')
            return redirect(url_for('user.signup'))
        if not user_data.get('email', None):
            flash(_('Facebook login failed. Your email was not received from facebook.'), 'error')
            return redirect(url_for('user.signup'))
        if not user_data.get('name', None):
            flash(_('Facebook login failed. Your full name is not registered with Facebook.'), 'error')
            return redirect(url_for('user.signup'))

    # Validations done; now save user data
    if provider == 'facebook':
        # Map the Facebook payload onto User columns; optional fields fall
        # back to None so missing data never raises.
        params = {
            "social_id": provider + '$' + user_data['id'],
            "fb_id": provider + '$' + user_data['id'],
            "fb_link": user_data.get('link', None),
            "fb_verified": user_data.get('verified', False),
            "fb_added": True,
            "first_name": user_data.get('first_name', None),
            "last_name": user_data.get('last_name', None),
            "age_range_min": user_data['age_range'].get('min', None) if user_data.get('age_range', None) else None,
            "age_range_max": user_data['age_range'].get('max', None) if user_data.get('age_range', None) else None,
            "gender": user_data.get('gender', None),
            "timezone": user_data.get('timezone', None),
            "locale": user_data.get('locale', None),
            "email": user_data['email']
        }

        # NOTE(review): this local `user` shadows the module-level blueprint
        # of the same name inside this function.
        user = User.find_by_identity(user_data['email'])
        if not user:
            user = User.create(**params)
        else:
            # Check user has facebook data populated. If not populate it now
            if not user.fb_added and not user.fb_id:
                user.update(**params)
    else:
        flash(_('Facebook Authentication failed.'), 'error')
        return redirect(url_for('user.signup'))

    # Second argument enables "remember me" for the session cookie.
    login_user(user, True)
    return redirect(get_dashboard_url())
@user.route('/login', methods=['GET', 'POST'])
@anonymous_required()
def login():
    """Render the login form and authenticate POSTed credentials.

    On success, activity tracking and referral points are updated and the
    browser is redirected to the safely-validated `next` URL, or to the
    role-based dashboard.
    """
    form = LoginForm(next=request.args.get('next'))

    if form.validate_on_submit():
        u = User.find_by_identity(request.form.get('identity'))

        if u and u.authenticated(password=request.form.get('password')):
            # As you can see remember me is always enabled, this was a design
            # decision I made because more often than not users want this
            # enabled. This allows for a less complicated login form.
            #
            # If however you want them to be able to select whether or not they
            # should remain logged in then perform the following 3 steps:
            # 1) Replace 'True' below with: request.form.get('remember', False)
            # 2) Uncomment the 'remember' field in user/forms.py#LoginForm
            # 3) Add a checkbox to the login form with the id/name 'remember'
            if login_user(u, remember=True):
                u.update_activity_tracking(request.remote_addr)
                u.give_referral_points()

                # Handle optionally redirecting to the next URL safely.
                next_url = request.form.get('next')
                if next_url:
                    return redirect(safe_next_url(next_url))

                return redirect(get_dashboard_url())
            else:
                # login_user() returning False means the account is inactive.
                flash(_('This account has been disabled.'), 'error')
        else:
            flash(_('Email/password is incorrect OR try social (facebook) login.'), 'error')

    return render_template('user/login.jinja2', form=form)
@user.route('/logout')
@login_required
def logout():
    """Log the current user out and send them back to the login page."""
    logout_user()
    flash(_('You have been logged out.'), 'success')
    return redirect(url_for('user.login'))
@user.route('/account/begin_password_reset', methods=['GET', 'POST'])
@anonymous_required()
def begin_password_reset():
    """Ask for an identity and e-mail that account a password-reset token."""
    form = BeginPasswordResetForm()

    if form.validate_on_submit():
        account = User.initialize_password_reset(request.form.get('identity'))

        flash(_('An email has been sent to %(email)s.',
                email=account.email), 'success')
        return redirect(url_for('user.login'))

    return render_template('user/begin_password_reset.jinja2', form=form)
@user.route('/account/password_reset', methods=['GET', 'POST'])
@anonymous_required()
def password_reset():
    """Complete a password reset using the token that was e-mailed.

    The token is deserialized back into a user; an expired or tampered
    token restarts the flow at begin_password_reset.
    """
    form = PasswordResetForm(reset_token=request.args.get('reset_token'))

    if form.validate_on_submit():
        u = User.deserialize_token(request.form.get('reset_token'))

        if u is None:
            flash(_('Your reset token has expired or was tampered with.'),
                'error')
            return redirect(url_for('user.begin_password_reset'))

        form.populate_obj(u)
        u.password = User.encrypt_password(request.form.get('password', None))
        u.save()

        if login_user(u):
            flash(_('Your password has been reset.'), 'success')
            return redirect(get_dashboard_url())

    return render_template('user/password_reset.jinja2', form=form)
@user.route('/signup', methods=['GET', 'POST'])
@anonymous_required()
def signup():
    """Register a new account and log the user straight in."""
    form = SignupForm()

    if form.validate_on_submit():
        new_user = User()
        form.populate_obj(new_user)
        new_user.password = User.encrypt_password(request.form.get('password', None))
        new_user.save()

        if login_user(new_user):
            flash(_('Awesome, thanks for signing up!'), 'success')
            return redirect(get_dashboard_url())

    return render_template('user/signup.jinja2', form=form)
@user.route('/settings/update_locale', methods=['GET', 'POST'])
@login_required
def update_locale():
    """Let the logged-in user change their locale preference."""
    form = UpdateLocale(locale=current_user.locale)

    if form.validate_on_submit():
        form.populate_obj(current_user)
        current_user.save()

        flash(_('Your locale settings have been updated.'), 'success')
        return redirect(url_for('user.settings'))

    return render_template('user/update_locale.jinja2', form=form)
| [
"harshit.imudianda@gmail.com"
] | harshit.imudianda@gmail.com |
2acd9d7dea7e1fa0cacc060cc757bb60e46e4906 | 55ab48d4b82a2f4e39475f1db9adb9dca6421dfb | /tcp_check/tests/common.py | 2a9c94922b97d3876217ec8e64869c7e53f99507 | [] | permissive | brentm5/integrations-core | a1e0ec5014273368946b79ac9aa5d4e101fd5411 | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | refs/heads/master | 2020-04-19T21:51:19.225718 | 2019-04-08T14:08:32 | 2019-04-08T14:08:32 | 168,453,264 | 0 | 0 | BSD-3-Clause | 2019-01-31T03:01:28 | 2019-01-31T03:01:26 | null | UTF-8 | Python | false | false | 397 | py | # (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
CHECK_NAME = "tcp_check"
INSTANCE = {
'host': 'datadoghq.com',
'port': 80,
'timeout': 1.5,
'name': 'UpService',
'tags': ["foo:bar"]
}
INSTANCE_KO = {
'host': '127.0.0.1',
'port': 65530,
'timeout': 1.5,
'name': 'DownService',
'tags': ["foo:bar"],
}
| [
"noreply@github.com"
] | brentm5.noreply@github.com |
ada1ac04d0162f1f086d1ebfc1bb718c67f74aee | 2a34a824e1a2d3bac7b99edcf19926a477a157a0 | /src/cr/vision/core/colors.py | 2865015e52642389b5b3c74caf559bef6dda8111 | [
"Apache-2.0"
] | permissive | carnotresearch/cr-vision | a7cb07157dbf470ed3fe560ef85d6e5194c660ae | 317fbf70c558e8f9563c3d0ba3bebbc5f84af622 | refs/heads/master | 2023-04-10T22:34:34.833043 | 2021-04-25T13:32:14 | 2021-04-25T13:32:14 | 142,256,002 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | '''
List of common colors in b g r format
'''
ALICEBLUE = (255, 248, 240)
ANTIQUEWHITE = (215, 235, 250)
AQUA = (255, 255, 0)
AQUAMARINE = (212, 255, 127)
AZURE = (255, 255, 240)
BEIGE = (220, 245, 245)
BISQUE = (196, 228, 255)
BLACK = (0, 0, 0)
BLANCHEDALMOND = (205, 235, 255)
BLUE = (255, 0, 0)
BLUEVIOLET = (226, 43, 138)
BROWN = (42, 42, 165)
BURLYWOOD = (135, 184, 222)
CADETBLUE = (160, 158, 95)
CHARTREUSE = (0, 255, 127)
CHOCOLATE = (30, 105, 210)
CORAL = (80, 127, 255)
CORNFLOWERBLUE = (237, 149, 100)
CORNSILK = (220, 248, 255)
CRIMSON = (60, 20, 220)
CYAN = (255, 255, 0)
DARKBLUE = (139, 0, 0)
DARKCYAN = (139, 139, 0)
DARKGOLDENROD = (11, 134, 184)
DARKGRAY = (169, 169, 169)
DARKGREEN = (0, 100, 0)
DARKGREY = (169, 169, 169)
DARKKHAKI = (107, 183, 189)
DARKMAGENTA = (139, 0, 139)
DARKOLIVEGREEN = (47, 107, 85)
DARKORANGE = (0, 140, 255)
DARKORCHID = (204, 50, 153)
DARKRED = (0, 0, 139)
DARKSALMON = (122, 150, 233)
DARKSEAGREEN = (143, 188, 143)
DARKSLATEBLUE = (139, 61, 72)
DARKSLATEGRAY = (79, 79, 47)
DARKSLATEGREY = (79, 79, 47)
DARKTURQUOISE = (209, 206, 0)
DARKVIOLET = (211, 0, 148)
DEEPPINK = (147, 20, 255)
DEEPSKYBLUE = (255, 191, 0)
DIMGRAY = (105, 105, 105)
DIMGREY = (105, 105, 105)
DODGERBLUE = (255, 144, 30)
FIREBRICK = (34, 34, 178)
FLORALWHITE = (240, 250, 255)
FORESTGREEN = (34, 139, 34)
FUCHSIA = (255, 0, 255)
GAINSBORO = (220, 220, 220)
GHOSTWHITE = (255, 248, 248)
GOLD = (0, 215, 255)
GOLDENROD = (32, 165, 218)
GRAY = (128, 128, 128)
GREEN = (0, 128, 0)
GREENYELLOW = (47, 255, 173)
GREY = (128, 128, 128)
HONEYDEW = (240, 255, 240)
HOTPINK = (180, 105, 255)
INDIANRED = (92, 92, 205)
INDIGO = (130, 0, 75)
IVORY = (240, 255, 255)
KHAKI = (140, 230, 240)
LAVENDER = (250, 230, 230)
LAVENDERBLUSH = (245, 240, 255)
LAWNGREEN = (0, 252, 124)
LEMONCHIFFON = (205, 250, 255)
LIGHTBLUE = (230, 216, 173)
LIGHTCORAL = (128, 128, 240)
LIGHTCYAN = (255, 255, 224)
LIGHTGOLDENRODYELLOW = (210, 250, 250)
LIGHTGRAY = (211, 211, 211)
LIGHTGREEN = (144, 238, 144)
LIGHTGREY = (211, 211, 211)
LIGHTPINK = (193, 182, 255)
LIGHTSALMON = (122, 160, 255)
LIGHTSEAGREEN = (170, 178, 32)
LIGHTSKYBLUE = (250, 206, 135)
LIGHTSLATEGRAY = (153, 136, 119)
LIGHTSLATEGREY = (153, 136, 119)
LIGHTSTEELBLUE = (222, 196, 176)
LIGHTYELLOW = (224, 255, 255)
LIME = (0, 255, 0)
LIMEGREEN = (50, 205, 50)
LINEN = (230, 240, 250)
MAGENTA = (255, 0, 255)
MAROON = (0, 0, 128)
MEDIUMAQUAMARINE = (170, 205, 102)
MEDIUMBLUE = (205, 0, 0)
MEDIUMORCHID = (211, 85, 186)
MEDIUMPURPLE = (219, 112, 147)
MEDIUMSEAGREEN = (113, 179, 60)
MEDIUMSLATEBLUE = (238, 104, 123)
MEDIUMSPRINGGREEN = (154, 250, 0)
MEDIUMTURQUOISE = (204, 209, 72)
MEDIUMVIOLETRED = (133, 21, 199)
MIDNIGHTBLUE = (112, 25, 25)
MINTCREAM = (250, 255, 245)
MISTYROSE = (225, 228, 255)
MOCCASIN = (181, 228, 255)
NAVAJOWHITE = (173, 222, 255)
NAVY = (128, 0, 0)
OLDLACE = (230, 245, 253)
OLIVE = (0, 128, 128)
OLIVEDRAB = (35, 142, 107)
ORANGE = (0, 165, 255)
ORANGERED = (0, 69, 255)
ORCHID = (214, 112, 218)
PALEGOLDENROD = (170, 232, 238)
PALEGREEN = (152, 251, 152)
PALETURQUOISE = (238, 238, 175)
PALEVIOLETRED = (147, 112, 219)
PAPAYAWHIP = (213, 239, 255)
PEACHPUFF = (185, 218, 255)
PERU = (63, 133, 205)
PINK = (203, 192, 255)
PLUM = (221, 160, 221)
POWDERBLUE = (230, 224, 176)
PURPLE = (128, 0, 128)
RED = (0, 0, 255)
ROSYBROWN = (143, 143, 188)
ROYALBLUE = (225, 105, 65)
SADDLEBROWN = (19, 69, 139)
SALMON = (114, 128, 250)
SANDYBROWN = (96, 164, 244)
SEAGREEN = (87, 139, 46)
SEASHELL = (238, 245, 255)
SIENNA = (45, 82, 160)
SILVER = (192, 192, 192)
SKYBLUE = (235, 206, 135)
SLATEBLUE = (205, 90, 106)
SLATEGRAY = (144, 128, 112)
SLATEGREY = (144, 128, 112)
SNOW = (250, 250, 255)
SPRINGGREEN = (127, 255, 0)
STEELBLUE = (180, 130, 70)
TAN = (140, 180, 210)
TEAL = (128, 128, 0)
THISTLE = (216, 191, 216)
TOMATO = (71, 99, 255)
TURQUOISE = (208, 224, 64)
VIOLET = (238, 130, 238)
WHEAT = (179, 222, 245)
WHITE = (255, 255, 255)
WHITESMOKE = (245, 245, 245)
YELLOW = (0, 255, 255)
YELLOWGREEN = (50, 205, 154)
| [
"shailesh@indigits.com"
] | shailesh@indigits.com |
83ef5f92fe38434ba6cab8b23f51c000ed6735e5 | fb4a589b87fde22d43fe4345794c00bbc3785085 | /resources/oci-lib/lib/python3.6/site-packages/services/events/src/oci_cli_events/generated/events_cli.py | 2f9059015121b13e588187ead12c46120419a5ed | [] | no_license | dickiesanders/oci-cli-action | a29ccf353a09cb110a38dc9c7f9ea76260c62a48 | ef409321a0b9bdbce37e0e39cfe0e6499ccffe1f | refs/heads/master | 2022-12-18T02:52:07.786446 | 2020-09-19T09:44:02 | 2020-09-19T09:44:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,182 | py | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import print_function
import click
import oci # noqa: F401
import six # noqa: F401
import sys # noqa: F401
from oci_cli.cli_root import cli
from oci_cli import cli_constants # noqa: F401
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
from oci_cli import custom_types # noqa: F401
from oci_cli.aliasing import CommandGroupWithAlias
@cli.command(cli_util.override('events.events_root_group.command_name', 'events'), cls=CommandGroupWithAlias, help=cli_util.override('events.events_root_group.help', """API for the Events Service. Use this API to manage rules and actions that create automation
in your tenancy. For more information, see [Overview of Events]."""), short_help=cli_util.override('events.events_root_group.short_help', """Events API"""))
@cli_util.help_option_group
def events_root_group():
    # Root click group for the Events service; subcommand groups attach here.
    # NOTE: this module appears to be code-generated — prefer regenerating
    # over hand-editing.
    pass
@click.command(cli_util.override('events.rule_group.command_name', 'rule'), cls=CommandGroupWithAlias, help="""The configuration details of an Events rule. For more information, see [Managing Rules for Events].""")
@cli_util.help_option_group
def rule_group():
    # Click group holding the rule-related subcommands registered below.
    pass


# Attach the rule group under the service root (`oci events rule ...`).
events_root_group.add_command(rule_group)
@rule_group.command(name=cli_util.override('events.change_rule_compartment.command_name', 'change-compartment'), help=u"""Moves a rule into a different compartment within the same tenancy. For information about moving resources between compartments, see [Moving Resources to a Different Compartment].""")
@cli_util.option('--rule-id', required=True, help=u"""The [OCID] of this rule.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment into which the resource should be moved.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the if-match parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def change_rule_compartment(ctx, from_json, rule_id, compartment_id, if_match):
    """CLI handler: move an Events rule into another compartment.

    Rejects a whitespace-only rule OCID, threads the optional etag
    (--if-match) and a request id into the call, then invokes the Events
    service's change_rule_compartment operation and renders the response.
    """

    if isinstance(rule_id, six.string_types) and len(rule_id.strip()) == 0:
        raise click.UsageError('Parameter --rule-id cannot be whitespace or empty string')

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    _details = {}
    _details['compartmentId'] = compartment_id

    client = cli_util.build_client('events', 'events', ctx)
    result = client.change_rule_compartment(
        rule_id=rule_id,
        change_rule_compartment_details=_details,
        **kwargs
    )
    cli_util.render_response(result, ctx)
@rule_group.command(name=cli_util.override('events.create_rule.command_name', 'create'), help=u"""Creates a new rule.""")
@cli_util.option('--display-name', required=True, help=u"""A string that describes the rule. It does not have to be unique, and you can change it. Avoid entering confidential information.""")
@cli_util.option('--is-enabled', required=True, type=click.BOOL, help=u"""Whether or not this rule is currently enabled.
Example: `true`""")
@cli_util.option('--condition', required=True, help=u"""A filter that specifies the event that will trigger actions associated with this rule. A few important things to remember about filters:
* Fields not mentioned in the condition are ignored. You can create a valid filter that matches all events with two curly brackets: `{}`
For more examples, see [Matching Events with Filters]. * For a condition with fields to match an event, the event must contain all the field names listed in the condition. Field names must appear in the condition with the same nesting structure used in the event.
For a list of reference events, see [Services that Produce Events]. * Rules apply to events in the compartment in which you create them and any child compartments. This means that a condition specified by a rule only matches events emitted from resources in the compartment or any of its child compartments. * Wildcard matching is supported with the asterisk (*) character.
For examples of wildcard matching, see [Matching Events with Filters]
Example: `\\\"eventType\\\": \\\"com.oraclecloud.databaseservice.autonomous.database.backup.end\\\"`""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment to which this rule belongs.""")
@cli_util.option('--actions', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--description', help=u"""A string that describes the details of the rule. It does not have to be unique, and you can change it. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. Exists for cross-compatibility only. For more information, see [Resource Tags].
Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags].
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'actions': {'module': 'events', 'class': 'ActionDetailsList'}, 'freeform-tags': {'module': 'events', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'events', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'actions': {'module': 'events', 'class': 'ActionDetailsList'}, 'freeform-tags': {'module': 'events', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'events', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'events', 'class': 'Rule'})
@cli_util.wrap_exceptions
def create_rule(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, display_name, is_enabled, condition, compartment_id, actions, description, freeform_tags, defined_tags):
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
_details = {}
_details['displayName'] = display_name
_details['isEnabled'] = is_enabled
_details['condition'] = condition
_details['compartmentId'] = compartment_id
_details['actions'] = cli_util.parse_json_parameter("actions", actions)
if description is not None:
_details['description'] = description
if freeform_tags is not None:
_details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
if defined_tags is not None:
_details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
client = cli_util.build_client('events', 'events', ctx)
result = client.create_rule(
create_rule_details=_details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_rule') and callable(getattr(client, 'get_rule')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_rule(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@rule_group.command(name=cli_util.override('events.delete_rule.command_name', 'delete'), help=u"""Deletes a rule.""")
@cli_util.option('--rule-id', required=True, help=u"""The [OCID] of this rule.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the if-match parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def delete_rule(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, rule_id, if_match):
if isinstance(rule_id, six.string_types) and len(rule_id.strip()) == 0:
raise click.UsageError('Parameter --rule-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('events', 'events', ctx)
result = client.delete_rule(
rule_id=rule_id,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_rule') and callable(getattr(client, 'get_rule')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
oci.wait_until(client, client.get_rule(rule_id), 'lifecycle_state', wait_for_state, succeed_on_not_found=True, **wait_period_kwargs)
except oci.exceptions.ServiceError as e:
# We make an initial service call so we can pass the result to oci.wait_until(), however if we are waiting on the
# outcome of a delete operation it is possible that the resource is already gone and so the initial service call
# will result in an exception that reflects a HTTP 404. In this case, we can exit with success (rather than raising
# the exception) since this would have been the behaviour in the waiter anyway (as for delete we provide the argument
# succeed_on_not_found=True to the waiter).
#
# Any non-404 should still result in the exception being thrown.
if e.status == 404:
pass
else:
raise
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Please retrieve the resource to find its current state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@rule_group.command(name=cli_util.override('events.get_rule.command_name', 'get'), help=u"""Retrieves a rule.""")
@cli_util.option('--rule-id', required=True, help=u"""The [OCID] of this rule.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'events', 'class': 'Rule'})
@cli_util.wrap_exceptions
def get_rule(ctx, from_json, rule_id):
if isinstance(rule_id, six.string_types) and len(rule_id.strip()) == 0:
raise click.UsageError('Parameter --rule-id cannot be whitespace or empty string')
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('events', 'events', ctx)
result = client.get_rule(
rule_id=rule_id,
**kwargs
)
cli_util.render_response(result, ctx)
@rule_group.command(name=cli_util.override('events.list_rules.command_name', 'list'), help=u"""Lists rules for this compartment.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment to which this rule belongs.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return. 1 is the minimum, 50 is the maximum. Default: 10""")
@cli_util.option('--page', help=u"""For list pagination. The value of the opc-next-page response header from the previous \"List\" call. For important details about how pagination works, see [List Pagination].""")
@cli_util.option('--lifecycle-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"]), help=u"""A filter to return only rules that match the lifecycle state in this parameter.
Example: `Creating`""")
@cli_util.option('--display-name', help=u"""A filter to return only rules with descriptions that match the displayName string in this parameter.
Example: `\"This rule sends a notification upon completion of DbaaS backup.\"`""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["TIME_CREATED", "ID", "DISPLAY_NAME"]), help=u"""Specifies the attribute with which to sort the rules.
Default: `timeCreated`
* **TIME_CREATED:** Sorts by timeCreated. * **DISPLAY_NAME:** Sorts by displayName. * **ID:** Sorts by id.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Specifies sort order.
* **ASC:** Ascending sort order. * **DESC:** Descending sort order.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'events', 'class': 'list[RuleSummary]'})
@cli_util.wrap_exceptions
def list_rules(ctx, from_json, all_pages, page_size, compartment_id, limit, page, lifecycle_state, display_name, sort_by, sort_order):
if all_pages and limit:
raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
kwargs = {}
if limit is not None:
kwargs['limit'] = limit
if page is not None:
kwargs['page'] = page
if lifecycle_state is not None:
kwargs['lifecycle_state'] = lifecycle_state
if display_name is not None:
kwargs['display_name'] = display_name
if sort_by is not None:
kwargs['sort_by'] = sort_by
if sort_order is not None:
kwargs['sort_order'] = sort_order
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('events', 'events', ctx)
if all_pages:
if page_size:
kwargs['limit'] = page_size
result = cli_util.list_call_get_all_results(
client.list_rules,
compartment_id=compartment_id,
**kwargs
)
elif limit is not None:
result = cli_util.list_call_get_up_to_limit(
client.list_rules,
limit,
page_size,
compartment_id=compartment_id,
**kwargs
)
else:
result = client.list_rules(
compartment_id=compartment_id,
**kwargs
)
cli_util.render_response(result, ctx)
@rule_group.command(name=cli_util.override('events.update_rule.command_name', 'update'), help=u"""Updates a rule.""")
@cli_util.option('--rule-id', required=True, help=u"""The [OCID] of this rule.""")
@cli_util.option('--display-name', help=u"""A string that describes the rule. It does not have to be unique, and you can change it. Avoid entering confidential information.""")
@cli_util.option('--description', help=u"""A string that describes the details of the rule. It does not have to be unique, and you can change it. Avoid entering confidential information.""")
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Whether or not this rule is currently enabled.
Example: `true`""")
@cli_util.option('--condition', help=u"""A filter that specifies the event that will trigger actions associated with this rule. A few important things to remember about filters:
* Fields not mentioned in the condition are ignored. You can create a valid filter that matches all events with two curly brackets: `{}`
For more examples, see [Matching Events with Filters]. * For a condition with fields to match an event, the event must contain all the field names listed in the condition. Field names must appear in the condition with the same nesting structure used in the event.
For a list of reference events, see [Services that Produce Events]. * Rules apply to events in the compartment in which you create them and any child compartments. This means that a condition specified by a rule only matches events emitted from resources in the compartment or any of its child compartments. * Wildcard matching is supported with the asterisk (*) character.
For examples of wildcard matching, see [Matching Events with Filters]
Example: `\\\"eventType\\\": \\\"com.oraclecloud.databaseservice.autonomous.database.backup.end\\\"`""")
@cli_util.option('--actions', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. Exists for cross-compatibility only. For more information, see [Resource Tags].
Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags].
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the if-match parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--force', help="""Perform update without prompting for confirmation.""", is_flag=True)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'actions': {'module': 'events', 'class': 'ActionDetailsList'}, 'freeform-tags': {'module': 'events', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'events', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'actions': {'module': 'events', 'class': 'ActionDetailsList'}, 'freeform-tags': {'module': 'events', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'events', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'events', 'class': 'Rule'})
@cli_util.wrap_exceptions
def update_rule(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, rule_id, display_name, description, is_enabled, condition, actions, freeform_tags, defined_tags, if_match):
if isinstance(rule_id, six.string_types) and len(rule_id.strip()) == 0:
raise click.UsageError('Parameter --rule-id cannot be whitespace or empty string')
if not force:
if actions or freeform_tags or defined_tags:
if not click.confirm("WARNING: Updates to actions and freeform-tags and defined-tags will replace any existing values. Are you sure you want to continue?"):
ctx.abort()
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
_details = {}
if display_name is not None:
_details['displayName'] = display_name
if description is not None:
_details['description'] = description
if is_enabled is not None:
_details['isEnabled'] = is_enabled
if condition is not None:
_details['condition'] = condition
if actions is not None:
_details['actions'] = cli_util.parse_json_parameter("actions", actions)
if freeform_tags is not None:
_details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
if defined_tags is not None:
_details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
client = cli_util.build_client('events', 'events', ctx)
result = client.update_rule(
rule_id=rule_id,
update_rule_details=_details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_rule') and callable(getattr(client, 'get_rule')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_rule(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
| [
"bytesbay@icloud.com"
] | bytesbay@icloud.com |
ada619c2bad165990d8bef1eedae3e7ff7d01b30 | 815f9282cfd5ab1224f05ec4e35227842d88e852 | /datasets/Greek_National_Exams/generate_preferences.py | 796a86ae50bc63fef84c3416748915e429d8d468 | [
"MIT"
] | permissive | ntzia/stable-marriage | 03c10342128ff3a7102c763474d488e7051527a0 | 80116a3a2aaa45689b404c9e684ea57c99bc5902 | refs/heads/master | 2023-08-22T18:19:51.922748 | 2021-10-14T15:45:38 | 2021-10-14T15:45:38 | 115,878,468 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,838 | py | #!/usr/bin/env python
import sys
import os
import numpy as np
from random import randint
from random import shuffle
import operator
import math
if len(sys.argv) != 7:
print "Usage: " + sys.argv[0] + " percentUsed studentOutFile schoolOutFile schoolClusters (-succinct | -complete) (-diverse | -skewed)"
sys.exit(1)
percent = float(sys.argv[1])
studentOutFile = sys.argv[2]
schoolOutFile = sys.argv[3]
clusters = int(sys.argv[4])
if (sys.argv[5] == "-succinct"):
succ = True
compl = False
elif (sys.argv[5] == "-complete"):
succ = False
compl = True
else:
print "5th argument must be either -succinct or -complete"
sys.exit(1)
if (sys.argv[6] == "-diverse"):
div = True
sk = False
elif (sys.argv[6] == "-skewed"):
div = False
sk = True
else:
print "6th argument must be either -diverse or -skewed"
sys.exit(1)
f_in1 = open("schools.txt", 'r')
f_in2 = open("grades.txt", 'r')
## First generate the preferences of schools
## Also find and report the n
f_out1 = open(studentOutFile, 'w')
## Parse the processed schools file
num_schools = 0
weights_of_schools = []
positions = []
n = 0
line = f_in1.readline()
while line:
if line.startswith("School"):
num_schools = num_schools + 1
tokens = line.split()
weights_of_schools.append(float(tokens[5]))
pos = int(math.ceil(int(tokens[3]) * percent / 100.0))
positions.append(pos)
n = n + pos
line = f_in1.readline()
print str(percent) + "% of the dataset used: n = " + str(n)
## Output the positions used if the output is succinct
if (succ):
f_out = open("positions.txt", 'w')
for school in range(num_schools):
f_out.write(str(positions[school]) + "\n")
f_out.close()
## Map school positions to agents if the output is complete
if (compl):
schools_to_agents = {}
counter = 0
for school in range(num_schools):
schools_to_agents[school] = (counter, counter + positions[school] - 1)
counter = counter + positions[school]
# Decide in which cluster each school belongs
schools_to_clusters = {}
for school in range(num_schools):
schools_to_clusters[school] = randint(0, clusters - 1)
# Compute weight of clusters
weights_of_clusters = []
for c in range(clusters):
weights_of_clusters.append(0.0)
for school in range(num_schools):
c = schools_to_clusters[school]
weights_of_clusters[c] += weights_of_schools[school]
for student in range(n):
## prefs_succinct ranks the schools (length num_schools)
## each student decides his favorite cluster
## half the probability of the rest of the schools is transferred in the favorite cluster
favorite_cluster = schools_to_clusters[np.random.choice(num_schools, 1, p=weights_of_schools)[0]]
weight_of_fav_cluster = weights_of_clusters[favorite_cluster]
weight_of_rest = 1.0 - weight_of_fav_cluster
a = 0.5 * (weight_of_rest / weight_of_fav_cluster)
weights = []
for school in range(num_schools):
if (schools_to_clusters[school] == favorite_cluster):
weights.append((1.0 + a) * weights_of_schools[school])
else:
weights.append(0.5 * weights_of_schools[school])
prefs_succinct = np.random.choice(num_schools, num_schools, replace=False, p=weights)
if (succ):
## Output the succinct prefs
for p in prefs_succinct:
f_out1.write(str(p))
f_out1.write(" ")
f_out1.write("\n")
elif (compl):
## Output the complete preference lists
prefs = []
for school in prefs_succinct:
(agent_start, agent_end) = schools_to_agents[school]
for a in range(agent_start, agent_end + 1):
prefs.append(a)
## prefs ranks all school positions
for p in prefs:
f_out1.write(str(p))
f_out1.write(" ")
f_out1.write("\n")
f_out1.close()
## -----------------------------------------------------------------------------------------------
## Now generate the preferences of schools
f_out2 = open(schoolOutFile, 'w')
# used for processing the "grades.txt file"
## [start, end)
start_grade_of_col = [0, 7000, 8000, 9000, 10000, 10500, 11000, 11500, 12000, 12500, 13000, 13500, 14000, 14500, 15000, 15500, 16000, 16500, 17000, 17500, 18000, 18500, 19000, 19500]
end_grade_of_col = [7000, 8000, 9000, 10000, 10500, 11000, 11500, 12000, 12500, 13000, 13500, 14000, 14500, 15000, 15500, 16000, 16500, 17000, 17500, 18000, 18500, 19000, 19500, 20000]
# Generate the preferences of each cluster
if (div):
# Rating of students among clusters are uncorrelated
prefs_of_clusters = []
for i in range(clusters):
prefs = []
for student in range(0, n):
prefs.append(student)
shuffle(prefs)
prefs_of_clusters.append(prefs)
elif (sk):
# Ratings are based on a single list of grades
ps = []
line = f_in2.readline()
while line:
if line.startswith("Profile 4"):
tokens = line.split()
total = float(tokens[3])
for a in tokens[5:]:
ps.append(float(a) / total)
break
line = f_in2.readline()
f_in2.close()
## Now generate grades based on the statistics read
global_grades = {}
for student in range(0, n):
selected_col = np.random.choice(len(start_grade_of_col), 1, p=ps)[0]
grade = randint(start_grade_of_col[selected_col], end_grade_of_col[selected_col] - 1)
global_grades[student] = grade
# Each cluster adds noise to the grades and sorts
prefs_of_clusters = []
for i in range(clusters):
grades = {}
for student in range(0, n):
grades[student] = global_grades[student] + 10000 * np.random.normal(0, 1)
sorted_grades = sorted(grades.items(), key=operator.itemgetter(1), reverse=True)
prefs = [stud for (stud, gr) in sorted_grades]
prefs_of_clusters.append(prefs)
# Generate the preferences of schools according to their cluster
for school in range(0, num_schools):
c = schools_to_clusters[school]
prefs = prefs_of_clusters[c]
# Create identical lists for each position of the school
for i in range(positions[school]):
for p in prefs:
f_out2.write(str(p))
f_out2.write(" ")
f_out2.write("\n")
f_out2.close()
| [
"ntziavelis@gmail.com"
] | ntziavelis@gmail.com |
2915daa920a718772f982608d13e1abbe0e0de8f | 96e76bcb634e0e48bcf3ae352eb235ed9bc32b36 | /app/migrations/0020_news_date_and_time.py | abf6be4f45c0a5659dc925a56654a48eab0b5a70 | [] | no_license | Ectroverse/EctroverseDjango | cef8a8a2149271c0995f1b60676f636e5dfc23ec | a3dad97b4e7a89694248c21df75ebdcc37e975f0 | refs/heads/master | 2023-04-18T21:12:20.062646 | 2021-04-28T11:06:01 | 2021-04-28T11:06:01 | 291,338,914 | 1 | 3 | null | 2021-01-23T14:32:21 | 2020-08-29T19:50:33 | Python | UTF-8 | Python | false | false | 409 | py | # Generated by Django 3.1 on 2021-01-24 16:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0019_auto_20210124_1638'),
]
operations = [
migrations.AddField(
model_name='news',
name='date_and_time',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
| [
"vsavko@gmail.com"
] | vsavko@gmail.com |
6c58b0de7a6aaa29da887706c57a87152a52622a | 7208db50a22368c335e7d7d8b37a3fedb09c60e5 | /cairis/gui/ResponsesDialog.py | 462cb9196456757bb3e23ec2869fb3380d5121b9 | [
"Apache-2.0"
] | permissive | nebloc/cairis | 41c7f20af56c46bddcb3927dc4aa410f6477e6ed | 1277a148a270d5471b59fc238aa6590bc1d3044e | refs/heads/master | 2020-03-24T03:51:11.908096 | 2018-07-27T16:07:36 | 2018-07-27T16:07:36 | 142,434,768 | 0 | 0 | Apache-2.0 | 2018-07-26T11:58:24 | 2018-07-26T11:58:24 | null | UTF-8 | Python | false | false | 4,350 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
import cairis.core.Risk
from ResponseDialog import ResponseDialog
from DialogClassParameters import DialogClassParameters
from ResponseDialogParameters import ResponseDialogParameters
from AcceptEnvironmentPanel import AcceptEnvironmentPanel
from TransferEnvironmentPanel import TransferEnvironmentPanel
from MitigateEnvironmentPanel import MitigateEnvironmentPanel
from DimensionBaseDialog import DimensionBaseDialog
from cairis.core.ARM import *
__author__ = 'Shamal Faily'
class ResponsesDialog(DimensionBaseDialog):
  """Dialog listing risk responses, with add / edit / delete operations."""
  def __init__(self,parent):
    """parent is the main window; builds a list control with Name/Type columns."""
    DimensionBaseDialog.__init__(self,parent,RESPONSES_ID,'Responses',(800,300),'response.png')
    self.theMainWindow = parent
    idList = [RESPONSES_LISTRESPONSES_ID,RESPONSES_BUTTONADD_ID,RESPONSES_BUTTONDELETE_ID]
    columnList = ['Name','Type']
    self.buildControls(idList,columnList,self.dbProxy.getResponses,'response')
    listCtrl = self.FindWindowById(RESPONSES_LISTRESPONSES_ID)
    listCtrl.SetColumnWidth(0,300)
  def addObjectRow(self,mitListCtrl,listRow,response):
    """Render one response as a list row: its name, then its class name as the type."""
    mitListCtrl.InsertStringItem(listRow,response.name())
    mitListCtrl.SetStringItem(listRow,1,response.__class__.__name__)
  def onAdd(self,evt):
    """Ask for a response type (Accept/Transfer/Mitigate) and open the add dialog."""
    try:
      riskDict = self.dbProxy.getDimensionNames('risk')
      if (len(riskDict) == 0):
        # A response must relate to an existing risk.
        dlg = wx.MessageDialog(self,'Cannot mitigate for non-existing risks','Add response',wx.OK)
        dlg.ShowModal()
        dlg.Destroy()
        return
      responseTypes = ['Accept','Transfer','Mitigate']
      from DimensionNameDialog import DimensionNameDialog
      rtDlg = DimensionNameDialog(self,'response',responseTypes,'Select',(300,200))
      if (rtDlg.ShowModal() == DIMNAME_BUTTONACTION_ID):
        responseType = rtDlg.dimensionName()
        # Panel defaults to Mitigate; Accept/Transfer override it.
        responsePanel = MitigateEnvironmentPanel
        if (responseType == 'Accept'):
          responsePanel = AcceptEnvironmentPanel
        elif (responseType == 'Transfer'):
          responsePanel = TransferEnvironmentPanel
        addParameters = ResponseDialogParameters(RESPONSE_ID,'Add response',ResponseDialog,RESPONSE_BUTTONCOMMIT_ID,self.dbProxy.addResponse,True,responsePanel,responseType)
        self.addObject(addParameters)
      rtDlg.Destroy()
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Add response',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return
  def onUpdate(self,evt):
    """Open the edit dialog for the currently selected response."""
    try:
      selectedObjt = self.objts[self.selectedLabel]
      responseType = selectedObjt.responseType()
      responsePanel = MitigateEnvironmentPanel
      if (responseType == 'Accept'):
        responsePanel = AcceptEnvironmentPanel
      elif (responseType == 'Transfer'):
        responsePanel = TransferEnvironmentPanel
      updateParameters = ResponseDialogParameters(RESPONSE_ID,'Edit response',ResponseDialog,RESPONSE_BUTTONCOMMIT_ID,self.dbProxy.updateResponse,False,responsePanel,responseType)
      self.updateObject(selectedObjt,updateParameters)
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Edit response',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      # NOTE(review): missing parentheses -- `dlg.Destroy` is never actually
      # called here (compare onAdd/onDelete), so the error dialog leaks.
      dlg.Destroy
  def onDelete(self,evt):
    """Delete the selected response after re-associating the requirements grid."""
    try:
      self.dbProxy.associateGrid(self.theMainWindow.FindWindowById(ID_REQGRID))
      self.deleteObject('No response','Delete response',self.dbProxy.deleteResponse)
    except ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Delete response',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
| [
"shamal.faily@googlemail.com"
] | shamal.faily@googlemail.com |
a76a3b3ce620e6eb8d85c04965a0fff691246222 | 64d7db567d7dbd8609719bc0c52377a45d27e6a2 | /hello_world.py | c9c8ff9358835a799a327d8077ffa5a09c926a82 | [
"MIT"
] | permissive | irot94/profiles-rest-api | d885dfb1d533e1149aa542fde01c1cd55db203cc | 907bd4ffdb63e3d03899cbbfd69bd0373a331232 | refs/heads/main | 2023-03-27T00:23:09.320095 | 2021-03-23T11:30:16 | 2021-03-23T11:30:16 | 350,365,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | print('Siemanko')
| [
"irtarasek@gmail.com"
] | irtarasek@gmail.com |
4ad0c640a13b187afed279bd9c4aa6634b7e0d9c | e05818a27aa9ad9db170ed2430ef15eafd6473d7 | /screens/HelpScreen.py | 551ce1387edb387034ced4ef4c932046821ec8a8 | [] | no_license | matdoess/photobox | 1241df3a800b4f1bbeb35c2a4240d322560ff100 | b5d664dcddf6917ea5d84c7f7e43e68494d98e83 | refs/heads/master | 2021-05-16T05:47:31.788273 | 2018-07-23T15:52:36 | 2018-07-23T15:52:36 | 103,319,164 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | from kivy.uix.screenmanager import Screen
from Camera import Camera
from TelegramBot import TelegramBot
from datetime import datetime
import threading
import settings
class HelpScreen(Screen):
    """Kivy screen for calling a help person: shows the configured help
    text/image and can send a camera snapshot plus an SOS message through
    the Telegram bot."""
    def on_pre_enter(self):
        # Refresh image and label from the config each time the screen opens.
        self.ids.HelpImageId.source = settings.myList['config']['images']['help_person']
        self.ids.HelpScreenLabel.text = settings.myList['config']['text']['help_text']
    def sending_help(self):
        # Switch the label to the "sending..." feedback text.
        self.ids.HelpScreenLabel.text = settings.myList['config']['text']['help_sending_text']
    def send_help(self):
        """Take a picture and send it, with a timestamped SOS text, to the
        configured help person via the Telegram bot; finally show the
        success text."""
        print("send_help ausgeführt")
        # Create Imagestream Objekt and Take Picture
        camera = Camera()
        camera.stream()
        # The captured picture is stored in camera.imagestream
        telegrambot = TelegramBot()
        senddatetime = datetime.now().strftime("%Y-%m-%d - %H:%M:%S")
        telegrambot.text = settings.myList['config']['text']['photobox_sos'] + ' [' + senddatetime + ']'
        telegrambot.photo = camera.imagestream
        telegrambot.updateHelpPerson()
        #telegrambot.send()
        # send() runs in a background thread (presumably so the UI does not
        # block while the message is delivered -- confirm).
        telegram_thread = threading.Thread(target=telegrambot.send, args=())
        telegram_thread.start()
        # # Telegram
        # import telegram
        # chat_id = settings.myList['private_config']['telegram']['help_person_id']
        # #image.save(bio, 'JPEG')
        # #bio.seek(0)
        # print("Create BOT start")
        # bot = telegram.Bot(token=settings.myList['private_config']['telegram']['api-token'])
        # print("Create BOT end")
        # #print(bot.get_me())
        # print("Send message start")
        # bot.send_message(chat_id, text=settings.myList['config']['text']['photobox_sos'])
        # print("Send image start")
        # bot.send_photo(chat_id, photo=camera.imagestream)
        # print("Send ENDE")
        self.ids.HelpScreenLabel.text = settings.myList['config']['text']['help_success_text']
| [
"johannes.lerch@buttinette.de"
] | johannes.lerch@buttinette.de |
cfcea90004d80936f1157e825bf22cbb2c43710c | 5dc4d2592f4ed00d15f1b9f0ff5e7868346094d7 | /Customer_Churn_Modelling.py | 26f43ed82b47c32b8f4970cf8e0aa1ba2022e667 | [] | no_license | chopradeepanshu/EY-ML | 32d639515d6a60c81113ec599722ea4c7bc83177 | 3ba59f652a2315b1988af51e7c87d04e53dda94d | refs/heads/master | 2020-04-15T22:21:57.282198 | 2019-01-10T14:06:44 | 2019-01-10T14:06:44 | 165,070,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # Logistic Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataframe = pd.read_csv('C:\\Deeps\\Machine Learning Projects\\Use Case\\Churn Modelling\\Data\\ChurnModelling.csv')
list(dataframe)

# EDA: encode the two categorical columns as integer codes
dataframe.dtypes
dataframe["Gender"] = dataframe["Gender"].astype('category')
dataframe["Gender"] = dataframe["Gender"].cat.codes
dataframe["Geography"] = dataframe["Geography"].astype('category')
dataframe["Geography"] = dataframe["Geography"].cat.codes

# Features are columns 3..12; column 13 is the churn label.  The original
# sliced iloc[:, 3:], which put the label itself into X (target leakage).
X = dataframe.iloc[:, 3:13].values
y = dataframe.iloc[:, 13].values

# Splitting the dataset into the Training set and Test set.
# sklearn.cross_validation was removed in scikit-learn 0.20; the same
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)

# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state = 0)
model.fit(X_train, y_train)

# Predicting the Test set results
y_pred = model.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Mean accuracy on the held-out test split
score = model.score(X_test, y_test)
print(score)
| [
"noreply@github.com"
] | chopradeepanshu.noreply@github.com |
053f1ccda4e39457dc790683227d0bc1b6d7da4d | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ges/huaweicloudsdkges/v2/model/list_jobs_resp_job_list.py | 45bcf3b71b8ca926bfc21443a9d3494688005247 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 11,035 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListJobsRespJobList:
    """One entry of the job list returned by the ListJobs API.

    Carries the job's identity (id, name, type), lifecycle information
    (status, begin/end time, progress, failure reason), the graph it
    belongs to, and a nested detail object.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    # attribute name -> attribute type
    openapi_types = {
        'job_id': 'str',
        'status': 'str',
        'job_type': 'str',
        'job_name': 'str',
        'related_graph': 'str',
        'begin_time': 'str',
        'end_time': 'str',
        'job_detail': 'ShowJobRespJobDetail',
        'fail_reason': 'str',
        'job_progress': 'float'
    }

    # attribute name -> JSON key
    attribute_map = {
        'job_id': 'job_id',
        'status': 'status',
        'job_type': 'job_type',
        'job_name': 'job_name',
        'related_graph': 'related_graph',
        'begin_time': 'begin_time',
        'end_time': 'end_time',
        'job_detail': 'job_detail',
        'fail_reason': 'fail_reason',
        'job_progress': 'job_progress'
    }

    def __init__(self, job_id=None, status=None, job_type=None, job_name=None,
                 related_graph=None, begin_time=None, end_time=None,
                 job_detail=None, fail_reason=None, job_progress=None):
        """Create a job-list entry; every field is optional.

        :param job_id: job ID
        :param status: job status (pending / running / success / failed)
        :param job_type: job type
        :param job_name: job name
        :param related_graph: name of the graph the job belongs to
        :param begin_time: start time, UTC "yyyy-MM-dd'T'HH:mm:ss"
        :param end_time: end time, UTC "yyyy-MM-dd'T'HH:mm:ss"
        :param job_detail: nested ShowJobRespJobDetail object
        :param fail_reason: failure reason, if any
        :param job_progress: execution progress (reserved, currently unused)
        """
        self._job_id = None
        self._status = None
        self._job_type = None
        self._job_name = None
        self._related_graph = None
        self._begin_time = None
        self._end_time = None
        self._job_detail = None
        self._fail_reason = None
        self._job_progress = None
        self.discriminator = None

        # Route every supplied (non-None) value through its property setter,
        # exactly as the generated code did with one `if` per field.
        for name, value in (
                ('job_id', job_id), ('status', status),
                ('job_type', job_type), ('job_name', job_name),
                ('related_graph', related_graph), ('begin_time', begin_time),
                ('end_time', end_time), ('job_detail', job_detail),
                ('fail_reason', fail_reason), ('job_progress', job_progress)):
            if value is not None:
                setattr(self, name, value)

    @property
    def job_id(self):
        """Job ID."""
        return self._job_id

    @job_id.setter
    def job_id(self, job_id):
        self._job_id = job_id

    @property
    def status(self):
        """Job status: pending / running / success / failed."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def job_type(self):
        """Job type."""
        return self._job_type

    @job_type.setter
    def job_type(self, job_type):
        self._job_type = job_type

    @property
    def job_name(self):
        """Job name."""
        return self._job_name

    @job_name.setter
    def job_name(self, job_name):
        self._job_name = job_name

    @property
    def related_graph(self):
        """Name of the graph the job belongs to."""
        return self._related_graph

    @related_graph.setter
    def related_graph(self, related_graph):
        self._related_graph = related_graph

    @property
    def begin_time(self):
        """Job start time, UTC "yyyy-MM-dd'T'HH:mm:ss"."""
        return self._begin_time

    @begin_time.setter
    def begin_time(self, begin_time):
        self._begin_time = begin_time

    @property
    def end_time(self):
        """Job end time, UTC "yyyy-MM-dd'T'HH:mm:ss"."""
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        self._end_time = end_time

    @property
    def job_detail(self):
        """Nested ShowJobRespJobDetail object."""
        return self._job_detail

    @job_detail.setter
    def job_detail(self, job_detail):
        self._job_detail = job_detail

    @property
    def fail_reason(self):
        """Failure reason, if the job failed."""
        return self._fail_reason

    @fail_reason.setter
    def fail_reason(self, fail_reason):
        self._fail_reason = fail_reason

    @property
    def job_progress(self):
        """Execution progress (reserved field, currently unused)."""
        return self._job_progress

    @job_progress.setter
    def job_progress(self, job_progress):
        self._job_progress = job_progress

    def to_dict(self):
        """Serialize the model into a plain dict, masking sensitive fields."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they share the type and all attribute values."""
        return isinstance(other, ListJobsRespJobList) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
6bcc322d94ee9d15d455bcac104c4d0c5451ce35 | 97bd05421e9e73bd99c83fc4a4d8c660557aba30 | /async/async.py | c09c7f276a88ce171efd8afd6267fb83ebdf8acd | [] | no_license | frank-26/python-demos | 1594ad2987c3500e86c5e9e60c4ae80f89155f14 | e0a4c58b48ee4f91aa02ea558d451137f947210a | refs/heads/master | 2020-09-14T13:32:49.448840 | 2019-11-21T12:47:19 | 2019-11-21T12:47:19 | 223,141,669 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | #!/usr/bin/env python3
# async.py
import asyncio
async def count():
    """Print "One", suspend for one second, then print "Two"."""
    print("One")
    await asyncio.sleep(1)  # non-blocking: other count() tasks run meanwhile
    print("Two")
async def main():
    """Run three count() coroutines concurrently via gather()."""
    await asyncio.gather(count(), count(), count())
asyncio.run(main()) | [
"franklyan24@gmail.com"
] | franklyan24@gmail.com |
3d9cb190898bb0de72ad98aa055083f485cc3c08 | f07392633118f7f6aff0a5a9b2a5c9eaab1a0299 | /Examples/packaging/Capitalize/capitalize/capital_mod.py | 2b4d8147fce933a404c366dee5112a3e807866e5 | [] | no_license | UWPCE-PythonCert/Py300 | afc4abca736cfea031292db6bed996465f37604f | 7f93d20ae66ba9a56c4dcc0c1fdafcf79db15349 | refs/heads/master | 2020-05-26T13:43:38.098926 | 2018-03-05T07:11:48 | 2018-03-05T07:11:48 | 85,002,542 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | #!/usr/bin/env python
"""
A really simple module, just to demonstrate packaging
"""
def capitalize_line(instr):
    """
    Capitalize every word of the input string.

    :param instr: the string to capitalize; it should be a single line.
    :type instr: string
    :returns: a capitalized version of instr
    """
    return " ".join(word.capitalize() for word in instr.split())

def capitalize(infilename, outfilename):
    """
    Read infilename and write a copy to outfilename with every word
    capitalized.

    note: very primitive -- it will mess some files up!

    :param infilename: The file name you want to process
    :type infilename: string
    :param outfilename: the name of the new file that will be created
    :type outfilename: string
    :returns: None
    :raises: IOError if infilename doesn't exist.
    """
    # `with` guarantees both handles are closed even if a write fails
    # (the original leaked both files on error).  Mode 'U' was deprecated
    # since Python 3.4 and removed in 3.11; default text mode already
    # provides universal-newline reading.
    with open(infilename) as infile, open(outfilename, 'w') as outfile:
        for line in infile:
            outfile.write(capitalize_line(line))
            outfile.write("\n")
return None | [
"PythonCHB@gmail.com"
] | PythonCHB@gmail.com |
654f29881325caa2103913571852440c305a2bae | e893f9d8ff335a6c8b427920f2b2a2e7cea8ae74 | /simulador1.py | 6973e686e65e5e4bde35a459c828cdcd3ee7354d | [] | no_license | manuLgMad/naiveEpidemicSimulation | 00fa1737e4c04249805e8d226114aec91d5c05bd | 6318bda8c9b06f5200164ffc21e30d4ed2bc82fe | refs/heads/master | 2022-04-21T08:23:03.986789 | 2020-03-25T19:09:24 | 2020-03-25T19:09:24 | 249,178,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,035 | py | #https://medium.com/tomas-pueyo/coronavirus-por-qu%C3%A9-debemos-actuar-ya-93079c61e200
#de la anterior url saco algunos datos
#por ejemplo lo que tardas en morir 17 días
#también un 1% de mortalidad
#https://www.lavanguardia.com/ciencia/20200227/473801277042/coronavirus-covid-19-muertos-enfermedades.html
#set de datos muy interesante
#del ine habría que sacar la composición española de la población
muertos= [0, 0.45, 0.31, 0.14, 0.31 , 0.64, 2.16, 5.24 , 17.91]
muertosPorCien = [0.0, 1.7, 1.1, 0.5, 1.1, 2.4, 8.0, 19.3, 65.9]
poblacion= [663189 , 697133, 709583,925240,1153369,959940,695231,514320,367465] # sacado del ine
poblacionPorCien= [9.9, 10.4, 10.6, 13.8, 17.3, 14.4, 10.4, 7.7, 5.5]
'''
poblacion = {
'ed0' : { 'desc':'0-9', 'numPC': 9.9, 'probM' :0.0, 'probInfe':10, 'umbralCurado':14},
'ed1' : { 'desc':'10-19', 'numPC': 10.4, 'probM' :1.7, 'probInfe':50,'umbralCurado':14},
'ed2' : { 'desc':'20-29', 'numPC': 10.6, 'probM' :1.1, 'probInfe':10,'umbralCurado':14},
'ed3' : { 'desc':'30-39', 'numPC': 13.8, 'probM' :0.5, 'probInfe':10,'umbralCurado':14},
'ed4' : { 'desc':'40-49', 'numPC': 17.3, 'probM' :1.1, 'probInfe':10,'umbralCurado':14},
'ed5' : { 'desc':'50-59', 'numPC': 14.4, 'probM' :2.4, 'probInfe':10,'umbralCurado':14},
'ed6' : { 'desc':'60-69', 'numPC': 10.4, 'probM' :8.0, 'probInfe':10,'umbralCurado':14},
'ed7' : { 'desc':'70-79', 'numPC': 7.7, 'probM' :19.3, 'probInfe':10,'umbralCurado':14},
'ed8' : { 'desc':'80+', 'numPC': 5.5 , 'probM' :65.9, 'probInfe':10,'umbralCurado':14}
}
'''
# Diamond Princess scenario: the cohort table below uses its age distribution and outcome rates
poblacion = {
'ed0' : { 'desc':'0-9', 'numPC': 0.9, 'probM' :0.0, 'probInfe':16, 'umbralCurado':14},
'ed1' : { 'desc':'10-19', 'numPC': 5.4, 'probM' :0.1, 'probInfe':16,'umbralCurado':14},
'ed2' : { 'desc':'20-29', 'numPC': 7.6, 'probM' :0.1, 'probInfe':16,'umbralCurado':14},
'ed3' : { 'desc':'30-39', 'numPC': 8.8, 'probM' :0.1, 'probInfe':16,'umbralCurado':14},
'ed4' : { 'desc':'40-49', 'numPC': 17.3, 'probM' :0.3, 'probInfe':16,'umbralCurado':14},
'ed5' : { 'desc':'50-59', 'numPC': 17.4, 'probM' :0.5, 'probInfe':16,'umbralCurado':14},
'ed6' : { 'desc':'60-69', 'numPC': 20.4, 'probM' :1.8, 'probInfe':16,'umbralCurado':14},
'ed7' : { 'desc':'70-79', 'numPC': 16.7, 'probM' :1.9, 'probInfe':16,'umbralCurado':14},
'ed8' : { 'desc':'80+', 'numPC': 5.5 , 'probM' :2.7, 'probInfe':16,'umbralCurado':14}
}
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import binom
def dameProbInfe(probabilidad):
    """Single Bernoulli draw: 1 with probability `probabilidad` percent, else 0."""
    return binom.rvs(1, probabilidad / 100)
class Persona:
    """One simulated individual: an age-cohort profile plus a disease state."""
    def __init__(self, poblacion):
        # `poblacion` is one cohort entry, e.g.
        #'desc': '0-9', 'numPC': 9.9, 'probM': 0.0, 'probInfe': 0.1
        self.desc = poblacion['desc']
        self.ciclosInfeccion = 0
        self.umbralCurado = poblacion['umbralCurado']
        self.probInfe = poblacion['probInfe']
        self.probM = poblacion['probM'] # probability of dying
        self.estado = "S" # codes: S healthy, I infected, G serious, M dead, C recovered, D detected, X awaiting live/die decision, T spreader, A immune (never catches it)
    def __repr__(self):
        cadena = "desc {} ciclo {} estado {}".format(self.desc, self.ciclosInfeccion, self.estado)
        return cadena
    def contacta(self):
        # Really this should be a drawn probability (possibly age-dependent).
        # NOTE(review): sets an attribute (`infectado`) nothing else in this
        # file reads; `estado` is untouched -- looks like dead code.
        self.infectado = True
    def infecta (self ):
        """A susceptible ('S') person becomes infected ('I') with probability probInfe%."""
        if self.estado == 'S':
            if dameProbInfe(self.probInfe)==1 :
                self.estado = "I"
    def aumentaElCiclo(self):
        """Advance one day of infection; on day `umbralCurado` the person
        dies ('M') with probability probM%, otherwise recovers ('C')."""
        if self.estado == 'I':
            self.ciclosInfeccion += 1
            if self.ciclosInfeccion == self.umbralCurado:
                # decide whether the person lives or dies
                if dameProbInfe(self.probM)==1 :
                    self.estado = "M"
                else:
                    self.estado = "C"
def dameInfectados(personas):
    """Return how many people are currently infected (estado == 'I')."""
    return sum(1 for persona in personas if persona.estado == 'I')
def iniciaEpidemia(numPersonasTotal, tantoPorCienDeA, poblacion):
    """Build the initial population.

    Creates the susceptible people cohort by cohort, seeds four index cases
    in the middle of the list, then appends `tantoPorCienDeA` percent of
    naturally immune ('A') individuals, also distributed by cohort.
    """
    personas = list()
    inmunes = int(numPersonasTotal * tantoPorCienDeA / 100)
    numPersonas = numPersonasTotal - inmunes
    # Susceptible population, one cohort at a time.
    for pobCd in poblacion:
        numper = int(numPersonas * poblacion[pobCd]['numPC'] / 100)
        for i in range(numper):
            personas.append(Persona(poblacion[pobCd]))
    # Seed four index cases deterministically (no probability draw here).
    # Assumes the susceptible population has at least four members.
    medio = int(numPersonas / 2)
    personas[medio].estado = "I"
    personas[medio - 1].estado = "I"
    personas[medio - 2].estado = "I"
    personas[medio - 3].estado = "I"
    # Immune individuals, distributed by cohort.
    for pobC in poblacion:
        numper = int(inmunes * poblacion[pobC]['numPC'] / 100)
        for u in range(numper):
            # BUG FIX: the original built immune people from
            # poblacion[pobCd] -- the stale index left over from the first
            # loop -- so every immune person got the last cohort's profile.
            inmu = Persona(poblacion[pobC])
            inmu.estado = 'A'
            personas.append(inmu)
    return personas
def daUnaVueltaYCambiaLosEstados(personas):
    """Advance the simulation one day: age every person's infection state."""
    for individuo in personas:
        individuo.aumentaElCiclo()
def cuentaEstados(personas, estado):
    """Count how many people are currently in the given state code."""
    return sum(1 for persona in personas if persona.estado == estado)
def vueltaDeContagio(personas, numContagiadores, ro):
    """One contagion round: int(numContagiadores * ro) uniformly random
    individuals receive an infection attempt (repeats are possible)."""
    intentos = int(numContagiadores * ro)
    for _ in range(intentos):
        objetivo = random.randint(0, len(personas) - 1)
        personas[objetivo].infecta()
# ---- Simulation parameters ----
PorCienDeInm = 75       # percent of the population that is immune
PersonasTotal = 3711    # total population (Diamond Princess passengers + crew)
ro=6.4                  # infection attempts per infectious person per day
personas = iniciaEpidemia(PersonasTotal, PorCienDeInm, ro is not None and poblacion)
listaInfe = list()      # currently infected, per day
listaMu = list()        # cumulative deaths, per day
listaCurados = list()   # cumulative recovered, per day
infectados = cuentaEstados(personas, "I")
listaInfe.append(infectados)
listaMu.append(cuentaEstados(personas,"M"))
listaCurados.append(0)
# Run 50 simulated days: spread first, then advance everyone's state.
for i in range (50):
    vueltaDeContagio(personas, infectados,ro)
    daUnaVueltaYCambiaLosEstados(personas)
    infectados = cuentaEstados(personas,"I")
    listaInfe.append(infectados)
    listaMu.append(cuentaEstados(personas,"M"))
    listaCurados.append(cuentaEstados(personas,"C"))
print (listaInfe)
print (listaMu)
print (listaCurados)
print (sum(listaMu))
# Plot infected+recovered on the left axis and deaths on the right axis.
uno = np.array(listaCurados)
dos = np.array(listaInfe)
tres = uno + dos
fig, ax = plt.subplots(1, 1)
ax.plot(tres, "r")
ax2 = ax.twinx()
ax2.plot(listaMu, "g")
# BUG FIX: the original did `plt.title = "...".format(...)`, which rebinds
# (and destroys) the pyplot.title *function* instead of setting a title.
ax.set_title("NumPersonas {}, Tanto por cien de inmunes {}".format( PersonasTotal ,PorCienDeInm))
plt.draw()
plt.show() | [
"noreply@github.com"
] | manuLgMad.noreply@github.com |
086e7cd5094e3ff935e8b9311c8664873ac1cfc8 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/l3/start_20200620180546.py | 304413c0994d4341aa3bde6dc27f92c38feb6864 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10 | py | import dis | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
b80da2eee1ac7b4335967099b21a56b7277e7ca6 | 5f9842af23ce264b00c23120a484910278ad6270 | /server/scripts/migrate.py | 4b9411dd6f19515be6d245edf645bf65aaafb916 | [] | no_license | tykva43/address_book | 010635172a658d2843091828a63f783056dc5db1 | 17c189f6fe32966342423c489be63f7362b73622 | refs/heads/master | 2023-08-23T18:50:39.894980 | 2021-10-11T05:50:57 | 2021-10-11T05:50:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | import os
# DATABASE_URL must be set in the environment; `url` is None otherwise.
url = os.environ.get('DATABASE_URL')
# NOTE(review): the URL is interpolated into a shell command unquoted -- a
# URL containing shell metacharacters (or a missing env var, which formats
# as the string "None") will misbehave.  Consider subprocess.run with argv.
os.system('yoyo apply --database {} ./migrations'.format(url))
| [
"krikunova.olga@mail.ru"
] | krikunova.olga@mail.ru |
9f8a6b37d2e71ec51c2dc85dc85fa193e285f3d0 | c7b059c99056edb98f8564824fae63d198a1fa16 | /pythonProject/final/source_files/server.py | 47ab596adf468b826a94b30c1a2b990848d37d42 | [
"MIT"
] | permissive | shannonchoang/CS131-Fall-2018 | 1b34f0d4a60b61328d92c848bccefed84028901e | b4f6f88a062f2a19252fe158c129d5789ec8935f | refs/heads/master | 2020-07-07T08:32:52.671849 | 2019-08-22T02:26:46 | 2019-08-22T02:26:46 | 203,304,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,492 | py | #remember to use Python 3.7.2
#Assigned Ports: 11995-11999
#Use servers: 06,07,09,10
#FINAL VERSION as of 11:09 PM
api_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
import aiohttp
import asyncio
import time
import sys
import json
import re
serverIDs = ["Goloman", "Hands", "Holiday", "Welsh", "Wilkes"]
serverPorts = {"Goloman": 11995, "Hands": 11996, "Holiday": 11997, "Welsh": 11998, "Wilkes":11999}
#each server is mapped to array of servers it talks to
serverRelationships = {"Goloman":["Hands", "Holiday", "Wilkes"], "Hands":["Wilkes"], "Holiday":["Welsh", "Wilkes"], "Wilkes":["Goloman", "Hands", "Holiday"], "Welsh":["Holiday"]}
#keeps track of clients and:
#most recent location, server last talked to, time of connection
clients_database = {}
def report_bad_command(command):
    """Echo the unparseable command back as '? <command>' and exit with status 1."""
    print("? {}\n".format(command))
    sys.exit(1)
def log_error(message):
    # NOTE(review): `error_msg` is built but never written anywhere, so this
    # function is currently a no-op.  Presumably it was meant to go through
    # log_to_file -- confirm before wiring it up.
    error_msg = "ERROR, " + message + "\n"
############################ FLOODING implementation ############################
async def at(client, writer, message):
    """Write an AT response to `writer` and half-close the stream.

    `client` (the peer's name) is accepted but unused in the body.
    """
    try:
        writer.write(message.encode())
        await writer.drain()
        writer.write_eof()
        await log_to_file("AT output is:" + message + "\n")
    except:
        # Best-effort: just record the failure in the log.
        await log_to_file("Unable to send message: " + message + "\n")
async def log_to_file(message):
    """Append `message` to this server's log file.

    `logfile_name` is a module-level global set elsewhere in this file.
    NOTE: the open/write is synchronous on the event loop, which is fine
    for short log lines.
    """
    # `with` releases the handle even if the write raises; the original
    # only closed the file on the happy path.
    with open(logfile_name, 'a+') as curr_logfile:
        curr_logfile.write(message)
#CLIENT=server that is propagating AT message to all the servers it talks to
async def flood(client,message):
    """Propagate an AT message from server `client` to each of its neighbours.

    Only forwards when the embedded client timestamp (field 5 of
    "AT <server> <skew> <client> <coords> <time>") is newer than what is
    already recorded, which stops infinite re-flooding between servers.
    """
    if client not in serverRelationships:
        return
    message_array = message.split()
    # Field 5 of the AT message is the client's send time.
    clientTime = message_array[5]
    relationships = serverRelationships[client]
    for partner in relationships:
        if client not in clients_database or clientTime > clients_database[client]["time"]:
            try:
                partner_port = serverPorts[partner]
                reader, writer = await asyncio.open_connection('127.0.0.1', port=partner_port)
                await log_to_file("Connection made with " + partner + "\n")
                #write message to partner and mark
                await at(partner,writer, message)
                await log_to_file(client + " sent message to its partner:" + partner + "\n")
                await log_to_file("Message sent: " + message + "\n")
                await log_to_file("Closing connection with " + partner + "\n")
            except:
                # NOTE(review): bare except also swallows CancelledError; a
                # narrower exception class would be safer.
                await log_to_file("Unable to connect with " + partner + "\n")
############################ IAMAT implementation ############################
def no_whitespace(s):
    """Return True when `s` contains no whitespace characters at all."""
    return not any(ch.isspace() for ch in s)
#check for badly formatted coordinates
def splitCoordinates(rawCoords):
    """Split ISO-6709-style '+<lat>-<long>' coordinates into ['+lat', '-long'].

    On malformed input, logs the problem and terminates the process.
    """
    # Each of the two coordinates must carry an explicit sign, so a valid
    # pair contains at least two sign characters.  The original test
    # `("+" and "-") not in rawCoords` evaluated `("+" and "-")` to just
    # "-", so it only required a minus sign and wrongly rejected inputs
    # such as "+34+151" where both fields are signed positive.
    if rawCoords.count("+") + rawCoords.count("-") < 2:
        log_error("bad coordinates supplied")
        sys.exit(1)
        return None  # unreachable; kept to mirror the original control flow
    return rawCoords.replace("+", " +").replace("-", " -").split()
def format_at_message(serverID, timeDiff, dataCopy):
    """Build the 'AT <server> <clock-skew> <original client data>' response line."""
    return f"AT {serverID} {timeDiff} {dataCopy}"
#message has: "IAMAT", "kiwi.cs.ucla.edu", "+34.068930-118.445127" ,"1520023934.918963997"]
async def valid_iamat(message):
    """Validate a tokenized IAMAT command:
    ["IAMAT", client-id, "+lat-long", posix-time].

    Returns True when valid; otherwise logs the problem and terminates the
    process through report_bad_command().
    """
    message_string = " ".join(message)
    if len(message) != 4:
        log_error("bad IAMAT command supplied")
        report_bad_command(message_string)
    try:
        float(message[3])
    except (TypeError, ValueError):
        log_error("bad client send time supplied")
        report_bad_command(message_string)
    if not no_whitespace(message[1]):
        log_error("bad clientID supplied")
        report_bad_command(message_string)
    coordinates = splitCoordinates(message[2])
    try:
        latitude = float(coordinates[0])
        longitude = float(coordinates[1])
    except (TypeError, ValueError, IndexError):
        log_error("bad coordinates supplied")
        report_bad_command(message_string)
    # BUG FIX: the original tested `latitude < -90 and latitude > 90`
    # (and the analogous longitude check), which is always False, so the
    # range validation never fired.  The condition must be a disjunction.
    if latitude < -90 or latitude > 90:
        log_error("invalid latitude coordinate supplied")
        report_bad_command(message_string)
    if longitude < -180 or longitude > 180:
        log_error("invalid longitude coordinate supplied")
        report_bad_command(message_string)
    return True
async def iamat(writer, message, client, serverID, rawCoords, clientTime, realTime):
    """Handle a validated IAMAT: record the client's location, reply with an
    AT message, and flood that AT to neighbouring servers."""
    # Clock skew between the client's send time and our receive time.
    timeDifference = float(clientTime) - realTime
    coordinates = splitCoordinates(rawCoords)#check for returned array of size zero
    if len(coordinates) < 2:
        log_error("unable to process iamatmessage coordinates")
        # NOTE(review): `message_string` is undefined here (msg_string is
        # only assigned below) -- this branch would raise NameError if taken.
        report_bad_command(message_string)
    msg_string = " ".join(message)
    clientData = msg_string.replace("IAMAT","").lstrip()
    # Only store when this report is newer than what we already know.
    if client not in clients_database or clientTime > clients_database[client]["time"]:
        clients_database[client] = {"server":serverID, "location":coordinates, "timeDiff":timeDifference, "time": clientTime, "client_msg": clientData}
    #format and send message
    at_response = format_at_message(serverID, timeDifference, clientData)
    await send_message(writer, at_response)
    await log_to_file("IAMAT output is:" + at_response+ "\n")
    #propogate message
    await flood(serverID, at_response) #client here needs to be servername
############################ WHATSAT implementation ############################
#ex entry: {"kiwi.cs.ucla.edu":{"server": "servername", "location"=[lat, long], "time"=time}}
#command is a list with all the parts of the WHATSAT
#i.e. WHATSAT kiwi.cs.ucla.edu 10 5 = ["WHATSAT", "kiwi...", "10", "5"]
async def valid_whatsat(command):
    """Validate the fields of a WHATSAT command.

    command is the whitespace-split message, e.g.
    ["WHATSAT", "kiwi.cs.ucla.edu", "10", "5"].  Returns True when the
    command is well formed; otherwise the problem is logged and reported
    back to the client via report_bad_command.
    """
    command_string = " ".join(command)
    if len(command) != 4:
        log_error("invalid whatsat command supplied")
        report_bad_command(command_string)
    # BUGFIX: this check previously used the undefined names
    # no_white_space/message and sat unreachably after the `return True`
    # inside the try block; it is now performed up front so it runs.
    if not no_whitespace(command[1]):
        log_error("invalid client ID supplied")
        report_bad_command(command_string)
    try:
        # Radius and information bound must both parse as numbers.
        float(command[2])
        float(command[3])
        return True
    except (TypeError, ValueError):
        log_error("invalid whatsat parameter supplied")
        report_bad_command(command_string)
    return False
def formURL(latitude, longitude, radius):
    """Build the Google Places "nearby search" request URL.

    api_url and key are module-level constants defined elsewhere in this
    file.  Note that radius is formatted with %d, so fractional values
    are truncated to an integer number of metres.
    """
    params = 'location=%s,%s&radius=%d&key=%s' % (latitude, longitude, radius, key)
    return '%s%s' % (api_url, params)
#based on aiohttp documentation example
async def fetch(session, url):
    """GET *url* through *session* and return the decoded JSON body.

    Adapted from the aiohttp documentation example; *session* is an
    aiohttp.ClientSession (or anything whose .get() returns an async
    context manager with an awaitable .json()).
    """
    async with session.get(url) as resp:
        payload = await resp.json()
        return payload
def squeezeNewlines(s):
    """Collapse every run of two or more newlines in *s* into a single one."""
    return re.sub(r'\n{2,}', '\n', s)
#note client is the client that information is being requested on
async def whatsat(writer, message, client, serverID, radius, infoBound):
    """Answer a WHATSAT query about *client*'s surroundings.

    Looks up the client's last reported position, queries the Google
    Places API for points of interest within *radius* km, truncates the
    result list to *infoBound* entries, and replies with an AT header
    followed by the JSON payload.
    """
    message_string = " ".join(message)
    # Enforce the query limits: radius <= 50 km, at most 10 results.
    if float(radius) > 50:
        # BUGFIX: message now matches the check (values up to 50 pass).
        log_error("please supply radius of at most 50km")
        report_bad_command(message_string)
    if float(infoBound) > 10:
        # BUGFIX: the old message claimed a limit of 29 items although the
        # check above enforces 10.
        log_error("please supply information limit of at most 10 items")
        report_bad_command(message_string)
    if client not in clients_database:
        log_error("client that info was requested on does not exist in database")
        report_bad_command(message_string)
    client_info = clients_database[client]
    request_url = formURL(client_info["location"][0], client_info["location"][1], float(radius))
    # Query the Places API asynchronously so other connections keep running.
    async with aiohttp.ClientSession() as session:
        api_data = await fetch(session, request_url)
    # Truncate the result list to at most infoBound entries.
    api_data["results"] = api_data["results"][0:int(infoBound)]
    api_data_string = json.dumps(api_data)
    # Collapse blank lines and drop the trailing newline so the JSON
    # arrives as one clean block after the AT header.
    api_data_string = squeezeNewlines(api_data_string).rstrip("\n")
    # Reuse the record fetched above (the old code looked it up twice).
    msg_header = format_at_message(client_info["server"], client_info["timeDiff"], client_info["client_msg"])
    whatsat_response = msg_header + "\n" + api_data_string
    await send_message(writer, whatsat_response)
    await log_to_file("WHATSAT output is:" + whatsat_response + "\n")
############################ SERVER STUFF ############################
async def send_message(writer, message):
    """Encode *message* as UTF-8, write it to *writer*, and signal EOF.

    Writing EOF tells the peer that the full response has been sent.
    """
    payload = message.encode()
    writer.write(payload)
    writer.write_eof()
async def handleClient(reader, writer):
    """Per-connection coroutine: read one command, dispatch it, reply.

    Recognized commands are IAMAT (client location report), WHATSAT
    (query about another client) and AT (server-to-server flood).
    Relies on the module globals serverID, clients_database and the
    helper functions defined above.
    """
    try:
        data = await reader.read()
    except:
        # NOTE(review): bare except hides the real error, and sys.exit(1)
        # tears down the whole server because one connection failed.
        log_error("couldn't read input from client")
        sys.exit(1)
    try:
        data_decoded = data.decode()
        message = data_decoded.strip().split()
    except:
        log_error("error in decoding")
        sys.exit(1)
    message_string = " ".join(message)
    # NOTE(review): split() fields can never contain whitespace, so this
    # check can never fire.
    for field in message:
        if not no_whitespace(field):
            log_error("white space in message field")
            report_bad_command(message_string)
    # NOTE(review): an empty or one-token message raises IndexError here
    # before any validation runs.
    message_type = message[0]
    client_name = message[1]
    if message_type != "IAMAT" and message_type != "WHATSAT" and message_type != "AT":
        log_error("incorrect command supplied")
        report_bad_command(message_string)
    await log_to_file("Input to " + serverID + ": " + " ".join(message)+ "\n") #logging output handled within msg functions
    if message_type == "IAMAT":
        await valid_iamat(message)
        rawCoords = message[2]
        clientTime = message[3]
        await iamat(writer, message, client_name, serverID, rawCoords, clientTime, time.time())
    elif message_type == "WHATSAT":
        await valid_whatsat(message)
        await whatsat(writer, message, client_name, serverID, message[2], message[3])
    elif message_type == "AT":
        # AT format: AT <serverID> <timeDiff> <client> <coords> <time>
        client_name = message[3]
        serverID_AT= message[1]
        location = splitCoordinates(message[4])
        client_data_arr = message[3:]
        await log_to_file("Received message:" + " ".join(message) + "\n")
        # NOTE(review): message[4] is the coordinates field, not the
        # timestamp (that is message[5], as stored below) — this freshness
        # comparison therefore compares coords against a stored time.
        clientTime = message[4]
        # Store and re-flood only when the record is new or fresher.
        if client_name not in clients_database or clientTime > clients_database[client_name]["time"]:
            clients_database[client_name] = {"server":serverID_AT, "location":location, "timeDiff":message[2], "time": message[5], "client_msg": " ".join(client_data_arr)}
            await flood(serverID, " ".join(message), )
    else:
        log_error("invalid command supplied:" + message_string)
        report_bad_command(message_string)
############################ MAIN ROUTINE ############################
async def main():
    """Entry point: parse the server name argument, start the listener.

    Expects exactly one command-line argument naming a server from the
    module-level serverIDs table; the matching port comes from
    serverPorts.  Also opens the per-server log file (truncating any
    previous run's log via mode 'w+').
    """
    if len(sys.argv)!=2:
        report_bad_command(" ".join(sys.argv))
    # Globals are shared with handleClient and the __main__ shutdown path.
    global serverID
    global server
    serverID = sys.argv[1]
    if serverID not in serverIDs:
        report_bad_command(" ".join(sys.argv))
    try:
        server = await asyncio.start_server(handleClient, host='127.0.0.1', port=serverPorts[serverID])
        global logfile_name
        logfile_name = serverID + ".txt"
        log_file= open(logfile_name, 'w+')
        log_file.write(serverID + "\n")
        log_file.write("Starting server:" + serverID + " at " + str(time.time())+ "\n")
        log_file.close()
    except:
        # NOTE(review): bare except — a typo inside the try block would be
        # reported as "unable to start server".
        log_error("unable to start server")
        report_bad_command(" ".join(sys.argv))
    await server.serve_forever()
if __name__=='__main__':
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C: close the listening socket (global set inside main()).
        server.close()
| [
"shannonhoang0@gmail.com"
] | shannonhoang0@gmail.com |
093145c8b65bf2b5841737540a0b4b14abe41f1d | af220e52cfc1f41e9a9fab4de5608e86d4787a62 | /Convolution on Images.py | 3e693ac162721d412326ed4d6c94c31661ecccc2 | [] | no_license | besthvr12/CNN-and-ANN | 4344b56de21419cb36662d0be1b1f2a4ce619452 | e6a37dd331bb8cd1a1fef626d80ab6ae99211318 | refs/heads/master | 2022-04-06T14:34:23.924693 | 2020-02-24T17:50:55 | 2020-02-24T17:50:55 | 228,059,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | # import the necessary packages
from skimage.exposure import rescale_intensity
import numpy as np
import argparse
import cv2
def convolve(image, K):
    """Convolve a 2-D grayscale *image* with kernel *K* "by hand".

    The image is replicate-padded so the output keeps the input's
    spatial size, every output pixel is the element-wise product-sum of
    the kernel with the window centred on it, and the result is
    rescaled to [0, 255] and returned as uint8.

    Note: the pad width is derived from the kernel width only — this
    presumably assumes a square kernel (TODO confirm for non-square K).
    """
    (iH, iW) = image.shape[:2]
    (kH, kW) = K.shape[:2]
    pad = (kW - 1) // 2
    # Replicate-pad the borders so window extraction never leaves the image.
    padded = cv2.copyMakeBorder(image, pad, pad, pad, pad,
        cv2.BORDER_REPLICATE)
    out = np.zeros((iH, iW), dtype="float")
    # Slide the kernel over every (row, col) position of the padded image.
    for row in range(pad, iH + pad):
        for col in range(pad, iW + pad):
            window = padded[row - pad:row + pad + 1, col - pad:col + pad + 1]
            out[row - pad, col - pad] = (window * K).sum()
    # Clip/normalize to [0, 1], then scale back to 8-bit range.
    out = rescale_intensity(out, in_range=(0, 255))
    return (out * 255).astype("uint8")
# Box-blur kernels: every weight 1/(k*k), so each output pixel is the
# mean of its k x k neighbourhood.
smallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
largeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
# Sharpening kernel (centre boosted, 4-neighbours subtracted).
sharpen = np.array((
	[0, -1, 0],
	[-1, 5, -1],
	[0, -1, 0]), dtype="int")
# Laplacian kernel — highlights edge-like regions.
laplacian = np.array((
	[0, 1, 0],
	[1, -4, 1],
	[0, 1, 0]), dtype="int")
# Sobel kernel for horizontal (x-axis) gradients.
sobelX = np.array((
	[-1, 0, 1],
	[-2, 0, 2],
	[-1, 0, 1]), dtype="int")
# Sobel kernel for vertical (y-axis) gradients.
sobelY = np.array((
	[-1, -2, -1],
	[0, 0, 0],
	[1, 2, 1]), dtype="int")
# Emboss kernel.
emboss = np.array((
	[-2, -1, 0],
	[-1, 1, 1],
	[0, 1, 2]), dtype="int")
# Kernel bank: each entry is applied with both our custom convolve()
# and OpenCV's filter2D for comparison.
kernelBank = (
	("small_blur", smallBlur),
	("large_blur", largeBlur),
	("sharpen", sharpen),
	("laplacian", laplacian),
	("sobel_x", sobelX),
	("sobel_y", sobelY),
	("emboss", emboss))
# Load the input image and convert it to grayscale.
# NOTE(review): cv2.imread returns None when 'Dog.png' is missing, which
# would make cvtColor raise — there is no existence check here.
image = cv2.imread('Dog.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply every kernel with both implementations and display the results
# side by side (any key advances to the next kernel).
for (kernelName, K) in kernelBank:
	print("[INFO] applying {} kernel".format(kernelName))
	convolveOutput = convolve(gray, K)
	opencvOutput = cv2.filter2D(gray, -1, K)
	# NOTE(review): "convole" in the window title below is a typo, but it
	# is a runtime string and is left unchanged here.
	cv2.imshow("Original", gray)
	cv2.imshow("{} - convole".format(kernelName), convolveOutput)
	cv2.imshow("{} - opencv".format(kernelName), opencvOutput)
	cv2.waitKey(0)
	cv2.destroyAllWindows()
| [
"noreply@github.com"
] | besthvr12.noreply@github.com |
4b9647c17ca63115c2dc89a0306b51de27c9b14f | e04d62d1d68225bd8d7076cce54250943dc54206 | /main.py | bcbb4a0e36fa224e1dbb96575d743263d87b3c4f | [] | no_license | alfonsocanor/TMS_2018 | 3611f280917f8f939b3a754b87099138f9e8dd16 | 7a38fe13fbe1ab7c9c54bb97eb0bbccc8e401c89 | refs/heads/master | 2021-05-13T23:38:43.494629 | 2018-01-13T14:44:59 | 2018-01-13T14:44:59 | 116,522,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,139 | py | import googlemaps
import geocoder
from geopy.distance import vincenty
from urllib.request import urlopen
import json
import os
import csv
import math
import time
# NOTE(review): the triple-quoted block below is dead code preserved as a
# module-level string literal (evaluated and discarded at import time).
# It embeds a hard-coded Google Maps API key committed to version
# control — the key should be revoked and this block deleted.
''' gmaps = googlemaps.Client(key='AIzaSyBMwXuzxKO2EY2eoNX163Iojydhmi39i1U') #From googlaMaps API
#print(dir(gmaps))
gi = gmaps.distance_matrix('pasaje santa rosa 5040 buenos aires', 'Gascón 1099 buenos aires')
#print(gi)
getInfo = gmaps.geocode('pasaje santa rosa 5040 buenos aires') #Getting latitude and longitude from an address
getInfo2 = gmaps.geocode('Av Cordoba 3301 CABA') #Getting latitude and longitude from an address
LAT1 = getInfo[0]['geometry']['location']['lat']
LONG1 = getInfo[0]['geometry']['location']['lng']
LAT2 = getInfo2[0]['geometry']['location']['lat']
LONG2 = getInfo2[0]['geometry']['location']['lng']
#print(LAT1, LONG1, 'AND', LAT2, LONG2)
A = str('(') + str(LAT1) + str(',') + str(LONG1) + str(')')
B = str('(') + str(LAT2) + str(',') + str(LONG2) + str(')') '''
# Count the data rows in the input CSV (-1 start skips the header line).
lineCounterVariable = -1
with open(os.path.join(os.path.dirname(__file__), 'latLngInfo12012018.csv')) as lineCounter:
    lineCounter = csv.reader(lineCounter)
    for row in lineCounter:
        lineCounterVariable += 1
print(lineCounterVariable)
# Running row index written with every distance record.
indexRowDistances = 1
counterForDelay = 0
clientsDone = 0
conditional = 0
listDistances = []
# For every ordered pair (i, j) of rows with i < j, ask the public OSRM
# server for the driving distance and append it to the AB/BA files.
with open(os.path.join(os.path.dirname(__file__), 'latLngInfo12012018.csv')) as X:
    X = csv.reader(X)
    for i, linei in enumerate(X):
        # Skip rows already marked as completed in the backup file
        # (lets the script resume after a crash or server error).
        with open(os.path.join(os.path.dirname(__file__), 'errorServerClientsBackUp12012018.csv')) as errorServerConditional:
            errorServerConditional = csv.reader(errorServerConditional)
            for l in errorServerConditional:
                if l[0] == str(i):
                    conditional = 1
                    print('conditional', conditional)
        if conditional == 0:
            print('es i', i, linei)
            with open(os.path.join(os.path.dirname(__file__), 'latLngInfo12012018.csv')) as Y:
                Y = csv.reader(Y)
                for j, linej in enumerate(Y):
                    print('es j:', j, linej)
                    # Throttle: pause 30 s after every 60 requests so the
                    # public OSRM server does not drop us.
                    if counterForDelay == 60:
                        time.sleep(30)
                        counterForDelay = 0
                    # Create the output file with a header row on first use.
                    try:
                        with open(os.path.join(os.path.dirname(__file__), 'distanceInfo12012018')) as headersFile:
                            pass
                    except FileNotFoundError:
                        with open(os.path.join(os.path.dirname(__file__), 'distanceInfo12012018'), 'a') as headersFile:
                            headersFile = csv.writer(headersFile, delimiter = ',')
                            headersFile.writerow(['conditional', 'id_AddressNumber_A', 'name_A', 'id_AddressNumber_B', 'name_B', 'distanceAB'])
                    # Only upper-triangle pairs; row 0 is the header.
                    if i < j and i != 0 and j != 0:
                        # linei[4]/linei[5] are latitude/longitude columns.
                        lookingForDistance = urlopen('http://router.project-osrm.org/trip/v1/driving/'+linei[5]+','+linei[4]+';'+linej[5]+','+linej[4]+'?source=first&destination=last')
                        infoFromOSRM = json.load(lookingForDistance)
                        distanceAB = infoFromOSRM['trips'][0]['legs'][0]['distance']
                        # A->B record.
                        with open(os.path.join(os.path.dirname(__file__), 'distanceInfo12012018_AB'), 'a') as distanceInfo_AB:
                            distanceInfo_AB = csv.writer(distanceInfo_AB, delimiter = ',')
                            distanceInfo_AB.writerow([indexRowDistances, str(linei[0]+'_'+linei[2]+linej[0]+'_'+linej[2]), str(linei[0]+'_'+linei[2]), linei[1], str(linej[0]+'_'+linej[2]), linej[1], str(distanceAB)])
                        # B->A record — assumes the distance is symmetric.
                        with open(os.path.join(os.path.dirname(__file__), 'distanceInfo12012018_BA'), 'a') as distanceInfo_BA:
                            distanceInfo_BA = csv.writer(distanceInfo_BA, delimiter = ',')
                            distanceInfo_BA.writerow([indexRowDistances, str(linej[0]+'_'+linej[2]+linei[0]+'_'+linei[2]), str(linej[0]+'_'+linej[2]), linej[1], str(linei[0]+'_'+linei[2]), linei[1], str(distanceAB)])
                        print(distanceAB)
                        indexRowDistances += 1
                        clientsDone += 1
                        # Row i fully processed -> mark it in the backup file.
                        if clientsDone == (lineCounterVariable - i):
                            with open(os.path.join(os.path.dirname(__file__), 'errorServerClientsBackUp12012018.csv'), 'a') as errorServer:
                                errorServer = csv.writer(errorServer, delimiter = ',')
                                # BUG(review): csv.writer.writerow takes a single
                                # row argument; linei[2] is passed as a second
                                # positional argument, so this line raises
                                # TypeError whenever the branch runs.  Intended:
                                # writerow([i, linei[0], linei[1], linei[2]]).
                                errorServer.writerow([i, linei[0], linei[1]], linei[2])
                        else:
                            pass
                        counterForDelay += 1
        else:
            pass
        clientsDone = 0
        conditional = 0
        j = 0
# Concatenate the AB and BA files into the consolidated output CSV.
with open(os.path.join(os.path.dirname(__file__), 'totalDistances_AB_BA_12012018.csv'), 'a') as consolidateDistances:
    consolidateDistances = csv.writer(consolidateDistances, delimiter = ',')
    with open(os.path.join(os.path.dirname(__file__), 'distanceInfo12012018_AB')) as distances_AB:
        distances_AB = csv.reader(distances_AB)
        for row in distances_AB:
            consolidateDistances.writerow([row[0], row[1], row[2], row[3], row[4], row[5], row[6]])
    with open(os.path.join(os.path.dirname(__file__), 'distanceInfo12012018_BA')) as distances_BA:
        distances_BA = csv.reader(distances_BA)
        for row in distances_BA:
            consolidateDistances.writerow([row[0], row[1], row[2], row[3], row[4], row[5], row[6]])
| [
"lalfonsocanor@gmail.com"
] | lalfonsocanor@gmail.com |
5480e781b5a2d14825bed9572318338becf10151 | c3cac318adc945bc47f7a7a541306ac45a4f3c91 | /KAP3/maxmin_list.py | 6d1b788f7f8d5eef715952e401c2823ed5c2e69c | [] | no_license | jonmagnus/INF1900 | 96684391be4a72978068e3d87935425bf5c58430 | 05caece230fa4c74e3a89fda903df93456d0526c | refs/heads/master | 2020-04-02T11:04:40.158157 | 2018-11-09T10:53:00 | 2018-11-09T10:53:00 | 154,369,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from random import randint
def max_(a):
    """Return the largest element of the non-empty sequence *a*."""
    largest = a[0]
    for value in a[1:]:
        if value > largest:
            largest = value
    return largest
def min_(a):
    """Return the smallest element of the non-empty sequence *a*."""
    smallest = a[0]
    for value in a[1:]:
        if value < smallest:
            smallest = value
    return smallest
# NOTE(review): Python 2 file — the "print" statements below are py2 syntax.
# Draw 10 random integers in [-10, 10] (randint's bounds are inclusive).
a = [randint(-10,10) for i in range(10)]
print a
print 'The largest element is %d\nThe smallest element is %d' %(max_(a), min_(a))
| [
"jonmagnusr@gmail.com"
] | jonmagnusr@gmail.com |
f4223bac982ea9725777698c0b2089aa663912ac | 4a597819ab90544ef33b43b84dd6b1b0c8500baa | /07_SyntaxPro/hm_08_IterAndUniterable.py | e48d5a8d2773e09229cc13757ad49d46204a02b8 | [] | no_license | frankzhuzi/ithm_py_hmwrk | dff1b91e8f5d371684b3b337871356101bd2f139 | 1bb52b63b93c33cac81eacb8146769bb7b0cd31e | refs/heads/master | 2020-09-07T22:11:44.497289 | 2019-11-11T09:32:44 | 2019-11-11T09:32:44 | 220,927,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | def demo(num):
    # Rebinding the parameter only changes the local name: ints are
    # immutable, so the caller's variable is unaffected.
    print("Value in the function")
    num = 100
    print(num)
    print("Done...")
# Driver: demo prints 100 internally, but gl_num still prints 99 after.
gl_num = 99
demo(gl_num)
print(gl_num)
"noreply@github.com"
] | frankzhuzi.noreply@github.com |
728812d091d1626b414cdbe9bc793cdbe2b508fe | b71817da629e0ba8858179def4b98e684ec75568 | /Python/2019-7-30-小甲鱼-7.py | c3fded6734334f4aa0bc1ac8b1b0ced29d9ce5f6 | [] | no_license | haole1683/Python_learning | b09a5d7c7aae5e735426c716071d80e4adf715d4 | 327b9ddec58159ebdea65b1078145bc6696b2705 | refs/heads/master | 2020-07-13T08:29:24.229540 | 2019-08-29T00:20:52 | 2019-08-29T00:20:52 | 205,045,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> (3 < 4) and (4 < 5)
True
>>> -3 ** 2
-9
>>> ##注意上面的运算符优先级
>>> ## **优先级高于 -优先级
>>>
>>> (-3) ** 2
9
>>> ##加上括号则优先级先算括号里的
>>> ##同理
>>> 3 ** -2
0.1111111111111111
>>> ##这里就是相当于 3 ** (-2)
>>>
>>> ##逻辑运算符
>>> ## and or not
>>> not True
False
>>> not False
True
>>> not 1
False
>>> not 0
True
>>> not 4
False
>>>
>>> ##综合优先级
>>> ##幂运算(**) > 正负号(+x/-x) > 算术操作符(*/ / / // + - ) >
>>> ## >比较运算符(> >=...) > 逻辑运算符 (and or not)
>>> ##此外逻辑运算符之中 not > and >or
| [
"noreply@github.com"
] | haole1683.noreply@github.com |
6f9b939f82691663c13f89083b151c6d02316bcf | 0833e4118476eb142b6f2fdc5da7b326c28216d2 | /proj1/api/http_server.py | 8eee92e9de93371db3a4ddd51866b380e103a3b1 | [
"Apache-2.0"
] | permissive | unazed/small-HTTP-server | ca232077639e7f52ba997f154d89148785ea3c7f | 023fe32ec0c3068ff9f156e1e66ee8d16e03a13b | refs/heads/master | 2021-05-25T21:11:26.741153 | 2020-04-14T18:01:54 | 2020-04-14T18:01:54 | 253,920,906 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,674 | py | from .socket_server import SocketServer
from sys import _getframe as _gf
from urllib.parse import urlparse, unquote
import threading
import socket
import os
# Protocol version accepted by the parser; anything else yields 505.
HTTP_VERSION = "HTTP/1.1"
# Request methods the parser accepts; anything else yields 405.
SUPPORTED_METHODS = ("GET", "POST")
# Headers promoted to top-level keys of the request-info dict; every
# other header lands in info["ignored"].
SUPPORTED_HEADERS = ("host",)
# Status code -> template filename expected inside the error directory.
ERROR_PAGES = {
    400: "400.html",
    404: "404.html",
    405: "405.html",
    505: "505.html"
}
# NOTE(review): declared `global` in _select_page but never populated in
# the code visible here.
error_page_cache = {}
def parse_cookies(dat):
    """Parse a Cookie header value into a dict (not RFC 6265 compliant).

    Expects '; '-separated "key=value" pairs; only the first '=' splits a
    pair, so values may themselves contain '='.  Falsy input yields {}.
    """
    if not dat:
        return {}
    return dict(pair.split("=", 1) for pair in dat.split("; "))
def parse_post(dat):
    """Parse a urlencoded POST body into a dict (not RFC compliant).

    '&'-separated "name=value" pairs; values are percent-decoded with
    unquote, names are left as-is.  Falsy input yields {}.
    """
    if not dat:
        return {}
    fields = {}
    for pair in dat.split("&"):
        name, raw = pair.split("=", 1)
        fields[name] = unquote(raw)
    return fields
def parse_http_request(data):
    """Parse a raw HTTP request string.

    Returns None for empty input, (False, (code, reason)) for malformed
    requests, or (True, (info, body)) on success, where info carries the
    method, parsed request URI, HTTP version, the SUPPORTED_HEADERS
    values, and any other headers under info["ignored"].
    """
    if not data:
        return
    info = {
        "method": None,
        "request_uri": None,
        "http_version": None,
        "ignored": {},
        **{hdr: None for hdr in SUPPORTED_HEADERS}
    }
    # Split the header section from the body at the first blank line.
    try:
        data, content = data.split("\r\n\r\n", 1)
    except ValueError:
        return (False, (400, "invalid HTTP header"))
    data = data.split("\r\n")
    if not data:
        return (False, "empty data")
    # Request line: METHOD SP URI SP VERSION.
    try:
        method, uri, version = data[0].split(" ", 2)
    except ValueError:
        # BUGFIX: the old message interpolated the undefined name `_`,
        # raising NameError instead of reporting the bad request line.
        return (False, (400, f"invalid request-line `{data[0]}`"))
    if method not in SUPPORTED_METHODS:
        return (False, (405, f"unsupported method `{method}`"))
    info["method"] = method
    info["request_uri"] = urlparse(uri)
    if version == "HTTP/1.1":
        info["http_version"] = version
    else:
        return (False, (505, f"unsupported HTTP version {version!r}"))
    # Header lines: "Name: value".
    for line in data[1:]:
        dat = line.split(":", 1)
        # BUGFIX: the length check must come before touching dat[1];
        # previously a colon-less header line raised IndexError.
        if len(dat) != 2:
            return (False, (400, f"invalid header-line on {dat[0]}"))
        header = dat[0]
        dat[1] = dat[1].lstrip()
        if header.lower() in SUPPORTED_HEADERS:
            info[header.lower()] = dat[1]
        else:
            info["ignored"][dat[0]] = dat[1]
    return (True, (info, content))
def construct_http_response(http_resp, data):
    """Serialize a response dict plus body into a raw HTTP response string.

    The pseudo-headers "Status-code" and "Reason-phrase", if present in
    http_resp['headers'], override the top-level status_code /
    reason_phrase and are removed.  Note: this mutates *http_resp*.
    Anything after '#' in a header name is stripped (allows duplicate
    header keys in the dict, e.g. "Set-Cookie#1").
    """
    headers = http_resp['headers']
    for pseudo, field in (('Status-code', 'status_code'),
                          ('Reason-phrase', 'reason_phrase')):
        if pseudo in headers:
            http_resp[field] = headers.pop(pseudo)
    status_line = f"{http_resp['http_version']} {http_resp['status_code']} {http_resp['reason_phrase']}"
    header_lines = '\r\n'.join(f"{name.split('#', 1)[0]}: {value}"
                               for name, value in headers.items())
    return f"{status_line}\r\n{header_lines}\r\n\r\n{data}"
class HttpServer(SocketServer):
    """Small threaded HTTP server built on the project's SocketServer.

    Pages are registered with add_route() as {"METHOD": handler} dicts;
    each handler is called as handler(conn, request_info, body) and must
    return (extra_headers, page_text).  Error pages are expected to be
    registered under the routes "/400", "/404", "/405", "/505".
    """
    def __init__(self, root_dir, error_dir="", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.root_dir = root_dir
        self.routes = {}
        self.error_dir = error_dir
        # Logger is presumably provided by SocketServer; the True flag
        # appears to mark the message as fatal — TODO confirm.
        if not os.path.exists(root_dir):
            self.logger.log(_gf(), f"root dir. {root_dir!r} doesn't exist", True)
        elif not os.path.exists(error_dir):
            self.logger.log(_gf(), f"error dir. {error_dir!r} doesn't exist", True)
        # NOTE(review): listdir raises if error_dir is missing — the elif
        # above only logs, it does not stop execution.
        error_dir = os.listdir(error_dir)
        for page in ERROR_PAGES.values():
            if page not in error_dir:
                self.logger.log(_gf(), f"{page!r} doesn't exist in {self.error_dir!r}", True)
    def add_route(self, path, handlers):
        """Register *handlers* = {"METHOD": callable} for *path*.

        A leading "/" is added if missing.
        """
        if not str(path).startswith("/"):
            path = f"/{path}"
        self.routes[path] = handlers
    def _select_page(self, conn, addr, data):
        """Dispatch a parsed request to its route and build the response.

        *data* is the (success, payload) pair from parse_http_request:
        on failure, payload is (code, reason) and the matching error
        route renders the page; on success the route handler runs.
        """
        global error_page_cache
        success, data = data
        http_resp = {
            "http_version": HTTP_VERSION,
            "status_code": None,
            "reason_phrase": "General Error", # tbd
            "headers": {
                "server": "Unazed/1.0"
            }
        }
        if not success:
            code, error = data
            # Render the registered error page for this status code.
            _, page = self.routes[f"/{code}"]["GET"](conn, {}, "")
            http_resp['status_code'] = code
            http_resp['reason_phrase'] = error
            return construct_http_response(http_resp, page)
        info, content = data
        method = info["method"]
        # Only the first path segment is routed; deeper segments are cut
        # off by the split.
        if (path := "/" + info["request_uri"].path.split("/", 1)[1]) in self.routes:
            if method not in self.routes[path]:
                _, error_page = self.routes["/405"]["GET"](conn, {}, "")
                http_resp['status_code'] = 405
                return construct_http_response(http_resp, error_page)
            headers, page = self.routes[path][method](conn, info, content)
            http_resp['status_code'] = 200
            http_resp['reason_phrase'] = 'OK'
            http_resp['headers'].update(headers)
            print(f"[{addr[0]}:{addr[1]}] [200] {method} {path!r}")
            return construct_http_response(http_resp, page)
        else:
            _, error_page = self.routes["/404"]["GET"](conn, {}, "")
            http_resp['status_code'] = 404
            http_resp['reason_phrase'] = "Not found"
            print(f"[{addr[0]}:{addr[1]}] [404] {method} {path!r}")
            return construct_http_response(http_resp, error_page)
    def handle_http_requests(self, *args, **kwargs):
        """Accept loop: serve each connection on its own thread."""
        def handle_http_requests(inst, idx, conn, addr, **kwargs):
            # Read until the 1-second timeout fires; treats the timeout
            # as end-of-request.
            conn.settimeout(1)
            buff = ""
            try:
                while (dat := conn.recv(512).decode()):
                    buff += dat
            except socket.timeout:
                pass
            if not buff:
                conn.close()
                return
            conn.settimeout(None)
            dat = parse_http_request(buff)
            # NOTE(review): missing `return` — after closing the socket
            # the code falls through and still calls _select_page/send on
            # the closed connection.  Also, parse_http_request only
            # returns a falsy value (None) for empty input, never for
            # malformed requests (those come back as non-empty tuples).
            if not dat:
                conn.shutdown(socket.SHUT_RDWR)
                conn.close()
            self.logger.log(_gf(), f"[{addr[0]}:{addr[1]}] received {buff[:10]!r}")
            conn.send(self._select_page(conn, addr, dat).encode())
            try:
                conn.shutdown(socket.SHUT_RDWR)
                conn.close()
            except OSError:
                self.logger.log(_gf(), f"[{addr[0]}:{addr[1]}] abruptly closed")
        super().handle_connections(
            worker_thd=handle_http_requests,
            proxy_worker_thd=lambda fn, *args_, **kwargs_: \
                threading.Thread(target=fn, args=args_, kwargs=kwargs_).start(),
            *args, **kwargs
        )
| [
"leethax0r.coder@gmail.com"
] | leethax0r.coder@gmail.com |
7bd3e5c0e488bb3cd1a5ff231d0d875413fcfb7d | a7fd50490c8fed88cb6ae21f68eee325eed32184 | /wsgi.py | 6d09d011de12af8041a30792531f81275d698b58 | [] | no_license | winnate/Flask-Project | ef1a00614c6c25c4a6c119733e538af74ac9c1cc | 607d92e626a6c88f132ee4b56f461a384e05738b | refs/heads/master | 2020-06-09T08:34:47.947055 | 2016-12-09T15:52:30 | 2016-12-09T15:52:30 | 76,049,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | from helloWorld import app
if __name__ == "__main__":
    # Start Flask's built-in development server (not for production use).
    app.run()
| [
"main@vps99610.vps.ovh.ca"
] | main@vps99610.vps.ovh.ca |
6dce8ab5aa0b8bd0c0ee86d7753accc09fc9c3a9 | 8fa8ded3772dd7a124c1bbb91fc109ed2b63574b | /mycelium/apps/groups/migrations/0024_auto__add_field_grouprule_account__add_field_group_account.py | 6efb4463b3442d5bbdba8fed2d4d0f47a61bb622 | [] | no_license | skoczen/mycelium | 3642b0f5e5ea03d609a3e499c7ad68092101dce0 | da0f169163f4dc93e2dc2b0d934abf4f18c18af0 | refs/heads/master | 2020-04-10T09:21:46.893254 | 2014-05-20T02:27:06 | 2014-05-20T02:27:06 | 2,114,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,761 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add an `account` FK to Group and GroupRule.

    Auto-generated by South (`schemamigration --auto`); edit with care.
    The `models` dict is South's frozen ORM snapshot at migration time.
    """
    # Run only after the accounts app has created the legacy account row
    # used as the FK default below.
    depends_on = (
        ("accounts", "0004_create_old_data_account"),
    )
    def forwards(self, orm):
        # Adding field 'GroupRule.account' (default=1 points at the
        # "old data" account created by the dependency migration).
        db.add_column('groups_grouprule', 'account', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['accounts.Account']), keep_default=False)
        # Adding field 'Group.account'
        db.add_column('groups_group', 'account', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['accounts.Account']), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'GroupRule.account'
        db.delete_column('groups_grouprule', 'account_id')
        # Deleting field 'Group.account'
        db.delete_column('groups_group', 'account_id')
    models = {
        'accounts.account': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Account'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Plan']"}),
            'subdomain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
        },
        'accounts.plan': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Plan'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'groups.group': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Group'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'qi_simple_searchable_search_field': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'rules_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'groups.grouprule': {
            'Meta': {'ordering': "('group', 'id')", 'object_name': 'GroupRule'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['groups.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'left_side': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rules.LeftSide']", 'null': 'True', 'blank': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rules.Operator']", 'null': 'True', 'blank': 'True'}),
            'right_side_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rules.RightSideType']", 'null': 'True', 'blank': 'True'}),
            'right_side_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'rules.leftside': {
            'Meta': {'ordering': "('order',)", 'object_name': 'LeftSide'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
            'add_closing_paren': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'allowed_operators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rules.Operator']", 'symmetrical': 'False'}),
            'allowed_right_side_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rules.RightSideType']", 'symmetrical': 'False'}),
            'choices': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
            'query_string_partial': ('django.db.models.fields.TextField', [], {})
        },
        'rules.operator': {
            'Meta': {'ordering': "('order',)", 'object_name': 'Operator'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
            'query_string_partial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'use_filter': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'rules.rightsidetype': {
            'Meta': {'object_name': 'RightSideType'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['groups']
| [
"steven@quantumimagery.com"
] | steven@quantumimagery.com |
4476c578877edb97bf48979ff7959da92847869d | c508d75aa25e235a55a0602d93af6d2e12cc8571 | /PNGUpdate.py | 3c2f93d0e7b717207d80c395a97642a2499affde | [] | no_license | imnewbe/Python | 7870407e653cb5d377d63172afdf2aebb2300130 | 781e9c0366bdea821eb6e518f24ebfa657481d6b | refs/heads/master | 2021-09-13T20:41:25.913209 | 2018-05-04T03:19:53 | 2018-05-04T03:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | #encoding:utf-8
from struct import *
from zlib import *
import stat
import sys
import os
import argparse
class PNGproccess():
    """Normalize iOS-optimized (CgBI) PNGs in a directory.

    NOTE(review): Python 2 only — uses `except Exception, e`, `xrange`,
    the `print` statement, and treats file bytes as str throughout.
    iOS PNGs store IDAT data zlib-headerless with red/blue channels
    swapped and a proprietary CgBI chunk; setPNG undoes all three.
    """
    def __init__(self,filepath):
        # Directory to scan for .png files.
        self.filepath=filepath
    def getFile(self):
        """Return the paths of all .png files directly under filepath."""
        _dir=[]
        pngs=[]
        path=self.filepath
        files= os.listdir(str(path))
        for file in files:
            filepath=os.path.join(path,file)
            if file[-4:].lower()=='.png':
                pngs.append(filepath)
        return pngs
    def setPNG(self,path):
        """Read the PNG at *path* and return normalized file bytes.

        Returns None when the file is not a PNG or is already normalized
        (i.e. its IDAT data carries a regular zlib header).
        """
        pngheader = "\x89PNG\r\n\x1a\n"
        file = open(path, "rb")
        oldPNG = file.read()
        file.close()
        if oldPNG[:8] != pngheader:
            return None
        newPNG = oldPNG[:8]
        chunkPos = len(newPNG)
        # Walk the chunk stream: length(4) type(4) data(len) CRC(4).
        while chunkPos < len(oldPNG):
            # Reading chunk
            chunkLength = oldPNG[chunkPos:chunkPos+4]
            chunkLength = unpack(">L", chunkLength)[0]
            chunkType = oldPNG[chunkPos+4 : chunkPos+8]
            chunkData = oldPNG[chunkPos+8:chunkPos+8+chunkLength]
            chunkCRC = oldPNG[chunkPos+chunkLength+8:chunkPos+chunkLength+12]
            chunkCRC = unpack(">L", chunkCRC)[0]
            chunkPos += chunkLength + 12
            # Image dimensions come from the header chunk.
            if chunkType == "IHDR":
                width = unpack(">L", chunkData[0:4])[0]
                height = unpack(">L", chunkData[4:8])[0]
            # Image data: inflate, swap channels, re-deflate.
            if chunkType == "IDAT":
                try:
                    # wbits=-8: Apple PNGs omit the zlib header.
                    bufSize = width * height * 4 + height
                    chunkData = decompress( chunkData, -8, bufSize)
                except Exception, e:
                    # Decompression with a standard header present fails
                    # here, meaning the PNG is already normalized.
                    return None
                # Swap red & blue bytes of each RGBA pixel; each row is
                # prefixed by one filter-type byte that is copied as-is.
                newdata = ""
                for y in xrange(height):
                    i = len(newdata)
                    newdata += chunkData[i]
                    for x in xrange(width):
                        i = len(newdata)
                        newdata += chunkData[i+2]
                        newdata += chunkData[i+1]
                        newdata += chunkData[i+0]
                        newdata += chunkData[i+3]
                # Recompress with a standard zlib header and redo the CRC.
                chunkData = newdata
                chunkData = compress( chunkData )
                chunkLength = len( chunkData )
                chunkCRC = crc32(chunkType)
                chunkCRC = crc32(chunkData, chunkCRC)
                # crc32 may be negative on py2; force unsigned 32-bit.
                chunkCRC = (chunkCRC + 0x100000000) % 0x100000000
            # Drop Apple's proprietary CgBI chunk; copy everything else.
            if chunkType != "CgBI":
                newPNG += pack(">L", chunkLength)
                newPNG += chunkType
                if chunkLength > 0:
                    newPNG += chunkData
                newPNG += pack(">L", chunkCRC)
            # IEND terminates the PNG stream.
            if chunkType == "IEND":
                break
        return newPNG
    def lastsetPNG(self,path):
        """Normalize *path* in place; True if rewritten, None otherwise."""
        data = self.setPNG(path)
        if data != None:
            file = open(path, "wb")
            file.write(data)
            file.close()
            return True
        return data
    def main(self):
        """Process every PNG once, printing progress percentages.

        NOTE(review): the `while 1` loop always breaks after the first
        converted file, so at most one PNG is rewritten per run.
        """
        pngs=self.getFile()
        complicated=0
        if len(pngs)==0:
            sys.exit()
        while 1 :
            for ipng in xrange(len(pngs)):
                perc= (float(ipng)/len(pngs))*100.0
                print "%.2f%% %s" % (perc, pngs[ipng])
                if self.lastsetPNG(pngs[ipng]):
                    complicated +=1
            break
if __name__=="__main__":
    # Command-line entry point: normalize every PNG directly inside --dir.
    import sys

    parse = argparse.ArgumentParser()
    parse.add_argument('-d', '--dir', dest='Path', help='file path', action='store')
    args = parse.parse_args()
    if args.Path:
        pn = PNGproccess(args.Path)
        pn.main()
    else:
        print('''
        Useage : input you file path
        ''')
        sys.exit()
| [
"yckyos261@gmail.com"
] | yckyos261@gmail.com |
ce06337232aea0d020c597bf68c7e23c8dfbacb3 | 73333d3a7bb3be8ca6eb12a80aba7d4f0dbcff99 | /content/migrations/0001_initial.py | 818018818fbb62f6d7beebeb584e61ca14a40a84 | [] | no_license | kleine-alberta/yamdb_final | d251b0d86408cb38a2b7807fc0cc33817fe531bd | 2b0eb72c4ecafee5e066e5f452290b7524e9344d | refs/heads/master | 2023-03-11T08:12:25.001102 | 2021-02-27T23:26:34 | 2021-02-27T23:26:34 | 340,613,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | # Generated by Django 3.0.5 on 2020-11-14 15:45
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the content app.

    Creates the Categories, Genres and Titles tables; a Title points at one
    optional category (SET_NULL on delete) and many genres.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Categories',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('slug', models.SlugField(blank=True, null=True, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Genres',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('slug', models.SlugField(blank=True, null=True, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Titles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=200)),
                ('year', models.IntegerField(null=True)),
                ('rating', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('description', models.TextField(blank=True)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='category', to='content.Categories')),
                ('genre', models.ManyToManyField(blank=True, to='content.Genres')),
            ],
        ),
    ]
| [
"jemand@MacBook-Pro-Jemand.local"
] | jemand@MacBook-Pro-Jemand.local |
133eb8ff9bdd88c775a362eb91c937e712aea0bb | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/workloads/setup.py | 4eb445f63f094bb7c215e4c40b08e0266e1db3d9 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 1,704 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# HISTORY.rst entry.
VERSION = '0.1.0a1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
    'License :: OSI Approved :: MIT License',
]
# No runtime dependencies beyond azure-cli itself.
DEPENDENCIES = []
# Long description is assembled at build time from the README and changelog.
with open('README.md', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
    name='workloads',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools Workloads Extension.',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli-extensions/tree/main/src/workloads',
    classifiers=CLASSIFIERS,
    packages=find_packages(exclude=["tests"]),
    package_data={'azext_workloads': ['azext_metadata.json']},
    install_requires=DEPENDENCIES
)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
842ea8d847ca0d8514eb8efc7bf05841e0aa0d31 | e04d7dedd28e6ae77fdead98cc870e8969e4e7fc | /venv/bin/easy_install-3.6 | 0884f63c734475a699effa551be434a39db10d2c | [] | no_license | damodharn/Python_Week3 | eb9670707ffcf07feb2596431eb747ab90ea0c89 | 3a47a6f464fb066a00f7277de4ca9d9c37850da9 | refs/heads/master | 2020-06-29T21:53:42.311181 | 2019-08-13T13:29:17 | 2019-08-13T13:29:17 | 200,634,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | 6 | #!/home/admin1/PycharmProjects/week3/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so the entry point sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    entry_point = load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')
    sys.exit(entry_point())
| [
"damodharn21@gmail.com"
] | damodharn21@gmail.com |
0b7dfd99cdf13d9ecafbc21d8fc4f34870cc081b | e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d | /crawlers/urllib2s/urllib2_posts.py | e58f5ccf7fb07608478bd5d3e0cbb37eff0ded44 | [] | no_license | neuroph12/nlpy | 3f3d1a8653a832d6230cb565428ee0c77ef7451d | 095976d144dacf07414bf7ee42b811eaa67326c1 | refs/heads/master | 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import urllib
import urllib.parse
import urllib.request


def main():
    """POST an (empty) login form to Douban and print the raw response body.

    Ported from Python 2's urllib/urllib2 to urllib.request; the POST body
    must be bytes in Python 3, hence the encode().
    """
    url = 'http://www.douban.com/accounts/login'
    values = {'form_email': '',
              'form_password': ''}
    data = urllib.parse.urlencode(values).encode('utf-8')
    req = urllib.request.Request(url, data)
    resp = urllib.request.urlopen(req)
    html = resp.read()
    print(html)


if __name__ == '__main__':
    main()
"anderscui@gmail.com"
] | anderscui@gmail.com |
66259f17ed43af8cc07fab9f59f2c6e11087508a | e84f8bcf2ea91ac12f9850a6f487b8b6bff09235 | /pyfr/backends/cuda/types.py | 0cc8c1b194cd8f1297244c06bf5c39a0ec500c80 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | Aerojspark/PyFR | 2bdbbf8a1a0770dc6cf48100dc5f895eb8ab8110 | b59e67f3aa475f7e67953130a45f264f90e2bb92 | refs/heads/master | 2021-01-14T08:51:48.893378 | 2014-09-01T15:02:28 | 2014-09-01T15:02:28 | 24,884,060 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,726 | py | # -*- coding: utf-8 -*-
import collections
import itertools as it
import numpy as np
import pycuda.driver as cuda
import pyfr.backends.base as base
class CUDAMatrixBase(base.MatrixBase):
    """Common storage logic for CUDA-backed matrices.

    Instances are backed by a slice of a larger device allocation; the slice
    is bound after allocation via onalloc().  (Python 2 codebase: __long__ is
    the integer-conversion hook PyCUDA uses for kernel arguments.)
    """
    def onalloc(self, basedata, offset):
        # Device base pointer of the shared allocation plus our offset into it.
        self.basedata = int(basedata)
        self.data = self.basedata + offset
        self.offset = offset
        # Process any initial value
        if self._initval is not None:
            self._set(self._initval)
        # Remove
        del self._initval
    def _get(self):
        """Copy the matrix device->host and return it in I/O shape."""
        # Allocate an empty buffer
        buf = np.empty(self.datashape, dtype=self.dtype)
        # Copy
        cuda.memcpy_dtoh(buf, self.data)
        # Slice to give the expected I/O shape
        return buf[...,:self.ioshape[-1]]
    def _set(self, ary):
        """Copy *ary* host->device, zero-padding to the allocated shape."""
        # Allocate a new buffer with suitable padding and assign
        buf = np.zeros(self.datashape, dtype=self.dtype)
        buf[...,:self.ioshape[-1]] = ary
        # Copy
        cuda.memcpy_htod(self.data, buf)
    @property
    def _as_parameter_(self):
        # Lets instances be passed directly as ctypes/PyCUDA kernel arguments.
        return self.data
    def __long__(self):
        return self.data
class CUDAMatrix(CUDAMatrixBase, base.Matrix):
    """Dense read-write matrix stored on the CUDA device in the backend fpdtype."""
    def __init__(self, backend, ioshape, initval, extent, tags):
        super(CUDAMatrix, self).__init__(backend, backend.fpdtype, ioshape,
                                         initval, extent, tags)
class CUDAMatrixRSlice(base.MatrixRSlice):
    """Row-slice of a parent CUDA matrix; exposes the offset device pointer."""
    @property
    def _as_parameter_(self):
        return self.parent.basedata + self.offset
    def __long__(self):
        # Python 2 integer-conversion hook used for kernel arguments.
        return self.parent.basedata + self.offset
class CUDAMatrixBank(base.MatrixBank):
    """Bank of matrices; converts to the currently active matrix's pointer."""
    def __long__(self):
        return self._curr_mat.data
class CUDAConstMatrix(CUDAMatrixBase, base.ConstMatrix):
    """Read-only device matrix whose I/O shape comes from its initial value."""
    def __init__(self, backend, initval, extent, tags):
        ioshape = initval.shape
        super(CUDAConstMatrix, self).__init__(backend, backend.fpdtype,
                                              ioshape, initval, extent, tags)
class CUDAView(base.View):
    """Indirect view of matrix entries with lookup tables held on the device.

    The mapping (and, when the view has more than one row/column, the stride
    tables) are uploaded as int32 device matrices so kernels can resolve the
    indirection on the GPU.
    """
    def __init__(self, backend, matmap, rcmap, stridemap, vshape, tags):
        super(CUDAView, self).__init__(backend, matmap, rcmap, stridemap,
                                       vshape, tags)
        self.mapping = CUDAMatrixBase(backend, np.int32, (1, self.n),
                                      self.mapping, None, tags)
        if self.nvcol > 1:
            self.cstrides = CUDAMatrixBase(backend, np.int32, (1, self.n),
                                           self.cstrides, None, tags)
        if self.nvrow > 1:
            self.rstrides = CUDAMatrixBase(backend, np.int32, (1, self.n),
                                           self.rstrides, None, tags)
class CUDAMPIMatrix(CUDAMatrix, base.MPIMatrix):
    """Device matrix paired with a page-locked host buffer for MPI transfers."""
    def __init__(self, backend, ioshape, initval, extent, tags):
        # Call the standard matrix constructor
        super(CUDAMPIMatrix, self).__init__(backend, ioshape, initval, extent,
                                            tags)
        # Allocate a page-locked buffer on the host for MPI to send/recv from
        self.hdata = cuda.pagelocked_empty((self.nrow, self.ncol),
                                           self.dtype, 'C')
class CUDAMPIView(base.MPIView):
    """MPI-exchangeable view; behaviour comes entirely from the base class."""
    pass
class CUDAQueue(base.Queue):
    """Ordered queue of compute/MPI kernels executed on private CUDA streams.

    Items are appended with ``queue << items`` and run with run(); a switch
    between 'compute' and 'mpi' kernel types is a sequence point at which the
    queue waits for all outstanding work before continuing.
    """
    def __init__(self, backend):
        super(CUDAQueue, self).__init__(backend)
        # Last kernel we executed
        self._last = None
        # CUDA stream and MPI request list
        self._stream_comp = cuda.Stream()
        self._stream_copy = cuda.Stream()
        self._mpireqs = []
        # Items waiting to be executed
        self._items = collections.deque()
    def __lshift__(self, items):
        # Enqueue without executing.
        self._items.extend(items)
    def __mod__(self, items):
        # Drain, enqueue, then drain again (synchronous execution of *items*).
        self.run()
        self << items
        self.run()
    def __nonzero__(self):
        # Python 2 truth hook: queue is truthy while items remain.
        return bool(self._items)
    def _exec_item(self, item, rtargs):
        # Dispatch by kernel type onto the appropriate stream/request list.
        if item.ktype == 'compute':
            item.run(self._stream_comp, self._stream_copy, *rtargs)
        elif item.ktype == 'mpi':
            item.run(self._mpireqs, *rtargs)
        else:
            raise ValueError('Non compute/MPI kernel in queue')
        self._last = item
    def _exec_next(self):
        item, rtargs = self._items.popleft()
        # If we are at a sequence point then wait for current items
        if self._at_sequence_point(item):
            self._wait()
        # Execute the item
        self._exec_item(item, rtargs)
    def _exec_nowait(self):
        # Run items up to (but not across) the next sequence point.
        while self._items and not self._at_sequence_point(self._items[0][0]):
            self._exec_item(*self._items.popleft())
    def _wait(self):
        # Block until all work issued since the last wait has finished.
        last = self._last
        if last and last.ktype == 'compute':
            self._stream_comp.synchronize()
            self._stream_copy.synchronize()
        elif last and last.ktype == 'mpi':
            from mpi4py import MPI
            MPI.Prequest.Waitall(self._mpireqs)
            self._mpireqs = []
        self._last = None
    def _at_sequence_point(self, item):
        # A change of kernel type forces a synchronization barrier.
        return self._last and self._last.ktype != item.ktype
    def run(self):
        """Execute every queued item, then wait for completion."""
        while self._items:
            self._exec_next()
        self._wait()
    @staticmethod
    def runall(queues):
        """Drain several queues concurrently, interleaving blocking items."""
        # First run any items which will not result in an implicit wait
        for q in queues:
            q._exec_nowait()
        # So long as there are items remaining in the queues
        while any(queues):
            # Execute a (potentially) blocking item from each queue
            for q in it.ifilter(None, queues):
                q._exec_next()
                q._exec_nowait()
        # Wait for all tasks to complete
        for q in queues:
            q._wait()
| [
"freddie@witherden.org"
] | freddie@witherden.org |
72495810a78ce69ddce07046d3fd8c3b966f054f | a65d2aea5fa26637ef6c246c292fc7c1f0336559 | /OMTB/addons/omtb/models/profile.py | 92953d4156add422dba299ce7e42b6a82fb7eff7 | [] | no_license | FrancisXavier99/Projects | d1b2163c18a918ddc80803df890a9ef1754228d1 | fc46148d8bbd89be84b7f788ccc212a692176f09 | refs/heads/main | 2023-06-01T15:54:54.072347 | 2021-06-16T14:04:36 | 2021-06-16T14:04:36 | 374,558,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,148 | py | import re, pdb
from datetime import datetime, date
from odoo import models, fields, api, _, tools
from odoo.exceptions import ValidationError, UserError
from dateutil.relativedelta import relativedelta
class User(models.Model):
    """Ticket-booking customer profile (Odoo model ``user.details``)."""
    _name = "user.details"
    _description = "About Movies Details"
    _rec_name = "user_name"

    user_no = fields.Char(string="User NO", required=True, track_visibility="always", default=lambda self: _('New'))
    user_name = fields.Char(string="User Name", required=True, track_visibility="always")
    user_age = fields.Integer(string="User Age", required=True, track_visibility="always")
    user_dob = fields.Date(string="User DOB")
    user_gender = fields.Selection([('male', 'Male'), ('female', 'Female'), ('others', 'Others')], default='male')
    user_mobile_no = fields.Char(string='Mobile No')
    user_email_id = fields.Char(string='Email Id')
    user_country_id = fields.Many2one('res.country', string="Country")
    user_state_id = fields.Many2one('res.country.state', string="State")

    @api.model
    def create(self, vals):
        """Assign the next ``omtb.user.details`` sequence to new records."""
        # NOTE(review): ``self.user_dob == 0`` relies on the empty recordset's
        # field being falsy; confirm the intended condition before changing.
        if vals.get('user_no', _('New')) == _('New') and self.user_dob == 0:
            vals['user_no'] = self.env['ir.sequence'].next_by_code('omtb.user.details') or _('New')
        result = super(User, self).create(vals)
        return result

    # ----------------- Age validation ------------
    @api.constrains('user_age')
    def age_constrains(self):
        """Reject users aged 18 or younger.

        Fixed to check ``rec.user_age`` rather than ``self.user_age``, which
        misbehaves when the constraint runs on a multi-record set.
        """
        for rec in self:
            if rec.user_age <= 18:
                raise ValidationError(_("Your age must above 18 to book tickets.."))

    # ------------------ Email validation ------------
    @api.onchange('user_email_id')
    def validate_mail(self):
        """Reject syntactically invalid email addresses."""
        if self.user_email_id and not tools.single_email_re.match(self.user_email_id):
            raise ValidationError("Email is not valid")

    @api.constrains('user_mobile_no')
    def validate_mobile(self):
        """Require a 10-digit mobile number starting with 6-9.

        The original combined the two checks with ``and``, which only
        rejected numbers failing *both* tests; a bad prefix with the right
        length (or vice versa) slipped through.  ``or`` enforces both.
        """
        mo = re.compile("^[6-9]")
        for rec in self:
            if not mo.match(rec.user_mobile_no) or len(str(rec.user_mobile_no)) != 10:
                raise ValidationError(_("Invalid Mobile Number"))
        return True
class Manager(models.Model):
    """Theatre-manager profile (Odoo model ``manager.details``)."""
    _name = "manager.details"
    _rec_name = "manager_name"
    manager_no = fields.Char(string="Manager NO", required=True, track_visibility="always",
                             default=lambda self: _('New'))
    manager_name = fields.Char("Name")
    manager_gender = fields.Selection([('male', 'Male'), ('female', 'Female'), ('others', 'Others')], default="male")
    manager_dob = fields.Date("DOB")
    is_dob = fields.Selection([('age', 'Age'), ('dob', 'DOB')], string="Age or DOB?", default='age', required=True)
    manager_age = fields.Integer("Age")
    manager_age1 = fields.Char(string="Calculated Age", compute="_calculate_age", store=True)
    manager_email_id = fields.Char("Email_ID")
    manager_phone_no = fields.Char("Phone Number")
    manager_door_no = fields.Char("Door No")
    manager_street_name = fields.Char("Street Name")
    manager_district = fields.Char("District Name")
    manager_country_id = fields.Many2one('res.country', string="Country")
    manager_state_id = fields.Many2one('res.country.state', string="State")
    start_date = fields.Date("Start Date")
    end_date = fields.Date("End Date")
    experience = fields.Char("Experience", compute='calculate_experience', store=True)
    @api.depends('start_date', 'end_date')
    def calculate_experience(self):
        """Compute a human-readable tenure string from start/end dates.

        An open-ended tenure (no end_date) is measured up to today.
        """
        for rec in self:
            if not rec.start_date:
                continue
            date_from = datetime.strptime(str(rec.start_date), "%Y-%m-%d").date()
            date_to = False
            if rec.end_date:
                date_to = datetime.strptime(str(rec.end_date), "%Y-%m-%d").date()
            difference = relativedelta(date_to if date_to else datetime.today().date(), date_from)
            years = difference.years
            months = difference.months
            days = difference.days
            print(years, months, days)
            # NOTE(review): this tuple is always truthy, so the guard never
            # skips; the intent was probably ``if years or months or days``.
            if (years, months, days != 0):
                # Each component formats to '' when zero, singular when 1.
                days = '%d days' % (days) if days > 1 else ('%d day' % (days) if days == 1 else '')
                months = '%d months' % (months) if months > 1 else ('%d month' % (months) if months == 1 else '')
                years = '%d years' % (years) if years > 1 else ('%d year' % (years) if years == 1 else '')
                rec.experience = (' ' + years if years else '') + (' ' + months if months else '') + days
    # ***************************** Create a orm default sequence *******************
    @api.model
    def create(self, vals):
        """Assign the next ``omtb.manager.details`` sequence to new records."""
        if vals.get('manager_no', _('New')) == _('New') and self.manager_dob == 0:
            vals['manager_no'] = self.env['ir.sequence'].next_by_code('omtb.manager.details') or _('New')
        result = super(Manager, self).create(vals)
        return result
    # ******************* Manager date of birth validation ***************************
    @api.onchange('manager_dob')
    def age_onchange(self):
        """Reject a date of birth that lies in the future."""
        for rec in self:
            if rec.manager_dob and self.manager_dob > fields.date.today():
                raise ValidationError(_("Please give Valid Date of Birth"))
    # ------------------ Age derived from date of birth -------------------
    @api.depends('manager_dob')
    def _calculate_age(self):
        """Compute manager_age1 as a crude year difference from manager_dob."""
        print("depends called")
        self.manager_age1 = False
        for rec in self:
            if rec.manager_dob:
                # NOTE(review): ignores month/day, so the age can be off by one.
                rec.manager_age1 = datetime.now().year - rec.manager_dob.year
class BloodGroup(models.Model):
    """Lookup table of blood groups (Odoo model ``blood.details``)."""
    _name = 'blood.details'
    _rec_name = 'blood_group'
    blood_group = fields.Char("Blood Group")
class RecordRules(models.Model):
    """Extend res.users with links to manager/user profile records."""
    _inherit = "res.users"
    manager = fields.Many2one("manager.details", "Manager Role")
    user = fields.Many2one("user.details", "User Role")
| [
"noreply@github.com"
] | FrancisXavier99.noreply@github.com |
f30fbc263e43824efc25dcf9663d9fb6a9abb5c8 | e268dd698fa01c872c0a26771fd26958fd202349 | /app/cam.py | e9eb0fa175f667117400b7e259790fba688b1040 | [] | no_license | paddy667/farmCam | 93134acf20a23f7688a1209d65bfa3abfd952d26 | e7a16f9efde8b07e5da20996f1c487cb8439a694 | refs/heads/master | 2016-09-03T07:33:36.153380 | 2014-04-12T11:46:47 | 2014-04-12T11:46:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | #!/usr/bin/env python
from Camera import Camera
def RunCam(ws):
t = Camera(ws)
t.run() | [
"paddym1991@googlemail.com"
] | paddym1991@googlemail.com |
91f594c0d623009fa6d5f267254ce89dd81b5e16 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02846/s097636759.py | 3a28136572804ebc45464a64aaca2efeebe9c309 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | import sys
sys.setrecursionlimit(10**9)  # headroom for deep recursion in contest solutions
INF=10**18
def input():
    # Fast stdin line reader; deliberately shadows the builtin input().
    return sys.stdin.readline().rstrip()
def main():
    """Contest solver (AtCoder ABC 166-F-style gap analysis).

    d_0 is the signed displacement of A relative to B over the first segment
    of each period, d_1 over the second; d_0 + d_1 is the drift per period.
    The answer printed appears to be the number of times the gap crosses
    zero — 'infinity' when the gap returns to zero every period.
    """
    def nibutan(ok,ng):
        # Binary search for the largest period index where solve(.., 2) holds.
        while abs(ok-ng) > 1:
            mid = (ok + ng) // 2
            if solve(mid,2):
                ok = mid
            else:
                ng = mid
        return ok
    def solve(mid,n):
        # Count zero-touches/crossings of the gap during period ``mid`` and
        # report whether exactly ``n`` occurred.  ``dif`` is the gap at the
        # start of the period.
        dif=(d_0+d_1)*(mid-1)
        c=0
        if dif*(dif+d_0) == 0:
            c+=1
        elif dif*(dif+d_0) < 0:
            c+=1
            if (dif+d_0)*(dif+d_0+d_1) < 0:
                c+=1
        if c==n:
            return True
        else:
            return False
    T=list(map(int,input().split()))
    A=list(map(int,input().split()))
    B=list(map(int,input().split()))
    d_0=T[0]*(A[0]-B[0])
    d_1=T[1]*(A[1]-B[1])
    if d_0==-d_1:
        # Zero net drift: the pattern repeats forever.
        print('infinity')
    elif d_0*(d_0+d_1)<0:
        # Drift opposes the first-half movement, so crossings eventually stop.
        if (d_0*2+d_1)*(d_0*2+d_1*2)<0:
            n=nibutan(2,10**40)
            ans=n*2-1
            ans+=solve(n+1,1)
            print(ans)
        else:
            print(1)
    else:
        print(0)
if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3dd5461c1eb9bb0714bb5ea7ec1ea8ac8ca09243 | 3285937cce9d3727e3db9514fd0b680fe643242b | /apps/courses/models.py | 037ce642884ac03206d252824d3fd82f30fe7b6d | [] | no_license | codescracker/online_edu | 6f7de116b5d9d8f37b3ef9a2cbe47b39cae14398 | 5f7dee346aa28181c6c17214ba7ecc18b3429cbc | refs/heads/master | 2021-05-16T01:40:54.917522 | 2017-12-04T18:59:56 | 2017-12-04T18:59:56 | 106,333,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,195 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from organizations.models import Organizition, Teacher
# Create your models here.
class Course(models.Model):
    """A course offered by an organization, taught by a teacher."""
    organization = models.ForeignKey(Organizition, blank=True, null= True, verbose_name= 'course related organization')
    name = models.CharField(max_length=50, verbose_name=u"name of the course")
    desc = models.CharField(max_length=200, verbose_name=u"description of the course")
    detail = models.TextField(verbose_name=u"detail of the course")
    degree = models.CharField(max_length=20,
                              choices=(("Easy", "easy level course"),
                                       ("Medium", "medium level course"),
                                       ("Hard", "hard level of course")),
                              verbose_name=u"difficulty level of degree")
    learn_time = models.IntegerField(default=0, verbose_name=u"length of the course")
    students = models.IntegerField(default=0, verbose_name="number of student")
    like_nums = models.IntegerField(default=0, verbose_name="number of likes")
    image = models.ImageField(upload_to="courses/%Y/%m", verbose_name=u"cover image", max_length= 100,
                              default='courses/%Y/%m/default.jpg')
    click_nums = models.IntegerField(default=0, verbose_name=u"number of click")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"time of creation of course")
    category = models.CharField(default='backend', max_length=50, verbose_name='category of the course')
    tag = models.CharField(default='', max_length=50, verbose_name='tag of the course')
    teacher = models.ForeignKey(Teacher, blank=True, null= True, verbose_name= 'course related teacher')
    prequist = models.CharField(default='', max_length=50, verbose_name='preqrust for the course')
    goal = models.CharField(default='', max_length=50, verbose_name='goal of the course')
    is_banner = models.BooleanField(default=False, verbose_name='whether the course will be in banner')
    class Meta:
        verbose_name = u"courses"
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
    def get_lectures_num(self):
        """Return the number of lectures attached to this course."""
        all_lectures_num = self.lecture_set.all().count()
        return all_lectures_num
    def get_user_course(self):
        """Return up to five enrollment records for this course."""
        user_course = self.usercourse_set.all()[:5]
        return user_course
    def get_lectures(self):
        """Return all lectures of this course."""
        return self.lecture_set.all()
class Lecture(models.Model):
    """A chapter/section of a Course."""
    course = models.ForeignKey(Course, verbose_name=u"course")
    name = models.CharField(max_length=100, verbose_name=u"lecture name")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"time of lecture added")
    class Meta:
        verbose_name = u"lecutre"
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return "{} {}".format(self.course, self.name)
    def get_videos(self):
        """Return all videos attached to this lecture."""
        return self.video_set.all()
class Video(models.Model):
    """A single video inside a Lecture."""
    lesson = models.ForeignKey(Lecture, verbose_name=u"lesson")
    name = models.CharField(max_length= 100, verbose_name=u"video name")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"time of video added")
    url = models.URLField(default='', verbose_name='video url')
    video_length = models.IntegerField(default=0, verbose_name=u"length of the video")
    class Meta:
        verbose_name = u"video"
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return "{} {}".format(self.lesson, self.name)
class CourseRsource(models.Model):
    """A downloadable resource file attached to a Course."""
    course = models.ForeignKey(Course, verbose_name=u"course")
    name = models.CharField(max_length=100, verbose_name=u"source name")
    download = models.FileField(upload_to="course/resource/%Y/%m", max_length=100)
    add_time = models.DateTimeField(default= datetime.now, verbose_name=u"time of source added")
    class Meta:
        verbose_name = u"course resource"
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return "{} {}".format(self.course, self.name)
| [
"shaowei@Shaoweis-MBP.fios-router.home"
] | shaowei@Shaoweis-MBP.fios-router.home |
c21da64e020196425d949a6cca7398d595768846 | 743b1adb04ce5f168d9314c0cf391f0727bb8a55 | /src/utils.py | 37e31567a893da09b353067193b912d127302f00 | [] | no_license | khavarilab/skin-gene-sigs | 1284c4df722f7249faaa7d324c73105b9cc65b5a | 09e580a70581a9d98ad6da1e7b925670ce04ff46 | refs/heads/master | 2020-05-14T11:54:19.213730 | 2019-09-20T18:54:04 | 2019-09-20T18:54:04 | 181,785,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,481 | py |
import os
import numpy as np
import pandas as pd
def make_matrix(
        quant_dirs,
        quant_main_dir,
        out_dir,
        quant_type="rsem"):
    """Merge per-sample quantification files into TPM and count matrices.

    Args:
        quant_dirs: per-sample directory names under *quant_main_dir*; the
            sample column header is built as "<token after the last '-' of
            the first dot-field>.<token before the first '_' of the second
            dot-field>".
        quant_main_dir: directory containing the per-sample directories.
        out_dir: where tpms.mat.txt.gz / counts.mat.txt.gz are written.
        quant_type: quantifier that produced the files; only "rsem" is
            supported.

    Returns:
        (tpm_file, counts_file) paths of the merged, gzipped matrices.

    Raises:
        ValueError: for an unsupported quant_type (the original code fell
            through to a NameError on quant_file in that case).
    """
    if quant_type == "rsem":
        quant_file = "Quant.genes.results"
    else:
        raise ValueError("unsupported quant_type: {}".format(quant_type))

    for file_idx in range(len(quant_dirs)):
        quant_dir = quant_dirs[file_idx]
        full_path_file = "{}/{}/{}".format(quant_main_dir, quant_dir, quant_file)
        col_header = "{}.{}".format(
            quant_dir.split(".")[0].split("-")[-1],
            quant_dir.split(".")[1].split("_")[0])

        # read in with pandas
        file_data = pd.read_csv(full_path_file, sep="\t")

        # make a tpm file
        tpms = file_data[["gene_id", "TPM"]]
        tpms.columns = ["gene_id", col_header]
        if file_idx == 0:
            all_tpms = tpms.copy()
        else:
            all_tpms = all_tpms.merge(tpms, on="gene_id")

        # make a counts file
        counts = file_data[["gene_id", "expected_count"]]
        counts.columns = ["gene_id", col_header]
        if file_idx == 0:
            all_counts = counts.copy()
        else:
            all_counts = all_counts.merge(counts, on="gene_id")

    # strip the Ensembl version suffix and move gene ids to the index
    all_tpms["gene_id"] = all_tpms["gene_id"].str.split(".").str[0]
    all_tpms = all_tpms.set_index("gene_id")
    all_counts["gene_id"] = all_counts["gene_id"].str.split(".").str[0]
    all_counts = all_counts.set_index("gene_id").astype(int)

    # save out
    tpm_file = "{}/tpms.mat.txt.gz".format(out_dir)
    all_tpms.to_csv(tpm_file, sep="\t", compression="gzip")
    counts_file = "{}/counts.mat.txt.gz".format(out_dir)
    all_counts.to_csv(counts_file, sep="\t", compression="gzip")

    return tpm_file, counts_file
def filter_by_ids(matrix_file, filter_list_file):
    """Keep only rows of *matrix_file* whose index appears in *filter_list_file*.

    Writes the filtered matrix next to the input as ``<stem>.pc.mat.txt.gz``
    and returns that path.
    """
    wanted = set(pd.read_csv(filter_list_file, header=None)[0])
    matrix = pd.read_csv(matrix_file, index_col=0, sep="\t")
    filtered = matrix.loc[matrix.index.isin(wanted)]
    out_file = "{}.pc.mat.txt.gz".format(matrix_file.split(".mat")[0])
    filtered.to_csv(out_file, sep="\t")
    return out_file
def filter_for_expressed(mat_pc_files, threshold=1):
    """Drop genes whose max log2(TPM) across samples is below *threshold*.

    *mat_pc_files* is a (tpm_file, counts_file) pair; both matrices are
    filtered on the TPM criterion so they stay row-aligned.  A diagnostic
    distribution plot is attempted via an external R script (best effort).

    Fixed for Python 3: the original used a Python 2 ``print`` statement,
    which is a SyntaxError under Python 3.
    """
    # plot to confirm the empirical cutoff (requires the R script on PATH)
    plot_file = "{}.log2.expr_distr.pdf".format(mat_pc_files[0].split(".mat")[0])
    plot_cmd = "plot.gene_expr_distr.R {} {}".format(mat_pc_files[0], plot_file)
    print(plot_cmd)
    os.system(plot_cmd)

    # load data
    tpm_data = pd.read_csv(mat_pc_files[0], sep="\t", index_col=0)
    count_data = pd.read_csv(mat_pc_files[1], sep="\t", index_col=0)

    # filter on the per-gene max of log2(TPM); compute the mask once
    tpm_data_log2 = np.log2(tpm_data)
    keep = np.max(tpm_data_log2.values, axis=1) >= threshold
    tpm_data_filt = tpm_data[keep]
    count_data_filt = count_data[keep]

    # save out
    tpm_filt_file = "{}.expr_filt.mat.txt.gz".format(mat_pc_files[0].split(".mat")[0])
    tpm_data_filt.to_csv(tpm_filt_file, sep="\t")
    count_filt_file = "{}.expr_filt.mat.txt.gz".format(mat_pc_files[1].split(".mat")[0])
    count_data_filt.to_csv(count_filt_file, sep="\t")

    return tpm_filt_file, count_filt_file
def filter_scRNA_for_expressed(sc_vals_file, threshold=0.05):
    """Plot the single-cell expression distribution for cutoff inspection.

    Only produces the diagnostic plot via an external R script (best
    effort); no filtering happens here and None is returned.  *threshold*
    is currently unused.  Fixed the Python 2 ``print`` statement.
    """
    plot_file = "{}.log2.expr_distr.pdf".format(sc_vals_file.split(".mat")[0])
    plot_cmd = "plot.gene_expr_distr.singlecell.R {} {}".format(sc_vals_file, plot_file)
    print(plot_cmd)
    os.system(plot_cmd)

    return None
def run_sequential_deseq2(counts_file, out_prefix, fdr_cutoff=0.20):
    """Run the sequential DESeq2 R script on *counts_file* (best effort).

    Shells out to ``deseq2.sequential.R``; no result is returned.  Fixed
    the Python 2 ``print`` statement.
    """
    diff_cmd = "deseq2.sequential.R {} {} {}".format(
        counts_file,
        fdr_cutoff,
        out_prefix)
    print(diff_cmd)
    os.system(diff_cmd)

    return
def make_gene_signature_file(
        deseq_results_file,
        gene_mappings_file,
        out_file,
        background_file=None,
        filt_file=None,
        sort_ascending=True):
    """Condense a DESeq2 results table into a sorted gene-signature file.

    Args:
        deseq_results_file: TSV of DESeq2 results indexed by Ensembl gene id.
        gene_mappings_file: TSV with ensembl_gene_id / hgnc_symbol columns.
        out_file: gzipped TSV output path.
        background_file: if given, also run a gProfiler enrichment R script.
        filt_file: optional newline list of gene ids to keep.
        sort_ascending: sort direction for log2FoldChange.

    Fixed for Python 3: the original used a Python 2 ``print`` statement.
    """
    # pull in results, filter if needed
    deseq2_results = pd.read_csv(deseq_results_file, sep="\t")
    if filt_file is not None:
        filt_list = pd.read_csv(filt_file, header=None).iloc[:,0].values
        deseq2_results = deseq2_results[deseq2_results.index.isin(filt_list)]
    deseq2_results["ensembl_gene_id"] = deseq2_results.index.values

    # attach HGNC symbols
    mappings = pd.read_csv(gene_mappings_file, sep="\t")
    results = deseq2_results.merge(mappings, on="ensembl_gene_id", how="left")

    # keep only needed columns
    results = results[[
        "ensembl_gene_id",
        "hgnc_symbol",
        "log2FoldChange",
        "lfcSE",
        "padj"]]

    # reduce duplicates
    results = results.drop_duplicates()

    # sort by logFC
    results = results.sort_values("log2FoldChange", ascending=sort_ascending)

    # save out
    results.to_csv(out_file, sep="\t", compression="gzip", index=False)

    # check each signature with gprofiler (external R script, best effort)
    if background_file is not None:
        gprofiler_cmd = "run_gprofiler.R {} {} {}".format(
            out_file, background_file, os.path.dirname(out_file))
        print(gprofiler_cmd)
        os.system(gprofiler_cmd)

    return
| [
"dskim89@gmail.com"
] | dskim89@gmail.com |
40c5123d3a35072e677f5943c249111d5c5e8ce8 | 7c36183253f76d9c3b551d44faebd3f98215f660 | /TAHiring/urls.py | b93eb17e5739f938f130db0bcd7469fcf329c96c | [] | no_license | smcarington/utilities | 1bc59b5a9a1d1a89665199f019a21227b05fc56b | 0a029d69eafaeaab9941ee8298a4835dd2acc785 | refs/heads/master | 2021-01-20T07:57:35.392521 | 2017-05-11T15:40:03 | 2017-05-11T15:40:03 | 90,074,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py |
from django.conf.urls import url, include, patterns
from django.contrib import admin
from . import views
# URL routes for the TA application / review / offer workflow.
urlpatterns = [
    # TA Application Form
    url(r'^ta_application/personal$',
        views.application_form_personal,
        name='application_form_personal'),
    url(r'^ta_application/courses$',
        views.application_form_courses,
        name='application_form_courses'),
    url(r'^ta_application/availability/(?P<term>\w+)$',
        views.application_form_availability,
        name='application_form_availability'),
    url(r'^ta_application/complete$',
        views.application_complete,
        name='application_complete'),
    # Admin Reviewing TAs
    url(r'^ta_application/review/applicants$',
        views.review_applicants,
        name='review_applicants'),
    url(r'^ta_application/review/(?P<tapk>\d+)$',
        views.review_applicants,
        name='review_applicants'),
    # Review courses by schedule and term
    url(r'^ta_application/review/course/schedule/(?P<course_pk>\d+)$',
        views.review_course_schedule,
        name='review_course_schedule'),
    url(r'^ta_application/review/course/schedule/(?P<course_pk>\d+)/(?P<term>\w{1})$',
        views.review_course_schedule,
        name='review_course_schedule'),
    # Review courses by table and term
    url(r'^ta_application/review/course/table/(?P<course_pk>\d+)$',
        views.review_course_table,
        name='review_course_table'),
    # AJAX url for (un)assigning a ta. Submit to by POST
    url(r'^ta_application/assign_ta/$',
        views.assign_ta_to_tutorial,
        name='assign_ta'),
    # Offers
    # NOTE(review): "[0-9A-za-z]" in the token pattern likely meant
    # "[0-9A-Za-z]" — the A-z range also matches punctuation; confirm
    # against the token generator before changing.
    url(r'^ta_application/confirm_offer/(?P<uidb64>[0-9A-Za-z\-]+)/(?P<token>[0-9A-za-z]+)$',
        views.confirm_offer,
        name='confirm_offer'),
]
| [
"kreizhn@gmail.com"
] | kreizhn@gmail.com |
36a3086b66a4b2ade18dafd6d8258ef29a3e96fe | 22baacd8f20ed28eabf3dc1c0737502de3d8e794 | /server/db/client_dal.py | 3fda4340f55ca12389d7a4ebf081eeff12320b68 | [] | no_license | lgadi/diag | 48398be132c2a0b62fa7673ec9ebcc5afba46d5c | 9b4c1e1baa41eb3e4571f91eca991b36d7ba8213 | refs/heads/master | 2021-01-19T00:51:25.440582 | 2017-04-23T17:31:15 | 2017-04-23T17:31:15 | 87,213,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import logging
from .db_access import DbAccess
class ClientDal:
def __init__(self):
self.db_access = DbAccess()
def save_client(self, client_id, date):
self.db_access.query_and_commit("INSERT INTO customers (account, lastSeen) "
"VALUES (%s, %s) ON DUPLICATE KEY UPDATE lastSeen = %s",
(client_id, date, date)) | [
"yonlif@gmail.com"
] | yonlif@gmail.com |
1c1cd50bb8c1057e0c75c3bb4ac82104e50caeb2 | d0969c8765d00090651ea7d33e13e8976f554b7f | /mergesort.py | f0840b665cf38f546384899951c318824d3193e3 | [] | no_license | Gurinder-Kaur/Trees | ff130c358d18adcfbf6c172f315ad21274e7df99 | 4944123b28f518813fdf2395f696dd4259bb20b7 | refs/heads/master | 2020-04-24T12:46:05.876038 | 2019-02-22T00:40:33 | 2019-02-22T00:40:33 | 171,965,918 | 0 | 0 | null | 2019-02-22T00:39:04 | 2019-02-22T00:19:56 | Python | UTF-8 | Python | false | false | 664 | py | def mergesort(arr,first,last):
if(first<last):
mid=(first+last)/2
mergesort(arr,first,mid)
mergesort(arr,mid+1,last)
merge(arr,first,mid,last)
def merge(arr,first,mid,last):
n1=mid-first+1
n2=last-mid
for i in range(1,n1+1):
arr1[i]=arr[first+i-1]
for j in range(1,n2+1):
arr2[j]=arr[mid+j]
arr1[n1+1]=1000
arr2[n2+1]=1000
i=j=1
for k in range(first,last+1):
if(arr1[i]<arr2[i]):
arr[k]=arr1[i]
i=i+1
else:
arr[k]=arr2[j]
j=j+1
list=[3,4,1,6,9,14,20,21,25,21]
l=len(list)
start=0
mergesort(list,start,l-1)
print(list)
| [
"noreply@github.com"
] | Gurinder-Kaur.noreply@github.com |
5f43276c56036f6fb66ed1d8b271c4f884b8a619 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/display_board_20201108143615.py | 98a8605857b33b641a94ead1694ab306c856f3eb | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,475 | py | from config import *
import pygame as pg
class Display_board():
    """Pygame renderer for a sudoku board: grid lines, placed numbers,
    per-cell candidate digits and the blinking selection highlight.

    Relies on layout/colour constants imported from config (BLOCK_SIZE,
    TOP_LX/TOP_LY/..., BOX_INDENT, NUM_LINES, BLACK, WHITE, etc.).
    """
    def __init__(self, screen):
        # Target surface plus two fonts: large for placed values, small
        # for candidate digits.
        self.screen = screen
        self.font_num = pg.font.SysFont("comicsans", NUMBER_SIZE)
        self.font_cdt = pg.font.SysFont("comicsans", CANDIDATE_SIZE)
    def draw_val(self, val, x, y):
        # Blit a single value at board cell (x, y); offsets centre it.
        text1 = self.font_num.render(str(val), 1, BLACK)
        self.screen.blit(text1, (x * BLOCK_SIZE + 15, y * BLOCK_SIZE + 15))
    def draw_cdt(self, val, x, y):
        # Blit a single candidate digit (small font) at board cell (x, y).
        text1 = self.font_cdt.render(str(val), 1, BLACK)
        self.screen.blit(text1, (x * BLOCK_SIZE + 1, y * BLOCK_SIZE + 1))
    def on_mouse_press(self, x, y, symbol, modifier):
        # Placeholder: mouse handling is done elsewhere.
        pass
    def numbers(self, n):
        # Generator that yields odd numbers twice:
        # 0, 1, 1, 2, 3, 3, 4, 5, 5, 6 ... — used as cumulative offsets so
        # every third grid line gets the extra spacing of a thick border.
        for i in range(n):
            yield i
            if i % 2 == 1:
                yield i
    def draw(self, grid, cell):
        """Draw the whole board: cell contents first, then grid lines.

        grid is a 9x9 structure; an int entry is a placed value, any other
        non-zero entry is treated as a candidate collection. `cell` is a
        (x, y, width, height) rect as returned by find_cell.
        """
        for i in range (9):
            for j in range (9):
                if grid[i][j] != 0:
                    if type(grid[i][j]) != int:
                        self.draw_candidates(grid[i][j], cell)
                    else:
                        # NOTE(review): debug print left as in the original.
                        print(cell[0])
                        text1 = self.font_num.render(str(grid[i][j]), 1, BLACK)
                        self.screen.blit(text1, (cell[0] + 15, cell[1] + 14))
        # Build the per-line extra-offset table from the numbers() generator
        # (10 entries for the 10 grid lines; indent[9] is the total extra
        # width/height added by the thick borders).
        indent = []
        for i in self.numbers(7):
            indent.append(i)
        for i in range(NUM_LINES):
            # Every third line is a thick 3x3-box border.
            if i % 3 == 0:
                thick = THICK_LINE
            else:
                thick = THIN_LINE
            # Horizontal line i, shifted down by the accumulated thick-border
            # spacing; the right end extends by the total extra width.
            pg.draw.line(self.screen, BLACK, (TOP_LX,
                        TOP_LY + i * BLOCK_SIZE + indent[i] * BOX_INDENT),
                        (TOP_RX + (indent[9] * BOX_INDENT),
                        TOP_RY + i * BLOCK_SIZE + indent[i] * BOX_INDENT), thick)
            # Matching vertical line i.
            pg.draw.line(self.screen, BLACK, (TOP_LX + i * BLOCK_SIZE + indent[i] * BOX_INDENT,
                        TOP_LY),
                        (BOT_LX + i * BLOCK_SIZE + indent[i] * BOX_INDENT,
                        BOT_LY + (indent[9] * BOX_INDENT)), thick)
            # (Large blocks of commented-out hand-computed coordinate
            # experiments removed for readability.)
            # For candidate placement
            if i % 3 == 0:
                pg.draw.line(self.screen, BLACK, (cell[0],
                            cell[1] + i * (cell[2] / 9)),
                            ((cell[0] + cell[2]),
                            cell[1] + i * (cell[2] / 9)), 1)
                pg.draw.line(self.screen, BLACK, (cell[0] + i * (cell[3] / 9),
                            cell[1]),
                            (cell[0] + i * (cell[3] / 9),
                            cell[1] + cell[3]), 1)
    def draw_candidates(self, grid, cell):
        """Draw up to 9 candidate digits in a 3x3 layout inside one cell.

        `grid` here is the iterable of candidate digits for the cell,
        `cell` the (x, y, w, h) rect of that cell.
        """
        new_line = 1
        iteration = 1
        indent = 15
        for number in grid:
            if iteration % 3 == 1: # Checking if first in line: 1, 4, 7
                text1 = self.font_cdt.render(str(number), 1, BLACK)
                self.screen.blit(text1, (cell[0] + 3, cell[1] + ((new_line - 1) * indent) + 2))
            else:
                text1 = self.font_cdt.render(str(number), 1, BLACK)
                self.screen.blit(text1, (cell[0] + ((iteration - 1) * indent) + 3, cell[1] + ((new_line - 1) * indent) + 2))
            if iteration % 3 == 0: # checking if last in line: 3, 6
                new_line += 1
                iteration = 0
            iteration += 1
    def update(self, grid, row, col, blk):
        """Re-draw up to three highlighted positions (row/col/block hits)
        in a bold font; (-1, -1) means "no position" for that slot.
        """
        font_val = pg.font.SysFont("comicsans", BOLD)
        if row != (-1, -1):
            # Remove old number (over-paint in background colour).
            text1 = self.font_num.render(str(grid[row[0]][row[1]]), 1, WHITE)
            self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 15, TOP_LY + row[1] * BLOCK_SIZE + 15))
            # Rewrite in bigger font
            text1 = font_val.render(str(grid[row[0]][row[1]]), 1, BLACK)
            self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 14, TOP_LY + row[1] * BLOCK_SIZE + 10))
        if col != (-1, -1):
            # Remove old number
            text1 = self.font_num.render(str(grid[col[0]][col[1]]), 1, WHITE)
            self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 15, TOP_LY + col[1] * BLOCK_SIZE + 15))
            # Rewrite in bigger font
            text1 = font_val.render(str(grid[col[0]][col[1]]), 1, BLACK)
            self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 14, TOP_LY + col[1] * BLOCK_SIZE + 10))
        if blk != (-1, -1):
            # Remove old number
            text1 = self.font_num.render(str(grid[blk[0]][blk[1]]), 1, WHITE)
            self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 15, TOP_LY + blk[1] * BLOCK_SIZE + 15))
            # Rewrite in bigger font
            text1 = font_val.render(str(grid[blk[0]][blk[1]]), 1, BLACK)
            self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 14, TOP_LY + blk[1] * BLOCK_SIZE + 10))
    def find_cell(self, x, y):
        """Return the screen rect (x, y, w, h) of board cell (x, y).

        Returns None when no cell is selected ((-1, -1)) — callers must
        handle that. The block_x/block_y counters add 3.5 px of extra
        offset for each thick 3x3 border left/above the cell.
        """
        # Only applies glow when a cell is selected
        if x == -1 and y == -1:
            return
        width = BLOCK_SIZE
        height = BLOCK_SIZE
        block_x = block_y = 1
        temp_x = x
        # NOTE(review): the `i += 1` below has no effect on a for-loop
        # counter in Python; the loop structure is kept as written.
        for i in range(3):
            if temp_x - 3 >= 0:
                block_x += 2
                temp_x += -3
                i += 1
        start_pos_x = TOP_LX + x * BLOCK_SIZE + block_x * 3.5
        temp_y = y
        for i in range(3):
            if temp_y - 3 >= 0:
                block_y += 2
                temp_y += -3
                i += 1
        start_pos_y = TOP_LY + y * BLOCK_SIZE + block_y * 3.5
        return (start_pos_x, start_pos_y, width, height)
    def blink(self, alpha, a_change):
        """Advance the highlight's alpha one step, bouncing between
        roughly 30 and 175; returns the new (alpha, direction) pair.
        """
        if a_change:
            alpha += BLINK_SPEED
            if alpha >= 175:
                a_change = False
        # NOTE(review): this branch is reached exactly when a_change is
        # falsy, so `elif a_change == False` behaves like a plain `else`.
        elif a_change == False:
            alpha += -BLINK_SPEED
            if alpha <= 30:
                a_change = True
        return (alpha, a_change)
| [
"jle040@uit.no"
] | jle040@uit.no |
880bc884f5809541b1638d7845dfcb552cbdb3ea | cebe592b4fa6a1946f907d460e3ee48fbacbe9eb | /news/views.py | a1504b03cecbf6822fb25e5ec851bc463a43e74b | [] | no_license | GrigorashPasha/Django_News | 8c507074d32ea2eb5ec1a6e23771c0baef07e871 | 7c2e0f2c24cbf2edb3bf0f3870fe04264dd97267 | refs/heads/master | 2023-08-16T04:07:35.510259 | 2021-10-11T13:55:37 | 2021-10-11T13:55:37 | 415,576,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | from django.shortcuts import render, get_object_or_404
from .models import News, Category
def index(request):
news = News.objects.all()
context = {'news': news,
'title': 'News List',
}
return render(request, 'news/index.html', context=context)
def get_category(request, category_id):
    """Render all news items belonging to one category.

    Uses get_object_or_404 (already imported at the top of this file and
    used by view_news below) so an unknown category id yields a 404 page
    instead of an uncaught Category.DoesNotExist / HTTP 500, and keeps
    error handling consistent across the views.
    """
    news = News.objects.filter(category_id=category_id)
    category = get_object_or_404(Category, pk=category_id)
    context = {'news': news,
               'category': category
               }
    return render(request, 'news/category.html', context=context)
return render(request, 'news/category.html', context=context)
def view_news(request, news_id):
    """Render one news item; unknown ids produce a 404 response."""
    item = get_object_or_404(News, pk=news_id)
    return render(request, 'news/view_news.html', {'news_item': item})
| [
"GrigorashPasha@gmail.com"
] | GrigorashPasha@gmail.com |
70001b56d298f5befbbcdf00e94f61e060b46a96 | 21b0483666d8e5cbdc4a911bda93e1a3392c40ec | /lib/initialConditions.py | 1f3e0b6022939d9fa360915afc8482217719f223 | [] | no_license | Christopher-Bradshaw/fluids_final | 0541111323c640b40ee86f970acb896689bbb867 | 2e33b2ef04fdbd40760c1804a02c86c93c5fd926 | refs/heads/master | 2021-08-24T03:13:43.132099 | 2017-12-07T20:33:31 | 2017-12-07T20:33:31 | 113,245,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,193 | py | import numpy as np
def getPressure(energy, volume, gamma):
    """Ideal-gas pressure from internal energy: p = (gamma - 1) * E / V.

    Works elementwise for numpy arrays as well as plain scalars.
    """
    return (gamma - 1) * energy / volume
def getFlatConfig():
    """Initial state for a uniform (flat) medium of width 5.

    Returns a dict holding the staggered grid (node positions/velocities),
    the per-cell quantities ("gaps"), the fixed initial densities and the
    run constants dx / width / gamma.
    """
    dx, width, gamma = 1, 5, 5 / 3
    # Uniform unit density; this array is never changed after setup.
    initialRho = np.ones(width)
    # Sum of each adjacent density pair (vectorised form of the original
    # element-by-element list comprehension; same values).
    summedInitialRho = initialRho[:-1] + initialRho[1:]
    # Grid nodes: width + 1 of them, all initially at rest.
    grid = np.zeros(width + 1, dtype=[
        ("position", "float64"),
        ("velocity", "float64"),
    ])
    grid["position"] = np.arange(0, width + 1, dx)
    grid["velocity"] = np.zeros_like(grid["position"])
    # Cell-centred quantities.
    gaps = np.zeros(width, dtype=[
        ("volume", "float64"),
        ("viscocity", "float64"),
        ("energy", "float64"),
        ("pressure", "float64"),
    ])
    gaps["volume"] = 1 / initialRho
    gaps["viscocity"] = np.zeros(width)
    gaps["energy"] = np.ones(width)
    # Ideal gas: p = E * (gamma - 1) / V (inlined from getPressure).
    gaps["pressure"] = gaps["energy"] * (gamma - 1) / gaps["volume"]
    return {
        "grid": grid,
        "gaps": gaps,
        "initialRho": initialRho,
        "summedInitialRho": summedInitialRho,
        "dx": dx,
        "width": width,
        "gamma": gamma,
    }
def getVelocityConfig():
    """Flat configuration plus a small (+0.01) uniform velocity kick on
    every interior grid node; the two boundary nodes stay at rest.
    """
    cfg = getFlatConfig()
    cfg["grid"]["velocity"][1:-1] += 0.01
    return cfg
def getShockTubeConfig():
    """Initial state for a shock tube: density (and energy) 1.1 on the
    left half of a width-100 domain, 1.0 on the right half.
    """
    dx, width, gamma = 1, 100, 5 / 3
    initialRho = np.ones(width)
    initialRho[:50] = 1.1  # over-dense left half drives the shock
    # Adjacent-pair density sums (vectorised; identical values to the
    # original list comprehension).
    summedInitialRho = initialRho[:-1] + initialRho[1:]
    grid = np.zeros(width + 1, dtype=[
        ("position", "float64"),
        ("velocity", "float64"),
    ])
    grid["position"] = np.arange(0, width + 1, dx)
    grid["velocity"] = np.zeros_like(grid["position"])
    gaps = np.zeros(width, dtype=[
        ("volume", "float64"),
        ("viscocity", "float64"),
        ("energy", "float64"),
        ("pressure", "float64"),
    ])
    gaps["volume"] = 1 / initialRho
    gaps["viscocity"] = np.zeros(width)
    gaps["energy"] = 1 * initialRho
    # Ideal gas: p = E * (gamma - 1) / V (inlined from getPressure).
    gaps["pressure"] = gaps["energy"] * (gamma - 1) / gaps["volume"]
    return {
        "grid": grid,
        "gaps": gaps,
        "initialRho": initialRho,
        "summedInitialRho": summedInitialRho,
        "dx": dx,
        "width": width,
        "gamma": gamma,
    }
def getExpansionConfig():
    """Initial state for an expansion: density (and energy) 1.0 on the
    left half of a width-100 domain, 0.1 on the right half.
    """
    dx, width, gamma = 1, 100, 5 / 3
    initialRho = np.ones(width)
    initialRho[50:] = 0.1  # rarefied right half
    # Adjacent-pair density sums (vectorised; identical values to the
    # original list comprehension).
    summedInitialRho = initialRho[:-1] + initialRho[1:]
    grid = np.zeros(width + 1, dtype=[
        ("position", "float64"),
        ("velocity", "float64"),
    ])
    grid["position"] = np.arange(0, width + 1, dx)
    grid["velocity"] = np.zeros_like(grid["position"])
    gaps = np.zeros(width, dtype=[
        ("volume", "float64"),
        ("viscocity", "float64"),
        ("energy", "float64"),
        ("pressure", "float64"),
    ])
    gaps["volume"] = 1 / initialRho
    gaps["viscocity"] = np.zeros(width)
    gaps["energy"] = 1 * initialRho
    # Ideal gas: p = E * (gamma - 1) / V (inlined from getPressure).
    gaps["pressure"] = gaps["energy"] * (gamma - 1) / gaps["volume"]
    return {
        "grid": grid,
        "gaps": gaps,
        "initialRho": initialRho,
        "summedInitialRho": summedInitialRho,
        "dx": dx,
        "width": width,
        "gamma": gamma,
    }
def getSedovConfig():
    """Initial state for a Sedov-style blast: uniform unit density, with
    the internal energy of the first cell doubled to seed the explosion.
    """
    dx, width, gamma = 1, 100, 5 / 3
    initialRho = np.ones(width)
    # Adjacent-pair density sums (vectorised; identical values to the
    # original list comprehension).
    summedInitialRho = initialRho[:-1] + initialRho[1:]
    grid = np.zeros(width + 1, dtype=[
        ("position", "float64"),
        ("velocity", "float64"),
    ])
    grid["position"] = np.arange(0, width + 1, dx)
    grid["velocity"] = np.zeros_like(grid["position"])
    gaps = np.zeros(width, dtype=[
        ("volume", "float64"),
        ("viscocity", "float64"),
        ("energy", "float64"),
        ("pressure", "float64"),
    ])
    gaps["volume"] = 1 / initialRho
    gaps["viscocity"] = np.zeros(width)
    gaps["energy"] = 1 * initialRho
    gaps["energy"][0] *= 2  # blast energy in the first cell
    # Ideal gas: p = E * (gamma - 1) / V (inlined from getPressure).
    gaps["pressure"] = gaps["energy"] * (gamma - 1) / gaps["volume"]
    return {
        "grid": grid,
        "gaps": gaps,
        "initialRho": initialRho,
        "summedInitialRho": summedInitialRho,
        "dx": dx,
        "width": width,
        "gamma": gamma,
    }
| [
"christopher.peter.bradshaw+github@gmail.com"
] | christopher.peter.bradshaw+github@gmail.com |
9dba178ee6999679708b4581bea8311fc67bbb5d | 3406050d4598df16ad845fc0dd15466b1bbdcd6a | /hw2.py | 30a977646f2534f4221b309158b852bf74f1d4cc | [
"MIT"
] | permissive | ranayukirin/ImageProce_hw2 | e895bee84a0f9d196f7c90804dcf1b36d0ffbc23 | 9f1bbd98d62bb4d4af035de786c15bed6ee7a892 | refs/heads/master | 2020-04-05T13:15:36.538340 | 2018-11-25T10:29:30 | 2018-11-25T10:29:30 | 156,894,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,935 | py | import numpy as np
import matplotlib.image as mpimg # read image
import matplotlib.pyplot as plt # plot image
import tkinter as tk # GUI design
from PIL import Image # used for save image
from numpy.fft import fft2, ifft2, fftshift, ifftshift # fourier transform
# Set information of GUI
window = tk.Tk()
window.title('HW2')
window.geometry('300x450')
# Produce ideal high-pass filter
def ideal_high(shape, cutoff):
    """Ideal high-pass filter mask for a centred 2-D spectrum.

    Entries closer than `cutoff` (Euclidean distance) to the centre
    (floor(rows/2), floor(cols/2)) are 0; all others are 1.

    Vectorised: one broadcasted distance computation replaces the
    original O(rows*cols) Python double loop, with identical output.
    """
    midx = np.floor(shape[0] / 2)
    midy = np.floor(shape[1] / 2)
    x, y = np.ogrid[:shape[0], :shape[1]]
    dist = np.sqrt(np.square(x - midx) + np.square(y - midy))
    h = np.ones(shape)
    h[dist < cutoff] = 0
    return h
# Produce Gaussian high-pass filter
def gau_high(shape, sigma, c):
    """Gaussian high-pass mask: 1 - exp(-c * d^2 / (2 * sigma^2)).

    d is the Euclidean distance from the spectrum centre
    (floor(rows/2), floor(cols/2)); `c` sharpens the roll-off.

    Vectorised: a broadcasted distance grid replaces the original
    O(rows*cols) Python double loop, with identical output.
    """
    midx = np.floor(shape[0] / 2)
    midy = np.floor(shape[1] / 2)
    x, y = np.ogrid[:shape[0], :shape[1]]
    d2 = np.square(x - midx) + np.square(y - midy)
    return 1 - np.exp(-c * (d2 / (2 * sigma * sigma)))
# Produce butterworth high-pass filter
def btw_high(shape, cutoff, order):
    """Butterworth high-pass mask: 1 / (1 + (cutoff / d)^(2 * order)).

    d is the Euclidean distance from the spectrum centre; at the centre
    d == 0 and the transfer value is exactly 0 (DC fully blocked) —
    computed here without the divide-by-zero RuntimeWarning the original
    per-pixel loop emitted.

    Vectorised: replaces the original O(rows*cols) Python double loop
    with identical output.
    """
    midx = np.floor(shape[0] / 2)
    midy = np.floor(shape[1] / 2)
    x, y = np.ogrid[:shape[0], :shape[1]]
    dist = np.sqrt(np.square(x - midx) + np.square(y - midy))
    with np.errstate(divide='ignore'):
        # cutoff / 0 -> inf, and 1 / (1 + inf) -> 0 at the centre pixel.
        h = 1 / (1 + np.power(cutoff / dist, 2 * order))
    return h
# Deblur fish image with wiener filter
# Deblur fish image with wiener filter
def wiener_fish():
    """Deblur '1.jpg' with a Wiener filter (Gaussian blur model) and save
    the result to 'result/1_result(fish).jpg'. Pure side-effect function:
    reads a file, shows a plot, writes a file; returns nothing.
    """
    im_fish = np.float32(mpimg.imread('1.jpg')) / 255
    # Tuning constants: k is the Wiener noise-to-signal term, sigma the
    # assumed Gaussian blur width (hand-tuned for this image).
    k = 0.05
    sigma = 60
    midx = np.floor(im_fish.shape[0] / 2)
    midy = np.floor(im_fish.shape[1] / 2)
    # Use Gaussian low-pass filter as degradation filter
    h = np.empty(shape=(im_fish.shape[0], im_fish.shape[1]))
    for x in range(h.shape[0]):
        for y in range(h.shape[1]):
            h[x, y] = np.exp((-(np.square(x - midx) + np.square(y - midy)) / (2 * sigma * sigma)))
    # Wiener filter W = (1/H) * |H|^2 / (|H|^2 + k).
    w = (1 / h) * np.square(np.abs(h)) / (np.square(np.abs(h)) + k) # wiener filter
    fish_res = np.empty(shape=im_fish.shape)
    # Filtering in frequency domain for each layer of RGB
    for i in range(3):
        fi_shift = fftshift(fft2(im_fish[:, :, i]))
        fish_filted = fi_shift * w
        fish_back = np.real(ifft2(ifftshift(fish_filted)))
        # Rescale each channel independently to the full 0..255 range.
        fish_back = np.uint8(255 * (fish_back - fish_back.min()) / (fish_back.max() - fish_back.min()))
        fish_res[:, :, i] = fish_back
    # Plot and save image
    pltshow(im_fish, np.uint8(fish_res))
    fish_save = Image.fromarray(np.uint8(fish_res), 'RGB')
    fish_save.save('result/1_result(fish).jpg')
# Deblur 2.jpg(word) image with wiener filter
# Deblur 2.jpg(word) image with wiener filter
def wiener_word():
    """Deblur '2.jpg' (motion-blurred text) with a Wiener filter built
    from a linear-motion degradation model; saves the restored image to
    'result/2_result(word).tif'. Side-effect only; returns nothing.
    """
    im_word = np.float32(mpimg.imread('2.jpg')) / 255
    # Set some coefficient of motion filter and wiener filter
    t = 1
    a = 0.0005
    b = -0.0005
    k = 2
    # Use motion filter as degradation filter
    h = np.empty(shape=im_word.shape)
    for x in range(im_word.shape[0]):
        for y in range(im_word.shape[1]):
            temp = (x * a + y * b)
            if temp == 0:
                # Nudge away from 0 to avoid a division by zero below.
                temp = ((x + 1e-10) * (a + 1e-15) + (y + 1e-10) * (b + 1e-15))
            # NOTE(review): the right-hand side is complex but h is a real
            # float array, so the imaginary part is discarded on assignment
            # (numpy emits a ComplexWarning) — confirm this is intended.
            h[x, y] = (t / (np.pi * temp)) * np.sin(np.pi * temp) * np.exp(-(0 + 1j) * np.pi * temp)
    # Wiener filter W = H* / (|H|^2 + k).
    w = np.conj(h) / (np.square(np.abs(h)) + k) # wiener filter
    # Filtering in frequency domain
    ft_word = fft2(im_word)
    wo_shift = fftshift(ft_word)
    wo_filted = wo_shift * w
    wo_back = np.real(ifft2(ifftshift(wo_filted)))
    # Plot and save image
    pltshow(im_word, wo_back)
    wo_back = 255 * (wo_back - wo_back.min()) / (wo_back.max() - wo_back.min())
    wo_save = Image.fromarray(np.uint8(wo_back))
    wo_save.save('result/2_result(word).tif')
# Restore flower image with notch filter
# Restore flower image with notch filter
def notch():
    """Remove periodic noise from '4.png' with a hand-built notch filter
    and save the result to 'result/4_result(flower).tif'.

    The notch locations were chosen by visually inspecting this image's
    power spectrum; the thresholds (200, 140) and the 400..600 centre band
    are image-specific magic numbers.
    """
    im_flower = np.uint8(mpimg.imread('4.png')*255)
    # Doing fft and produce power spectrum
    ft_flower = fft2(im_flower)
    f_shift = fftshift(ft_flower)
    f_spec = np.log(np.abs(f_shift))
    f_spec = np.uint8(255 * (f_spec - f_spec.min()) / (f_spec.max() - f_spec.min()))
    # Zero out bright spectral peaks away from the centre rows (the
    # centre band 400..600 holds the image content and is preserved);
    # each peak is blanked together with a 21x21 neighbourhood of
    # sufficiently bright (> 140) pixels.
    idx = np.argwhere(f_spec > 200)
    for i in range(len(idx)):
        if idx[i, 0] < 400 or idx[i, 0] > 600:
            for x in range(10, -10, -1):
                for y in range(10, -10, -1):
                    if f_spec[idx[i, 0] - x, idx[i, 1] - y] > 140:
                        f_shift[idx[i, 0] - x, idx[i, 1] - y] = 0
    # Back to time domain and plot/save image
    f_shift_back = ifftshift(f_shift)
    flower_back = np.abs(ifft2(f_shift_back))
    flower_back = 255 * (flower_back - flower_back.min()) / (flower_back.max() - flower_back.min())
    pltshow(im_flower, flower_back)
    flower_save = Image.fromarray(np.uint8(flower_back))
    flower_save.save('result/4_result(flower).tif')
# Restore flower image with band reject filter
# Restore flower image with band reject filter
def band_reject():
    """Remove pattern noise from '3.png' with a band-reject filter and
    save the result to 'result/3_result(sea).tif'.

    Bright spectral peaks (> 190 in the normalised log spectrum) whose
    L1 distance from the centre (512, 512) exceeds 100 are zeroed; the
    thresholds are hand-tuned for this particular 1024x1024 image.
    """
    im_sea = np.uint8(mpimg.imread('3.png')*255)
    # Doing fft and produce power spectrum
    ft_sea = fft2(im_sea)
    s_shift = fftshift(ft_sea)
    s_spec = np.log(np.abs(s_shift))
    s_spec = np.uint8(255 * (s_spec - s_spec.min()) / (s_spec.max() - s_spec.min()))
    # By observing power spectrum of image and produce band reject filter to block the pattern noise
    idx = np.argwhere(s_spec > 190)
    for i in range(len(idx)):
        if np.abs(idx[i, 0] - 512) + np.abs(idx[i, 1] - 512) > 100:
            s_shift[idx[i, 0], idx[i, 1]] = 0
    s_shift_back = ifftshift(s_shift)
    sea_back = np.abs(ifft2(s_shift_back))
    sea_back = 255 * (sea_back - sea_back.min()) / (sea_back.max() - sea_back.min())
    # Plot and save image
    pltshow(im_sea, sea_back)
    sea_save = Image.fromarray(np.uint8(sea_back))
    sea_save.save('result/3_result(sea).tif')
# Receive filter name then call the responding function to do homomorphic filtering
# Receive filter name then call the responding function to do homomorphic filtering
def homomor(filt_name, rh, rl):
    """Homomorphic-filter '5.jpg'.

    Args:
        filt_name: 'ideal', 'gaussian' or 'butterworth' — selects the
            high-pass transfer function (cutoff/sigma/order are fixed).
        rh, rl: high- and low-frequency gains; the high-pass mask is
            rescaled into [rl, rh].
    Returns:
        (original_image, filtered_image) as float arrays.
    Raises:
        ValueError: for any other filter name.
    """
    im_street = np.float32(mpimg.imread('5.jpg')) / 255
    # log -> FFT; the +0.01 offset avoids log(0) on black pixels and is
    # subtracted back after the inverse transform.
    ft_street = fft2(np.log(im_street + 0.01))
    st_shift = fftshift(ft_street)
    # Choose filter
    if filt_name == 'ideal':
        filt = ideal_high(im_street.shape, 20)
    elif filt_name == 'gaussian':
        filt = gau_high(im_street.shape, 120, 2)
    elif filt_name == 'butterworth':
        filt = btw_high(im_street.shape, 120, 2)
    else:
        raise ValueError('Wrong filter name!')
    # Implement homomorphic filter on image then return it
    filt = (rh - rl) * filt + rl
    st_res = np.exp(np.real(ifft2(ifftshift(st_shift * filt)))) - 0.01
    return im_street, st_res
# According which button that user click to input the filter name to homomor and do some process
# According which button that user click to input the filter name to homomor and do some process
def homo_choose(name):
    """GUI callback: run homomor() with fixed gains (rh=1.2, rl=0.2),
    display the before/after pair and save the normalised result to
    'result/5_result(<name>).tif'.
    """
    im_ori, im_result = homomor(name, 1.2, 0.2)
    pltshow(im_ori, im_result)
    # Rescale to 0..255 before converting to uint8 for saving.
    im_result = 255 * (im_result - im_result.min()) / (im_result.max() - im_result.min())
    homo_save = Image.fromarray(np.uint8(im_result))
    homo_save.save('result/5_result(' + str(name) + ').tif')
# Plot images with both origin and result image
def pltshow(im_ori, im_result):
    """Show the original and processed images side by side.

    A 3-D result (RGB) is displayed as-is; a 2-D result is shown with the
    gray colormap for both panels.
    """
    extra = [] if np.ndim(im_result) == 3 else ['gray']
    plt.subplot(1, 2, 1)
    plt.imshow(im_ori, *extra)
    plt.title('Origin')
    plt.subplot(1, 2, 2)
    plt.imshow(im_result, *extra)
    plt.title('Result')
    plt.show()
# Set buttons and labels of GUI.
# NOTE(review): tk's .place() returns None, so lb1/btn1/... are all None —
# the assignments are kept only for readability; none of these widgets
# need to be referenced again after placement.
lb1 = tk.Label(window, text='A. Image Restoration ',
               width=19, height=1).place(x=80, y=35)
btn1 = tk.Button(window, text='(a) wiener fish', width=15, height=2,
                 command=wiener_fish).place(x=30, y=70)
btn1_2 = tk.Button(window, text='(a) wiener word', width=15, height=2,
                   command=wiener_word).place(x=150, y=70)
btn2 = tk.Button(window, text='(b) flower', width=15, height=2,
                 command=notch).place(x=90, y=120)
btn3 = tk.Button(window, text='(c) sea', width=15, height=2,
                 command=band_reject).place(x=90, y=170)
lb2 = tk.Label(window, text='B. Homomorphic ',
               width=15, height=1).place(x=90, y=235)
# The lambdas below bind the filter name for homo_choose at call time.
btn4 = tk.Button(window, text='(a) ideal', width=15, height=2,
                 command=lambda: homo_choose('ideal')).place(x=90, y=270)
btn5 = tk.Button(window, text='(b) butterworth', width=15, height=2,
                 command=lambda: homo_choose('butterworth')).place(x=90, y=320)
btn6 = tk.Button(window, text='(c) gaussian', width=15, height=2,
                 command=lambda: homo_choose('gaussian')).place(x=90, y=370)
# Start GUI (blocks until the window is closed).
window.mainloop()
| [
"ranayukirin1991@gmail.com"
] | ranayukirin1991@gmail.com |
8eba694257fb2cd1c5750a2505491f7ba4288376 | 551de475a2d0933858afb8821a7688787b0d1233 | /pollster/polls/admin.py | 406f1db5771bdeda4fdcdcf7489080d2d67b8845 | [] | no_license | m-sulecki/POLLSTER_PROJECT | 5417c3b2b8fb95fc63f27304f1d2aef64abe0585 | 40101ff49951e30d49739a4901ca79a92c31525e | refs/heads/master | 2022-12-17T22:30:42.412188 | 2020-09-14T16:23:40 | 2020-09-14T16:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from django.contrib import admin
from .models import Question, Choice
class ChoiceInline(admin.TabularInline):
    """Inline editor for Choice rows on the Question admin page."""
    model = Choice
    extra = 3  # number of blank choice forms shown by default
class QuestionAdmin(admin.ModelAdmin):
    """Admin page for Question: question text first, publication date in a
    collapsible section, with inline choice editing."""
    fieldsets = [(None, {'fields': ['question_text']}),
                 ('Date Information', {'fields': ['pub_date'], 'classes': ['collapse']}),]
    inlines = [ChoiceInline]
# Choice is edited inline on Question, so only Question is registered
# (with its customised admin class).
admin.site.register(Question, QuestionAdmin)
| [
"msulek59@hotmail.com"
] | msulek59@hotmail.com |
40b4fc7442a3dca396d30cd384a4df70fbca793d | a6d8465aed280c36fb7129e1fa762535bae19941 | /embroidery365/builder/migrations/0015_auto_20171107_1318.py | e8fb24e2d785b3b21a4799b1ab238de547240bcb | [] | no_license | rahuezo/365digitizing_and_embroidery | c61c53f567e73163a67d3fd568a20551a3681ccd | 41a22b6ff8bd83238219f2d34ce13b5a8ef9bb57 | refs/heads/master | 2020-09-02T11:59:07.702947 | 2017-11-11T02:40:01 | 2017-11-11T02:40:01 | 98,377,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds a default value and
    blank=True to Order.extra_details. Do not edit the operations by
    hand — create a new migration for further schema changes."""
    dependencies = [
        ('builder', '0014_order_extra_details'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='extra_details',
            field=models.TextField(blank=True, default='No Specifications Included'),
        ),
    ]
| [
"rahuezo@ucdavis.edu"
] | rahuezo@ucdavis.edu |
2ef93f787a9d83908066ad2e141bcdc977dc2348 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma99.py | e33f306588051a905793954fdd141d45e8a365b0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites every CZPowGate operation as a plain
    CZ followed by two X layers on the same qubits.

    NOTE(review): the two back-to-back X layers cancel (X·X = I), so the
    net effect is replacing CZ**t with CZ — presumably intentional for
    this generated benchmark; confirm if reused elsewhere.
    """
    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Returns None implicitly for every other operation, which tells
        # the PointOptimizer to leave it unchanged.
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the benchmark circuit on `input_qubit` (a list of qubits).

    The '# number=N' tags are markers from the circuit generator. Note the
    gate sequence is self-cancelling (each SWAP and CNOT appears twice in
    a row), leaving Hadamards plus a final measurement — typical for a
    differential-testing benchmark. `n` is currently unused.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=7
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
    # circuit end
    # Measure all qubits into the 'result' key.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Join measurement bits into a '0'/'1' string, e.g. [1, 0, 1] -> '101'."""
    return "".join(str(int(bit)) for bit in bits)
if __name__ == '__main__':
    # Build a 4-qubit circuit, compile it for the Sycamore sqrt-iswap
    # gate set, sample it 2000 times and dump the histogram plus the
    # circuit itself to a CSV file.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Fold each repetition's measured bits into a '0'/'1' string key.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # NOTE(review): plain open/close — the file stays open if a print
    # raises; a `with` block would be safer.
    writefile = open("../data/startCirq_pragma99.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
32174dd1625bad81be9d16899386c2ce47c90961 | f4288d0d199e97a9ab43612f1e70684e07d2b678 | /server.py | 9b98d3424d1c629ef778de6d59532aa18b74c782 | [] | no_license | ahmedlahrizi/rasp_temp | 480a791d8f4bcac9212df4b48b6519b816cbdd25 | 4fda739cf94dc092f55200add2df74668ce6c6bc | refs/heads/main | 2023-06-14T01:54:24.656690 | 2021-07-20T08:46:10 | 2021-07-20T08:46:10 | 387,139,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,047 | py | from flask import Flask, render_template_string
app = Flask(__name__)
@app.route("/")
def index():
temp = "20°"
hot = float(temp[:-1]) > 37.5
classs = "hot" if hot else "cold"
return render_template_string("""
<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Temp</title>
<script async defer src="https://buttons.github.io/buttons.js"></script>
</head>
<body>
<header>
<nav>
<h5>Température du rasperry</h5>
<img src="https://upload.wikimedia.org/wikipedia/fr/thumb/3/3b/Raspberry_Pi_logo.svg/1200px-Raspberry_Pi_logo.svg.png"
alt="rasperry">
<a class="github-button" href="https://github.com/ahmedlahrizi/rasp_temp"
data-icon="octicon-star" data-size="large" data-show-count="true"
aria-label="Star ahmedlahrizi/rasp_temp on GitHub">Star</a>
</nav>
</header>
<div class="info">
<h1>La temperature du rasperry est de:
<span class={{ classs }}>
{{ temp }}°
</span>
</h1>
</div>
{% if hot %}
<img src="https://emoji.gg/assets/emoji/hot.png" alt="hot">
{% else %}
<img src="https://emoji.gg/assets/emoji/7655_freezing.png" alt="cold">
{% endif %}
<p>Merci de ne pas envoyer trop de requêtes au serveur (il va cramer)</p>
<footer>
<p>Ahmed LAHRIZI</p>
</footer>
</body>
<style>
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@100&display=swap');
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300&display=swap');
@import url('https://fonts.googleapis.com/css2?family=Work+Sans:wght@500&display=swap');
* {
box-sizing: border-box;
font-family: "Roboto", "Lucida Grande", "DejaVu Sans", "Bitstream Vera Sans", Verdana, Arial, sans-serif;
margin: 0;
padding: 0;
}
html {
background-color: #26293d;
}
header {
background-color: #1E1E2D;
width: 100%;
}
nav {
width: min(70rem, 100%);
margin: auto;
font-size: 3rem;
display: flex;
align-items: center;
gap: 3rem;
justify-content: start;
height: 7rem;
}
header *:last-child {
margin-left: auto;
}
nav > img {
width: 4rem;
height: 4rem;
}
nav > h5 {
color: #FFF;
}
body * {
color: #FFFFFFB2;
}
body {
display: flex;
flex-flow: column nowrap;
gap: 3rem;
}
.info {
font-weight: 100;
color: #FFF;
align-self: center;
margin-top: 2rem;
}
body > img {
width: 10rem;
align-self: end;
margin-right: 3rem;
}
.cold {
color: #90c5ff;
}
.hot {
color: #de3f6a;
}
footer * {
color: #5dbbba;
font-family: 'Work Sans', sans-serif;
font-weight: bold;
font-size: 2rem;
margin-left: 2rem;
}
body > p {
font-size: 2rem;
color: #FFFFFF;
align-self: center;
}
@media all and (max-width: 70.6875rem ) {
:root {
font-size: .8rem;
}
}
@media all and (max-width: 50rem ) {
:root {
font-size: .7rem;
}
}
@media all and (max-width: 41.875rem ) {
:root {
font-size: .4rem;
}
nav *:first-child {
margin-left: 5px;
}
}
</style>
</html>
""",
temp=temp,
hot=hot,
classs=classs)
| [
"ahmed.lahrizi05@gmail.com"
] | ahmed.lahrizi05@gmail.com |
c5686cd8bf048386474ab26abd3f7a030b8c3953 | e64d09d81ebaaa22971f87d320e8da577ae5cdf8 | /NBB/Scripts/it-script.py | 65c803c67ad33407231ad42d643b8c390f991407 | [] | no_license | Rafael-Prochnow/NBB | 95a96eecc110deec7430325e10d6d02269d996ff | 2af1d6d4c1f17c64cd96f49aa04cdc5cfd1576e7 | refs/heads/master | 2023-07-15T11:22:39.479067 | 2021-08-28T17:46:08 | 2021-08-28T17:46:08 | 267,161,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | #!"c:\users\elen- pc\pycharmprojects\untitled1\nbb\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'it==1.0.0','console_scripts','it'
# Auto-generated console-script wrapper: resolves the 'it' entry point of the
# installed 'it==1.0.0' distribution and invokes it as the program's main().
import re
import sys

# for compatibility with easy_install; see #2198
__requires__ = 'it==1.0.0'
try:
    # Python 3.8+: entry-point metadata is available in the standard library.
    from importlib.metadata import distribution
except ImportError:
    try:
        # Backport package for older interpreters.
        from importlib_metadata import distribution
    except ImportError:
        # Legacy fallback: setuptools' runtime API.
        from pkg_resources import load_entry_point


def importlib_load_entry_point(spec, group, name):
    # Resolve a "dist==version" spec to the loaded entry-point callable,
    # mirroring the signature of pkg_resources.load_entry_point.
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    # next() raises StopIteration if no matching entry point exists.
    return next(matches).load()


# Keep the pkg_resources implementation when it was imported above;
# otherwise fall back to the importlib-based resolver defined here.
globals().setdefault('load_entry_point', importlib_load_entry_point)


if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so the program sees its real name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('it==1.0.0', 'console_scripts', 'it')())
| [
"rafaprochnow31@gmail.com"
] | rafaprochnow31@gmail.com |
410d888700696f56ad4f03ddb9d930fb246715fa | 1b3a632a8bf1d7d909e3b829d0ba9992aaa8985f | /release/1.3/default_settings.py | 07fe04a1cfbc6ed4d6fedafc4d5319decf24ed2f | [] | no_license | fmierlo/django-default-settings | 07d9d2e3a0c36a82af63e7b9cc2571c76142253d | 6e3c506b6cde60fcbea1ac5cb33a69a500416b45 | refs/heads/master | 2021-01-17T17:31:37.499211 | 2016-06-16T09:10:26 | 2016-06-16T09:10:26 | 61,214,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,548 | py | ABSOLUTE_URL_OVERRIDES = {}
ADMINS = ()
ADMIN_FOR = ()
ADMIN_MEDIA_PREFIX = '/static/admin/'
ALLOWED_INCLUDE_ROOTS = ()
APPEND_SLASH = True
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
BANNED_IPS = ()
CACHES = {}
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
COMMENTS_ALLOW_PROFANITIES = False
COMMENTS_BANNED_USERS_GROUP = None
COMMENTS_FIRST_FEW = 0
COMMENTS_MODERATORS_GROUP = None
COMMENTS_SKETCHY_USERS_GROUP = None
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
DATABASES = {}
DATABASE_ENGINE = ''
DATABASE_HOST = ''
DATABASE_NAME = ''
DATABASE_OPTIONS = {}
DATABASE_PASSWORD = ''
DATABASE_PORT = ''
DATABASE_ROUTERS = []
DATABASE_USER = ''
DATETIME_FORMAT = 'N j, Y, P'
DATETIME_INPUT_FORMATS = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%m/%d/%Y %H:%M:%S', '%m/%d/%Y %H:%M', '%m/%d/%Y', '%m/%d/%y %H:%M:%S', '%m/%d/%y %H:%M', '%m/%d/%y')
DATE_FORMAT = 'N j, Y'
DATE_INPUT_FORMATS = ('%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', '%b %d %Y', '%b %d, %Y', '%d %b %Y', '%d %b, %Y', '%B %d %Y', '%B %d, %Y', '%d %B %Y', '%d %B, %Y')
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
DECIMAL_SEPARATOR = '.'
DEFAULT_CHARSET = 'utf-8'
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
DEFAULT_INDEX_TABLESPACE = ''
DEFAULT_TABLESPACE = ''
DISALLOWED_USER_AGENTS = ()
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_SUBJECT_PREFIX = '[Django] '
EMAIL_USE_TLS = False
FILE_CHARSET = 'utf-8'
FILE_UPLOAD_HANDLERS = ('django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler')
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440
FILE_UPLOAD_PERMISSIONS = None
FILE_UPLOAD_TEMP_DIR = None
FIRST_DAY_OF_WEEK = 0
FIXTURE_DIRS = ()
FORCE_SCRIPT_NAME = None
FORMAT_MODULE_PATH = None
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
INSTALLED_APPS = []
INTERNAL_IPS = ()
LANGUAGES = (('ar', 'Arabic'), ('az', 'Azerbaijani'), ('bg', 'Bulgarian'), ('bn', 'Bengali'), ('bs', 'Bosnian'), ('ca', 'Catalan'), ('cs', 'Czech'), ('cy', 'Welsh'), ('da', 'Danish'), ('de', 'German'), ('el', 'Greek'), ('en', 'English'), ('en-gb', 'British English'), ('es', 'Spanish'), ('es-ar', 'Argentinian Spanish'), ('es-mx', 'Mexican Spanish'), ('es-ni', 'Nicaraguan Spanish'), ('et', 'Estonian'), ('eu', 'Basque'), ('fa', 'Persian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy-nl', 'Frisian'), ('ga', 'Irish'), ('gl', 'Galician'), ('he', 'Hebrew'), ('hi', 'Hindi'), ('hr', 'Croatian'), ('hu', 'Hungarian'), ('id', 'Indonesian'), ('is', 'Icelandic'), ('it', 'Italian'), ('ja', 'Japanese'), ('ka', 'Georgian'), ('km', 'Khmer'), ('kn', 'Kannada'), ('ko', 'Korean'), ('lt', 'Lithuanian'), ('lv', 'Latvian'), ('mk', 'Macedonian'), ('ml', 'Malayalam'), ('mn', 'Mongolian'), ('nl', 'Dutch'), ('no', 'Norwegian'), ('nb', 'Norwegian Bokmal'), ('nn', 'Norwegian Nynorsk'), ('pa', 'Punjabi'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('pt-br', 'Brazilian Portuguese'), ('ro', 'Romanian'), ('ru', 'Russian'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('sq', 'Albanian'), ('sr', 'Serbian'), ('sr-latn', 'Serbian Latin'), ('sv', 'Swedish'), ('ta', 'Tamil'), ('te', 'Telugu'), ('th', 'Thai'), ('tr', 'Turkish'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('vi', 'Vietnamese'), ('zh-cn', 'Simplified Chinese'), ('zh-tw', 'Traditional Chinese'))
LANGUAGES_BIDI = ('he', 'ar', 'fa')
LANGUAGE_CODE = 'en-us'
LANGUAGE_COOKIE_NAME = 'django_language'
LOCALE_PATHS = ()
LOGGING = {'loggers': {'django.request': {'level': 'ERROR', 'propagate': True, 'handlers': ['mail_admins']}}, 'version': 1, 'disable_existing_loggers': False, 'handlers': {'mail_admins': {'class': 'django.utils.log.AdminEmailHandler', 'level': 'ERROR'}}}
LOGGING_CONFIG = 'django.utils.log.dictConfig'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
MANAGERS = ()
MEDIA_ROOT = ''
MEDIA_URL = ''
MESSAGE_STORAGE = 'django.contrib.messages.storage.user_messages.LegacyFallbackStorage'
MIDDLEWARE_CLASSES = ('django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware')
MONTH_DAY_FORMAT = 'F j'
NUMBER_GROUPING = 0
PASSWORD_RESET_TIMEOUT_DAYS = 3
PREPEND_WWW = False
PROFANITIES_LIST = ()
SECRET_KEY = '01234567890123456789012345678901234567890123456789'
SEND_BROKEN_LINK_EMAILS = False
SERVER_EMAIL = 'root@localhost'
SESSION_COOKIE_AGE = 1209600
SESSION_COOKIE_DOMAIN = None
SESSION_COOKIE_HTTPONLY = False
SESSION_COOKIE_NAME = 'sessionid'
SESSION_COOKIE_PATH = '/'
SESSION_COOKIE_SECURE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_FILE_PATH = None
SESSION_SAVE_EVERY_REQUEST = False
SETTINGS_MODULE = 'project.defaults' ###
SHORT_DATETIME_FORMAT = 'm/d/Y P'
SHORT_DATE_FORMAT = 'm/d/Y'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = ('django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder')
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
STATIC_ROOT = ''
STATIC_URL = None
TEMPLATE_CONTEXT_PROCESSORS = ('django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.contrib.messages.context_processors.messages')
TEMPLATE_DEBUG = False
TEMPLATE_DIRS = ()
TEMPLATE_LOADERS = ('django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader')
TEMPLATE_STRING_IF_INVALID = ''
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
TEST_DATABASE_NAME = None
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
THOUSAND_SEPARATOR = ','
TIME_FORMAT = 'P'
TIME_INPUT_FORMATS = ('%H:%M:%S', '%H:%M')
TIME_ZONE = 'America/Chicago'
TRANSACTIONS_MANAGED = False
URL_VALIDATOR_USER_AGENT = 'Django/1.3 (http://www.djangoproject.com)'
USE_ETAGS = False
USE_I18N = True
USE_L10N = False
USE_THOUSAND_SEPARATOR = False
YEAR_MONTH_FORMAT = 'F Y'
| [
"fabio@n42.org"
] | fabio@n42.org |
ac2288c1dbb495d0e828a471d90faf6e0ddd521d | 9e5528476fb616d45a12f516f404858a176ccee5 | /problem_set_4/ps4a.py | 8f4b6a1d2c567d2755cad6f52c64b3dde2defa8d | [
"Giftware"
] | permissive | reniass/MIT-Introduction-to-Computer-Science-and-Programming-Using-Python | 610a16b07dd28d54ba056a7c65aac0f9868e68a8 | 9069645b39cf8c2e65a860e6c50939338693f33d | refs/heads/master | 2022-09-25T05:24:15.868548 | 2020-06-02T17:58:21 | 2020-06-02T17:58:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,639 | py | # The 6.00 Word Game
import random
import string
# Letter pools used when dealing random hands.
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Number of letters dealt into each hand.
HAND_SIZE = 7

# Scrabble point value of each lowercase letter.
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)

# Dictionary file: one valid lowercase word per line.
WORDLIST_FILENAME = "words.txt"
def loadWords():
    """
    Return a list of valid words read from WORDLIST_FILENAME.

    Words are strings of lowercase letters, one per line in the file.
    Depending on the size of the word list, this function may
    take a while to finish.

    returns: list of lowercase word strings
    """
    print("Loading word list from file...")
    wordList = []
    # 'with' guarantees the handle is closed even if reading fails
    # (the original opened the file and never closed it).
    with open(WORDLIST_FILENAME, 'r') as inFile:
        for line in inFile:
            wordList.append(line.strip().lower())
    print(" ", len(wordList), "words loaded.")
    return wordList
def getFrequencyDict(sequence):
    """
    Count how many times each element occurs in *sequence*.

    sequence: string or list
    return: dictionary mapping element -> occurrence count (int)
    """
    counts = {}
    for item in sequence:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
    """
    Return the score for a word. Assumes the word is a valid word.

    The score is the sum of the Scrabble point values of the letters
    (per SCRABBLE_LETTER_VALUES), multiplied by the word length, plus a
    50-point bonus when the word uses all n letters of the hand.

    word: string (lowercase letters)
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    returns: int >= 0
    """
    letter_points = sum(SCRABBLE_LETTER_VALUES[ch] for ch in word)
    total = letter_points * len(word)
    if len(word) == n:
        total += 50
    return total
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
    """
    Print the letters currently in the hand on one line.

    Each letter appears once per remaining count, separated by single
    spaces; the order of the letters is unimportant. For example
    {'a':1, 'x':2, 'l':3, 'e':1} prints something like: a x x l l l e

    hand: dictionary (string -> int)
    """
    for letter, count in hand.items():
        # Emit "x " once per remaining occurrence, identical to printing
        # each occurrence with end=" ".
        print((letter + " ") * count, end="")
    print()  # terminate the line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
    """
    Deal a random hand containing n lowercase letters.

    At least n // 3 of the letters are vowels; the rest are consonants.
    Hands are represented as dictionaries: keys are letters, values are
    the number of times the letter occurs in the hand.

    n: int >= 0
    returns: dictionary (string -> int)
    """
    numVowels = n // 3
    hand = {}
    for position in range(n):
        # The first numVowels draws come from the vowel pool, the
        # remaining draws from the consonant pool.
        pool = VOWELS if position < numVowels else CONSONANTS
        letter = pool[random.randrange(0, len(pool))]
        hand[letter] = hand.get(letter, 0) + 1
    return hand
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
    """
    Return a new hand with the letters of *word* used up.

    Assumes the hand contains every letter of the word at least as many
    times as it appears in the word. Has no side effects: the input
    hand is not modified.

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    remaining = dict(hand)
    for ch in word:
        count = remaining[ch] - 1
        if count:
            remaining[ch] = count
        else:
            # Drop letters whose count reaches zero.
            del remaining[ch]
    return remaining
#
# Problem #3: Test word validity
#
def isValidWord(word, hand, wordList):
    """
    Return True if *word* is in *wordList* and is entirely composed of
    letters in *hand* (respecting letter multiplicities).

    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if word not in wordList:
        return False
    # Decrement a private copy of the hand so the caller's hand is
    # untouched; the original's manual match counter + break is replaced
    # by a direct early return on the first missing letter.
    available = dict(hand)
    for ch in word:
        if available.get(ch, 0) == 0:
            return False
        available[ch] -= 1
    return True
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
    """
    Return the length (number of letters) in the current hand.

    hand: dictionary (string -> int)
    returns: integer
    """
    return sum(hand.values())
def playHand(hand, wordList, n):
    """
    Let the user play out a single hand interactively.

    * The hand is displayed and the user inputs a word, or "." to stop.
    * Invalid words are rejected with a message.
    * Valid words score via getWordScore() and use up letters.
    * The hand ends when the letters run out or the user enters ".",
      at which point the total score is printed.

    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    """
    total_points = 0
    # Loop directly on the remaining letter count instead of the
    # original's artificial "1" sentinel.
    while calculateHandlen(hand) > 0:
        print("Current hand: ", end='')
        displayHand(hand)
        word = input("Enter word, or a '.' to indicate that you are finished: ")
        if word == ".":
            # The original printed this same message in two identical
            # branches; one print suffices.
            print("Goodbye! Total score: " + str(total_points) + " points.")
            return
        if not isValidWord(word, hand, wordList):
            print("Invalid word, please try again. \n ")
            continue
        # Compute the score once (the original called getWordScore twice).
        earned = getWordScore(word, n)
        total_points += earned
        print("'" + str(word) + "'" + " earned " + str(earned) + ". Total: " + str(total_points) + " points." )
        hand = updateHand(hand, word)
    print("Run out of letters. Total score: " + str(total_points) + " points.")
#
# Problem #5: Playing a game
#
def playGame(wordList):
    """
    Allow the user to play an arbitrary number of hands.

    1) Asks the user to input 'n' or 'r' or 'e'.
       * 'n' -- deal and play a new (random) hand.
       * 'r' -- play the last dealt hand again.
       * 'e' -- exit the game.
       * Anything else prints an error message.
    2) When done playing the hand, repeat from step 1.

    wordList: list of lowercase strings
    """
    # Track only the most recently dealt hand; the original appended to
    # an ever-growing list on every replay and indexed it back out.
    # (playHand does not mutate the hand dict, so it can be replayed.)
    lastHand = None
    while True:
        command = input("Enter n to deal a new hand, r to replay the last hand, or e to end game: ")
        if command == "n":
            lastHand = dealHand(HAND_SIZE)
            playHand(lastHand, wordList, HAND_SIZE)
        elif command == "r":
            if lastHand is None:
                print("You have not played a hand yet. Please play a new hand first!")
            else:
                playHand(lastHand, wordList, HAND_SIZE)
        elif command == "e":
            return
        else:
            print("Invalid command.")
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
    # Build the word list once, then run the interactive game loop.
    wordList = loadWords()
    playGame(wordList)
| [
"renias747@gmail.com"
] | renias747@gmail.com |
fe1d56388958e31ab2b394a4a508e40a320faf3b | e9a0380a7c258b785fb44d81c938fd4b85c961e8 | /articles/migrations/0001_initial.py | c397bbfc1efc28b265b1ca76cbde7acc6c0bec8d | [] | no_license | awesome-academy/simple_api_drf | 9cb92769022795fe74284eddbf729983d8625e04 | b889425df0285153f954502093cf5b7bdb7c0d4e | refs/heads/master | 2023-05-08T16:54:33.026359 | 2021-05-23T14:23:56 | 2021-05-28T13:52:43 | 370,073,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # Generated by Django 3.2.1 on 2021-05-22 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the Article table."""

    # First migration of the app, so there are no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('author', models.CharField(max_length=100)),
                # Stamped automatically when the row is first saved.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"do.thi.diem.thao@sun-asterisk.com"
] | do.thi.diem.thao@sun-asterisk.com |
b723760ee19970314f9f76ce4761b88748adc393 | d90af0def0e29ebaebcf986399fcee65e1e2916c | /python/PDB/HSExposure.py | 779b719cece4c85738eaaab62c3ecb814a685d26 | [
"LicenseRef-scancode-biopython"
] | permissive | Zaiyong/csrosetta | 2fdbbdd7da24ce971f7f2297a7cd14723cdd59d6 | 539c60664dba3972062002ff4e636c7f029927cb | refs/heads/master | 2020-12-25T15:18:39.274689 | 2020-02-25T09:15:35 | 2020-02-25T09:15:35 | 65,408,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,412 | py | # Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Half-sphere exposure and coordination number calculation."""
import warnings
from math import pi
from PDB.AbstractPropertyMap import AbstractPropertyMap
from PDB.PDBParser import PDBParser
from PDB.Polypeptide import CaPPBuilder, is_aa
from PDB.Vector import rotaxis
class _AbstractHSExposure(AbstractPropertyMap):
    """
    Abstract class to calculate Half-Sphere Exposure (HSE).

    The HSE can be calculated based on the CA-CB vector, or the pseudo CB-CA
    vector based on three consecutive CA atoms. This is done by two separate
    subclasses (which provide _get_cb).
    """
    def __init__(self, model, radius, offset, hse_up_key, hse_down_key,
            angle_key=None):
        """
        @param model: model
        @type model: L{Model}

        @param radius: HSE radius
        @type radius: float

        @param offset: number of flanking residues that are ignored in the calculation
            of the number of neighbors
        @type offset: int

        @param hse_up_key: key used to store HSEup in the entity.xtra attribute
        @type hse_up_key: string

        @param hse_down_key: key used to store HSEdown in the entity.xtra attribute
        @type hse_down_key: string

        @param angle_key: key used to store the angle between CA-CB and CA-pCB in
            the entity.xtra attribute
        @type angle_key: string
        """
        assert(offset>=0)
        # For PyMOL visualization
        self.ca_cb_list=[]
        ppb=CaPPBuilder()
        ppl=ppb.build_peptides(model)
        hse_map={}
        hse_list=[]
        hse_keys=[]
        for pp1 in ppl:
            for i in range(0, len(pp1)):
                # r1/r3 are the chain neighbors of r2; None at chain ends.
                if i==0:
                    r1=None
                else:
                    r1=pp1[i-1]
                r2=pp1[i]
                if i==len(pp1)-1:
                    r3=None
                else:
                    r3=pp1[i+1]
                # This method is provided by the subclasses to calculate HSE
                result=self._get_cb(r1, r2, r3)
                if result is None:
                    # Missing atoms, or i==0, or i==len(pp1)-1
                    continue
                pcb, angle=result
                hse_u=0
                hse_d=0
                ca2=r2['CA'].get_vector()
                # Count CA neighbors inside the sphere of the given radius,
                # split into the two hemispheres defined by the (pseudo-)CB
                # direction.
                for pp2 in ppl:
                    for j in range(0, len(pp2)):
                        if pp1 is pp2 and abs(i-j)<=offset:
                            # neighboring residues in the chain are ignored
                            continue
                        ro=pp2[j]
                        if not is_aa(ro) or not ro.has_id('CA'):
                            continue
                        cao=ro['CA'].get_vector()
                        d=(cao-ca2)
                        if d.norm()<radius:
                            # Within the CB half-sphere -> HSE-up,
                            # otherwise -> HSE-down.
                            if d.angle(pcb)<(pi/2):
                                hse_u+=1
                            else:
                                hse_d+=1
                res_id=r2.get_id()
                chain_id=r2.get_parent().get_id()
                # Fill the 3 data structures
                hse_map[(chain_id, res_id)]=(hse_u, hse_d, angle)
                hse_list.append((r2, (hse_u, hse_d, angle)))
                hse_keys.append((chain_id, res_id))
                # Add to xtra
                r2.xtra[hse_up_key]=hse_u
                r2.xtra[hse_down_key]=hse_d
                if angle_key:
                    r2.xtra[angle_key]=angle
        AbstractPropertyMap.__init__(self, hse_map, hse_keys, hse_list)

    def _get_cb(self, r1, r2, r3):
        """This method is provided by the subclasses to calculate HSE."""
        return NotImplemented

    def _get_gly_cb_vector(self, residue):
        """
        Return a pseudo CB vector for a Gly residue (which has no CB atom).
        The pseudoCB vector is centered at the origin.

        CB coord=N coord rotated over -120 degrees
        along the CA-C axis.
        """
        try:
            n_v=residue["N"].get_vector()
            c_v=residue["C"].get_vector()
            ca_v=residue["CA"].get_vector()
        except:
            # Backbone atom missing: no pseudo-CB can be built.
            return None
        # center at origin
        n_v=n_v-ca_v
        c_v=c_v-ca_v
        # rotation around c-ca over -120 deg
        rot=rotaxis(-pi*120.0/180.0, c_v)
        cb_at_origin_v=n_v.left_multiply(rot)
        # move back to ca position
        cb_v=cb_at_origin_v+ca_v
        # This is for PyMol visualization
        self.ca_cb_list.append((ca_v, cb_v))
        return cb_at_origin_v
class HSExposureCA(_AbstractHSExposure):
    """
    Class to calculate HSE based on the approximate CA-CB vectors,
    using three consecutive CA positions.
    """
    def __init__(self, model, radius=12, offset=0):
        """
        @param model: the model that contains the residues
        @type model: L{Model}

        @param radius: radius of the sphere (centred at the CA atom)
        @type radius: float

        @param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
        @type offset: int
        """
        _AbstractHSExposure.__init__(self, model, radius, offset,
                'EXP_HSE_A_U', 'EXP_HSE_A_D', 'EXP_CB_PCB_ANGLE')

    def _get_cb(self, r1, r2, r3):
        """
        Calculate the approximate CA-CB direction for a central
        CA atom based on the two flanking CA positions, and the angle
        with the real CA-CB vector.

        The CA-CB vector is centered at the origin.

        @param r1, r2, r3: three consecutive residues
        @type r1, r2, r3: L{Residue}
        """
        if r1 is None or r3 is None:
            # Chain terminus: no flanking CA pair available.
            return None
        try:
            ca1=r1['CA'].get_vector()
            ca2=r2['CA'].get_vector()
            ca3=r3['CA'].get_vector()
        except:
            # A CA atom is missing in one of the three residues.
            return None
        # center
        d1=ca2-ca1
        d3=ca2-ca3
        d1.normalize()
        d3.normalize()
        # bisection: the pseudo-CB direction bisects the two CA-CA bonds
        b=(d1+d3)
        b.normalize()
        # Add to ca_cb_list for drawing
        self.ca_cb_list.append((ca2, b+ca2))
        if r2.has_id('CB'):
            # Angle between the real CA-CB vector and the pseudo direction.
            cb=r2['CB'].get_vector()
            cb_ca=cb-ca2
            cb_ca.normalize()
            angle=cb_ca.angle(b)
        elif r2.get_resname()=='GLY':
            # Gly has no CB; compare against the reconstructed pseudo-CB.
            cb_ca=self._get_gly_cb_vector(r2)
            if cb_ca is None:
                angle=None
            else:
                angle=cb_ca.angle(b)
        else:
            angle=None
        # vector b is centered at the origin!
        return b, angle

    def pcb_vectors_pymol(self, filename="hs_exp.py"):
        """
        Write a PyMol script that visualizes the pseudo CB-CA directions
        at the CA coordinates.

        @param filename: the name of the pymol script file
        @type filename: string
        """
        if len(self.ca_cb_list)==0:
            warnings.warn("Nothing to draw.", RuntimeWarning)
            return
        # NOTE(review): the handle is closed explicitly at the end; a
        # 'with' block would be safer against exceptions mid-write.
        fp=open(filename, "w")
        fp.write("from pymol.cgo import *\n")
        fp.write("from pymol import cmd\n")
        fp.write("obj=[\n")
        fp.write("BEGIN, LINES,\n")
        fp.write("COLOR, %.2f, %.2f, %.2f,\n" % (1.0, 1.0, 1.0))
        for (ca, cb) in self.ca_cb_list:
            x,y,z=ca.get_array()
            fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x,y,z))
            x,y,z=cb.get_array()
            fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x,y,z))
        fp.write("END]\n")
        fp.write("cmd.load_cgo(obj, 'HS')\n")
        fp.close()
class HSExposureCB(_AbstractHSExposure):
    """
    Class to calculate HSE based on the real CA-CB vectors.
    """
    def __init__(self, model, radius=12, offset=0):
        """
        @param model: the model that contains the residues
        @type model: L{Model}

        @param radius: radius of the sphere (centred at the CA atom)
        @type radius: float

        @param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
        @type offset: int
        """
        _AbstractHSExposure.__init__(self, model, radius, offset,
                'EXP_HSE_B_U', 'EXP_HSE_B_D')

    def _get_cb(self, r1, r2, r3):
        """
        Method to calculate CB-CA vector.

        @param r1, r2, r3: three consecutive residues (only r2 is used)
        @type r1, r2, r3: L{Residue}
        """
        if r2.get_resname()=='GLY':
            # Gly has no CB atom; reconstruct a pseudo-CB from the backbone.
            return self._get_gly_cb_vector(r2), 0.0
        else:
            if r2.has_id('CB') and r2.has_id('CA'):
                vcb=r2['CB'].get_vector()
                vca=r2['CA'].get_vector()
                return (vcb-vca), 0.0
        # CA or CB missing: no direction can be computed for this residue.
        return None
class ExposureCN(AbstractPropertyMap):
    """Coordination number: count of CA atoms within a sphere of each CA."""

    def __init__(self, model, radius=12.0, offset=0):
        """
        A residue's exposure is defined as the number of CA atoms around
        that residues CA atom. A dictionary is returned that uses a L{Residue}
        object as key, and the residue exposure as corresponding value.

        @param model: the model that contains the residues
        @type model: L{Model}

        @param radius: radius of the sphere (centred at the CA atom)
        @type radius: float

        @param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
        @type offset: int
        """
        assert(offset>=0)
        ppb=CaPPBuilder()
        ppl=ppb.build_peptides(model)
        fs_map={}
        fs_list=[]
        fs_keys=[]
        for pp1 in ppl:
            for i in range(0, len(pp1)):
                fs=0
                r1=pp1[i]
                if not is_aa(r1) or not r1.has_id('CA'):
                    continue
                ca1=r1['CA']
                for pp2 in ppl:
                    for j in range(0, len(pp2)):
                        if pp1 is pp2 and abs(i-j)<=offset:
                            # nearby residues in the same chain are ignored
                            continue
                        r2=pp2[j]
                        if not is_aa(r2) or not r2.has_id('CA'):
                            continue
                        ca2=r2['CA']
                        # Atom subtraction is compared directly to the
                        # radius here, i.e. it behaves as a scalar distance.
                        d=(ca2-ca1)
                        if d<radius:
                            fs+=1
                res_id=r1.get_id()
                chain_id=r1.get_parent().get_id()
                # Fill the 3 data structures
                fs_map[(chain_id, res_id)]=fs
                fs_list.append((r1, fs))
                fs_keys.append((chain_id, res_id))
                # Add to xtra
                r1.xtra['EXP_CN']=fs
        AbstractPropertyMap.__init__(self, fs_map, fs_keys, fs_list)
# Demo driver (Python 2 syntax: bare "print" statements).
# Usage: python HSExposure.py <pdb file>
if __name__=="__main__":
    import sys

    p=PDBParser()
    s=p.get_structure('X', sys.argv[1])
    model=s[0]

    # Neighbor sphere radius
    RADIUS=13.0
    OFFSET=0

    # Print the three exposure measures for every residue.
    hse=HSExposureCA(model, radius=RADIUS, offset=OFFSET)
    for l in hse:
        print l
    print

    hse=HSExposureCB(model, radius=RADIUS, offset=OFFSET)
    for l in hse:
        print l
    print

    hse=ExposureCN(model, radius=RADIUS, offset=OFFSET)
    for l in hse:
        print l
    print

    # NOTE(review): the classes above store the angle under the key
    # 'EXP_CB_PCB_ANGLE'; 'PCB_CB_ANGLE' never matches, so the bare
    # except silently prints nothing here -- confirm the intended key.
    for c in model:
        for r in c:
            try:
                print r.xtra['PCB_CB_ANGLE']
            except:
                pass
| [
"zaiyong@jianings-MacBook-Air.local"
] | zaiyong@jianings-MacBook-Air.local |
9b6713c4988ceef1ba5e4516453df8082c8d8a8d | 6763002b2c7e0c49348a0bffda52b4570c9e2822 | /combination_plot.py | 23384542f001160633c6b00f5767e49803de0267 | [] | no_license | daveronan/music-axions | b83053f1da74f5eee2d948388b4daceb13a4742b | 3fa23a82f34ca92510e95bbc298fa654679b9054 | refs/heads/master | 2022-11-26T10:50:30.356577 | 2020-08-06T10:16:32 | 2020-08-06T10:16:32 | 285,369,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,720 | py | import numpy as np
import matplotlib.pyplot as plt
from utilities import *
# cmap for scatter speaker points: gives each speaker a distinct colour.
cm = plt.get_cmap('jet')

# Root directory holding the simulation snapshot files.
root = "/Volumes/External/JanData/"

# Speaker channels
# First index: speaker
# Second index: variable (see "speaker_channels.py" for what is used)
# Third index: time step
channels = np.load("Data/channels_R10.npy")

# Speaker parameters, these should match what is given in "speaker_channels.py"
nspeakers = 8
R = 10  # arbitrary radius for now
Theta = np.pi/2  # In plane
phivec = np.linspace(0., (2.*np.pi)*(1.-1./nspeakers), nspeakers)

# range of snapshots
imax = 359
imin = 36
tsteps = imax-imin

# Make one combined figure per snapshot.
for i in range(imin, imax):
    print(i)
    phase = np.load(root+"AxPhase_%03d.npy" % i)
    density = np.load(root+"AxDensity4_%03d.npy" % i)
    shape = np.shape(phase)

    # Gradient of phase -> velocity (dTheta is provided by utilities)
    vel = dTheta(phase, np.sqrt(density))
    vx, vy, vz = vel[0], vel[1], vel[2]
    v_empty = np.ones(shape)

    # Soliton location (densest cell) and its velocity
    maxdens = density.max()
    centre = np.where(density == maxdens)
    x0, y0, z0 = centre[0][0], centre[1][0], centre[2][0]
    vx_sol = vx[centre]
    vy_sol = vy[centre]
    vz_sol = vz[centre]

    # Subtract soliton velocity so arrows show motion relative to the soliton
    vx = vx - vx_sol*v_empty
    vy = vy - vy_sol*v_empty
    vz = vz - vz_sol*v_empty

    cube = np.shape(phase)
    Lx, Ly, Lz = cube[0], cube[1], cube[2]
    x, y, z = np.arange(Lx), np.arange(Ly), np.arange(Lz)

    # Slice index of the plane through the soliton centre
    zval = z0
    zind = np.where(z == z0)
    zind = zind[0][0]

    # Speaker locations (cartesian is provided by utilities)
    cart = cartesian(R, Theta, phivec, x0, y0, z0)
    xS, yS, zS = cart[0], cart[1], cart[2]

    #################
    # Plot
    ###############
    # ax1 shows the "raw" density + velocity arrows with speaker locations;
    # var1/var2/var3 trace three speaker variables over time.
    # Was "pl.subplots": use the explicit pyplot alias imported above rather
    # than a name pulled in by "from utilities import *".
    fig, ((ax1, var1), (var2, var3)) = plt.subplots(2, 2, figsize=(9, 7))

    # Density mesh (log scale)
    pmesh = ax1.pcolormesh(np.log10(density[:, :, zind]),
                           cmap='Purples', vmin=0, vmax=7)

    # Velocity quiver
    Qscale = 7.  # Smaller number = larger arrows
    Qangle = 'xy'
    Qwidth = 0.02
    X, Y = np.meshgrid(np.arange(Lx), np.arange(Ly))
    Q = ax1.quiver(X[::3, ::3], Y[::3, ::3], vx[::3, ::3, zind], vy[::3, ::3, zind],
                   pivot='mid', units='inches', angles=Qangle, scale=Qscale, width=Qwidth)

    # Soliton marker (hoisted out of the speaker loop: the original redrew
    # the same star once per speaker).
    ax1.scatter(y0, x0, marker="*", facecolor='r')

    for j in range(nspeakers):
        cmap_num = j*(1./nspeakers)
        # Scatter speaker locations on density field
        # The density field has x and y labels reversed in location for this plot
        # I have verified this is just a plotting problem and not a problem for the speaker channels
        ax1.scatter(yS[j], xS[j], color=cm(cmap_num))

        x_scat = 1.*(i-imin)

        ##########
        # Plot the three variables sent to each speaker
        # Line of whole time series, and moving coloured dot
        # Offset y-axis by a fixed amount per speaker
        ##########

        # Log10 Density
        offset = 3.
        var1.plot(channels[j, 0, :]+offset*j, '-k', zorder=1)
        y1_scat = channels[j, 0, i-imin]+offset*j
        var1.scatter(x_scat, y1_scat, color=cm(cmap_num), zorder=2)

        # Log10 Speed
        offset = 3.
        var2.plot(channels[j, 1, :]+offset*j, '-k', zorder=1)
        y2_scat = channels[j, 1, i-imin]+offset*j
        var2.scatter(x_scat, y2_scat, color=cm(cmap_num), zorder=2)

        # Log10 Curl
        offset = 3.
        var3.plot(channels[j, 2, :]+offset*j, '-k', zorder=1)
        y3_scat = channels[j, 2, i-imin]+offset*j
        var3.scatter(x_scat, y3_scat, color=cm(cmap_num), zorder=2)

    # Set axes: zoom the density panel and strip all tick labels.
    ax1.axis([65, 135, 65, 135])
    ax1.set_yticks([])
    ax1.set_xticks([])
    var1.set_yticks([])
    var1.set_xticks([])
    var2.set_yticks([])
    var2.set_xticks([])
    var3.set_yticks([])
    var3.set_xticks([])

    plt.savefig('Plots/CombinationMovie/Combination_%03d.png' % i, bbox_inches='tight')
    plt.clf()
    plt.close()
plt.close() | [
"noreply@github.com"
] | daveronan.noreply@github.com |
7dfcd4cd24d5ed5f45978adad9a7d5d79e6a8df9 | 691793de7d07b17918d076b319281c706f7275c0 | /signing_today_client/api_client.py | 98948f9c78f3f2cce8197afcb845c184beb78226 | [
"MIT"
] | permissive | signingtoday/signingtoday-sdk-python | 1ddfae5340690c80760c500436631d4a8ff9c87f | ed267279622fb59f2ad8fa289157fc9cdf9d8a5b | refs/heads/master | 2020-12-03T15:32:35.755222 | 2020-03-24T08:27:11 | 2020-03-24T08:27:11 | 231,372,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,011 | py | # coding: utf-8
"""
Signing Today Web
*Signing Today* is the perfect Digital Signature Gateway. Whenever in Your workflow You need to add one or more Digital Signatures to Your document, *Signing Today* is the right choice. You prepare Your documents, *Signing Today* takes care of all the rest: send invitations (`signature tickets`) to signers, collects their signatures, send You back the signed document. Integrating *Signing Today* in Your existing applications is very easy. Just follow these API specifications and get inspired by the many examples presented hereafter. # noqa: E501
The version of the OpenAPI document: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from signing_today_client.configuration import Configuration
import signing_today_client.models
from signing_today_client import rest
from signing_today_client.exceptions import ApiValueError
class ApiClient(object):
    """Generic API client for OpenAPI client library builds.

    OpenAPI generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the OpenAPI
    templates.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    :param configuration: .Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    :param pool_threads: The number of threads to use for async requests
        to the API. More threads means more concurrent API requests.
    """

    # Types passed through serialization/deserialization unchanged.
    PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
    # Mapping from OpenAPI type-name strings to native Python types.
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int if six.PY3 else long,  # noqa: F821
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }
    # Thread pool for async requests; created lazily by the `pool` property.
    _pool = None

    def __init__(self, configuration=None, header_name=None, header_value=None,
                 cookie=None, pool_threads=1):
        # Fall back to the default global configuration when none is given.
        if configuration is None:
            configuration = Configuration()
        self.configuration = configuration
        self.pool_threads = pool_threads

        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'OpenAPI-Generator/1.0.0/python'
        self.client_side_validation = configuration.client_side_validation

    def __enter__(self):
        # Support `with ApiClient(...) as client:` usage.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """Shut down the thread pool (if one was created) and deregister
        the atexit hook installed by the `pool` property."""
        if self._pool:
            self._pool.close()
            self._pool.join()
            self._pool = None
            # atexit.unregister only exists on Python 3.
            if hasattr(atexit, 'unregister'):
                atexit.unregister(self.close)

    @property
    def pool(self):
        """Create thread pool on first request
        avoids instantiating unused threadpool for blocking clients.
        """
        if self._pool is None:
            # Ensure the pool is cleaned up at interpreter shutdown.
            atexit.register(self.close)
            self._pool = ThreadPool(self.pool_threads)
        return self._pool

    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']

    @user_agent.setter
    def user_agent(self, value):
        self.default_headers['User-Agent'] = value

    def set_default_header(self, header_name, header_value):
        # Header sent with every request made through this client.
        self.default_headers[header_name] = header_value

    def __call_api(
            self, resource_path, method, path_params=None,
            query_params=None, header_params=None, body=None, post_params=None,
            files=None, response_type=None, auth_settings=None,
            _return_http_data_only=None, collection_formats=None,
            _preload_content=True, _request_timeout=None, _host=None):
        """Synchronous implementation behind `call_api`: serializes all
        parameter groups, applies auth, performs the HTTP request and
        deserializes the response."""

        config = self.configuration

        # header parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(self.parameters_to_tuples(header_params,
                                                           collection_formats))

        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param)
                )

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = self.parameters_to_tuples(query_params,
                                                     collection_formats)

        # post parameters
        if post_params or files:
            post_params = post_params if post_params else []
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)
            post_params.extend(self.files_parameters(files))

        # auth setting
        self.update_params_for_auth(header_params, query_params, auth_settings)

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # request url
        if _host is None:
            url = self.configuration.host + resource_path
        else:
            # use server/host defined in path or operation instead
            url = _host + resource_path

        # perform request and return response
        response_data = self.request(
            method, url, query_params=query_params, headers=header_params,
            post_params=post_params, body=body,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout)

        # Keep the raw response around for callers that want to inspect it.
        self.last_response = response_data

        return_data = response_data
        if _preload_content:
            # deserialize response data
            if response_type:
                return_data = self.deserialize(response_data, response_type)
            else:
                return_data = None

        if _return_http_data_only:
            return (return_data)
        else:
            return (return_data, response_data.status,
                    response_data.getheaders())

    def sanitize_for_serialization(self, obj):
        """Builds a JSON POST object.

        If obj is None, return None.
        If obj is str, int, long, float, bool, return directly.
        If obj is datetime.datetime, datetime.date
            convert to string in iso8601 format.
        If obj is list, sanitize each element in the list.
        If obj is dict, return the dict.
        If obj is OpenAPI model, return the properties dict.

        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        if obj is None:
            return None
        elif isinstance(obj, self.PRIMITIVE_TYPES):
            return obj
        elif isinstance(obj, list):
            return [self.sanitize_for_serialization(sub_obj)
                    for sub_obj in obj]
        elif isinstance(obj, tuple):
            return tuple(self.sanitize_for_serialization(sub_obj)
                         for sub_obj in obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()

        if isinstance(obj, dict):
            obj_dict = obj
        else:
            # Convert model obj to dict except
            # attributes `openapi_types`, `attribute_map`
            # and attributes which value is not None.
            # Convert attribute name to json key in
            # model definition for request.
            obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
                        for attr, _ in six.iteritems(obj.openapi_types)
                        if getattr(obj, attr) is not None}

        return {key: self.sanitize_for_serialization(val)
                for key, val in six.iteritems(obj_dict)}

    def deserialize(self, response, response_type):
        """Deserializes response into an object.

        :param response: RESTResponse object to be deserialized.
        :param response_type: class literal for
            deserialized object, or string of class name.

        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if response_type == "file":
            return self.__deserialize_file(response)

        # fetch data from response object
        try:
            data = json.loads(response.data)
        except ValueError:
            # Non-JSON body: deserialize the raw payload as-is.
            data = response.data

        return self.__deserialize(data, response_type)

    def __deserialize(self, data, klass):
        """Deserializes dict, list, str into an object.

        :param data: dict, list or str.
        :param klass: class literal, or string of class name.

        :return: object.
        """
        if data is None:
            return None

        if type(klass) == str:
            # Recurse into container type literals such as "list[Pet]"
            # and "dict(str, Pet)".
            if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
                return [self.__deserialize(sub_data, sub_kls)
                        for sub_data in data]

            if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
                return {k: self.__deserialize(v, sub_kls)
                        for k, v in six.iteritems(data)}

            # convert str to class
            if klass in self.NATIVE_TYPES_MAPPING:
                klass = self.NATIVE_TYPES_MAPPING[klass]
            else:
                # Resolve generated model classes by name.
                klass = getattr(signing_today_client.models, klass)

        if klass in self.PRIMITIVE_TYPES:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == datetime.date:
            return self.__deserialize_date(data)
        elif klass == datetime.datetime:
            return self.__deserialize_datetime(data)
        else:
            return self.__deserialize_model(data, klass)

    def call_api(self, resource_path, method,
                 path_params=None, query_params=None, header_params=None,
                 body=None, post_params=None, files=None,
                 response_type=None, auth_settings=None, async_req=None,
                 _return_http_data_only=None, collection_formats=None,
                 _preload_content=True, _request_timeout=None, _host=None):
        """Makes the HTTP request (synchronous) and returns deserialized data.

        To make an async_req request, set the async_req parameter.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response: Response data type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
            then the method will return the response directly.
        """
        if not async_req:
            return self.__call_api(resource_path, method,
                                   path_params, query_params, header_params,
                                   body, post_params, files,
                                   response_type, auth_settings,
                                   _return_http_data_only, collection_formats,
                                   _preload_content, _request_timeout, _host)

        # Async path: run __call_api on the thread pool and return the
        # AsyncResult so the caller can .get() the response later.
        return self.pool.apply_async(self.__call_api, (resource_path,
                                                       method, path_params,
                                                       query_params,
                                                       header_params, body,
                                                       post_params, files,
                                                       response_type,
                                                       auth_settings,
                                                       _return_http_data_only,
                                                       collection_formats,
                                                       _preload_content,
                                                       _request_timeout,
                                                       _host))

    def request(self, method, url, query_params=None, headers=None,
                post_params=None, body=None, _preload_content=True,
                _request_timeout=None):
        """Makes the HTTP request using RESTClient."""
        if method == "GET":
            return self.rest_client.GET(url,
                                        query_params=query_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        headers=headers)
        elif method == "HEAD":
            return self.rest_client.HEAD(url,
                                         query_params=query_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         headers=headers)
        elif method == "OPTIONS":
            return self.rest_client.OPTIONS(url,
                                            query_params=query_params,
                                            headers=headers,
                                            _preload_content=_preload_content,
                                            _request_timeout=_request_timeout)
        elif method == "POST":
            return self.rest_client.POST(url,
                                         query_params=query_params,
                                         headers=headers,
                                         post_params=post_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         body=body)
        elif method == "PUT":
            return self.rest_client.PUT(url,
                                        query_params=query_params,
                                        headers=headers,
                                        post_params=post_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        body=body)
        elif method == "PATCH":
            return self.rest_client.PATCH(url,
                                          query_params=query_params,
                                          headers=headers,
                                          post_params=post_params,
                                          _preload_content=_preload_content,
                                          _request_timeout=_request_timeout,
                                          body=body)
        elif method == "DELETE":
            return self.rest_client.DELETE(url,
                                           query_params=query_params,
                                           headers=headers,
                                           _preload_content=_preload_content,
                                           _request_timeout=_request_timeout,
                                           body=body)
        else:
            raise ApiValueError(
                "http method must be `GET`, `HEAD`, `OPTIONS`,"
                " `POST`, `PATCH`, `PUT` or `DELETE`."
            )

    def parameters_to_tuples(self, params, collection_formats):
        """Get parameters as list of tuples, formatting collections.

        :param params: Parameters as dict or list of two-tuples
        :param dict collection_formats: Parameter collection formats
        :return: Parameters as list of tuples, collections formatted
        """
        new_params = []
        if collection_formats is None:
            collection_formats = {}
        for k, v in six.iteritems(params) if isinstance(params, dict) else params:  # noqa: E501
            if k in collection_formats:
                collection_format = collection_formats[k]
                if collection_format == 'multi':
                    # 'multi' repeats the parameter once per value.
                    new_params.extend((k, value) for value in v)
                else:
                    if collection_format == 'ssv':
                        delimiter = ' '
                    elif collection_format == 'tsv':
                        delimiter = '\t'
                    elif collection_format == 'pipes':
                        delimiter = '|'
                    else:  # csv is the default
                        delimiter = ','
                    new_params.append(
                        (k, delimiter.join(str(value) for value in v)))
            else:
                new_params.append((k, v))
        return new_params

    def files_parameters(self, files=None):
        """Builds form parameters.

        :param files: File parameters.
        :return: Form parameters with files.
        """
        params = []

        if files:
            for k, v in six.iteritems(files):
                if not v:
                    continue
                # Each value may be a single file path or a list of paths.
                file_names = v if type(v) is list else [v]
                for n in file_names:
                    with open(n, 'rb') as f:
                        filename = os.path.basename(f.name)
                        filedata = f.read()
                        mimetype = (mimetypes.guess_type(filename)[0] or
                                    'application/octet-stream')
                        params.append(
                            tuple([k, tuple([filename, filedata, mimetype])]))

        return params

    def select_header_accept(self, accepts):
        """Returns `Accept` based on an array of accepts provided.

        :param accepts: List of headers.
        :return: Accept (e.g. application/json).
        """
        if not accepts:
            return

        accepts = [x.lower() for x in accepts]

        # Prefer JSON when the operation supports it.
        if 'application/json' in accepts:
            return 'application/json'
        else:
            return ', '.join(accepts)

    def select_header_content_type(self, content_types):
        """Returns `Content-Type` based on an array of content_types provided.

        :param content_types: List of content-types.
        :return: Content-Type (e.g. application/json).
        """
        if not content_types:
            return 'application/json'

        content_types = [x.lower() for x in content_types]

        # Prefer JSON when the operation supports it.
        if 'application/json' in content_types or '*/*' in content_types:
            return 'application/json'
        else:
            return content_types[0]

    def update_params_for_auth(self, headers, querys, auth_settings):
        """Updates header and query params based on authentication setting.

        :param headers: Header parameters dict to be updated.
        :param querys: Query parameters tuple list to be updated.
        :param auth_settings: Authentication setting identifiers list.
        """
        if not auth_settings:
            return

        for auth in auth_settings:
            auth_setting = self.configuration.auth_settings().get(auth)
            if auth_setting:
                if auth_setting['in'] == 'cookie':
                    headers['Cookie'] = auth_setting['value']
                elif auth_setting['in'] == 'header':
                    headers[auth_setting['key']] = auth_setting['value']
                elif auth_setting['in'] == 'query':
                    querys.append((auth_setting['key'], auth_setting['value']))
                else:
                    raise ApiValueError(
                        'Authentication token must be in `query` or `header`'
                    )

    def __deserialize_file(self, response):
        """Deserializes body to file

        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response: RESTResponse.
        :return: file path.
        """
        # Create the temp file just to reserve a unique path, then reuse
        # that path (possibly renamed per Content-Disposition) for writing.
        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
        os.close(fd)
        os.remove(path)

        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                                 content_disposition).group(1)
            path = os.path.join(os.path.dirname(path), filename)

        with open(path, "wb") as f:
            f.write(response.data)

        return path

    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.

        :param data: str.
        :param klass: class literal.

        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            return six.text_type(data)
        except TypeError:
            # Value cannot be coerced; return it unchanged.
            return data

    def __deserialize_object(self, value):
        """Return an original value.

        :return: object.
        """
        return value

    def __deserialize_date(self, string):
        """Deserializes string to date.

        :param string: str.
        :return: date.
        """
        try:
            return parse(string).date()
        except ImportError:
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string)
            )

    def __deserialize_datetime(self, string):
        """Deserializes string to datetime.

        The string should be in iso8601 datetime format.

        :param string: str.
        :return: datetime.
        """
        try:
            return parse(string)
        except ImportError:
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object"
                    .format(string)
                )
            )

    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.

        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """
        # Models without declared fields or a discriminator hook need no
        # conversion.
        if not klass.openapi_types and not hasattr(klass,
                                                   'get_real_child_model'):
            return data

        kwargs = {}
        if (data is not None and
                klass.openapi_types is not None and
                isinstance(data, (list, dict))):
            for attr, attr_type in six.iteritems(klass.openapi_types):
                if klass.attribute_map[attr] in data:
                    value = data[klass.attribute_map[attr]]
                    kwargs[attr] = self.__deserialize(value, attr_type)

        instance = klass(**kwargs)

        if hasattr(instance, 'get_real_child_model'):
            # Discriminator support: re-deserialize as the concrete subclass.
            klass_name = instance.get_real_child_model(data)
            if klass_name:
                instance = self.__deserialize(data, klass_name)
        return instance
| [
"smartcloud@bit4id.com"
] | smartcloud@bit4id.com |
93de9a042164784e1ab3c9d2675bfade0049d3b5 | f84c51d8159e973913f5c537f08d285bdb3630e2 | /neural_sp/bin/args_asr.py | 54e0a69463158cfa3a6d65a4044267be461a7bd4 | [
"Apache-2.0"
] | permissive | lahiruts/neural_sp | d302ce0479bcbe813639c531f460d55a6c9c2a65 | 5b314ece12081db8b423d4dc32ce33f4228ff37b | refs/heads/master | 2022-11-20T06:45:34.891542 | 2020-07-22T09:20:55 | 2020-07-22T09:20:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,139 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Args option for the ASR task."""
import configargparse
from distutils.util import strtobool
import os
from neural_sp.bin.train_utils import load_config
def parse_args_train(input_args):
    """Parse command-line arguments for the training stage.

    Parsing happens in multiple passes: the base options are read first
    with ``parse_known_args`` so that ``--enc_type``/``--dec_type`` are
    available, then the encoder- and decoder-specific options are
    registered and the full argument list is parsed strictly.

    Args:
        input_args (list): command-line tokens (e.g. ``sys.argv[1:]``)
    Returns:
        user_args (argparse.Namespace): fully parsed arguments

    """
    parser = build_parser()
    user_args, _ = parser.parse_known_args(input_args)
    # register module specific arguments
    parser = register_args_encoder(parser, user_args)
    user_args, _ = parser.parse_known_args(input_args)  # to avoid args conflict
    parser = register_args_decoder(parser, user_args)
    # Fix: parse the tokens that were passed in instead of implicitly
    # falling back to sys.argv, so the function honors `input_args`.
    user_args = parser.parse_args(input_args)
    return user_args
def parse_args_eval(input_args):
    """Parse command-line arguments for the evaluation stage.

    Loads the training-time ``conf.yml`` stored next to the recognition
    model so that the matching encoder/decoder specific options can be
    registered, then overwrites every non-``recog`` option with the value
    used at training time.

    Args:
        input_args (list): command-line tokens (e.g. ``sys.argv[1:]``)
    Returns:
        user_args (argparse.Namespace): parsed arguments
        config (dict): dictionary view of ``user_args``
        dir_name (str): directory containing the recognition model

    """
    parser = build_parser()
    user_args, _ = parser.parse_known_args(input_args)
    # Load a yaml config file
    dir_name = os.path.dirname(user_args.recog_model[0])
    conf_train = load_config(os.path.join(dir_name, 'conf.yml'))
    # register module specific arguments
    user_args.enc_type = conf_train['enc_type']
    parser = register_args_encoder(parser, user_args)
    user_args, _ = parser.parse_known_args(input_args)  # to avoid args conflict
    user_args.dec_type = conf_train['dec_type']  # to avoid overlap
    parser = register_args_decoder(parser, user_args)
    # Fix: parse the tokens that were passed in instead of implicitly
    # falling back to sys.argv, so the function honors `input_args`.
    user_args = parser.parse_args(input_args)
    # NOTE: If new args are registered after training the model, the default value will be set
    # Overwrite config
    for k, v in conf_train.items():
        if 'recog' not in k:
            setattr(user_args, k, v)
    return user_args, vars(user_args), dir_name
def register_args_encoder(parser, args):
    """Register encoder-specific command-line options on ``parser``.

    The encoder class is selected from ``args.enc_type`` (imported lazily
    to avoid pulling in every encoder module); its ``add_args`` hook, when
    defined, extends the parser.

    Args:
        parser (configargparse.ArgumentParser): base argument parser
        args (argparse.Namespace): parsed arguments providing ``enc_type``
    Returns:
        configargparse.ArgumentParser: parser extended with encoder options

    """
    enc_type = args.enc_type
    # NOTE: the substring checks must come after the exact matches so that
    # e.g. 'conv_transformer' still selects the Transformer encoder.
    if enc_type == 'tds':
        from neural_sp.models.seq2seq.encoders.tds import TDSEncoder as encoder_cls
    elif enc_type == 'gated_conv':
        from neural_sp.models.seq2seq.encoders.gated_conv import GatedConvEncoder as encoder_cls
    elif 'transformer' in enc_type:
        from neural_sp.models.seq2seq.encoders.transformer import TransformerEncoder as encoder_cls
    elif 'conformer' in enc_type:
        from neural_sp.models.seq2seq.encoders.conformer import ConformerEncoder as encoder_cls
    else:
        from neural_sp.models.seq2seq.encoders.rnn import RNNEncoder as encoder_cls
    add_args = getattr(encoder_cls, 'add_args', None)
    if add_args is not None:
        parser = add_args(parser, args)
    return parser
def register_args_decoder(parser, args):
    """Register decoder-specific command-line options on ``parser``.

    The decoder class is selected from ``args.dec_type`` (imported lazily
    to avoid pulling in every decoder module); its ``add_args`` hook, when
    defined, extends the parser.

    Args:
        parser (configargparse.ArgumentParser): base argument parser
        args (argparse.Namespace): parsed arguments providing ``dec_type``
    Returns:
        configargparse.ArgumentParser: parser extended with decoder options

    """
    dec_type = args.dec_type
    if dec_type in ('transformer', 'transformer_xl'):
        from neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder as decoder_cls
    elif dec_type in ('lstm_transducer', 'gru_transducer'):
        from neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer as decoder_cls
    elif dec_type == 'asg':
        from neural_sp.models.seq2seq.decoders.asg import ASGDecoder as decoder_cls
    else:
        # attention-based (LAS) decoder is the default
        from neural_sp.models.seq2seq.decoders.las import RNNDecoder as decoder_cls
    add_args = getattr(decoder_cls, 'add_args', None)
    if add_args is not None:
        parser = add_args(parser, args)
    return parser
def build_parser():
parser = configargparse.ArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
parser.add('--config', is_config_file=True, help='config file path')
parser.add('--config2', is_config_file=True, default=False, nargs='?',
help='another config file path to overwrite --config')
# general
parser.add_argument('--corpus', type=str,
help='corpus name')
parser.add_argument('--n_gpus', type=int, default=1,
help='number of GPUs (0 indicates CPU)')
parser.add_argument('--cudnn_benchmark', type=strtobool, default=True,
help='use CuDNN benchmark mode')
parser.add_argument("--train_dtype", default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help="Data type for training")
parser.add_argument('--model_save_dir', type=str, default=False,
help='directory to save a model')
parser.add_argument('--resume', type=str, default=False, nargs='?',
help='model path to resume training')
parser.add_argument('--job_name', type=str, default=False,
help='job name')
parser.add_argument('--stdout', type=strtobool, default=False,
help='print to standard output during training')
# dataset
parser.add_argument('--train_set', type=str,
help='tsv file path for the training set')
parser.add_argument('--train_set_sub1', type=str, default=False,
help='tsv file path for the training set for the 1st auxiliary task')
parser.add_argument('--train_set_sub2', type=str, default=False,
help='tsv file path for the training set for the 2nd auxiliary task')
parser.add_argument('--dev_set', type=str,
help='tsv file path for the development set')
parser.add_argument('--dev_set_sub1', type=str, default=False,
help='tsv file path for the development set for the 1st auxiliary task')
parser.add_argument('--dev_set_sub2', type=str, default=False,
help='tsv file path for the development set for the 2nd auxiliary task')
parser.add_argument('--eval_sets', type=str, default=[], nargs='+',
help='tsv file paths for the evaluation sets')
parser.add_argument('--nlsyms', type=str, default=False, nargs='?',
help='non-linguistic symbols file path')
parser.add_argument('--dict', type=str,
help='dictionary file path')
parser.add_argument('--dict_sub1', type=str, default=False,
help='dictionary file path for the 1st auxiliary task')
parser.add_argument('--dict_sub2', type=str, default=False,
help='dictionary file path for the 2nd auxiliary task')
parser.add_argument('--unit', type=str, default='wp',
choices=['word', 'wp', 'char', 'phone', 'word_char', 'char_space'],
help='output unit for the main task')
parser.add_argument('--unit_sub1', type=str, default=False,
choices=['wp', 'char', 'phone'],
help='output unit for the 1st auxiliary task')
parser.add_argument('--unit_sub2', type=str, default=False,
choices=['wp', 'char', 'phone'],
help='output unit for the 2nd auxiliary task')
parser.add_argument('--wp_model', type=str, default=False, nargs='?',
help='wordpiece model path for the main task')
parser.add_argument('--wp_model_sub1', type=str, default=False, nargs='?',
help='wordpiece model path for the 1st auxiliary task')
parser.add_argument('--wp_model_sub2', type=str, default=False, nargs='?',
help='wordpiece model path for the 2nd auxiliary task')
# features
parser.add_argument('--input_type', type=str, default='speech',
choices=['speech', 'text'],
help='type of input features')
parser.add_argument('--n_splices', type=int, default=1,
help='number of input frames to splice (both for left and right frames)')
parser.add_argument('--n_stacks', type=int, default=1,
help='number of input frames to stack (frame stacking)')
parser.add_argument('--n_skips', type=int, default=1,
help='number of input frames to skip')
parser.add_argument('--max_n_frames', type=int, default=2000,
help='maximum number of input frames')
parser.add_argument('--min_n_frames', type=int, default=40,
help='minimum number of input frames')
parser.add_argument('--dynamic_batching', type=strtobool, default=True,
help='')
parser.add_argument('--input_noise_std', type=float, default=0,
help='standard deviation of Gaussian noise to input features')
parser.add_argument('--weight_noise_std', type=float, default=0,
help='standard deviation of Gaussian noise to weight parameters')
parser.add_argument('--sequence_summary_network', type=strtobool, default=False,
help='use sequence summary network')
# topology (encoder)
parser.add_argument('--enc_type', type=str, default='blstm',
choices=['blstm', 'lstm', 'bgru', 'gru',
'conv_blstm', 'conv_lstm', 'conv_bgru', 'conv_gru',
'transformer', 'conv_transformer',
'conformer', 'conv_conformer',
'tds', 'gated_conv'],
help='type of the encoder')
parser.add_argument('--enc_n_layers', type=int, default=5,
help='number of encoder RNN layers')
parser.add_argument('--enc_n_layers_sub1', type=int, default=0,
help='number of encoder RNN layers in the 1st auxiliary task')
parser.add_argument('--enc_n_layers_sub2', type=int, default=0,
help='number of encoder RNN layers in the 2nd auxiliary task')
parser.add_argument('--subsample', type=str, default="1_1_1_1_1",
help='delimited list input')
parser.add_argument('--subsample_type', type=str, default='drop',
choices=['drop', 'concat', 'max_pool', '1dconv'],
help='type of subsampling in the encoder')
# topology (decoder)
parser.add_argument('--dec_type', type=str, default='lstm',
choices=['lstm', 'gru', 'transformer', 'transformer_xl',
'lstm_transducer', 'gru_transducer', 'transformer_transducer',
'asg'],
help='type of the decoder')
parser.add_argument('--dec_n_layers', type=int, default=1,
help='number of decoder RNN layers')
parser.add_argument('--tie_embedding', type=strtobool, default=False, nargs='?',
help='tie weights between an embedding matrix and a linear layer before the softmax layer')
parser.add_argument('--ctc_fc_list', type=str, default="", nargs='?',
help='')
parser.add_argument('--ctc_fc_list_sub1', type=str, default="", nargs='?',
help='')
parser.add_argument('--ctc_fc_list_sub2', type=str, default="", nargs='?',
help='')
# optimization
parser.add_argument('--batch_size', type=int, default=50,
help='mini-batch size')
parser.add_argument('--optimizer', type=str, default='adam',
choices=['adam', 'adadelta', 'adagrad', 'sgd', 'momentum', 'nesterov', 'noam'],
help='type of optimizer')
parser.add_argument('--n_epochs', type=int, default=25,
help='number of epochs to train the model')
parser.add_argument('--convert_to_sgd_epoch', type=int, default=100,
help='epoch to converto to SGD fine-tuning')
parser.add_argument('--print_step', type=int, default=200,
help='print log per this value')
parser.add_argument('--metric', type=str, default='edit_distance',
choices=['edit_distance', 'loss', 'accuracy', 'ppl', 'bleu', 'mse'],
help='metric for evaluation during training')
parser.add_argument('--lr', type=float, default=1e-3,
help='initial learning rate')
parser.add_argument('--lr_factor', type=float, default=10.0,
help='factor of learning rate for Transformer')
parser.add_argument('--eps', type=float, default=1e-6,
help='epsilon parameter for Adadelta optimizer')
parser.add_argument('--lr_decay_type', type=str, default='always',
choices=['always', 'metric', 'warmup'],
help='type of learning rate decay')
parser.add_argument('--lr_decay_start_epoch', type=int, default=10,
help='epoch to start to decay learning rate')
parser.add_argument('--lr_decay_rate', type=float, default=0.9,
help='decay rate of learning rate')
parser.add_argument('--lr_decay_patient_n_epochs', type=int, default=0,
help='number of epochs to tolerate learning rate decay when validation perfomance is not improved')
parser.add_argument('--early_stop_patient_n_epochs', type=int, default=5,
help='number of epochs to tolerate stopping training when validation perfomance is not improved')
parser.add_argument('--sort_stop_epoch', type=int, default=10000,
help='epoch to stop soring utterances by length')
parser.add_argument('--sort_short2long', type=strtobool, default=True,
help='sort utterances in the ascending order')
parser.add_argument('--shuffle_bucket', type=strtobool, default=False,
help='gather the similar length of utterances and shuffle them')
parser.add_argument('--eval_start_epoch', type=int, default=1,
help='first epoch to start evalaution')
parser.add_argument('--warmup_start_lr', type=float, default=0,
help='initial learning rate for learning rate warm up')
parser.add_argument('--warmup_n_steps', type=int, default=0,
help='number of steps to warm up learing rate')
parser.add_argument('--accum_grad_n_steps', type=int, default=1,
help='total number of steps to accumulate gradients')
# initialization
parser.add_argument('--param_init', type=float, default=0.1,
help='')
parser.add_argument('--asr_init', type=str, default=False, nargs='?',
help='pre-trained seq2seq model path')
parser.add_argument('--asr_init_enc_only', type=strtobool, default=False,
help='Initialize the encoder only')
parser.add_argument('--freeze_encoder', type=strtobool, default=False,
help='freeze the encoder parameter')
# regularization
parser.add_argument('--clip_grad_norm', type=float, default=5.0,
help='')
parser.add_argument('--dropout_in', type=float, default=0.0,
help='dropout probability for the input')
parser.add_argument('--dropout_enc', type=float, default=0.0,
help='dropout probability for the encoder')
parser.add_argument('--dropout_dec', type=float, default=0.0,
help='dropout probability for the decoder')
parser.add_argument('--dropout_emb', type=float, default=0.0,
help='dropout probability for the embedding')
parser.add_argument('--dropout_att', type=float, default=0.0,
help='dropout probability for the attention weights')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay parameter')
parser.add_argument('--ss_prob', type=float, default=0.0,
help='probability of scheduled sampling')
parser.add_argument('--ss_type', type=str, default='constant',
choices=['constant', 'ramp'],
help='type of scheduled sampling')
parser.add_argument('--lsm_prob', type=float, default=0.0,
help='probability of label smoothing')
parser.add_argument('--ctc_lsm_prob', type=float, default=0.0,
help='probability of label smoothing for CTC')
# SpecAugment
parser.add_argument('--freq_width', type=int, default=27,
help='width of frequency mask for SpecAugment')
parser.add_argument('--n_freq_masks', type=int, default=0,
help='number of frequency masks for SpecAugment')
parser.add_argument('--time_width', type=int, default=100,
help='width of time mask for SpecAugment')
parser.add_argument('--n_time_masks', type=int, default=0,
help='number of time masks for SpecAugment')
parser.add_argument('--time_width_upper', type=float, default=1.0,
help='')
parser.add_argument('--adaptive_number_ratio', type=float, default=0.0,
help='adaptive multiplicity ratio for time masking')
parser.add_argument('--adaptive_size_ratio', type=float, default=0.0,
help='adaptive size ratio for time masking')
parser.add_argument('--max_n_time_masks', type=int, default=20,
help='maximum number of time masking')
# MTL
parser.add_argument('--ctc_weight', type=float, default=0.0,
help='CTC loss weight for the main task')
parser.add_argument('--ctc_weight_sub1', type=float, default=0.0,
help='CTC loss weight for the 1st auxiliary task')
parser.add_argument('--ctc_weight_sub2', type=float, default=0.0,
help='CTC loss weight for the 2nd auxiliary task')
parser.add_argument('--sub1_weight', type=float, default=0.0,
help='total loss weight for the 1st auxiliary task')
parser.add_argument('--sub2_weight', type=float, default=0.0,
help='total loss weight for the 2nd auxiliary task')
parser.add_argument('--mtl_per_batch', type=strtobool, default=False, nargs='?',
help='change mini-batch per task')
parser.add_argument('--task_specific_layer', type=strtobool, default=False, nargs='?',
help='insert a task-specific encoder layer per task')
# foroward-backward
parser.add_argument('--bwd_weight', type=float, default=0.0,
help='cross etnropy loss weight for the backward decoder in the main task')
# cold fusion, LM initialization
parser.add_argument('--external_lm', type=str, default=False, nargs='?',
help='LM path')
parser.add_argument('--lm_fusion', type=str, default='',
choices=['', 'cold', 'cold_prob', 'deep', 'cold_attention'],
help='type of LM fusion')
parser.add_argument('--lm_init', type=strtobool, default=False,
help='initialize the decoder with the external LM')
# contextualization
parser.add_argument('--discourse_aware', type=strtobool, default=False, nargs='?',
help='carry over the last decoder state to the initial state in the next utterance')
# MBR
parser.add_argument('--mbr_training', type=strtobool, default=False,
help='Minimum Bayes Risk (MBR) training')
parser.add_argument('--mbr_ce_weight', type=float, default=0.01,
help='MBR loss weight for the main task')
parser.add_argument('--mbr_nbest', type=int, default=4,
help='N-best for MBR training')
parser.add_argument('--mbr_softmax_smoothing', type=float, default=0.8,
help='softmax smoothing (beta) for MBR training')
# TransformerXL
parser.add_argument('--bptt', type=int, default=0,
help='number of tokens to truncate in TransformerXL decoder during training')
parser.add_argument('--mem_len', type=int, default=0,
help='number of tokens for memory in TransformerXL decoder during training')
# distillation related
parser.add_argument('--teacher', default=False, nargs='?',
help='Teacher ASR model for knowledge distillation')
parser.add_argument('--teacher_lm', default=False, nargs='?',
help='Teacher LM for knowledge distillation')
parser.add_argument('--distillation_weight', type=float, default=0.1,
help='soft label weight for knowledge distillation')
# special label
parser.add_argument('--replace_sos', type=strtobool, default=False,
help='')
# decoding parameters
parser.add_argument('--recog_stdout', type=strtobool, default=False,
help='print to standard output during evaluation')
parser.add_argument('--recog_n_gpus', type=int, default=0,
help='number of GPUs (0 indicates CPU)')
parser.add_argument('--recog_sets', type=str, default=[], nargs='+',
help='tsv file paths for the evaluation sets')
parser.add_argument('--recog_first_n_utt', type=int, default=-1,
help='recognize the first N utterances for quick evalaution')
parser.add_argument('--recog_model', type=str, default=False, nargs='+',
help='model path')
parser.add_argument('--recog_model_bwd', type=str, default=False, nargs='?',
help='model path in the reverse direction')
parser.add_argument('--recog_dir', type=str, default=False,
help='directory to save decoding results')
parser.add_argument('--recog_unit', type=str, default=False, nargs='?',
choices=['word', 'wp', 'char', 'phone', 'word_char', 'char_space'],
help='')
parser.add_argument('--recog_metric', type=str, default='edit_distance',
choices=['edit_distance', 'loss', 'accuracy', 'ppl', 'bleu'],
help='metric for evaluation')
parser.add_argument('--recog_oracle', type=strtobool, default=False,
help='recognize by teacher-forcing')
parser.add_argument('--recog_batch_size', type=int, default=1,
help='size of mini-batch in evaluation')
parser.add_argument('--recog_beam_width', type=int, default=1,
help='size of beam')
parser.add_argument('--recog_max_len_ratio', type=float, default=1.0,
help='')
parser.add_argument('--recog_min_len_ratio', type=float, default=0.0,
help='')
parser.add_argument('--recog_length_penalty', type=float, default=0.0,
help='length penalty')
parser.add_argument('--recog_length_norm', type=strtobool, default=False, nargs='?',
help='normalize score by hypothesis length')
parser.add_argument('--recog_coverage_penalty', type=float, default=0.0,
help='coverage penalty')
parser.add_argument('--recog_coverage_threshold', type=float, default=0.0,
help='coverage threshold')
parser.add_argument('--recog_gnmt_decoding', type=strtobool, default=False, nargs='?',
help='adopt Google NMT beam search decoding')
parser.add_argument('--recog_eos_threshold', type=float, default=1.5,
help='threshold for emitting a EOS token')
parser.add_argument('--recog_lm_weight', type=float, default=0.0,
help='weight of fisrt-path LM score')
parser.add_argument('--recog_lm_second_weight', type=float, default=0.0,
help='weight of second-path LM score')
parser.add_argument('--recog_lm_bwd_weight', type=float, default=0.0,
help='weight of second-path bakward LM score. \
First-pass backward LM in case of synchronous bidirectional decoding.')
parser.add_argument('--recog_ctc_weight', type=float, default=0.0,
help='weight of CTC score')
parser.add_argument('--recog_lm', type=str, default=False, nargs='?',
help='path to first path LM for shallow fusion')
parser.add_argument('--recog_lm_second', type=str, default=False, nargs='?',
help='path to second path LM for rescoring')
parser.add_argument('--recog_lm_bwd', type=str, default=False, nargs='?',
help='path to second path LM in the reverse direction for rescoring')
parser.add_argument('--recog_resolving_unk', type=strtobool, default=False,
help='resolving UNK for the word-based model')
parser.add_argument('--recog_fwd_bwd_attention', type=strtobool, default=False,
help='forward-backward attention decoding')
parser.add_argument('--recog_bwd_attention', type=strtobool, default=False,
help='backward attention decoding')
parser.add_argument('--recog_reverse_lm_rescoring', type=strtobool, default=False,
help='rescore with another LM in the reverse direction')
parser.add_argument('--recog_asr_state_carry_over', type=strtobool, default=False,
help='carry over ASR decoder state')
parser.add_argument('--recog_lm_state_carry_over', type=strtobool, default=False,
help='carry over LM state')
parser.add_argument('--recog_softmax_smoothing', type=float, default=1.0,
help='softmax smoothing (beta) for diverse hypothesis generation')
parser.add_argument('--recog_wordlm', type=strtobool, default=False,
help='')
parser.add_argument('--recog_n_average', type=int, default=1,
help='number of models for the model averaging of Transformer')
parser.add_argument('--recog_streaming', type=strtobool, default=False,
help='streaming decoding')
parser.add_argument('--recog_chunk_sync', type=strtobool, default=False,
help='chunk-synchronous beam search decoding for MoChA')
parser.add_argument('--recog_ctc_spike_forced_decoding', type=strtobool, default=False,
help='force MoChA to generate tokens corresponding to CTC spikes')
parser.add_argument('--recog_ctc_vad', type=strtobool, default=True,
help='')
parser.add_argument('--recog_ctc_vad_blank_threshold', type=int, default=40,
help='')
parser.add_argument('--recog_ctc_vad_spike_threshold', type=float, default=0.1,
help='')
parser.add_argument('--recog_ctc_vad_n_accum_frames', type=int, default=4000,
help='')
parser.add_argument('--recog_mma_delay_threshold', type=int, default=-1,
help='delay threshold for MMA decoder')
parser.add_argument('--recog_mem_len', type=int, default=0,
help='number of tokens for memory in TransformerXL decoder during evaluation')
return parser
| [
"hiro.mhbc@gmail.com"
] | hiro.mhbc@gmail.com |
b89e6024ba7fcd2978bed43342381eaea6996fb3 | 5ebfced62f59052560c6adf89bfd2f249877cc75 | /webcomics/series/urls.py | 46b8c581e3ef21673277aa776913f4bad5bfbd5c | [] | no_license | lumenwrites/webcomics | 537c9bd0337ebd087dacdee7b72797b658481f8c | 34200eaf19021147c561bf140a685e398156589e | refs/heads/master | 2021-06-10T17:12:50.317113 | 2017-02-19T09:28:57 | 2017-02-19T09:28:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | from django.conf.urls import url
from . import views
from posts.views import SeriesFeed
# URL routes for the series app: creation, management, subscriptions, feeds
# and detail pages. Order matters: more specific patterns must precede the
# generic series-detail catch-all.
urlpatterns = [
    # url(r'^$', views.BrowseView.as_view(), name='post-list'),
    url(r'^create-series/$', views.SeriesCreate.as_view(), name='series-create'),
    # Series management (edit/delete) addressed by slug.
    url(r'^series/(?P<slug>[^\.]+)/edit$', views.SeriesEdit.as_view()),
    url(r'^series/(?P<slug>[^\.]+)/delete$', views.series_delete),
    # url(r'^browse/$', views.BrowseView.as_view(), name='post-list'),
    # Subscription toggles for a series.
    url(r'^series/(?P<slug>[^\.]+)/subscribe', views.subscribe),
    url(r'^series/(?P<slug>[^\.]+)/unsubscribe', views.unsubscribe),
    # Atom feed for a single series.
    url(r'^series/(?P<slug>[^\.]+)/feed/atom/$', SeriesFeed()),
    # Generic detail page; must come after the routes above so they win.
    url(r'^series/(?P<slug>[^\.]+)$', views.SeriesView.as_view(), name='series-detail'),
    # Hard-coded shortcut URL that resolves to the 'orangemind' series.
    url(r'^orangemind$', views.SeriesView.as_view(), {'slug': 'orangemind'}, name='series-detail'),
]
| [
"raymestalez@gmail.com"
] | raymestalez@gmail.com |
9eebe25c8f6da87eca5cb900628f905bdf287d16 | 52ad1906e8df7dca7d4afe9b01dd39c44ae28c56 | /DiscreteRods/runExperiments.py | fc121d00147d584b4cc051ee0814ff06ced49a8b | [] | no_license | sameeptandon/surgical | 03066a2c83e6d4dd47f86e5972d7f9848388db87 | b53a601839d4004b9b5b34b03c9c83e8348b4cec | refs/heads/master | 2021-01-18T06:12:38.758981 | 2011-10-17T17:48:31 | 2011-10-17T17:48:31 | 1,872,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import os, sys
# Sweep configuration for batch ./runExperiment runs.
control_in_filename = ["w2", "w2s", "w3", "w3s", "w4", "w4s", "x10", "x10s"]
start_ind = 0
end_ind = -1  # presumably -1 means "run to the end of the control file" -- TODO confirm in runExperiment
single_horizon = [0, 5]
noise_thresh = [1.0, 1.2, 1.4, 1.6, 1.8, 2.0]
# Run every combination of control file, horizon and noise threshold,
# repeating each experiment 10 times. (Note: Python 2 print syntax below.)
for control_in in control_in_filename:
    for horizon in single_horizon:
        for noise in noise_thresh:
            for i in range(0, 10):
                # The second argument encodes all sweep parameters into a unique
                # experiment name so output files from different runs do not collide.
                run_command = "%s %s %s %s %d %d %d %f" % ("./runExperiment", control_in + "_" + str(start_ind) + "_" + str(end_ind) + "_" + str(horizon) + "_" + str(noise) + "_exp" + str(i), control_in, control_in + "_world", start_ind, end_ind, horizon, noise)
                print run_command
                os.system(run_command)
| [
"alexlee_gk@berkeley.edu"
] | alexlee_gk@berkeley.edu |
58131d1aa71473ab943067660abc9861fcece912 | 239bde952ead44bfd82248c66fc70de66f20ecb3 | /nltkwordnetbot.py | c50da928465d521a75ff24dd924b577a8461bef0 | [
"MIT"
] | permissive | bolavefasfas/nltkwordnetbot | 498a7688f57d3caf13fff5bf121344ba52ef91fc | e1c08b9c9959336b9b44a6ad6afd1dd923f84082 | refs/heads/master | 2023-03-19T02:44:03.424054 | 2017-10-31T23:56:59 | 2017-10-31T23:56:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,561 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler
import logging
try:
from nltk.corpus import wordnet as wn
except:
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
# Enable logging
# Standard format: timestamp - logger name - level - message.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

# Telegram bot token placeholder -- must be replaced with a real token.
TOKEN = "bot-token"

# Define Steps
# Conversation states used by the ConversationHandler state machine below.
WORD, POS, ACTION = range(3)

# Module-level conversation state. NOTE(review): these globals are shared by
# every chat handled by the bot, so concurrent users would clobber each other.
userWord = ""    # the word the user last typed
userPos = ""     # WordNet POS tag chosen by the user ('n', 'v', 'a', 's', 'r')
userAction = ""  # last lookup requested ('Synonym', 'Antonym', ...)

# Reply keyboards shown at the POS and ACTION steps. The misspelled
# 'Adjective-Sattelite' label must stay in sync with the comparison in pos().
posKeyboard = [['Noun', 'Verb', 'Adjective'],
               ['Adjective-Sattelite', 'Adverb']]
actionKeyboard = [['Synonym', 'Antonym'], ['Definition', 'Example']]
def start(bot, update):
    """Entry point for /start: greet the user and enter the WORD state."""
    update.message.reply_text(
        "Welcome, please type your word.", reply_markup=ReplyKeyboardRemove())
    return WORD
def word(bot, update):
    """WORD state handler: store the typed word and validate it against WordNet.

    Moves on to the POS state when WordNet knows the word, otherwise stays in
    the WORD state and asks for another word.
    """
    global userWord
    sender = update.message.from_user
    userWord = update.message.text
    logger.info("%s said %s" % (sender.first_name, update.message.text))
    if not wn.synsets(userWord):
        # Unknown word: drop the keyboard and stay in this state.
        update.message.reply_text(
            'Unknown word. Please type a known word',
            reply_markup=ReplyKeyboardRemove())
        return WORD
    update.message.reply_text(
        'Choose part of speech',
        reply_markup=ReplyKeyboardMarkup(posKeyboard, True))
    return POS
def pos(bot, update):
    """POS state handler: map the chosen part of speech to a WordNet tag.

    The original implementation repeated the same branch five times (once per
    part of speech); it is collapsed here into a lookup table. Also narrows
    the bare ``except:`` to ``except Exception:``.

    Validates that the previously entered word actually has a synset for the
    chosen part of speech before moving on to the ACTION state.
    """
    global userPos
    user = update.message.from_user
    choice = update.message.text
    logger.info("%s said %s" % (user.first_name, choice))
    # Keyboard labels -> WordNet POS tags. The 'Adjective-Sattelite' spelling
    # must match posKeyboard exactly.
    pos_tags = {
        'Noun': 'n',
        'Verb': 'v',
        'Adjective': 'a',
        'Adjective-Sattelite': 's',
        'Adverb': 'r',
    }
    if choice not in pos_tags:
        reply_markup = ReplyKeyboardMarkup(posKeyboard, True)
        update.message.reply_text(
            "Enter valid part of speech", reply_markup=reply_markup)
        return POS
    userPos = pos_tags[choice]
    wordWithPos = '{}.{}.01'.format(userWord, userPos)
    try:
        # Probe the first sense; raises if the word has no synset for this POS.
        wn.synset(wordWithPos)
    except Exception:
        reply_markup = ReplyKeyboardMarkup(posKeyboard, True)
        update.message.reply_text(
            "Enter valid part of speech for this word.", reply_markup=reply_markup)
        return POS
    reply_markup = ReplyKeyboardMarkup(actionKeyboard, True)
    update.message.reply_text(
        "What do you want to know?", reply_markup=reply_markup)
    return ACTION
def action(bot, update):
    """ACTION state handler: answer the requested lookup for the current word/POS.

    Deduplicates the original copy-paste branches and only builds the
    synonym/antonym lists when they are actually needed (the original built
    them even for 'New Word').
    """
    global userAction
    user = update.message.from_user
    choice = update.message.text
    logger.info("%s said %s" % (user.first_name, choice))
    custom_keyboard = [['Synonym', 'Antonym'],
                       ['Definition', 'Example'], ['New Word']]
    if choice == "New Word":
        # Restart the word entry flow.
        update.message.reply_text(
            "Please type your word.", reply_markup=ReplyKeyboardRemove())
        return WORD
    reply_markup = ReplyKeyboardMarkup(custom_keyboard, True)
    wordWithPos = '{}.{}.01'.format(userWord, userPos)
    if choice in ("Synonym", "Antonym"):
        # Collect lemma names (synonyms) and first antonyms across all synsets.
        synsList = []
        antsList = []
        for syn in wn.synsets(userWord, pos=userPos):
            for l in syn.lemmas():
                synsList.append(l.name())
                if l.antonyms():
                    antsList.append(l.antonyms()[0].name())
        userAction = choice
        payload = synsList if choice == "Synonym" else antsList
        fallback = "There isn't any %s for this word." % choice.lower()
        update.message.reply_text(payload or fallback, reply_markup=reply_markup)
        return ACTION
    if choice == "Definition":
        userAction = "Definition"
        definition = wn.synset(wordWithPos).definition()
        update.message.reply_text(
            definition or "There isn't any definition for this word.",
            reply_markup=reply_markup)
        return ACTION
    if choice == "Example":
        userAction = "Example"
        examples = wn.synset(wordWithPos).examples()
        update.message.reply_text(
            examples or "There isn't any example for this word.",
            reply_markup=reply_markup)
        return ACTION
    # Anything else: re-prompt with the action keyboard.
    update.message.reply_text(
        "Please choose from keyboard", reply_markup=reply_markup)
    return ACTION
def anyText(bot, update):
    """Fallback for free text outside a conversation: ask the user to /start."""
    sender = update.message.from_user
    logger.info("%s said %s" % (sender.first_name, update.message.text))
    update.message.reply_text('Please /start',
                              reply_markup=ReplyKeyboardRemove())
def unknown(bot, update):
    """Fallback for unrecognized commands."""
    chat = update.message.chat_id
    bot.sendMessage(chat_id=chat, text="Unknown Command")
def cancel(bot, update):
    """Handle /cancel: log it, say goodbye, and end the conversation."""
    sender = update.message.from_user
    logger.info("%s canceled the bot." % sender.first_name)
    update.message.reply_text('Ok bye.', reply_markup=ReplyKeyboardRemove())
    return ConversationHandler.END
def error(bot, update, error):
    """Log dispatcher errors together with the update that caused them."""
    # logger.warn() is a deprecated alias of warning() -- use the real name.
    logger.warning('Update: "%s", Error: "%s"' % (update, error))
def main():
    """Wire up the Telegram bot: conversation flow plus fallback handlers."""
    updater = Updater(TOKEN)
    dispatcher = updater.dispatcher

    conversation = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            WORD: [MessageHandler(Filters.text, word)],
            POS: [MessageHandler(Filters.text, pos)],
            ACTION: [MessageHandler(Filters.text, action)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )

    # Registration order matters: the conversation handler must see updates
    # before the generic command/text fallbacks.
    dispatcher.add_handler(conversation)
    dispatcher.add_handler(MessageHandler(Filters.command, unknown))
    dispatcher.add_handler(MessageHandler(Filters.text, anyText))
    dispatcher.add_error_handler(error)

    updater.start_polling()
    updater.idle()
# Start polling only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | bolavefasfas.noreply@github.com |
daae7ab1b7ac6d998eca5a559c61ec45f2d7095e | 25985aeeee54373d26a164e4cc6a014770e3ebf3 | /windows/w3af/w3af/core/data/nltk_wrapper/.svn/text-base/nltk_wrapper.py.svn-base | 3f652ee04b353c653d75d4761f71621362d73520 | [] | no_license | sui84/tools | 4b750dae90940fbe3a226cba72dc071d8fb88b7c | 651cc08eb50199ce1044c684dbf714ea26df6432 | refs/heads/master | 2021-01-22T19:22:26.964580 | 2017-08-20T15:23:38 | 2017-08-20T15:23:38 | 100,774,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | '''
nltk_wrapper.py
Copyright 2011 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
from nltk.corpus.util import LazyCorpusLoader
from nltk.data import ZipFilePathPointer
from nltk.corpus.reader.wordnet import WordNetCorpusReader
import os
class wordnet_loader(LazyCorpusLoader):
    """Lazily load the WordNet corpus from a bundled zip on first attribute access.

    Mirrors NLTK's LazyCorpusLoader but resolves the corpus root from
    ``plugins/discovery/wordnet/wordnet.zip`` instead of the standard NLTK
    data path.
    """

    def __init__(self, name, reader_cls, *args, **kwargs):
        # Defer reader construction; only record what __load() will need.
        from nltk.corpus.reader.api import CorpusReader
        assert issubclass(reader_cls, CorpusReader)
        # The double-underscore attributes are deliberately name-mangled so
        # they cannot clash with the real corpus's attributes after the
        # __dict__/__class__ swap performed in __load().
        self.__name = self.__name__ = name
        self.__reader_cls = reader_cls
        self.__args = args
        self.__kwargs = kwargs

    def __load(self):
        # Find the corpus root directory.
        zip_location = os.path.join('plugins', 'discovery', 'wordnet','wordnet.zip')
        root = ZipFilePathPointer(zip_location, 'wordnet/')
        # Load the corpus.
        corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
        # This is where the magic happens! Transform ourselves into
        # the corpus by modifying our own __dict__ and __class__ to
        # match that of the corpus.
        self.__dict__ = corpus.__dict__
        self.__class__ = corpus.__class__

    def __getattr__(self, attr):
        # Any missing-attribute access triggers the real load; after the swap
        # above this instance IS the corpus, so __getattr__ never fires again.
        self.__load()
        # This looks circular, but its not, since __load() changes our
        # __class__ to something new:
        return getattr(self, attr)
wn = wordnet_loader('wordnet', WordNetCorpusReader)
| [
"sui84@126.com"
] | sui84@126.com | |
849248d7e72b67335a639deab4541372248975c3 | a6d2c7618ba35a87b01749b01c8a41aba374f50f | /skynet/ovn/lsp/cli.py | 746c588b05fbb3a0898c3651068c8edc054b9abc | [
"Apache-2.0"
] | permissive | amorenoz/skynet | 5f5a9d630e3a6c728bcb14294b3430a9d7a9a844 | 4fde38d370fc732e5dac5f74b48680dc52551601 | refs/heads/master | 2023-05-30T18:38:53.266288 | 2021-03-17T08:16:51 | 2021-03-17T08:16:51 | 322,558,981 | 3 | 1 | Apache-2.0 | 2021-03-17T08:16:52 | 2020-12-18T10:13:15 | Python | UTF-8 | Python | false | false | 1,648 | py | import click
from skynet.common.printers import SeriesPrinter
from skynet.context import SkyNetCtxt
from skynet.ovn.lsp.data import LSPProvider
# Click sub-command group 'lsp'; the shared SkyNetCtxt is injected via pass_obj.
# The docstring below doubles as the CLI help text.
@click.group(name='lsp')
@click.pass_obj
def lspcli(obj: SkyNetCtxt) -> None:
    """
    Logical Switch Ports commands
    """
@lspcli.command()
@click.option('-s',
              '--switch',
              'switch',
              help='Only list the Logical Ports corresponding to '
              'the specified Logical Switch'
              '(Either name or UUID are acceptable values)')
@click.pass_obj
def list(obj: SkyNetCtxt, switch: str) -> None:
    """
    List Logical Switch Ports
    """
    # Note: the function name intentionally shadows the builtin `list`;
    # click derives the CLI command name from it.
    columns = ["Name", 'PortType', 'Addresses', 'Options']
    table = LSPProvider(obj).list(switch)
    print(table.to_string(columns))
@lspcli.command()
@click.argument('uid', required=True)
@click.pass_obj
def get(obj: SkyNetCtxt, uid: str) -> None:
    """
    Get Logical Switch Port Details
    """
    lsp = LSPProvider(obj).get(uid)
    sprint = SeriesPrinter()

    def _print_section(title, table):
        # Print an optional related section only when the provider has data.
        if not table.is_empty():
            print(title + ':')
            print(sprint.print(table.data().iloc[0], 4))

    if lsp.lsp.is_empty():
        print('Logical Switch Port not found')
        return
    print(sprint.print(lsp.lsp.data().iloc[0]))
    # The Logical Switch header always prints, with a placeholder when empty.
    print('Logical Switch:')
    if lsp.ls.is_empty():
        print("no info available")
    else:
        print(sprint.print(lsp.ls.data().iloc[0], 4))
    # The remaining sections are skipped entirely when empty.
    _print_section('Pod', lsp.pod)
    _print_section('Interface', lsp.iface)
    _print_section('Logical Router Port', lsp.lrp)
"amorenoz@redhat.com"
] | amorenoz@redhat.com |
69181cc8e5a1027d7c2b679066e749d4a7316a16 | 30afb48b8948a5f4426d42ee33a990be606a2528 | /02.08day/01-飞机大战.py | b9a121a70fdbe9d4ba2ec4e80fafdbf639128a98 | [] | no_license | AIGW/1807 | 2fdfbb5b9d2a74ddbfbdda715c91ad7653edec0f | 9ae405c028f4888c601894dbf3efef0aadc5de45 | refs/heads/master | 2020-03-22T11:14:21.622429 | 2018-09-01T12:34:12 | 2018-09-01T12:34:12 | 139,957,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import pygame
from test5 import *
pygame.init()
# 480x700 window sized to the background image.
screen = pygame.display.set_mode((480,700))
bg = pygame.image.load('./images/background.png')
#bg = pygame.image.load('./images/game_loading1.png')
#screen.blit(bg,(0,0))
#pygame.display.update()
hero = pygame.image.load('./images/hero_blowup_n1.png')
screen.blit(hero,(200,500))
# Rect used to move the hero sprite around the screen.
herorect = pygame.Rect(200,500,120,120)
#screen.blit(hero,herorect)
#pygame.display.update()
clock = pygame.time.Clock()# create the clock (frame-rate limiter)
# EnemySprite presumably comes from test5 via the star import -- TODO confirm.
enemy = EnemySprite()
enemy1 = EnemySprite()
enemy2 = EnemySprite()
enemy3 = EnemySprite()
enemy4 = EnemySprite()
enemy5 = EnemySprite()
# Stagger starting positions and speeds for each enemy.
enemy1.rect.x = 50
#enemy1.rect.y = 100
enemy2.rect.x = 80
enemy2.rect.y = 120
enemy3.rect.x = 130
enemy3.rect.y = 180
enemy4.rect.x = 150
enemy4.rect.y = 200
enemy5.rect.x = 200
enemy5.rect.y = 240
enemy1.speed = 2
enemy2.speed = 5
enemy3.speed = 3
enemy4.speed = 2
enemy5.speed = 4
# NOTE(review): enemy4 and enemy5 are configured above but never added to the
# group, so they are neither updated nor drawn.
enemy_group = pygame.sprite.Group(enemy,enemy1,enemy2,enemy3)
while True:
    clock.tick(60)  # cap at 60 FPS
    herorect.y-=3   # hero drifts upward every frame
    screen.blit(bg,(0,0))
    screen.blit(hero,herorect)
    # Wrap the hero back to the bottom once it leaves the top of the screen.
    if herorect.bottom <= 0:
        herorect.top = 700
    enemy_group.update()# update enemy positions
    enemy_group.draw(screen)
    for event in pygame.event.get():
        # Check whether the user clicked the close button
        if event.type == pygame.QUIT:
            print("退出游戏...")
            pygame.quit()
            # Exit the interpreter directly
            exit()
    pygame.display.update()# flip the frame to the display
# Unreachable: the loop above only terminates via exit() inside the QUIT branch.
pygame.quit()
pygame.quit()
| [
"414296787@qq.com"
] | 414296787@qq.com |
a52e520226a6a065ac0963816835529e458609bb | f5f60c690193bc7a15e28d794f3c60859025fa9d | /1_echo_server/server/work_with_data.py | 219e58125c46549479176f9a466504324cdda753 | [] | no_license | davadzh/UNIX | 2681055d66cd0f3240e35729277c4c8634753111 | 5c9ccd9f778cfa3f850b8f363a0ca4d5568f8e19 | refs/heads/master | 2023-04-07T07:57:15.289068 | 2021-04-10T13:16:40 | 2021-04-10T13:16:40 | 356,586,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from typing import Tuple, Union
import yaml
class DataProcessing:
    """Persistence layer for user records stored in a YAML file."""

    def __init__(self) -> None:
        # Path of the YAML collection; the collection is loaded eagerly.
        self.file_path = "./data/users.yml"
        self.data = []
        self.read_collection()

    def read_collection(self):
        """Load the user collection from disk (empty list when the file is blank)."""
        with open(self.file_path, "r") as stream:
            loaded = yaml.safe_load(stream)
        self.data = loaded if loaded is not None else []

    def write_collection(self):
        """Persist the in-memory collection back to the YAML file."""
        with open(self.file_path, "w") as stream:
            yaml.dump(self.data, stream)

    def user_auth(self, ip: str, password: str) -> Tuple[int, Union[str, None]]:
        """Authenticate a user by ip address and password.

        Returns (1, username) on a full match, (0, None) when the ip is known
        but the password is wrong, and (-1, None) for an unknown ip.
        """
        match = next(
            (u for u in self.data
             if u["ip_addr"] == ip and u["password"] == password),
            None,
        )
        if match is not None:
            return 1, match["username"]
        if any(u["ip_addr"] == ip for u in self.data):
            return 0, None
        return -1, None

    def user_reg(self, ip: str, password: str, username: str) -> None:
        """Register a new user and persist the collection immediately."""
        record = {"ip_addr": ip, "password": password, "username": username}
        self.data.append(record)
        self.write_collection()
| [
"adzhamyand@gmail.com"
] | adzhamyand@gmail.com |
701240e0e4007a2404d6e6cc066b03a7c73458da | b8013e75b7744f0586add5ef2d88704240aff79b | /main.py | fe045a1e2761473971c75083cefec3fd0813cdd3 | [
"MIT"
] | permissive | charlyvazquez/Shopping-System_Python-PostgreSQL | 20d6ed7bb226d6cb033d94eecaba7ff90e41d6aa | 09ca5bf438b5bb484201bed8cc1ac7181cb93c55 | refs/heads/master | 2023-03-28T07:20:26.430370 | 2021-03-28T22:44:54 | 2021-03-28T22:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,318 | py | from tkinter import ttk
from tkinter import *
import tkinter.messagebox
import psycopg2
class Shop:
def __init__(self, root):
self.root = root
self.root.title('Shopping System')
##### VARIABLES #####
self.userFullname = StringVar()
self.userNickname = StringVar()
self.userAge = IntVar()
self.itemName = StringVar()
self.itemPrice = DoubleVar()
self.itemQuantity = IntVar()
self.orderQuantity = IntVar()
self.userFullnameOrders = StringVar()
self.newStockQty = IntVar()
##### MAIN LABEL FRAME - USERS, ITEMS AND PLACE ORDERS #####
self.mainFrame = LabelFrame(self.root, text = 'REGISTRATION')
self.mainFrame.grid(row = 0, column = 0)
##### FIRST LABEL FRAME - USERS #####
self.userRegisterFrame = LabelFrame(self.mainFrame, text = 'User Register')
self.userRegisterFrame.grid(row = 0, column = 0)
#Info
Label(self.userRegisterFrame, text = 'Fullname').grid(row = 0, column = 0)
Entry(self.userRegisterFrame, textvariable = self.userFullname).grid(row = 0, column = 1)
Label(self.userRegisterFrame, text = 'Username').grid(row = 1, column = 0)
Entry(self.userRegisterFrame, textvariable = self.userNickname).grid(row = 1, column = 1)
Label(self.userRegisterFrame, text = 'Age').grid(row = 2, column = 0)
Entry(self.userRegisterFrame, textvariable = self.userAge).grid(row = 2, column = 1)
#Save User Button
ttk.Button(self.userRegisterFrame, text = 'Save' , command = self.saveUserData).grid(row = 3, column = 0)
##### SECOND LABEL FRAME - PRODUCTS REGISTRATION #####
self.itemsFrame = LabelFrame(self.mainFrame, text = 'Products Register')
self.itemsFrame.grid(row = 0, column = 1)
#Entries
Label(self.itemsFrame, text = "Product's name").grid(row = 0, column = 0)
Entry(self.itemsFrame, textvariable = self.itemName).grid(row = 0, column = 1)
Label(self.itemsFrame, text = 'Price').grid(row = 1, column = 0)
Entry(self.itemsFrame, textvariable = self.itemPrice).grid(row = 1, column = 1)
Label(self.itemsFrame, text = 'Quantity').grid(row = 2, column = 0)
Entry(self.itemsFrame, textvariable = self.itemQuantity).grid(row = 2, column = 1)
#Save Product Button
ttk.Button(self.itemsFrame, text = 'Save Product', command = self.saveItemData).grid(row = 3, column = 0)
##### THIRD LABEL FRAME - USERS INFO #####
#Label frame
self.usersInfoFrame = LabelFrame(self.root, text = 'CLIENTS AND PRODUCTS')
self.usersInfoFrame.grid(row = 3, column = 0, sticky = W+E)
#Treeview for users
self.usersTree = ttk.Treeview(self.usersInfoFrame, height = 5, columns = (1,2,3,4))
self.usersTree.grid(row = 0, column = 0)
self.usersTree.column('#0', width = 50, minwidth = 50, anchor = CENTER)
self.usersTree.column('#1', width = 200, minwidth = 50, anchor = CENTER)
self.usersTree.column('#2', width = 100, minwidth = 50, anchor = CENTER)
self.usersTree.column('#3', width = 200, minwidth = 50, anchor = CENTER)
self.usersTree.column('#3', width = 200, minwidth = 50, anchor = CENTER)
self.usersTree.heading('#0', text = 'ID', anchor = CENTER)
self.usersTree.heading('#1', text = 'Fullname', anchor = CENTER)
self.usersTree.heading('#2', text = 'User name', anchor = CENTER)
self.usersTree.heading('#3', text = 'Age', anchor = CENTER)
self.usersTree.heading('#4', text = 'Date Registered', anchor = CENTER)
##### FOURTH LABEL FRAME - UPDATE STOCK #####
self.updateStockFrame = LabelFrame(self.usersInfoFrame, text = 'UPDATE STOCK')
self.updateStockFrame.grid(row = 0, column = 3)
Label(self.updateStockFrame, text = 'New Quantity').grid(row = 0, column = 0)
Entry(self.updateStockFrame, textvariable = self.newStockQty).grid(row = 0, column = 1)
ttk.Button(self.updateStockFrame, text = 'Update', command = self.updateStockQty).grid(row = 1, column = 0)
##### FIFTH LABEL FRAME - BUTTONS #####
self.actionsLabelFrame = LabelFrame(self.root, text = 'ACTIONS')
self.actionsLabelFrame.grid(row = 4, column = 0, sticky = W+E)
#Buttons
ttk.Button(self.actionsLabelFrame, text = 'Delete User', command = self.deleteUser).grid(row = 0, column = 0)
ttk.Button(self.actionsLabelFrame, text = 'Refresh Users List', command = self.refreshUsersList).grid(row = 0, column = 1)
ttk.Button(self.actionsLabelFrame, text = 'Delete Product', command = self.deleteProduct).grid(row = 0, column = 2)
ttk.Button(self.actionsLabelFrame, text = 'Refresh Products List', command = self.refreshProductsList).grid(row = 0, column = 3)
#Treeview for items
self.itemsTree = ttk.Treeview(self.usersInfoFrame, height = 5, columns = (1,2,3))
self.itemsTree.grid(row = 0, column = 1)
self.itemsTree.column('#0', width = 50, minwidth = 50, anchor = CENTER)
self.itemsTree.column('#1', width = 200, minwidth = 50, anchor = CENTER)
self.itemsTree.column('#2', width = 100, minwidth = 50, anchor = CENTER)
self.itemsTree.column('#3', width = 100, minwidth = 50, anchor = CENTER)
self.itemsTree.heading('#0', text = 'ID', anchor = CENTER)
self.itemsTree.heading('#1', text = "Product", anchor = CENTER)
self.itemsTree.heading('#2', text = 'Price', anchor = CENTER)
self.itemsTree.heading('#3', text = 'Quantity', anchor = CENTER)
##### FOURTH LABEL FRAME - PLACE ORDER #####
self.ordersFrame = LabelFrame(self.usersInfoFrame, text = 'PLACE ORDERS')
self.ordersFrame.grid(row = 0, column = 2)
Label(self.ordersFrame, text = 'Quantity').grid(row = 0, column = 0)
Entry(self.ordersFrame, textvariable = self.orderQuantity).grid(row = 0, column = 1)
ttk.Button(self.ordersFrame, text = 'Place Order', command = self.placeOrder).grid(row = 1, column = 0)
##### FIFTH LEVEL FRAME - SELECT ORDERS FROM ONE USER #####
self.oneOrderFrame = LabelFrame(self.root, text = 'SELECT ORDERS FROM ONE USER')
self.oneOrderFrame.grid(row = 1, column = 0)
#Select all the orders from one user
Label(self.oneOrderFrame, text = "Client's Fullname").grid(row = 1, column = 0)
Entry(self.oneOrderFrame, textvariable = self.userFullnameOrders).grid(row = 1, column = 1)
#Buttons
ttk.Button(self.oneOrderFrame, text = 'Select Active Orders', command = self.ordersFromUser).grid(row = 1, column = 3)
ttk.Button(self.oneOrderFrame, text = 'Refresh Orders', command = self.refreshOrders).grid(row = 2, column = 0)
ttk.Button(self.oneOrderFrame, text = 'Cancel Order', command = self.cancelOrder).grid(row = 2, column = 1)
ttk.Button(self.oneOrderFrame, text = 'Fulfill Order', command = self.fulfilledOrders).grid(row = 2, column = 2)
ttk.Button(self.oneOrderFrame, text = 'See Fulfilled Orders', command = self.seeFulfilledOrders).grid(row = 2, column = 3)
##### SIXTH LABEL FRAME - ORDERS QUERIES #####
self.queriesFrame = LabelFrame(self.root, text = 'ORDERS')
self.queriesFrame.grid(row = 2, column = 0, sticky = W+E)
#Treeview
self.ordersTree = ttk.Treeview(self.queriesFrame, height = 15, columns = (1,2,3,4,5,6))
self.ordersTree.grid(row = 2, column = 0)
self.ordersTree.column('#0', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.column('#1', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.column('#2', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.column('#3', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.column('#4', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.column('#5', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.column('#5', width = 250, minwidth = 200, anchor = CENTER)
self.ordersTree.heading('#0', text = "Customer's Name", anchor = CENTER)
self.ordersTree.heading('#1', text = 'Client ID', anchor = CENTER)
self.ordersTree.heading('#2', text = 'Product', anchor = CENTER)
self.ordersTree.heading('#3', text = 'Price $', anchor = CENTER)
self.ordersTree.heading('#4', text = 'Quantity', anchor = CENTER)
self.ordersTree.heading('#5', text = 'Product ID', anchor = CENTER)
self.ordersTree.heading('#6', text = 'DATE', anchor = CENTER)
#Automatic methods calls
self.getUsers()
self.getItems()
self.getOrders()
def postgresModifyer(self, query, args = ()):
    """Run a data-modifying SQL statement (INSERT/UPDATE/DELETE) and commit.

    query: SQL string, normally with %s placeholders.
    args:  parameter tuple bound by psycopg2 (prefer this over string
           interpolation to avoid SQL injection).
    """
    connector = psycopg2.connect(
        dbname = 'yourdatabase',
        user = 'postgres',
        password = 'yourpassword',
        host = 'localhost',
        port = '5432'
    )
    try:
        cursor = connector.cursor()
        cursor.execute(query, args)
        connector.commit()
    finally:
        # Always release the connection, even when execute() raises;
        # the original leaked the connection on error.
        connector.close()
    print('Succesfull Query')
def postgresQuery(self, query, args = ()):
    """Run a SELECT statement and return all fetched rows.

    args is an optional psycopg2 parameter tuple (new, backward compatible:
    existing one-argument callers behave exactly as before).  Prefer %s
    placeholders + args over interpolating values into the query string.
    """
    connector = psycopg2.connect(
        dbname = 'yourdatabase',
        user = 'postgres',
        password = 'yourpassword',
        host = 'localhost',
        port = '5432'
    )
    try:
        cursor = connector.cursor()
        if args:
            cursor.execute(query, args)
        else:
            # No args: do NOT pass an empty tuple, otherwise psycopg2 would
            # try to %-interpolate literal % characters (e.g. in LIKE '%x%').
            cursor.execute(query)
        # cursor.execute() returns None; the old unused `result =` binding
        # was removed.
        row = cursor.fetchall()
        connector.commit()
    finally:
        # Always release the connection, even when the query raises.
        connector.close()
    print('Succesfull Query')
    return row
def saveUserData(self):
    """Insert a new client from the entry-form fields and refresh the list."""
    name = self.userFullname.get()
    nickname = self.userNickname.get()
    age = self.userAge.get()
    # Both text fields must be non-empty and age strictly positive.
    if(len(name) != 0 and len(nickname) != 0 and age > 0):
        query = 'INSERT INTO clients(name, user_name, age) VALUES (%s, %s, %s);'
        args = (name, nickname, age)
        self.postgresModifyer(query, args)
        self.getUsers()
    else:
        tkinter.messagebox.showwarning('Wrong Input', 'One or more fields are not quite right. Please verify them.')
def saveItemData(self):
    """Insert a new product from the entry-form fields and refresh the list."""
    name = self.itemName.get()
    price = self.itemPrice.get()
    qty = self.itemQuantity.get()
    # Name required, price non-negative, quantity strictly positive.
    if(len(name) != 0 and price >= 0 and qty > 0):
        query = 'INSERT INTO items(name, price, quantity) VALUES(%s, %s, %s);'
        args = (name, price, qty)
        self.postgresModifyer(query, args)
        self.getItems()
    else:
        tkinter.messagebox.showwarning('Wrong Input','One or more fields are not quite right. Please verify them.')
def placeOrder(self):
    """Order the typed quantity of the selected item for the selected client.

    Requires a selection in both the users and the items treeviews; checks
    the requested amount against current stock before inserting the order
    and decrementing the item's quantity.
    """
    try:
        self.usersTree.item(self.usersTree.selection())['values'][0]#Check name
        self.itemsTree.item(self.itemsTree.selection())['values'][0]#Check the id
    except IndexError as e:
        tkinter.messagebox.showwarning('Selection Error','No clients or items selected')
        return
    clientId = self.usersTree.item(self.usersTree.selection())['text']
    itemId = self.itemsTree.item(self.itemsTree.selection())['text']
    desiredQty = self.orderQuantity.get()
    #Get the quantity from the selected row
    # NOTE(review): value interpolated into the SQL string; itemId comes
    # from the tree, but a parameterized query would be safer.
    query1 = f'SELECT quantity FROM items WHERE item_id = {itemId};'
    stockQty = self.postgresQuery(query1)[0][0] #Select just the first item from the first tuple
    #Check for correct values
    if(stockQty == 0):
        tkinter.messagebox.showwarning('Out os stock', 'The current item is out of stock')
    elif(desiredQty == 0):
        tkinter.messagebox.showwarning('No items selected', f'Please, select an amount greater than 0 or less than or equal to: {stockQty}')
    elif(desiredQty > stockQty):
        tkinter.messagebox.showwarning('Stock problem','Not enough items in stock')
    else:
        #print(f'User:{userId}, Item:{itemId}')
        query2 = 'INSERT INTO orders(clientID, itemID, quantity) VALUES(%s, %s, %s); '
        args2 = (clientId, itemId, desiredQty)
        self.postgresModifyer(query2, args2)
        # Decrement the stock by the ordered amount.
        difference = stockQty - desiredQty
        query3 = 'UPDATE items SET quantity = %s WHERE item_id = %s;'
        args = (difference, itemId)
        self.postgresModifyer(query3, args)
        self.getOrders()
        self.getItems()
def cancelOrder(self):
    """Cancel the selected order after confirmation and restock its items."""
    try:
        self.ordersTree.item(self.ordersTree.selection())['values'][0]
    except IndexError as e:
        tkinter.messagebox.showwarning('Nothing Selected','Please, select an order before cancelling')
        return
    answer = tkinter.messagebox.askquestion('Verification','Are you sure you want to cancel?')
    if(answer == 'yes'):
        clientId = self.ordersTree.item(self.ordersTree.selection())['values'][0]
        itemId = self.ordersTree.item(self.ordersTree.selection())['values'][4]
        qty = self.ordersTree.item(self.ordersTree.selection())['values'][3]
        date = self.ordersTree.item(self.ordersTree.selection())['values'][5]
        #Take the original quantity, minus the ordered one and put it back
        # NOTE(review): the queries below interpolate values into SQL
        # strings; parameterized queries would be safer.
        query1 = f"SELECT quantity FROM orders WHERE itemID = {itemId} AND clientID = {clientId} AND ordered_at = '{date}';"
        orderedQuantity = self.postgresQuery(query1)[0][0]
        query2 = f'SELECT quantity FROM items WHERE item_id = {itemId};'
        stockQuantity = self.postgresQuery(query2)[0][0]
        # Return the cancelled amount to the item's stock.
        updatedQuantity = orderedQuantity + stockQuantity
        query3 = f'UPDATE items SET quantity = {updatedQuantity} WHERE item_id = {itemId};'
        self.postgresModifyer(query3)
        query4 = f"DELETE FROM orders WHERE clientID = {clientId} and itemID = {itemId} AND ordered_at = '{date}';"
        self.postgresModifyer(query4)
        self.getOrders()
        self.getItems()
def fulfilledOrders(self):
    """Move the selected order into fulfilled_orders, then show that list.

    Copies the selected row into the fulfilled_orders table, removes it from
    the active orders, refreshes the orders view and opens the fulfilled
    orders window.
    """
    try:
        self.ordersTree.item(self.ordersTree.selection())['values'][0]
    except IndexError as e:
        tkinter.messagebox.showwarning('Nothing Selected','Please, select an order before cancelling')
        return
    clientId = self.ordersTree.item(self.ordersTree.selection())['values'][0]
    itemId = self.ordersTree.item(self.ordersTree.selection())['values'][4]
    qty = self.ordersTree.item(self.ordersTree.selection())['values'][3]
    date = self.ordersTree.item(self.ordersTree.selection())['values'][5]
    # Insert the fulfilled order in the corresponding table.
    # Ids/quantity come from the tree: coerce to int so the interpolated
    # SQL cannot be broken or injected.
    query = f'INSERT INTO fulfilled_orders(clientID, itemID, quantity) VALUES({int(clientId)},{int(itemId)},{int(qty)});'
    self.postgresModifyer(query)
    query2 = f"DELETE FROM orders WHERE clientID = {int(clientId)} AND itemID = {int(itemId)} AND ordered_at = '{date}';"
    self.postgresModifyer(query2)
    self.getOrders()
    # The remainder of the original body was a byte-for-byte copy of
    # seeFulfilledOrders(); delegate instead of duplicating the window code.
    self.seeFulfilledOrders()
def seeFulfilledOrders(self):
    """Open a Toplevel window listing every fulfilled order (joined view)."""
    top = Toplevel()
    top.title('Fulfilled Orders')
    #Label frame
    self.fulfilledOrdersFrame = LabelFrame(top, text = 'FULFILLED ORDERS')
    self.fulfilledOrdersFrame.grid(row = 0, column = 0)
    #Treeview
    self.fulfilledOrdersTree = ttk.Treeview(self.fulfilledOrdersFrame, height = 10, columns = (1,2,3,4))
    self.fulfilledOrdersTree.grid(row = 0, column = 0)
    self.fulfilledOrdersTree.column('#0', width = 150, minwidth = 100, anchor = CENTER)
    self.fulfilledOrdersTree.column('#1', width = 150, minwidth = 100, anchor = CENTER)
    self.fulfilledOrdersTree.column('#2', width = 150, minwidth = 100, anchor = CENTER)
    self.fulfilledOrdersTree.column('#3', width = 150, minwidth = 100, anchor = CENTER)
    self.fulfilledOrdersTree.column('#4', width = 150, minwidth = 100, anchor = CENTER)
    self.fulfilledOrdersTree.heading('#0', text = 'Client ID', anchor = CENTER)
    self.fulfilledOrdersTree.heading('#1', text = 'Fullname', anchor = CENTER)
    self.fulfilledOrdersTree.heading('#2', text = 'Product ID', anchor = CENTER)
    self.fulfilledOrdersTree.heading('#3', text = 'Product', anchor = CENTER)
    self.fulfilledOrdersTree.heading('#4', text = 'Fulfilled At', anchor = CENTER)
    # Join clients and items through fulfilled_orders to get display names.
    query2 = """SELECT clients.client_id, clients.name, items.item_id, items.name, fulfilled_at
    FROM clients
    INNER JOIN fulfilled_orders
    ON clients.client_id = fulfilled_orders.clientID
    INNER JOIN items
    ON items.item_id = fulfilled_orders.itemID;"""
    buffer = self.postgresQuery(query2)
    for i in buffer:
        self.fulfilledOrdersTree.insert('', 0, text = i[0], values = i[1:])
def ordersFromUser(self):
    """Show only the active orders whose client name matches the search box."""
    name = self.userFullnameOrders.get()
    if(len(name) != 0):
        # Clear the current contents of the orders treeview.
        treeInfo = self.ordersTree.get_children()
        for i in treeInfo:
            self.ordersTree.delete(i)
        # SECURITY: `name` is free text typed by the user and is interpolated
        # into the SQL string (postgresQuery takes no bind parameters), so
        # escape single quotes to prevent breaking out of the literal.
        safe_name = name.replace("'", "''")
        query = """ SELECT clients.name AS Client, client_id, items.name As Product, items.price AS Price, orders.quantity, items.item_id, ordered_at
        FROM items
        INNER JOIN orders
        ON items.item_id = orders.itemID
        INNER JOIN clients
        ON clients.client_id = orders.clientID
        WHERE clients.name LIKE '%{}%'
        ORDER BY orders.ordered_at DESC;""".format(safe_name)
        buffer = self.postgresQuery(query)
        for i in buffer:
            self.ordersTree.insert('', 0, text = i[0], values = i[1:])
    else:
        tkinter.messagebox.showwarning('Empty field','Please, before searching enter a name.')
def refreshOrders(self):
    """Button callback: reload the orders treeview."""
    self.getOrders()
def refreshUsersList(self):
    """Button callback: reload the clients treeview."""
    self.getUsers()
def refreshProductsList(self):
    """Button callback: reload the products treeview."""
    self.getItems()
def deleteUser(self):
    """Delete the selected client (after confirmation) and refresh the views."""
    try:
        self.usersTree.item(self.usersTree.selection())['values'][0]
        self.usersTree.item(self.usersTree.selection())['text']
    except IndexError as e:
        tkinter.messagebox.showwarning('Nothing Selected', 'Please, before deleting select one user')
        return
    answer = tkinter.messagebox.askquestion('Deletion', 'Are you sure you want to DELETE this user?')
    if(answer == 'yes'):
        name = self.usersTree.item(self.usersTree.selection())['values'][0]
        myId = self.usersTree.item(self.usersTree.selection())['text']
        # Parameterized delete keyed on both name and primary key.
        query = 'DELETE FROM clients WHERE name = %s AND client_id = %s;'
        args = (name, myId)
        self.postgresModifyer(query, args)
        self.getUsers()
        self.getOrders()
def deleteProduct(self):
    """Delete the selected product (after confirmation) and refresh the views."""
    try:
        self.itemsTree.item(self.itemsTree.selection())['values'][0]
        self.itemsTree.item(self.itemsTree.selection())['text']
    except IndexError as e:
        tkinter.messagebox.showwarning('Nothing Selected', 'Please, before deleting select one item')
        return
    answer = tkinter.messagebox.askquestion('Deletion', 'Are you sure you want to DELETE this item?')
    if(answer == 'yes'):
        name = self.itemsTree.item(self.itemsTree.selection())['values'][0]
        myId = self.itemsTree.item(self.itemsTree.selection())['text']
        # Parameterized delete keyed on both name and primary key.
        query = 'DELETE FROM items WHERE name = %s AND item_id = %s;'
        args = (name, myId)
        self.postgresModifyer(query, args)
        self.getItems()
        self.getOrders()
#### UPDATES #####
def updateStockQty(self):
    """Set the stock quantity of the selected item to the typed value."""
    newQty = self.newStockQty.get()
    if(newQty < 0):
        tkinter.messagebox.showwarning('Wrong Quantity', 'Please, type a valid quantity')
    else:
        try:
            self.itemsTree.item(self.itemsTree.selection())['values'][0]
        except IndexError as e:
            tkinter.messagebox.showwarning('Nothing Selected', 'Please, before updating select one item')
            return
        itemId = self.itemsTree.item(self.itemsTree.selection())['text']
        # Parameterized query instead of f-string interpolation: matches the
        # other write paths and removes the SQL-injection risk
        # (postgresModifyer already accepts bind args).
        query = 'UPDATE items SET quantity = %s WHERE item_id = %s;'
        self.postgresModifyer(query, (newQty, itemId))
        self.getItems()
##### GETTERS #####
def getUsers(self):
    """Reload the clients treeview from the clients table (sorted by name)."""
    # Drop everything currently displayed before repopulating.
    for row_id in self.usersTree.get_children():
        self.usersTree.delete(row_id)
    rows = self.postgresQuery(
        'SELECT client_id, name, user_name, age, registered FROM clients ORDER BY name DESC;'
    )
    # First column becomes the tree item text, the rest the visible columns.
    for record in rows:
        self.usersTree.insert('', 0, text = record[0], values = record[1:])
def getItems(self):
    """Reload the products treeview from the items table (sorted by name)."""
    treeInfo = self.itemsTree.get_children()
    # Clear the current rows before repopulating.
    for i in treeInfo:
        self.itemsTree.delete(i)
    query = 'SELECT item_id, name, price, quantity FROM items ORDER BY name DESC;'
    buffer = self.postgresQuery(query)
    for i in buffer:
        self.itemsTree.insert('', 0, text = i[0], values = i[1:])
def getOrders(self):
    """Reload the orders treeview with all active orders (newest first)."""
    treeInfo = self.ordersTree.get_children()
    # Clear the current rows before repopulating.
    for i in treeInfo:
        self.ordersTree.delete(i)
    # Join items and clients through orders to get display names and prices.
    query = """ SELECT clients.name AS Client, client_id, items.name As Product, items.price AS Price, orders.quantity, items.item_id, ordered_at
    FROM items
    INNER JOIN orders
    ON items.item_id = orders.itemID
    INNER JOIN clients
    ON clients.client_id = orders.clientID
    ORDER BY orders.ordered_at DESC;"""
    buffer = self.postgresQuery(query)
    for i in buffer:
        self.ordersTree.insert('', 0, text = i[0], values = i[1:])
if __name__ == '__main__':
root = Tk()
root.resizable(False, False)
Shop(root)
root.mainloop() | [
"fervallefer@outlook.com"
] | fervallefer@outlook.com |
3206bbb3a5ddc4031426089f04566336b57b7675 | 7c1766618e9ae6937639b9176726836f3cc6c680 | /autonomia/telegram_flask.py | 2b2e283a4119792486a3652edba6884e6e353ffa | [
"MIT"
] | permissive | PythonistasBR/bot | d3b423954e4eb7fa398a9b5f135dbf6750fdb6c0 | c4131c89ce71894e9173b89aba50f36ea80d5378 | refs/heads/master | 2023-08-19T06:23:45.135674 | 2023-08-15T07:41:44 | 2023-08-15T07:41:44 | 126,384,755 | 10 | 4 | MIT | 2023-08-15T07:41:45 | 2018-03-22T19:22:36 | Python | UTF-8 | Python | false | false | 3,979 | py | import logging
from collections import defaultdict
import telegram
from telegram import Update
from telegram.ext import ConversationHandler, DictPersistence, Dispatcher
from autonomia.core import autodiscovery, get_handlers, setup_handlers
logger = logging.getLogger(__name__)
class TelegramFlask:
def __init__(self, app=None, persistence=None):
    """Optionally bind to a Flask app right away (Flask-extension pattern)."""
    self.app = app
    # Fall back to in-memory persistence for handler/conversation state.
    self.persistence = persistence or DictPersistence()
    self.bot = None
    self.dispatcher = None
    if app is not None:
        self.init_app(app, persistence)
def init_app(self, app, persistence=None):
    """Deferred initialisation: build the bot and register this extension."""
    self.create_bot(app, persistence)
    app.extensions["telegram"] = self
def create_bot(self, app, persistence):
    """Create the telegram Bot and a synchronous Dispatcher from app config."""
    self.app = app
    self.persistence = persistence or DictPersistence()
    token = app.config.get("API_TOKEN")
    # Import the handler modules listed in config so they register themselves.
    autodiscovery(app.config.get("APPS", []))
    self.bot = telegram.Bot(token=token)
    # workers=0: updates are processed synchronously in the web request.
    self.dispatcher = Dispatcher(
        self.bot, None, workers=0, use_context=True, persistence=self.persistence
    )
    setup_handlers(self.dispatcher)
    # log all errors
    self.dispatcher.add_error_handler(self.error)
def process_update(self, request):
    """Handle one Telegram webhook POST coming in through Flask."""
    # Restore persisted state first: web requests are stateless.
    self.reload_state()
    update = Update.de_json(request.get_json(force=True), self.bot)
    # Lazy %-style args: the message is only formatted if DEBUG is enabled
    # (the original eagerly formatted with the % operator).
    logger.debug("Received Update with ID %d on Webhook", update.update_id)
    self.dispatcher.process_update(update)
def reload_state(self):
    """Re-read user/chat/bot data and conversation state from persistence.

    Called before every webhook update so dispatcher state survives across
    stateless web requests.
    """
    if self.persistence.store_user_data:
        self.dispatcher.user_data = self.persistence.get_user_data()
        if not isinstance(self.dispatcher.user_data, defaultdict):
            raise ValueError("user_data must be of type defaultdict")
    if self.persistence.store_chat_data:
        self.dispatcher.chat_data = self.persistence.get_chat_data()
        if not isinstance(self.dispatcher.chat_data, defaultdict):
            raise ValueError("chat_data must be of type defaultdict")
    if self.persistence.store_bot_data:
        self.dispatcher.bot_data = self.persistence.get_bot_data()
        if not isinstance(self.dispatcher.bot_data, dict):
            raise ValueError("bot_data must be of type dict")
    # Re-attach persistence to every persistent ConversationHandler and
    # restore its saved conversation states.
    for handler in get_handlers():
        if isinstance(handler, ConversationHandler) and handler.persistent:
            if not self.persistence:
                raise ValueError(
                    "Conversationhandler {} can not be persistent if dispatcher "
                    "has no persistence".format(handler.name)
                )
            handler.persistence = self.persistence
            handler.conversations = self.persistence.get_conversations(handler.name)
def setup_webhook(self, app):
    """Point Telegram's webhook at this deployment.

    Returns a (changed, message) tuple; changed is True only when a new
    webhook URL was actually registered.
    """
    domain = app.config.get("WEBHOOK_DOMAIN")
    path = app.config.get("WEBHOOK_PATH")
    webhook_url = f"https://{domain}/{path}"
    try:
        response = self.bot.get_webhook_info()
    except Exception:
        logger.error("Unable to get telegram webhook", exc_info=True)
        return False, "Unable to get telegram webhook"
    # Already registered with the same URL: nothing to do.
    if response.url == webhook_url:
        return False, f"Keeping the same webhook url: {webhook_url}"
    try:
        success = self.bot.set_webhook(webhook_url)
    except Exception:
        logger.error("Unable to get telegram webhook", exc_info=True)
        return False, "Unable to set telegram webhook"
    if not success:
        return False, f"Unable to set telegram webhook, return: {success}"
    return True, f"Change webhook to the new url: {webhook_url}"
@staticmethod
def error(update, context):
raise context.error
# This instance should be used to access bot features directly.
# The attributes telegram_flask.bot and telegram_flask.dispatcher are available
telegram_flask = TelegramFlask()
| [
"fabio@cerqueira.me"
] | fabio@cerqueira.me |
84e5fcfd587583ca2dcb9df4e20f057aa5110f3a | 0393c644ea43dc480c788237a867fb3fe98f8960 | /aims_/annotate_single.py | 1e3078ca05828d09dce348547a8ab9b54ef6383f | [] | no_license | akoustic/AIMS | 75a065d322d6cccb2968c1daac6cb5dede4ed0ba | 553915d8a98a64673e7d5ac3b2cddb5e73265a07 | refs/heads/main | 2023-03-10T10:29:35.490265 | 2021-02-10T14:48:13 | 2021-02-10T14:48:13 | 310,282,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | import base64
import datetime
import dash
import json
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import dash_canvas
import dash_table
import pandas as pd
from dash_canvas.utils import parse_jsonstring_rectangle
from dash_canvas.components import image_upload_zone
from PIL import Image
from io import BytesIO
import base64
from numpy import asarray
import os
from aims_ import app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
dashapp = dash.Dash(name='annotateapp', server=app, url_base_pathname='/annotateinv/',external_stylesheets=external_stylesheets)
list_columns = ['width', 'height', 'left', 'top', 'label']
columns = [{'name': i, "id": i} for i in list_columns]
columns[-1]['presentation'] = 'dropdown'
list_preferred = ['Company Name','Company Address','Invoice Number','Start of Table','End of Table','Subtotal','Tax','Discount','Total']
shortlists = [{'label': i, 'value': i} for i in list_preferred]
dashapp.layout = html.Div([
dcc.Upload(
id='upload-data',
children=[
'Drag and Drop or ',
html.A('Select an Image')],
style={'width': str(100) + '%',
'height': '50px',
'lineHeight': '50px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center'
},
accept='image/*',
multiple=False,
),
dash_canvas.DashCanvas(
id='canvas',
width=1000,
tool='rectangle',
lineWidth=2,
lineColor='rgba(255,0, 0, 0.5)',
hide_buttons=['pencil', 'line'],
goButtonTitle='Label'
),
html.Div([
html.Div([
html.H3('Label images with bounding boxes'),
]),
html.Div([
dash_table.DataTable(
id='table',
columns=columns,
editable=True,
dropdown = {'label': {'options': shortlists}},
),
])
]),
dcc.Input(
id="input_columns",
type="number",
placeholder="Number of columns",
),
dcc.Input(
id="input_filename",
type="text",
placeholder="File name",
),
html.Button('Done', id='done', n_clicks=0),
html.Div(id = 'done-output')
])
prev = None  # image_content most recently shown on the canvas


def checkprev(imgsrc):
    """Return True when imgsrc matches the last seen image, or none was seen.

    Used by the table callback to tell "same image, new rectangles" apart
    from "a brand-new image was uploaded".
    """
    # `is None` instead of `== None` (idiomatic identity test); single
    # boolean return instead of the if/else returning True/False.
    return prev is None or prev == imgsrc
def update_prev(imgsrc):
    """Remember imgsrc as the most recently shown image and return it."""
    global prev
    prev = imgsrc
    return prev
#---callbacks---
invoice_name = None  # basename (no extension) of the last uploaded receipt

@dashapp.callback(Output('canvas', 'image_content'),[Input('upload-data', 'contents')],[State('upload-data', 'filename'),State('upload-data', 'last_modified')], prevent_initial_call=True)
def update_canvas_upload(image_string,image_name,image_lm):
    """Push the uploaded image onto the canvas and remember its file name."""
    global invoice_name
    invoice_name = image_name.split('.')[0]
    if image_string is None:
        raise ValueError
    if image_string is not None:
        return image_string
    else:
        return None
@dashapp.callback(Output('table', 'data'), [Input('canvas', 'json_data'),Input('canvas', 'image_content')], [State('table','data')],prevent_initial_call=True)
def show_string(json_data,img_content,table_data):
    """Turn drawn canvas rectangles into table rows, keeping chosen labels."""
    # Same image as before: rebuild the rows from the current rectangles.
    if checkprev(img_content):
        update_prev(img_content)
        j = json.loads(json_data)
        if len(j["objects"])>0:
            box_coordinates = parse_jsonstring_rectangle(json_data)
            if len(box_coordinates)>0:
                df = pd.DataFrame(box_coordinates, columns=list_columns[:-1])
                stdt = df.to_dict('records')
                # Carry over labels the user already picked for existing rows.
                if table_data!=None:
                    for i in range(len(table_data)):
                        if 'label' in table_data[i]:
                            stdt[i]['label'] = table_data[i]['label']
                return stdt
        raise dash.exceptions.PreventUpdate
    else:
        # A new image was uploaded: clear the table.
        update_prev(img_content)
        return None
@dashapp.callback(Output('done-output','children'),[Input('done','n_clicks')],[State('table','data'),State('canvas','image_content'),State('input_columns','value'),State('input_filename','value')],prevent_initial_call=True)
def updateout(_,tab_data,img_content,no_of_columns,filename):
    """Persist the annotation table to static/coordinates/<filename>.csv."""
    global invoice_name
    if img_content!=None and no_of_columns!=None and no_of_columns>0:
        # Default the output name to the uploaded invoice's basename.
        if filename==None or filename=='':
            filename = invoice_name
        no_of_columns = int(no_of_columns)
        # Append a sentinel row that stores the table's column count.
        tab_data.append({"width":no_of_columns,"height":no_of_columns,"left":no_of_columns,"top":no_of_columns,"label":"No of Columns"})
        pd.DataFrame.from_records(tab_data).to_csv(os.path.join(app.root_path, 'static/coordinates',filename+'.csv'),index=False)
        return html.H3('Annotation results saved as {}'.format(filename))
    elif img_content==None:
        return html.H3('Load a receipt to annotate and save results')
    elif no_of_columns==None or no_of_columns<=0:
        return html.H3('Enter appropriate number of columns in table')
| [
"amakanksh@gmail.com"
] | amakanksh@gmail.com |
d9031b16200b2e229191ed4fb1de2c5513080743 | ec4ab98fc3febb5e562b17e252117bcc491ac4c0 | /basicProject-10.py | e41f24ad3781bfc407acaf20a84000dcfa2fbb4e | [] | no_license | fancystuff4/pythonProjects | b8b01df79ad968f3194bacf2bea7e267cc5d8e9e | 64bf6988283b4f27911fd9b11129b039a06d8d38 | refs/heads/master | 2022-12-15T13:33:09.438040 | 2020-09-14T09:59:58 | 2020-09-14T09:59:58 | 295,214,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # patients info
def print_queue(*a_list, **a_dict):
    """Print the info record of every queued person that has one.

    a_list: patient names, in queue order.
    a_dict: maps patient name -> dict of info fields (e.g. age, weight).
    People without a record are skipped, and records whose owner is not in
    the queue are never printed — same behaviour as the original, but a
    direct dict lookup replaces the O(n*m) nested scan over a_dict.items().
    """
    for person in a_list:
        info = a_dict.get(person)
        if info is None:
            continue  # no record for this person
        print(person + " :")
        for key, value in info.items():
            print(key + "=" + str(value))
        print()
patient_name=['harsh','javeed','salman','raj']
patient_info={'harsh':{'age':25, 'weigth':65},
'raj':{'age':34, 'weigth':64},
'salman':{'age':55, 'weigth':60}
}
print('these are the patients in waiting list')
print_queue(*patient_name,**patient_info) | [
"noreply@github.com"
] | fancystuff4.noreply@github.com |
9d257435dbe5779aef2f564f082771bdb84f5967 | 3f62ff1049a08ed2635161ee7371a879f878ed87 | /run_all.py | 09e810f2c59064a09b8b72793893081efdc84522 | [] | no_license | fangjiantan/PostTest | b6d4fbae6938d87a8c532e3cdf2ad3c90f034773 | c73531dc695551790f5a517d685cff9f8b0d9167 | refs/heads/master | 2020-04-01T09:47:50.031532 | 2019-05-09T06:07:24 | 2019-05-09T06:07:24 | 153,090,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # coding=utf-8
import unittest
import time
from common import HTMLTestRunner
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
curpath = os.path.dirname(os.path.realpath(__file__))
report_path = os.path.join(curpath,"report")
if not os.path.exists(report_path):os.mkdir(report_path)
case_path = os.path.join(curpath,"Testcase")
def add_case(casepath=case_path, rule="test*.py"):
    """Discover unittest cases under casepath whose files match rule."""
    return unittest.defaultTestLoader.discover(casepath, pattern=rule)
def run_case(all_case,reportpath=report_path):
    """Run the discovered suite and write an HTML report under reportpath."""
    htmlreport = reportpath+r"\result.html"
    print("测试报告生成地址:%s"% htmlreport)
    # Binary mode: HTMLTestRunner writes encoded bytes to the stream.
    fp = open(htmlreport,"wb")
    runner = HTMLTestRunner.HTMLTestRunner(stream=fp,verbosity=2,title="测试报告",description="用例执行情况")
    runner.run(all_case)
    fp.close()
if __name__ == "__main__":
cases = add_case()
run_case(cases) | [
"fangjiantan@sina.cn"
] | fangjiantan@sina.cn |
4bb7bb6e2a7d1c1aeb4b132b781607ddb54d9f4e | c2db6b72a67aba4535f9a36cd201a0a887528cac | /game.py | da6345acad87c10461b9a9f21294ca1b24a2aeb9 | [] | no_license | McWixy/college_snake | 9c311fff7ec1d5262c3704c55cc96dbaa7dce116 | 97739b1a2fb9667e828e2adadfed5849eea42c45 | refs/heads/master | 2020-06-01T02:59:07.023960 | 2019-05-23T16:11:35 | 2019-05-23T16:11:35 | 190,607,184 | 1 | 0 | null | 2019-06-06T15:40:56 | 2019-06-06T15:40:55 | null | UTF-8 | Python | false | false | 2,438 | py | from random import randint
from snake import Snake
class Game:
    """Grid world for a snake game: a square map holding one snake and one apple."""

    def __init__(self, game_size : int):
        """Build an empty game_size x game_size board with no apple yet."""
        self.size = game_size
        # size x size grid of zeros; 2 marks a snake segment (see set_snake).
        self.map = []
        for _ in range(self.size):
            ligne = []
            for _ in range(self.size):
                ligne.append(0)
            self.map.append(ligne)
        self.pomme_is_there = False

    def __repr__(self):
        info = "[ Game of size "
        info += str(self.size)
        info += " ]"
        return info

    def set_snake(self, snake):
        """Attach the snake and mark its segments on the map.

        NOTE: the original class defined set_snake twice; the second
        definition (which also prints the segments) silently shadowed the
        first.  The two are merged here, keeping the behaviour of the
        definition that actually won.
        """
        self.snake = snake
        print(self.snake.segment)
        for x, y in self.snake.segment:
            self.map[x][y] = 2

    def show_game(self):
        """Print the board: S = snake segment, # = apple, . = empty cell."""
        for i in range(self.size):
            for j in range(self.size):
                Ax, Ay = self.apple
                if self.snake_there(i, j):
                    print('S', end=' ')
                elif Ax == i and Ay == j:
                    print('#', end=' ')
                else:
                    print('.', end=' ')
            print()

    def snake_there(self, x, y):
        """Return True when any snake segment occupies cell (x, y)."""
        liste = self.snake.segment
        for segment in liste:
            Sx, Sy = segment
            if Sx == x and Sy == y:
                return True
        return False

    def add_apple(self):
        """Spawn the apple on a random free cell if none is on the board."""
        if not self.pomme_is_there:
            while True:
                x = randint(0, self.size - 1)
                y = randint(0, self.size - 1)
                # Re-draw until the cell is not occupied by the snake.
                if self.snake_there(x, y):
                    continue
                self.apple = (x, y)
                break
            self.pomme_is_there = True

    def update_game(self):
        """Ask the player for a direction, move the snake, respawn the apple."""
        direction = input("Direction")
        print('a')
        """
        a/q => gauche = 1
        d/d => droite = 2
        w/z => haut = 3
        s/s => bas = 4
        """
        eat = False
        if direction == "a":
            eat = self.snake.moveLeft(self.apple)
        elif direction == "d":
            eat = self.snake.moveRight(self.apple)
        elif direction == "w":
            eat = self.snake.moveUp(self.apple)
        elif direction == "s":
            eat = self.snake.moveDown(self.apple)
        if eat:
            # The apple was eaten: allow a fresh one to spawn.
            self.pomme_is_there = False
            self.add_apple()
| [
"m.choulika@icloud.com"
] | m.choulika@icloud.com |
214374daa226d99e5073ab7b542cbb0a073ca027 | fce6762c17fc81009af226f71ca32d2dc8227beb | /Section 4 Matrix multiplications.py | 952e60e048ad9f035f59866a9b471ae7989ef640 | [] | no_license | kuangzijian/Linear-Algebra | 3f9599ef282283dfc6bd49c0c97327a8fa31e671 | 94a872502ff570f04d61cb7bf1db653681f403c3 | refs/heads/master | 2022-11-04T07:05:55.272865 | 2019-07-31T05:32:42 | 2019-07-31T05:32:42 | 196,622,918 | 0 | 1 | null | 2022-10-29T19:00:25 | 2019-07-12T17:57:11 | Python | UTF-8 | Python | false | false | 7,048 | py | import numpy as np
import matplotlib.pyplot as plt
import math
from sympy import *
#Standard matrix multiplication, parts 1 & 2
## rules for multiplication validity
m = 4
n = 3
k = 6
# make some matrices
A = np.random.randn(m,n)
B = np.random.randn(n,k)
C = np.random.randn(m,k)
# test which multiplications are valid.
np.matmul(A,B)
#np.matmul(A,A)
np.matmul(np.matrix.transpose(A),C)
np.matmul(B,np.matrix.transpose(B))
np.matmul(np.matrix.transpose(B),B)
#np.matmul(B,C)
#np.matmul(C,B)
#np.matmul(np.matrix.transpose(C),B)
np.matmul(C,np.matrix.transpose(B))
#Code challenge: matrix multiplication by layering
A = np.abs(np.round(5*np.random.randn(4,2)))
B = np.abs(np.round(5*np.random.randn(2,3)))
print(A)
print(B)
r1 = 0
for i in range(0, len(B)):
r1 = r1 + np.outer(A[:,i], B[i])
print(A[:,i])
print(B[i])
print(r1)
print(np.matmul(A, B))
#Order-of-operations on matrices
n = 2
L = np.random.randn(n,n)
I = np.random.randn(n,n)
V = np.random.randn(n,n)
E = np.random.randn(n,n)
# result of "forward" multiplication and then transpose
res1 = np.matrix.transpose( L @ I @ V @ E )
# result of "flipped" multiplication of transposed matrices
res2 = np.matrix.transpose(E) @ np.matrix.transpose(V) @ np.matrix.transpose(I) @ np.matrix.transpose(L)
# test equality by subtracting (ignore possible computer rounding errors)
res1-res2
#Matrix-vector multiplication
# number of elements
m = 4
# create matrices
N = np.round( 10*np.random.randn(m,m) )
S = np.round( np.matrix.transpose(N)*N/m**2 ) # scaled symmetric
# and vector
w = np.array([-1, 0, 1, 2])
print(S)
print(w)
print(N)
print("with symmetric matrix")
# NOTE: The @ symbol for matrix multiplication is relatively new to Python, a@b is the same as numpy.dot or a.dot(b)
print(S@w) # 1
print(np.matrix.transpose(S@w)) # 2
print(w@S) # 3
print(np.matrix.transpose(w)@np.matrix.transpose(S)) # 4
print(np.matrix.transpose(w)@S) # 5
print("with nonsymmetric matrix")
print(N@w) # 1
print(np.matrix.transpose(N@w)) # 2
print(w@N) # 3
print(np.matrix.transpose(w)@np.matrix.transpose(N)) # 4
print(np.matrix.transpose(w)@N) # 5
#2D transformation matrices
# 2D input vector
v = np.array([ 3, -2 ])
# 2x2 transformation matrix
A = np.array([ [1,-1], [2,1] ])
# output vector is Av (convert v to column)
w = A@np.matrix.transpose(v)
# plot them
plt.plot([0,v[0]],[0,v[1]],label='v')
plt.plot([0,w[0]],[0,w[1]],label='Av')
plt.grid()
plt.axis((-6, 6, -6, 6))
plt.legend()
plt.title('Rotation + stretching')
plt.show()
## pure rotation
# 2D input vector
v = np.array([ 3, -2 ])
# 2x2 rotation matrix
th = np.pi/30
A = np.array([ [math.cos(th),-math.sin(th)], [math.sin(th),math.cos(th)] ])
# output vector is Av (convert v to column)
w = A@np.matrix.transpose(v)
# plot them
plt.plot([0,v[0]],[0,v[1]],label='v')
plt.plot([0,w[0]],[0,w[1]],label='Av')
plt.grid()
plt.axis((-4, 4, -4, 4))
plt.legend()
plt.title('Pure rotation')
plt.show()
# code challenge: Pure and impure rotation matrices
# A pure rotation preserves vector length at every angle; an impure one
# (here the x-component is additionally scaled by 2) does not.
v = np.array([3, -2])
# sweep the rotation angle over a full circle
ths = np.linspace(0, 2*np.pi, 100)
vecmags = np.zeros([len(ths), 2])  # column 0: impure, column 1: pure
for i, th in enumerate(ths):  # enumerate instead of range(len(...))
    # impure transformation matrix (rotation + stretch along x)
    A1 = np.array([ [2*math.cos(th),-math.sin(th)], [math.sin(th),math.cos(th)] ])
    # pure rotation matrix
    A2 = np.array([ [math.cos(th),-math.sin(th)], [math.sin(th),math.cos(th)] ])
    # magnitude of the transformed vector for each matrix
    vecmags[i, 0] = np.linalg.norm(A1 @ v)
    vecmags[i, 1] = np.linalg.norm(A2 @ v)
# plot the magnitudes as a function of angle
plt.plot(ths, vecmags)
plt.grid()
plt.legend(["impure transformation", "pure transformation matrix"])  # fixed typo "inpure"
plt.title('Pure and impure rotation matrices')
plt.show()
# Additive and multiplicative matrix identities
# matrix size
n = 4
A = np.round(10 * np.random.randn(n, n))
I = np.eye(n, n)        # multiplicative identity
Z = np.zeros((n, n))    # additive identity
# Evaluate the identity checks.
# NOTE(review): these notebook-style expressions are evaluated but their
# boolean results are discarded when run as a script; wrap them in
# print()/assert to actually see them.
np.array_equal(A @ I, A)       # AI == A
np.array_equal(A, A @ I)
np.array_equal(A, A + I)       # False: adding I changes the diagonal
np.array_equal(A, A + I)       # (duplicate of the line above)
np.array_equal(A + Z, A @ I)   # A + 0 == AI
# Additive and multiplicative symmetric matrices

## the additive method: S = (A + A')/2 symmetrizes any square matrix
m = 5
n = 5
A = np.random.randn(m, n)
S = (A + A.T) / 2
# a symmetric matrix minus its own transpose is the zero matrix
print(S - S.T)

## the multiplicative method: A'A and AA' are symmetric for any A
m = 5
n = 3
A = np.random.randn(m, n)
AtA = A.T @ A
AAt = A @ A.T
# first, show that both products are square (n x n and m x m)
print(AtA.shape)
print(AAt.shape)
# next, show that they are symmetric (difference with own transpose ~ 0)
print(AtA - AtA.T)
print(AAt - AAt.T)
# Element-wise (Hadamard) multiplication
# works for any matrix size ...
m = 13
n = 2
# ... as long as both operands share that size
A = np.random.randn(m, n)
B = np.random.randn(m, n)
# np.multiply (equivalently the * operator on ndarrays) is elementwise,
# unlike @, which is matrix multiplication
C = np.multiply(A, B)
print(C)
# code challenge: Symmetry of combined symmetric matrices
print("Create two symmetric matrices")
# SS' is always symmetric, so build each 3x3 symmetric matrix that way
S = np.round(2 * np.random.randn(3, 2))
S1 = S @ S.T
print(S1)
S = np.round(2 * np.random.randn(3, 2))
S2 = S @ S.T
print(S2)
print("compute sum, multiplication, and Hadamard multiplication of the two matrices")
# determine whether the result is still symmetric: the sum and the Hadamard
# product stay symmetric; the matrix product generally does not
print(S1 + S2)
print(S1 @ S2)
print(S1 * S2)
# Multiplication of two symmetric matrices (symbolic, via sympy)
a, b, c, d, e, f, g, h, k, l, m, n, o, p, q, r, s, t, u = symbols(
    'a b c d e f g h k l m n o p q r s t u', real=True)
# two symmetric matrices with a constant value on the diagonal
A = Matrix([[a, b, c, d],
            [b, a, e, f],
            [c, e, a, h],
            [d, f, h, a]])
B = Matrix([[l, m, n, o],
            [m, l, q, r],
            [n, q, l, t],
            [o, r, t, l]])
# confirmation that A and B are symmetric (difference is the zero matrix)
print(A - A.transpose())
print(B - B.transpose())
# ... and that each diagonal is constant
for i in range(0, np.size(A, 0)):
    print(A[i, i])
for i in range(0, np.size(B, 0)):
    print(B[i, i])
# but AB != (AB)': the product of symmetric matrices is not symmetric in
# general (notebook-style expression; the result is discarded in a script)
A @ B - (A @ B).T
# maybe for a submatrix? (note: this rebinds n from symbol to int)
n = 3
A1 = A[0:n, 0:n]
B1 = B[0:n, 0:n]
A1 @ B1 - (A1 * B1).T
# Frobenius dot-product
# any matrix sizes ...
m = 9
n = 4
# ... but the two matrices must be the same size
A = np.random.randn(m, n)
B = np.random.randn(m, n)
# method 1: flatten column-wise (MATLAB-style A(:)), then ordinary dot product
Av = A.flatten(order='F')
Bv = B.flatten(order='F')
frob_dp = np.dot(Av, Bv)
# method 2: trace of A'B gives the same scalar
frob_dp2 = np.trace(A.T @ B)
print(frob_dp2)
print(frob_dp)
# the Frobenius norm is the Frobenius dot product of a matrix with itself
Anorm = np.linalg.norm(A, 'fro')
Anorm2 = np.sqrt(np.trace(A.T @ A))
print(Anorm)
print(Anorm2)
# Code challenge: standard and Hadamard multiplication for diagonal matrices
# Build one full (dense random) 4x4 matrix and one diagonal 4x4 matrix.
D1 = np.random.randn(4, 4)
D2 = np.diag([4, 5, 6, 7])
# Square each matrix with both products: standard (matrix) and Hadamard
# (elementwise). For a diagonal matrix the two squares coincide.
RS1 = D1 @ D1                # standard product, full matrix
RS2 = D2 @ D2                # standard product, diagonal matrix
RH1 = np.multiply(D1, D1)    # Hadamard product, full matrix
RH2 = np.multiply(D2, D2)    # Hadamard product, diagonal matrix
for result in (D1, RS1, RH1, D2, RS2, RH2):
    print(result)
| [
"kuangzijian1@hotmail.com"
] | kuangzijian1@hotmail.com |
4231dcd3da50535d305658740cdd49c919fe3ae2 | 63f7e4817cafe56c01c17bfdfdc7281810c1d831 | /main.py | ee82064db4cfa1ca44bb1ef272abff60d72e9787 | [
"MIT"
] | permissive | QuinsZouls/arithmetic-processor | 9440b8e8d8ddb863103b283cdeea05f688f2b617 | b5b7d140566bfd7b87743c612c6ede712f83324d | refs/heads/master | 2023-03-15T12:41:48.752327 | 2021-03-02T22:03:19 | 2021-03-02T22:03:19 | 342,156,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from lib.balanceador import checkBalance
from lib.calculador import parsePostfix, postfixEvaluation

# Arithmetic expression to evaluate; tokens are space-separated.
expression = ' ( 3 * 2 - 3 ) + 25 '

# Only evaluate when the parentheses are balanced.
if not checkBalance(expression):
    print('Expresión no válida')
else:
    print(postfixEvaluation(parsePostfix(expression)))
"alfredomedranosanchez@gmail.com"
] | alfredomedranosanchez@gmail.com |
307ea8911184939786c477ff795b9f1fdc2843e8 | 4d6ded0b601db0e68901977737b362aa63391388 | /setup.py | 144b790eb3925921ffef4aaff000a072929989b8 | [
"MIT"
] | permissive | mtymchenko/npaths | e07c59f93644ee16e18478d930ec044a40769a17 | 5019694784afee9f60ab0b5f0f0ef3051e113077 | refs/heads/master | 2023-03-03T05:14:58.575248 | 2021-02-13T18:32:41 | 2021-02-13T18:32:41 | 255,169,239 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import setuptools
# Load the long description from the README so PyPI can render it.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="npaths",
    version="0.0.1",
    author="Mykhailo Tymchenko",
    author_email="mih.timchenko@gmail.com",
    description="Analytic modelling of switched N-path capacitive networks.",
    # Bug fix: long_description was read above but never passed to setup().
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mtymchenko/npaths",
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
    install_requires=['numpy', 'scipy', 'matplotlib']
)
| [
"mtymchenko@utexas.edu"
] | mtymchenko@utexas.edu |
3482c862a6405f9d46af7e9c72673545f05201a1 | eb8b5cde971573668800146b3632e43ed6e493d2 | /python/oneflow/test/modules/test_instruction_replay.py | e9fbd188d1ecc88127be665d92a6ea691ab0065a | [
"Apache-2.0"
] | permissive | big-data-ai/oneflow | 16f167f7fb7fca2ce527d6e3383c577a90829e8a | b1c67df42fb9c5ab1335008441b0273272d7128d | refs/heads/master | 2023-07-08T21:21:41.136387 | 2021-08-21T11:31:14 | 2021-08-21T11:31:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow
import oneflow as flow
import oneflow.unittest
def _test_instruction_replay_impl(test_case, device, shape):
    """Record an elementwise add, replay it, and check both runs agree."""
    dev = flow.device(device)
    lhs = flow.Tensor(np.random.rand(*shape), device=dev)
    rhs = flow.Tensor(np.random.rand(*shape), device=dev)
    expected = lhs.numpy() + rhs.numpy()
    debug = oneflow._oneflow_internal.debug
    # record the add instruction stream
    debug.start_recording_instructions()
    out = lhs + rhs
    debug.end_recording_instructions()
    test_case.assertTrue(np.allclose(out.numpy(), expected, 0.0001, 0.0001))
    # wipe the output, replay the recorded instructions, and re-check
    out.zeros_()
    debug.replay_instructions()
    test_case.assertTrue(np.allclose(out.numpy(), expected, 0.0001, 0.0001))
    debug.clear_recorded_instructions()
@flow.unittest.skip_unless_1n1d()
class TestIntructionReplay(flow.unittest.TestCase):
    """Parameterised check of instruction record/replay on cpu and cuda."""

    def test_instruction_replay(test_case):
        # cartesian product of devices and shapes, expanded by GenArgList
        arg_dict = OrderedDict([
            ("device", ["cpu", "cuda"]),
            ("shape", [[2, 3], [1, 10]]),
        ])
        for arg in GenArgList(arg_dict):
            _test_instruction_replay_impl(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | big-data-ai.noreply@github.com |
8237056086a73308b9357d931b8b197a244cf0b9 | 8f298ede5b7218ab82113c11fc1af91a17fb5b03 | /test/es/order_aggregate_test.py | e2205ca99a179405289616509b13cf7b6e00e331 | [] | no_license | hieutt-teko/es-python-prototype | 3c0edd78b99af1a6df33806526dbc4def0455ca2 | 90ebdaa1c546380bb26c91dd0f5949b3ce3c071e | refs/heads/master | 2020-05-26T15:14:57.748242 | 2019-05-23T18:10:51 | 2019-05-23T18:10:51 | 188,281,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from es.order_aggregate import OrderAggregate
from es.events import OrderAttributeChangeEvent
def test_create():
    """A freshly created aggregate starts at version 1 with one pending event."""
    aggregate = OrderAggregate.create(1)
    assert aggregate.version == 1
    assert len(aggregate.changes) == 1
def test_change_status():
    """Applying an attribute-change event updates state and appends a change."""
    aggregate = OrderAggregate.create(1)
    aggregate.apply(OrderAttributeChangeEvent(user_id=1, new_status="draft"))
    assert aggregate.user_id == 1
    assert aggregate.status == "draft"
    assert len(aggregate.changes) == 2
| [
"hieu.tt@teko.vn"
] | hieu.tt@teko.vn |
58c338b56fad5c94e647d5a68d06fd3b3ab40492 | f78aeac5d6003d706bb17aff81fc69edd01a85d3 | /createGraphs.py | 851be58dc90865a7197565d17ef23ce5691ffd95 | [] | no_license | conveyal/analyst-stress-test | 47333deb30730743f2ef44afdd2828198263cc5d | 812e22fccf093b776cf5f3af8bccb5a2ff38b7d2 | refs/heads/master | 2021-03-13T00:03:59.217673 | 2015-07-03T00:43:43 | 2015-07-03T00:43:43 | 38,375,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | #!/usr/bin/python
# Create graph bundles for the cities specified in the config file.
# usage: createGraphs.py config.yaml graph-bucket-name
from sys import argv
import yaml
import tempfile
import urllib
import uuid
import requests
import boto.s3
from zipfile import ZipFile
from os import environ
# Scratch directory for downloaded feeds and assembled bundles.
temp = tempfile.mkdtemp() + '/'
# Require exactly two arguments: the YAML config and the target S3 bucket.
if len(argv) != 3 :
    print 'usage: createGraphs.py config.yaml graph-bucket-name'
    exit(1)
# Config maps graphId -> {'gtfs': [urls...], 'bounds': {north/south/east/west}}.
with open(argv[1]) as configFile:
    config = yaml.load(configFile)
s3 = boto.connect_s3()
bucket = s3.get_bucket(argv[2])
for graphId, graph in config.iteritems():
    print 'processing graph %s' % graphId
    # download all the GTFS files
    print ' retrieving GTFS'
    with ZipFile(temp + graphId + '.zip', 'w') as out:
        for feed, i in zip(config[graphId]['gtfs'], range(len(config[graphId]['gtfs']))):
            print ' %s' % feed
            # stream each feed into a uniquely named temp file, then add it
            # to the bundle under a numeric name
            fn = temp + uuid.uuid4().hex + '.zip'
            r = requests.get(feed, stream=True)
            with open(fn, 'wb') as gtfs:
                for chunk in r.iter_content(100 * 1024):
                    print '.',
                    gtfs.write(chunk)
                print ' done.'
            out.write(fn, str(i) + '.zip')
    # Fetch an OSM extract for the configured bounding box from the VEX server.
    print ' retrieving OSM'
    r = requests.get('%s/%s,%s,%s,%s.pbf' % (environ['VEX_SERVER'], graph['bounds']['south'], graph['bounds']['west'], graph['bounds']['north'], graph['bounds']['east']), stream=True)
    fn = temp + graphId + '.osm.pbf'
    with open(fn, 'wb') as osm:
        for chunk in r.iter_content(100 * 1024):
            print '.',
            osm.write(chunk)
        # NOTE(review): `out` (the bundle ZipFile) was closed when the
        # `with ZipFile` block above ended, so this write appears to target
        # a closed archive -- looks like an indentation regression; confirm
        # against the upstream source.
        out.write(fn, graphId + '.osm.pbf')
    # Upload to S3
    # TODO: multipart uploads
    key = boto.s3.key.Key(bucket)
    key.key = graphId + '.zip'
    key.set_contents_from_filename(temp + graphId + '.zip')
print 'done'
| [
"matt@indicatrix.org"
] | matt@indicatrix.org |
d82e50b203674c28e29a03a70d96c073783588ec | 9cbda3c2917f181ce2e4aeb2d82be3bf96fa13ce | /myBlog/models.py | 9e22d7ed97074c9e1ca72581591b5e4d2fe2836e | [] | no_license | sunysir/blog | 3edb6751bbb01bfa7995f8a07f6b4c54ce36f1af | 190dc3cd800ce7a843dde29ebe5374caf7323626 | refs/heads/master | 2020-03-23T04:17:57.273742 | 2018-07-16T02:55:29 | 2018-07-16T02:55:29 | 141,074,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | from django.contrib.auth.models import User
from django.db import models
from datetime import datetime
# Create your models here.
from django.urls import reverse
class Tag(models.Model):
    """A free-form label attached to posts (Post.tags is a ManyToManyField here)."""
    # display name of the tag
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
    class Meta:
        # default queryset ordering: alphabetical by name
        ordering = ['name',]
class Category(models.Model):
    """A category a post is filed under (Post.category is a ForeignKey here)."""
    # display name of the category
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
    class Meta:
        # default queryset ordering: alphabetical by name
        ordering = ['name',]
class Post(models.Model):
    """A blog post written by a User, filed under one Category with many Tags."""
    title = models.CharField(max_length=100)
    body = models.TextField()
    # NOTE(review): DateFields initialised from datetime.utcnow;
    # modified_time is only updated when callers set it explicitly.
    created_time = models.DateField(default=datetime.utcnow)
    modified_time = models.DateField(default=datetime.utcnow)
    # excerpt shown in list views (field name keeps the original misspelling
    # "execrpt" to avoid a schema migration)
    execrpt = models.CharField(max_length=300, blank=True)
    tags = models.ManyToManyField(Tag)
    # on_delete=CASCADE matches the implicit default of Django < 2.0 and is
    # a mandatory argument from Django 2.0 onward.
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    # number of times the post detail page has been viewed
    view = models.PositiveIntegerField(default=0)
    def increase_views(self):
        """Increment the view counter and persist only that column."""
        self.view += 1
        self.save(update_fields=['view'])
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """Canonical URL of this post (the blog:detail route, keyed by pk)."""
        return reverse('blog:detail', kwargs={'pk': self.pk})
    class Meta:
        # newest posts first
        ordering = ['-created_time']
| [
"694190253@qq.com"
] | 694190253@qq.com |
d7f713061ea881fbefbe1f6de551c49ae26857a7 | 76e43c1b4cb9d9479271a5e73ee739190c038015 | /myspider.py | 3de540accdd9a645f922518d31df55b7a4bdb190 | [] | no_license | nguyenletan/scrapy_google_corona | e182fd4d8e2fd20b73ec3546020b5ed77aac0429 | bebca08e39b49ea49efb064b35336e958e694032 | refs/heads/master | 2021-04-12T16:47:31.848998 | 2020-03-31T01:48:37 | 2020-03-31T01:48:37 | 249,093,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import scrapy
class CoronaSpider(scrapy.Spider):
    """Scrapes the country table from Google's COVID-19 crisis-response page."""
    name = 'CoronaSpider'
    start_urls = ['https://google.org/crisisresponse/covid19-map/']
    def parse(self, response):
        """Yield one dict per country row of the stats table."""
        # sequential id assigned only to rows that have a country name
        i = 0
        for td in response.css('table > tbody tr'):
            # print(td.get())
            country_name = td.css('td:nth-child(1) > span ::text').get()
            # print('country_name', country_name)
            confirmed_cases = td.css('td:nth-child(2) ::text').get()
            cases_per_1_million_people = td.css('td:nth-child(3) ::text').get()
            # NOTE(review): `recovered` reads the SAME cell (nth-child(3)) as
            # cases_per_1_million_people, and `deaths` reads nth-child(4) --
            # this looks like a copy-paste error (recovered/deaths probably
            # belong to columns 4 and 5); confirm against the page layout.
            recovered = td.css('td:nth-child(3) ::text').get()
            deaths = td.css('td:nth-child(4) ::text').get()
            if country_name is not None:
                # NOTE(review): assumes the other cells are non-None whenever
                # the name cell is present; .strip() would raise otherwise.
                result = {
                    'id' : i,
                    'country_name' : country_name.strip(),
                    'confirmed_cases' : confirmed_cases.strip(),
                    'cases_per_1_million_people': cases_per_1_million_people.strip(),
                    'recovered' : recovered.strip(),
                    'deaths' : deaths.strip()
                }
                i = i + 1
                yield result
| [
"nguyenletan@gmail.com"
] | nguyenletan@gmail.com |
30b31dbb48ee318100dfe52ceb8b3bf19ac84ee9 | 9aab01a48d1af5c4f1889ae9d27940f8bc738d37 | /Mindshare/project_management/cvr/tables.py | 4143730b11b7ed81bf26920c54a9c284e43bd1ea | [] | no_license | raveena17/workout-ex | 274998170a3cfbf42bffe61d49fce8531eddc3f5 | a9c652535f33d05199b3c5d26b72c721a822a2b7 | refs/heads/master | 2021-09-05T10:06:46.399468 | 2018-01-26T08:36:58 | 2018-01-26T08:36:58 | 119,025,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # import django_tables2 as tables
# from .models import Cvr
# #class django_tables2.columns.LinkColumn(, urlconf=None, , kwargs=None, current_app=None, attrs=None, **extra)
# class CVRTable(tables.Table):
# id = tables.LinkColumn(viewname='edit_cvr', args=[tables.A('pk')])
# class Meta:
# model = Cvr
# exclude = ('comments', 'reason_for_visit', 'actions_taken_during_the_visit', 'next_plan_of_action',)
# # add class="paleblue" to <table> tag
# attrs = {'class': 'paleblue'}
| [
"raveena@5gindia.net"
] | raveena@5gindia.net |
9b6c57dd4f2b4267c065cff6ff2e5d26f7d3a071 | 46e8b33059dc1eb9e1608f71d3ddfbc07eb055a3 | /jchars.py | 41ca6445ac5a86c20a894d71ea5c315781c23dd5 | [] | no_license | sofayam/kanjibridge | b7c735ef45831cfaa11b0e1773f9a3c599c51dfe | d1b12e8ee143fcc4cd9fd2e354153ea0f8bb3248 | refs/heads/master | 2020-07-08T05:35:22.392580 | 2014-01-16T16:33:07 | 2014-01-16T16:33:07 | 203,580,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # -*- coding: utf-8 -*-
# Punctuation constants used when processing Japanese text:
# ASCII open/close parentheses and the ideographic (full-width) comma.
jobr = '('
jcbr = ')'
jcomma = '、'
def isKanji(char):
    """Return True if `char` lies in the CJK Unified Ideographs range used here."""
    return 0x4E00 <= ord(char) <= 0x9FBF
| [
"mark.andrew@gmail.com"
] | mark.andrew@gmail.com |
096195f9fd77304f13ae4e6d738ede02dafcf24d | b47f26dedee40936366549ab887abfc8daf47540 | /devpro/encurtador/models.py | 6525568f3d0394a4efb283811f38fb8a860ab606 | [] | no_license | felsb3/urlredulce | a774a81ba100e49e98cfa99d150059fff601ec73 | 860f450fc4365b9d688a004384f1b8512345ae38 | refs/heads/master | 2023-06-01T17:10:46.813099 | 2021-06-23T18:51:40 | 2021-06-23T18:51:40 | 379,723,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from django.db import models
# Create your models here.
class UrlRedirect(models.Model):
    """Maps a unique short slug to the destination URL it redirects to."""
    # full target URL the short link forwards to
    destino = models.URLField(max_length=512)
    # unique short identifier used in the redirect path
    slug = models.SlugField(max_length=128, unique=True)
    # set once when the row is inserted
    criado_em = models.DateTimeField(auto_now_add=True)
    # refreshed on every save
    atualizado_em = models.DateTimeField(auto_now=True)
    def __str__(self):
        return f'UrlRedirect para {self.destino}'
"felsb3@gmail.com"
] | felsb3@gmail.com |
372df8f1669c5ad4e46a11be9730b41376018e1e | 508b7e24e494c929469c4e3bed631bd433378e0c | /projectTestWork/8.0.working_with_CSV.py | 34abeaea51c1a6923b1228eec0e407cfd380852d | [] | no_license | Michelleoc/pands-project2021 | 1be759c9a3c2aac703b3abdc6c2140ff3b8cabf2 | 9495d49d6a7a07d4e028a5892600381f55677596 | refs/heads/main | 2023-04-13T17:54:38.500056 | 2021-04-30T20:52:02 | 2021-04-30T20:52:02 | 334,751,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.datasets import load_iris
# Load the bundled iris dataset (150 samples x 4 measurements).
iris = load_iris()

# Transpose so each row of `features` is one measurement across all samples,
# then unpack the four rows into named arrays.
features = iris.data.T
sepal_length, sepal_width, petal_length, petal_width = features

# https://www.youtube.com/watch?v=fGRAgibY5N4 Machine Learning in Python: Iris Classification - Part 2
(sepal_length_label, sepal_width_label,
 petal_length_label, petal_width_label) = iris.feature_names

# Scatter sepal length vs. sepal width, coloured by species class.
plt.scatter(sepal_length, sepal_width, c=iris.target)
plt.xlabel(sepal_length_label)
plt.ylabel(sepal_width_label)
plt.show()
"oconnormichelle1@yahoo.ie"
] | oconnormichelle1@yahoo.ie |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.