blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
832a298328bc29b34d0110a3029f906ad483a34d | 37c3b81ad127c9e3cc26fa9168fda82460ca9bda | /Baekjoon/boj_20055_컨베이어 벨트 위의 로봇.py | dfdb3152402dc2cfac4c545e7cd087fba933dcf0 | [] | no_license | potomatoo/TIL | 5d85b69fdaed68966db7cfe2a565b7c64ed3e816 | 395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c | refs/heads/master | 2021-07-08T16:19:40.410097 | 2021-04-19T02:33:40 | 2021-04-19T02:33:40 | 238,872,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | def work():
global cnt
while True:
board.rotate(1)
robot.rotate(1)
robot[N-1] = 0
for i in range(N-2, -1, -1):
if robot[i] and not robot[i+1] and board[i+1] > 0:
board[i+1] -= 1
robot[i+1] = 1
robot[i] = 0
robot[N-1] = 0
if not robot[0] and board[0] > 0:
board[0] -= 1
robot[0] = 1
flag = 0
for i in range(len(board)):
if board[i] == 0:
flag += 1
if flag >= K:
break
cnt += 1
from collections import deque
N, K = map(int, input().split())  # N: belt length; K: stop threshold (see work(): loop ends when >= K cells reach durability 0)
board = deque(map(int, input().split()))  # durability value of each belt cell
cnt = 1  # step counter; work() increments it once per simulation step
robot = deque([0] * len(board))  # 1 where a robot currently stands, aligned with board cells
work()  # run the conveyor-belt simulation defined above (BOJ 20055, per the file path)
print(cnt) | [
"duseh73@gmail.com"
] | duseh73@gmail.com |
7477820069e7127b7679f7bebbb2f0d9efd1638d | 3c5044c77a6c01e1a70b1722e8a860851056f28c | /16-1.py | 97b5250c34e13ea99567b0f0574dcb5660117bae | [] | no_license | MANAkudo/pyhton | 2f8c10bbf0b98babb5fea2ecdc4c2c430668e6fd | 90fa56fb44e7e02d05250543375a292dfef28eca | refs/heads/master | 2023-08-03T19:57:48.436313 | 2021-09-22T01:50:32 | 2021-09-22T01:50:32 | 409,027,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | f = open("16_1_read.txt",'w')
# Write the numbers 1..5, one per line, to the handle opened ('w' mode) above.
f.write("1\n")
f.write("2\n")
f.write("3\n")
f.write("4\n")
f.write("5\n")
f.close()  # flush buffered output and release the file handle
"ykh2135248@stu.o-hara.ac.jp"
] | ykh2135248@stu.o-hara.ac.jp |
fe69d824ce277807f6d3e0d5eaaff8a66490ae4b | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /otp/src/level/ModelEntity.py | 5850215d12244dd9e104ca4eebaf6cf5fd012828 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 4,052 | py | from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
import BasicEntities
class ModelEntity(BasicEntities.NodePathEntity):
    """Level entity that loads a model and parents it to this node.

    Attributes driven by the level spec (defaults set in __init__):
      modelPath      -- path of the model to load, or None for no model
      loadType       -- key into LoadFuncs selecting the loader call
      collisionsOnly -- if true, hide the visible geometry (ghosted in dev
                        builds) so only its collision geometry is active
      flattenType    -- 'light'/'medium'/'strong' scene-graph flattening
      goonHatType    -- 'none', 'hardhat' or 'security'; selects which goon
                        hat sub-model stays visible
    """
    # Bound at class-definition time from the engine-provided global 'loader'.
    LoadFuncs = {
        'loadModelCopy': loader.loadModelCopy,
        'loadModel': loader.loadModel,
        'loadModelOnce': loader.loadModelOnce,
        }
    def __init__(self, level, entId):
        # TODO: fill in default values automatically for missing attribs
        self.collisionsOnly = False
        self.loadType = 'loadModelCopy'
        self.flattenType = 'light'
        self.goonHatType = 'none'
        self.entInitialized = False
        BasicEntities.NodePathEntity.__init__(self, level, entId)
        self.entInitialized = True
        self.model = None
        self.loadModel()
    def destroy(self):
        # Remove our model node before tearing down the underlying entity.
        if self.model:
            self.model.removeNode()
            del self.model
        BasicEntities.NodePathEntity.destroy(self)
    def loadModel(self):
        """(Re)load self.model according to the current attribute values."""
        if self.model:
            self.model.removeNode()
            self.model = None
        if self.modelPath is None:
            return
        self.model = ModelEntity.LoadFuncs[self.loadType](self.modelPath)
        if self.model:
            self.model.reparentTo(self)
            # hide/show as appropriate
            if self.collisionsOnly:
                if __dev__:
                    # In dev builds keep the geometry ghost-visible for editing.
                    self.model.setTransparency(1)
                    self.model.setColorScale(1,1,1,.1)
                else:
                    self.model.hide()
            else:
                self.model.show()
            # HACK SDN: special code for moving crate wall collisions down
            if self.modelPath in ("phase_9/models/cogHQ/woodCrateB.bam",
                                  "phase_9/models/cogHQ/metal_crateB.bam",
                                  "phase_10/models/cashbotHQ/CBMetalCrate.bam",
                                  "phase_10/models/cogHQ/CBMetalCrate2.bam",
                                  "phase_10/models/cashbotHQ/CBWoodCrate.bam",
                                  "phase_11/models/lawbotHQ/LB_metal_crate.bam",
                                  "phase_11/models/lawbotHQ/LB_metal_crate2.bam",
                                  ):
                # move walls down
                cNode = self.find("**/wall")
                cNode.setZ(cNode, -.75)
                # duplicate the floor and move it down to create a
                # catch effect for low-hopped toons
                colNode = self.find("**/collision")
                floor = colNode.find("**/floor")
                floor2 = floor.copyTo(colNode)
                floor2.setZ(floor2, -.75)
            # Fixed: was "is not 'none'" -- identity comparison against a
            # string literal that only worked via CPython string interning.
            if self.goonHatType != 'none':
                self.goonType = {'hardhat':'pg','security':'sg'}[self.goonHatType]
                self.hat = self.model
                ### this was copied from Goon.createHead
                if self.goonType == "pg":
                    self.hat.find("**/security_hat").hide()
                elif self.goonType == "sg":
                    self.hat.find("**/hard_hat").hide()
                ###
                del self.hat
                del self.goonType
            if self.flattenType == 'light':
                self.model.flattenLight()
            elif self.flattenType == 'medium':
                self.model.flattenMedium()
            elif self.flattenType == 'strong':
                self.model.flattenStrong()
    def setModelPath(self, path):
        self.modelPath = path
        self.loadModel()
    def setCollisionsOnly(self, collisionsOnly):
        self.collisionsOnly = collisionsOnly
        self.loadModel()
    def setGoonHatType(self, goonHatType):
        self.goonHatType = goonHatType
        self.loadModel()
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
9305c3a78026026cae6e03d11b5982d9cee7f094 | 0617c812e9bf58a2dbc1c1fef35e497b054ed7e4 | /venv/Lib/site-packages/pyrogram/raw/functions/stats/get_megagroup_stats.py | 320398dd3f9fb86f271aeb14aaca77b3bc298f8c | [] | no_license | howei5163/my_framework | 32cf510e19a371b6a3a7c80eab53f10a6952f7b2 | 492c9af4ceaebfe6e87df8425cb21534fbbb0c61 | refs/heads/main | 2023-01-27T14:33:56.159867 | 2020-12-07T10:19:33 | 2020-12-07T10:19:33 | 306,561,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,553 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetMegagroupStats(TLObject):  # type: ignore
    """Telegram API method.
    Details:
        - Layer: ``117``
        - ID: ``0xdcdf8607``
    Parameters:
        channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
        dark (optional): ``bool``
    Returns:
        :obj:`stats.MegagroupStats <pyrogram.raw.base.stats.MegagroupStats>`
    """
    __slots__: List[str] = ["channel", "dark"]
    ID = 0xdcdf8607
    QUALNAME = "pyrogram.raw.functions.stats.GetMegagroupStats"
    def __init__(self, *, channel: "raw.base.InputChannel", dark: Union[None, bool] = None) -> None:
        self.channel = channel  # InputChannel
        self.dark = dark  # flags.0?true
    @staticmethod
    def read(data: BytesIO, *args: Any) -> "GetMegagroupStats":
        """Deserialize this TL object from its binary wire format."""
        flags = Int.read(data)
        # Bit 0 of the flags bitfield marks the optional 'dark' flag.
        dark = True if flags & (1 << 0) else False
        channel = TLObject.read(data)
        return GetMegagroupStats(channel=channel, dark=dark)
    def write(self) -> bytes:
        """Serialize this TL object to its binary wire format."""
        data = BytesIO()
        data.write(Int(self.ID, False))
        flags = 0
        # Set bit 0 when 'dark' was supplied (mirrors read() above).
        flags |= (1 << 0) if self.dark is not None else 0
        data.write(Int(flags))
        data.write(self.channel.write())
        return data.getvalue()
| [
"houwei5163"
] | houwei5163 |
572850f5eb177b3a39baee3d35b40e3eda54643a | 4e879398eaecdc19f056ee538d0732b2e92aa84f | /SistemaDiscusiones/urls.py | 7c2bedce1ec87129887c060f542029d86a8b4848 | [] | no_license | acamposruiz/localdevask | 9311566ab2526e2b6966374e43e7d198fe24045a | 867cfafff33fc214d68c499bd7e97b4f77dcd3b0 | refs/heads/master | 2021-01-25T04:01:16.308722 | 2014-04-28T03:53:46 | 2014-04-28T03:53:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()  # register all installed apps' ModelAdmins with the admin site
# NOTE(review): patterns() is the legacy (pre-Django 1.8) URLconf style;
# confirm the Django version before upgrading this project.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'SistemaDiscusiones.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^', include('apps.home.urls', namespace="home")),
    url(r'^', include('apps.users.urls', namespace="users")),
    # PYTHON SOCIAL AUTH
    url('', include('social.apps.django_app.urls', namespace="social")),
    url(r'^admin/', include(admin.site.urls)),
)
"acamposruiz@gmail.com"
] | acamposruiz@gmail.com |
2a2f1f71c97c0e8e03c4f0bcc38faa88410be7f6 | 7e2aefac7b540f4d4bad0fa6dd94dbcdad34d6a3 | /modular/app.py | f7b681f53f3b0400a266359154603d66d9fc1cbf | [] | no_license | imajaydwivedi/Python-BootCamp | ade3a3557d51b7b25a7b2ba3b79952a622896b29 | 2f5e1629a160a33017c9ab548b9d7c88ad57c917 | refs/heads/master | 2023-06-29T04:48:20.744027 | 2023-06-13T15:11:36 | 2023-06-13T15:11:36 | 250,841,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | import services.directory as directory
if __name__ == "__main__":
    # Smoke-test the directory service: add three entries, then exercise
    # list/count/find/search, remove one entry, and list again.
    directory.add({"name":"Krishna", "phone": 1234})
    directory.add({"name":"Mohan", "phone": 2345})
    directory.add({"name":"Koyya", "phone": 3456})
    print(directory.list())
    print(directory.count())
    print(directory.find_by(1))          # lookup by id (presumably the first added entry) -- confirm in services.directory
    print(directory.search_by("Koyya"))  # lookup by name
    directory.remove_by(1)
    print(directory.list())
"dwivedaj@arcesium.com"
] | dwivedaj@arcesium.com |
892e7b51d8d330acc1612ca799d59c9a0d25beb4 | 4b2450b65f5802f524ddb8701baa0e71c929889b | /listanelement.py | 873b5eef153b5eefbef4658036e49176c3427331 | [] | no_license | joedave1/python | 21e89dd0638156a3600bfb7fbf7422c73a79fc51 | ae51152a663aa2e512c5be7f6134c4b35d78e88d | refs/heads/master | 2020-06-29T11:22:05.627400 | 2019-08-16T08:51:14 | 2019-08-16T08:51:14 | 200,520,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | x=input("Enter a commc seperated list values: ").split(",")
color=list(x)
print("The first color is %s and the last color is %s"%(color[0],color[-1]))x=input("Enter a commc seperated list values: ").split(",")
color=list(x)
print("The first color is %s and the last color is %s"%(color[0],color[-1]))
| [
"noreply@github.com"
] | joedave1.noreply@github.com |
e34b387068ca8ec0ce9a89b18f694f3e87b653fb | b220bd0c6c7fe6fcea00ac2ae5195c1887b8a37e | /database/dbconn.py | fa05f16ecbde4cfecc85dfd3b816446e8a13ae57 | [] | no_license | itwastheband/AO3rdr-backend | 8f624ddeefbc09995f1784c3092fc1ebcbedbff7 | 19c1ed8ecdeea3250a958006d260207c582cb371 | refs/heads/master | 2022-06-17T03:47:45.741986 | 2020-05-03T23:34:14 | 2020-05-03T23:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,198 | py | import os
import boto
from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.table import Table
from boto.dynamodb2.exceptions import ItemNotFound
from decimal import Decimal
from flask import _app_ctx_stack
import time
class DBconn(object):
    """Data-access wrapper for the DynamoDB 'ao3rdr-works' table.

    Items are keyed by (user_id, work_id).  A user's settings are stored
    in a special pseudo-work whose work_id is the literal string 'settings'.
    """
    def __init__(self):
        # AWS credentials come from the environment, never from code.
        aws_access_key_id = os.environ['S3_KEY'] # I AM OPS U NO GET MY KEYS
        aws_secret_access_key = os.environ['S3_SECRET'] # DIS IS MY JOB
        self._conn = DynamoDBConnection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)
        self.works_table = Table('ao3rdr-works', connection=self._conn)
        # Key fields that update_work() must never overwrite.
        self.immutable_fields = ['work_id', 'user_id']
    def get_user(self, user_id):
        """Return [user_id] if the user's settings row exists, else []."""
        res = self.works_table.query_2(
            user_id__eq=user_id, work_id__eq='settings', attributes=['user_id'])
        out = []
        for entry in res:
            out.append(self.serialize(entry)['user_id'])
        return out
    def add_user(self, user_id):
        """ Adding a user adds a special "work" which is used to store a user's
        settings.
        """
        return self.works_table.put_item(data={
            'user_id': user_id,
            'work_id': 'settings',
            'created': time.time()
        })
    def update_work(self, user_id, work_id, data):
        """Partially update an existing work with the fields in `data`."""
        item = self.works_table.get_item(user_id=user_id, work_id=work_id)
        # update the item
        # NOTE(review): dict.iteritems() exists only on Python 2 -- confirm
        # the target interpreter before porting this module to Python 3.
        for key, value in data.iteritems():
            if key not in self.immutable_fields:
                item[key] = value
        item['db_updated'] = time.time()
        item.partial_save()
    def create_work(self, user_id, work_id, data):
        """Insert a new work item, stamping 'created' when absent."""
        data['user_id'] = user_id
        data['work_id'] = work_id
        if 'created' not in data:
            data['created'] = time.time()
        self.works_table.put_item(data)
    def batch_update(self, data_list):
        """Write many items in a single DynamoDB batch operation."""
        with self.works_table.batch_write() as batch:
            for data in data_list:
                batch.put_item(data=data)
    def get_work(self, user_id, work_id):
        """Fetch one work as a plain dict; {} if it does not exist."""
        try:
            res = self.works_table.get_item(user_id=user_id, work_id=work_id)
        except ItemNotFound:
            return {}
        return self.serialize(res)
    def get_all_works(self, user_id):
        """Yield every work belonging to user_id as a plain dict."""
        res = self.works_table.query_2(user_id__eq=user_id)
        for entry in res:
            yield self.serialize(entry)
    def close(self):
        # Release the underlying DynamoDB connection.
        self._conn.close()
    def serialize(self, item):
        """Convert a boto Item into a JSON-friendly plain dict."""
        out = serialize(dict(item))
        return out
def serialize(item):
    """Recursively convert DynamoDB-style values to plain JSON-friendly types.

    dicts are rebuilt with serialized values, sets and lists both become
    lists, Decimal becomes float, and anything else passes through as-is.
    """
    if isinstance(item, dict):
        return {key: serialize(value) for key, value in item.items()}
    if isinstance(item, (set, list)):
        return [serialize(element) for element in item]
    if isinstance(item, Decimal):
        return float(item)
    return item
def get_db():
    """Return the DBconn for the current application context.

    Opens a new database connection and caches it on the Flask app-context
    stack top if none exists yet, so repeated calls within one context
    share a single connection.
    """
    top = _app_ctx_stack.top
    if not hasattr(top, 'db_conn'):
        # Plain attribute assignment; the previous explicit __setattr__
        # call was equivalent but non-idiomatic.
        top.db_conn = DBconn()
    return top.db_conn
'''
# Tips for working with DynameDB
works_table = Table('ao3rdr-works', connection=conn)
# put_item has param overwrite=False
test_data = {
'user_id': 'testuser',
'work_id': '123456',
'rating': 5
}
works_table.put_item(test_data)
# When using get item, must use both primary and secondary keys
works_table.get_item(user_id='testuser', work_id='123456')
# To get by user, query is OK
res = works_table.query_2(user_id__eq='testuser')
for entry in res:
print entry
# entry useful fields: _data, keys(), and index like a dict, eg entry['work_id']
# Use the secondary index
res = works_table.query_2(rating__eq=5, index='rating-index')
for entry in res:
print entry['work_id']
# get_item(table_name, key, attributes_to_get=None, consistent_read=False, object_hook=None)
# put_item(table_name, item, expected=None, return_values=None, object_hook=None)
'''
| [
"darthkrallt@gmail.com"
] | darthkrallt@gmail.com |
ef9b5b666e8749d77a7b64d744affbcd8a64a543 | 963cac9e78c4b742f7e7800200de8d1582799955 | /test/veetou/parserTests.py | 797c7be4f0f217a2fd7bbe13910a3ec1cd8fde32 | [] | no_license | ptomulik/veetou | c79ceb3ca3d7ef7b261b2219489b6f0a7a83e1fa | b30be2a604f4426f832ec9805547ecd6cc9083fe | refs/heads/master | 2021-01-22T17:28:57.271251 | 2019-01-05T01:46:43 | 2020-05-04T16:23:44 | 85,016,513 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,386 | py | #!/usr/bin/env python3
# -*- coding: utf8 -*-
import unittest
import veetou.parser as parser
class Test__Parser(unittest.TestCase):
    """Verify that the veetou.parser package re-exports, unchanged, the
    symbols defined in its private submodules (functions_, parser_, the
    per-section parser modules, etc.)."""
    def test__funcions_symbols__1(self):
        # (sic: 'funcions' typo kept -- renaming would change the test id)
        self.assertIs(parser.dictmatcher , parser.functions_.dictmatcher)
        self.assertIs(parser.fullmatch , parser.functions_.fullmatch)
        self.assertIs(parser.fullmatchdict , parser.functions_.fullmatchdict)
        self.assertIs(parser.ifullmatch , parser.functions_.ifullmatch)
        self.assertIs(parser.imatch , parser.functions_.imatch)
        self.assertIs(parser.imatcher , parser.functions_.imatcher)
        self.assertIs(parser.match , parser.functions_.match)
        self.assertIs(parser.matchdict , parser.functions_.matchdict)
        self.assertIs(parser.matcher , parser.functions_.matcher)
        self.assertIs(parser.permutexpr , parser.functions_.permutexpr)
        self.assertIs(parser.reentrant , parser.functions_.reentrant)
        self.assertIs(parser.scatter , parser.functions_.scatter)
        self.assertIs(parser.search , parser.functions_.search)
        self.assertIs(parser.searchpd , parser.functions_.searchpd)
        self.assertIs(parser.skipemptylines , parser.functions_.skipemptylines)
    def test__parsererror_symbols__1(self):
        self.assertIs(parser.ParserError, parser.parsererror_.ParserError)
    def test__parser_symbols__1(self):
        self.assertIs(parser.Parser, parser.parser_.Parser)
        self.assertIs(parser.RootParser, parser.parser_.RootParser)
    def test__addressparser__1(self):
        self.assertIs(parser.AddressParser, parser.addressparser_.AddressParser)
    def test__contactparser__1(self):
        self.assertIs(parser.ContactParser, parser.contactparser_.ContactParser)
    def test__footerparser__1(self):
        self.assertIs(parser.FooterParser, parser.footerparser_.FooterParser)
    def test__headerparser__1(self):
        self.assertIs(parser.HeaderParser, parser.headerparser_.HeaderParser)
    def test__keymapparser__1(self):
        self.assertIs(parser.KeyMapParser, parser.keymapparser_.KeyMapParser)
    def test__pageparser__1(self):
        self.assertIs(parser.PageParser, parser.pageparser_.PageParser)
    def test__preambleparser__1(self):
        self.assertIs(parser.PreambleParser, parser.preambleparser_.PreambleParser)
    def test__reportparser__1(self):
        self.assertIs(parser.ReportParser, parser.reportparser_.ReportParser)
    def test__sheetparser__1(self):
        self.assertIs(parser.SheetParser, parser.sheetparser_.SheetParser)
    def test__summaryparser__1(self):
        self.assertIs(parser.SummaryParser, parser.summaryparser_.SummaryParser)
    def test__tableparser__1(self):
        self.assertIs(parser.TableParser, parser.tableparser_.TableParser)
    def test__tbodyparser__1(self):
        self.assertIs(parser.TbodyParser, parser.tbodyparser_.TbodyParser)
    def test__thparser__1(self):
        self.assertIs(parser.ThParser, parser.thparser_.ThParser)
    def test__trparser__1(self):
        self.assertIs(parser.TrParser, parser.trparser_.TrParser)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
| [
"ptomulik@meil.pw.edu.pl"
] | ptomulik@meil.pw.edu.pl |
3e43c121fa98f0c8fd7478f5ac8cd4cfe08fcd43 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/sql/azure-mgmt-sql/generated_samples/transparent_data_encryption_list.py | 3e2275f884eabc284c7627538174b4de0a236e32 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,661 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python transparent_data_encryption_list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Sample entry point: list transparent-data-encryption settings for a database."""
    # DefaultAzureCredential picks up AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET from the environment (see the module docstring).
    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    response = client.transparent_data_encryptions.list_by_database(
        resource_group_name="security-tde-resourcegroup",
        server_name="securitytde",
        database_name="testdb",
    )
    # The response is iterable; print each returned TDE configuration.
    for item in response:
        print(item)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2022-08-01-preview/examples/TransparentDataEncryptionList.json
# Allow running this sample directly as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9b2f6cdd33b203db499cf006e77db48474b4b153 | 2b240306722b3fba53caf25fc62fd599bb70f082 | /lectures/cs532-s19/assignments/A6/toPush/Python/driver.py | d42194c508f4364eb0d9d53e7a3c25d83dddcea9 | [] | no_license | bayardd/anwala.github.io | cac62b5d13a3e57106aff60f846a2a322938ceaf | 3d3b23f78813aff39760232f68d0b2043722a342 | refs/heads/master | 2020-04-20T04:09:07.304978 | 2019-04-30T17:27:35 | 2019-04-30T17:27:35 | 168,619,026 | 0 | 0 | null | 2019-02-01T00:38:51 | 2019-02-01T00:38:51 | null | UTF-8 | Python | false | false | 5,014 | py | import recommendations
allSimilar = []  # NOTE(review): never used anywhere below -- candidate for removal
file = open("data.txt", 'a')  # report is appended; NOTE(review): ensure this handle is closed at end of script
newline = '\n'
tab = '\t'
file.write(f'First User Chosen: {tab} 368{newline}')
file.write(f'Second User Chosen: {tab} 81 {newline}')
file.write(f'Third User Chosen: {tab} 135 {newline}{newline}')
pref = recommendations.loadMovieLens()
# Get sorted list of user ratings: each user's (movie, rating) pairs,
# ascending by rating then by title.
userRatings1 = (sorted(pref['368'].items(), key =
lambda kv:(kv[1], kv[0])))
userRatings2 = (sorted(pref['81'].items(), key =
lambda kv:(kv[1], kv[0])))
userRatings3 = (sorted(pref['135'].items(), key =
lambda kv:(kv[1], kv[0])))
# Get top 5 for each user: reverse so the highest-rated entries come first.
userRatings1.reverse()
userRatings2.reverse()
userRatings3.reverse()
# Formatted File output: for each user, write the top 3 rated movies,
# then reverse back to ascending order and write the bottom 3.
file.write(f'First User Rating: {newline}')
file.write(f'ID 368 Top 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings1[x][0]
    rating = userRatings1[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}ID 368 Bottom 3 Rated Movies: {newline}')
userRatings1.reverse()
for x in range(0,3):
    name = userRatings1[x][0]
    rating = userRatings1[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}Second User Rating: {newline}')
file.write(f'ID 81 Top 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings2[x][0]
    rating = userRatings2[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
userRatings2.reverse()
file.write(f'{newline}ID 81 Bottom 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings2[x][0]
    rating = userRatings2[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}Third User Rating: {newline}')
file.write(f'ID 135 Top 3 Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings3[x][0]
    rating = userRatings3[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
userRatings3.reverse()
file.write(f'{newline}ID 135 Bottom 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings3[x][0]
    rating = userRatings3[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}{newline}Substitute User ID: 368 {newline}{newline}')
# Find most correlated users
closest_5 = recommendations.topMatches(pref, '368')
# Find least correlated users
furthest_5 = recommendations.worstMatches(pref, '368')
# Output for least and most correlated users; each match is a
# (correlation, user_id) pair, correlation rounded for display.
file.write(f'Five other users with highest correlation: {newline}{newline}')
for x in closest_5:
    correlationValue = round(x[0])
    tempId = x[1]
    file.write(f'User ID:{tempId} {tab}Correlation Value: {correlationValue}{newline}')
file.write(f'{newline}Five other users with lowest correlation: {newline}')
for y in furthest_5:
    correlationValue = round(y[0])
    tempId = y[1]
    file.write(f'User ID:{tempId} {tab}Correlation Value: {correlationValue}{newline}')
# Predicted ratings for user 368; entries are (rating, movie) pairs.
recommendedMovies = recommendations.getRecommendations(pref, '368')
file.write(f'{newline}Computed Top 5 Movies to be Watched: {newline}')
for x in range(0,5):
    rating = recommendedMovies[x][0]
    name = recommendedMovies[x][1]
    file.write(f'Name of Movie: {name}{tab} Calculated Rating: {rating}{newline}')
file.write(f'{newline}Computed Bottom 5 Movies to be Watched: {newline}')
recommendedMovies.reverse()
for y in range(0,5):
    rating = recommendedMovies[y][0]
    name = recommendedMovies[y][1]
    file.write(f'Name of Movie: {name}{tab} Calculated Rating: {rating}{newline}')
file.write(f'{newline}{newline}Favorite Movie: {tab} Jurassic Park (1993){newline}')
file.write(f'Least Favorite Movie: {tab} Children of the Corn: The Gathering (1996){newline}{newline}')
similarMovies = recommendations.calculateSimilarItems(pref)
notSimilarMovies = recommendations.calculateLeastSimilarItems(pref)
file.write(f'Top Recommended Movies to be Watched for Jurassic Park: {newline}')
# print(similarMovies['Jurassic Park (1993)'])
for x in similarMovies['Jurassic Park (1993)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Bottom Recommended Movies to be Watched for Jurassic Park{newline}')
for x in notSimilarMovies['Jurassic Park (1993)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Top Recommended Movies to be Watched for Children of the Corn: {newline}')
for x in similarMovies['Children of the Corn: The Gathering (1996)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Bottom Recommended Movies to be Watched for Children of the Corn{newline}')
for x in notSimilarMovies['Children of the Corn: The Gathering (1996)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}') | [
"dbaya001@odu.edu"
] | dbaya001@odu.edu |
9cc95780a34d3bb2c8acb0cde93d72a744ba1ce1 | 5f596cf8fc95e72caa87fcd51aa2446f9e6fc0d4 | /tasks.py | 01c26b63c44c8cec31f1ad19c349b4ea31ffa67d | [
"MIT"
] | permissive | jakobzeitler/causalinfo | 265f34f79a13c6ee9ce1173aae202e960766327f | a8e6b6e9dae8dfd4d2e18010908c4905089538a1 | refs/heads/master | 2020-03-23T17:24:25.087306 | 2017-01-05T08:07:40 | 2017-01-05T08:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | # -*- coding: utf-8 -*-
from invoke import task, run
import os
import sys
@task
def test(cover=False):
    """Run tests (use --cover for coverage tests)"""
    # pty=True: run py.test under a pseudo-terminal.
    if cover:
        run('py.test --cov-report term-missing --cov=causalinfo tests', pty=True)
    else:
        run('py.test -v', pty=True)
@task
def clean():
    """Clean all build and cruft files"""
    # Bytecode, editor backups and __pycache__ directories.
    print("Removing python cruft ...")
    run("find . -name '*.pyc' -exec rm -f {} +")
    run("find . -name '*.pyo' -exec rm -f {} +")
    run("find . -name '*~' -exec rm -f {} +")
    run("find . -name '__pycache__' -exec rm -fr {} +")
    # Packaging output from setup.py sdist/bdist.
    print("Removing build ...")
    run("rm -rf build")
    run("rm -rf dist")
    run("rm -rf *.egg-info")
    print("Removing IPython Notebook checkpoints...")
    run("find . -name '__pynb_checkpoints__' -exec rm -fr {} +")
    # HTML rendered from README.rst by the 'readme' task.
    print("Removing generated html ...")
    run("rm -f README.html")
@task
def build():
    """Build the distribution"""
    # hide='out' suppresses the command's stdout.
    print("Building sdist ...")
    run('python setup.py sdist', hide='out')
    print("Building bdist_wheel ...")
    run('python setup.py bdist_wheel', hide='out')
@task
def publish(release=False):
    """Publish to the cheeseshop (the test index unless --release is given)."""
    # Select the target index once, then issue the same three commands.
    if release:
        register_cmd = 'python setup.py register'
        upload_prefix = 'twine upload '
    else:
        register_cmd = 'python setup.py -r test register'
        upload_prefix = 'twine upload -r test '
    run(register_cmd)
    run(upload_prefix + 'dist/*.tar.gz')
    run(upload_prefix + 'dist/*.whl')
@task
def readme(browse=True):
    """Render README.rst to README.html and (by default) open it."""
    run('rst2html.py README.rst > README.html')
    if browse:
        run('open README.html')
@task
def notebook():
    """Install the package in develop mode and launch IPython Notebook
    from the notebooks/ folder, uninstalling on exit."""
    from IPython.terminal.ipapp import launch_new_instance
    from socket import gethostname
    import warnings
    print('Installing in develop mode')
    run('python setup.py develop', hide='out')
    print('Changing to notebooks folder')
    here = os.path.dirname(__file__)
    os.chdir(os.path.join(here, 'notebooks'))
    # Preserve argv so it can be restored after the notebook server exits.
    old_argv = sys.argv[:]
    # Taken from here:
    # http://stackoverflow.com/questions/
    # 26338688/start-ipython-notebook-with-python-file
    try:
        warnings.filterwarnings("ignore", module = "zmq.*")
        # Fake the command line IPython expects, then start it in-process.
        sys.argv = ['ipython', 'notebook']
        sys.argv.append("--IPKernelApp.pylab='inline'")
        sys.argv.append("--NotebookApp.ip=" + gethostname())
        sys.argv.append("--NotebookApp.open_browser=True")
        print('Invoking "' + ' '.join(sys.argv) + '"')
        launch_new_instance()
    finally:
        # Not sure this is strictly necessary...
        sys.argv = old_argv
        os.chdir(here)
        print('Removing development package...')
        run('python setup.py develop -u', hide='out')
| [
"brett.calcott@gmail.com"
] | brett.calcott@gmail.com |
0abd56daa2dfc8f450f36161ccbb0d4530572899 | 13d384f7eb991b7fe901468f1967f7b2952499a6 | /day-23 turtle-crossing-start/car_manager.py | 1f23d9f92ae79bb4124d80476d54b7f7eac0db84 | [] | no_license | miloscomplex/100_Days_of_Python | f31638fc5a3913dc32850b61c51d2cecac7cdbdf | 6ac67472627867d8bf9cccb496e6395d979b8c89 | refs/heads/main | 2023-08-25T03:00:49.216040 | 2021-10-07T03:32:19 | 2021-10-07T03:32:19 | 395,512,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | import random
from turtle import Turtle
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
TOP_MAX = 250
BOTTOM_MAX = -250
LEFT_DISTANCE = -320
class CarManager(Turtle):
    """Spawns and drives the obstacle cars across the screen.

    The manager itself is an invisible turtle; each car is its own
    Turtle kept in self.all_cars.
    """
    def __init__(self):
        super().__init__()
        self.all_cars = []                       # every car Turtle currently on screen
        self.car_speed = STARTING_MOVE_DISTANCE  # pixels each car moves per tick
        self.hideturtle()                        # the manager has no visible sprite
    def create_car(self):
        """Add one new randomly-coloured car at a random position on the right."""
        new_car = Turtle("square")
        new_car.color(random.choice(COLORS))
        new_car.shapesize(stretch_wid=1, stretch_len=2)  # 20x40 px rectangle
        new_car.penup()
        random_y = random.randint(BOTTOM_MAX, TOP_MAX)
        random_x = random.randint(300, 890)
        new_car.goto(random_x, random_y)
        self.all_cars.append(new_car)
    def move_cars(self):
        """Advance every car leftward; recycle cars past the left edge."""
        for car in self.all_cars:
            car.backward(self.car_speed)
            # Once off the left edge, respawn at a fresh random spot on the right.
            if car.xcor() < LEFT_DISTANCE:
                random_y = random.randint(BOTTOM_MAX, TOP_MAX)
                random_x = random.randint(300, 890)
                car.goto(random_x, random_y)
    def level_up(self):
        """Make all cars faster (called when the player crosses successfully)."""
        self.car_speed += MOVE_INCREMENT
| [
"hicallmesutton@gmail.com"
] | hicallmesutton@gmail.com |
28e33303b4a8e6d06e0a3ae120f751b62b91b62b | e6a3835a1d1f4d7f6318dfd7047c3b527e994537 | /src/utils/utils.py | b353b1889ce8b210b94356a55dc40562aad8e40d | [] | no_license | MMichels/DeepCars | 9f8faec7b547c585888469202859d317e5d28526 | 327a604faa80d476cafb438b82af6537443670e0 | refs/heads/master | 2023-04-13T03:58:01.503567 | 2019-12-17T20:50:44 | 2019-12-17T20:50:44 | 228,690,108 | 0 | 0 | null | 2023-03-25T00:21:00 | 2019-12-17T19:48:14 | Python | UTF-8 | Python | false | false | 471 | py | import os
from pygame import image, error
from pygame.locals import RLEACCEL
def load_image(path, colorkey=None):
    """Load an image with pygame and return ``(surface, rect)``.

    :param path: file path of the image to load
    :param colorkey: optional transparency key; ``-1`` means "use the colour
                     of the top-left pixel"
    :raises SystemExit: when pygame cannot open the file
    """
    try:
        surface = image.load(path)
    except error as message:
        print('Não foi possivel abrir a imagem: ', path)
        raise SystemExit(message)
    surface = surface.convert_alpha()
    if colorkey:
        if colorkey == -1:
            # sample the top-left pixel as the transparent colour
            colorkey = surface.get_at((0, 0))
        surface.set_colorkey(colorkey, RLEACCEL)
    return surface, surface.get_rect()
| [
"michels09@hotmail.com"
] | michels09@hotmail.com |
f7cfc720c7204254c708dca38c4f7baee6ae12b1 | dd126d6b82eb47d90950a355d4948047ae119f9c | /fixture/db.py | 92f71b61a9231f7c5f9209c0af7120114c85c768 | [] | no_license | Korinsky/Python4QA_B24 | 9382c178a7e564272e2628426946ae087ec4ccdc | 0c5d5f812a6cb858a3bf59e45745a7fce206fd7e | refs/heads/main | 2023-07-14T05:36:57.307809 | 2021-08-18T13:05:14 | 2021-08-18T13:05:14 | 377,419,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | py | import pymysql
from model.group import Group
from model.contact import Contact
class DbFixture:
    """Read-only helper over the addressbook MySQL database used by tests."""

    def __init__(self, host, name, user, password):
        """Open an autocommit connection to the database.

        :param host: database host name
        :param name: database (schema) name
        :param user: database user
        :param password: database password
        """
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit so every read sees the latest committed state
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)

    def get_groups_list(self):
        """Return every group as a ``model.group.Group``."""
        groups = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for (group_id, group_name, group_header, group_footer) in cursor:
                groups.append(Group(id=str(group_id), name=group_name, header=group_header, footer=group_footer))
        finally:
            cursor.close()
        return groups

    def get_contacts_list(self):
        """Return every non-deprecated contact as a ``model.contact.Contact``."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, firstname, lastname, address, email, email2, email3, home, mobile, work, phone2 from addressbook where deprecated='0000-00-00 00:00:00'")
            for (contact_id, firstname, lastname, address, email, email2, email3,
                 homephone, mobilephone, workphone, secondaryphone) in cursor:
                contacts.append(Contact(id=str(contact_id), firstname=firstname, lastname=lastname, address=address,
                                        email=email, email2=email2, email3=email3,
                                        homephone=homephone, mobilephone=mobilephone,
                                        workphone=workphone, secondaryphone=secondaryphone))
        finally:
            cursor.close()
        return contacts

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()

    def get_contact_in_group(self):
        """Return a mapping of contact id -> list of group ids (non-deprecated links)."""
        contact_groups = {}
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, group_id from address_in_groups where deprecated='0000-00-00 00:00:00'")
            for (contact_id, group_id) in cursor:
                # setdefault replaces the original manual get/append/reassign dance
                contact_groups.setdefault(contact_id, []).append(group_id)
        finally:
            cursor.close()
        return contact_groups
"72462941+Korinsky@users.noreply.github.com"
] | 72462941+Korinsky@users.noreply.github.com |
1a701ab367bd7353d683543ba01c68dafb9c47e1 | 3536b829b5733807ffca9849e7ad463c43979c09 | /sc2bot/agents/battle_agent.py | 22e9a7bbc1b8d9e4a1550ce87856884d6aaf1e26 | [] | no_license | alanxzhou/sc2bot | 9b8d33dacc32074a70b8b4007f60801d6ff8037c | 0eb2a3f733ea31250e29a123213b407ad9189a40 | refs/heads/master | 2020-09-04T17:40:32.608263 | 2020-03-16T23:32:59 | 2020-03-16T23:32:59 | 219,835,624 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,381 | py | from abc import ABC, abstractmethod
import copy
from collections import deque
import pickle
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from pysc2.agents.scripted_agent import _xy_locs
from pysc2.agents.base_agent import BaseAgent
from pysc2.lib import actions
from pysc2.lib import features
from sc2bot.utils.epsilon import Epsilon
from sc2bot.utils.replay_memory import ReplayMemory, Transition
from sc2bot.models.nn_models import FeatureCNN, FeatureCNNFCLimited, FeatureCNNFCBig, BeaconCNN2
from sc2bot.agents.rl_agent import BaseRLAgent
import torch
import torch.nn as nn
import torch.optim as optim
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_UNIT_TYPE = 6
_SELECTED = 7
_UNIT_HIT_POINTS = 8
FUNCTIONS = actions.FUNCTIONS
_PLAYER_ENEMY = features.PlayerRelative.ENEMY
class BattleAgent(BaseRLAgent):
    """
    Agent where the entire army is selected
    """

    def __init__(self, save_name=None, load_name=None):
        """Build the agent and its Q-network.

        :param save_name: checkpoint path prefix (handled by the base class)
        :param load_name: path prefix of a previously saved model to restore
        """
        super(BattleAgent, self).__init__(save_name=save_name, load_name=load_name)
        # CNN over the 3 screen layers listed in self.features below
        self.initialize_model(FeatureCNNFCBig(3, screen_size=self._screen_size))
        self.steps_before_training = 5000  # frames to collect before any Q update
        self.obs = None  # most recent screen observation (set in run_loop)
        # screen layers fed to the network: player_relative, unit_type, unit_hit_points
        self.features = [_PLAYER_RELATIVE, _UNIT_TYPE, _UNIT_HIT_POINTS]
        self.train_q_per_step = 1  # train on every frame once warm-up is done

    def run_loop(self, env, max_frames=0, max_episodes=10000, save_checkpoints=500, evaluate_checkpoints=10):
        """A run loop to have agents and an environment interact."""
        total_frames = 0
        start_time = time.time()
        action_spec = env.action_spec()
        observation_spec = env.observation_spec()
        self.setup(observation_spec, action_spec)
        try:
            while self.n_episodes < max_episodes:
                obs = env.reset()[0]
                # remove unit selection from the equation by selecting the entire army on every new game.
                select_army = actions.FunctionCall(_SELECT_ARMY, [[False]])
                obs = env.step([select_army])[0]
                self.reset()
                episode_reward = 0
                while True:
                    total_frames += 1
                    self.obs = obs.observation["feature_screen"][self.features]
                    s = np.expand_dims(self.obs, 0)
                    if max_frames and total_frames >= max_frames:
                        print("max frames reached")
                        return
                    if obs.last():
                        print(f"Episode {self.n_episodes + 1}:\t total frames: {total_frames} Epsilon: {self._epsilon.value()}")
                        self._epsilon.increment()
                        break
                    action = self.get_action(s, unsqueeze=False)
                    env_actions = self.get_env_action(action, obs, command=_ATTACK_SCREEN)
                    try:
                        obs = env.step([env_actions])[0]
                        r = obs.reward - 10  # small per-step penalty on top of the env reward
                    except ValueError as e:
                        # action was rejected by the environment: no-op instead and punish heavily
                        print(e)
                        obs = env.step([actions.FunctionCall(_NO_OP, [])])[0]
                        r = obs.reward - 1000
                    episode_reward += r
                    s1 = np.expand_dims(obs.observation["feature_screen"][self.features], 0)
                    done = r > 0
                    if self._epsilon.isTraining:
                        transition = Transition(s, action, s1, r, done)
                        self._memory.push(transition)
                    if total_frames % self.train_q_per_step == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        self.train_q(squeeze=True)
                    if total_frames % self.target_q_update_frequency == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        # refresh the frozen target network from the online network
                        self._Qt = copy.deepcopy(self._Q)
                    if evaluate_checkpoints > 0 and ((self.n_episodes % evaluate_checkpoints) - (evaluate_checkpoints - 1) == 0 or self.n_episodes == 0):
                        print('Evaluating...')
                        self._epsilon.isTraining = False  # we need to make sure that we act greedily when we evaluate
                        # recursive call runs one greedy evaluation pass (evaluate_checkpoints=0)
                        self.run_loop(env, max_episodes=max_episodes, evaluate_checkpoints=0)
                        self._epsilon.isTraining = True
                    if evaluate_checkpoints == 0:  # this should only activate when we're inside the evaluation loop
                        self.reward.append(episode_reward)
                        print(f'Evaluation Complete: Episode reward = {episode_reward}')
                        break
                self.n_episodes += 1
                if len(self._loss) > 0:
                    self.loss.append(self._loss[-1])
                    self.max_q.append(self._max_q[-1])
                if self.n_episodes % save_checkpoints == 0:
                    if self.n_episodes > 0:
                        self.save_data(episodes_done=self.n_episodes)
        except KeyboardInterrupt:
            pass
        finally:
            print("finished")
            elapsed_time = time.time() - start_time
            try:
                print("Took %.3f seconds for %s steps: %.3f fps" % (
                    elapsed_time, total_frames, total_frames / elapsed_time))
            except:
                # bare except kept: guards the division when elapsed_time is 0
                print("Took %.3f seconds for %s steps" % (elapsed_time, total_frames))
class BattleAgentBeacon(BattleAgent):
    """BattleAgent variant for the beacon map.

    Differs from BattleAgent in the model (BeaconCNN2), a single input
    feature layer (player_relative only), and tensor handling flags
    (unsqueeze=True / squeeze=False) in its own run loop.
    """

    def __init__(self, save_name=None, load_name=None):
        super(BattleAgentBeacon, self).__init__(save_name=save_name, load_name=load_name)
        self.initialize_model(BeaconCNN2())
        # single screen layer (not a list), so observations need an extra
        # channel dimension added later via unsqueeze
        self.features = _PLAYER_RELATIVE

    def run_loop(self, env, max_frames=0, max_episodes=10000, save_checkpoints=500, evaluate_checkpoints=10):
        """A run loop to have agents and an environment interact."""
        total_frames = 0
        start_time = time.time()
        action_spec = env.action_spec()
        observation_spec = env.observation_spec()
        self.setup(observation_spec, action_spec)
        try:
            while self.n_episodes < max_episodes:
                obs = env.reset()[0]
                # remove unit selection from the equation by selecting the entire army on every new game.
                select_army = actions.FunctionCall(_SELECT_ARMY, [[False]])
                obs = env.step([select_army])[0]
                self.reset()
                episode_reward = 0
                while True:
                    total_frames += 1
                    self.obs = obs.observation["feature_screen"][self.features]
                    s = np.expand_dims(self.obs, 0)
                    if max_frames and total_frames >= max_frames:
                        print("max frames reached")
                        return
                    if obs.last():
                        print(f"Episode {self.n_episodes + 1}:\t total frames: {total_frames} Epsilon: {self._epsilon.value()}")
                        self._epsilon.increment()
                        break
                    action = self.get_action(s, unsqueeze=True)
                    env_actions = self.get_env_action(action, obs, command=_ATTACK_SCREEN)
                    try:
                        obs = env.step([env_actions])[0]
                        r = obs.reward - 10  # small per-step penalty on top of the env reward
                    except ValueError as e:
                        # action was rejected by the environment: no-op instead and punish heavily
                        print(e)
                        obs = env.step([actions.FunctionCall(_NO_OP, [])])[0]
                        r = obs.reward - 1000
                    episode_reward += r
                    s1 = np.expand_dims(obs.observation["feature_screen"][self.features], 0)
                    done = r > 0
                    if self._epsilon.isTraining:
                        transition = Transition(s, action, s1, r, done)
                        self._memory.push(transition)
                    if total_frames % self.train_q_per_step == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        self.train_q(squeeze=False)
                    if total_frames % self.target_q_update_frequency == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        # refresh the frozen target network from the online network
                        self._Qt = copy.deepcopy(self._Q)
                    if evaluate_checkpoints > 0 and ((self.n_episodes % evaluate_checkpoints) - (evaluate_checkpoints - 1) == 0 or self.n_episodes == 0):
                        print('Evaluating...')
                        self._epsilon.isTraining = False  # we need to make sure that we act greedily when we evaluate
                        self.run_loop(env, max_episodes=max_episodes, evaluate_checkpoints=0)
                        self._epsilon.isTraining = True
                    if evaluate_checkpoints == 0:  # this should only activate when we're inside the evaluation loop
                        self.reward.append(episode_reward)
                        print(f'Evaluation Complete: Episode reward = {episode_reward}')
                        break
                self.n_episodes += 1
                if len(self._loss) > 0:
                    self.loss.append(self._loss[-1])
                    self.max_q.append(self._max_q[-1])
                if self.n_episodes % save_checkpoints == 0:
                    if self.n_episodes > 0:
                        self.save_data(episodes_done=self.n_episodes)
        except KeyboardInterrupt:
            pass
        finally:
            print("finished")
            elapsed_time = time.time() - start_time
            try:
                print("Took %.3f seconds for %s steps: %.3f fps" % (
                    elapsed_time, total_frames, total_frames / elapsed_time))
            except:
                # bare except kept: guards the division when elapsed_time is 0
                print("Took %.3f seconds for %s steps" % (elapsed_time, total_frames))
class BattleAgentLimited(BattleAgent):
    """BattleAgent whose action space is a (radius x radius) window centred
    on the mean position of friendly units, instead of the whole screen."""

    def __init__(self, save_name=None, load_name=None):
        super(BattleAgentLimited, self).__init__(save_name=save_name, load_name=load_name)
        self.steps_before_training = 256
        self.features = [_PLAYER_RELATIVE, _UNIT_TYPE, _UNIT_HIT_POINTS]
        self.radius = 15  # side length of the local action window
        self._screen_size = 64
        self.initialize_model(FeatureCNNFCLimited(len(self.features), self.radius, screen_size=64))

    def get_action(self, s, unsqueeze=True):
        """Epsilon-greedy action over the radius*radius local action space.

        :param s: screen observation as a numpy array (batch dim already added)
        :param unsqueeze: add an extra leading dim before the forward pass
        :return: flat index into the (radius x radius) window
        """
        # greedy
        if np.random.rand() > self._epsilon.value():
            s = torch.from_numpy(s).to(self.device)
            if unsqueeze:
                s = s.unsqueeze(0).float()
            else:
                s = s.float()
            with torch.no_grad():
                self._action = self._Q(s).squeeze().cpu().data.numpy()
            return self._action.argmax()
        # explore
        else:
            action = np.random.randint(0, self.radius ** 2)
            return action

    def get_env_action(self, action, obs, command=_MOVE_SCREEN):
        """Translate a local (window-relative) action into a pysc2 FunctionCall.

        The flat action index is unravelled into window coordinates and offset
        by the mean friendly-unit position; targeting a friendly unit (or a
        point very near the screen origin) downgrades the command to a move.
        Returns a no-op when no friendly units are visible or the command is
        unavailable.
        """
        relative_action = np.unravel_index(action, [self.radius, self.radius])
        y_friendly, x_friendly = (obs.observation["feature_screen"][_PLAYER_RELATIVE] == _PLAYER_FRIENDLY).nonzero()
        # y_enemy, x_enemy = (obs.observation["feature_screen"][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
        if len(x_friendly) > 0:
            # window coords are (row, col); screen target is (x, y)
            action = [int(relative_action[1] - self.radius/2 + round(x_friendly.mean())),
                      int(relative_action[0] - self.radius/2 + round(y_friendly.mean()))]
            friendly_coordinates = np.vstack((x_friendly, y_friendly)).T
            if bool(np.sum(np.all(action == friendly_coordinates, axis=1))):
                # target lands on a friendly unit: move instead of attack
                command = _MOVE_SCREEN
            elif abs(sum(action)) < 2:
                command = _MOVE_SCREEN
        else:
            # action = [int(relative_action[1] - self.radius/2), int(relative_action[0] - self.radius/2)]
            return actions.FunctionCall(_NO_OP, [])
        if command in obs.observation["available_actions"]:
            return actions.FunctionCall(command, [[0], action])
        else:
            return actions.FunctionCall(_NO_OP, [])
| [
"alanzhou93@gmail.com"
] | alanzhou93@gmail.com |
5c59103c775af199dd22c783d6c67d60fb97d5d3 | 49e0b6094a6841efd74ba57cd01913b465223333 | /data_structures_and_algorithms_python/challenges/tree_fizz_buzz/tree_fizz_buzz.py | 5883a22406f14bb3defa4c58189abd1927c6c06e | [] | no_license | HamzaQahoush/data-structures-and-algorithms--Python | 1c2fdfc8b90efc190108ed139372591741d5acc7 | 81bc4424065bc6b7ef99ab4dbba60524a75058a4 | refs/heads/master | 2023-07-15T04:03:05.158576 | 2021-08-05T17:34:47 | 2021-08-05T17:34:47 | 376,792,369 | 0 | 1 | null | 2021-08-05T17:29:16 | 2021-06-14T11:00:05 | Python | UTF-8 | Python | false | false | 1,647 | py | class Node :
    def __init__(self, value):
        """Create a tree node holding *value* with no children."""
        self.value = value
        # child nodes; callers append to this list directly
        self.child = []
    def __str__(self):
        """Return the node's value rendered as a string."""
        return str(self.value)
class KAryTree:
    """A k-ary tree; holds only the root node, which callers build up directly."""

    def __init__(self):
        self.root = None
"""This code done with help from Ahmad Zatar"""
def fizz_Buzz_Tree(KAryTree):
    """Relabel every node of a k-ary tree with its fizz-buzz string, in place.

    Values divisible by both 3 and 5 become "Fizz Buzz", by 5 only "Buzz",
    by 3 only "Fizz"; every other value becomes str(value).

    Bug fix: the original labelled the root with a chain of plain ``if``
    statements, so a root divisible by 5 (or by both 3 and 5) was first
    replaced by a string and then re-tested with ``%``, raising TypeError.
    One helper with an if/elif chain now labels children and root alike.

    :param KAryTree: tree exposing a ``root`` node; each node exposes an int
                     ``value`` and a list of ``child`` nodes
    :return: the same tree object, mutated in place
    """
    def _label(value):
        # order matters: check divisibility by 15 before 5 and 3
        if value % 5 == 0 and value % 3 == 0:
            return "Fizz Buzz"
        elif value % 5 == 0:
            return "Buzz"
        elif value % 3 == 0:
            return "Fizz"
        return str(value)

    def _traverse(node):
        # post-order: label the children before the parent
        for child in node.child:
            _traverse(child)
        node.value = _label(node.value)

    _traverse(KAryTree.root)
    return KAryTree
if __name__ == "__main__":
    # Build a tiny demo tree and print a few relabelled node values.
    kAryTree = KAryTree()
    kAryTree.root = Node(1)  # root
    kAryTree.root.child += [Node(2)]  # child 0
    kAryTree.root.child += [Node(3)]  # child 1
    kAryTree.root.child += [Node(5)]  # child 2
    kAryTree.root.child[0].child += [Node(5)]  # child[0,0]
    fizz_Buzz_Tree(kAryTree)
    print(kAryTree.root.child[0].value)  # 2 -> 2
    print(kAryTree.root.child[1].value)  # 3 -> Fizz
    print(kAryTree.root.child[0].child[0].value)  # 5 -> Buzz
"hamza.qah@gmail.com"
] | hamza.qah@gmail.com |
8b0d58ef495a25ef7a5bac1d8320f8430110b81a | 4bdb484b1aaf38f38e512042e249c26bb8cb181c | /v-server/shopspider/diy/configs.py | 3e57d1d8796addaa9191b063104920b91f3dcb92 | [] | no_license | fan1018wen/scrapy-spider | 593ec2b6e02724e185e135ecc107400eeb7aec37 | 97d7ea1ce63d6c84ef9e01fb55e9376dbd7b8e83 | refs/heads/master | 2021-01-15T22:14:57.787123 | 2013-09-27T03:59:55 | 2013-09-27T03:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | ##coding=utf-8
# Define some diy functions here

table_prefix = 'P1_WJ_TEST_LANG'  # table-name prefix, e.g. TEST --> TEST_SHOP TEST_PRODUCT TEST_PRODUCT_IMAGE (used by the pipeline)
show_messages = True  # whether to print debug messages: True / False

# - Database settings --- to change the port, edit the pipeline module directly
db_type = 'oracle'  # database type: oracle / mysql (used by the pipeline)
db_host = '172.16.4.211'  # database host (used by the pipeline)
db_user = 'spider'  # user name
db_pass = 'spider'  # password
db_name = 'spider'  # for MySQL: the database name
db_sid = 'xe'  # for Oracle: the service name, e.g. jlproject_primary

handle_image = True  # whether to process images: True / False (pipeline; usually no need to change; source image paths are absolute http URLs)
download_image = False  # whether to download images: True / False (pipeline; usually no need to change)
image_dir = '/picdir/php'  # root directory for stored images, linux | windows 'D:\\7788\\picdir\\php' (usually no need to change)

global conf  # NOTE(review): no-op at module level - 'global' only has effect inside a function
conf = {
    'table_prefix' : table_prefix,
    'show_messages' : show_messages,
    'db_type' : db_type,
    'db_host' : db_host,
    'db_user' : db_user,
    'db_pass' : db_pass,
    'db_name' : db_name,
    'db_sid' : db_sid,
    'handle_image' : handle_image,
    'download_image' : download_image,
    'image_dir' : image_dir
}
# if conf['show_messages'] :
"wj922@qq.com"
] | wj922@qq.com |
bb35ccd3ccfc92a049807e3711182d740eb677b8 | eab2dc435028b2548554d97b24eb7b7e3576b953 | /iblrig/check_sync_pulses.py | b53097729443914a5879f7b454f1900b4316e049 | [
"MIT"
] | permissive | k1o0/iblrig | 35edd8570215ca591b1f1e26e47439e633aa587a | 9177b852b344a9bbc26e4a4aeb5f0182bd8a9b25 | refs/heads/master | 2021-05-24T12:58:47.552912 | 2020-02-25T20:19:59 | 2020-02-25T20:19:59 | 253,573,669 | 0 | 0 | MIT | 2020-04-06T17:48:28 | 2020-04-06T17:48:28 | null | UTF-8 | Python | false | false | 2,875 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Monday, February 25th 2019, 2:10:38 pm
import logging
import sys
from pathlib import Path
import ibllib.io.raw_data_loaders as raw
import matplotlib.pyplot as plt
import numpy as np
from iblrig.misc import get_port_events
log = logging.getLogger("iblrig")
def sync_check(tph):
    """Warn when a trial is missing any of the three hardware sync pulses.

    :param tph: trial object whose ``behavior_data`` holds Bpod event
        timestamps; BNC1 = frame2ttl (visual stimulus), BNC2 = sound,
        Port1 = camera.
    """
    events = tph.behavior_data["Events timestamps"]
    ev_bnc1 = get_port_events(events, name="BNC1")
    ev_bnc2 = get_port_events(events, name="BNC2")
    ev_port1 = get_port_events(events, name="Port1")
    NOT_FOUND = "COULD NOT FIND DATA ON {}"
    bnc1_msg = NOT_FOUND.format("BNC1") if not ev_bnc1 else "OK"
    bnc2_msg = NOT_FOUND.format("BNC2") if not ev_bnc2 else "OK"
    port1_msg = NOT_FOUND.format("Port1") if not ev_port1 else "OK"
    warn_msg = f"""
##########################################
NOT FOUND: SYNC PULSES
##########################################
VISUAL STIMULUS SYNC: {bnc1_msg}
SOUND SYNC: {bnc2_msg}
CAMERA SYNC: {port1_msg}
##########################################"""
    # only emit the banner when at least one channel has no events
    if not ev_bnc1 or not ev_bnc2 or not ev_port1:
        log.warning(warn_msg)
if __name__ == "__main__":
    # Require the session path as the single CLI argument.
    # Bug fix: the original only printed and then fell through to
    # sys.argv[1], crashing with IndexError; exit cleanly instead.
    if len(sys.argv) == 1:
        print("I need a file name...")
        sys.exit(1)
    session_data_file = Path(sys.argv[1])
    if not session_data_file.exists():
        raise FileNotFoundError(f"{session_data_file}")
    if session_data_file.name.endswith(".jsonable"):
        # a raw data file was passed: load from its session folder
        data = raw.load_data(session_data_file.parent.parent)
    else:
        try:
            data = raw.load_data(session_data_file)
        except Exception:
            # Bug fix: the original printed and kept going, hitting a
            # NameError on the undefined `data` below; exit cleanly.
            print("Not a file or a valid session folder")
            sys.exit(1)
    # Collect per-trial sync events and count trials missing any channel.
    unsynced_trial_count = 0
    frame2ttl = []
    sound = []
    camera = []
    trial_end = []
    for trial_data in data:
        tevents = trial_data["behavior_data"]["Events timestamps"]
        ev_bnc1 = get_port_events(tevents, name="BNC1")
        ev_bnc2 = get_port_events(tevents, name="BNC2")
        ev_port1 = get_port_events(tevents, name="Port1")
        if not ev_bnc1 or not ev_bnc2 or not ev_port1:
            unsynced_trial_count += 1
        frame2ttl.extend(ev_bnc1)
        sound.extend(ev_bnc2)
        camera.extend(ev_port1)
        trial_end.append(trial_data["behavior_data"]["Trial end timestamp"])
    print(f"Found {unsynced_trial_count} trials with bad sync data")
    # Raster of the three sync channels with trial-end markers.
    f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
    ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
    ax.plot(camera, np.ones(len(camera)) * 1, "|")
    ax.plot(sound, np.ones(len(sound)) * 2, "|")
    ax.plot(frame2ttl, np.ones(len(frame2ttl)) * 3, "|")
    [ax.axvline(t, alpha=0.5) for t in trial_end]
    ax.set_ylim([0, 4])
    ax.set_yticks(range(4))
    ax.set_yticklabels(["", "camera", "sound", "frame2ttl"])
    plt.show()
| [
"nbonacchi@gmail.com"
] | nbonacchi@gmail.com |
ddf50e75e79b2fdf8f47933f714c83b2eaa89e66 | 09d3b183035824f990946cdd8faa11e8bd729e6f | /geo-data/osmgeojson.py | cc3bfcb2ff891030f189c4724e3ddec70e74dbe7 | [] | no_license | srravya/data-greed | 78d20066acef11c2a56f03fca18975227102832d | 566d2c5ad521fd9ffd01df4fd77476bd3cc18c79 | refs/heads/master | 2021-01-11T09:27:46.965503 | 2016-06-22T17:11:28 | 2016-06-22T17:11:28 | 57,985,117 | 0 | 0 | null | 2016-06-08T05:19:22 | 2016-05-03T16:44:09 | Python | UTF-8 | Python | false | false | 2,349 | py | from geojson import Point
from geojson import Feature, FeatureCollection
from geojson import dump, load
from osmapi import OsmApi
import os
def degree_decimal(dms_list):
    """Convert a [degrees, minutes, seconds] triple to decimal degrees."""
    degrees = dms_list[0]
    minutes = dms_list[1]
    seconds = dms_list[2]
    return degrees + (minutes / 60.0) + (seconds / 3600.0)
DATAFILE = 'libraries_new.geojson'
TESTFILE = 'libraries_test.geojson'
# Change the value to switch between test data and actual data
GEODATAFILE = DATAFILE
# COORD_SYSTEM='degree'
COORD_SYSTEM = 'decimal'
# NOTE(review): these prompts run at import time; the mix of input() here and
# raw_input() below suggests this script targets Python 2 — verify.
if COORD_SYSTEM == 'decimal':
    lat = input('lat: ')
    lon = input('lon: ')
elif COORD_SYSTEM == 'degree':
    # read degrees/minutes/seconds and convert to decimal degrees
    lat_dms = raw_input('deg,min,sec: ')
    lon_dms = raw_input('deg,min,sec: ')
    lat = degree_decimal([float(x.strip()) for x in lat_dms.split(',')])
    lon = degree_decimal([float(y.strip()) for y in lon_dms.split(',')])
def prompt():
    """Print the interactive menu options to stdout."""
    menu = (
        "Select Option",
        "0. Exit",
        "1. Add a node",
        "2. Get node(s)",
    )
    for line in menu:
        print(line)
def add_to_osm():
    """Prompt for a library's details and append it to the GeoJSON data file.

    Uses the module-level ``lat``/``lon`` read at import time and writes the
    updated FeatureCollection back to ``GEODATAFILE``.
    """
    # NOTE(review): OSM_EP is not defined anywhere in this module's visible
    # code, and `connection` is never used afterwards — verify both.
    connection = OsmApi(passwordfile=u'', api=OSM_EP)
    # GeoJSON point is (Easting, Northing) / (Long, Lat) order!
    my_point = Point((lon, lat))
    ''' Properties: {
    Name: Name of the library
    Operator: Directorate of Public Libraries
    Opening Hours: Open hours in OSM format
    Address: Door number if available and street
    '''
    # raw_input: Python 2 style prompts for the OSM tag values
    name = raw_input('Name: ')
    timings = raw_input('Time: ')
    street = raw_input('Street: ')
    housenumber = raw_input('Door: ')
    postcode = raw_input('PINCODE: ')
    my_feature = Feature(geometry=my_point, properties={
        'amenity': 'library',
        'name': name,
        'operator': 'Directorate of Public Libraries',
        'opening_hours': timings,
        'addr:country': 'IN',
        'addr:city': 'Chennai',
        'addr:street': street,
        'addr:housenumber': housenumber,
        'address:postcode': postcode,
        'marker-color': '#00ff00',
        'marker-symbol': 'library'
    })
    # an empty data file means there is no FeatureCollection to append to yet
    if os.stat(GEODATAFILE).st_size == 0:
        FILE_EMPTY = True
    else:
        FILE_EMPTY = False
    if not FILE_EMPTY:
        # append the new feature to the existing collection
        with open(GEODATAFILE, 'r') as data:
            current = load(data)
        featureSet = current['features']
        featureSet.append(my_feature)
        print("Total libraries: %d" % len(featureSet))
        libraries = FeatureCollection(featureSet)
    else:
        libraries = FeatureCollection([my_feature])
    # Write data to file
    with open(GEODATAFILE, 'w+') as data:
        dump(libraries, data, indent=4, sort_keys=True)
| [
"eternaltyro@gmail.com"
] | eternaltyro@gmail.com |
4748aa5750dba7b48af7c65f6b08a0be79ebbcb4 | 563c1d3093a047d7185c34557345eadf60d0dcd1 | /reservoir-id/classifier_apply.py | b886278192ca1926597017c7d814da1eb2ac04a2 | [
"GPL-3.0-only"
] | permissive | kysolvik/reservoir-id | ea930cbd93199bf6f3bcda58fd5971d3402eb8bc | f3a25d0750d96f369a699547584d7db97b2cb43d | refs/heads/master | 2021-01-19T03:30:41.006479 | 2018-01-17T14:38:42 | 2018-01-17T14:38:42 | 87,315,930 | 0 | 0 | MIT | 2018-01-07T16:53:26 | 2017-04-05T13:59:18 | Python | UTF-8 | Python | false | false | 3,065 | py | #!/usr/bin/env python
"""
Apply classifier exported by classifier_train.py
Inputs: Classifier pkl path, small area cutoff
Outputs: CSV with classified regions
Notes:
1. Make sure that all columns in the apply csv match the train_csv
2. exclude_att_patterns must match
@authors: Kylen Solvik
Date Create: 5/27/17
"""
# Load libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.externals import joblib
import xgboost as xgb
import numpy as np
import sys
import argparse
# Parse arguments
# NOTE: `args` is a module-level global consumed by main() below.
parser = argparse.ArgumentParser(description='Apply Random Forest classifier to prop_csv.',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('prop_csv',
                    help='Path to attribute table (from build_att_table.py).',
                    type=str)
parser.add_argument('xgb_pkl',
                    help='Path to pkl with xgb model.',
                    type=str)
parser.add_argument('class_csv_out',
                    help='Path for output classified csv',
                    type=str)
parser.add_argument('--area_lowbound',
                    help='Lower area bound. Must match trained model. All regions <= in size will be ignored',
                    default=2,
                    type=int)
parser.add_argument('--path_prefix',
                    help='To be placed at beginnings of all other path args',
                    type=str, default='')
args = parser.parse_args()
def main():
    """Load the attribute table, apply the pickled classifier, write results.

    Reads paths and thresholds from the module-level ``args``; writes a CSV
    identical to the (area-filtered) input with an added ``clf_pred`` column.
    """
    # Set any attributes to exclude for this run
    exclude_att_patterns = []
    # Load dataset
    dataset = pd.read_csv(args.path_prefix + args.prop_csv, header=0)
    # drop regions at or below the area cutoff (must match how the model was trained)
    dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound]
    # Exclude attributes matching user input patterns, or if they are all nans
    exclude_atts = []
    for pattern in exclude_att_patterns:
        col_list = [col for col in dataset_acut.columns if pattern in col]
        exclude_atts.extend(col_list)
    for att in dataset.columns[1:]:
        if sum(np.isfinite(dataset[att])) == 0:
            exclude_atts.append(att)
    for att in list(set(exclude_atts)):
        del dataset_acut[att]
    (ds_y, ds_x) = dataset_acut.shape
    print(ds_y, ds_x)
    # Convert dataset to array
    # column 0 is assumed to be an identifier, column 1 the label — TODO confirm
    array = dataset_acut.values
    X = array[:, 2:ds_x].astype(float)
    Y = array[:, 1].astype(int)
    # Set nans to 0
    X = np.nan_to_num(X)
    # Export classifier trained on full data set
    clf = joblib.load(args.path_prefix + args.xgb_pkl)
    clf_pred = clf.predict(X)
    dataset_out = dataset_acut
    dataset_out["clf_pred"] = clf_pred
    print(str(sum(clf_pred == 1)) + " classified as positive")
    print(str(sum(clf_pred == 0)) + " classified as negative")
    dataset_out.to_csv(args.path_prefix + args.class_csv_out, index=False)


if __name__ == '__main__':
    main()
| [
"kysolvik@gmail.com"
] | kysolvik@gmail.com |
7a4a7b2829526271df0ee298213f785025e1cafc | a473fdce56e422137f0f14514081bf7c10e9aa90 | /source/data_processing.py | f58589cc42ed19b0dfa0cd0c580dc9171fd81ec2 | [] | no_license | mrandic/Bike-Rental-Case | bc264d02d115db178ff35d67b0c98d4644dbf954 | 79d29a5a5a1a914e5936e688d2b1ed850373b301 | refs/heads/main | 2023-08-15T02:10:52.483478 | 2021-09-21T23:34:44 | 2021-09-21T23:34:44 | 409,000,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,767 | py | import pandas as pd
import numpy as np
from dateutil.parser import parse
def processHubwayTripsData(hubway_trips_df):
    """
    Create initial features from hubway trips data.

    Parses ``start_date`` and derives year/month/weekday/day/hour columns,
    then renames ``status`` to ``trip_status`` to avoid clashing with the
    stations' own status column when the tables are later merged.

    :param hubway_trips_df: Hubway trips data with 'start_date' and 'status' columns
    :return: Feature engineered dataframe
    """
    # Parse once into a datetime64 column, then derive the date parts with the
    # vectorised .dt accessor instead of one Python-level .apply() per column.
    hubway_trips_df['start_date'] = pd.to_datetime(hubway_trips_df['start_date'])
    start = hubway_trips_df['start_date'].dt
    hubway_trips_df['year_start'] = start.year
    hubway_trips_df['month_start'] = start.month
    hubway_trips_df['weekday_start'] = start.dayofweek
    hubway_trips_df['day_start'] = start.day
    hubway_trips_df['hour_start'] = start.hour
    hubway_trips_df = hubway_trips_df.rename(columns={'status': 'trip_status'})
    return hubway_trips_df
def mapFrequentPostalCodeToGPSData():
    """
    Manually map approximate postal code GPS locations obtained from the
    OpenStreetMap service.

    :return: DataFrame with columns zip_code, zip_code_lat, zip_code_lng
    """
    # renamed from `dict` to avoid shadowing the builtin
    data = {'zip_code': ["'02118", "'02139", "'02215", "'02116", "'02115", "'02138", "'02114", "'02143", "'02113", "'02134"],
            'zip_code_lat': [42.3407, 42.3643, 42.3476, 42.3514, 42.3480, 42.34733, 42.36033, 42.38371, 42.36285, 42.35595],
            'zip_code_lng': [-71.0708, -71.1022, -71.1009, -71.0776, -71.0885, -71.16867, -71.06732, -71.10213, -71.05518, -71.13411]
            }
    return pd.DataFrame(data=data)
def createMasterDataSet(hubway_trips_df, hubway_stations_df, weather_df, zip_code_gps_df):
    """
    Create master dataset from all available datasets
    :param hubway_trips_df: Hubway trips data
    :param hubway_stations_df: Hubway stations data
    :param weather_df: Weather data for Boston (additional added data source)
    :param zip_code_gps_df: Zip code GPS locations (additional added data source)
    :return: Master dataset
    """
    hubway_trips_df = processHubwayTripsData(hubway_trips_df)
    # attach start-station attributes, suffixed _start
    master_df = pd.merge(hubway_trips_df, hubway_stations_df, how='left', left_on='strt_statn', right_on='id')
    master_df = master_df.rename(columns={'id': 'id_start', 'terminal': 'terminal_start', 'station': 'station_start',
                                          'municipal': 'municipal_start', 'lat': 'lat_start', 'lng': 'lng_start',
                                          'status': 'status_start'})
    # attach end-station attributes, suffixed _end
    master_df = pd.merge(master_df, hubway_stations_df, how='left', left_on='end_statn', right_on='id')
    master_df = master_df.rename(
        columns={'id': 'id_end', 'terminal': 'terminal_end', 'station': 'station_end', 'municipal': 'municipal_end',
                 'lat': 'lat_end', 'lng': 'lng_end', 'status': 'status_end'})
    # join daily weather on the trip start date
    master_df = pd.merge(master_df, weather_df, how='left', left_on=['year_start', 'month_start', 'day_start'],
                         right_on=['Year', 'Month', 'Day'])
    # approximate GPS location of the rider's home zip code
    master_df = pd.merge(master_df, zip_code_gps_df, how='left', left_on=['zip_code'], right_on=['zip_code'])
    return master_df
def importData():
    """
    Imports all datasets into working memory using pandas
    :return: Pandas dataframes for further analysis
    """
    # NOTE(review): file names are hard-coded relative paths — run from the data directory
    hubway_stations_df = pd.read_csv('hubway_stations.csv', sep=',').sort_values(['station'], ascending=True)
    hubway_trips_df = pd.read_csv('hubway_trips.csv', sep=',')
    weather_df = pd.read_csv('boston_weather.csv', sep=',')
    zip_code_gps_df = mapFrequentPostalCodeToGPSData()
    return hubway_trips_df, hubway_stations_df, weather_df, zip_code_gps_df
def createFeatures(master_df):
    """
    Create initial set of features to be used in the project
    :param master_df: Master dataframe
    :return: Master dataframe
    """
    # flag whether user has started and finished bike ride on the same station
    master_df['same_st_flg'] = np.where(master_df['strt_statn'] == master_df['end_statn'], 1, 0)
    # age feature (only registered users have a birth_date; others become NaN)
    master_df['age'] = master_df[(master_df['subsc_type'] == 'Registered')]['year_start'] - \
                       master_df[(master_df['subsc_type'] == 'Registered')]['birth_date']
    # Binned Visibility feature
    bins = [0, 2, 4, 6, 8, np.inf]
    names = ['0-2', '2-4', '4-6', '6-8', '8+']
    master_df['Avg Visibility Range (mi)'] = pd.cut(master_df['Avg Visibility (mi)'], bins, labels=names)
    # Binned Temperature feature
    bins = [20, 40, 60, 80, np.inf]
    names = ['20-40', '40-60', '60-80', '80+']
    master_df['Avg Temp Range (F)'] = pd.cut(master_df['Avg Temp (F)'], bins, labels=names)
    # Binned Humidity feature
    bins = [20, 40, 60, 80, np.inf]
    names = ['20-40', '40-60', '60-80', '80+']
    master_df['Avg Humidity Range (%)'] = pd.cut(master_df['Avg Humidity (%)'], bins, labels=names)
    # Binned Wind Range feature
    bins = [0, 5, 10, 15, np.inf]
    names = ['0-5', '5-10', '10-15', '15+']
    master_df['Avg Wind Range (mph)'] = pd.cut(master_df['Avg Wind (mph)'], bins, labels=names)
    # Binned Dew Point feature
    bins = [0, 20, 40, 60, np.inf]
    names = ['0-20', '20-40', '40-60', '60+']
    master_df['Avg Dew Point Range (F)'] = pd.cut(master_df['Avg Dew Point (F)'], bins, labels=names)
    # Binned Age feature
    bins = [0, 20, 40, 60, np.inf]
    names = ['0-20', '20-40', '40-60', '60+']
    master_df['Age Range'] = pd.cut(master_df[(master_df['subsc_type'] == 'Registered')]['age'], bins, labels=names)
    # per-bike usage stats: ride count and mean ride duration
    bike_agg = master_df[['bike_nr', 'seq_id', 'duration']].groupby(by=['bike_nr']).agg(
        bike_use_cnt=('seq_id', 'count'), bike_ride_duration_avg=('duration', 'mean')).sort_values(["bike_use_cnt"],
                                                                                                   ascending=(
                                                                                                       False)).reset_index()
    master_df = pd.merge(master_df, bike_agg, how='left', left_on=['bike_nr'], right_on=['bike_nr'])
    # Binned bike use frequency range
    bins = [0, 500, 1000, 1500, np.inf]
    names = ['0-500', '500-1000', '1000-1500', '1500+']
    master_df['Bike Use Range'] = pd.cut(master_df['bike_use_cnt'], bins, labels=names)
    # Binned bike time usage range
    bins = [500, 1000, 1500, np.inf]
    names = ['500-1000', '1000-1500', '1500+']
    master_df['Bike Avg Time Use Range'] = pd.cut(master_df['bike_ride_duration_avg'], bins, labels=names)
    # Clear dataset from outliers (durations above 3000s)
    master_df = master_df[(master_df["duration"] > 0) & (master_df["duration"] <= 3000)]
    return master_df
def renameColumns(feature_set):
    """
    Rename columns to a standardized snake_case style.
    :param feature_set: Feature dataframe
    :return: Feature dataframe with renamed columns
    """
    column_mapping = {
        'lat_start': 'latitude',
        'lng_start': 'longitude',
        'year_start': 'year',
        'month_start': 'month',
        'weekday_start': 'weekday',
        'day_start': 'day',
        'hour_start': 'hour',
        'municipal_start': 'staton_municipality',
        'status_start': 'station_status',
        'Bike Use Range': 'bike_freq_use_range',
        'Bike Avg Time Use Range': 'bike_avg_dur_range',
        'Avg Temp (F)': 'avg_tmp_f',
        'Avg Dew Point (F)': 'avg_dew_point_f',
        'Avg Humidity (%)': 'avg_humidity_pct',
        'Avg Sea Level Press (in)': 'avg_sea_level_press_in',
        'Avg Visibility (mi)': 'avg_visibility_mi',
        'Avg Wind (mph)': 'avg_wind_mph',
        'Snowfall (in)': 'sbowfall_in',
        'Precip (in)': 'precip_in',
        'Events': 'weather_event',
    }
    return feature_set.rename(columns=column_mapping)
def featureSubset(master_df):
    """
    Create initial feature subset
    The rest of the variables are excluded after being proven to provide
    weak influence on variable importance while building the model.
    :param master_df: Master dataframe
    :return: Master dataframe with filtered columns
    """
    # Columns that survived the variable-importance screening, in the
    # order expected by the downstream model-building code.
    selected_columns = [
        'municipal_start',
        'lat_start',
        'lng_start',
        'status_start',
        'trip_status',
        'year_start',
        'month_start',
        'weekday_start',
        'day_start',
        'hour_start',
        'subsc_type',
        'zip_code',
        'gender',
        'age',
        'Bike Use Range',
        'Bike Avg Time Use Range',
        'Avg Temp (F)',
        'Avg Dew Point (F)',
        'Avg Humidity (%)',
        'Avg Sea Level Press (in)',
        'Avg Visibility (mi)',
        'Avg Wind (mph)',
        'Snowfall (in)',
        'Precip (in)',
        'Events',
        'duration',
    ]
    return master_df[selected_columns]
def setFeatureCategoryType(feature_set):
    """
    Cast feature data type to a category type
    This is needed for proper One Hot Encoding process
    :param feature_set: Feature dataframe
    :return: Feature dataframe with column types set as categorized
    """
    # Data-driven loop replaces nine copy-pasted astype() statements, so
    # adding/removing a categorical column is a one-line change.
    # Column names 'staton_municipality' (sic) must match renameColumns().
    categorical_columns = [
        "bike_freq_use_range",
        "bike_avg_dur_range",
        "staton_municipality",
        "station_status",
        "trip_status",
        "subsc_type",
        "zip_code",
        "gender",
        "weather_event",
    ]
    for column in categorical_columns:
        feature_set[column] = feature_set[column].astype('category')
    return feature_set
"milos.randic@telenor.no"
] | milos.randic@telenor.no |
39ef41ca372b8c23e5a544cffabddd8ade50fad0 | bb462a56300aff06f6265e500804a4ecc7e290c4 | /mod_int.py | 74edcf9d02e8596531719f955e0156a7cf5b6c2b | [
"CC0-1.0"
] | permissive | nohtaray/competitive-programming.py | 6d4f0b5b6dde3dfee5a12674a1d0143d760b3644 | 7d38884007541061ddd69d617a69a0d9bc6176fa | refs/heads/master | 2023-06-15T01:17:41.744771 | 2023-05-27T14:37:04 | 2023-05-27T14:37:04 | 180,506,267 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | def ModInt(mod):
    class _ModInt:
        """Integer arithmetic modulo `mod` (captured from the enclosing ModInt call).

        Supports +, -, * with either another _ModInt or a plain int.
        Division is intentionally unimplemented.
        NOTE(review): __rsub__/__rmul__ are not defined, so `8 - _ModInt(1)`
        and `8 * _ModInt(1)` fall back to int semantics and fail -- only
        right-addition (`8 + _ModInt(1)`) is supported via __radd__.
        """

        def __init__(self, value):
            # Normalize into [0, mod) on construction.
            self.value = value % mod
        def __add__(self, other):
            if isinstance(other, _ModInt):
                return _ModInt(self.value + other.value)
            else:
                return _ModInt(self.value + other)
        def __sub__(self, other):
            if isinstance(other, _ModInt):
                return _ModInt(self.value - other.value)
            else:
                return _ModInt(self.value - other)
        def __radd__(self, other):
            # int + _ModInt delegates to __add__ (addition is commutative).
            return self.__add__(other)
        def __mul__(self, other):
            if isinstance(other, _ModInt):
                return _ModInt(self.value * other.value)
            else:
                return _ModInt(self.value * other)
        def __truediv__(self, other):
            # Modular division (multiplicative inverse) deliberately not provided.
            raise NotImplementedError()
        def __int__(self):
            return self.value
        def __repr__(self):
            return str(self.value)
return _ModInt
if __name__ == '__main__':
MI7 = ModInt(mod=7)
assert int(MI7(1) + MI7(8)) == 2
assert int(MI7(1) + 8) == 2
assert int(8 + MI7(1)) == 2
| [
"ydt.hran2@gmail.com"
] | ydt.hran2@gmail.com |
fb7248f9ab1b81c3bee297715b6eed6deb7193f3 | b2f6b65cba891f3a86e507d4dd312936517ab139 | /utils/modelsize.py | 213406ce9a9a0c028c54e6939f32b41239f2d85d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | leeesangwon/CGNet | 2822d288355e8a535a780c4a6e850608467465dc | d07c0e84d252bed9cbc28e66da4b85bdcc4c6293 | refs/heads/master | 2020-04-14T04:48:48.532572 | 2019-05-09T13:08:26 | 2019-05-09T13:08:26 | 163,646,131 | 1 | 0 | MIT | 2019-05-09T13:08:28 | 2018-12-31T06:45:11 | Python | UTF-8 | Python | false | false | 2,602 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class SizeEstimator(object):
    """Estimates the size of PyTorch models in memory for a given input size.

    The total is the sum, in bits, of: parameter storage, forward+backward
    activation storage, and the input tensor itself.

    NOTE(review): get_output_sizes() feeds each module's output into the
    next entry of model.modules(), so estimates are only meaningful for
    strictly sequential models -- confirm before using on branched models.
    """

    def __init__(self, model, input_size=(1, 1, 32, 32), bits=32):
        """
        :param model: nn.Module to measure
        :param input_size: shape of one sample input tensor
        :param bits: bits per stored value (e.g. 32 for float32)
        """
        self.model = model
        self.input_size = input_size
        # BUG FIX: the original assigned the literal 32 here, silently
        # ignoring the `bits` argument.
        self.bits = bits
        return

    def get_parameter_sizes(self):
        '''Get sizes of all parameters in `model`'''
        mods = list(self.model.modules())
        sizes = []
        # mods[0] is the model itself; start at 1 to walk submodules.
        # NOTE(review): parameters() recurses, so nested containers list
        # their children's parameters again -- preserved from the original.
        for i in range(1, len(mods)):
            m = mods[i]
            p = list(m.parameters())
            for j in range(len(p)):
                sizes.append(np.array(p[j].size()))
        self.param_sizes = sizes
        return

    def get_output_sizes(self):
        '''Run sample input through each layer to get output sizes'''
        # `Variable(..., volatile=True)` was removed in PyTorch 0.4;
        # torch.no_grad() provides the same "no autograd bookkeeping" effect.
        # The input values are irrelevant (only sizes are recorded), so use zeros.
        input_ = torch.zeros(*self.input_size)
        mods = list(self.model.modules())
        out_sizes = []
        with torch.no_grad():
            for i in range(1, len(mods)):
                m = mods[i]
                out = m(input_)
                out_sizes.append(np.array(out.size()))
                input_ = out
        self.out_sizes = out_sizes
        return

    def calc_param_bits(self):
        '''Calculate total number of bits to store `model` parameters'''
        total_bits = 0
        for i in range(len(self.param_sizes)):
            s = self.param_sizes[i]
            bits = np.prod(np.array(s)) * self.bits
            total_bits += bits
        self.param_bits = total_bits
        return

    def calc_forward_backward_bits(self):
        '''Calculate bits to store forward and backward pass'''
        total_bits = 0
        for i in range(len(self.out_sizes)):
            s = self.out_sizes[i]
            bits = np.prod(np.array(s)) * self.bits
            total_bits += bits
        # multiply by 2 for both forward AND backward
        self.forward_backward_bits = (total_bits * 2)
        return

    def calc_input_bits(self):
        '''Calculate bits to store input'''
        self.input_bits = np.prod(np.array(self.input_size)) * self.bits
        return

    def estimate_size(self):
        '''Estimate model size in memory in megabytes and bits'''
        self.get_parameter_sizes()
        self.get_output_sizes()
        self.calc_param_bits()
        self.calc_forward_backward_bits()
        self.calc_input_bits()
        total = self.param_bits + self.forward_backward_bits + self.input_bits
        total_megabytes = (total / 8) / (1024 ** 2)
        return total_megabytes, total
| [
"874314714@qq.com"
] | 874314714@qq.com |
c43dee062a7499d04b64507171d861b11b09912e | df3c8c521a51f2b412118bd9d0e477da06a3b7cc | /build/view_environments/post_create_/create_post/create_post.py | 2a6a13f8a1551a30e01dd4e643e8f14b345f9bfd | [] | no_license | bharatmudragada/fb_post | c30b900731db5844df6b438e5d38a0dfb607412a | c5e7bb185a561bdcfcd7b2e30264554b07106044 | refs/heads/master | 2020-06-21T04:05:22.296755 | 2019-07-17T07:48:22 | 2019-07-17T07:48:22 | 197,339,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | from django_swagger_utils.drf_server.decorators.request_response import request_response
from django_swagger_utils.drf_server.default.parser_mapping import PARSER_MAPPING
from django_swagger_utils.drf_server.default.renderer_mapping import RENDERER_MAPPING
from fb_post.build.serializers.definitions.PostContent.PostContentSerializer import PostContentSerializer
from fb_post.build.serializers.definitions.PostId.PostIdSerializer import PostIdSerializer
options = {
'METHOD': 'POST',
'REQUEST_WRAPPING_REQUIRED': True,
'REQUEST_ENCRYPTION_REQUIRED': False,
'REQUEST_IS_PARTIAL': False,
'PARSER_CLASSES': [
PARSER_MAPPING["application/json"]
],
'RENDERER_CLASSES': [
RENDERER_MAPPING["application/json"]
],
'REQUEST_QUERY_PARAMS_SERIALIZER': None,
'REQUEST_HEADERS_SERIALIZER': None,
'REQUEST_SERIALIZER': PostContentSerializer,
'REQUEST_SERIALIZER_MANY_ITEMS': False,
'RESPONSE': {
'201' : {
'RESPONSE_SERIALIZER': PostIdSerializer,
'RESPONSE_SERIALIZER_MANY_ITEMS': False,
'HEADERS_SERIALIZER': None,
}
,
'400' : {
'RESPONSE_SERIALIZER': None,
'RESPONSE_SERIALIZER_MANY_ITEMS': False,
'HEADERS_SERIALIZER': None,
}
},
"SECURITY":{
"oauth" : [
"write"
]
}
}
app_name = "fb_post"
operation_id = "create_post"
group_name = ""
@request_response(options=options, app_name=app_name, operation_id=operation_id, group_name=group_name)
def create_post(request, *args, **kwargs):
    """Swagger-generated view entry point: delegate to the app's view-environment wrapper."""
    # Imported lazily (as generated) to avoid import cycles at module load time.
    from django_swagger_utils.drf_server.wrappers.view_env_wrapper import view_env_wrapper
    wrapped_args = (request,) + args
    return view_env_wrapper(app_name, "create_post", group_name, *wrapped_args, **kwargs)
| [
"bharathmudragada123@gmail.com"
] | bharathmudragada123@gmail.com |
98afb32b4a54532746127c0a78d01a693fc7d98a | 21899ea0e94cb58f8ac99b7c731f59e0232839eb | /src/python/T0/WMBS/Oracle/Subscriptions/HaveJobGroup.py | 59ece2561182f2a6ec7589262150c04280d86513 | [
"Apache-2.0"
] | permissive | dmwm/T0 | a6ee9d61abc05876fc24f8af69fe932a2f542d21 | 1af91d0b1971b7d45ea7378e754f2218ff9a8474 | refs/heads/master | 2023-08-16T10:55:27.493160 | 2023-08-11T09:38:03 | 2023-08-11T09:38:03 | 4,423,801 | 9 | 54 | Apache-2.0 | 2023-09-14T11:43:30 | 2012-05-23T18:33:56 | Python | UTF-8 | Python | false | false | 687 | py | """
_HaveJobGroup_
Oracle implementation of HaveJobGroup
For a given subscription check if there is an existing job group
"""
from WMCore.Database.DBFormatter import DBFormatter
class HaveJobGroup(DBFormatter):
    """Existence check: does the given subscription already have a job group?"""
    # SELECT 1 ... AND ROWNUM = 1: Oracle idiom that stops at the first
    # matching row, since only existence matters here.
    sql = """SELECT 1
             FROM wmbs_jobgroup
             WHERE wmbs_jobgroup.subscription = :subscription
             AND ROWNUM = 1
             """
    def execute(self, subscription, conn = None, transaction = False):
        """Return True iff at least one job group exists for `subscription`.

        :param subscription: subscription id bound to :subscription in the SQL
        :param conn: optional existing DB connection
        :param transaction: whether to run inside an existing transaction
        """
        results = self.dbi.processData(self.sql, { 'subscription' : subscription },
                                       conn = conn, transaction = transaction)[0].fetchall()
        # Zero rows -> no job group; one row of literal 1 -> job group exists.
        return ( len(results) > 0 and results[0][0] == 1 )
| [
"Dirk.Hufnagel@cern.ch"
] | Dirk.Hufnagel@cern.ch |
f64548cc59fb2b2294373d25879cdab04e508e9f | d121775327c0c2e1d7210eab0f52d1818c56aa0c | /Wikipedia_Scraper/venv/bin/wheel | 12e896c57139377e445ecb2d018a31e72715bb96 | [] | no_license | shmoss/Python-Backend-TownSounds | f396d8fbd55b08730286109dc27c1e948a33c9c8 | ba38bed2894ac45eb344c8fa2a23a49daa6fd3f0 | refs/heads/master | 2021-07-15T07:52:05.267561 | 2021-07-08T21:28:37 | 2021-07-08T21:28:37 | 180,048,120 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/Users/starrmoss/PycharmProjects/hi/Wikipedia_Scraper/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"shmoss@wisc.edu"
] | shmoss@wisc.edu | |
b14adaf5a89b66b23c4ea53b5a93cd242caca777 | 0f16edb46a48f9b5a125abb56fc0545ede1d65aa | /test_utilities/src/d1_test/mock_api/tests/test_get.py | d1eaef95d18355fd89576cc41c693343b6516ba0 | [
"Apache-2.0"
] | permissive | DataONEorg/d1_python | 5e685f1af0c356190f2d6df45d1ac849e2f56972 | d72a9461894d9be7d71178fb7310101b8ef9066a | refs/heads/master | 2023-08-29T03:16:38.131760 | 2023-06-27T21:59:37 | 2023-06-27T21:59:37 | 60,103,877 | 15 | 12 | Apache-2.0 | 2023-09-06T18:27:53 | 2016-05-31T16:01:00 | Python | UTF-8 | Python | false | false | 2,721 | py | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import responses
import d1_test.d1_test_case
import d1_test.mock_api.get
class TestMockGet(d1_test.d1_test_case.D1TestCase):
    """Tests for the mocked MNRead.get() API (responses-based HTTP mocking).

    Each test registers the mock callback on the mock MN base URL, then
    exercises the client fixture `mn_client_v1_v2` against it.
    """
    @responses.activate
    def test_1000(self, mn_client_v1_v2):
        """mock_api.get() returns a Requests Response object."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        assert isinstance(mn_client_v1_v2.get("test_pid_1"), requests.Response)
    @responses.activate
    def test_1010(self, mn_client_v1_v2):
        """mock_api.get() returns the same content each time for a given PID."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        # Fetch each PID twice; the mock must be deterministic per PID.
        obj_1a_str = mn_client_v1_v2.get("test_pid_1").content
        obj_2a_str = mn_client_v1_v2.get("test_pid_2").content
        obj_1b_str = mn_client_v1_v2.get("test_pid_1").content
        obj_2b_str = mn_client_v1_v2.get("test_pid_2").content
        assert obj_1a_str == obj_1b_str
        assert obj_2a_str == obj_2b_str
    @responses.activate
    def test_1020(self, mn_client_v1_v2):
        """mock_api.get(): Redirects."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        direct_sciobj_bytes = mn_client_v1_v2.get("test_pid_1").content
        # "<REDIRECT:303:3>" prefix makes the mock issue three 303 redirects
        # before serving the object; content must match the direct fetch.
        redirect_sciobj_bytes = mn_client_v1_v2.get(
            "<REDIRECT:303:3>test_pid_1"
        ).content
        assert direct_sciobj_bytes == redirect_sciobj_bytes
    # @responses.activate
    # def test_0012(self):
    #   """mock_api.get() returns 1024 bytes"""
    #   obj_str = self.client.get('test_pid_1').content
    #   self.assertEqual(len(obj_str), 1024)
    # @responses.activate
    # def test_0013(self):
    #   """mock_api.get(): Passing a trigger header triggers a DataONEException"""
    #   self.assertRaises(
    #     d1_common.types.exceptions.NotAuthorized, self.client.get, 'test_pid',
    #     vendorSpecific={'trigger': '401'}
    #   )
| [
"git@dahlsys.com"
] | git@dahlsys.com |
bec7c5ea5c678a589efad67a06df92c0335711e2 | dc29b57b9a025287574117a4e7c7fc27663d6063 | /pydemo/src/wxdemo/gridbagdemo.py | 3dc34973c575305cf8cc3a71ddc85a57d34b5233 | [] | no_license | bspeng922/pyutils | e4d0e988d5c168a3a9e97da2d09c6b714faa2c9a | 4fa6c75a7159e03383c0f89d67d1ca37f3d0f0a5 | refs/heads/master | 2020-04-11T09:59:19.089455 | 2017-01-06T07:42:20 | 2017-01-06T07:42:20 | 7,434,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import wx
class Example(wx.Frame):
    """Small demo frame laying out a rename dialog with wx.GridBagSizer."""

    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, "", size=(320, 130))
        self.InitUI()

    def InitUI(self):
        """Create the widgets and arrange them on a 4x4-gap grid bag."""
        panel = wx.Panel(self)
        prompt_label = wx.StaticText(panel, label="Rename To")
        name_field = wx.TextCtrl(panel)
        ok_button = wx.Button(panel, label="OK", size=(90, 28))
        close_button = wx.Button(panel, label="Close", size=(90, 28))

        layout = wx.GridBagSizer(4, 4)
        layout.Add(prompt_label, pos=(0, 0), flag=wx.TOP | wx.LEFT | wx.BOTTOM, border=5)
        # Text field spans 5 columns and stretches horizontally.
        layout.Add(name_field, pos=(1, 0), span=(1, 5), flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=5)
        layout.Add(ok_button, pos=(3, 3))
        layout.Add(close_button, pos=(3, 4), flag=wx.RIGHT | wx.BOTTOM, border=5)
        # Let column 1 and the spacer row 2 absorb any extra space.
        layout.AddGrowableCol(1)
        layout.AddGrowableRow(2)
        panel.SetSizer(layout)
if __name__ == "__main__":
app = wx.App()
Example(None, -1).Show()
app.MainLoop()
| [
"bspeng922@gmail.com"
] | bspeng922@gmail.com |
4ee39fb041156b51bf7fa191a298758ceaab2ef0 | bcda171a045e86f8437c9dd5f37a0a1ac2316063 | /anonymization/newtest.py | 1ed85056501ce83aeffe09c6b85218895595e2aa | [] | no_license | blackfeathering/CommunityDeception-master | f1127a9d22869a3bbc8db40ca99c89c0e98279d5 | c49dafd8774e029c0d57aa4f63ad192aacafa07f | refs/heads/master | 2023-04-03T03:41:13.651533 | 2021-03-15T06:16:28 | 2021-03-15T06:16:28 | 255,219,882 | 0 | 0 | null | 2021-03-29T22:52:54 | 2020-04-13T03:13:20 | Python | UTF-8 | Python | false | false | 4,824 | py | import logging.config
import sys
import cmath
from typing import List
from settings import master
from igraph import Graph
from igraph.clustering import VertexClustering
from utils.counter_pre import count_security_index_by_pre
from utils.pre_counter import count_pre_security_index
from utils.counter import count_security_index
from utils.timer import time_mark
import time
logging.config.dictConfig(master.LOGGING_SETTINGS)
logger = logging.getLogger('normal')
class NewtestCommunityCombine(object):
    """Community-deception experiment: merge two communities of an igraph graph.

    Wraps a graph, a community-detection function, and two target community
    indices; precomputes the vertex/degree bookkeeping and the *expected*
    partition that results from merging community index1 into index0.
    NOTE(review): relies on igraph VertexClustering internals
    (`_membership`, `_graph`, `_len`) -- verify against the igraph version in use.
    """

    def __init__(self, graph, edges_sum, detection_func, func_args, interval, partitions=None,
                 path=None, index0=2, index1=0, **kwargs):
        # Inputs and configuration.
        self.__graph = graph
        self.__edges_sum = edges_sum
        self.__detection_func = detection_func
        self.__func_args = func_args
        self.__interval = interval
        self.__partitions = partitions
        self.__path = path
        # The two communities to be combined.
        self.__community_index_0 = index0
        self.__community_index_1 = index1
        # Derived state, filled in by __preprocess/__set_necessary_info.
        self.__edge_set = None
        self.__degree_list = None
        self.__vertex_list = None
        self.__vertex_part = None
        self.__edge_added_list = None
        self.__partitions_expected = None
        self.__partitions_expected_degree: List[int] = list()
        self.__partitions_expected_volume: List[int] = list()
        self.__sorted_partitions_expected: List[List[int]] = list()
        self.__degree_distribute: List[int] = list()
        # Wall-clock bookkeeping for logging.
        self.__start_time = time.time()
        self.__end_time = None
    def __start(self):
        # Log run configuration and the two communities being combined.
        logger.info("CommunityCombine")
        logger.info(f'Time : {time_mark(self.__start_time)}')
        logger.info(f'Graph: {self.__path}')
        logger.info(f'Info : {self.__graph.vcount()} {self.__graph.ecount()}')
        logger.info(f'Edges: {self.__edges_sum}')
        logger.info(f'Func : {self.__detection_func.__name__}')
        logger.info(f'Args : {self.__func_args}')
        logger.info(f'Gap : {self.__interval}')
        logger.info(f'Parts: {len(self.__partitions)}')
        logger.info("Community1")
        subgraph0 = self.__partitions.subgraph(self.__community_index_0)
        logger.info(f'Community index: {self.__community_index_0}, '
                    f'Info : {subgraph0.vcount()} {subgraph0.ecount()}')
        logger.info("Community2")
        subgraph1 = self.__partitions.subgraph(self.__community_index_1)
        logger.info(f'Community index: {self.__community_index_1}, '
                    f'Info : {subgraph1.vcount()} {subgraph1.ecount()}')
        logger.info("=" * 60)
    def __quit(self):
        # Log total elapsed wall-clock time for the run.
        self.__end_time = time.time()
        logger.info("=" * 60)
        logger.info(f'Time : {time_mark(self.__end_time)}')
        logger.info(f'Total: {(self.__end_time - self.__start_time):10.4f} s')
        logger.info("=" * 60)
        logger.info("\n\n")
    def __preprocess(self):
        # Cache the edge set; run community detection only if no partition given.
        self.__edge_set = set(self.__graph.get_edgelist())
        if not self.__partitions:
            self.__partitions = self.__detection_func(self.__graph, **self.__func_args)
        self.__set_necessary_info()
    def __set_necessary_info(self):
        # Collect, for every vertex in either target community: its index,
        # its degree, and which of the two communities it belongs to (0/1).
        v_degree = list()
        v_index = list()
        v_partation = list()
        memberships = self.__partitions._membership
        # Normalize so index_0 < index_1.
        if self.__community_index_0 > self.__community_index_1:
            a = self.__community_index_1
            self.__community_index_1 = self.__community_index_0
            self.__community_index_0 = a
        for index in range(len(memberships)):
            if memberships[index] == self.__community_index_0:
                v_index.append(index)
                v_degree.append(self.__graph.degree(index))
                v_partation.append(0)
            if memberships[index] == self.__community_index_1:
                v_index.append(index)
                v_degree.append(self.__graph.degree(index))
                v_partation.append(1)
        self.__degree_list = v_degree
        self.__vertex_list = v_index
        self.__vertex_part = v_partation
        # After the merge, the combined community keeps id self.__community_index_1.
        partation_expected = VertexClustering(graph=self.__partitions._graph, membership=list(self.__partitions._membership))
        # Fold community index_1 into index_0 ...
        for i in range(len(partation_expected._membership)):
            if partation_expected._membership[i] == self.__community_index_1:
                partation_expected._membership[i] = self.__community_index_0
        # ... then relabel the last community to the freed index_1 slot and
        # shrink the community count by one.
        for i in range(len(partation_expected._membership)):
            if partation_expected._membership[i] == partation_expected._len - 1:
                partation_expected._membership[i] = self.__community_index_1
        partation_expected._len -= 1
        #print(partation_expected._membership)
        self.__partitions_expected = partation_expected
"1960554271@qq.com"
] | 1960554271@qq.com |
4f2cdd1eb56bda921db71669d39b4bbdaf4062e4 | 82dafd9b89abdf334420e50f9d7562984aed8a7d | /cifar10_models/senet.py | a6f47305812f4ead441c3208f43d2a499c2c5841 | [] | no_license | mostafaelhoushi/tensor-decompositions | 844aaed58abeb1e17923860a5e9aebed64465030 | 8c3186dfc4d5d2eb22b0a673e3eaf1bcaa872feb | refs/heads/master | 2020-07-09T03:51:30.214582 | 2020-05-02T12:46:00 | 2020-05-02T12:46:00 | 203,867,675 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,086 | py | '''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['senet18']
class BasicBlock(nn.Module):
    """ResNet basic block with a Squeeze-and-Excitation recalibration stage."""

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs act as per-channel FC layers with a 16x
        # bottleneck. Use nn.Conv2d instead of nn.Linear.
        # NOTE: planes must be >= 16, otherwise planes//16 == 0 channels.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Squeeze: global average pool down to a (N, C, 1, 1) descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated (warns on modern PyTorch);
        # torch.sigmoid is the supported, numerically identical call.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: per-channel rescaling via broadcasting.
        out = out * w
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation ResNet block with a Squeeze-and-Excitation stage."""

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # Shortcut only defined when a projection is needed; forward() uses
        # hasattr() to fall back to the identity otherwise.
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers (1x1 convs as per-channel FC layers, 16x bottleneck).
        # NOTE: planes must be >= 16, otherwise planes//16 == 0 channels.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # Shortcut branches off the pre-activated input when projecting.
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))

        # Squeeze: global average pool down to a (N, C, 1, 1) descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated (warns on modern PyTorch);
        # torch.sigmoid is the supported, numerically identical call.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: per-channel rescaling via broadcasting.
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    """CIFAR-style SENet backbone assembled from the given SE block type.

    Expects 3-channel 32x32 inputs: four stages halve the spatial size to
    4x4, which the fixed avg_pool2d(out, 4) in forward() then reduces to 1x1.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        """
        :param block: block class, e.g. BasicBlock or PreActBlock
        :param num_blocks: blocks per stage, e.g. [2, 2, 2, 2] for SENet-18
        :param num_classes: size of the final classification layer
        """
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stages double the channels and (after stage 1) halve spatial size.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Fixed 4x4 pool: assumes 32x32 input so the feature map is 4x4 here.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def SENet18():
    """Build an 18-layer SENet (pre-activation blocks, 2 blocks per stage)."""
    return SENet(PreActBlock, [2,2,2,2])
def senet18():
    """Lowercase alias for SENet18(), matching this package's naming in __all__."""
    return SENet18()
def test():
    """Smoke test: run one random CIFAR-sized batch through SENet18."""
    net = SENet18()
    y = net(torch.randn(1,3,32,32))
    print(y.size())
# test()
| [
"m.elhoushi@ieee.org"
] | m.elhoushi@ieee.org |
d2f27c55bbc9eed109b72828c5be2aad86fb4cd3 | 3cd680e0372f942affeb948eedca8e08d9bfb743 | /22.py | c9a9f60726386d6ffe5ecf4bcdc7f5f02fe04839 | [] | no_license | ug2454/PythonPractice | cb507e380b32ecba14b355a3bd60769a4682b4ab | cbf7211e00d46f166246d5932661a6f110cc1cf0 | refs/heads/master | 2022-11-09T03:52:57.971095 | 2020-06-14T12:11:51 | 2020-06-14T12:11:51 | 272,194,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | import max
numbers=[19,20,30]
print(max.max(numbers)) | [
"u.garg14@gmail.com"
] | u.garg14@gmail.com |
f312f96e09ae162f71d13541059405e61729ea52 | 34d99bff51f26c03fcf05141589f51abeae2ff98 | /HTJK/venv/Lib/site-packages/wqrfnium/wqrfnium.py | 11297b7b76430aef3371b426153664074192804d | [] | no_license | zmbhza/appui | d5b31c60122eabe4d8d484d0d15e333b46a9d46f | 7a5b1072245c53b5a227943b41ef0b54420c7107 | refs/heads/master | 2022-12-21T14:00:41.509390 | 2020-09-27T03:34:15 | 2020-09-27T03:34:15 | 297,602,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,096 | py | # -*- coding: utf-8 -*-
import os,sys
import re,time
import Levenshtein
import xlrd,xlwt
from xlutils.copy import copy
import os,platform
import configparser
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#----------------------------------
# diy your elements_xls_path
def create_xls(elements_xls_path):
    """Create an empty elements workbook with a single 'Sheet1', unless one exists."""
    if os.path.exists(elements_xls_path):
        return
    workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
    workbook.add_sheet('Sheet1', cell_overwrite_ok=True)
    workbook.save(elements_xls_path)
def get_elements(icon):
    """Look up `icon` in the elements workbook and return its locator row.

    Returns [find_method, find_value, index, html_element, row_number].
    Exits the process if the workbook is missing or the icon is not found.
    """
    try:
        Data = xlrd.open_workbook(elements_xls_path)
    except Exception:
        # Workbook missing/unreadable: tell the user the expected format and bail.
        print('Please put the element into the elements.xls first!')
        print('First column:icon,Second column:tmp_find_method,Third column:tmp_find_value,Fourth column:index,Fifth column:html_element')
        print('For example:seachinput,id,kw,0,<input type="text" class="s_ipt" name="wd" id="kw" maxlength="100" autocomplete="off">')
        exit(0)
    table = Data.sheet_by_name("Sheet1")
    nrows = table.nrows
    # Linear scan of column 0 for the requested icon.
    for i in range(nrows):
        element_tmp = table.cell(i,0).value
        if element_tmp == icon:
            # Column 4 (saved outerHTML) may be absent on a freshly added row.
            try:
                html_element = table.cell(i,4).value
            except:
                html_element = ''
            return [table.cell(i,1).value,table.cell(i,2).value,int(table.cell(i,3).value),html_element,i]
    print('not fonund the element: [ %s ],please fixed it by yourself...'%icon)
    exit(0)
def update_elements(id, html, tmp, tmp_value, index):
    """Persist a re-located element's new locator into row `id` of the workbook.

    Columns written: 1 = find method, 2 = find value, 3 = element index,
    4 = outerHTML snapshot. The workbook file is rewritten in place.
    """
    readonly_book = xlrd.open_workbook(elements_xls_path)
    writable_book = copy(readonly_book)
    sheet = writable_book.get_sheet(0)
    sheet.write(id, 1, tmp)
    sheet.write(id, 2, tmp_value)
    sheet.write(id, 3, index)
    sheet.write(id, 4, html)
    # Replace the old workbook file with the updated copy.
    os.remove(elements_xls_path)
    writable_book.save(elements_xls_path)
def input_html_element(id, html):
    """Store an element's outerHTML snapshot into column 4 of row `id`."""
    readonly_book = xlrd.open_workbook(elements_xls_path)
    writable_book = copy(readonly_book)
    writable_book.get_sheet(0).write(id, 4, html)
    # Replace the old workbook file with the updated copy.
    os.remove(elements_xls_path)
    writable_book.save(elements_xls_path)
def likescore(oldstr, newstr):
    """Return the Levenshtein similarity ratio (0.0-1.0) of the two values as strings."""
    return Levenshtein.ratio(str(oldstr), str(newstr))
def search_new(driver, old_html):
    """Self-healing locator: find the page element most similar to `old_html`.

    Extracts common attributes from the stale element's saved outerHTML,
    scores every element with the same tag via Levenshtein similarity, and
    returns [best_element, its_outerHTML, 'tag name', tag, index] so the
    caller can save a fresh locator.
    """
    # Pull each attribute of interest out of the saved HTML; default to
    # None (attribute comparison) or '' (text-like comparison) when absent.
    try:old_id = re.findall(r'id="(.*?)"',old_html)[0]
    except:old_id = None
    try:old_name = re.findall(r'name="(.*?)"',old_html)[0]
    except:old_name=None
    try:old_class = re.findall(r'class="(.*?)"',old_html)[0]
    except:old_class=None
    try:old_text = re.findall(r'>(.*?)<',old_html)[0]
    except:old_text=''
    try:old_value = re.findall(r'value="(.*?)"',old_html)[0]
    except:old_value=''
    try:old_onclick = re.findall(r'onclick="(.*?)"',old_html)[0]
    except:old_onclick=None
    try:old_style = re.findall(r'style="(.*?)"',old_html)[0]
    except:old_style=''
    try:old_placeholder = re.findall(r'placeholder="(.*?)"', old_html)[0]
    except:old_placeholder=None
    try:old_href = re.findall(r'href="(.*?)"',old_html)[0]
    except:old_href=None
    try:old_type = re.findall(r'type="(.*?)"',old_html)[0]
    except:old_type = None
    #--------------------------------------------------------get all par
    # Tag name: either '<tag attrs...' or a bare '<tag>'.
    try:
        bq = re.findall(r'<(.+?) ',old_html)[0]
    except:
        bq = re.findall(r'<(.+?)>',old_html)[0]
    # Score every same-tag element on the current page; keep the best.
    new_elements = driver.find_elements_by_tag_name(bq)
    end_element = new_elements[0]
    end_index = 0
    tmp_score = 0
    for i in range(len(new_elements)):
        score = 0
        new_id = new_elements[i].get_attribute("id")
        new_name = new_elements[i].get_attribute("name")
        new_class = new_elements[i].get_attribute("class")
        new_text = new_elements[i].text
        new_value = new_elements[i].get_attribute("value")
        new_onclick = new_elements[i].get_attribute("onclick")
        new_style = new_elements[i].get_attribute("style")
        new_placeholder = new_elements[i].get_attribute("placeholder")
        new_href = new_elements[i].get_attribute("href")
        try:new_type = re.findall(r'type="(.*?)"',new_elements[i].get_attribute("outerHTML"))[0]
        except:new_type = None
        # Sum of per-attribute similarity ratios (each in [0, 1]).
        score += likescore(old_id, new_id)
        score += likescore(old_name, new_name)
        score += likescore(old_class, new_class)
        score += likescore(old_text, new_text)
        score += likescore(old_value, new_value)
        score += likescore(old_onclick, new_onclick)
        # style is compared whitespace-insensitively.
        score += likescore(str(old_style).replace(' ',''), str(new_style).replace(' ',''))
        score += likescore(old_placeholder, new_placeholder)
        score += likescore(old_href, new_href)
        score += likescore(old_type,new_type)
        if score > tmp_score:
            end_element = new_elements[i]
            end_index = i
            tmp_score = score
    # The repaired locator is "nth element with this tag name".
    new_html = end_element.get_attribute("outerHTML")
    new_tmp = 'tag name' #use id,name
    new_tmp_value = bq
    new_index = end_index
    return [end_element,new_html,new_tmp,new_tmp_value,new_index]
def getelement(driver, icon):
    """Find a WebElement for `icon`, self-healing the stored locator on failure.

    Tries the locator saved in the workbook first; on any lookup failure,
    falls back to search_new() and persists the repaired locator.
    """
    time1 = time.time()
    element = get_elements(icon)
    if element == 'error':
        raise Exception
    print('find: %s ...'%icon)
    old_html = element[3]
    try:
        # Normalize workbook shorthand to Selenium's By strings.
        if element[0] == 'link_text': element[0] = 'link text'
        if element[0] == 'class' or element[0] == 'class_name': element[0] = 'class name'
        el = driver.find_elements(element[0],element[1])[element[2]]
        print('success in %s s'%str(time.time()-time1)[:5])
        # First successful find: snapshot the outerHTML for future self-healing.
        if old_html == '':
            html_element = el.get_attribute("outerHTML")
            input_html_element(element[-1],html_element)
        return el
    except Exception:
        print('find_faild,begin fix....')
        # No saved HTML means this was a brand-new (and wrong) entry:
        # nothing to heal from, so ask the user to fix the workbook.
        if element[-2] == '':
            print('we find this element:%s are you first set,but set wrong.Please set right in first time.'%icon)
            exit(0)
        # Heal: locate the most similar element and persist its new locator.
        newel_detail = search_new(driver,old_html)
        newel = newel_detail[0]
        new_html = newel_detail[1]
        new_tmp = newel_detail[2]
        new_tmp_value = newel_detail[3]
        new_index = newel_detail[4]
        update_elements(element[4],html=new_html,tmp=new_tmp,tmp_value=new_tmp_value,index=new_index)
        print('find success in %s s'%str(time.time()-time1)[:5])
        return newel
try:
cfp = configparser.ConfigParser()
cfp.read('wqrfnium.ini')
elements_xls_path = cfp.get('Excel','elements_xls_path')
except: # create wqrfnium.ini
cfp = configparser.ConfigParser()
cfp["Excel"] = {"elements_xls_path":""}
with open('wqrfnium.ini','w') as fp:
cfp.write(fp)
elements_xls_path = cfp.get('Excel','elements_xls_path')
def begin_wqrf(path):
    """Point wqrfnium at a custom elements.xls path and persist it in wqrfnium.ini.

    `path` may be a directory (the default 'elements.xls' file name is
    appended) or a full .xls path. Creates the workbook if needed and
    updates the module-level `elements_xls_path`.
    """
    global elements_xls_path
    # Directory given (no .xls suffix): append the default workbook name.
    if 'xls' not in path.split('.')[-1]:
        if path[-1] == '/':
            path += 'elements.xls'
        else:
            path += '/elements.xls'
    if elements_xls_path != path:
        # Path changed: warn (bilingually), create the new workbook, and
        # write the new location back to wqrfnium.ini.
        print("----------------------------------")
        print("You are changeing the elements_xls_path,the new path is %s now!"%path)
        print("你正在自定义元素表elements.xls的存放路径,新路径为:%s"%path)
        print("You'd better handle the old elements_xls : %s by yourself."%elements_xls_path)
        print("你最好处理掉旧的元素表:%s"%elements_xls_path)
        create_xls(path)
        cfp.set("Excel","elements_xls_path",path)
        with open("wqrfnium.ini","w+") as f:
            cfp.write(f)
        elements_xls_path = path
if elements_xls_path == '': #no path
# begin to set the elements
if 'arwin' in platform.system() or 'inux' in platform.system() :
elements_xls_path =os.environ['HOME']+"/elements.xls"
else:
elements_xls_path = "C:\\elements.xls"
print('You are first use wqrfnium,it is creating elements.xls,you must edit elements.xls and play wqrfnium after!')
print('这是您第一次使用wqrfnium,它正在自动创建元素表elements.xls,您必须在这次启动后再去使用wqrfnium和添加元素到elements.xls等操作!')
print('Your elements.xls tmp path is %s' % elements_xls_path)
print('你的元素表elements.xls的临时路径是 %s'%elements_xls_path)
print("First colum is element's icon,second is element's tmp_find_method,third is element's tmp_find_value,forth is element's index,the last is element's html_element")
print("元素表:第一列为元素的标识,第二列为元素的临时定位方式,第三列为元素的临时定位值,第四列为元素的下标,最后一列元素的html标签源码")
print("You can also read the README to get help or wirte email to 1074321997@qq.com")
print("你也可以去阅读README.md来获取更多帮助,或者发送邮件到1074321997@qq.com联系作者")
print('You can use code [begin_wqrf("your diy new elements_xls_path ")] to diy your elements_xls_path!')
print('你可以在文件开头添加代码[begin_wqrf("你的元素表elements.path的自定义存放路径")] 来 自定义 你的元素表存放路径!')
create_xls(elements_xls_path)
cfp.set("Excel", "elements_xls_path", elements_xls_path)
with open("wqrfnium.ini", "w+") as f:
cfp.write(f)
else:
if 'arwin' in platform.system() or 'inux' in platform.system() :
if elements_xls_path == os.environ['HOME']+"/elements.xls": # default path
print('Your elements.xls tmp path is default : %s'%elements_xls_path)
print('你的elements.xls 的临时存放路径为默认:%s'%elements_xls_path)
else:
print('Your elements.xls tmp path is diy by yourself : %s' % elements_xls_path)
print('你的elements.xls 的自定义存放路径为:%s' % elements_xls_path)
else:
if elements_xls_path == "C:\\elements.xls": # default path
print('Your elements.xls tmp path is default : %s'%elements_xls_path)
print('你的elements.xls 的临时存放路径为默认:%s' % elements_xls_path)
else:
print('Your elements.xls tmp path is diy by yourself : %s' % elements_xls_path)
print('你的elements.xls 的自定义存放路径为:%s' % elements_xls_path)
| [
"847160625@qq.com"
] | 847160625@qq.com |
8608678850cf6031586f8b1bce7e8531244232c5 | 7869035b72807394154285d307e0597ee16f11d8 | /src/data_loader.py | 2a23407ac8c03daa931088d7b07b81b5ff04a48b | [] | no_license | tiffany70072/TokenPositioning | cb74edae92e19c16f8ca763935e56b0f2e698b85 | a2ab63640a2aff1abfccaa1c1486d8a97026ef0b | refs/heads/master | 2022-07-19T11:21:04.716882 | 2020-04-17T06:02:18 | 2020-04-17T06:02:18 | 254,995,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | import numpy as np
import os
from sklearn.model_selection import train_test_split
def load_data(task, data_name, data_type):
    """Load encoder/decoder numpy arrays for one training task.

    Args:
        task: Task identifier; only "autoenc-last" and "token-posi" are supported.
        data_name: Sub-directory of ../data that holds the .npy files.
        data_type: Data split to load, either "train" or "valid".

    Returns:
        Tuple (encoder_data, decoder_data, decoder_output) of numpy arrays,
        where decoder_output is the one-step-shifted target built by
        set_decoder_output_data.

    Raises:
        ValueError: If task or data_type is not one of the supported values.
    """
    if task not in ("autoenc-last", "token-posi"):
        # Bug fix: the original `raise "No this task..."` raised a bare string,
        # which is itself a TypeError in Python 3, masking the real problem.
        raise ValueError("No such task for load_data: %r" % task)
    if data_type not in ("train", "valid"):
        # Explicit error instead of `assert`, which is stripped under -O.
        raise ValueError("No such data type: %r" % data_type)
    data_path = os.path.join("../data", data_name)
    encoder_data = np.load(os.path.join(data_path, "encoder_%s.npy" % data_type))
    decoder_data = np.load(os.path.join(data_path, "decoder_%s.npy" % data_type))
    assert encoder_data.shape[0] == decoder_data.shape[0], "data size not match."
    decoder_output = set_decoder_output_data(decoder_data)
    return encoder_data, decoder_data, decoder_output
def set_decoder_output_data(decoder_input):
    """Build the decoder-target array by shifting inputs one step left.

    The decoder output at time t is the decoder input at time t+1 (teacher
    forcing); the final time step is zero-padded.  The result is reshaped to
    3-D because Keras sparse targets expect a trailing singleton axis.

    Args:
        decoder_input: 2-D numpy array of shape (batch, time).

    Returns:
        3-D numpy array of shape (batch, time, 1) with the same dtype as the
        input; the input array is not modified.
    """
    decoder_output = np.zeros_like(decoder_input)
    # Vectorized shift replaces the original per-row Python loop; the last
    # column stays zero from zeros_like (the original multiplied it by 0).
    decoder_output[:, :-1] = decoder_input[:, 1:]
    return decoder_output[:, :, np.newaxis]
"""
def cut_validation(self):
# TODO: cut training, validation and testing
split_result = data_reader.data_split(self.encoder_in, self.decoder_in, self.decoder_out)
self.encoder_in = split_result[0]
self.decoder_in = split_result[1]
self.decoder_out = split_result[2]
self.encoder_in_valid = split_result[3][:50000] # TODO: Deal with too many data.
self.decoder_in_valid = split_result[4][:50000]
self.decoder_out_valid = split_result[5][:50000]
self.encoder_in_test = split_result[6]
self.decoder_in_test = split_result[7]
self.decoder_out_test = split_result[8]
self.encoder_in = split_result[0]#[:3000]
self.decoder_in = split_result[1]#[:3000]
self.decoder_out = split_result[2]#[:3000]
print("(Cut validation) training size:", self.encoder_in.shape)
print("(Cut validation) validation size:", self.encoder_in_valid.shape)
print("(Cut validation) testing size:", self.encoder_in_test.shape)
""" | [
"tiffany70072@gmail.com"
] | tiffany70072@gmail.com |
a4eb444e3bee4d492386c1d33f6ce720fe415054 | c862c18ea1097ec54df04e09debae9e68d0c9897 | /edit_note_dialog.py | 38cc02deab7901e90daae048cc7d898d15833112 | [] | no_license | YoungTeurus/Organiser_Qt | 605e8428e15f155c77edeb036d23133e22104365 | 499fcb9259f496adbecfc21730bdc9de33dc04dd | refs/heads/master | 2021-02-05T16:30:57.451874 | 2020-03-01T17:43:14 | 2020-03-01T17:43:14 | 243,803,353 | 0 | 0 | null | 2020-03-01T17:43:16 | 2020-02-28T16:12:47 | Python | UTF-8 | Python | false | false | 2,775 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Work\Organiser_Qt\edit_note_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated layout for the note-editing dialog.

    NOTE: this class is autogenerated from edit_note_dialog.ui; regenerate
    with pyuic5 instead of editing by hand — manual changes are lost.
    """

    def setupUi(self, Dialog):
        # Build and position all child widgets on the given QDialog.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 278)
        # Single-line title field at the top of the dialog.
        self.title_line = QtWidgets.QLineEdit(Dialog)
        self.title_line.setGeometry(QtCore.QRect(120, 10, 261, 20))
        self.title_line.setObjectName("title_line")
        # Multi-line body of the note.
        self.note_text = QtWidgets.QTextEdit(Dialog)
        self.note_text.setGeometry(QtCore.QRect(10, 40, 371, 201))
        self.note_text.setObjectName("note_text")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(20, 10, 91, 16))
        self.label.setObjectName("label")
        # Bottom row: spacer pushes the Save/Delete buttons to the right.
        self.horizontalLayoutWidget = QtWidgets.QWidget(Dialog)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 243, 371, 31))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        # Save starts disabled; presumably enabled elsewhere once the form is
        # edited — TODO confirm against the dialog controller.
        self.save_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.save_button.setEnabled(False)
        self.save_button.setCheckable(False)
        self.save_button.setAutoRepeatDelay(298)
        self.save_button.setObjectName("save_button")
        self.horizontalLayout.addWidget(self.save_button)
        self.delete_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.delete_button.setObjectName("delete_button")
        self.horizontalLayout.addWidget(self.delete_button)
        # Explicit stacking order of the top-level widgets.
        self.note_text.raise_()
        self.title_line.raise_()
        self.label.raise_()
        self.horizontalLayoutWidget.raise_()

        self.retranslateUi(Dialog)
        # Wire button clicks to the Dialog's save/delete slots.
        self.save_button.clicked.connect(Dialog.save)
        self.delete_button.clicked.connect(Dialog.delete)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Install all user-visible (translatable) strings.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "Название заметки"))
        self.save_button.setText(_translate("Dialog", "Сохранить изменения"))
        self.delete_button.setText(_translate("Dialog", "Удалить заметку"))
| [
"ilya.elfimow@yandex.ru"
] | ilya.elfimow@yandex.ru |
13a4f3ce6cf13557eb0b81be5c554c8af70bd323 | 6984724d0466d477635b23d073affa9b00f01f67 | /Tasks/Ramanenka_Tasks/HT6/app_Calc.py | 139762ac73cc6b004c125c7310934ab7e8c2ccb9 | [] | no_license | RomanPutsilouski/M-PT1-37-21 | 202414fac782e6c68f741e55f9b7697f0c974f45 | ceef9b4e6bcff2a9033615ec761f0e2e73c9467e | refs/heads/main | 2023-05-30T21:10:22.404817 | 2021-06-30T00:26:29 | 2021-06-30T00:26:29 | 348,462,785 | 1 | 0 | null | 2021-06-05T15:44:27 | 2021-03-16T19:06:57 | Python | UTF-8 | Python | false | false | 257 | py | from ht6_calculator_with_brackets import recurs
"""Enter the expression or continue with default expression"""
expression = '(25 -(5- (1-2))/(5-8))'
# equation = input('Expression is: \n')
results = float(recurs(expression))
print(f'Result is: {results}') | [
"margoroma2010@gmail.com"
] | margoroma2010@gmail.com |
22e5a66e84c47b3691015f299972b4f9e43427f4 | 71c331e4b1e00fa3be03b7f711fcb05a793cf2af | /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/firestore/v1/firestore_v1_client.py | ac370070865d488484aa602c2024b65bf41079fa | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iofh/QA-System | 568228bb0c0adf9ec23b45cd144d61049e720002 | af4a8f1b5f442ddf4905740ae49ed23d69afb0f6 | refs/heads/master | 2022-11-27T23:04:16.385021 | 2020-08-12T10:11:44 | 2020-08-12T10:11:44 | 286,980,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,606 | py | """Generated client library for firestore version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.firestore.v1 import firestore_v1_messages as messages
class FirestoreV1(base_api.BaseApiClient):
  """Generated client library for service firestore version v1.

  NOTE: autogenerated from the Firestore discovery document; do not edit
  by hand.
  """

  MESSAGES_MODULE = messages
  BASE_URL = 'https://firestore.googleapis.com/'
  MTLS_BASE_URL = 'https://firestore.mtls.googleapis.com/'

  _PACKAGE = 'firestore'
  # OAuth scopes requested for every Firestore call.
  _SCOPES = ['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/datastore']
  _VERSION = 'v1'
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _USER_AGENT = 'google-cloud-sdk'
  _CLIENT_CLASS_NAME = 'FirestoreV1'
  _URL_VERSION = 'v1'
  _API_KEY = None
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None, response_encoding=None):
    """Create a new firestore handle.

    All arguments are forwarded unchanged to base_api.BaseApiClient; when
    `url` is empty the class-level BASE_URL is used.  After base
    initialization, one service wrapper per API resource collection is
    instantiated and exposed as an attribute.
    """
    url = url or self.BASE_URL
    super(FirestoreV1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # Per-resource service endpoints.
    self.projects_databases_collectionGroups_fields = self.ProjectsDatabasesCollectionGroupsFieldsService(self)
    self.projects_databases_collectionGroups_indexes = self.ProjectsDatabasesCollectionGroupsIndexesService(self)
    self.projects_databases_collectionGroups = self.ProjectsDatabasesCollectionGroupsService(self)
    self.projects_databases_documents = self.ProjectsDatabasesDocumentsService(self)
    self.projects_databases_operations = self.ProjectsDatabasesOperationsService(self)
    self.projects_databases = self.ProjectsDatabasesService(self)
    self.projects_locations = self.ProjectsLocationsService(self)
    self.projects = self.ProjectsService(self)
  class ProjectsDatabasesCollectionGroupsFieldsService(base_api.BaseApiService):
    """Service class for the projects_databases_collectionGroups_fields resource.

    Autogenerated apitools wrapper: each method delegates to
    BaseApiService._RunMethod using the ApiMethodInfo attached below it.
    """

    _NAME = 'projects_databases_collectionGroups_fields'

    def __init__(self, client):
      super(FirestoreV1.ProjectsDatabasesCollectionGroupsFieldsService, self).__init__(client)
      self._upload_configs = {
          }

    def Get(self, request, global_params=None):
      r"""Gets the metadata and configuration for a Field.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleFirestoreAdminV1Field) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    # method_config is a lambda so the ApiMethodInfo is built lazily on each
    # access rather than at import time.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}',
        http_method='GET',
        method_id='firestore.projects.databases.collectionGroups.fields.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest',
        response_type_name='GoogleFirestoreAdminV1Field',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      r"""Lists the field configuration and metadata for this database.

      Currently, FirestoreAdmin.ListFields only supports listing fields
      that have been explicitly overridden. To issue this query, call
      FirestoreAdmin.ListFields with the filter set to
      `indexConfig.usesAncestorConfig:false`.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleFirestoreAdminV1ListFieldsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields',
        http_method='GET',
        method_id='firestore.projects.databases.collectionGroups.fields.list',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=['filter', 'pageSize', 'pageToken'],
        relative_path='v1/{+parent}/fields',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest',
        response_type_name='GoogleFirestoreAdminV1ListFieldsResponse',
        supports_download=False,
    )

    def Patch(self, request, global_params=None):
      r"""Updates a field configuration. Currently, field updates apply only to.
      single field index configuration. However, calls to
      FirestoreAdmin.UpdateField should provide a field mask to avoid
      changing any configuration that the caller isn't aware of. The field mask
      should be specified as: `{ paths: "index_config" }`.

      This call returns a google.longrunning.Operation which may be used to
      track the status of the field update. The metadata for
      the operation will be the type FieldOperationMetadata.

      To configure the default field settings for the database, use
      the special `Field` with resource name:
      `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleLongrunningOperation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    Patch.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}',
        http_method='PATCH',
        method_id='firestore.projects.databases.collectionGroups.fields.patch',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['updateMask'],
        relative_path='v1/{+name}',
        request_field='googleFirestoreAdminV1Field',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest',
        response_type_name='GoogleLongrunningOperation',
        supports_download=False,
    )
  class ProjectsDatabasesCollectionGroupsIndexesService(base_api.BaseApiService):
    """Service class for the projects_databases_collectionGroups_indexes resource.

    Autogenerated apitools wrapper: each method delegates to
    BaseApiService._RunMethod using the ApiMethodInfo attached below it.
    """

    _NAME = 'projects_databases_collectionGroups_indexes'

    def __init__(self, client):
      super(FirestoreV1.ProjectsDatabasesCollectionGroupsIndexesService, self).__init__(client)
      self._upload_configs = {
          }

    def Create(self, request, global_params=None):
      r"""Creates a composite index. This returns a google.longrunning.Operation.
      which may be used to track the status of the creation. The metadata for
      the operation will be the type IndexOperationMetadata.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleLongrunningOperation) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)

    # method_config is a lambda so the ApiMethodInfo is built lazily on each
    # access rather than at import time.
    Create.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes',
        http_method='POST',
        method_id='firestore.projects.databases.collectionGroups.indexes.create',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=[],
        relative_path='v1/{+parent}/indexes',
        request_field='googleFirestoreAdminV1Index',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest',
        response_type_name='GoogleLongrunningOperation',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
      r"""Deletes a composite index.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    Delete.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}',
        http_method='DELETE',
        method_id='firestore.projects.databases.collectionGroups.indexes.delete',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      r"""Gets a composite index.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleFirestoreAdminV1Index) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}',
        http_method='GET',
        method_id='firestore.projects.databases.collectionGroups.indexes.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest',
        response_type_name='GoogleFirestoreAdminV1Index',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      r"""Lists composite indexes.

      Args:
        request: (FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleFirestoreAdminV1ListIndexesResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes',
        http_method='GET',
        method_id='firestore.projects.databases.collectionGroups.indexes.list',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=['filter', 'pageSize', 'pageToken'],
        relative_path='v1/{+parent}/indexes',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest',
        response_type_name='GoogleFirestoreAdminV1ListIndexesResponse',
        supports_download=False,
    )
  class ProjectsDatabasesCollectionGroupsService(base_api.BaseApiService):
    """Service class for the projects_databases_collectionGroups resource.

    No methods of its own; exists so the resource hierarchy mirrors the API.
    """

    _NAME = 'projects_databases_collectionGroups'

    def __init__(self, client):
      super(FirestoreV1.ProjectsDatabasesCollectionGroupsService, self).__init__(client)
      self._upload_configs = {
          }
  class ProjectsDatabasesDocumentsService(base_api.BaseApiService):
    """Service class for the projects_databases_documents resource.

    Autogenerated apitools wrapper: each method delegates to
    BaseApiService._RunMethod using the ApiMethodInfo attached below it.
    """

    _NAME = 'projects_databases_documents'

    def __init__(self, client):
      super(FirestoreV1.ProjectsDatabasesDocumentsService, self).__init__(client)
      self._upload_configs = {
          }

    def BatchGet(self, request, global_params=None):
      r"""Gets multiple documents.

      Documents returned by this method are not guaranteed to be returned in the
      same order that they were requested.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsBatchGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BatchGetDocumentsResponse) The response message.
      """
      config = self.GetMethodConfig('BatchGet')
      return self._RunMethod(
          config, request, global_params=global_params)

    # method_config is a lambda so the ApiMethodInfo is built lazily on each
    # access rather than at import time.
    BatchGet.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:batchGet',
        http_method='POST',
        method_id='firestore.projects.databases.documents.batchGet',
        ordered_params=['database'],
        path_params=['database'],
        query_params=[],
        relative_path='v1/{+database}/documents:batchGet',
        request_field='batchGetDocumentsRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsBatchGetRequest',
        response_type_name='BatchGetDocumentsResponse',
        supports_download=False,
    )

    def BeginTransaction(self, request, global_params=None):
      r"""Starts a new transaction.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsBeginTransactionRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BeginTransactionResponse) The response message.
      """
      config = self.GetMethodConfig('BeginTransaction')
      return self._RunMethod(
          config, request, global_params=global_params)

    BeginTransaction.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction',
        http_method='POST',
        method_id='firestore.projects.databases.documents.beginTransaction',
        ordered_params=['database'],
        path_params=['database'],
        query_params=[],
        relative_path='v1/{+database}/documents:beginTransaction',
        request_field='beginTransactionRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsBeginTransactionRequest',
        response_type_name='BeginTransactionResponse',
        supports_download=False,
    )

    def Commit(self, request, global_params=None):
      r"""Commits a transaction, while optionally updating documents.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsCommitRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (CommitResponse) The response message.
      """
      config = self.GetMethodConfig('Commit')
      return self._RunMethod(
          config, request, global_params=global_params)

    Commit.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:commit',
        http_method='POST',
        method_id='firestore.projects.databases.documents.commit',
        ordered_params=['database'],
        path_params=['database'],
        query_params=[],
        relative_path='v1/{+database}/documents:commit',
        request_field='commitRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsCommitRequest',
        response_type_name='CommitResponse',
        supports_download=False,
    )

    def CreateDocument(self, request, global_params=None):
      r"""Creates a new document.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsCreateDocumentRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Document) The response message.
      """
      config = self.GetMethodConfig('CreateDocument')
      return self._RunMethod(
          config, request, global_params=global_params)

    CreateDocument.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}',
        http_method='POST',
        method_id='firestore.projects.databases.documents.createDocument',
        ordered_params=['parent', 'collectionId'],
        path_params=['collectionId', 'parent'],
        query_params=['documentId', 'mask_fieldPaths'],
        relative_path='v1/{+parent}/{collectionId}',
        request_field='document',
        request_type_name='FirestoreProjectsDatabasesDocumentsCreateDocumentRequest',
        response_type_name='Document',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
      r"""Deletes a document.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    Delete.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
        http_method='DELETE',
        method_id='firestore.projects.databases.documents.delete',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['currentDocument_exists', 'currentDocument_updateTime'],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesDocumentsDeleteRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      r"""Gets a single document.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Document) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
        http_method='GET',
        method_id='firestore.projects.databases.documents.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['mask_fieldPaths', 'readTime', 'transaction'],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesDocumentsGetRequest',
        response_type_name='Document',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      r"""Lists documents.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListDocumentsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}',
        http_method='GET',
        method_id='firestore.projects.databases.documents.list',
        ordered_params=['parent', 'collectionId'],
        path_params=['collectionId', 'parent'],
        query_params=['mask_fieldPaths', 'orderBy', 'pageSize', 'pageToken', 'readTime', 'showMissing', 'transaction'],
        relative_path='v1/{+parent}/{collectionId}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesDocumentsListRequest',
        response_type_name='ListDocumentsResponse',
        supports_download=False,
    )

    def ListCollectionIds(self, request, global_params=None):
      r"""Lists all the collection IDs underneath a document.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListCollectionIdsResponse) The response message.
      """
      config = self.GetMethodConfig('ListCollectionIds')
      return self._RunMethod(
          config, request, global_params=global_params)

    ListCollectionIds.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds',
        http_method='POST',
        method_id='firestore.projects.databases.documents.listCollectionIds',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=[],
        relative_path='v1/{+parent}:listCollectionIds',
        request_field='listCollectionIdsRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest',
        response_type_name='ListCollectionIdsResponse',
        supports_download=False,
    )

    def Listen(self, request, global_params=None):
      r"""Listens to changes.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsListenRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListenResponse) The response message.
      """
      config = self.GetMethodConfig('Listen')
      return self._RunMethod(
          config, request, global_params=global_params)

    Listen.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:listen',
        http_method='POST',
        method_id='firestore.projects.databases.documents.listen',
        ordered_params=['database'],
        path_params=['database'],
        query_params=[],
        relative_path='v1/{+database}/documents:listen',
        request_field='listenRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsListenRequest',
        response_type_name='ListenResponse',
        supports_download=False,
    )

    def Patch(self, request, global_params=None):
      r"""Updates or inserts a document.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Document) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    Patch.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
        http_method='PATCH',
        method_id='firestore.projects.databases.documents.patch',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['currentDocument_exists', 'currentDocument_updateTime', 'mask_fieldPaths', 'updateMask_fieldPaths'],
        relative_path='v1/{+name}',
        request_field='document',
        request_type_name='FirestoreProjectsDatabasesDocumentsPatchRequest',
        response_type_name='Document',
        supports_download=False,
    )

    def Rollback(self, request, global_params=None):
      r"""Rolls back a transaction.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsRollbackRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
      """
      config = self.GetMethodConfig('Rollback')
      return self._RunMethod(
          config, request, global_params=global_params)

    Rollback.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:rollback',
        http_method='POST',
        method_id='firestore.projects.databases.documents.rollback',
        ordered_params=['database'],
        path_params=['database'],
        query_params=[],
        relative_path='v1/{+database}/documents:rollback',
        request_field='rollbackRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsRollbackRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def RunQuery(self, request, global_params=None):
      r"""Runs a query.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsRunQueryRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (RunQueryResponse) The response message.
      """
      config = self.GetMethodConfig('RunQuery')
      return self._RunMethod(
          config, request, global_params=global_params)

    RunQuery.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery',
        http_method='POST',
        method_id='firestore.projects.databases.documents.runQuery',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=[],
        relative_path='v1/{+parent}:runQuery',
        request_field='runQueryRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsRunQueryRequest',
        response_type_name='RunQueryResponse',
        supports_download=False,
    )

    def Write(self, request, global_params=None):
      r"""Streams batches of document updates and deletes, in order.

      Args:
        request: (FirestoreProjectsDatabasesDocumentsWriteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (WriteResponse) The response message.
      """
      config = self.GetMethodConfig('Write')
      return self._RunMethod(
          config, request, global_params=global_params)

    Write.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:write',
        http_method='POST',
        method_id='firestore.projects.databases.documents.write',
        ordered_params=['database'],
        path_params=['database'],
        query_params=[],
        relative_path='v1/{+database}/documents:write',
        request_field='writeRequest',
        request_type_name='FirestoreProjectsDatabasesDocumentsWriteRequest',
        response_type_name='WriteResponse',
        supports_download=False,
    )
  class ProjectsDatabasesOperationsService(base_api.BaseApiService):
    """Service class for the projects_databases_operations resource.

    Autogenerated apitools wrapper: each method delegates to
    BaseApiService._RunMethod using the ApiMethodInfo attached below it.
    """

    _NAME = 'projects_databases_operations'

    def __init__(self, client):
      super(FirestoreV1.ProjectsDatabasesOperationsService, self).__init__(client)
      self._upload_configs = {
          }

    def Cancel(self, request, global_params=None):
      r"""Starts asynchronous cancellation on a long-running operation. The server.
      makes a best effort to cancel the operation, but success is not
      guaranteed. If the server doesn't support this method, it returns
      `google.rpc.Code.UNIMPLEMENTED`. Clients can use
      Operations.GetOperation or
      other methods to check whether the cancellation succeeded or whether the
      operation completed despite cancellation. On successful cancellation,
      the operation is not deleted; instead, it becomes an operation with
      an Operation.error value with a google.rpc.Status.code of 1,
      corresponding to `Code.CANCELLED`.

      Args:
        request: (FirestoreProjectsDatabasesOperationsCancelRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
      """
      config = self.GetMethodConfig('Cancel')
      return self._RunMethod(
          config, request, global_params=global_params)

    # method_config is a lambda so the ApiMethodInfo is built lazily on each
    # access rather than at import time.
    Cancel.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}:cancel',
        http_method='POST',
        method_id='firestore.projects.databases.operations.cancel',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}:cancel',
        request_field='googleLongrunningCancelOperationRequest',
        request_type_name='FirestoreProjectsDatabasesOperationsCancelRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
      r"""Deletes a long-running operation. This method indicates that the client is.
      no longer interested in the operation result. It does not cancel the
      operation. If the server doesn't support this method, it returns
      `google.rpc.Code.UNIMPLEMENTED`.

      Args:
        request: (FirestoreProjectsDatabasesOperationsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    Delete.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}',
        http_method='DELETE',
        method_id='firestore.projects.databases.operations.delete',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsDatabasesOperationsDeleteRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      r"""Gets the latest state of a long-running operation. Clients can use this.
      method to poll the operation result at intervals as recommended by the API
      service.

      Args:
        request: (FirestoreProjectsDatabasesOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleLongrunningOperation) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}',
http_method='GET',
method_id='firestore.projects.databases.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsGetRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the.
server doesn't support this method, it returns `UNIMPLEMENTED`.
NOTE: the `name` binding allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`. To
override the binding, API services can add a binding such as
`"/v1/{name=users/*}/operations"` to their service configuration.
For backwards compatibility, the default name includes the operations
collection id, however overriding users must ensure the name binding
is the parent resource, without the operations collection id.
Args:
request: (FirestoreProjectsDatabasesOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations',
http_method='GET',
method_id='firestore.projects.databases.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/operations',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsListRequest',
response_type_name='GoogleLongrunningListOperationsResponse',
supports_download=False,
)
class ProjectsDatabasesService(base_api.BaseApiService):
    """Service class for the projects_databases resource."""
    # Auto-generated (apitools) binding for the Firestore admin
    # export/import endpoints; both start long-running operations.

    _NAME = 'projects_databases'

    def __init__(self, client):
        super(FirestoreV1.ProjectsDatabasesService, self).__init__(client)
        # No media-upload endpoints on this service.
        self._upload_configs = {
            }

    def ExportDocuments(self, request, global_params=None):
        r"""Exports a copy of all or a subset of documents from Google Cloud Firestore.
        to another storage system, such as Google Cloud Storage. Recent updates to
        documents may not be reflected in the export. The export occurs in the
        background and its progress can be monitored and managed via the
        Operation resource that is created. The output of an export may only be
        used once the associated operation is done. If an export operation is
        cancelled before completion it may leave partial data behind in Google
        Cloud Storage.

        Args:
          request: (FirestoreProjectsDatabasesExportDocumentsRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (GoogleLongrunningOperation) The response message.
        """
        config = self.GetMethodConfig('ExportDocuments')
        return self._RunMethod(
            config, request, global_params=global_params)

    # REST mapping: POST v1/{+name}:exportDocuments
    ExportDocuments.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}:exportDocuments',
        http_method='POST',
        method_id='firestore.projects.databases.exportDocuments',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}:exportDocuments',
        request_field='googleFirestoreAdminV1ExportDocumentsRequest',
        request_type_name='FirestoreProjectsDatabasesExportDocumentsRequest',
        response_type_name='GoogleLongrunningOperation',
        supports_download=False,
    )

    def ImportDocuments(self, request, global_params=None):
        r"""Imports documents into Google Cloud Firestore. Existing documents with the.
        same name are overwritten. The import occurs in the background and its
        progress can be monitored and managed via the Operation resource that is
        created. If an ImportDocuments operation is cancelled, it is possible
        that a subset of the data has already been imported to Cloud Firestore.

        Args:
          request: (FirestoreProjectsDatabasesImportDocumentsRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (GoogleLongrunningOperation) The response message.
        """
        config = self.GetMethodConfig('ImportDocuments')
        return self._RunMethod(
            config, request, global_params=global_params)

    # REST mapping: POST v1/{+name}:importDocuments
    ImportDocuments.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/databases/{databasesId}:importDocuments',
        http_method='POST',
        method_id='firestore.projects.databases.importDocuments',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}:importDocuments',
        request_field='googleFirestoreAdminV1ImportDocumentsRequest',
        request_type_name='FirestoreProjectsDatabasesImportDocumentsRequest',
        response_type_name='GoogleLongrunningOperation',
        supports_download=False,
    )
class ProjectsLocationsService(base_api.BaseApiService):
    """Service class for the projects_locations resource."""
    # Auto-generated (apitools) binding for the standard Cloud Locations
    # mixin (get/list of supported regions for this service).

    _NAME = 'projects_locations'

    def __init__(self, client):
        super(FirestoreV1.ProjectsLocationsService, self).__init__(client)
        # No media-upload endpoints on this service.
        self._upload_configs = {
            }

    def Get(self, request, global_params=None):
        r"""Gets information about a location.

        Args:
          request: (FirestoreProjectsLocationsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Location) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    # REST mapping: GET v1/{+name}
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/locations/{locationsId}',
        http_method='GET',
        method_id='firestore.projects.locations.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1/{+name}',
        request_field='',
        request_type_name='FirestoreProjectsLocationsGetRequest',
        response_type_name='Location',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""Lists information about the supported locations for this service.

        Args:
          request: (FirestoreProjectsLocationsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ListLocationsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    # REST mapping: GET v1/{+name}/locations (paginated via pageSize/pageToken)
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1/projects/{projectsId}/locations',
        http_method='GET',
        method_id='firestore.projects.locations.list',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['filter', 'pageSize', 'pageToken'],
        relative_path='v1/{+name}/locations',
        request_field='',
        request_type_name='FirestoreProjectsLocationsListRequest',
        response_type_name='ListLocationsResponse',
        supports_download=False,
    )
class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""
    # Generated stub: as far as this file shows, the `projects` collection
    # itself exposes no methods -- it only anchors the nested
    # databases/locations services above.

    _NAME = 'projects'

    def __init__(self, client):
        super(FirestoreV1.ProjectsService, self).__init__(client)
        # No media-upload endpoints on this service.
        self._upload_configs = {
            }
| [
"ige-public@hotmail.com"
] | ige-public@hotmail.com |
5a3a47716a461cf0fbff4da09be385c1328fc34e | 66d915e0d9c0016d5bbb22946539b81866fecb45 | /Soma de numeros1.py | 75775660ac4254c3804a653dfe04dfded9325d39 | [
"MIT"
] | permissive | SricardoSdSouza/Curso-da-USP | 62e00a820b218cce24fb46ef89debd8f786ce66a | 6198c509c52bf6132f904cded2e12ae941f2b973 | refs/heads/main | 2023-06-02T00:19:53.006210 | 2021-06-14T19:57:30 | 2021-06-14T19:57:30 | 376,927,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import math
def soma_digitos(numero):
    """Return the sum of the decimal digits of a non-negative integer.

    Extracted from the original inline loop so the arithmetic is testable
    without going through input()/print().
    """
    soma = 0
    while numero != 0:
        resto = numero % 10              # last digit
        numero = (numero - resto) // 10  # drop the last digit
        soma = soma + resto
    return soma


def main():
    """Prompt for a number and print the sum of its digits (messages unchanged)."""
    numero = int(input('numero: '))
    n = numero  # keep the original value for the report message
    if numero > 0:
        soma = soma_digitos(numero)
        print("A soma dos números(", n, ")é = ", soma)
    else:
        # Zero and negatives are rejected, exactly as in the original script.
        print('Número invalido...')


if __name__ == "__main__":
    main()
| [
"SricardoSdSouza@yahoo.com.br"
] | SricardoSdSouza@yahoo.com.br |
46b305d71e12ec7393424848fdb3b864a16ff25c | c2a168ec9e91415eeadd53ba6042e614c3e8460c | /benchmark_features/hpopt_1/hpop_test_1/ht_13.py | c6733fb7f930bc4ee0b82563d4b43470ae436f78 | [] | no_license | LiYanChalmers/BoschProductionLine | 530098a9de0d08332511b24a31cdd4b4ec5473fb | de864e55be0e8cd174ccacb06afc77e3dc9ec42a | refs/heads/master | 2020-03-21T20:29:14.134812 | 2018-09-03T08:10:08 | 2018-09-03T08:10:08 | 139,010,159 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | # -*- coding: utf-8 -*-
"""
Template for CV parameter search
Tasks:
1. CV
2. Train model
3. Predict on test set
4. Save
a. CV results
b. models trained in CV
c. model trained on the whole train set
d. predictions on test set
To-do:
1. Use models in CV to predict on test set, and save the predictions
a. Rewrite the CV function
b. Overhead of prediction should be small
c. RAM requirement should be small if #columns is not too large
d. In some cases, may need many columns, RAM requirement may be high.
So not implementing this idea now.
"""
import sys
sys.path.insert(0, 'bosch_helper')
from bosch_helper import *
#%% Set parameter
# Trial #13 of the hyper-parameter search; `random_state` seeds numpy so the
# CV-split seeds and xgboost seeds drawn below are reproducible.
param_id = 13
random_state = 90859
# xgboost parameters for this trial (binary classification; the tiny
# base_score reflects a heavily imbalanced positive rate).
param = {'subsample': 0.95, 'silent': 1, 'objective': 'binary:logistic', 'nthread': 20, 'min_child_weight': 5.5, 'max_depth': 15, 'lambda': 4, 'eta': 0.025, 'colsample_bytree': 0.5, 'booster': 'gbtree', 'base_score': 0.0058, 'alpha': 0}
np.random.seed(random_state)
#%% Load data
# Pre-built feature matrix (file name suggests 149 numeric features); rows
# are indexed by a 'train'/'test' outer level.
x = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'x')
y_train = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'y_train')
x_train = x.loc['train']
x_test = x.loc['test']
#%%
# Repeated stratified K-fold CV (5 folds x 3 repeats); the helper comes from
# bosch_helper (star import above).
cv_results, clfs, running_time = \
    cross_val_predict_skf_rm_xgb(param, x_train, y_train,
                                 num_boost_round=80,
                                 n_splits=5,
                                 n_repeats=3,
                                 random_state=np.random.randint(10**6),
                                 verbose_eval=True)
results = {'clfs_cv': clfs, 'results_cv': cv_results, 'running_time_cv': running_time}
#%% Train on model
dtrain = xgb.DMatrix(x_train, label=y_train)
param['seed'] = np.random.randint(10**6)
clf = xgb.train(param, dtrain,
                num_boost_round=60,
                feval=mcc_eval, evals=[(dtrain, 'train')])
y_train_pred = clf.predict(dtrain)
# Find best threshold
# Sweep 400 thresholds and keep the one maximising Matthews correlation.
# NOTE(review): the threshold is tuned on the training predictions
# themselves, so mcc_max_train is optimistic -- cross-check with cv_results.
thresholds = np.linspace(0.01, 0.99, 400)
mcc = np.array([matthews_corrcoef(y_train, y_train_pred>thr) for thr in thresholds])
best_threshold = thresholds[mcc.argmax()]
results['best_threshold_train'] = best_threshold
results['mcc_max_train'] = mcc.max()
results['clf_train'] = clf
#%% Predict on test set
dtest = xgb.DMatrix(x_test)
y_test_pred = clf.predict(dtest)
y_test_pred_int = (y_test_pred>best_threshold).astype(int)
sub = pd.read_csv("sample_submission.csv.zip", index_col=0)
sub["Response"] = y_test_pred_int
sub.to_csv('ht_13.csv.gz', compression='gzip')
results['y_test_pred_prob'] = y_test_pred
results['y_test_pred_int'] = y_test_pred_int
# Persist everything (CV models, final model, predictions) for later analysis.
save_pickle(results, 'ht_13.pickle')
| [
"li.yan.chalmers@gmail.com"
] | li.yan.chalmers@gmail.com |
badbe251c1d5142ea01e96e916591f5b6330a6ca | 202b1b82a2b7a70250415ba5d9bd1f6b277a6e84 | /share/qt/extract_strings_qt.py | acf54d0b19bbf49be33497e58552501d9f56933d | [
"MIT"
] | permissive | cmkcoin/cmkcore | 92cc4dcaf63b1d282ea2c2aa15ede822c9c7b0e7 | 5c2a3222ef901d1c6d9315177ba79e3f5094f2a6 | refs/heads/master | 2020-03-15T04:26:42.979962 | 2019-10-19T03:55:45 | 2019-10-19T03:55:45 | 131,965,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/dashstrings.cpp"  # generated C++ file picked up by Qt linguist
EMPTY=['""']  # msgid of the po-file header entry; skipped when emitting
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    entries = []
    msgid_lines = []
    msgstr_lines = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        raw = raw.rstrip('\r')
        if raw.startswith('msgid '):
            # A new entry begins; flush the previous one if it was complete.
            if reading_str:
                entries.append((msgid_lines, msgstr_lines))
                reading_str = False
            reading_id = True
            msgid_lines = [raw[6:]]
        elif raw.startswith('msgstr '):
            reading_id = False
            reading_str = True
            msgstr_lines = [raw[7:]]
        elif raw.startswith('"'):
            # Continuation line of whichever field is currently open.
            if reading_id:
                msgid_lines.append(raw)
            if reading_str:
                msgstr_lines.append(raw)
    # Flush the trailing entry, if the text ended inside a msgstr.
    if reading_str:
        entries.append((msgid_lines, msgstr_lines))
    return entries
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
# Run xgettext over the given source files, collecting every _("...") string
# on stdout in po format.
# NOTE(review): Popen output is bytes on Python 3; this script appears to
# target Python 2 -- confirm the interpreter before reusing.
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *dash_strings[] = {\n')
# Sort by msgid for stable output; skip the empty msgid (po-file header).
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("cmk-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
"cmkdev@vps.cmk.io"
] | cmkdev@vps.cmk.io |
b676c5cba48c2e1efd64286543f5f6aadfef51fd | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/wotdecorators.py | 1554469a75cbd2eab8d57565f8457da484b5051a | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,832 | py | # 2017.08.29 21:52:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/wotdecorators.py
import inspect
from functools import update_wrapper
from debug_utils import LOG_WRAPPED_CURRENT_EXCEPTION, CRITICAL_ERROR
from time_tracking import LOG_TIME_WARNING
import time
import time_tracking
def noexcept(func):
    """Wrap *func* so any exception is logged and swallowed (call returns None)."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except:
            # func_code is the Python 2 code-object attribute; logs the
            # definition site of the failing function.
            LOG_WRAPPED_CURRENT_EXCEPTION(
                wrapper.__name__, func.__name__,
                func.func_code.co_filename,
                func.func_code.co_firstlineno + 1)
    return wrapper
def nofail(func):
    """Like noexcept, but any exception is treated as fatal via CRITICAL_ERROR."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except:
            LOG_WRAPPED_CURRENT_EXCEPTION(
                wrapper.__name__, func.__name__,
                func.func_code.co_filename,
                func.func_code.co_firstlineno + 1)
            CRITICAL_ERROR('Exception in no-fail code')
    return wrapper
def exposedtoclient(func):
    """Wrap a client-exposed method: log slow calls, swallow and log exceptions."""
    def wrapper(*args, **kwargs):
        try:
            started = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - started
            if elapsed > time_tracking.DEFAULT_TIME_LIMIT:
                # args[0] is the entity; fall back to id 0 when it has none.
                LOG_TIME_WARNING(elapsed, context=(getattr(args[0], 'id', 0),
                                                   func.__name__,
                                                   args,
                                                   kwargs))
            return result
        except:
            LOG_WRAPPED_CURRENT_EXCEPTION(
                wrapper.__name__, func.__name__,
                func.func_code.co_filename,
                func.func_code.co_firstlineno + 1)
    return wrapper
def singleton(cls):
    """Class decorator: replace the decorated class with a single instance of it."""
    return cls()
def decorate(func, dec):
    """Build a wrapper for *func* that calls *dec* but keeps func's exact signature.

    Generates source of the form ``def <name><signature>: return __dec<params>``,
    compiles it with *dec* bound as ``__dec``, and copies func's metadata onto
    the generated function via functools.update_wrapper.

    NOTE(review): inspect.getargspec/formatargspec were removed in Python 3.11;
    this module appears to target Python 2 (see the func_code usage elsewhere
    in this file) -- confirm before porting.
    """
    argspec = inspect.getargspec(func)
    name = func.__name__
    # Full signature including default values, for the generated `def` line.
    signature = inspect.formatargspec(*argspec)
    # Same parameter list with defaults stripped, used as the call arguments.
    params = inspect.formatargspec(formatvalue=(lambda value: ''), *argspec)
    source = 'def %s%s: return __dec%s\n' % (name, signature, params)
    code = compile(source, '<decorator-gen>', 'single')
    env = {'__dec': dec}
    # Executing the compiled `def` places the new function into env[name].
    eval(code, env)
    return update_wrapper(env[name], func)
def decorator(dec):
    """Turn *dec* (a wrapper factory) into a signature-preserving decorator."""
    def apply_to(func):
        return decorate(func, dec(func))
    return apply_to
def condition(attributeName, logFunc = None, logStack = True):
    """Guard a method: run it only while ``self.<attributeName>`` is truthy.

    When the attribute is falsy the call is skipped (returning None) and, if
    *logFunc* is given, the skip is reported through it.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            # args[0] is `self` of the decorated method.
            if not getattr(args[0], attributeName):
                if logFunc:
                    logFunc('Method condition failed', args, kwargs, stack=logStack)
                return
            return func(*args, **kwargs)
        # decorate() preserves the original signature on the wrapper.
        return decorate(func, wrapper)
    return decorator
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\wotdecorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:48 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
864f6c8e44747b438bdd00945bd88e7a810108db | 6cd4d2923292004390a1b23dc26d0a7a4a7df223 | /DjangoRedis/manage.py | 9a25fcc6109e9b625d9a5bb7fcfab9c54f637263 | [] | no_license | Lyle101/docker_redis | 4cc85b6c5c5784c3d032d129810ce49a0e4b09cc | f3b9db02ce65794d84220286c805ba799c0e79dd | refs/heads/master | 2020-04-09T07:11:30.999829 | 2018-12-03T08:07:50 | 2018-12-03T08:07:50 | 160,144,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: point at the project settings
    # and hand argv to Django's command dispatcher.
    # NOTE(review): the settings module is 'DjangoPrj.settings' while the
    # repository folder is DjangoRedis -- confirm the package name matches.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoPrj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"Chris.Lyle101@gmail.com"
] | Chris.Lyle101@gmail.com |
84fdc9040b3bcc55c94270233da3cce4c9b669d5 | babc56e88a3b5f5038be70ad676d5bd8f1bbf0d2 | /wind_direction_byo.py | 94bc6600dd5986d16cb2cf6d96ba20ac2a7f7738 | [] | no_license | VicenteYago/CustomWeatherStation | 873405ca16aa0b6f4f291cbc0068a6ea10aef745 | c655f947cca2cd0f8827c18f6f7a7c4c11ef4d43 | refs/heads/master | 2022-11-13T06:48:05.736830 | 2020-06-30T00:43:07 | 2020-06-30T00:43:07 | 269,812,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | from gpiozero import MCP3008
import time
import math
# Wind vane voltage divider read through MCP3008 ADC channel 0.
adc = MCP3008(channel=0)

# NOTE(review): `count`, `values` and the `volts` list are never used by the
# code below (only `volts_dic` is consulted) -- confirm before removing.
count = 0
values = []

volts = [0.4, 1.4, 1.2, 2.8,
         2.9, 2.2, 2.5, 1.8,
         2.0, 0.7, 0.8, 0.1,
         0.3, 0.2, 0.6, 2.7]

# Measured vane voltage (rounded to 1 decimal at a 3.3 V reference) mapped to
# compass bearing in degrees, 16-point rose in 22.5-degree steps.
volts_dic = {
    0.4: 0.0,
    1.4: 22.5,
    1.2: 45.0,
    2.8: 67.5,
    2.7: 90.5,  # NOTE(review): 90.5 looks like a typo for 90.0 -- confirm
    2.9: 112.5,
    2.2: 135.0,
    2.5: 157.5,
    1.8: 180.0,
    2.0: 202.5,
    0.7: 225.0,
    0.8: 247.5,
    0.1: 270.0,
    0.3: 292.5,
    0.2: 315.0,
    0.6: 337.5
}
def get_average(angles):
    """Circular (vector) mean of a list of bearings in degrees, in [0, 360).

    Plain arithmetic averaging fails across the 0/360 wrap-around, so the
    angles are summed as unit vectors and the mean direction is recovered
    with atan2.  Returns 0.0 for an empty list (the original divided by zero
    both for an empty list and whenever the cosine sum was exactly zero).
    """
    if not angles:
        return 0.0
    sin_sum = 0.0
    cos_sum = 0.0
    for angle in angles:
        r = math.radians(angle)
        sin_sum += math.sin(r)
        cos_sum += math.cos(r)
    flen = float(len(angles))
    # atan2 handles every quadrant, including cos_sum == 0 (due east/west
    # vector sums), where the original s / c division raised
    # ZeroDivisionError; the quadrant corrections (+180, +360) of the
    # original branches are exactly what atan2 plus the modulo produce.
    average = math.degrees(math.atan2(sin_sum / flen, cos_sum / flen)) % 360.0
    return 0.0 if average == 360 else average
def get_value(length = 5):
    """Sample the wind vane for *length* seconds and return the mean bearing."""
    data = []
    print("Measuring wind direction for %d seconds..." % length)
    start_time = time.time()
    while time.time() - start_time <= length:
        # ADC reading is a 0..1 fraction of the 3.3 V reference; round to the
        # 1-decimal resolution used by the volts_dic lookup table.
        wind = round(adc.value*3.3,1)
        if not wind in volts_dic:
            print("Unknown value :", str(wind))
        else:
            data.append(volts_dic[wind])
    # NOTE(review): if every reading was unknown, `data` is empty -- confirm
    # get_average() tolerates that.
    return get_average(data)

# Continuously print the 5-second averaged wind direction.
while True:
    print(get_value())
| [
"="
] | = |
8da0a0f25fb1f42f41d710abf1ca39dc617b67dc | 5e4a1d08d199722fde585723d06644e9999c144e | /input.py | af348c0972728af30a24ce077b2d8f0d4bcd81bf | [] | no_license | JustDoIT83/CTI110 | ca30948cd5dc4e30103a4adfb681f5090363462d | 3817c2b935eb166f0086026f0cf73c7e96b2bb8d | refs/heads/master | 2020-04-02T10:14:06.081690 | 2018-10-23T13:25:33 | 2018-10-23T13:25:33 | 154,330,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # get the users name, age, and income
# Prompt the user for three pieces of personal data (kept as raw strings).
name = input('What is your name?: ')
age = input('What is your age?: ')
income = input('What is your income?: ')

# display the data entered above
print('here is the data you entered')
print('Name:', name)
print('Age:', age)
print('Income:', income)
| [
"noreply@github.com"
] | JustDoIT83.noreply@github.com |
18d5a691ca86297e0db6536e331fc046f0aedd4b | 9d53da8fbd6d6760fb652e84687cf73ef1f3034d | /model/EventPointNetpp/nets.py | b5fdb7fc10cad171466eb6ce22481815099f0d63 | [] | no_license | HowoongJun/localfeature | 8a944256738e7f10f5e0564c499bf88afaf006ba | 0d17fca75d2f67c33652710250c3d0f07d7c8970 | refs/heads/main | 2023-08-27T19:57:10.071631 | 2021-10-28T06:53:30 | 2021-10-28T06:53:30 | 340,907,081 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,301 | py | ###
#
# @Brief nets.py
# @Details EventPointNetPP network
# @Org Robot Learning Lab(https://rllab.snu.ac.kr), Seoul National University
# @Author Howoong Jun (howoong.jun@rllab.snu.ac.kr)
# @Date Sep. 01, 2021
# @Version v0.1
#
###
import torch
class CEventPointNetPP(torch.nn.Module):
    """EventPointNet++ backbone: a VGG-style encoder with two output heads.

    Given a single-channel image, forward() returns
      * ``kpt``  -- a 65-channel keypoint score map at 1/8 resolution
        (presumably 8x8 cell scores plus a dustbin channel -- confirm), and
      * ``desc`` -- a per-pixel L2-normalised 128-d descriptor map at 1/8
        resolution.
    """

    def __init__(self):
        # NOTE: attribute names and creation order are kept identical to the
        # original so checkpoints (state_dict keys) and seeded initialisation
        # stay compatible.
        super(CEventPointNetPP, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv1_1 = torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = torch.nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.conv3_1 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4_1 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convDsc1 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convDsc2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convKp1 = torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.convKp2 = torch.nn.Conv2d(256, 65, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # Shared encoder: three conv-conv-pool stages (overall stride 8) ...
        for first, second in ((self.conv1_1, self.conv1_2),
                              (self.conv2_1, self.conv2_2),
                              (self.conv3_1, self.conv3_2)):
            x = self.pool(self.relu(second(self.relu(first(x)))))
        # ... plus one conv-conv stage without pooling.
        x = self.relu(self.conv4_2(self.relu(self.conv4_1(x))))
        # Keypoint head.
        kpt = self.convKp2(self.relu(self.convKp1(x)))
        # Descriptor head with per-pixel L2 normalisation over channels.
        desc = self.convDsc2(self.relu(self.convDsc1(x)))
        desc = desc.div(desc.norm(p=2, dim=1, keepdim=True))
        return kpt, desc
| [
"prestoxic@gmail.com"
] | prestoxic@gmail.com |
3d6c10f42425778b851063b600ddb7ceddf3622d | 161e4fad71b23ac5514f8cc8c04b97ff29039cf2 | /Array/Buy_Sell_Stock.py | 7ca3171b5ee36527ea4e438f7ffb002bbdda2c3b | [] | no_license | yash872/PyDsa | 726d43a0730e9143593327f180fab3eb3367d281 | a3046231c466f2ec5cae94129d2c15d21a082b86 | refs/heads/main | 2023-03-06T12:12:49.731899 | 2021-02-04T17:14:28 | 2021-02-04T17:14:28 | 332,211,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | '''
Best Time to Buy and Sell Stock
You are given an array prices where prices[i] is the price of a given stock on the ith day.
You want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.
Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.
Example 1:
Input: prices = [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Note that buying on day 2 and selling on day 1 is not allowed because you must buy before you sell.
'''
#------------------------------
# Time-> O(N) | Space-> O(1)
#------------------------------
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best profit from one buy followed by one later sell (0 if none).

        Single pass: track the cheapest price seen so far and the best spread
        achievable by selling at the current price.
        """
        cheapest = float('inf')
        best = 0
        for price in prices:
            if price < cheapest:
                cheapest = price
            elif price - cheapest > best:
                best = price - cheapest
        return best
| [
"noreply@github.com"
] | yash872.noreply@github.com |
c177f0da14bb7731c15a9e25ad35b2bb78f5ca63 | 3d2192385e65889d20b74742755f5369d0d09161 | /stock_colis/models/__init__.py | da8dece232489928427446f10dfd1d1af8ea259d | [] | no_license | FIDINGSARL/audoune | 9ba746a9d7424a41f8775a6e30f42f2a97224edf | 39cecd44497d5fa227cc594a6bf5807eb14976d3 | refs/heads/main | 2023-06-18T09:49:13.778878 | 2021-06-30T15:06:51 | 2021-06-30T15:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | # -*- coding: utf-8 -*-
from . import stock_colis, stock_colis_request
| [
"macbook@MacBook-Pro-de-MacBook.local"
] | macbook@MacBook-Pro-de-MacBook.local |
3ff18915969da0e6505bd95f4d68b34cfdb72eb5 | e2cb95d74ff13247a706a4a949e22fb397efe7b7 | /A2 - Digital Makeup Transfer/src/faceWarp.py | 9a20045a0b4934f6294b0a14c9d6558b1da7a672 | [] | no_license | Aditi-Singla/Digital-Image-Analysis | 945beb48bfbd1f7bb75d76059d5faafcfe88881f | 8fc08ee86c5a168e3dc6d3b22c4be5bf2195458d | refs/heads/master | 2020-04-01T00:36:28.232484 | 2018-07-18T18:45:20 | 2018-07-18T18:45:20 | 152,704,480 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | #!/usr/bin/env python
import numpy as np
import cv2
import sys
import scipy.spatial
# Read points from text file
def readPoints(path):
    """Load landmark points from *path*.

    Each line holds two whitespace-separated numbers; they are returned as a
    list of (x, y) tuples of numpy.float32.
    """
    loaded = []
    with open(path) as src:
        for row in src:
            x, y = row.split()
            loaded.append((np.float32(x), np.float32(y)))
    return loaded
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def applyAffineTransform(src, srcTri, dstTri, size) :
    """Warp *src* by the affine map that carries srcTri onto dstTri.

    *size* is the (width, height) of the returned patch.
    """
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
    # Apply the Affine Transform just found to the src image
    # (BORDER_REFLECT_101 avoids dark seams at the patch borders).
    dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )
    return dst
def warpTriangle(img1, img, t1, t) :
    """Warp triangle *t1* of *img1* onto triangle *t* of *img*, in place.

    Only the bounding rectangle of each triangle is transformed; a filled
    convex-polygon mask then blends just the triangular region into *img*.
    """
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r = cv2.boundingRect(np.float32([t]))
    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    tRect = []
    # NOTE(review): xrange means this script targets Python 2.
    for i in xrange(0, 3):
        tRect.append(((t[i][0] - r[0]),(t[i][1] - r[1])))
        t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
    # Get mask by filling triangle
    mask = np.zeros((r[3], r[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0);
    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r[2], r[3])
    warpImage = applyAffineTransform(img1Rect, t1Rect, tRect, size)
    # Alpha blend rectangular patches
    imgRect = warpImage
    # Copy triangular region of the rectangular patch to the output image
    img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] * ( 1 - mask ) + imgRect * mask
if __name__ == '__main__' :
    # usage: faceWarp.py <source image> <target image>; each image must have
    # a matching "<image>.txt" file of landmark points (see readPoints).
    filename1 = sys.argv[1]
    filename2 = sys.argv[2]
    # Read images
    img1 = cv2.imread(filename1);
    img2 = cv2.imread(filename2);
    # Convert Mat to float data type
    img1 = np.float32(img1)
    img2 = np.float32(img2)
    # Read array of corresponding points
    points1 = readPoints(filename1 + '.txt')
    points2 = readPoints(filename2 + '.txt')
    # Delaunay triangulation over the source landmarks; the same vertex
    # indices are reused on the target landmarks below.
    tri = scipy.spatial.Delaunay(np.array(points1))
    # Allocate space for final output
    imgMorph = np.zeros(img2.shape, dtype = img2.dtype)
    # Dump the triangle index table for inspection/debugging.
    np.savetxt('tri.txt', np.uint8(tri.vertices), fmt='%d')
    for l in tri.vertices :
        x = int(l[0])
        y = int(l[1])
        z = int(l[2])
        t1 = [points1[x], points1[y], points1[z]]
        t2 = [ points2[x], points2[y], points2[z] ]
        # Morph one triangle at a time.
        warpTriangle(img1, imgMorph, t1, t2)
    # Display Result
    cv2.imwrite('warped.jpg', np.uint8(imgMorph))
| [
"aditisksingla@gmail.com"
] | aditisksingla@gmail.com |
defbb28049ad7d422477ecaaabdf790640d21b17 | c5e6a4e0264409f4dc5db9993c8c0cc058d4365a | /8_juego_ahorcado.py | c36c4f69dcc49dcd6f1cc0a09e02d34d9823de2c | [] | no_license | carlosafdz/programacion_python | 05c91eb858ce12b9fd2e9e3fd4e902c66ea2ee2d | 17b0db4dcf923d6de3fdfd9c9e78b1d1a50651ea | refs/heads/master | 2023-05-24T20:32:22.614224 | 2020-03-21T18:26:30 | 2020-03-21T18:26:30 | 248,345,937 | 0 | 0 | null | 2023-05-22T23:22:23 | 2020-03-18T21:22:31 | Python | UTF-8 | Python | false | false | 2,486 | py | import random
IMAGENES = [
'''
+=======+
| |
|
|
|
|
======
''',
'''
+=======+
| |
O |
|
|
|
======
''',
'''
+=======+
| |
O |
| |
|
|
======
''',
'''
+=======+
| |
O |
/| |
|
|
======
''',
'''
+=======+
| |
O |
/|\ |
|
|
======
''',
'''
+=======+
| |
O |
/|\ |
/ |
|
======
''',
'''
+=======+
| |
O |
/|\ |
/ \ |
|
======
''',
''' '''
]
PALABRAS = ["lavadora","secadora","pepel","computadora"]
def palabra_random():
idx = random.randint(0,len(PALABRAS)-1)
return PALABRAS[idx]
def mostrar_tablero(palabra_escondida,intentos):
print(IMAGENES[intentos])
print('')
print(palabra_escondida)
print("*---**---**---**---**---**---**---**---**---**---*")
def main():
palabra = palabra_random()
palabra_escondida = ["_"] * len(palabra)
intentos = 0
while True:
mostrar_tablero(palabra_escondida,intentos)
letra = input("escoge una letra: ")
indice_letras = []
for i in range(len(palabra)):
if palabra[i] == letra:
indice_letras.append(i)
if len(indice_letras) == 0:
intentos = intentos + 1
if intentos == 7:
mostrar_tablero(palabra_escondida,intentos)
print(f'Perdiste..... la palabra correcta era {palabra}')
break
else:
for i in indice_letras:
palabra_escondida[i] = letra
indice_letras = []
try:
palabra_escondida.index("_")
except ValueError:
print(" ")
print("ganaste!!!")
break
def pruebas_tablero():
mostrar_tablero("palabra",0)
mostrar_tablero("palabra",1)
mostrar_tablero("palabra",2)
mostrar_tablero("palabra",3)
mostrar_tablero("palabra",4)
mostrar_tablero("palabra",5)
mostrar_tablero("palabra",6)
if __name__ == "__main__":
main()
#pruebas_tablero() | [
"carlos.afdzf@hotmail.com"
] | carlos.afdzf@hotmail.com |
016e33094e39966281d2775ad6be6442e4a27330 | 63e06ef221242c2c614750df02b4283989e13052 | /projeto_da_roca/users/migrations/0002_auto_20210521_1213.py | b49e9079706612918bcb18961c11420541017361 | [] | no_license | amandacl/Da_Roca | 97ada3b6abe6df25258a34f82954c07c597daae6 | b6187d62b91f06e0afb523a84194ad12467a89b4 | refs/heads/master | 2023-06-21T11:59:14.891738 | 2021-06-02T02:13:02 | 2021-06-02T02:13:02 | 368,898,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | # Generated by Django 3.2.3 on 2021-05-21 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: relax/tighten fields on Address and User."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        # house_number becomes optional (blank/null allowed).
        # NOTE(review): max_length has no effect on IntegerField — presumably
        # left over from an earlier CharField; confirm before cleaning up
        # (editing historical migrations is generally avoided).
        migrations.AlterField(
            model_name='address',
            name='house_number',
            field=models.IntegerField(blank=True, max_length=10, null=True),
        ),
        # CPF becomes optional, but unique when present.
        migrations.AlterField(
            model_name='user',
            name='cpf',
            field=models.CharField(blank=True, max_length=11, null=True, unique=True),
        ),
        # Email must now be unique.
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
    ]
| [
"matheus.noronha@solyd.com.br"
] | matheus.noronha@solyd.com.br |
ecb41fb56f8890f13f0b34b3b3a1c309800192b5 | a4957a563bbd3ce322e0cd0fec8e0a37650b5092 | /calculatorv2.py | 289ec6ac8e829cd174995e3ee1cb013560bce9ea | [] | no_license | CodingFluent/Simple-CalculatorV2-Py | 66632717a94d0b27a5c1994b6d5eaf062ee793f7 | 3af99215b4eb8b40cabdc840172506825e27f4e0 | refs/heads/master | 2022-12-10T19:00:53.607598 | 2020-08-31T06:01:54 | 2020-08-31T06:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | a = float(input("Enter First Number => "))
# Simple interactive calculator: reads the operator and second operand
# (the first operand `a` is read just above this block).
op = input("Enter Operation (+, -, *, /, %) => ")
b = float(input("Enter Second Number => "))

if op == "+":
    total = f"The sum of {a} + {b} is {a + b}"
elif op == "-":
    total = f"The difference of {a} - {b} is {a - b}"
elif op == "*":
    total = f"The multiplication of {a} * {b} is {a * b}"
elif op == "/":
    # Report instead of crashing with ZeroDivisionError when b == 0.
    total = f"The division of {a} / {b} is {a / b}" if b != 0 else "Cannot divide by zero......."
elif op == "%":
    # Same guard for the modulo operation.
    total = f"The module of {a} % {b} is {a % b}" if b != 0 else "Cannot take modulo by zero......."
else:
    total = "Please Enter an Valid Operation......."
print(total)
"noreply@github.com"
] | CodingFluent.noreply@github.com |
6d27c8039a8ce6ca14e65e11999fb3c5304f2563 | ef4a4c8de95516700134a45800238de9298e1485 | /zadacha3.py | ccb6d7317053767af297787dfcc42f5ddf4e9f3a | [] | no_license | nikolaj74-hub/lessons | a45d67d380982d245f5950fe6eef3041c7ffbd2e | 54437b8e8063668017d7e29612c0623adb8fce94 | refs/heads/master | 2023-01-23T19:11:18.680790 | 2020-12-04T13:46:02 | 2020-12-04T13:46:02 | 311,939,032 | 1 | 0 | null | 2020-12-04T13:42:39 | 2020-11-11T10:38:48 | Python | UTF-8 | Python | false | false | 1,624 | py | # Реализовать базовый класс Worker (работник), в котором определить атрибуты: name,
# surname, position (должность), income (доход). Последний атрибут должен быть
# защищенным и ссылаться на словарь, содержащий элементы: оклад и премия, например,
# {"wage": wage, "bonus": bonus}. Создать класс Position (должность) на базе класса Worker.
# В классе Position реализовать методы получения полного имени сотрудника (get_full_name) и
# дохода с учетом премии (get_total_income). Проверить работу примера на реальных данных
# (создать экземпляры класса Position, передать данные, проверить значения атрибутов,
# вызвать методы экземпляров
class Worker:
    """Base employee record: personal data plus an income breakdown."""

    def __init__(self, n, sn, pos, w, b):
        self.position = pos
        self.surname = sn
        self.name = n
        # Income is kept as a wage/bonus mapping; subclasses sum its values.
        self.incom = dict(wage=w, bonus=b)
class Position(Worker):
    """A concrete job position that can report full name and total income."""

    def get_full_name(self):
        # Prints "<name> <surname>" exactly as stored on the instance.
        full_name = " ".join((self.name, self.surname))
        print(full_name)

    def get_full_incom(self):
        # Total income is wage plus bonus, summed from the income mapping.
        total = sum(self.incom.values())
        print(f'доход ={total} тугр.')
# Demo: build a Position instance, inspect its attributes, and call both methods.
a = Position('коля', 'трофимов', 'слесарь', 30000, 300)
print(a.name)
print(a.incom)
print(a.surname)
print(a.position)
a.get_full_name()
a.get_full_incom()
| [
"noreply@github.com"
] | nikolaj74-hub.noreply@github.com |
218046a18f59c8cc6a566f6a16807e74d5250298 | a4e502e9487cf17c53f9f931ec0dbc12168fea52 | /packages/pyre/platforms/PackageManager.py | 0877270914d7a2f1326787f57abfbb1ac0125b31 | [
"BSD-3-Clause"
] | permissive | bryanvriel/pyre | bdc5dd59c46d53ff81f2ece532b9073ac3b65be1 | 179359634a7091979cced427b6133dd0ec4726ea | refs/heads/master | 2021-09-28T00:10:26.454282 | 2018-11-11T16:42:07 | 2018-11-11T16:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# the framework
import pyre
# declaration
class PackageManager(pyre.protocol, family='pyre.platforms.packagers'):
    """
    Encapsulation of host specific information
    """

    # NOTE: the @pyre.provides methods below are interface declarations; their
    # bodies are docstrings only and implementations are supplied elsewhere.

    # requirements
    @pyre.provides
    def prefix(self):
        """
        The package manager install location
        """

    @pyre.provides
    def installed(self):
        """
        Retrieve available information for all installed packages
        """

    @pyre.provides
    def packages(self, category):
        """
        Provide a sequence of package names that provide compatible installations for the given
        package {category}. If the package manager provides a way for the user to select a
        specific installation as the default, care should be taken to rank the sequence
        appropriately.
        """

    @pyre.provides
    def info(self, package):
        """
        Return information about the given {package}

        The type of information returned is determined by the package manager. This method
        should return success if and only if {package} is actually fully installed.
        """

    @pyre.provides
    def contents(self, package):
        """
        Generate a sequence of the contents of {package}

        The type of information returned is determined by the package manager. Typically, it
        contains the list of files that are installed by this package, but it may contain other
        filesystem entities as well. This method should return a non-empty sequence if and only
        if {pakage} is actually fully installed
        """

    @pyre.provides
    def configure(self, packageInstance):
        """
        Dispatch to the {packageInstance} configuration procedure that is specific to the
        particular implementation of this protocol
        """

    # framework obligations
    @classmethod
    def pyre_default(cls, **kwds):
        """
        Build the preferred host implementation
        """
        # the host should specify a sensible default; if there is nothing there, this is an
        # unmanaged system that relies on environment variables and standard locations
        from .Bare import Bare
        # return the support for unmanaged systems
        return Bare
# end of file
| [
"michael.aivazis@orthologue.com"
] | michael.aivazis@orthologue.com |
c8fc1b630938f22c3762d543e169f25db756d2bd | fb23a842c99f9a5238a9c6dfb3ffa6eee5c3e47d | /Salt-api/python版示例/V2/diaoyong.py | f2f32adde67640cdb991d2d8e8fc1ff6f921dc29 | [] | no_license | nanzhushan/Saltstack | 45a492855860a5664f1c0a2099935ae95a17d0de | d9fc85a7be1861b13e6de55de9b6951e405fffb7 | refs/heads/master | 2021-05-31T16:37:09.928023 | 2016-04-11T07:04:10 | 2016-04-11T07:04:10 | 39,339,839 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | #!/usr/bin/python
#coding:utf8
from saltapi import *
#import saltapi
# Instantiate the salt-api client exposed by the saltapi module imported above.
sapi = saltAPI()
# Earlier experiments, kept for reference:
#params = {'client':'local', 'fun':'test.ping', 'tgt':'*'}
#params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'hello'}
# arg1 can also be written as arg
#params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'hostname'}
# Run two shell commands on every minion ('*') through the local client.
params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'touch /root/cc.txt;touch cc1.txt'}
test = sapi.saltCmd(params)
#test = sapi.saltCmd()
# NOTE: Python 2 print statement — this script targets Python 2.
print test
| [
"624867243@qq.com"
] | 624867243@qq.com |
e05f09d686cf4fc1af26ff93dd112cabeaac5381 | 60e2b0f728bf7b497e241afdacffaa8ee9203213 | /breast_cancer/breast_cancer_load.py | c7e6f6f48a1be53f7a7d856378b2b85efd42ffca | [] | no_license | yamadayoshi/deep_learning | 43897d59dc3f89ecd4820050b96acacbf653408e | 78bbf5b12011a5d17375b50b75203251003cb3d0 | refs/heads/master | 2021-02-19T01:02:57.934801 | 2020-03-10T20:02:45 | 2020-03-10T20:02:45 | 245,260,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import numpy as np
from keras.models import model_from_json
# Read the serialized network architecture; the context manager guarantees the
# file handle is closed even if reading raises (the original leaked it on error).
with open('breast_model.json', 'r') as file:
    network = file.read()
# Rebuild the Keras model from its JSON description and attach the trained weights.
model = model_from_json(network)
model.load_weights('breast_weights.h5')
# One input row of 30 features (3 x 10 values); presumably the breast-cancer
# feature ordering used at training time — TODO confirm.
novo = np.array([[10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15, 10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15, 10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15]])
# Threshold the network's output at 0.8 to obtain a boolean class prediction.
previsao = model.predict(novo) > 0.8
"andre.yamada@digiage.com"
] | andre.yamada@digiage.com |
d5408abdee9094c62381748340a424756eef3c8c | 9d61daee8ec86d1c3b85ab577c4d0ffc5c4c4a7c | /code kata/summm.py | d8927006714e70f2f8448e2ce4032b3d9075ff48 | [] | no_license | Bhuvaneswaribai/guvi | ec3d2a922059859c778b78920d52936a44edbca8 | ab6bb1193af49dbc431d5eb7ae19050d11aa622c | refs/heads/master | 2020-06-03T00:11:14.636796 | 2019-07-04T11:30:00 | 2019-07-04T11:30:00 | 191,355,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py |
# Read (and ignore) the element count on the first line — it still has to be
# consumed from stdin — then sum the whitespace-separated integers on the
# second line. The built-in sum() replaces the manual accumulator, which
# shadowed the built-in name `sum`.
nuumber = int(input())
valores = list(map(int, input().split()))
print(sum(valores))
| [
"noreply@github.com"
] | Bhuvaneswaribai.noreply@github.com |
425be2dac09edaf397a3412fc17709976e67201f | de7a39129bf471d4d4be25c65174916a505146e6 | /book/examples/weave_examples_simple.py | 1dc25d425bcf85bc9a527aca248b38e6572a0caa | [] | no_license | jdh2358/py4science | a6da01de9cb16709828bfd801bf7faf847f346bb | a56c742ec2e0a31c2251468d9947ebaf707346d7 | refs/heads/master | 2016-09-05T22:18:38.520426 | 2009-12-05T17:47:26 | 2009-12-05T17:47:26 | 1,418,846 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | """Some simple examples of weave.inline use"""
from weave import inline,converters
import Numeric as nx
from pylab import rand
#-----------------------------------------------------------------------------
# Returning a scalar quantity computed from a Numeric array.
def trace(mat):
    """Return the trace of a matrix.

    The sum of the diagonal is computed in inlined C via weave; `mat` is
    assumed square-indexable up to min(nrow, ncol) — TODO confirm callers
    only pass square arrays.
    """
    # The local names mat/nrow/ncol are exported to the C snippet by name in
    # the inline() call below, so they must not be renamed.
    nrow,ncol = mat.shape
    code = \
"""
double tr=0.0;
for(int i=0;i<nrow;++i)
    tr += mat(i,i);
return_val = tr;
"""
    # blitz converters let the C code index the array with mat(i,i).
    return inline(code,['mat','nrow','ncol'],
                  type_converters = converters.blitz)
# In-place operations on arrays in general work without any problems
def in_place_mult(num,mat):
    """In-place multiplication of a matrix by a scalar.
    """
    # num/mat/nrow/ncol are referenced by name from the C snippet below via
    # the inline() argument list, so these locals must not be renamed.
    nrow,ncol = mat.shape
    code = \
"""
for(int i=0;i<nrow;++i)
    for(int j=0;j<ncol;++j)
        mat(i,j) *= num;
"""
    # No return value: the blitz converters give the C code a view of `mat`,
    # so the scaling mutates the caller's array directly.
    inline(code,['num','mat','nrow','ncol'],
           type_converters = converters.blitz)
def main():
    # Smoke-test the weave helpers on zero, one and random matrices.
    # NOTE: Python 2 print statements — this example targets Python 2.
    zz = nx.zeros([10,10])
    print 'tr(zz)=',trace(zz)
    oo = nx.ones([4,4],nx.Float)
    print 'tr(oo)=',trace(oo)
    aa = rand(128,128)
    print 'tr(aa)=',trace(aa)
    # Demonstrate the in-place scaling: oo is mutated, not copied.
    print 'oo:',oo
    in_place_mult(3,oo)
    print '3*oo:',oo
# Allow running this example module as a script.
if __name__=='__main__':
    main()
| [
"jdh2358@gmail.com"
] | jdh2358@gmail.com |
2e9d8f40ea73bf3323400de1ac413068f242e213 | 313978a9a5a1f0824a6f2bfb948e1a4ec0225213 | /4-iteração/lazy iterable e iterator.py | 7337513a1d77423de94a8c51d7d35f8de1e0a3f6 | [] | no_license | wallacex19/python | 71ae310a6a6ec2f1c8c80d4ad2bee7db2d391d13 | 99f11249fec5e001e10b2a155c2608e9b8b420ec | refs/heads/master | 2023-04-08T22:41:34.259091 | 2021-04-23T20:58:18 | 2021-04-23T20:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | # O objeto range em Python 3 (xrange em Python 2) pode ser executado em loop como qualquer outro iterável:
for n in range(3):
print(n)
# E como o range é iterável, podemos obter um iterador a partir dele:
iter(range(3))
# R:<range_iterator object at 0x7fe173542ed0>
# mas objetos range não sao 6-iteradores por si mesmos, nos nao podemos chamar next em um objeto range
next(range(3))
# R:Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: 'range' object is not an iterator
# E, ao contrário de um iterador, podemos fazer um loop em um objeto de intervalo sem consumi-lo:
numbers = range(3)
tuple(numbers)
# R:(0, 1, 2)
tuple(numbers)
# R:(0, 1, 2)
# Se fizéssemos isso com um iterador, não obteríamos nenhum elemento na segunda vez em que fizemos o loop:
numbers = iter(range(3))
tuple(numbers)
# R:(0, 1, 2)
tuple(numbers)
#R:()
# Ao contrário dos objetos zip, enumerate ou generator, os objetos range não são 6-iteradores.
#-- ENTÃO O QUE É O RANGE? --##
# O objeto range é "lazy" em certo sentido, porque não gera todos os números que "contém" quando o criamos. Em vez disso, ele nos fornece esses números conforme precisamos deles ao fazer um loop sobre ele.
#
# Aqui está um objeto range e um generator (que é um tipo de iterador):
numbers = range(1_000_000)
square = (n**2 for n in numbers) | [
"pedromadureira000@gmail.com"
] | pedromadureira000@gmail.com |
acc5c7355bf61f8fbde46568884e95f5b124e22c | 4cfb9d75361f3c7f50744878e645073e3a8fc8d4 | /sinx+sinx fft.py | ab1696136d9a7c9f87d523e156f07203ab760d85 | [] | no_license | mychenyoke/gwwave1 | ac99c982b5037e8afff42e3055de366ddd8543dd | 7520846ab848ac2434db11ceb66a271d5ab68270 | refs/heads/master | 2020-03-18T13:47:02.888171 | 2018-05-28T15:18:36 | 2018-05-28T15:18:36 | 134,808,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import numpy as np
import matplotlib.pyplot as plt

# Angular frequencies of the two tones and the sampling rate for the FFT axis.
omega1=0.1
omega2=0.2
sample_rate=20
a=np.arange(0,100)                       # sample indices 0..99
sina=np.sin(omega1*a)                    # single tone
sinb=np.sin(omega2*a)+np.sin(omega1*a)   # sum of the two tones
plt.figure(figsize=(10,24))
plt.subplot(4,1,1)
plt.title("sinax")
plt.plot(a,sina)
plt.savefig("sinax")
plt.subplot(4,1,2)
plt.title("sinax+sinbx")
plt.plot(a,sinb)
plt.savefig("sinax+sinbx")
# Frequency bins matching the 100 samples at the given sampling rate.
fft_frequency=np.fft.fftfreq(len(a),1/sample_rate)
fft_sina=np.fft.fft(sina)
# Print the magnitude spectrum of the single tone (dead `aa=[]` init removed —
# it was immediately overwritten).
aa=abs(fft_sina)
for ab in aa:
    print(ab)
fft_sinb=np.fft.fft(sinb)
plt.subplot(4,1,3)
plt.title("FFT_Frequency_sinax")
plt.plot(fft_frequency,abs(fft_sina))
plt.savefig("FFT_Frequency_sinax")
plt.subplot(4,1,4)
plt.title("FFT_Frequency_sinax+sinbx")
# Plot the magnitude (abs) of the complex FFT, consistent with the sinax
# subplot; plotting the raw complex array silently discarded the imaginary part.
plt.plot(fft_frequency,abs(fft_sinb))
plt.savefig("FFT_Frequency_sinax+sinbx") | [
"noreply@github.com"
] | mychenyoke.noreply@github.com |
21a7d146b5d95f1fee3c58b4e611dd502e854c74 | 83fb26fc9fe96c5821c7a13468f205ca6eb4fcda | /ICP exercise and assignment/A01/A01_exercise1.py | 2662b1fc38669910f481aa07bc1481af8bf91817 | [] | no_license | zc2214/Introduction-to-Computer-Programming | e58355fc732a2eacf29aa5141573e64ef1c3f27e | 95f5e36f102c5ebeeb628b61c3fdad416082ab4f | refs/heads/main | 2023-08-11T23:44:54.766836 | 2021-09-22T14:45:54 | 2021-09-22T14:45:54 | 323,836,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # PROGRAMMING ASSIGNMENT 01
# Filename: 'exercise1.py'
#
# Write a program that does the following (in the specified order):
# 1. asks the user to input his family name
# 2. asks the user to input his given name
# 3. then, prints the message Hello <given name> <family name> !!!
#
# WRITE YOUR CODE AFTER THIS LINE
firstname = input("Please enter your firstname")
lastname = input("Please enter your lastname")
print ("Hello",firstname,lastname )
| [
"noreply@github.com"
] | zc2214.noreply@github.com |
71c917f941655f147f642dba17548ed3889df18d | 3328e95f5a8498ab366aec380f0e1822826ba7a9 | /puppy.py | 5ecb1fddd03ca00ec9d69d3d7ed91e3934b08270 | [] | no_license | Abhiram1214/opencv | 6e9dd53cc08c54a8e1ce6f0c297fda451ddb7c31 | 653a9ccddbc188679bc9afe8f83d98a93b47cf3d | refs/heads/main | 2022-12-26T12:03:41.308652 | 2020-10-11T12:01:53 | 2020-10-11T12:01:53 | 301,957,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | import cv2
import numpy as np
'''
img = cv2.imread(r'C:\Users\cvenkatanagasatya\Pictures\Open CV\Computer-Vision-with-Python\DATA\puppy.jpg')
while True:
cv2.imshow('puppy', img)
#if we waited for milli second and we pressed the esc key
if cv2.waitKey(1) & 0xFF == 27:
break
cv2.destroyAllWindows()
'''
######################
#####Function#########
#####################
def draw_circle(event, x, y, flags, params):
    """Mouse callback: draw a filled circle on the shared image at a left-click.

    The original `if` had no indented body, which is a SyntaxError; per the
    standard OpenCV mouse-callback pattern this handler is meant to draw on the
    module-level `img` — confirm the intended radius/color with the author.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 50, (0, 255, 0), -1)


cv2.namedWindow(winname='Images')  # the window must exist before attaching the callback
cv2.setMouseCallback('Images', draw_circle)  # connect the window to draw_circle
######################################
##### Showing images in OpenCV#########
#######################################
# Black 512x512 3-channel canvas that the mouse callback draws onto.
img = np.zeros((512,512,3), np.int8)
while True:
    cv2.imshow("Images", img)
    # Redraw every 20 ms; exit when ESC (key code 27) is pressed.
    if cv2.waitKey(20) & 0xFF==27:
        break
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | Abhiram1214.noreply@github.com |
941b70169ea0201bf4913ade211f0567886e5ca5 | 4c85452e12ad3d8ca08f91df21ff4c6812a9e3b7 | /tests/invalid_boards.py | 7ca7cb9830cd75f57154384786df9870880d65b6 | [
"MIT"
] | permissive | lesander/takuzu | 452ad7b0b8abc76647b8542118c91be6e3cb8ee7 | d0a913ce57a3234eaf17afd3c858f17c3f1e31e5 | refs/heads/master | 2022-07-05T17:01:48.117658 | 2020-05-21T23:00:25 | 2020-05-21T23:00:25 | 265,910,685 | 1 | 0 | MIT | 2022-06-22T02:06:48 | 2020-05-21T17:28:17 | Python | UTF-8 | Python | false | false | 299 | py | from takuzu import Takuzu
# Boards that the Takuzu constructor is expected to reject via its assertions:
# empty, non-list rows, mixed content, and rows of unequal length.
boards = [ [], [None], [1, 0, None], [ [], [] ], [ [1,0] ], [ [1,0], [1] ] ]

for candidate in boards:
    try:
        Takuzu(board=candidate, debug=True)
    except AssertionError:
        continue
    # Reaching this point means construction succeeded when it should not have.
    raise Exception('board={} should throw AssertionError'.format(candidate))
| [
"lesander@users.noreply.github.com"
] | lesander@users.noreply.github.com |
820708161506216faa57b389f2f0890d60afef5d | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible/modules/cron.py | 2424f5c065543ddd96be359b69a92e58495389fd | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 26,537 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cron
short_description: Manage cron.d and crontab entries
description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.
- When using symbols such as %, they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
- Required if I(state=absent).
- Note that if name is not set and I(state=present), then a
new crontab entry will always be created, regardless of existing ones.
- This parameter will always be required in future releases.
type: str
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to the current user.
type: str
job:
description:
- The command to execute or, if env is set, the value of environment variable.
- The command should not contain line breaks.
- Required if I(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
- If it is absolute, it will typically be C(/etc/crontab).
- Many linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- To use the I(cron_file) parameter you must specify the I(user) as well.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run (C(0-59), C(*), C(*/2), and so on).
type: str
default: "*"
hour:
description:
- Hour when the job should run (C(0-23), C(*), C(*/2), and so on).
type: str
default: "*"
day:
description:
- Day of the month the job should run (C(1-31), C(*), C(*/2), and so on).
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run (C(1-12), C(*), C(*/2), and so on).
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run (C(0-6) for Sunday-Saturday, C(*), and so on).
type: str
default: "*"
aliases: [ dow ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use I(special_time).
version_added: "1.0"
type: bool
default: no
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if I(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- I(name) and I(value) parameters are the name and the value of environment variable.
type: bool
default: false
version_added: "2.1"
insertafter:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted after the declaration of specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted before the declaration of specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron (or cronie on CentOS)
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null"
ansible.builtin.cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
ansible.builtin.cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
ansible.builtin.cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
ansible.builtin.cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
ansible.builtin.cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
ansible.builtin.cron:
name: yum autoupdate
weekday: "2"
minute: "0"
hour: "12"
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
ansible.builtin.cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
ansible.builtin.cron:
name: APP_HOME
env: yes
state: absent
'''
RETURN = r'''#'''
import os
import platform
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
class CronTabError(Exception):
    """Raised when a crontab cannot be read, or on unexpected I/O failures."""
    pass
class CronTab(object):
    """
    CronTab object to write time based crontab file

    user - the user of the crontab (defaults to current user)
    cron_file - a cron file under /etc/cron.d, or an absolute path
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        self.root = (os.getuid() == 0)
        self.lines = None
        # Marker comment prepended to every managed job: "#Ansible: <name>".
        self.ansible = "#Ansible: "
        # Raw text of the crontab exactly as it existed before modification.
        self.n_existing = ''
        self.cron_cmd = self.module.get_bin_path('crontab', required=True)

        if cron_file:
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
                self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
            else:
                # Relative cron_file paths are interpreted under /etc/cron.d.
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
                self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
        else:
            self.cron_file = None

        self.read()

    def read(self):
        """Load the crontab (file or per-user) into self.lines."""
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.b_cron_file, 'rb')
                self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
                self.lines = self.n_existing.splitlines()
                f.close()
            except IOError:
                # cron file does not exist
                return
            except Exception:
                raise CronTabError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronTabError("Unable to read crontab")

            self.n_existing = out

            lines = out.splitlines()
            count = 0
            for l in lines:
                # Skip the (up to three) header lines some crontab implementations
                # prepend, so they are not duplicated when the crontab is written back.
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match(r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match(r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                else:
                    # Also drop the matched header line from the saved raw text.
                    pattern = re.escape(l) + '[\r\n]?'
                    self.n_existing = re.sub(pattern, '', self.n_existing, 1)
                count += 1

    def is_empty(self):
        """Return True when the crontab contains no lines at all."""
        if len(self.lines) == 0:
            return True
        else:
            return False

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'wb')
        elif self.cron_file:
            fileh = open(self.b_cron_file, 'wb')
        else:
            # No target file: render to a temp file and install via crontab(1).
            filed, path = tempfile.mkstemp(prefix='crontab')
            os.chmod(path, int('0644', 8))
            fileh = os.fdopen(filed, 'wb')

        fileh.write(to_bytes(self.render()))
        fileh.close()

        # return if making a backup
        if backup_file:
            return

        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)

            if rc != 0:
                self.module.fail_json(msg=err)

        # set SELinux permissions
        if self.module.selinux_enabled() and self.cron_file:
            self.module.set_default_selinux_context(self.cron_file, False)

    def do_comment(self, name):
        """Return the marker comment line for the job called *name*."""
        return "%s%s" % (self.ansible, name)

    def add_job(self, name, job):
        """Append a managed job (marker comment plus job line) at the end."""
        # Add the comment
        self.lines.append(self.do_comment(name))

        # Add the job
        self.lines.append("%s" % (job))

    def update_job(self, name, job):
        """Replace the job under marker *name* with *job*."""
        return self._update_job(name, job, self.do_add_job)

    def do_add_job(self, lines, comment, job):
        """_update_job callback: keep the marker comment and emit *job*."""
        lines.append(comment)

        lines.append("%s" % (job))

    def remove_job(self, name):
        """Remove the job under marker *name* (comment and job line)."""
        return self._update_job(name, "", self.do_remove_job)

    def do_remove_job(self, lines, comment, job):
        """_update_job callback: drop both the comment and the job line."""
        return None

    def add_env(self, decl, insertafter=None, insertbefore=None):
        """Insert an environment declaration, at the top by default, or
        relative to an existing variable when insertafter/insertbefore is set."""
        if not (insertafter or insertbefore):
            self.lines.insert(0, decl)
            return

        if insertafter:
            other_name = insertafter
        elif insertbefore:
            other_name = insertbefore

        other_decl = self.find_env(other_name)
        if len(other_decl) > 0:
            if insertafter:
                index = other_decl[0] + 1
            elif insertbefore:
                index = other_decl[0]
            self.lines.insert(index, decl)
            return

        self.module.fail_json(msg="Variable named '%s' not found." % other_name)

    def update_env(self, name, decl):
        """Replace the declaration of environment variable *name* with *decl*."""
        return self._update_env(name, decl, self.do_add_env)

    def do_add_env(self, lines, decl):
        """_update_env callback: emit the replacement declaration."""
        lines.append(decl)

    def remove_env(self, name):
        """Remove the declaration of environment variable *name*."""
        return self._update_env(name, '', self.do_remove_env)

    def do_remove_env(self, lines, decl):
        """_update_env callback: drop the declaration line."""
        return None

    def remove_job_file(self):
        """Delete the cron file; True on success, False when it did not exist."""
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception:
            raise CronTabError("Unexpected error:", sys.exc_info()[0])

    def find_job(self, name, job=None):
        """Locate a job by its "#Ansible: <name>" marker, or by exact job text.

        Returns [comment, line] (with a trailing True when matched by text,
        after adopting/updating the marker comment), or [] when not found.
        """
        # attempt to find job by 'Ansible:' header comment
        comment = None
        for l in self.lines:
            if comment is not None:
                if comment == name:
                    return [comment, l]
                else:
                    comment = None
            elif re.match(r'%s' % self.ansible, l):
                comment = re.sub(r'%s' % self.ansible, '', l)

        # failing that, attempt to find job by exact match
        if job:
            for i, l in enumerate(self.lines):
                if l == job:
                    # if no leading ansible header, insert one
                    if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
                        self.lines.insert(i, self.do_comment(name))
                        return [self.lines[i], l, True]

                    # if a leading blank ansible header AND job has a name, update header
                    elif name and self.lines[i - 1] == self.do_comment(None):
                        self.lines[i - 1] = self.do_comment(name)
                        return [self.lines[i - 1], l, True]

        return []

    def find_env(self, name):
        """Return [index, line] for the declaration of *name*, or []."""
        for index, l in enumerate(self.lines):
            if re.match(r'^%s=' % name, l):
                return [index, l]

        return []

    def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
        """Render one crontab line; cron.d-style files also carry the user column."""
        # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
        job = job.strip('\r\n')

        if disabled:
            disable_prefix = '#'
        else:
            disable_prefix = ''

        if special:
            if self.cron_file:
                return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
            else:
                return "%s@%s %s" % (disable_prefix, special, job)
        else:
            if self.cron_file:
                return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
            else:
                return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)

    def get_jobnames(self):
        """Return the names of all Ansible-managed jobs in this crontab."""
        jobnames = []

        for l in self.lines:
            if re.match(r'%s' % self.ansible, l):
                jobnames.append(re.sub(r'%s' % self.ansible, '', l))

        return jobnames

    def get_envnames(self):
        """Return the names of all environment variables declared in this crontab."""
        envnames = []

        for l in self.lines:
            if re.match(r'^\S+=', l):
                envnames.append(l.split('=')[0])

        return envnames

    def _update_job(self, name, job, addlinesfunction):
        """Rebuild self.lines, passing the job under marker *name* to the callback.

        Returns True when the resulting crontab is empty, else False.
        """
        ansiblename = self.do_comment(name)
        newlines = []
        comment = None

        for l in self.lines:
            if comment is not None:
                addlinesfunction(newlines, comment, job)
                comment = None
            elif l == ansiblename:
                comment = l
            else:
                newlines.append(l)

        self.lines = newlines

        if len(newlines) == 0:
            return True
        else:
            return False  # TODO add some more error testing

    def _update_env(self, name, decl, addenvfunction):
        """Rebuild self.lines, passing the declaration of *name* to the callback."""
        newlines = []

        for l in self.lines:
            if re.match(r'^%s=' % name, l):
                addenvfunction(newlines, decl)
            else:
                newlines.append(l)

        self.lines = newlines

    def render(self):
        """
        Render this crontab as it would be in the crontab.
        """
        crons = []
        for cron in self.lines:
            crons.append(cron)

        result = '\n'.join(crons)
        if result:
            # Ensure exactly one trailing newline, as cron expects.
            result = result.rstrip('\r\n') + '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations for listing another user's crontab.
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations for installing another user's crontab.
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (
                    shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
def main():
    """Entry point of the Ansible cron module.

    Parses module parameters, validates them, mutates the selected crontab
    (a cron job or an environment-variable declaration, optionally in a
    cron.d file), and exits via module.exit_json / fail_json.
    """
    # The following example playbooks:
    #
    # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
    #
    # - name: do the job
    #   cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
    #
    # - name: no job
    #   cron: name="an old job" state=absent
    #
    # - name: sets env
    #   cron: name="PATH" env=yes value="/bin:/usr/bin"
    #
    # Would produce:
    # PATH=/bin:/usr/bin
    # # Ansible: check dirs
    # * * 5,2 * * ls -alh > /dev/null
    # # Ansible: do the job
    # * * 5,2 * * /some/dir/job.sh
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            user=dict(type='str'),
            job=dict(type='str', aliases=['value']),
            cron_file=dict(type='str'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            backup=dict(type='bool', default=False),
            minute=dict(type='str', default='*'),
            hour=dict(type='str', default='*'),
            day=dict(type='str', default='*', aliases=['dom']),
            month=dict(type='str', default='*'),
            weekday=dict(type='str', default='*', aliases=['dow']),
            reboot=dict(type='bool', default=False),
            special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
            disabled=dict(type='bool', default=False),
            env=dict(type='bool', default=False),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['reboot', 'special_time'],
            ['insertafter', 'insertbefore'],
        ],
    )

    name = module.params['name']
    user = module.params['user']
    job = module.params['job']
    cron_file = module.params['cron_file']
    state = module.params['state']
    backup = module.params['backup']
    minute = module.params['minute']
    hour = module.params['hour']
    day = module.params['day']
    month = module.params['month']
    weekday = module.params['weekday']
    reboot = module.params['reboot']
    special_time = module.params['special_time']
    disabled = module.params['disabled']
    env = module.params['env']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    do_install = state == 'present'

    changed = False
    res_args = dict()
    warnings = list()

    if cron_file:
        cron_file_basename = os.path.basename(cron_file)
        if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
            warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
                            ' solely of upper- and lower-case letters, digits, underscores, and hyphens')

    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    crontab = CronTab(module, user, cron_file)

    module.debug('cron instantiated - name: "%s"' % name)

    if not name:
        module.deprecate(
            msg="The 'name' parameter will be required in future releases.",
            version='2.12', collection_name='ansible.builtin'
        )
    if reboot:
        module.deprecate(
            msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
            version='2.12', collection_name='ansible.builtin'
        )

    # Record the pre-change state for --diff output.
    if module._diff:
        diff = dict()
        diff['before'] = crontab.n_existing
        if crontab.cron_file:
            diff['before_header'] = crontab.cron_file
        else:
            if crontab.user:
                diff['before_header'] = 'crontab for user "%s"' % crontab.user
            else:
                diff['before_header'] = 'crontab'

    # --- user input validation ---

    if env and not name:
        module.fail_json(msg="You must specify 'name' while working with environment variables (env=yes)")

    if (special_time or reboot) and \
       (True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
        module.fail_json(msg="You must specify time and date fields or special time.")

    # cannot support special_time on solaris
    if (special_time or reboot) and platform.system() == 'SunOS':
        module.fail_json(msg="Solaris does not support special_time=... or @reboot")

    if cron_file and do_install:
        if not user:
            module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")

    if job is None and do_install:
        module.fail_json(msg="You must specify 'job' to install a new cron job or variable")

    if (insertafter or insertbefore) and not env and do_install:
        module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")

    if reboot:
        special_time = "reboot"

    # if requested make a backup before making a change
    if backup and not module.check_mode:
        (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
        crontab.write(backup_file)

    # Removing an entire cron.d file is handled as a special case.
    if crontab.cron_file and not do_install:
        if module._diff:
            diff['after'] = ''
            diff['after_header'] = '/dev/null'
        else:
            diff = dict()
        if module.check_mode:
            changed = os.path.isfile(crontab.cron_file)
        else:
            changed = crontab.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)

    if env:
        if ' ' in name:
            module.fail_json(msg="Invalid name for environment variable")
        decl = '%s="%s"' % (name, job)
        old_decl = crontab.find_env(name)

        if do_install:
            if len(old_decl) == 0:
                crontab.add_env(decl, insertafter, insertbefore)
                changed = True
            if len(old_decl) > 0 and old_decl[1] != decl:
                crontab.update_env(name, decl)
                changed = True
        else:
            if len(old_decl) > 0:
                crontab.remove_env(name)
                changed = True
    else:
        if do_install:
            for char in ['\r', '\n']:
                if char in job.strip('\r\n'):
                    warnings.append('Job should not contain line breaks')
                    break

            job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
            old_job = crontab.find_job(name, job)

            if len(old_job) == 0:
                crontab.add_job(name, job)
                changed = True
            if len(old_job) > 0 and old_job[1] != job:
                crontab.update_job(name, job)
                changed = True
            # More than 2 matches means duplicate markers; collapse via update.
            if len(old_job) > 2:
                crontab.update_job(name, job)
                changed = True
        else:
            old_job = crontab.find_job(name)

            if len(old_job) > 0:
                crontab.remove_job(name)
                changed = True

    # no changes to env/job, but existing crontab needs a terminating newline
    if not changed and crontab.n_existing != '':
        if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
            changed = True

    res_args = dict(
        jobs=crontab.get_jobnames(),
        envs=crontab.get_envnames(),
        warnings=warnings,
        changed=changed
    )

    if changed:
        if not module.check_mode:
            crontab.write()
        if module._diff:
            diff['after'] = crontab.render()
            if crontab.cron_file:
                diff['after_header'] = crontab.cron_file
            else:
                if crontab.user:
                    diff['after_header'] = 'crontab for user "%s"' % crontab.user
                else:
                    diff['after_header'] = 'crontab'

            res_args['diff'] = diff

    # retain the backup only if crontab or cron file have changed
    if backup and not module.check_mode:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)

    # --- should never get here
    module.exit_json(msg="Unable to execute cron task.")


if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
68caed12611a8b789a1964a22fb49575eca70c7f | 76d388b5d2e74ff0eda748c7868fadf0704cf700 | /tensorpack/utils/develop.py | 496de1dd245db766c3e4ba256ddb638d5e621b48 | [
"Apache-2.0"
] | permissive | jooyounghun/tensorpack | eebf0867e5a82ffd52660dccfbd34879b8d0f5af | 90cdae380c40a1e91f627520c4a739bd6ee3f18b | refs/heads/master | 2020-03-23T23:24:41.651089 | 2018-07-27T02:57:19 | 2018-07-27T02:57:19 | 142,232,523 | 1 | 0 | Apache-2.0 | 2018-07-25T01:45:06 | 2018-07-25T01:45:05 | null | UTF-8 | Python | false | false | 4,773 | py | # -*- coding: utf-8 -*-
# File: develop.py
# Author: tensorpack contributors
""" Utilities for developers only.
These are not visible to users (not automatically imported). And should not
appeared in docs."""
import os
import functools
from datetime import datetime
import importlib
import types
import six
from . import logger
def create_dummy_class(klass, dependency):
    """
    When a dependency of a class is not available, create a dummy class which throws ImportError when used.

    Args:
        klass (str): name of the class.
        dependency (str): name of the dependency.

    Returns:
        class: a class object
    """
    class _DummyMetaClass(type):
        # Accessing any attribute on the dummy class itself raises.
        def __getattr__(cls, name):
            raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))

    class _Dummy(object, metaclass=_DummyMetaClass):
        # Instantiating the dummy class raises.
        def __init__(self, *args, **kwargs):
            raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))

    return _Dummy
def create_dummy_func(func, dependency):
    """
    When a dependency of a function is not available, create a dummy function which throws ImportError when used.

    Args:
        func (str): name of the function.
        dependency (str or list[str]): name(s) of the dependency.

    Returns:
        function: a function object
    """
    # Several missing dependencies are reported as one comma-joined name.
    if isinstance(dependency, (list, tuple)):
        dependency = ','.join(dependency)

    def _dummy(*args, **kwargs):
        raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, func))

    return _dummy
def building_rtfd():
    """
    Returns:
        bool: if tensorpack is being imported to generate docs now.
    """
    # Truthy when running on Read the Docs or under an explicit doc build.
    # NOTE: may return the raw DOC_BUILDING string (truthy) rather than a bool.
    if os.environ.get('READTHEDOCS') == 'True':
        return True
    return os.environ.get('DOC_BUILDING')
def log_deprecated(name="", text="", eos=""):
    """
    Log deprecation warning.

    Args:
        name (str): name of the deprecated item.
        text (str, optional): information about the deprecation.
        eos (str, optional): end of service date such as "YYYY-MM-DD".
    """
    assert name or text
    if eos:
        # Turn "YYYY-MM-DD" into a human-readable "after DD Mon" phrase.
        eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
    if not name:
        warn_msg = text
        if eos:
            warn_msg += " Legacy period ends %s" % eos
    elif eos:
        warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
    else:
        warn_msg = "%s was deprecated. %s" % (name, text)
    logger.warn("[Deprecated] " + warn_msg)
def deprecated(text="", eos=""):
    """
    Args:
        text, eos: same as :func:`log_deprecated`.

    Returns:
        a decorator which deprecates the function.

    Example:
        .. code-block:: python

            @deprecated("Explanation of what to do instead.", "2017-11-4")
            def foo(...):
                pass
    """

    def get_location():
        # Best-effort "file:line" of the deprecated call site.
        import inspect
        frame = inspect.currentframe()
        if frame:
            callstack = inspect.getouterframes(frame)[-1]
            return '%s:%i' % (callstack[1], callstack[2])
        else:
            # Some interpreters lack frame support; fall back to the stack.
            stack = inspect.stack(0)
            entry = stack[2]
            return '%s:%i' % (entry[1], entry[2])

    def _decorate(func):
        @functools.wraps(func)
        def _wrapped(*args, **kwargs):
            # Warn once per call, then delegate to the real function.
            name = "{} [{}]".format(func.__name__, get_location())
            log_deprecated(name, text, eos)
            return func(*args, **kwargs)
        return _wrapped

    return _decorate
def HIDE_DOC(func):
    """Mark *func* so the Sphinx doc generator skips it; returns func unchanged."""
    setattr(func, '__HIDE_SPHINX_DOC__', True)
    return func
# Copied from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
class LazyLoader(types.ModuleType):
    """Module proxy that defers the real import until first attribute access."""

    def __init__(self, local_name, parent_module_globals, name):
        # Remember where to install the real module once it is loaded.
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        super(LazyLoader, self).__init__(name)

    def _load(self):
        """Import the target module and install it in the parent namespace."""
        target = importlib.import_module(self.__name__)
        self._parent_module_globals[self._local_name] = target
        # Copy the real module's dict onto this proxy so that if someone keeps
        # a reference to the LazyLoader, lookups stay efficient (__getattr__ is
        # only called on lookups that fail).
        self.__dict__.update(target.__dict__)
        return target

    def __getattr__(self, item):
        return getattr(self._load(), item)

    def __dir__(self):
        return dir(self._load())
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
311ba855cf35a4765fce0410377fb7f5eb4aa8a4 | c56448aa3553d1a5ab71099e741fa71c15d539cb | /stations/urls.py | 817356c4760a4af8560f60d4abb533fc1d2a9d3e | [] | no_license | Jack11709/django-underground | 8591cba5fbcd9e2202fbaefa1a95057d4258477d | 60b868ce5dcb5001761c5207cfd764474ec8f19a | refs/heads/master | 2022-06-04T04:11:14.667519 | 2019-10-31T09:50:46 | 2019-10-31T09:50:46 | 218,318,167 | 0 | 0 | null | 2022-05-25T03:24:00 | 2019-10-29T15:19:03 | Python | UTF-8 | Python | false | false | 588 | py | from django.urls import path
from .views import StationList, StationDetail, ZoneList, ZoneDetail, LineList, LineDetail # import our DRF views
urlpatterns = [
path('stations', StationList.as_view(), name='stations-list'),
path('stations/<int:pk>/', StationDetail.as_view(), name='stations-detail'),
path('zones', ZoneList.as_view()),
path('zones/<int:pk>/', ZoneDetail.as_view()),
path('lines', LineList.as_view()),
path('lines/<int:pk>/', LineDetail.as_view())
] # registering all our urls for this project, the route url for this project is in /project/urls.py | [
"jack.may@generalassemb.ly"
] | jack.may@generalassemb.ly |
d86cb55284f9ec406e508cb0da30cb1564736a7e | 919fd48a34ca200086f51905d64c21c3b31b6739 | /CodeMixed-Text-Generator/cm_text_generator/grammar_inference.py | 0449999d7ffce757d92333a845762acfcc6197a9 | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"BSD-2-Clause",
"MIT",
"Python-2.0",
"PSF-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | mohdsanadzakirizvi/CodeMixed-Text-Generator | e89b758ad88a622c058bf1465003ae3c23a55b88 | 47740eeff3ecb46f5294711f4fe5d3a03a6e0b54 | refs/heads/main | 2023-06-15T22:43:21.578533 | 2021-04-13T09:16:24 | 2021-04-27T12:46:19 | 384,061,885 | 0 | 0 | MIT | 2021-07-08T08:54:14 | 2021-07-08T08:54:14 | null | UTF-8 | Python | false | false | 1,853 | py | ###GRAMMAR INFERENCE
from .data_structure_definitions import *
def ruleEnlister(root, grammar):
    """Walk the parse tree rooted at *root*, assigning each internal node a
    grammar-rule number; append a new rule to *grammar* when no existing rule
    matches the node's label and children.

    NOTE(review): token == "XXXXX" appears to mark internal (non-leaf) nodes;
    leaves carry real tokens and are skipped -- confirm against the tree builder.
    NOTE(review): assumes root.ruleNum is initialised to -1 -- confirm.
    """
    if root.token == "XXXXX":
        cond = False
        for rule in grammar:  ##check false/true
            # A candidate rule must share the LHS label and have the same arity.
            if (rule.lhs.nonTerminal == root.label and len(rule.rhs) == len(root.children)):
                #print "Using old rule!"
                cond = True
                # Every RHS symbol must match the child's label and repeat index.
                for counter in range(len(rule.rhs)):
                    if (rule.rhs[counter].nonTerminal != root.children[counter].label or rule.rhs[counter].index != root.children[counter].repeatIndex):
                        cond = False
                if cond == True:
                    root.ruleNum = rule.ruleNum
        if (root.ruleNum == -1):
            # No existing rule matched: build one from this node's children.
            #print "Making new rule!", str(len(grammar))
            lhs = grammarPoint(root.label, -1, -1)
            rhs = []
            for child in root.children:
                rhs.append(grammarPoint(child.label, child.repeatIndex, root.children.index(child)))
            grammar.append(grammarRule(len(grammar), lhs, rhs))
            root.ruleNum = len(grammar) - 1
        # Recurse into the children to number the whole subtree.
        for child in root.children:
            ruleEnlister(child, grammar)
def projectHindiRules(hinRoot, grammar):
    """Record, for every RHS symbol of each rule, its position (hinRank) in the
    Hindi tree's child order, projecting the Hindi word order onto the rules.

    NOTE(review): assumes ruleEnlister already assigned hinRoot.ruleNum for
    every internal node -- confirm the call order in the generator pipeline.
    """
    if hinRoot.token == "XXXXX":
        # print "\nLABEL: ", hinRoot.label, " ", str(hinRoot.ruleNum)
        for child in hinRoot.children:
            for count in range(len(grammar[hinRoot.ruleNum].rhs)):
                #print "(", child.label, grammar[hinRoot.ruleNum].rhs[count].nonTerminal, child.repeatIndex, grammar[hinRoot.ruleNum].rhs[count].index, ")",
                # Match the child to an RHS symbol by label and repeat index.
                if child.label == grammar[hinRoot.ruleNum].rhs[count].nonTerminal and \
                   child.repeatIndex == grammar[hinRoot.ruleNum].rhs[count].index:
                    #print "index assigned: ", ind
                    grammar[hinRoot.ruleNum].rhs[count].hinRank = hinRoot.children.index(child)
                    #print "incrementing..."
        # Recurse into the children to project the whole subtree.
        for child in hinRoot.children:
            projectHindiRules(child, grammar)
"mohdsanadzakirizvi@gmail.com"
] | mohdsanadzakirizvi@gmail.com |
a4c71809c35378bb39dbbce97d55d2a122ab4dcd | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /AI/05_tictactoe/02grid_plot.py | b2fb6cbc7f65ddac4fc048c6664f6bdd82dfb227 | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | #!/usr/bin/env python
import numpy as np
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class gameboard(object):
    """3x3 tic-tac-toe board rendered with matplotlib patches.

    Cell values: 0 = empty, 1 = player 1 ("X", drawn red),
    2 = player 2 ("O", drawn blue).
    """

    def __init__(self):
        #player 1 puts a "X", player 2 puts a "O"
        self.g = [[1,0,1],[0,0,2],[0,2,0]]  # hard-coded sample position
        self.grid = np.array(self.g)
        print(self.grid)

    def drawGrid(self):
        """Draw the board: one unit square per cell, colored by its owner."""
        fig = plt.figure()
        ax = fig.add_subplot(111, xlim=(0,3), ylim = (0,3))
        # All 9 cell coordinates; each tuple indexes self.grid directly.
        self.myCells = [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2),(2,0),(2,1),(2,2)]
        for i in self.myCells:
            if self.grid[i] == 1:
                cell = mpatches.Rectangle((i), 1, 1, alpha=1, facecolor="red")
                ax.add_patch(cell)
            elif self.grid[i] == 2:
                cell = mpatches.Rectangle((i), 1, 1, alpha=1, facecolor="blue")
                ax.add_patch(cell)
            else:
                cell = mpatches.Rectangle((i), 1, 1, alpha=1, facecolor="none")
                ax.add_patch(cell)
        # NOTE(review): grid[(r, c)] is row/col while Rectangle((x, y)) is x/y,
        # so the board may render transposed -- confirm intended orientation.
        plt.show()
# Build the sample board and display it (plt.show() blocks until closed).
board = gameboard()
board.drawGrid()
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
622914c9a6c8f38dd5339009d187c1a23ea57bf5 | 6bd1aa6b80fd93fd65f3e3f9c6b4cc743fabc076 | /Laboratorios-Big-Data/MOOC/KMeans/KMeansHackers.py | 94bd8290cb1a8e974ee767a073e4064bc5d47159 | [] | no_license | RAricardo/Laboratorios-Big-Data | 617a7adc5531d29653b65af0a3a3e885a0aa42e8 | 04ebc65ae83007407e9e14f38774ef77a21cbe31 | refs/heads/master | 2020-04-29T05:09:51.189057 | 2019-04-08T17:00:46 | 2019-04-08T17:00:46 | 175,872,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Databricks notebook source
# K-Means clustering of hacker-session data: assemble numeric features,
# standard-scale them, then compare cluster sizes for k=2 vs k=3.
from pyspark.sql import SparkSession

# COMMAND ----------

spark = SparkSession.builder.appName("Kmeans").getOrCreate()

# COMMAND ----------

data = spark.read.csv("/FileStore/tables/hack_data.csv", inferSchema=True, header=True)

# COMMAND ----------

data.printSchema()

# COMMAND ----------

from pyspark.ml.clustering import KMeans

# COMMAND ----------

from pyspark.ml.feature import VectorAssembler

# COMMAND ----------

data.columns

# COMMAND ----------

# Combine the numeric session columns into a single "features" vector.
assembler = VectorAssembler(inputCols=['Session_Connection_Time',
 'Bytes Transferred',
 'Kali_Trace_Used',
 'Servers_Corrupted',
 'Pages_Corrupted',
 'WPM_Typing_Speed'], outputCol="features")

# COMMAND ----------

final_data = assembler.transform(data)

# COMMAND ----------

final_data.printSchema()

# COMMAND ----------

from pyspark.ml.feature import StandardScaler

# COMMAND ----------

# Scale features so no single column dominates the Euclidean distance.
scaler = StandardScaler(inputCol="features", outputCol="Scaled Features")

# COMMAND ----------

scaler_model = scaler.fit(final_data)

# COMMAND ----------

cluster_final_data = scaler_model.transform(final_data)

# COMMAND ----------

kmeans2 = KMeans(featuresCol="Scaled Features", k=2)

# COMMAND ----------

kmeans3 = KMeans(featuresCol="Scaled Features", k=3)

# COMMAND ----------

model_k2 = kmeans2.fit(cluster_final_data)
model_k3 = kmeans3.fit(cluster_final_data)

# COMMAND ----------

# Cluster membership counts: equal-sized clusters suggest the "right" k.
model_k3.transform(cluster_final_data).groupBy("prediction").count().show()

# COMMAND ----------

model_k2.transform(cluster_final_data).groupBy("prediction").count().show()

# COMMAND ----------
| [
"rrazopardc@eafit.edu.co"
] | rrazopardc@eafit.edu.co |
96eb58da2807780f7f78eb49453cd03e2e4a57bb | 33f30925224a7db3e3bf6948c6c569ad850e9c76 | /Server/bin/rst2xml.py | 6a7fab179644d60c2959331900cdea30a7350337 | [] | no_license | duelle/CTT | 2bc64fffaf4b2eb3976fedd7aea231a51da8fbe9 | e2da2ab9c599833cc8409728b456a9e37825986b | refs/heads/master | 2022-04-06T15:25:06.747919 | 2020-02-19T14:04:37 | 2020-02-19T14:04:37 | 237,939,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #!/home/duelle/Repositories/git/RadonCTT/Server/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| [
"duellmann@iste.uni-stuttgart.de"
] | duellmann@iste.uni-stuttgart.de |
bcded7ca3347b631cb06ccb49aa49c5ef2291909 | 6cb18c62758bfbf783d3fabe851d1c4d9f323483 | /setup.py | 9319f44e05f51de89cc40224949e07be98a9e018 | [
"MIT"
] | permissive | bruinxiong/performer-pytorch | 68e505ff5e59d35e339b23661feef377795fd2df | c368b5e4efd46f72e2abaa655dc813021f911014 | refs/heads/main | 2023-01-04T02:25:42.898296 | 2020-10-26T22:41:09 | 2020-10-26T22:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from setuptools import setup, find_packages
# Packaging metadata for the performer-pytorch distribution (PyPI).
setup(
  name = 'performer-pytorch',
  packages = find_packages(exclude=['examples']),
  version = '0.1.4',
  license='MIT',
  description = 'Performer - Pytorch',
  author = 'Phil Wang',
  author_email = 'lucidrains@gmail.com',
  url = 'https://github.com/lucidrains/performer-pytorch',
  keywords = [
    'artificial intelligence',
    'attention mechanism',
    'efficient attention',
    'transformers'
  ],
  # Runtime dependencies; einops and fast-transformers back the attention ops.
  install_requires=[
    'pytorch-fast-transformers>=0.3.0',
    'torch>=1.6',
    'einops>=0.3'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
"lucidrains@gmail.com"
] | lucidrains@gmail.com |
c514c9650b93f135aac41cc8d73c464420d4b318 | f7e1ada65e270fe2961df46179798ba522949e5c | /main1.py | 37d1622998e259cd937474a1130d59c95377e6c3 | [] | no_license | fabian6768/WebsiteManager | 36fad06af38298f25592fd2680837c6a1eb6a9b9 | d10148e83e5533bbb3ece9018fd75db33a036138 | refs/heads/master | 2021-01-12T08:29:55.355610 | 2016-12-15T21:49:31 | 2016-12-15T21:49:31 | 76,597,511 | 0 | 1 | null | 2020-10-01T11:34:14 | 2016-12-15T21:28:28 | Python | UTF-8 | Python | false | false | 3,578 | py | #This Is A Program
from csv import *
from tkinter import *
from tkinter import messagebox
import webbrowser as wb
a=1
class Second(object):
    """Window listing saved bookmarks read from website.csv; each row becomes
    a button that opens the stored URL in the default browser."""

    def __init__(self):
        self.t = Tk()
        self.t.title("Website Library")
        self.t.geometry("500x350")
        self.t.configure(background="#ddaf7e")
        self.book = []    # bookmark display names (csv column 0)
        self.urls = []    # bookmark URLs (csv column 1)
        self.button = []  # one Button per bookmark
        self.i = 0
        self.j = 0
        with open("website.csv", newline="") as csv:
            self.csvf = reader(csv)
            for row in self.csvf:
                self.book.append(row[0])
                self.urls.append(row[1])
        for name in self.book:
            self.button.append(Button(self.t, text=name, font="Verdana 15", width=16))
            self.button[self.i].pack(pady=2)
            self.i += 1
        self.i = 0
        for url in self.urls:
            # Bind url as a default argument so each button keeps its own URL.
            self.button[self.i].configure(command=lambda url=url: self.openwww(url))
            self.i += 1
        self.t.mainloop()

    def openwww(self, url):
        # Open the bookmark in the system's default web browser.
        wb.open(url)
class Third(object):
    """Window for adding a new bookmark: two entry fields (name, URL) and a
    button that appends the pair to website.csv."""

    def __init__(self):
        self.t = Tk()
        self.t.title("Website Library")
        self.t.geometry("500x250")
        self.t.configure(background="#ddaf7e")
        self.first = Label(self.t, text="Name Of BookMark and second text box URL Of bookmark", font="Calibri 15", bg="#ddaf7e")
        self.name = Label(self.t, text="Name :", font="Calibri 15", bg="#ddaf7e")
        self.url = Label(self.t, text="URL :", font="Calibri 15", bg="#ddaf7e")
        self.entry1 = Entry(self.t)  # bookmark name input
        self.entry2 = Entry(self.t)  # bookmark URL input
        self.first.grid(row=0, columnspan=2)
        self.name.grid(row=1, column=0, sticky=E)
        self.url.grid(row=2, column=0, sticky=E)
        self.entry1.grid(row=1, column=1, sticky=W)
        self.entry2.grid(row=2, column=1, sticky=W)
        self.getitall = Button(self.t, text="Get It All", font="Calibri 12", command=lambda: self.getit())
        self.getitall.grid(row=3, column=1, sticky=W, padx=20)
        self.t.mainloop()

    def getit(self):
        # Append the entered (name, url) pair to the CSV store, then clear
        # both fields for the next entry.
        with open("website.csv", "a", newline="") as csv:
            w = writer(csv)
            w.writerow([self.entry1.get(), self.entry2.get()])
        self.entry1.delete(0, END)
        self.entry2.delete(0, END)
class WebsiteManager(object):
    """Main menu window with two buttons: view saved websites (Second) or add
    new ones (Third). Closing the window clears the module-level flag `a`,
    which stops the relaunch loop in main()."""

    def __init__(self):
        """Creating The First Window That Holds Buttons"""
        self.r = Tk()
        self.r.title("Website Library 123")
        self.r.geometry("500x250")
        self.r.configure(background="#ddaf7e")
        # Configuring so that the first window holds buttons.
        # NOTE: .pack() returns None, so these attributes hold None, not widgets.
        self.title = Label(self.r, text="Website Library", bg="#ddaf7e", font="Calibri 26").pack()
        self.divider = Label(self.r, text=" "*100, bg="#ddaf7e").pack()
        self.saved = Button(self.r, text="View Saved Websites", font="Verdana 15", command=lambda: self.newwind(1)).pack(pady=10)
        self.addnew = Button(self.r, text="Add New Websites", font="Verdana 15", command=lambda: self.newwind(2)).pack(pady=10)
        self.r.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.r.mainloop()

    def on_closing(self):
        # Confirm quitting; on OK, destroy the window and clear the relaunch flag.
        global a
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            self.r.destroy()
            a = 0

    def newwind(self, option):
        # Replace this menu window with the requested child window.
        if option == 1:
            self.r.destroy()
            Second()
        elif option == 2:
            self.r.destroy()
            Third()
def main():
    # Relaunch the main menu until the user confirms quitting
    # (WebsiteManager.on_closing sets the module-level flag a to 0).
    while a == 1:
        WebsiteManager()


if __name__ == "__main__":
    main()
| [
"fabian6768@yahoo.com"
] | fabian6768@yahoo.com |
9cc2c3e325d074bfd93da7cd26d488883eadd91a | dd83f3a356278cd5ede9efa5ab25a93e258ef6b7 | /slowfast/models/vit_helper.py | afa96024b9244b5160c7ff9fba7708ce3beda16c | [
"Apache-2.0"
] | permissive | XrosLiang/Motionformer | 9debfcaed5c68cce27ec3d1f5ebc409ae81066c5 | 890bded4139dc4b17e344ea9c090bf2de4dd2678 | refs/heads/main | 2023-06-02T16:50:06.222720 | 2021-06-12T11:38:24 | 2021-06-12T11:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,425 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright 2020 Ross Wightman
# Modified Model definition
"""Video models."""
from einops import rearrange, repeat
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple
from torch import einsum
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from torch.hub import load_state_dict_from_url
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.registry import register_model
from . import performer_helper
from . import orthoformer_helper
from . import nystrom_helper
# Download URLs for ImageNet-1k pretrained ViT checkpoints (base and large,
# patch 16, 224px) used to initialize the video backbone.
default_cfgs = {
    'vit_1k': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
    'vit_1k_large': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
}
def qkv_attn(q, k, v):
    """Plain dot-product attention: softmax(q @ k^T) @ v.

    No 1/sqrt(d) scaling is applied here; callers pre-scale q when needed.
    """
    scores = einsum('b i d, b j d -> b i j', q, k).softmax(dim=-1)
    return einsum('b i j, b j d -> b i d', scores, v)
class JointSpaceTimeAttention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.head_dim = head_dim
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
B, N, C = x.shape
qkv = self.qkv(x).reshape(
B, N, 3,
self.num_heads,
C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
# Joint space-time attention
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class DividedAttention(nn.Module):
    """Attention along a single axis (space OR time), as in divided space-time
    attention. The CLS token always attends over all tokens; patch tokens
    attend only within the axis selected by the einops patterns passed to
    forward()."""

    def __init__(
        self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        # init to zeros
        # NOTE(review): zero qkv plus all-ones proj makes this branch start as
        # a (near) no-op -- presumably for stable fine-tuning from a spatial
        # ViT; confirm intended init. Also note .bias.data fails if
        # qkv_bias=False (bias is None) -- callers apparently pass bias=True.
        self.qkv.weight.data.fill_(0)
        self.qkv.bias.data.fill_(0)
        self.proj.weight.data.fill_(1)
        self.proj.bias.data.fill_(0)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, einops_from, einops_to, **einops_dims):
        # num of heads variable
        h = self.num_heads

        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(
            t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # Scale q
        q *= self.scale

        # Take out cls_q, cls_k, cls_v
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(
            lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))

        # let CLS token attend to key / values of all patches across time and space
        cls_out = qkv_attn(cls_q, k, v)

        # rearrange across time or space
        q_, k_, v_ = map(
            lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims),
            (q_, k_, v_)
        )

        # expand CLS token keys and values across time or space and concat
        r = q_.shape[0] // cls_k.shape[0]
        cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))
        k_ = torch.cat((cls_k, k_), dim=1)
        v_ = torch.cat((cls_v, v_), dim=1)

        # attention
        out = qkv_attn(q_, k_, v_)

        # merge back time or space
        out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)

        # concat back the cls token
        out = torch.cat((cls_out, out), dim=1)

        # merge back the heads
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)

        ## to out
        x = self.proj(out)
        x = self.proj_drop(x)
        return x
class TrajectoryAttention(nn.Module):
    """Trajectory attention (Motionformer): each query patch first attends
    spatially within every frame to form per-frame trajectory descriptors,
    then attends over the temporal dimension of its own trajectory.
    Supports exact attention or linear-complexity approximations
    ('nystrom', 'orthoformer', 'performer')."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        # Second projection pair used for the temporal attention stage.
        self.proj_q = nn.Linear(dim, dim, bias=qkv_bias)
        self.proj_kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        B, N, C = x.shape
        P = seq_len
        F = num_frames
        h = self.num_heads

        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)

        # Reshape: 'b n (h d) -> (b h) n d'
        q, k, v = map(
            lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # remove CLS token from q, k, v
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(
            lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))

        # let CLS token attend to key / values of all patches across time and space
        cls_out = qkv_attn(cls_q * self.scale, k, v)
        cls_out = rearrange(cls_out, f'(b h) f d -> b f (h d)', f=1, h=h)

        if approx == "nystrom":
            ## Shared spatial landmarks
            q_, k_, v_ = map(
                lambda t: rearrange(t, f'b h p d -> (b h) p d', h=h), (q_, k_, v_))
            x = nystrom_helper.nystrom_spatial_attn(
                q_, k_, v_,
                landmarks=num_landmarks,
                num_frames=F,
                inv_iters=6,
                use_spatial_landmarks=True
            )
            x = rearrange(x, f'(b h) p f d -> b h p f d', f=F, h=h)
        elif approx == "orthoformer":
            x = orthoformer_helper.orthoformer(
                q_, k_, v_,
                num_landmarks=num_landmarks,
                num_frames=F,
            )
        elif approx == "performer":
            # Form random projection matrices:
            m = 256  # r = 2m, m <= d
            d = self.head_dim
            # Derive a data-dependent seed so the projection is deterministic
            # for a given input.
            seed = torch.ceil(torch.abs(torch.sum(q_) * performer_helper.BIG_CONSTANT))
            seed = torch.tensor(seed)
            projection_matrix = performer_helper.create_projection_matrix(
                m, d, seed=seed, device=q_.device, dtype=q_.dtype)
            q_, k_ = map(lambda t: rearrange(t, f'b h p d -> b p h d'), (q_, k_))
            q_prime = performer_helper.softmax_kernel_transformation(
                q_,
                is_query=True,
                projection_matrix=projection_matrix
            )
            k_prime = performer_helper.softmax_kernel_transformation(
                k_,
                is_query=False,
                projection_matrix=projection_matrix
            )
            q_prime, k_prime = map(
                lambda t: rearrange(t, f'b p h r -> b h p r'), (q_prime, k_prime))
            k_prime = rearrange(k_prime, 'b h (f n) r -> b h f n r', f=F)
            v_ = rearrange(v_, 'b h (f n) d -> b h f n d', f=F)
            kv = torch.einsum('b h f n r, b h f n d -> b h f r d', k_prime, v_)
            qkv = torch.einsum('b h p r, b h f r d -> b h p f d', q_prime, kv)
            normaliser = torch.einsum('b h f n r -> b h f r', k_prime)
            normaliser = torch.einsum('b h p r, b h f r -> b h p f', q_prime, normaliser)
            x = qkv / normaliser.unsqueeze(-1)
        else:
            # Using full attention
            q_dot_k = q_ @ k_.transpose(-2, -1)
            q_dot_k = rearrange(q_dot_k, 'b q (f n) -> b q f n', f=F)
            # Softmax per frame: each query gets one attention map per frame.
            space_attn = (self.scale * q_dot_k).softmax(dim=-1)
            attn = self.attn_drop(space_attn)
            v_ = rearrange(v_, 'b (f n) d -> b f n d', f=F, n=P)
            x = torch.einsum('b q f n, b f n d -> b q f d', attn, v_)

        # Temporal attention: query is the similarity-aggregated patch
        x = rearrange(x, '(b h) s f d -> b s f (h d)', b=B)
        # Take the diagonal: each token's descriptor from its own frame.
        x_diag = rearrange(x, 'b (g n) f d -> b g n f d', g=F)
        x_diag = torch.diagonal(x_diag, dim1=-4, dim2=-2)
        x_diag = rearrange(x_diag, f'b n d f -> b (f n) d', f=F)
        q2 = self.proj_q(x_diag)
        k2, v2 = self.proj_kv(x).chunk(2, dim=-1)
        q2 = rearrange(q2, f'b s (h d) -> b h s d', h=h)
        x, k2, v2 = map(
            lambda t: rearrange(t, f'b s f (h d) -> b h s f d', f=F, h=h), (x, k2, v2))
        q2 *= self.scale
        attn = torch.einsum('b h s d, b h s f d -> b h s f', q2, k2)
        attn = attn.softmax(dim=-1)
        # NOTE(review): temporal values come from x (the aggregated
        # trajectories), not v2 -- v2 appears unused here; confirm intended.
        x = torch.einsum('b h s f, b h s f d -> b h s d', attn, x)
        x = rearrange(x, f'b h s d -> b s (h d)')

        # concat back the cls token
        x = torch.cat((cls_out, x), dim=1)

        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn
def get_attention_module(
    attn_type='joint', dim=768, num_heads=12, qkv_bias=False,
    attn_drop=0., proj_drop=0.
):
    """Factory for the spatio-temporal attention module used by Block.

    Args mirror the attention constructors; ``attn_type`` selects between
    'joint' (JointSpaceTimeAttention) and 'trajectory' (TrajectoryAttention).

    Raises:
        ValueError: for an unrecognized ``attn_type``.  (The original fell
        through and crashed later with an opaque UnboundLocalError.)
    """
    if attn_type == 'joint':
        attn = JointSpaceTimeAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=proj_drop)
    elif attn_type == 'trajectory':
        attn = TrajectoryAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=proj_drop)
    else:
        raise ValueError(f"Unsupported attn_type: {attn_type!r}")
    return attn
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention then a pre-norm MLP,
    each wrapped in a residual connection with optional stochastic depth."""

    def __init__(
        self, dim=768, num_heads=12, attn_type='trajectory',
        mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
        drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = get_attention_module(
            attn_type=attn_type, dim=dim, num_heads=num_heads,
            qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop
        )
        # Stochastic depth on the residual branches; identity when disabled.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=hidden_dim,
            act_layer=act_layer, drop=drop)

    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        # Attention returns (output, attention-map); only the output is used.
        attn_out, _ = self.attn(
            self.norm1(x),
            seq_len=seq_len,
            num_frames=num_frames,
            approx=approx,
            num_landmarks=num_landmarks
        )
        x = x + self.drop_path(attn_out)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class DividedSpaceTimeBlock(nn.Module):
    """TimeSformer-style divided space-time block: temporal attention, then
    spatial attention, then an MLP, each applied pre-norm with residuals."""

    def __init__(
        self, dim=768, num_heads=12, attn_type='divided',
        mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
        drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm
    ):
        super().__init__()
        # einops patterns: fold space (n) or time (f) into the batch axis.
        self.einops_from_space = 'b (f n) d'
        self.einops_to_space = '(b f) n d'
        self.einops_from_time = 'b (f n) d'
        self.einops_to_time = '(b n) f d'
        self.norm1 = norm_layer(dim)
        self.attn = DividedAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.timeattn = DividedAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim, hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)

    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        # Temporal attention; its residual is added WITHOUT drop_path,
        # matching the original implementation.
        time_out = self.timeattn(
            self.norm3(x), self.einops_from_time, self.einops_to_time, n=seq_len)
        x = x + time_out
        # Spatial attention with stochastic depth on the residual branch.
        space_out = self.attn(
            self.norm1(x), self.einops_from_space, self.einops_to_space, f=num_frames)
        x = x + self.drop_path(space_out)
        # Feed-forward sub-block.
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> act -> dropout -> Linear -> dropout."""

    def __init__(
        self, in_features, hidden_features=None,
        out_features=None, act_layer=nn.GELU, drop=0.
    ):
        super().__init__()
        # Falsy (None/0) sizes fall back to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding.

    Splits an image into non-overlapping patches with a strided Conv2d and
    flattens them into a sequence of patch tokens.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = img_size if type(img_size) is tuple else to_2tuple(img_size)
        # BUG FIX: the original assigned `img_size` here whenever `patch_size`
        # was already a tuple, silently producing a wrong patch grid.
        patch_size = patch_size if type(patch_size) is tuple else to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # (B, C, H, W) -> (B, num_patches, embed_dim)
        B, C, H, W = x.shape
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class PatchEmbed3D(nn.Module):
    """ Video to Patch Embedding: tokenizes a clip with a strided Conv3d
    over (time, height, width) tubelets.
    """
    def __init__(
        self, img_size=224, temporal_resolution=4, in_chans=3,
        patch_size=16, z_block_size=2, embed_dim=768, flatten=True
    ):
        super().__init__()
        # Token-grid dimensions after the strided convolution.
        self.height = img_size // patch_size
        self.width = img_size // patch_size
        self.frames = temporal_resolution // z_block_size
        self.num_patches = self.height * self.width * self.frames
        tubelet = (z_block_size, patch_size, patch_size)
        self.proj = nn.Conv3d(in_chans, embed_dim,
                              kernel_size=tubelet,
                              stride=tubelet)
        self.flatten = flatten

    def forward(self, x):
        B, C, T, H, W = x.shape
        tokens = self.proj(x)
        if self.flatten:
            # (B, D, T', H', W') -> (B, T'*H'*W', D)
            tokens = tokens.flatten(2).transpose(1, 2)
        return tokens
class HeadMLP(nn.Module):
    """Classification head: a dropout + linear probe when ``n_hidden`` is
    None, otherwise a one-hidden-layer MLP with batch norm and dropout."""

    def __init__(self, n_input, n_classes, n_hidden=512, p=0.1):
        super(HeadMLP, self).__init__()
        self.n_input = n_input
        self.n_classes = n_classes
        self.n_hidden = n_hidden
        if n_hidden is None:
            # use linear classifier
            layers = [
                nn.Dropout(p=p),
                nn.Linear(n_input, n_classes, bias=True),
            ]
        else:
            # use simple MLP classifier
            layers = [
                nn.Dropout(p=p),
                nn.Linear(n_input, n_hidden, bias=True),
                nn.BatchNorm1d(n_hidden),
                nn.ReLU(inplace=True),
                nn.Dropout(p=p),
                nn.Linear(n_hidden, n_classes, bias=True),
            ]
        self.block_forward = nn.Sequential(*layers)
        print(f"Dropout-NLP: {p}")

    def forward(self, x):
        return self.block_forward(x)
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
def adapt_input_conv(in_chans, conv_weight, agg='sum'):
    """Adapt a pretrained 3-channel conv-stem weight to ``in_chans`` inputs.

    For 1 channel the RGB kernels are summed or averaged; for other channel
    counts the kernels are tiled (agg='sum', rescaled) or averaged and
    repeated. Weights for a 3-channel model are returned unchanged.
    """
    orig_dtype = conv_weight.dtype
    conv_weight = conv_weight.float()  # math below may be unsupported on fp16
    out_ch, in_ch, kh, kw = conv_weight.shape
    if in_chans == 1:
        if in_ch > 3:
            assert conv_weight.shape[1] % 3 == 0
            # For models with space2depth stems
            conv_weight = conv_weight.reshape(out_ch, in_ch // 3, 3, kh, kw)
            conv_weight = conv_weight.sum(dim=2, keepdim=False)
        elif agg == 'sum':
            print("Summing conv1 weights")
            conv_weight = conv_weight.sum(dim=1, keepdim=True)
        else:
            print("Averaging conv1 weights")
            conv_weight = conv_weight.mean(dim=1, keepdim=True)
    elif in_chans != 3:
        if in_ch != 3:
            raise NotImplementedError('Weight format not supported by conversion.')
        if agg == 'sum':
            print("Summing conv1 weights")
            # Tile the RGB kernels, trim to in_chans, and rescale so the
            # expected activation magnitude is preserved.
            repeat = int(math.ceil(in_chans / 3))
            conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
            conv_weight *= (3 / float(in_chans))
        else:
            print("Averaging conv1 weights")
            conv_weight = conv_weight.mean(dim=1, keepdim=True)
            conv_weight = conv_weight.repeat(1, in_chans, 1, 1)
    return conv_weight.to(orig_dtype)
def load_pretrained(
    model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False
):
    """Download the pretrained weights named by cfg.VIT.PRETRAINED_WEIGHTS and
    copy every shape-compatible tensor into ``model`` in place.

    Adapts the patch-embedding conv when ``in_chans`` != 3 and drops or
    offsets the classifier head when ``num_classes`` differs from the 1000
    pretraining classes.  Prints every tensor it could not load.
    """
    # Load state dict
    # NOTE(review): this assert is a no-op -- it asserts a (truthy) f-string
    # rather than a condition, so PRETRAINED_WEIGHTS is never validated here.
    # The intended check was presumably membership in default_cfgs; confirm.
    assert(f"{cfg.VIT.PRETRAINED_WEIGHTS} not in [vit_1k, vit_1k_large]")
    state_dict = torch.hub.load_state_dict_from_url(url=default_cfgs[cfg.VIT.PRETRAINED_WEIGHTS])
    if filter_fn is not None:
        state_dict = filter_fn(state_dict)
    input_convs = 'patch_embed.proj'
    if input_convs is not None and in_chans != 3:
        if isinstance(input_convs, str):
            input_convs = (input_convs,)
        for input_conv_name in input_convs:
            weight_name = input_conv_name + '.weight'
            try:
                # Average the pretrained RGB stem kernels into in_chans channels.
                state_dict[weight_name] = adapt_input_conv(
                    in_chans, state_dict[weight_name], agg='avg')
                print(
                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
            except NotImplementedError as e:
                # Conversion impossible: drop the tensor and load non-strictly.
                del state_dict[weight_name]
                strict = False
                print(
                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
    classifier_name = 'head'
    # NOTE(review): cfg is accessed as an attribute tree above (cfg.VIT...)
    # but as a mapping here -- assumes cfg supports .get(); confirm.
    label_offset = cfg.get('label_offset', 0)
    pretrain_classes = 1000
    if num_classes != pretrain_classes:
        # completely discard fully connected if model num_classes doesn't match pretrained weights
        del state_dict[classifier_name + '.weight']
        del state_dict[classifier_name + '.bias']
        strict = False
    elif label_offset > 0:
        # special case for pretrained weights with an extra background class in pretrained weights
        classifier_weight = state_dict[classifier_name + '.weight']
        state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
        classifier_bias = state_dict[classifier_name + '.bias']
        state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
    # Manual, shape-checked copy into the live model; `strict` is not used
    # beyond this point (mismatches are only printed).
    loaded_state = state_dict
    self_state = model.state_dict()
    all_names = set(self_state.keys())
    saved_names = set([])
    for name, param in loaded_state.items():
        param = param  # no-op kept from the original
        if 'module.' in name:
            # Strip DataParallel/DistributedDataParallel prefixes.
            name = name.replace('module.', '')
        if name in self_state.keys() and param.shape == self_state[name].shape:
            saved_names.add(name)
            self_state[name].copy_(param)
        else:
            print(f"didnt load: {name} of shape: {param.shape}")
    print("Missing Keys:")
print(all_names - saved_names) | [
"mandelapatrick@devfair0297.h2.fair"
] | mandelapatrick@devfair0297.h2.fair |
1697ff12097d074fe9a08b7e8cfbf1ecd1348016 | cca89a7bbe2da907a38eb00e9a083f57597273f0 | /162. 寻找峰值/pythonCode.py | ecfc5d414241c3d0b4d2b4aac3531e9ced628696 | [] | no_license | xerprobe/LeetCodeAnswer | cc87941ef2a25c6aa1366e7a64480dbd72750670 | ea1822870f15bdb1a828a63569368b7cd10c6ab8 | refs/heads/master | 2022-09-23T09:15:42.628793 | 2020-06-06T16:29:59 | 2020-06-06T16:29:59 | 270,215,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from typing import List
class Solution:
    def findPeakElement(self, nums: List[int]) -> int:
        """Return the index of any peak (element greater than both
        neighbours) in O(log n) using iterative binary search."""
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[mid + 1]:
                # Descending at mid: a peak lies at mid or to its left.
                hi = mid
            else:
                # Ascending at mid: a peak lies strictly to the right.
                lo = mid + 1
        return lo
# 峰值元素是指其值大于左右相邻值的元素。
# 给定一个输入数组 nums,其中 nums[i] ≠ nums[i+1],找到峰值元素并返回其索引。
# 数组可能包含多个峰值,在这种情况下,返回任何一个峰值所在位置即可。
# 你可以假设 nums[-1] = nums[n] = -∞。
# 示例 1:
# 输入: nums = [1,2,3,1]
# 输出: 2
# 解释: 3 是峰值元素,你的函数应该返回其索引 2。
# 示例 2:
# 输入: nums = [1,2,1,3,5,6,4]
# 输出: 1 或 5
# 解释: 你的函数可以返回索引 1,其峰值元素为 2;
# 或者返回索引 5, 其峰值元素为 6。
# 说明:
# 你的解法应该是 O(logN) 时间复杂度的。
# 链接:https://leetcode-cn.com/problems/find-peak-element/ | [
"changwenhao1@qq.com"
] | changwenhao1@qq.com |
d5e7ae3bd1017599518278f12c78a1b1a2662ff3 | 4138376af721c583944882b68235746cd9637fd6 | /7/sunjiayin/cpNbnet.py | 305e2c1c4681006598eb80310af7c334d54f7acb | [] | no_license | hulaoan/homework-arch-5 | 9df792281b7ac92abc166ad80e69a5c2a59b2c9e | 1c1b07f8ebb1b2f9906c0cd29cef8227fed3c7fd | refs/heads/master | 2021-01-14T13:58:05.883628 | 2015-12-25T05:05:16 | 2015-12-25T05:05:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,361 | py | #!/usr/bin/env python
# coding:utf-8
import socket
import select
import time
import pdb
__all__ = ["nbNet"]
from nbNetUtils import *
class STATE:
    """Per-connection record for the non-blocking server's state machine."""

    def __init__(self):
        self.state = "accept"    # current state-machine phase
        self.have_read = 0       # bytes read so far
        self.need_read = 10      # bytes still to read (protocol header is 10 bytes)
        self.have_write = 0      # bytes written so far
        self.need_write = 0      # bytes still to write
        self.buff_read = ""      # inbound buffer
        self.buff_write = ""     # outbound buffer
        self.sock_obj = ""       # socket object owning this record

    def printState(self):
        # Dump the full connection state when the DEBUG flag is enabled.
        if DEBUG:
            dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
            dbgPrint(" - - state: %s" % self.state)
            dbgPrint(" - - have_read: %s" % self.have_read)
            dbgPrint(" - - need_read: %s" % self.need_read)
            dbgPrint(" - - have_write: %s" % self.have_write)
            dbgPrint(" - - need_write: %s" % self.need_write)
            dbgPrint(" - - buff_read: %s" % self.buff_read)
            dbgPrint(" - - buff_write: %s" % self.buff_write)
            dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() #实例化类
tmp_state.sock_obj = sock #定义类中sock
self.conn_state[sock.fileno()] = tmp_state #把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] #取出fd对应连接
sock = sock_state.sock_obj #取出fd的sock
conn, addr = sock.accept() #取出连接请求
conn.setblocking(0) #设置非阻塞模式
return conn #返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj #取出fd的sock
sock.close()#关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) #将fd重epoll中注销
self.conn_state.pop(fd) #踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] #取出fd对应连接
conn= sock_state.sock_obj #取出fd连接请求
if sock_state.need_read <= 0: #需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) #读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %(fd, one_read, sock_state.need_read))
if len(one_read) == 0: #读取数据为0报错
raise socket.error
sock_state.buff_read += one_read #把读取数据存到读缓存中
sock_state.have_read += len(one_read) #已经读取完的数据量
sock_state.need_read -= len(one_read) #还需要读取数据的量
sock_state.printState()
if sock_state.have_read == 10: #10字节为头文件处理
header_said_need_read = int(sock_state.have_read) #读取数据的量
if header_said_need_read <= 0: #如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read #还需读取数量变化
sock_state.buff_read = '' #读缓存清空
sock_state.printState()
return "readcontent" #还需读取数据
elif sock_state.need_read == 0:
return "process" #读取数据完成,转换状态
else:
return "readmore" #还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: #errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] #取出fd对应的连接构造体
conn = sock_state.sock_obj #取出fd对于连接
last_have_send = sock_state.have_write #已经写数据的量
try:
have_send = conn.send(sock_state.buff_write[last_have_send:]) #发送剩下的数据
sock_state.have_write += have_send #已经写的数据量
sock_state.need_write -= have_send #还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write !=0: #写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" #返回写入完成
else:
return "writemore" #返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() #定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] #取出fd构造体
if select.EPOLLHUP & events: #文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" #fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") #文件描述符出错
sock_state.state = "closing" #对应fd状态为closing
self.state_machine(fd) #状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] #fd构造体
self.sm[sock_state.state](fd) #通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} #定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) #定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() #初始化fd的epoll
self.epoll_sock.register(self.listen_sock.fileno(), select.EPOLLIN ) #linten可以读的描述符
self.logic = logic #业务处理
self.sm = {
"accept" : self.accept2read,
"read" : self.read2process,
"write" : self.write2read,
"process": self.process,
"closing": self.close,
} #状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %self.listen_sock.fileno() )
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) #业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) #发送的数据
sock_state.need_write = len(sock_state.buff_write) #需要发送的长度
sock_state.state = "write" #fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) #fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(conn.fileno(), select.EPOLLIN) #发送数据后重新将fd的epoll改成读
self.setFd(conn) #fd生成构造体
self.conn_state[conn.fileno()].state = "read" #fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
#状态转换
try:
read_ret = self.read(fd) #read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process":# 读取完成,转换到process
self.process(fd)
elif read_ret == "readcontent":# readcontent、readmore、retry 继续读取
pass
elif read_ret == "readmore":
pass
elif read_ret == "retry":
pass
elif read_ret == "closing":
self.conn_state[fd].state = 'closing' #状态为closing关闭连接
self.state_machine(fd)
else:
raise Exception("impossible state returned by self.read")
def write2read(self, fd):
try:
write_ret = self.write(fd) #函数write返回值
except socket.error, msg: #出错关闭连接
write_ret = "closing"
if write_ret == "writemore": #继续写
pass
elif write_ret == "writecomplete":#写完成
sock_state = self.conn_state[fd]
conn = sock_state.sock_obj
self.setFd(conn) #重置见连接fd构造体
self.conn_state[fd].state = "read" #将fd状态设置为read
self.epoll_sock.modify(fd, select.EPOLLIN) #epoll状态为可读
elif write_ret == "closing":# 发生错误关闭
dbgPrint(msg)
self.conn_state[fd].state = 'closing'
self.state_machine(fd)
if __name__ == '__main__':
    # Demo service: echo every request payload reversed, listening on :9060.
    def logic(d_in):
        # Business callback: reverse the request payload.
        return(d_in[::-1])
    reverseD = nbNet('0.0.0.0', 9060, logic)
    reverseD.run()
| [
"sunjiayin@teach.works"
] | sunjiayin@teach.works |
45b76c5185d0e6d5434ffd0717722d4e1b9aa0c1 | c744b20f4d5f4035dd81bf515f6e969a67299309 | /lists/migrations/0006_auto_20150825_1407.py | 34243587e4fd8a04e03184790c2e99036ba5781f | [] | no_license | jian-en/flyingjay-superlists-project | 14c94e16658e6aef76019847423b6fd0ac01eebe | 2c8ad9dfd26d68237b065797f3132872eb0cdaa5 | refs/heads/master | 2021-01-02T09:43:37.631559 | 2015-11-03T03:55:03 | 2015-11-03T03:55:03 | 40,744,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema migration: relax Item.text to an unconstrained TextField."""
    dependencies = [
        ('lists', '0005_auto_20150823_0227'),
    ]
    operations = [
        migrations.AlterField(
            model_name='item',
            name='text',
            field=models.TextField(),
        ),
    ]
| [
"fujian_en@126.com"
] | fujian_en@126.com |
55c5e4126f52501d3ab1f9cd4f9c49c47dc30d18 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/ZXR10-MACPING-MIB.py | 805cbd59b0fb3a90dcafa3b37ef03e6abdf405d0 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 12,798 | py | #
# PySNMP MIB module ZXR10-MACPING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZXR10-MACPING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:42:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
iso, Bits, ModuleIdentity, Gauge32, Unsigned32, enterprises, IpAddress, Counter32, experimental, ObjectIdentity, MibIdentifier, NotificationType, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, mgmt, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Bits", "ModuleIdentity", "Gauge32", "Unsigned32", "enterprises", "IpAddress", "Counter32", "experimental", "ObjectIdentity", "MibIdentifier", "NotificationType", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "mgmt", "Counter64")
TruthValue, DisplayString, RowStatus, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "RowStatus", "MacAddress", "TextualConvention")
zxr10L2vpn, = mibBuilder.importSymbols("ZXR10-SMI", "zxr10L2vpn")
zxr10MacPingMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4))
class DisplayString(OctetString):
    # NOTE(review): pysmi-generated textual convention. This local class
    # shadows the DisplayString imported from SNMPv2-TC above -- confirm the
    # shadowing is intended (pysmi emits this when the MIB redefines the TC).
    pass
class OptionType(Integer32):
    # Enumerated INTEGER textual convention:
    # 0 = ce (customer edge), 1 = pe (provider edge).
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
    namedValues = NamedValues(("ce", 0), ("pe", 1))
zxr10MacPingTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1), )
if mibBuilder.loadTexts: zxr10MacPingTable.setStatus('current')
zxr10MacPingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacSerial"))
if mibBuilder.loadTexts: zxr10MacPingEntry.setStatus('current')
zxr10PingMacSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacSerial.setStatus('current')
zxr10PingMacDestMac = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 2), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacDestMac.setStatus('current')
zxr10PingMacControlOutEtherIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlOutEtherIf.setStatus('current')
zxr10PingMacIfOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("option", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfOption.setStatus('current')
zxr10PingMacPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPacketCount.setStatus('current')
zxr10PingMacTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTimeOut.setStatus('current')
zxr10PingMacHops = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacHops.setStatus('current')
zxr10PingMacControlResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlResultType.setStatus('current')
zxr10PingMacTrapOncompletion = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 9), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTrapOncompletion.setStatus('current')
zxr10PingMacRosStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("not-active", 1), ("start-ping", 2), ("ping-processing", 3), ("ping-completed", 4))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacRosStatus.setStatus('current')
zxr10PingMacEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacEntryOwner.setStatus('current')
zxr10PingMacIfPeOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 12), OptionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfPeOption.setStatus('current')
zxr10PingMacVfiName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacVfiName.setStatus('current')
zxr10PingMacPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 14), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPeerAddress.setStatus('current')
zxr10PingMacResultTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2), )
if mibBuilder.loadTexts: zxr10PingMacResultTable.setStatus('current')
zxr10pingMacResultEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"))
if mibBuilder.loadTexts: zxr10pingMacResultEntry.setStatus('current')
zxr10PingMacResultSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSerial.setStatus('current')
zxr10PingMacResultSentPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSentPkts.setStatus('current')
zxr10PingMacResultRcvPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRcvPkts.setStatus('current')
zxr10PingMacResultRoundTripMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMinTime.setStatus('current')
zxr10PingMacResultRoundTripMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMaxTime.setStatus('current')
zxr10PingMacResultRoundTripAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripAvgTime.setStatus('current')
zxr10PingMacResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultType.setStatus('current')
zxr10PingMacExtResultDestIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestIfName.setStatus('current')
zxr10PingMacExtResultDestHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestHostName.setStatus('current')
zxr10PingMacExtResultSourceIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceIfName.setStatus('current')
zxr10PingMacExtResultSourceHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceHostName.setStatus('current')
zxr10PingMacExtResultOutVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultOutVlanId.setStatus('current')
zxr10PingMacExtResultInVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultInVlanId.setStatus('current')
zxr10PingMacResultEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultEntryOwner.setStatus('current')
zxr10PingMacResultRoundWobbleMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMinTime.setStatus('current')
zxr10PingMacResultRoundWobbleMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMaxTime.setStatus('current')
zxr10PingMacResultRoundWobbleAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleAvgTime.setStatus('current')
macpingNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3))
macpingTrapResult = NotificationType((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3, 1)).setObjects(("ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultSentPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRcvPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMinTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMaxTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripAvgTime"))
if mibBuilder.loadTexts: macpingTrapResult.setStatus('current')
mibBuilder.exportSymbols("ZXR10-MACPING-MIB", zxr10PingMacResultRoundTripAvgTime=zxr10PingMacResultRoundTripAvgTime, zxr10MacPingMIB=zxr10MacPingMIB, zxr10PingMacPeerAddress=zxr10PingMacPeerAddress, zxr10PingMacTimeOut=zxr10PingMacTimeOut, macpingNotifications=macpingNotifications, zxr10PingMacEntryOwner=zxr10PingMacEntryOwner, zxr10PingMacRosStatus=zxr10PingMacRosStatus, zxr10PingMacIfOption=zxr10PingMacIfOption, zxr10PingMacResultRoundWobbleAvgTime=zxr10PingMacResultRoundWobbleAvgTime, zxr10PingMacResultTable=zxr10PingMacResultTable, OptionType=OptionType, zxr10MacPingTable=zxr10MacPingTable, zxr10PingMacPacketCount=zxr10PingMacPacketCount, zxr10PingMacResultRcvPkts=zxr10PingMacResultRcvPkts, zxr10PingMacSerial=zxr10PingMacSerial, zxr10pingMacResultEntry=zxr10pingMacResultEntry, zxr10PingMacResultRoundWobbleMinTime=zxr10PingMacResultRoundWobbleMinTime, zxr10PingMacResultRoundTripMinTime=zxr10PingMacResultRoundTripMinTime, zxr10MacPingEntry=zxr10MacPingEntry, zxr10PingMacHops=zxr10PingMacHops, zxr10PingMacIfPeOption=zxr10PingMacIfPeOption, zxr10PingMacResultSerial=zxr10PingMacResultSerial, DisplayString=DisplayString, zxr10PingMacExtResultSourceHostName=zxr10PingMacExtResultSourceHostName, zxr10PingMacResultEntryOwner=zxr10PingMacResultEntryOwner, zxr10PingMacControlOutEtherIf=zxr10PingMacControlOutEtherIf, zxr10PingMacResultSentPkts=zxr10PingMacResultSentPkts, zxr10PingMacResultType=zxr10PingMacResultType, zxr10PingMacResultRoundWobbleMaxTime=zxr10PingMacResultRoundWobbleMaxTime, zxr10PingMacResultRoundTripMaxTime=zxr10PingMacResultRoundTripMaxTime, zxr10PingMacExtResultDestIfName=zxr10PingMacExtResultDestIfName, zxr10PingMacExtResultDestHostName=zxr10PingMacExtResultDestHostName, macpingTrapResult=macpingTrapResult, zxr10PingMacVfiName=zxr10PingMacVfiName, zxr10PingMacExtResultOutVlanId=zxr10PingMacExtResultOutVlanId, zxr10PingMacExtResultSourceIfName=zxr10PingMacExtResultSourceIfName, zxr10PingMacControlResultType=zxr10PingMacControlResultType, 
zxr10PingMacExtResultInVlanId=zxr10PingMacExtResultInVlanId, zxr10PingMacDestMac=zxr10PingMacDestMac, zxr10PingMacTrapOncompletion=zxr10PingMacTrapOncompletion)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
e8f79267ba52969b4af0a0f02f9340977750ba24 | 5002ec313e12d6e5f58d5ef41ea265084ff96373 | /信息收集工具/modular/Subdomain_name_query.py | ff3d469ff26e6418b763ef974be8e1beb300a2bd | [] | no_license | IVorder/python | 9a8dc46d69fb9b5c3d65509348595623b8d47a8a | 6b60a13dda471ed3f1380b6bf014a33f185e6033 | refs/heads/master | 2020-06-21T22:43:41.838924 | 2019-07-18T10:21:28 | 2019-07-18T10:21:28 | 197,569,599 | 10 | 4 | null | 2019-07-18T10:55:49 | 2019-07-18T10:55:47 | null | UTF-8 | Python | false | false | 2,369 | py | # @author:九世
# @time:2019/7/2
# @file:mian.py
from gevent import monkey;monkey.patch_all()
import requests
import config.config
import warnings
import gevent
from multiprocessing import Process
import dns.resolver
from bs4 import BeautifulSoup
from gevent.lock import RLock
warnings.simplefilter("ignore", category=UserWarning)
domains=[]
lock=RLock()
def domain_query():
    """Decorator factory: before invoking the wrapped function, scrape
    site.ip138.com for subdomains of the queried domain and collect them
    into the module-level ``domains`` list."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            print('\033[1;32m[+]\033[0m 域名查询:')
            headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
            url = 'http://site.ip138.com/{}/domain.htm'.format(*args)
            response = requests.get(url=url, headers=headers)
            links = BeautifulSoup(response.text, 'html.parser').find_all('a', target='_blank')
            for link in links:
                # Keep only anchors that mention the queried domain.
                if str(*args) in str(link):
                    domains.append(link.get_text())
            return func(*args, **kwargs)
        return wrapper
    return decorator
def domain_baopo():
    """Decorator factory: build the brute-force candidate list before the
    wrapped function runs.

    Every word in dict/domain.txt is prefixed onto the target domain
    ("<word>.<domain>"); the resulting list replaces the positional
    arguments of the wrapped function.

    Fixes over the original: the wordlist file is closed deterministically
    (``with open``) and the module-level lock is released even if reading
    raises — previously an exception left the lock held forever and the
    file handle leaked.
    """
    def wrter(func):
        def bp(*args, **kwargs):
            lock.acquire()
            try:
                dp = []
                with open(r'dict/domain.txt', 'r', encoding='utf-8') as dk:
                    for d in dk.readlines():
                        # Strip the newline and glue the word onto the domain.
                        dp.append("{}.{}".format("".join(d.split('\n')), *args))
            finally:
                lock.release()
            return func(dp, **kwargs)
        return bp
    return wrter
@domain_query()
def run(url):
    # No body needed: the @domain_query decorator performs the whole lookup
    # and records results in the module-level `domains` list.
    pass
def dns_b(domain):
    # Resolve `domain` to A records; a successful resolution means the name
    # exists, so record it.  The bare except is deliberate best-effort:
    # NXDOMAIN/timeouts simply mean "candidate does not exist".
    # NOTE(review): the name is appended once per A record, so multi-homed
    # hosts appear several times in `domains` (cat() dedupes via set()).
    try:
        querys=dns.resolver.query(domain,'A')
        for q in querys:
            domains.append(domain)
    except:
        pass
def xc(rg):
    # Resolve every candidate in `rg` concurrently with gevent greenlets
    # (runs inside a worker Process spawned by run2).
    rt=[]
    try:
        for r in rg:
            rt.append(gevent.spawn(dns_b,r))
        gevent.joinall(rt)
    except:
        # Best-effort: a failed batch must not kill the worker process.
        pass
@domain_baopo()
def run2(url):
    # `url` is the candidate list built by @domain_baopo.  Candidates are
    # batched into groups of config.SUBDOMAIN and each batch is handed to a
    # separate Process running xc() for concurrent DNS resolution.
    print('\033[1;32m[+]\033[0m 字典爆破域名开始')
    rw=[]
    calc=0
    for c in url:
        if calc==config.config.SUBDOMAIN:
            # Batch full: hand it off (Process pickles a copy of rw, so the
            # clear() below cannot race with the child).
            p=Process(target=xc,args=(rw,))
            p.start()
            calc=0
            rw.clear()
        rw.append(c)
        calc+=1
    # Flush the final, possibly partial batch.
    if len(rw)>0:
        p = Process(target=xc, args=(rw,))
        p.start()
    # NOTE(review): xc() appends to `domains` inside the child process;
    # those appends are not visible in the parent's `domains` list —
    # confirm how results are meant to flow back (queue/pipe?).
def cat():
    # Print the collected results, de-duplicated via a set (order is lost).
    qc=list(set(domains))
    for q in qc:
print(q) | [
"noreply@github.com"
] | IVorder.noreply@github.com |
f716de44a80a10f01bfaa8b3a8d58b4ec092c945 | dbe1f4110921a08cb13e22ea325d503bd5627195 | /chuhuo_2.71/bluedon/monitor/sbin/checkproc.py | cd3521785adb14ce48baf65ec961b05655ab0e50 | [] | no_license | Hehouhua/waf_branches | 92dc1b1cbecba20f24ef6c7372dde7caa43f9158 | ca76f3a1ed8150b423474c9e37aee37841a5ee35 | refs/heads/main | 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import os, re, sys
rexplogstart = re.compile(r'grep logstart.pl')
rexpwebvisit = re.compile(r'grep webvisit.pl')
def checklogstart():
    """Make sure the bdwaf log directories exist, then start logstart.pl
    if no live instance shows up in the process list.

    Every `ps ax | grep logstart.pl` output line that still contains the
    literal text "grep logstart.pl" is an artifact of the pipeline itself;
    if *all* lines are artifacts the real collector is not running.
    """
    # os.system blocks until mkdir finishes and leaks no pipe handle
    # (the original os.popen returned an unread, unclosed pipe and did
    # not wait for the command to complete).
    if not os.path.exists("/usr/local/bdwaf/logs_bridge/data"):
        os.system("mkdir -p /usr/local/bdwaf/logs_bridge/data")
    if not os.path.exists("/usr/local/bdwaf/logs_proxy/data"):
        os.system("mkdir -p /usr/local/bdwaf/logs_proxy/data")
    flag = 0
    pfp = os.popen('ps ax | grep logstart.pl')
    try:
        lines = pfp.readlines()
    finally:
        pfp.close()  # the original leaked this pipe
    for line in lines:
        if rexplogstart.search(line):
            flag += 1
    if flag >= len(lines):
        os.system('/usr/local/bluedon/monitor/sbin/logstart.pl')
def checkwebvisit():
    """Start webvisit.pl if it is not already running (same artifact-counting
    strategy as checklogstart()).

    Bug fix: the original matched each line against `rexplogstart` — the
    logstart.pl pattern — so the grep artifacts of *this* pipeline were
    never counted; use `rexpwebvisit` as clearly intended.  Also closes the
    os.popen pipe that was previously leaked.
    """
    flag = 0
    pfp = os.popen('ps ax | grep webvisit.pl')
    try:
        lines = pfp.readlines()
    finally:
        pfp.close()
    for line in lines:
        if rexpwebvisit.search(line):
            flag += 1
    if flag >= len(lines):
        os.system('/usr/local/bluedon/monitor/sbin/webvisit.pl')
if __name__ == '__main__':
    # Intended to be run periodically (e.g. from cron): each invocation
    # revives either daemon if it has died.
    checklogstart()
    checkwebvisit()
| [
"hanson_wong@qq.com"
] | hanson_wong@qq.com |
98d80763957c0adf4a839f4d123400647c1b2d7f | 950fd350aba8c7584b8f362b2e5079b5010a1f6a | /lib/Sockets.py | aeb577b91be8e75da756909611e728e080dff370 | [] | no_license | entr0pist/fakeircd | 96814755b0b2041bc14db8f942680c47f5ea56b0 | 43a88be91aa6337e1eacaeadaa20dcdb2bccd3a2 | refs/heads/master | 2020-06-07T10:34:36.562878 | 2015-11-10T04:02:38 | 2015-11-10T04:02:38 | 42,418,758 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | from lib import config
from lib import linechat
from lib.User import User
class Sockets:
    """Facade over a single linechat.Serve() event loop that owns every
    listening socket and keeps them in sync with the `listen` section of
    the configuration."""
    def __init__(self):
        self.server = linechat.Serve()
    # Thin delegation to the underlying Serve() instance.
    def add_sock(self, sock):
        self.server.add_sock(sock)
    def rm_sock(self, sock):
        self.server.rm_sock(sock)
    def serve(self):
        self.server.serve()
    def spawn_all(self):
        """Reconcile listeners with configuration: open a socket for every
        configured (address, port) not yet listening, then drop listeners
        that no longer appear in the configuration."""
        for server in config.get(None, 'listen'):
            # Already listening on this address/port -> nothing to do.
            if self.server.sock_by_address(server['bind_address'], server['bind_port']):
                continue
            # 'ssl' is optional per listen entry; default to plaintext.
            ssl = False
            if 'ssl' in server:
                ssl = server['ssl']
            s = linechat.Server(User, port=server['bind_port'],
                hostname=server['bind_address'], ssl=ssl)
            self.server.add_sock(s)
        for server in self.server.socks:
            try:
                sock = server.sock.getsockname()
            except:
                # NOTE(review): a socket without a name aborts the whole
                # cleanup pass (return, not continue) — confirm intended.
                return
            if not config.get_listen_by_host_port(sock):
                self.server.rm_sock_by_address(*sock)
    def shutdown_all(self):
        self.server.close_all()
# Module-level singleton shared by the rest of the package.
sockets = Sockets()
| [
"entr0pist@users.noreply.github.com"
] | entr0pist@users.noreply.github.com |
672f47dbc06ff7e663a43bfdf34432fe9a92e2f4 | 5875c68d4e34193b9e565a6f34469612cfdc649c | /pyMap_0.9.4/pyCursors.py | a63f9c2bdf12abc465b5df4d587e61b1599a645e | [] | no_license | Naxs-me/Software_development_tycoon | 59d7059fb21b1655b05ad0057e17033603ec7377 | b8a6166589a6231e607001ef84f927d2d15792c0 | refs/heads/master | 2020-12-15T00:13:25.496993 | 2020-01-19T16:01:03 | 2020-01-19T16:01:03 | 234,924,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | #31 lines of code (7/21/2012)
import pygame
import os
#the images size must be a multiple of 8
#the image must contain only 3 colors
#(0,0,0)black, (255,255,255)white, (255,0,255)tranparent(pink)
def set_cursor_from_image(image, hotspot = (0,0)):
    """Load `image` with pygame and install it as the mouse cursor.

    Both image dimensions must be multiples of 8 (pygame cursor format
    requirement) and the image may contain only pure white, pure black and
    pink (255,0,255) pixels, which map to 'X', '.' and ' ' respectively in
    the cursor-string format.  Images failing the size check are silently
    ignored.  (Python 2 code: uses xrange.)
    """
    #if os.path.isfile((cwd+'/'+image)):
    img = pygame.image.load(image).convert()
    w,h = img.get_size()
    strings = []
    size = (w,h)
    if w%8 == 0 and h%8 == 0:
        black = pygame.Color(0,0,0,255)
        white = pygame.Color(255,255,255,255)
        trans = pygame.Color(255,0,255,255)
        img.lock()  # lock once for fast repeated get_at() access
        for r in xrange(0, w):
            pix_str = ""
            for c in xrange(0, h):
                # NOTE(review): (r, c) walks x as the outer index, so each
                # cursor-row string is built from an image *column*; this
                # transposes non-square cursors — confirm against callers.
                color = img.get_at((r,c))
                if color == white:
                    pix_str += 'X'
                if color == black:
                    pix_str += '.'
                if color == trans:
                    pix_str += ' '
            strings.append(pix_str)
        img.unlock()
        new_cursor = pygame.cursors.compile(strings)
        pygame.mouse.set_cursor(size, hotspot, *new_cursor)
| [
"naxs.me@gmail.com"
] | naxs.me@gmail.com |
7ef2579880b9b7ec614ed66ecd323b2e3604e749 | 6eaca1b3ada96264bdad964652c19365f982025a | /QPainter/__init__.py | 0a9a28d278c61ebd50c91b5166dc7748582e2115 | [] | no_license | RahulARanger/My_Qt-Py_Book | 4c7e4dfc9a1d1ec8a587d3bbb722fc64f6de1008 | 396280e9110d11c9c297bf83f332411b98c98453 | refs/heads/master | 2023-08-15T01:42:33.415854 | 2021-10-01T19:44:50 | 2021-10-01T19:44:50 | 320,230,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | import RashSetup.__RashModules__.Rash.ApplicationManager
from .MemeGen import *
class UTIL(TabWindow):
    """Tab plugin that embeds the SpongeBob meme generator into Rash."""
    def __init__(self, shared: dict):
        # `shared` carries application-wide singletons; "RASH" is the
        # running RashMain instance the TabWindow base needs for setup.
        Rash: RashSetup.__RashModules__.Rash.ApplicationManager.RashMain = shared["RASH"]
        super().__init__(Rash)
        self.Generator = MemeGenerator(self)
        self.easeAdd(self.Generator, "SpongeBob")
| [
"saihanumarahul66@gmail.com"
] | saihanumarahul66@gmail.com |
9d15855256587b846eda68310ac6b8af5d598e25 | eb91e8711243b70b14c38f17dbc7951dab430d2a | /run.py | 8e560403e372b20fd1127318b95c7d69ec841267 | [
"MIT"
] | permissive | LittlePanic/Flask-Vue-Singlepage-Project | 718d96390df99ee0f3654f8578073501ce5e3092 | 3d8ddad9dd2a4a41e76e3e248f31a4505801ea83 | refs/heads/master | 2022-12-23T19:36:06.992444 | 2018-07-05T03:38:59 | 2018-07-05T03:38:59 | 139,595,156 | 0 | 1 | MIT | 2022-12-16T22:20:15 | 2018-07-03T14:26:41 | Python | UTF-8 | Python | false | false | 80 | py | from backend.app import app
if __name__ == "__main__":
app.run(debug=True)
| [
"2177890574@qq.com"
] | 2177890574@qq.com |
185a3393a192094de5e11ae5133799e98d58a651 | 9b04206109e36d5f4f7cc4820546546ac239c5e0 | /greedy/ATM_problem.py | 39cc9ea015a03ed7d3442b6e7512c88cda49fc4d | [] | no_license | joon3007/Algorithm | 28417fffde40a79aac54375b57b31071dcf6bc4d | e45b6379f67272db0997156deca5713aa2113348 | refs/heads/master | 2022-12-14T01:33:25.050675 | 2020-09-09T12:36:02 | 2020-09-09T12:36:02 | 291,960,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | '''
description
인하은행에는 ATM이 1대밖에 없다. 지금 이 ATM앞에 N명의 사람들이 줄을 서있다.
사람은 1번부터 N번까지 번호가 매겨져 있으며, i번 사람이 돈을 인출하는데 걸리는 시간은 Pi분이다.
사람들이 줄을 서는 순서에 따라서, 돈을 인출하는데 필요한 시간의 합이 달라지게 된다.
예를 들어, 총 5명이 있고, P1 = 3, P2 = 1, P3 = 4, P4 = 3, P5 = 2 인 경우를 생각해보자.
[1, 2, 3, 4, 5] 순서로 줄을 선다면, 1번 사람은 3분만에 돈을 뽑을 수 있다.
2번 사람은 1번 사람이 돈을 뽑을 때 까지 기다려야 하기 때문에, 3+1 = 4분이 걸리게 된다.
3번 사람은 1번, 2번 사람이 돈을 뽑을 때까지 기다려야 하기 때문에, 총 3+1+4 = 8분이 필요하게 된다.
4번 사람은 3+1+4+3 = 11분, 5번 사람은 3+1+4+3+2 = 13분이 걸리게 된다.
이 경우에 각 사람이 돈을 인출하는데 필요한 시간의 합은 3+4+8+11+13 = 39분이 된다.
줄을 [2, 5, 1, 4, 3] 순서로 줄을 서면, 2번 사람은 1분만에, 5번 사람은 1+2 = 3분,
1번 사람은 1+2+3 = 6분, 4번 사람은 1+2+3+3 = 9분, 3번 사람은 1+2+3+3+4 = 13분이 걸리게 된다.
각 사람이 돈을 인출하는데 필요한 시간의 합은 1+3+6+9+13 = 32분이다.
이 방법보다 더 필요한 시간의 합을 최소로 만들 수는 없다.
줄을 서 있는 사람의 수 N과 각 사람이 돈을 인출하는데 걸리는 시간 Pi가 주어졌을 때,
각 사람이 돈을 인출하는데 필요한 시간의 합의 최솟값을 구하는 프로그램을 작성하시오.
input
첫째 줄에 사람의 수 N(1 ≤ N ≤ 1,000)이 주어진다. 둘째 줄에는 각 사람이 돈을 인출하는데 걸리는 시간 Pi가 주어진다. (1 ≤ Pi ≤ 1,000)
output
첫째 줄에 각 사람이 돈을 인출하는데 필요한 시간의 합의 최솟값을 출력한다.
'''
num = int(input())
times = list(map(int, input().split()))
times.sort()
result = 0
time = 0
for i in times:
time += i
result += time
print(result) | [
"joon4141@gmail.com"
] | joon4141@gmail.com |
d732b74a12857a9cfedd5615c35c20fd705c8355 | b05e271e498ab231c8e6fd650826cb98a1887c5f | /main.py | 59838bcf3d74bddadd669b317a56301dacea99a9 | [
"MIT"
] | permissive | tian409/joint-computation-offloading-and-resource-allocation | 1074e6bee92303757561a0b6a6dfee8663584f3f | 13e68b71c8e9ae7347a82294a355266c3ce28a81 | refs/heads/master | 2023-04-03T15:08:49.180165 | 2021-04-04T05:37:46 | 2021-04-04T05:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | # -*- coding: utf-8 -*-
import copy, json, argparse
import torch
from scenario import Scenario
from agent import Agent
from dotdic import DotDic
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def create_agents(opt, sce, scenario, device):
agents = [] # Vector of agents
for i in range(opt.nagents):
agents.append(Agent(opt, sce, scenario, index=i, device=device)) # Initialization, create a CNet for each agent
return agents
def run_episodes(opt, sce, agents, scenario):
global_step = 0
nepisode = 0
action = torch.zeros(opt.nagents,dtype=int)
reward = torch.zeros(opt.nagents)
QoS = torch.zeros(opt.nagents)
state_target = torch.ones(opt.nagents) # The QoS requirement
f= open("DDPG.csv","w+")
f.write("This includes the running steps:\n")
while nepisode < opt.nepisodes:
state = torch.zeros(opt.nagents) # Reset the state
next_state = torch.zeros(opt.nagents) # Reset the next_state
nstep = 0
while nstep < opt.nsteps:
eps_threshold = opt.eps_min + opt.eps_increment * nstep * (nepisode + 1)
if eps_threshold > opt.eps_max:
eps_threshold = opt.eps_max # Linear increasing epsilon
# eps_threshold = opt.eps_min + (opt.eps_max - opt.eps_min) * np.exp(-1. * nstep * (nepisode + 1)/opt.eps_decay)
# Exponential decay epsilon
for i in range(opt.nagents):
action[i] = agents[i].Select_Action(state, scenario, eps_threshold) # Select action
for i in range(opt.nagents):
QoS[i], reward[i] = agents[i].Get_Reward(action, action[i], state, scenario) # Obtain reward and next state
next_state[i] = QoS[i]
for i in range(opt.nagents):
agents[i].Save_Transition(state, action[i], next_state, reward[i], scenario) # Save the state transition
agents[i].Optimize_Model() # Train the model
if nstep % opt.nupdate == 0: # Update the target network for a period
agents[i].Target_Update()
state = copy.deepcopy(next_state) # State transits
if torch.all(state.eq(state_target)): # If QoS is satisified, break
break
nstep += 1
print('Episode Number:', nepisode, 'Training Step:', nstep)
# print('Final State:', state)
f.write("%i \n" % nstep)
nepisode += 1
f.close()
def run_trial(opt, sce):
scenario = Scenario(sce)
agents = create_agents(opt, sce, scenario, device) # Initialization
run_episodes(opt, sce, agents, scenario)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c1', '--config_path1', type=str, help='path to existing scenarios file')
parser.add_argument('-c2', '--config_path2', type=str, help='path to existing options file')
parser.add_argument('-n', '--ntrials', type=int, default=1, help='number of trials to run')
args = parser.parse_args()
sce = DotDic(json.loads(open(args.config_path1, 'r').read()))
opt = DotDic(json.loads(open(args.config_path2, 'r').read())) # Load the configuration file as arguments
for i in range(args.ntrials):
trial_result_path = None
trial_opt = copy.deepcopy(opt)
trial_sce = copy.deepcopy(sce)
run_trial(trial_opt, trial_sce)
| [
"fenghao2018@bupt.edu.cn"
] | fenghao2018@bupt.edu.cn |
dc95cfc1d53773ef74245ed5c8a5b6bbbf3ce933 | 65e076e4fcc00a67faa0932b3f3a3d3a3a11e2aa | /sdk/python/pulumi_google_native/datastore/v1/_enums.py | 15df09472641b2ebbeb23bd87aeab08fb357fbf9 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | TheJaySmith-Google/pulumi-google-native | 816babe5c7316724e02d5b8b9d789df00262bb8e | 566c295a39fe8c3dd16e4a7894ff6de72423e5da | refs/heads/master | 2023-06-05T06:45:19.979837 | 2021-06-23T11:42:27 | 2021-06-23T11:42:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'GoogleDatastoreAdminV1IndexedPropertyDirection',
'IndexAncestor',
]
class GoogleDatastoreAdminV1IndexedPropertyDirection(str, Enum):
    """
    Required. The indexed property's direction. Must not be DIRECTION_UNSPECIFIED.
    """
    # Generated code: the str mixin makes members serialize as their
    # literal API string values.
    DIRECTION_UNSPECIFIED = "DIRECTION_UNSPECIFIED"
    ASCENDING = "ASCENDING"
    DESCENDING = "DESCENDING"
class IndexAncestor(str, Enum):
    """
    Required. The index's ancestor mode. Must not be ANCESTOR_MODE_UNSPECIFIED.
    """
    # Generated code: the str mixin makes members serialize as their
    # literal API string values.
    ANCESTOR_MODE_UNSPECIFIED = "ANCESTOR_MODE_UNSPECIFIED"
    NONE = "NONE"
    ALL_ANCESTORS = "ALL_ANCESTORS"
| [
"noreply@github.com"
] | TheJaySmith-Google.noreply@github.com |
7bf8347897e39eb95aac73a02b6b6f56d93586c6 | d2fb817130e9d8f40dc25fec5e8e5e7d42f91ec7 | /scons_gbd_docs/Gbd/Docs/Mkdocs/MkdocsBuild.py | a54edcf9ea65abd0a9e048337b5f47f23b444f26 | [
"MIT"
] | permissive | ASoftTech/Scons.Gbd.Docs | 1d8a32aed7a4b43186ea661baee6fef1832eb266 | 4d9fb7585d9565f57306774efb4342fe9b8822f2 | refs/heads/master | 2020-03-08T12:58:35.290077 | 2018-05-28T20:48:23 | 2018-05-28T20:48:23 | 128,145,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | """
This tool will generate the documentation output as html
using markdown files as an input via mkdocs to an output directory
"""
from __future__ import (division, print_function,
absolute_import, unicode_literals)
import SCons.Script
from SCons.Environment import Environment
import os
import sys
import os.path as path
from scons_gbd_docs.Gbd.Docs.Mkdocs.Common import MkdocsCommon
from scons_gbd_docs.Gbd.Docs.Mkdocs.Common.MkdocsConfig import MkdocsConfig
from SCons.Script import Builder
def exists(env):
    """Check if we're okay to load this builder"""
    # Delegates to the shared helper that detects the mkdocs tooling.
    return MkdocsCommon.detect(env)
def generate(env):
    """Called when the tool is loaded into the environment at startup of script"""
    assert(exists(env))
    # Create the configuration holder once per SCons environment.
    if 'Mkdocs_Config' not in env:
        env['Mkdocs_Config'] = MkdocsConfig(env)
        env['Mkdocs_Config'].set_defaults()
    # Scanner that discovers the markdown sources referenced by the config.
    scanner = env.Scanner(
        MkdocsCommon.scanner,
        name='MkdocsScanner'
    )
    bld = Builder(
        action=__Build_func,
        emitter=MkdocsCommon.emitter,
        source_scanner=scanner,
    )
    env.Append(BUILDERS={'MkdocsBuild': bld})
def __Build_func(target, source, env):
    """Actual builder that does the work after the SConstruct file is parsed.

    Assembles a ``mkdocs build`` command line from the Mkdocs_Config options
    (source[0] is the mkdocs.yml config file) and executes it in the
    configured working directory.
    """
    cfg = env['Mkdocs_Config']
    assert isinstance(cfg, MkdocsConfig)
    cmdopts = [cfg.Exe, 'build']
    cmdopts.append('--config-file=' + str(source[0]))
    # --clean and --dirty are mutually exclusive; CleanBuild is a boolean,
    # so a plain else replaces the original redundant `elif not` branch.
    if cfg.CleanBuild:
        cmdopts.append('--clean')
    else:
        cmdopts.append('--dirty')
    if cfg.Strict:
        cmdopts.append('--strict')
    # The $-variables below are expanded by SCons when the action runs.
    if cfg.Theme:
        cmdopts.append('--theme=$Mkdocs_Theme')
    if cfg.CustomDir:
        cmdopts.append('--theme-dir=$Mkdocs_CustomDir')
    if env['Mkdocs_SiteDir'] is not None:
        cmdopts.append('--site-dir=$Mkdocs_SiteDir')
    if cfg.Quiet:
        cmdopts.append('--quiet')
    if cfg.Verbose:
        cmdopts.append('--verbose')
    cmdopts = cmdopts + cfg.ExtraArgs
    print('Building MkDocs Documentation:')
    env.Execute(env.Action([cmdopts], chdir=cfg.WorkingDir))
| [
"garlicbready@googlemail.com"
] | garlicbready@googlemail.com |
aafbc6488301d7e48ce363affc42a6a4fdd24a02 | 5fa4b8a36eec770bd740b6016030d2843cac8329 | /trial_scripts/do_multiprocessing.py | e3269fc1eac7ab4e43440377e0b0e23ed103b1c8 | [] | no_license | sysang/word-prepresentation-training | 79ffe4355b2f66dfd7c09625cc430dd65815c937 | 79565d8f69c31f4938f079517db7ff7c53ec54aa | refs/heads/master | 2022-12-22T10:22:52.649259 | 2020-10-03T17:04:08 | 2020-10-03T17:04:08 | 293,590,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | from multiprocessing import Process
from multiprocessing.sharedctypes import RawValue
import ctypes
def f(n):
    # Child-process target: overwrite the shared value's payload.
    n.value = 'hello!!'
if __name__ == '__main__':
    num = RawValue(ctypes.c_wchar_p, 'abc')
    p = Process(target=f, args=(num,))
    p.start()
    p.join()
    # NOTE(review): per the multiprocessing.sharedctypes docs, c_char_p /
    # c_wchar_p store a raw *pointer*; a string assigned in the child points
    # into the child's address space, so this read may not yield 'hello!!'.
    # This trial script appears to be probing exactly that behaviour.
    print(num.value)
| [
"daosysang@gmail.com"
] | daosysang@gmail.com |
20dcb6e05c6420b481455112a093bca40a513956 | a219c9b0f3ccd1b35c3bb7bb3c7b50e1d9d8ef93 | /arasınav_tbb_s4.py | ce88476ccc8238735b3aadf7d040888c661fa98e | [] | no_license | f0xmulder/python_ornekleri | 3293541b5d4e594dc39e6df623e47ecd4e5e94c2 | d1ebbcefdd7390a4e20a61864b150097f9919e29 | refs/heads/master | 2022-11-04T07:12:20.766931 | 2017-06-22T13:30:45 | 2017-06-22T13:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # Soru 4
element = ""
tur = -1
cikti = ""
def turOgren(deger):
    """Classify a single character by its ASCII code.

    Returns 2 for an uppercase letter, 1 for a lowercase letter and 0 for a
    decimal digit; any other character yields None (falls through).

    Bug fix: the digit range previously started at ord('1') == 49, so '0'
    was never classified and compounds such as "C10" lost their zeros; the
    range now starts at ord('0') == 48.
    """
    if ord(deger) >= 65 and ord(deger) < 91:      # 'A'..'Z'
        return 2
    elif ord(deger) >= 97 and ord(deger) < 123:   # 'a'..'z'
        return 1
    elif ord(deger) >= 48 and ord(deger) < 58:    # '0'..'9'
        return 0
def elementAyristir(element):  # split one element token (e.g. "H2") into its symbol and atom count, then report it
    transElement = ""
    adet = ""
    # Letters belong to the symbol, digits to the count.
    for j in element:
        tur = turOgren(j)
        if tur == 2 or tur == 1:
            transElement = transElement + j
        elif tur == 0:
            adet = adet + j
    if adet == "":  # no digits after the symbol means exactly one atom
        adet = "1"
    print transElement,"elementinden",adet,"tane var"
while (True):
    giris=raw_input("element giriniz: ")
    # Scan the compound character by character: an uppercase letter starts a
    # new element token, lowercase letters and digits extend the current one.
    for i in giris:
        tur = turOgren(i)
        if tur == 2:  # uppercase letter: flush the previous token, start a new one
            if element == "":
                element = i
            else:
                elementAyristir(element)
                element = i
        elif tur == 1 :  # lowercase letter
            element = element + i
        elif tur == 0:  # digit
            element = element + i
    # Flush the final token and reset state for the next compound.
    elementAyristir(element)
    element = ""
    tur = -1
| [
"noreply@github.com"
] | f0xmulder.noreply@github.com |
ea6bb392af9c9e6b8d6c5ecb56a68b0cb11577a6 | 7040d642877f70360ca88a065ccf92b3c63dfd7b | /剑指 Offer 18. 删除链表的节点.py | f351503d1cc241f162b76a62e9ddfe892195285b | [
"BSD-2-Clause"
] | permissive | YuLili-git/leetcode_offer | 077fb1864f1c8e3258f5b9f065b7c0e71c8ccf8f | 268940aa4e57a02fe635b7d6f6038f2b204ca968 | refs/heads/main | 2023-08-24T19:07:37.650616 | 2021-10-13T16:07:28 | 2021-10-13T16:07:28 | 370,324,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | #给定单向链表的头指针和一个要删除的节点的值,定义一个函数删除该节点。
#返回删除后的链表的头节点。
#注意:此题对比原题有改动
#示例 1:
#输入: head = [4,5,1,9], val = 5
#输出: [4,1,9]
#解释: 给定你链表中值为 5 的第二个节点,那么在调用了你的函数之后,该链表应变为 4 -> 1 -> 9.
#示例 2:
#输入: head = [4,5,1,9], val = 1
#输出: [4,5,9]
#解释: 给定你链表中值为 1 的第三个节点,那么在调用了你的函数之后,该链表应变为 4 -> 5 -> 9.
#说明:
#题目保证链表中节点的值互不相同
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteNode(self, head: ListNode, val: int) -> ListNode:
        """Remove the single node whose value equals `val` and return the
        (possibly new) head of the list.  The problem guarantees all node
        values are distinct."""
        # Deleting the head needs no predecessor bookkeeping.
        if head.val == val:
            return head.next
        prev, node = head, head.next
        # Walk forward until `node` is the target (or the list ends).
        while node is not None and node.val != val:
            prev, node = node, node.next
        # Unlink the target if it was found.
        if node is not None:
            prev.next = node.next
        return head
| [
"noreply@github.com"
] | YuLili-git.noreply@github.com |
37307f0abd5565002723b66dd7bdb750cebcbf2a | 69a4e83cad7b3d5e5f35761e7223002a6940d061 | /2/2.py | 98627f4f26b66f99efa3bfbffdaddc29b90b2d8d | [] | no_license | c0mr4d3/adventofcode2020 | 408d01863b1b94872c77ab1b75e210c7b975574c | 6e506d4b170e045643ffdbd095b4a209721670ec | refs/heads/main | 2023-01-21T15:25:22.486170 | 2020-12-04T07:38:13 | 2020-12-04T07:38:13 | 317,858,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | arr = [x[:-1] for x in open("/home/comrade/Funstuff/adventofcode2020/2/input.txt").readlines()]
count = 0
# Advent of Code 2020, day 2 part two: a line looks like "1-3 a: abcde";
# a password is valid when the given character appears at exactly one of
# the two 1-based positions.
# NOTE(review): `arr` above strips the last character of every line; if the
# input file lacks a trailing newline the last password is clipped — verify.
for s in arr:
    maxm = int(s[s.index("-")+1:s.index(" ")])
    minm = int(s[:s.index("-")])
    chrr = s[s.index(" ")+1]
    pas = s[s.index(": ")+2:]
    # != on two booleans is exclusive-or.
    if (pas[minm-1]==chrr) != (pas[maxm-1]==chrr):
        count+=1
print(count)
| [
"siddharthsingh.17june@gmail.com"
] | siddharthsingh.17june@gmail.com |
ad6320700a9871fd710ca5dc3b06b8878292f571 | 45a5c06c89d84e689b528ebd05f982914dc9f0f2 | /rl_bolts/buffers.py | a53f82d1a6403bd000f4ecf561fe9bcbc8924a79 | [
"Apache-2.0"
] | permissive | jfpettit/rl_bolts | be0f2e56af3bab2effd5c0a0723b5eb13050fa2a | c3c3b3f91ee192048912fd48f2655b46526918a7 | refs/heads/master | 2022-11-30T15:53:32.316481 | 2020-08-14T05:45:47 | 2020-08-14T05:45:47 | 285,760,715 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,576 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_buffers.ipynb (unless otherwise specified).
__all__ = ['PGBuffer', 'ReplayBuffer']
# Cell
import numpy as np
from scipy.signal import lfilter
from typing import Optional, Any, Union
import torch
import gym
# Cell
class PGBuffer:
"""
A buffer for storing trajectories experienced by an agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
This class was written by Joshua Achaim at OpenAI. It was adapted to use PyTorch Tensors instead of NumPy arrays for the
observations and actions.
Args:
- obs_dim (tuple or int): Dimensionality of input feature space.
- act_dim (tuple or int): Dimensionality of action space.
- size (int): buffer size.
- gamma (float): reward discount factor.
- lam (float): Lambda parameter for GAE-Lambda advantage estimation
"""
def __init__(
self,
obs_dim: Union[tuple, int],
act_dim: Union[tuple, int],
size: int,
gamma: Optional[float] = 0.99,
lam: Optional[float] = 0.95,
):
self.obs_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
self.act_buf = torch.zeros(self._combined_shape(size, act_dim), dtype=torch.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(
self,
obs: torch.Tensor,
act: torch.Tensor,
rew: Union[int, float, np.array],
val: Union[int, float, np.array],
logp: Union[float, np.array],
):
"""
Append one timestep of agent-environment interaction to the buffer.
Args:
- obs (torch.Tensor): Current observation to store.
- act (torch.Tensor): Current action.
- rew (int or float or np.array): Current reward from environment.
- val (int or float or np.array): Value estimate for the current state.
- logp (float or np.array): log probability of chosen action under current policy distribution.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val: Optional[Union[int, float, np.array]] = 0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
Args:
- last_val (int or float or np.array): Estimate of rewards-to-go. If trajectory ended, is 0.
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = self._discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
Returns:
- obs_buf (torch.Tensor): Buffer of observations collected.
- act_buf (torch.Tensor): Buffer of actions taken.
- adv_buf (torch.Tensor): Advantage calculations.
- ret_buf (torch.Tensor): Buffer of earned returns.
- logp_buf (torch.Tensor): Buffer of log probabilities of selected actions.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the line implement the advantage normalization trick
adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + 1e-8)
return [
self.obs_buf,
self.act_buf,
torch.as_tensor(self.adv_buf, dtype=torch.float32),
torch.as_tensor(self.ret_buf, dtype=torch.float32),
torch.as_tensor(self.logp_buf, dtype=torch.float32)
]
def _combined_shape(
self, length: Union[int, np.array], shape: Optional[Union[int, tuple]] = None
):
"""
Return tuple of combined shapes from input length and tuple describing shape.
Args:
- length (int or np.array): Length of resultant shape.
- shape (int or tuple): Other shape dimensions to combine.
Returns:
- tuple of shape dimensions
"""
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def _discount_cumsum(self, x: np.array, discount: float):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
# Cell
class ReplayBuffer(PGBuffer):
"""
A replay buffer for off-policy RL agents.
This class is borrowed from OpenAI's SpinningUp package: https://spinningup.openai.com/en/latest/
Args:
- obs_dim (tuple or int): Dimensionality of input feature space.
- act_dim (tuple or int): Dimensionality of action space.
- size (int): buffer size.
"""
def __init__(
self, obs_dim: Union[tuple, int], act_dim: Union[tuple, int], size: int
):
self.obs1_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
self.obs2_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
self.act_buf = torch.zeros(self._combined_shape(size, act_dim), dtype=torch.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(
self,
obs: torch.Tensor,
act: Union[float, int, torch.Tensor],
rew: Union[float, int],
next_obs: torch.Tensor,
done: bool,
):
"""
Append one timestep of agent-environment interaction to the buffer.
Args:
- obs (torch.Tensor): Current observations.
- act (float or int or torch.Tensor): Current action.
- rew (float or int): Current reward
- next_obs (torch.Tensor): Observations from next environment step.
- done (bool): Whether the episode has reached a terminal state.
"""
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample_batch(self, batch_size: Optional[int] = 32):
"""
Sample a batch of agent-environment interaction from the buffer.
Args:
- batch_size (int): Number of interactions to sample for the batch.
Returns:
- tuple of batch tensors
"""
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(
obs=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs],
)
return tuple(torch.as_tensor(v, dtype=torch.float32) for _, v in batch.items())
def get(self):
"""
Get all contents of the batch.
Returns:
- list of PyTorch Tensors; full contents of the buffer.
"""
return [
torch.as_tensor(self.obs1_buf, dtype=torch.float32),
torch.as_tensor(self.obs2_buf, dtype=torch.float32),
torch.as_tensor(self.act_buf, dtype=torch.float32),
torch.as_tensor(self.rew_buf, dtype=torch.float32),
torch.as_tensor(self.done_buf, dtype=torch.float32)
] | [
"jfpettit@gmail.com"
] | jfpettit@gmail.com |
39042a14dedf3d1a3d6e06d5f15a0915493b8514 | 66a967fac0bc5dfdfe28ad0fd5464ed9113429bd | /HobbyCoding/src/ListPermutation.py | 6e6d6a9ef2a7a7f0a2211dc22bed93437611220c | [
"Apache-2.0"
] | permissive | inspectorG4dget/Hobby-Coding | a37430320e7a74805bc7740933e217d004fa9714 | 41e82dbcc73e328b43bebd037b2df414f0837ca6 | refs/heads/master | 2020-12-24T17:17:37.589058 | 2012-07-10T05:18:56 | 2012-07-10T05:18:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | '''
Created on Oct 4, 2010
@author: ashwin
Licensed to Ashwin Panchapakesan under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
Ashwin licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def permute(L):
if L == []:
return []
else:
for i in range(len(L)-1):
a = [L[i]]
b = L[:i]
c = L[i+1 :]
print "B:", b
print "C:", c
d = b + c
return a + permute(d)
def includeMembers(L):
if not L:
return L
else:
for i in L[0]:
includeMembers(L[1:])[-1] += i
if __name__ == "__main__":
print includeMembers(['asdf', 'jkl;']) | [
"topgunzurhero@gmail.com"
] | topgunzurhero@gmail.com |
ce978aea403ff050f84bd8c5e869fff0a69f22c8 | fc22d8e8178aa4a47d360f1c83990ee8be1fc20e | /tools/md5_function.py | d2ce3e93b1ac9467b50883af0188b3663e7af8bb | [] | no_license | moujiangliu/interface | a13b5ebe86439f2bae55cbecd02ab5e65a77288b | b6e968271cb9bd1287a9b4950a6ccb69a7720036 | refs/heads/master | 2023-02-03T08:56:43.205534 | 2020-12-25T17:05:02 | 2020-12-25T17:05:02 | 323,383,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | # -*- coding:utf-8 -*-
import base64
import hashlib
class Secret(object):
'''
实现各种加密方式
'''
def __init__(self, string):
self._string = string.encode('utf-8')
def md5(self):
'''
md5加密方法
:return:
'''
try:
sign = hashlib.md5(self._string).hexdigest()
return sign
except:
return False
def sha1(self):
'''
实现sha1的加密方法
:return:
'''
try:
sign = hashlib.sha1(self._string).hexdigest()
return sign
except:
return False
def base64encode(self):
'''
实现一个base64 encode的方法封装
'''
try:
sign = base64.b64encode(self._string).decode('utf-8')
return sign
except:
return False
def base64decode(self):
'''
base64 decode的方法封装 (解码)
:return:
'''
try:
sign = base64.b64decode(self._string).decode('utf-8')
return sign
except:
return False
| [
"moujiang.liu@aliyun.com"
] | moujiang.liu@aliyun.com |
90f284e04501a00ff62afab5f4d11a2ad546a865 | 54dbbf0b3dd9ace6e3b51cb2632ae1d9302ea529 | /编程小白的第一本 Python 入门书/类.py | d34cba3071a14e5a5166c402a9777084329ebe7a | [] | no_license | zzxmona/pythontrain | c42f0bb89f31fea3149b21db38f74f03f3872946 | afcfa9ba533b52adef86d51e98cc96abb3a627d5 | refs/heads/master | 2023-04-30T20:28:44.239500 | 2021-05-31T01:27:49 | 2021-05-31T01:27:49 | 364,789,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | # 类的实例化最好加()以免报错
class z:
    # Class attributes, shared by every instance.
    con = [1, 2, 3, 4]
    name = 'zzx'
    # The double leading underscore triggers name mangling (stored as
    # ``_z__name``), so this does not clash with ``name`` above and is not
    # reachable as ``z.__name`` from outside the class.
    __name = 'zzx'
abc = z()
print(abc.con)
print(z.name)
class two:
    def __init__(self, final):
        # Bind the constructor argument to the instance.
        self.x = final
    def name2(self):
        # ``print`` with two arguments -> "zzx 22".
        print('zzx', '22')
    def name3(self):
        # Returns a value instead of printing it.
        return 'zzx'
    def name4(self, name5):
        # Echo the caller-supplied argument.
        print(name5)
x = two('xxx')
print(x.x)
x.name2()
x.name4('zzx name5')
class three():
    def __init__(self):
        # Instance attribute set at construction time.
        self.name = 'zzx'
    def age(self):
        # Returned as a string, matching the demo style of this file.
        return '22'
test3 = three()
print(test3.name)
print(test3.age())
class CocaCola:
    """Demo class: a shared class attribute consumed by ``__init__``."""

    formula = ['caffeine', 'sugar', 'water', 'soda']

    def __init__(self):
        # Announce every ingredient as the bottle is "built".
        messages = ('Coke has {}!'.format(item) for item in self.formula)
        for line in messages:
            print(line)

    def drink(self):
        print('Energy!')


coke = CocaCola()
class CocaCola2():
    # Shared across all instances.
    formula = ['caffeine', 'sugar', 'water', 'soda']
    def __init__(self, logo_name):
        # Per-instance attribute alongside the shared ``formula`` list.
        self.local_logo = logo_name
    def drink(self):
        print('Energy!')
coke2 = CocaCola2('可口可乐')
print(coke2.local_logo)
print(coke2.formula)
class five():
    """Demo: class attributes plus one instance attribute set in __init__."""

    name = 'zzx'
    age = '22'
    sex = '男'

    def __init__(self, id):
        # ``id`` shadows the builtin here; parameter name kept for callers.
        self.id = id

    def lie(self):
        # Print all four attributes on one space-separated line.
        fields = (self.id, self.name, self.age, self.sex)
        print('{} {} {} {}'.format(*fields))


f = five(201732110226)
f.lie()
class jcfive(five):
    # Inherits name/age/sex, __init__ and lie() from ``five``.
    test = 'test'
    def five2(self):
        # Uses the subclass-only attribute.
        print(self.test)
jcfive1 = jcfive('zjnu')
jcfive1.lie()
jcfive1.five2()
class te1():
    """Base class providing a value-returning method."""

    def tes1(self):
        result = 'tes1'
        return result


class te2(te1):
    """Subclass that inherits ``tes1`` and adds a printing method."""

    def tes2(self):
        print('tes2')


t2 = te2()
print(t2.tes1())
class TestA:
    """Demo: instance attributes shadow the class attribute of one name."""

    attr = 1  # class-level default, shadowed per instance in __init__

    def __init__(self):
        self.name = 'zzx'
        self.attr = 33

    def rename(self):
        # Always hands back the same demo value.
        return 'zzx'


obj_a = TestA()
print(obj_a.attr)
obj_a.attr = 42
obj_a.name = 'zx'
print(obj_a.attr, obj_a.name)
print(obj_a.rename())
| [
"2577625924@qq.com"
] | 2577625924@qq.com |
fb2e193a24ae586d0c3d286e0fec5f4ca52eaf14 | 674f1ecdd8a196b5a271b556ed7e4d274fde63a1 | /article/migrations/0002_auto_20161129_2304.py | 65e17784a9ab696ab9749961108d38c587c88ee8 | [] | no_license | baby5/Django-Blog | fc57c06bac110c56662bcea20eb9c18579d20827 | 1e2f1a8b0589d87dea023d7e6d78376d0880ca27 | refs/heads/master | 2021-01-13T13:19:13.836449 | 2016-12-20T10:49:23 | 2016-12-20T10:49:23 | 72,647,232 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 15:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration for the ``article`` app.  It creates
    # the Category model, renames Article.date_time -> created_time, adds
    # abstract/last_modified_time/likes/status/topped/views fields, sets
    # default ordering, and repoints Article.category at the new model.
    # Operation order matters; do not reorder by hand.
    dependencies = [
        ('article', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('last_modified_time', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.AlterModelOptions(
            name='article',
            options={'ordering': ['-last_modified_time']},
        ),
        migrations.RenameField(
            model_name='article',
            old_name='date_time',
            new_name='created_time',
        ),
        migrations.AddField(
            model_name='article',
            name='abstract',
            field=models.CharField(blank=True, help_text=b'arbitrary', max_length=54, null=True),
        ),
        migrations.AddField(
            model_name='article',
            name='last_modified_time',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='article',
            name='likes',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='article',
            name='status',
            field=models.CharField(choices=[(b'd', b'Draft'), (b'p', b'Published')], default=b'd', max_length=1),
        ),
        migrations.AddField(
            model_name='article',
            name='topped',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='article',
            name='views',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='article',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='article.Category'),
        ),
    ]
| [
"zxnzysj@163.com"
] | zxnzysj@163.com |
04ae589706bee6d73d70525a05dd97e1c16387fc | bf45d6fe3d0c6ee6e74c0c63c4206eee72361383 | /sketchit/draw.py | 58ebbb85db158fb5ff66bb82afb2a06c4ddb2b3d | [
"MIT"
] | permissive | tambibhavika2000/sketchme | 00d6273b5b4523dc8a1e5f3d22fd58790af80896 | 00c7ccff4531d48fb5ef2c403c4bb0e0b1c749bd | refs/heads/main | 2023-07-13T06:32:13.071137 | 2021-09-01T12:58:01 | 2021-09-01T12:58:01 | 402,060,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import cv2
def sketchit(path, out_path='sketch.png'):
    """Convert the image at ``path`` into a pencil-sketch image file.

    Classic "dodge" technique: grayscale, invert, Gaussian blur, invert
    again, then divide the grayscale by the inverted blur to bring out
    edges.

    path: input image file readable by ``cv2.imread``.
    out_path: where to write the sketch (default kept for backward
        compatibility with existing callers).
    """
    image = cv2.imread(path)
    # NOTE(review): cv2.imread returns None for unreadable paths, which
    # would make cvtColor raise -- confirm callers pass valid image files.
    grey_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    invert = cv2.bitwise_not(grey_img)
    blur = cv2.GaussianBlur(invert, (21, 21), 0)
    invertedblur = cv2.bitwise_not(blur)
    # Dodge blend: divide grayscale by the inverted blur (scaled to 0-255).
    sketch = cv2.divide(grey_img, invertedblur, scale=256.0)
    cv2.imwrite(out_path, sketch)
# NOTE(review): runs at import time; an ``if __name__ == "__main__":``
# guard would be safer -- confirm nothing imports this module.
path=input("Enter Path of Image: ")
sketchit(path)
| [
"noreply@github.com"
] | tambibhavika2000.noreply@github.com |
ef168493665590dfa9c2c362d6e87e14550a7162 | 1e1ab6aba8ab3d05fe61df3b6a5fabbcdd00676a | /e_commerce_app/api/migrations/0002_remove_event_redundancy.py | 86c973a2de49034f1f646a2664d9eaf5bda0ec1e | [] | no_license | Batuhanipekci/E-Commerce | 4f548f3e59cfa68c422f91419a53dadf175dcad3 | 45350d74e344686f619c1f9c50dac08e8c6eebe2 | refs/heads/master | 2023-06-02T01:05:44.647508 | 2021-06-22T20:19:13 | 2021-06-22T20:19:13 | 378,535,014 | 1 | 0 | null | 2021-06-22T20:19:14 | 2021-06-20T01:26:24 | Python | UTF-8 | Python | false | false | 533 | py | # Generated by Django 3.0.7 on 2021-06-20 22:41
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the redundant ``event`` foreign key
    # from KRDetailsView, KRTransaction and KRCounter.
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='krdetailsview',
            name='event',
        ),
        migrations.RemoveField(
            model_name='krtransaction',
            name='event',
        ),
        migrations.RemoveField(
            model_name='krcounter',
            name='event',
        ),
    ]
| [
"batuhanipekci@hotmail.com"
] | batuhanipekci@hotmail.com |
246ec729ab0710529af7fd9594413b7242ed91fb | aba0b5002c040fa1b20bae5d7ac81c601395901f | /vistrails/packages/pandas/identifiers.py | 63685ce6e7907a39552e23085e227ce9fd8bac89 | [
"BSD-3-Clause"
] | permissive | skylogic004/VisTrails | 2673ca04160e776db17811d98b070f70e1d2e385 | bc0d95ceac6e75d6ffb083e8cdab8c62a90d4b00 | refs/heads/master | 2021-06-23T01:16:16.697903 | 2017-08-24T21:28:33 | 2017-08-24T21:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from __future__ import division, print_function
# VisTrails package metadata: globally unique package identifier and the
# human-readable name advertised to the package manager.
identifier = 'org.vistrails.vistrails.pandas'
name = 'pandas'
version = '0.0.1' | [
"matt@skylogic.ca"
] | matt@skylogic.ca |
aa1a467cc3e72429fddfc6663939baa04bc9e374 | bc073560803464da166d661e916d21ad51b2c80e | /files/scripts/contact_detector.py | 5ac2e00abc742896c576349cf11dd4b994ec5bc7 | [] | no_license | SDU-Embedded/event_processors | 680edb4a8107a2661407f43be933795ef0a1e987 | bdea5bbcab7d39f7b1746d1f391c494ffa0fd39d | refs/heads/master | 2021-07-26T21:41:26.831474 | 2020-05-04T07:03:53 | 2020-05-04T07:03:53 | 165,830,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from event_processors import EventProcessor
from event_listeners import PerchEventListener
from state_monitors import LinearStateMonitor
from metric_processors import ProbabilityProcessor
from thresholders import Thresholder
from event_builders import EventBuilder
from event_emitters import EventEmitter
if __name__ == "__main__":
    # Bird-1 contact-detection pipeline: perch events -> linear state
    # monitor -> probability metric -> thresholder -> event builder ->
    # emitter, all scheduled by EventProcessor.
    cage1_event_listener = PerchEventListener('manna,hou,bisnap', 'ats_perch', bird=1)
    #cage2_event_listener = PerchEventListener('manna,hou,bisnap','ats_perch',bird=2 )
    # Setup event listeners
    #cage1_event_listener = PerchEventListener( servers='manna,hou,bisnap', topic='perch_sensor', bird=1 )
    #cage2_event_listener = PerchEventListener( servers='manna,hou,bisnap', topic='perch_sendor', bird=2, debug=True )
    # Setup state monitors
    cage1_state_monitor = LinearStateMonitor(period=0.1, upwards_gain=0.1, downwards_gain=0.5)
    #cage2_state_monitor = LinearStateMonitor( period=0.1, upwards_gain=0.1, downwards_gain=0.5 )
    cage1_event_listener.stateTransitionCallback = cage1_state_monitor.setState
    #cage2_event_listener.stateTransitionCallback = cage2_state_monitor.setState
    # Setup metric processor
    metric_processor = ProbabilityProcessor(period=0.1)
    metric_processor.getters.append(cage1_state_monitor.getProbability)
    #metric_processor.getters.append( cage2_state_monitor.getProbability )
    # Setup thresholders
    thresholder = Thresholder(upwards_threshold=0.45, downwards_threshold=0.15)
    metric_processor.setters.append(thresholder.evaluate)
    # Setup event builders
    builder = EventBuilder(bird="1", type="ats_contact")
    thresholder.emitEvent = builder.evaluate
    # Setup event emitters
    emitter = EventEmitter('manna,hou,bisnap', 'ats_contact')
    builder.send = emitter.send
    # Setup and run event processor
    event_processor = EventProcessor()
    event_processor.tasks.append(cage1_event_listener)
    # BUGFIX: the cage-2 listener/monitor constructions above are commented
    # out, so appending them here raised NameError at startup; the appends
    # are disabled to match.  Re-enable both sides together.
    #event_processor.tasks.append(cage2_event_listener)
    event_processor.tasks.append(cage1_state_monitor)
    #event_processor.tasks.append(cage2_state_monitor)
    event_processor.tasks.append(metric_processor)
    event_processor.run()
# Kept for reference: earlier two-level / bout pipeline experiments.
#event_processor.tasks.append( TwoLevelStateMonitor(period=0.01, upwards_gain=0.03, downwards_gain=0.005) )
#event_processor.tasks.append( OnOffEventListener(servers, 'power', event_processor.tasks[-1].setState) )
#event_processor.tasks.append( TwoLevelStateMonitor(period=0.01, upwards_gain=0.03, downwards_gain=0.005) )
#event_processor.tasks.append( OnOffEventListener(servers, 'entropy', event_processor.tasks[-1].setState) )
#event_processor.tasks.append( ProbabilityProcessor( servers=servers, topic='bout', upwards_threshold=0.85, downwards_threshold=0.5, period=0.01, bird="1", type="bout" ) )
#event_processor.tasks[-1].getters.append( event_processor.tasks[0].getProbability )
#event_processor.tasks[-1].getters.append( event_processor.tasks[2].getProbability )
#event_processor.run()
| [
"lelar09@student.sdu.dk"
] | lelar09@student.sdu.dk |
eba5e24cb7ae539f05831d88b27d99b2346a8f0a | ec9129d3eb1880df9f0b54c76510352a7e004b0c | /tools/make_vps_tarball.py | b03537feaa59ec1a6a93c522cfd621963bf12eba | [] | no_license | eugen-don/vps | 4057e6ddb1db274dbd8d78fa926376cfc3a40aa7 | 6a16569868241b35d8137b7f2b2f8db0cf67ff55 | refs/heads/master | 2021-01-11T16:29:53.109075 | 2014-05-14T09:20:33 | 2014-05-14T09:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
import sys
import os
import _env
import ops.os_init as os_init
import conf
assert conf.OS_IMAGE_DIR and os.path.isdir(conf.OS_IMAGE_DIR)
def usage():
    # Print the command-line help for this script (Python 2 print
    # statement; argv[0] is the script name).
    print """usage: \n%s [image_path/partion_path] [tarball_dir]
    """ % (sys.argv[0])
def main():
if len(sys.argv) < 3:
usage()
os._exit(0)
img_path = sys.argv[1]
tarball_dir = sys.argv[2]
if not os.path.exists(img_path):
print "%s not exists" % (img_path)
os._exit(1)
if not os.path.isdir(tarball_dir):
print '%s is not a directory' % (tarball_dir)
os._exit(1)
tarball_path = os_init.pack_vps_fs_tarball(img_path, tarball_dir)
print "%s packed in %s" % (img_path, tarball_path)
if "__main__" == __name__:
main()
| [
"frostyplanet@gmail.com"
] | frostyplanet@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.