blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44c9b9b86cd6eaf6d79ee96831b7c9820294d832 | a94d55cbc23c43febe97cc0e72cdc296f4579656 | /DATA/scripts/areas/updatetable_areas.py | acf8554e8e888cc2f553c8d99efb95eb205a43b4 | [] | no_license | SergioCardenasS/Control-de-Contratos | 383a80889aab797d3881941b88f24ad32a5d3ed7 | 339699cc93b77193372ad8ec09ca25ed938b0c49 | refs/heads/master | 2020-05-21T14:07:54.178386 | 2019-04-26T17:25:10 | 2019-04-26T17:25:10 | 54,640,060 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Import de Librerias
import sys
from PyQt4 import QtGui
#Import de Modulos
BASE_DIR='../..'
sys.path.insert(0,BASE_DIR)
from constants import *
from models import area
db = get_connection()
cursor = db.cursor()

# New password for every area; an empty string resets the stored password.
NEW_AREA_CONTROL_PASS = ""
NEW_AREA_COMERCIAL_PASS = ""
AREA_NEW_ABASTECIMIENTOS_PASS = ""
NEW_AREA_DESARROLLO_PASS = ""
NEW_AREA_INGENIERIA_PASS = ""
NEW_AREA_PLANIFICACION_PASS = ""

# (area id, area name, new password) rows to refresh, driven by the shared
# constants module.  The previous version repeated the Area(...)/update(...)
# pair six times; a single loop performs the identical sequence without the
# copy/paste risk.
AREA_UPDATES = (
    (AREA_CONTROL_ID, AREA_CONTROL_NAME, NEW_AREA_CONTROL_PASS),
    (AREA_COMERCIAL_ID, AREA_COMERCIAL_NAME, NEW_AREA_COMERCIAL_PASS),
    (AREA_ABASTECIMIENTOS_ID, AREA_ABASTECIMIENTOS_NAME, AREA_NEW_ABASTECIMIENTOS_PASS),
    (AREA_DESARROLLO_ID, AREA_DESARROLLO_NAME, NEW_AREA_DESARROLLO_PASS),
    (AREA_INGENIERIA_ID, AREA_INGENIERIA_NAME, NEW_AREA_INGENIERIA_PASS),
    (AREA_PLANIFICACION_ID, AREA_PLANIFICACION_NAME, NEW_AREA_PLANIFICACION_PASS),
)

for area_id, area_name, area_pass in AREA_UPDATES:
    # area.Area takes a single [id, name, password] list, as before.
    new_area = area.Area([area_id, area_name, area_pass])
    new_area.update(cursor)

# Persist all updates in one commit, then release the connection.
db.commit()
db.close()
| [
"christian.benavides@ucsp.edu.pe"
] | christian.benavides@ucsp.edu.pe |
74db189a08942b3b8203106539c834233266f656 | fffecbc273ebe498437f743d4295dd963471b20e | /apps/travelApp/apps.py | 5459849f73efd21df4ff16b7016b7ab542bed208 | [] | no_license | currbear91/TravelApp | a1efe57d0243fc3e288cd71576ea8c67a36b5080 | c92d950a5e51b5a7dc48f641737f86bd96f35011 | refs/heads/master | 2021-01-20T20:27:55.794098 | 2016-07-29T22:26:30 | 2016-07-29T22:26:30 | 64,174,837 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class TravelappConfig(AppConfig):
    """Django application configuration for the travelApp app."""
    name = 'travelApp'
| [
"Currbear@Currans-MBP.hsd1.wa.comcast.net"
] | Currbear@Currans-MBP.hsd1.wa.comcast.net |
3d64de308e279de8404995dae364cfe5ff14d58b | 9e36ca3d308a21a485bcc3bf752ee4a82ecc6e7d | /Lista1.py | 62303743c622ec9404940ebda3b704f5b6d08b96 | [] | no_license | Jelowis/DEBER15 | e507c89d52d449af4f8592d407fba163f699e691 | 140cfec44011980e882d518d0e4e561fb7908822 | refs/heads/main | 2023-08-19T04:01:20.508645 | 2021-09-13T03:55:54 | 2021-09-13T03:55:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | # Leonardo Altamirano Retto
# 3 Semestre Software
class Lista:
    """Fixed-capacity list supporting append and insert-after-match.

    Bugs fixed relative to the original:
      * the search loop condition was ``and enc`` (the loop never ran,
        so insertar() always returned False); it must be ``and not enc``,
      * ``i=i=+1`` assigned the constant +1 to ``i`` instead of
        incrementing it,
      * the insertion slice used ``self.lista[:1]`` instead of
        ``self.lista[:i]``.
    """

    def __init__(self, tamanio=4):
        self.lista = []         # backing storage
        self.longuitud = 0      # current number of elements
        self.size = tamanio     # maximum capacity enforced by append()

    def insertar(self, valor):
        """Search for *valor*; if found, insert a copy right after it.

        Returns True when the value was found (and inserted), else False.
        """
        i = 0
        enc = False
        # Linear sentinel search: stop at the end or at the first match.
        while i < len(self.lista) and not enc:
            if self.lista[i] == valor:
                enc = True
            i += 1
        if enc:
            # i already points one past the match, so this inserts after it.
            self.lista = self.lista[:i] + [valor] + self.lista[i:]
            self.longuitud += 1
        return enc

    def append(self, dato):
        """Append *dato* if there is free capacity; report success."""
        if self.longuitud < self.size:
            self.lista += [dato]
            self.longuitud += 1
            return True
        else:
            return False
# Quick manual check: fill a capacity-4 list, then try to insert a
# duplicate right after the existing 5 and print whether it was found.
lista1 = Lista()
lista1.append(2)
lista1.append(5)
lista1.append(20)
print(lista1.insertar(5))
"noreply@github.com"
] | Jelowis.noreply@github.com |
26726fa6fa874f79a109c4fc897e9f4671bd5ca7 | 439386f9097632d44d31d1f599df76ec2820d072 | /性能项目/统一大厅常规checklist/1601/DFQP/src/uilib/exchange_page.py | 100b9e01ac952280dbe97665969e45d263c46165 | [] | no_license | YiFeng0755/testcase | 33693f0940a6497aa40e2e51a0535c9eb6c12b29 | edc19480c3e94cbcbf004aa9d20099ec6d1b9304 | refs/heads/master | 2020-04-28T04:34:28.232022 | 2019-03-11T11:13:25 | 2019-03-11T11:13:25 | 146,287,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#Author: MindyZhang
'''
兑换奖品场景
'''
from appiumcenter.element import Element
class Exchange_Page(Element):
    """Page object for the prize-exchange screen.

    All element lookup/interaction helpers are inherited from Element;
    no page-specific members are defined yet.
    """
    pass
"YoungLiu@boyaa.com"
] | YoungLiu@boyaa.com |
e62cbb42232b3721884213d58342e6477fa8faa9 | 3a1dec7cb8d6da7779580f3ada03d1f7d035656e | /pubsub/monitor/monitor.py | 7d956933bfc6cb0d95782fd7ff4bf1545c997d21 | [] | no_license | tekulvw/modularsecurity | 37b4d91c1696a3930cf1c00765243f51d1bd5d58 | b812d1dd3dc45f78ebcc141b6823d5ee5b2330bd | refs/heads/master | 2020-07-22T06:22:54.249803 | 2017-07-24T00:59:32 | 2017-07-24T00:59:32 | 94,344,902 | 0 | 0 | null | 2017-07-25T17:13:17 | 2017-06-14T15:17:14 | JavaScript | UTF-8 | Python | false | false | 1,781 | py | import json
import base64
from typing import List
from urllib.parse import urlparse
from flask import current_app, request
from storage.getter import get_data
from datastore import devicedatatype
from twilio_util import notify_number
from datastore.device import maybe_update_is_connected
def data_event_handler():
    """Handle a Pub/Sub push containing a DeviceData entry; return 204."""
    payload = request.get_json()
    # The Pub/Sub envelope carries the JSON-encoded DeviceData entry as a
    # base64 'data' field inside 'message'.
    envelope = payload.get('message')
    decoded = base64.b64decode(envelope.get('data')).decode('utf-8')
    device_data_json = json.loads(decoded)
    device_id = int(device_data_json.get("device_id"))
    maybe_update_is_connected(device_id)
    # Route by the device's declared data type; a missing (None) type entity
    # is treated the same as a door sensor.
    data_type_entity = devicedatatype.from_device_id(device_id)
    treat_as_door = (data_type_entity is None
                     or data_type_entity['type_name'] == "door")
    if treat_as_door:
        handle_door(device_data_json)
    return '', 204
def handle_door(data: dict):
    """Alarm the configured phones when a door device reports 'open'.

    *data* is the decoded DeviceData JSON (keys: system_id, location,
    phones, previous).  NOTE(review): the module comment above says the
    alarm should fire on a closed->open transition, but this code alarms
    whenever the *current* reading is open; the previous frames are only
    used as an emptiness guard — confirm intended behavior.
    """
    system_id = data['system_id']  # NOTE(review): currently unused
    curr_location = data['location']
    parsed_loc = urlparse(curr_location)
    phones = data['phones']
    prev_frames = data['previous']
    prev_locations = [f['location']
                      for f in prev_frames]
    if len(prev_locations) == 0:
        # Can't compare to anything so get out
        return
    # Fetch and decode the current reading stored at the location's path.
    curr_data = get_data(parsed_loc.path)
    data_str = curr_data.decode('utf-8')
    curr_json = json.loads(data_str)
    if curr_json.get('open') is True:
        raise_alarm(phones)
def raise_alarm(numbers: List[str]):
    """Send an alarm notification to every phone number given."""
    for phone_number in numbers:
        notify_number(phone_number)
# TODO: make use of grace period etc
| [
"tekulve.will@gmail.com"
] | tekulve.will@gmail.com |
fc08ad8ba2843ed878a2b2c386ad9a4e9801aef1 | 97fff6d80c7c3689ef1859d4928a0575a827d7f6 | /Python/5-DIP/DIP-Solucao/interfaces/IClienteRepository.py | cfbc3235c30f9d802738234d7820381cac669462 | [] | no_license | thiagomsilvestre/SOLID | db61fcc63e1ad135a451a8f372cbc442ee0ff71d | 8ac9448851d2d792ace3f986346272a6ff889b94 | refs/heads/master | 2021-07-02T21:36:29.304848 | 2020-09-21T21:12:00 | 2020-09-21T21:12:00 | 171,590,025 | 3 | 3 | null | 2020-09-21T21:12:02 | 2019-02-20T02:56:36 | null | UTF-8 | Python | false | false | 501 | py | import abc
import sys
from os import path
sys.path.append(path.join(path.dirname(__file__), '..'))
from ..Cliente import Cliente
class IClienteRepository(metaclass=abc.ABCMeta):
    """Structural interface for repositories that can persist clients."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # A class counts as a subclass when it exposes a callable
        # 'adicionarCliente'; otherwise defer to the default machinery.
        candidate = getattr(subclass, 'adicionarCliente', None)
        return callable(candidate) or NotImplemented

    @abc.abstractmethod
    def adicionarCliente(self, cliente: Cliente):
        """Persist *cliente* in the underlying store."""
        raise NotImplementedError
| [
"mateusfiori@Mateuss-MacBook-Pro.local"
] | mateusfiori@Mateuss-MacBook-Pro.local |
2860ff4b77c74263c721195210a5b83c94a501b9 | f3917b41c8549175dd72490a409bfaaadedc397d | /TelegramSection/Tg_Bot_Client.py | 2eb1a828e9b15f90f45e428db2a7b1c4467d709e | [] | no_license | 320Jackson/Telegram_LINE_Adapter | fedf5dcf16d355ccb9b8b43db3f2b351d5e307e5 | d7638eed27528337d113539ae11b9ce4fa8acc5f | refs/heads/master | 2023-01-28T01:18:02.252298 | 2020-12-14T13:24:18 | 2020-12-14T13:24:18 | 320,486,086 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | import time
import Global_Element
from linebot.models import TextSendMessage
from FileControl import FileControl
from datetime import datetime
class Telegram_MessageHandler:
    """Static helpers that route Telegram updates: bot commands are executed
    locally, plain messages are relayed to the linked LINE group."""

    # Telegram receiver (entry point for every incoming update).
    @staticmethod
    def Telegram_MessageReceive(msg):
        """Dispatch *msg*: bot commands go to the command handler; everything
        else (including updates without entity metadata) is relayed to LINE."""
        try:
            ContentType = msg['entities'][0]['type']
        except (KeyError, IndexError, TypeError):
            # No entity metadata -> ordinary message, relay it.  The original
            # bare `except:` also wrapped the handler calls, silently
            # swallowing their errors; the narrow try fixes that.
            Telegram_MessageHandler.Transfer_to_LINE(msg)
            return
        if ContentType == 'bot_command':
            # Command handling
            Telegram_MessageHandler.Telegram_CommandHandler(msg)
        else:
            # Message handling
            Telegram_MessageHandler.Transfer_to_LINE(msg)

    @staticmethod
    def Telegram_CommandHandler(msg):
        """Execute one of the supported bot commands: /start, /exit, /list."""
        Command = msg['text'].split(' ')
        TargetID = msg['chat']['id']
        # /start <LINE group>: begin relaying that LINE group to this chat.
        if Command[0] in ('/start', '/start@LINE_Adapter_Bot'):
            # Everything after the command is the target group name.
            TargetName = ' '.join(Command[1:]).strip()
            # Register the link in both directions and persist it.
            Global_Element.TelegramTable[TargetName] = TargetID
            Global_Element.TelegramIndex[str(TargetID)] = TargetName
            FileControl.Save_Table("Telegram")
            Telegram_MessageHandler.Telegram_MessagePoster(TargetID, f"現正接收 {TargetName} 訊息")
        # /exit: stop relaying the currently linked LINE group.
        elif Command[0] in ('/exit', '/exit@LINE_Adapter_Bot'):
            TargetID = str(msg['chat']['id'])
            # .get() avoids the KeyError the original raised for chats that
            # were never linked.
            Key = Global_Element.TelegramIndex.get(TargetID, "")
            if Key:
                # Clear the link in both directions and persist it.
                Global_Element.TelegramTable[Key] = ""
                Global_Element.TelegramIndex[TargetID] = ""
                FileControl.Save_Table("Telegram")
                Telegram_MessageHandler.Telegram_MessagePoster(TargetID, "已停止接收")
            else:
                Telegram_MessageHandler.Telegram_MessagePoster(TargetID, "目前未與LINE群組連結")
        # /list: show which LINE groups can currently be linked.
        elif Command[0] in ('/list', '/list@LINE_Adapter_Bot'):
            Telegram_MessageHandler.Telegram_MessagePoster(TargetID, Telegram_MessageHandler.getLINE_List())

    @staticmethod
    def getLINE_List():
        """Return a newline-terminated list of linkable LINE group names."""
        Output = "".join(Name + "\n" for Name in Global_Element.LINETable.keys())
        if Output == "":
            Output = "清單內無內容"
        return Output

    @staticmethod
    def Telegram_MessagePoster(targetID, msgText):
        """Send *msgText* to the Telegram chat *targetID*."""
        Global_Element.Tg_Bot.sendMessage(targetID, msgText)

    @staticmethod
    def Transfer_to_LINE(msg):
        """Relay a plain Telegram message to its linked LINE group, if any."""
        TimeNow = f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}]"
        # Look up which LINE group this Telegram chat is linked to.
        Key = str(Global_Element.TelegramIndex.get(str(msg['chat']['id']), ""))
        # BUGFIX: the original condition `Key != "" or Key != None` was always
        # true, so unlinked chats crashed on the LINETable lookup below.
        if Key:
            Content = str(msg['text'])
            Global_Element.Line_Bot.push_message(Global_Element.LINETable[Key], TextSendMessage(text = Content))
            FileControl.Save_ChatHistory(Key, f"{TimeNow}Bot_{Content}\n\n", "Line")
"jackson8963@outlook.com"
] | jackson8963@outlook.com |
40f202b1dcfd707e76b11e0586df3eb6fb079f7f | a80e45b5a5b2a8ff95cb7df0165ee2b52aa1dae2 | /plot.py | 7586b17f2f14c758e2df11c83d7372e5ee71780b | [] | no_license | bctvu/103 | fb940808fa0e5b7d8651dae5a4838e8c12663556 | 4971a3e1294eb610f8a81207be99cd539b545448 | refs/heads/main | 2023-07-05T06:15:05.191941 | 2021-08-26T08:21:37 | 2021-08-26T08:21:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import pandas as pd
import plotly.express as px
df=pd.read_csv("line_chart.csv")
fig=px.line(df,x="Year",y="Per capita income",color="Country",title="Per capita income")
fig.show() | [
"noreply@github.com"
] | bctvu.noreply@github.com |
e77d7299c13314f6e3cff061509e833052882337 | 46a3607edf8ee6462ab995674a22e73a79d0c0e6 | /imageOp.py | a900c0c693c13bf65e1dd03da33af6448f12b677 | [] | no_license | abhinav2188/MCA-Linear-Algebra | b365fc2b9d8d66726abd537a0e88677931542e84 | 659f9fd5b1de0f73cfd14fa4cb7ec21b0364f34e | refs/heads/master | 2020-09-12T00:04:27.122319 | 2019-11-18T17:35:43 | 2019-11-18T17:35:43 | 222,235,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 01:49:44 2019
@author: abhi
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
def _show_and_wait(window_name, image):
    """Display *image* in *window_name*, block until any key, then close."""
    cv2.imshow(window_name, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Read the image in grayscale mode (flag 0 == cv2.IMREAD_GRAYSCALE).
img = cv2.imread('img1.jpg', 0)

# Create a user-resizable window for the first display (the second display
# deliberately uses the default autosized window, as before).
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
_show_and_wait('image', img)

# Write the grayscale image to a new file.
cv2.imwrite('imgGray.jpg', img)

pixel = img[100, 100]  # intensity sample at row 100, col 100 (unused)

# Build and display the negative (inverted intensities) of the image.
# The show/wait/destroy sequence was duplicated inline twice before; it is
# now shared via _show_and_wait().
img_negative = cv2.bitwise_not(img)
_show_and_wait('image', img_negative)
"39943422+abhinavchmod777@users.noreply.github.com"
] | 39943422+abhinavchmod777@users.noreply.github.com |
05f8bfab1be5b15e506641a43fb50fda749653eb | 6fd8d4d92e22365d9ad277379292b390aaf7d39b | /{{cookiecutter.app_name}}-nfusz-ms/main.py | 17d1bb89b5ba33774d23f4373990910c75019b67 | [
"MIT"
] | permissive | anthony-munoz/python-serverless-template-py34 | 996c3306ba0e02823d34e94f1a294f76589b3e50 | 810d5eac08b036bfcf9523f848117b1bbf198f6e | refs/heads/master | 2020-03-22T22:50:15.707295 | 2018-07-14T21:24:21 | 2018-07-14T21:24:21 | 140,773,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py |
from app.flask_factory import app
from app.router import configure_api
# Register all API routes on the shared Flask app instance.
configure_api(app)
# Run the development server only when executed directly (a WSGI/serverless
# runtime imports this module and serves `app` itself).
if __name__ == '__main__':
    app.run(debug=True)
| [
"anthony.munoz@lantern.tech"
] | anthony.munoz@lantern.tech |
a88ddeb23230460854ea57da3cde2263d05896a9 | ee28d9f80fccfed6512398231c3781d8d5ef8772 | /Banner Grabber/CODE bannerGrabber EXE.py | 50ea0bb52a65fcbbe358b51043fd821732771350 | [] | no_license | AnshVaid4/Python | 660adcde1643c59fa4def75c33624e69881eab0e | b5b76f8ff2015085ba1f9d50d673a98523684ad4 | refs/heads/master | 2022-05-21T11:47:09.749497 | 2022-03-31T10:47:36 | 2022-03-31T10:47:36 | 390,971,555 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,647 | py | import sys
import json
import time
from socket import *
import requests
flag = 1  # read by Main(); never reassigned anywhere in this script


def _print_host_details(host, scheme):
    """Fetch *host* over *scheme* and print its headers plus IP/geo details.

    Uses ipinfo.io for the geo lookup.  The 'hostname' field is optional in
    ipinfo responses, so it is printed only when present.  Returns 1 on
    success; any network/lookup error propagates to the caller.
    """
    req = requests.get(scheme + "://" + host)
    print("[+] " + str(req.headers))
    ip = gethostbyname(host)
    print("[+] IP address is " + ip)
    details = requests.get("https://ipinfo.io/" + ip + "/json")
    detailsjson = json.loads(details.text)
    if "hostname" in detailsjson:
        print("[+] Hostname is " + detailsjson["hostname"])
    print("[+] City is " + detailsjson["city"])
    print("[+] Country is " + detailsjson["country"])
    print("[+] Geo location is " + detailsjson["loc"])
    print("[+] Organization is " + detailsjson["org"])
    print("[+] Timezone is " + detailsjson["timezone"])
    return 1


def grabber(host):
    """Report on *host*, preferring HTTPS and falling back to HTTP.

    The original duplicated the entire ~20-line report between the https
    try-block and the http except-block (and called exit() from inside the
    helper path); both paths now share _print_host_details().  The bare
    `except:` — which even swallowed SystemExit — is narrowed to Exception.
    Returns 1 on success so Main() can pause before exiting.
    """
    try:
        return _print_host_details(host, "https")
    except Exception:
        return _print_host_details(host, "http")
def Main():
    """Prompt for a host name, print its banner/geo details, then exit.

    Sleeps before exiting so the console window (when run as a bundled
    EXE) stays readable long enough to see the output.
    """
    hname=input("Enter the host name: ")
    if (hname== ""):
        print("Hostname can't be empty")
        time.sleep(10)
        exit(0)
    else:
        print("[+] Scanning for "+hname)
        if(grabber(hname)):
            time.sleep(15)
            exit(0)
    # NOTE(review): `flag` is set to 1 at module level and never changed,
    # so this branch runs whenever grabber() returned a falsy value
    # without exiting — it effectively serves as the failure message.
    if(flag==1):
        print("[-] Sorry unable to fetch details")
        time.sleep(15)
        exit(0)
print("\n")
print(" ___________ ")
print(" // \ ")
print(" // \ ")
print(" // ")
print(" // ")
print(" || ")
print(" || --------|| ||==== // ||=== ||=== |===== ||==== ")
print(" \ || || \ //| || \ || \ || || \ ")
print(" \ // ||====/ //=| ||==/ ||==/ ||-- ||====/ ")
print(" \ // || \ // | || \ || \ || || \ ")
print(" \___________// || \ // | ||___/ ||===/ ||==== || \ ")
print(" ---------------------------------------------------------------------------")
print(" -------------------By: Ansh Vaid-------v1.1--------------------------------")
print("\n\n\n")
Main()
| [
"anshvaid4@gmail.com"
] | anshvaid4@gmail.com |
a5a75d90679c6ca3fd506ea8dfbafd949dc61360 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/lib/googlecloudsdk/core/updater/release_notes.py | 977fe1c29e08b001c9d41029efce76a4f5bf998e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 7,835 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities for comparing RELEASE_NOTES between Cloud SDK versions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core.document_renderers import render_document
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.util import encoding
from six.moves import StringIO
class ReleaseNotes(object):
  """A parsed RELEASE_NOTES file.

  The expected file layout is:

      # Google Cloud SDK - Release Notes

      Copyright 2014-2015 Google Inc. All rights reserved.

      ## 0.9.78 (2015/09/16)

      *   Note
      *   Note 2

      ## 0.9.77 (2015/09/09)

      *   Note 3
  """

  # Matches one whole version section: a '##' heading preceded by a blank
  # line (the lookbehind checks the extra newline without consuming it),
  # capturing the version token, then every following line up to — but not
  # including — the next blank-line + '##' heading.
  _VERSION_SPLIT_REGEX = (
      r'(?<=\n)\n## +(?P<version>\S+).*\n(?:\n.*(?!\n\n## ))+.')

  MAX_DIFF = 15

  @classmethod
  def FromURL(cls, url, command_path=None):
    """Downloads and parses the release notes found at url.

    Every download or parse failure is logged and swallowed, and None is
    returned instead, so release notes can never block an update.

    Args:
      url: str, Where to fetch the release notes from.
      command_path: str, The command invoking this, used to instrument the
        user agent for the download.

    Returns:
      ReleaseNotes, the parsed notes, or None if anything went wrong.
    """
    try:
      response = installers.ComponentInstaller.MakeRequest(url, command_path)
      if not response:
        return None
      status = response.getcode()
      if status and status != 200:
        return None
      raw = response.read()
      return cls(encoding.Decode(raw))
    # pylint: disable=broad-except, Any failure here must not block an
    # update; returning None makes the caller print a generic pointer to
    # the online release notes instead.
    except Exception:
      log.debug('Failed to download [{url}]'.format(url=url), exc_info=True)
      return None

  def __init__(self, text):
    """Parses the release notes from the given text.

    Args:
      text: str, The raw release notes contents.
    """
    self._text = text.replace('\r\n', '\n')
    # [(version string, full version text including header), ...]
    self._versions = [
        (match.group('version'), match.group().strip())
        for match in re.finditer(ReleaseNotes._VERSION_SPLIT_REGEX,
                                 self._text)]

  def GetVersionText(self, version):
    """Returns the release notes text of a single version.

    Args:
      version: str, The version whose section should be returned.

    Returns:
      str, The release notes for that version, or None if it is unknown.
    """
    index = self._GetVersionIndex(version)
    return None if index is None else self._versions[index][1]

  def _GetVersionIndex(self, version):
    """Returns the index of version in the parsed list, or None if absent.

    Args:
      version: str, The version to locate.

    Returns:
      int, The index of the given version, or None when not found.
    """
    return next((i for i, (ver, _) in enumerate(self._versions)
                 if ver == version), None)

  def Diff(self, start_version, end_version):
    """Creates a diff of the release notes between the two versions.

    Sections are returned newest first.  start_version (the later version)
    is included; end_version (your current version) is excluded.  A falsy
    start_version means "from the most recent entry"; a falsy end_version
    means "through the end of all release notes".

    Args:
      start_version: str, Where the diff starts (inclusive), or None.
      end_version: str, Where the diff stops (exclusive), or None.

    Returns:
      [(version, text)], newest first, or None when either named version
      is not present in the release notes.
    """
    if start_version:
      start_index = self._GetVersionIndex(start_version)
      if start_index is None:
        return None
    else:
      start_index = 0

    if end_version:
      end_index = self._GetVersionIndex(end_version)
      if end_index is None:
        return None
    else:
      end_index = len(self._versions)

    return self._versions[start_index:end_index]
def PrintReleaseNotesDiff(release_notes_url, current_version, latest_version):
  """Prints the release notes diff based on your current version.

  If any argument is None, or the notes cannot be fetched or diffed, a
  generic message pointing at the online release notes page is printed
  instead.  Very large diffs are likewise replaced with a pointer to the
  web page.

  Args:
    release_notes_url: str, The URL to download the latest release notes from.
    current_version: str, The current version of the SDK you have installed.
    latest_version: str, The version you are about to update to.
  """
  release_notes_diff = None
  if release_notes_url and current_version and latest_version:
    notes = ReleaseNotes.FromURL(release_notes_url)
    if notes:
      release_notes_diff = notes.Diff(latest_version, current_version)

  if not release_notes_diff:
    # Download or parsing failed; fall back to the online release notes.
    log.status.write(
        'For the latest full release notes, please visit:\n  {0}\n\n'.format(
            config.INSTALLATION_CONFIG.release_notes_url))
    return

  if len(release_notes_diff) > ReleaseNotes.MAX_DIFF:
    log.status.Print("""\
A lot has changed since your last upgrade. For the latest full release notes,
please visit:
  {0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))
    return

  log.status.Print("""\
The following release notes are new in this upgrade.
Please read carefully for information about new features, breaking changes,
and bugs fixed. The latest full release notes can be viewed at:
  {0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))

  notes_stream = StringIO()
  for _, section_text in release_notes_diff:
    notes_stream.write(section_text)
    notes_stream.write('\n')
  notes_stream.seek(0)
  render_document.RenderDocument('text', notes_stream, log.status)
  log.status.Print()
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
c540d7ac236c5eeeef6665e051608f1b1df3d8d8 | eb5bb62b7ad8fb2cdbbe21d377b3d880765a31af | /src/multilabel/classifiers/classifier.py | 4e9103d5f6987c6b7f5ea8ff98c345ba0b868563 | [] | no_license | TANG16/multilabel-feature-selection-temp | 6d3df5114fb361907131b128f1967607d36fdae8 | 176f8c7d947a3e4515eb2b71848344291a142141 | refs/heads/master | 2022-11-20T20:11:16.645033 | 2020-07-27T07:53:14 | 2020-07-27T07:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 14 20:27:16 2020
@author: Mustehssun
"""
import abc
class Classifier(abc.ABC):
    """Abstract base class for multilabel classifiers."""
    @abc.abstractmethod
    def fit(self, multilabel_dataset):
        """Train the classifier on the given multilabel dataset."""
        pass
    @abc.abstractmethod
    def classify(self, multilabel_dataset):
        """Predict labels for the given multilabel dataset."""
        pass
| [
"mustehssun@gmail.com"
] | mustehssun@gmail.com |
8e0f636497884cd06984efbb34ba743a1c4e4c8a | 8dd65dbc240320854c778e0c77197f0d8db417ca | /rus/migrations/0001_initial.py | bc6c22fd1bc88a611d11abb29401e8a6b9da7f6c | [] | no_license | AnimaDei326/rus | 15cc67f6e41d408fc37b5abce5928fd9479bd3ed | 990da269bc40d0ad574205feb8e4c2dd450476cf | refs/heads/master | 2022-11-30T16:11:40.574440 | 2020-08-14T22:18:51 | 2020-08-14T22:18:51 | 253,126,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,919 | py | # Generated by Django 3.0.8 on 2020-07-28 21:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: Blog, FormContact, Topic, Gallery.

    Generated by Django 3.0.8 (see header); do not hand-edit field
    definitions — create a new migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Blog: editorial posts with preview/full text, optionally shown
        # on the main page.
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort', models.IntegerField(blank=True, default=100, null=True, verbose_name='Сортировка')),
                ('active', models.BooleanField(default=True, verbose_name='Активность')),
                ('title', models.CharField(max_length=500, verbose_name='Название')),
                ('picture', models.ImageField(upload_to='blog', verbose_name='Картинка')),
                ('code', models.CharField(default='blog', max_length=500, unique=True, verbose_name='Код')),
                ('preview_text', models.TextField(blank=True, verbose_name='Превью текст')),
                ('text', models.TextField(blank=True, verbose_name='Текст')),
                ('show_on_main_page', models.BooleanField(blank=True, default=False, null=True, verbose_name='Показывать на главной странице')),
                ('last_updated', models.DateTimeField(auto_now_add=True, verbose_name='Дата публикации')),
            ],
            options={
                'verbose_name': 'Блог',
                'verbose_name_plural': 'Блог',
            },
        ),
        # FormContact: stored contact-form submissions with a "handled" flag.
        migrations.CreateModel(
            name='FormContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Имя')),
                ('email', models.CharField(max_length=100, verbose_name='E-mail')),
                ('subject', models.CharField(blank=True, max_length=500, verbose_name='Тема')),
                ('message', models.TextField(max_length=3000, verbose_name='Сообщение')),
                ('done', models.BooleanField(default=False, verbose_name='Обработано')),
                ('date_create', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
            ],
            options={
                'verbose_name': 'Обратная связь',
                'verbose_name_plural': 'Обратные связи',
            },
        ),
        # Topic: a gallery album; Gallery rows reference it via FK below.
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort', models.IntegerField(blank=True, default=100, null=True, verbose_name='Сортировка')),
                ('active', models.BooleanField(default=True, verbose_name='Активность')),
                ('title', models.CharField(max_length=500, verbose_name='Название')),
                ('code', models.CharField(default='topic', max_length=500, unique=True, verbose_name='Код')),
                ('picture', models.ImageField(upload_to='gallery', verbose_name='Картинка')),
                ('description', models.TextField(blank=True, verbose_name='Описание')),
                ('show_on_main_page', models.BooleanField(blank=True, default=False, null=True, verbose_name='Показывать на главной странице')),
            ],
            options={
                'verbose_name': 'Альбом',
                'verbose_name_plural': 'Альбомы',
            },
        ),
        # Gallery: an individual image, optionally bound to a Topic (album);
        # PROTECT prevents deleting a Topic that still has images.
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort', models.IntegerField(blank=True, default=100, null=True, verbose_name='Сортировка')),
                ('active', models.BooleanField(default=True, verbose_name='Активность')),
                ('title', models.CharField(max_length=500, verbose_name='Название')),
                ('picture', models.ImageField(upload_to='gallery', verbose_name='Картинка')),
                ('description', models.TextField(blank=True, verbose_name='Описание')),
                ('show_in_slider', models.BooleanField(blank=True, default=False, null=True, verbose_name='Показывать в слайдере')),
                ('topic', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='rus.Topic', verbose_name='Альбом')),
            ],
            options={
                'verbose_name': 'Галерея',
                'verbose_name_plural': 'Галереи',
            },
        ),
    ]
"support@beget.ru"
] | support@beget.ru |
66fe303cec467a54e9a1f2fcd4801199a0e49b7f | 15cb0acb86968e34593fa4cbd7a3616fbb659b1d | /morzeovka.py | 56e65505c2ab5265f7748778f8ea5900173edbdc | [] | no_license | TerezaMertlova/intenzivnikurz | a9bedd6eb87144516798240acb2dbfc4640bf887 | 7e8b27ab48a350a9b53a3c4bf36396e87e2715d4 | refs/heads/master | 2023-07-01T18:38:16.520836 | 2021-08-03T07:48:37 | 2021-08-03T07:48:37 | 392,232,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | # Ve slovníku níže vidíš Morseovu abecedu, kde jako klíč slouží znak v klasické abecedě a
# jako hodnota zápis znaku v Morseově abecedě.
#
# Napiš program, který se uživatele zeptá na text, který chce zapsat v Morseově abecedě.
# Uvažuj disciplinovaného uživatele, který zadává pouze znaky bez diakritiky, malá písmena atd.
# Na začátku uvažuj i to, že uživatel nezadává mezery.
# Projdi řetězec zadaný uživatelem. Najdi každý znak ve slovníku a vypiš ho na obrazovku v Morseově abecedě.
# Abychom měli celý kód vypsaný na jedné řádce, požádáme funkci print(), aby na konci výpisu nevkládala
# znak pro konec řádku, ale mezeru. To uděláme tak, že jako druhý arugument funkce dáme argument end=" ".
# Nyní přidáme mezery. Uvažuj, že uživatel může zadat mezeru. Před tím, než budeš hledat znak ve slovníku,
# zkontroluj, zda znak není mezera. Pokud ano, vypiš znak lomítka /.
morseCode = {
    "0": "-----",
    "1": ".----",
    "2": "..---",
    "3": "...--",
    "4": "....-",
    "5": ".....",
    "6": "-....",
    "7": "--...",
    "8": "---..",
    "9": "----.",
    "a": ".-",
    "b": "-...",
    "c": "-.-.",
    "d": "-..",
    "e": ".",
    "f": "..-.",
    "g": "--.",
    "h": "....",
    "i": "..",
    "j": ".---",
    "k": "-.-",
    "l": ".-..",
    "m": "--",
    "n": "-.",
    "o": "---",
    "p": ".--.",
    "q": "--.-",
    "r": ".-.",
    "s": "...",
    "t": "-",
    "u": "..-",
    "v": "...-",
    "w": ".--",
    "x": "-..-",
    "y": "-.--",
    "z": "--..",
    ".": ".-.-.-",
    ",": "--..--",
    "?": "..--..",
    "!": "-.-.--",
    "-": "-....-",
    "/": "-..-.",
    "@": ".--.-.",
    "(": "-.--.",
    ")": "-.--.-"
}


def to_morse(text):
    """Translate *text* to Morse code, tokens separated by single spaces.

    A space in the input becomes the word separator "/", as the assignment
    comments above require (the original code printed an empty line for a
    space instead of the "/" character — that was the bug fixed here).
    Assumes disciplined input: lowercase, no diacritics.
    """
    return " ".join("/" if znak == " " else morseCode[znak] for znak in text)


if __name__ == "__main__":
    # Main-guard added so importing this module does not block on input().
    slovo = input("Co chceš napsat v morzeovce?")
    print(to_morse(slovo))
| [
"terik.mertl@gmail.com"
] | terik.mertl@gmail.com |
220261aae88fefcf2727065cc283d0315a9050a2 | bc405651d83f34143d57b20d5567f86f7e04e44e | /kanna/model/user.py | ee6988220e06d920cc4e854ddf45569d9ea41e08 | [] | no_license | tylertreat/Kanna | dc94c0d0bc0b4327bf66c1a07d014787ea186a33 | f7c64742a8af43cbb9d28f05fa2452dc2a598856 | refs/heads/master | 2016-09-06T02:00:28.920423 | 2014-01-26T00:30:55 | 2014-01-26T00:30:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from google.appengine.ext import ndb
from kanna.model.photo import Album
from kanna.model.photo import Photo
class User(ndb.Model):
    """App Engine Datastore entity for an application user.

    Photo and Album entities reference their owner by key, so the reverse
    relations below are expressed as GQL queries rather than stored lists.
    """

    # Account e-mail address (indexed by default, usable for lookups).
    email = ndb.StringProperty()
    # Display name; not indexed because it is never queried on.
    name = ndb.StringProperty(indexed=False)
    # Creation timestamp, filled in automatically on the first put().
    created = ndb.DateTimeProperty(required=True, auto_now_add=True)

    @property
    def photos(self):
        """Query of all Photo entities owned by this user."""
        return Photo.gql('WHERE owner = :1', self.key)

    @property
    def albums(self):
        """Query of all Album entities owned by this user."""
        return Album.gql('WHERE owner = :1', self.key)
| [
"ttreat31@gmail.com"
] | ttreat31@gmail.com |
aaa070c00c120f1a573c1960808df255e6b566f1 | 157f3abb797e038d8ac76a7c14e9db94247dc942 | /fluentcheck/tests/test_dicts.py | 3abce25bdd17c3c071fa9d8046e80025c7c5d8ef | [
"MIT"
] | permissive | deanagan/fluentcheck | 525ef4bc8ed760b02d8b2fa519e42d2d1ebf093e | 514d648a5dcfcdd6c8a2e10197064a465d4924fb | refs/heads/master | 2021-01-04T04:41:37.919768 | 2020-03-08T07:02:21 | 2020-03-08T07:11:23 | 240,392,048 | 1 | 0 | MIT | 2020-03-08T06:35:28 | 2020-02-13T23:56:32 | Python | UTF-8 | Python | false | false | 1,107 | py | import unittest
from fluentcheck.check import Check, CheckError
class TestDictsAssertions(unittest.TestCase):
    """Unit tests for the dict-related assertions exposed by ``Check``."""

    def _expect_check_error(self, trigger):
        """Run *trigger* and fail the test unless it raises ``CheckError``."""
        try:
            trigger()
        except CheckError:
            return
        self.fail()

    def test_is_dict(self):
        outcome = Check(dict()).is_dict()
        self.assertIsInstance(outcome, Check)
        self._expect_check_error(lambda: Check(123).is_dict())

    def test_is_not_dict(self):
        outcome = Check(set()).is_not_dict()
        self.assertIsInstance(outcome, Check)
        self._expect_check_error(lambda: Check(dict()).is_not_dict())

    def test_has_keys(self):
        sample = {1: 'one', 2: 'two'}
        outcome = Check(sample).has_keys(1, 2)
        self.assertIsInstance(outcome, Check)
        self._expect_check_error(lambda: Check(sample).has_keys(3, 4))

    def test_has_not_keys(self):
        sample = {1: 'one', 2: 'two'}
        outcome = Check(sample).has_not_keys(3, 4)
        self.assertIsInstance(outcome, Check)
        self._expect_check_error(lambda: Check(sample).has_not_keys(1, 2))
| [
"agandfr@gmail.com"
] | agandfr@gmail.com |
dae5e73951268b5b40e35f9c4495effe9cfdb2a3 | 86f110457ab43bd1b9a8e2b37f8a8a909c719477 | /bin/python-config | c9cd1d25010c17c46cf4c52dec2df77fe1e6f196 | [] | no_license | jicowan/ENIConfig-Node-Annotator | 858ccbf8abae904b2af07f3537bea4c49b73b866 | 65af66da7fdaed1d845f13dd0fee58bcdc4adfa4 | refs/heads/master | 2020-04-18T10:27:59.609669 | 2019-10-11T18:02:53 | 2019-10-11T18:02:53 | 167,468,618 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | #!/Users/jicowan/PycharmProjects/pykube/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"jicowan@hotmail.com"
] | jicowan@hotmail.com | |
f0b1e7cdc6dd10b0060104cf2fa37847c9fece9e | 33b75d77c3eb526594e7f4ee767c635141409187 | /server/tests/conftest.py | 913501ada5f2befa3ac99ce8a310cf0950094aa3 | [
"Apache-2.0"
] | permissive | triplekill/patton | e3a3489abca74291effe62aa177e1960c60c30cc | 5401b68b54938647ef8b5d6eaea38fa97217a36b | refs/heads/master | 2020-07-06T19:23:48.024295 | 2019-07-19T12:34:23 | 2019-07-19T12:34:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | # Remove it under your own responsibility.
# It is used in order to pytest find the correct python test files.%
| [
"enrique@iknite.com"
] | enrique@iknite.com |
49a618fbd6ab7ca187b9bd478ae66d933454a3c7 | d323c6d0d026c435ce9512e3658620a10b1b242d | /tensorflow/opencl-matmul/gen_model.py | 4978d9553274ddf2dc4cd266ca0443e04dfe3675 | [
"Apache-2.0"
] | permissive | ShabbirHasan1/TensorflowOpenCL-GPU | 2408d6490566812e67c557e66f92620e909757a8 | 8d443c9afb49064c4bdf68fc0c5217986077d24c | refs/heads/master | 2023-03-15T21:27:11.227808 | 2018-05-28T12:53:27 | 2018-05-28T12:53:27 | 546,891,912 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | import tensorflow as tf
with tf.Session() as sess:
x = tf.placeholder(tf.float32, [None, 1024], name="x")
y = tf.placeholder(tf.float32, [1024 , None], name="y")
result = tf.matmul( x, y , name="matmul", transpose_a=False, transpose_b=False)
tf.train.write_graph(sess.graph_def,
'./',
'matmul.pb', as_text=False)
| [
"michael1996623@gmail.com"
] | michael1996623@gmail.com |
7fb1875f38bea1f34af5aeda8ad2d2616caa2bc1 | 52ea21f0b6a2a994539bc900fa7b7d9c2278a3c4 | /RoboticFrameworkTest/RobotController/Event/test_NewPositionEvent.py | 8d4a011c89df40046bbb2c3fa17b2d90e3d9a605 | [] | no_license | dheerendra1/RobotNaturalLearning | 1b485a47a0deddaef55a6d8aa7bf0a4962478dcd | 036070f713008950f07c9e42a52fc55868b0c544 | refs/heads/master | 2020-12-24T18:13:04.871143 | 2011-10-23T14:59:21 | 2011-10-23T14:59:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #Testing new position event
#Author: Witold Wasilewski
from RoboticFramework.RobotController.Event.NewPositionEvent import NewPositionEvent
import pytest
class TestNewPositionEvent:
    """Unit tests for NewPositionEvent construction."""

    def setup_method(self, method):
        """No per-test setup is required."""
        pass

    def test_construction_simple(self):
        """The constructor should store its payload on ``data``."""
        evt = NewPositionEvent("positiondata")
        assert evt.data == "positiondata"

    def teardown_method(self, method):
        """No per-test teardown is required."""
        pass
"vitotao@gmail.com"
] | vitotao@gmail.com |
5614c386ee967c7ba8824677d3456bbbdb412140 | a384d0781c8e0bf38dcc9f5b21b79fa2c30ff324 | /3in1cafe/bin/rst2pseudoxml.py | 798cd7a59667447980b58d9feb057f37a60cab7d | [
"MIT"
] | permissive | Backend-Staging/3in1cafe | 90c3443942986e5595b3cb2c5cab43e6dc90e446 | 89f85dbff207244081a1ecf419398a8858556078 | refs/heads/master | 2022-12-22T21:06:26.211790 | 2019-08-09T17:18:16 | 2019-08-09T17:18:16 | 201,094,660 | 0 | 0 | MIT | 2022-11-22T03:15:40 | 2019-08-07T17:21:12 | Python | UTF-8 | Python | false | false | 658 | py | #!/Users/jmgc/Documents/workspace/cafeBackEnd/3in1cafe/3in1cafe/bin/python3
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
    import locale
    # Use the user's default locale so docutils handles non-ASCII I/O.
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # Narrowed from a bare ``except``: only a failed locale setup is
    # expected here and safe to ignore; anything else should propagate.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

publish_cmdline(description=description)
| [
"jmgc@JMGCs-MacBook-Pro.local"
] | jmgc@JMGCs-MacBook-Pro.local |
5ae862e9d518c2f20efcded062ee983747e72c04 | 4778bb52672e5bfd3bc227fd46bd3e2262146788 | /check_pickle_data.py | 77936b0793afdc053427dfe6f921049916de9d4f | [] | no_license | vyshor/NTU_timetable_generator | cf5d2914a52d41ca1087259fafe215d3298cfd3d | e7223fd98da718232af85e960bddc9e88ee02e5d | refs/heads/master | 2021-06-02T09:12:44.419674 | 2021-05-20T14:25:04 | 2021-05-20T14:25:04 | 135,579,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import pandas as pd
import pickle
import os.path
if os.path.isfile('database.p'):
with open('database.p', 'rb') as f:
store = pickle.load(f)
print(store.keys())
print([x for x in store.keys() if 'CZ' in x])
print(store['HE9091']) | [
"vyshornehc@gmail.com"
] | vyshornehc@gmail.com |
804f1bfe9ba0390779b244358dd71dc070a80fda | ea2c81374a549afd647dfdb745d9b97e34d4cac7 | /tools/torch_model_process.py | d2c23a94fe801de46880826e29f381236944f6c5 | [
"Apache-2.0"
] | permissive | Shanshan7/easy_anomaly_detection | 768adf743fcb08c2148ad62f888903a1e43e14d9 | af10f0b26597d9d28c660730e17006c2bceaddc6 | refs/heads/main | 2023-07-14T07:35:41.823634 | 2021-08-19T10:33:39 | 2021-08-19T10:33:39 | 397,079,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | import os
import torch
from collections import OrderedDict
class TorchModelProcess():
    """Helpers for loading PyTorch checkpoints saved with or without
    ``nn.DataParallel``."""

    def convert_state_dict(self, state_dict):
        """Convert a state dict saved from a DataParallel module to a normal
        module state dict.

        Keys written by ``nn.DataParallel`` carry a ``module.`` prefix; it
        is stripped here.  Keys without the prefix are kept unchanged —
        the original unconditionally sliced ``k[7:]``, which corrupted
        keys from checkpoints not saved via DataParallel.

        :param state_dict: the loaded model state dict
        :return: OrderedDict with any ``module.`` prefixes removed
        """
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            # strip the DataParallel wrapper prefix only when present
            name = k[len("module."):] if k.startswith("module.") else k
            new_state_dict[name] = v
        return new_state_dict

    def load_latest_model(self, weight_path, model, dict_name="model"):
        """Load weights from ``weight_path`` into ``model`` if the file exists.

        Multi-GPU checkpoints are run through :meth:`convert_state_dict`
        first.  Failures are logged and swallowed (best-effort resume).

        :param weight_path: path to the checkpoint file
        :param model: the module to receive the weights
        :param dict_name: key under which the state dict is stored
        """
        # NOTE(review): self.torchDeviceProcess is never initialised in this
        # class — presumably provided by a subclass; confirm before use.
        # EasyLogger is likewise not imported in this view.
        count = self.torchDeviceProcess.getCUDACount()
        checkpoint = None
        if os.path.exists(weight_path):
            try:
                if count > 1:
                    checkpoint = torch.load(weight_path, map_location=torch.device("cpu"))
                    state = self.convert_state_dict(checkpoint[dict_name])
                    model.load_state_dict(state)
                else:
                    checkpoint = torch.load(weight_path, map_location=torch.device("cpu"))
                    model.load_state_dict(checkpoint[dict_name])
            except Exception as err:
                # os.remove(weight_path)
                checkpoint = None
                EasyLogger.warn(err)
        else:
            # Fixed message: this branch fires when the file does NOT exist.
            EasyLogger.error("Latest model %s does not exist" % weight_path)
"1336946993@qq.com"
] | 1336946993@qq.com |
b19037708f139c7478fbb1b7a79a91f72523998d | 9a24444fda749f39ba448d5015c591b8f53d076b | /2-1-Task3.py | 5528040180e739cf2b172ca6d68e3f5316ee1a22 | [] | no_license | Anri19/Testing-Stepik | 2626c8d9c5d037f475a0e93f53b465312b139062 | 1f59bfe07c442aa2363a30d8b2f578bab2750bcb | refs/heads/main | 2023-06-19T22:40:32.514728 | 2021-07-07T21:14:33 | 2021-07-07T21:14:33 | 383,914,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import math
import time
from selenium import webdriver
link = ' http://suninjuly.github.io/math.html'
browser = webdriver.Chrome()
browser.get(link)
def calc(x):
    """Return ``log(|12*sin(x)|)`` as a string for the quiz answer field."""
    value = int(x)
    magnitude = abs(12 * math.sin(value))
    return str(math.log(magnitude))
# Read the challenge number from the page and compute the answer.
x_element = browser.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
#print(y)
# Fill in the answer field.
input1 = browser.find_element_by_id("answer")
input1.send_keys(y)
# Tick the "I am a robot" checkbox and the "robots rule" radio button.
option1 = browser.find_element_by_css_selector("[for='robotCheckbox']")
option1.click()
option2 = browser.find_element_by_css_selector("[for='robotsRule']")
option2.click()
# Submit the form.
option2 = browser.find_element_by_css_selector("button.btn")
option2.click()
#input2 = browser.find_element_by_name("last_name")
#input2.send_keys("Petrov")
# leave time to copy the success code within 30 seconds
time.sleep(30)
# close the browser after all interactions
browser.quit()
# remember to keep an empty line at the end of the file
"an_zakharov@mail.ru"
] | an_zakharov@mail.ru |
45e93a3719f442de1c11065445d26e92ab6c6281 | 9ec2695ac982c47ea277d56027d617ebf9aece77 | /reverse_a_string.py | 0e61a7702e206a4d8f307ac8505bcb2c39af6e2d | [] | no_license | visaxin/InterviewQuestion | 91cc1cc3635af582da7190d52bf74a2c0d02afb0 | 792f363519d25b04a5f15fb2d0bb7c4ad99f756c | refs/heads/master | 2020-04-25T08:08:46.881667 | 2015-04-24T14:51:44 | 2015-04-24T14:51:44 | 34,447,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | #solution_1
my_string = "abandon"
my_reversed_string = my_string[::-1]
print my_reversed_string
#solution_2
def reverse(text):
    """Return *text* reversed, computed recursively.

    Base case: a string with an empty tail is its own reverse; otherwise
    reverse the tail and append the head character.
    """
    head, tail = text[:1], text[1:]
    if not tail:
        return text
    return reverse(tail) + head
#test
my_string_two = "abandon"
print reverse(my_string_two)
#solution_3
def reverse_two(text):
    """Return *text* reversed by walking it back-to-front with an index."""
    collected = ""
    for offset in range(1, len(text) + 1):
        collected += text[len(text) - offset]
    return collected
#test
my_string_three = "abandon"
print reverse_two(my_string_three) | [
"visaxin@gmail.com"
] | visaxin@gmail.com |
12cd15f963adfecba72c94e9f3361a291ddf3fda | 935d780103b43846d7ed5426a4d458152016ca4d | /Content/Scripts/NNDriveCar.py | 6ceb1e13786735aec17b8b1b229d5c7a91788bd1 | [] | no_license | pikumb94/UE4_TF_DLDriving | 04451d782b361b3a6019756f8552f5e6a502592a | d62694ca97c21e6a2c4f712dc9dc245efffdc752 | refs/heads/master | 2023-07-27T19:20:25.852547 | 2023-07-17T16:16:47 | 2023-07-17T16:16:47 | 362,065,737 | 14 | 1 | null | 2021-07-31T09:45:13 | 2021-04-27T10:05:14 | C++ | UTF-8 | Python | false | false | 3,209 | py | import unreal_engine as ue
import tensorflow as tf
from tensorflow.python.keras import backend as ks #to ensure things work well with multi-threading
import numpy as np #for reshaping input
import json
import operator #used for getting max prediction from 1x10 output array
from collections.abc import Iterable # import directly from collections for Python < 3.3
from unreal_engine.classes import WheeledVehicleMovementComponent
NN_topology = [tf.keras.layers.Dense(3, activation=tf.nn.tanh),
tf.keras.layers.Dense(4, activation=tf.nn.tanh),
tf.keras.layers.Dense(4, activation=tf.nn.tanh),
tf.keras.layers.Dense(2, activation=tf.nn.tanh)]
#print('NNDriveCar')
class NNDriveCar:
    """UE4 Python component that drives a wheeled vehicle with a small
    feed-forward neural network.

    The architecture comes from the module-level ``NN_topology`` so every
    spawned car shares the same shape; weights are injected at run time via
    :meth:`LoadModel` by the evolutionary trainer.
    """

    def __init__(self):
        # Model and topology are set statically to avoid passing the
        # topology for every NNCar spawned.
        self.model = tf.keras.models.Sequential(NN_topology)
        # Run a dummy 3-input forward pass so the layers get built.
        self.model(tf.constant([[0.0, 0.0, 0.0]]))
        self.bModelLoaded = False
        self.bTopologyLoaded = True
        self.index = -1

    def begin_play(self):
        """Called by Unreal on game start; caches the owning pawn and its
        wheeled-vehicle movement component."""
        self.pawn = self.uobject.get_owner()
        self.component = self.uobject.get_component_by_type(WheeledVehicleMovementComponent)

    def tick(self, delta_time):
        """Called every frame: feed the pawn's sensor readings through the
        network and actuate the two outputs (throttle, steering)."""
        SplitStr = self.pawn.GetInputsAsString().split()
        if self.bModelLoaded:
            SplitStr = np.array(SplitStr)
            SplitStr = SplitStr.astype(np.float32)
            x = tf.constant([SplitStr])
            y = self.model(x)
            self.pawn.ActuateActions(y[0][0], y[0][1])

    def SetIndex(self, index):
        """Store this car's population index (coerced to int)."""
        self.index = int(index)

    def GetIndex(self):
        """Return the population index set via :meth:`SetIndex` (-1 if unset)."""
        return self.index

    def LoadModel(self, NewModel):
        """Install network weights from a JSON-encoded list of arrays."""
        decodedWeights = json.loads(NewModel)
        self.model.set_weights([np.array(x) for x in decodedWeights])
        self.bModelLoaded = True

    def LoadTopology(self, Topology):
        """Replace the network with one built from a Keras JSON topology.

        Bug fix: the original assigned to ``seld.model`` (a NameError) and
        called ``ks.models`` although ``ks`` is bound to ``keras.backend``;
        use ``self`` and ``tf.keras.models`` instead.
        """
        self.model = tf.keras.models.model_from_json(Topology)
        self.bTopologyLoaded = True

    def you_pressed_K(self):
        # Debug handler wired to the K key in the editor.
        ue.log_warning('you pressed K')
        component = self.uobject.get_component_by_type(WheeledVehicleMovementComponent)
        yesno = self.uobject.actor_has_component_of_type(WheeledVehicleMovementComponent)
        ue.log_warning(yesno)

    def parla(self, words):
        """Log an arbitrary message (debug helper)."""
        ue.log(words)

    def speak(self):
        """Log a fixed marker message (debug helper)."""
        ue.log('parole')
"pikumb94@gmail.com"
] | pikumb94@gmail.com |
3f6c2cacc5c56ce83ddf0c6243c142943e9cee26 | be48c0c7f070738e97981e116539c3e4a64b0f51 | /ps0a/syllabus.py | 52fa7dcf3c9bd4b28b5519021fba68930aa076ed | [] | no_license | timflannagan/COMP.4200 | 0ce3a365d2d42b970858a8d737c2e3b8fd7869ae | 45e2a621fe684ea84722bab9dad579e22804fd75 | refs/heads/master | 2021-10-01T18:10:13.628904 | 2018-11-27T22:03:12 | 2018-11-27T22:03:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | # syllabus.py
# -----------
# (C) 2018 J Mwaura, jonathan_mwaura@uml.edu
# for use only at University of Massachusetts Lowell
# DO NOT REDISTRIBUTE
"""
Run python autograder.py
"""
# instructions:
# read the course syllabus to find the answers to the below questions.
# then, modify the return values of each of the next three functions
# to answer the question in each function.
# note 1: the course syllabus is the course home page.
# note 2: this question is not autograded locally. You must
# upload your solution to Bottlenose to see if you got it right.
def publish_solutions():
    """Answer: may students share their solutions to class problem sets?

    Per the course syllabus, redistribution is not allowed, so this returns
    the Python object for false.
    """
    return False
def teams():
    """Return ``[minimum, maximum]`` team size for the final project.

    The first item is the minimum team size, the second the maximum,
    as stated in the course syllabus.
    """
    return [2, 3]
def maillist():
    """Return the password posted to the course mailing list.

    The value comes from the list message titled
    'PS0a syllabus.py maillist password'; joining the list at
    https://groups.google.com/forum/#!forum/comp4200artificial-intelligence
    is required to read it.
    """
    return "password"
| [
"timflannagan@gmail.com"
] | timflannagan@gmail.com |
ef09d8327ae8054139beaf60f224bb8565136e3e | 9fe9a586e9c0ae659e2dfe091d4ad0795d92fb7e | /front/mvreviews/views.py | 93fdf426545e397fd34c987c8f321c861ecc6a68 | [] | no_license | JaeInK/Deep-Learning-Projects_Django-API | bd2ed34bd2212b83411c2b4d7b30f1731ecc38c3 | 87c98f8ef32594e080ddfb3ca3dc3be6fa74fe77 | refs/heads/master | 2020-03-09T19:34:58.323235 | 2018-04-10T16:18:38 | 2018-04-10T16:18:38 | 128,961,315 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from .forms import MvreviewsForm
from . import forms
# Create your views here.
import requests
import json
class Mvreviews(TemplateView):
    """Movie-review sentiment demo page backed by a remote inference API."""

    template_name = 'mvreviews/demo.html'

    def get(self, request):
        """Render the demo page with a fresh, empty form."""
        blank_form = MvreviewsForm()
        return render(request, self.template_name, {'form': blank_form})

    def post(self, request):
        """Validate the submission, forward the review to the inference
        server, and render its prediction next to the user's label."""
        form = MvreviewsForm(request.POST)
        if not form.is_valid():
            return render(request, self.template_name, {'form': form, 'res': 'ERROR'})
        review = form.cleaned_data['review']
        movie_id = form.cleaned_data['movie_id']
        y_label = form.cleaned_data['y_label']
        payload = json.dumps({'review': review, 'movie_id': movie_id})
        print(payload)
        response = requests.post("http://165.132.106.71:7200/mvreviews/run/", data=payload)
        prediction = response.json()  # all predicted values are ints
        return render(request, self.template_name,
                      {'form': form, 'y_label': y_label, 'res': prediction})
@csrf_exempt
def get_data(request):
    """Fetch one labelled review from the inference server and relay it as JSON."""
    response = requests.post("http://165.132.106.71:7200/mvreviews/get_data/")
    body = response.json()
    return JsonResponse({
        'review': body['review'],
        'product': body['product'],
        'label': int(body['label']),
    })
def about(request):
    """Render the static "about" page."""
    return render(request, 'mvreviews/about.html')


def howtouse(request):
    """Render the static usage-instructions page."""
    return render(request, 'mvreviews/howtouse.html')
"gjames5809@gmail.com"
] | gjames5809@gmail.com |
d1fb3a7e3169ec76a6ac4cb1cd3f6548d9308c44 | fb0d394bb9d69dfcdee123864dbcaf812b873848 | /calculate_num_of_pattern.py | b8ba5701cb409e90c1ab705cf1de761a17510a66 | [] | no_license | cali-in-cau/auto-ta-ml | e767661abf6c1342601109c2327aef0d727faf0c | 0b32b5350120001b3eb31fcb9043b66c088a2a6f | refs/heads/master | 2023-03-03T14:28:10.289077 | 2021-02-15T14:44:04 | 2021-02-15T14:44:04 | 331,219,558 | 5 | 4 | null | 2021-02-15T14:44:05 | 2021-01-20T06:54:58 | Jupyter Notebook | UTF-8 | Python | false | false | 1,339 | py | '''
각 패턴별로 나온 폴더에서 각 패턴별로 몇개의 이미지가 수집되었는지 알려주는 script입니다.
각 패턴의 개수를 조절하기위해서는 pattern_dict_sort[:x] 의 x값을 조절하세요. 상위 x개의 패턴을 출력해줍니다.
이 파일을 패턴들이 모아져 있는 집합을 모든 폴더(예)2018-nasdaq-top100 과 같은 위치에 두세요. random_select_item.py와 같은 위치에 있어야 합니다.
아니면 os.getcwd()위치에 경로를 조작하세요.
'''
import os
import random
import sys
def run(folder_name):
    """Count collected images per pattern folder and print a ranking.

    ``folder_name`` is a directory (relative to the current working
    directory) whose sub-directories are chart patterns; each regular file
    inside a sub-directory is one collected image.  Prints the
    ``(pattern, count)`` pairs sorted by count, then name, descending, and
    also returns that list so callers and tests can inspect it.
    """
    base_path = os.path.join(os.getcwd(), folder_name)
    pattern_dict = {}
    for pattern in os.listdir(base_path):
        pattern_path = os.path.join(base_path, pattern)
        # Skip stray files (e.g. scripts) sitting next to the pattern
        # folders; the original caught the resulting error with a bare
        # ``except`` and a debug print instead.
        if not os.path.isdir(pattern_path):
            continue
        pattern_dict[pattern] = len([name for name in os.listdir(pattern_path)
                                     if os.path.isfile(os.path.join(pattern_path, name))])
    # Rank by count (ties broken by name), largest first.
    pattern_dict_sort = sorted(pattern_dict.items(),
                               key=lambda kv: (kv[1], kv[0]), reverse=True)
    print(pattern_dict_sort)
    return pattern_dict_sort
'''
pattern_dict = sorted(pattern_dict.values())
print(pattern_dict)
'''
if __name__ == "__main__":
folder_name = sys.argv[1]
run(folder_name) | [
"ksl970330@naver.com"
] | ksl970330@naver.com |
9d2088838424734104abac49d03bc31bad104416 | ca48bab2e2ffca8bb351050791f3b94bccc886b9 | /final report/interpreter2.py | 95afe2a50ddc908c1b87ab7b75229f75451ed525 | [] | no_license | haaksmash/QUI | ff394205bd3c3c089d23c0de66bcc4de6bc4e65b | f1cc2b3e999bebc7811598bde0f3ffddba216e65 | refs/heads/master | 2020-04-06T03:35:15.499196 | 2011-12-10T09:21:55 | 2011-12-10T09:21:55 | 2,872,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | $ python -i FileModel.py
>>> f = FileModel()
>>> f.size = "really big"
Traceback (most recent call last):
...
...
fields.ValidationError: Could not convert to int: really big
>>> f.size = 100
>>> f.size
100
>>> | [
"haak.erling@gmail.com"
] | haak.erling@gmail.com |
312b66c5eacb2a78cdd83ede287b252ff061e671 | abd311803ab191ca5100e67d2dcefd6bd78cf07e | /news/views.py | eb589c1472f3e1bc9acf2241f255aa49c5b24b63 | [] | no_license | function2-llx/mysite | 980f6272f3db8c622e744e6782235d028e9f7aef | 1f8280fa50b8dd2a8c8b1c0cd6fd0a22454a36b1 | refs/heads/master | 2020-03-28T18:43:12.097530 | 2018-09-15T13:24:13 | 2018-09-15T13:24:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from django.shortcuts import render
newsDirectory = '../news/'
def displayNews(request, newsId: int):
    """Render a single news article loaded from a flat text file.

    File layout under ``newsDirectory``: line 1 is the title, line 2 the
    publication time, and the remainder is the body.

    Bug fix: the article file is now opened with an explicit UTF-8
    encoding so rendering does not depend on the platform's default
    encoding (news text is likely to contain non-ASCII characters).
    """
    context = {}
    with open(newsDirectory + str(newsId) + '.txt', 'r', encoding='utf-8') as f:
        # readline() keeps the trailing newline; the template tolerates it.
        context['title'] = f.readline()
        context['pubtime'] = f.readline()
        context['body'] = f.read()
    return render(request, 'news.html', context)
| [
"function2@qq.com"
] | function2@qq.com |
a16437348d97977bbebb92079774c3fe85c27609 | f306e08639d697b2c3f704906ccb087cf7a8db34 | /Image classification_birds/pipeline components/pytorch_model/pyt.py | 6f4f8f9602f99bf7be852385fa51513806f00d0d | [] | no_license | Soot3/ml_usecases | 80a896b711baa747c037737e25d6b06eeea4d254 | 1130ce703a55add9fafc1f59c82f74588b296c6a | refs/heads/main | 2023-09-03T15:53:26.453781 | 2021-11-09T07:28:43 | 2021-11-09T07:28:43 | 426,128,978 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,031 | py | import argparse
def pyt(img_folder):
    """Fine-tune a pretrained ResNet18 on the bird-image dataset under *img_folder*.

    Expects ``train``, ``valid`` and ``test`` sub-directories laid out for
    ``torchvision.datasets.ImageFolder``.  Trains for 5 epochs with the
    one-cycle learning-rate policy, then saves the weights to
    ``pytorch_model.pt`` and the final-epoch loss/accuracy to the
    ``pytorch_metrics`` file (joblib).
    """
    import torch
    import torchvision
    import torch.nn as nn
    from tqdm.notebook import tqdm
    import torch.nn.functional as F
    import torchvision.transforms as T
    import torchvision.models as models
    from torch.utils.data import DataLoader
    from torchvision.utils import make_grid
    import joblib

    # store the training images path into a directory
    train_dir = f"{img_folder}/train"
    # store the validation images path into a directory
    val_dir = f"{img_folder}/valid"
    # store test images path into a directory (not used further below)
    test_dir = f"{img_folder}/test"

    # accuracy = fraction of argmax predictions matching the labels
    def accuracy(out, labels):
        _, preds = torch.max(out, dim=1)
        return torch.tensor(torch.sum(preds == labels).item() / len(preds))

    # pick the GPU when available, otherwise fall back to CPU
    def get_device():
        if torch.cuda.is_available():
            return torch.device("cuda")
        else:
            return torch.device("cpu")

    # recursively move tensors (or lists/tuples of tensors) to the device
    def to_device(data, device):
        if isinstance(data, (list, tuple)):
            return [to_device(x, device) for x in data]
        return data.to(device, non_blocking=True)

    # wrapper that moves each batch from a DataLoader onto the device
    class DeviceDataLoader():
        def __init__(self, dl, device):
            self.dl = dl
            self.device = device

        def __iter__(self):
            for x in self.dl:
                yield to_device(x, self.device)

        def __len__(self):
            return len(self.dl)

    # base class bundling the train/validation step logic for the model
    class ImageClassificationBase(nn.Module):
        def training_step(self, batch):
            images, labels = batch
            out = self(images)
            loss = F.cross_entropy(out, labels)
            return loss

        def validation_step(self, batch):
            images, labels = batch
            out = self(images)
            loss = F.cross_entropy(out, labels)
            acc = accuracy(out, labels)
            return {"val_loss": loss.detach(), "val_acc": acc}

        def validation_epoch_end(self, outputs):
            # average per-batch loss/accuracy into epoch-level numbers
            batch_loss = [x["val_loss"] for x in outputs]
            epoch_loss = torch.stack(batch_loss).mean()
            batch_acc = [x["val_acc"] for x in outputs]
            epoch_acc = torch.stack(batch_acc).mean()
            return {"val_loss": epoch_loss.item(), "val_acc": epoch_acc.item()}

        def epoch_end(self, epoch, epochs, result):
            print("Epoch: [{}/{}], last_lr: {:.6f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
                epoch+1, epochs, result["lrs"][-1], result["train_loss"], result["val_loss"], result["val_acc"]))

    # pretrained ResNet18 with the classifier head replaced for transfer learning
    class model(ImageClassificationBase):
        def __init__(self, num_classes):
            super().__init__()
            self.network = models.resnet18(pretrained=True)
            number_of_features = self.network.fc.in_features
            self.network.fc = nn.Linear(number_of_features, num_classes)

        def forward(self, xb):
            return self.network(xb)

        def freeze(self):
            # train only the new classifier head
            for param in self.network.parameters():
                param.requires_grad = False
            for param in self.network.fc.parameters():
                param.requires_grad = True

        def unfreeze(self):
            # make the whole backbone trainable again
            for param in self.network.parameters():
                param.requires_grad = True

    # evaluate on the validation loader with gradient calculation disabled
    @torch.no_grad()
    def evaluate(model, val_dl):
        model.eval()
        outputs = [model.validation_step(batch) for batch in val_dl]
        return model.validation_epoch_end(outputs)

    # read the current learning rate from the optimizer's param groups
    def get_lr(optimizer):
        for param_group in optimizer.param_groups:
            return param_group["lr"]

    # train with the one-cycle LR policy and optional gradient clipping
    def fit_one_cycle(epochs, max_lr, model, train_dl, val_dl, weight_decay=0, grad_clip=None,
                      opt_func=torch.optim.Adam):
        torch.cuda.empty_cache()
        history = []
        opt = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
        sched = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr, epochs=epochs,
                                                    steps_per_epoch=len(train_dl))
        for epoch in range(epochs):
            model.train()
            train_loss = []
            lrs = []
            for batch in tqdm(train_dl):
                loss = model.training_step(batch)
                train_loss.append(loss)
                loss.backward()
                if grad_clip:
                    nn.utils.clip_grad_value_(model.parameters(), grad_clip)
                opt.step()
                opt.zero_grad()
                lrs.append(get_lr(opt))
                # OneCycleLR is stepped once per batch, as its schedule requires
                sched.step()
            result = evaluate(model, val_dl)
            result["train_loss"] = torch.stack(train_loss).mean().item()
            result["lrs"] = lrs
            model.epoch_end(epoch, epochs, result)
            history.append(result)
        return history

    # resize to 128x128 and apply a random horizontal flip as augmentation
    transform_ds = T.Compose([T.Resize((128, 128)),
                              T.RandomHorizontalFlip(),
                              T.ToTensor()
                              ])

    # datasets built from the directory layout (one sub-folder per class)
    train_ds = torchvision.datasets.ImageFolder(root=train_dir, transform=transform_ds)
    val_ds = torchvision.datasets.ImageFolder(root=val_dir, transform=transform_ds)

    # mini-batch size for both loaders
    batch_size = 128

    # data loaders over the two datasets
    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_dl = DataLoader(val_ds, batch_size, num_workers=4, pin_memory=True)

    # select the compute device
    device = get_device()

    # wrap the loaders so batches land on the device automatically
    train_dl = DeviceDataLoader(train_dl, device)
    val_dl = DeviceDataLoader(val_dl, device)

    # instantiate the ResNet18 model for 260 classes and move it to the device.
    # NOTE(review): this rebinds the name ``model`` from the class above to an
    # instance; all later code relies on the instance.
    model = to_device(model(num_classes=260), device)

    # baseline validation metrics before any training
    result = [evaluate(model, val_dl)]

    # training hyper-parameters
    epochs = 5
    max_lr = 10e-5
    grad_clip = 0.1
    weight_decay = 10e-4
    opt_func = torch.optim.Adam

    history = fit_one_cycle(epochs, max_lr, model, train_dl, val_dl,
                            weight_decay=weight_decay,
                            grad_clip=grad_clip,
                            opt_func=opt_func)

    # per-epoch validation metrics from the training history
    accuracy = [x["val_acc"] for x in history]
    val_loss = [x["val_loss"] for x in history]

    # persist the final-epoch metrics and the trained weights
    pytorch_metrics = {'loss': val_loss[-1], 'test': accuracy[-1]}
    torch.save(model.state_dict(), "pytorch_model.pt")
    joblib.dump(pytorch_metrics, 'pytorch_metrics')
    print(result)
if __name__ == '__main__':
    # CLI entry point: --img_folder is the dataset root holding the
    # train/valid/test sub-directories.
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_folder')
    args = parser.parse_args()
    pyt(args.img_folder)
| [
"sootersaalu@gmail.com"
] | sootersaalu@gmail.com |
dddf73a1ab73efc1e40f3649f0968ad51992f15e | 58893b353ac929689f0fdd1c18c12050312483e6 | /examples/mini_batch_trainer.py | f388260f6696297fdecf88810f4cddee1f3f4a29 | [] | no_license | cuhk-mobitec/E-Payment-Transaction | 5bc7d992a3207496435d1ecbe89df92ddc2b923a | eb55ef917d715b1b81c0edeab409db1e9835437e | refs/heads/master | 2020-05-21T03:54:46.991648 | 2019-04-26T06:13:24 | 2019-04-26T06:13:24 | 185,900,553 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,418 | py | from .utils.metrics import accuracy
from .utils.torch_utils import EarlyStopping
import torch
try:
from tensorboardX import SummaryWriter
use_tensorboardx = True
except:
use_tensorboardx = False
import numpy as np
import time
import logging
import matplotlib.pyplot as plt
import os
from dgl.contrib.sampling import NeighborSampler
import dgl.function as fn
class MiniBatchTrainer(object):
    """Runs neighbor-sampled mini-batch training and evaluation for a DGL GNN.

    Holds the graph, two model copies (training and inference), the data
    splits as both boolean masks and id tensors, and the knobs for
    sampling, batching and early stopping.
    """

    def __init__(self, g, model, model_infer, loss_fn, optimizer, epochs, features, labels, train_mask, val_mask, test_mask, fast_mode, n_edges, patience, batch_size, test_batch_size, num_neighbors, n_layers, num_cpu, model_dir='./'):
        self.g = g
        self.model = model
        self.model_infer = model_infer
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        # self.sched_lambda = {
        #     'none': lambda epoch: 1,
        #     'decay': lambda epoch: max(0.98 ** epoch, 1e-4),
        # }
        # self.sched = torch.optim.lr_scheduler.LambdaLR(self.optimizer,
        #     self.sched_lambda['none'])
        # print(train_mask.shape)
        # Convert the boolean split masks into int64 node-id tensors for the
        # neighbor sampler's seed_nodes argument.
        self.train_id = train_mask.nonzero().view(-1).to(torch.int64)
        self.val_id = val_mask.nonzero().view(-1).to(torch.int64)
        self.test_id = test_mask.nonzero().view(-1).to(torch.int64)
        self.epochs = epochs
        self.features = features
        self.labels = labels
        self.train_mask = train_mask
        self.val_mask = val_mask
        self.test_mask = test_mask
        if use_tensorboardx:
            # Optional tensorboardX logging of weights/metrics.
            self.writer = SummaryWriter('/tmp/tensorboardx')
        self.fast_mode = fast_mode
        self.n_edges = n_edges
        self.patience = patience
        self.batch_size = batch_size
        self.test_batch_size = test_batch_size
        self.num_neighbors = num_neighbors
        self.n_layers = n_layers
        self.model_dir = model_dir
        self.num_cpu = num_cpu
        # initialize early stopping object (checkpoints into model_dir)
        self.early_stopping = EarlyStopping(patience=patience, log_dir=model_dir, verbose=True)

    def evaluate(self, features, labels, mask):
        """Full-graph evaluation: forward pass without gradients, then
        accuracy restricted to the nodes selected by ``mask``."""
        self.model.eval()
        with torch.no_grad():
            logits = self.model(features)
            logits = logits[mask]
            labels = labels[mask]
            return accuracy(logits, labels)
def train(self):
# initialize
dur = []
train_losses = [] # per mini-batch
train_accuracies = []
val_losses = []
val_accuracies = []
for epoch in range(self.epochs):
train_losses_temp = []
train_accuracies_temp = []
val_losses_temp = []
val_accuracies_temp = []
if use_tensorboardx:
for i, (name, param) in enumerate(self.model.named_parameters()):
self.writer.add_histogram(name, param, epoch)
# minibatch train
train_num_correct = 0 # number of correct prediction in validation set
train_total_losses = 0 # total cross entropy loss
if epoch >= 2:
t0 = time.time()
for nf in NeighborSampler(self.g,
batch_size=self.batch_size,
expand_factor=self.num_neighbors,
neighbor_type='in',
shuffle=True,
num_hops=self.n_layers,
add_self_loop=False,
seed_nodes=self.train_id):
# update the aggregate history of all nodes in each layer
for i in range(self.n_layers):
agg_history_str = 'agg_history_{}'.format(i)
self.g.pull(nf.layer_parent_nid(i+1), fn.copy_src(src='history_{}'.format(i), out='m'),
fn.sum(msg='m', out=agg_history_str))
# Copy the features from the original graph to the nodeflow graph
node_embed_names = [['features', 'history_0']]
for i in range(1, self.n_layers):
node_embed_names.append(['history_{}'.format(i), 'agg_history_{}'.format(i-1), 'subg_norm', 'norm'])
node_embed_names.append(['agg_history_{}'.format(self.n_layers-1), 'subg_norm', 'norm'])
edge_embed_names = [['edge_features']]
nf.copy_from_parent(node_embed_names=node_embed_names,
edge_embed_names=edge_embed_names)
# Forward Pass, Calculate Loss and Accuracy
self.model.train() # set to train mode
logits = self.model(nf)
batch_node_ids = nf.layer_parent_nid(-1)
batch_size = len(batch_node_ids)
batch_labels = self.labels[batch_node_ids]
mini_batch_accuracy = accuracy(logits, batch_labels)
train_num_correct += mini_batch_accuracy * batch_size
train_loss = self.loss_fn(logits, batch_labels)
train_total_losses += (train_loss.item() * batch_size)
# Train
self.optimizer.zero_grad()
train_loss.backward()
self.optimizer.step()
node_embed_names = [['history_{}'.format(i)] for i in range(self.n_layers)]
node_embed_names.append([])
# Copy the udpated features from the nodeflow graph to the original graph
nf.copy_to_parent(node_embed_names=node_embed_names)
# loss and accuracy of this epoch
train_average_loss = train_total_losses / len(self.train_id)
train_losses.append(train_average_loss)
train_accuracy = train_num_correct / len(self.train_id)
train_accuracies.append(train_accuracy)
# copy parameter to the inference model
if epoch >= 2:
dur.append(time.time() - t0)
# Validation
val_num_correct = 0 # number of correct prediction in validation set
val_total_losses = 0 # total cross entropy loss
for nf in NeighborSampler(self.g,
batch_size=len(self.val_id),
expand_factor=self.g.number_of_nodes(),
neighbor_type='in',
num_hops=self.n_layers,
seed_nodes=self.val_id,
add_self_loop=False,
num_workers=self.num_cpu):
# in testing/validation, no need to update the history
node_embed_names = [['features']]
edge_embed_names = [['edge_features']]
for i in range(self.n_layers):
node_embed_names.append(['norm', 'subg_norm'])
nf.copy_from_parent(node_embed_names=node_embed_names,
edge_embed_names=edge_embed_names)
self.model_infer.load_state_dict(self.model.state_dict())
logits, embeddings = self.model_infer(nf)
batch_node_ids = nf.layer_parent_nid(-1)
batch_size = len(batch_node_ids)
batch_labels = self.labels[batch_node_ids]
mini_batch_accuracy = accuracy(logits, batch_labels)
val_num_correct += mini_batch_accuracy * batch_size
mini_batch_val_loss = self.loss_fn(logits, batch_labels)
val_total_losses += (mini_batch_val_loss.item() * batch_size)
# loss and accuracy of this epoch
val_average_loss = val_total_losses / len(self.val_id)
val_losses.append(val_average_loss)
val_accuracy = val_num_correct / len(self.val_id)
val_accuracies.append(val_accuracy)
# early stopping
self.early_stopping(val_average_loss, self.model_infer)
if self.early_stopping.early_stop:
logging.info("Early stopping")
break
# if epoch == 25:
# # switch to sgd with large learning rate
# # https://arxiv.org/abs/1706.02677
# self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.001)
# self.sched = torch.optim.lr_scheduler.LambdaLR(self.optimizer, self.sched_lambda['decay'])
# elif epoch < 25:
# self.sched.step()
logging.info("Epoch {:05d} | Time(s) {:.4f} | TrainLoss {:.4f} | TrainAcc {:.4f} |"
" ValLoss {:.4f} | ValAcc {:.4f} | ETputs(KTEPS) {:.2f}".
format(epoch, np.mean(dur), train_average_loss, train_accuracy,
val_average_loss, val_accuracy, self.n_edges / np.mean(dur) / 1000))
# embeddings visualization
if use_tensorboardx:
self.writer.add_embedding(embeddings, global_step=epoch, metadata=batch_labels)
# load the last checkpoint with the best model
self.model.load_state_dict(torch.load(os.path.join(self.model_dir, 'checkpoint.pt')))
# # logging.info()
# acc = self.evaluate(self.features, self.labels, self.test_mask)
# logging.info("Test Accuracy {:.4f}".format(acc))
self.plot(train_losses, val_losses, train_accuracies, val_accuracies)
def plot(self, train_losses, val_losses, train_accuracies, val_accuracies):
#####################################################################
##################### PLOT ##########################################
#####################################################################
# visualize the loss as the network trained
fig = plt.figure(figsize=(10,8))
plt.plot(range(1,len(train_losses)+1),np.log(train_losses), label='Training Loss')
plt.plot(range(1,len(val_losses)+1),np.log(val_losses),label='Validation Loss')
# find position of lowest validation loss
minposs = val_losses.index(min(val_losses))+1
plt.axvline(minposs, linestyle='--', color='r',label='Early Stopping Checkpoint')
plt.xlabel('epochs')
plt.ylabel('log cross entropy loss')
plt.xlim(0, len(train_losses)+1) # consistent scale
plt.grid(True)
plt.legend()
plt.tight_layout()
# plt.show()
fig.savefig(os.path.join(self.model_dir, 'loss_plot.png'), bbox_inches='tight')
# accuracy plot
fig = plt.figure(figsize=(10,8))
plt.plot(range(1,len(train_accuracies)+1),train_accuracies, label='Training accuracies')
plt.plot(range(1,len(val_accuracies)+1),val_accuracies,label='Validation accuracies')
# find position of lowest validation loss
minposs = val_losses.index(min(val_losses))+1
plt.axvline(minposs, linestyle='--', color='r',label='Early Stopping Checkpoint')
plt.xlabel('epochs')
plt.ylabel('accuracies')
plt.xlim(0, len(train_accuracies)+1) # consistent scale
plt.grid(True)
plt.legend()
plt.tight_layout()
# plt.show()
fig.savefig(os.path.join(self.model_dir, 'accuracies_plot.png'), bbox_inches='tight')
| [
"handasontam@gmail.com"
] | handasontam@gmail.com |
21c63146676fd30217432916e59f7094633339a4 | 1a9852fe468f18e1ac3042c09286ccda000a4135 | /Specialist Certificate in Data Analytics Essentials/DataCamp/05-Working_with_Dates_and_Times/e23_march_29_throughout_a_decade.py | f8a0f897b1922372263e9afbb7bb4c04be5da9a8 | [] | no_license | sarmabhamidipati/UCD | 452b2f1e166c1079ec06d78e473730e141f706b2 | 101ca3152207e2fe67cca118923896551d5fee1c | refs/heads/master | 2023-08-14T15:41:24.312859 | 2021-09-22T17:33:01 | 2021-09-22T17:33:01 | 386,592,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | """
For example, in the United Kingdom, as of the time this lesson was written, Daylight Saving begins on the last Sunday
in March. Let's look at the UTC offset for March 29, at midnight, for the years 2000 to 2010.
Using tz, set the timezone for dt to be 'Europe/London'.
Within the for loop:
Use the .replace() method to change the year for dt to be y.
Call .isoformat() on the result to observe the results.
"""
# Import datetime and tz
from datetime import datetime
from dateutil import tz
# Anchor: midnight on 29 March in the Europe/London timezone
base_dt = datetime(2000, 3, 29, tzinfo=tz.gettz('Europe/London'))

# For 29 March of every year 2000..2010, print the ISO timestamp
# (the UTC offset shows whether British Summer Time is in effect).
for year in range(2000, 2011):
    stamp = base_dt.replace(year=year)
    print(stamp.isoformat())
| [
"b_vvs@yahoo.com"
] | b_vvs@yahoo.com |
984881e82d2ab32e230a2ebdc14c0a2bbab910cf | e7a0edf2c948f4897e369d235d7477b982640a74 | /eape/migrations/0003_pagamento_valor.py | 7e386ed271f7275ad4e73107113fb3e5271263a5 | [] | no_license | jhussyelleReis/eape | 4199582d6a80a9a88769c14c4fbb30f543b5948b | 16da312ce227a86541133c9ba9d290c861cca1e0 | refs/heads/master | 2021-01-20T05:00:54.051449 | 2017-08-25T15:14:44 | 2017-08-25T15:14:44 | 101,407,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-25 12:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the ``valor`` (amount) column
    # to the ``pagamento`` (payment) model.
    dependencies = [
        ('eape', '0002_auto_20170825_0913'),
    ]
    operations = [
        migrations.AddField(
            model_name='pagamento',
            name='valor',
            # Up to 5 digits total with 2 decimal places (max 999.99).
            # default=1 only backfills existing rows; preserve_default=False
            # removes the default from the model state afterwards.
            field=models.DecimalField(decimal_places=2, default=1, max_digits=5),
            preserve_default=False,
        ),
    ]
| [
"jhussyelle.reis@gmail.com"
] | jhussyelle.reis@gmail.com |
f021bb102cc87eb011524a21ba749fcfa70d2ffc | 66320714c6e020a8c546c0a74949039b1cc95403 | /T2.py | 69dd116432df20bed1a77a99d008e02598564c8e | [] | no_license | sb17027/ORS-PA-18-Homework06 | 372c783f47d924499fe94094a26b7b3d1e438861 | fd8fde4acb8c355881d143acd1dc507bd0ef9295 | refs/heads/master | 2020-05-05T03:08:28.171623 | 2019-04-08T12:14:08 | 2019-04-08T12:14:08 | 179,662,273 | 0 | 0 | null | 2019-04-05T10:29:27 | 2019-04-05T10:29:27 | null | UTF-8 | Python | false | false | 640 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the compareTriplets function below.
def compareTriplets(a, b):
    """Compare two score lists element-wise (HackerRank "Compare the Triplets").

    Awards one point to ``a`` for every position where ``a`` is strictly
    greater and one point to ``b`` where ``b`` is strictly greater; ties
    score nothing.

    Generalized from the original hard-coded length-3 ``while`` loop: any
    pair of sequences works, with comparison stopping at the shorter one
    (standard ``zip`` semantics), so length-3 inputs behave exactly as before.

    Returns:
        list: ``[aPoints, bPoints]``.
    """
    aPoints = 0
    bPoints = 0
    for x, y in zip(a, b):
        if x > y:
            aPoints += 1
        elif x < y:  # the two branches are mutually exclusive
            bPoints += 1
    return [aPoints, bPoints]
def main():
    """Read two whitespace-separated integer score lists from stdin,
    compare them with compareTriplets, and print the two totals."""
    a = list(map(int, input("Insert scores for a:").rstrip().split()))
    b = list(map(int, input("Insert scores for b:").rstrip().split()))
    result = compareTriplets(a, b)
    print("Final result:\n")
    print(' '.join(map(str, result)))


if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. from a test)
    # does not block on input(); the original called main() unconditionally.
    main()
"sanela.becovic@udg.edu.me"
] | sanela.becovic@udg.edu.me |
fa469309fe18cbe3e77032ace895be4cfa02963f | aa7049506e929693941436f93e22b13ff3122650 | /clubs/migrations/0002_club_club_picture.py | 95e50125132ee8e4eda71cd2d4fd2b4b1f9cfa77 | [] | no_license | austinbrovick/bellevue_college_hackathon | 24aa5f1ef64c4a4b85dd50e1f6dd628be15f3817 | 2ad9fa6c5ea79e8a34d55df8e21838aeb8fd044f | refs/heads/master | 2021-05-31T16:08:32.770057 | 2016-05-21T16:54:46 | 2016-05-21T16:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-20 07:24
from __future__ import unicode_literals
import clubs.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional picture to Club.
    dependencies = [
        ('clubs', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='club',
            name='club_picture',
            # blank=True/null=True make the image optional both in forms and
            # at the database level; the storage path is computed at save
            # time by clubs.models.upload_location.
            field=models.ImageField(blank=True, null=True, upload_to=clubs.models.upload_location),
        ),
    ]
| [
"austinbrovick@gmail.com"
] | austinbrovick@gmail.com |
9475e978727f421d6640b6c19aa2463bef419be8 | e9e717e8dd8d05ccf39170492721559076312a50 | /{{ cookiecutter.repo_name }}/src/transform.py | 37973295657a02ee98c67018a41f22b4433f8016 | [
"MIT"
] | permissive | alexkyllo/workbench-py | bf9ca182eb86ddfb828887ee459a63212373c79d | c0f56450a416fda6905b2f8ee087d414bcc0dd95 | refs/heads/master | 2022-12-08T12:02:01.038914 | 2020-09-04T05:28:33 | 2020-09-04T05:28:33 | 291,903,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | """transform.py
fit a transformer on test data to transform
test and training data.
"""
import os
import logging
import dotenv
import click
import joblib
from sklearn import preprocessing, impute, pipeline, compose
@click.command()
@click.argument("input_file", type=click.Path(exists=True))
# BUG FIX: click.Path must be instantiated; the original passed the class
# object itself (type=click.Path) here and below.
@click.argument("output_file", type=click.Path())
# BUG FIX: the original @click.option("pipeline_file", ...) has no
# dash-prefixed declaration, which click rejects at import time.
@click.option(
    "--pipeline-file",
    "pipeline_file",
    type=click.Path(),
    required=True,
    help="Path to the serialized transformer pipeline.",
)
@click.option("--fit/--no-fit", default=False, help="Fit the transformer")
def transform(input_file, output_file, pipeline_file, fit):
    """
    Transform INPUT_FILE to OUTPUT_FILE using the serialized pipeline at
    --pipeline-file.

    If --fit specified, a pipeline is created, fitted on the data,
    and written to the pipeline file.

    Otherwise, a pipeline is read from the pipeline file and used to
    transform the data only.
    """
    logger = logging.getLogger(__name__)
    logger.info("Reading %s", input_file)
    if fit:
        # Build a default numeric preprocessing pipeline to be fitted and
        # persisted.  (Template placeholder: adjust steps for your data.)
        # BUG FIX: the original dumped the name `pipeline`, which is the
        # imported sklearn *module* -- and an UnboundLocalError in practice,
        # because `pipeline` is assigned in the else branch below.
        fitted_pipeline = pipeline.Pipeline(
            [
                ("impute", impute.SimpleImputer()),
                ("scale", preprocessing.StandardScaler()),
            ]
        )
        # TODO: load input_file, call fitted_pipeline.fit_transform(...),
        # and write the transformed data to output_file.
        joblib.dump(fitted_pipeline, pipeline_file)
    else:
        # read and deserialize the pipeline from the pipeline file
        fitted_pipeline = joblib.load(pipeline_file)
        # TODO: load input_file, call fitted_pipeline.transform(...),
        # and write the transformed data to output_file.
    logger.info("Writing %s", output_file)
def main():
    """Configure logging, load the .env file, and invoke the CLI command."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    dotenv.load_dotenv(dotenv.find_dotenv())
    transform()


if __name__ == "__main__":
    main()
| [
"alex.kyllo@gmail.com"
] | alex.kyllo@gmail.com |
6f60238beba04799dafc1a104a259109b9dd1f8c | d4e85150b5ffbedfd9fbbdb3ad7663d3df4a71e4 | /doc_templates/handing_schedule.py | d6fb3fe90e33481e88802c3ca713fedc25549a39 | [] | no_license | ChubChubs/SecretaryDEK | c050dc4a14104c45eb9455275af5d0eacc8ef85f | 78ed2c621fc103e225a84d8678788ac7cc151684 | refs/heads/master | 2021-01-17T15:13:54.006393 | 2017-08-28T19:52:01 | 2017-08-28T19:52:01 | 43,775,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | __author__ = 'masterbob'
#from secretary.models import Diploma, Reviewer, User, UserProfile
def context():
    """Build the template context for the thesis-handing schedule document.

    Returns a dict with a single 'dates' list; each date entry carries the
    defense date string and the list of students presenting that day.
    """
    def _student(full_name, theme):
        # All sample students share the same supervisor.
        return {
            'full_name': full_name,
            'theme': theme,
            'guide_name': 'Батюк А.Є.',
            'guide_level': 'доцент',
        }

    students = [
        _student('Пупкін Василь Петрович', 'Типу темка1'),
        _student('Ложкін Василь Петрович', 'Типу темка2'),
        _student('Жопкін Жора Еммануїлович',
                 'Терморектальний криптоаналіз як універсальний інструмент стимуляції когнітивних процесів. Аналіз методів використання та характеристик струмопровідних елементів.'),
    ]
    return {'dates': [{'date': '02.12.2014', 'students': students}]}
"bonduell@cyberdude.com"
] | bonduell@cyberdude.com |
1ee0156823b58b95c7391287b15e9d32dd2f218a | d93159d0784fc489a5066d3ee592e6c9563b228b | /Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBiasAfterAbortGap_cff.py | 80230ca8fcf180ce0466e854077ef3236b5e77fd | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 2,411 | py | import FWCore.ParameterSet.Config as cms
# Set the HLT paths
import HLTrigger.HLTfilters.hltHighLevel_cfi
# HLT filter: accept the event if any trigger bit registered under the
# 'SiStripCalMinBiasAfterAbortGap' event-setup key fired (logical OR).
ALCARECOSiStripCalMinBiasAfterAbortGapHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    andOr = True, ## choose logical OR between Triggerbits
##     HLTPaths = [
##     #Minimum Bias
##     "HLT_MinBias*"
##     ],
    eventSetupPathsKey = 'SiStripCalMinBiasAfterAbortGap',
    throw = False # tolerate triggers stated above, but not available
)
# Select only events where tracker had HV on (according to DCS bit information)
# AND respective partition is in the run (according to FED information)
import CalibTracker.SiStripCommon.SiStripDCSFilter_cfi
DCSStatusForSiStripCalMinBiasAfterAbortGap = CalibTracker.SiStripCommon.SiStripDCSFilter_cfi.siStripDCSFilter.clone()
# Select only good tracks
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOSiStripCalMinBiasAfterAbortGap = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone()
ALCARECOSiStripCalMinBiasAfterAbortGap.filter = True ##do not store empty events
ALCARECOSiStripCalMinBiasAfterAbortGap.src = 'generalTracks'
ALCARECOSiStripCalMinBiasAfterAbortGap.applyBasicCuts = True
ALCARECOSiStripCalMinBiasAfterAbortGap.ptMin = 0.8 ##GeV
ALCARECOSiStripCalMinBiasAfterAbortGap.nHitMin = 6 ## at least 6 hits required
ALCARECOSiStripCalMinBiasAfterAbortGap.chi2nMax = 10.
# Plain minimum-bias track skim: all muon/jet/two-body selections disabled.
ALCARECOSiStripCalMinBiasAfterAbortGap.GlobalSelector.applyIsolationtest = False
ALCARECOSiStripCalMinBiasAfterAbortGap.GlobalSelector.applyGlobalMuonFilter = False
ALCARECOSiStripCalMinBiasAfterAbortGap.GlobalSelector.applyJetCountFilter = False
ALCARECOSiStripCalMinBiasAfterAbortGap.TwoBodyDecaySelector.applyMassrangeFilter = False
ALCARECOSiStripCalMinBiasAfterAbortGap.TwoBodyDecaySelector.applyChargeFilter = False
ALCARECOSiStripCalMinBiasAfterAbortGap.TwoBodyDecaySelector.applyAcoplanarityFilter = False
ALCARECOSiStripCalMinBiasAfterAbortGap.TwoBodyDecaySelector.applyMissingETFilter = False
# Sequence #
# Order matters: HLT trigger filter, then DCS status filter, then tracks.
seqALCARECOSiStripCalMinBiasAfterAbortGap = cms.Sequence(ALCARECOSiStripCalMinBiasAfterAbortGapHLT*
                                                         DCSStatusForSiStripCalMinBiasAfterAbortGap *
                                                         ALCARECOSiStripCalMinBiasAfterAbortGap)
| [
"dimattia@cern.ch"
] | dimattia@cern.ch |
4240df00eb5010e26f95c087f229324170c9f756 | 18a6b272d4c55b24d9c179ae1e58959674e53afe | /tf_rl/examples/Sutton_RL_Intro/ch4_DP/value_iteration.py | 60e7e8461ec89060fd9007c1bb8e4dffbb0be478 | [
"MIT"
] | permissive | Rowing0914/TF2_RL | 6cce916f409b3d4ef2a5a40a0611908f20d08b2c | c1b7f9b376cbecf01deb17f76f8e761035ed336a | refs/heads/master | 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 | MIT | 2022-12-08T07:02:42 | 2020-01-12T23:53:48 | Python | UTF-8 | Python | false | false | 854 | py | # Following the algo in section 4.4 Value Iteration
from policy_evaluation import Policy_Evaluation
import sys
import numpy as np
if "../" not in sys.path:
sys.path.append("../")
from utils.envs.grid_world import GridworldEnv
def Value_Iteration(env, policy, state_value, gamma, theta):
    """Value Iteration (Sutton & Barto, section 4.4).

    Repeatedly applies the Bellman optimality backup to ``state_value``
    (in place) until the largest update falls below ``theta``, then writes
    the greedy deterministic one-hot policy into ``policy``.

    BUG FIX: the original ran one policy evaluation and then took
    ``argmax(policy[s])`` over the policy's own probabilities -- it never
    consulted the environment dynamics ``env.P``, so with the uniform
    initial policy it always selected action 0.

    Args:
        env: environment exposing ``nS``, ``nA`` and the gym-style model
            ``P[s][a] -> [(prob, next_state, reward, done), ...]``.
        policy: (nS, nA) array, overwritten with the greedy policy.
        state_value: (nS,) array of state values, updated in place.
        gamma: discount factor.
        theta: convergence threshold on the value update.

    Returns:
        The greedy ``policy`` array.
    """
    def one_step_lookahead(s):
        # Q(s, a) for every action under the current value estimates.
        return [
            sum(prob * (reward + gamma * state_value[next_s])
                for prob, next_s, reward, done in env.P[s][a])
            for a in range(env.nA)
        ]

    # Sweep until the value function stops changing (within theta).
    while True:
        delta = 0.0
        for s in range(env.nS):
            best = max(one_step_lookahead(s))
            delta = max(delta, abs(best - state_value[s]))
            state_value[s] = best
        if delta < theta:
            break
    # Extract the deterministic greedy policy from the converged values.
    for s in range(env.nS):
        policy[s] = np.eye(env.nA)[np.argmax(one_step_lookahead(s))]
    return policy
if __name__ == '__main__':
    # Gridworld from Sutton & Barto chapter 4 (utils.envs.grid_world).
    env = GridworldEnv()
    # Initial estimates: zero values and a uniform random policy.
    state_value = np.zeros(env.nS)
    policy = np.ones([env.nS, env.nA]) / env.nA
    gamma = 1  # undiscounted, episodic task
    theta = 0.00001  # convergence threshold
    print("===== Training Started =====")
    policy = Value_Iteration(env, policy, state_value, gamma, theta)
    print("===== Training Finished =====")
    print(policy)
    # NOTE(review): this print only shows converged values if Value_Iteration
    # updates state_value in place -- confirm; otherwise it prints the zeros.
    print(state_value)
| [
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
ae317d3819b06f5de71f3da6f88fc4df21141864 | b593247a2bf162819eea6820b6a25c7a659d2f76 | /Unit 07 Lists and Functions/01 Lists and Functions/1 List Recap/4-Removing elements from lists.py | 8f667b9e1e80ddb0e19190278409ab25d8eb16c0 | [] | no_license | Angelpacman/codecademy-py3 | d4d727857a8894fec5dd3d78c00f3f25f31979dc | 729d232a8732e53bdf0131246b043354ed933614 | refs/heads/master | 2020-03-28T02:50:31.431167 | 2019-01-26T01:07:01 | 2019-01-26T01:07:01 | 147,601,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | n = [1, 3, 5]
# .remove(value) deletes the first occurrence of the VALUE 1,
# NOT the item at index 1  -> n == [3, 5]
n.remove(1)
# del removes the item at the given index; "del" is a statement, not a
# function, so it takes no parentheses (original wrote del(n[0]))  -> n == [5]
del n[0]
# .pop(index) also removes the item at the given index, and returns it
# to the caller  -> n == []
n.pop(0)
print(n)
| [
"angelr4a1@gmail.com"
] | angelr4a1@gmail.com |
6cf0154e33520dc042d50a3f03c9ef013abaeca8 | 1e5c6f4b08d9470fce248cf39e6dccce40e90a41 | /codes/11/vpython_mouse.py | 1dfa5be57b729a6fc2531903cb36ec3f2576e212 | [] | no_license | misaiya99/scipybook2 | 1529cfb7f800df2ef7ce024a86281af16e343a37 | 734ba177b4705cc25da695d42a8cbada7cd22bd9 | refs/heads/master | 2020-03-10T21:26:23.595494 | 2017-08-25T09:48:07 | 2017-08-25T09:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # -*- coding: utf-8 -*-
from visual import *
# On-screen label that continuously displays the mouse state.
text = label(pos=(0, -2, 0))
# A sphere and a box to give the mouse something pickable to hit.
sphere(pos=(0,2,0))
box(pos = (2, 0, 0))
# Red arrow re-oriented every frame to follow the mouse ray.
ray = arrow(pos=(0,0,0), color=(1,0,0))
while True:
    rate(30)  # cap the loop at ~30 iterations per second
    texts = []
    # Dump the interesting scene.mouse attributes into the label.
    for attrname in ["pos", "pick", "pickpos", "camera", "ray"]:
        texts.append("%s=%s" % (attrname, getattr(scene.mouse, attrname)))
    # Projection of the mouse onto the plane through scene.center that is
    # perpendicular to the viewing direction.
    texts.append("project=%s" %
                 scene.mouse.project(normal=scene.forward, point=scene.center))
    text.text = "\n".join(texts)
    ray.axis = scene.mouse.ray
    # Drain one queued mouse event per frame and report its flags.
    if scene.mouse.events > 0:
        event = scene.mouse.getevent()
        print(("press=%s, click=%s, drag=%s, drop=%s, release=%s" % (
            event.press, event.click, event.drag, event.drop, event.release
        )))
| [
"qytang326@gmail.com"
] | qytang326@gmail.com |
0f148fc9b8e79d34efa1a849ff1c6060148fa7d6 | 14a792c64600796272a6ad02e9553b2ecffc175b | /Functions.py | 7ab2e75df567509e8324121bb0832b7b15cbc8b5 | [] | no_license | MrRa1n/Python-Learning | 497c2ed0f6e8bf24d16eacf18a604ff72b117ba6 | a910d66b09d86774374c0aebb4fd9e90ed332545 | refs/heads/master | 2020-03-22T16:27:03.787423 | 2018-07-17T14:35:24 | 2018-07-17T14:35:24 | 140,327,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # Simple number squaring function
def square(x):
    """Return x squared.

    Idiom fix: ``return`` is a statement, not a function, so the original
    ``return(x*x)`` parentheses are dropped.
    """
    return x * x


print(square(3))
# Function parameters
def multiply(x, y):
    """Print the call arguments and then their product."""
    args_message = ("You called multiply(x,y) with the value x = " + str(x)
                    + " and y = " + str(y))
    print(args_message)
    print("x * y = " + str(x * y))


multiply(3, 2)
| [
"tobycookni@gmail.com"
] | tobycookni@gmail.com |
4bcd800c07e4277b3973998a8b7011e197ab5888 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/UpdateTableThemeRequest.py | 4d6f11d6dc5e74a7089425fa39c49b429212d945 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,746 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class UpdateTableThemeRequest(RpcRequest):
    """Auto-generated RPC request for DataWorks ``UpdateTableTheme``
    (API version 2020-05-18); parameters are sent as query params."""
    def __init__(self):
        RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'UpdateTableTheme')
        self.set_method('POST')
        # Attach endpoint routing data when the core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    # Generated accessors: each getter/setter maps one query parameter.
    def get_Name(self):
        return self.get_query_params().get('Name')
    def set_Name(self,Name):
        self.add_query_param('Name',Name)
    def get_ThemeId(self):
        return self.get_query_params().get('ThemeId')
    def set_ThemeId(self,ThemeId):
        self.add_query_param('ThemeId',ThemeId)
    def get_ProjectId(self):
        return self.get_query_params().get('ProjectId')
    def set_ProjectId(self,ProjectId):
        self.add_query_param('ProjectId',ProjectId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
4d53d7f73ebb9720864f89da0c2327cfa136e2c2 | 54ddb3f38cd09ac25213a7eb8743376fe778fee8 | /topic_05_data_structure/practice/zip_1_common.py | 4236c882354be343b387f643ccd4d9be6d9b4296 | [] | no_license | ryndovaira/leveluppythonlevel1_300321 | dbfd4ee41485870097ee490f652751776ccbd7ab | 0877226e6fdb8945531775c42193a90ddb9c8a8b | refs/heads/master | 2023-06-06T07:44:15.157913 | 2021-06-18T11:53:35 | 2021-06-18T11:53:35 | 376,595,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | """
Функция zip_common.
Принимает 3 аргумента: список, строку и кортеж.
Возвращает список (list) с тройками значений из каждого аргумента.
ВНИМАНИЕ: для строки один элемент = один символ
(Порядок проверки именно такой:)
Если вместо list передано что-то другое, то возвращать строку 'First arg must be list!'.
Если вместо str передано что-то другое, то возвращать строку 'Second arg must be str!'.
Если вместо tuple передано что-то другое, то возвращать строку 'Third arg must be tuple!'.
Если list пуст, то возвращать строку 'Empty list!'.
Если str пуст, то возвращать строку 'Empty str!'.
Если tuple пуст, то возвращать строку 'Empty tuple!'.
Если list, str и tuple различного размера, обрезаем до минимального (стандартный zip).
"""
def zip_common(my_list, my_str, my_tuple):
    """Zip a list, a string and a tuple into a list of value triples.

    Arguments are validated in order: first exact types (list, str, tuple),
    then emptiness; on any violation the matching message string is
    returned instead of raising.  Inputs of different lengths are truncated
    to the shortest (standard ``zip`` behaviour).
    """
    ordinals = ('First', 'Second', 'Third')
    args = (my_list, my_str, my_tuple)
    kinds = (list, str, tuple)
    # Exact-type checks in argument order; subclasses are rejected too,
    # matching the original `type(x) != T` comparisons.
    for ordinal, value, kind in zip(ordinals, args, kinds):
        if type(value) is not kind:
            return '%s arg must be %s!' % (ordinal, kind.__name__)
    # Emptiness checks, again in argument order.
    for value, kind in zip(args, kinds):
        if not value:
            return 'Empty %s!' % kind.__name__
    return list(zip(my_list, my_str, my_tuple))
| [
"ryndovaira@gmail.com"
] | ryndovaira@gmail.com |
ada60d2e8fc354bdf8b960331d4a2c3dd3495c84 | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/shared/actions/__init__.py | dd15a0c04bdd65ad5d78e61ce0b7c7d736212659 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 9,956 | py | # 2017.08.29 21:49:32 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/shared/actions/__init__.py
import BigWorld
from adisp import process
from debug_utils import LOG_DEBUG, LOG_ERROR
from gui.Scaleform.Waiting import Waiting
from gui.Scaleform.framework import ViewTypes
from gui.app_loader import g_appLoader
from gui.prb_control.settings import FUNCTIONAL_FLAG
from gui.shared import g_eventBus, EVENT_BUS_SCOPE
from gui.shared.actions.chains import ActionsChain
from gui.shared.events import LoginEventEx, GUICommonEvent
from helpers import dependency
from predefined_hosts import g_preDefinedHosts, getHostURL
from skeletons.connection_mgr import IConnectionManager
from skeletons.gui.lobby_context import ILobbyContext
from skeletons.gui.login_manager import ILoginManager
__all__ = ('LeavePrbModalEntity', 'DisconnectFromPeriphery', 'ConnectToPeriphery', 'PrbInvitesInit', 'ActionsChain')
class Action(object):
    """Base class for deferred UI actions (presumably polled by
    ActionsChain -- confirm).  Subclasses override invoke() to start work
    and flip the protected flags as they progress."""
    def __init__(self):
        super(Action, self).__init__()
        # _completed: final outcome flag; _running: still-in-progress flag.
        self._completed = False
        self._running = False
    def invoke(self):
        """Start the action; the base implementation does nothing."""
        pass
    def isInstantaneous(self):
        """True if the action finishes inside invoke() (no polling needed)."""
        return True
    def isRunning(self):
        return self._running
    def isCompleted(self):
        return self._completed
# Seconds to wait (with the 'login' spinner shown) before ConnectToPeriphery
# actually initiates the relogin.
CONNECT_TO_PERIPHERY_DELAY = 2.0
class LeavePrbModalEntity(Action):
    """Asynchronously leaves the player's current prebattle modal entity,
    completing immediately if there is none."""
    def __init__(self):
        super(LeavePrbModalEntity, self).__init__()
        self._running = False
    def invoke(self):
        from gui.prb_control.dispatcher import g_prbLoader
        dispatcher = g_prbLoader.getDispatcher()
        if dispatcher:
            state = dispatcher.getFunctionalState()
            if state.hasModalEntity:
                factory = dispatcher.getControlFactories().get(state.ctrlTypeID)
                if factory:
                    # SWITCH flag: presumably a leave-to-switch-mode
                    # transition -- confirm against FUNCTIONAL_FLAG docs.
                    ctx = factory.createLeaveCtx(flags=FUNCTIONAL_FLAG.SWITCH)
                    if ctx:
                        self._running = True
                        self.__doLeave(dispatcher, ctx)
                    else:
                        LOG_ERROR('Leave modal entity. Can not create leave ctx', state)
                else:
                    LOG_ERROR('Leave modal entity. Factory is not found', state)
            else:
                # Nothing to leave: complete the action right away.
                LOG_DEBUG('Leave modal entity. Player has not prebattle')
                self._completed = True
    def isInstantaneous(self):
        return False
    @process
    def __doLeave(self, dispatcher, ctx):
        # adisp coroutine: the yielded leave() result signals success.
        self._completed = yield dispatcher.leave(ctx)
        if self._completed:
            LOG_DEBUG('Leave modal entity. Player left prebattle.')
        else:
            LOG_DEBUG('Leave modal entity. Action was failed.')
        self._running = False
class SelectPrb(Action):
    """Asynchronously performs a prebattle selection action
    (``prbAction``) through the prebattle dispatcher."""
    def __init__(self, prbAction):
        super(SelectPrb, self).__init__()
        self._running = False
        # The selection request to execute when invoked.
        self._prbAction = prbAction
    def invoke(self):
        from gui.prb_control.dispatcher import g_prbLoader
        dispatcher = g_prbLoader.getDispatcher()
        if dispatcher:
            self._running = True
            self.__doSelect(dispatcher)
    def isInstantaneous(self):
        return False
    @process
    def __doSelect(self, dispatcher):
        # adisp coroutine: the yielded result signals success.
        self._completed = yield dispatcher.doSelectAction(self._prbAction)
        if self._completed:
            LOG_DEBUG('Select prebattle entity. Player has joined prebattle.')
        else:
            LOG_DEBUG('Select prebattle entity. Action was failed.')
        self._running = False
class DisconnectFromPeriphery(Action):
    """Requests a return to the login screen and polls until the login
    view is created and the connection manager reports a disconnect."""
    connectionMgr = dependency.descriptor(IConnectionManager)
    def __init__(self):
        super(DisconnectFromPeriphery, self).__init__()
    def isInstantaneous(self):
        return False
    def invoke(self):
        self._running = True
        g_appLoader.goToLoginByRQ()
    def isRunning(self):
        # Polled: completes once the LOGIN view exists and we are offline.
        app = g_appLoader.getApp()
        if app:
            from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
            view = app.containerManager.getView(ViewTypes.DEFAULT)
            if view and view.settings.alias == VIEW_ALIAS.LOGIN and view.isCreated() and self.connectionMgr.isDisconnected():
                LOG_DEBUG('Disconnect action. Player came to login')
                self._completed = True
                self._running = False
        return self._running
class ConnectToPeriphery(Action):
    """Reconnects the client to another periphery (game server cluster)
    using the stored login/token2 credentials, after a short delay."""
    loginManager = dependency.descriptor(ILoginManager)
    lobbyContext = dependency.descriptor(ILobbyContext)
    connectionMgr = dependency.descriptor(IConnectionManager)
    def __init__(self, peripheryID):
        super(ConnectToPeriphery, self).__init__()
        # Target host record looked up from the predefined periphery list.
        self.__host = g_preDefinedHosts.periphery(peripheryID)
        # BigWorld time at which the actual relogin should start.
        self.__endTime = None
        self.__credentials = self.lobbyContext.getCredentials()
        return
    def isInstantaneous(self):
        return False
    def isRunning(self):
        # Polled: once the delay expires, fire the real connect exactly once.
        if self.__endTime and self.__endTime <= BigWorld.time():
            self.__endTime = None
            self.__doConnect()
        return super(ConnectToPeriphery, self).isRunning()
    def invoke(self):
        if self.__host and self.__credentials:
            # Credentials must be a (login, token2) pair with both parts set.
            if len(self.__credentials) < 2:
                self._completed = False
                LOG_ERROR('Connect action. Login info is invalid')
                return
            login, token2 = self.__credentials
            if not login or not token2:
                self._completed = False
                LOG_ERROR('Connect action. Login info is invalid')
                return
            self._running = True
            # Defer the connect by CONNECT_TO_PERIPHERY_DELAY seconds while
            # showing the login waiting spinner.
            self.__endTime = BigWorld.time() + CONNECT_TO_PERIPHERY_DELAY
            Waiting.show('login')
        else:
            LOG_ERROR('Connect action. Login info is invalid')
            self._completed = False
            self._running = False
    def __doConnect(self):
        login, token2 = self.__credentials
        self.__addHandlers()
        self.loginManager.initiateRelogin(login, token2, getHostURL(self.__host, token2))
    def __addHandlers(self):
        # Subscribe to the three possible outcomes of the relogin attempt.
        g_eventBus.addListener(LoginEventEx.ON_LOGIN_QUEUE_CLOSED, self.__onLoginQueueClosed, scope=EVENT_BUS_SCOPE.LOBBY)
        self.connectionMgr.onConnected += self.__onConnected
        self.connectionMgr.onRejected += self.__onRejected
    def __removeHandlers(self):
        g_eventBus.removeListener(LoginEventEx.ON_LOGIN_QUEUE_CLOSED, self.__onLoginQueueClosed, scope=EVENT_BUS_SCOPE.LOBBY)
        self.connectionMgr.onConnected -= self.__onConnected
        self.connectionMgr.onRejected -= self.__onRejected
    def __onConnected(self):
        # Success: connection established on the new periphery.
        self.__removeHandlers()
        self._completed = True
        self._running = False
    def __onRejected(self, status, responseData):
        # Failure: the server rejected the relogin.
        self.__removeHandlers()
        self._completed = False
        self._running = False
    def __onLoginQueueClosed(self, _):
        # Failure: the player left the login queue before connecting.
        self.__removeHandlers()
        self._completed = False
        self._running = False
        LOG_DEBUG('Connect action. Player exit from login queue')
class PrbInvitesInit(Action):
    """Waits until the prebattle invites manager has built its invite
    list, completing immediately if it is already initialized."""
    def __init__(self):
        super(PrbInvitesInit, self).__init__()
    def isInstantaneous(self):
        return False
    def invoke(self):
        from gui.prb_control.dispatcher import g_prbLoader
        invitesManager = g_prbLoader.getInvitesManager()
        if invitesManager:
            if invitesManager.isInited():
                # (Duplicated prefix in this log message kept as-is.)
                LOG_DEBUG('Invites init action. Invites init action. List of invites is build')
                self._completed = True
            else:
                # Not ready yet: wait for the init event.
                self._running = True
                invitesManager.onInvitesListInited += self.__onInvitesListInited
        else:
            LOG_ERROR('Invites init action. Invites manager not found')
            self._completed = False
    def __onInvitesListInited(self):
        from gui.prb_control.dispatcher import g_prbLoader
        invitesManager = g_prbLoader.getInvitesManager()
        if invitesManager:
            LOG_DEBUG('Invites init action. List of invites is build')
            # Unsubscribe; the action's job is done.
            invitesManager.onInvitesListInited -= self.__onInvitesListInited
        else:
            LOG_ERROR('Invites manager not found')
        self._completed = True
        self._running = False
class WaitFlagActivation(Action):
    """Action that keeps running until an external party activates its flag."""

    def __init__(self):
        super(WaitFlagActivation, self).__init__()
        self._isActive = False

    def activate(self):
        """Raise the flag; the action completes on the next poll."""
        LOG_DEBUG('Flag is activated')
        self._isActive = True

    def inactivate(self):
        """Lower the flag again."""
        LOG_DEBUG('Flag is inactivated')
        self._isActive = False

    def invoke(self):
        # Complete immediately when the flag is already up, otherwise keep running.
        if self._isActive:
            self._completed = True
        else:
            self._running = True

    def isRunning(self):
        if self._isActive:
            self._running = False
            self._completed = True
        return self._running

    def isInstantaneous(self):
        return False
class OnLobbyInitedAction(Action):
    """Action that completes once the lobby view has loaded, optionally calling
    a user-supplied callback at that moment."""
    def __init__(self, onInited = None):
        super(OnLobbyInitedAction, self).__init__()
        self.__isLobbyInited = False
        self.__onInited = onInited
        # Subscribe immediately so the event cannot be missed before invoke() runs.
        g_eventBus.addListener(GUICommonEvent.LOBBY_VIEW_LOADED, self.__onLobbyInited)
    def invoke(self):
        self._running = True
        self._completed = False
        if self.__isLobbyInited:
            onInited = self.__onInited
            if onInited and callable(onInited):
                onInited()
            self._completed = True
            self._running = False
    def __onLobbyInited(self, _):
        # One-shot: remember the event, unsubscribe, then re-run invoke() to complete.
        self.__isLobbyInited = True
        g_eventBus.removeListener(GUICommonEvent.LOBBY_VIEW_LOADED, self.__onLobbyInited)
        self.invoke()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\shared\actions\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:49:33 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
6856e89aa1d898a889e5af7dae23b5576017b49c | 292cec77b5003a2f80360d0aee77556d12d990f7 | /typings/filetype/types/video.pyi | 9b61193428fe1cf6d3aee36815c69fc32f0d96e0 | [
"Apache-2.0"
] | permissive | yubozhao/BentoML | 194a6ec804cc1c6dbe7930c49948b6707cbc3c5f | d4bb5cbb90f9a8ad162a417103433b9c33b39c84 | refs/heads/master | 2022-12-17T00:18:55.555897 | 2022-12-06T00:11:39 | 2022-12-06T00:11:39 | 178,978,385 | 3 | 0 | Apache-2.0 | 2020-12-01T18:17:15 | 2019-04-02T01:53:53 | Python | UTF-8 | Python | false | false | 2,296 | pyi | """
This type stub file was generated by pyright.
"""
from .base import Type
from .isobmff import IsoBmff
class Mp4(IsoBmff):
    """
    Implements the MP4 video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> bool:
        # Matcher predicate; body elided in this stub.
        ...
class M4v(Type):
    """
    Implements the M4V video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> Literal[False]:
        # Matcher predicate; body elided in this stub.
        ...
class Mkv(Type):
    """
    Implements the MKV video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf):
        # Matcher predicate; body elided in this stub.
        ...
class Webm(Type):
    """
    Implements the WebM video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf):
        # Matcher predicate; body elided in this stub.
        ...
class Mov(IsoBmff):
    """
    Implements the MOV video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> Literal[False]:
        # Matcher predicate; body elided in this stub.
        ...
class Avi(Type):
    """
    Implements the AVI video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> Literal[False]:
        # Matcher predicate; body elided in this stub.
        ...
class Wmv(Type):
    """
    Implements the WMV video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> Literal[False]:
        # Matcher predicate; body elided in this stub.
        ...
class Flv(Type):
    """
    Implements the FLV video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> Literal[False]:
        # Matcher predicate; body elided in this stub.
        ...
class Mpeg(Type):
    """
    Implements the MPEG video type matcher.
    """
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf): # -> Literal[False]:
        # Matcher predicate; body elided in this stub.
        ...
class M3gp(Type):
    """Implements the 3gp image type matcher."""
    MIME = ...        # value elided by the stub generator
    EXTENSION = ...   # value elided by the stub generator
    def __init__(self) -> None:
        ...
    def match(self, buf):
        # Matcher predicate; body elided in this stub.
        ...
| [
"noreply@github.com"
] | yubozhao.noreply@github.com |
644451627c369073460da56c0eb11866fb938a76 | 5e976a7323cce96fd840296841a5e87267451a55 | /blog/urls.py | f587cf49eb0a7e7fa55e7b1bf1a0cdcf113bbe26 | [
"Apache-2.0"
] | permissive | LeonardA-L/plog | a3f39d4edd0c98de50ca7e90eaa54b8e64ad2079 | 2cb479f01b7e8ad117669e57b5d3d612d330b9dd | refs/heads/master | 2021-01-10T20:19:02.690896 | 2015-02-04T21:42:43 | 2015-02-04T21:42:43 | 30,320,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | from django.conf.urls import patterns, url, include
from rest_framework.urlpatterns import format_suffix_patterns
from django.conf import settings
from django.conf.urls.static import static
from blog import views, apiviews
from blog.apimodels import *
urlpatterns = patterns('',
# Regular views
url(r'^$', views.index, name='index'), # Index list
url(r'^(?P<start>\d+)/(?P<end>\d+)/$', views.index, name='indexp'), # Index list with specific start:end page params
url(r'^(?P<article_id>\d+)/$', views.detail, name='detail'), # Details on an article (full content, comments)
# Admin views
url(r'^admin/$', views.admin, name='admin'), # Admin index page
url(r'^admin/add$', views.addPost, name='addPost'), # Calls editPost (see below) with no parameter
url(r'^admin/(?P<article_id>\d+)/$', views.editPost, name='editPost'), # Edit or add a blog entry
url(r'^admin/savePost$', views.savePost, name='savePost'), # Adds or modifies article then returns admin index page
# REST framework routes (all objects are returned with a JSON format)
url(r'^api/articles/(?P<id>[0-9]+)$', apiviews.article_detail), # GETs a specific article, POST deletes article. Needs article id
url(r'^api/comment/(?P<id>[0-9]+)$', apiviews.comment), # GETs an article's comments. Needs article id
url(r'^api/articles/$', apiviews.articles), # GETs all articles
url(r'^api/comment/$', apiviews.addComment), # POSTs a new comment
url(r'^api/comment/delete$', apiviews.removeComment), # POST : deletes a comment
# Note : this API is UNSAFE (yet) because it cannot check if the AJAX request you made was made by an administrator (yet).
)
#urlpatterns = format_suffix_patterns(urlpatterns)+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"leonard.allain-launay@insa-lyon.fr"
] | leonard.allain-launay@insa-lyon.fr |
69b0ea7ad07844c44820ad08019db22d1690c758 | 5c65907ee3324da055694dbe0520fae2d9b2bec4 | /trainer/utils.py | 2c4f84bcbb76c2c5d636adaf648d29766fe8b3fe | [] | no_license | imdaredevil/MNIST-using-google-ai | d1f8d784da8252e4da9510bac53eae0f35de8830 | 4fe01af26b64256e332bdfac4bc9acfa8f36d487 | refs/heads/master | 2023-06-01T01:57:08.329471 | 2021-06-28T04:19:31 | 2021-06-28T04:19:31 | 380,897,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | import tensorflow as tf
import pandas as pd
import numpy as np
import json
def prepare_test_data(inputFile, limit = 10):
    """Load 28x28 grayscale test images from a CSV of pixel columns.

    Returns a float array of shape (n, 28, 28, 1) scaled to [0, 1].
    NOTE(review): the `limit` parameter is declared but never used here.
    """
    df = pd.read_csv(tf.io.gfile.GFile(inputFile))
    print(df.describe())
    x = df.to_numpy()
    x = x / 255.0                    # scale pixel values to [0, 1]
    x = x.reshape((-1, 28, 28,1))    # (samples, height, width, channels)
    return x
def prepare_train_data(inputFile):
    """Load training images and labels from a CSV whose first column is 'label'.

    Returns (x, y): x with shape (n, 28, 28, 1) scaled to [0, 1], y the labels.
    """
    df = pd.read_csv(tf.io.gfile.GFile(inputFile))
    print(df.describe())
    xpd = df.iloc[:,1:]      # all pixel columns
    ypd = df['label']
    x = xpd.to_numpy()
    y = ypd.to_numpy()
    x = x / 255.0
    x = x.reshape((-1, 28, 28,1))
    return x,y
def convert_to_list(x, limit=None):
    """Convert array `x` to a plain Python list, optionally truncated to `limit` items."""
    items = x.tolist()
    if limit is None:
        return items
    return items[0:limit]
def write_json_for_submission(jsonFilePath, xlist):
    """Write each element of `xlist` as one JSON document per line (JSON Lines).

    FIX: use a context manager so the file handle is closed even if
    json.dumps raises mid-way (the original leaked the handle on error).
    """
    with open(jsonFilePath, 'w') as jsonFile:
        for x in xlist:
            jsonFile.write(json.dumps(x))
            jsonFile.write('\n')
| [
"cibi_16@live.com"
] | cibi_16@live.com |
b4d70c1ad20209dc32383f63c6d5ebcd54a1695f | 2e29c4362e0a6fc6be76527b9693af5c78d20bc6 | /store/views.py | d7892a6d0cabca0ac86896c808446c15b55d2bb6 | [] | no_license | canhazn/django-ecommerce | 707d6abe4c955400f0b1f5b01630381b1f87f313 | c4fb4b3f7ddee36431e35c4e3f0d123a98d230f0 | refs/heads/master | 2023-07-28T20:29:45.146920 | 2020-05-06T08:52:09 | 2020-05-06T08:52:09 | 260,632,085 | 0 | 0 | null | 2021-09-22T18:58:07 | 2020-05-02T06:57:58 | Python | UTF-8 | Python | false | false | 255 | py | from django.shortcuts import render
def storePage(request):
    """Render the store/store.html template."""
    return render(request, 'store/store.html')
def cartPage(request):
    """Render the store/cart.html template."""
    return render(request, 'store/cart.html')
def checkoutPage(request):
    """Render the store/checkout.html template."""
    return render(request, 'store/checkout.html')
"canhazn@gmail.com"
] | canhazn@gmail.com |
d1cfecadff3d8959228ce2833f10393316775eaf | fa03042c6bc640f761c3074f352613ddcfaf032b | /ROS_MotionPlanning_ObstacleDetection/build/navigation-indigo-devel/move_base/catkin_generated/pkg.develspace.context.pc.py | 7d212e21c71eaef5195c482414f82d5857689a02 | [] | no_license | w4k7pwr/master_thesis | 3978440ae33f28b56a4e45911ef274b3a23be4d0 | 40a13aac67c9a4451e316606ba1155b036dbe5af | refs/heads/master | 2020-04-06T15:00:50.399265 | 2016-11-20T12:51:21 | 2016-11-20T12:51:21 | 52,356,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/wasiel13/wasiel_13/SYNERGIA_PROJECTS/master_thesis/ROS_MotionPlanning_ObstacleDetection/devel/include".split(';') if "/home/wasiel13/wasiel_13/SYNERGIA_PROJECTS/master_thesis/ROS_MotionPlanning_ObstacleDetection/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "move_base"
PROJECT_SPACE_DIR = "/home/wasiel13/wasiel_13/SYNERGIA_PROJECTS/master_thesis/ROS_MotionPlanning_ObstacleDetection/devel"
PROJECT_VERSION = "1.12.13"
| [
"mateusz.wasielewski13@gmail.com"
] | mateusz.wasielewski13@gmail.com |
9b750390731edd5a1a683067240907563877df45 | 7a66ff970580297ba50b0d4bdd0406352071c05a | /Pyscience/3. numpy.py | 5662327a3911c27438e44e19446518f84358e67d | [] | no_license | zero-big/Python-Basic | 1ab3da9d09983d937b410ca9ec1741424ebaa3ae | 5cd2eaa822aedb46a79283a6007b900a3c9665c8 | refs/heads/master | 2023-08-03T13:10:22.556732 | 2021-09-24T11:35:50 | 2021-09-24T11:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | import numpy as np
# 1. 배열 만들기 : array
b = np.array([2, 4, 6, 8])
print(b) # [2 4 6 8]
# ndim : 랭크를 반환
print(b.ndim) # 1
# size : 배열에 있는 값의 총 개수 반환
print(b.size) # 4
# shape : 각 랭크에 있는 값의 개수 반환
print(b.shape) # (4,)
a = np.arange(10)
print(a) # [0 1 2 3 4 5 6 7 8 9]
print(a.ndim) # 1
print(a.shape) # (10,)
print(a.size) # 10
a = np.arange(7, 11)
print(a) # [ 7 8 9 10]
f = np.arange(2.0, 9.8, 0.3)
print(f)
# [2. 2.3 2.6 2.9 3.2 3.5 3.8 4.1 4.4 4.7 5. 5.3 5.6 5.9 6.2 6.5 6.8 7.1
# 7.4 7.7 8. 8.3 8.6 8.9 9.2 9.5 9.8]
g = np.arange(10, 4, -1.5, dtype=np.float)
print(g) # [10. 8.5 7. 5.5]
a = np.zeros((3,))
print(a) # [0. 0. 0.]
print(a.ndim) # 1
print(a.shape) # (3,)
print(a.size) # 3
b = np.zeros((2, 4))
print(b)
# [[0. 0. 0. 0.]
# [0. 0. 0. 0.]]
print(b.ndim) # 2
print(b.shape) # (2, 4)
print(b.size) # 8
k = np.ones((3, 5))
print(k)
# [[1. 1. 1. 1. 1.]
# [1. 1. 1. 1. 1.]
# [1. 1. 1. 1. 1.]]
m = np.random.random((3, 5))
print(m)
# [[0.92144665 0.79460743 0.98429623 0.5172086 0.0727177 ]
# [0.3467992 0.07082806 0.06713763 0.92576145 0.37867405]
# [0.57972622 0.02252859 0.66872603 0.70532502 0.7316084 ]]
a = np.arange(10)
a = a.reshape(2, 5)
print(a)
# [[0 1 2 3 4]
# [5 6 7 8 9]]
print(a.ndim) # 2
print(a.shape) # (2, 5)
print(a.size) # 10
a = a.reshape(5, 2)
print(a)
# [[0 1]
# [2 3]
# [4 5]
# [6 7]
# [8 9]]
print(a.ndim) # 2
print(a.shape) # (5, 2)
print(a.size) # 10
a.shape = (2, 5)
print(a)
# 배열 연산
from numpy import *
a = arange(4)
a *= 3
print(a) # [0 3 6 9]
plain_list = list(range(4))
print(plain_list) # [0, 1, 2, 3]
plain_list = [num*3 for num in plain_list]
print(plain_list) # [0, 3, 6, 9]
a = zeros((2, 5)) + 17.0
print(a)
# [[17. 17. 17. 17. 17.]
# [17. 17. 17. 17. 17.]]
# @ : 행렬 곱
a = np.array([[1,2], [3,4]])
b = a @ a
print(b)
# [[ 7 10]
# [15 22]]
# 선형 대수
# 4x + 5y = 20
# x + 2y = 13
coefficients = np.array([ [4,5], [1,2]])
dependents = np.array([20, 13])
answer = np.linalg.solve(coefficients, dependents)
print(answer)
# [-8.33333333 10.66666667]
print(4 * answer[0] + 5 * answer[1] ) # 20.0
print(1 * answer[0] + 2 * answer[1] ) # 13.0
product = np.dot(coefficients, answer)
print(product) # [20. 13.]
print(np.allclose(product, dependents)) # True
| [
"bosl95@naver.com"
] | bosl95@naver.com |
ce16ea0f63285b0d96c72d5799a266b64a31293c | 46a493dbbe8314c3ae4a6b844841901e461cc11f | /astree/StatementIf.py | bc16a17ec9563890ae997cfab2fbceec304cadcc | [
"MIT"
] | permissive | lypnol/impy | 6f859d06ab41889709854b4426268caf758e1764 | 56bab70e1b0925015fe2a1cc9b1a6cf9ad4c4662 | refs/heads/master | 2021-05-08T15:31:01.315742 | 2018-02-15T17:33:50 | 2018-02-15T17:33:50 | 120,116,221 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from astree.Tree import Tree
# If Statement
class StatementIf(Tree):
    """AST node for an if-statement: a boolean expression, a true-branch and an
    optional false-branch."""

    def __init__(self, exp, statement_t, statement_f, label=None):
        super().__init__(label=label)
        self.exp = exp                    # ExpBool condition
        self.statement_t = statement_t    # SequenceStatement run when exp is true
        self.statement_f = statement_f    # optional SequenceStatement for the else-branch

    def __str__(self, level=0, last=True):
        pieces = [
            Tree.__str__(self, level, last),
            self.exp.__str__(level + 1, last=False),
            self.statement_t.__str__(level + 1, last=(self.statement_f is None)),
        ]
        if self.statement_f:
            pieces.append(self.statement_f.__str__(level + 1))
        return "\n".join(pieces)

    def eval(self, state, catch_vars=None, include_assign=False):
        if catch_vars is not None:
            # Collection mode: visit every branch unconditionally.
            self.exp.eval(state, catch_vars, include_assign)
            self.statement_t.eval(state, catch_vars, include_assign)
            if self.statement_f:
                self.statement_f.eval(state, catch_vars, include_assign)
            return
        # Execution mode: run exactly one branch.
        if self.exp.eval(state):
            self.statement_t.eval(state)
            return
        if self.statement_f:
            self.statement_f.eval(state)
| [
"ayoub.sbai@student.ecp.fr"
] | ayoub.sbai@student.ecp.fr |
f67f887d91508d2322144d4718fa2951436bb505 | 6fed94009fab740df921a2dc1dff0aab02493b98 | /Telecom-Churn-Prediction-with-Boosting/code.py | 49664515c72c32da4f9f29cc2974d7dd12e4e84c | [
"MIT"
] | permissive | nicsquality/ga-learner-dsmp-repo | 340ed2524f4ce4e05287f349a84704ac2b2c540b | eb7bd8fae2525dd00d2caae4b87cd2f6e165c148 | refs/heads/master | 2020-07-29T23:05:54.395906 | 2020-05-24T19:08:57 | 2020-05-24T19:08:57 | 209,995,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,356 | py | # --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
df = pd.read_csv(path)
X = df.iloc[:, 1:-1]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['TotalCharges'] = X_train['TotalCharges'].replace(' ', np.NaN)
X_test['TotalCharges'] = X_test['TotalCharges'].replace(' ', np.NaN)
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
X_train['TotalCharges'] = X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean())
X_test['TotalCharges'] = X_test['TotalCharges'].fillna(X_test['TotalCharges'].mean())
print(X_train.isnull().sum())
cat_cols = X_train.select_dtypes(include='O').columns.tolist()
#Label encoding train data
for x in cat_cols:
le = LabelEncoder()
X_train[x] = le.fit_transform(X_train[x])
X_test[x] = le.transform(X_test[x])
y_train = y_train.replace({'No':0, 'Yes':1})
y_test = y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
ada_model = AdaBoostClassifier(random_state = 0)
ada_model.fit(X_train, y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test, y_pred)
ada_cm = confusion_matrix(y_test, y_pred)
ada_cr = classification_report(y_test, y_pred)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
# Code starts here
xgb_model = XGBClassifier(random_state = 0)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
clf_model = GridSearchCV(estimator=xgb_model, param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
| [
"nicsquality@users.noreply.github.com"
] | nicsquality@users.noreply.github.com |
3032881b56e2bac9d0f446a87e7cc7b381bc2740 | 267d85f7f93bb24b35c162a34235af1fca6ba31a | /python/estimate_parm.py | c8507fb421513d1de0603d13bd3642075e701b25 | [] | no_license | augustecolle/rena_bike | 894df9a392b4a8ec9379ee4493fe3194fcb9d224 | 187985956350085443806f9864533ecf389092f9 | refs/heads/master | 2021-01-11T17:30:26.106790 | 2017-06-23T06:28:53 | 2017-06-23T06:28:53 | 79,792,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,968 | py | import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('/home/auguste/eBike/Auguste/python/')
from vector import *
import MySQLdb
import gmplot
import scipy.optimize
import pylab as pl
import par_est_cam as cc
np.seterr(divide='ignore', invalid='ignore')
#prediction database:
#index ID traject_ID latitude longitude heading height slope
class Traject:
    """A cycling trajectory: an ordered chain of Segment objects built from
    consecutive prediction points, onto which GPS measurements are mapped."""

    def __init__(self, data):
        '''data: list of dicts with keys latitude, longitude, heading, height, slope.'''
        self.segments = []
        self.num_segments = None
        self.currentS = 0            # index of the segment the rider is currently on
        self.dist_nextNode = None    # distance to the current segment's end node (set on advance)
        self.dist_currentS = None
        self.dist_nextS = None
        self.weight = None
        for i in range(len(data) - 1):
            self.segments.append(Segment(data[i], data[i + 1]))
        self.num_segments = len(self.segments)

    def addMeasurements(self, measurements):
        '''measurements is a list of Measurement class objects'''
        for obj in measurements:
            self.addMeasurement(obj)
        return 1

    def addMeasurement(self, measurement):
        # Map the point to the segment we are on (possibly advancing), then store it there.
        self.pnt2segment(measurement)
        self.segments[self.currentS].addMeasurement(measurement)
        return 1

    def pnt2segment(self, measurement):
        """Advance self.currentS when the measurement is closer to the next segment
        and the rider is moving away from the current segment's end node."""
        pnt = (measurement.lat, measurement.lng)
        # BUGFIX: dist2segment returns (distance, nearest_point); the original
        # compared the whole tuples instead of the distances.
        dist_cur, _ = self.dist2segment(pnt, self.segments[self.currentS])
        if self.currentS + 1 < self.num_segments:
            dist_next, _ = self.dist2segment(pnt, self.segments[self.currentS + 1])
            newdist_nextNode = self.dist2nextnode(pnt)
            # BUGFIX: dist_nextNode starts as None; comparing float > None raises
            # TypeError on Python 3, so treat None as "always moving away".
            moving_away = (self.dist_nextNode is None
                           or newdist_nextNode > self.dist_nextNode)
            if dist_next < dist_cur and moving_away:
                self.currentS = self.currentS + 1
                self.dist_nextNode = newdist_nextNode
        else:
            # Already on the last segment: nothing to advance.
            pass

    def dist2nextnode(self, pnt):
        """Euclidean distance (in lat/lng space) from pnt to the current segment's end node."""
        lat0, lng0 = pnt
        lat1, lng1 = self.segments[self.currentS].p1
        return np.sqrt((lat0 - lat1)**2 + (lng0 - lng1)**2)

    def dist2segment(self, pnt, segment):
        """Return (distance, nearest_point) from pnt to the segment, in lat/lng space."""
        start = segment.p0
        end = segment.p1
        line_vec = vector(start, end)
        pnt_vec = vector(start, pnt)
        line_len = length(line_vec)
        line_unitvec = unit(line_vec)
        pnt_vec_scaled = scale(pnt_vec, 1.0 / line_len)
        t = dot(line_unitvec, pnt_vec_scaled)
        t = min(max(t, 0.0), 1.0)            # clamp the projection onto the segment
        nearest = scale(line_vec, t)
        dist = distance(nearest, pnt_vec)
        nearest = add(nearest, start)
        return (dist, nearest)

    @classmethod
    def fakeTraject(cls, n, x=5, y=5):
        '''make n-1 fake segments in the range [0,x), [0,y)'''
        rand_points = [(np.random.random() * x, np.random.random() * y) for _ in range(n)]
        return cls(rand_points)

    def fakeMeasurements(self, n, error_gain=1):
        """Generate n noisy measurements per segment along a fake traject."""
        time = 0
        for segment in self.segments:
            rico = (segment.p1[1] - segment.p0[1]) / (segment.p1[0] - segment.p0[0])
            if segment.p0[0] <= segment.p1[0]:
                xn = segment.p0[0] + np.random.rand(n) * abs(segment.p0[0] - segment.p1[0])
                xp = [segment.p0[0], segment.p1[0]]
                fp = [segment.p0[1], segment.p1[1]]
                xn = sorted(xn)
            elif segment.p0[0] > segment.p1[0]:
                xn = segment.p0[0] - np.random.rand(n) * abs(segment.p0[0] - segment.p1[0])
                xp = [segment.p1[0], segment.p0[0]]
                fp = [segment.p1[1], segment.p0[1]]
                xn = sorted(xn, reverse=True)
            else:
                print("This should never be displayed, debug fakeMeasurements function")
            y = np.interp(xn, xp, fp)
            ry = [i + np.random.normal(loc=0.0, scale=0.2) * error_gain for i in y]
            for (xi, yi) in zip(xn, ry):
                time = time + 1
                # NOTE(review): Measurement.__init__ derives a wind projection from
                # windspeed/windheading, which are not supplied here -- confirm this path.
                self.addMeasurement(Measurement(time, xi, yi))
        return 1

    def addWeight(self, weight):
        """Store the rider's weight on the traject and return it."""
        self.weight = weight
        return self.weight

    def plotFakeTraject(self):
        for seg in self.segments:
            plt.plot(*zip(*seg), c=seg.color)

    def plotTraject(self):
        for seg in self.segments:
            plt.plot(*zip(seg.p0, seg.p1), c=seg.color)

    def plotMeasurements(self):
        for seg in self.segments:
            seg.plot()

    def plotMeasurementsOnMap(self):
        """Export all measurement points as a Google Maps heatmap (measurements.html)."""
        lats = []
        longs = []
        gmap = gmplot.GoogleMapPlotter(51.06, 3.71, 16)
        for seg in self.segments:
            for meas in seg.measurements:
                lats.append(meas.lat)
                longs.append(meas.lng)
        gmap.scatter(lats, longs, '#3B0B39', size=1, marker=False)
        gmap.heatmap(lats, longs)
        gmap.draw("measurements.html")
        print("DONE")

    def plot_vc(self):
        '''plot velocity of cyclist over the traject'''
        for seg in self.segments:
            seg.plot_vc()

    def plot_power(self):
        '''plot electrical power drawn over the traject'''
        for seg in self.segments:
            seg.plot_power()

    def plot_va(self):
        '''plot measured wind speed over the traject'''
        for seg in self.segments:
            seg.plot_va()

    def plotTrajectOnMap(self, name="traject.html"):
        """Export the segment start nodes as a Google Maps heatmap (to `name`)."""
        lats = []
        longs = []
        for seg in self.segments:
            lats.append(seg.p0[0])
            longs.append(seg.p0[1])
        gmap = gmplot.GoogleMapPlotter(51.06, 3.71, 16)
        gmap.scatter(lats, longs, '#3B0B39', size=1, marker=False)
        gmap.heatmap(lats, longs)
        gmap.draw(name)
        print("DONE")
class Segment:
    """One straight piece of a traject between two consecutive prediction points."""

    def __init__(self, pr0, pr1):
        '''pr0/pr1: prediction dicts with keys latitude, longitude, heading, height, slope.'''
        self.p0 = (pr0['latitude'], pr0['longitude'])
        self.p1 = (pr1['latitude'], pr1['longitude'])
        self.slope = pr0['slope']
        self.height0 = pr0['height']
        self.height1 = pr1['height']
        self.heading = pr0['heading']
        self.color = np.random.rand(3,1)   # random plotting colour for this segment
        self.measurements = []
        self.rho = self.getRho()

    def getRho(self):
        '''Air density (kg/m^3) at the segment's mean height, via the barometric
        formula -- see en.wikipedia.org/wiki/Density_of_air.'''
        M = 0.0289644    # molar mass of dry air, kg/mol
        R = 8.31447      # universal gas constant, J/(mol K)
        L = 0.0065       # temperature lapse rate, K/m
        g = 9.80665      # gravitational acceleration, m/s^2
        T_0 = 288.15     # sea-level standard temperature, K
        p_0 = 101325     # sea-level standard pressure, Pa
        height = (self.height0 + self.height1) / 2.0
        p = p_0 * (1 - L * height / T_0) ** (g * M / (R * L))
        self.rho = p * M / (R * (T_0 - L * height))
        return self.rho

    def addMeasurement(self, measurement):
        self.measurements.append(measurement)

    def plot(self):
        for m in self.measurements:
            plt.scatter(m.lat, m.lng, c=self.color, s=200)

    def plot_vc(self):
        '''plot velocity of cyclist on segment'''
        for m in self.measurements:
            plt.scatter(m.time, m.speed, c=self.color, s=50)

    def plot_power(self):
        for m in self.measurements:
            plt.scatter(m.time, m.amps * m.volts, c=self.color, s=50)

    def plot_va(self):
        '''plot wind speed on segment'''
        for m in self.measurements:
            plt.scatter(m.time, m.windspeed, c=self.color, s=200)

    def plot_heading(self):
        for m in self.measurements:
            plt.scatter(m.time, m.heading, c=self.color, s=200)

    def __str__(self):
        return str(self.p0) + ", " + str(self.p1)

    def __iter__(self):
        return iter([self.p0, self.p1])

    def __getitem__(self, value):
        # BUGFIX: the original had an unreachable else-branch returning the string
        # 'index error'. Any truthy index selects p1, any falsy index selects p0.
        return self.p1 if value else self.p0
class Measurement:
    """A single logged sample: GPS fix, bike speed/heading, battery power and wind."""

    def __init__(self, time, lat, lng, alt=None, posacc=None, altacc=None, speed=None, heading=None, amps=None, volts=None, windspeed=None, windheading=None, ci=None, weight=None):
        self.time = time
        self.lat = lat
        self.lng = lng
        self.alt = alt
        self.posacc = posacc          # GPS position accuracy
        self.altacc = altacc          # GPS altitude accuracy
        self.speed = speed            # ground speed, m/s (mutated to 1e-3 if zero, see below)
        self.heading = heading        # compass heading, degrees
        self.amps = amps
        self.volts = volts
        self.windspeed = windspeed    # m/s
        self.windheading = windheading  # compass direction the wind comes from, degrees
        self.prvwsigned = self.getProjectedvw()
        self.ci = ci                  # Clearness Index
        self.weight = weight          # rider weight, kg

    def getProjectedvw(self):
        """Signed squared apparent-wind speed projected on the travel direction.

        Also stored as self.sqprvwsigned; prints 'NAN' when degenerate.
        """
        # Convert compass angles (degrees, clockwise from north) to math angles
        # (radians, counter-clockwise from east); wind heading is where the wind
        # comes FROM, hence the extra 180 degrees.
        alpha = (90 - (self.windheading - 180)) * np.pi / 180.0
        beta = (450 - self.heading) * np.pi / 180.0
        if self.speed == 0:
            self.speed = 1e-3   # avoid a zero-length travel vector (mutates stored speed)
        v_w = np.array([self.windspeed * np.cos(alpha), self.windspeed * np.sin(alpha)])
        v_f = np.array([self.speed * np.cos(beta), self.speed * np.sin(beta)])
        v_weq = v_w - v_f                                   # apparent wind in the rider frame
        v_weq_mag = np.sqrt(v_weq[0] ** 2 + v_weq[1] ** 2)
        # SIMPLIFICATION: the original wrapped this dot product in cos(arccos(...)),
        # which is the identity on [-1, 1]; the result is |v_weq|^2 * cos(angle).
        cos_angle = np.clip(np.dot(v_f / np.linalg.norm(v_f), v_weq / np.linalg.norm(v_weq)), -1.0, 1.0)
        self.sqprvwsigned = v_weq_mag ** 2 * cos_angle
        if np.isnan(self.sqprvwsigned):
            print('NAN')
        return self.sqprvwsigned

    def __str__(self):
        return str(self.time) + ": " + str(self.lat) + ", " + str(self.lng)
def test(n, num, error):
    """Smoke test: build a random traject of n points with num noisy measurements
    per segment, then plot traject and measurements.

    NOTE(review): Traject.fakeMeasurements constructs Measurement(time, x, y)
    without wind fields, while Measurement.__init__ immediately computes a wind
    projection from windheading/windspeed -- confirm this path still works.
    """
    tra = Traject.fakeTraject(n)
    tra.fakeMeasurements(num, error)
    tra.plotMeasurements()
    tra.plotTraject()
    plt.show()
class DB:
    """Access layer for the eBike MySQL database (measurements, predictions,
    user_settings tables)."""

    def __init__(self):
        #10.128.16.12
        #"192.168.0.197"
        #192.168.0.200
        # NOTE(review): hard-coded credentials; move them to configuration/secrets.
        self.db = MySQLdb.connect(host="10.108.32.18", port=3306, user="auguste", passwd="renasolutions", db="eBike")
        self.cursor = self.db.cursor()
        self.headerm = []        # column names of the measurements table
        self.headerp = []        # column names of the predictions table
        self.measurements = []
        # Column indices resolved by getHeaderM().
        self.tindex = None
        self.latindex = None
        self.lngindex = None
        self.altindex = None
        self.posaccindex = None
        self.altaccindex = None
        self.vindex = None
        self.hindex = None
        self.bcindex = None
        self.bvindex = None
        self.wvindex = None
        self.whindex = None
        self.ciindex = None
        self.weight = 75         # default rider weight (kg) until getWeight() is called

    def getWeight(self, ID):
        """Fetch and cache the rider's weight for the given user ID."""
        # NOTE(review): all queries in this class interpolate values into SQL
        # strings (injection-prone); prefer parameterized execute(sql, params).
        tablename = 'user_settings'
        self.cursor.execute("SELECT weight FROM "+str(tablename)+" WHERE ID="+str(ID))
        self.weight = self.cursor.fetchall()[0][0]
        return self.weight

    def getHeaderM(self):
        '''get header of measurement table and resolve the column indices'''
        self.headerm = []
        tablename = 'measurements'
        self.cursor.execute("SHOW COLUMNS FROM "+str(tablename))
        headerm = self.cursor.fetchall()
        for x in headerm:
            self.headerm.append(x[0])
        self.tindex = self.headerm.index('timestamp')
        self.latindex = self.headerm.index('gps_lat')
        self.lngindex = self.headerm.index('gps_lng')
        self.altindex = self.headerm.index('gps_alt')
        self.posaccindex = self.headerm.index('gps_pos_acc')
        self.altaccindex = self.headerm.index('gps_alt_acc')
        self.vindex = self.headerm.index('gps_speed')
        self.hindex = self.headerm.index('gps_heading')
        self.bcindex = self.headerm.index('battery_current')
        self.bvindex = self.headerm.index('battery_voltage')
        self.wvindex = self.headerm.index('wind_speed')
        self.whindex = self.headerm.index('wind_heading')
        self.ciindex = self.headerm.index('clearness_index')
        return self.headerm

    def getHeaderP(self):
        '''get header of predictions table and resolve the column indices'''
        self.headerp = []
        tablename = 'predictions'
        self.cursor.execute("SHOW COLUMNS FROM "+str(tablename))
        headerp = self.cursor.fetchall()
        for x in headerp:
            self.headerp.append(x[0])
        self.latindexp = self.headerp.index('latitude')
        self.lngindexp = self.headerp.index('longitude')
        self.headingindex = self.headerp.index('heading')
        self.heightindex = self.headerp.index('height')
        self.slopeindex = self.headerp.index('slope')
        return self.headerp

    def getMeasurements(self, ID=None, traject_ID=None, traject_range=None):
        """Fetch measurement rows (optionally filtered by user ID, traject ID or a
        traject range) and append them to self.measurements as Measurement objects."""
        if ID:
            self.getWeight(ID)
        if ID == None and traject_ID == None and traject_range == None:
            self.cursor.execute("SELECT * FROM measurements")
        elif traject_ID == None and traject_range == None:
            self.cursor.execute("SELECT * FROM measurements WHERE ID LIKE '"+str(ID)+"'")
        elif ID == None and traject_range == None:
            self.cursor.execute("SELECT * FROM measurements WHERE traject_ID LIKE '"+str(traject_ID)+"'")
        elif traject_range != None and ID != None:
            self.cursor.execute("SELECT * FROM measurements WHERE traject_ID > '"+str(traject_range[0])+"' AND traject_ID < '"+str(traject_range[1])+"' AND ID LIKE '"+str(ID)+"'")
        elif traject_range == None and traject_ID == None:
            # NOTE(review): unreachable -- this condition is covered by the first
            # two branches above; kept for fidelity.
            self.cursor.execute("SELECT * FROM measurements WHERE ID LIKE '"+str(ID)+"'")
        else:
            self.cursor.execute("SELECT * FROM measurements WHERE traject_ID LIKE '"+str(traject_ID)+"' AND ID LIKE '"+str(ID)+"'")
        measurements = self.cursor.fetchall()
        for x in measurements:
            self.measurements.append(Measurement(x[self.tindex], x[self.latindex], x[self.lngindex], x[self.altindex], x[self.posaccindex], x[self.altaccindex], x[self.vindex], x[self.hindex], x[self.bcindex], x[self.bvindex], x[self.wvindex], x[self.whindex], x[self.ciindex], self.weight))
        return self.measurements

    def getTraject(self, ID=None, traject_ID=None, traject_range=None):
        """Fetch prediction rows with the same filters as getMeasurements and
        return them as a list of {column: value} dicts."""
        if ID:
            self.getWeight(ID)
        if ID == None and traject_ID == None and traject_range == None:
            self.cursor.execute("SELECT * FROM predictions")
        elif traject_ID == None and traject_range == None:
            self.cursor.execute("SELECT * FROM predictions WHERE ID LIKE '"+str(ID)+"'")
        elif ID == None and traject_range == None:
            # BUGFIX: the original issued a second execute filtering on ID, which
            # is None in this branch, clobbering the intended traject_ID query.
            self.cursor.execute("SELECT * FROM predictions WHERE traject_ID LIKE '"+str(traject_ID)+"'")
        elif traject_range != None and ID != None:
            self.cursor.execute("SELECT * FROM predictions WHERE traject_ID > '"+str(traject_range[0])+"' AND traject_ID < '"+str(traject_range[1])+"' AND ID LIKE '"+str(ID)+"'")
        elif traject_range == None and traject_ID == None:
            # NOTE(review): unreachable -- covered by the branches above.
            self.cursor.execute("SELECT * FROM predictions WHERE ID LIKE '"+str(ID)+"'")
        else:
            self.cursor.execute("SELECT * FROM predictions WHERE traject_ID LIKE '"+str(traject_ID)+"' AND ID LIKE '"+str(ID)+"'")
        db_out = self.cursor.fetchall()
        predictions = []
        for x in db_out:
            # Skip the first three columns (row bookkeeping) when building the dict.
            row = {}
            for (key, value) in zip(self.headerp[3:], x[3:]):
                row[key] = value
            predictions.append(row)
        return predictions

    def plot(self, name):
        """Plot one measurements column; return 1 on success, -1 for unknown names."""
        if name in self.headerm:
            # BUGFIX: the original referenced an undefined global 'cursor'.
            self.cursor.execute("SELECT "+str(name)+" FROM measurements")
            res = self.cursor.fetchall()
            plt.plot(res)
            return 1
        return -1
def main():
    """End-to-end parameter estimation: load one traject and its measurements for
    a rider, fit (CdA, Cr, Pcyc) with Nelder-Mead, and plot fitted vs measured power."""
    #newride start tID 34 -- 48 -- 53
    import imp
    imp.reload(cc)   # pick up live edits to the estimation module during interactive use
    ID = 31
    tID = None
    db1 = DB()
    db1.getWeight(ID)
    header = db1.getHeaderM()
    measurements = db1.getMeasurements(ID, traject_ID = 1)#, traject_range = [47, 100])
    headerP = db1.getHeaderP()
    predictions = db1.getTraject(ID, traject_ID = 1)#,traject_range = [47, 100])
    tra = Traject(predictions)
    tra.addMeasurements(measurements)
    #tra.plotTraject()
    #tra.plotMeasurements()
    #tra.plotTrajectOnMap()
    #tra.plotMeasurementsOnMap()
    #tra.plot_power()
    #tra.plot_vc()
    #plt.show()
    segments = cc.getData(tra)
    print(len(segments))
    # Initial guess for [CdA, Cr, Pcyc].
    x0 = [0.6,0.004,2.2]
    sigma = 1 # if this is very small, strong fitting <-> weaker priors. Very large weaker fitting <-> stronger priors
    #res = scipy.optimize.fmin(cc.errorf, x0, args=(segments, sigma))
    res = scipy.optimize.minimize(cc.errorf,x0,args=(segments,sigma), method = 'Nelder-Mead')
    print("optimal parameters, loss function = {:.6e} ".format(cc.errorf(res.x,segments,sigma)))
    print("succes : {:}".format(res.success))
    print("------------------- ")
    print("| CdA  : {:.3f}  ".format(res.x[0]))
    print("| Cr   : {:.3f}  ".format(res.x[1]))
    print("| Pcyc : {:.3f}  ".format(res.x[2]))
    # Compare the fitted power profile against the measured one.
    pm_guessf = np.concatenate(cc.estimatePower(res.x,segments))
    pm_measuredf = np.concatenate([ s[2] for s in segments ])
    fig =pl.figure()
    fig.subplots_adjust(left=0.16)
    ax = fig.add_subplot(111)
    #ax.plot([y.speed for x in tra.segments for y in x.measurements],lw=3,ls="dashed",label="speed")
    ax.plot(pm_guessf,marker='s',color="firebrick",lw=3,ls="dashed",label="fit")
    ax.plot(pm_measuredf,marker='o',color="black",lw=3,ls="solid",label="measured")
    ax.legend(frameon=False,fontsize=20)
    ax.set_ylabel("Power (W)",fontsize=20)
    plt.show()

if __name__=="__main__":
    main()
| [
"auguste.colle@hotmail.com"
] | auguste.colle@hotmail.com |
b9846c6dc970fd12ea04b9cdfa3264dbd254b5bc | 12662aff12b6651a3cc046950e5ea57f60dd0a09 | /3. Strings/string_formatting.py | d6ef1dc233219bf90be6f10d075705314d9ec015 | [] | no_license | basakmugdha/HackerRank-Python-Practice | 3c563d68e002c1b04dc59594f3c84070babf443a | fa41f263eb310755a1da1c9d6f2f74dc9e0329b5 | refs/heads/master | 2023-06-18T05:07:58.563967 | 2021-07-11T16:29:34 | 2021-07-11T16:29:34 | 354,798,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | def print_formatted(number):
# your code goes here
for i in range (1,number+1):
width = len(str(bin(number))[2:])
print(str(i).rjust(width)+" "+str(oct(i))[2:].rjust(width)+" "+str(hex(i))[2:].upper().rjust(width)+" "+str(bin(i))[2:].rjust(width))
if __name__ == '__main__':
n = int(input())
print_formatted(n) | [
"51905437+basakmugdha@users.noreply.github.com"
] | 51905437+basakmugdha@users.noreply.github.com |
f177bf9c6d3555c8ced417b4037228e6a141d71f | c2462f27ef6a892a51178ddc210337b97b05987c | /crab/run_303272/dataset1/crab3_20170919_303272_20.py | b7d6606fc4bfbcd5e1278c41ced7bf43a51340ca | [] | no_license | nicolastonon/NoiseBiasScans | 5afe7a539621e9037005d62888e5072b62e8dd36 | 3700d794afeccb56034bebcc5d09a638bb503512 | refs/heads/master | 2021-09-19T20:01:03.168513 | 2018-07-31T12:21:31 | 2018-07-31T12:21:31 | 114,871,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'NoiseScan_20170919_run303272_v1_20'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'computeNoiseFromRaw_cfg_20.py'
config.JobType.outputFiles = ['SiStripCommissioningSource.root']
config.JobType.scriptExe = 'scriptExe.sh'
config.JobType.maxMemoryMB = 3000
config.Data.inputDataset = '/VRRandom1/Run2017D-v1/RAW' #second dataset
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 500 #Change to optimize file size
#config.Data.lumiMask = 'NoiseScan_20170919_run303272_JSON.txt'
config.Data.runRange = '303272,303314'
config.Data.outLFNDirBase = '/store/group/dpg_tracker_strip/comm_tracker/Strip/RadMonitoring/NoiseBiasScan/2017/dataset1'
config.Data.publication = False
#config.Data.outputDatasetTag = ''
#Else, runs where the data are located (may be busy)
config.Data.ignoreLocality = True
#config.Site.storageSite = 'T2_FR_IPHC'
config.Site.storageSite = 'T2_CH_CERN'
| [
"nicolas.tonon@etu.unistra.fr"
] | nicolas.tonon@etu.unistra.fr |
7e281015a5822579ea0f212ca415ef4499793092 | 0c3b09e5332d910e1825934aaccec1f6b059359b | /venv/bin/cftp | d903f8c4156d94f8b0cae4e1a31ae1295a77426e | [] | no_license | pbtrad/chat | eee6e28f00ee2add5014a93f4f9a0e3444348612 | 1e3f7eb32349640d31242d0be125e92cb67df37b | refs/heads/master | 2023-06-03T15:49:58.727006 | 2021-06-18T15:38:35 | 2021-06-18T15:38:35 | 377,942,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | #!/home/paul123/Desktop/chat/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.cftp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"paulbrowne2@gmail.com"
] | paulbrowne2@gmail.com | |
46836b5e642baa2fdee605ceb0fe003a38d38c0f | 72a4db202e6ff0707b89ffadda39b17a0625f602 | /books/urls.py | c35cf6384295a224346e55d4dafa0187d43f89af | [] | no_license | maciejurm/zyjswiadomieeuv3 | b3ba94b3b9c932a5a81ba6616dae73af6fd6bd97 | fcac60c8f1adcbb89db5a16d681392c9f0bd05ef | refs/heads/master | 2022-12-10T17:47:17.697052 | 2019-01-19T18:06:32 | 2019-01-19T18:06:32 | 165,443,426 | 0 | 0 | null | 2022-12-08T01:32:54 | 2019-01-12T22:46:26 | JavaScript | UTF-8 | Python | false | false | 820 | py | from django.urls import path, re_path
from . import views
from .views import AuthorAutocomplete
app_name = 'books'
urlpatterns = [
path('list/', views.booklist, name='books'),
path('book/<slug>', views.bookdetail, name='book_detail'),
path('book/list/add/', views.bookadd, name='book_add'),
path('quotes/', views.quotelist, name='quotes'),
path('quotes/<slug>', views.quotedetail, name='quote'),
path('quotes/quote/add/', views.quoteadd, name='quote_add'),
path('quotes/tag/(<tag_slug>)/', views.quotelist, name='quotes_list_by_tag'),
path('authors/', views.authorlist, name='authors'),
path('authors/<slug>', views.authordetail, name='book_author'),
re_path(
r'^author-autocomplete/$',
AuthorAutocomplete.as_view(),
name='author-autocomplete',
),
]
| [
"maciej@zyjswiadomie.eu"
] | maciej@zyjswiadomie.eu |
60da30781917abab3957aa8014520618378468ed | 9905901a2beae3ff4885fbc29842b3c34546ffd7 | /nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/responder/responderpolicy.py | c615cd853ff5b01cace69af2e386a58b6b117f46 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | culbertm/NSttyPython | f354ebb3dbf445884dbddb474b34eb9246261c19 | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | refs/heads/master | 2020-04-22T17:07:39.654614 | 2019-02-13T19:07:23 | 2019-02-13T19:07:23 | 170,530,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,054 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class responderpolicy(base_resource) :
""" Configuration for responder policy resource. """
def __init__(self) :
self._name = None
self._rule = None
self._action = None
self._undefaction = None
self._comment = None
self._logaction = None
self._appflowaction = None
self._newname = None
self._hits = None
self._undefhits = None
self._builtin = None
self.___count = None
@property
def name(self) :
r"""Name for the responder policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Can be changed after the responder policy is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the responder policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Can be changed after the responder policy is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
r"""Default syntax expression that the policy uses to determine whether to respond to the specified request.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
r"""Default syntax expression that the policy uses to determine whether to respond to the specified request.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def action(self) :
r"""Name of the responder action to perform if the request matches this responder policy. There are also some built-in actions which can be used. These are:
* NOOP - Send the request to the protected server instead of responding to it.
* RESET - Reset the client connection by closing it. The client program, such as a browser, will handle this and may inform the user. The client may then resend the request if desired.
* DROP - Drop the request without sending a response to the user.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
r"""Name of the responder action to perform if the request matches this responder policy. There are also some built-in actions which can be used. These are:
* NOOP - Send the request to the protected server instead of responding to it.
* RESET - Reset the client connection by closing it. The client program, such as a browser, will handle this and may inform the user. The client may then resend the request if desired.
* DROP - Drop the request without sending a response to the user.
"""
try :
self._action = action
except Exception as e:
raise e
@property
def undefaction(self) :
r"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition. Only the above built-in actions can be used.
"""
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
r"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition. Only the above built-in actions can be used.
"""
try :
self._undefaction = undefaction
except Exception as e:
raise e
@property
def comment(self) :
r"""Any type of information about this responder policy.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
r"""Any type of information about this responder policy.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def logaction(self) :
r"""Name of the messagelog action to use for requests that match this policy.
"""
try :
return self._logaction
except Exception as e:
raise e
@logaction.setter
def logaction(self, logaction) :
r"""Name of the messagelog action to use for requests that match this policy.
"""
try :
self._logaction = logaction
except Exception as e:
raise e
@property
def appflowaction(self) :
r"""AppFlow action to invoke for requests that match this policy.
"""
try :
return self._appflowaction
except Exception as e:
raise e
@appflowaction.setter
def appflowaction(self, appflowaction) :
r"""AppFlow action to invoke for requests that match this policy.
"""
try :
self._appflowaction = appflowaction
except Exception as e:
raise e
@property
def newname(self) :
r"""New name for the responder policy. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) hash (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
r"""New name for the responder policy. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) hash (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
@property
def hits(self) :
r"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def undefhits(self) :
r"""Number of policy UNDEF hits.
"""
try :
return self._undefhits
except Exception as e:
raise e
@property
def builtin(self) :
r"""Flag to determine if responder policy is built-in or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(responderpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.responderpolicy
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
r""" Use this API to add responderpolicy.
"""
try :
if type(resource) is not list :
addresource = responderpolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.action = resource.action
addresource.undefaction = resource.undefaction
addresource.comment = resource.comment
addresource.logaction = resource.logaction
addresource.appflowaction = resource.appflowaction
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].action = resource[i].action
addresources[i].undefaction = resource[i].undefaction
addresources[i].comment = resource[i].comment
addresources[i].logaction = resource[i].logaction
addresources[i].appflowaction = resource[i].appflowaction
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete responderpolicy.
"""
try :
if type(resource) is not list :
deleteresource = responderpolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
r""" Use this API to update responderpolicy.
"""
try :
if type(resource) is not list :
updateresource = responderpolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
updateresource.undefaction = resource.undefaction
updateresource.comment = resource.comment
updateresource.logaction = resource.logaction
updateresource.appflowaction = resource.appflowaction
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
updateresources[i].undefaction = resource[i].undefaction
updateresources[i].comment = resource[i].comment
updateresources[i].logaction = resource[i].logaction
updateresources[i].appflowaction = resource[i].appflowaction
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of responderpolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = responderpolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
r""" Use this API to rename a responderpolicy resource.
"""
try :
renameresource = responderpolicy()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the responderpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = responderpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = responderpolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [responderpolicy() for _ in range(len(name))]
obj = [responderpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = responderpolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of responderpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = responderpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the responderpolicy resources configured on NetScaler.
"""
try :
obj = responderpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of responderpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = responderpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class responderpolicy_response(base_response) :
def __init__(self, length=1) :
self.responderpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.responderpolicy = [responderpolicy() for _ in range(length)]
| [
"mdculbert@marathonpetroleum.com"
] | mdculbert@marathonpetroleum.com |
337d853e5a5d11b80830d4af2ddb66956fc8542d | 888b56ed0621259dee994c6352c3f5042b86f8f6 | /1-basics/3-decision/5-comparison-operators/bot.py | 091418c411ecb9ab27abb357b911b52133ae0cc6 | [] | no_license | Joz1203/COM404 | 6d4972de075803f85f9dbe143441def46fae9d89 | aacb4c04ddb3d455e2fdaa531082d6d455a7c69d | refs/heads/master | 2020-07-31T13:29:25.592204 | 2019-11-19T12:23:17 | 2019-11-19T12:23:17 | 210,618,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | print("Please enter the first number.")
first_number = int(input())
print("Please enter the second number.")
second_number = int(input())
if first_number > second_number:
print("The second number is the smallest.")
elif second_number > first_number:
print("The first number is the smallest.")
elif first_number == second_number:
print("Both are equal.")
| [
"joannadavie1991@gmail.com"
] | joannadavie1991@gmail.com |
2f207446949e8f4bcd2e8b18d91710d53ad4f692 | f6d428e727953b21fb2cf6969067c262b156d8ac | /TGR.AX_2406.py | 230747ebd162f262f8e1fa1d56dc3b20cc3c03c7 | [] | no_license | anfaning/Koreana-Investment | 5edc11f608e711a6d1d95b5a184b9cbe81983f9b | 7851ef333ee77882bd4f8c1cdeabb102dee6ed7e | refs/heads/main | 2023-06-28T05:20:37.720181 | 2021-08-06T06:18:01 | 2021-08-06T06:18:01 | 368,760,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,379 | py | ##################################################################
# Date: 24/06/2021
# Train = 5 years, Predict with recent 1 year data
# Epochs = 2000
# Neurons = 128
# Batch = 32
# Layer = 5
# Dropout layer = 0
# Days for prediction = 28
# Loss = MSE
# Optimizer = Adam
##################################################################
# Keras and Tensorflow >2.0
import matplotlib.pyplot as plt
### Data Collection
import pandas as pd
from datetime import datetime, timedelta
import yfinance as yf
import numpy as np
import random as rn
from sklearn.preprocessing import MinMaxScaler
# Setting the seed for numpy-generated random numbers
np.random.seed(37)
# Setting the seed for python random numbers
rn.seed(1254)
yf.pdr_override()
ticker = "TGR.AX"
df = yf.download(ticker, period="5y")
# df = yf.download(ticker, start= "2016-05-31", end= "2021-06-01")
# df.describe()
df1 = df.reset_index()['Open']
scaler = MinMaxScaler(feature_range=(0, 1))
df1 = scaler.fit_transform(np.array(df1).reshape(-1, 1))
# splitting dataset into train and test split
training_size = int(len(df1) * 0.65)
test_size = len(df1) - training_size
train_data, test_data = df1[0:training_size, :], df1[training_size:len(df1), :1]
# convert an array of values into a dataset matrix
def create_dataset(dataset, time_step=1):
dataX, dataY = [], []
for i in range(len(dataset) - time_step - 1):
a = dataset[i:(i + time_step), 0] ###i=0, 0,1,2,3-----99 100
dataX.append(a)
dataY.append(dataset[i + time_step, 0])
return np.array(dataX), np.array(dataY)
# reshape into X=t,t+1,t+2,t+3 and Y=t+4
time_step = 28
X_train, y_train = create_dataset(train_data, time_step)
X_test, ytest = create_dataset(test_data, time_step)
# print(X_train.shape), print(y_train.shape)
# print(X_test.shape), print(ytest.shape)
# reshape input to be [samples, time steps, features] which is required for LSTM
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
### Create the Stacked LSTM model
import tensorflow as tf
# tf.__version__
tf.random.set_seed(2)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
d = 0.2
model = Sequential()
model.add(LSTM(128, return_sequences=True, input_shape=(28, 1)))
model.add(LSTM(128, return_sequences=True))
# model.add(Dropout(d))
model.add(LSTM(128, return_sequences=True))
# model.add(Dropout(d))
model.add(LSTM(128))
model.add(Dense(128, kernel_initializer="uniform", activation='relu'))
model.add(Dense(1, kernel_initializer="uniform", activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
model.summary()
start = datetime.now()
start_time = start.strftime("%H:%M:%S")
model.fit(X_train, y_train, epochs=2000, batch_size=32, verbose=1)
end = datetime.now()
end_time = end.strftime("%H:%M:%S")
result = model.evaluate(X_test, ytest, verbose=1)
### Lets Do the prediction and check performance metrics
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
##Transformback to original form
train_predict = scaler.inverse_transform(train_predict)
test_predict = scaler.inverse_transform(test_predict)
### Calculate RMSE performance metrics
import math
from sklearn.metrics import mean_squared_error
math.sqrt(mean_squared_error(y_train, train_predict))
### Test Data RMSE
math.sqrt(mean_squared_error(ytest, test_predict))
### Plotting
# shift train predictions for plotting
# look_back = 100
# trainPredictPlot = np.empty_like(df1)
# trainPredictPlot[:, :] = np.nan
# trainPredictPlot[look_back:len(train_predict) + look_back, :] = train_predict
# # shift test predictions for plotting
# testPredictPlot = np.empty_like(df1)
# testPredictPlot[:, :] = np.nan
# testPredictPlot[len(train_predict) + (look_back * 2) + 1:len(df1) - 1, :] = test_predict
# # plot baseline and predictions
# plt.plot(scaler.inverse_transform(df1))
# plt.plot(trainPredictPlot)
# plt.plot(testPredictPlot)
# plt.show()
# len(test_data)
# Demonstrate prediction for next 30 days
lst_output = []
n_steps = 28 # Use 28days to predict
i = 0
x_input = test_data[-n_steps:].reshape(1, -1)
temp_input = list(x_input)
temp_input = temp_input[0].tolist()
while i < 30: # Predict next 30days
if len(temp_input) > n_steps:
# print(temp_input)
x_input = np.array(temp_input[1:])
print("{} day input {}".format(i, x_input))
x_input = x_input.reshape(1, -1)
x_input = x_input.reshape((1, n_steps, 1))
# print(x_input)
yhat = model.predict(x_input, verbose=0)
print("{} day output {}".format(i, yhat))
temp_input.extend(yhat[0].tolist())
temp_input = temp_input[1:]
# print(temp_input)
lst_output.extend(yhat.tolist())
i = i + 1
else:
x_input = x_input.reshape((1, n_steps, 1))
yhat = model.predict(x_input, verbose=0)
print(yhat[0])
temp_input.extend(yhat[0].tolist())
print(len(temp_input))
lst_output.extend(yhat.tolist())
i = i + 1
# plotting next 100 days
day_new = np.arange(1, 101)
day_pred = np.arange(101, 131)
plt.plot(day_new, scaler.inverse_transform(df1[-100:]))
plt.plot(day_pred, scaler.inverse_transform(lst_output))
plt.show()
# df3=df1.tolist()
# df3.extend(lst_output)
# df3=scaler.inverse_transform(df3[-180:]).tolist()
# plt.plot(df3)
# plt.show()
print("Start time =", start_time, "End time =", end_time)
df_output = pd.DataFrame(scaler.inverse_transform(lst_output), columns=["Prediction"])
# Add future weekdays to df_output and create to csv
df_date = pd.DataFrame([])
td = datetime.today()
while len(df_date) < 30:
if td.weekday() in [5, 6]:
td = td + timedelta(days=1)
else:
td = td + timedelta(days=1)
df_date = df_date.append(pd.DataFrame([td.strftime("%d-%m")]))
df_date.reset_index(drop=True, inplace=True)
df_output.insert(0, "Date", df_date)
# Download to CSV
date = datetime.now().strftime("%d%m")
df_output.to_csv(f'output/{ticker}_Prd_{date}.csv', index=False, header=True)
| [
"noreply@github.com"
] | anfaning.noreply@github.com |
b7f1feba390f7b3e355535d4420f9667115d9c46 | 871ef5ce8d438e67c121afd035d6e5ba07c97b1a | /framework/framework/componentConfig/manage.py | 3e81468a64cf73f34adc02a52265c25bf5bb2740 | [
"MIT"
] | permissive | mayankkodesia/api-automation-framework | 0ca437af14522c90d9f0d2f548b70a9449061bf1 | 613916e7daf3777bc63462ed5b718a8c9b3bdec4 | refs/heads/master | 2021-09-07T02:15:17.427473 | 2018-02-15T17:24:01 | 2018-02-15T17:24:01 | 115,877,972 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import argparse
import sys
from config import Config
from config import CreateStructureOnDisk
import os
p = argparse.ArgumentParser(description='Here is the complete list for endPoints.')
p.add_argument('-compName', default='', help='Please provide the component name by which parent folder should be created')
p.add_argument('-compDir', default='', help='Please provide absolute directory path')
p.add_argument('-firstEndPoint', default='', help='Please provide first endpoint name which you are going to automation')
args = p.parse_args()
if not args.compName:
print "-compName(component name) is not provided\n"
sys.exit()
if not args.compDir:
print "-compDir(component dir) is not provided\n"
sys.exit()
else:
if not os.path.exists(args.compDir):
os.mkdir(args.compDir)
if not args.firstEndPoint:
print "-firstEndPoint(first end point name) is not provided\n"
sys.exit()
struct = Config(args.compName, args.compDir, args.firstEndPoint).getStruct()
CreateStructureOnDisk(struct, args.compName, args.firstEndPoint).writeFiles()
| [
"mayank.kodesia@foghorn.io"
] | mayank.kodesia@foghorn.io |
89da656bc0b9c746c9939e488d314feede75103e | 9d98e32216c914618bc3cebc28470669b73d881a | /Faisal.py | 5935d497c2e91a782b8568f8c6d3b827e8c1c16a | [] | no_license | M-Janum/Mishal | c14171ee3fddd5763b54124c36e45192fb44ddd7 | 9c7f316844e520fd9dbcc35514a091fbe705ebd2 | refs/heads/main | 2023-01-13T10:48:39.455464 | 2020-11-12T00:46:43 | 2020-11-12T00:46:43 | 312,111,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,818 | py | #!/usr/bin/python
# coding=utf-8
# Originally Written By:Mishal X Faisal
# Source : Python2"
# Donot Recode It.
#Import module
try:
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass,mechanize,requests
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
except ImportError:
os.system('pip2 install requests')
os.system('pip2 install mechanize')
os.system('python2 hop.py')
#Browser Setting
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def exit():
print "[!] Exit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def hamza(z):
    """Typewriter effect: print *z* plus a trailing newline one character
    at a time, flushing after each and pausing 30 ms between characters."""
    delay = 0.03
    out = sys.stdout
    for ch in z + '\n':
        out.write(ch)
        out.flush()
        time.sleep(delay)
##### LOGO #####
banner = """
\x1b[1;94m°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°❂
\x1b[1;92m____________________██████
\x1b[1;92m_________▓▓▓▓____█████████
\x1b[1;92m__ Ƹ̵̡Ӝ̵̨̄Ʒ▓▓▓▓▓=▓____▓=▓▓▓▓▓
\x1b[1;92m__ ▓▓▓_▓▓▓▓░●____●░░▓▓▓▓
\x1b[1;92m_▓▓▓▓_▓▓▓▓▓░░__░░░░▓▓▓▓
\x1b[1;92m_ ▓▓▓▓_▓▓▓▓░░♥__♥░░░▓▓▓
\x1b[1;92m__ ▓▓▓___▓▓░░_____░░░▓▓
\x1b[1;92m▓▓▓▓▓____▓░░_____░░▓
\x1b[1;92m_ ▓▓____ ▒▓▒▓▒___ ████
\x1b[1;92m_______ ▒▓▒▓▒▓▒_ ██████
\x1b[1;92m_______▒▓▒▓▒▓▒ ████████
\x1b[1;92m_____ ▒▓▒▓▒▓▒_██████ ███
\x1b[1;92m_ ___▒▓▒▓▒▓▒__██████ _███
\x1b[1;92m_▓▓X▓▓▓▓▓▓▓__██████_ ███
\x1b[1;92m▓▓_██████▓▓__██████_ ███
\x1b[1;92m▓_███████▓▓__██████_ ███
\x1b[1;92m_████████▓▓__██████ _███
\x1b[1;92m_████████▓▓__▓▓▓▓▓▓_▒▒
\x1b[1;92m_████████▓▓__▓▓▓▓▓▓
\x1b[1;92m_████████▓▓__▓▓▓▓▓▓
\x1b[1;92m__████████▓___▓▓▓▓▓▓
\x1b[1;92m_______▒▒▒▒▒____▓▓▓▓▓▓
\x1b[1;92m_______▒▒▒▒▒ _____▓▓▓▓▓
\x1b[1;92m_______▒▒▒▒▒_____ ▓▓▓▓▓
\x1b[1;92m_______▒▒▒▒▒ _____▓▓▓▓▓
\x1b[1;92m________▒▒▒▒______▓▓▓▓▓
\x1b[1;92m________█████____█████
\x1b[1;92m_▀█║────────────▄▄────────────▄──▄_
\x1b[1;92m──█║───────▄─▄─█▄▄█║──────▄▄──█║─█║
\x1b[1;92m──█║───▄▄──█║█║█║─▄║▄──▄║█║─█║█║▄█║
\x1b[1;92m──█║──█║─█║█║█║─▀▀──█║─█║█║─█║─▀─▀
\x1b[1;92m──█║▄║█║─█║─▀───────█║▄█║─▀▀
\x1b[1;92m──▀▀▀──▀▀────────────▀─█║
\x1b[1;92m───────▄▄─▄▄▀▀▄▀▀▄──▀▄▄▀
\x1b[1;92m──────███████───▄▀
\x1b[1;92m──────▀█████▀▀▄▀
\x1b[1;92m────────▀█▀
\x1b[1;94mI WILL NEVER STOP LOVING U, AND I’ll CHERISH U AS LONG AS I liVE✫
\x1b[1;91m°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°
\x1b[1;92m➣Lover :\x1b[1;93mFAISAL KI JAN MISHAL❂
\x1b[1;92m➣Github :\x1b[1;93mHTTPS://GITHUB.COM/QUEEN007❂
\x1b[1;92m➣Whatsap:\x1b[1;93m+923035191880❂ ❂
\x1b[1;92m➣Gang :\x1b[1;93mNIGHT 007 FAMILY❂ ❂ ❂
\x1b[1;91m°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°"""
# titik #
# Animated "Logging In" dots. Python 2 semantics: ``print(...),`` is the
# print *statement* with a trailing comma (suppresses the newline), so the
# three frames overwrite each other via the leading '\r'. Blocks ~3 seconds.
def tik():
    titik = [". ",".. ","... "]
    for o in titik:
        print("\r[✔] Logging In "+o),;sys.stdout.flush();time.sleep(1)
back = 0  # NOTE(review): appears unused in this file -- possibly vestigial
id = []   # NOTE(review): shadows the builtin id(); also appears unused here
def tlogin():
    """Gate the tool behind a hard-coded username/password pair, then either
    launch .hop2.py (if a saved token file exists) or show the login menu.

    NOTE(review): wrong input recurses into tlogin() instead of looping, and
    after the recursive call returns, execution falls through to the next
    prompt anyway.
    """
    os.system('clear')
    print banner
    username = raw_input("[+] TOOL USERNAME: ")
    if username =="Shona":
        os.system('clear')
        print banner
        print "[✓] TOOL USERNAME: "+username+ " (correct)"
    else:
        print "[!] Invalid Username."
        time.sleep(1)
        tlogin()
    passw = raw_input("[+] TOOL PASSWORD: ")
    if passw =="Mishi":
        os.system('clear')
        print banner
        print "[✓] TOOL USERNAME: " +username+ " (correct)"
        print "[✓] TOOL PASSWORD: " +passw+ " (correct)"
        time.sleep(2)
    else:
        print "[!] Invalid Password."
        time.sleep(1)
        tlogin()
    # If 'login.txt' opens successfully a token is already saved, so hand off
    # to the follow-up script; on IOError fall back to the login menu.
    try:
        toket = open('login.txt','r')
        os.system('python2 .hop2.py')
    except (KeyError,IOError):
        methodlogin()
    else:
        # NOTE(review): this else branch runs after a *successful* open and
        # .hop2.py exit, yet reports an invalid password and recurses --
        # the message looks wrong, but behaviour is preserved as-is.
        print "[!] Invalid Password"
        time.sleep(1)
        tlogin()
##### Login Method #####
def methodlogin():
    """Login menu: (1) Facebook ID/password, (2) paste a raw access token,
    or exit.

    NOTE(review): the menu displays '3' for Exit but the code only accepts
    '0'; any other input also exits.
    """
    os.system('clear')
    print banner
    print "⍣1⍣ Login With ID/Password."
    print "⍣2⍣ Login Using Token."
    print "⍣3⍣ Exit."
    print (' ')
    hos = raw_input("\nChoose Option >> ")
    if hos =="":
        print"[!] Wrong Input"
        exit()
    elif hos =="1":
        login()
    elif hos =="2":
        os.system('clear')
        print banner
        hosp = raw_input("[+] Give Token : ")
        tik()
        # Persist the pasted token so later runs skip straight to .hop2.py.
        hopa = open('login.txt','w')
        hopa.write(hosp)
        hopa.close()
        print "\n[✓] Logged In Successfully."
        time.sleep(1)
        os.system('xdg-open https://www.youtube.com/channel/UCPRlRzOAEH8mcB1WtXf4Q1w')
        os.system('python2 .hop2.py')
    elif hos =="0":
        exit()
    else:
        print"[!] Wrong Input"
        exit()
def login():
    """Prompt for Facebook credentials, authenticate against the b-api
    auth.login endpoint, and save the returned access token to login.txt.

    SECURITY(review): credentials are sent to the endpoint below using an
    app access token embedded in the URL, and the resulting user token is
    stored in plaintext on disk.
    """
    os.system("clear")
    # A readable login.txt means a token is already saved -> skip login.
    try:
        tb=open('login.txt', 'r')
        os.system("python2 .hop2.py")
    except (KeyError,IOError):
        os.system("clear")
        print (banner)
        hamza('[+] Login Your Facebook Account')
        hamza('[!] Donot Use Your Personal Account')
        hamza('[!] Use a New Facebook Account To Login')
        print'-------------------------------------'
        iid=raw_input('[+] Number/Email: ')
        id=iid.replace(" ","")  # strip spaces; shadows the builtin id()
        pwd=raw_input('[+] Password : ')
        tik()
        data = br.open("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email="+(id)+"&locale=en_US&password="+(pwd)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
        z=json.load(data)
        if 'access_token' in z:
            st = open("login.txt", "w")
            st.write(z["access_token"])
            st.close()
            print "\n[✓] Logged In Successfully."
            time.sleep(1)
            os.system('xdg-open https://www.youtube.com/channel/UCPRlRzOAEH8mcB1WtXf4Q1w')
            os.system("clear")
            os.system("python2 .hop2.py")
        else:
            # error_msg containing a facebook.com URL signals a checkpoint
            # (account verification required); anything else is bad creds.
            if "www.facebook.com" in z["error_msg"]:
                print ('[!] User Must Verify Account Before Login.')
                time.sleep(3)
                login()
            else:
                print ('[!]Number/User Id/ Password Is Wrong !')
                time.sleep(1)
                login()
# Entry point: start at the hard-coded tool gate.
if __name__=='__main__':
    tlogin()
| [
"noreply@github.com"
] | M-Janum.noreply@github.com |
acb3a34623c303e165f4901f69ba7c4de3f2f1ef | bc75710c0688e2984fd475b6196483d1af41618e | /D3/even or odd.py | 5cb67bc19e92fa245f89057e32aab47971ea88cc | [] | no_license | cris-cos/100-days-of-Python | 2513cfe22827a9b6edc8c9351fcf3713a8f913c6 | 565d5bba635c8f4c43106c65a1c16b1bf10e1313 | refs/heads/main | 2023-03-12T05:52:54.328072 | 2021-03-02T12:45:02 | 2021-03-02T12:45:02 | 342,704,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | number = float(input("Which number do you want to check? "))
# `number` is read as float(input(...)) on the preceding line.
if number % 2 == 0:
    print("This is an even number.")
elif number % 2 == 1:
    print("This is an odd number.")
else:
    # Non-integral floats (e.g. 2.5 % 2 == 0.5) match neither branch above.
    print("The input was not an integer.")
"cris-cos@users.noreply.github.com"
] | cris-cos@users.noreply.github.com |
36feeee7cca40b437e3cb7498240e0cc9adc6314 | dbf0005c7c934e4164d7cd6d37d35404c05e1062 | /08.py | 04b4610d6326367cec3a4898d34a58ce2c94a84b | [] | no_license | karoberts/adventofcode2017 | d211721890a1e86b9353ada7b8510cca396db83c | 3a89d045b4e0685603bfe6c85e51a546e7341bc6 | refs/heads/master | 2023-08-03T09:16:00.188591 | 2023-07-25T22:05:09 | 2023-07-25T22:05:09 | 165,324,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py |
# Advent of Code 2017 day 8: conditional register instructions.
# Part 1: largest register value at the end; part 2: largest value ever held.
import re
from collections import defaultdict

pat = re.compile(r'^([a-z]+) (inc|dec) ([\-\d]+) if ([a-z]+) (<|>|==|!=|<=|>=) ([\-\d]+)$')

regs = defaultdict(lambda:0)
max_val = -9999999999

# Comparison dispatch table; the regex guarantees the operator is one of these.
_CMP = {
    '<': lambda u, v: u < v,
    '<=': lambda u, v: u <= v,
    '>': lambda u, v: u > v,
    '>=': lambda u, v: u >= v,
    '==': lambda u, v: u == v,
    '!=': lambda u, v: u != v,
}

with open('08.txt') as f:
    for line in (l.strip() for l in f):
        m = pat.match(line)
        if _CMP[m.group(5)](regs[m.group(4)], int(m.group(6))):
            step = int(m.group(3))
            if m.group(2) == 'dec':
                step = -step
            regs[m.group(1)] += step
            if regs[m.group(1)] > max_val:
                max_val = regs[m.group(1)]

max_reg = max(regs, key=lambda x:regs[x])
print('part1', max_reg, regs[max_reg])
print('part2', max_val)
| [
"cppwriter@yahoo.com"
] | cppwriter@yahoo.com |
4bc2b97cfdf5ecd84e54794669f4b1629022175a | d3efc82dfa61fb82e47c82d52c838b38b076084c | /utils/insertOrder.py | 85952c6096e1e1cff45f6714581d1c7d9b599c2b | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
# Make the project's XTP API wrapper and service helpers importable.
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from QueryStkPriceQty import *
import time
# Shared state mutated by the paged-query callbacks below.
a = []  # snapshots of the running counter appended by the page handler
i = 0   # running count of 'order_cancel_xtp_id' keys seen in callbacks
def insertOrder(order_client_id):
    """Place one cash buy order (Shenzhen A-share, forward-best price,
    200 shares) tagged with *order_client_id*."""
    goal = {
        'case_ID': 'ATC-103-19',
        '期望状态': '全成',
        'errorID': 0,
        'errorMSG': '',
        '是否生成报单': '是',
        '是否是撤废': '否',
        'xtp_ID': 0,
        'cancel_xtpID': 0,
    }
    # Look up a tradable security matching the expected fill state.
    quote = QueryStkPriceQty('999999', '2', '0', '2', '0', 'B', goal['期望状态'], Api)
    request = {
        'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
        'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
        'ticker': quote['证券代码'],
        'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
        'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
        'price': quote['涨停价'],
        'quantity': 200,
        'order_client_id': order_client_id,
    }
    Api.trade.InsertOrder(request)
# 报单分页查询
def test_orderpage(self):
    """Paged order query: count 'order_cancel_xtp_id' keys delivered by the
    async callback and expect exactly 3 for req_count=13 / reference=198.

    NOTE(review): written with ``self`` and self.assertEqual, so it is meant
    to run as a TestCase method via the star-imported test framework; the
    module-level globals `a` and `i` accumulate across calls.
    """
    def pagedate(data, req_count, order_sequence, query_reference, request_id, is_last):
        #print data,is_last
        global i
        for k in data.keys():
            if 'order_cancel_xtp_id' in k:
                i +=1
                a.append(i)
    Api.trade.setQueryOrderByPageHandle(pagedate)
    Api.trade.QueryOrdersByPage({'req_count':13,'reference':198})
    # Crude wait for the asynchronous callback to fire before asserting.
    time.sleep(0.5)
    rs = a[-1]
    self.assertEqual(rs, 3)
# 成交分页查询
def test_tradepage():
    """Paged trade query: dump each callback payload (Python 2 print
    statement) -- no assertions, purely exploratory."""
    def pagedate(data, req_count, trade_sequence, query_reference, request_id, is_last):
        print data,is_last
    Api.trade.setQueryTradeByPageHandle(pagedate)
    Api.trade.QueryTradesByPage({'req_count':10,'reference':0})
    # Crude wait for the asynchronous callback to fire before returning.
    time.sleep(0.5)
if __name__ == '__main__':
    # The bulk-insert loop below is kept for reference but disabled
    # (it is a bare string literal, not executed code).
    '''
    for i in range(100):
        order_client_id = i+1
        #print order_client_id
        Api.trade.Login()
        insertOrder(order_client_id)
    '''
    #test_orderpage()
    test_tradepage()
| [
"418033945@qq.com"
] | 418033945@qq.com |
f8d37d55fdc6c5c61c73896bdf3d72b009724305 | 0623e5471d1e6bc89bb43ad568c661bae4961138 | /markupfield/tests/markup.py | 4144194a03deda7ac28e2ec5e08e6edf03a9fc2a | [] | no_license | erikstein/django-markupfield | 30b251d82b8972ad9c46d2b593cc58ebb3db765b | 75e3392e6d2d5108108bba6177c6cc042a893177 | refs/heads/master | 2021-01-15T18:36:36.963747 | 2009-10-13T09:13:21 | 2009-10-13T09:13:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py |
# docutils-backed reStructuredText markup for django-markupfield.
try:
    import docutils
    # ``import docutils`` alone does not pull in the submodules used below;
    # import them explicitly so docutils.core / docutils.nodes are guaranteed
    # to be available regardless of import order elsewhere.
    import docutils.core
    import docutils.nodes
except ImportError:
    # Call form works on both Python 2 and 3 (the original used the
    # Python-2-only ``raise X, msg`` statement syntax).
    raise ImportError('Docutils not found')
from django.conf import settings
from markupfield.fields import Markup
# Django settings knobs with project-wide defaults.
INITIAL_HEADER_LEVEL = getattr(settings, "RST_INITIAL_HEADER_LEVEL", 2)
WRITER_NAME = getattr(settings, "RST_WRITER_NAME", 'html') # 'html4css1'
DEFAULT_LANGUAGE_CODE = getattr(settings, "LANGUAGE_CODE", 'en').split("-")[0]
class RestructuredtextMarkup(Markup):
    """Markup flavour that renders its raw reStructuredText via docutils."""
    # Baseline docutils settings, optionally overridden by the
    # RESTRUCTUREDTEXT_FILTER_SETTINGS Django setting.
    docutils_settings = {
        'language_code': DEFAULT_LANGUAGE_CODE,
        'doctitle_xform': False, # Don't use first section title as document title
        'input_encoding': 'utf-8',
        'initial_header_level': INITIAL_HEADER_LEVEL,
        'report_level': settings.DEBUG and 1 or 5,
    }
    docutils_settings.update(getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {}))
    def render(self, initial_header_level=INITIAL_HEADER_LEVEL, **kwargs):
        """
        Return the rendered HTML fragment for the raw source.
        """
        overrides = dict(self.docutils_settings,
                         initial_header_level=initial_header_level)
        parts = docutils.core.publish_parts(
            source=self.raw,
            writer_name=WRITER_NAME,
            settings_overrides=overrides,
        )
        return parts['fragment']
    render.is_safe = True
    def doctree(self, **kwargs):
        """
        Return the docutils document tree for the raw source.
        """
        return docutils.core.publish_doctree(
            self.raw, settings_overrides=self.docutils_settings)
    def title(self, **kwargs):
        """
        Return the plain text of the first title node, or None if the
        document has no title.
        """
        is_title = lambda node: isinstance(node, docutils.nodes.title)
        titles = self.doctree().traverse(condition=is_title)
        if not titles:
            return None
        return titles[0].astext()
    def plaintext(self, **kwargs):
        """
        Return the whole document as plain text (docutils 'astext').
        """
        return self.doctree().astext()
| [
"erik@abstract.(none)"
] | erik@abstract.(none) |
ff7b91b71ae8730fdadf4ccac6bead4e2cceb40b | cd65fc6650c2acc74fa0ed80189b21b779d14502 | /order_fast_force_page/__openerp__.py | 3c73d12680f0f3ebe5d7cd326db219a970b170de | [] | no_license | Micronaet/micronaet-force | 5e39720aa221731dc8dca46643705b48109a9bb3 | 85e2ce3b76aaebd532b8562ed596361dfc3b9a20 | refs/heads/master | 2023-01-29T16:10:50.684428 | 2023-01-05T16:14:11 | 2023-01-05T16:14:11 | 61,194,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | ###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# Odoo / OpenERP module manifest, evaluated as a Python dict literal.
{
    'name': 'Fast order force page',
    'version': '0.1',
    'category': 'Sale',
    'description': '''
Add a page in notebook for force purposes (used by other module)
''',
    'author': 'Micronaet S.r.l. - Nicola Riolini',
    'website': 'http://www.micronaet.it',
    'license': 'AGPL-3',
    # Modules that must be installed before this one:
    'depends': [
        'base',
        'sale',
        'sale_order_fast',
    ],
    'init_xml': [],
    'demo': [],
    # Data files loaded on install/update:
    'data': [
        'order_page_view.xml',
    ],
    'active': False,
    'installable': True,
    'auto_install': False,
}
| [
"nicola.riolini@gmail.com"
] | nicola.riolini@gmail.com |
d24771bc335c75b2d56f92a6a13181a5525815cb | ab6a701b2febd05244c463174d2b5c5cf2209909 | /marathon/30_회전하는_큐.py | 6efea88ccef9f45af2262fe0fbe5ec722e1fa04d | [] | no_license | seanstainability/algorithm | b32bcf1fe43e3f7317af49306eeac14a84c5a8b4 | d9e34087411552a81a7fcc5259b143f63d9a6760 | refs/heads/master | 2023-05-30T19:07:08.912020 | 2021-06-18T07:39:35 | 2021-06-18T07:39:35 | 376,260,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # https://www.acmicpc.net/problem/1021
from collections import deque
n, m = map(int, input().split())
p = list(map(int, input().split()))
queue = deque(range(1, n+1))
count = 0
for i in range(m):
q_len = len(queue)
q_idx = queue.index(p[i])
if q_idx > q_len // 2:
queue.rotate(q_len - q_idx) # 양수일 경우 맨 뒤의 값을 맨 앞으로 이동
count += (q_len - q_idx)
else:
queue.rotate(-q_idx) # 음수일 경우 맨 앞의 값을 맨 뒤로 이동
count += q_idx
queue.popleft()
print(count)
| [
"seanstainability@gmail.com"
] | seanstainability@gmail.com |
1e5f987d41d97ef44c43f8f9404d687860ede5cb | 6adf0e4805ebca4e8293f67cdc7547dacdf24cdc | /smartcab/environment.py | 820668bd37ebcbc5fc27885da935b7ad00da5740 | [
"Apache-2.0"
] | permissive | arlenye/smartcab-master | afb2b86e07a5636339ea0a73908d166c6893390a | 30549b315ee058ce8bc004dde7a4209f86ebdc40 | refs/heads/master | 2021-04-30T15:41:51.954675 | 2018-02-14T00:32:47 | 2018-02-14T00:32:47 | 121,246,911 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,860 | py | import time
import random
import math
from collections import OrderedDict
from simulator import Simulator
class TrafficLight(object):
    """A traffic light that flips between NS-open and EW-open on a fixed period."""
    valid_states = [True, False]  # True = NS open; False = EW open
    def __init__(self, state=None, period=None):
        """Create a light; any argument left as None is chosen at random."""
        if state is None:
            state = random.choice(self.valid_states)
        if period is None:
            period = random.choice([2, 3, 4, 5])
        self.state = state
        self.period = period
        self.last_updated = 0
    def reset(self):
        """Forget the time of the last switch (start of a new trial)."""
        self.last_updated = 0
    def update(self, t):
        """Flip the light if at least one full period has elapsed since the
        last flip, recording *t* as the new switch time."""
        elapsed = t - self.last_updated
        if elapsed >= self.period:
            self.state = not self.state  # Assuming state is boolean
            self.last_updated = t
class Environment(object):
    """Environment within which all agents operate.

    Owns the road grid and its traffic lights, the dummy traffic agents and
    the primary (learning) agent, advances simulated time in step(), reports
    local sensor readings in sense(), and scores actions in act().
    """
    valid_actions = [None, 'forward', 'left', 'right']
    valid_inputs = {'light': TrafficLight.valid_states, 'oncoming': valid_actions, 'left': valid_actions, 'right': valid_actions}
    valid_headings = [(1, 0), (0, -1), (-1, 0), (0, 1)] # E, N, W, S
    hard_time_limit = -100 # Set a hard time limit even if deadline is not enforced.
    def __init__(self, verbose=False, num_dummies=100, grid_size = (8, 6)):
        self.num_dummies = num_dummies # Number of dummy driver agents in the environment
        self.verbose = verbose # If debug output should be given
        # Initialize simulation variables
        self.done = False
        self.t = 0
        self.agent_states = OrderedDict()
        self.step_data = {}
        self.success = None
        # Road network
        self.grid_size = grid_size # (columns, rows)
        self.bounds = (1, 2, self.grid_size[0], self.grid_size[1] + 1)
        self.block_size = 100
        self.hang = 0.6
        self.intersections = OrderedDict()
        self.roads = []
        for x in xrange(self.bounds[0], self.bounds[2] + 1):
            for y in xrange(self.bounds[1], self.bounds[3] + 1):
                self.intersections[(x, y)] = TrafficLight() # A traffic light at each intersection
        # Connect every pair of adjacent intersections with a road segment.
        for a in self.intersections:
            for b in self.intersections:
                if a == b:
                    continue
                if (abs(a[0] - b[0]) + abs(a[1] - b[1])) == 1: # L1 distance = 1
                    self.roads.append((a, b))
        # Add environment boundaries
        for x in xrange(self.bounds[0], self.bounds[2] + 1):
            self.roads.append(((x, self.bounds[1] - self.hang), (x, self.bounds[1])))
            self.roads.append(((x, self.bounds[3] + self.hang), (x, self.bounds[3])))
        for y in xrange(self.bounds[1], self.bounds[3] + 1):
            self.roads.append(((self.bounds[0] - self.hang, y), (self.bounds[0], y)))
            self.roads.append(((self.bounds[2] + self.hang, y), (self.bounds[2], y)))
        # Create dummy agents
        for i in xrange(self.num_dummies):
            self.create_agent(DummyAgent)
        # Primary agent and associated parameters
        self.primary_agent = None # to be set explicitly
        self.enforce_deadline = False
        # Trial data (updated at the end of each trial)
        self.trial_data = {
            'testing': False, # if the trial is for testing a learned policy
            'initial_distance': 0, # L1 distance from start to destination
            'initial_deadline': 0, # given deadline (time steps) to start with
            'net_reward': 0.0, # total reward earned in current trial
            'final_deadline': None, # deadline value (time remaining) at the end
            'actions': {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, # violations and accidents
            'success': 0 # whether the agent reached the destination in time
        }
    def create_agent(self, agent_class, *args, **kwargs):
        """ When called, create_agent creates an agent in the environment. """
        agent = agent_class(self, *args, **kwargs)
        # Spawn at a random intersection, heading south (0, 1).
        self.agent_states[agent] = {'location': random.choice(self.intersections.keys()), 'heading': (0, 1)}
        return agent
    def set_primary_agent(self, agent, enforce_deadline=False):
        """ When called, set_primary_agent sets 'agent' as the primary agent.
            The primary agent is the smartcab that is followed in the environment. """
        self.primary_agent = agent
        agent.primary_agent = True
        self.enforce_deadline = enforce_deadline
    def reset(self, testing=False):
        """ This function is called at the beginning of a new trial. """
        self.done = False
        self.t = 0
        # Reset status text
        self.step_data = {}
        # Reset traffic lights
        for traffic_light in self.intersections.itervalues():
            traffic_light.reset()
        # Pick a start and a destination
        start = random.choice(self.intersections.keys())
        destination = random.choice(self.intersections.keys())
        # Ensure starting location and destination are not too close
        while self.compute_dist(start, destination) < 4:
            start = random.choice(self.intersections.keys())
            destination = random.choice(self.intersections.keys())
        start_heading = random.choice(self.valid_headings)
        distance = self.compute_dist(start, destination)
        deadline = distance * 5 # 5 time steps per intersection away
        if(self.verbose == True): # Debugging
            print "Environment.reset(): Trial set up with start = {}, destination = {}, deadline = {}".format(start, destination, deadline)
        # Create a map of all possible initial positions
        positions = dict()
        for location in self.intersections:
            positions[location] = list()
            for heading in self.valid_headings:
                positions[location].append(heading)
        # Initialize agent(s)
        for agent in self.agent_states.iterkeys():
            if agent is self.primary_agent:
                self.agent_states[agent] = {
                    'location': start,
                    'heading': start_heading,
                    'destination': destination,
                    'deadline': deadline
                }
            # For dummy agents, make them choose one of the available
            # intersections and headings still in 'positions'
            else:
                intersection = random.choice(positions.keys())
                heading = random.choice(positions[intersection])
                self.agent_states[agent] = {
                    'location': intersection,
                    'heading': heading,
                    'destination': None,
                    'deadline': None
                }
                # Now delete the taken location and heading from 'positions'
                positions[intersection] = list(set(positions[intersection]) - set([heading]))
                if positions[intersection] == list(): # No headings available for intersection
                    del positions[intersection] # Delete the intersection altogether
            agent.reset(destination=(destination if agent is self.primary_agent else None), testing=testing)
            if agent is self.primary_agent:
                # Reset metrics for this trial (step data will be set during the step)
                self.trial_data['testing'] = testing
                self.trial_data['initial_deadline'] = deadline
                self.trial_data['final_deadline'] = deadline
                self.trial_data['net_reward'] = 0.0
                self.trial_data['actions'] = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}
                # NOTE(review): assumes the primary agent exposes epsilon and
                # alpha attributes (the learning agent) -- confirm at caller.
                self.trial_data['parameters'] = {'e': agent.epsilon, 'a': agent.alpha}
                self.trial_data['success'] = 0
    def step(self):
        """ This function is called when a time step is taken during a trial. """
        # Pretty print to terminal
        print ""
        print "/-------------------"
        print "| Step {} Results".format(self.t)
        print "\-------------------"
        print ""
        if(self.verbose == True): # Debugging
            print "Environment.step(): t = {}".format(self.t)
        # Update agents, primary first
        if self.primary_agent is not None:
            self.primary_agent.update()
        for agent in self.agent_states.iterkeys():
            if agent is not self.primary_agent:
                agent.update()
        # Update traffic lights
        for intersection, traffic_light in self.intersections.iteritems():
            traffic_light.update(self.t)
        if self.primary_agent is not None:
            # Agent has taken an action: reduce the deadline by 1
            agent_deadline = self.agent_states[self.primary_agent]['deadline'] - 1
            self.agent_states[self.primary_agent]['deadline'] = agent_deadline
            if agent_deadline <= self.hard_time_limit:
                self.done = True
                self.success = False
                if self.verbose: # Debugging
                    print "Environment.step(): Primary agent hit hard time limit ({})! Trial aborted.".format(self.hard_time_limit)
            elif self.enforce_deadline and agent_deadline <= 0:
                self.done = True
                self.success = False
                if self.verbose: # Debugging
                    print "Environment.step(): Primary agent ran out of time! Trial aborted."
        self.t += 1
    def sense(self, agent):
        """ This function is called when information is requested about the sensor
            inputs from an 'agent' in the environment. """
        assert agent in self.agent_states, "Unknown agent!"
        state = self.agent_states[agent]
        location = state['location']
        heading = state['heading']
        # The light is green when it is open along the axis the agent faces.
        light = 'green' if (self.intersections[location].state and heading[1] != 0) or ((not self.intersections[location].state) and heading[0] != 0) else 'red'
        # Populate oncoming, left, right
        oncoming = None
        left = None
        right = None
        # Classify every other agent at this intersection by its heading
        # relative to ours (opposite = oncoming, perpendicular = left/right).
        for other_agent, other_state in self.agent_states.iteritems():
            if agent == other_agent or location != other_state['location'] or (heading[0] == other_state['heading'][0] and heading[1] == other_state['heading'][1]):
                continue
            # For dummy agents, ignore the primary agent
            # This is because the primary agent is not required to follow the waypoint
            if other_agent == self.primary_agent:
                continue
            other_heading = other_agent.get_next_waypoint()
            if (heading[0] * other_state['heading'][0] + heading[1] * other_state['heading'][1]) == -1:
                if oncoming != 'left': # we don't want to override oncoming == 'left'
                    oncoming = other_heading
            elif (heading[1] == other_state['heading'][0] and -heading[0] == other_state['heading'][1]):
                if right != 'forward' and right != 'left': # we don't want to override right == 'forward or 'left'
                    right = other_heading
            else:
                if left != 'forward': # we don't want to override left == 'forward'
                    left = other_heading
        return {'light': light, 'oncoming': oncoming, 'left': left, 'right': right}
    def get_deadline(self, agent):
        """ Returns the deadline remaining for an agent. """
        return self.agent_states[agent]['deadline'] if agent is self.primary_agent else None
    def act(self, agent, action):
        """ Consider an action and perform the action if it is legal.
            Receive a reward for the agent based on traffic laws. """
        assert agent in self.agent_states, "Unknown agent!"
        assert action in self.valid_actions, "Invalid action!"
        state = self.agent_states[agent]
        location = state['location']
        heading = state['heading']
        light = 'green' if (self.intersections[location].state and heading[1] != 0) or ((not self.intersections[location].state) and heading[0] != 0) else 'red'
        inputs = self.sense(agent)
        # Assess whether the agent can move based on the action chosen.
        # Either the action is okay to perform, or falls under 4 types of violations:
        # 0: Action okay
        # 1: Minor traffic violation
        # 2: Major traffic violation
        # 3: Minor traffic violation causing an accident
        # 4: Major traffic violation causing an accident
        violation = 0
        # Reward scheme
        # First initialize reward uniformly random from [-1, 1]
        reward = 2 * random.random() - 1
        # Create a penalty factor as a function of remaining deadline
        # Scales reward multiplicatively from [0, 1]
        fnc = self.t * 1.0 / (self.t + state['deadline']) if agent.primary_agent else 0.0
        #if fnc != 0.0:
            #print('agent.primary_agent:',agent.primary_agent)
            #print('fnc:',fnc)
        gradient = 10
        # No penalty given to an agent that has no enforced deadline
        penalty = 0
        # If the deadline is enforced, give a penalty based on time remaining
        if self.enforce_deadline:
            # Exponential ramp: penalty grows from 0 toward 1 as the trial
            # uses up its deadline (fnc goes 0 -> 1).
            penalty = (math.pow(gradient, fnc) - 1) / (gradient - 1)
        #if penalty != 0.0:
            #print('penalty:',penalty)
        # Agent wants to drive forward:
        if action == 'forward':
            if light != 'green': # Running red light
                violation = 2 # Major violation
                if inputs['left'] == 'forward' or inputs['right'] == 'forward': # Cross traffic
                    violation = 4 # Accident
        # Agent wants to drive left:
        elif action == 'left':
            if light != 'green': # Running a red light
                violation = 2 # Major violation
                if inputs['left'] == 'forward' or inputs['right'] == 'forward': # Cross traffic
                    violation = 4 # Accident
                elif inputs['oncoming'] == 'right': # Oncoming car turning right
                    violation = 4 # Accident
            else: # Green light
                if inputs['oncoming'] == 'right' or inputs['oncoming'] == 'forward': # Incoming traffic
                    violation = 3 # Accident
                else: # Valid move!
                    heading = (heading[1], -heading[0])
        # Agent wants to drive right:
        elif action == 'right':
            if light != 'green' and inputs['left'] == 'forward': # Cross traffic
                violation = 3 # Accident
            else: # Valid move!
                heading = (-heading[1], heading[0])
        # Agent wants to perform no action:
        elif action == None:
            if light == 'green' and inputs['oncoming'] != 'left': # No oncoming traffic
                violation = 1 # Minor violation
        # Did the agent attempt a valid move?
        if violation == 0:
            if action == agent.get_next_waypoint(): # Was it the correct action?
                reward += 2 - penalty # (2, 1)
            elif action == None and light != 'green': # Was the agent stuck at a red light?
                reward += 2 - penalty # (2, 1)
            else: # Valid but incorrect
                reward += 1 - penalty # (1, 0)
            # Move the agent
            if action is not None:
                location = ((location[0] + heading[0] - self.bounds[0]) % (self.bounds[2] - self.bounds[0] + 1) + self.bounds[0],
                            (location[1] + heading[1] - self.bounds[1]) % (self.bounds[3] - self.bounds[1] + 1) + self.bounds[1])  # wrap-around
                state['location'] = location
                state['heading'] = heading
        # Agent attempted invalid move
        else:
            if violation == 1: # Minor violation
                reward += -5
            elif violation == 2: # Major violation
                reward += -10
            elif violation == 3: # Minor accident
                reward += -20
            elif violation == 4: # Major accident
                reward += -40
        # Did agent reach the goal after a valid move?
        if agent is self.primary_agent:
            if state['location'] == state['destination']:
                # Did agent get to destination before deadline?
                if state['deadline'] >= 0:
                    self.trial_data['success'] = 1
                # Stop the trial
                self.done = True
                self.success = True
                if(self.verbose == True): # Debugging
                    print "Environment.act(): Primary agent has reached destination!"
            if(self.verbose == True): # Debugging
                print "Environment.act() [POST]: location: {}, heading: {}, action: {}, reward: {}".format(location, heading, action, reward)
            # Update metrics
            self.step_data['t'] = self.t
            self.step_data['violation'] = violation
            self.step_data['state'] = agent.get_state()
            self.step_data['deadline'] = state['deadline']
            self.step_data['waypoint'] = agent.get_next_waypoint()
            self.step_data['inputs'] = inputs
            self.step_data['light'] = light
            self.step_data['action'] = action
            self.step_data['reward'] = reward
            self.trial_data['final_deadline'] = state['deadline'] - 1
            self.trial_data['net_reward'] += reward
            self.trial_data['actions'][violation] += 1
            if(self.verbose == True): # Debugging
                print "Environment.act(): Step data: {}".format(self.step_data)
        return reward
    def compute_dist(self, a, b):
        """ Compute the Manhattan (L1) distance of a spherical world. """
        # Each axis wraps around, so the distance along an axis is the
        # shorter of the direct gap and the wrap-around gap.
        dx1 = abs(b[0] - a[0])
        dx2 = abs(self.grid_size[0] - dx1)
        dx = dx1 if dx1 < dx2 else dx2
        dy1 = abs(b[1] - a[1])
        dy2 = abs(self.
        grid_size[1] - dy1)
        dy = dy1 if dy1 < dy2 else dy2
        return dx + dy
class Agent(object):
    """Base class for every agent that acts in the Environment."""
    def __init__(self, env):
        """Attach the agent to *env* and set default bookkeeping values."""
        self.env = env
        self.primary_agent = False
        self.color = 'white'
        self.state = None
        self.next_waypoint = None
    def reset(self, destination=None, testing=False):
        """Hook invoked at the start of each trial; subclasses may override."""
        pass
    def update(self):
        """Hook invoked once per time step; subclasses may override."""
        pass
    def get_state(self):
        """Return the agent's current state (None by default)."""
        return self.state
    def get_next_waypoint(self):
        """Return the waypoint the agent intends to take next."""
        return self.next_waypoint
class DummyAgent(Agent):
    """Scripted traffic agent that moves randomly while obeying right-of-way."""
    color_choices = ['cyan', 'red', 'blue', 'green', 'orange', 'magenta', 'yellow']
    def __init__(self, env):
        """Pick a random first waypoint and a random display colour."""
        super(DummyAgent, self).__init__(env)
        self.next_waypoint = random.choice(Environment.valid_actions[1:])
        self.color = random.choice(self.color_choices)
    def update(self):
        """Take the planned waypoint if traffic law allows it, then pick a
        new random waypoint; otherwise wait this step."""
        inputs = self.env.sense(self)
        red = inputs['light'] == 'red'
        # Is the planned move safe/legal given the light and cross traffic?
        action_okay = True
        if self.next_waypoint == 'right':
            if red and inputs['left'] == 'forward':
                action_okay = False
        elif self.next_waypoint == 'forward':
            if red:
                action_okay = False
        elif self.next_waypoint == 'left':
            if red or inputs['oncoming'] in ('forward', 'right'):
                action_okay = False
        # Execute the move (or None) and plan the next waypoint.
        action = None
        if action_okay:
            action = self.next_waypoint
            self.next_waypoint = random.choice(Environment.valid_actions[1:])
        reward = self.env.act(self, action)
"james_ye02@infosys.com"
] | james_ye02@infosys.com |
74d87a5ce7eb433611522796babb219ac90804ac | 4fec604a210c71f295c99c004ac5c09ccddb7c4f | /jarvis.py | a519d6fd10444767fc75ab52872cfcdbc5ffb684 | [] | no_license | wiserenegade77/jarvis | dce329e7bf4a0fe6082db59ab1b1224c60e1ec20 | 8a3ef8906b94b842fdc8af14951cc5a27374af55 | refs/heads/master | 2023-09-01T00:31:35.154881 | 2021-10-22T12:59:22 | 2021-10-22T12:59:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,997 | py | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
import requests,bs4,sys
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
#print(voices[2].id)
engine.setProperty('voices',voices[1].id)
def take():
r=sr.Recognizer()
with sr.Microphone() as source:
print("listening")
r.pause_threshold = 1
audio=r.listen(source)
try:
print("recognizing...")
query=r.recognize_google(audio, language='en-in')
print(f"user said: {query}\n")
except Exception as e:
print("say that again ")
return "None"
return query
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishme():
hour=int(datetime.datetime.now().hour)
minutes=int(datetime.datetime.now().minute)
if hour>=0.00 and hour<12.00:
speak("Good Morning sir")
elif hour>=12.00 and hour<18.00:
speak("Good evening sir")
else:
speak("Good Afternoon sir")
#speak("its {} o {} sir".format(hour,minutes))
def sendemail(to,content):
server=smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.login('wiserenegade77@gmail.com','arsh0224')
server.sendmail('wiserenegade77@gmail.com',to,content)
server.close()
def search(audio):
try:
from googlesearch import search
except ImportError:
print("no module found")
for j in search(audio,tld="co.in",num=10,stop=10,pause=2):
print(j)
#speak("hello Mr Mago i am jarvis")
wishme()
query=""
while query!="shutdown":
query=take().lower()
if 'wikipedia' in query:
query=query.replace("wikipedia","")
result = wikipedia.search(query, results = 5)
print(result)
speak(result)
query=take().lower()
results=wikipedia.summary(query,sentences=5)
speak("accoring to wikipedia")
speak(results)
#page_object=wikipedia.page(results)
#print(page_object.original_title)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'music' in query:
music_dir='D:\\songs'
songs=os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[5]))
elif 'open code' in query:
codepath="F:\\vs code\\Microsoft VS Code\\Code.exe"
os.startfile(codepath)
elif 'time' in query:
strtime=datetime.datetime.now().strftime("%H:%M:%S")
speak(f"sir the time is {strtime}")
elif 'email' in query:
try:
speak("sir enter your content")
content=take()
to="raghavmago2@gmail.com"
sendemail(to,content)
speak("sir, email sent")
except Exception as e:
speak("sir email not sent")
elif 'search' in query:
query=take().lower()
search(query)
| [
"raghavmago2@gmail.com"
] | raghavmago2@gmail.com |
6b98d99bc0f46b23753ea4193cab8a0f0ea53469 | f76a16db5e3c5c911d906d015ce78f852179643b | /utils_3DMM.py | ae31ea8ee5064eda6be79f7bd33e66aa50aa68b7 | [] | no_license | blueskyM01/SD_GAN_Tensorflow | 2166230adc2fe4db2bc244ad928add00e5ce837e | 3972e5bdcff9087f36da8ed79cc003e17d575d49 | refs/heads/master | 2020-04-23T10:42:51.459937 | 2019-05-13T10:35:43 | 2019-05-13T10:35:43 | 171,112,705 | 0 | 5 | null | 2019-04-05T08:49:52 | 2019-02-17T11:13:06 | Python | UTF-8 | Python | false | false | 6,818 | py | #############################################################################
#Copyright 2016-2017, Anh Tuan Tran, Tal Hassner, Iacopo Masi, and Gerard Medioni
#The SOFTWARE provided in this page is provided "as is", without any guarantee
#made as to its suitability or fitness for any particular use. It may contain
#bugs, so use of this tool is at your own risk. We take no responsibility for
#any damage of any sort that may unintentionally be caused through its use.
# Please, cite the paper:
# @article{tran16_3dmm_cnn,
# title={Regressing Robust and Discriminative {3D} Morphable Models with a very Deep Neural Network},
# author={Anh Tran
# and Tal Hassner
# and Iacopo Masi
# and G\'{e}rard Medioni}
# journal={arXiv preprint},
# year={2016}
# }
# if you find our code useful.
##############################################################################
import numpy as np
import cv2
rescaleCASIA = [1.9255, 2.2591, 1.9423, 1.6087]
rescaleBB = [1.785974, 1.951171, 1.835600, 1.670403]
def get_mean_shape(model):
S = model.shapeMU
numVert = S.shape[0]/3
## Final Saving for visualization
S = np.reshape(S,(numVert,3))
return S
def projectBackBFM(model,features):
alpha = model.shapeEV * 0
for it in range(0, 99):
alpha[it] = model.shapeEV[it] * features[it]
S = np.matmul(model.shapePC, alpha)
## Adding back average shape
S = model.shapeMU + S
numVert = S.shape[0]/3
# (Texture)
beta = model.texEV * 0
for it in range(0, 99):
beta[it] = model.texEV[it] * features[it+99]
T = np.matmul(model.texPC, beta)
## Adding back average texture
T = model.texMU + T
## Some filtering
T = [truncateUint8(value) for value in T]
## Final Saving for visualization
S = np.reshape(S,(numVert,3))
T = np.reshape(T,(numVert, 3))
return S,T
def projectBackBFM_withExpr(model, features, expr_paras):
alpha = model.shapeEV * 0
for it in range(0, 99):
alpha[it] = model.shapeEV[it] * features[it]
S = np.matmul(model.shapePC, alpha)
expr = model.expEV * 0
for it in range(0, 29):
expr[it] = model.expEV[it] * expr_paras[it]
E = np.matmul(model.expPC, expr)
## Adding back average shape
S = model.shapeMU + S + model.expMU + E
numVert = S.shape[0]/3
# (Texture)
beta = model.texEV * 0
for it in range(0, 99):
beta[it] = model.texEV[it] * features[it+99]
T = np.matmul(model.texPC, beta)
## Adding back average texture
T = model.texMU + T
## Some filtering
T = [truncateUint8(value) for value in T]
## Final Saving for visualization
S = np.reshape(S,(numVert,3))
T = np.reshape(T,(numVert, 3))
return S,T
def projectBackBFM_withEP(model, features, expr_paras, pose_paras):
alpha = model.shapeEV * 0
for it in range(0, 99):
alpha[it] = model.shapeEV[it] * features[it]
S = np.matmul(model.shapePC, alpha)
# Expression
expr = model.expEV * 0
for it in range(0, 29):
expr[it] = model.expEV[it] * expr_paras[it]
E = np.matmul(model.expPC, expr)
## Adding back average shape
S = model.shapeMU + S + model.expMU + E
numVert = S.shape[0]// 3
# Pose
#PI = np.array([[ 2.88000000e+03, 0.00000000e+00, 1.12000000e+02], [0.00000000e+00, 2.88000000e+03, 1.12000000e+02], [0, 0, 1]]);
r = pose_paras[0:3]
r[1] = -r[1]
r[2] = -r[2]
t = pose_paras[3:6]
t[0] = -t[0]
#print r.shape, t.shape
R, jacobian = cv2.Rodrigues(r, None)
#print R
S = np.reshape(S,(numVert,3))
#print S.shape
S_RT = np.matmul(R, np.transpose(S)) + np.reshape(t, [3,1])
#S_RT = np.matmul(PI, S_RT)
S_RT = np.transpose(S_RT)
# (Texture)
beta = model.texEV * 0
for it in range(0, 99):
beta[it] = model.texEV[it] * features[it+99]
T = np.matmul(model.texPC, beta)
## Adding back average texture
T = model.texMU + T
## Some filtering
T = [truncateUint8(value) for value in T]
## Final Saving for visualization
S = np.reshape(S_RT,(numVert,3))
T = np.reshape(T,(numVert, 3))
return S,T
def truncateUint8(val):
if val < 0:
return 0
elif val > 255:
return 255
else:
return val
def write_ply(fname, S, T, faces):
nV = S.shape[0]
nF = faces.shape[0]
f = open(fname,'w')
f.write('ply\n')
f.write('format ascii 1.0\n')
f.write('element vertex ' + str(nV) + '\n')
f.write('property float x\n')
f.write('property float y\n')
f.write('property float z\n')
f.write('property uchar red\n')
f.write('property uchar green\n')
f.write('property uchar blue\n')
f.write('element face ' + str(nF) + '\n')
f.write('property list uchar int vertex_indices\n')
f.write('end_header\n')
for i in range(0,nV):
f.write('%0.4f %0.4f %0.4f %d %d %d\n' % (S[i,0],S[i,1],S[i,2],T[i,0],T[i,1],T[i,2]))
for i in range(0,nF):
f.write('3 %d %d %d\n' % (faces[i,0],faces[i,1],faces[i,2]))
f.close()
def write_ply_textureless(fname, S, faces):
nV = S.shape[0]
nF = faces.shape[0]
f = open(fname,'w')
f.write('ply\n')
f.write('format ascii 1.0\n')
f.write('element vertex ' + str(nV) + '\n')
f.write('property float x\n')
f.write('property float y\n')
f.write('property float z\n')
f.write('element face ' + str(nF) + '\n')
f.write('property list uchar int vertex_indices\n')
f.write('end_header\n')
for i in range(0,nV):
f.write('%0.4f %0.4f %0.4f\n' % (S[i,0],S[i,1],S[i,2]))
for i in range(0,nF):
f.write('3 %d %d %d\n' % (faces[i,0],faces[i,1],faces[i,2]))
f.close()
def cropImg(img,tlx,tly,brx,bry, img2, rescale):
l = float( tlx )
t = float ( tly )
ww = float ( brx - l )
hh = float( bry - t )
# Approximate LM tight BB
h = img.shape[0]
w = img.shape[1]
cv2.rectangle(img2, (int(l),int(t)), (int(brx), int(bry)), (0,255,255),2)
cx = l + ww/2
cy = t + hh/2
tsize = max(ww,hh)/2
l = cx - tsize
t = cy - tsize
# Approximate expanded bounding box
bl = int(round(cx - rescale[0]*tsize))
bt = int(round(cy - rescale[1]*tsize))
br = int(round(cx + rescale[2]*tsize))
bb = int(round(cy + rescale[3]*tsize))
nw = int(br-bl)
nh = int(bb-bt)
imcrop = np.zeros((nh,nw,3), dtype = "uint8")
ll = 0
if bl < 0:
ll = -bl
bl = 0
rr = nw
if br > w:
rr = w+nw - br
br = w
tt = 0
if bt < 0:
tt = -bt
bt = 0
bbb = nh
if bb > h:
bbb = h+nh - bb
bb = h
imcrop[tt:bbb,ll:rr,:] = img[bt:bb,bl:br,:]
return imcrop
def cropByInputLM(img, lms, img2):
nLM = lms.shape[0]
lms_x = [lms[i,0] for i in range(0,nLM)];
lms_y = [lms[i,1] for i in range(0,nLM)];
return cropImg(img,min(lms_x),min(lms_y),max(lms_x),max(lms_y), img2, rescaleCASIA)
def cropByFaceDet(img, detected_face, img2):
return cropImg(img,detected_face.left(),detected_face.top(),\
detected_face.right(),detected_face.bottom(), img2, rescaleBB)
def cropByLM(img, shape, img2):
nLM = shape.num_parts
lms_x = [shape.part(i).x for i in range(0,nLM)]
lms_y = [shape.part(i).y for i in range(0,nLM)]
return cropImg(img,min(lms_x),min(lms_y),max(lms_x),max(lms_y), img2, rescaleCASIA)
| [
"1226853967@qq.com"
] | 1226853967@qq.com |
9f80c09591aba84706bbcb6441e4cdc592592d31 | a425842a51deab915fc4319b3226cef3f49e53ea | /build/extriPACK/industrial_calibration/rgbd_depth_correction/catkin_generated/pkg.installspace.context.pc.py | 52826087801e42a5ebac29a053ae0e8b3d52ccb0 | [] | no_license | Sinchiguano/Part-Localization-For-Robotic-Arm | 1458204e52f34354cbd0e8e1bff1dfaf6caefe1c | ebc1ed19da171ff4b5a52a3a031ae3049b0b9eb8 | refs/heads/master | 2021-10-08T19:49:53.455680 | 2018-12-16T20:03:04 | 2018-12-16T20:03:04 | 155,774,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/usr/include;/usr/local/include;/usr/include/eigen3".split(';') if "/usr/include;/usr/local/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;geometry_msgs;industrial_extrinsic_cal;message_filters;nodelet;pcl_conversions;pcl_msgs;pcl_ros;pluginlib;roscpp;sensor_msgs;std_srvs;target_finder;tf;tf_conversions".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so;/usr/local/lib/libceres.a".split(';') if "/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so;/usr/local/lib/libceres.a" != "" else []
PROJECT_NAME = "rgbd_depth_correction"
PROJECT_SPACE_DIR = "/home/casch/yumi_ws/install"
PROJECT_VERSION = "0.1.0"
| [
"cesarsinchiguano@hotmail.es"
] | cesarsinchiguano@hotmail.es |
1ec1082c420c57632e1d8fbdbff3c24e3f426d14 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 11262 - Weird Fence/EdmondsKarp.py | a3cd3aa934f2d8f5f06832cbe4c94dceea41b641 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | '''
Created on 2013-6-24
@author: Yubin Bai
'''
from _collections import deque
INF = 1 << 32
def edmondsKarp(graph, s, t):
def augmentPath(v, minEdge):
if (v == s): # managed to get back to source
f[0] = minEdge # minEdge of the path
return
elif (v in p): # augment if there is a path
# we need AdjMat for fast lookup here
augmentPath(p[v], min(minEdge, graph[p[v]][v]))
graph[p[v]][v] -= f[0] # forward edges -> decrease
graph[v][p[v]] += f[0] # backward edges -> increase
p = {} # parent map to reconstruct path
f = [0] # global variables, use list as mutable
max_flow = 0
while True: # this will be run max O(VE) times
f[0] = 0
q = deque()
dist = {s: 0} # O(E) BFS and record path p
q.append(s)
while q:
u = q.popleft() # queue: layer by layer!
if (u == t):
break # modification 1: reach sink t, stop BFS
for v in graph[u]: # for each neighbors of u
# modification 2: also check AdjMat as edges may disappear
if graph[u][v] > 0 and v not in dist:
dist[v] = dist[u] + 1 # then v is reachable from u
q.append(v) # enqueue v for next steps
p[v] = u # modification 3: parent of v->first is u
augmentPath(t, INF) # path augmentation in O(V)
if (f[0] == 0):
break # seems that we cannot pass any more flow
max_flow += f[0]
return max_flow
if __name__ == '__main__':
graph = {1: {1: 0, 2: 0, 3: 70, 4: 30}, 3: {1: 0, 2: 25, 3: 0, 4: 5},
4: {1: 0, 2: 70, 3: 0, 4: 0}, 2: {1: 0, 2: 0, 3: 0, 4: 0}}
max_flow = edmondsKarp(graph, 1, 2)
print("Max flow = %d\n" % max_flow)
| [
"xenron@outlook.com"
] | xenron@outlook.com |
5f8c4bc992b0a0084eba2ec78d5cc8c4e83c007f | a8c2a5ce6966d098849934c921dcebebfbc6f321 | /app/validation.py | 47a531325e23c2ce277405df1ee5835b840012c0 | [] | no_license | Jacksonmwirigi/politico | 2e7af49b4e4982211513a422850d4631b8a6fc7a | 1cdac0efa8ea2352ccabe0432de1660796036832 | refs/heads/develop | 2020-04-19T17:22:40.343325 | 2019-02-24T19:01:30 | 2019-02-24T19:01:30 | 168,333,421 | 1 | 2 | null | 2019-02-24T19:01:31 | 2019-01-30T11:40:00 | Python | UTF-8 | Python | false | false | 1,705 | py | import re
import datetime
import urllib.request as req
from urllib.request import urlopen, URLError
import urllib.parse as p
from datetime import datetime
from flask import Flask, jsonify, make_response, request
VALID_IMAGE_EXTENSIONS = [
""""List of valid image exstensions"""
".jpg",
".jpeg",
".png",
".gif",
]
def valid_url_extension(url, extension_list=VALID_IMAGE_EXTENSIONS):
"""Validates logo url for valid image extensions"""
return any([url.endswith(e)
for e in extension_list])
def is_office_key_correct(request):
"""Checks for correct keys in the request """
my_keys = ['office_name', 'office_type']
error = []
for key in my_keys:
if not key in request.json:
error.append(key)
return error
def is_party_key_correct(request):
"""Checks for correct keys in the request """
my_keys = ['name', 'hqAddress', 'logoUrl']
error = []
for key in my_keys:
if not key in request.json:
error.append(key)
return error
def page_not_found(error):
return make_response(jsonify({
"status": "not found",
"message": "url not found",
"error": 404
}), 404)
def internal_server_error(error):
return make_response(jsonify({
"status": "serevr error",
"message": "server not responding",
"error": 500
}), 500)
def bad_request(error):
return make_response(jsonify({
"status": "bad request",
"message": "url not found",
"error": 400
}), 400)
def method_not_allowed(error):
return make_response(jsonify({
"status": 405,
"message": "Method Not allowed"
}), 405)
| [
"jacksonmwirigi@gmail.com"
] | jacksonmwirigi@gmail.com |
2b0ced7fa82699bf40379314a33e83ddcdf35160 | 7e9c0243c48bbf0ddca9779ef03fc13bb9ac0496 | /candle.py | ef7ed0eabce07b078b04bab06a40c9c69cbbb75e | [] | no_license | suchismitarout/tt | c47f1f59659d2678392e2f0c3aaee8cfaa147ff4 | 54a5b625a82dab854b679050d67e340e74d71edd | refs/heads/master | 2020-09-16T20:25:34.146741 | 2019-11-25T06:52:07 | 2019-11-25T06:52:07 | 223,880,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | def birthdayCakeCandles(ar):
max_ele = ar[0]
count = 0
for i in range(len(ar)):
if ar[i] > max_ele:
max_ele = ar[i]
for j in ar:
if j == max_ele:
count +=1
# for j in ar:
# if j == max_ele:
# count +=1
return count
candle = birthdayCakeCandles([44,53,31,27,77,60,66,77,26,36])
print(candle) | [
"suchismitarout47@gmail.com"
] | suchismitarout47@gmail.com |
c336829368caa45ca1e76d4737980f93b0167064 | 61ccea8eb2ce6848c01ccc626956777fe2c74a9c | /report_email.py | 6e64fcb120792e5c9fce0ef1da286db087968e6a | [] | no_license | gsnelson/google-it-cert-final-project | 7d025e912517b39392567f7e661173226fe1d8de | 3860a053d96f78257dfc92f7c658ba4540bb75f4 | refs/heads/master | 2021-04-14T08:51:23.036815 | 2020-03-23T00:29:38 | 2020-03-23T00:29:38 | 249,102,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | #!/usr/bin/env python3
# import required libraries
import os
import datetime
import sys
import reports
import emails
# instantiate variables
root = os.getcwd()
# desc_path = root + "/supplier-data/descriptions"
desc_path = "C:/Users/gscot/Documents/python/google-it-cert-final-project/supplier-data/descriptions"
# pdf_save_path = "/tmp/processed.pdf"
pdf_save_path = "C:/Users/gscot/Documents/python/google-it-cert-final-project/tmp/processed.pdf"
def main(argv):
rpt_contents = ""
os.chdir(desc_path)
file_list = os.listdir()
for file in file_list:
with open(file, "r") as f:
fname = f.readline()
fweight = f.readline()
rpt_contents += ("<br/>" + "name: " + fname + "<br/>" + fweight + "<br/>")
f.close
print(rpt_contents)
# create fruit processed PDF report
# calls the reports.generate function from reports.py
todays_date = datetime.datetime.today()
rpt_title = "Processed Update on " + todays_date.strftime("%B %d, %Y")
reports.generate_report(pdf_save_path, rpt_title, rpt_contents)
print(rpt_title)
# send the PDF report as an email attachment
# call emails.generate & emails.send functions from emails.py
sender = "automation@example.com"
receiver = "{}@example.com".format(os.environ.get('USER'))
subject = "Upload Completed - Online Fruit Store"
body = "All fruits are uploaded to our website successfully. A detailed list is \nattached to this email."
message = emails.generate(sender, receiver, subject, body, pdf_save_path)
emails.send(message)
if __name__ == "__main__":
main(sys.argv)
| [
"gscott_nelson@hotmail.com"
] | gscott_nelson@hotmail.com |
a50a1e7ce4a9b883e2bbd7cac9b6be2ce17c745f | 06ed93d4541a1531e75a9a3906f7da540ec0ecc6 | /softbankRobotics/choregraphe-suite-2.5.5.5-linux64/share/doc/_downloads/alrobotmodel.py | e0a2d99d8c8d7f67ae944a08c86224321c972fec | [
"MIT",
"LicenseRef-scancode-pcre",
"GFDL-1.2-only",
"Spencer-94",
"LicenseRef-scancode-smail-gpl",
"Apache-2.0",
"AFL-2.1",
"GPL-2.0-only",
"GPL-3.0-only",
"GPL-1.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"IJG",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-... | permissive | Cmathou/S8-Simulated-Pepper-Project | 8dc38c64dcf1f9d22acd2cd4dff501cd4cda95c6 | a4fb90f1c59c50354c689a37c3977f7d89f699d1 | refs/heads/master | 2020-05-27T21:04:22.050455 | 2019-06-11T13:22:52 | 2019-06-11T13:22:52 | 188,784,416 | 0 | 1 | MIT | 2019-05-27T06:31:23 | 2019-05-27T06:31:22 | null | UTF-8 | Python | false | false | 1,307 | py | #! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Use ALRobotModel Module"""
import qi
import argparse
import sys
def main(session):
"""
This example uses the ALRobotModel module.
"""
# Get the service ALRobotModel.
model_service = session.service("ALRobotModel")
# Example showing how to get information about the robot model
print("robot type", model_service.getRobotType()) # "Nao", "Juliette" or "Romeo"
print("has arms", model_service.hasArms())
print("has hands", model_service.hasHands())
print("has legs", model_service.hasLegs())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
session = qi.Session()
try:
session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
main(session)
| [
"cedric_mathou@hotmail.fr"
] | cedric_mathou@hotmail.fr |
b6e1772e50d34d8983fa946e90fc6fe835b3c834 | 7bdaa27c14549db8ecd13d055cfbd43bbfd69d9a | /book_rest_api/book_rest_api/urls.py | 9bc7c2b763cded18056ef8acfb50445b2ee24dcd | [] | no_license | Ksieciu/Book-Rest-API | 860158266e55a36a57f9cd0d0f7c99233dc53a4c | 9fb5e13c86e6fb5c07fb84dded78dd906986600a | refs/heads/main | 2023-02-17T20:12:02.356460 | 2021-01-04T10:42:14 | 2021-01-04T10:42:14 | 306,314,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | """book_rest_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import books_list_redirect_view
urlpatterns = [
path('', books_list_redirect_view),
path('admin/', admin.site.urls),
path('api/', include('books.api.urls')),
]
| [
"30780278+Ksieciu@users.noreply.github.com"
] | 30780278+Ksieciu@users.noreply.github.com |
36c872660985e40574ed8cd4d854c3dbefe2099d | d001abba19711d678f2ba09dfbd5c84357be6bb0 | /src/contest/codeforces/339div2/B/main.py | 9cbb98053c0a27ee12cd1b6a0d3180ed2d02074c | [] | no_license | cormoran/CompetitiveProgramming | 89f8b3ceda97985d32b8cd91056b49abeb243e6f | fa0e479ab299f53984fa7541d088c10c447fb6e4 | refs/heads/master | 2020-04-17T19:59:49.724498 | 2020-03-28T15:46:26 | 2020-03-28T15:46:26 | 65,995,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | #!/usr/bin/env python3
n = int(input())
a = list(map(int,input().split()))
ans = 1
for i in a:
ans *= i
print(ans)
| [
"cormoran707@gmail.com"
] | cormoran707@gmail.com |
0f2748440d8da03e3a61948c531878e9ebc8b71b | d2e7b7150546bc89ffcc7f3ff0324b39364c8ad6 | /accounts/models.py | 085573a22bc1650913f25d199b1ba42235e4f403 | [] | no_license | AJ-54/uns_29_Lone_Developers | 9bab335e2c21d688043c1416bf21b5b31555e43c | 4c0f4ca1e6e46f1c27abe31378a0d6411adbc470 | refs/heads/master | 2023-01-24T14:26:29.488359 | 2020-11-21T13:19:50 | 2020-11-21T13:19:50 | 314,736,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
# Create your models here.
class Contact(models.Model) :
user = models.OneToOneField(User,related_name="contact",on_delete=models.CASCADE,null=True)
name = models.CharField(max_length = 255,blank=True,null=True)
email = models.EmailField(max_length = 255,blank=True,null=True)
phone = models.CharField(max_length = 255,blank=True,null=True)
class Profile(models.Model) :
user = models.OneToOneField(User,related_name="profile",on_delete=models.CASCADE)
phone = models.IntegerField()
emergency_contacts = models.ManyToManyField(Contact,related_name="friends_or_family")
is_verified = models.BooleanField(default=False)
def __str__(self) :
return self.user.username
@receiver(post_save,sender=User)
def create_profile_contact(sender,instance,created,**kwargs) :
if created :
profile = Profile.objects.create(user=instance)
contact = Contact.objects.create(user=instance,name=instance.username,email=instance.email) | [
"dakshchhabra652@gmail.com"
] | dakshchhabra652@gmail.com |
6e1b9cd6a9a7f879b63ecbb974d2a0c672fd7a64 | c097f3b328667535f3e8e685c04195cca6ac0e9d | /airspace_1.1.py | 01f625ff1479c98a8317333e1bb8ee9c51ef6adf | [] | no_license | MRamirez25/ADS-B_Project_CD-10 | 24f8b44200a4010394d32a4715fa60ed8295f134 | cb96a0ce7dc88458d189bd753ad3b5a201ec7bc0 | refs/heads/master | 2022-05-08T13:22:50.768308 | 2019-06-16T22:15:57 | 2019-06-16T22:15:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,915 | py |
from multiprocessing import Process, Pool
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from matplotlib import cm
from math import log10
import time
def avg_calculator(time_len_res_start,time_len_res,long_res,lat_res,timestep,main_df1,day,our): #this funtion makes a 2d list of avarage values of the differen squeres
main_df = pd.read_csv(main_df1[0],
dtype={"ïnd" : int, "ts": float, "icao": str, "lat": float, "lon": float, "alt": float,}) #importing the databace
main_df['ts'] = main_df['ts'].apply(lambda x: x - (48*365*24*60*60 + 12*24*60*60 - 3600)) #adjusting time
airspace_df1 = main_df[['ts','lat','lon','icao',"alt"]]
airspace_df1 = airspace_df1[((airspace_df1["ts"]) <= (time_len_res*timestep + timestep *2 + 30)) & ((airspace_df1["ts"]) >= (time_len_res_start*timestep - 11)) & (airspace_df1["lat"] >= 50.50) & (airspace_df1["lat"] <= (53.5)) & (airspace_df1["lon"] >= 3.5) & (airspace_df1["lon"] <= (7)) & (airspace_df1["alt"] >= 400)]
x = -1
y = -1
z = -1
i = 0
list4 = []
list3 = []
list_contr = []
days = 31
liststr = day_str_cr(days)
for time1 in range(time_len_res_start,time_len_res,100):
airspace_df2 = airspace_df1[(airspace_df1['ts'] >= time1*timestep) & (airspace_df1['ts'] <= (time1+100)*timestep)]
list6 = []
for time in range(time1,(time1+100)):
z = z + 1
list2 = []
airspace_df2 = airspace_df1[(airspace_df1['ts'] >= time*timestep) & (airspace_df1['ts'] <= (time+1)*timestep)]
val1 = airspace_df2.icao.nunique()
total1 = 0
for lon in np.arange (3.5,7,long_res):
airspace_df = airspace_df2[(airspace_df2['lon'] >= lon) & (airspace_df2['lon'] <= lon+long_res)]
list1 = []
for lat in np.arange (50.50,53.5,lat_res):
airspace_sub_df = airspace_df[(airspace_df["lat"] >= lat) & (airspace_df["lat"] <= (lat+lat_res))]
val = airspace_sub_df.icao.nunique() #this count how many differnt icao the are in a square
total1 = total1 + val
list1.append(val)
list2.append(list1)
if total1 == 0:
cor = 0
else:
cor = (val1/total1)
list2 = [[j*cor for j in i] for i in list2]
list3.append(list2)
list6 = list6 + list3
list4 = []
list7 = []
list8 = []
print (list6)
list_max1 = []
print(len(list1),len(list2),len(list2))
for g in range(len(list2)):
list5 = []
for f in range(len(list1)):
total = 0
total1 = 0
for k in range(len(list3)):
val = list6[k][g][f]
total = total + val*timestep/(long_res*lat_res)
avg = total/(timestep*(time_len_res-time_len_res_start)) #calculates of a square over multiple timestamps
if avg == 0:
avg = 0
avg1 = 0
else:
avg1 = avg
avg = avg
for j in range(len(list3)):
val = list6[j][g][f]
total1 = total1 + ((val*timestep/(long_res*lat_res)) - avg1)**2
avg3 = (total1/(timestep*(time_len_res-time_len_res_start)))**0.5
list7.append(avg3)
list5.append(avg)
list_max1.append(max(list5))
list4.append(list5)
list8.append(list7)
max1 = max(list_max1)
day = main_df1[1]
file = open(liststr[our][day],"w+")# a file is created here
file.write(str([list4,max1])) #the list is whritten into the file here
file.close()
return list4, list8,max1,len(list1), len(list3)
def time_of_day(time_len_res_start,time_len_res,long_res,lat_res,timestep,main_df1,nu): #this function will make the 2 list for mulitple hours
listtotal = []
listmax = []
list4 = []
maxlist = []
processlist = []
days = 1
for j in range(24):
i = main_df1[1] - 1
nu = j
set1 = avg_calculator(int(time_len_res_start + (i*86400)/timestep + (nu*60*60)/timestep),int(time_len_res + (i*86400)/timestep +((nu*60*60)/timestep)),long_res,lat_res,timestep,main_df1,i,nu)
listtotal.append(set1[0])
listmax.append(set1[2])
for g in range(len(listtotal[0])):
list5 = []
for f in range(len(listtotal[0][0])):
total = 0
total1 = 0
for k in range(len(listtotal)):
val = listtotal[k][g][f]
total = total + val*(long_res*lat_res)
avg = total/days
list5.append(avg)
list4.append(list5)
maxlist.append(max(list5))
max1 = max(maxlist)
print (maxlist)
print (list4)
return list4,0,max1
def makeplt(l,i): #this can make a plot
colar_res = 20
viridis = cm.get_cmap("viridis",256)
fig, axs = plt.subplots(1,2, figsize=(6,3),constrained_layout=True)
for [ax, cmap] in zip(axs, [viridis, viridis]):
psm = ax.pcolormesh(l[0], cmap=cmap, rasterized=True, vmin=0, vmax=l[2])
fig.colorbar(psm, ax=ax)
plt.show()
def do_thing(main_df2): #This finction cals the time_of_day funtion is is easier for the multiprocessing
days = 5
i = 1
cal = time_of_day(0,600,0.09,0.09,6,main_df2,i)
return cal
print (cal)
def day_str_cr(days1): #this creates names for the files
list1 = []
for i in range(24):
list2 = []
for j in range(days1):
string = str(str(j)+","+str(i)+".txt" )
list2.append(string)
list1.append(list2)
return list1
if __name__ == '__main__':
filelist = [["ADSB_TimeInterval_20180111.csv",11],["ADSB_TimeInterval_20180112.csv",12],["ADSB_TimeInterval_20180113.csv",13],["ADSB_TimeInterval_20180114.csv",14],["ADSB_TimeInterval_20180115.csv",15],["ADSB_TimeInterval_20180111.csv",11],["ADSB_TimeInterval_20180112.csv",12],["ADSB_TimeInterval_20180113.csv",13],["ADSB_TimeInterval_20180114.csv",14]]
processes = []
dayr = 1
liststr = day_str_cr(dayr)
i = range(4)
p = Pool() #this makes it posible to use python on multiple cores
print("hoi1")
result = p.map(do_thing, filelist)
print("hoi2")
p.close()
p.join()
print("hoi3")
o = 0
print (result)
for l in result:
o = o + 1
makeplt(l,o)
| [
"noreply@github.com"
] | MRamirez25.noreply@github.com |
ed8644241b7c492e3e31cc6082782b59e4db12c0 | f79c8fe6d214a197d8c610207b73326d9474ddf6 | /Models/HandmadeCNN.py | 4bd6c8c1e6e48b8cadbb7cced58125cde32a1007 | [] | no_license | 14coruma/RandWire | f35cb9baff504c0cfcd231e042ab04edee3b85d1 | dc31539718cfa018ca59f538bb3175e511989e67 | refs/heads/master | 2023-05-02T23:55:59.450508 | 2021-05-19T13:00:51 | 2021-05-19T13:00:51 | 359,612,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | import tensorflow as tf
from tensorflow.keras import layers, models, losses
class Model():
'''
Code adapted from https://www.tensorflow.org/tutorials/images/cnn
'''
def __init__(self, data, location='Models/OCR_CNN_Trained'):
if data is None:
self.model = models.load_model(location)
else:
self.load_data(data)
self.reshape_data()
self.build_model()
def load_data(self, data):
self.X_train, self.X_valid, self.X_test = data["X_train"], data["X_valid"], data["X_test"]
self.y_train, self.y_valid, self.y_test = data["y_train"], data["y_valid"], data["y_test"]
def reshape_data(self):
self.X_train = self.X_train.reshape(len(self.X_train), 28, 28, 1)
self.X_train = self.X_train / 255.0
self.X_valid = self.X_valid.reshape(len(self.X_valid), 28, 28, 1)
self.X_valid = self.X_valid / 255.0
self.X_test = self.X_test.reshape(len(self.X_test), 28, 28, 1)
self.X_test = self.X_test / 255.0
def build_model(self):
# Model started with https://linux-blog.anracom.com/2020/05/31/a-simple-cnn-for-the-mnist-datasets-ii-building-the-cnn-with-keras-and-a-first-test/
# Then tested and updated for improvements
self.model = models.Sequential([
layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)),
layers.BatchNormalization(),
layers.Conv2D(64, (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Conv2D(64, (3,3), activation='relu'),
layers.BatchNormalization(),
layers.Conv2D(64, (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Conv2D(64, (3,3), activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(10, activation='softmax')
])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, epsilon=0.0001)
self.model.compile(
optimizer=optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
def train(self, epochs=100, batch_size=2048):
return self.model.fit(self.X_train, self.y_train, batch_size=batch_size,
validation_data=(self.X_valid, self.y_valid),
epochs=epochs)
def test(self):
res = self.model.evaluate(self.X_test, self.y_test, batch_size=128)
print("Test loss: {}".format(res[0]))
print("Test accuracy: {}".format(res[1]))
def save(self, location):
self.model.save(location)
def summary(self):
self.model.summary()
if __name__ == "__main__":
data = MNIST.get_data(n=60000, m=10000)
model = Model(data)
model.train()
model.summary()
model.test()
model.save('Models/OCR_CNN_Trained') | [
"amcorum@iu.edu"
] | amcorum@iu.edu |
1230c344d24a8acfbe5dd5170427a8465e16e9fb | 715e320339e43f18503fa2e80601ef2e4f754d6b | /networking/net_server.py | 24c6d54d0a54755b4f8e018e1383df2c4d677c8c | [] | no_license | GENADEE/ibsencoin | 0aef2af82533807c4e8aaf324c2c10d02ed5ff45 | 32fafc15fb7142d4be551cfb113323f11e3b4f2f | refs/heads/master | 2020-04-15T13:53:04.175555 | 2018-08-23T00:07:09 | 2018-08-23T00:07:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | # check out https://hackernoon.com/learn-blockchains-by-building-one-117428612f46
# which gives a different blockchain implementation using python, but which uses
# an http server which I try to copy here
#
#
# (whatabout http://ecomunsing.com/build-your-own-blockchain)
# flask run --host=0.0.0.0 to make it readable from other computers on the network
import json
from uuid import uuid4

from flask import Flask, request

from .. import node  # fix this import please
# Random per-process node identifier (UUID4 with dashes stripped).
identifier = str(uuid4()).replace('-', '')
app = Flask(__name__)
# this is an example app route
@app.route('/')
def hello_world():
    """Root endpoint: simple liveness check."""
    message = 'Hello, World!'
    return message
# requests for pool and transaction will provide conditions for the requests to be met
# params for pool should be {'size':<int:size>,'amount':<int:amount>}
@app.route('/request_pool', methods=['POST'])
def request_pool():
    """Stub endpoint for pool requests.

    Per the comments above, the JSON body should look like
    {'size': <int>, 'amount': <int>}; the parsed payload is currently unused
    and a placeholder reply is returned.
    """
    values = request.get_json()  # `request` must be imported from flask at module level
    return 'please wait'
# once a pool has been accepted, the node will be able to verify transaction presented by a client
# transaction should be a json with inputs, n_output_groups, v_output_groups, output_group, and timeout parameters
# where (as in the client python file):
#inputs: a list of unspent outputs. The format of an unspent output is a tuple of the format (transaction, pkey), where transaction is the hash of a complete signed, confirmed, transaction in the blockchain, and pkey is the public key associated with the output of that transaction to be spent.
#n_output_groups: specifies the number of output groups; ie. the number of transactions that share the same input as this one, including this one.
#v_output_groups: specifies the value of each output group.
#output_group: lists the outputs to this part of this transaction; the form is a list of tuples of the format (value, key).
#timeout: unix time after which the transaction becomes invalid if it has not been confirmed.
@app.route('/verify_transaction', methods=['POST'])
def verify_transaction():
    """Hand a client-submitted transaction (JSON body) to the node for verification."""
    values = request.get_json()  # `request` must be imported from flask at module level
    node.verify_transaction_part(values) # fix this
    return 'verifying'
# ok, so the easiest way to do this will be to run a flask server for our nodes and then have the clients send requests | [
"adrianohernandez2000gmail.com"
] | adrianohernandez2000gmail.com |
f24f5920e7e8aeb551403393c83d466319502838 | fe9aed30db057074c380d7accede639aac744f86 | /venv/bin/pip3 | 3fd56d0094b76189631c08599864611a3d25ca74 | [] | no_license | Darkhan17/biometric | 1faab8b3ff6fdc7390c0d10f63cedaa914cb2b82 | 1059c0b37194493d321ac1a94d58aa78c422fccd | refs/heads/master | 2023-06-14T03:23:18.309297 | 2021-07-01T09:32:14 | 2021-07-01T09:32:14 | 381,979,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | #!/Users/khamitov.darkhan/PycharmProjects/Biometric/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'  # version pin consulted by pkg_resources when resolving the entry point
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools-generated console entry point for pip 19.0.3.
    # Strip a trailing "-script.py(w)" or ".exe" (Windows launcher artifacts)
    # from argv[0] before dispatching to the real entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"khamitov.darkhan@MacBook-Air-Darhan.local"
] | khamitov.darkhan@MacBook-Air-Darhan.local | |
3630fd00235c8d64e6fa8c41cb6b0031acc8d051 | 996967405d3ee07e011ee0f0404d03b6d04d3492 | /dataloader/get_coco/select_image.py | e4e45d53306e8d53996618fd3de1138d855286eb | [] | no_license | wyyy04/MyRepository | 797936fc757a2eee4793d5b1b47ebf8b57216ab8 | 91f1a7ff969e91d9649b96796c5827c9910a8183 | refs/heads/main | 2023-02-22T09:56:21.926013 | 2021-01-27T15:34:00 | 2021-01-27T15:34:00 | 315,524,193 | 0 | 0 | null | 2020-11-24T07:30:05 | 2020-11-24T05:05:28 | null | UTF-8 | Python | false | false | 504 | py | from readtxt import loadDataset
import os
import shutil
# Copy every image used by motivations_clean (training and testing) out of the
# COCO train2014 set into a separate folder.
rdir = 'D:\download\\train2014'  # source directory
odir = 'D:\data'  # destination directory
data = loadDataset()
data = data[:, 0]  # first column holds the image file names
print(data)
for im_name in data:
    print(im_name)  # file name being copied
    src_path = os.path.join(rdir, im_name)
    dst_path = os.path.join(odir, im_name)
    print(src_path, dst_path)
    # Copy (not move) the file to the destination directory.
    shutil.copy(src_path, dst_path)
| [
"you@example.com"
] | you@example.com |
f65e6587e239bda76ca3c17841f09f76afd08cc5 | b3e836254e390d8ea8d9d50a816dba996a208cb0 | /hubei/hubei/start.py | 39668f3c98897abd5d80d60e435d12fde3612da9 | [] | no_license | Fallen0/province_company | 994d69e83fa14030684836e31296dbe902f6b0f1 | 2fa17aaa9780ca1e02a04a6c46ddae1508513396 | refs/heads/master | 2020-08-03T05:14:40.701799 | 2019-10-12T09:36:34 | 2019-10-12T09:36:34 | 211,630,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from scrapy.cmdline import execute
# Launch the spider exactly as the CLI command `scrapy crawl hubei_spider` would.
execute(['scrapy', 'crawl', 'hubei_spider'])
| [
"384197992@qq.com"
] | 384197992@qq.com |
481b427f99c02279f17e340436ebfec288581849 | 06a86c55d8265371729110070554a76099a62ec8 | /4435-server-side/Django/workout/models.py | 170c1a5a9ee4348893235f25c157bb7789589f96 | [] | no_license | lacymorrow/cs-uni | 80b89c22ac4a445a2d88fc29fc498148de404d15 | 0dcea5c090a91d22e4a6ac5a101598101ae83f91 | refs/heads/master | 2020-04-23T14:27:08.769863 | 2014-05-14T23:57:57 | 2014-05-14T23:57:57 | 19,543,518 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | from django.db import models
from profiles.models import CoachingUser
from django.core.urlresolvers import reverse
import re
class WorkoutType(models.Model):
    """A named workout type with an optional description."""
    name = models.CharField(max_length=40)
    description = models.CharField(max_length=200, blank=True, null=True)

    def __str__(self):
        return self.name

    def isrun(self):
        """Return True when the type name mentions 'Run' but not 'Running'.

        Fix: the original returned a ``re.Match``/None/False mix; normalize
        to a plain bool so callers can compare/serialize it safely.
        """
        run = re.compile(r'Run')
        running = re.compile(r'Running')
        return bool(run.search(self.name) and not running.search(self.name))
class RunningSurface(models.Model):
    """A named running surface option."""
    name = models.CharField(max_length=40)

    def __str__(self):
        return self.name
class CourseType(models.Model):
    """A named course type with an optional description."""
    name = models.CharField(max_length=40)
    description = models.CharField(max_length=200, blank=True, null=True)

    def __str__(self):
        return self.name
class WorkoutPlan(models.Model):
    """A prescribed workout: type/surface/course plus targets and instructions."""
    name = models.CharField(max_length=40, blank=True, null=True)
    date = models.DateField(blank=True, null=True)
    type = models.ForeignKey(WorkoutType)
    surface = models.ForeignKey(RunningSurface)
    course_type = models.ForeignKey(CourseType)
    # Planned distance/time; units are not specified here — confirm with callers.
    distance = models.FloatField(blank=True, null=True, default=0)
    time = models.FloatField(blank=True, null=True, default=0)
    warmup_instructions = models.CharField(max_length=200, blank=True, null=True)
    core_workout_instructions = models.CharField(max_length=200, blank=True, null=True)
    cooldown_instructions = models.CharField(max_length=200, blank=True, null=True)
    post_run_instructions = models.CharField(max_length=200, blank=True, null=True)
    # "hr" presumably means heart rate (cf. Workout.average_hr/max_hr).
    target_hr = models.IntegerField(blank=True, null=True, default=0)
    target_difficulty = models.IntegerField(blank=True, null=True, default=0)
    comments = models.CharField(max_length=200, blank=True, null=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Detail view is resolved by primary key.
        return reverse('workoutplandetail', kwargs={'pk': self.pk})
class Workout(models.Model):
    """A completed workout logged by a runner against a WorkoutPlan."""
    name = models.CharField(max_length=40, blank=True, null=True)
    workout_plan = models.ForeignKey(WorkoutPlan)
    runner = models.ForeignKey(CoachingUser, null = True)
    date_time = models.DateTimeField()
    type = models.ForeignKey(WorkoutType)
    surface = models.ForeignKey(RunningSurface)
    course_type = models.ForeignKey(CourseType)
    # Actual distance/time achieved; units are not specified here — confirm with callers.
    distance = models.FloatField(blank=True, null=True, default=0)
    time = models.FloatField(blank=True, null=True, default=0)
    # Recorded heart-rate statistics for the session.
    average_hr = models.IntegerField(blank=True, null=True, default=0)
    max_hr = models.IntegerField(blank=True, null=True, default=0)
    difficulty = models.IntegerField(blank=True, null=True, default=0)
    comments = models.CharField(max_length=200, blank=True, null=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Detail view is resolved by primary key.
        return reverse('workoutdetail', kwargs={'pk': self.pk})
| [
"lacymorrow@Glitch.local"
] | lacymorrow@Glitch.local |
370a6054fe07d2464edc376b1ca9e4fb1105c508 | cb580d278cc626e8e4f0c1ed0bee0fb42f44df2b | /achivki/bootstrap_forms/forms.py | 0f4f8ee5258ac8b2509d07bc290805ebc204c102 | [] | no_license | k222/megaproject | ab96a20cacbea20761aac021e0e7c938fbc9a072 | 91b68563767d0a033ca4e5e537c7cb9d2a16b015 | refs/heads/master | 2021-01-10T00:53:32.988889 | 2012-05-23T09:31:36 | 2012-05-23T09:31:36 | 2,379,820 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,714 | py | import os
from django.template import Context,RequestContext
from django.template.loader import get_template, select_template
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django import forms
from django.utils.encoding import force_unicode
class NoSuchFormField(Exception):
    """The form field couldn't be resolved."""
    # Fix: the original docstring used five quote characters on each side,
    # leaving stray literal quotes inside the docstring text.
class BootstrapMixin(object):
    """Form mixin that renders fields as Bootstrap-styled <div> markup.

    Reads optional configuration off the form's ``Meta``:
    ``custom_fields`` (field name -> template path), ``template_base``
    (template directory, defaults to "bootstrap") and ``layout``
    (ordered list of field names and/or Fieldset objects).

    NOTE: this module is Python 2 code (``unicode``, ``dict.has_key``).
    """

    def __init__(self, *args, **kwargs):
        # Pull per-form rendering configuration from Meta, with defaults.
        super(BootstrapMixin, self).__init__(*args, **kwargs)
        if hasattr(self, 'Meta') and hasattr(self.Meta, 'custom_fields'):
            self.custom_fields = self.Meta.custom_fields
        else:
            self.custom_fields = {}
        if hasattr(self, 'Meta') and hasattr(self.Meta, 'template_base'):
            self.template_base = self.Meta.template_base
        else:
            self.template_base = "bootstrap"

    # For backward compatibility
    __bootstrap__ = __init__

    def top_errors_as_html(self):
        """ Render top errors as set of <div>'s. """
        return ''.join(["<div class=\"alert alert-error\">%s</div>" % error
                        for error in self.top_errors])

    def get_layout(self):
        """ Return the user-specified layout if one is available, otherwise
        build a default layout containing all fields.
        """
        if hasattr(self, 'Meta') and hasattr(self.Meta, 'layout'):
            return self.Meta.layout
        else:
            # Construct a simple layout using the keys from the fields
            return self.fields.keys()

    def as_div(self):
        """ Render the form as a set of <div>s. """
        # top_errors/prefix_fields are accumulated by render_field() below.
        self.top_errors = self.non_field_errors()
        self.prefix_fields = []
        output = self.render_fields(self.get_layout())
        if self.top_errors:
            errors = self.top_errors_as_html()
        else:
            errors = u''
        # Hidden fields are emitted first, then top-level errors, then fields.
        prefix = u''.join(self.prefix_fields)
        return mark_safe(prefix + errors + output)

    def render_fields(self, fields, separator=u""):
        """ Render a list of fields and join the fields by the value in separator. """
        output = []
        for field in fields:
            if isinstance(field, Fieldset):
                # Fieldsets render themselves (and recurse back into us).
                output.append(field.as_html(self))
            else:
                output.append(self.render_field(field))
        return separator.join(output)

    def render_field(self, field):
        """ Render a named field to HTML. """
        try:
            field_instance = self.fields[field]
        except KeyError:
            raise NoSuchFormField("Could not resolve form field '%s'." % field)
        bf = forms.forms.BoundField(self, field_instance, field)
        output = ''
        if bf.errors:
            # If the field contains errors, render the errors to a <ul>
            # using the error_list helper function.
            # bf_errors = error_list([escape(error) for error in bf.errors])
            bf_errors = ', '.join([e for e in bf.errors])
        else:
            bf_errors = ''
        if bf.is_hidden:
            # If the field is hidden, add it at the top of the form
            self.prefix_fields.append(unicode(bf))
            # If the hidden field has errors, append them to the top_errors
            # list which will be printed out at the top of form
            if bf_errors:
                self.top_errors.extend(bf.errors)
        else:
            # Find field + widget type css classes
            css_class = type(field_instance).__name__ + " " + type(field_instance.widget).__name__
            # Add an extra class, Required, if applicable
            if field_instance.required:
                css_class += " required"
            if field_instance.help_text:
                # The field has a help_text, construct <span> tag
                help_text = '<span class="help-block">%s</span>' % force_unicode(field_instance.help_text)
            else:
                help_text = u''
            # Context handed to the per-widget field template.
            field_hash = {
                'class' : mark_safe(css_class),
                'label' : mark_safe(bf.label or ''),
                'help_text' :mark_safe(help_text),
                'field' : field_instance,
                'bf' : mark_safe(unicode(bf)),
                'bf_raw' : bf,
                'errors' : mark_safe(bf_errors),
                'field_type' : mark_safe(field.__class__.__name__),
                'label_id': bf._auto_id(),
            }
            # Per-field template override, else widget-specific, else default.
            if self.custom_fields.has_key(field):
                template = get_template(self.custom_fields[field])
            else:
                template = select_template([
                    os.path.join(self.template_base, 'field_%s.html' % type(field_instance.widget).__name__.lower()),
                    os.path.join(self.template_base, 'field_default.html'), ])
            # Finally render the field
            output = template.render(Context(field_hash))
        return mark_safe(output)

    def __unicode__(self):
        # Default output is now as <div> tags.
        return self.as_div()
class BootstrapForm(BootstrapMixin, forms.Form):
    """A plain (non-model) form rendered with Bootstrap markup."""
    pass
class BootstrapModelForm(BootstrapMixin, forms.ModelForm):
    """A ModelForm rendered with Bootstrap markup."""
    pass
class Fieldset(object):
    """ Fieldset container. Renders to a <fieldset>. """

    def __init__(self, legend, *fields, **kwargs):
        """`legend` is the fieldset title; `fields` are field names to render.

        ``css_class`` defaults to the snake_cased legend.
        """
        self.legend = legend
        self.fields = fields
        self.css_class = kwargs.get('css_class', '_'.join(legend.lower().split()))

    def as_html(self, form):
        # Fix: replace the fragile `cond and a or b` idiom with a proper
        # conditional expression (same result for all truthy/falsy legends).
        legend_html = (u'<legend>%s</legend>' % self.legend) if self.legend else ''
        return u'<fieldset class="%s">%s%s</fieldset>' % (
            self.css_class, legend_html, form.render_fields(self.fields))
| [
"ecxsgv@gmail.com"
] | ecxsgv@gmail.com |
82bedf16a62f6b2a671dbd1c24d8d3dc622d5654 | 228daa8985f40bbd734febb7d476e741ed81dd02 | /cloudferrylib/os/storage/filters.py | 44eaaeb729abb99b80e0b3485f4464934bb3569c | [
"Apache-2.0"
] | permissive | abochkarev/CloudFerry | 979d6a106b5c9f86f5086073ee52be030404c32a | 300c61475c336c80ae62237519dde55bd7690a90 | refs/heads/master | 2020-12-29T00:29:25.614687 | 2015-11-29T20:53:28 | 2015-11-29T20:53:28 | 47,539,977 | 1 | 0 | null | 2015-12-07T08:42:26 | 2015-12-07T08:42:26 | null | UTF-8 | Python | false | false | 3,282 | py | # Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters for Cinder volumes.
Filtering can be done by user through modifying filter config file. User can
specify filtered tenant ID and/or filtered volume ID. This module keeps logic
to filter cinder volumes based on user's input.
User can specify the following filtering options for volumes:
- `date`:
Filters volumes not older than date specified.
DATETIME_FMT = "%Y-%m-%d %H:%M:%S"
- `volume_id`:
Filters specified volume IDs;
Volumes filtering logic:
- If nothing is specified in filters file all volumes MUST migrate;
- If tenant is specified, ALL volumes which belong to this tenant MUST
migrate;
- If volumes' IDs are specified, only these volumes specified MUST migrate.
"""
import datetime
from cloudferrylib.utils import filters
DATETIME_FMT = "%Y-%m-%d %H:%M:%S"
def _filtering_disabled(elem):
return elem is None or (isinstance(elem, list) and len(elem) == 0)
def _tenant_filter(filter_yaml):
    """
    Filter volumes not specified in tenant_id section of filters file.

    :return: filter function
    """
    tenant_id = filter_yaml.get_tenant()

    def _filter(volume):
        if _filtering_disabled(tenant_id):
            return True
        return volume.get('project_id') == tenant_id

    return _filter
def _volume_id_filter(filter_yaml):
    """
    Filter volumes not specified in volume_ids section of filters file.

    :return: filter function
    """
    volumes = filter_yaml.get_volume_ids()

    def _filter(volume):
        if _filtering_disabled(volumes):
            return True
        return volume.get('id') in volumes

    return _filter
def _datetime_filter(filter_yaml):
    """
    Filter volumes not older than :arg date:.

    :return: filter function
    """
    date = filter_yaml.get_volume_date()
    # The configured date may arrive as a string; normalize to datetime once.
    if isinstance(date, str):
        date = datetime.datetime.strptime(date, DATETIME_FMT)

    def _filter(vol):
        # Volume records may carry 'updated_at' as a string or datetime.
        # NOTE(review): if 'updated_at' is missing (None) while a date filter
        # is set, the comparison below would raise — confirm callers' data.
        upd = vol.get('updated_at')
        if isinstance(upd, str):
            upd = datetime.datetime.strptime(upd, DATETIME_FMT)
        return (_filtering_disabled(date) or date <= upd)

    return _filter
class CinderFilters(filters.CFFilters):
    """Build required filters based on filter configuration file."""

    def __init__(self, cinder_client, filter_yaml):
        super(CinderFilters, self).__init__(filter_yaml)
        # Stored for callers; the filter predicates themselves don't use it.
        self.cinder_client = cinder_client

    def get_filters(self):
        """
        Get filter list.

        :return: list of predicate functions (date, tenant, volume-id)
        """
        return [
            _datetime_filter(self.filter_yaml),
            _tenant_filter(self.filter_yaml),
            _volume_id_filter(self.filter_yaml),
        ]

    def get_tenant_filter(self):
        """
        Get tenant filter only.

        :return: filter function
        """
        return _tenant_filter(self.filter_yaml)
| [
"aguzikova@mirantis.com"
] | aguzikova@mirantis.com |
244834cfad66b213689444a8abba5ea46118644a | c3cdab74df239e27954e26d43bfde3f60ebb5c4e | /experiment/rk4_ode.py | 4e4623b550fe3a4acbaf43104975b8c64cf1ddcf | [
"BSD-3-Clause"
] | permissive | lidavidm/scifair-type1diabetes-model | 69e9264d95a5861645af3fea1071d77bab09ab83 | 3162005f006218c2ddba0a5f3b5c273493f6a27b | refs/heads/master | 2016-09-02T04:38:03.287996 | 2014-03-29T17:20:41 | 2014-03-29T17:20:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,282 | py | from mpmath import mpf, mp
mp.dps = 600  # work with 600 decimal digits of precision throughout
print("Precision:", mp.prec)  # mp.prec is the corresponding binary precision
def vectorize(functions):
    """Bundle several callables f(x, *ys) into one returning a tuple.

    The returned function applies every function in `functions` to the same
    (x, ys) arguments and collects the results in order.
    """
    def _apply_all(x, ys):
        results = []
        for func in functions:
            results.append(func(x, *ys))
        return tuple(results)
    return _apply_all
def inc_vec(vec, inc):
    """Return a new list with `inc` added to every element of `vec`."""
    return list(map(lambda element: element + inc, vec))
def add_vec(*vecs):
    """Element-wise sum of any number of equal-length vectors."""
    return list(map(sum, zip(*vecs)))
def mul_vec(vec, mul):
    """Return a new list with every element of `vec` scaled by `mul`."""
    return list(map(lambda element: element * mul, vec))
def rk4_system(y, x0, y0, h, steps):
    """
    y: list of ODE
    y0: vector (tuple) of initial y values
    Finds y(x0 + h * steps)
    """
    # Classic 4th-order Runge-Kutta applied componentwise to the system.
    x1 = x0
    y1 = list(y0)
    xs = [x0]
    # One history list per component, seeded with the initial values.
    ys = [[y0[i]] for i,f in enumerate(y)]
    f = vectorize(y)
    for i in range(steps):
        k1 = f(x1, y1)
        k2 = f(x1 + (h / 2), add_vec(y1, mul_vec(k1, h / 2)))
        k3 = f(x1 + (h / 2), add_vec(y1, mul_vec(k2, h / 2)))
        k4 = f(x1 + h, add_vec(y1, mul_vec(k3, h)))
        # y_{n+1} = y_n + (h/6) * (k1 + 2*k2 + 2*k3 + k4)
        y1 = add_vec(y1, mul_vec(add_vec(k1, mul_vec(k2, 2), mul_vec(k3, 2), k4), h / 6))
        for i, val in enumerate(y1):  # note: deliberately shadows the step index
            ys[i].append(val)
        x1 += h
        xs.append(x1)
    return xs, ys
if __name__ == '__main__':
    # Model parameters as exact arbitrary-precision constants.
    a1 = mpf('2')
    a2 = mpf('2')
    a3 = mpf('3')
    a4 = mpf('0.7')
    a5 = mpf('1')
    a6 = mpf('0.02')
    a7 = mpf('20')
    a8 = mpf('1')
    a9 = mpf('1')
    a10 = mpf('1')
    a11 = mpf('0.01')
    a12 = mpf('0.1')
    a13 = mpf('0.3')
    a14 = mpf('50')
    a16 = mpf('0.1')
    a17 = mpf('0.14')
    B = mpf('1')
    # Hill-type response functions and the p(E, a15) coupling term.
    f1 = lambda p: p**a1 / (a2**a1 + p**a1)
    f2 = lambda p: a4 * a5**a3 / (a5**a3 + p**a3)
    p = lambda E, a15: (a14 * E * B) / a15
    d_a15 = mpf('0.015')  # NOTE(review): appears unused; `speed` below is used instead
    # Right-hand sides for the state vector (A, M, E, a15); slot 3 is filled
    # per parameter range inside the loop below.
    y = [
        lambda t, A, M, E, a15: f1(p(E,a15))*(a6+a7*M)-a8*A-a9*A**2, #dA/dt
        lambda t, A, M, E, a15: a10*f2(p(E,a15))*A-f1(p(E,a15))*a16*a7*M-a11*M, #dM/dt
        lambda t, A, M, E, a15: a12*(1-f2(p(E,a15)))*A-a13*E, #dE/dt,
        None #da15/dt, which depends on parameter range
    ]
    import csv
    par_ranges = [
        # name, file, initial, da15/dt, steps
        ("0.1 to 2", "12", mpf('0.1'), mpf('0.0095'), 4000),
        ("2 to 0.1", "21", mpf('2'), mpf('-0.0095'), 4000),
        ("0.5 to 2.3", "0523", mpf('0.5'), mpf('0.009'), 4000),
        ("0.1 to 2 (Long)", "12_long", mpf('0.1'), mpf('0.00316666667'), 20000),
    ]
    for par_range, file_suffix, initial, speed, steps in par_ranges:
        print("Running parameter range {}".format(par_range))
        print("\tInitial a15:", initial, "da15/dt:", speed)
        # a15 drifts at a constant rate for this range; y[3] is rebound and
        # consumed immediately by rk4_system, so late binding of `speed` is safe.
        y[3] = lambda t, A, M, E, a15: speed # da15/dt
        xs, ys = rk4_system(y, mpf('0'), (mpf('0.5'), mpf('0'), mpf('1'), initial), mpf('0.05'), steps)
        with open('time_py_{}.dat'.format(file_suffix), 'w') as csvfile:
            writer = csv.writer(csvfile, delimiter=' ')
            for t, A, M, E, a15 in zip(xs, *ys):
                # TODO: use string formatting to avoid conversion issues
                writer.writerow([t, float(A), float(M), float(E), float(a15)])
        # NOTE(review): source indentation was lost; this may belong outside
        # the loop (printed once at the end) — confirm against the original.
        print("Finished")
| [
"li.davidm96@gmail.com"
] | li.davidm96@gmail.com |
ca54d6886b71a04671cb3d17b134f4b68d043525 | c8bb44d2bdcd7e5132e993423b27652e412747fe | /app/server/controllers/main.py | 917eb9877c5947c492b395d47e516a1149af44db | [] | no_license | leorrose/HStyle | ad86f1291156007f61c90335e687428367c0c9f0 | f2e78d45bc58deb04772a2a5f6702987f12c30db | refs/heads/master | 2023-04-29T06:53:06.773234 | 2021-05-20T10:16:23 | 2021-05-20T10:16:23 | 287,721,664 | 0 | 0 | null | 2021-04-16T15:24:05 | 2020-08-15T10:04:27 | Jupyter Notebook | UTF-8 | Python | false | false | 1,110 | py | """
Main API controller for the HStyle API. Responsible for creating the FastAPI
app, configuring it, and adding all the other APIs that exist.
"""
from typing import List
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
from server.controllers import style_transfer
# create a fastapi app
app: FastAPI = FastAPI()
# define origins that can call our api
origins: List[str] = [
    # NOTE(review): wildcard origin combined with allow_credentials=True is
    # very permissive — confirm this is intended for production.
    "*",
]
# add our origins to allow CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)
# redirect to docs when getting root of app
@app.get("/", include_in_schema=False)
async def root() -> RedirectResponse:
    """
    Get the initial web API page (doc page)

    Returns:
        RedirectResponse: redirect to doc page
    """
    response: RedirectResponse = RedirectResponse(url='/docs')
    return response
# add our style transfer api (mounted under /api/styleTransfer)
app.include_router(style_transfer.router, prefix="/api/styleTransfer",
                   tags=["styleTransfer"])
| [
"Leor.rose@gmail.com"
] | Leor.rose@gmail.com |
d14fa7cbe66ecb7da1137588ed2a197ae02567a8 | 714c546d145504e4c194702bfde9a9f50882ea54 | /twittertest/wsgi.py | 7f0d3b3b652dfcdd491ced1d5df2c950ba058da0 | [] | no_license | nehagundecha/my-first-blog | a7be156f59c366000fdf66050531560e040439bc | cc174d050274de731933d976caf319f90abe7fb6 | refs/heads/master | 2021-01-20T00:09:13.618449 | 2017-04-22T18:04:12 | 2017-04-22T18:04:12 | 89,086,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for twittertest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django knows which settings module to use before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twittertest.settings")
application = get_wsgi_application()  # the WSGI callable servers look for
| [
"ng2795@gmail.com"
] | ng2795@gmail.com |
5418cdc24aab8459761ed1b04e45bfb5f5f36b94 | 327befeb9bbb8aee75c24c5ef78d859f35428ebd | /src/python/grpcio/grpc/framework/crust/_calls.py | bff940d74710da3a6be3a2c099cf8e661f51f910 | [
"BSD-3-Clause"
] | permissive | CharaD7/grpc | 33b64f8eabf09014b1bc739b77809aed7a058633 | 062ad488881839d2637b7a191ade5b87346b4597 | refs/heads/master | 2023-07-08T19:36:00.065815 | 2016-01-04T17:28:15 | 2016-01-04T17:28:15 | 49,012,756 | 1 | 0 | BSD-3-Clause | 2023-07-06T01:32:59 | 2016-01-04T17:39:11 | C | UTF-8 | Python | false | false | 8,621 | py | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for invoking RPCs."""
from grpc.framework.crust import _control
from grpc.framework.interfaces.base import utilities
from grpc.framework.interfaces.face import face
_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!'
_EMPTY_COMPLETION = utilities.completion(None, None, None)
def _invoke(
    end, group, method, timeout, protocol_options, initial_metadata, payload,
    complete):
  """Start an operation on `end`; return (rendezvous, operation_context, outcome).

  A Rendezvous is fully subscribed to the operation so it receives results.
  When `complete` is true, an (empty) completion is sent along with `payload`,
  i.e. the invocation side is finished immediately.
  """
  rendezvous = _control.Rendezvous(None, None)
  subscription = utilities.full_subscription(
      rendezvous, _control.protocol_receiver(rendezvous))
  operation_context, operator = end.operate(
      group, method, subscription, timeout, protocol_options=protocol_options,
      initial_metadata=initial_metadata, payload=payload,
      completion=_EMPTY_COMPLETION if complete else None)
  rendezvous.set_operator_and_context(operator, operation_context)
  outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
  if outcome is not None:
    # The operation already terminated; propagate the outcome immediately.
    rendezvous.set_outcome(outcome)
  return rendezvous, operation_context, outcome
def _event_return_unary(
    receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
  """Deliver a single response from `rendezvous` to `receiver` on `pool`.

  Nothing is scheduled when `outcome` is already set (operation terminated).
  """
  if outcome is None:
    def in_pool():
      abortion = rendezvous.add_abortion_callback(abortion_callback)
      if abortion is None:
        try:
          receiver.initial_metadata(rendezvous.initial_metadata())
          receiver.response(next(rendezvous))
          receiver.complete(
              rendezvous.terminal_metadata(), rendezvous.code(),
              rendezvous.details())
        except face.AbortionError:
          # Abortion is reported through abortion_callback; swallow here.
          pass
      else:
        # Already aborted before subscription; report directly.
        abortion_callback(abortion)
    pool.submit(_control.pool_wrap(in_pool, operation_context))
  return rendezvous
def _event_return_stream(
    receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
  """Deliver every response from `rendezvous` to `receiver` on `pool`.

  Nothing is scheduled when `outcome` is already set (operation terminated).
  """
  if outcome is None:
    def in_pool():
      abortion = rendezvous.add_abortion_callback(abortion_callback)
      if abortion is None:
        try:
          receiver.initial_metadata(rendezvous.initial_metadata())
          for response in rendezvous:
            receiver.response(response)
          receiver.complete(
              rendezvous.terminal_metadata(), rendezvous.code(),
              rendezvous.details())
        except face.AbortionError:
          # Abortion is reported through abortion_callback; swallow here.
          pass
      else:
        # Already aborted before subscription; report directly.
        abortion_callback(abortion)
    pool.submit(_control.pool_wrap(in_pool, operation_context))
  return rendezvous
def blocking_unary_unary(
    end, group, method, timeout, with_call, protocol_options, initial_metadata,
    payload):
  """Services in a blocking fashion a unary-unary servicer method."""
  rendezvous, unused_operation_context, unused_outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, payload,
      True)
  if with_call:
    # Also hand back the Rendezvous, which doubles as the call object.
    return next(rendezvous), rendezvous
  else:
    # next() blocks until the single response is available.
    return next(rendezvous)
def future_unary_unary(
    end, group, method, timeout, protocol_options, initial_metadata, payload):
  """Services a value-in value-out servicer method by returning a Future."""
  # The Rendezvous itself serves as the Future for the single response.
  rendezvous, unused_operation_context, unused_outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, payload,
      True)
  return rendezvous
def inline_unary_stream(
    end, group, method, timeout, protocol_options, initial_metadata, payload):
  """Services a value-in stream-out servicer method."""
  # The Rendezvous is iterable and yields the response stream.
  rendezvous, unused_operation_context, unused_outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, payload,
      True)
  return rendezvous
def blocking_stream_unary(
    end, group, method, timeout, with_call, protocol_options, initial_metadata,
    payload_iterator, pool):
  """Services in a blocking fashion a stream-in value-out servicer method."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, None,
      False)
  if outcome is None:
    # Pump the request stream on the pool, then block on the single response.
    def in_pool():
      for payload in payload_iterator:
        rendezvous.consume(payload)
      rendezvous.terminate()
    pool.submit(_control.pool_wrap(in_pool, operation_context))
    if with_call:
      return next(rendezvous), rendezvous
    else:
      return next(rendezvous)
  else:
    # Operation already terminated: no request pump; next() surfaces the outcome.
    if with_call:
      return next(rendezvous), rendezvous
    else:
      return next(rendezvous)
def future_stream_unary(
    end, group, method, timeout, protocol_options, initial_metadata,
    payload_iterator, pool):
  """Services a stream-in value-out servicer method by returning a Future."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, None,
      False)
  if outcome is None:
    # Pump the request stream on the pool; no pump if already terminated.
    def in_pool():
      for payload in payload_iterator:
        rendezvous.consume(payload)
      rendezvous.terminate()
    pool.submit(_control.pool_wrap(in_pool, operation_context))
  return rendezvous
def inline_stream_stream(
    end, group, method, timeout, protocol_options, initial_metadata,
    payload_iterator, pool):
  """Services a stream-in stream-out servicer method."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, None,
      False)
  if outcome is None:
    # Pump the request stream on the pool; responses are read by iterating
    # the returned Rendezvous.
    def in_pool():
      for payload in payload_iterator:
        rendezvous.consume(payload)
      rendezvous.terminate()
    pool.submit(_control.pool_wrap(in_pool, operation_context))
  return rendezvous
def event_unary_unary(
    end, group, method, timeout, protocol_options, initial_metadata, payload,
    receiver, abortion_callback, pool):
  """Event-driven unary-unary invocation; the response goes to `receiver`."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, payload,
      True)
  return _event_return_unary(
      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
def event_unary_stream(
    end, group, method, timeout, protocol_options, initial_metadata, payload,
    receiver, abortion_callback, pool):
  """Event-driven unary-stream invocation; responses go to `receiver`."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, payload,
      True)
  return _event_return_stream(
      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
def event_stream_unary(
    end, group, method, timeout, protocol_options, initial_metadata, receiver,
    abortion_callback, pool):
  """Event-driven stream-unary invocation; requests are fed via the returned Rendezvous."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, None,
      False)
  return _event_return_unary(
      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
def event_stream_stream(
    end, group, method, timeout, protocol_options, initial_metadata, receiver,
    abortion_callback, pool):
  """Event-driven stream-stream invocation; requests are fed via the returned Rendezvous."""
  rendezvous, operation_context, outcome = _invoke(
      end, group, method, timeout, protocol_options, initial_metadata, None,
      False)
  return _event_return_stream(
      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
| [
"nathaniel@google.com"
] | nathaniel@google.com |
99322f6227003e759ed09c79ea1c5a528744cc15 | d54e1b89dbd0ec5baa6a018464a419e718c1beac | /Python from others/字典/wk_05_字符串的定义.py | 3baed27ffc0bce82ef9cf4051ead9b8ab46134e4 | [] | no_license | cjx1996/vscode_Pythoncode | eda438279b7318e6cb73211e26107c7e1587fdfb | f269ebf7ed80091b22334c48839af2a205a15549 | refs/heads/master | 2021-01-03T19:16:18.103858 | 2020-05-07T13:51:31 | 2020-05-07T13:51:31 | 240,205,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | string = "hello world"
for c in string:
print(c) | [
"1121287904@qq.com"
] | 1121287904@qq.com |
c8e603a7b2d4491ede5f8b0fb00d7cc62580c973 | d4f51baa56e5203174a1e5564126d8eb4384eddc | /python/learn-python-the-hard-way/ex19.py | 9e1e3c3b3a8dd520741619ff62480f213447bd58 | [] | no_license | yangmingming/script | d01832c3ca9b2e0392f5534439470db9a1d04387 | 88fee116f6b1218d7e766622af64c73bb5189595 | refs/heads/master | 2021-05-04T04:54:23.922310 | 2017-02-06T10:36:48 | 2017-02-06T10:36:48 | 70,901,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | def print_fruit(apple, orange):
print "apple: %s" %apple
print "orange: %s" %orange
print "\n"
# Demonstrate the different ways arguments can be passed to print_fruit.
# NOTE: Python 2 print-statement syntax throughout.
print "use num directly"
print_fruit(10, 20)
print "use variable"
a = 30
b = 40
print_fruit(a, b)
print "use math operator"
print_fruit(10 + 2, 10 + 4)
print "use variable and math operator"
print_fruit(a + 10, b + 3)
| [
"yang15225094594@163.com"
] | yang15225094594@163.com |
4fd5a728df60fb05cfd0a083012b4fbc9e4192da | 1c1fb277e61ef0242bbc41ec3faeb33c9ffc6b54 | /coel.py | d0afd339364497e081aed49d463615fc3f40faa7 | [
"MIT"
] | permissive | tzetty/pmisc | 5a398eb8181b0807d5f3ade9275a1c4b69097566 | 1b29c334c5eca41c6b4444bca98db1989ec032ac | refs/heads/master | 2020-04-16T08:22:18.938896 | 2019-01-13T14:43:16 | 2019-01-13T14:43:16 | 165,422,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | import sys
import random
import dlib
# Labels for the three positions of a "Three Rays of Light" reading,
# paired positionally with the drawn tiles in print_three_rays().
RAYS = [
    '1st (Gwron/Virtue) Ray of Knowledge',
    '2nd (Alawn/Light) Ray of Power',
    '3rd (Plenydd/Harmony) Ray of Peace',
]
def print_three_rays(args):
    """Print a 'Three Rays of Light' reading for exactly three COEL tile keys."""
    assert len(args) == 3
    print('Question: What do I most need to understand about this day\'s events?')
    print('')
    print('Method Used: The Three Rays of Light')
    print('')
    meanings = [dlib.COEL[key] for key in args]
    for ray, tile, meaning in zip(RAYS, args, meanings):
        print('%s: "%s" %s' % (ray, tile, meaning))
    print('')
    print('Interpretation:')
def main(args):
    """CLI entry: 'random' draws three shuffled tiles, otherwise use the given ones."""
    if args[0] == 'random':
        print_three_rays(dlib.scramble(list(dlib.COEL.keys()))[0:3])
    else:
        print_three_rays(args)
if __name__ == '__main__':
    # Usage: `coel.py random` or `coel.py <tile> <tile> <tile>`.
    main(sys.argv[1:])
| [
"thomaszetty@gmail.com"
] | thomaszetty@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.