text
stringlengths
184
4.48M
import numpy as np import matplotlib.pyplot as plt import torch import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../professad')) from system import System from functionals import IonElectron, Weizsaecker # the single electron quantum harmonic oscillator (QHO) is a non-interacting # single-orbital system - hence, it can be modelled well with just the # ion-electron interaction and Weizsaecker terms terms = [IonElectron, Weizsaecker] # use a large box to simulate such localied systems with periodic # boundary conditions so that the electron density will approach zero # at the box boundaries L = 20.0 box_vecs = L * torch.eye(3, dtype=torch.double) # set low energy cutoff of 300 eV shape = System.ecut2shape(300, box_vecs) # as we will set the external potential ourselves later, we just need to # submit a dummy "ions" parameter (the recpot file and ionic coordinates # are arbitrary for this example) ions = [['-', 'al.gga.recpot', torch.tensor([[0.5, 0.5, 0.5]]).double()]] # create system object system = System(box_vecs, shape, ions, terms, units='b', coord_type='fractional') # as we have used an arbitrary recpot file, we need to set the electron number explicitly system.set_electron_number(1) # QHO quadratic potential k = 10 xf, yf, zf = np.meshgrid(np.arange(shape[0]) / shape[0], np.arange(shape[1]) / shape[1], np.arange(shape[2]) / shape[2], indexing='ij') x = box_vecs[0, 0] * xf + box_vecs[1, 0] * yf + box_vecs[2, 0] * zf y = box_vecs[0, 1] * xf + box_vecs[1, 1] * yf + box_vecs[2, 1] * zf z = box_vecs[0, 2] * xf + box_vecs[1, 2] * yf + box_vecs[2, 2] * zf r = np.sqrt(x * x + y * y + z * z) qho_pot = 0.5 * k * ((x - L / 2).pow(2) + (y - L / 2).pow(2) + (z - L / 2).pow(2)) # set external potential to QHO potential system.set_potential(torch.as_tensor(qho_pot).double()) # perform density optimization system.optimize_density(ntol=1e-7, n_verbose=True) # compare optimized energy and the ones expected from elementary quantum 
mechanics print('Optimized energy = {:.8f} Ha'.format(system.energy('Ha'))) print('Expected energy = {:.8f} Ha'.format(3 / 2 * np.sqrt(k))) # check measures of convergence dEdchi_max = system.check_density_convergence('dEdchi') mu_minus_dEdn_max = system.check_density_convergence('euler') print('\nConvergence check:') print('Max |𝛿E/𝛿χ| = {:.4g}'.format(dEdchi_max)) print('Max |µ-𝛿E/𝛿n| = {:.4g}'.format(mu_minus_dEdn_max)) den = system.density() pot = system.ionic_potential() def den_QHO_0(k, x): return np.pi**(-1 / 2) * k**(1 / 4) * torch.exp(-k**(1 / 2) * x * x) den_th = den_QHO_0(k, x - L / 2) * den_QHO_0(k, y - L / 2) * den_QHO_0(k, z - L / 2) _, ax = plt.subplots(nrows=5, ncols=2, figsize=(15, 6), sharex=True) r_100 = [r[i, 0, 0] for i in range(shape[0])] r_010 = [r[0, i, 0] for i in range(shape[1])] r_001 = [r[0, 0, i] for i in range(shape[2])] r_110 = [r[i, i, 0] for i in range(shape[0])] r_111 = [r[i, i, i] for i in range(shape[0])] ax[0][0].plot(r_100, [pot[i, int(shape[1] / 2), int(shape[2] / 2)] for i in range(shape[0])], '-b') ax[1][0].plot(r_010, [pot[int(shape[0] / 2), i, int(shape[2] / 2)] for i in range(shape[1])], '-b') ax[2][0].plot(r_001, [pot[int(shape[0] / 2), int(shape[1] / 2), i] for i in range(shape[2])], '-b') ax[3][0].plot(r_110, [pot[i, i, int(shape[2] / 2)] for i in range(shape[0])], '-b') ax[4][0].plot(r_111, [pot[i, i, i] for i in range(shape[0])], '-b') ax[0][1].plot(r_100, [den_th[i, int(shape[1] / 2), int(shape[2] / 2)] for i in range(shape[0])], '-b') ax[1][1].plot(r_010, [den_th[int(shape[0] / 2), i, int(shape[2] / 2)] for i in range(shape[1])], '-b') ax[2][1].plot(r_001, [den_th[int(shape[0] / 2), int(shape[1] / 2), i] for i in range(shape[2])], '-b') ax[3][1].plot(r_110, [den_th[i, i, int(shape[2] / 2)] for i in range(shape[0])], '-b') ax[4][1].plot(r_111, [den_th[i, i, i] for i in range(shape[0])], '-b') ax[0][1].plot(r_100, [den[i, int(shape[1] / 2), int(shape[2] / 2)] for i in range(shape[0])], '--r') ax[1][1].plot(r_010, 
[den[int(shape[0] / 2), i, int(shape[2] / 2)] for i in range(shape[1])], '--r') ax[2][1].plot(r_001, [den[int(shape[0] / 2), int(shape[1] / 2), i] for i in range(shape[2])], '--r') ax[3][1].plot(r_110, [den[i, i, int(shape[2] / 2)] for i in range(shape[0])], '--r') ax[4][1].plot(r_111, [den[i, i, i] for i in range(shape[0])], '--r') ax[0][0].set_ylabel('[100]') ax[1][0].set_ylabel('[010]') ax[2][0].set_ylabel('[001]') ax[3][0].set_ylabel('[110]') ax[4][0].set_ylabel('[111]') ax[0][0].set_title('Potential') ax[0][1].set_title('Density') plt.show()
package tuf_a2zdsacourse.bsproblems; import java.util.*; public class BookAllocationProblem { public static void main(String[] args) { Scanner sc=new Scanner(System.in); ArrayList<Integer> ls=new ArrayList<>(Arrays.asList(12,34,67,90)); int n=ls.size(); System.out.print("Enter the no. of students(m): "); int m=sc.nextInt(); //int ans=findPages(ls,n,m); //bruteforce approach using linear search method int ans=findPagesBs(ls,n,m); //optimal approach using binary search method System.out.println("The answer is: " + ans); } private static int findPagesBs(ArrayList<Integer> arr, int n, int m) { if(m>n) return -1; int low = Collections.max(arr); int high = arr.stream().mapToInt(Integer::intValue).sum(); while(low<=high){ int mid=(low+high)/2; int studs=countStudents(arr,mid); if(studs>m){ low=mid+1; }else{ high=mid-1; } } return low; } private static int findPages(ArrayList<Integer> arr, int n, int m) { if(m>n) return -1; int low = Collections.max(arr); int high = arr.stream().mapToInt(Integer::intValue).sum(); for(int pages=low;pages<=high;pages++){ if(countStudents(arr,pages)==m){ return pages; } } return low; } private static int countStudents(ArrayList<Integer> arr, int pages) { int n=arr.size(); int stud=1; long pagesStud=0; for(int i=0;i<n;i++){ if(pagesStud+arr.get(i)<=pages){ pagesStud+=arr.get(i); }else{ stud++; pagesStud=arr.get(i); } } return stud; } }
const Header = ({ course }) => { return ( <h1>{course.name}</h1> ) } const Part = ({ part }) => { return ( <div> <p>{part.name} {part.exercises}</p> </div> ) } const Content = ({ parts }) => { return ( <div> <Part part={parts[0]}/> <Part part={parts[1]}/> <Part part={parts[2]}/> </div> ) } const Total = ({ parts }) => { const numberOfExercises = parts[0].exercises + parts[1].exercises + parts[2].exercises return ( <div> <p>Number of exercises {numberOfExercises}</p> </div> ) } const App = () => { const course = { name: 'Half Stack application development', parts: [ { name: 'Fundamentals of React', exercises: 10 }, { name: 'Using props to pass data', exercises: 7 }, { name: 'State of a component', exercises: 14 } ] } return ( <div> <Header course = {course}/> <Content parts = {course.parts}/> <Total parts = {course.parts}/> </div> ) } export default App
import WebSocket from 'ws'; import fetch from 'node-fetch'; // define the websocket and REST URLs const wsUrl = 'wss://api.vitex.net/v2/ws'; const restUrl = "https://api.vitex.net/api/v2/markets"; const response = await fetch(restUrl); //extract JSON from the http response const myJson = await response.json(); var currencies = []; // extract symbols from JSON returned information for(let i = 0; i < myJson['data'].length; ++i){ if(myJson['data'][i]['s'] !== 0){ currencies.push(myJson['data'][i]['symbol']); } } // print metadata about pairs async function Metadata(){ myJson['data'].forEach((item)=>{ let pair_data = '@MD ' + item['tradeTokenSymbol'].split('-')[0] + '-' + item['quoteTokenSymbol'].split('-')[0] + ' spot ' + item['tradeTokenSymbol'].split('-')[0] + ' ' + item['quoteTokenSymbol'].split('-')[0] + ' ' + item['pricePrecision'] + ' 1 1 0 0'; console.log(pair_data); }) console.log('@MDEND') } //function to get current time in unix format function getUnixTime(){ return Math.floor(Date.now()); } Number.prototype.noExponents = function() { var data = String(this).split(/[eE]/); if (data.length == 1) return data[0]; var z = '', sign = this < 0 ? '-' : '', str = data[0].replace('.', ''), mag = Number(data[1]) + 1; if (mag < 0) { z = sign + '0.'; while (mag++) z += '0'; return z + str.replace(/^\-/, ''); } mag -= str.length; while (mag--) z += '0'; return str + z; } async function getTrades(message){ let pair_name = message['topic'].replace('market.', '').replace('.trade', ''); pair_name = pair_name.split('_')[0].split('-')[0] + '-' + pair_name.split('_')[1].split('-')[0]; message['data'].forEach((trade)=>{ var trade_output = '! ' + getUnixTime() + ' ' + pair_name + ' ' + (trade['side'] === 0 ? 
'B' : 'S') + ' ' + parseFloat(trade['p']).noExponents() + ' ' + parseFloat(trade['q']).noExponents(); console.log(trade_output); }) } async function getOrders(message){ let pair_name = message['topic'].replace('market.', '').replace('.depth', ''); pair_name = pair_name.split('_')[0].split('-')[0] + '-' + pair_name.split('_')[1].split('-')[0]; // check if bids array is not Null if(message['data']['bids'].length > 0){ var order_answer = '$ ' + getUnixTime() + ' ' + pair_name + ' B ' var pq = ''; for(let i = 0; i < message['data']['bids'].length; i++){ pq += parseFloat(message['data']['bids'][i][1]).noExponents() + '@' + parseFloat(message['data']['bids'][i][0]).noExponents() + '|'; } pq = pq.slice(0, -1); console.log(order_answer + pq + ' R'); } // check if asks array is not Null if(message['data']['asks'].length > 0){ var order_answer = '$ ' + getUnixTime() + ' ' + pair_name + ' S ' var pq = ''; for(let i = 0; i < message['data']['asks'].length; i++){ pq += parseFloat(message['data']['asks'][i][1]).noExponents() + '@' + parseFloat(message['data']['asks'][i][0]).noExponents() + '|'; } pq = pq.slice(0, -1); console.log(order_answer + pq + ' R'); } } async function Connect(){ // create a new websocket instance var ws = new WebSocket(wsUrl); ws.onopen = function(e) { // create ping function to keep connection alive setInterval(function() { if (ws.readyState === WebSocket.OPEN) { ws.send(JSON.stringify( { "command":"ping" } )); console.log('Ping request sent'); } }, 10000); // subscribe to trades and orders for all instruments currencies.forEach((pair)=>{ ws.send(JSON.stringify( { "command": "sub", "params": [`market.${pair}.trade`] } )); ws.send(JSON.stringify( { "command": "sub", "params": [`market.${pair}.depth`] } )); }); }; // func to handle input messages ws.onmessage = function(event) { try{ // parse input data to JSON format let dataJSON = JSON.parse(event.data); // console.log(dataJSON); if (dataJSON.event === 'push' && dataJSON['topic'].includes('trade')) { 
getTrades(dataJSON); } else if(dataJSON.event === 'push' && dataJSON['topic'].includes('depth')){ getOrders(dataJSON); } else { console.log(dataJSON); } }catch(e){ // skip confirmation messages cause they can`t be parsed into JSON format without an error } }; // func to handle closing connection ws.onclose = function(event) { if (event.wasClean) { console.log(`Connection closed with code ${event.code} and reason ${event.reason}`); } else { console.log('Connection lost'); setTimeout(async function() { Connect(); }, 500); } }; // func to handle errors ws.onerror = function(error) { console.log(`Error ${error} occurred`); }; } Metadata(); Connect();
'use strict'; const apiUrl = 'http://127.0.0.1:9999/api/hw36/posts'; let posts = []; const rootEl = document.getElementById('root'); const loaderEL = document.createElement('div'); loaderEL.dataset.id = 'loader'; loaderEL.textContent = 'Данные загружаются'; rootEl.appendChild(loaderEL); let selectedPostEl = null; let selectedPost = {}; const errorEl = document.createElement('div'); const updateInterval = 5000; errorEl.dataset.id = 'message'; rootEl.appendChild(errorEl); const error = { show: (text) => { errorEl.textContent = text; }, hide: () => { errorEl.textContent = ''; }, }; const loader = { show() { loaderEL.style.display = 'block'; }, hide() { loaderEL.style.display = 'none'; }, spinner: { show: (el) => { const spinnerEl = document.createElement('div'); spinnerEl.innerHTML = '<span data-id="action-loader"><img src="./img/loader.gif"></span>'; el.appendChild(spinnerEl); }, hide: () => { const spinnerEl = rootEl.querySelector('[data-id=action-loader]'); if (spinnerEl) { spinnerEl.parentElement.remove(); } }, }, }; const formEl = document.createElement('form'); formEl.dataset.id = 'post-form'; formEl.innerHTML = `<fieldset data-id="post-fields"> <input data-input="id" type="hidden" value="0"> <input data-input="author"> <input data-input="text"> <button data-action="add">Добавить</button> </fieldset>`; rootEl.appendChild(formEl); const authorInput = rootEl.querySelector('[data-input="author"]'); const textInput = rootEl.querySelector('[data-input="text"]'); const idInput = rootEl.querySelector('[data-input="id"]'); const inputs = rootEl.querySelectorAll('input'); const postsEl = document.createElement('div'); postsEl.dataset.id = 'posts'; rootEl.appendChild(postsEl); const fieldSetEl = rootEl.querySelector('[data-id="post-fields"]'); function makePostEl(post) { const wrappEl = document.createElement('div'); wrappEl.dataset.type = 'post'; wrappEl.dataset.postId = post.id; wrappEl.innerHTML = `<div data-post-part="author">${post.author}</div> <div 
data-post-part="text">${post.text}</div> <div> ❤️ <span data-info="likes">${post.likes}</span> <button data-action="like">+1</button> <button data-action="dislike">-1</button> <button data-action="edit">Изменить</button> <button data-action="remove">Удалить</button> </div> <form data-form="comment"> <input data-id="text"> <button>Добавить</button> </form> <div data-post-part="comments">${makeCommentEl(post.comments)}</div>`; return wrappEl; } function makeCommentEl(comments) { //Создание елемента комментарий if (!comments) { return ''; } let result = ''; comments.forEach((comment) => { const commentMarkup = `<div data-comment-id="${comment.id}">${comment.text}</div>`; result += commentMarkup; }); return result; } function makeWall(el, items) { items .map((item) => item) .forEach((element) => { el.appendChild(makePostEl(element)); }); } const formSetting = { //Управление кнопки форма fieldSetEl: formEl.querySelector('[data-id="post-fields"]'), isEdit: false, editState: () => { //Cостаяние редактирование const saveBtn = document.createElement('button'); saveBtn.dataset.action = 'save'; saveBtn.textContent = 'Сохранить'; const cancelBtn = document.createElement('button'); cancelBtn.dataset.action = 'cancel'; cancelBtn.textContent = 'Отмена'; const addBtn = formEl.querySelector('[data-action="add"]'); if (addBtn) { addBtn.remove(); formSetting.fieldSetEl.appendChild(saveBtn); formSetting.fieldSetEl.appendChild(cancelBtn); formSetting.isEdit = true; authorInput.value = selectedPost.author; textInput.value = selectedPost.text; idInput.value = selectedPost.id; } }, initialState: () => { //Изначальная состаяния const saveBtn = formEl.querySelector('[data-action="save"]'); const cancelBtn = formEl.querySelector('[data-action="cancel"]'); const addBtn = document.createElement('button'); addBtn.dataset.action = 'add'; addBtn.textContent = 'Добавить'; if (saveBtn && cancelBtn) { saveBtn.remove(); cancelBtn.remove(); formSetting.fieldSetEl.appendChild(addBtn); 
formSetting.isEdit = false; authorInput.value = ''; textInput.value = ''; idInput.value = 0; } }, }; function ajax(method, url, headers, callbacks, body) { const xhr = new XMLHttpRequest(); xhr.open(method, url); if (callbacks.onStart) { callbacks.onStart(); } if (headers) { for (const item in headers) { xhr.setRequestHeader(item, headers[item]); } } xhr.onload = () => { if (callbacks.onSuccess) { callbacks.onSuccess(xhr.responseText); } }; xhr.onerror = () => { if (callbacks.onError) { callbacks.onError(); } }; xhr.onloadend = () => { if (callbacks.onFinish) { callbacks.onFinish(); } }; if (method === 'GET') { xhr.send(); } else { xhr.send(body); } } // const functions = { // onStart: () => {}, // onFinish: () => {}, // onSuccess: (data) => {}, // onError: (data) => {}, // }; function validation(nodes) { for (const item of nodes) { if (!item.value) { item.focus(); error.show('Поля не может быт пустым'); return false; } error.hide(); } nodes[0].focus(); return true; } //Добавления постов formEl.onsubmit = (e) => { e.preventDefault(); const authorEl = formEl.querySelector('[data-input="author"]'); const textEl = formEl.querySelector('[data-input="text"]'); if (!validation(inputs)) { return; } const post = { id: 0, author: authorEl.value.trim(), text: textEl.value.trim(), likes: 0, comments: [], }; if (formSetting.isEdit) { post.id = Number(idInput.value); } ajax( 'POST', apiUrl, { 'Content-Type': 'application/json' }, { onStart: () => { const postEl = makePostEl(post); loader.show(); fieldSetEl.disabled = true; if (!formSetting.isEdit) { if (postsEl.childElementCount) { postsEl.insertBefore(postEl, postsEl.children[0]); } else { postsEl.appendChild(postEl); } } }, onSuccess: (postData) => { const lastPostEl = postsEl.children[0]; const loadedPost = JSON.parse(postData); lastPostEl.dataset.postId = loadedPost.id; if (!formSetting.isEdit) { posts.unshift(loadedPost); } loader.hide(); formEl.reset(); fieldSetEl.disabled = false; authorEl.focus(); if 
(formSetting.isEdit) { postsEl.replaceChild(makePostEl(post), selectedPostEl); const findedPost = posts.find((item) => item.id === loadedPost.id); findedPost.text = loadedPost.text; findedPost.author = loadedPost.author; } formSetting.initialState(); }, }, JSON.stringify(post) ); }; //Получение постов ajax( 'GET', apiUrl, {}, { onStart: () => { loader.show; }, onSuccess: (postData) => { posts = JSON.parse(postData); makeWall(postsEl, posts); loader.hide(); }, }, {} ); //Уделение постов postsEl.addEventListener('click', (e) => { if (e.target.dataset.action === 'remove') { const currentPostEl = e.target.parentElement.parentElement; const postId = Number(currentPostEl.dataset.postId); currentPostEl.remove(); ajax( 'DELETE', `${apiUrl}/${postId}`, { 'Content-Type': 'aplication/json' }, { onStart: () => { loader.show(); posts = posts.filter((item) => item.id !== postId); }, onSuccess: () => { loader.hide(); }, onFinish: () => { loader.hide(); }, }, null ); } }); //Лайки и дизлайки postsEl.addEventListener('click', (e) => { let currentPostEl = null; let likeCountEl = null; let actionsEl = null; const one = 1; if (e.target.dataset.action === 'dislike') { //Если нажал на дизлайк currentPostEl = e.target.parentElement.parentElement; actionsEl = e.target.parentElement; likeCountEl = currentPostEl.querySelector('[data-info="likes"]'); const postId = Number(currentPostEl.dataset.postId); const findedPost = posts.find((item) => item.id === postId); ajax( 'DELETE', `${apiUrl}/${postId}/likes`, {}, { onStart: () => { loader.spinner.show(currentPostEl); actionsEl.style.display = 'none'; }, onSuccess: () => { likeCountEl.textContent = Number(likeCountEl.textContent) - one; findedPost.likes = Number(likeCountEl.textContent); }, onFinish: () => { actionsEl.style.display = 'block'; loader.spinner.hide(); }, }, null ); } if (e.target.dataset.action === 'like') { //Если нажал на лайк currentPostEl = e.target.parentElement.parentElement; actionsEl = e.target.parentElement; likeCountEl = 
currentPostEl.querySelector('[data-info="likes"]'); const postId = Number(currentPostEl.dataset.postId); const findedPost = posts.find((item) => item.id === postId); ajax( 'POST', `${apiUrl}/${postId}/likes`, {}, { onStart: () => { loader.spinner.show(currentPostEl); actionsEl.style.display = 'none'; }, onSuccess: () => { likeCountEl.textContent = Number(likeCountEl.textContent) + one; findedPost.likes = Number(likeCountEl.textContent); }, onFinish: () => { actionsEl.style.display = 'block'; loader.spinner.hide(); }, }, null ); } }); //Добавление комментарий postsEl.addEventListener('submit', (e) => { e.preventDefault(); let currentPostEl = null; const createdFormEl = document.createElement('form'); createdFormEl.innerHTML = '<input data-id="text"><button>Добавить</button>'; createdFormEl.dataset.form = 'comment'; let commentText = null; let currentCommentForm = null; let commentsEl = null; if (e.target.dataset.form === 'comment') { currentPostEl = e.target.closest('[data-type="post"]'); const postId = Number(currentPostEl.dataset.postId); const findedPost = posts.find((item) => item.id === postId); currentCommentForm = currentPostEl.querySelector('[data-form=comment]'); commentsEl = currentPostEl.querySelector('[data-post-part="comments"]'); commentText = currentPostEl.querySelector('[data-id="text"]'); const comment = JSON.stringify({ id: 0, text: commentText.value.trim(), }); if (!validation([commentText])) { return; } ajax( 'POST', `${apiUrl}/${postId}/comments`, { 'Content-Type': 'application/json' }, { onStart: () => { loader.spinner.show(currentPostEl); currentCommentForm.remove(); }, onSuccess: (data) => { commentsEl.innerHTML += makeCommentEl([JSON.parse(data)]); findedPost.comments.push(JSON.parse(data)); }, onFinish: () => { loader.spinner.hide(); currentPostEl.insertBefore(createdFormEl, commentsEl); currentCommentForm.reset(); }, }, comment ); } }); //Редактирование поста formEl.addEventListener('click', (e) => { if (e.target.dataset.action === 
'cancel') { formSetting.initialState(); } }); postsEl.addEventListener('click', (e) => { if (e.target.dataset.action === 'edit') { selectedPostEl = e.target.closest('[data-type="post"]'); selectedPost = posts.find( (post) => post.id === Number(selectedPostEl.dataset.postId) ); formSetting.editState(); } }); //Обновление постов setInterval(() => { let newPosts = []; const lastPost = posts[0]; if (!lastPost) { return; } ajax( 'GET', `${apiUrl}/newer/${lastPost.id}`, {}, { onStart: () => {}, onSuccess: (postData) => { newPosts = JSON.parse(postData); if (newPosts.length) { newPosts.forEach((element) => { if (postsEl.childElementCount) { postsEl.insertBefore(makePostEl(element), postsEl.children[0]); } else { postsEl.appendChild(element); } posts.unshift(element); }); } }, }, {} ); }, updateInterval);
<template> <div class="msg-img clear" :style="{ height: IsLoad ? '100px' : 'auto', width: IsLoad ? '100px' : 'auto' }" > <img ref="img" v-show="url" :src="url" @load="loadHandle($event)" @error="errorHandle($event)" @loadstart="loadstartHandle" @loadeddata="loadeddataHandle" alt="加载失败" @click="showImg()" /> </div> </template> <script> import { mapActions } from "vuex"; import { MessageModel } from "../../../../WebIM"; const { MessageType } = MessageModel; import axios from "axios"; export default { name: "MessageImg", props: { message: { type: Object, default: () => ({}) }, index: { type: Number } }, data() { return { IsLoad: true, FileSizeProgress: { loaded: 0, total: 0 }, isSend: false, url: "" }; }, created() { /* if (!this.message.sentStatus && this.message.bySelf) { this.sendAction(); } */ }, mounted() { this.setThumb(); }, beforeDestroy() { if (this.url && this.url.includes("blob:")) { window.URL.revokeObjectURL(this.url); } }, computed: { isEncrypt() { const isEncrypt = [MessageType.ZXEncryptImgMsg].includes( this.message.messageType ); return isEncrypt; } }, watch: {}, methods: { ...mapActions(["UpSendStatus"]), sendAction() {}, loadHandle() { this.$emit("childloaded"); this.IsLoad = false; }, loadeddataHandle() { console.log("loadeddata"); }, errorHandle() { this.IsLoad = false; let content = this.message.content.content; const imageUri = this.message.content.imageUri; if (content && content.search("http") !== 0) { this.url = "data:image/jpeg;base64," + content; } else if (!content && imageUri && /zx-zgiot-002/.test(imageUri)) { this.url = imageUri + '?x-oss-process=image/resize,w_100,h_100'; } }, loadstartHandle(e) { console.log("loadstartHandle image", e); }, showImg() { if ( !this.message.bySelf && this.message.sentStatus === MessageModel.SendStatus.SENT ) { this.$emit("read", [this.message, this.index]); // 发送已读状态 } this.$emit("showimg", this.message); }, async setThumb() { let content = this.message.content.content; if (this.url && 
this.url.includes("blob:")) { window.URL.revokeObjectURL(this.url); } try { if (content && content.search("http") !== 0) { this.url = "data:image/jpeg;base64," + content; } if ( this.message.content.imageUri && this.message.messageType === MessageType.ZXGIFMsg ) { const ossurl = await this.$service.getSignedUrlByOss({ url: this.message.content.imageUri }); let result = await axios.get(ossurl, { responseType: "blob" }); const tmpUrl = window.URL.createObjectURL(result.data); result = null; this.url = tmpUrl; } } catch (error) { } finally { if (!this.url && content && content.search("http") !== 0) { this.url = "data:image/jpeg;base64," + content; } } return this.url; } } }; </script> <style lang="scss" scoped> .msg-img { position: relative; max-width: 200px; line-height: 0; img { min-width: 40px; min-height: 20px; max-height: 100px; cursor: zoom-in; } .loading { position: absolute; top: 0; left: 0; right: 0; bottom: 0; } .loader { width: 15px; height: 15px; } .circular { margin: auto; width: 100%; height: 100%; animation: rotate 2s linear infinite; transform-origin: center center; } .path { stroke-dasharray: 1, 200; stroke-dashoffset: 0; animation: dash 1.5s ease-in-out infinite, color 6s ease-in-out infinite; stroke-linecap: round; } @keyframes rotate { to { -webkit-transform: rotate(1turn); transform: rotate(1turn); } } @keyframes dash { 0% { stroke-dasharray: 1, 200; stroke-dashoffset: 0; } 50% { stroke-dasharray: 89, 200; stroke-dashoffset: -35; } to { stroke-dasharray: 89, 200; stroke-dashoffset: -124; } } @keyframes color { 0%, to { stroke: #d62d20; } 40% { stroke: #0057e7; } 66% { stroke: #008744; } 80%, 90% { stroke: #ffa700; } } } </style>
import { Injectable, UnauthorizedException } from "@nestjs/common" import { PassportStrategy } from "@nestjs/passport" import { ExtractJwt, Strategy } from "passport-jwt" import { UsersService } from "../users/users.service" import { JwtPayload } from "./interfaces/jwt-payload.interface" @Injectable() export class JwtStrategy extends PassportStrategy(Strategy) { constructor(private usersService: UsersService) { super({ jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(), ignoreExpiration: false, secretOrKey: "secret", }) } async validate(payload: JwtPayload) { const user = await this.usersService.getById(payload.id) if (!user) throw new UnauthorizedException() return user } }
library(tidyverse) library(abc) library(jsonlite) source("../data/helpers.R") basedir <- "post_21000_2022-12-05" # Observed heterozygosity and pairwise stats observed_df <- read.csv(file.path(basedir, "cleaned_results/observed.csv")) observed <- observed_df$x names(observed) <- rownames(observed_df) # Simulated heterozygosity and pairwise stats stats <- read.csv(file.path(basedir,"cleaned_results/stats.csv")) stats <- dplyr::select(stats, !X) # Parameters for each simulation and replicate rep_info <- read.csv(file.path(basedir,"cleaned_results/rep_info.csv")) rep_info <- rep_info |> mutate(across(c(sim, recap, mut, sample), factor)) # Check that observed and simulated summary statistics are in the same order all(rownames(observed) == names(stats)) # Check that info and stats are in the same order all(rep_info$rep == stats$X) #!!!! Why are there NA mutation rates? !!!! est_params <- c("T2", "T1", "CS", "AS", "NE", "Na", "Nc", "Ns", "mut_rate", "POP_SIZE", "P_D", "DISPERSAL_SIGMA", "YEAR_SHAPE") na_reps <- which(is.na(rep_info$mut_rate)) abc_obs <- observed abc_sims <- stats %>% filter(!is.na(rep_info$mut_rate)) abc_rep_info <- dplyr::select(rep_info, est_params) %>% filter(!is.na(mut_rate)) #abc_res2 <- abc(abc_obs, abc_rep_info, abc_sims , tol = 0.1, method = "rejection") # This only works with a higher tolerance, why? 
abc_res <- abc(abc_obs, abc_rep_info, abc_sims , tol = 0.2, method = "loclinear") #abc_res3 <- abc(abc_obs, abc_rep_info, abc_sims , tol = 0.1, method = "ridge") rej_post <- data.frame(abc_res$unadj.values) %>% pivot_longer(everything(), names_to = "parameter") adj_post <- data.frame(abc_res$adj.values) %>% pivot_longer(everything(), names_to = "parameter") priors <- rep_info %>% dplyr::select(est_params) %>% pivot_longer(everything(), names_to = "parameter") estimates <- apply(abc_res$adj.values, 2, median) # Setup a new simulation with parameters drawn from the posterior distribution set.seed(1000) nsamples <- 10 draws <- sample_n(data.frame(abc_res$adj.values), nsamples) # Remove draws with negative parameters valid_draws <- filter(draws, if_all(names(draws), ~ . > 0)) print(paste0(nrow(draws) - nrow(valid_draws), " row(s) removed because of negative param values")) default_params <- list( DEBUG = FALSE, NUM_GENS = 500, STEPSIZE = 1, POP_SIZE = 100, P_D = 0.2, YEAR_SHAPE = 1.5, DISPERSAL_SIGMA = 1.0 ) default_params$NUM_GENS <- 21000 default_params$START_TIME_AGO <- default_params$NUM_GENS datestring = format(Sys.time(), "%Y-%m-%d") basedir <- paste0("./estimated_sims_", datestring) param_values <- valid_draws param_values$id <- sprintf("run_%s_%06d", datestring, (1:nrow(param_values))) dir.create(basedir, showWarnings=FALSE) setup_files <- c("geo_layers") write.csv(valid_draws, file.path(basedir,"posterior_draws.csv")) for(j in 1:nrow(param_values)) { this_dir <- file.path(basedir, param_values$id[j]) dir.create(this_dir, recursive=TRUE, showWarnings=FALSE) params <- default_params for (xn in names(param_values)) { if (xn %in% names(params)) { params[[xn]] <- param_values[[xn]][j] } } writeLines(toJSON(params, pretty=TRUE), file.path(this_dir, "params.json")) for (f in setup_files) { file.symlink(file.path("..", "..", "..", f), file.path(this_dir, f)) } } #######Sean code #ABC:Possible method values are "rejection", "loclinear", "neuralnet" and "ridge". 
#First conduct Cross-validation for parameter inference cv.resR <- cv4abc(param=abc_rep_info, abc_sims, nval=5,tols=c(0.05,0.10,0.15,0.2,0.25,0.3,0.35), method="rejection") cv.resL <- cv4abc(param=abc_rep_info, abc_sims, nval=5,tols=c(.15,0.2,0.25), method="loclinear") cv.resRd <- cv4abc(param=abc_rep_info, abc_sims, nval=5,tols=c(.15,0.2,0.25), method="ridge") df <- as.data.frame(summary(cv.resR)) df <- rbind(df,summary(cv.resL)) df <- rbind(df,summary(cv.resRd)) #which.min(df[,1]) #use this to examine tolerance level with lowest error for each variable #For rejection and loclinear, tol 0.25. Ridge does not perform well. #Do a neural net (but cv4abc function doesn't work well) at tol=0.25 abc_resR <- abc(abc_obs, abc_rep_info, abc_sims , tol = 0.25, method = "rejection") abc_resL <- abc(abc_obs, abc_rep_info, abc_sims , tol = 0.25, method = "loclinear") abc_resN <- abc(abc_obs, abc_rep_info, abc_sims , tol = 0.25, method = "neuralnet",sizenet=2) #had to reduce the sizenet to 2 (from defaulty 5) due to weights error summary(abc_resR) summary(abc_resL) summary(abc_resN) #Save parameters from the rejection and neural net fit. 
rej_tbl <- summary(abc_resR) write.table(rej_tbl,file="abc_results/parameters_rejection.txt",sep="\t") nnet_tbl <- summary(abc_resN) write.table(nnet_tbl,file="abc_results/parameters_neuralnet.txt",sep="\t") posterior <- data.frame(abc_resN$adj.values) %>% rename("Average population size per patch" = POP_SIZE, "Probability of dispersal" = P_D, "Dispersal distance" = DISPERSAL_SIGMA, "Yearly variation in habitat quality" = YEAR_SHAPE, "Mutation rate" = mut_rate, T2=T2, T1=T1, CS=CS, AS=AS, NE=NE, Na=Na, Nc=Nc, Ns=Ns) %>% pivot_longer(everything(), names_to = "Parameter") estimates_vec <- apply(abc_resN$adj.values, 2, median) names(estimates_vec) <- c("T2", "T1", "CS", "AS", "NE", "Na", "Nc", "Ns", "Mutation rate", "Average population size per patch", "Probability of dispersal", "Dispersal distance", "Yearly variation in habitat quality") estimates <- data.frame(Parameter = names(estimates_vec), value = estimates_vec) prior <- abc_rep_info %>% rename("Average population size per patch" = POP_SIZE, "Probability of dispersal" = P_D, "Dispersal distance" = DISPERSAL_SIGMA, "Yearly variation in habitat quality" = YEAR_SHAPE, "Mutation rate" = mut_rate, T2=T2, T1=T1, CS=CS, AS=AS, NE=NE, Na=Na, Nc=Nc, Ns=Ns) %>% pivot_longer(everything(), names_to = "Parameter" ) msprime_params <- c("T2", "T1", "CS", "AS", "NE", "Na", "Nc", "Ns") slim_params <- c("Average population size per patch", "Probability of dispersal", "Dispersal distance", "Yearly variation in habitat quality") mut_params <- c("Mutation rate") posterior_msprime <- filter(posterior, Parameter %in% msprime_params) prior_msprime <- filter(prior, Parameter %in% msprime_params) est_msprime <- filter(estimates, Parameter %in% msprime_params) posterior_slim <- filter(posterior, Parameter %in% slim_params) prior_slim <- filter(prior, Parameter %in% slim_params) est_slim <- filter(estimates, Parameter %in% slim_params) posterior_mut <- filter(posterior, Parameter %in% mut_params) prior_mut <- filter(prior, Parameter %in% 
mut_params) est_mut <- filter(estimates, Parameter %in% mut_params) ggplot(prior, aes(x = value)) + geom_density() + geom_density(data = posterior, aes(x = value), color = "blue") + geom_vline(aes(xintercept = value), data = estimates, color = "blue") + facet_wrap(~Parameter, scales="free") dens_cols <- c("prior" = "black", "posterior" = "blue") ggplot(prior_msprime, aes(x = value, fill = "prior")) + geom_density(alpha = 0.5) + geom_density(data = posterior_msprime, aes(x = value, fill = "posterior"), alpha = 0.5) + geom_vline(aes(xintercept = value), data = est_msprime, color = "blue") + facet_wrap(~Parameter, scales="free", ncol = 2) + scale_fill_manual(name = "", values = dens_cols) + theme(text=element_text(size=18), axis.text.x = element_text(angle = 90)) ggsave("../generate_figures/msprime_params.png") ggplot(prior_slim, aes(x = value, fill = "prior")) + geom_density(alpha = 0.5) + geom_density(data = posterior_slim, aes(x = value, fill = "posterior"), alpha = 0.5) + geom_vline(aes(xintercept = value), data = est_slim, color = "blue") + facet_wrap(~Parameter, scales="free") + scale_fill_manual(name = "", values = dens_cols) + theme(text=element_text(size=18), axis.text.x = element_text(angle = 90)) ggsave("../generate_figures/slim_params.png") ggplot(prior_mut, aes(x = value, fill = "prior")) + geom_density(alpha = 0.5) + geom_density(data = posterior_mut, aes(x = value, fill = "posterior"), alpha = 0.5) + geom_vline(aes(xintercept = value), data = est_mut, color = "blue") + facet_wrap(~Parameter, scales="free") + scale_fill_manual(name = "", values = dens_cols) + theme(text=element_text(size=18), axis.text.x = element_text(angle = 90)) ggsave("../generate_figures/mut_params.png")
package org.intra_mart.common.aid.jdk.java.lang;

import java.util.LinkedList;

import org.intra_mart.common.platform.log.Logger;

/**
 * A {@link Thread} subclass that routes execution through {@link #fire()}
 * and notifies registered {@link ThreadStopListener}s when the thread
 * finishes (normally or with an error). Any {@code Throwable} escaping
 * {@link #fire()} is logged rather than propagated.
 */
public class ExtendedThread extends Thread {
    private static Logger _logger = Logger.getLogger();

    /**
     * Object holding the logic to be executed on this thread.
     */
    private Runnable runner = null;

    /**
     * Storage area for the registered thread-stop listeners.
     * NOTE(review): accessed without synchronization; a listener added
     * concurrently with thread termination may be missed -- confirm
     * whether callers only register before start().
     */
    private LinkedList listeners = new LinkedList();

    /**
     * Constructor for ExtendedThread.
     */
    public ExtendedThread() {
        super();
    }

    /**
     * Constructor for ExtendedThread.
     * @param target logic to run on this thread
     */
    public ExtendedThread(Runnable target) {
        this();
        this.runner = target;
    }

    /**
     * Constructor for ExtendedThread.
     * @param group thread group
     * @param target logic to run on this thread
     */
    public ExtendedThread(ThreadGroup group, Runnable target) {
        super(group, target);
        this.runner = target;
    }

    /**
     * Constructor for ExtendedThread.
     * @param name thread name
     */
    public ExtendedThread(String name) {
        super(name);
    }

    /**
     * Constructor for ExtendedThread.
     * @param group thread group
     * @param name thread name
     */
    public ExtendedThread(ThreadGroup group, String name) {
        super(group, name);
    }

    /**
     * Constructor for ExtendedThread.
     * @param target logic to run on this thread
     * @param name thread name
     */
    public ExtendedThread(Runnable target, String name) {
        this(name);
        this.runner = target;
    }

    /**
     * Constructor for ExtendedThread.
     * @param group thread group
     * @param target logic to run on this thread
     * @param name thread name
     */
    public ExtendedThread(ThreadGroup group, Runnable target, String name) {
        this(group, name);
        this.runner = target;
    }

    /**
     * Constructor for ExtendedThread.
     * @param group
     * @param target
     * @param name
     * @param stackSize
     */
    // public ExtendedThread(ThreadGroup group, Runnable target, String name, long stackSize) {
    //     super(group, target, name, stackSize);
    //     this.runner = target;
    // }

    /**
     * This object's thread-execution logic.
     * Subclasses cannot override this method; to define the logic run on
     * the thread, override {@link #fire()} instead.
     * Errors from {@code fire()} are logged (attributed to the runner's
     * class when one was supplied). In all cases the current listener list
     * is swapped out and each listener's {@code handleThreadStop()} is
     * invoked exactly once, with per-listener exceptions logged so one
     * failing listener does not block the rest.
     */
    public final void run(){
        try{
            this.fire();
        }
        catch(Throwable t){
            String className = this.getClass().getName();
            if(this.runner != null){
                className = this.runner.getClass().getName();
            }
            _logger.error("Thread runtime error[" + className + "]: " + t.getMessage(), t);
        }
        finally{
            // Detach the registered listeners and reset the field so each
            // listener fires at most once even if run() were re-entered.
            LinkedList list = listeners;
            listeners = new LinkedList();
            try{
                while(! list.isEmpty()){
                    ThreadStopListener tsl = (ThreadStopListener) list.removeFirst();
                    try{
                        tsl.handleThreadStop();
                    }
                    catch(Exception e){
                        _logger.error("Thread stop listener error[" + tsl.getClass().getName() + "]: " + e.getMessage(), e);
                    }
                }
            }
            catch(Throwable t){
                String className = this.getClass().getName();
                if(this.runner != null){
                    className = this.runner.getClass().getName();
                }
                _logger.error("Thread stop event error[" + className + "]: " + t.getMessage(), t);
            }
        }
    }

    /**
     * Logic executed on the thread.
     * Define here what you would normally write in {@code run()} when
     * subclassing {@link Thread}. The default implementation delegates to
     * the {@link Runnable} given at construction, if any.
     */
    public void fire(){
        if(this.runner != null){
            this.runner.run();
        }
    }

    /**
     * Registers an event listener invoked when this thread terminates.
     * @param listener the thread-stop event listener
     */
    public void addThreadStopListener(ThreadStopListener listener){
        listeners.add(listener);
    }
}
% fireLine: cellular fire-spread simulation driver.
% Builds the parameter struct `prams`, loads a fuel map, time-steps the
% coupled cell-state / potential-flow model, and optionally plots, saves
% frames/movie, and writes a .mat dataset.
% NOTE(review): expects wind, vortStrength, sinkStrength, dx (and, when
% saveData is true, batch_iter) to be defined by the caller -- confirm this
% script is driven by a batch wrapper that sets them.
prams.uwind = wind;
prams.vortStrength = vortStrength;
prams.sinkStrength = sinkStrength;
prams.dx = dx;

addpath ../src

% output switches
usePlot = false;
savePlot = false;
saveFrames = false;
saveData = true;
saveMovie = false;
if saveMovie
  writerObj = VideoWriter('test','MPEG-4');
  open(writerObj);
end
filename = 'fireLine';
Vname = sprintf('test_%s',filename);

% velocity scale is 1m/s
%prams.V = 1;
% length scale is 1m (typical size of a single pixel)
prams.L = 1;
% number of points in x and y direction. Also sets the domain size
prams.N = 201;
% prams.dx = 1; % dimensionless size of a cell
%prams.uwind = 2.0; % dimensionless speed of wind from the south
prams.windDir = 0; % wind direction relative to north in degrees
prams.s = prams.uwind/4; % dimensionless speed of the fire spread rate due only to wind
% 2x uwind produces lateral spread effects strength of omega in Sharples
% paper = 0.3 (scaling?)
%prams.vortStrength = 0;
%prams.vortStrength = 0.5*prams.uwind/prams.dx^2; % strength of the vorticity
%prams.vortStrength = -2*prams.s/prams.dx^2; % strength of the vorticity
% 0.9 base runs %4. %4. %0.3; 1x uwind has large effect. Adding 1 causes
% blow-up at uwind = 5 strength of nu in Sharples paper. The minus sign
% to make it a sink is accounted for later in the code
%prams.sinkStrength = (0.2*(exp(-prams.uwind) + 0.8*prams.uwind))/...
% prams.dx^2;
%prams.sinkStrength = 0.2; % for uwind = 1
%prams.sinkStrength = 0.23; % for uwind = 1
%prams.sinkStrength = 0.35; % for uwind = 2
%prams.sinkStrength = 0.3;

% time required for cell to travel 3 grid points
if prams.s ~= 0
  prams.dt = 3*prams.dx/prams.s;
else
  prams.dt = 2.0;
end
prams.T = 300; % time horizon
prams.ntime = ceil(prams.T/prams.dt);
prams.spotting = false;
prams.emberWash = false;

% fuel-map selection: exactly one of the blocks below should be enabled
if 0
  load('fuelmaps/fuels_30-70split_eps1em2.mat');
end
if 0
  load('fuelmaps/fuels_30-70split_eps1em3.mat');
end
if 0
  load('fuelmaps/fuels_30-70split_eps1em4.mat');
end
if 0
  load('fuelmaps/fuels_50-50split_eps1em2.mat');
end
if 0
  load('fuelmaps/fuels_50-50split_eps1em3.mat');
end
if 0
  load('fuelmaps/fuels_50-50split_eps1em4.mat');
end
if 1
  load('fuelmaps/fuels_uniform.mat');
end
% collapse the loaded map to {-1, 0, +1} and crop to the N x N domain
fuels = sign(fuels);
fuels = fuels(1:prams.N,1:prams.N);

% max and minimum burn times in seconds
maxBurn = 15;
minBurn = 15;
%maxBurn = 25;
%minBurn = 25;
%maxBurn = 50;
%minBurn = 15;
flameOutTime = zeros(prams.N);
flameOutTime(fuels == 1) = maxBurn;
flameOutTime(fuels == -1) = minBurn;

% Time steps that cell can burn for ...in zero windXX make ~ 1/wind
% number of dimensionless time steps before a cell burns out
prams.flameOut = ceil(flameOutTime/prams.dt);

% probability that a cell ignites a neighbor to model diffusion.
% Probabilities of igniting diagonal neighbors is scaled by sqrt(2) to
% account for additional distance. Ignition due to diffusion only occurs
% just before a cell burns out
prams.probIgnite = 4e-1;

% create object to store information (state, burn time, flame out,
% velocity) of each cell
cells = geom(prams);

% create ignition pattern
cells.initialState('head');
%cells.initialState('user',flameOutTime);
% state = 0 => Cell has no fuel (either burnt or never existed)
% state = 1 => Cell is not on fire but has remaining fuel
% state = 2 => Cell is currently on fire

% black out the boundary
cells.blackout;

% space to save the state at each time step
state = zeros(prams.N,prams.N,prams.ntime+1);
velx = zeros(prams.N,prams.N,prams.ntime+1);
vely = zeros(prams.N,prams.N,prams.ntime+1);

% object for the PDE solvers and the Bresenham algorithm
os = solvers(prams);

% current time
time = 0;

% seed points for streamline visualization along the bottom, left, and
% right edges of the domain
streams = true;
xstartBot = linspace(6,prams.N-5,50);
ystartBot = 6*ones(size(xstartBot));
ystartLeft = linspace(6,prams.N-5,50);
xstartLeft = 6*ones(size(ystartLeft));
ystartRight = linspace(6,prams.N-5,50);
xstartRight = (prams.N-5)*ones(size(ystartRight));
xstart = [xstartBot xstartLeft xstartRight];
ystart = [ystartBot ystartLeft ystartRight];
%xstart = [xstartBot xstartLeft];
%ystart = [ystartBot ystartLeft];

i = 1;
% main time stepping loop
while time < prams.T
%  fprintf('t = %4.2e of T = %4.2e\n', time, prams.T)

  % sink and vorticity terms due to cells that are on fire. Both are
  % divided by dx to account for the fact that the (x,y) divergence is
  % dw/dz
  sinkForce = os.sinkTerm(cells);
  vortForce = os.vortTerm(cells);

  % compute velocity due to sinks
  [psi,psix,psiy] = os.PoissonSolverNeumann(sinkForce);
  % sqrt(max(max(psix.^2 + psiy.^2)))
  % pause
  % compute velocity due to vorticity
  [psi,etax,etay] = os.PoissonSolverNeumann(+vortForce);

  % add the different terms in the velocity
  [cells.velx,cells.vely] = os.computeVelocity(...
      psix,psiy,etax,etay);

  % record state and velocity for this step
  state(:,:,i) = cells.state;
  velx(:,:,i) = cells.velx;
  vely(:,:,i) = cells.vely;
  i = i + 1;

  % visualize the current state
  if usePlot
    cells.vis(time,1,streams,xstart,ystart,flameOutTime);
    if saveFrames
      name = sprintf('frames/frame_%s_%03i',filename,i-1);
      saveas(1,name,'png')
    end
  end

  % update time
  time = time + prams.dt;
  % NOTE(review): this pause runs every step even when usePlot is false,
  % slowing batch runs -- confirm it should be inside the usePlot branch
  pause(.01)

  % update the state of the cells and the amount of time they've been
  % burning
  os.updateState(cells);
end

% save final state and velocities
state(:,:,prams.ntime+1) = cells.state;
velx(:,:,prams.ntime+1) = cells.velx;
vely(:,:,prams.ntime+1) = cells.vely;

% visualize the history and save the final frame if savePlot is true
if usePlot || savePlot || saveMovie
  cells = geom(prams);
  for k = 1:prams.ntime+1
    cells.state = state(:,:,k);
    cells.velx = velx(:,:,k);
    cells.vely = vely(:,:,k);
    time = (k-1)*prams.dt;
    cells.vis(time,1,streams,xstart,ystart,flameOutTime);
    if saveMovie
      fig1 = figure(1);
      set(fig1,'color','w');
      F = getframe(fig1);
      writeVideo(writerObj,F);
    end
    pause(0.01)
  end
  if savePlot
    name = sprintf('%s_final_state',filename);
    saveas(1,name,'png')
  end
  if saveMovie
    close(writerObj);
  end
  %save('fireline.mat','prams','state','velx','vely');
end

% first-arrival-time map (figure 2)
if usePlot || savePlot
  fat = cells.fat(state,prams.dt);
  if savePlot
    name = sprintf('%s_FAT',filename);
    saveas(2,name,'png')
  end
end

% compute first arrival time along a particular vector
if 0
  x0 = 100; y0 = 35;
  x1 = 100; y1 = 130;
  [x,y,fatVec] = cells.fatVec(fat,x0,y0,x1,y1);
end

%%save data
if saveData
  if ~exist('fat','var')
    fat = cells.fat(state,prams.dt);
  end
  % per-step masks: burnMap is NaN except where burning (for plotting);
  % fuelMap marks cells that still have fuel (burning or unburnt)
  N_state = size(state,3);
  burnMap = NaN*ones(prams.N,prams.N,N_state);
  fuelMap = NaN*ones(prams.N,prams.N,N_state);
  for i = 1:N_state
    burning = NaN*ones(prams.N,prams.N);
    fuel = zeros(prams.N,prams.N);
    burning(state(:,:,i) == 2) = 1;
    fuel(state(:,:,i) == 2) = 1; % burning=with fuel
    fuel(state(:,:,i) == 1) = 1;
    burnMap(:,:,i) = burning;
    fuelMap(:,:,i) = fuel;
  end
%  Name_data = sprintf('dataset_%f.mat',prams.uwind);
  Name_data = sprintf('dataset_%f_%d.mat',prams.uwind, batch_iter);
  cx = cells.cx;
  cy = cells.cy;
  save(Name_data,'cx','cy','fat','prams','xstart','ystart','state',...
      'velx','vely','fuelMap','burnMap');
end
"use client";

import Kappke from "@/assets/KappkeLogo";
import {
  BellIcon,
  CalendarIcon,
  ChatBubbleLeftIcon,
  Cog6ToothIcon,
  CreditCardIcon,
  DocumentIcon,
  FolderIcon,
  HomeIcon,
  PencilIcon,
  PhotoIcon,
  PowerIcon,
  QuestionMarkCircleIcon,
  ShoppingCartIcon,
  UserGroupIcon,
} from "@heroicons/react/24/outline";
import { Sidebar, Spinner } from "flowbite-react";
import { signOut, useSession } from "next-auth/react";
import Link from "next/link";
import { usePathname } from "next/navigation";
import { useState } from "react";

// Sidebar navigation model. `id` is matched against the second URL path
// segment (/dashboard/<id>) to highlight the active entry; entries with
// `subtabs` render as a collapsible group.
// NOTE(review): `activateId` on the "Content" entry is not part of the Tab
// type below and is never read in this file -- confirm whether it is dead
// config or consumed elsewhere.
const tabs = [
  {
    name: "Dashboard",
    href: "/dashboard",
    id: "",
    icon: HomeIcon,
  },
  {
    name: "Content",
    href: "",
    id: "content",
    icon: FolderIcon,
    activateId: "posts",
    subtabs: [
      {
        name: "Posts",
        href: "/dashboard/posts",
        id: "posts",
        icon: PencilIcon,
      },
      {
        name: "Pages",
        href: "/dashboard/pages",
        id: "pages",
        icon: DocumentIcon,
      },
      {
        name: "Products",
        href: "/dashboard/products",
        id: "products",
        icon: ShoppingCartIcon,
      },
      {
        name: "Media",
        href: "/dashboard/media",
        id: "media",
        icon: PhotoIcon,
      },
    ],
  },
  {
    name: "Orders",
    href: "/dashboard/orders",
    id: "orders",
    icon: CreditCardIcon,
  },
  {
    name: "Users",
    href: "/dashboard/users",
    id: "users",
    icon: UserGroupIcon,
  },
  {
    name: "Settings",
    href: "/dashboard/settings",
    id: "settings",
    icon: Cog6ToothIcon,
  },
];

// Shape of one sidebar entry; `subtabs` makes it a collapsible group.
export type Tab = {
  name: string;
  href: string;
  id: string;
  icon: React.FC;
  subtabs?: Tab[];
};

// Recursive renderer for the sidebar: entries with subtabs become an open
// Sidebar.Collapse (recursing into this same component); leaves become
// Sidebar.Item links that update the active-tab state on click.
const Tablist = ({
  tabs,
  activeTab,
  setActiveTab,
}: {
  tabs: Tab[];
  activeTab: string;
  setActiveTab: (tab: string) => void;
}) => {
  return (
    <>
      {tabs.map((tab) =>
        tab.subtabs ? (
          <Sidebar.Collapse
            key={tab.id}
            className="sidebar-collapse !font-semibold !text-graphite [&>svg]:!text-graphite [&+ul]:py-0 [&+ul]:space-y-0"
            label={tab.name}
            icon={tab.icon}
            open
          >
            <Tablist
              tabs={tab.subtabs}
              activeTab={activeTab}
              setActiveTab={setActiveTab}
            />
          </Sidebar.Collapse>
        ) : (
          <Sidebar.Item
            as={Link}
            href={tab.href}
            key={tab.id}
            icon={tab.icon}
            onClick={() => setActiveTab(tab.id)}
            className={`font-semibold ${
              activeTab === tab.id
                ? "!text-magenta [&>svg]:!text-magenta [&>svg]:hover:!text-magenta"
                : "!text-graphite [&>svg]:hover:!text-graphite"
            }`}
          >
            {tab.name}
          </Sidebar.Item>
        )
      )}
    </>
  );
};

// Dashboard shell layout: sidebar + top bar wrapped around the routed page
// content (`children`). Requires an authenticated next-auth session; shows
// a centered spinner while the session is loading.
export default ({ children }: { children: React.ReactNode }) => {
  const { data: session, status } = useSession({
    required: true,
  });
  // Active tab is seeded from the URL (/dashboard/<segment>) and then
  // tracked locally as the user clicks sidebar entries.
  const activeTabRoute = usePathname().split("/")[2] || "";
  const [activeTab, setActiveTab] = useState(activeTabRoute);

  return (
    <section className="desktop:w-4/5 m-auto h-screen p-4">
      {status === "authenticated" ? (
        <div className="m-auto flex flex-row gap-4 h-full">
          <Sidebar className="fixed sidebar font-medium">
            <Sidebar.Items>
              <Sidebar.ItemGroup className="flex flex-col h-full">
                <div className="mt-4 flex items-center justify-center gap-2 text-magenta">
                  <Kappke className="h-10 w-10" />
                  <span className="uppercase font-bold text-lg">
                    kappke.dev
                  </span>
                </div>
                <div className="grow border-t-2 border-classic-gray p-4">
                  <Tablist
                    tabs={tabs}
                    activeTab={activeTab}
                    setActiveTab={setActiveTab}
                  />
                </div>
                <button
                  className="text-graphite font-semibold text-xs w-full flex justify-center border-t-2 border-classic-gray gap-2 p-4"
                  onClick={() => signOut()}
                >
                  <PowerIcon className="h-4" />
                  Log out
                </button>
              </Sidebar.ItemGroup>
            </Sidebar.Items>
          </Sidebar>
          <div className="flex flex-col gap-4 w-full">
            <nav className="fixed topbar flex items-center justify-between h-[4.5rem] min-h-[4.5rem] px-4 bg-neutral-100 text-magenta rounded-xl select-none">
              <button className="flex items-center gap-2">
                <CalendarIcon className="h-6 ml-2 text-magenta" />
                {/* NOTE(review): hard-coded Date(1970, 1, 1) always renders
                    "1 February"; presumably `new Date()` (today) was
                    intended -- confirm. */}
                <span className="text-sm font-bold text-graphite">
                  {new Intl.DateTimeFormat("en", {
                    day: "numeric",
                    month: "long",
                  }).format(new Date(1970, 1, 1, 0, 0, 0, 0))}
                </span>
              </button>
              <div className="flex items-center text-right gap-4">
                {/* NOTE(review): console.log placeholders -- real handlers
                    still to be wired up. */}
                <QuestionMarkCircleIcon
                  onClick={console.log}
                  className="h-6 text-magenta cursor-pointer"
                />
                <BellIcon
                  onClick={console.log}
                  className="h-6 text-magenta cursor-pointer"
                />
                <ChatBubbleLeftIcon
                  onClick={console.log}
                  className="h-6 text-magenta cursor-pointer"
                />
                {status === "authenticated" ? (
                  <>
                    <span className="flex flex-col">
                      <span className="leading-tight font-semibold">
                        {session?.user?.name}
                      </span>
                      <span className="text-xs leading-tight italic text-graphite">
                        {session?.user?.email}
                      </span>
                    </span>
                    <img
                      src={session?.user?.image || ""}
                      alt="logo"
                      className="h-10 aspect-square rounded-full"
                    />
                  </>
                ) : (
                  <>
                    {/* loading skeleton for the user chip */}
                    <span className="flex flex-col items-end gap-2 animate-pulse">
                      <span className="h-2.5 w-16 rounded-full bg-classic-gray" />
                      <span className="h-2 w-32 rounded-full bg-classic-gray" />
                    </span>
                    <span className="h-10 aspect-square rounded-full bg-classic-gray animate-pulse" />
                  </>
                )}
              </div>
            </nav>
            <div className="dashboard-content">{children}</div>
          </div>
        </div>
      ) : (
        <div className="flex flex-col items-center justify-center h-full">
          <Spinner className="fill-magenta" size="xl" />
        </div>
      )}
    </section>
  );
};
// snforge integration tests for the GasEstimateApi contract.
// Each test deploys a fresh instance and exercises one view function; the
// *_to_fail variants use #[should_panic] with the assert's short-string
// message as the expected panic data.
#[cfg(test)]
mod tests {
    // use gas_api::IGasEstimateApiDispatcherTrait;
    use core::debug::PrintTrait;
    use snforge_std::{declare, ContractClassTrait};
    use gas_api::gas_api::{IGasEstimateApiDispatcher, IGasEstimateApiDispatcherTrait};

    // Happy path: a freshly deployed contract reports a minimum gas price
    // of 10.
    #[test]
    fn get_minimum_gas_price_to_work() {
        // First declare and deploy a contract
        let contract = declare('GasEstimateApi');
        let contract_address = contract.deploy(@ArrayTrait::new()).unwrap();

        // Create a Dispatcher object that will allow interacting with the deployed contract
        let dispatcher = IGasEstimateApiDispatcher { contract_address };

        // Call a view function of the contract
        let balance = dispatcher.get_minimum_gas_price();
        assert(balance == 10, 'balance == 10');
        balance.print();
    }

    // Inverted assertion: asserting `balance != 10` must panic, and the
    // panic payload must equal the assert message.
    #[test]
    #[available_gas(2000000)]
    #[should_panic(expected: ('balance != 10', ))]
    fn get_minimum_gas_price_to_fail() {
        // First declare and deploy a contract
        let contract = declare('GasEstimateApi');
        let contract_address = contract.deploy(@ArrayTrait::new()).unwrap();

        // Create a Dispatcher object that will allow interacting with the deployed contract
        let dispatcher = IGasEstimateApiDispatcher { contract_address };

        // Call a view function of the contract
        let balance = dispatcher.get_minimum_gas_price();
        assert(balance != 10, 'balance != 10');
        balance.print();
    }

    // Happy path: compute_gas_price(12) == 22.
    // NOTE(review): the expected value implies price = input + minimum(10);
    // confirm against the contract implementation.
    #[test]
    fn compute_gas_price_to_work() {
        // First declare and deploy a contract
        let contract = declare('GasEstimateApi');
        let contract_address = contract.deploy(@ArrayTrait::new()).unwrap();

        // Create a Dispatcher object that will allow interacting with the deployed contract
        let dispatcher = IGasEstimateApiDispatcher { contract_address };

        // Call a view function of the contract
        let gas_spent = dispatcher.compute_gas_price(12);
        assert(gas_spent == 22, 'gas_spent == 22');
        gas_spent.print();
    }

    // With input 1 the price is not 22, so the assert panics with its
    // message, which #[should_panic] expects.
    #[test]
    #[available_gas(2000000)]
    #[should_panic(expected: ('gas_spent == 22', ))]
    fn compute_gas_price_to_fail() {
        // First declare and deploy a contract
        let contract = declare('GasEstimateApi');
        let contract_address = contract.deploy(@ArrayTrait::new()).unwrap();

        // Create a Dispatcher object that will allow interacting with the deployed contract
        let dispatcher = IGasEstimateApiDispatcher { contract_address };

        // Call a view function of the contract
        let gas_spent = dispatcher.compute_gas_price(1);
        assert(gas_spent == 22, 'gas_spent == 22');
        gas_spent.print();
    }
}
import { FastifyInstance } from 'fastify'
import { ZodTypeProvider } from 'fastify-type-provider-zod'
import { z, ZodError } from 'zod'

import { auth } from '@/http/middlewares/auth'
import { prisma } from '@/lib/prisma'
import { errorSchema, suitabilitySchema } from '@/schemas/base-schema'
import { CalculateSuitabilityScore } from '@/service/calculate-score'
import { getUserPermissions } from '@/utils/get-user-permissions'

import { MethodNotAllowedError } from '../_errors/method-not-allowed-error'

/**
 * Registers POST /suitabilities: records a user's 12 suitability answers,
 * computes the aggregate score, and returns the created record (201).
 * Responds 404 when the target user does not exist, 400 on Zod validation
 * errors, 500 otherwise. Requires a bearer token (auth middleware).
 */
export async function createSuitability(app: FastifyInstance) {
  app.withTypeProvider<ZodTypeProvider>().register(auth).post(
    '/suitabilities',
    {
      schema: {
        tags: ['Suitability'],
        summary: 'Create a suitability answer',
        security: [{ bearerAuth: [] }],
        body: z.object({
          // NOTE(review): .min(0) on a string length is a no-op --
          // presumably .min(1) (non-empty) was intended; confirm.
          userId: z.string().min(0),
          // Exactly 12 answered questions are required.
          questions: z
            .object({
              questionId: z.number(),
              choosedAlternativesId: z.number().array(),
            })
            .array()
            .min(12)
            .max(12),
        }),
        response: {
          201: suitabilitySchema,
          404: errorSchema,
          400: z.object({ message: z.any() }),
          500: errorSchema
        },
        description: 'Create a new suitability',
        required: ['userId', 'questions']
      },
    },
    async (request, reply) => {
      try {
        const { questions, userId } = request.body
        const { sub, role } = await request.getCurrentUserProps()
        const { cannot } = getUserPermissions(sub, role)

        // Authorization: the caller must be allowed to create a
        // Suitability on behalf of `userId`.
        // NOTE(review): this error is caught by the generic catch below
        // and surfaces as a 500 -- likely it should be rethrown (to reach
        // the app's error handler) or mapped to its own status; confirm.
        if(cannot('create', 'Suitability', userId)){
          throw new MethodNotAllowedError(
            `You're not allowed to create a new Suitability for this user.`,
          )
        }

        const userExists = await prisma.user.findFirst({
          where: { id: userId },
        })

        if (!userExists) {
          return reply.status(404).send({ message: 'User not found' })
        }

        // Aggregate score over the answered questions.
        const score = await CalculateSuitabilityScore(questions)

        // Persist the suitability plus its nested answers in one create.
        const suitability = await prisma.suitability.create({
          data: {
            userId,
            answers: {
              createMany: {
                data: questions.map((question) => ({
                  questionId: question.questionId,
                  choosedAlternativesId: question.choosedAlternativesId,
                })),
              },
            },
            score,
          },
          select: {
            id: true,
            createdAt: true,
            score: true,
            answers: true,
          }
        })

        return reply.status(201).send(suitability)
      } catch (error) {
        if (error instanceof ZodError) {
          return reply.status(400).send({ message: error.errors })
        }

        // NOTE(review): missing `return` here -- harmless (last statement)
        // but inconsistent with the other reply sites.
        reply.status(500).send({ message: 'Internal Server Error' })
      }
    }
  )
}
import UIKit

extension UIView {
    /// Installs a diagonal gradient (top-right to bottom-left) behind all
    /// other sublayers, replacing any gradient layer added previously.
    /// - Parameter colors: the gradient color stops, in order.
    func addGradient(colors: [UIColor]) {
        // Remove ALL existing gradient layers.
        // FIX: the previous version enumerated a snapshot of `sublayers`
        // and removed by index from the live array; after the first removal
        // the remaining snapshot indices were stale, so with more than one
        // gradient layer it removed the wrong layer (or crashed out of
        // bounds). removeAll(where:) handles this safely in one pass.
        self.layer.sublayers?.removeAll(where: { $0 is CAGradientLayer })

        // Add new gradient layer.
        // FIX: a sublayer's frame is expressed in the host layer's own
        // coordinate space, so `bounds` is correct here; the old
        // `self.frame` (superview coordinates) misplaced the gradient for
        // any view not at its superview's origin.
        let gradientLayer = CAGradientLayer()
        gradientLayer.frame = self.bounds
        gradientLayer.colors = colors.compactMap({ $0.cgColor })
        gradientLayer.startPoint = CGPoint(x: 1.0, y: 0.0)
        gradientLayer.endPoint = CGPoint(x: 0.0, y: 1.0)
        self.layer.insertSublayer(gradientLayer, at: 0)
    }

    /// Applies a drop shadow to the view's layer.
    /// - Parameters:
    ///   - color: shadow color (defaults to black).
    ///   - radius: blur radius of the shadow.
    ///   - offset: shadow offset from the view.
    ///   - opacity: shadow opacity in [0, 1].
    func addShadow(
        color: UIColor = UIColor.black,
        radius: CGFloat = Constants.DesignProperties.shadowRadius,
        offset: CGSize = Constants.DesignProperties.shadowOffset,
        opacity: Float = Constants.DesignProperties.shadowOpacity
    ) {
        self.layer.shadowColor = color.cgColor
        self.layer.shadowRadius = radius
        self.layer.shadowOffset = offset
        self.layer.shadowOpacity = opacity
    }
}
"use client";

import { useNextForm } from "@/components/next-form";
import { ValidationSchema, validationSchema } from "@/schema/user.schema";
import { create } from "@/server-actions/form.action";
import { zodResolver } from "@hookform/resolvers/zod";

// Account-registration form page.
// Client-side validation comes from the shared Zod schema (via
// react-hook-form's resolver); submission is forwarded to the `create`
// server action through the useNextForm wrapper. Invalid fields get a red
// border plus an inline error message.
export default function DashboardIndex() {
  const {
    handleSubmit,
    register,
    formState: { errors },
  } = useNextForm<ValidationSchema>({
    serverFunction: create,
    resolver: zodResolver(validationSchema),
  });

  return (
    <div className="max-w-xl mx-auto w-full">
      <div className="flex justify-center my-12">
        <div className="w-full lg:w-11/12 bg-white p-5 rounded-lg shadow-xl">
          <h3 className="pt-4 text-2xl text-center font-bold">
            Create New Account
          </h3>
          <form className="px-8 pt-6 pb-8 mb-4" onSubmit={handleSubmit}>
            {/* name row: first / last side by side on md+ */}
            <div className="mb-4 md:flex md:justify-between">
              <div className="mb-4 md:mr-2 md:mb-0">
                <label
                  className="block mb-2 text-sm font-bold text-gray-700"
                  htmlFor="firstName"
                >
                  First Name
                </label>
                <input
                  className={`w-full px-3 py-2 text-sm leading-tight text-gray-700 border ${
                    errors.firstName && "border-red-500"
                  } rounded appearance-none focus:outline-none focus:shadow-outline`}
                  id="firstName"
                  type="text"
                  placeholder="First Name"
                  {...register("firstName")}
                />
                {errors.firstName && (
                  <p className="text-xs italic text-red-500 mt-2">
                    {errors.firstName?.message}
                  </p>
                )}
              </div>
              <div className="md:ml-2">
                <label
                  className="block mb-2 text-sm font-bold text-gray-700"
                  htmlFor="lastName"
                >
                  Last Name
                </label>
                <input
                  className={`w-full px-3 py-2 text-sm leading-tight text-gray-700 border ${
                    errors.lastName && "border-red-500"
                  } rounded appearance-none focus:outline-none focus:shadow-outline`}
                  id="lastName"
                  type="text"
                  placeholder="Last Name"
                  {...register("lastName")}
                />
                {errors.lastName && (
                  <p className="text-xs italic text-red-500 mt-2">
                    {errors.lastName?.message}
                  </p>
                )}
              </div>
            </div>
            <div className="mb-4">
              <label
                className="block mb-2 text-sm font-bold text-gray-700"
                htmlFor="email"
              >
                Email
              </label>
              <input
                className={`w-full px-3 py-2 text-sm leading-tight text-gray-700 border ${
                  errors.email && "border-red-500"
                } rounded appearance-none focus:outline-none focus:shadow-outline`}
                id="email"
                type="email"
                placeholder="Email"
                {...register("email")}
              />
              {errors.email && (
                <p className="text-xs italic text-red-500 mt-2">
                  {errors.email?.message}
                </p>
              )}
            </div>
            {/* password row: password / confirm side by side on md+ */}
            <div className="mb-4 md:flex md:justify-between">
              <div className="mb-4 md:mr-2 md:mb-0">
                <label
                  className="block mb-2 text-sm font-bold text-gray-700"
                  htmlFor="password"
                >
                  Password
                </label>
                <input
                  className={`w-full px-3 py-2 text-sm leading-tight text-gray-700 border ${
                    errors.password && "border-red-500"
                  } rounded appearance-none focus:outline-none focus:shadow-outline`}
                  id="password"
                  type="password"
                  {...register("password")}
                />
                {errors.password && (
                  <p className="text-xs italic text-red-500 mt-2">
                    {errors.password?.message}
                  </p>
                )}
              </div>
              <div className="md:ml-2">
                <label
                  className="block mb-2 text-sm font-bold text-gray-700"
                  htmlFor="c_password"
                >
                  Confirm Password
                </label>
                <input
                  className={`w-full px-3 py-2 text-sm leading-tight text-gray-700 border ${
                    errors.confirmPassword && "border-red-500"
                  } rounded appearance-none focus:outline-none focus:shadow-outline`}
                  id="c_password"
                  type="password"
                  {...register("confirmPassword")}
                />
                {errors.confirmPassword && (
                  <p className="text-xs italic text-red-500 mt-2">
                    {errors.confirmPassword?.message}
                  </p>
                )}
              </div>
            </div>
            <div className="mb-4">
              <input type="checkbox" id="terms" {...register("terms")} />
              <label
                htmlFor="terms"
                className={`ml-2 mb-2 text-sm font-bold ${
                  errors.terms ? "text-red-500" : "text-gray-700"
                }`}
              >
                Accept Terms &amp; Conditions
              </label>
              {errors.terms && (
                <p className="text-xs italic text-red-500 mt-2">
                  {errors.terms?.message}
                </p>
              )}
            </div>
            <div className="mb-6 text-center">
              <button
                className="w-full px-4 py-2 font-bold text-white bg-blue-500 rounded-full hover:bg-blue-700 focus:outline-none focus:shadow-outline"
                type="submit"
              >
                Register Account
              </button>
            </div>
            <hr className="mb-6 border-t" />
            {/* NOTE(review): placeholder hrefs (#test, ./index.html) --
                confirm the real forgot-password / login routes. */}
            <div className="text-center">
              <a
                className="inline-block text-sm text-blue-500 align-baseline hover:text-blue-800"
                href="#test"
              >
                Forgot Password?
              </a>
            </div>
            <div className="text-center">
              <a
                className="inline-block text-sm text-blue-500 align-baseline hover:text-blue-800"
                href="./index.html"
              >
                Already have an account? Login!
              </a>
            </div>
          </form>
        </div>
      </div>
    </div>
  );
}
Design: Sequence contains a node and a variable that stores current size of the Sequence as well as two pointers to Nodes, *head and *tail. Each node contains two pointers to Nodes, *prev and *next, along with a variable of type ItemType that stores a value.The list does not contain a dummy node and is not circular. Test cases: //Basic Cases Sequence s; //default constructor test assert(s.empty()); //empty Sequence assert(!s.size()); //check size is 0 assert(!s.remove(33)); //can’t remove anything from an empty Sequence assert(!s.erase(0)); //can’t erase anything from an empty Sequence assert(s.insert(0,222)); //simple test of insert assert(s.size()==1); //test if size is incremented assert(s.insert(23)==0); //second type of insert assert(!s.empty()); //Sequence not empty assert(s.find(222)==1); assert(s.insert(2, 34)); assert(s.insert(777)==3); assert(s.size() == 4); assert(s.erase(0)); //remove first assert(s.remove(777)); //remove last assert(s.size() == 2); assert(s.remove(222)); //start checking if you can delete all the elements of Sequence assert(s.remove(34)); assert(s.empty()); //all elements removed, must be empty Sequence t; assert(t.insert(333)==0); assert(t.erase(0)); assert(t.empty()); assert(!t.insert(1, 33)); Sequence r; assert(r.insert(3)==0); assert(r.insert(0,2)); Sequence t; assert(t.insert(333)==0); assert(t.erase(0)); assert(t.empty()); assert(!t.insert(1, 33)); Sequence s1, s2; s1.swap(s2); //swap empty Sequences assert(s.insert(32)); assert(s.insert(33)); //Testing the insert functions: assert(s.insert(0,0)); assert(s.insert(1,2)); assert(s.insert(3)==2); assert(s.insert(-2) == 0); void testInsert() //basic test cases for insert() and note insert() is used several other times { Sequence s; assert(s.insert(0,1)); //at beginning assert(s.insert(1,2)); //in middle assert(s.insert(3)==2); //correct location assert(s.insert(0) == 0); //correct location at beginning } //Testing get and set void getsetTest() { Sequence t; assert(t.insert(0, 1)); 
ItemType x = -10; assert(t.get(0, x) && x == 1); //check basic functioning assert(t.insert(1,22)); assert(t.set(0,22)); assert(t.get(0, x) && x == 22); //basic functioning of set() and if get() works at the beginning assert(t.insert(33)); assert(t.insert(21) == 0); assert(t.get(0,x) && x == 21); //updating t to see if get() still works assert(t.remove(22) == 2); assert(t.get(1,x) && x==33); //get() works in the beginning Sequence e; assert(!e.get(0,x)); //get() doesn't work if empty assert(!e.set(0,x)); //set() doesn't work if empty } //testing remove(), note erase() and remove() are used several times void testRemove() { Sequence a; assert(!a.insert(0)); for(int i = 1; i<4; i++) assert(a.insert(i)==i); a.remove(1); assert(a.find(1) == -1); } //Testing find void testFind() { Sequence a; assert(a.insert(12)==0); assert(a.insert(1,12) ); assert(a.insert(12)==0); assert(a.find(12) == 0 ); //several 12s, find correct one at beginning assert(a.remove(12)); assert(a.insert(12)==0); assert(a.insert(1,13)); assert(a.insert(12)==0); assert(a.insert(13)==1); assert(a.find(13) == 2); //in middle assert(a.insert(4,14)); assert(a.insert(12)==0); assert(a.find(14) == 5); //find at end } //Testing subsequence void testSubsequence() { Sequence small; assert(small.insert(0,0)); assert(small.insert(1,2)); assert(small.insert(3)==2); Sequence large; assert(large.insert(0,1)); assert(large.insert(1,0)); assert(large.insert(2,2)); assert(large.insert(3,3)); assert(large.insert(4,2)); assert(large.insert(5,6)); //small now 0 2 3 and assert(subsequence(small, large) == -1); //checks not a subsequence assert(subsequence(large, small) == 1); //check it is a subsequence in middle assert(large.insert(0,0)); assert(large.insert(1,2)); assert(large.insert(2,3)); assert(subsequence(large, small) == 0); //check subsequence in middle assert(large.insert(7,0)); assert(large.insert(8,2)); assert(large.insert(9,3)); assert(subsequence(large, small) == 0); //check returns lowest subsequence 
assert(large.insert(0,4)); assert(large.insert(1,5)); assert(large.insert(2,6)); assert(subsequence(large, small) == 3); //check subsequence in beginning if multiple subsequences exist assert(large.erase(3) && large.erase(4) && large.erase(5) && large.erase(10) && large.erase(10)); assert(subsequence(large, small) == 7); //check subsequence at the end //cerr<<"Success!"<<endl; } //testing copy constructor and assignment operator void testCopyStuff() { ItemType x = -10; Sequence e; assert(!e.get(0,x)); assert(!e.set(0,x)); Sequence w; assert(!w.insert(1)); assert(w.insert(1, 2)); e=w; //e=t; w.dump(); e.dump(); } //testing Interleave() void testInterleave() { Sequence small; assert(small.insert(0,0)); assert(small.insert(1,2)); assert(small.insert(2,4)); Sequence large; assert(large.insert(0,1)); assert(large.insert(1,3)); assert(large.insert(2,5)); assert(large.insert(3,7)); Sequence result; interleave(small, large, result); //first sequence<second sequence //use dump() to check result; interleave(large, small, small); //check aliasing //use dump() to check small assert(small.erase(0) && small.erase(1) && small.erase(2)); interleave(small, large, small); //check aliasing and interleave for same sized sequences // use dump() to check small } Pseudocode: bool Sequence::insert(int pos, const ItemType& value) create new Node and store value in it check if Node is inserted in the middle of the Sequence set a temp variable to required position to insert value update necessary pointers (of Node and Nodes before and after it) and size otherwise, if Sequence is empty set head and tail, update size check if inserting in the beginning of a non empty Sequence update the pointers of head and Node after it and size check if inserting Node at end update tail and pointers before Node and of Node delete created Node if no inserts are successful int Sequence::insert(const ItemType& value) check if Node is inserted in the beginning and call other insert function otherwise find 
position to insert Node insert Node using other insert function bool Sequence::erase(int pos) check if position is valid for removal create temp var equal to head if only element in Sequence is removed delete head reset head, temp to nullptr reduce size if the position is in the beginning of a non empty Sequence update head and prev pointer reduce size if the element deleted from the middle of the Sequence update temp to reach Node that needs to be deleted update pointers, size and delete temp if the element deleted is the last element update tail, size and other pointers delete temp return false if no if statement was entered int Sequence::remove(const ItemType& value) create var to count number of values removed iterate through Nodes if value found call erase() at that pos and increase count return count void Sequence::swap(Sequence& other) switch other.head and head using a temp variable switch other.tail and tail using a temp variable switch other.m_size and m_size using a temp variable int Sequence::find(const ItemType& value) const assign var temp to head iterate through linked list check if value is found bool Sequence::set(int pos, const ItemType& value) check pos and head are valid otherwise cycle through linked list to position set new value of node bool Sequence::get(int pos, ItemType& value) const check pos and head are valid otherwise cycle through linked list to position set new value of value int subsequence(const Sequence& seq1, const Sequence& seq2) return -1 if second Sequence is empty or bigger than first Sequence or if the first Sequence is empty iterate through Nodes of first Sequence check if a value in first Sequence matches the first value in the second Sequence iterate through remaining values in second Sequence/first Sequence if they aren’t equal break; if they were equal for all values of the second Sequence return position(i) return -1 if no appropriate position was found void interleave(const Sequence& seq1, const Sequence& seq2, 
Sequence& result) check if either/both Sequences are empty otherwise create a temp variable iterate through both Sequences for the length of the shorter Sequence get the value stored at each pos check if the element of Sequence 1 can be inserted into temp increment position in temp check if the element of Sequence 2 can be inserted into temp increment position in temp increment index of Sequence 1/2 iterate through the remainder of the longer Sequence
#ifndef SUPERVISOR_H
#define SUPERVISOR_H

#include "floor.h"

/*!
 * \brief The Supervisor class
 * Chooses the destination for the packages, sends orders.
 *
 * Tracks which package sits on which shelf and emits sendOrder() with a
 * prepared Order; cancelOrder()/orderCompleted() are the feedback slots
 * from the robot side.
 */
class Supervisor : public QObject
{
    Q_OBJECT

public:
    /*!
     * \brief Constructor of the class
     */
    Supervisor();

    /*!
     * \brief Adds shelf to the warehouse
     * Parameters: shelf coordinates and the package type it stores
     * (presumably x, y — confirm against the definition).
     */
    void addShelf(int,int,PackageType);

    /*!
     * \brief Creates new package
     * Returns an int — presumably the ID of the new package; confirm
     * against the definition.
     */
    int addPackage(PackageType);

    /*!
     * \brief Pointer to the floor, access to shelves
     */
    Floor* floor;

    /*!
     * \brief Sets start shelf of the warehouse
     */
    void setStartTile(QPair<int,int>);

    /*!
     * \brief Sets end shelf of the warehouse
     */
    void setEndTile(QPair<int,int>);

    /*!
     * \brief Checks if there are new packages waiting and prepares an order
     */
    void checkForOrders();

    /*!
     * \brief Prepares an order for requested package
     * The int parameter is the ID of the requested package.
     */
    void packageRequested(int);

    /*!
     * \brief Gives vector of all available packages on shelves
     */
    QVector<int> getPackagesOnShelves();

private:
    /*!
     * \brief Position of the start shelf
     */
    QPair<int,int> startTile;

    /*!
     * \brief Position of the end shelf
     */
    QPair<int,int> endTile;

    /*!
     * \brief Map of all of the packages and shelves that they are on
     * (key - package ID, value - pointer to a shelf)
     */
    QMap<int,Shelf*> packagesOnShelves;

    /*!
     * \brief Gives position of the free shelf of particular type and adds
     * package ID and shelf to the packagesOnShelves
     */
    QPair<int,int> findShelfForPackage(int,PackageType);

    /*!
     * \brief Number of packages
     */
    int numOfPackages = 0;

    /*!
     * \brief Queue of newly arrived packages
     */
    QQueue<Package*> packages;

    /*!
     * \brief Map of all packages (key - ID of the package, value - pointer to the package)
     */
    QMap<int,Package*> allPackages;

    /*!
     * \brief State of the shelves - number of the packages on shelves -
     * updated when the order is created
     */
    QMap<QPair<int,int>,int> stateOfShelves;

public slots:
    /*!
     * \brief Cancels the order when RobotSupervisor gives the signal that
     * there are no free robots
     */
    void cancelOrder(Order*);

    /*!
     * \brief Completes the order
     */
    void orderCompleted(Order*);

signals:
    /*!
     * \brief Sends signal to the Robot Supervisor with new order
     */
    void sendOrder(Order*);

    /*!
     * \brief Updates logs
     */
    void updateLogs(int, PackageType);
};

#endif // SUPERVISOR_H
import { useState } from "react"; import "../App.css" const CreateRecipe = () => { const blankForm = { name: "", ingredients: [""], instructions: "" }; const [form, setForm] = useState(blankForm); const handleIngredientChange = (index, value) => { const updatedIngredients = [...form.ingredients]; updatedIngredients[index] = value; setForm({ ...form, ingredients: updatedIngredients }); }; const addIngredientField = () => { setForm({ ...form, ingredients: [...form.ingredients, ""] }); }; const removeIngredientField = (index) => { const updatedIngredients = [...form.ingredients]; updatedIngredients.splice(index, 1); setForm({ ...form, ingredients: updatedIngredients }); }; const handleSubmit = (e) => { e.preventDefault(); // Here you can make an API call or other actions to save the recipe console.log("Recipe Submitted:", form); setForm(blankForm); // Reset form after submission }; return ( <div className="recipe-form-container"> <h2>Add Your Recipe!</h2> <form onSubmit={handleSubmit}> <div> <label htmlFor="recipeName">Recipe Name:</label> <input type="text" id="recipeName" value={form.name} onChange={(e) => setForm({ ...form, name: e.target.value })} required /> </div> <h3>Ingredients:</h3> {form.ingredients.map((ingredient, index) => ( <div key={index}> <input type="text" value={ingredient} onChange={(e) => handleIngredientChange(index, e.target.value)} required /> <button type="button" onClick={() => removeIngredientField(index)}>Remove</button> </div> ))} <button type="button" onClick={addIngredientField}>Add Ingredient</button> <div> <h3>Directions:</h3> <textarea value={form.instructions} onChange={(e) => setForm({ ...form, instructions: e.target.value })} required /> </div> <div> <button type="submit">Submit Recipe</button> </div> </form> </div> ); }; export default CreateRecipe;
package com.linkallcloud.core.util;

import java.util.Enumeration;
import java.util.Locale;
import java.util.Map;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
import java.util.concurrent.ConcurrentHashMap;

import com.linkallcloud.core.lang.Strings;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Cached, static access to localized strings backed by {@link ResourceBundle}.
 *
 * Lookups fall back to a caller-supplied default, or to the key itself when
 * the default is blank. Missing bundles are cached as a sentinel so a failed
 * load is resolved (and logged) only once.
 */
public class ResBundle {

    private static Log log = LogFactory.getLog(ResBundle.class);

    // ConcurrentHashMap: this cache is mutated from static methods that may be
    // called concurrently (the previous plain HashMap risked corruption).
    private static Map<String, ResourceBundle> resMap = new ConcurrentHashMap<String, ResourceBundle>();

    // Sentinel cached for bundles that failed to load.
    private static final ResourceBundle NULL_BUNDLE = new ResourceBundle() {

        public Enumeration<String> getKeys() {
            return null;
        }

        protected Object handleGetObject(String key) {
            // Returning null makes ResourceBundle.getString(...) throw
            // MissingResourceException, which getResString() maps to the default.
            return null;
        }

        public String toString() {
            return "NULL_BUNDLE";
        }
    };

    /**
     * Returns the cached bundle for {@code resourceName}, loading it on first use.
     *
     * Never returns null. Fix: previously a load failure left null in scope and
     * returned it, which (a) caused a masked NullPointerException in
     * getResString and (b) re-attempted the load and re-logged the error on
     * every call; now the failure is cached as {@link #NULL_BUNDLE}.
     */
    private static ResourceBundle getBundle(String resourceName) {
        ResourceBundle rb = resMap.get(resourceName);
        if (rb == null) {
            try {
                rb = ResourceBundle.getBundle(resourceName, Locale.getDefault());
            } catch (Throwable mre) {
                log.error("No resource property file in the classpath or in the res folder.", mre);
            }
            if (rb == null) {
                rb = NULL_BUNDLE;
            }
            resMap.put(resourceName, rb);
        }
        return rb;
    }

    /**
     * Looks up {@code key} in the default "resource" bundle; returns the key
     * itself when missing.
     */
    public static String getString(String key) {
        return getResString("resource", key, null);
    }

    /** Alias of {@link #getString(String)}. */
    public static String getMessage(String key) {
        return getResString("resource", key, null);
    }

    /**
     * Looks up {@code key} in the default "resource" bundle with an explicit
     * fallback value.
     */
    public static String getString(String key, String defaultValue) {
        return getResString("resource", key, defaultValue);
    }

    /** Alias of {@link #getString(String, String)}. */
    public static String getMessage(String key, String defaultValue) {
        return getResString("resource", key, defaultValue);
    }

    /** Looks up {@code key} in the named bundle; returns the key when missing. */
    public static String getResString(String resourceName, String key) {
        return getResString(resourceName, key, null);
    }

    /**
     * Looks up {@code key} in the named bundle.
     *
     * @param resourceName base name of the bundle on the classpath
     * @param key          message key
     * @param defaultValue returned when the value is blank; when the key (or
     *                     bundle) is missing, the key itself is returned unless
     *                     a non-blank default is given
     */
    public static String getResString(String resourceName, String key, String defaultValue) {
        try {
            String value = getBundle(resourceName).getString(key);
            return Strings.isBlank(value) ? defaultValue : value.trim();
        } catch (MissingResourceException e) {
            return Strings.isBlank(defaultValue) ? key : defaultValue;
        } catch (Throwable e) {
            return Strings.isBlank(defaultValue) ? key : defaultValue;
        }
    }
}
import React, { createContext, useReducer } from 'react' // Reducer function to handle notification actions const notificationReducer = (state, action) => { switch (action.type) { case 'SET_NOTIFICATION': return action.payload case 'CLEAR_NOTIFICATION': return null default: return state } } // Create the context export const NotificationContext = createContext() const NotificationContextProvider = (props) => { const [notification, dispatch] = useReducer(notificationReducer, null) return ( <NotificationContext.Provider value={{ notification, dispatch }}> {props.children} </NotificationContext.Provider> ) } export default NotificationContextProvider
############################################################################
############################################################################
############################################################################
#
#   In this file we load and format the CPRD dataset.
#
############################################################################
############################################################################
############################################################################

# load libraries
require(tidyverse)

#:---------------------------------------------------------
# load dataset - modyt1d_cohort_local
load("/slade/CPRD_data/Katie Pedro MODY/pedro_mody_cohort.Rda")

## remove those with missing bmi, hba1c
# NOTE(review): load() restores objects under their saved names; confirm the
# .Rda really contains an object named 'pedro_mody_cohort_local' (the file
# name suggests 'pedro_mody_cohort').
modyt1d_cohort_local_clean <- pedro_mody_cohort_local %>%
  drop_na(bmi, hba1c)

#:---------------------------------------------------------
# load functions to make predictions from models
source("00.prediction_functions.R")

# load posteriors: restricted-cubic-spline parameters plus the posterior
# sample draws for the type 1 and type 2 MODY models
rcs_parms <- readRDS("model_posteriors/rcs_parms.rds")
posterior_samples_T1D <- readRDS("model_posteriors/type_1_model_posteriors.rds")
posterior_samples_T2D <- readRDS("model_posteriors/type_2_model_posteriors.rds")

### create object to use for prediction
# The S3 class tags ("T1D"/"T2D") dispatch to the matching predict() method
# defined in 00.prediction_functions.R.
posterior_samples_T1D_obj <- list(post = posterior_samples_T1D$samples)
class(posterior_samples_T1D_obj) <- "T1D"

posterior_samples_T2D_obj <- list(post = posterior_samples_T2D$samples)
class(posterior_samples_T2D_obj) <- "T2D"

#:---------------------------------------------------------
## make predictions for T1D MODY (missingness is in pardm)

# Accumulator: one row per patient, prediction columns joined in later.
final_T1D_predictions <- data.frame(
  patid = modyt1d_cohort_local_clean$patid
)

#:------------------
# If pardm is missing, set to 0
newdata_predictions <- modyt1d_cohort_local_clean %>%
  mutate(pardm = ifelse(is.na(pardm), 0, pardm))

# Model matrix for prediction: covariates only, coerced to numeric.
newdata_predictions_x <- as_tibble(as.matrix(select(newdata_predictions, pardm, agerec, hba1c, agedx, sex, bmi)))
# 'T' column placeholder — presumably the outcome slot expected by predict();
# confirm against 00.prediction_functions.R.
newdata_predictions_x$T <- NA
# T1D model, pardm imputed to 0: posterior predictive draws, summarised per
# patient as the posterior mean and 95% credible interval.
predictions_T1D_pardm_0 <- predict(posterior_samples_T1D_obj, newdata_predictions_x, rcs_parms) %>%
  apply(., 2, function(x) {
    data.frame(prob = mean(x), LCI = quantile(x, probs = 0.025), UCI = quantile(x, probs = 0.975))
  }) %>%
  bind_rows() %>%
  cbind(
    patid = newdata_predictions$patid
  )

# Join onto the per-patient accumulator (joins on the shared 'patid' column).
final_T1D_predictions <- final_T1D_predictions %>%
  left_join(
    predictions_T1D_pardm_0 %>%
      set_names(c("mean_pardm_0", "lci_pardm_0", "uci_pardm_0", "patid"))
  )

#:------------------
# If pardm is missing, set to 1
newdata_predictions <- modyt1d_cohort_local_clean %>%
  mutate(pardm = ifelse(is.na(pardm), 1, pardm))

newdata_predictions_x <- as_tibble(as.matrix(select(newdata_predictions, pardm, agerec, hba1c, agedx, sex, bmi)))
newdata_predictions_x$T <- NA

# Same summarisation as above, with pardm imputed to 1.
predictions_T1D_pardm_1 <- predict(posterior_samples_T1D_obj, newdata_predictions_x, rcs_parms) %>%
  apply(., 2, function(x) {
    data.frame(prob = mean(x), LCI = quantile(x, probs = 0.025), UCI = quantile(x, probs = 0.975))
  }) %>%
  bind_rows() %>%
  cbind(
    patid = newdata_predictions$patid
  )

final_T1D_predictions <- final_T1D_predictions %>%
  left_join(
    predictions_T1D_pardm_1 %>%
      set_names(c("mean_pardm_1", "lci_pardm_1", "uci_pardm_1", "patid"))
  )

#:------------------
# Persist locally and to the shared CPRD folder.
dir.create("Patient Predictions")
saveRDS(final_T1D_predictions, "Patient Predictions/T1D_predictions.rds")
saveRDS(final_T1D_predictions, "/slade/CPRD_data/Katie Pedro MODY/T1D_predictions.rds")

#:---------------------------------------------------------
## make predictions for T2D MODY (missingness is in pardm)

final_T2D_predictions <- data.frame(
  patid = modyt1d_cohort_local_clean$patid
)

#:------------------
# If pardm is missing, set to 0
newdata_predictions <- modyt1d_cohort_local_clean %>%
  mutate(pardm = ifelse(is.na(pardm), 0, pardm))

# T2D model additionally uses 'insoroha'; its predict() method takes no
# rcs_parms argument (see 00.prediction_functions.R).
newdata_predictions_x <- as_tibble(as.matrix(select(newdata_predictions, pardm, agerec, hba1c, agedx, sex, bmi, insoroha)))

predictions_T2D_pardm_0 <- predict(posterior_samples_T2D_obj, newdata_predictions_x) %>%
  apply(., 2, function(x) {
    data.frame(prob = mean(x), LCI = quantile(x, probs = 0.025), UCI = quantile(x, probs = 0.975))
  }) %>%
  bind_rows() %>%
  cbind(
    patid = newdata_predictions$patid
  )

final_T2D_predictions <- final_T2D_predictions %>%
  left_join(
    predictions_T2D_pardm_0 %>%
      set_names(c("mean_pardm_0", "lci_pardm_0", "uci_pardm_0", "patid"))
  )

#:------------------
# If pardm is missing, set to 1
newdata_predictions <- modyt1d_cohort_local_clean %>%
  mutate(pardm = ifelse(is.na(pardm), 1, pardm))

newdata_predictions_x <- as_tibble(as.matrix(select(newdata_predictions, pardm, agerec, hba1c, agedx, sex, bmi, insoroha)))

predictions_T2D_pardm_1 <- predict(posterior_samples_T2D_obj, newdata_predictions_x) %>%
  apply(., 2, function(x) {
    data.frame(prob = mean(x), LCI = quantile(x, probs = 0.025), UCI = quantile(x, probs = 0.975))
  }) %>%
  bind_rows() %>%
  cbind(
    patid = newdata_predictions$patid
  )

final_T2D_predictions <- final_T2D_predictions %>%
  left_join(
    predictions_T2D_pardm_1 %>%
      set_names(c("mean_pardm_1", "lci_pardm_1", "uci_pardm_1", "patid"))
  )

#:------------------
# NOTE(review): the directory was already created in the T1D section above, so
# this second call emits a warning — consider dir.create(..., showWarnings = FALSE).
dir.create("Patient Predictions")
saveRDS(final_T2D_predictions, "Patient Predictions/T2D_predictions.rds")
saveRDS(final_T2D_predictions, "/slade/CPRD_data/Katie Pedro MODY/T2D_predictions.rds")
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle.  If not, see <http://www.gnu.org/licenses/>.

/**
 * Restore from backup.
 *
 * @package tool_rpg
 * @copyright 2024 Sebastian Gundersen
 * @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
 */

use tool_rpg\rpg_filearea;

defined('MOODLE_INTERNAL') || die();

require_once($CFG->dirroot . '/backup/moodle2/restore_tool_plugin.class.php');

/**
 * Restore from backup.
 *
 * Restores tool_rpg items and monsters always; characters and item instances
 * (user data) only when the backup's "userinfo" setting is enabled.
 */
class restore_tool_rpg_plugin extends restore_tool_plugin {

    /**
     * Define structure.
     *
     * @return restore_path_element[]
     */
    protected function define_course_plugin_structure(): array {
        $paths = [];
        $paths[] = new restore_path_element('item', '/items/item');
        $paths[] = new restore_path_element('monster', '/monsters/monster');
        // User-specific records are only present/restored when user info is included.
        if ($this->get_setting_value('userinfo')) {
            $paths[] = new restore_path_element('character', '/characters/character');
            $paths[] = new restore_path_element('item_instance', '/items/item/instances/instance');
        }
        return $paths;
    }

    /**
     * Restore item from backup.
     *
     * @param stdClass|array $data
     * @return void
     */
    public function process_item(stdClass|array $data): void {
        global $DB;
        $data = (object)$data;
        $oldid = $data->id;
        $data->id = $DB->insert_record('tool_rpg_item', $data);
        // Remember old->new id so item instances and files can be remapped.
        $this->set_mapping('tool_rpg_item', $oldid, $data->id);
    }

    /**
     * Restore monster from backup.
     *
     * @param stdClass|array $data
     * @return void
     */
    public function process_monster(stdClass|array $data): void {
        global $DB;
        $data = (object)$data;
        $oldid = $data->id;
        $data->id = $DB->insert_record('tool_rpg_monster', $data);
        $this->set_mapping('tool_rpg_monster', $oldid, $data->id);
    }

    /**
     * Restore character from backup.
     *
     * @param stdClass|array $data
     * @return void
     */
    public function process_character(stdClass|array $data): void {
        global $DB;
        $data = (object)$data;
        $oldid = $data->id;
        // Remap the owning user to its id on the destination site.
        $data->userid = $this->get_mappingid('user', $data->userid);
        $data->timecreated = $this->apply_date_offset($data->timecreated);
        $data->id = $DB->insert_record('tool_rpg_character', $data);
        $this->set_mapping('tool_rpg_character', $oldid, $data->id);
    }

    /**
     * Restore item instance from backup.
     *
     * @param stdClass|array $data
     * @return void
     */
    public function process_item_instance(stdClass|array $data): void {
        global $DB;
        $data = (object)$data;
        $oldid = $data->id;
        // The parent item was just restored; link to its new id.
        $data->itemid = $this->get_new_parentid('item');
        $data->timecreated = $this->apply_date_offset($data->timecreated);
        $data->id = $DB->insert_record('tool_rpg_item_instance', $data);
        $this->set_mapping('tool_rpg_item_instance', $oldid, $data->id);
    }

    /**
     * Restore files.
     *
     * @return void
     */
    protected function after_execute(): void {
        // Restore the plugin file areas (presumably item/monster images).
        $this->add_related_files('tool_rpg', rpg_filearea::ITEM, null);
        $this->add_related_files('tool_rpg', rpg_filearea::MONSTER, null);
    }
}
<template>
  <v-container>
    <h1 class="text-center">Востановление пароля</h1>
    <v-row>
      <v-col>
        <v-form
          ref="form"
          v-model="valid"
          lazy-validation
        >
          <v-text-field
            v-model="email"
            label="Почта"
            :rules="emailRules"
            required
          ></v-text-field>
          <v-btn
            class="mr-4"
            @click="resetInit"
            :disabled="!valid"
          >Отправить сообщение</v-btn>
        </v-form>
      </v-col>
    </v-row>
  </v-container>
</template>

<script lang="ts">
// Password-reset initiation page: collects the user's e-mail, validates it,
// and asks the account service to start the reset flow.
import Component from 'vue-class-component'
import { Inject, Vue } from 'vue-property-decorator'
import AccountService from '../../services/accountService'

@Component({
  components: {}
})
export default class ResetInit extends Vue {
  // Account API client provided by a parent component.
  @Inject() readonly accountService!: AccountService;

  // Typed access to the <v-form> ref for validate().
  $refs!: {
    form: HTMLFormElement;
  }

  public email = ''

  // Bound to the form's v-model: false once a rule fails.
  public valid = true

  // Validate, send the reset request, and navigate home.
  // NOTE(review): resetInit() is not awaited, so request failures are
  // silent and navigation happens immediately — confirm this is intended.
  public resetInit () {
    if (this.$refs.form.validate()) {
      this.accountService.resetInit(this.email)
      this.$router.push('/')
    }
  }

  // Vuetify validation rules: required, e-mail shape, max 50 chars.
  // NOTE(review): UI strings look misspelled ("Востановление" in the
  // template, "Не верная" below) — confirm with product copy before changing.
  public emailRules = [
    (v: any) => !!v || 'Обязательно для заполнения',
    (v: any) => /.+@.+\..+/.test(v) || 'Не верная почта',
    (v: any) => (v && v.length <= 50) || 'Не может превышать 50 символов'
  ]
}
</script>
/**
 * SPDX-FileCopyrightText: (c) 2000 Liferay, Inc. https://liferay.com
 * SPDX-License-Identifier: LGPL-2.1-or-later OR LicenseRef-Liferay-DXP-EULA-2.0.0-2023-06
 */

package com.liferay.frontend.taglib.servlet.taglib;

import com.liferay.frontend.taglib.form.navigator.FormNavigatorCategoryProvider;
import com.liferay.frontend.taglib.form.navigator.FormNavigatorEntryProvider;
import com.liferay.frontend.taglib.internal.servlet.ServletContextUtil;
import com.liferay.portal.kernel.theme.ThemeDisplay;
import com.liferay.portal.kernel.util.ArrayUtil;
import com.liferay.portal.kernel.util.WebKeys;
import com.liferay.taglib.util.IncludeTag;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.jsp.PageContext;

/**
 * JSP tag that renders a stepped form navigator. Resolves the category and
 * section keys/labels for the navigator id and exposes them (plus the tag's
 * attributes) as request attributes consumed by the included page.jsp.
 *
 * @author Eudaldo Alonso
 */
public class FormNavigatorStepsTag extends IncludeTag {

	@Override
	public int doStartTag() {
		// Evaluate the tag body in place; the include happens at end tag.
		return EVAL_BODY_INCLUDE;
	}

	public String getBackURL() {
		return _backURL;
	}

	public Object getFormModelBean() {
		return _formModelBean;
	}

	public String getFormName() {
		return _formName;
	}

	public String getHtmlBottom() {
		return _htmlBottom;
	}

	public String getHtmlTop() {
		return _htmlTop;
	}

	public String getId() {
		return _id;
	}

	public boolean isShowButtons() {
		return _showButtons;
	}

	public void setBackURL(String backURL) {
		_backURL = backURL;
	}

	public void setFormModelBean(Object formModelBean) {
		_formModelBean = formModelBean;
	}

	public void setFormName(String formName) {
		_formName = formName;
	}

	public void setHtmlBottom(String htmlBottom) {
		_htmlBottom = htmlBottom;
	}

	public void setHtmlTop(String htmlTop) {
		_htmlTop = htmlTop;
	}

	public void setId(String id) {
		_id = id;
	}

	@Override
	public void setPageContext(PageContext pageContext) {
		super.setPageContext(pageContext);

		// The included JSP lives in this module's servlet context.
		setServletContext(ServletContextUtil.getServletContext());
	}

	public void setShowButtons(boolean showButtons) {
		_showButtons = showButtons;
	}

	@Override
	protected void cleanUp() {
		super.cleanUp();

		// Tag handlers are pooled; reset all attributes to their defaults.
		_backURL = null;
		_formModelBean = null;
		_formName = "fm";
		_htmlBottom = null;
		_htmlTop = null;
		_id = null;
		_showButtons = true;
	}

	// Category keys registered for this navigator id.
	protected String[] getCategoryKeys() {
		FormNavigatorCategoryProvider formNavigatorCategoryProvider =
			ServletContextUtil.getFormNavigatorCategoryProvider();

		return formNavigatorCategoryProvider.getKeys(_id);
	}

	// Category labels localized for the current request's locale.
	protected String[] getCategoryLabels() {
		FormNavigatorCategoryProvider formNavigatorCategoryProvider =
			ServletContextUtil.getFormNavigatorCategoryProvider();

		HttpServletRequest httpServletRequest = getRequest();

		ThemeDisplay themeDisplay =
			(ThemeDisplay)httpServletRequest.getAttribute(
				WebKeys.THEME_DISPLAY);

		return formNavigatorCategoryProvider.getLabels(
			_id, themeDisplay.getLocale());
	}

	// One row of section keys per category, in category order.
	protected String[][] getCategorySectionKeys() {
		FormNavigatorEntryProvider formNavigatorEntryProvider =
			ServletContextUtil.getFormNavigatorEntryProvider();

		HttpServletRequest httpServletRequest = getRequest();

		ThemeDisplay themeDisplay =
			(ThemeDisplay)httpServletRequest.getAttribute(
				WebKeys.THEME_DISPLAY);

		String[] categoryKeys = getCategoryKeys();

		String[][] categorySectionKeys = new String[0][];

		for (String categoryKey : categoryKeys) {
			categorySectionKeys = ArrayUtil.append(
				categorySectionKeys,
				formNavigatorEntryProvider.getKeys(
					_id, categoryKey, themeDisplay.getUser(), _formModelBean));
		}

		return categorySectionKeys;
	}

	// One row of localized section labels per category, in category order.
	protected String[][] getCategorySectionLabels() {
		FormNavigatorEntryProvider formNavigatorEntryProvider =
			ServletContextUtil.getFormNavigatorEntryProvider();

		HttpServletRequest httpServletRequest = getRequest();

		ThemeDisplay themeDisplay =
			(ThemeDisplay)httpServletRequest.getAttribute(
				WebKeys.THEME_DISPLAY);

		String[] categoryKeys = getCategoryKeys();

		String[][] categorySectionLabels = new String[0][];

		for (String categoryKey : categoryKeys) {
			categorySectionLabels = ArrayUtil.append(
				categorySectionLabels,
				formNavigatorEntryProvider.getLabels(
					_id, categoryKey, themeDisplay.getUser(), _formModelBean,
					themeDisplay.getLocale()));
		}

		return categorySectionLabels;
	}

	@Override
	protected String getPage() {
		return "/form_navigator_steps/page.jsp";
	}

	// Expose all tag state to page.jsp under namespaced request attributes.
	@Override
	protected void setAttributes(HttpServletRequest httpServletRequest) {
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:backURL", _backURL);
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:categoryKeys",
			getCategoryKeys());
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:categoryLabels",
			getCategoryLabels());
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:categorySectionKeys",
			getCategorySectionKeys());
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:categorySectionLabels",
			getCategorySectionLabels());
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:formModelBean",
			_formModelBean);
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:formName", _formName);
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:htmlBottom", _htmlBottom);
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:htmlTop", _htmlTop);
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:id", _id);
		httpServletRequest.setAttribute(
			"liferay-frontend:form-navigator-steps:showButtons",
			String.valueOf(_showButtons));
	}

	private String _backURL;
	private Object _formModelBean;
	private String _formName = "fm";
	private String _htmlBottom;
	private String _htmlTop;
	private String _id;
	private boolean _showButtons = true;

}
#pragma once #include <stdlib.h> #include <time.h> #include <iostream> #include <thread> #include <vector> #include "tablero.h" class Juego { public: // constructores Juego(); Juego(int dim_x, int dim_y, int num_celulas_vivas, std::vector<int> pos_x, std::vector<int> pos_y); // metodos para el auto mode void AutoMode(int n_celulas_vivas); // metodos para el sandbox void SandBox(int dim_x, int dim_y, int num_iteraciones, int num_celulas_vivas, std::vector<int> pos_x, std::vector<int> pos_y); // sobrecarga de operadores friend std::ostream& operator<<(std::ostream& os, const Juego& juego); friend int main(); private: // atributos Tablero tablero_juego_; int num_celulas_; // numero de celulas vivas std::vector<int> pos_x_; std::vector<int> pos_y_; std::vector<Celula> celulas_; // metodos para el juego (reglas) int CelulaVecina(Celula& celula); bool CelulaNace(Celula& celula_muerta); bool CelulaMuere(Celula& celula_viva); // funcion que devuelve n celulas en una posicion // random de un tablero por defecto std::vector<Celula> RandomPosition(int n_celulas_vivas); // funcion reglas en conjunto void Reglas(Tablero& copia_tablero); };
package com.greatmrpark.common.provider; import org.springframework.beans.BeansException; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; import org.springframework.stereotype.Component; /** * <p> * <pre> * * ContextProvider.java * 개정이력(Modification Information)· * 수정일 수정자 수정내용 * ------------------------------------ * 2019. 7. 1. greatmrpark 최초작성 * </pre> * * @author greatmrpark * @since 2019. 7. 1. * @version 1.0.0 */ @Component public class ContextProvider implements ApplicationContextAware { private static ApplicationContext CONTEXT; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { CONTEXT = applicationContext; } /** * Get a Spring bean by type. **/ public static <T> T getBean(Class<T> beanClass) { return CONTEXT.getBean(beanClass); } /** * Get a Spring bean by name. **/ public static Object getBean(String beanName) { return CONTEXT.getBean(beanName); } }
import { Injectable } from '@angular/core';
import {
  HttpInterceptor,
  HttpRequest,
  HttpHandler,
  HttpErrorResponse,
} from '@angular/common/http';
import {
  BehaviorSubject,
  Observable,
  catchError,
  filter,
  switchMap,
  take,
  throwError,
} from 'rxjs';
import { AuthService } from './auth.service';
import { Router } from '@angular/router';
import { TokensService } from './tokens/tokens.service';

/**
 * Attaches the access token to outgoing requests and transparently refreshes
 * it on 401 responses, replaying the failed request afterwards. Concurrent
 * 401s wait on refreshTokenSubject for the single in-flight refresh.
 */
@Injectable()
export class AuthInterceptor implements HttpInterceptor {
  /** True while a refresh-token request is in flight. */
  private isRefreshing = false;

  /** Emits the new access token after a refresh; null while refreshing. */
  private refreshTokenSubject: BehaviorSubject<any> = new BehaviorSubject<any>(
    null
  );

  constructor(
    private authService: AuthService,
    private router: Router,
    private tokensService: TokensService
  ) {}

  intercept(request: HttpRequest<any>, next: HttpHandler): Observable<any> {
    const token = this.tokensService.getAccessToken();
    if (!this.authService.isAccessTokenExpired() && token != null) {
      request = this.addTokenHeader(request, token);
    }
    return next.handle(request).pipe(
      catchError((error): any => {
        if (
          error instanceof HttpErrorResponse &&
          !request.url.includes('/register') &&
          error.status === 401
        ) {
          return this.handle401Error(request, next);
        }
        throw error;
      })
    );
  }

  /**
   * Handles a 401. Fixes over the previous version:
   * - the "wait for refresh" branch now runs when a refresh IS in flight
   *   (it was previously unreachable in that case, returning undefined);
   * - the queued retry returns next.handle(...) from switchMap (the missing
   *   return silently dropped the replayed request);
   * - every path returns an Observable and resets isRefreshing.
   */
  private handle401Error(
    request: HttpRequest<any>,
    next: HttpHandler
  ): Observable<any> {
    if (!this.isRefreshing) {
      this.isRefreshing = true;
      this.refreshTokenSubject.next(null);

      const refreshToken = this.tokensService.getRefreshToken();
      const accessExpired = this.authService.isAccessTokenExpired();
      const refreshExpired = this.authService.isRefreshTokenExpired();

      if (accessExpired && refreshExpired) {
        // Session is fully over: clear credentials and send to login.
        this.isRefreshing = false;
        this.tokensService.clearStorage();
        this.router.navigateByUrl('/login');
        return throwError(() => new Error('Session expired'));
      }

      if (accessExpired && !refreshExpired && refreshToken !== null) {
        return this.authService.refreshToken(refreshToken).pipe(
          switchMap((token: any) => {
            this.isRefreshing = false;
            this.tokensService.saveAccessAndRefreshToken(
              token.access_token,
              token.refresh_token
            );
            // Release any requests queued while refreshing.
            this.refreshTokenSubject.next(token.access_token);
            return next.handle(
              this.addTokenHeader(request, token.access_token)
            );
          }),
          catchError((err) => {
            this.isRefreshing = false;
            return throwError(() => err);
          })
        );
      }

      // No usable refresh token: give up on this request.
      this.isRefreshing = false;
      return throwError(() => new Error('Unable to refresh access token'));
    }

    // A refresh is already running: wait for the new token, then retry.
    return this.refreshTokenSubject.pipe(
      filter((result) => result !== null),
      take(1),
      switchMap((token) => next.handle(this.addTokenHeader(request, token)))
    );
  }

  /** Clones the request with JSON headers and the bearer token attached. */
  private addTokenHeader(request: HttpRequest<any>, token: string) {
    return request.clone({
      setHeaders: {
        'Content-Type': 'application/json',
        Accept: 'application/json',
        Authorization: `Bearer ${token}`,
      },
    });
  }
}
package com.example.furniturecloudy.present.fragments.shopping

import android.os.Bundle
import androidx.fragment.app.Fragment
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.Toast
import androidx.fragment.app.viewModels
import androidx.lifecycle.Lifecycle
import androidx.lifecycle.lifecycleScope
import androidx.lifecycle.repeatOnLifecycle
import androidx.navigation.fragment.findNavController
import androidx.recyclerview.widget.LinearLayoutManager
import com.example.furniturecloudy.databinding.FragmentOrdersBinding
import com.example.furniturecloudy.model.adapter.OrdersAdapter
import com.example.furniturecloudy.model.viewmodel.AllOrdersViewmodel
import com.example.furniturecloudy.util.Resource
import dagger.hilt.android.AndroidEntryPoint
import kotlinx.coroutines.flow.collectLatest
import kotlinx.coroutines.launch

/**
 * Screen listing all of the user's orders. Observes [AllOrdersViewmodel.allOrder]
 * and renders the result into a vertical RecyclerView; tapping an order
 * navigates to its detail screen.
 */
@AndroidEntryPoint
class AllOrdersFragment : Fragment() {
    private lateinit var binding: FragmentOrdersBinding
    val viewModel by viewModels<AllOrdersViewmodel>()
    val ordersAdapter by lazy { OrdersAdapter() }

    override fun onCreateView(
        inflater: LayoutInflater,
        container: ViewGroup?,
        savedInstanceState: Bundle?
    ): View? {
        binding = FragmentOrdersBinding.inflate(layoutInflater)
        return binding.root
    }

    override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
        super.onViewCreated(view, savedInstanceState)
        setupOrderAdapter()
        // Collect only while the view is at least STARTED (avoids updates
        // after the view is destroyed).
        viewLifecycleOwner.lifecycleScope.launch {
            viewLifecycleOwner.repeatOnLifecycle(Lifecycle.State.STARTED){
                viewModel.allOrder.collectLatest {
                    when(it){
                        is Resource.Loading -> {
                            binding.progressbarAllOrders.visibility = View.VISIBLE
                        }
                        is Resource.Success -> {
                            binding.progressbarAllOrders.visibility = View.GONE
                            ordersAdapter.differ.submitList(it.data)
                            // Show the empty-state label when there are no orders.
                            // NOTE(review): it is never set back to GONE if a later
                            // emission contains data — confirm whether that can occur.
                            if (it.data.isNullOrEmpty()) {
                                binding.tvEmptyOrders.visibility = View.VISIBLE
                            }
                        }
                        is Resource.Error -> {
                            Toast.makeText(requireContext(), it.message, Toast.LENGTH_SHORT).show()
                            binding.progressbarAllOrders.visibility = View.GONE
                        }
                        else -> Unit
                    }
                }
            }
        }

        // Navigate to the detail screen for the clicked order.
        ordersAdapter.onClick = {
            val action = AllOrdersFragmentDirections.actionOrdersFragmentToOrderDetailFragment(it)
            findNavController().navigate(action)
        }
    }

    // Vertical list of orders backed by ordersAdapter.
    private fun setupOrderAdapter() {
        binding.rvAllOrders.apply {
            layoutManager = LinearLayoutManager(requireContext(),LinearLayoutManager.VERTICAL,false)
            adapter = ordersAdapter
        }
    }
}
---
title: VSCode の Prettier の設定を理解する
time: 2020-01-13 15:31
---

# VSCode の Prettier の設定を理解する

VSCode で prettier を使う際に、なんとなくで設定していてたまに困るので整理する。

prettier の vscode 用拡張機能のページをちゃんと読めば、いろいろと書いてあったのでメモ。[https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode)

### どの言語に対応してるんだっけ

以下の言語(FrameWork)に対応している。

```
JavaScript · TypeScript · Flow · JSX · JSON
CSS · SCSS · Less
HTML · Vue · Angular
GraphQL · Markdown · YAML
```

プラグインを使うことで、`php`とか`ruby`とかもいけるっぽい。[https://prettier.io/docs/en/plugins.html](https://prettier.io/docs/en/plugins.html)

### VSCode で prettier を使う場合って何が必要なんだっけ

1.拡張機能を入れる。`cmd`+ `shift` + `P`で以下を実行 or VSCode の拡張機能で prettier を検索してインストールする。

```
ext install esbenp.prettier-vscode
```

2.VSCode で最小限の設定をする。

他のドキュメントフォーマット用の拡張機能を導入しているのであれば、`cmd` + `,`で設定を開き、右上の JSON 編集画面から以下の設定を行う。(※後述のユーザー or ワークスペースを参照)

特に他のドキュメントフォーマット用の拡張機能を入れてなければ、この設定もいらない。

```
{
  "editor.defaultFormatter": "esbenp.prettier-vscode",
  // 特定の言語だけデフォルトのドキュメントのフォーマットを設定する場合
  "[javascript]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode"
  }
}
```

この状態で、編集したいファイルを開いて、`cmd` + `shift` + `P` -\> Format Document を実行 or ファイルを開いた状態で右クリックで、ドキュメントのフォーマットを選択すると prettier を実行することができる。

#### ユーザー or ワークスペース

ワークスペースは、VSCode で現在開いているディレクトリを指している。特定のプロジェクトのみ適用させたい場合は、ワークスペースに書く。ワークスペースに書くと、`.vscode`ディレクトリの`settings.json`に設定が記載される。`.vscode`で管理するのであれば、git の管理対象に含めておきたい。

一方、プロジェクトを横断する形で全体に適用させたい場合は、ユーザーに書く。 適当に VSCode でマークダウンでメモ書くときも prettier 適用させたい場合とかに便利だね。

優先順位は、ワークスペース \> ユーザー設定。

### ファイル保存時に prettier を実行したい

VSCode で以下の設定を追加する。

```
// prettierに対応している言語は保存時にprettierを行う
"editor.formatOnSave": true,
// 一部の言語について対象外にしたい場合は、個別で設定を切る
// または editor.formatOnSave:trueを必要な言語のみ設定する
// ex) 以下はmarkdownは保存時にprettierを実行しない
"[markdown]": {
  "editor.formatOnSave": false
},
```

なお、`editor.formatOnSave`は prettier 拡張機能固有のオプションではなく、VSCode 本体の保存時フォーマット設定で、設定されているデフォルトフォーマッタ(ここでは prettier)が実行される。

### VSCode で prettier と、node\_modules の prettier ってどんな関係?
ややこしいので、前者を VSCode 用 prettier、後者を prettier とここでは呼ぶ。

前述の通り、VSCode で prettier を実行するには VSCode 用 prettier を入れるだけでよいので、 `yarn add -D prettier`等で **prettier をローカルの node\_modules にインストールする必要はない。** これは、VSCode 用 prettier に、prettier そのものが含まれているから。

しかし、これだと VSCode だけの設定になってしまい、他のエディタや CLI で prettier を実行したい場合不都合がある。 なので、VSCode 拡張用の prettier は、node\_modules に prettier がインストールされていれば、そちらを実行するようになっている。 プロジェクトとして管理するのであれば、別途 node\_modules にインストールしておいたほうがいい。

### フォーマットの設定を変更したい

以下の 3 つの方法がある。

・prettier がサポートしている設定ファイルを書く
・`.editorconfig`を使う
・VSCode の設定に直接書く

#### prettier がサポートしている設定ファイルを書く

推奨。 プロジェクトの直下に`.prettierrc`を JSON or YAML 形式で書く、`.prettierrc.js`にオブジェクト形式で書く、`package.json`に書く等、好みの方法を選ぶ。[https://prettier.io/docs/en/configuration.html](https://prettier.io/docs/en/configuration.html)

```
module.exports = {
  // prettier.config.js or .prettierrc.jsに書く場合の例
  // 行末のセミコロンはいらない
  semi: false
}
```

#### .editorconfig を使う

EditorConfig は、これまた prettier と別のコードフォーマッターで、その設定ファイルが`.editorconfig`になる。 EditorConfig 用の VSCode 拡張機能を入れて、`.editorconfig`に設定を書くと、ファイル編集中にコード整形をしてくれる。 この`.editorconfig`に書いた設定の一部を prettier と連携させることができるみたい。 今回、EditorConfig は使わないので詳細は割愛。

尚、過去に EditorConfig を導入していて、それを忘れたあとに prettier を導入し、ファイル編集中に整形が走り、さらに保存時に別の結果に整形されるという事象に悩まされた。 これは、親ディレクトリに、`.editorconfig`があるから起こることにこのメモを書くことで気づけた。やったね。

#### VSCode の設定に直接書く

非推奨だけど書ける。 尚、prettier の設定ファイル(.editorconfig 含む)を探しにいく場合、 **対象のプロジェクトだけではなく、ユーザーのルートディレクトリまで遡ってファイルを探しに行く。** それでもファイルが見つからなかった場合に、VSCode の設定が使われる。

# VSCode の ESLint の設定を理解する。

eslint は prettier と異なり、VSCode 拡張機能とは別に別途 node\_modules に eslint をインストールする必要がある。 v2 になってから設定方法がかわったみたい。VSCode の設定方法はこちらに記事を参考にさせていただく。[https://qiita.com/mysticatea/items/3f306470e8262e50bb70](https://qiita.com/mysticatea/items/3f306470e8262e50bb70)

保存時に eslint --fix を走らせる設定例。

```
"editor.codeActionsOnSave": {
  "source.fixAll.eslint": true
},
```

# VSCode で prettier と eslint を連携させる

[https://prettier.io/docs/en/integrating-with-linters.html](https://prettier.io/docs/en/integrating-with-linters.html)

1.
`eslint-config-prettier`を追加して、Prettier のルールと衝突するものは ESlint のルールから無効にしておく。 ex) `.prettierrc`でセミコロン不要って書いとくと、eslint のデフォルト設定でセミコロンが必要になってたとしても、エラーにはならない。

2. `eslint-plugin-prettier`を追加して、`eslint --fix`時に prettier も走らせる。

上記は eslint の設定ファイルに、以下を追加するだけでいい。

```
{
  "extends": ["plugin:prettier/recommended"]
}
```

前述の保存時に VSCode で`eslint --fix`を行う例の設定により、prettier の整形処理も行われるようになる。

```
"editor.codeActionsOnSave": {
  "source.fixAll.eslint": true
},
```

## 補足

eslint と prettier を連携させるのであれば、VSCode 保存時に prettier を走らせる必要がなくなるので、個別で設定をオフにしとくといい。

```
"editor.formatOnSave": true,
// 以下はeslint --fixで行う。
"[javascript]": {
  "editor.formatOnSave": false
},
"[typescript]": {
  "editor.formatOnSave": false
},
"[javascriptreact]": {
  "editor.formatOnSave": false
},
"[typescriptreact]": {
  "editor.formatOnSave": false
},
```
import db from '../../modules/database'; import exception from "../../modules/exception"; import tool, {getUUID} from "../../services/tool"; import UserUtil from "../../utils/UserUtil"; import config from "../../config.js"; import {sendmail} from "../../services/ses.js"; export class User { public app: string public async createUser(account: string, pwd: string, userData: any, req: any) { try { const userID = generateUserID(); let data = await db.query(`select \`value\` from \`${config.DB_NAME}\`.private_config where app_name = '${this.app}' and \`key\` = 'glitter_loginConfig'`, []) if (data.length > 0) { data = data[0]['value'] } else { data = { verify: `normal` } } if (data.verify != 'normal') { await db.execute(`delete from \`${this.app}\`.\`user\` where account = ${db.escape(account)} and status = 0`, []) if (data.verify == 'mail') { const checkToken = getUUID() userData=userData ?? {} userData.mailVerify=checkToken const url = `<h1>${data.name}</h1><p> <a href="${config.domain}/api-public/v1/user/checkMail?g-app=${this.app}&token=${checkToken}">點我前往認證您的信箱</a></p>` console.log(`url:${url}`) await sendmail(`service@ncdesign.info`, account, `信箱認證`, url) } } await db.execute(`INSERT INTO \`${this.app}\`.\`user\` (\`userID\`, \`account\`, \`pwd\`, \`userData\`, \`status\`) VALUES (?, ?, ?, ?, ?);`, [ userID, account, await tool.hashPwd(pwd), userData ?? {}, (() => { //當需要認證時傳送認證信 if (data.verify != 'normal') { return 0 } else { return 1 } })() ]) const generateToken = await UserUtil.generateToken({ user_id: parseInt(userID, 10), account: account, userData: {} }) return { token: (() => { if (data.verify == 'normal') { return generateToken; } else { return `` } })(), verify: data.verify } } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'Register Error:' + e, null); } } public async login(account: string, pwd: string) { try { const data: any = (await db.execute(`select * from \`${this.app}\`.user where account = ? 
and status = 1`, [account]) as any)[0] if (await tool.compareHash(pwd, data.pwd)) { data.pwd = undefined data.token = await UserUtil.generateToken({ user_id: data["userID"], account: data["account"], userData: data }) return data } else { throw exception.BadRequestError('BAD_REQUEST', 'Auth failed', null); } } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'Login Error:' + e, null); } } public async getUserData(userID: string) { try { const data: any = (await db.execute(`select * from \`${this.app}\`.user where userID = ?`, [userID]) as any)[0] data.pwd = undefined return data } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'Login Error:' + e, null); } } public async updateUserData(userID: string, par: any) { try { par = { account: par.account, userData: JSON.stringify(par.userData) } console.log(userID) return (await db.query(`update \`${this.app}\`.user SET ? WHERE 1 = 1 and userID = ?`, [par, userID]) as any) } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'Login Error:' + e, null); } } public async resetPwd(userID: string,pwd:string,newPwd:string) { try { const data: any = (await db.execute(`select * from \`${this.app}\`.user where userID = ? and status = 1`, [userID]) as any)[0] if (await tool.compareHash(pwd, data.pwd)) { const result=(await db.query(`update \`${this.app}\`.user SET ? WHERE 1 = 1 and userID = ?`, [{ pwd:await tool.hashPwd(newPwd) }, userID]) as any) return { result:true } } else { throw exception.BadRequestError('BAD_REQUEST', 'Auth failed', null); } // par = { // account: par.account, // userData: JSON.stringify(par.userData) // } // console.log(userID) //await tool.hashPwd(pwd) // return (await db.query(`update \`${this.app}\`.user // SET ? 
// WHERE 1 = 1 // and userID = ?`, [par, userID]) as any) } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'Login Error:' + e, null); } } public async verifyPASS(token:string) { try { const par = { status: 1 } return (await db.query(`update \`${this.app}\`.user SET ? WHERE 1 = 1 and JSON_EXTRACT(userData, '$.mailVerify') = ?`, [par,token]) as any) } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'Login Error:' + e, null); } } public async checkUserExists(account: string) { try { return (await db.execute(`select count(1) from \`${this.app}\`.user where account = ? and status!=0`, [account]) as any)[0]["count(1)"] == 1 } catch (e) { throw exception.BadRequestError('BAD_REQUEST', 'CheckUserExists Error:' + e, null); } } constructor(app: string) { this.app = app } } function generateUserID() { let userID = ''; const characters = '0123456789'; const charactersLength = characters.length; for (let i = 0; i < 8; i++) { userID += characters.charAt(Math.floor(Math.random() * charactersLength)); } userID = `${'123456789'.charAt(Math.floor(Math.random() * charactersLength))}${userID}` return userID; }
<!-- templates/signup.html --> {% extends 'base.html' %} {% load static %} {% block title %}Sign Up{% endblock %} {% block content %} <script src="https://cdn.jsdelivr.net/gh/alpinejs/alpine@v2.x.x/dist/alpine.js" defer></script> <div class="container max-w-full mx-auto md:py-24 px-6"> <div class="max-w-sm mx-auto px-6"> <div class="relative flex flex-wrap "> <div class="w-full relative "> <div class="md:mt-6 "> <div class="text-center font-semibold text-black"> Sign Up </div> <div class="text-center font-base text-black"> Drowsy Driver Detection </div> <form method='post' enctype="multipart/form-data" class="mt-8" x-data="{password: '',password_confirm: ''}"> {% csrf_token %} {% if form.errors %} {% for field in form %} {% for error in field.errors %} <div class="alert alert-danger"> <strong>{{ error|escape }}</strong> </div> {% endfor %} {% endfor %} {% for error in form.non_field_errors %} <div class="alert alert-danger"> <strong>{{ error|escape }}</strong> </div> {% endfor %} {% endif %} <div class="mx-auto max-w-lg "> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.first_name.label }}</span> <input required placeholder="" type="text" name="first_name" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.last_name.label }}</span> <input required placeholder="" type="text" name="last_name" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.username.label }}</span> <input required placeholder="" type="text" name="username" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 
placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> {{field.errors}} <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.email.label }}</span> <input placeholder="" type="email" name="email" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.car_registration_number.label }}</span> <input required style="text-transform: uppercase;" placeholder="" type="text" name="car_registration_number" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.next_of_kin_name.label }}</span> <input required placeholder="" type="text" name="next_of_kin_name" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.next_of_kin_number.label }}</span> <input required placeholder="+254xxxxxxxxx" type="text" name="next_of_kin_number" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.password1.label }}</span> <input required placeholder="" type="password" x-model="password" name="password1" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 
focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="py-1"> <span class="px-1 text-sm text-gray-600">{{ form.password2.label }}</span> <input required placeholder="" type="password" x-model="password_confirm" name="password2" class="text-md block px-3 py-2 rounded-lg w-full bg-white border-2 border-gray-300 placeholder-gray-600 shadow-md focus:placeholder-gray-500 focus:bg-white focus:border-gray-600 focus:outline-none"> </div> <div class="flex justify-start mt-3 ml-4 p-1"> <ul> <li class="flex items-center py-1"> <div :class="{'bg-green-200 text-green-700': password == password_confirm && password.length > 0, 'bg-red-200 text-red-700':password != password_confirm || password.length == 0}" class=" rounded-full p-1 fill-current "> <svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor"> <path x-show="password == password_confirm && password.length > 0" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7"/> <path x-show="password != password_confirm || password.length == 0" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12"/> </svg> </div> <span :class="{'text-green-700': password == password_confirm && password.length > 0, 'text-red-700':password != password_confirm || password.length == 0}" class="font-medium text-sm ml-3" x-text="password == password_confirm && password.length > 0 ? 
'Passwords match' : 'Passwords do not match' "></span> </li> <li class="flex items-center py-1"> <div :class="{'bg-green-200 text-green-700': password.length > 7, 'bg-red-200 text-red-700':password.length < 7 }" class=" rounded-full p-1 fill-current "> <svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor"> <path x-show="password.length > 7" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7"/> <path x-show="password.length < 7" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12"/> </svg> </div> <span :class="{'text-green-700': password.length > 7, 'text-red-700':password.length < 7 }" class="font-medium text-sm ml-3" x-text="password.length > 7 ? 'The minimum length is reached' : 'At least 8 characters required' "></span> </li> </ul> </div> <button type="submit" class="mt-3 text-lg font-semibold bg-blue-500 w-full text-white rounded-full px-6 py-3 block shadow-xl hover:text-white hover:bg-blue-700"> Register </button> </div> </form> <div class="text-sm font-semibold block sm:hidden py-6 flex justify-center"> <a href="{% url 'login' %}" class="text-black font-normal border-b-2 border-gray-200 hover:border-teal-500">Already a member? <span class="text-black font-semibold"> Login </span> </a> </div> </div> </div> </div> </div> </div> {% comment %} <script src='{%static "js/three.r95.min.js"%}'></script> <script src='{%static "js/vanta.net.min.js"%}'></script> <script> VANTA.NET({ el: "#container", mouseControls: true, touchControls: true, minHeight: 200.00, minWidth: 200.00, scale: 1.00, scaleMobile: 1.00, color: 0x0, backgroundColor: 0xffffff }) </script> {% endcomment %} {% endblock %}
package starspire; //import java.io.OutputStream; /** * The debug class is a wrapper for output. * It will allow us to print to system.out but will also enable logging. Not only that it will let us * disable console output for everything in a central place. * @author Patrick Fiaux */ public class Debug { private static boolean enabled = false; private static boolean enable_print = true; private static boolean enable_error_print = true; private static boolean initialized = false; // private static OutputStream printStream; // private static OutputStream errorStream; /** * initialize with System.out and System.err as main streams and enables. */ public static void init() { enabled = true; // printStream = System.out; // printStream = System.err; initialized = true; } /** * Helper method allowing the JUnit test to 'reset' * the static class between tests. */ protected static void reset() { enabled = false; enable_print = true; enable_error_print = true; initialized = false; // printStream = null; // errorStream = null; } /** * Enables or disables the debug prints/logs all together. * @param state true for enabling false for disabling everything. */ public static void setEnabled(boolean state) { if (!initialized) { init(); } enabled = state; } /** * Returns true if the debug is enabled or not. * @return boolean true for enabled false otherwise */ public static boolean isEnabled() { return enabled; } /** * Disables or Enables just the system.out.print and system.out.println. * @param status true to enable false to disable. */ public static void setEnablePrints(boolean status) { enable_print = status; } /** * Tells whether the system.out prints are enabled or disabled. * @return true if enabled, false otherwise. */ public static boolean getEnablePrints() { return enable_print; } /** * Disables or Enables just the system.err prints * @param status true to enable false to disable. 
*/ public static void setEnableErrorPrints(boolean status) { enable_error_print = status; } /** * Tells whether the system.err prints are enabled or disabled. * @return true if enabled, false otherwise. */ public static boolean getEnableErrorPrints() { return enable_error_print; } /** * Prints a string to the active steams and logs. * This is equivalent to System.out.print(String s) * @param s String to print. */ public static void print(String s) { if (enabled) { if (enable_print) { System.out.print(s); } } } /** * Prints a string and new line to the active steams and logs. * This is equivalent to System.out.println(String s) * @param s String to print with a new line */ public static void println(String s) { if (enabled) { if (enable_print) { System.out.println(s); } } } /** * Prints an error string * @param s String to print */ public static void error(String s) { if (enabled) { if (enable_error_print) { System.err.print(s); } } } /** * Prints an error string with a line end * @param s string to print as error */ public static void errorln(String s) { if (enabled) { if (enable_error_print) { System.err.println(s); } } } }
"""Car Module.""" class Car: """Car Class.""" def __init__(self, make, model, year): """Initialize Car.""" self.make = make self.model = model self.year = year def get_description(self): """Get a full description of the car.""" return f"{self.year} {self.make} {self.model}" class ElectricCar(Car): """Represent aspects of a car, specific to electric vehicles.""" def __init__(self, make, model, year, battery_size): """ Initialize attributes of the parent class. Then initialize attributes specific to an electric car. """ super().__init__(make, model, year) self.battery_size = battery_size def get_description(self): """Over-ride the get_description() method; this is polymorphism.""" return f"{super().get_description()}, battery={self.battery_size}-kWh" #honda = Car("Honda", "CRV", 2023) #print(honda.get_description()) tesla = ElectricCar("Tesla", "Model 3", 2023, 75) print(tesla.get_description())
package com.anderson.secomp_backend_help_RS.controller; import com.anderson.secomp_backend_help_RS.dto.UserDto; import com.anderson.secomp_backend_help_RS.model.User; import com.anderson.secomp_backend_help_RS.service.UserService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.*; import java.util.List; @RestController @RequestMapping("user/") public class UserController { @Autowired private UserService service; @CrossOrigin( origins = "*") @PostMapping("create") public ResponseEntity create(@RequestBody UserDto dto){ try { service.validationUser(dto); service.saveUser(dto); return ResponseEntity.status(HttpStatus.CREATED).body(dto); }catch (Exception exception){ System.out.println(exception.getMessage()); return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body("User name or Email already registered"); } } @CrossOrigin( origins = "*") @GetMapping("getAll") public List<User> getAll(){ return service.getAll(); } @PutMapping("updateUserName/{id}") public ResponseEntity updateUserName(@PathVariable Integer id, @RequestBody User user){ if (service.findUser(id, user)){ service.saveUserNameUpdate(id, user); return ResponseEntity.status(HttpStatus.OK).body("Username updated successfully"); }else { return ResponseEntity.status(HttpStatus.NOT_FOUND).body("User not found"); } } @PutMapping("updateEmail/{id}") public ResponseEntity updateEmail(@PathVariable Integer id, @RequestBody User user){ if (service.findUser(id, user)){ service.saveUserEmail(id, user); return ResponseEntity.status(HttpStatus.OK).body("Email updated successfully"); }else { return ResponseEntity.status(HttpStatus.NOT_FOUND).body("User not found"); } } @DeleteMapping("delete/{id}") public ResponseEntity delete(@PathVariable Integer id){ if (service.delete(id)){ return ResponseEntity.status(HttpStatus.OK).body("Delete user successfully"); }else 
{ return ResponseEntity.status(HttpStatus.NOT_FOUND).body("User not found"); } } }
import 'package:flutter/material.dart'; import 'package:go_router/go_router.dart'; Future<bool?> showConfirmationDialog({ required BuildContext context, String title = "Are you sure ?", required String description, required VoidCallback? onValidate, }) async { return await showDialog<bool?>( context: context, builder: (context) { return AlertDialog( title: Text(title), content: Text(description), actions: [ TextButton( onPressed: GoRouter.of(context).pop, child: const Text("Cancel") ), FilledButton( onPressed: () { onValidate?.call(); GoRouter.of(context).pop(true); }, child: const Text("Yes") ), ], ); } ); }
--- id: 657efdcf7fe23b76c0cff9ec title: Step 7 challengeType: 20 dashedName: step-7 --- # --description-- Add an `else` clause on the same level as the existing `if` statement, inside the `for` loop. Add characters that are already in lowercase to the list of converted characters inside the body of the `else` clause. # --hints-- You should add an `else` clause inside the `for` loop. Don't forget the colon at the end. ```js ({ test: () => { const transformedCode = code.replace(/\r/g, ""); const convert_to_snake_case = __helpers.python.getDef("\n" + transformedCode, "convert_to_snake_case"); const { function_body } = convert_to_snake_case; assert.match(function_body, / +else:/); } }) ``` You should use the `.append()` method to add `char` to the `snake_cased_char_list` variable. ```js ({ test: () => { const transformedCode = code.replace(/\r/g, ""); const convert_to_snake_case = __helpers.python.getDef("\n" + transformedCode, "convert_to_snake_case"); const { function_body } = convert_to_snake_case; assert.match(function_body, / +snake_cased_char_list.append\(\s*char\s*\)/); } }) ``` # --seed-- ## --seed-contents-- ```py def convert_to_snake_case(pascal_or_camel_cased_string): snake_cased_char_list = [] for char in pascal_or_camel_cased_string: --fcc-editable-region-- if char.isupper(): converted_character = '_' + char.lower() snake_cased_char_list.append(converted_character) --fcc-editable-region-- ```
<template> <div class="p-2 rounded text-sm "> <div class="relative"> <Textarea @keyup.ctrl.enter="saveComment" ref="textareaRef" autocomplete="new-text" rows="3" v-model="content" class="dark:bg-slate-500 border-separate" :placeholder="placeholder" </Textarea> <div class="absolute right-2 bottom-1 cursor-pointer text-xl" @click="toggleShowEmoji" ref="showEmojiRef">😊</div> </div> <Emoji v-if="showEmoji" class="mt-2" @emoji-selected="emojiSelected"/> <div class="flex flex-row items-center justify-end mt-2 gap-2"> <Input placeholder="昵称,必填" type="text" v-model="info.username" class=" dark:bg-slate-500 text-xs sm:text-sm py-0.5"></Input> <Input placeholder="主页,可空" type="text" v-model="info.website" class="dark:bg-slate-500 text-xs sm:text-sm py-0.5"> </Input> <Input placeholder="邮箱,可空" type="text" v-model="info.email" class="hidden sm:block dark:bg-slate-500 text-xs sm:text-sm py-0.5"></Input> <Button size="sm" @click="saveComment" :disabled="pending">发表评论</Button> </div> </div> </template> <script setup lang="ts"> import { toast } from 'vue-sonner'; import { Input } from '@/components/ui/input' import { Button } from '@/components/ui/button' import { Textarea } from '@/components/ui/textarea' import { useAnimate, useStorage } from '@vueuse/core' import { insertTextAtCursor } from '~/lib/utils'; import type { User } from '~/lib/types'; const userinfo = useState<User>('userinfo') const config = useRuntimeConfig() const textareaRef = ref() const token = useCookie('token') const content = ref('') const placeholder = ref('发表评论') const emit = defineEmits(['commentAdded']) const showEmoji = ref(false) const keyframes = { transform: 'rotate(360deg)' } const props = defineProps<{ memoId: number, reply?: string, replyId?: number }>() const showEmojiRef = ref<HTMLElement>() const info = useStorage('anonymous', { email: '', website: '', username: '' }) onMounted(() => { textareaRef.value?.getRef().focus() if (token.value && userinfo.value && userinfo.value.nickname) { 
info.value.username = userinfo.value.nickname } }) const toggleShowEmoji = () => { showEmoji.value = !showEmoji.value useAnimate(showEmojiRef.value, keyframes, { duration: 1000, easing: 'ease-in-out' }) } const emojiSelected = (emoji: string) => { const target = textareaRef.value?.getRef() as HTMLTextAreaElement insertTextAtCursor(emoji, target) content.value = target.value! // showEmoji.value = false } const pending = ref(false) const saveComment = async () => { if (!content.value) { toast.warning('先填写评论') return } if (content.value.length > config.public.momentsCommentMaxLength) { toast.warning('评论超长') return } if (!info.value.username) { toast.warning('用户名必填') return } if (info.value.username.length > 10) { toast.warning('用户名') return } if (info.value.website.length > 100) { toast.warning('网站地址超长') return } if (config.public.googleRecaptchaSiteKey) { //@ts-ignore grecaptcha.ready(function () { //@ts-ignore grecaptcha.execute(config.public.googleRecaptchaSiteKey, { action: 'submit' }).then(async function (token) { submitComment(token) }); }); } else { submitComment() } } const submitComment = async (token?: string) => { pending.value = true const res = await $fetch('/api/comment/save', { method: 'POST', body: JSON.stringify({ content: content.value, memoId: props.memoId, replyTo: props.reply, replyToId: props.replyId, author: false, email: info.value.email, website: info.value.website, username: info.value.username, token }) }) if (res.success) { toast.success('评论成功') content.value = '' emit('commentAdded') } else { toast.warning(res.message || '评论失败') } pending.value = false } onMounted(async () => { if (props.reply) { placeholder.value = "回复给@" + props.reply } }) </script> <style scoped></style>
use log::LevelFilter; use std::path::PathBuf; #[derive(Debug, clap::Parser)] #[command(version)] pub struct Command { #[command(subcommand)] pub command: SubCommands, #[arg(long, default_value = "askew_config.toml")] pub config: PathBuf, /// Print debug logs #[arg(short = 'l', long, default_value_t = LevelFilter::Info)] pub log_level: LevelFilter, /// Simplelog allow filters #[arg(short = 'a', long)] pub log_allow: Vec<String>, /// Simplelog ignore filters #[arg(short = 'i', long)] pub log_ignore: Vec<String>, } #[derive(Debug, clap::Subcommand)] pub enum SubCommands { /// Run and open editor's window Run(RunArguments), /// Execute commands on running editor instance Ipc(IpcArguments), } #[derive(Debug, clap::Args)] pub struct IpcArguments { #[arg()] pub message: String, #[arg(short, long)] pub socket_path: Option<PathBuf>, } #[derive(Debug, clap::Args)] pub struct RunArguments { #[arg(long)] pub canvas_curve_samples: Option<u32>, #[arg(short = 'b', long)] pub background_image_path: Option<PathBuf>, #[arg(short = 'o', long)] pub project_path: Option<PathBuf>, /// Command to execute on start, can be specified multiple times #[arg(short = 'c', long)] pub startup_commands: Vec<String>, #[arg(short = 'n', long)] pub random_points: Option<u32>, #[arg(long)] pub font_size: Option<u32>, #[arg(long)] pub font_path: Option<PathBuf>, /// If empty, then IPC is disabled #[arg(long)] pub ipc_socket_path: Option<Option<PathBuf>>, }
import 'package:easy_localization/easy_localization.dart'; import 'package:expansion/domain/repository/user_repository.dart'; import 'package:expansion/utils/value.dart'; import 'package:get/get.dart'; import 'package:surf_logger/surf_logger.dart'; /// класс для отлеживания очков и апгрейда /// различных параметров игрока class AllUpgrade { final int maxLevel = 5; // максимальный уровень апгрейда int score; // Начальное количество очков int allScore; // Общее количество очков double minShild = 20; // минимальный ап щита List<Upgrade> list = []; AllUpgrade({required this.score, required this.allScore, required this.list}); factory AllUpgrade.fromJson(Map<String, dynamic> json) { final listJson = json['list'] as List<dynamic>; // ignore: avoid_dynamic_calls Logger.d('temp == ${listJson[0].runtimeType} == ${listJson[0]}'); return AllUpgrade( score: json['score'] as int, allScore: json['allScore'] as int, list: List<Upgrade>.from( listJson.map((x) => Upgrade.fromJson(x as Map<String, dynamic>)))); } factory AllUpgrade.initialOur() { final list = <Upgrade>[ Upgrade.from(TypeUp.shipSpeed, 10), // скорость кораблей Upgrade.from(TypeUp.shipDurability, 5), // прочность кораблей Upgrade.from(TypeUp.shipBuildSpeed, 10), // скорость прироста кораблей Upgrade.from( TypeUp.resourceIncomeSpeed, 10), // скорость прироста ресурсов Upgrade.from(TypeUp.shieldDurability, 15), // прочность щита // Начальное значение кораблей на базе в начале игры Upgrade( level: 0, percenstValue: 0, nextScore: scoreMultiplier, nextValue: 10, type: TypeUp.beginShips, value: 100), ]; return AllUpgrade(list: list, score: 0, allScore: 0); } /// иницилизируем систему upgrade в начале игры factory AllUpgrade.initialEnemy() { final list = <Upgrade>[ Upgrade.from(TypeUp.shipSpeed, 5), // скорость кораблей Upgrade.from(TypeUp.shipDurability, 5), // прочность кораблей Upgrade.from(TypeUp.shipBuildSpeed, 5), // скорость прироста кораблей Upgrade.from(TypeUp.resourceIncomeSpeed, 5), // скорость прироста ресурсов 
Upgrade.from(TypeUp.shieldDurability, 5), // прочность щита // Начальное значение кораблей на базе в начале игры Upgrade( level: 0, nextScore: scoreMultiplier, nextValue: 10, percenstValue: 0, type: TypeUp.beginShips, value: 100), Upgrade.from(TypeUp.tic, 5), // скорость отклика врага ]; return AllUpgrade(list: list, score: 0, allScore: 0); } double getFromType(TypeUp type) { return list.firstWhere((element) => element.type == type).value; } double shipSpeed() { return list[0].value * (1 + list[0].percenstValue / 100); } double shipDurability() { return list[1].value * (1 + list[1].percenstValue / 100); } double shipBuildSpeed() { return list[2].value * (1 + list[2].percenstValue / 100); } double resourceIncomeSpeed() { return list[3].value * (1 + list[3].percenstValue / 100); } double shieldDurability() { return list[4].value * (1 + list[4].percenstValue / 100); } int beginShips() { return (list[5].value * (1 + list[5].percenstValue / 100)).round(); } double tic() { return list[6].value * (1 + list[6].percenstValue / 100); } /// метод для чужих апгрейд всех параметров после каждого боя void toAllUpgrade() { for (final item in list) { toUpgrade(item.type, isEnemy: true); } } // Метод для апгрейда параметра void toUpgrade(TypeUp type, {bool isEnemy = false}) { final index = list.indexWhere((element) => element.type == type); final upgrade = list[index]; upgrade.level++; upgrade.percenstValue += isEnemy ? 
Get.find<UserRepository>().game.level.enemyPercent : upgrade.nextValue; score -= upgrade.nextScore; upgrade.nextScore *= 2; } bool isUpgrade(Upgrade upgrade) { return upgrade.nextScore < score; } Upgrade getUpgradeFromType(TypeUp type) { return list.firstWhere((element) => element.type == type); } Map<String, dynamic> toJson() => { 'score': score, 'allScore': allScore, 'list': list, }; } enum TypeUp { shipSpeed, shipDurability, shipBuildSpeed, resourceIncomeSpeed, shieldDurability, beginShips, tic; String get text => tr(name); String get image { switch (this) { case TypeUp.shipSpeed: return 'assets/svg/rocket.svg'; case TypeUp.shipDurability: return 'assets/svg/shield.svg'; case TypeUp.shipBuildSpeed: return 'assets/svg/rocket.svg'; case TypeUp.resourceIncomeSpeed: return 'assets/svg/hammers.svg'; case TypeUp.shieldDurability: return 'assets/svg/shield.svg'; case TypeUp.beginShips: return 'assets/svg/rocket.svg'; case TypeUp.tic: return 'assets/svg/ship.svg'; } } String get textHelp { switch (this) { case TypeUp.shipSpeed: return tr('${name}_help'); case TypeUp.shipDurability: return tr('${name}_help'); case TypeUp.shipBuildSpeed: return tr('${name}_help'); case TypeUp.resourceIncomeSpeed: return tr('${name}_help'); case TypeUp.shieldDurability: return tr('${name}_help'); case TypeUp.tic: return tr('${name}_help'); case TypeUp.beginShips: return tr('${name}_help'); } } } class Upgrade { /// тип апгрейда TypeUp type; /// начальное значение в процентах double value; /// начальный уровень int level; /// процент на который идет прирост с последующим уровнем int nextValue; /// текущий процент int percenstValue; /// начальный уровень очков для перехода на новый уровень int nextScore; Upgrade({ required this.type, required this.value, required this.level, required this.nextValue, required this.nextScore, required this.percenstValue, }); factory Upgrade.from(TypeUp type, int nextValue) { return Upgrade( type: type, value: 1, level: 0, percenstValue: 0, nextValue: 
nextValue, nextScore: scoreMultiplier); } factory Upgrade.fromJson(Map<String, dynamic> json) { return Upgrade( type: TypeUp.values[json['type'] as int], value: json['value'] as double, level: json['level'] as int, nextValue: json['nextValue'] as int, nextScore: json['nextScore'] as int, percenstValue: json['percenstValue'] as int, ); } Map<String, dynamic> toJson() => { 'type': type.index, 'value': value, 'level': level, 'nextValue': nextValue, 'nextScore': nextScore, 'percenstValue': percenstValue, }; }
<?php namespace App\Models; use Illuminate\Contracts\Auth\MustVerifyEmail; use Illuminate\Foundation\Auth\User as Authenticatable; use Illuminate\Notifications\Notifiable; class User extends Authenticatable { use Notifiable; /** * The attributes that are mass assignable. * * @var array */ protected $fillable = [ 'name', 'email', 'password', ]; /** * The attributes that should be hidden for arrays. * * @var array */ protected $hidden = [ 'password', 'remember_token', 'id', 'email', 'email_verified_at', 'created_at', 'updated_at' ]; /** * The attributes that should be cast to native types. * * @var array */ protected $casts = [ 'email_verified_at' => 'datetime', ]; public function userDetails() { return $this->hasOne('App\Models\UserDetails'); // nella tabella che non ha il foreign key } public function posts() { return $this->hasMany('App\Models\Post'); // nella tabella che non ha il foreign key } }
package com.mary.avjava.services; import java.util.List; import java.util.Optional; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.dao.DataIntegrityViolationException; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.Sort.Direction; import org.springframework.stereotype.Service; import com.mary.avjava.domain.Cliente; import com.mary.avjava.dto.ClienteDTO; import com.mary.avjava.repositories.ClienteRepository; import com.mary.avjava.services.exceptions.DataIntegrityException; import com.mary.avjava.services.exceptions.ObjectNotFoundException; @Service public class ClienteService { @Autowired private ClienteRepository repo; public Cliente find(Integer id) { Optional<Cliente> obj = repo.findById(id); return obj.orElseThrow(() -> new ObjectNotFoundException( "Objeto não encontrado! Id: " + id + ", Tipo: " + Cliente.class.getName())); } public Cliente insert(Cliente obj) { obj.setId(null); return repo.save(obj); } public Cliente update (Cliente obj) { find(obj.getId()); return repo.save(obj); } public void delete(Integer id) { find(id); try { repo.deleteById(id); }catch(DataIntegrityViolationException e) { throw new DataIntegrityException("Não é possível excluir um cliente que possui endereço"); } } public List<Cliente> findAll(){ return repo.findAll(); } public Page<Cliente> findPage(Integer page, Integer linesPerPage, String orderBy, String direction){ PageRequest pageRequest = PageRequest.of(page, linesPerPage, Direction.valueOf(direction), orderBy); return repo.findAll(pageRequest); } public Cliente fromDTO(ClienteDTO objDto) { return new Cliente (objDto.getId(),objDto.getNome(), objDto.getNascimento() ); } }
import styled, { css, DefaultTheme } from 'styled-components'; import { TextType } from './Text'; type TextProps = { type: TextType; theme: DefaultTheme; isActive?: boolean; }; const textStyled = ({ theme, type, isActive = false }: TextProps) => { switch (type) { case 'logo-title': { return css` font-weight: 700; font-size: 1.8rem; line-height: 2.7rem; color: ${theme.colors.blackText}; `; } case 'logo-link': { return css` font-weight: 500; font-size: 2.4rem; line-height: 3.6rem; color: ${isActive ? theme.colors.primaryBlue : theme.colors.blackText}; `; } case 'principal': { return css` font-weight: 700; font-size: 6.4rem; line-height: 9.6rem; color: ${theme.colors.white}; `; } case 'title-card': { return css` font-weight: 700; font-size: 2rem; line-height: 3rem; color: ${theme.colors.neutralDark}; `; } case 'text-card': { return css` font-size: 1.4rem; line-height: 2.1rem; color: ${theme.colors.blackText}; `; } case 'title-section': { return css` font-weight: 500; font-size: 5.5rem; line-height: 8.2rem; color: ${theme.colors.white}; `; } case 'text-section': { return css` font-size: 2.4rem; line-height: 3.6rem; color: ${theme.colors.white}; `; } case 'title-alternative': { return css` font-weight: 500; font-size: 1.8rem; line-height: 2.7rem; color: ${theme.colors.blackText}; `; } case 'text-alternative': { return css` font-weight: 400; font-size: 1.2rem; line-height: 1.5rem; font-family: ${theme.font.family.secondary}; color: ${theme.colors.blackText}; `; } case 'sub-title': { return css` font-weight: 500; font-size: 2.2rem; line-height: 3.3rem; color: ${theme.colors.gray}; `; } default: return css``; } }; export const Text = styled.p<TextProps>` ${({ theme, type, isActive }) => css` font-family: ${theme.font.family.primary}; font-style: normal; font-weight: 400; ${textStyled({ theme, type, isActive })} `} `;
// This file is part of www.nand2tetris.org
// and the book "The Elements of Computing Systems"
// by Nisan and Schocken, MIT Press.
// File name: projects/03/a/RAM64.hdl

/**
 * Memory of 64 registers, each 16 bit-wide. Out holds the value
 * stored at the memory location specified by address. If load==1, then
 * the in value is loaded into the memory location specified by address
 * (the loaded value will be emitted to out from the next time step onward).
 */

CHIP RAM64 {
    IN in[16], load, address[6];
    OUT out[16];

    PARTS:
    // High 3 address bits choose which of the 8 RAM8 banks receives the
    // load signal; all other banks see load=0 and keep their state.
    DMux8Way(in=load, sel=address[3..5], a=a8RAM, b=b8RAM, c=c8RAM, d=d8RAM, e=e8RAM, f=f8RAM, g=g8RAM, h=h8RAM);
    // Low 3 address bits select the register inside every bank; only the
    // bank whose demuxed load line is asserted actually stores `in`.
    RAM8(in=in, load=a8RAM, address=address[0..2], out=outa);
    RAM8(in=in, load=b8RAM, address=address[0..2], out=outb);
    RAM8(in=in, load=c8RAM, address=address[0..2], out=outc);
    RAM8(in=in, load=d8RAM, address=address[0..2], out=outd);
    RAM8(in=in, load=e8RAM, address=address[0..2], out=oute);
    RAM8(in=in, load=f8RAM, address=address[0..2], out=outf);
    RAM8(in=in, load=g8RAM, address=address[0..2], out=outg);
    RAM8(in=in, load=h8RAM, address=address[0..2], out=outh);
    // Route the addressed bank's output to out, keyed on the same high bits.
    Mux8Way16(a=outa, b=outb, c=outc, d=outd, e=oute, f=outf, g=outg, h=outh, sel=address[3..5], out=out);
}
<!doctype html> <html lang="en"> <head> <title>Title</title> <!-- Required meta tags --> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <link rel="stylesheet" href="./css/style.css"> <!-- Bootstrap CSS --> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous"> <link href="https://fonts.googleapis.com/css?family=Barlow+Semi+Condensed&display=swap" rel="stylesheet"> <link href="https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" integrity="sha384-wvfXpqpZZVQGK6TAh5PVlGOfQNHSoD2xbE+QkPxCAFlNEevoEH3Sl0sibVcOQVnN" crossorigin="anonymous"> </head> <body> <section class="section1"> <i class="fa fa-user" id="icon_user"></i> <h1>SMART HOME</h1> <i class="fa fa-home" id="icon"></i> <p>Get an easy life</p> <button type="button" class="btn btn-secondary">Get yours!</button> </section> <section class="section2"></section> <section class="section3"> <div class="container "> <div class="row display_flex"> <div class="col-6 mt-2"> <div id="carouselExampleIndicators" class="carousel slide" data-ride="carousel"> <ol class="carousel-indicators"> <li data-target="#carouselExampleIndicators" data-slide-to="0" class="active"></li> <li data-target="#carouselExampleIndicators" data-slide-to="1"></li> <li data-target="#carouselExampleIndicators" data-slide-to="2"></li> </ol> <div class="carousel-inner"> <div class="carousel-item active"> <img src="./img/akrales_181129_3107_0012.0.jpg" class="d-block w-100 imagen" alt="..."> </div> <div class="carousel-item"> <img src="./img/home.jpg" class="d-block w-100 imagen" alt="..."> </div> <div class="carousel-item"> <img src="./img/nest_cam_full.jpg" class="d-block w-100 imagen" alt="..."> </div> </div> <a class="carousel-control-prev" href="#carouselExampleIndicators" role="button" 
data-slide="prev"> <span class="carousel-control-prev-icon" aria-hidden="true"></span> <span class="sr-only">Previous</span> </a> <a class="carousel-control-next" href="#carouselExampleIndicators" role="button" data-slide="next"> <span class="carousel-control-next-icon" aria-hidden="true"></span> <span class="sr-only">Next</span> </a> </div> </div> <div class="col-4 mt-5 ml-5"> <ul> <li aria-disabled="true">Control the temperature of your home since your phone</li> <li> Look who´s knocking the door since your phone</li> <li>Detect when there is some movement and look who is inside your home </li> <li>Open and close the curtains </li> </ul> </div> </div> </div> </section> <section class="section2"> <div class="container"> <div class="row"> </div> </div> </section> <section class="section4"> <p id="section4__p"> Make your home more comfortable and take advantage of be in your home, rest more and be more sure. </p> <a href="./FAQS.html"> <button type="button" class="btn btn-secondary">FAQS</button> </a> </section> <section class="section5"> <div class="container"> <div class="row"> <div class="col-6 mt-3"> <h5>Contact</h5> <form> <div class="form-group"> <label for="exampleFormControlInput1">Email address</label> <input type="email" class="form-control" id="exampleFormControlInput1" placeholder="name@example.com"> </div> <div class="form-group"> <label for="exampleFormControlTextarea1">Comments</label> <textarea class="form-control" id="exampleFormControlTextarea1" rows="3"></textarea> </div> <button type="submit" class="btn btn-primary">Submit</button> </form> </div> <div class="col-6 iconos"> <i class="fa fa-twitter-square" id="twet"></i> <i class="fa fa-google-plus-square" id="google"></i> <i class="fa fa-facebook" id="face"></i> </div> </div> </div> </section> <footer> <p>&copy; 2020 alberto.perez.1509@outlook.com</p> <a href="https://github.com/ALBERTOGUER"> <i class="fa fa-github" id="git"></i> </a> </footer> <!-- Optional JavaScript --> <!-- jQuery first, then 
Popper.js, then Bootstrap JS --> <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"> </script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js" integrity="sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1" crossorigin="anonymous"> </script> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"> </script> </body> </html>
using System; using System.Diagnostics; using System.Threading.Tasks; using Microsoft.AspNetCore.Builder; using Microsoft.AspNetCore.Http; using Microsoft.Extensions.Logging; using Prometheus; namespace gpxoverlay { public class PrometheusMiddleware { private readonly RequestDelegate _next; private readonly ILogger _logger; public PrometheusMiddleware( RequestDelegate next , ILoggerFactory loggerFactory ) { this._next = next; this._logger = loggerFactory.CreateLogger<PrometheusMiddleware>(); } public async Task Invoke(HttpContext httpContext) { var path = httpContext.Request.Path.Value; var method = httpContext.Request.Method; var totalRequests = Metrics.CreateCounter($"dotnet_request_total", "HTTP Requests Total", new CounterConfiguration { LabelNames = new[] { "path", "method", "status" } }); var concurrentRequests = Metrics.CreateGauge($"dotnet_request_current", "HTTP Requests Current", new GaugeConfiguration { LabelNames = new[] { "path", "method" } }); var requestTime = Metrics.CreateHistogram($"dotnet_request_time", "HTTP Request Time", new HistogramConfiguration { LabelNames = new[] { "path", "method" } }); var requestSize = Metrics.CreateHistogram($"dotnet_request_size", "HTTP Request Size in Byte", new HistogramConfiguration { LabelNames = new[] { "path", "method" } }); var responseSize = Metrics.CreateHistogram($"dotnet_response_size", "HTTP Response Size in Byte", new HistogramConfiguration { LabelNames = new[] { "path", "method", "status" } }); try { if (httpContext.Request.ContentLength.HasValue) { requestSize.Labels(path, method).Observe((double)httpContext.Request.ContentLength); } using (concurrentRequests.WithLabels(path, method).TrackInProgress()) using (requestTime.WithLabels(path, method).NewTimer()) { await _next.Invoke(httpContext); } } catch (Exception) { totalRequests.Labels(path, method, httpContext.Response.StatusCode.ToString()).Inc(); if (httpContext.Response.ContentLength.HasValue) { responseSize.Labels(path, method, 
httpContext.Response.StatusCode.ToString()).Observe((double)httpContext.Response.ContentLength); } throw; } if (path != "/metrics") { totalRequests.Labels(path, method, httpContext.Response.StatusCode.ToString()).Inc(); if (httpContext.Response.ContentLength.HasValue) { responseSize.Labels(path, method, httpContext.Response.StatusCode.ToString()).Observe((double)httpContext.Response.ContentLength); } } } } public static class PrometheusMiddlewareExtensions { public static IApplicationBuilder UsePrometheusMiddleware(this IApplicationBuilder builder) { return builder.UseMiddleware<PrometheusMiddleware>(); } } }
<?php

use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;

return new class extends Migration
{
    /**
     * Run the migrations.
     *
     * Creates the "subcategorias" table; each subcategory references its
     * parent row in "categorias" through categoria_id.
     */
    public function up(): void
    {
        Schema::create('subcategorias', function (Blueprint $table) {
            $table->id();
            $table->string('subcategoria');
            $table->text('descripcion')->nullable();
            $table->string('imagen');
            // was ->Integer(); PHP resolves method names case-insensitively,
            // but lowercase matches the documented Blueprint API.
            $table->integer('estado');
            $table->unsignedBigInteger('categoria_id');
            $table->foreign('categoria_id')->references('id')->on('categorias');
            $table->timestamps();
        });
    }

    /**
     * Reverse the migrations.
     */
    public function down(): void
    {
        Schema::dropIfExists('subcategorias');
    }
};
package com.gzunzu.videogames.api; import com.gzunzu.common.adapters.api.BaseController; import com.gzunzu.videogames.domain.dto.GenreDTO; import com.gzunzu.videogames.ports.GenreService; import lombok.RequiredArgsConstructor; import org.springframework.http.MediaType; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; import java.util.List; @RestController @RequestMapping(name = "Developers controller", path = "api/1.0/genres", produces = MediaType.APPLICATION_JSON_VALUE) @RequiredArgsConstructor public class GenreController extends BaseController<GenreDTO> { private final GenreService genreService; @GetMapping(name = "Get all genres") public ResponseEntity<List<GenreDTO>> getAll() { final List<GenreDTO> result = this.genreService.getAll(); return super.getResponse(result); } @GetMapping(name = "Get a genre by ID", value = "/{id}") public ResponseEntity<GenreDTO> getById(@PathVariable(name = "id") final long id) { final GenreDTO result = this.genreService.getById(id); return super.getResponse(result); } @GetMapping(name = "Get a genre by name", value = "/name/{name}") public ResponseEntity<GenreDTO> getByName(@PathVariable(name = "name") final String name) { final GenreDTO result = this.genreService.getByName(name); return super.getResponse(result); } @PostMapping(name = "Insert a genre", consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<GenreDTO> insert(@RequestBody final GenreDTO genreDTO) { final GenreDTO result = this.genreService.insert(genreDTO); return 
super.postResponse(result); } @PutMapping(name = "Update a genre", consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<GenreDTO> update(@RequestBody final GenreDTO genreDTO) { final GenreDTO result = this.genreService.update(genreDTO); return super.putResponse(result); } @DeleteMapping(name = "Delete a genre by ID", value = "/{id}", produces = MediaType.ALL_VALUE) public ResponseEntity<GenreDTO> delete(@PathVariable final Long id) { final boolean result = this.genreService.delete(id); return super.deleteResponse(result); } }
import 'package:flutter_test/flutter_test.dart'; import 'package:runnoter/data/model/workout.dart'; import 'package:runnoter/ui/service/workout_stage_service.dart'; void main() { test( 'calculate distance of workout stage, ' 'distance stage, ' 'should simply return distance', () { const double expectedDistance = 10.0; const stage = WorkoutStageCardio( distanceInKm: expectedDistance, maxHeartRate: 150, ); final double distance = calculateDistanceOfWorkoutStage(stage); expect(distance, expectedDistance); }, ); test( 'calculate distance of workout stage, ' 'series stage, ' 'should sum series, walking and jogging distances and multiply by number of series', () { const stage = WorkoutStageHillRepeats( amountOfSeries: 10, seriesDistanceInMeters: 100, walkingDistanceInMeters: 20, joggingDistanceInMeters: 80, ); const double expectedDistance = (10 * (100 + 20 + 80)) / 1000; final double distance = calculateDistanceOfWorkoutStage(stage); expect(distance, expectedDistance); }, ); }
import { Callout } from "nextra/components"; import REPL from "../../components/REPL"; > Returns a new string with one, some, or all matches of a pattern replaced by a replacement. ## Syntax ```ts import { strReplace } from '@opentf/std'; strReplace(str: string, pattern: string | RegExp, replacement: string | Function, options?: {all: boolean, case: boolean} ): string; ``` <Callout type="info"> The replacement function can be in the form of [specifying_a_function_as_the_replacement](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace#specifying_a_function_as_the_replacement) </Callout> <Callout type="info"> - The option `all` refers to RegExp `g` global flag. - The option `case` refers to RegExp `i` ignore case / case insensitive flag. </Callout> ## Examples ```ts strReplace('abc', 'a', 'x') //=> 'xbc' strReplace('abbc', 'b', '', { all: true }) //=> 'ac' strReplace('aBbBc', 'B', '', { all: true, case: true }) //=> 'ac' const paragraph = "I think Ruth's dog is cuter than your dog!"; const regex = /dog/; strReplace(paragraph, regex, 'ferret') //=> "I think Ruth's ferret is cuter than your dog!" const str = 'Twas the night before Xmas...'; strReplace(str, /xmas/, 'Christmas', { case: true }) //=> 'Twas the night before Christmas...' const str = 'Apples are round, and apples are juicy.'; strReplace(str, /apple/, 'Orange', { all: true, case: true }) //=> 'Oranges are round, and Oranges are juicy.' function convert(str, p1) { return `${((p1 - 32) * 5) / 9}C`; } const test = /(-?\d+(?:\.\d*)?)F\b/; strReplace('212F', test, convert) //=> '100C' ``` ## Try <REPL code={`const { strReplace } = require('@opentf/std'); const str = 'Apples are round, and apples are juicy.'; strReplace(str, 'apple', 'orange'); `} />
using ActivityImporter.Engine.ActivityAPI.Models; using Common.DataUtils.Http; using Microsoft.Extensions.Logging; using Newtonsoft.Json; namespace ActivityImporter.Engine.ActivityAPI.Loaders; /// <summary> /// Activity API implementation for ActivitySummaryLoader /// </summary> public class WebContentMetaDataLoader : ContentMetaDataLoader<ActivityReportInfo> { private readonly string _tenantId; private readonly ConfidentialClientApplicationThrottledHttpClient _httpClient; public WebContentMetaDataLoader(string tenantId, ILogger telemetry, ConfidentialClientApplicationThrottledHttpClient httpClient) : base(telemetry) { _tenantId = tenantId; _httpClient = httpClient; } /// <summary> /// Recursively get all metadata for an event query URL /// </summary> /// <returns>List of events</returns> protected override async Task<List<ActivityReportInfo>> LoadAllActivityReports(string auditContentType, TimePeriod chunk, int batchId) { // Build the uri to download var metadataUri = $"https://manage.office.com/api/v1.0/{_tenantId}" + $"/activity/feed/subscriptions/content?ContentType={auditContentType}&PublisherIdentifier={_tenantId}&" + $"startTime={FormatDate(chunk.Start)}&endTime={FormatDate(chunk.End)}"; var data = await DownloadMetadata(metadataUri, batchId); #if DEBUG if (data.Count > 0) { Console.WriteLine($"DEBUG: GET METADATA {batchId}: {data.Count.ToString("N0")} change reports found between '{chunk.Start}'-'{chunk.End}'."); } #endif return data; } /// <summary> /// Downloads change details for a change report, all pages /// </summary> public async Task<List<ActivityReportInfo>> DownloadMetadata(string changeReportUri, int batchId) { var responseMeta = new List<ActivityReportInfo>(); // Try and download metadata content string? nextPageUri = null; const string NEXT_PAGE_PARAM = "NextPageUri"; // Get this batch var response = await _httpClient.GetAsyncWithThrottleRetries(changeReportUri, _telemetry); // Read the content. 
var responseFromServer = await response.Content.ReadAsStringAsync(); try { response.EnsureSuccessStatusCode(); // More data to get for events? if (response.Headers.Contains(NEXT_PAGE_PARAM)) { nextPageUri = response.Headers.GetValues(NEXT_PAGE_PARAM).First(); } else nextPageUri = string.Empty; } catch (HttpRequestException ex) { _telemetry.LogInformation($"\nError downloading metadata {changeReportUri} with error '{ex.Message}'. If this happens every time, this may be an issue. Ignoring for now.", Microsoft.ApplicationInsights.DataContracts.SeverityLevel.Error); #if DEBUG _telemetry.LogInformation("DEBUG: Response body was:\n" + responseFromServer); #endif } // Do something with the response for this URL & the nextpage URL if needed if (!string.IsNullOrEmpty(responseFromServer)) { // Deserialise the results from the HTTP response responseMeta = JsonConvert.DeserializeObject<List<ActivityReportInfo>>(responseFromServer) ?? new List<ActivityReportInfo>(); // Add our own batch ID variable to each response foreach (var metaData in responseMeta) { metaData.BatchID = batchId; } // More data? if (!string.IsNullOrEmpty(nextPageUri)) { // Add publisher to URL var nextPageURLWithPublisher = $"{nextPageUri}&PublisherIdentifier={_tenantId}"; // Get next page var nextPage = await DownloadMetadata(nextPageURLWithPublisher, batchId); // Recursive call responseMeta.AddRange(nextPage); } return responseMeta; } else { responseMeta = new List<ActivityReportInfo>(); } return responseMeta; } private string FormatDate(DateTime d) { // Activity API format: YYYY-MM-DDTHH:MM:SS string date = d.ToUniversalTime().ToString("yyyy-MM-dd"); string time = d.ToUniversalTime().ToString("HH:mm:ss"); return $"{date}T{time}"; } }
#!/usr/bin/env python
"""Download files from an alist server directory via wget.

Lists the directory through alist's /api/fs/list endpoint, filters by
optional prefix/suffix, then shells out to wget for each file URL.
"""
import argparse
import os
import copy
import json
import subprocess
import logging
from urllib import request, parse

LOG = logging.getLogger(__name__)


def execute_command(cmd: str, shell=True, encoding="utf-8", timeout=None, return_code_dict=None) -> (bool, str):
    """Run *cmd* and return ``(return_code, output_or_error_message)``.

    ``return_code_dict`` optionally maps string return codes to friendlier
    messages (key ``'0'`` overrides the success output).
    """
    try:
        logging.info("execute command: %s", cmd)
        tmp_cmd = cmd if shell else cmd.split()
        output = subprocess.check_output(tmp_cmd, stderr=subprocess.STDOUT, shell=shell, timeout=timeout)
        default_return = output.decode(encoding, errors='ignore').strip()
        if return_code_dict:
            default_return = return_code_dict.get('0') or default_return
        return 0, default_return
    except subprocess.TimeoutExpired:
        err_msg = f"timeout={timeout}, cmd='{cmd}'"
        logging.error("execute command timed out, %s", err_msg)
        return -1, err_msg
    except subprocess.CalledProcessError as e:
        err_msg = f"cmd='{cmd}', err={e.output.decode(encoding, errors='ignore').strip()}"
        LOG.error("execute command failed, %s", err_msg)
        return_code = e.returncode
        if return_code_dict:
            err_msg = return_code_dict.get(str(return_code)) or err_msg
        return return_code, err_msg
    except Exception as e:
        # Fix: arbitrary exceptions (e.g. FileNotFoundError when shell=False)
        # have no .output/.returncode; the original raised AttributeError here
        # instead of reporting the failure.
        err_msg = f"cmd='{cmd}', err={e}"
        LOG.error("execute command failed, e_class=%s, %s", e.__class__, err_msg)
        return_code = getattr(e, 'returncode', -1)
        if return_code_dict:
            err_msg = return_code_dict.get(str(return_code)) or err_msg
        return return_code, err_msg


def execute_command_in_popen(cmd: str, shell=True, output_func=None, encoding="utf-8") -> int:
    """Run *cmd*, streaming each output line through *output_func*; return the exit code."""
    def lines():
        for line in iter(proc.stdout.readline, ""):
            yield line

    def default_print(x):
        if os.environ.get('IN_CLICK'):
            import click
            click.secho(x.strip())
        else:
            print(x, end='')

    output_func = output_func or default_print
    logging.info(f"execute_command_in_popen cmd={cmd}")
    # Fix: stderr was a separate PIPE that nothing drained, which can deadlock
    # once the OS pipe buffer fills; merge it into stdout instead.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            shell=shell, universal_newlines=True)
    for line in lines():
        output_func(line)
    proc.stdout.close()
    return_code = proc.wait()
    LOG.info(f'execute_command_in_popen end, return_code={return_code}')
    return return_code


def completed(flag, dec, err=None, raise_flag=True):
    """Report success (flag == 0) or failure of step *dec*; optionally raise on failure."""
    if flag == 0:
        msg = f'{dec} success'
        LOG.info(msg)
        if os.environ.get('IN_CLICK'):
            import click  # third-party imports stay local to the click code path
            click.secho(msg, fg='green')
        else:
            print(msg)
        return
    msg = f'{dec} failed'
    if err:
        msg = f'{msg}, err: {err}'
    LOG.error(msg)
    if os.environ.get('IN_CLICK'):
        import click  # third-party imports stay local to the click code path
        click.secho(msg, fg='red')
        if raise_flag:
            # Fix: the original raised ClickException("") and lost the message.
            raise click.ClickException(msg)
    else:
        print(msg)
        if raise_flag:
            raise Exception(msg)


def set_simple_log(log_path):
    """Configure root logging to write INFO+ records to *log_path*."""
    dirname = os.path.dirname(log_path)
    os.makedirs(dirname, exist_ok=True)
    logging.basicConfig(
        filename=log_path,
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    LOG.info(f'set_simple_log={log_path} ok')


def get_string_split_list(string, split_flag=','):
    """Split *string* on *split_flag*, dropping empty/whitespace-only pieces."""
    return [i.strip() for i in string.split(split_flag) if i.strip()]


# Fix: this function was defined twice, byte-for-byte identical; keep one copy.
def get_url_ip_port(url):
    """Extract the ``host:port`` part of an ``http://host:port/...`` URL."""
    item_list = get_string_split_list(url, split_flag='/')
    item_list2 = get_string_split_list(url, split_flag='//')
    if len(item_list2) != 2:
        raise Exception(f'invalid url={url}')
    ip_port_and_string = item_list2[1]
    for item in item_list:
        if ip_port_and_string.startswith(item):
            return item
    raise Exception(f'invalid url={url}')


def validate_alist_url(alist_url):
    """argparse type-checker: verify *alist_url* parses and the server answers."""
    try:
        ip_port = get_url_ip_port(alist_url)
    except Exception:  # was a bare except; keep the same rejection behavior
        raise argparse.ArgumentTypeError(f"invalid alist url: {alist_url}")
    alist_public_settings_url = f'http://{ip_port}/api/public/settings'
    try:
        response = request.urlopen(alist_public_settings_url)
        code = response.code
        if code // 100 != 2:
            raise Exception(f'response.code is {code}')
    except Exception as e:
        raise argparse.ArgumentTypeError(
            f"communicate with alist failed, url={alist_public_settings_url}, err={str(e)}")
    return alist_url


def check_path(value):
    """argparse type-checker: create *value* if needed and require a directory."""
    os.makedirs(value, exist_ok=True)
    if not os.path.isdir(value):
        raise argparse.ArgumentTypeError(f"Invalid path: {value} is not a valid directory.")
    return value


def parse_arguments():
    """Parse CLI arguments: positional alist URL plus filter/destination options."""
    parser = argparse.ArgumentParser()
    parser.add_argument('alist_url', help='alist url', type=validate_alist_url)
    parser.add_argument('-s', '--startswith', help='download files startswith it', type=str)
    parser.add_argument('-e', '--endswith', help='download files endswith it', type=str)
    parser.add_argument('-p', '--pathsave', default=os.getcwd(),
                        help='download files save this path', type=check_path)
    return parser.parse_args()


def get_alist_ip_port_and_path(alist_url):
    """Split *alist_url* into ``(host:port, directory_path)``."""
    ip_port = get_url_ip_port(alist_url)
    url_list = get_string_split_list(alist_url, ip_port)
    if len(url_list) != 2:
        raise Exception(f'not found {ip_port} in {alist_url}, length err')
    return ip_port, url_list[1]


def get_alist_fs_list(alist_url):
    """POST to alist's /api/fs/list and return the decoded response dict."""
    ip_port, path_url = get_alist_ip_port_and_path(alist_url)
    list_files_url = f'http://{ip_port}/api/fs/list'
    body = {"path": path_url, "password": "", "page": 1, "per_page": 0, "refresh": False}
    print(list_files_url, body)
    # NOTE(review): the body is form-urlencoded here; the alist docs describe a
    # JSON body for this endpoint — confirm against the target server version.
    data = bytes(parse.urlencode(body), encoding="utf8")
    req = request.Request(url=list_files_url, data=data, method="POST")
    response = request.urlopen(req)
    code = response.code
    if code // 100 != 2:
        raise Exception(f'{list_files_url} response.code is {code}')
    response_data_dict = json.loads(response.read().decode('utf-8'))
    alist_code = response_data_dict.get('code') or 0
    if alist_code // 100 != 2:
        raise Exception(f'{list_files_url} body.code is {alist_code}')
    return response_data_dict


def get_can_download_files(alist_url):
    """Return the names of the plain files (not directories) in the listing."""
    content_dict = get_alist_fs_list(alist_url)
    print(json.dumps(content_dict, indent=4))
    content_list = content_dict['data']['content']
    all_files = []
    for value_dict in content_list:
        if value_dict.get('is_dir') is False:
            all_files.append(value_dict.get('name'))
    return all_files


def get_download_file_urls(alist_url, need_download_files):
    """Build direct-download (/d/) URLs for each file name."""
    ip_port, path_url = get_alist_ip_port_and_path(alist_url)
    download_file_urls = []
    for file in need_download_files:
        url = f'http://{ip_port}/d/{path_url}/{file}'
        # Collapse accidental '//' in the path, then restore the scheme's '//'.
        url = url.replace('//', '/')
        url = url.replace('http:/', 'http://')
        download_file_urls.append(url)
    return download_file_urls


def use_wget_download(download_file_urls, pathsave):
    """Download each URL into *pathsave* with wget, reporting per-file status."""
    for url in download_file_urls:
        cmd = f'wget {url} -P {pathsave}'
        print(cmd)
        flag = execute_command_in_popen(cmd)
        completed(flag, f'download {url}')


def download_files(alist_url, startswith, endswith, pathsave):
    """List, filter (prefix/suffix), URL-quote, and download the matching files."""
    all_files = get_can_download_files(alist_url)
    print(f'get_can_download_files is {all_files}')
    need_download_files = copy.deepcopy(all_files)
    for file in all_files:
        if startswith and not file.startswith(startswith):
            if file in need_download_files:
                need_download_files.remove(file)
        if endswith and not file.endswith(endswith):
            if file in need_download_files:
                need_download_files.remove(file)
    need_download_files = [parse.quote(file) for file in need_download_files]
    print(f'need_download_files is {need_download_files}')
    download_file_urls = get_download_file_urls(alist_url, need_download_files)
    print(f'download_file_urls is {download_file_urls}')
    use_wget_download(download_file_urls, pathsave)


if __name__ == '__main__':
    args = parse_arguments()
    print(f'args is {args}')
    download_files(args.alist_url, args.startswith, args.endswith, args.pathsave)
#lang racket (require berkeley) ;A “perfect number” is defined as a number equal to the sum of all its factors less than itself. ;For example, the first perfect number is 6, because its factors are 1, 2, 3, and 6, ;and 1+2+3=6. The second perfect number is 28, because 1+2+4+7+14=28. ;What is the third perfect number? Write a procedure (next-perf n) that tests numbers ;starting with n and continuing with n+1, n+2, etc. until a perfect number is found. ;Then you can evaluate (next-perf 29) to solve the problem. ;Hint: you’ll need a sum-of-factors subprocedure. (define (factorise n) (define (factors n div) (if (= n 1) 1 (if (= (modulo n div) 0) (se div (factors (/ n div) div)) (factors n (+ 1 div)) ) )) (factors n 2)) ;(factorise 6) ;(factorise 28) ; First: we define a function that given a number produces its factors ; version 2 - factoriZe version ; In version 1, we don't test all numbers from 1 to n to see if they are factors ; Version 1 does it more like what we do on paper. This is not what we need (define (factorize n) (define (factors n div) (if (= div n) 1 ; once we reach the end, always add 1 as factor (if (= (modulo n div) 0) (se div (factors n (+ 1 div))) (factors n (+ 1 div)) ) )) (factors n 2)) ;(factorize 28) ;Next we define a function that sums a list of numbers (define (sum-of-factors l) (if (empty? l) 0 (+ (first l) (sum-of-factors (butfirst l))))) ;(sum-of-factors '(1 2 3)) ;(sum-of-factors (factorize 28)) ; Finally we define the next-perf procedure (define (next-perf n) (if (= n (sum-of-factors (factorize n))) n (next-perf (+ n 1)))) (next-perf 2) (next-perf 7) (next-perf 29) ; produces correct answer 496 (next-perf 500) ; take a bit but gives correct answer 8128
package com.example.smu

import android.annotation.SuppressLint
import android.content.ContentValues
import android.content.Context
import android.database.sqlite.SQLiteDatabase
import android.database.sqlite.SQLiteOpenHelper

/**
 * Singleton SQLite helper persisting chat messages, one row per message,
 * keyed by room id. Access via [getInstance].
 */
class DatabaseChat private constructor(context: Context) : SQLiteOpenHelper(context, DATABASE_NAME, null, DATABASE_VERSION) {

    companion object {
        private const val DATABASE_VERSION = 1
        private const val DATABASE_NAME = "chat_database.db"
        private const val TABLE_NAME = "chat_messages"
        private const val COLUMN_ID = "id"
        private const val COLUMN_ROOM_ID = "room_id"
        private const val COLUMN_SENDER = "sender"
        private const val COLUMN_MESSAGE = "message"
        private const val COLUMN_TIME = "time"
        private const val COLUMN_FLAG = "flag"

        @Volatile
        private var instance: DatabaseChat? = null

        /** Double-checked-locking singleton accessor. */
        fun getInstance(context: Context) =
            instance ?: synchronized(DatabaseChat::class.java) {
                instance ?: DatabaseChat(context).also { instance = it }
            }
    }

    override fun onCreate(db: SQLiteDatabase) {
        // Column order here fixes the cursor indices used in getAllMessages:
        // 0=id, 1=room_id, 2=sender, 3=message, 4=time, 5=flag.
        val createTableQuery = "CREATE TABLE $TABLE_NAME ($COLUMN_ID INTEGER PRIMARY KEY, $COLUMN_ROOM_ID TEXT, $COLUMN_SENDER TEXT, $COLUMN_MESSAGE TEXT, $COLUMN_TIME TEXT, $COLUMN_FLAG INTEGER)"
        db.execSQL(createTableQuery)
    }

    override fun onUpgrade(db: SQLiteDatabase, oldVersion: Int, newVersion: Int) {
        // Destructive upgrade: drop and recreate. All stored messages are lost.
        if (oldVersion != newVersion) {
            db.execSQL("DROP TABLE IF EXISTS $TABLE_NAME")
            onCreate(db)
        }
    }

    /** Inserts a single message row for [roomId]. */
    fun insertMessage(roomId: String, sender: String, message: String, timestamp: String, flag: Int) {
        val db = this.writableDatabase
        val contentValues = ContentValues().apply {
            put(COLUMN_ROOM_ID, roomId)
            put(COLUMN_SENDER, sender)
            put(COLUMN_MESSAGE, message)
            put(COLUMN_TIME, timestamp)
            put(COLUMN_FLAG, flag)
        }
        db.insert(TABLE_NAME, null, contentValues)
        db.close()
    }

    /** Deletes every message belonging to [roomId]. */
    fun deleteChatroom(roomId: String) {
        val db = this.writableDatabase
        // Parameterized WHERE clause: the previous string-interpolated
        // "'$roomId'" broke on quotes and allowed SQL injection.
        db.delete(TABLE_NAME, "$COLUMN_ROOM_ID = ?", arrayOf(roomId))
        db.close()
    }

    /** Returns all messages for [roomId] in insertion (rowid) order. */
    @SuppressLint("Range")
    fun getAllMessages(roomId: String): MutableList<ChatMessage> {
        val messages: MutableList<ChatMessage> = mutableListOf()
        val db = readableDatabase
        // Parameterized query instead of interpolating roomId into the SQL
        // (injection + quoting bug). An empty result simply skips the loop,
        // so no special-case early return is needed — the old one leaked
        // both the cursor and the db handle.
        val cursor = db.rawQuery("SELECT * FROM $TABLE_NAME WHERE $COLUMN_ROOM_ID = ?", arrayOf(roomId))
        cursor.use {
            while (it.moveToNext()) {
                val sender = it.getString(2)    // indices match CREATE TABLE order
                val message = it.getString(3)
                val timestamp = it.getString(4)
                val flag = it.getInt(5)
                messages.add(ChatMessage(sender, message, timestamp, flag))
            }
        }
        db.close()
        return messages
    }
}
/**
 * Control DMX de 3 leds.
 * Basado en el ejemplo:
 *
 * https://wltd.org/dmx-to-rgbw-led
 *
 * xlr 1 / gnd
 * xlr 2 white
 * xlr 3 black
 */
#include <DMXSerial.h>
#include <VirtualWire.h>

#define default_level 0

int motor_1_pin = 3; // 9 no va
int led_1_pin = 10;
int motor_2_pin = 5; // 10 no va
int led_2_pin = 6;
int motor_3_pin = 11;
int led_3_pin = 6;   // NOTE(review): same pin as led_2_pin — looks like a typo, confirm wiring

int transmit_pin = 12;
int receive_pin = 2;
int transmit_en_pin = 4;

void setup()
{
  DMXSerial.init(DMXReceiver);
  DMXSerial.maxChannel( 6 );

  vw_set_tx_pin( transmit_pin );
  vw_set_rx_pin( receive_pin );
  // vx_set_ptt_pin( transmit_en_pin );
  vw_set_ptt_inverted(true); // Required for DR3100
  vw_setup(2000);            // Bits per sec

  // Self-test: drive all channels at 80, broadcast the readings, then
  // drive everything back to 0 and broadcast again.
  DMXSerial.write(1, 80);
  DMXSerial.write(2, 80);
  DMXSerial.write(3, 80);

  // enable pwm outputs
  pinMode( motor_1_pin, OUTPUT ); // sets the digital pin as output
  pinMode( motor_2_pin, OUTPUT );
  pinMode( motor_3_pin, OUTPUT );
  pinMode( led_1_pin, OUTPUT );   // sets the digital pin as output
  pinMode( led_2_pin, OUTPUT );
  pinMode( led_3_pin, OUTPUT );

  int chn_1 = DMXSerial.read(1);
  analogWrite( motor_1_pin, chn_1 );
  analogWrite( led_1_pin, chn_1 );
  int chn_2 = DMXSerial.read(2);
  analogWrite( motor_2_pin, chn_2 );
  analogWrite( led_2_pin, chn_2 );
  int chn_3 = DMXSerial.read(3);
  analogWrite( motor_3_pin, chn_3 );
  analogWrite( led_3_pin, chn_3 );

  // buffer holds two 4-byte fields plus a NUL terminator (9 bytes).
  // It was previously 8 bytes with no terminator, so strlen(buffer)
  // read past the end of the array.
  char buff_A[4], buff_B[4], buffer[9];
  myitoa( chn_2, buff_A, 4 );
  myitoa( chn_3, buff_B, 4 );
  append_buffers( buff_A, buff_B, buffer);
  vw_send((uint8_t *)buffer, strlen(buffer));
  vw_wait_tx(); // Wait until the whole message is gone

  delay( 1000 );

  DMXSerial.write(1, 0);
  DMXSerial.write(2, 0);
  DMXSerial.write(3, 0);

  chn_1 = DMXSerial.read(1);
  analogWrite( motor_1_pin, chn_1 );
  analogWrite( led_1_pin, chn_1 );
  chn_2 = DMXSerial.read(2);
  analogWrite( motor_2_pin, chn_2 );
  analogWrite( led_2_pin, chn_2 );
  chn_3 = DMXSerial.read(3);
  analogWrite( motor_3_pin, chn_3 );
  analogWrite( led_3_pin, chn_3 );

  myitoa( chn_2, buff_A, 4 );
  myitoa( chn_3, buff_B, 4 );
  append_buffers( buff_A, buff_B, buffer);
  vw_send((uint8_t *)buffer, strlen(buffer));
  vw_wait_tx(); // Wait until the whole message is gone

  delay( 1000 );
}

void loop()
{
  rw_channels();
  delay(10);
}

// Read DMX channels 1..3 (or fall back to default_level after 5 s of
// silence), mirror them onto motor/led PWM pins, and broadcast channels
// 2 and 3 over the VirtualWire radio link.
void rw_channels()
{
  int chn_1, chn_2, chn_3;

  unsigned long lastPacket = DMXSerial.noDataSince();
  if (lastPacket < 5000) {
    // read recent DMX values and set pwm levels
    chn_1 = DMXSerial.read(1);
    chn_2 = DMXSerial.read(2);
    chn_3 = DMXSerial.read(3);
  } else {
    chn_1 = default_level;
    chn_2 = default_level;
    chn_3 = default_level;
  }

  analogWrite(motor_1_pin, map( chn_1, 0, 255, 255, 0 )); // 0 en el motor es movimiento maximo
  analogWrite( led_1_pin, chn_1 );
  analogWrite( motor_2_pin, map( chn_2, 0, 255, 255, 0 ));
  analogWrite( led_2_pin, chn_2 );
  analogWrite( motor_3_pin, map( chn_3, 0, 255, 255, 0 ) );
  analogWrite( led_3_pin, chn_3 );

  char buff_A[4], buff_B[4], buffer[9]; // 9th byte for the NUL terminator
  myitoa( chn_2, buff_A, 4 );
  myitoa( chn_3, buff_B, 4 );
  append_buffers( buff_A, buff_B, buffer);
  vw_send((uint8_t *)buffer, strlen(buffer));
  vw_wait_tx(); // Wait until the whole message is gone
}

// Concatenate two 4-byte fields into b and NUL-terminate the result so
// callers can safely use strlen(b). b must be at least 9 bytes.
void append_buffers( char bA[4], char bB[4], char b[9] )
{
  for( int i = 0; i < 4; i++ ) {
    b[i] = bA[i];
  }
  for( int i = 4; i < 8; i++ ) {
    b[i] = bB[i-4];
  }
  b[8] = '\0'; // fix: previously missing, so strlen read past the buffer
}

// Render |number| right-aligned and zero-padded into mystring[1..n_bytes-1],
// with a '+'/'-' sign character in mystring[0]. NOT NUL-terminated.
void myitoa(int number, char *mystring, int n_bytes )
{
  mystring[0] = number < 0 ? '-' : '+';
  number = number < 0 ? -number : number;
  for (int n = n_bytes - 1; n > 0; n--) {
    // number is non-negative here, so every slot gets a digit
    // (the old "if(number >= 0)" guard and ' ' dead store were no-ops).
    mystring[n] = number % 10 + 48;
    number /= 10;
  }
}
import { Component, OnInit } from '@angular/core';
import { Observable, BehaviorSubject, combineLatest, Subject } from 'rxjs';
import { Post } from '../../models/models';
import { PostsService } from '../../services/posts.service';
import { Router } from '@angular/router';
// NOTE(review): flatMap and delay are imported but never used below — candidates for removal.
import { shareReplay, map, flatMap, takeUntil, debounceTime, distinctUntilChanged, startWith, tap, switchMap, delay } from 'rxjs/operators';
import { calculatePages } from '../../utils';
import { SortParam, SortDir } from '../../models/sortparam';
import { FormControl } from '@angular/forms';
import { Popover } from '../../shared/popover/popover.service';

/**
 * Paginated, sortable, searchable post list. All view state is modelled as
 * RxJS streams: page index/size, sort order, search term and tag filter are
 * combined into one posts$ query stream that hits PostsService.
 */
@Component({
  selector: 'wayne-repo-post-list',
  templateUrl: './post-list.component.html',
  styleUrls: ['./post-list.component.css']
})
// NOTE(review): ngOnDestroy is implemented below but OnDestroy is not listed
// in the implements clause — Angular still calls it, but the interface should
// be declared. Confirm before changing.
export class PostListComponent implements OnInit {
  // Emits once on destroy; used via takeUntil to tear down posts$.
  destroy$ = new Subject<any>();
  tagInput = new FormControl([]);
  searchInput = new FormControl('');
  sortDir = SortDir;
  // default sort
  sort: SortParam = { lastUpdatedDate: SortDir.DESC };
  // stream of sorting param the list
  sort$ = new BehaviorSubject(this.sort);
  // list of post of the current page
  posts$: Observable<Array<Post>>;
  // search criteria
  searchTerm$: Observable<string>;
  // current page index, default is 1
  pageIdx$: BehaviorSubject<number> = new BehaviorSubject(1);
  // page size, default is 10
  pageSize$: BehaviorSubject<number> = new BehaviorSubject(10);
  // total number of post
  count$ = new BehaviorSubject(0);
  // list of pages to be displayed on paginate
  pages$: Observable<Array<number>>;
  // total number of pages
  noOfPage$: Observable<number>;
  search$: Observable<string>;
  tag$: Observable<Array<string>>;
  allTags$: Observable<Array<string>>;

  constructor(
    private postService: PostsService,
    private router: Router,
    private popper: Popover
  ) {
    // calculate the number of pages
    this.noOfPage$ = combineLatest(this.pageSize$, this.count$).pipe(
      map(([pageSize, count]) => {
        return Math.ceil(count / pageSize);
      })
    );
    // calculate the pages to be display on pagination
    this.pages$ = combineLatest(this.noOfPage$, this.pageIdx$).pipe(
      map(([noOfPage, curPage], _) => {
        return calculatePages(noOfPage, curPage);
      })
    );
    // search condition: debounced + deduplicated keystrokes; startWith('')
    // makes the combineLatest below fire before the user types anything.
    this.search$ = this.searchInput.valueChanges.pipe(
      startWith(''),
      debounceTime(400),
      distinctUntilChanged()
    );
    this.tag$ = this.tagInput.valueChanges.pipe(
      startWith([]),
      tap(() => console.log('tag changed'))
    );
    // Any change to page, size, sort, search or tags triggers one backend
    // query; switchMap cancels an in-flight request when inputs change.
    this.posts$ = combineLatest(
      this.pageIdx$,
      this.pageSize$,
      this.sort$,
      this.search$,
      this.tag$
    ).pipe(
      takeUntil(this.destroy$),
      tap(() => {
        // show a blocking spinner over the table while loading
        this.popper.openLoadingDialog(
          'post-table-container',
          document.getElementsByClassName('post-table-container')[0],
          'Loading...'
        );
      }),
      switchMap(([pageIdx, pageSize, sort, searchTerm, tags], _) => {
        return this.postService.getPosts(
          pageIdx,
          pageSize,
          sort,
          searchTerm,
          tags
        );
      }),
      map(pagedResult => {
        if (pagedResult) {
          // Push the new total into count$ only when it actually changed,
          // to avoid re-triggering the pagination streams needlessly.
          if (this.count$.value != pagedResult.rowCount)
            this.count$.next(pagedResult.rowCount);
          return pagedResult.results as Array<Post>;
        } else {
          return [] as Array<Post>;
        }
      }),
      shareReplay(),
      tap(() => {
        this.popper.closeLoading('post-table-container');
      })
    );

    this.allTags$ = this.postService.getTags();
  }

  ngOnInit() {
    // Kick off the pipeline; the template presumably also subscribes via async pipe.
    this.posts$.subscribe();
  }

  changePage(page) {
    this.pageIdx$.next(page);
  }

  // Toggle sort direction on the clicked column; switching to a new column
  // resets the sort object so only one column is sorted at a time.
  clickSort(col) {
    if (this.sort[col]) {
      if (this.sort[col] == SortDir.ASC) this.sort[col] = SortDir.DESC;
      else this.sort[col] = SortDir.ASC;
    } else {
      this.sort = {};
      this.sort[col] = SortDir.DESC;
    }
    this.sort$.next(this.sort);
  }

  ngOnDestroy() {
    this.destroy$.next();
    this.destroy$.complete();
  }
}
package org.bertspark.classifier.block

import ai.djl.ndarray._
import ai.djl.ndarray.types._
import ai.djl.nn._
import ai.djl.nn.core.Linear
import ai.djl.nn.norm.BatchNorm.batchNorm
import ai.djl.training.ParameterStore
import ai.djl.translate.StackBatchifier
import ai.djl.util.PairList
import org.bertspark.dl.block.BaseNetBlock
import org.scalatest.flatspec.AnyFlatSpec

/**
  * Exploratory tests for the DJL building blocks used by the classifier:
  * batch flattening, Linear, StackBatchifier and BatchNorm.
  */
private[block] final class ClassificationBlockTest extends AnyFlatSpec {
  final private val batchSize = 4
  final private val embeddingSize = 8

  it should "Succeed evaluating batchFlatten block for batch processing" in {
    val ndManager = NDManager.newBaseManager()

    val input = Array.fill(batchSize)(Array.fill(embeddingSize)(0.5F))
    val ndInput = ndManager.create(input)
    val ndList = new NDList(ndInput)
    println(s"Input to BatchFlatten: ${ndList.getShapes().mkString(" ")}")

    val ndOutput = Blocks.batchFlatten(ndInput)
    println(s"Output from BatchFlatten: ${ndOutput.getShape()} => ${ndOutput.toFloatArray.mkString(" ")}")
    ndManager.close()
  }

  it should "Succeed evaluating Linear block" in {
    val ndManager = NDManager.newBaseManager()

    val input = Array.fill(batchSize)(Array.fill(embeddingSize)(0.5F))
    val ndInput = ndManager.create(input)
    val ndList = new NDList(ndInput)
    println(s"Input to Linear block: ${ndList.getShapes().mkString(" ")} =>\n${
      input.map(_.mkString(" ")).mkString("\n")
    }")

    val linBlock = Linear.builder().setUnits(10).build()
    val ps: ParameterStore = new ParameterStore()
    val ndOutput = linBlock.forward(ps, ndList, true)
    println(s"Output from Linear: ${ndOutput.getShapes().mkString(" ")} =>\nNum output: ${
      ndOutput.get(0).size()
    }\n${ndOutput.get(0).toFloatArray.mkString("\n")}")
    ndManager.close()
  }

  it should "Succeed applying stack batchifier" in {
    import ClassificationBlockTest._

    val stackBatchifier = new StackBatchifier
    val inputToBatchifier = Array[NDList](
      singleNDList(embeddingSize, 0.25F),
      singleNDList(embeddingSize, 0.89F)
    )
    println(s"Input to Batchifier:\n${inputToBatchifier.map(_.get(0).toFloatArray.mkString(" ")).mkString("\n")}")

    val batchifiedNDList = stackBatchifier.batchify(inputToBatchifier)
    println(s"Output from Batchifier: ${batchifiedNDList.getShapes().mkString(" ")} =>\nNum output: ${
      batchifiedNDList.get(0).size()
    }\n${batchifiedNDList.get(0).toFloatArray.mkString(" ")}")

    val unBatchiedNDList = stackBatchifier.unbatchify(batchifiedNDList)
    println(s"Output from UnBatchifier: ${
      unBatchiedNDList.map(_.getShapes().mkString(" ")).mkString("\n")
    }\n${unBatchiedNDList.map(_.get(0).toFloatArray.mkString(" ")).mkString("\n")}")
  }

  ignore should "Succeed evaluating Batch norm block" in {
    val ndManager = NDManager.newBaseManager()

    val input = Array.fill(4)(Array.fill(256)(0.5F))
    val ndInput = ndManager.create(input)
    val ndList = new NDList(ndInput)
    println(ndList.getShapes().mkString(" "))

    val runningMean = Array.fill(256)(0.2F)
    val runningVar = Array.fill(256)(0.1F)
    val ndOutput = batchNorm(ndInput, ndManager.create(runningMean), ndManager.create(runningVar))
    println(ndOutput.getShapes())
    ndManager.close()
  }
}

private[block] final object ClassificationBlockTest {

  /**
    * Build a single-element NDList of length `embeddingSize` filled with `value`.
    *
    * Fix: the manager is intentionally NOT closed here. Closing an NDManager
    * releases the native memory of every NDArray it owns, so the previous
    * `ndManager.close()` before returning handed callers an NDList backed by
    * freed memory. The manager leaks for the lifetime of the test run, which
    * is acceptable in a test helper.
    */
  def singleNDList(embeddingSize: Int, value: Float): NDList = {
    val ndManager = NDManager.newBaseManager()
    val input = Array.fill(embeddingSize)(value)
    val ndInput = ndManager.create(input)
    new NDList(ndInput)
  }

  final class TestBlock extends BaseNetBlock {
    add("BERT-decoder-Flatten", Blocks.batchFlattenBlock())

    /**
      * This method delegates processing to the block that actually implements the recursive
      * initialization of child block
      * @param ndManager Reference to the ND array manager
      * @param dataType data type (Default Float 32)
      * @param shapes Shape for the 4 embedding (batch size x embedding size)
      */
    override def initializeChildBlocks(ndManager: NDManager, dataType: DataType, shapes: Shape*): Unit =
      super.initializeChildBlocks(ndManager, dataType, shapes:_*)

    // Forward simply delegates to the sequential block assembled via add().
    override protected def forwardInternal(
      parameterStore: ParameterStore,
      inputNDList: NDList,
      training : Boolean,
      params: PairList[String, java.lang.Object]): NDList =
      sequentialBlock.forward(parameterStore, inputNDList, training, params)

    // Register the block both in the sequential pipeline and as a named child.
    private def add(name: String, block: Block): Unit = {
      sequentialBlock.add(block)
      addChildBlock(name, block)
    }
  }
}
package com.jlaby.world; /* * @(#)Position.java 0.1 99/Feb/12 * * Copyright TARSEC Corp. * 8047 Zurich, Switzerland, All Rights Reserved. * * CopyrightVersion 1.0 */ import java.io.*; /** * This class is a holder for information about the * position of any object within the Laby world. * * @author Marcel Schoen * @version $Id: Position.java,v 1.1 2013/10/27 23:51:32 marcelschoen Exp $ */ public class Position implements Serializable { private int m_x = 0; private int m_y = 0; private int m_z = 0; /** * Default constructor. */ public Position() { m_x = 0; m_y = 0; m_z = 0; } /** * Constructs a position object. * * @param x the x-coordinate * @param y the y-coordinate * @param z the z-coordinate */ public Position(int x, int y, int z) { m_x = x; m_y = y; m_z = z; } /** * Sets the x-coordinate. * * @param value the value for the x-coordinate. */ public void setX(int value) { m_x = value; } /** * Sets the y-coordinate. * * @param value the value for the y-coordinate. */ public void setY(int value) { m_y = value; } /** * Sets the z-coordinate. * * @param value the value for the z-coordinate. */ public void setZ(int value) { m_z = value; } /** * Returns the x-coordinate. * * @return the value of the x-coordinate. */ public int getX() { return m_x; } /** * Returns the y-coordinate. * * @return the value of the y-coordinate. */ public int getY() { return m_y; } /** * Returns the z-coordinate. * * @return the value of the z-coordinate. */ public int getZ() { return m_z; } }
# strncpy.s (check out the man page for strncpy)
# ADDITIONAL TO DO: test the strlen() function
# TO DO: write a strlen() function and use that instead of
#        prompting the user, i.e., get the length of src
#        and copy all of the bytes over to dest
#
# TO DO: write a strncmp() function (...Exam 1)
#
        .data
prompt: .asciiz "Enter n: "
dest:   .asciiz "AAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n"
src:    .asciiz "this is fine."

        .text
        .globl main
main:
        sub  $sp, $sp, 4        # allocate stack space to store $ra
        sw   $ra, 0($sp)        # store $ra on the stack

        # display prompt (print_string, service 4)
        la   $a0, prompt
        ori  $v0, $0, 4
        syscall

        # read int from keyboard (read_int, service 5) ==> result in $v0
        ori  $2, $0, 5
        syscall

        # set up parameters dest, src, and n for strncpy() function call:
        la   $a0, dest
        la   $a1, src
        addu $a2, $0, $v0       # n = the int read above
        jal  strncpy

        # display dest string via print_string syscall
        la   $a0, dest
        ori  $v0, $0, 4         # BUG FIX: was "ori $v0, $v0, 4", which OR'ed
                                # the leftover read_int value instead of
                                # loading service code 4
        syscall

        # we're done, so restore $ra, free the stack space, and return
        lw   $ra, 0($sp)
        add  $sp, $sp, 4
        jr   $ra

#############################################################
# strncpy( char * dest, const char * src, size_t n );
#             $a0           $a1              $a2
# Copies exactly n bytes from src to dest. NOTE(review): unlike C strncpy,
# this version neither stops at a NUL in src nor NUL-terminates dest; it
# relies on dest already containing a terminator past the copied region.
strncpy:
        # for ( $t0 = 0 ; $t0 < $a2 ; $t0++ ) { ... }
        ori  $t0, $0, 0         # $t0 = 0
loop:
        # use the set if less than (slt) instruction to control the loop
        slt  $t1, $t0, $a2      # $t1 = ($t0 < $a2) ? 1 : 0
        # branch if not equal (bne)
        bne  $t1, 1, exit       # if $t1 != 1 then goto exit
        # copy one byte from src to dest
        lbu  $t2, 0($a1)        # load byte unsigned (lbu)
        sb   $t2, 0($a0)        # store byte (sb)
        # advance both pointers
        add  $a0, $a0, 1
        add  $a1, $a1, 1
        # update loop variable $t0
        add  $t0, $t0, 1
        # unconditional jump (j) back to the loop test
        j    loop
exit:
        jr   $ra
#
#############################################################

#############################################################
# strlen( const char * s );
#             $a0          ==> length (excluding NUL) returned in $v0
strlen:
        addu $t0, $0, $a0       # $t0 = working pointer
        ori  $t1, $0, 0         # $t1 = byte count = 0
L1:
        lbu  $t2, 0($t0)        # load byte unsigned (lbu)
        beq  $t2, $zero, strlenexit   # stop at the NUL terminator
        # advance pointer and count
        add  $t0, $t0, 1
        add  $t1, $t1, 1
        # unconditional jump (j)
        j    L1
strlenexit:
        addu $v0, $0, $t1       # return the count in $v0
        jr   $ra
#
#############################################################
#include "Debug.h"
#include "Event/Manager.h"
#include "Event/Listener.h"
#include "Event/IListenerDelegate.h"
#include "Graphic/Renderer.h"
#include "Graphic/Scene.h"
#include "Widget/Widget.h"
#include "Widget/GraphicWidget.h"
#include "Menu/Login.h"
#include "Menu/IMenuDelegate.h"
#include "Application.h"

// Smoke-test client: shows the login menu, pumps events and renders until
// either a Close event arrives or the login completes.
class Test : public Event::IListenerDelegate, public Menu::IMenuDelegate
{
public:
  // The constructor runs the whole application loop (blocking until close).
  Test() : _close(false), _scene(), _button()
  {
    // Setup renderer
    Graphic::Renderer::getInstance().init();
    Graphic::Renderer::getInstance().setScene(&_scene);

    // NOTE(review): lmenu is a stack local that goes out of scope when the
    // constructor returns; confirm the menu registers itself with the scene
    // and nothing holds a pointer to it afterwards.
    Menu::Login lmenu(&_scene, this);

    // Add event listeners
    Event::Manager::getInstance()
      .addEventListener(new Event::Listener(Event::Close, this));

    // Main loop: poll events, then draw one frame, until _close is set
    // by processEvent() or loginCompleted().
    while (!_close)
      {
        // Process events
        Event::Manager::getInstance().processEvents();

        // Render
        Graphic::Renderer::getInstance().render();
        //usleep(10000);
      }
  }

  ~Test() {}

  virtual void welcomeCompleted() {}

  // Called by the login menu when the user submits credentials; logs the
  // connection target and ends the main loop.
  virtual void loginCompleted(std::string const& login,
                              std::string const& ipAddress,
                              std::string const& port)
  {
    std::cout << "Connection from " << login << " to "
              << ipAddress << " " << port << std::endl;
    _close = true;
  }

  // Event::IListenerDelegate callback: stop the loop on a Close event.
  virtual void processEvent(Event::Event const& event)
  {
    if (event.type == Event::Close)
      {
        _close = true;
      }
  }

  virtual void newGameCallGeneralMenu() {}
  virtual void serverListCallGeneralMenu() {}
  virtual void optionsCallGeneralMenu() {}

private:
  bool _close;               // loop flag, set by close/login callbacks
  Graphic::Scene _scene;     // scene handed to the renderer
  Graphic::Element _button;  // NOTE(review): appears unused in this class
};

int main(int argc, char *argv[])
{
  try
    {
      Application::getInstance().init(argc, argv);
      Test client;
    }
  // NOTE(review): catching std::exception BY POINTER only handles
  // `throw new std::exception(...)` and leaks the exception object;
  // confirm the project really throws pointers before changing this
  // to catch by const reference.
  catch (std::exception* e)
    {
      std::cerr << e->what() << std::endl;
    }
  return (0);
}
#' @title sp2coords function
#'
#' Extract the coordinates matrix from a Polygon, Polygons, SpatialPolygons,
#' SpatialPolygonsDataFrame, or SpatialPoints object that contains a single polygon.
#' Warning: do not use this function if the input spatial object contains more
#' than one coordinates matrix; only the first one is returned.
#'
#' @param x sp::Spatial* class object ultimately determined by a single coordinates matrix.
#' @return Numerical matrix of coordinates with longitude and latitude columns, respectively; coordinates must be in decimal degrees format.
#' @export sp2coords
sp2coords <- function(x){
  # The branches are mutually exclusive in sp's class hierarchy; a
  # SpatialPolygonsDataFrame also inherits SpatialPolygons and hits the
  # first branch.
  if(is(x, "SpatialPolygonsDataFrame") | is(x, "SpatialPolygons")){
    result <- x@polygons[[1]]@Polygons[[1]]@coords
  } else if(is(x, "Polygons")){
    result <- x@Polygons[[1]]@coords
  } else if(is(x, "Polygon") | is(x, "SpatialPoints")){
    result <- x@coords
  } else {
    # Previously an unsupported class fell through to "object 'result' not
    # found"; fail with an informative message instead.
    stop("x must be a Polygon, Polygons, SpatialPolygons, ",
         "SpatialPolygonsDataFrame, or SpatialPoints object")
  }
  unname(result)
}
import React, { useState } from 'react' import { SERVER_PREFIX } from '../App.jsx' import { languages } from '../constants/index.js' import { request } from '../utils/index.js' import { AudioStreamPlayer, Dropdown, LoadingSpinner, SpeechToText } from './index.js' import ContentWrapper from './ContentWrapper.jsx' const Polyglot = () => { const [speechToText, setSpeechToText] = useState('') const [translation, setTranslation] = useState('') const [translationLoading, setTranslationLoading] = useState(false) const langNameList = languages.map(language => language.name) const [inputLangCode, setInputLangCode] = useState(languages[0].code) const [outputLang, setOutputLang] = useState(langNameList[0]) const handleTranslate = async () => { setTranslationLoading(true) const res = await request(`${SERVER_PREFIX}/chat/gpt/translate?prompt=${speechToText}&language=${outputLang}`, 'GET') setTranslation(await res.text()) setTranslationLoading(false) } const handleInputLangChange = newLangName => { const langObj = languages.find(l => l.name === newLangName) setInputLangCode(langObj.code) } return ( <ContentWrapper title='Polyglot'> <p className='text-md text-white font-medium'> Polyglot: knowing or using several languages. This functionality strives to allow communication across languages. Just type or speak what you want to be said, select the language and hear it spoken in your voice. Imagine if we all had a tool like this, how quickly we could bridge the language divide! 
</p> <div className='flex justify-center mt-8'> <div className='flex md:space-x-12 xs:space-x-4'> <Dropdown options={langNameList} defaultOption={langNameList[0]} onChange={handleInputLangChange} /> <svg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 24 24' strokeWidth='1.5' stroke='currentColor' className='mt-2 w-6 h-6'> <path strokeLinecap='round' strokeLinejoin='round' d='M7.5 21L3 16.5m0 0L7.5 12M3 16.5h13.5m0-13.5L21 7.5m0 0L16.5 12M21 7.5H7.5' /> </svg> <Dropdown options={langNameList} defaultOption={langNameList[0]} onChange={setOutputLang} /> </div> </div> <div className='lg:flex'> <div className='lg:w-5/12 mt-4'> <SpeechToText speechToText={speechToText} setSpeechToText={setSpeechToText} lang={inputLangCode} /> </div> <div className='lg:w-2/12 mt-4 flex flex-col items-center'> <button className='lg:mt-11' onClick={handleTranslate} disabled={translationLoading} > {translationLoading ? ( <div className='flex items-center justify-center'> <LoadingSpinner /> </div> ) : ( <span className='bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded'>Translate</span> )} </button> </div> <div className='lg:w-5/12 mt-4 flex flex-col relative'> <textarea onChange={(e) => setTranslation(e.target.value)} className='bg-gray-700 py-4 px-6 placeholder:text-secondary h-32 text-white rounded-lg outline-none border-none font-medium resize-none' value={translation} /> {translation && ( <div className='absolute right-1 top-3 flex items-center'> <AudioStreamPlayer prompt={translation} /> </div> )} </div> </div> </ContentWrapper> ) } export default Polyglot
#include <iostream> #include <fstream> #include <functional> #include "llrec.h" using namespace std; /** * Reads integers (separated by whitespace) from a file * into a linked list. * * @param[in] filename * The name of the file containing the data to read * @return * Pointer to the linked list (or NULL if empty or the * file is invalid) */ Node* readList(const char* filename); /** * Prints the integers in a linked list pointed to * by head. */ void print(Node* head); /** * Deallocates the linked list nodes */ void dealloc(Node* head); Node* readList(const char* filename) { Node* h = NULL; ifstream ifile(filename); int v; if( ! (ifile >> v) ) return h; h = new Node(v, NULL); Node *t = h; while ( ifile >> v ) { t->next = new Node(v, NULL); t = t->next; } return h; } void print(Node* head) { while(head) { cout << head->val << " "; head = head->next; } cout << endl; } void dealloc(Node* head) { Node* temp; while(head) { temp = head->next; delete head; head = temp; } } // ----------------------------------------------- // Add any helper functions or // function object struct declarations // ----------------------------------------------- bool isOdd(int num){ return (num % 2); } int main(int argc, char* argv[]){ if(argc < 2) { cout << "Please provide an input file" << endl; return 1; } // ----------------------------------------------- // Feel free to update any code below this point // ----------------------------------------------- Node* head = readList(argv[1]); cout << "Original list: "; print(head); // // // Test out your linked list code // Node* smaller = nullptr; // Node* larger = nullptr; // Node* list = makeList({2, 4, 8, 3}); Node* smaller = (Node*) &head; // set to a non-null address Node* larger = (Node*) &head; llpivot(head, smaller, larger, 5); std::cout << "Smaller" << std::endl; print(smaller); std::cout << "Larger" << std::endl; print(larger); std::cout << "Old Original" << std::endl; print(head); dealloc(smaller); dealloc(larger); // head = 
llfilter(head, isOdd); // print(head); return 0; }
import { Transform } from "class-transformer";
import { IsNotEmpty, IsNumber, IsString } from "class-validator";

/**
 * Payload for creating/saving a product. Price fields arrive as strings
 * (e.g. from multipart/form-data) and are coerced to floats by the
 * class-transformer @Transform before class-validator checks them.
 */
export class SaveProductDTO {

    // Human-readable product description; must be a non-empty string.
    @IsString()
    @IsNotEmpty()
    productDescription: string;

    // Sale price; parseFloat yields NaN for non-numeric input, which
    // @IsNumber then rejects.
    @Transform(({ value }) => parseFloat(value))
    @IsNumber()
    salePrice: number;

    // Rental price; same string-to-float coercion as salePrice.
    @Transform(({ value }) => parseFloat(value))
    @IsNumber()
    rentPrice: number;
}
import asArray from "./helpers/asArray"; import { QueueItem } from "./types"; const Queue = function (initialItems: QueueItem[]) { /** * Add a single or several steps onto the `waiting` queue. */ const add = function (steps: QueueItem[] | QueueItem): typeof Queue { _queue = _queue.concat(asArray<QueueItem>(steps)); return this; }; /** * Given an index, set an item in the queue. */ const set = function (index: number, item: QueueItem): void { _queue[index] = item; }; /** * Move all `executed` queue items to `waiting`. */ const reset = function (): void { _queue = _queue.map((item) => { delete item.done; return item; }); }; /** * Retrieve all items that are still eligible to be executed. */ const getItems = (): QueueItem[] => _queue.filter(i => !i.done); const markDone = (index: number) => { _queue[index].done = true; } let _queue: QueueItem[] = []; add(initialItems); return { add, set, reset, getItems, markDone, }; }; export default Queue;
@extends('layouts.dashboard')

@section('title', 'Usuarios')

@section('content')
{{-- User-creation form. Fixes: old()/@error keys now match the input name
     "numero_documento"; password fields are never re-filled from old()
     (security, plus the key was misspelled "passsword"); the confirmation
     field now has name="password_confirmation" so Laravel's `confirmed`
     validation rule can see it. --}}
<div class="block mx-auto my-12 p-8 bg-white w-1/3 border border-gray-200 rounded-lg shadow-lg" style="width:900px">
    <h1 class="text-3xl text-center font-bold">Crear un usuario</h1>
    <br>
    <center>
    <form action="{{route('users.store')}}" method="POST">
        @csrf
        <div class="row">
            <div class="col-1"></div>
            <div class="col-5">
                <input type="text" class="border border-gray-200 rounded-md bg-gray-200 w-full text-lg placeholder-gray-900 p-2 my-2 focus:bg-white" placeholder="Nombre" name="nombre" value="{{old('nombre')}}">
                @error('nombre')
                    <br>
                    <small>*{{$message}}</small>
                    <br>
                @enderror
                <br>
                <input type="text" class="border border-gray-200 rounded-md bg-gray-200 w-full text-lg placeholder-gray-900 p-2 my-2 focus:bg-white" placeholder="Apellidos" name="apellidos" value="{{old('apellidos')}}">
                <br>
                @error('apellidos')
                    <br>
                    <small>*{{$message}}</small>
                    <br>
                @enderror
                <label for="">
                    <input type="text" class="border border-gray-200 rounded-md bg-gray-200 w-full text-lg placeholder-gray-900 p-2 my-2 focus:bg-white" placeholder="Número de documento" name="numero_documento" value="{{old('numero_documento')}}" >
                </label>
                <label for="">
                    <select name="tipodocumento_id" id="idtipodocumento" class="form-select form-select-sm my-2 mx-2 border border-gray-200 rounded-md bg-gray-200" aria-label=".form-select-sm example">
                        @foreach ($tipodocumentos as $tipodocumento)
                            <option value="{{$tipodocumento['id']}}">{{$tipodocumento['nombre']}}</option>
                        @endforeach
                    </select>
                </label>
                @error('numero_documento')
                    <br>
                    <small>*{{$message}}</small>
                    <br>
                @enderror
            </div>
            <div class="col-1"></div>
            <div class="col-5">
                <input type="email" class="border border-gray-200 rounded-md bg-gray-200 w-full text-lg placeholder-gray-900 p-2 my-2 focus:bg-white" placeholder="Correo" name="email" value="{{old('email')}}">
                <br>
                @error('email')
                    <br>
                    <small>*{{$message}}</small>
                    <br>
                @enderror
                {{-- Passwords are intentionally never repopulated --}}
                <input type="password" class="border border-gray-200 rounded-md bg-gray-200 w-full text-lg placeholder-gray-900 p-2 my-2 focus:bg-white" placeholder="Contraseña" name="password">
                <br>
                @error('password')
                    <br>
                    <small>*{{$message}}</small>
                    <br>
                @enderror
                <input type="password" class="border border-gray-200 rounded-md bg-gray-200 w-full text-lg placeholder-gray-900 p-2 my-2 focus:bg-white" placeholder="Confirmar contraseña" name="password_confirmation">
            </div>
        </div>
        <label for="">
            <select name="tipopersona_id" id="idtipopersona" class="form-select my-2 border border-gray-200 rounded-md bg-gray-200" aria-label="Default select example">
                @foreach ($tipopersonas as $tipopersona)
                    <option value="{{$tipopersona['id']}}">{{$tipopersona['nombre']}}</option>
                @endforeach
            </select>
        </label>
        <br>
        <br>
        <button class="btn" style="background-color: rgb(255, 174, 0) " type="submit">Crear</button>
        <br>
        <br>
        <a style="color:black" href="{{route('users.index')}}"><b>cancelar</b></a>
    </form>
    </center>
</div>
@endsection
<template>
  <div :id="id" :class="className" :style="{ height: height, width: width }"></div>
</template>

<script lang="ts">
import { defineComponent, onMounted, watch, nextTick } from 'vue';
import { init, EChartsOption } from 'echarts';

/**
 * Thin ECharts wrapper: renders `optionData` into the host div, re-initializes
 * on option changes, relays chart clicks, and optionally applies a highlight
 * dispatch after each render.
 */
export default defineComponent({
  props: {
    className: {
      type: String,
      default: 'chart',
    },
    id: {
      type: String,
      default: 'lineChartsCompoents',
    },
    width: {
      type: String,
      default: '100%',
    },
    height: {
      type: String,
      default: '200px',
    },
    optionData: {
      type: Object,
      // Fix: `() => {}` is an arrow function with an empty BLOCK body and
      // returns undefined; the default must be an object expression.
      default: () => ({}),
    },
    highlight: {
      // 高亮 (highlight action payload; null disables highlighting)
      type: Object,
      default: () => null,
    },
  },
  setup(props, { emit }) {
    let barChart: any = null;

    // (Re)build the chart: dispose any previous instance, apply the current
    // options, wire the click relay, and replay the highlight if requested.
    const initChart = () => {
      nextTick(() => {
        if (barChart) {
          barChart.dispose();
        }
        barChart = init(document.getElementById(props.id) as HTMLDivElement);
        barChart.setOption(props.optionData as EChartsOption);
        barChart.on('click', function (params) {
          emit('chart-click', params);
        });
        if (props.highlight) {
          barChart.dispatchAction(
            Object.assign(
              {
                type: 'highlight',
              },
              props.highlight
            )
          );
        }
      });
    };

    onMounted(() => {
      initChart();
    });

    // Rebuild when the option object is replaced.
    watch(
      () => props.optionData,
      () => {
        initChart();
      }
    );
  },
});
</script>
import Foundation

/// Top-level configuration for the Greenstand Wallet SDK, bundling the
/// wallet API connection settings with the authentication service endpoints.
public struct GreenstandWalletSDKConfiguration {

    /// Connection settings for the wallet API.
    public struct WalletAPIConfiguration {
        // Credentials/endpoint are stored with internal access; they are
        // only readable inside the SDK module.
        let apiKey: String
        let rootURL: URL
        let rootWalletName: String
        let rootPassword: String

        /// Creates the wallet API configuration.
        /// - Parameters:
        ///   - apiKey: API key sent with wallet API requests.
        ///   - rootURL: Base URL of the wallet API.
        ///   - rootWalletName: Name of the root wallet account.
        ///   - rootPassword: Password of the root wallet account.
        public init(
            apiKey: String,
            rootURL: URL,
            rootWalletName: String,
            rootPassword: String
        ) {
            self.apiKey = apiKey
            self.rootURL = rootURL
            self.rootWalletName = rootWalletName
            self.rootPassword = rootPassword
        }
    }

    /// Endpoints and client identity for the authentication service.
    /// NOTE(review): the field set (authorization/token/userinfo endpoints,
    /// clientId, redirect URL) looks like an OAuth2/OpenID Connect
    /// authorization-code flow configuration — confirm against the consumer.
    public struct AuthenticationServiceConfiguration {
        let authorizationEndpoint: URL
        let tokenEndpoint: URL
        let clientId: String
        let redirectURL: URL
        let userInfoEndpoint: URL

        /// Creates the authentication service configuration.
        /// - Parameters:
        ///   - authorizationEndpoint: URL that starts the authorization step.
        ///   - tokenEndpoint: URL used to exchange/refresh tokens.
        ///   - clientId: Client identifier registered with the service.
        ///   - redirectURL: Redirect/callback URL for the client.
        ///   - userInfoEndpoint: URL returning the authenticated user's info.
        public init(
            authorizationEndpoint: URL,
            tokenEndpoint: URL,
            clientId: String,
            redirectURL: URL,
            userInfoEndpoint: URL
        ) {
            self.authorizationEndpoint = authorizationEndpoint
            self.tokenEndpoint = tokenEndpoint
            self.clientId = clientId
            self.redirectURL = redirectURL
            self.userInfoEndpoint = userInfoEndpoint
        }
    }

    // The two sub-configurations this SDK configuration is composed of.
    let walletAPIConfiguration: WalletAPIConfiguration
    let authenticationServiceConfiguration: AuthenticationServiceConfiguration

    /// Creates a full SDK configuration from its two parts.
    public init(
        walletAPIConfiguration: WalletAPIConfiguration,
        authenticationServiceConfiguration: AuthenticationServiceConfiguration
    ) {
        self.walletAPIConfiguration = walletAPIConfiguration
        self.authenticationServiceConfiguration = authenticationServiceConfiguration
    }
}
<!DOCTYPE html> <html lang="en" class="scroll-smooth"> <head> <!-- get the fun spinner up so it looks like we are doing something --> <script> const startTime = performance.now(); window.addEventListener("load", function () { const loader = document.getElementById("loader"); loader.style.display = "none"; const endTime = performance.now(); const elapsedTime = endTime - startTime; console.log(`Page loaded in ${elapsedTime} ms`); }); </script> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Echo's personal site</title> <!-- get some stuff loaded --> <link rel="stylesheet" href="/src/css/tailwind.css" /> <link rel="stylesheet" href="/src/css/index.css" /> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.1/css/all.min.css" crossorigin="anonymous" referrerpolicy="no-referrer" /> <link rel="icon" href="/img/3kh0.webp" /> <link rel="shortcut icon" href="/img/3kh0.webp" /> <!-- base meta tags --> <meta name="description" content="Echo's personal site with my projects and blog. Showcasing programming, hacking, and more" /> <meta name="keywords" content="Echo, personal site, programming, hacking, kitsune" /> <meta name="author" content="Echo" /> <meta name="robots" content="index, follow" /> <meta name="theme-color" content="#1a202c" /> <!-- open graph meta tags --> <meta property="og:title" content="3kh0.net" /> <meta property="og:description" content="Echo's personal site with my projects and blog. 
Showcasing programming, hacking, and more" /> <meta property="og:type" content="website" /> <meta property="og:url" content="https://3kh0.net/" /> <meta property="og:image" content="https://3kh0.net/img/3kh0.webp" /> <meta property="og:image:alt" content="Echo's Avatar" /> <meta property="og:image:width" content="300" /> <meta property="og:image:height" content="300" /> <meta property="og:site_name" content="Echo's Site" /> </head> <body class="bg-gray-900 text-white"> <div id="loader" class="fixed top-0 left-0 w-screen h-screen flex justify-center items-center bg-white z-50"> <div class="animate-spin rounded-full h-32 w-32"></div> </div> <div id="header"> <header class="py-4 px-8 flex justify-between items-center max-w-6xl mx-auto"> <div> <h1 class="text-7xl font-bold text-green-500 text-center py-1">3kh0</h1> <p class="text-sm">Programmer, Hacker, Kitsune</p> </div> </header> </div> <div id="intro"> <div class="max-w-6xl mx-auto py-8 px-8"> <div class="flex flex-col sm:flex-row items-center"> <div class="py-6"> <p class="text-3xl sm:text-base md:text-3xl lg:text-4xl text-center align-middle leading-8"> Hi, my name is Drake! I am a front-end developer learning backend. I play video games and program with my free time. 
</p> </div> <div class="mx-10 flex items-center justify-center"> <div> <img loading="eager" src="/img/art/cutout2echo.webp" alt="fox" class="w-full h-auto rounded-3xl" /> </div> </div> </div> </div> </div> <div id="about"></div> <div id="skills"> <div class="max-w-6xl mx-auto py-8 px-8"> <h2 class="text-3xl mb-4">Skills</h2> <div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-4 bg-gray-700 rounded-lg p-4"> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fab fa-html5 mr-2"></i> HTML <span class="ml-2 text-sm text-gray-500">99%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[99%]"></div> </div> </div> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fab fa-square-js mr-2"></i> Javascript <span class="ml-2 text-sm text-gray-500">95%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[95%]"></div> </div> </div> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fab fa-node-js mr-2"></i> NodeJS <span class="ml-2 text-sm text-gray-500">90%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[90%]"></div> </div> </div> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fab fa-git-alt mr-2"></i> Git <span class="ml-2 text-sm text-gray-500">90%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[90%]"></div> </div> </div> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fab fa-python mr-2"></i> Python <span class="ml-2 text-sm text-gray-500">80%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[80%]"></div> </div> </div> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i 
class="fab fa-linux mr-2"></i> Linux <span class="ml-2 text-sm text-gray-500">75%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[75%]"></div> </div> </div> <div class="skill mb-4"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fa fa-server mr-2"></i> Networking <span class="ml-2 text-sm text-gray-500">40%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden"> <div class="h-4 bg-green-500 w-[40%]"></div> </div> </div> <div class="skill mb-4 relative" id="css"> <h3 class="mb-2 text-lg text-white flex items-center"><i class="fab fa-css3 mr-2"></i> CSS <span class="ml-2 text-sm text-gray-500" id="cssPercent">80%</span></h3> <div class="w-full bg-gray-300 rounded-lg overflow-hidden relative"> <div class="h-4 bg-green-500 w-[50%]"></div> <p class="absolute top-0 left-0 w-full text-center text-[10px] text-white">I have no clue what I am doing help</p> </div> </div> </div> </div> </div> <div id="projects"> <section class="max-w-6xl mx-auto py-8 px-8"> <h2 class="text-3xl mb-4">Cool stuff I made</h2> <div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-4"> <a referrerpolicy="no-referrer" href="https://github.com/3kh0/website-v4/"> <div class="bg-gray-700 p-4 rounded-xl shadow-lg hover:bg-gray-800 transition duration-300"> <h3 class="text-xl font-semibold mb-2">3kh0 website</h3> <p class="text-sm">Vastly popular website with games</p> </div> </a> <a referrerpolicy="no-referrer" href="https://github.com/3kh0/ChessSword"> <div class="bg-gray-700 p-4 rounded-xl shadow-lg hover:bg-gray-800 transition duration-300"> <h3 class="text-xl font-semibold mb-2">Chess Sword</h3> <p class="text-sm">Chess bot to highlight the best moves</p> </div> </a> <a referrerpolicy="no-referrer" href="https://github.com/3kh0/3kh0-Assets"> <div class="bg-gray-700 p-4 rounded-xl shadow-lg hover:bg-gray-800 transition duration-300"> <h3 class="text-xl font-semibold mb-2">3kh0 Assets</h3> <p 
class="text-sm">Tons of game files for free use</p> </div> </a> <a referrerpolicy="no-referrer" href="https://github.com/3kh0/soundboard"> <div class="bg-gray-700 p-4 rounded-xl shadow-lg hover:bg-gray-800 transition duration-300"> <h3 class="text-xl font-semibold mb-2">Soundboard</h3> <p class="text-sm">Simple online soundboard app</p> </div> </a> <a referrerpolicy="no-referrer" href="https://github.com/3kh0/gamejamsnake"> <div class="bg-gray-700 p-4 rounded-xl shadow-lg hover:bg-gray-800 transition duration-300"> <h3 class="text-xl font-semibold mb-2">Arcade Snake</h3> <p class="text-sm">Simple version of snake for the web</p> </div> </a> <a referrerpolicy="no-referrer" href="https://github.com/3kh0/echodown"> <div class="bg-gray-700 p-4 rounded-xl shadow-lg hover:bg-gray-800 transition duration-300"> <h3 class="text-xl font-semibold mb-2">Echo Down</h3> <p class="text-sm">A fake DDoS/Network stresser</p> </div> </a> </div> </section> </div> <!-- blog section --> <!-- <section class="max-w-6xl mx-auto py-8 px-8"> <h2 class="text-3xl mb-4">Blog Posts</h2> <div id="blog-container" class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-4"></div> <script type="module" src="/src/scripts/blog/indexblog.js"></script> </section> <div id="blog-content" class="fixed top-0 left-0 p-20 inset-0 flex items-center justify-center z-50 hidden bg-black bg-opacity-50"></div> --> <!-- end blog section --> <div id="art"> <section class="max-w-6xl mx-auto py-8 px-8 text-center"> <h2 class="text-3xl mb-4 text-left">Cool Art</h2> <div id="art-container" class="max-w-2xl mx-auto"> <a id="art-link" href="#" target="_blank" title="Loading..." 
referrerpolicy="no-referrer"> <img id="art-image" class="w-[100%] max-w-[32rem] mx-auto mb-4 rounded-lg shadow-lg" alt="Artwork" loading="eager" /> </a> <p id="artist-name" class="text-lg font-semibold"></p> <p id="art-description" class="text-sm"></p> <div class="mt-4"> <button id="next-art-button" class="bg-blue-500 shadow-lg text-white py-2 px-4 w-32 rounded-xl hover:bg-blue-600 transition duration-300">Next</button> </div> </div> </section> </div> <div id="connect"> <section class="max-w-6xl mx-auto py-8 px-8"> <h2 class="text-3xl mb-4">Stalk me everywhere</h2> <div class="grid sm:grid-cols-2 md:grid-cols-4 gap-4"> <div> <a href="mailto:echo-the-coder@tuta.io"> <button class="bg-green-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-green-600 transition duration-300 w-full"><i class="fas fa-envelope fa-lg"></i> Email address</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://discord.com/users/1056383394470182922"> <button class="bg-blue-600 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-blue-700 transition duration-300 w-full"><i class="fab fa-discord fa-lg"></i> Discord profile</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://steamcommunity.com/id/3kh0_"> <button class="bg-black text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-gray-800 transition duration-300 w-full"><i class="fab fa-steam fa-lg"></i> Steam profile</button> </a> </div> <div> <a rel="me" href="https://defcon.social/@3kh0"> <button class="bg-purple-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-purple-600 transition duration-300 w-full"><i class="fab fa-mastodon fa-lg"></i> Mastodon profile</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://github.com/3kh0"> <button class="bg-black text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-gray-800 transition duration-300 w-full"><i class="fab fa-github fa-lg"></i> GitHub profile</button> </a> </div> <div> <a 
referrerpolicy="no-referrer" href="https://odysee.com/@3kh0:a"> <button class="bg-red-600 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-red-700 transition duration-300 w-full"><i class="fa-brands fa-odysee fa-lg"></i> Odysee channel</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://t.me/echoontop"> <button class="bg-blue-600 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-blue-700 transition duration-300 w-full"><i class="fab fa-telegram fa-lg"></i> Telegram account</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://www.last.fm/user/realecho"> <button class="bg-red-600 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-red-700 transition duration-300 w-full"><i class="fab fa-lastfm fa-lg"></i> Last.fm profile</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://soundcloud.com/3kh0"> <button class="bg-orange-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-orange-600 transition duration-300 w-full"><i class="fab fa-soundcloud fa-lg"></i> SoundCloud profile</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://etherscan.io/address/0xcde3a3dece1f80ce6371d4ed2a2d92017a2a624a"> <button class="bg-blue-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-blue-600 transition duration-300 w-full"><i class="fab fa-ethereum fa-lg"></i> Ethereum wallet</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://www.blockchain.com/explorer/addresses/btc/33WimAtvLLmEREJBUS2wc8ev7tjmzCU2Mq"> <button class="bg-yellow-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-yellow-600 transition duration-300 w-full"><i class="fab fa-bitcoin fa-lg"></i> Bitcoin wallet</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://www.youtube.com/@3kh0"> <button class="bg-red-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-red-600 transition duration-300 w-full"><i class="fab fa-youtube fa-lg"></i> 
YouTube channel</button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://www.codeberg.org/3kh0"> <button class="bg-black text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-gray-600 transition duration-300 w-full d-flex align-items-center"> <i class="fas fa-ice-cream fa-lg"></i> Codeberg profile </button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://ftp.3kh0.net"> <button class="bg-green-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-green-600 transition duration-300 w-full"> <i class="fas fa-server fa-lg"></i> File server access </button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://bsky.app/profile/3kh0.bsky.social"> <button class="bg-blue-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-blue-600 transition duration-300 w-full"> <i class="fas fa-cloud fa-lg"></i> Bluesky profile </button> </a> </div> <div> <a referrerpolicy="no-referrer" href="https://github.com/sponsors/3kh0"> <button class="bg-pink-500 text-white text-lg shadow-lg py-2 px-4 rounded-xl hover:bg-pink-600 transition duration-300 w-full"> <i class="fas fa-heart fa-lg"></i> GitHub Sponsors </button> </a> </div> </div> </section> </div> <div id="footer"> <section class="max-w-6xl mx-auto py-8 px-8"> <p class="text-center text-m">Made by 3kh0, built using Vite and Tailwind</p> </section> </div> <div id="nothingSusHere"></div> <script defer type="module" src="/src/scripts/loader.js"></script> <script defer type="module" src="/src/scripts/adblockNotice.js"></script> <script defer type="module" src="/src/scripts/artDisplay.js"></script> <script defer type="module" src="/src/scripts/index.js"></script> </body> </html>
<?php

/**
 * The admin-specific functionality of the plugin.
 *
 * Defines the plugin name, version, and two hooks to
 * enqueue the admin-facing stylesheet and JavaScript.
 * As you add hooks and methods, update this description.
 *
 * @package Plugnmeet
 * @subpackage Plugnmeet/admin
 * @author Jibon Costa <jibon@mynaparrot.com>
 */

// Abort direct access: the constant is only defined when loaded by WordPress.
if ( ! defined( 'PLUGNMEET_BASE_NAME' ) ) {
	die;
}

class Plugnmeet_Admin {

	/**
	 * The ID of this plugin.
	 *
	 * @since 1.0.0
	 * @access private
	 * @var string $plugin_name The ID of this plugin.
	 */
	private $plugin_name;

	/**
	 * The unique prefix of this plugin.
	 *
	 * @since 1.0.0
	 * @access private
	 * @var string $plugin_prefix The string used to uniquely prefix technical functions of this plugin.
	 */
	private $plugin_prefix;

	/**
	 * The version of this plugin.
	 *
	 * @since 1.0.0
	 * @access private
	 * @var string $version The current version of this plugin.
	 */
	private $version;

	// Plugin settings loaded once from the "plugnmeet_settings" option,
	// cast to stdClass for property-style access.
	private $setting_params;

	/**
	 * Initialize the class and set its properties.
	 *
	 * @param string $plugin_name The name of this plugin.
	 * @param string $plugin_prefix The unique prefix of this plugin.
	 * @param string $version The version of this plugin.
	 *
	 * @since 1.0.0
	 */
	public function __construct( $plugin_name, $plugin_prefix, $version ) {
		$this->plugin_name = $plugin_name;
		$this->plugin_prefix = $plugin_prefix;
		$this->version = $version;
		$this->setting_params = (object) get_option( "plugnmeet_settings" );
	}

	/**
	 * Register the stylesheets for the admin area.
	 *
	 * @param string $hook_suffix The current admin page.
	 *
	 * @since 1.0.0
	 */
	public function enqueue_styles( $hook_suffix ) {
		// Only load on the plugin's own admin pages (hook contains "plugnmeet").
		if ( preg_match( "/plugnmeet/", $hook_suffix ) ) {
			wp_enqueue_style( 'bootstrap-min', plugin_dir_url( __FILE__ ) . 'css/bootstrap.min.css' );
			wp_enqueue_style( 'bootstrap-colorpicker', plugin_dir_url( __FILE__ ) . 'css/bootstrap-colorpicker.min.css' );
			wp_enqueue_style( $this->plugin_name, plugin_dir_url( __FILE__ ) . 'css/plugnmeet-admin.css' );
		}
	}

	/**
	 * Register the JavaScript for the admin area.
	 *
	 * @param string $hook_suffix The current admin page.
	 *
	 * @since 1.0.0
	 */
	public function enqueue_scripts( $hook_suffix ) {
		wp_enqueue_media();
		if ( preg_match( "/plugnmeet/", $hook_suffix ) ) {
			wp_enqueue_script( "bootstrap-bundle", plugin_dir_url( __FILE__ ) . 'js/bootstrap.bundle.js', array(), $this->version );
			wp_enqueue_script( "bootstrap-colorpicker", plugin_dir_url( __FILE__ ) . 'js/bootstrap-colorpicker.min.js', array(), $this->version );
			wp_enqueue_script( $this->plugin_name, plugin_dir_url( __FILE__ ) . 'js/plugnmeet-admin.js', array( 'jquery' ), $this->version, false );
		}
		// Nonce is exposed to the admin JS as the global `ajax_admin.nonce`.
		// NOTE: wp_localize_script runs even off plugin pages, where the
		// handle was never enqueued — it is a no-op there.
		$nonce = wp_create_nonce( 'ajax_admin' );
		$script = array(
			'nonce' => $nonce
		);
		wp_localize_script( $this->plugin_name, 'ajax_admin', $script );
	}

	/**
	 * Registers the top-level Plug-N-Meet admin menu plus the
	 * Rooms / Recordings / Settings submenu pages.
	 *
	 * @param string $hook_suffix The current admin page (unused here).
	 */
	public function addMenuPages( $hook_suffix ) {
		if ( ! class_exists( "Plugnmeet_RoomPage" ) ) {
			require plugin_dir_path( dirname( __FILE__ ) ) . 'admin/class-plugnmeet-room-page.php';
		}
		$menusPage = new Plugnmeet_RoomPage();
		// Parent entry has no callback; the first submenu (same slug) renders it.
		add_menu_page( __( 'Plug-N-Meet', 'plugnmeet' ), __( 'Plug-N-Meet', 'plugnmeet' ), 'manage_options', 'plugnmeet', '', 'dashicons-admin-site-alt', null );
		add_submenu_page( 'plugnmeet', __( 'Manage Rooms', 'plugnmeet' ), __( 'Rooms', 'plugnmeet' ), 'manage_options', 'plugnmeet', [ $menusPage, 'roomsPage' ], 1 );
		add_submenu_page( 'plugnmeet', __( 'Manage recordings', 'plugnmeet' ), __( 'Recordings', 'plugnmeet' ), 'manage_options', 'plugnmeet-recordings', [ $menusPage, 'recordingsPage' ], 2 );
		add_submenu_page( 'plugnmeet', __( 'Settings', 'plugnmeet' ), __( 'Settings', 'plugnmeet' ), 'manage_options', 'plugnmeet-settings', [ $menusPage, 'settingsPage' ], 3 );
	}

	/**
	 * Loads the settings page class and registers the plugin settings.
	 */
	public function register_settings() {
		if ( ! class_exists( "Plugnmeet_SettingsPage" ) ) {
			require plugin_dir_path( dirname( __FILE__ ) ) . 'admin/class-plugnmeet-settings-page.php';
		}
		$settingPage = new Plugnmeet_SettingsPage();
		$settingPage->plugnmeet_register_settings();
	}

	/**
	 * AJAX handler: downloads the client zip (configured URL or the GitHub
	 * latest-release fallback) and extracts it into the plugin's public
	 * directory, replacing the previous client.
	 *
	 * Always responds with JSON {status, msg}; wp_send_json() terminates
	 * the request, so each error branch below is a hard exit.
	 */
	public function update_client() {
		$output = new stdClass();
		$output->status = false;
		$output->msg = __( 'Token mismatched', 'plugnmeet' );
		// NOTE(review): assumes $_REQUEST['nonce'] is always sent; an unset
		// index raises a PHP notice — confirm the admin JS always posts it.
		if ( ! wp_verify_nonce( $_REQUEST['nonce'], 'ajax_admin' ) ) {
			wp_send_json( $output );
		}
		$params = $this->setting_params;
		$client_download_url = $params->client_download_url;
		if ( empty( $client_download_url ) ) {
			$client_download_url = "https://github.com/mynaparrot/plugNmeet-client/releases/latest/download/client.zip";
		}
		$response = wp_remote_get( $client_download_url, array( "timeout" => 60 ) );
		if ( is_wp_error( $response ) ) {
			$output->msg = $response->errors;
			wp_send_json( $output );
		}
		// Buffer the whole zip into the system temp dir before extracting.
		$data = wp_remote_retrieve_body( $response );
		$clientZipFile = get_temp_dir() . "client.zip";
		$file = fopen( $clientZipFile, "w+" );
		if ( ! $file ) {
			$output->msg = __( "Can't write file", "plugnmeet" );
			wp_send_json( $output );
		}
		fputs( $file, $data );
		fclose( $file );
		$zip = new ZipArchive;
		$res = $zip->open( $clientZipFile );
		if ( $res === true ) {
			$extractPath = PLUGNMEET_ROOT_PATH . "/public/";
			// for safety let's delete client first
			$this->deleteDir( $extractPath . "client" );
			$zip->extractTo( $extractPath );
			$zip->close();
			unlink( $clientZipFile );
			$output->status = true;
			$output->msg = __( "Updated client successfully", "plugnmeet" );
		} else {
			$output->msg = __( "Unzip failed", "plugnmeet" );
		}
		wp_send_json( $output );
	}

	/**
	 * Recursively deletes a directory and everything inside it.
	 * Silently returns when the path is not a directory.
	 *
	 * @param string $dirPath Path of the directory to remove.
	 */
	private function deleteDir( $dirPath ) {
		if ( ! is_dir( $dirPath ) ) {
			return;
		}
		if ( substr( $dirPath, strlen( $dirPath ) - 1, 1 ) != '/' ) {
			$dirPath .= '/';
		}
		$it = new RecursiveDirectoryIterator( $dirPath, RecursiveDirectoryIterator::SKIP_DOTS );
		// CHILD_FIRST so files/subdirs are removed before their parent rmdir.
		$files = new RecursiveIteratorIterator( $it, RecursiveIteratorIterator::CHILD_FIRST );
		foreach ( $files as $file ) {
			if ( $file->isDir() ) {
				rmdir( $file->getRealPath() );
			} else {
				unlink( $file->getRealPath() );
			}
		}
		rmdir( $dirPath );
	}

	/**
	 * AJAX handler: creates or updates a row in {prefix}plugnmeet_rooms.
	 * Inserts (with a freshly generated room UUID) when no 'id' is posted,
	 * otherwise updates the existing row. Responds with JSON {status, msg}.
	 */
	public function save_room_data() {
		global $wpdb;
		$output = new stdClass();
		$output->status = false;
		$output->msg = __( 'Token mismatched', 'plugnmeet' );
		// Note: this endpoint uses its own 'save_room_data' nonce action,
		// unlike the other handlers which use 'ajax_admin'.
		if ( ! wp_verify_nonce( $_REQUEST['nonce'], 'save_room_data' ) ) {
			wp_send_json( $output );
		}
		if ( ! class_exists( "PlugnmeetHelper" ) ) {
			require plugin_dir_path( dirname( __FILE__ ) ) . 'helpers/helper.php';
		}
		// for preventing display error. Room id should be always unique
		$room_id = "";
		$id = isset( $_POST['id'] ) ? sanitize_text_field( $_POST['id'] ) : 0;
		$room_title = isset( $_POST['room_title'] ) ? sanitize_text_field( $_POST['room_title'] ) : "";
		$description = isset( $_POST['description'] ) ? wp_kses( $_POST['description'], wp_kses_allowed_html( "post" ) ) : "";
		$moderator_pass = isset( $_POST['moderator_pass'] ) ? sanitize_text_field( $_POST['moderator_pass'] ) : "";
		$attendee_pass = isset( $_POST['attendee_pass'] ) ? sanitize_text_field( $_POST['attendee_pass'] ) : "";
		$welcome_message = isset( $_POST['welcome_message'] ) ? sanitize_textarea_field( $_POST['welcome_message'] ) : "";
		$max_participants = isset( $_POST['max_participants'] ) ? sanitize_text_field( $_POST['max_participants'] ) : 0;
		$published = isset( $_POST['published'] ) ? sanitize_text_field( $_POST['published'] ) : 1;
		// NOTE(review): 'roles' is stored unsanitized (json_encode'd below) —
		// confirm upstream validation before relying on its contents.
		$roles = isset( $_POST['roles'] ) ? $_POST['roles'] : array();
		// Collect only the metadata keys the helper declares; missing keys
		// default to an empty array so the stored JSON shape is stable.
		$room_metadata = [];
		foreach ( PlugnmeetHelper::$roomMetadataItems as $item ) {
			if ( isset( $_POST[ $item ] ) ) {
				$room_metadata[ $item ] = $_POST[ $item ];
			} else {
				$room_metadata[ $item ] = [];
			}
		}
		// Auto-generate any missing password, but never let the two match.
		if ( empty( $moderator_pass ) ) {
			$moderator_pass = PlugnmeetHelper::secureRandomKey( 10 );
		}
		if ( empty( $attendee_pass ) ) {
			$attendee_pass = PlugnmeetHelper::secureRandomKey( 10 );
		}
		if ( $attendee_pass === $moderator_pass ) {
			$output->msg = __( "attendee & moderator password can't be same", 'plugnmeet' );
			wp_send_json( $output );
		}
		if ( ! $id ) {
			if ( ! class_exists( 'plugNmeetConnect' ) ) {
				require plugin_dir_path( dirname( __FILE__ ) ) . 'helpers/plugNmeetConnect.php';
			}
			$options = $this->setting_params;
			$connect = new plugNmeetConnect( $options );
			$room_id = $connect->getUUID();
		}
		if ( ! $id ) {
			$wpdb->insert( $wpdb->prefix . "plugnmeet_rooms",
				array(
					'room_id' => $room_id,
					'room_title' => $room_title,
					'description' => $description,
					'moderator_pass' => $moderator_pass,
					'attendee_pass' => $attendee_pass,
					'welcome_message' => $welcome_message,
					'max_participants' => $max_participants,
					'room_metadata' => json_encode( $room_metadata ),
					'roles' => json_encode( $roles ),
					'published' => $published,
					'created_by' => get_current_user_id()
				),
				array( '%s', '%s', '%s', '%s', '%s', '%s', '%d', '%s', '%s', '%d', '%d' ) );
			if ( $wpdb->insert_id ) {
				$output->status = true;
				$output->msg = __( 'Successfully saved room data', 'plugnmeet' );
			} else {
				$output->msg = $wpdb->last_error;
			}
		} else {
			$result = $wpdb->update( $wpdb->prefix . "plugnmeet_rooms",
				array(
					'room_title' => $room_title,
					'description' => $description,
					'moderator_pass' => $moderator_pass,
					'attendee_pass' => $attendee_pass,
					'welcome_message' => $welcome_message,
					'max_participants' => $max_participants,
					'room_metadata' => json_encode( $room_metadata ),
					'roles' => json_encode( $roles ),
					'published' => $published,
					'modified_by' => get_current_user_id()
				),
				array( 'id' => $id ),
				array( '%s', '%s', '%s', '%s', '%s', '%d', '%s', '%s', '%d', '%d' ),
				array( '%d' ) );
			// $wpdb->update returns false on error, 0 when nothing changed.
			if ( $result === false ) {
				$output->msg = $wpdb->last_error;
			} else {
				$output->status = true;
				$output->msg = __( 'Successfully updated room data', 'plugnmeet' );
			}
		}
		wp_send_json( $output );
	}

	/**
	 * AJAX handler: deletes the room row with the posted 'id'.
	 * Responds with JSON {status, msg}.
	 */
	public function delete_room() {
		global $wpdb;
		$output = new stdClass();
		$output->status = false;
		$output->msg = __( 'Token mismatched', 'plugnmeet' );
		if ( ! wp_verify_nonce( $_REQUEST['nonce'], 'ajax_admin' ) ) {
			wp_send_json( $output );
		}
		$id = isset( $_POST['id'] ) ? sanitize_text_field( $_POST['id'] ) : 0;
		if ( ! $id ) {
			$output->msg = __( "No id was sent", 'plugnmeet' );
			wp_send_json( $output );
		}
		$result = $wpdb->delete(
			$wpdb->prefix . 'plugnmeet_rooms',
			[ 'id' => $id ],
			[ '%d' ],
		);
		if ( $result === false ) {
			$output->msg = $wpdb->last_error;
		} else {
			$output->status = true;
			$output->msg = "success";
		}
		wp_send_json( $output );
	}
}
import 'settings/PacientePropriedade.dart';
import 'settings/connection.dart';
import 'dart:convert';
import 'dart:async';
import 'package:http/http.dart' as http;
import 'Usuario.dart';

/// Patient model: a [Usuario] extended with a SUS card number, plus REST
/// helpers for listing, inserting, and logging in patients.
class Paciente extends Usuario {
  // NOTE(review): `codigo` is declared here even though the same value is
  // forwarded to the superclass constructor — confirm which copy callers read.
  int codigo = 0;
  String cartaoSUS = "";

  Paciente(codigo, nome, sobrenome, dataNascimento, cpf, sexo, telefone,
      endereco, cidade, email, senha, this.cartaoSUS)
      : super(codigo, nome, sobrenome, dataNascimento, cpf, sexo, telefone,
            endereco, cidade, email, senha);

  /// Debug helper: prints every field of this patient to the console.
  void apresentar() {
    print(
        "Meu nome é $nome, $sobrenome, $dataNascimento, $cpf, $sexo, $telefone, $endereco, $cidade, $email, $senha, $cartaoSUS");
  }

  /// Builds a patient from a decoded JSON map; superclass fields are read
  /// by `super.fromMap`, then the patient-specific keys are read here.
  Paciente.fromMap(Map map) : super.fromMap(map) {
    codigo = map[PacientePropriedades.codigo];
    cartaoSUS = map[PacientePropriedades.cartao_sus];
  }

  /// Serialises this patient, extending the superclass map.
  Map<String, Object> toMap() {
    Map<String, Object> map = super.toMap(); // call the parent class toMap()
    map[PacientePropriedades.cartao_sus] =
        cartaoSUS; // add the cartao_sus property
    return map;
  }

  /// GET /paciente — returns every patient from the API, or an empty list
  /// when the request or decoding fails.
  static Future<List<Paciente>> carregarPacientes() async {
    try {
      var url = Uri.http("${connection.address}", '/paciente');
      http.Response resposta = await http.get(url);
      var dados = json.decode(resposta.body);
      List<Paciente> lista = [];
      for (Map paciente in dados) {
        lista.add(Paciente.fromMap(paciente));
      }
      return lista;
    } catch (e) {
      print('Erro desconhecido: $e'); // handle the error as needed
      return [];
    }
  }

  /// POST /paciente — inserts [paciente] as JSON; returns the HTTP status
  /// code (2xx means success).
  Future<int> inserirPaciente(Paciente paciente) async {
    var url = Uri.http("${connection.address}", '/paciente');
    Map<String, String> headers = {};
    headers['Content-Type'] = 'application/json';
    http.Response resposta = await http.post(url,
        headers: headers,
        body: jsonEncode(paciente.toMap()),
        encoding: Encoding.getByName('utf-8'));
    if (resposta.statusCode >= 200 && resposta.statusCode < 300) {
      print('Inserção bem sucedida');
    } else {
      print('Deu ruim na inserção ${url.toString()} ${resposta.statusCode}');
    }
    return resposta.statusCode;
  }

  /// POST /paciente/login — returns the matching patient, or a blank
  /// placeholder [Paciente] when no credentials match.
  static Future<Paciente> login(email, senha) async {
    // NOTE(review): the placeholder passes `1` in the `cidade` position while
    // every other text field is "" — looks like a type/position mismatch;
    // confirm against the Usuario constructor.
    Paciente user =
        new Paciente(0, "", "", DateTime.now(), "", "", "", "", 1, "", "", "");
    var url = Uri.http("${connection.address}", '/paciente/login');
    Map<String, String> headers = {};
    headers['Content-Type'] = 'application/json';
    http.Response resposta = await http.post(url,
        headers: headers,
        body: jsonEncode({"email": email, "senha": senha}),
        encoding: Encoding.getByName('utf-8'));
    var dados = json.decode(resposta.body);
    List<Paciente> lista = [];
    for (Map paciente in dados) {
      lista.add(Paciente.fromMap(paciente));
    }
    return lista.isEmpty ? user : lista[0];
  }
}
import { closeIcon } from 'assets/images/icons' import classnames from 'classnames' import React, { FC, useEffect, useState } from 'react' import cl from './style.module.scss' import { AlertType } from './types' interface CustomAlertProps { message: string type: AlertType styleTypes: AlertType[] } const CustomAlert: FC<CustomAlertProps> = ({ styleTypes: styleTypesProps = [], message, type }) => { const styleTypes = [...styleTypesProps] const alertClasses = classnames([ cl.alert, ...styleTypes.map((styleType) => cl[`alert${styleType}`]) ]) const [isOpen, setIsOpen] = useState(false) const clickHandler = () => { setIsOpen(false) } useEffect(() => { setIsOpen(true) }, [message]) if (!isOpen) { return <></> } return ( <div className={alertClasses}> <div className={cl.alertContainer}> <span className={cl.header}> {type === AlertType.ERROR ? 'Ошибка!' : 'Успешно!'} </span> <span className={cl.desc}>{message}</span> <button onClick={() => clickHandler()} className={cl.closeBtn}> <img src={closeIcon} alt="close" /> </button> </div> </div> ) } export default CustomAlert
#!/usr/bin/python3
"""Rectangle class module"""
from models.base import Base


class Rectangle(Base):
    """A rectangle with validated integer size (width/height) and
    position (x/y), inheriting id management from Base."""

    def __init__(self, width, height, x=0, y=0, id=None):
        """Initialize a Rectangle.

        Args:
            width (int): rectangle width, must be > 0.
            height (int): rectangle height, must be > 0.
            x (int): horizontal offset, must be >= 0.
            y (int): vertical offset, must be >= 0.
            id (int): identifier managed by Base (auto-assigned if None).

        Raises:
            TypeError: if any argument is not an int.
            ValueError: if width/height <= 0 or x/y < 0.
        """
        # Assign through the properties so each value is validated.
        self.width = width
        self.height = height
        self.x = x
        self.y = y
        super().__init__(id)

    @property
    def width(self):
        """int: width of the Rectangle (> 0)."""
        return self.__width

    @width.setter
    def width(self, value):
        """Validate and set width."""
        if type(value) is not int:
            raise TypeError("width must be an integer")
        if value <= 0:
            raise ValueError("width must be > 0")
        self.__width = value

    @property
    def height(self):
        """int: height of the Rectangle (> 0)."""
        return self.__height

    @height.setter
    def height(self, value):
        """Validate and set height."""
        if type(value) is not int:
            raise TypeError("height must be an integer")
        if value <= 0:
            raise ValueError("height must be > 0")
        self.__height = value

    @property
    def x(self):
        """int: horizontal offset of the Rectangle (>= 0)."""
        return self.__x

    @x.setter
    def x(self, value):
        """Validate and set x."""
        if type(value) is not int:
            raise TypeError("x must be an integer")
        if value < 0:
            raise ValueError("x must be >= 0")
        self.__x = value

    @property
    def y(self):
        """int: vertical offset of the Rectangle (>= 0)."""
        return self.__y

    @y.setter
    def y(self, value):
        """Validate and set y."""
        if type(value) is not int:
            # FIX: message was "y must be an int", inconsistent with the
            # "must be an integer" wording used by every other setter.
            raise TypeError("y must be an integer")
        if value < 0:
            raise ValueError("y must be >= 0")
        self.__y = value

    def area(self):
        """Return the area (width * height) of the Rectangle."""
        return self.__width * self.__height

    def display(self):
        """Print the rectangle to stdout as rows of '#' characters.

        Prints a single empty line when width or height is 0.
        """
        if self.width == 0 or self.height == 0:
            print("")
            return
        # FIX: replaced a list comprehension used only for its print side
        # effects with straightforward string repetition (same output).
        for _ in range(self.height):
            print("#" * self.width)
import React from 'react'; import { Formik, Field, Form } from 'formik'; import { initialValuesSignIn, validationSchemaSignIn } from '../../validation/validation-signin'; import OutlinedInput from '@mui/material/OutlinedInput/OutlinedInput'; import { FormControl } from '@mui/material'; import InputLabel from '@mui/material/InputLabel'; import FormHelperText from '@mui/material/FormHelperText'; import Box from '@mui/material/Box'; import Grid from '@mui/material/Grid'; import { useTranslation } from 'react-i18next'; import InputAdornment from '@mui/material/InputAdornment/InputAdornment'; import IconButton from '@mui/material/IconButton/IconButton'; import VisibilityOff from '@mui/icons-material/VisibilityOff'; import Visibility from '@mui/icons-material/Visibility'; import Checkbox from '@mui/material/Checkbox'; import FormControlLabel from '@mui/material/FormControlLabel'; import LoadingButton from '@mui/lab/LoadingButton/LoadingButton'; const sleep = (ms: any) => new Promise((r) => setTimeout(r, ms)); export const SignInForm = ({ onSubmit, loading }: { onSubmit: any; loading: boolean }) => { const [showPassword, setShowPassword] = React.useState({ showPassword: false, }); const { t } = useTranslation(); const handleClickShowPassword = () => { setShowPassword({ showPassword: !showPassword.showPassword, }); }; const handleMouseDownPassword = (event: any) => { event.preventDefault(); }; return ( <Box sx={{ mt: 1, pb: 2 }}> <Formik initialValues={initialValuesSignIn} validationSchema={validationSchemaSignIn} onSubmit={(values) => onSubmit(values)}> {({ values, setFieldValue }) => ( <Form> <Grid container spacing={2}> <Grid item xs={12}> <Field name="email"> {({ field, // { name, value, onChange, onBlur } form: { touched, errors }, // also values, setXXXX, handleXXXX, dirty, isValid, status, etc. meta, }: { field: any; // { name, value, onChange, onBlur } form: { touched: any; errors: any }; // also values, setXXXX, handleXXXX, dirty, isValid, status, etc. 
meta: any; }) => ( <FormControl fullWidth error={meta.touched && Boolean(meta.error)}> <InputLabel htmlFor="email">{t<string>('common.label_email')}</InputLabel> <OutlinedInput id="email" name="email" color="secondary" type="email" label={t<string>('common.label_email')} aria-describedby="email-helper-text" {...field} inputProps={{ 'data-testid': 'email', }} /> {meta.touched && meta.error && ( <FormHelperText id="email-helper-text" data-testid="email-helper-text"> {meta.error} </FormHelperText> )} </FormControl> )} </Field> </Grid> <Grid item xs={12}> <Field name="password"> {({ field, // { name, value, onChange, onBlur } form: { touched, errors }, // also values, setXXXX, handleXXXX, dirty, isValid, status, etc. meta, }: { field: any; // { name, value, onChange, onBlur } form: { touched: any; errors: any }; // also values, setXXXX, handleXXXX, dirty, isValid, status, etc. meta: any; }) => ( <FormControl fullWidth error={meta.touched && Boolean(meta.error)}> <InputLabel htmlFor="password">{t<string>('common.label_password')}</InputLabel> <OutlinedInput id="password" name="password" color="secondary" type={showPassword.showPassword ? 'text' : 'password'} label={t<string>('common.label_password')} aria-describedby="password-helper-text" {...field} inputProps={{ 'data-testid': 'password', }} endAdornment={ <InputAdornment position="end"> <IconButton aria-label="toggle password visibility" onClick={handleClickShowPassword} onMouseDown={handleMouseDownPassword} edge="end"> {showPassword.showPassword ? 
<VisibilityOff /> : <Visibility />} </IconButton> </InputAdornment> } /> {meta.touched && meta.error && ( <FormHelperText id="password-helper-text" data-testid="password-helper-text"> {meta.error} </FormHelperText> )} </FormControl> )} </Field> </Grid> <Grid item xs={12}> <FormControlLabel checked={values.rememberMe} onChange={() => setFieldValue('rememberMe', !values.rememberMe)} control={<Checkbox color="secondary" />} label={t<string>('signin.label_remember_me')} /> <LoadingButton fullWidth variant="contained" color="secondary" type="submit" data-testid="submit" sx={{ mt: 3, mb: 2 }} loading={loading}> {t<string>('signin.label_login')} </LoadingButton> </Grid> </Grid> </Form> )} </Formik> </Box> ); };
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';

import { OnboardingRoutingModule } from './onboarding-routing.module';
import { OnboardingListComponent } from './onboarding-list/onboarding-list.component';
import { OnboardingDetailsComponent } from './onboarding-details/onboarding-details.component';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';
import { NgbModule } from '@ng-bootstrap/ng-bootstrap';
import { DataTablesModule } from 'angular-datatables';
import { SharedModule } from '../../shared';
import { AgmCoreModule } from '@agm/core';
import { OnboardingRejectComponent } from './onboarding-reject/onboarding-reject.component';
import { environment } from '../../../environments/environment';
import { NgSelectModule } from '@ng-select/ng-select';
import { TranslateModule, TranslateLoader } from '@ngx-translate/core';
import { TranslateHttpLoader } from '@ngx-translate/http-loader';
import { HttpClient } from '@angular/common/http';

// Factory for ngx-translate's loader: fetches translation JSON files over HTTP.
// Declared as a named exported function (not an arrow) so AOT compilation can reference it.
export function HttpLoaderFactory(http: HttpClient) {
  return new TranslateHttpLoader(http);
}

// Feature module for the onboarding flow: list view, details view, and the
// reject dialog (which is also exported for use outside this module).
@NgModule({
  declarations: [OnboardingListComponent, OnboardingDetailsComponent, OnboardingRejectComponent],
  imports: [
    CommonModule,
    OnboardingRoutingModule,
    NgbModule,
    DataTablesModule,
    FormsModule,
    ReactiveFormsModule,
    SharedModule,
    NgSelectModule,
    // Google Maps integration, keyed from the environment config, with the Places library.
    AgmCoreModule.forRoot({
      apiKey: `${environment.GOOGLE_MAP_API_KEY}`,
      libraries: ['places']
    }),
    // forChild(): this (non-root) module provides its own translate loader.
    TranslateModule.forChild({
      loader: {
        provide: TranslateLoader,
        useFactory: HttpLoaderFactory,
        deps: [HttpClient]
      }
    }),
  ],
  exports: [
    OnboardingRejectComponent
  ]
})
export class OnboardingModule { }
const fs = require('fs/promises'); const path = require('path'); const leastFilesQuantity = 10; // Indicates processing time function timestamp() { return `${performance.now().toFixed(2)}ms`; } const dataPath = path.join(__dirname, './data'); // Get an array from arrays with unique users async function getArrsOfUniqueUsers() { try { const filesNamesArr = await fs.readdir(dataPath); const arrsOfUniqueUsers = await Promise.all( filesNamesArr.map(async path => { const data = (await fs.readFile(`${dataPath}/${path}`, 'utf-8')) || []; return [...new Set(data.split('\n'))]; }) ); return arrsOfUniqueUsers; } catch (error) { console.log(error.message); } } //Looking for a number of users who are present in a particular collaboration files async function findExist(arrsOfUniqueUsers, quantity) { const usernameCounts = {}; for (const arrOfUniqueUsers of arrsOfUniqueUsers) { arrOfUniqueUsers.forEach(username => { if (usernameCounts[username]) { usernameCounts[username] += 1; } else { usernameCounts[username] = 1; } }); } const existInNessFiles = Object.values(usernameCounts).filter( value => value >= quantity ); return existInNessFiles.length; } // Determine how many unique usernames there are in all the specified files async function uniqueValues() { const arrsOfUniqueUsers = await getArrsOfUniqueUsers(); let uniqueUsersSet = new Set(); arrsOfUniqueUsers.forEach(arrOfUsers => { arrOfUsers.forEach(user => { uniqueUsersSet.add(user); }); }); console.log(`Unique Values:${uniqueUsersSet.size} Time:${timestamp()}`); return uniqueUsersSet.size; } // Determine how many usernames occur in all files async function existInAllFiles() { const arrsOfUniqueUsers = await getArrsOfUniqueUsers(); const allFilesQuantity = arrsOfUniqueUsers.length; const filesQuantity = await findExist(arrsOfUniqueUsers, allFilesQuantity); console.log(`Exist in all files:${filesQuantity} Time:${timestamp()}`); return filesQuantity; } // Find out how many usernames occur in at least 10 files async function 
existInAtleastTen() { const arrsOfUniqueUsers = await getArrsOfUniqueUsers(); const filesQuantity = await findExist(arrsOfUniqueUsers, leastFilesQuantity); console.log(`Exist at least ten:${filesQuantity} Time:${timestamp()}`); return filesQuantity; } // -----Uncover the necessary function:----- // uniqueValues(); //129240 // existInAllFiles(); //441 // existInAtleastTen(); //73245
# Fraud Detection with Feature Store<a name="feature-store-fraud-detection-notebook"></a> ## Step 1: Set Up Feature Store<a name="feature-store-setup"></a> To start using Feature Store, create a SageMaker session, boto3 session, and a Feature Store session\. Also, set up the S3 bucket you want to use for your features\. This is your offline store\. The following code uses the SageMaker default bucket and adds a custom prefix to it\. **Note** The role that you use must have the following managed policies attached to it: `AmazonSageMakerFullAccess` and `AmazonSageMakerFeatureStoreAccess`\. ``` import boto3 import sagemaker from sagemaker.session import Session sagemaker_session = sagemaker.Session() region = sagemaker_session.boto_region_name boto_session = boto3.Session(region_name=region) role = sagemaker.get_execution_role() default_bucket = sagemaker_session.default_bucket() prefix = 'sagemaker-featurestore' offline_feature_store_bucket = 's3://{}/{}'.format(default_bucket, prefix) sagemaker_client = boto_session.client(service_name='sagemaker', region_name=region) featurestore_runtime = boto_session.client(service_name='sagemaker-featurestore-runtime', region_name=region) feature_store_session = Session(     boto_session=boto_session,     sagemaker_client=sagemaker_client,     sagemaker_featurestore_runtime_client=featurestore_runtime ) ``` ## Step 2: Load Datasets and Partition Data into Feature Groups<a name="feature-store-load-datasets"></a> Load your data into data frames for each of your features\. You use these data frames after you set up the feature group\. In the fraud detection example, you can see these steps in the following code\. 
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import io

fraud_detection_bucket_name = 'sagemaker-featurestore-fraud-detection'
identity_file_key = 'sampled_identity.csv'
transaction_file_key = 'sampled_transactions.csv'

# Create an S3 client from the boto3 session set up in Step 1.
s3_client = boto_session.client(service_name='s3', region_name=region)

identity_data_object = s3_client.get_object(Bucket=fraud_detection_bucket_name, Key=identity_file_key)
transaction_data_object = s3_client.get_object(Bucket=fraud_detection_bucket_name, Key=transaction_file_key)

identity_data = pd.read_csv(io.BytesIO(identity_data_object['Body'].read()))
transaction_data = pd.read_csv(io.BytesIO(transaction_data_object['Body'].read()))

identity_data = identity_data.round(5)
transaction_data = transaction_data.round(5)

identity_data = identity_data.fillna(0)
transaction_data = transaction_data.fillna(0)

# Feature transformations for this dataset are applied before ingestion into FeatureStore.
# One hot encode card4, card6
encoded_card_bank = pd.get_dummies(transaction_data['card4'], prefix = 'card_bank')
encoded_card_type = pd.get_dummies(transaction_data['card6'], prefix = 'card_type')

transformed_transaction_data = pd.concat([transaction_data, encoded_card_type, encoded_card_bank], axis=1)
transformed_transaction_data = transformed_transaction_data.rename(columns={"card_bank_american express": "card_bank_american_express"})
```

## Step 3: Set Up Feature Groups<a name="feature-store-set-up-feature-groups"></a>

When you set up your feature groups, you need to customize the feature names with a unique name and set up each feature group with the `FeatureGroup` class\. 

```
from sagemaker.feature_store.feature_group import FeatureGroup

feature_group_name = "some string for a name"
feature_group = FeatureGroup(name=feature_group_name, sagemaker_session=feature_store_session)
```

For example, in the fraud detection example, the two feature groups are `identity` and `transaction`\. 
In the following code you can see how the names are customized with a timestamp, and then each group is set up by passing in the name and the session\. ``` import time from time import gmtime, strftime, sleep from sagemaker.feature_store.feature_group import FeatureGroup identity_feature_group_name = 'identity-feature-group-' + strftime('%d-%H-%M-%S', gmtime()) transaction_feature_group_name = 'transaction-feature-group-' + strftime('%d-%H-%M-%S', gmtime()) identity_feature_group = FeatureGroup(name=identity_feature_group_name, sagemaker_session=feature_store_session) transaction_feature_group = FeatureGroup(name=transaction_feature_group_name, sagemaker_session=feature_store_session) ``` ## Step 4: Set Up Record Identifier and Event Time Features<a name="feature-store-set-up-record-identifier-event-time"></a> In this step, you specify a record identifier name and an event time feature name\. This name maps to the column of the corresponding features in your data\. For example, in the fraud detection example, the column of interest is `TransactionID`\. `EventTime` can be appended to your data when no timestamp is available\. In the following code, you can see how these variables are set, and then `EventTime` is appended to both feature’s data\. ``` record_identifier_name = "TransactionID" event_time_feature_name = "EventTime" current_time_sec = int(round(time.time())) identity_data[event_time_feature_name] = pd.Series([current_time_sec]*len(identity_data), dtype="float64") transformed_transaction_data[event_time_feature_name] = pd.Series([current_time_sec]*len(transaction_data), dtype="float64") ``` ## Step 5: Load Feature Definitions<a name="feature-store-load-feature-definitions"></a> You can now load the feature definitions by passing a data frame containing the feature data\. 
In the following code for the fraud detection example, the identity feature and transaction feature are each loaded by using `load_feature_definitions`, and this function automatically detects the data type of each column of data\. For developers using a schema rather than automatic detection, see the [Export Feature Groups from Data Wrangler](https://docs.aws.amazon.com/sagemaker/latest/dg/data-wrangler-data-export.html#data-wrangler-data-export-feature-store) example for code that shows how to load the schema, map it, and add it as a `FeatureDefinition` that you can use to create the `FeatureGroup`\. This example also covers a boto3 implementation, which you can use instead of the SageMaker Python SDK\. ``` identity_feature_group.load_feature_definitions(data_frame=identity_data); # output is suppressed transaction_feature_group.load_feature_definitions(data_frame=transformed_transaction_data); # output is suppressed ``` ## Step 6: Create a Feature Group<a name="feature-store-setup-create-feature-group"></a> In this step, you use the `create` function to create the feature group\. The following code shows all of the available parameters\. The online store is not created by default, so you must set this as `True` if you want to enable it\. The `s3_uri` is the S3 bucket location of your offline store\. 
``` # create a FeatureGroup feature_group.create(     description = "Some info about the feature group",     feature_group_name = feature_group_name,     record_identifier_name = record_identifier_name,     event_time_feature_name = event_time_feature_name,     feature_definitions = feature_definitions,     role_arn = role,     s3_uri = offline_feature_store_bucket,     enable_online_store = True,     online_store_kms_key_id = None,     offline_store_kms_key_id = None,     disable_glue_table_creation = False,     data_catalog_config = None,     tags = ["tag1","tag2"]) ``` The following code from the fraud detection example shows a minimal `create` call for each of the two features groups being created\.  ``` identity_feature_group.create(     s3_uri=offline_feature_store_bucket,     record_identifier_name=record_identifier_name,     event_time_feature_name=event_time_feature_name,     role_arn=role,     enable_online_store=True ) transaction_feature_group.create(     s3_uri=offline_feature_store_bucket,     record_identifier_name=record_identifier_name,     event_time_feature_name=event_time_feature_name,     role_arn=role,     enable_online_store=True ) ``` When you create a feature group, it takes time to load the data, and you need to wait until the feature group is created before you can use it\. You can check status using the following method\. ``` status = feature_group.describe().get("FeatureGroupStatus") ``` While the feature group is being created, you receive `Creating` as a response\. When this step has finished successfully, the response is `Created`\. Other possible statuses are `CreateFailed`, `Deleting`, or `DeleteFailed`\. 
## Step 7: Work with Feature Groups<a name="feature-store-working-with-feature-groups"></a> Now that you've set up your feature group, you can perform any of the following tasks: **Topics** + [Describe a Feature Group](#feature-store-describe-feature-groups) + [List Feature Groups](#feature-store-list-feature-groups) + [Put Records in a Feature Group](#feature-store-put-records-feature-group) + [Get Records from a Feature Group](#feature-store-get-records-feature-group) + [Generate Hive DDL Commands](#feature-store-generate-hive-ddl-commands-feature-group) + [Build a Training Dataset](#feature-store-build-training-dataset) + [Write and Execute an Athena Query](#feature-store-write-athena-query) + [Delete a Feature Group](#feature-store-delete-feature-group) ### Describe a Feature Group<a name="feature-store-describe-feature-groups"></a> You can retrieve information about your feature group with the `describe` function\. ``` feature_group.describe() ``` ### List Feature Groups<a name="feature-store-list-feature-groups"></a> You can list all of your feature groups with the `list_feature_groups` function\. ``` sagemaker_client.list_feature_groups() ``` ### Put Records in a Feature Group<a name="feature-store-put-records-feature-group"></a> You can use the `ingest` function to load your feature data\. You pass in a data frame of feature data, set the number of workers, and choose to wait for it to return or not\. The following example demonstrates using the `ingest` function\. ``` feature_group.ingest(     data_frame=feature_data, max_workers=3, wait=True ) ``` For each feature group you have, run the `ingest` function on the feature data you want to load\. ### Get Records from a Feature Group<a name="feature-store-get-records-feature-group"></a> You can use the `get_record` function to retrieve the data for a specific feature by its record identifier\. The following example uses an example identifier to retrieve the record\. 
``` record_identifier_value = str(2990130) featurestore_runtime.get_record(FeatureGroupName=transaction_feature_group_name, RecordIdentifierValueAsString=record_identifier_value) ``` An example response from the fraud detection example: ``` ... 'Record': [{'FeatureName': 'TransactionID', 'ValueAsString': '2990130'},   {'FeatureName': 'isFraud', 'ValueAsString': '0'},   {'FeatureName': 'TransactionDT', 'ValueAsString': '152647'},   {'FeatureName': 'TransactionAmt', 'ValueAsString': '75.0'},   {'FeatureName': 'ProductCD', 'ValueAsString': 'H'},   {'FeatureName': 'card1', 'ValueAsString': '4577'}, ... ``` ### Generate Hive DDL Commands<a name="feature-store-generate-hive-ddl-commands-feature-group"></a> The SageMaker Python SDK’s `FeatureStore` class also provides the functionality to generate Hive DDL commands\. The schema of the table is generated based on the feature definitions\. Columns are named after feature name and data\-type are inferred based on feature type\. ``` print(feature_group.as_hive_ddl()) ``` Example output: ``` CREATE EXTERNAL TABLE IF NOT EXISTS sagemaker_featurestore.identity-feature-group-27-19-33-00 (   TransactionID INT   id_01 FLOAT   id_02 FLOAT   id_03 FLOAT   id_04 FLOAT  ... ``` ### Build a Training Dataset<a name="feature-store-build-training-dataset"></a> Feature Store automatically builds an AWS Glue data catalog when you create feature groups and you can turn this off if you want\. The following describes how to create a single training dataset with feature values from both identity and transaction feature groups created earlier in this topic\. Also, the following describes how to run an Amazon Athena query to join data stored in the offline store from both identity and transaction feature groups\.  To start, create an Athena query using `athena_query()` for both identity and transaction feature groups\. The `table\_name` is the AWS Glue table that is autogenerated by Feature Store\.  
``` identity_query = identity_feature_group.athena_query() transaction_query = transaction_feature_group.athena_query() identity_table = identity_query.table_name transaction_table = transaction_query.table_name ``` ### Write and Execute an Athena Query<a name="feature-store-write-athena-query"></a> You write your query using SQL on these feature groups, and then execute the query with the `.run()` command and specify your S3 bucket location for the data set to be saved there\.  ``` # Athena query query_string = 'SELECT * FROM "'+transaction_table+'" LEFT JOIN "'+identity_table+'" ON "'+transaction_table+'".transactionid = "'+identity_table+'".transactionid' # run Athena query. The output is loaded to a Pandas dataframe. dataset = pd.DataFrame() identity_query.run(query_string=query_string, output_location='s3://'+default_s3_bucket_name+'/query_results/') identity_query.wait() dataset = identity_query.as_dataframe() ``` From here you can train a model using this data set and then perform inference\.  ### Delete a Feature Group<a name="feature-store-delete-feature-group"></a> You can delete a feature group with the `delete` function\.  ``` feature_group.delete() ``` The following code example is from the fraud detection example\. ``` identity_feature_group.delete() transaction_feature_group.delete() ``` For more information, see the [Delete a feature group API](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DeleteFeatureGroup.html)
import { LegacyRef, useEffect, useRef, useState } from 'react';

type Options = {
	offset?: string;
	shouldStop?: boolean;
	onLoadMore?: () => Promise< void >;
};

/**
 * useInfiniteScroll
 *
 * Appends an invisible sentinel element to the scroll container (via the
 * returned `containerRef`) positioned `offset` above the container's bottom,
 * and fires `onLoadMore` whenever the sentinel becomes visible, unless
 * `shouldStop` is set or a load is already in flight.
 */
export function useInfiniteScroll( options?: Options ) {
	const { offset = '0px', shouldStop = false, onLoadMore } = options ?? {};
	const [ isLoading, setIsLoading ] = useState( false );
	const observerRef = useRef< IntersectionObserver >();
	const targetRef = useRef( document.createElement( 'div' ) );

	// Callback ref: mount the sentinel inside the (relatively positioned) container.
	const containerRef: LegacyRef< HTMLElement > = ( container ) => {
		if ( container ) {
			container.append( targetRef.current );
			container.style.position = 'relative';
		}
	};

	useEffect( () => {
		const target = targetRef.current;
		target.toggleAttribute( 'data-infinite-scroll-detector', true );
		target.style.position = 'absolute';
		target.style.bottom = offset;
		// Clamp: never let a large offset push the sentinel above the container's top.
		if ( target.offsetTop < 0 ) {
			target.style.bottom = '0px';
		}
	}, [ offset, isLoading ] );

	useEffect( () => {
		// Drop the observer from the previous render before creating a new one.
		observerRef.current?.disconnect();

		async function handler( [ { isIntersecting } ]: IntersectionObserverEntry[] ) {
			if ( isIntersecting && ! isLoading && ! shouldStop && typeof onLoadMore === 'function' ) {
				setIsLoading( true );
				await onLoadMore();
				setIsLoading( false );
			}
		}

		observerRef.current = new IntersectionObserver( handler as IntersectionObserverCallback, {
			threshold: 0,
		} );
		observerRef.current.observe( targetRef.current );

		// BUG FIX: read observerRef.current at cleanup time so the observer
		// created in *this* run is disconnected. The old code captured the
		// previous (already-disconnected) observer in a local, leaking the
		// newest observer on unmount.
		return () => observerRef.current?.disconnect();
	}, [ isLoading, onLoadMore, shouldStop ] );

	return {
		isLoading,
		containerRef,
	};
}
from datetime import datetime
from random import randint
from time import sleep

import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup


class BooksScraper:
    """Automated data collection tool (web-scraper) that is specifically
    tailored to scrape data on Bookdepository based on specific category
    keyword.
    """

    def __init__(self, number_of_samples: int = 30, category: str = 'love',
                 export_to_csv: bool = False) -> None:
        """
        Initialization
        :param number_of_samples: The number of samples of data to be scraped.
        :param category: book category to be scraped.
        :param bool export_to_csv: Should the scraped data be exported to csv?
        """
        self.number_of_samples = number_of_samples
        self.category = category
        self._baseurl = "https://www.bookdepository.com/search?searchTerm="
        self._header = {"User-Agent": "Mozilla/5.0"}
        self._export_to_csv = export_to_csv
        print(
            f">>>>Successfully initialized class to collect information on {category} books for {self.number_of_pages} page(s)<<<<")

    @property
    def number_of_pages(self) -> int:
        """
        Number of result pages needed to cover ``number_of_samples``
        (each page holds 30 samples).

        Uses ceiling division: e.g. 31 samples need 2 pages. The previous
        ``int(round(...))`` silently under-fetched (31 -> 1 page).
        :return: number of pages that will be scraped.
        """
        try:
            if self.number_of_samples < 30:
                raise ValueError(
                    "Number of samples must be equal to or larger than 30.")
            # Ceiling division without importing math.
            return -(-self.number_of_samples // 30)
        except TypeError:
            raise TypeError(
                "Number of samples must be of integer type.")from None

    def get_page_response(self, base_url: str, category: str,
                          number_of_pages: int, header: dict) -> BeautifulSoup:
        """
        Retrieves responses from the book depository server and combines
        them into a single BeautifulSoup object.

        BUG FIX: the previous version returned from inside the loop, so only
        the first page was ever fetched (and it shadowed the loop variable
        with the response, checking ``.ok`` only after parsing). All pages
        are now fetched and concatenated, keeping the same return type.

        :param base_url: desired url.
        :param category: book category to be scraped.
        :param number_of_pages: The number of pages of data to be scraped.
        :param header: identification needed for scraping.
        :return: combined BeautifulSoup. Failed pages are reported and skipped.
        """
        combined_html = ""
        for page_number in range(1, number_of_pages + 1):
            url = f"{base_url}{category}&page={page_number}"
            response = requests.get(url, headers=header)
            if not response.ok:
                print(f"There is a {response} error. Please check your URL.")
                continue
            combined_html += response.text
            # Be polite to the server between page requests (the old code
            # slept in get_book_image_url, which does no network I/O).
            sleep(randint(1, 3))
        return BeautifulSoup(combined_html, "html.parser")

    @staticmethod
    def get_book_author(soup, number_of_pages) -> list:
        """
        Gathers book authors from the provided BeautifulSoup object.

        ``number_of_pages`` is retained for backward compatibility but no
        longer used: the soup already contains every fetched page, and the
        old per-page loop duplicated each entry once per page.

        :param soup: BeautifulSoup object containing book info.
        :param number_of_pages: unused, kept for interface compatibility.
        :return: list of authors; np.nan on extraction failure.
        """
        try:
            return [author.text for author in soup.find_all(
                "span", attrs={"itemprop": "name"})]
        except ValueError:
            return np.nan

    @staticmethod
    def get_book_title(soup, number_of_pages) -> list:
        """
        Gathers book titles from the provided BeautifulSoup object.

        :param soup: BeautifulSoup object containing book info.
        :param number_of_pages: unused, kept for interface compatibility.
        :return: list of titles; np.nan on extraction failure.
        """
        try:
            return [title.text for title in soup.find_all("h3", class_="title")]
        except ValueError:
            return np.nan

    @staticmethod
    def get_book_price(soup, number_of_pages) -> list:
        """
        Gathers book prices from the provided BeautifulSoup object.

        :param soup: BeautifulSoup object containing book info.
        :param number_of_pages: unused, kept for interface compatibility.
        :return: list of prices; np.nan on extraction failure.
        """
        try:
            return [price.text for price in soup.find_all("p", class_="price")]
        except ValueError:
            return np.nan

    @staticmethod
    def get_book_item_url(soup, number_of_pages) -> list:
        """
        Gathers book item urls from the provided BeautifulSoup object.

        :param soup: BeautifulSoup object containing book info.
        :param number_of_pages: unused, kept for interface compatibility.
        :return: list of absolute item urls; np.nan on extraction failure.
        """
        item_url_prefix = "https://www.bookdepository.com"
        try:
            return [item_url_prefix + url.a['href'] for url in
                    soup.find_all("div", attrs={"class": "book-item"})]
        except ValueError:
            return np.nan

    @staticmethod
    def get_book_image_url(soup, number_of_pages) -> list:
        """
        Gathers book image urls from the provided BeautifulSoup object.

        :param soup: BeautifulSoup object containing book info.
        :param number_of_pages: unused, kept for interface compatibility.
        :return: list of image urls; np.nan on extraction failure.
        """
        try:
            return [image['data-lazy'] for image in
                    soup.find_all("img", class_="lazy")]
        except ValueError:
            return np.nan

    def collect_information(self) -> pd.DataFrame:
        """
        Combines all functions required for scraping.

        :return: pandas DataFrame with title/author/price/item_url/image_url.
        """
        print(
            f">>>>Now collecting information on {self.category} books for {self.number_of_pages} page(s)<<<<")
        print(">>>>Please be patient this might take a while:)<<<<")
        soup = self.get_page_response(
            self._baseurl, self.category, self.number_of_pages, self._header)
        title = self.get_book_title(soup, self.number_of_pages)
        author = self.get_book_author(soup, self.number_of_pages)
        price = self.get_book_price(soup, self.number_of_pages)
        item_url = self.get_book_item_url(soup, self.number_of_pages)
        image_url = self.get_book_image_url(soup, self.number_of_pages)
        nested_book_details = [title, author, price, item_url, image_url]
        columns = ["title", "author", "price", "item_url", "image_url"]
        # Rows are built per-field, so transpose to one row per book.
        book_details = pd.DataFrame(nested_book_details, columns).T
        if self._export_to_csv:
            self.export_to_csv(book_details)
        return book_details

    def export_to_csv(self, data: pd.DataFrame) -> None:
        """
        Exports a dataframe to a .csv file in working dir.

        :param data: pandas dataframe
        :return: None
        """
        now = datetime.now()
        timestamp = now.strftime("%Y-%m-%d_%H-%M-%S")
        data.to_csv(f"{self.category}_{timestamp}.csv", index=False)
        return


class CleanBookScraper(BooksScraper):
    """
    A subclass of BooksScraper specifically tailored to clean scraped
    bookdepository.com data. Reuses all BooksScraper functionality.
    """

    def __init__(self, number_of_samples: int, category: str,
                 export_to_csv: bool = False) -> None:
        """
        Initialization
        :param number_of_samples: The number of samples of data to be scraped.
        :param category: book category to be scraped.
        :param bool export_to_csv: Should the cleaned data be exported to csv?
        """
        super().__init__(number_of_samples, category)
        # BUG FIX: keep a separate flag for the *clean* export. The old code
        # reused the parent's ``_export_to_csv``, so the inherited
        # collect_information() also exported the raw frame, producing two
        # CSV files per run.
        self._export_clean_to_csv = export_to_csv

    @staticmethod
    def process_data(data: pd.DataFrame) -> pd.DataFrame:
        """
        Cleans book data from the provided Dataframe: trims titles, strips
        quotes, and normalizes the price column to a bare number string.

        :param data: Scraped data containing book info.
        :return: cleaned pandas dataframe (rows with missing values dropped).
        """
        data['title'] = data['title'].str.strip()
        data['title'] = data['title'].str.replace("'", "")
        data['author'] = data['author'].str.replace("'", "")
        data['price'] = data['price'].str.replace("\n", "")
        data['price'] = data["price"].str.split("$", n=2, expand=True)[1]
        data['price'] = data['price'].str.replace("US", "")
        data['price'] = data['price'].str.replace("R", "")
        data['price'] = data['price'].str.strip()
        return data.dropna()

    def clean_dataframe(self) -> pd.DataFrame:
        """
        Combines scraping and cleaning.

        :return: cleaned pandas DataFrame.
        """
        data = self.process_data(self.collect_information())
        if self._export_clean_to_csv:
            self.export_to_csv(data)
        return data

    def export_to_csv(self, data: pd.DataFrame) -> None:
        """
        Exports a dataframe to a .csv file in working dir.

        :param data: pandas dataframe
        :return: None
        """
        now = datetime.now()
        timestamp = now.strftime("%Y-%m-%d_%H-%M-%S")
        data.to_csv(f"{self.category}_{timestamp}.csv", index=False)
        return
# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import asyncio import deprecation import httpx import logging import json import threading from collections.abc import ( AsyncGenerator, AsyncIterator, Generator, Iterator, ) from concurrent.futures import ThreadPoolExecutor from functools import partial from queue import Queue from types import TracebackType from typing import ( Any, cast, Dict, List, Optional, Tuple, Union, Type, ) from astrapy import __version__ from astrapy.core.api import APIRequestError, api_request, async_api_request from astrapy.core.defaults import ( DEFAULT_AUTH_HEADER, DEFAULT_JSON_API_PATH, DEFAULT_JSON_API_VERSION, DEFAULT_KEYSPACE_NAME, MAX_INSERT_NUM_DOCUMENTS, ) from astrapy.core.utils import ( convert_vector_to_floats, make_payload, normalize_for_api, restore_from_api, http_methods, to_httpx_timeout, TimeoutInfoWideType, ) from astrapy.core.core_types import ( API_DOC, API_RESPONSE, PaginableRequestMethod, AsyncPaginableRequestMethod, ) logger = logging.getLogger(__name__) class AstraDBCollection: # Initialize the shared httpx client as a class attribute client = httpx.Client() def __init__( self, collection_name: str, astra_db: Optional[AstraDB] = None, token: Optional[str] = None, api_endpoint: Optional[str] = None, namespace: Optional[str] = None, caller_name: Optional[str] = None, caller_version: Optional[str] = None, additional_headers: Dict[str, str] = {}, ) -> None: """ Initialize 
        an AstraDBCollection instance.

        Args:
            collection_name (str): The name of the collection.
            astra_db (AstraDB, optional): An instance of Astra DB.
            token (str, optional): Authentication token for Astra DB.
            api_endpoint (str, optional): API endpoint URL.
            namespace (str, optional): Namespace for the database.
            caller_name (str, optional): identity of the caller ("my_framework")
                If passing a client, its caller is used as fallback
            caller_version (str, optional): version of the caller code ("1.0.3")
                If passing a client, its caller is used as fallback
            additional_headers (Dict[str, str]): any further set of headers, in
                the form of key-value pairs, to be passed with the HTTP requests
                by this collection instance.
        """
        # Check for presence of the Astra DB object
        if astra_db is None:
            # Without an AstraDB object we must be able to build one ourselves.
            if token is None or api_endpoint is None:
                raise AssertionError("Must provide token and api_endpoint")

            astra_db = AstraDB(
                token=token,
                api_endpoint=api_endpoint,
                namespace=namespace,
                caller_name=caller_name,
                caller_version=caller_version,
            )
        else:
            # if astra_db passed, copy and apply possible overrides
            astra_db = astra_db.copy(
                token=token,
                api_endpoint=api_endpoint,
                namespace=namespace,
                caller_name=caller_name,
                caller_version=caller_version,
            )

        # Set the remaining instance attributes
        self.astra_db = astra_db
        self.caller_name: Optional[str] = self.astra_db.caller_name
        self.caller_version: Optional[str] = self.astra_db.caller_version
        self.additional_headers = additional_headers
        self.collection_name = collection_name
        # Collection-scoped URL path under the database's base path.
        self.base_path: str = f"{self.astra_db.base_path}/{self.collection_name}"

    def __repr__(self) -> str:
        """Debug-friendly representation with database and collection name."""
        return f'AstraDBCollection[astra_db="{self.astra_db}", collection_name="{self.collection_name}"]'

    def __eq__(self, other: Any) -> bool:
        """Equality: same collection, database, caller identity and headers."""
        if isinstance(other, AstraDBCollection):
            return all(
                [
                    self.collection_name == other.collection_name,
                    self.astra_db == other.astra_db,
                    self.caller_name == other.caller_name,
                    self.caller_version == other.caller_version,
                    self.additional_headers ==
                    other.additional_headers,
                ]
            )
        else:
            # Not comparable with non-AstraDBCollection objects.
            return False

    def copy(
        self,
        *,
        collection_name: Optional[str] = None,
        token: Optional[str] = None,
        api_endpoint: Optional[str] = None,
        api_path: Optional[str] = None,
        api_version: Optional[str] = None,
        namespace: Optional[str] = None,
        caller_name: Optional[str] = None,
        caller_version: Optional[str] = None,
        additional_headers: Optional[Dict[str, str]] = None,
    ) -> AstraDBCollection:
        """
        Return a new AstraDBCollection equal to this one, with any of the
        keyword arguments overriding the corresponding current setting.
        """
        return AstraDBCollection(
            collection_name=collection_name or self.collection_name,
            astra_db=self.astra_db.copy(
                token=token,
                api_endpoint=api_endpoint,
                api_path=api_path,
                api_version=api_version,
                namespace=namespace,
                caller_name=caller_name,
                caller_version=caller_version,
            ),
            caller_name=caller_name or self.caller_name,
            caller_version=caller_version or self.caller_version,
            additional_headers=additional_headers or self.additional_headers,
        )

    def to_async(self) -> AsyncAstraDBCollection:
        """Return the async counterpart of this collection (same settings)."""
        return AsyncAstraDBCollection(
            collection_name=self.collection_name,
            astra_db=self.astra_db.to_async(),
            caller_name=self.caller_name,
            caller_version=self.caller_version,
            additional_headers=self.additional_headers,
        )

    def set_caller(
        self,
        caller_name: Optional[str] = None,
        caller_version: Optional[str] = None,
    ) -> None:
        """Set the caller identity, propagating it to the underlying AstraDB."""
        self.astra_db.set_caller(
            caller_name=caller_name,
            caller_version=caller_version,
        )
        self.caller_name = caller_name
        self.caller_version = caller_version

    def _request(
        self,
        method: str = http_methods.POST,
        path: Optional[str] = None,
        json_data: Optional[Dict[str, Any]] = None,
        url_params: Optional[Dict[str, Any]] = None,
        skip_error_check: bool = False,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Issue one HTTP request against the API through the shared client,
        normalizing the payload on the way out and restoring the response
        on the way in. All public methods funnel through here.
        """
        direct_response = api_request(
            client=self.client,
            base_url=self.astra_db.base_url,
            auth_header=DEFAULT_AUTH_HEADER,
            token=self.astra_db.token,
            method=method,
            json_data=normalize_for_api(json_data),
            url_params=url_params,
            path=path,
            skip_error_check=skip_error_check,
            caller_name=self.caller_name,
            caller_version=self.caller_version,
            timeout=to_httpx_timeout(timeout_info),
            additional_headers=self.additional_headers,
        )
        # Undo the wire-format normalization applied by normalize_for_api.
        response = restore_from_api(direct_response)
        return response

    def post_raw_request(
        self, body: Dict[str, Any], timeout_info: TimeoutInfoWideType = None
    ) -> API_RESPONSE:
        """POST an arbitrary, caller-supplied payload to this collection's path."""
        return self._request(
            method=http_methods.POST,
            path=self.base_path,
            json_data=body,
            timeout_info=timeout_info,
        )

    def _get(
        self,
        path: Optional[str] = None,
        options: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Optional[API_RESPONSE]:
        """GET helper; returns the response dict, or None if not a dict."""
        full_path = f"{self.base_path}/{path}" if path else self.base_path
        response = self._request(
            method=http_methods.GET,
            path=full_path,
            url_params=options,
            timeout_info=timeout_info,
        )
        if isinstance(response, dict):
            return response
        return None

    def _put(
        self,
        path: Optional[str] = None,
        document: Optional[API_RESPONSE] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """PUT helper targeting an optional sub-path of this collection."""
        full_path = f"{self.base_path}/{path}" if path else self.base_path
        response = self._request(
            method=http_methods.PUT,
            path=full_path,
            json_data=document,
            timeout_info=timeout_info,
        )
        return response

    def _post(
        self,
        path: Optional[str] = None,
        document: Optional[API_DOC] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """POST helper targeting an optional sub-path of this collection."""
        full_path = f"{self.base_path}/{path}" if path else self.base_path
        response = self._request(
            method=http_methods.POST,
            path=full_path,
            json_data=document,
            timeout_info=timeout_info,
        )
        return response

    def _recast_as_sort_projection(
        self, vector: List[float], fields: Optional[List[str]] = None
    ) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
        """
        Given a vector and optionally a list of fields,
        reformulate them as a sort, projection pair for regular
        'find'-like API calls (with basic validation as well).
""" # Must pass a vector if not vector: raise ValueError("Must pass a vector") # Edge case for field selection if fields and "$similarity" in fields: raise ValueError("Please use the `include_similarity` parameter") # Build the new vector parameter sort: Dict[str, Any] = {"$vector": vector} # Build the new fields parameter # Note: do not leave projection={}, make it None # (or it will devour $similarity away in the API response) if fields is not None and len(fields) > 0: projection = {f: 1 for f in fields} else: projection = None return sort, projection def get( self, path: Optional[str] = None, timeout_info: TimeoutInfoWideType = None ) -> Optional[API_RESPONSE]: """ Retrieve a document from the collection by its path. Args: path (str, optional): The path of the document to retrieve. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The retrieved document. """ return self._get(path=path, timeout_info=timeout_info) def find( self, filter: Optional[Dict[str, Any]] = None, projection: Optional[Dict[str, Any]] = None, sort: Optional[Dict[str, Any]] = None, options: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Find documents in the collection that match the given filter. Args: filter (dict, optional): Criteria to filter documents. projection (dict, optional): Specifies the fields to return. sort (dict, optional): Specifies the order in which to return matching documents. options (dict, optional): Additional options for the query. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The query response containing matched documents. 
""" json_query = make_payload( top_level="find", filter=filter, projection=projection, options=options, sort=sort, ) response = self._post(document=json_query, timeout_info=timeout_info) return response def vector_find( self, vector: List[float], *, limit: int, filter: Optional[Dict[str, Any]] = None, fields: Optional[List[str]] = None, include_similarity: bool = True, timeout_info: TimeoutInfoWideType = None, ) -> List[API_DOC]: """ Perform a vector-based search in the collection. Args: vector (list): The vector to search with. limit (int): The maximum number of documents to return. filter (dict, optional): Criteria to filter documents. fields (list, optional): Specifies the fields to return. include_similarity (bool, optional): Whether to include similarity score in the result. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: list: A list of documents matching the vector search criteria. """ # Must pass a limit if not limit: raise ValueError("Must pass a limit") # Pre-process the included arguments sort, projection = self._recast_as_sort_projection( convert_vector_to_floats(vector), fields=fields, ) # Call the underlying find() method to search raw_find_result = self.find( filter=filter, projection=projection, sort=sort, options={ "limit": limit, "includeSimilarity": include_similarity, }, timeout_info=timeout_info, ) return cast(List[API_DOC], raw_find_result["data"]["documents"]) @staticmethod def paginate( *, request_method: PaginableRequestMethod, options: Optional[Dict[str, Any]], prefetched: Optional[int] = None, ) -> Generator[API_DOC, None, None]: """ Generate paginated results for a given database query method. Args: request_method (function): The database query method to paginate. options (dict, optional): Options for the database query. prefetched (int, optional): Number of pre-fetched documents. 
        Yields:
            dict: The next document in the paginated result set.
        """
        _options = options or {}
        # First page is fetched synchronously in both branches.
        response0 = request_method(options=_options)
        next_page_state = response0["data"]["nextPageState"]
        options0 = _options
        if next_page_state is not None and prefetched:
            # Prefetch mode: a background thread recursively paginates the
            # remaining pages into a bounded queue (maxsize=prefetched) while
            # this generator yields; None on the queue marks exhaustion.

            def queued_paginate(
                queue: Queue[Optional[API_DOC]],
                request_method: PaginableRequestMethod,
                options: Optional[Dict[str, Any]],
            ) -> None:
                try:
                    for row in AstraDBCollection.paginate(
                        request_method=request_method, options=options
                    ):
                        queue.put(row)
                finally:
                    # Sentinel so the consumer can stop even on error.
                    queue.put(None)

            queue: Queue[Optional[API_DOC]] = Queue(prefetched)
            options1 = {**options0, **{"pageState": next_page_state}}
            t = threading.Thread(
                target=queued_paginate, args=(queue, request_method, options1)
            )
            t.start()
            for document in response0["data"]["documents"]:
                yield document
            doc = queue.get()
            while doc is not None:
                yield doc
                doc = queue.get()
            t.join()
        else:
            # Sequential mode: request each next page only when needed.
            for document in response0["data"]["documents"]:
                yield document
            while next_page_state is not None and not prefetched:
                options1 = {**options0, **{"pageState": next_page_state}}
                response1 = request_method(options=options1)
                for document in response1["data"]["documents"]:
                    yield document
                next_page_state = response1["data"]["nextPageState"]

    def paginated_find(
        self,
        filter: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        sort: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        prefetched: Optional[int] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Iterator[API_DOC]:
        """
        Perform a paginated search in the collection.

        Args:
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            sort (dict, optional): Specifies the order in which to return matching documents.
            options (dict, optional): Additional options for the query.
            prefetched (int, optional): Number of pre-fetched documents.
            timeout_info: a float, or a TimeoutInfo dict, for each single HTTP request.
                This is a paginated method, that issues several requests as it
                needs more data. This parameter controls a single request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            generator: A generator yielding documents in the paginated result set.
        """
        # Freeze all find() arguments except `options`, which paginate()
        # varies per page (pageState).
        partialed_find = partial(
            self.find,
            filter=filter,
            projection=projection,
            sort=sort,
            timeout_info=timeout_info,
        )
        return self.paginate(
            request_method=partialed_find,
            options=options,
            prefetched=prefetched,
        )

    def pop(
        self,
        filter: Dict[str, Any],
        pop: Dict[str, Any],
        options: Dict[str, Any],
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Pop the last data in the tags array

        Args:
            filter (dict): Criteria to identify the document to update.
            pop (dict): The pop to apply to the tags.
            options (dict): Additional options for the update operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The original document before the update.
        """
        # Implemented as a findOneAndUpdate with a $pop update operator.
        json_query = make_payload(
            top_level="findOneAndUpdate",
            filter=filter,
            update={"$pop": pop},
            options=options,
        )

        response = self._request(
            method=http_methods.POST,
            path=self.base_path,
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    def push(
        self,
        filter: Dict[str, Any],
        push: Dict[str, Any],
        options: Dict[str, Any],
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Push new data to the tags array

        Args:
            filter (dict): Criteria to identify the document to update.
            push (dict): The push to apply to the tags.
            options (dict): Additional options for the update operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The result of the update operation.
""" json_query = make_payload( top_level="findOneAndUpdate", filter=filter, update={"$push": push}, options=options, ) response = self._request( method=http_methods.POST, path=self.base_path, json_data=json_query, timeout_info=timeout_info, ) return response def find_one_and_replace( self, replacement: Dict[str, Any], *, filter: Optional[Dict[str, Any]] = None, projection: Optional[Dict[str, Any]] = None, sort: Optional[Dict[str, Any]] = None, options: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Find a single document and replace it. Args: replacement (dict): The new document to replace the existing one. filter (dict, optional): Criteria to filter documents. sort (dict, optional): Specifies the order in which to find the document. options (dict, optional): Additional options for the operation. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The result of the find and replace operation. """ json_query = make_payload( top_level="findOneAndReplace", filter=filter, projection=projection, replacement=replacement, options=options, sort=sort, ) response = self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, timeout_info=timeout_info, ) return response def vector_find_one_and_replace( self, vector: List[float], replacement: Dict[str, Any], *, filter: Optional[Dict[str, Any]] = None, fields: Optional[List[str]] = None, timeout_info: TimeoutInfoWideType = None, ) -> Union[API_DOC, None]: """ Perform a vector-based search and replace the first matched document. Args: vector (dict): The vector to search with. replacement (dict): The new document to replace the existing one. filter (dict, optional): Criteria to filter documents. fields (list, optional): Specifies the fields to return in the result. 
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict or None: either the matched document or None if nothing found
        """
        # Pre-process the included arguments
        sort, projection = self._recast_as_sort_projection(
            convert_vector_to_floats(vector),
            fields=fields,
        )

        # Call the underlying find() method to search
        raw_find_result = self.find_one_and_replace(
            replacement=replacement,
            filter=filter,
            projection=projection,
            sort=sort,
            timeout_info=timeout_info,
        )

        return cast(Union[API_DOC, None], raw_find_result["data"]["document"])

    def find_one_and_update(
        self,
        update: Dict[str, Any],
        # NOTE(review): mutable default argument ({}); it looks like it is only
        # forwarded into the payload, never mutated, but None would be the
        # safer idiom — confirm before changing the public default.
        sort: Optional[Dict[str, Any]] = {},
        filter: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find a single document and update it.

        Args:
            update (dict): The update to apply to the document.
            sort (dict, optional): Specifies the order in which to find the document.
            filter (dict, optional): Criteria to filter documents.
            options (dict, optional): Additional options for the operation.
            projection (dict, optional): Specifies the fields to return.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The result of the find and update operation.
""" json_query = make_payload( top_level="findOneAndUpdate", filter=filter, update=update, options=options, sort=sort, projection=projection, ) response = self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, timeout_info=timeout_info, ) return response def vector_find_one_and_update( self, vector: List[float], update: Dict[str, Any], *, filter: Optional[Dict[str, Any]] = None, fields: Optional[List[str]] = None, timeout_info: TimeoutInfoWideType = None, ) -> Union[API_DOC, None]: """ Perform a vector-based search and update the first matched document. Args: vector (list): The vector to search with. update (dict): The update to apply to the matched document. filter (dict, optional): Criteria to filter documents before applying the vector search. fields (list, optional): Specifies the fields to return in the updated document. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict or None: The result of the vector-based find and update operation, or None if nothing found """ # Pre-process the included arguments sort, projection = self._recast_as_sort_projection( convert_vector_to_floats(vector), fields=fields, ) # Call the underlying find() method to search raw_find_result = self.find_one_and_update( update=update, filter=filter, sort=sort, projection=projection, timeout_info=timeout_info, ) return cast(Union[API_DOC, None], raw_find_result["data"]["document"]) def find_one_and_delete( self, sort: Optional[Dict[str, Any]] = {}, filter: Optional[Dict[str, Any]] = None, projection: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Find a single document and delete it. Args: sort (dict, optional): Specifies the order in which to find the document. filter (dict, optional): Criteria to filter documents. 
            projection (dict, optional): Specifies the fields to return.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The result of the find and delete operation.
        """
        json_query = make_payload(
            top_level="findOneAndDelete",
            filter=filter,
            sort=sort,
            projection=projection,
        )

        response = self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    def count_documents(
        self, filter: Dict[str, Any] = {}, timeout_info: TimeoutInfoWideType = None
    ) -> API_RESPONSE:
        """
        Count documents matching a given predicate (expressed as filter).

        Args:
            filter (dict, defaults to {}): Criteria to filter documents.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: the response, either
                {"status": {"count": <NUMBER> }}
            or
                {"errors": [...]}
        """
        json_query = make_payload(
            top_level="countDocuments",
            filter=filter,
        )

        response = self._post(document=json_query, timeout_info=timeout_info)

        return response

    def find_one(
        self,
        # NOTE(review): four mutable default arguments ({}); they appear to be
        # only forwarded into the payload, never mutated — None defaults would
        # be the safer idiom; confirm before changing the public signature.
        filter: Optional[Dict[str, Any]] = {},
        projection: Optional[Dict[str, Any]] = {},
        sort: Optional[Dict[str, Any]] = {},
        options: Optional[Dict[str, Any]] = {},
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find a single document in the collection.

        Args:
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            sort (dict, optional): Specifies the order in which to return the document.
            options (dict, optional): Additional options for the query.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.
        Returns:
            dict: the response, either
                {"data": {"document": <DOCUMENT> }}
            or
                {"data": {"document": None}}
            depending on whether a matching document is found or not.
        """
        json_query = make_payload(
            top_level="findOne",
            filter=filter,
            projection=projection,
            options=options,
            sort=sort,
        )

        response = self._post(document=json_query, timeout_info=timeout_info)

        return response

    def vector_find_one(
        self,
        vector: List[float],
        *,
        filter: Optional[Dict[str, Any]] = None,
        fields: Optional[List[str]] = None,
        include_similarity: bool = True,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Union[API_DOC, None]:
        """
        Perform a vector-based search to find a single document in the collection.

        Args:
            vector (list): The vector to search with.
            filter (dict, optional): Additional criteria to filter documents.
            fields (list, optional): Specifies the fields to return in the result.
            include_similarity (bool, optional): Whether to include similarity score in the result.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict or None: The found document or None if no matching document is found.
        """
        # Pre-process the included arguments
        sort, projection = self._recast_as_sort_projection(
            convert_vector_to_floats(vector),
            fields=fields,
        )

        # Call the underlying find() method to search
        raw_find_result = self.find_one(
            filter=filter,
            projection=projection,
            sort=sort,
            options={"includeSimilarity": include_similarity},
            timeout_info=timeout_info,
        )

        return cast(Union[API_DOC, None], raw_find_result["data"]["document"])

    def insert_one(
        self,
        document: API_DOC,
        failures_allowed: bool = False,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Insert a single document into the collection.

        Args:
            document (dict): The document to insert.
            failures_allowed (bool): Whether to allow failures in the insert operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the insert operation.
        """
        json_query = make_payload(top_level="insertOne", document=document)

        # failures_allowed maps onto skip_error_check: API errors are then
        # returned in the response body instead of raising.
        response = self._request(
            method=http_methods.POST,
            path=self.base_path,
            json_data=json_query,
            skip_error_check=failures_allowed,
            timeout_info=timeout_info,
        )

        return response

    def insert_many(
        self,
        documents: List[API_DOC],
        options: Optional[Dict[str, Any]] = None,
        partial_failures_allowed: bool = False,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Insert multiple documents into the collection.

        Args:
            documents (list): A list of documents to insert.
            options (dict, optional): Additional options for the insert operation.
            partial_failures_allowed (bool, optional): Whether to allow partial
                failures through the insertion (i.e. on some documents).
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the insert operation.
        """
        json_query = make_payload(
            top_level="insertMany", documents=documents, options=options
        )

        # Send the data
        response = self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            skip_error_check=partial_failures_allowed,
            timeout_info=timeout_info,
        )

        return response

    def chunked_insert_many(
        self,
        documents: List[API_DOC],
        options: Optional[Dict[str, Any]] = None,
        partial_failures_allowed: bool = False,
        chunk_size: int = MAX_INSERT_NUM_DOCUMENTS,
        concurrency: int = 1,
        timeout_info: TimeoutInfoWideType = None,
    ) -> List[Union[API_RESPONSE, Exception]]:
        """
        Insert multiple documents into the collection, handling chunking and
        optionally with concurrent insertions.
        Args:
            documents (list): A list of documents to insert.
            options (dict, optional): Additional options for the insert operation.
            partial_failures_allowed (bool, optional): Whether to allow partial
                failures in the chunk. Should be used combined with
                options={"ordered": False} in most cases.
            chunk_size (int, optional): Override the default insertion chunk size.
            concurrency (int, optional): The number of concurrent chunk insertions.
                Default is no concurrency.
            timeout_info: a float, or a TimeoutInfo dict, for each single HTTP request.
                This method runs a number of HTTP requests as it works on chunked
                data. The timeout refers to each individual such request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            list: The responses from the database after the chunked insert operation.
                This is a list of individual responses from the API: the caller
                will need to inspect them all, e.g. to collate the inserted IDs.
        """
        results: List[Union[API_RESPONSE, Exception]] = []

        # Raise a warning if ordered and concurrency
        # (concurrent chunks cannot guarantee overall insertion order).
        if options and options.get("ordered") is True and concurrency > 1:
            logger.warning(
                "Using ordered insert with concurrency may lead to unexpected results."
            )

        # If we have concurrency as 1, don't use a thread pool
        if concurrency == 1:
            # Split the documents into chunks
            for i in range(0, len(documents), chunk_size):
                try:
                    results.append(
                        self.insert_many(
                            documents[i : i + chunk_size],
                            options,
                            partial_failures_allowed,
                            timeout_info=timeout_info,
                        )
                    )
                except APIRequestError as e:
                    # A failed chunk becomes a recorded exception (or aborts).
                    if partial_failures_allowed:
                        results.append(e)
                    else:
                        raise e
            return results

        # Perform the bulk insert with concurrency otherwise
        with ThreadPoolExecutor(max_workers=concurrency) as executor:
            # Submit the jobs
            futures = [
                executor.submit(
                    self.insert_many,
                    documents[i : i + chunk_size],
                    options,
                    partial_failures_allowed,
                    timeout_info=timeout_info,
                )
                for i in range(0, len(documents), chunk_size)
            ]

            # Collect the results
            for future in futures:
                try:
                    results.append(future.result())
                except APIRequestError as e:
                    if partial_failures_allowed:
                        results.append(e)
                    else:
                        raise e

        return results

    def update_one(
        self,
        filter: Dict[str, Any],
        update: Dict[str, Any],
        sort: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Update a single document in the collection.

        Args:
            filter (dict): Criteria to identify the document to update.
            update (dict): The update to apply to the document.
            sort (dict, optional): Specifies the order in which to find the document.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the update operation.
        """
        json_query = make_payload(
            top_level="updateOne",
            filter=filter,
            update=update,
            sort=sort,
        )

        response = self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    def update_many(
        self,
        filter: Dict[str, Any],
        update: Dict[str, Any],
        options: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Updates multiple documents in the collection.
        Args:
            filter (dict): Criteria to identify the document to update.
            update (dict): The update to apply to the document.
            options (dict, optional): Additional options for the operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the update operation.
        """
        json_query = make_payload(
            top_level="updateMany",
            filter=filter,
            update=update,
            options=options,
        )

        response = self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    def replace(
        self, path: str, document: API_DOC, timeout_info: TimeoutInfoWideType = None
    ) -> API_RESPONSE:
        """
        Replace a document in the collection.

        Args:
            path (str): The path to the document to replace.
            document (dict): The new document to replace the existing one.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the replace operation.
        """
        return self._put(path=path, document=document, timeout_info=timeout_info)

    @deprecation.deprecated(  # type: ignore
        deprecated_in="0.7.0",
        removed_in="1.0.0",
        current_version=__version__,
        details="Use the 'delete_one' method instead",
    )
    def delete(self, id: str, timeout_info: TimeoutInfoWideType = None) -> API_RESPONSE:
        """Deprecated alias for delete_one (see decorator for versions)."""
        return self.delete_one(id, timeout_info=timeout_info)

    def delete_one(
        self,
        id: str,
        sort: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Delete a single document from the collection based on its ID.

        Args:
            id (str): The ID of the document to delete.
            sort (dict, optional): Specifies the order in which to find the document.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.
        Returns:
            dict: The response from the database after the delete operation.
        """
        json_query = make_payload(
            top_level="deleteOne",
            filter={"_id": id},
            sort=sort,
        )

        response = self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    def delete_one_by_predicate(
        self,
        filter: Dict[str, Any],
        sort: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Delete a single document from the collection based on a filter clause

        Args:
            filter: any filter dictionary
            sort (dict, optional): Specifies the order in which to find the document.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the delete operation.
        """
        # Same as delete_one, but with an arbitrary filter instead of an _id.
        json_query = make_payload(
            top_level="deleteOne",
            filter=filter,
            sort=sort,
        )

        response = self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    def delete_many(
        self,
        filter: Dict[str, Any],
        skip_error_check: bool = False,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Delete many documents from the collection based on a filter condition

        Args:
            filter (dict): Criteria to identify the documents to delete.
            skip_error_check (bool): whether to ignore the check for API error
                and return the response untouched. Default is False.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the delete operation.
""" json_query = { "deleteMany": { "filter": filter, } } response = self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, skip_error_check=skip_error_check, timeout_info=timeout_info, ) return response def chunked_delete_many( self, filter: Dict[str, Any], timeout_info: TimeoutInfoWideType = None ) -> List[API_RESPONSE]: """ Delete many documents from the collection based on a filter condition, chaining several API calls until exhaustion of the documents to delete. Args: filter (dict): Criteria to identify the documents to delete. timeout_info: a float, or a TimeoutInfo dict, for each single HTTP request. This method runs a number of HTTP requests as it works on a pagination basis. The timeout refers to each individual such request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: List[dict]: The responses from the database from all the calls """ responses = [] must_proceed = True while must_proceed: dm_response = self.delete_many(filter=filter, timeout_info=timeout_info) responses.append(dm_response) must_proceed = dm_response.get("status", {}).get("moreData", False) return responses def clear(self, timeout_info: TimeoutInfoWideType = None) -> API_RESPONSE: """ Clear the collection, deleting all documents Args: timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database. """ clear_response = self.delete_many(filter={}, timeout_info=timeout_info) if clear_response.get("status", {}).get("deletedCount") != -1: raise ValueError( f"Could not issue a clear-collection API command (response: {json.dumps(clear_response)})." 
) return clear_response def delete_subdocument( self, id: str, subdoc: str, timeout_info: TimeoutInfoWideType = None ) -> API_RESPONSE: """ Delete a subdocument or field from a document in the collection. Args: id (str): The ID of the document containing the subdocument. subdoc (str): The key of the subdocument or field to remove. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database after the update operation. """ json_query = { "findOneAndUpdate": { "filter": {"_id": id}, "update": {"$unset": {subdoc: ""}}, } } response = self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, timeout_info=timeout_info, ) return response @deprecation.deprecated( # type: ignore deprecated_in="0.7.0", removed_in="1.0.0", current_version=__version__, details="Use the 'upsert_one' method instead", ) def upsert( self, document: API_DOC, timeout_info: TimeoutInfoWideType = None ) -> str: return self.upsert_one(document, timeout_info=timeout_info) def upsert_one( self, document: API_DOC, timeout_info: TimeoutInfoWideType = None ) -> str: """ Emulate an upsert operation for a single document in the collection. This method attempts to insert the document. If a document with the same _id exists, it updates the existing document. Args: document (dict): The document to insert or update. timeout_info: a float, or a TimeoutInfo dict, for the HTTP requests. This method may issue one or two requests, depending on what is detected on DB. This timeout controls each HTTP request individually. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: str: The _id of the inserted or updated document. 
""" # Build the payload for the insert attempt result = self.insert_one( document, failures_allowed=True, timeout_info=timeout_info ) # If the call failed because of preexisting doc, then we replace it if "errors" in result: if ( "errorCode" in result["errors"][0] and result["errors"][0]["errorCode"] == "DOCUMENT_ALREADY_EXISTS" ): # Now we attempt the update result = self.find_one_and_replace( replacement=document, filter={"_id": document["_id"]}, timeout_info=timeout_info, ) upserted_id = cast(str, result["data"]["document"]["_id"]) else: raise ValueError(result) else: if result.get("status", {}).get("insertedIds", []): upserted_id = cast(str, result["status"]["insertedIds"][0]) else: raise ValueError("Unexplained empty insertedIds from API") return upserted_id def upsert_many( self, documents: list[API_DOC], concurrency: int = 1, partial_failures_allowed: bool = False, timeout_info: TimeoutInfoWideType = None, ) -> List[Union[str, Exception]]: """ Emulate an upsert operation for multiple documents in the collection. This method attempts to insert the documents. If a document with the same _id exists, it updates the existing document. Args: documents (List[dict]): The documents to insert or update. concurrency (int, optional): The number of concurrent upserts. partial_failures_allowed (bool, optional): Whether to allow partial failures in the batch. timeout_info: a float, or a TimeoutInfo dict, for each HTTP request. This method issues a separate HTTP request for each document to insert: the timeout controls each such request individually. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: List[Union[str, Exception]]: A list of "_id"s of the inserted or updated documents. 
""" results: List[Union[str, Exception]] = [] # If concurrency is 1, no need for thread pool if concurrency == 1: for document in documents: try: results.append(self.upsert_one(document, timeout_info=timeout_info)) except Exception as e: results.append(e) return results # Perform the bulk upsert with concurrency with ThreadPoolExecutor(max_workers=concurrency) as executor: # Submit the jobs futures = [ executor.submit(self.upsert, document, timeout_info=timeout_info) for document in documents ] # Collect the results for future in futures: try: results.append(future.result()) except Exception as e: if partial_failures_allowed: results.append(e) else: raise e return results class AsyncAstraDBCollection: def __init__( self, collection_name: str, astra_db: Optional[AsyncAstraDB] = None, token: Optional[str] = None, api_endpoint: Optional[str] = None, namespace: Optional[str] = None, caller_name: Optional[str] = None, caller_version: Optional[str] = None, additional_headers: Dict[str, str] = {}, ) -> None: """ Initialize an AstraDBCollection instance. Args: collection_name (str): The name of the collection. astra_db (AstraDB, optional): An instance of Astra DB. token (str, optional): Authentication token for Astra DB. api_endpoint (str, optional): API endpoint URL. namespace (str, optional): Namespace for the database. caller_name (str, optional): identity of the caller ("my_framework") If passing a client, its caller is used as fallback caller_version (str, optional): version of the caller code ("1.0.3") If passing a client, its caller is used as fallback additional_headers (Dict[str, str]): any further set of headers, in the form of key-value pairs, to be passed with the HTTP requests by this collection instance. 
        """
        # Check for presence of the Astra DB object
        if astra_db is None:
            # No client passed: both credentials are then mandatory.
            if token is None or api_endpoint is None:
                raise AssertionError("Must provide token and api_endpoint")

            astra_db = AsyncAstraDB(
                token=token,
                api_endpoint=api_endpoint,
                namespace=namespace,
                caller_name=caller_name,
                caller_version=caller_version,
            )
        else:
            # if astra_db passed, copy and apply possible overrides
            astra_db = astra_db.copy(
                token=token,
                api_endpoint=api_endpoint,
                namespace=namespace,
                caller_name=caller_name,
                caller_version=caller_version,
            )

        # Set the remaining instance attributes
        self.astra_db: AsyncAstraDB = astra_db
        # caller identity is inherited from the (possibly copied) client
        self.caller_name: Optional[str] = self.astra_db.caller_name
        self.caller_version: Optional[str] = self.astra_db.caller_version
        self.additional_headers = additional_headers
        self.client = astra_db.client
        self.collection_name = collection_name
        self.base_path: str = f"{self.astra_db.base_path}/{self.collection_name}"

    def __repr__(self) -> str:
        return f'AsyncAstraDBCollection[astra_db="{self.astra_db}", collection_name="{self.collection_name}"]'

    def __eq__(self, other: Any) -> bool:
        # Equality requires same collection, same underlying client and
        # identical caller/headers settings; anything else compares unequal.
        if isinstance(other, AsyncAstraDBCollection):
            return all(
                [
                    self.collection_name == other.collection_name,
                    self.astra_db == other.astra_db,
                    self.caller_name == other.caller_name,
                    self.caller_version == other.caller_version,
                    self.additional_headers == other.additional_headers,
                ]
            )
        else:
            return False

    def copy(
        self,
        *,
        collection_name: Optional[str] = None,
        token: Optional[str] = None,
        api_endpoint: Optional[str] = None,
        api_path: Optional[str] = None,
        api_version: Optional[str] = None,
        namespace: Optional[str] = None,
        caller_name: Optional[str] = None,
        caller_version: Optional[str] = None,
        additional_headers: Optional[Dict[str, str]] = None,
    ) -> AsyncAstraDBCollection:
        # Return a new collection object with selective overrides; unspecified
        # settings fall back to this instance's current values.
        return AsyncAstraDBCollection(
            collection_name=collection_name or self.collection_name,
            astra_db=self.astra_db.copy(
                token=token,
                api_endpoint=api_endpoint,
                api_path=api_path,
                api_version=api_version,
                namespace=namespace,
                caller_name=caller_name,
                caller_version=caller_version,
            ),
            caller_name=caller_name or self.caller_name,
            caller_version=caller_version or self.caller_version,
            additional_headers=additional_headers or self.additional_headers,
        )

    def set_caller(
        self,
        caller_name: Optional[str] = None,
        caller_version: Optional[str] = None,
    ) -> None:
        # Propagate the caller identity to the client and mirror it locally.
        self.astra_db.set_caller(
            caller_name=caller_name,
            caller_version=caller_version,
        )
        self.caller_name = caller_name
        self.caller_version = caller_version

    def to_sync(self) -> AstraDBCollection:
        # Build the synchronous counterpart of this collection object.
        return AstraDBCollection(
            collection_name=self.collection_name,
            astra_db=self.astra_db.to_sync(),
            caller_name=self.caller_name,
            caller_version=self.caller_version,
            additional_headers=self.additional_headers,
        )

    async def _request(
        self,
        method: str = http_methods.POST,
        path: Optional[str] = None,
        json_data: Optional[Dict[str, Any]] = None,
        url_params: Optional[Dict[str, Any]] = None,
        skip_error_check: bool = False,
        timeout_info: TimeoutInfoWideType = None,
        **kwargs: Any,
    ) -> API_RESPONSE:
        # Single funnel for all HTTP traffic of this collection: payloads are
        # normalized on the way out and restored on the way back.
        adirect_response = await async_api_request(
            client=self.client,
            base_url=self.astra_db.base_url,
            auth_header=DEFAULT_AUTH_HEADER,
            token=self.astra_db.token,
            method=method,
            json_data=normalize_for_api(json_data),
            url_params=url_params,
            path=path,
            skip_error_check=skip_error_check,
            caller_name=self.caller_name,
            caller_version=self.caller_version,
            timeout=to_httpx_timeout(timeout_info),
            additional_headers=self.additional_headers,
        )
        response = restore_from_api(adirect_response)
        return response

    async def post_raw_request(
        self, body: Dict[str, Any], timeout_info: TimeoutInfoWideType = None
    ) -> API_RESPONSE:
        # Escape hatch: POST an arbitrary, caller-built payload to the
        # collection endpoint.
        return await self._request(
            method=http_methods.POST,
            path=self.base_path,
            json_data=body,
            timeout_info=timeout_info,
        )

    async def _get(
        self,
        path: Optional[str] = None,
        options: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Optional[API_RESPONSE]:
        full_path = f"{self.base_path}/{path}" if path else self.base_path
        response = await self._request(
            method=http_methods.GET,
            path=full_path,
            url_params=options,
            timeout_info=timeout_info,
        )
        # Non-dict responses are swallowed into a None return value.
        if isinstance(response, dict):
            return response
        return None

    async def _put(
        self,
        path: Optional[str] = None,
        document: Optional[API_RESPONSE] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        full_path = f"{self.base_path}/{path}" if path else self.base_path
        response = await self._request(
            method=http_methods.PUT,
            path=full_path,
            json_data=document,
            timeout_info=timeout_info,
        )
        return response

    async def _post(
        self,
        path: Optional[str] = None,
        document: Optional[API_DOC] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        full_path = f"{self.base_path}/{path}" if path else self.base_path
        response = await self._request(
            method=http_methods.POST,
            path=full_path,
            json_data=document,
            timeout_info=timeout_info,
        )
        return response

    def _recast_as_sort_projection(
        self, vector: List[float], fields: Optional[List[str]] = None
    ) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
        """
        Given a vector and optionally a list of fields,
        reformulate them as a sort, projection pair for regular
        'find'-like API calls (with basic validation as well).
        """
        # Must pass a vector
        if not vector:
            raise ValueError("Must pass a vector")

        # Edge case for field selection
        if fields and "$similarity" in fields:
            raise ValueError("Please use the `include_similarity` parameter")

        # Build the new vector parameter
        sort: Dict[str, Any] = {"$vector": vector}

        # Build the new fields parameter
        # Note: do not leave projection={}, make it None
        # (or it will devour $similarity away in the API response)
        if fields is not None and len(fields) > 0:
            projection = {f: 1 for f in fields}
        else:
            projection = None

        return sort, projection

    async def get(
        self, path: Optional[str] = None, timeout_info: TimeoutInfoWideType = None
    ) -> Optional[API_RESPONSE]:
        """
        Retrieve a document from the collection by its path.

        Args:
            path (str, optional): The path of the document to retrieve.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The retrieved document.
        """
        return await self._get(path=path, timeout_info=timeout_info)

    async def find(
        self,
        filter: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        sort: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find documents in the collection that match the given filter.

        Args:
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            sort (dict, optional): Specifies the order in which to return
                matching documents.
            options (dict, optional): Additional options for the query.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The query response containing matched documents.
        """
        json_query = make_payload(
            top_level="find",
            filter=filter,
            projection=projection,
            options=options,
            sort=sort,
        )
        response = await self._post(document=json_query, timeout_info=timeout_info)
        return response

    async def vector_find(
        self,
        vector: List[float],
        *,
        limit: int,
        filter: Optional[Dict[str, Any]] = None,
        fields: Optional[List[str]] = None,
        include_similarity: bool = True,
        timeout_info: TimeoutInfoWideType = None,
    ) -> List[API_DOC]:
        """
        Perform a vector-based search in the collection.

        Args:
            vector (list): The vector to search with.
            limit (int): The maximum number of documents to return.
            filter (dict, optional): Criteria to filter documents.
            fields (list, optional): Specifies the fields to return.
            include_similarity (bool, optional): Whether to include
                similarity score in the result.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            list: A list of documents matching the vector search criteria.
        """
        # Must pass a limit
        if not limit:
            raise ValueError("Must pass a limit")

        # Pre-process the included arguments
        sort, projection = self._recast_as_sort_projection(
            vector,
            fields=fields,
        )

        # Call the underlying find() method to search
        raw_find_result = await self.find(
            filter=filter,
            projection=projection,
            sort=sort,
            options={
                "limit": limit,
                "includeSimilarity": include_similarity,
            },
            timeout_info=timeout_info,
        )

        return cast(List[API_DOC], raw_find_result["data"]["documents"])

    @staticmethod
    async def paginate(
        *,
        request_method: AsyncPaginableRequestMethod,
        options: Optional[Dict[str, Any]],
        prefetched: Optional[int] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> AsyncGenerator[API_DOC, None]:
        """
        Generate paginated results for a given database query method.

        Args:
            request_method (function): The database query method to paginate.
            options (dict, optional): Options for the database query.
            prefetched (int, optional): Number of pre-fetched documents.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Yields:
            dict: The next document in the paginated result set.
        """
        # NOTE(review): `timeout_info` is accepted but not forwarded to
        # `request_method` in this body -- presumably the caller pre-binds it
        # into `request_method` (as `paginated_find` does); confirm.
        _options = options or {}
        response0 = await request_method(options=_options)
        next_page_state = response0["data"]["nextPageState"]
        options0 = _options
        if next_page_state is not None and prefetched:

            # Background producer: recursively paginates from the second page
            # onward, feeding documents into a bounded queue; a final None
            # acts as the end-of-stream sentinel.
            async def queued_paginate(
                queue: asyncio.Queue[Optional[API_DOC]],
                request_method: AsyncPaginableRequestMethod,
                options: Optional[Dict[str, Any]],
            ) -> None:
                try:
                    async for doc in AsyncAstraDBCollection.paginate(
                        request_method=request_method, options=options
                    ):
                        await queue.put(doc)
                finally:
                    await queue.put(None)

            # The queue size bounds how many documents are prefetched ahead
            # of consumption.
            queue: asyncio.Queue[Optional[API_DOC]] = asyncio.Queue(prefetched)
            options1 = {**options0, **{"pageState": next_page_state}}
            asyncio.create_task(queued_paginate(queue, request_method, options1))
            for document in response0["data"]["documents"]:
                yield document
            doc = await queue.get()
            while doc is not None:
                yield doc
                doc = await queue.get()
        else:
            # No prefetch: plain sequential page-by-page iteration driven by
            # the nextPageState cursor.
            for document in response0["data"]["documents"]:
                yield document
            while next_page_state is not None:
                options1 = {**options0, **{"pageState": next_page_state}}
                response1 = await request_method(options=options1)
                for document in response1["data"]["documents"]:
                    yield document
                next_page_state = response1["data"]["nextPageState"]

    def paginated_find(
        self,
        filter: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        sort: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        prefetched: Optional[int] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> AsyncIterator[API_DOC]:
        """
        Perform a paginated search in the collection.

        Args:
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            sort (dict, optional): Specifies the order in which to return
                matching documents.
            options (dict, optional): Additional options for the query.
            prefetched (int, optional): Number of pre-fetched documents
            timeout_info: a float, or a TimeoutInfo dict, for each single
                HTTP request.
                This is a paginated method, that issues several requests as it
                needs more data. This parameter controls a single request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            generator: A generator yielding documents in the
                paginated result set.
        """
        # The timeout is bound here, once, so every page request issued by
        # paginate() carries it.
        partialed_find = partial(
            self.find,
            filter=filter,
            projection=projection,
            sort=sort,
            timeout_info=timeout_info,
        )
        return self.paginate(
            request_method=partialed_find,
            options=options,
            prefetched=prefetched,
        )

    async def pop(
        self,
        filter: Dict[str, Any],
        pop: Dict[str, Any],
        options: Dict[str, Any],
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Pop the last data in the tags array

        Args:
            filter (dict): Criteria to identify the document to update.
            pop (dict): The pop to apply to the tags.
            options (dict): Additional options for the update operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The original document before the update.
        """
        # Expressed as a findOneAndUpdate carrying a $pop update clause.
        json_query = make_payload(
            top_level="findOneAndUpdate",
            filter=filter,
            update={"$pop": pop},
            options=options,
        )
        response = await self._request(
            method=http_methods.POST,
            path=self.base_path,
            json_data=json_query,
            timeout_info=timeout_info,
        )
        return response

    async def push(
        self,
        filter: Dict[str, Any],
        push: Dict[str, Any],
        options: Dict[str, Any],
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Push new data to the tags array

        Args:
            filter (dict): Criteria to identify the document to update.
            push (dict): The push to apply to the tags.
            options (dict): Additional options for the update operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The result of the update operation.
        """
        # Expressed as a findOneAndUpdate carrying a $push update clause.
        json_query = make_payload(
            top_level="findOneAndUpdate",
            filter=filter,
            update={"$push": push},
            options=options,
        )
        response = await self._request(
            method=http_methods.POST,
            path=self.base_path,
            json_data=json_query,
            timeout_info=timeout_info,
        )
        return response

    async def find_one_and_replace(
        self,
        replacement: Dict[str, Any],
        *,
        filter: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        sort: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find a single document and replace it.

        Args:
            replacement (dict): The new document to replace the existing one.
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            sort (dict, optional): Specifies the order in which to find
                the document.
            options (dict, optional): Additional options for the operation.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The result of the find and replace operation.
        """
        json_query = make_payload(
            top_level="findOneAndReplace",
            filter=filter,
            projection=projection,
            replacement=replacement,
            options=options,
            sort=sort,
        )
        response = await self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )
        return response

    async def vector_find_one_and_replace(
        self,
        vector: List[float],
        replacement: Dict[str, Any],
        *,
        filter: Optional[Dict[str, Any]] = None,
        fields: Optional[List[str]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Union[API_DOC, None]:
        """
        Perform a vector-based search and replace the first matched document.

        Args:
            vector (dict): The vector to search with.
            replacement (dict): The new document to replace the existing one.
            filter (dict, optional): Criteria to filter documents.
            fields (list, optional): Specifies the fields to return
                in the result.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict or None: either the matched document or None if nothing found
        """
        # Pre-process the included arguments
        sort, projection = self._recast_as_sort_projection(
            vector,
            fields=fields,
        )

        # Call the underlying find() method to search
        raw_find_result = await self.find_one_and_replace(
            replacement=replacement,
            filter=filter,
            projection=projection,
            sort=sort,
            timeout_info=timeout_info,
        )

        return cast(Union[API_DOC, None], raw_find_result["data"]["document"])

    async def find_one_and_update(
        self,
        update: Dict[str, Any],
        sort: Optional[Dict[str, Any]] = {},
        filter: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find a single document and update it.

        Args:
            sort (dict, optional): Specifies the order in which to find
                the document.
            update (dict): The update to apply to the document.
            filter (dict, optional): Criteria to filter documents.
            options (dict, optional): Additional options for the operation.
            projection (dict, optional): Specifies the fields to return.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The result of the find and update operation.
        """
        # NOTE(review): `sort` has a mutable default ({}); it does not appear
        # to be mutated here (only handed to make_payload), but presumably a
        # None default with the same payload outcome would be safer -- verify
        # against make_payload's handling of {} vs None before changing.
        json_query = make_payload(
            top_level="findOneAndUpdate",
            filter=filter,
            update=update,
            options=options,
            sort=sort,
            projection=projection,
        )
        response = await self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )
        return response

    async def vector_find_one_and_update(
        self,
        vector: List[float],
        update: Dict[str, Any],
        *,
        filter: Optional[Dict[str, Any]] = None,
        fields: Optional[List[str]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Union[API_DOC, None]:
        """
        Perform a vector-based search and update the first matched document.

        Args:
            vector (list): The vector to search with.
            update (dict): The update to apply to the matched document.
            filter (dict, optional): Criteria to filter documents before
                applying the vector search.
            fields (list, optional): Specifies the fields to return in the
                updated document.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict or None: The result of the vector-based find
                and update operation, or None if nothing found
        """
        # Pre-process the included arguments
        sort, projection = self._recast_as_sort_projection(
            vector,
            fields=fields,
        )

        # Call the underlying find() method to search
        raw_find_result = await self.find_one_and_update(
            update=update,
            filter=filter,
            sort=sort,
            projection=projection,
            timeout_info=timeout_info,
        )

        return cast(Union[API_DOC, None], raw_find_result["data"]["document"])

    async def find_one_and_delete(
        self,
        sort: Optional[Dict[str, Any]] = {},
        filter: Optional[Dict[str, Any]] = None,
        projection: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find a single document and delete it.

        Args:
            sort (dict, optional): Specifies the order in which to find
                the document.
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: The result of the find and delete operation.
        """
        json_query = make_payload(
            top_level="findOneAndDelete",
            filter=filter,
            sort=sort,
            projection=projection,
        )
        response = await self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )
        return response

    async def count_documents(
        self, filter: Dict[str, Any] = {}, timeout_info: TimeoutInfoWideType = None
    ) -> API_RESPONSE:
        """
        Count documents matching a given predicate (expressed as filter).

        Args:
            filter (dict, defaults to {}): Criteria to filter documents.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: the response, either
                {"status": {"count": <NUMBER> }}
            or
                {"errors": [...]}
        """
        json_query = make_payload(
            top_level="countDocuments",
            filter=filter,
        )
        response = await self._post(document=json_query, timeout_info=timeout_info)
        return response

    async def find_one(
        self,
        filter: Optional[Dict[str, Any]] = {},
        projection: Optional[Dict[str, Any]] = {},
        sort: Optional[Dict[str, Any]] = {},
        options: Optional[Dict[str, Any]] = {},
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Find a single document in the collection.

        Args:
            filter (dict, optional): Criteria to filter documents.
            projection (dict, optional): Specifies the fields to return.
            sort (dict, optional): Specifies the order in which to return
                the document.
            options (dict, optional): Additional options for the query.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict: the response, either
                {"data": {"document": <DOCUMENT> }}
            or
                {"data": {"document": None}}
            depending on whether a matching document is found or not.
        """
        json_query = make_payload(
            top_level="findOne",
            filter=filter,
            projection=projection,
            options=options,
            sort=sort,
        )
        response = await self._post(document=json_query, timeout_info=timeout_info)
        return response

    async def vector_find_one(
        self,
        vector: List[float],
        *,
        filter: Optional[Dict[str, Any]] = None,
        fields: Optional[List[str]] = None,
        include_similarity: bool = True,
        timeout_info: TimeoutInfoWideType = None,
    ) -> Union[API_DOC, None]:
        """
        Perform a vector-based search to find a single document
        in the collection.

        Args:
            vector (list): The vector to search with.
            filter (dict, optional): Additional criteria to filter documents.
            fields (list, optional): Specifies the fields to return
                in the result.
            include_similarity (bool, optional): Whether to include
                similarity score in the result.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block
                the action taken by the API server
                if it has received the request already.

        Returns:
            dict or None: The found document
                or None if no matching document is found.
        """
        # Pre-process the included arguments
        sort, projection = self._recast_as_sort_projection(
            vector,
            fields=fields,
        )

        # Call the underlying find() method to search
        raw_find_result = await self.find_one(
            filter=filter,
            projection=projection,
            sort=sort,
            options={"includeSimilarity": include_similarity},
            timeout_info=timeout_info,
        )

        return cast(Union[API_DOC, None], raw_find_result["data"]["document"])

    async def insert_one(
        self,
        document: API_DOC,
        failures_allowed: bool = False,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Insert a single document into the collection.

        Args:
            document (dict): The document to insert.
failures_allowed (bool): Whether to allow failures in the insert operation. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database after the insert operation. """ json_query = make_payload(top_level="insertOne", document=document) response = await self._request( method=http_methods.POST, path=self.base_path, json_data=json_query, skip_error_check=failures_allowed, timeout_info=timeout_info, ) return response async def insert_many( self, documents: List[API_DOC], options: Optional[Dict[str, Any]] = None, partial_failures_allowed: bool = False, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Insert multiple documents into the collection. Args: documents (list): A list of documents to insert. options (dict, optional): Additional options for the insert operation. partial_failures_allowed (bool, optional): Whether to allow partial failures through the insertion (i.e. on some documents). timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database after the insert operation. 
""" json_query = make_payload( top_level="insertMany", documents=documents, options=options ) response = await self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, skip_error_check=partial_failures_allowed, timeout_info=timeout_info, ) return response async def chunked_insert_many( self, documents: List[API_DOC], options: Optional[Dict[str, Any]] = None, partial_failures_allowed: bool = False, chunk_size: int = MAX_INSERT_NUM_DOCUMENTS, concurrency: int = 1, timeout_info: TimeoutInfoWideType = None, ) -> List[Union[API_RESPONSE, Exception]]: """ Insert multiple documents into the collection, handling chunking and optionally with concurrent insertions. Args: documents (list): A list of documents to insert. options (dict, optional): Additional options for the insert operation. partial_failures_allowed (bool, optional): Whether to allow partial failures in the chunk. Should be used combined with options={"ordered": False} in most cases. chunk_size (int, optional): Override the default insertion chunk size. concurrency (int, optional): The number of concurrent chunk insertions. Default is no concurrency. timeout_info: a float, or a TimeoutInfo dict, for each single HTTP request. This method runs a number of HTTP requests as it works on chunked data. The timeout refers to each individual such request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: list: The responses from the database after the chunked insert operation. This is a list of individual responses from the API: the caller will need to inspect them all, e.g. to collate the inserted IDs. 
""" sem = asyncio.Semaphore(concurrency) async def concurrent_insert_many( docs: List[API_DOC], index: int, partial_failures_allowed: bool, ) -> Union[API_RESPONSE, Exception]: async with sem: logger.debug(f"Processing chunk #{index + 1} of size {len(docs)}") try: return await self.insert_many( documents=docs, options=options, partial_failures_allowed=partial_failures_allowed, timeout_info=timeout_info, ) except APIRequestError as e: if partial_failures_allowed: return e else: raise e if concurrency > 1: tasks = [ asyncio.create_task( concurrent_insert_many( documents[i : i + chunk_size], i, partial_failures_allowed ) ) for i in range(0, len(documents), chunk_size) ] results = await asyncio.gather(*tasks, return_exceptions=False) else: # this ensures the expectation of # "sequential strictly obeys fail-fast if ordered and concurrency==1" results = [ await concurrent_insert_many( documents[i : i + chunk_size], i, partial_failures_allowed ) for i in range(0, len(documents), chunk_size) ] return results async def update_one( self, filter: Dict[str, Any], update: Dict[str, Any], sort: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Update a single document in the collection. Args: filter (dict): Criteria to identify the document to update. update (dict): The update to apply to the document. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database after the update operation. 
""" json_query = make_payload( top_level="updateOne", filter=filter, update=update, sort=sort, ) response = await self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, timeout_info=timeout_info, ) return response async def update_many( self, filter: Dict[str, Any], update: Dict[str, Any], options: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Updates multiple documents in the collection. Args: filter (dict): Criteria to identify the document to update. update (dict): The update to apply to the document. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database after the update operation. """ json_query = make_payload( top_level="updateMany", filter=filter, update=update, options=options, ) response = await self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, timeout_info=timeout_info, ) return response async def replace( self, path: str, document: API_DOC, timeout_info: TimeoutInfoWideType = None ) -> API_RESPONSE: """ Replace a document in the collection. Args: path (str): The path to the document to replace. document (dict): The new document to replace the existing one. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database after the replace operation. """ return await self._put(path=path, document=document, timeout_info=timeout_info) async def delete_one( self, id: str, sort: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Delete a single document from the collection based on its ID. Args: id (str): The ID of the document to delete. 
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the delete operation.
        """
        # Deletion by ID is just a deleteOne with an _id equality filter.
        json_query = make_payload(
            top_level="deleteOne",
            filter={"_id": id},
            sort=sort,
        )

        response = await self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    async def delete_one_by_predicate(
        self,
        filter: Dict[str, Any],
        sort: Optional[Dict[str, Any]] = None,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Delete a single document from the collection based on a filter clause

        Args:
            filter: any filter dictionary
            sort (dict, optional): sort clause forwarded to the API
                together with the filter.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the delete operation.
        """
        json_query = make_payload(
            top_level="deleteOne",
            filter=filter,
            sort=sort,
        )

        response = await self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    async def delete_many(
        self,
        filter: Dict[str, Any],
        skip_error_check: bool = False,
        timeout_info: TimeoutInfoWideType = None,
    ) -> API_RESPONSE:
        """
        Delete many documents from the collection based on a filter condition

        Args:
            filter (dict): Criteria to identify the documents to delete.
            skip_error_check (bool): whether to ignore the check for API error
                and return the response untouched. Default is False.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the delete operation.
""" json_query = { "deleteMany": { "filter": filter, } } response = await self._request( method=http_methods.POST, path=f"{self.base_path}", json_data=json_query, skip_error_check=skip_error_check, timeout_info=timeout_info, ) return response async def chunked_delete_many( self, filter: Dict[str, Any], timeout_info: TimeoutInfoWideType = None ) -> List[API_RESPONSE]: """ Delete many documents from the collection based on a filter condition, chaining several API calls until exhaustion of the documents to delete. Args: filter (dict): Criteria to identify the documents to delete. timeout_info: a float, or a TimeoutInfo dict, for each single HTTP request. This method runs a number of HTTP requests as it works on a pagination basis. The timeout refers to each individual such request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: List[dict]: The responses from the database from all the calls """ responses = [] must_proceed = True while must_proceed: dm_response = await self.delete_many( filter=filter, timeout_info=timeout_info ) responses.append(dm_response) must_proceed = dm_response.get("status", {}).get("moreData", False) return responses async def clear(self, timeout_info: TimeoutInfoWideType = None) -> API_RESPONSE: """ Clear the collection, deleting all documents Args: timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database. """ clear_response = await self.delete_many(filter={}, timeout_info=timeout_info) if clear_response.get("status", {}).get("deletedCount") != -1: raise ValueError( f"Could not issue a clear-collection API command (response: {json.dumps(clear_response)})." 
            )
        return clear_response

    async def delete_subdocument(
        self, id: str, subdoc: str, timeout_info: TimeoutInfoWideType = None
    ) -> API_RESPONSE:
        """
        Delete a subdocument or field from a document in the collection.

        Args:
            id (str): The ID of the document containing the subdocument.
            subdoc (str): The key of the subdocument or field to remove.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP request.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            dict: The response from the database after the update operation.
        """
        # Field removal is expressed as a findOneAndUpdate with $unset on the key.
        json_query = {
            "findOneAndUpdate": {
                "filter": {"_id": id},
                "update": {"$unset": {subdoc: ""}},
            }
        }

        response = await self._request(
            method=http_methods.POST,
            path=f"{self.base_path}",
            json_data=json_query,
            timeout_info=timeout_info,
        )

        return response

    @deprecation.deprecated(  # type: ignore
        deprecated_in="0.7.0",
        removed_in="1.0.0",
        current_version=__version__,
        details="Use the 'upsert_one' method instead",
    )
    async def upsert(
        self, document: API_DOC, timeout_info: TimeoutInfoWideType = None
    ) -> str:
        # Deprecated alias: delegates unchanged to upsert_one.
        return await self.upsert_one(document, timeout_info=timeout_info)

    async def upsert_one(
        self,
        document: API_DOC,
        timeout_info: TimeoutInfoWideType = None,
    ) -> str:
        """
        Emulate an upsert operation for a single document in the collection.

        This method attempts to insert the document. If a document with the same
        _id exists, it updates the existing document.

        Args:
            document (dict): The document to insert or update.
            timeout_info: a float, or a TimeoutInfo dict, for the HTTP requests.
                This method may issue one or two requests, depending on what
                is detected on DB. This timeout controls each HTTP request
                individually.
                Note that a 'read' timeout event will not block the action taken
                by the API server if it has received the request already.

        Returns:
            str: The _id of the inserted or updated document.
""" # Build the payload for the insert attempt result = await self.insert_one( document, failures_allowed=True, timeout_info=timeout_info ) # If the call failed because of preexisting doc, then we replace it if "errors" in result: if ( "errorCode" in result["errors"][0] and result["errors"][0]["errorCode"] == "DOCUMENT_ALREADY_EXISTS" ): # Now we attempt the update result = await self.find_one_and_replace( replacement=document, filter={"_id": document["_id"]}, timeout_info=timeout_info, ) upserted_id = cast(str, result["data"]["document"]["_id"]) else: raise ValueError(result) else: if result.get("status", {}).get("insertedIds", []): upserted_id = cast(str, result["status"]["insertedIds"][0]) else: raise ValueError("Unexplained empty insertedIds from API") return upserted_id async def upsert_many( self, documents: list[API_DOC], concurrency: int = 1, partial_failures_allowed: bool = False, timeout_info: TimeoutInfoWideType = None, ) -> List[Union[str, Exception]]: """ Emulate an upsert operation for multiple documents in the collection. This method attempts to insert the documents. If a document with the same _id exists, it updates the existing document. Args: documents (List[dict]): The documents to insert or update. concurrency (int, optional): The number of concurrent upserts. partial_failures_allowed (bool, optional): Whether to allow partial failures in the batch. timeout_info: a float, or a TimeoutInfo dict, for each HTTP request. This method issues a separate HTTP request for each document to insert: the timeout controls each such request individually. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: List[Union[str, Exception]]: A list of "_id"s of the inserted or updated documents. 
""" sem = asyncio.Semaphore(concurrency) async def concurrent_upsert(doc: API_DOC) -> str: async with sem: return await self.upsert_one(document=doc, timeout_info=timeout_info) tasks = [asyncio.create_task(concurrent_upsert(doc)) for doc in documents] results = await asyncio.gather( *tasks, return_exceptions=partial_failures_allowed ) for result in results: if isinstance(result, BaseException) and not isinstance(result, Exception): raise result return results # type: ignore class AstraDB: # Initialize the shared httpx client as a class attribute client = httpx.Client() def __init__( self, token: str, api_endpoint: str, api_path: Optional[str] = None, api_version: Optional[str] = None, namespace: Optional[str] = None, caller_name: Optional[str] = None, caller_version: Optional[str] = None, ) -> None: """ Initialize an Astra DB instance. Args: token (str): Authentication token for Astra DB. api_endpoint (str): API endpoint URL. api_path (str, optional): used to override default URI construction api_version (str, optional): to override default URI construction namespace (str, optional): Namespace for the database. caller_name (str, optional): identity of the caller ("my_framework") caller_version (str, optional): version of the caller code ("1.0.3") """ self.caller_name = caller_name self.caller_version = caller_version if token is None or api_endpoint is None: raise AssertionError("Must provide token and api_endpoint") if namespace is None: logger.info( f"ASTRA_DB_KEYSPACE is not set. 
Defaulting to '{DEFAULT_KEYSPACE_NAME}'" ) namespace = DEFAULT_KEYSPACE_NAME # Store the API token self.token = token self.api_endpoint = api_endpoint # Set the Base URL for the API calls self.base_url = self.api_endpoint.strip("/") # Set the API version and path from the call self.api_path = (DEFAULT_JSON_API_PATH if api_path is None else api_path).strip( "/" ) self.api_version = ( DEFAULT_JSON_API_VERSION if api_version is None else api_version ).strip("/") # Set the namespace self.namespace = namespace # Finally, construct the full base path base_path_components = [ comp for comp in (self.api_path, self.api_version, self.namespace) if comp != "" ] self.base_path: str = f"/{'/'.join(base_path_components)}" def __repr__(self) -> str: return f'AstraDB[endpoint="{self.base_url}", keyspace="{self.namespace}"]' def __eq__(self, other: Any) -> bool: if isinstance(other, AstraDB): # work on the "normalized" quantities (stripped, etc) return all( [ self.token == other.token, self.base_url == other.base_url, self.base_path == other.base_path, self.caller_name == other.caller_name, self.caller_version == other.caller_version, ] ) else: return False def copy( self, *, token: Optional[str] = None, api_endpoint: Optional[str] = None, api_path: Optional[str] = None, api_version: Optional[str] = None, namespace: Optional[str] = None, caller_name: Optional[str] = None, caller_version: Optional[str] = None, ) -> AstraDB: return AstraDB( token=token or self.token, api_endpoint=api_endpoint or self.base_url, api_path=api_path or self.api_path, api_version=api_version or self.api_version, namespace=namespace or self.namespace, caller_name=caller_name or self.caller_name, caller_version=caller_version or self.caller_version, ) def to_async(self) -> AsyncAstraDB: return AsyncAstraDB( token=self.token, api_endpoint=self.base_url, api_path=self.api_path, api_version=self.api_version, namespace=self.namespace, caller_name=self.caller_name, caller_version=self.caller_version, ) def 
set_caller( self, caller_name: Optional[str] = None, caller_version: Optional[str] = None, ) -> None: self.caller_name = caller_name self.caller_version = caller_version def _request( self, method: str = http_methods.POST, path: Optional[str] = None, json_data: Optional[Dict[str, Any]] = None, url_params: Optional[Dict[str, Any]] = None, skip_error_check: bool = False, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: direct_response = api_request( client=self.client, base_url=self.base_url, auth_header=DEFAULT_AUTH_HEADER, token=self.token, method=method, json_data=normalize_for_api(json_data), url_params=url_params, path=path, skip_error_check=skip_error_check, caller_name=self.caller_name, caller_version=self.caller_version, timeout=to_httpx_timeout(timeout_info), additional_headers={}, ) response = restore_from_api(direct_response) return response def post_raw_request( self, body: Dict[str, Any], timeout_info: TimeoutInfoWideType = None ) -> API_RESPONSE: return self._request( method=http_methods.POST, path=self.base_path, json_data=body, timeout_info=timeout_info, ) def collection(self, collection_name: str) -> AstraDBCollection: """ Retrieve a collection from the database. Args: collection_name (str): The name of the collection to retrieve. Returns: AstraDBCollection: The collection object. """ return AstraDBCollection(collection_name=collection_name, astra_db=self) def get_collections( self, options: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Retrieve a list of collections from the database. Args: options (dict, optional): Options to get the collection list timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. 
Returns: dict: An object containing the list of collections in the database: {"status": {"collections": [...]}} """ # Parse the options parameter if options is None: options = {} json_query = make_payload( top_level="findCollections", options=options, ) response = self._request( method=http_methods.POST, path=self.base_path, json_data=json_query, timeout_info=timeout_info, ) return response def create_collection( self, collection_name: str, *, options: Optional[Dict[str, Any]] = None, dimension: Optional[int] = None, metric: Optional[str] = None, service_dict: Optional[Dict[str, str]] = None, timeout_info: TimeoutInfoWideType = None, ) -> AstraDBCollection: """ Create a new collection in the database. Args: collection_name (str): The name of the collection to create. options (dict, optional): Options for the collection. dimension (int, optional): Dimension for vector search. metric (str, optional): Metric choice for vector search. service_dict (dict, optional): a definition for the $vectorize service timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: AstraDBCollection: The created collection object. 
""" # options from named params vector_options = { k: v for k, v in { "dimension": dimension, "metric": metric, "service": service_dict, }.items() if v is not None } # overlap/merge with stuff in options.vector dup_params = set((options or {}).get("vector", {}).keys()) & set( vector_options.keys() ) # If any params are duplicated, we raise an error if dup_params: dups = ", ".join(sorted(dup_params)) raise ValueError( f"Parameter(s) {dups} passed both to the method and in the options" ) # Build our options dictionary if we have vector options if vector_options: options = options or {} options["vector"] = { **options.get("vector", {}), **vector_options, } # Build the final json payload jsondata = { k: v for k, v in {"name": collection_name, "options": options}.items() if v is not None } # Make the request to the endpoint self._request( method=http_methods.POST, path=f"{self.base_path}", json_data={"createCollection": jsondata}, timeout_info=timeout_info, ) # Get the instance object as the return of the call return AstraDBCollection(astra_db=self, collection_name=collection_name) def delete_collection( self, collection_name: str, timeout_info: TimeoutInfoWideType = None ) -> API_RESPONSE: """ Delete a collection from the database. Args: collection_name (str): The name of the collection to delete. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database. 
""" # Make sure we provide a collection name if not collection_name: raise ValueError("Must provide a collection name") response = self._request( method=http_methods.POST, path=f"{self.base_path}", json_data={"deleteCollection": {"name": collection_name}}, timeout_info=timeout_info, ) return response @deprecation.deprecated( # type: ignore deprecated_in="0.7.0", removed_in="1.0.0", current_version=__version__, details="Use the 'AstraDBCollection.clear()' method instead", ) def truncate_collection( self, collection_name: str, timeout_info: TimeoutInfoWideType = None ) -> AstraDBCollection: """ Clear a collection in the database, deleting all stored documents. Args: collection_name (str): The name of the collection to clear. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: collection: an AstraDBCollection instance """ collection = AstraDBCollection( collection_name=collection_name, astra_db=self, ) clear_response = collection.clear(timeout_info=timeout_info) if clear_response.get("status", {}).get("deletedCount") != -1: raise ValueError( f"Could not issue a truncation API command (response: {json.dumps(clear_response)})." ) # return the collection itself return collection class AsyncAstraDB: def __init__( self, token: str, api_endpoint: str, api_path: Optional[str] = None, api_version: Optional[str] = None, namespace: Optional[str] = None, caller_name: Optional[str] = None, caller_version: Optional[str] = None, ) -> None: """ Initialize an Astra DB instance. Args: token (str): Authentication token for Astra DB. api_endpoint (str): API endpoint URL. api_path (str, optional): used to override default URI construction api_version (str, optional): to override default URI construction namespace (str, optional): Namespace for the database. 
caller_name (str, optional): identity of the caller ("my_framework") caller_version (str, optional): version of the caller code ("1.0.3") """ self.caller_name = caller_name self.caller_version = caller_version self.client = httpx.AsyncClient() if token is None or api_endpoint is None: raise AssertionError("Must provide token and api_endpoint") if namespace is None: logger.info( f"ASTRA_DB_KEYSPACE is not set. Defaulting to '{DEFAULT_KEYSPACE_NAME}'" ) namespace = DEFAULT_KEYSPACE_NAME # Store the API token self.token = token self.api_endpoint = api_endpoint # Set the Base URL for the API calls self.base_url = self.api_endpoint.strip("/") # Set the API version and path from the call self.api_path = (DEFAULT_JSON_API_PATH if api_path is None else api_path).strip( "/" ) self.api_version = ( DEFAULT_JSON_API_VERSION if api_version is None else api_version ).strip("/") # Set the namespace self.namespace = namespace # Finally, construct the full base path base_path_components = [ comp for comp in (self.api_path, self.api_version, self.namespace) if comp != "" ] self.base_path: str = f"/{'/'.join(base_path_components)}" def __repr__(self) -> str: return f'AsyncAstraDB[endpoint="{self.base_url}", keyspace="{self.namespace}"]' def __eq__(self, other: Any) -> bool: if isinstance(other, AsyncAstraDB): # work on the "normalized" quantities (stripped, etc) return all( [ self.token == other.token, self.base_url == other.base_url, self.base_path == other.base_path, self.caller_name == other.caller_name, self.caller_version == other.caller_version, ] ) else: return False async def __aenter__(self) -> AsyncAstraDB: return self async def __aexit__( self, exc_type: Optional[Type[BaseException]] = None, exc_value: Optional[BaseException] = None, traceback: Optional[TracebackType] = None, ) -> None: await self.client.aclose() def copy( self, *, token: Optional[str] = None, api_endpoint: Optional[str] = None, api_path: Optional[str] = None, api_version: Optional[str] = None, namespace: 
Optional[str] = None, caller_name: Optional[str] = None, caller_version: Optional[str] = None, ) -> AsyncAstraDB: return AsyncAstraDB( token=token or self.token, api_endpoint=api_endpoint or self.base_url, api_path=api_path or self.api_path, api_version=api_version or self.api_version, namespace=namespace or self.namespace, caller_name=caller_name or self.caller_name, caller_version=caller_version or self.caller_version, ) def to_sync(self) -> AstraDB: return AstraDB( token=self.token, api_endpoint=self.base_url, api_path=self.api_path, api_version=self.api_version, namespace=self.namespace, caller_name=self.caller_name, caller_version=self.caller_version, ) def set_caller( self, caller_name: Optional[str] = None, caller_version: Optional[str] = None, ) -> None: self.caller_name = caller_name self.caller_version = caller_version async def _request( self, method: str = http_methods.POST, path: Optional[str] = None, json_data: Optional[Dict[str, Any]] = None, url_params: Optional[Dict[str, Any]] = None, skip_error_check: bool = False, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: adirect_response = await async_api_request( client=self.client, base_url=self.base_url, auth_header=DEFAULT_AUTH_HEADER, token=self.token, method=method, json_data=normalize_for_api(json_data), url_params=url_params, path=path, skip_error_check=skip_error_check, caller_name=self.caller_name, caller_version=self.caller_version, timeout=to_httpx_timeout(timeout_info), additional_headers={}, ) response = restore_from_api(adirect_response) return response async def post_raw_request( self, body: Dict[str, Any], timeout_info: TimeoutInfoWideType = None ) -> API_RESPONSE: return await self._request( method=http_methods.POST, path=self.base_path, json_data=body, timeout_info=timeout_info, ) async def collection(self, collection_name: str) -> AsyncAstraDBCollection: """ Retrieve a collection from the database. Args: collection_name (str): The name of the collection to retrieve. 
timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: AstraDBCollection: The collection object. """ return AsyncAstraDBCollection(collection_name=collection_name, astra_db=self) async def get_collections( self, options: Optional[Dict[str, Any]] = None, timeout_info: TimeoutInfoWideType = None, ) -> API_RESPONSE: """ Retrieve a list of collections from the database. Args: options (dict, optional): Options to get the collection list timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: An object containing the list of collections in the database: {"status": {"collections": [...]}} """ # Parse the options parameter if options is None: options = {} json_query = make_payload( top_level="findCollections", options=options, ) response = await self._request( method=http_methods.POST, path=self.base_path, json_data=json_query, timeout_info=timeout_info, ) return response async def create_collection( self, collection_name: str, *, options: Optional[Dict[str, Any]] = None, dimension: Optional[int] = None, metric: Optional[str] = None, service_dict: Optional[Dict[str, str]] = None, timeout_info: TimeoutInfoWideType = None, ) -> AsyncAstraDBCollection: """ Create a new collection in the database. Args: collection_name (str): The name of the collection to create. options (dict, optional): Options for the collection. dimension (int, optional): Dimension for vector search. metric (str, optional): Metric choice for vector search. service_dict (dict, optional): a definition for the $vectorize service timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. 
Returns: AsyncAstraDBCollection: The created collection object. """ # options from named params vector_options = { k: v for k, v in { "dimension": dimension, "metric": metric, "service": service_dict, }.items() if v is not None } # overlap/merge with stuff in options.vector dup_params = set((options or {}).get("vector", {}).keys()) & set( vector_options.keys() ) # If any params are duplicated, we raise an error if dup_params: dups = ", ".join(sorted(dup_params)) raise ValueError( f"Parameter(s) {dups} passed both to the method and in the options" ) # Build our options dictionary if we have vector options if vector_options: options = options or {} options["vector"] = { **options.get("vector", {}), **vector_options, } # Build the final json payload jsondata = { k: v for k, v in {"name": collection_name, "options": options}.items() if v is not None } # Make the request to the endpoint await self._request( method=http_methods.POST, path=f"{self.base_path}", json_data={"createCollection": jsondata}, timeout_info=timeout_info, ) # Get the instance object as the return of the call return AsyncAstraDBCollection(astra_db=self, collection_name=collection_name) async def delete_collection( self, collection_name: str, timeout_info: TimeoutInfoWideType = None ) -> API_RESPONSE: """ Delete a collection from the database. Args: collection_name (str): The name of the collection to delete. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: dict: The response from the database. 
""" # Make sure we provide a collection name if not collection_name: raise ValueError("Must provide a collection name") response = await self._request( method=http_methods.POST, path=f"{self.base_path}", json_data={"deleteCollection": {"name": collection_name}}, timeout_info=timeout_info, ) return response @deprecation.deprecated( # type: ignore deprecated_in="0.7.0", removed_in="1.0.0", current_version=__version__, details="Use the 'AsyncAstraDBCollection.clear()' method instead", ) async def truncate_collection( self, collection_name: str, timeout_info: TimeoutInfoWideType = None ) -> AsyncAstraDBCollection: """ Clear a collection in the database, deleting all stored documents. Args: collection_name (str): The name of the collection to clear. timeout_info: a float, or a TimeoutInfo dict, for the HTTP request. Note that a 'read' timeout event will not block the action taken by the API server if it has received the request already. Returns: collection: an AsyncAstraDBCollection instance """ collection = AsyncAstraDBCollection( collection_name=collection_name, astra_db=self, ) clear_response = await collection.clear(timeout_info=timeout_info) if clear_response.get("status", {}).get("deletedCount") != -1: raise ValueError( f"Could not issue a truncation API command (response: {json.dumps(clear_response)})." ) # return the collection itself return collection
# Copyright (c) 2021 Ultimaker B.V. # Uranium is released under the terms of the LGPLv3 or higher. import gc import re # For finding containers with asterisks in the constraints and for detecting backup files. import time import sqlite3 as db from typing import Any, cast, Dict, List, Optional, Set, Type, TYPE_CHECKING import os import UM.Dictionary import UM.FlameProfiler from UM.LockFile import LockFile from UM.Logger import Logger from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase from UM.PluginRegistry import PluginRegistry # To register the container type plug-ins and container provider plug-ins. from UM.Resources import Resources from UM.Settings.EmptyInstanceContainer import EmptyInstanceContainer from UM.Settings.ContainerFormatError import ContainerFormatError from UM.Settings.ContainerProvider import ContainerProvider from UM.Settings.constant_instance_containers import empty_container from . import ContainerQuery from UM.Settings.ContainerStack import ContainerStack from UM.Settings.DefinitionContainer import DefinitionContainer from UM.Settings.InstanceContainer import InstanceContainer from UM.Settings.Interfaces import ContainerInterface, ContainerRegistryInterface, DefinitionContainerInterface from UM.Signal import Signal, signalemitter from .DatabaseContainerMetadataController import DatabaseMetadataContainerController if TYPE_CHECKING: from UM.PluginObject import PluginObject from UM.Qt.QtApplication import QtApplication metadata_type = Dict[str, Any] @signalemitter class ContainerRegistry(ContainerRegistryInterface): """Central class to manage all setting providers. This class aggregates all data from all container providers. If only the metadata is used, it requests the metadata lazily from the providers. If more than that is needed, the entire container is requested from the appropriate providers. 
""" def __init__(self, application: "QtApplication") -> None: if ContainerRegistry.__instance is not None: raise RuntimeError("Try to create singleton '%s' more than once" % self.__class__.__name__) ContainerRegistry.__instance = self super().__init__() self._application = application # type: QtApplication self._emptyInstanceContainer = empty_container # type: InstanceContainer # Sorted list of container providers (keep it sorted by sorting each time you add one!). self._providers = [] # type: List[ContainerProvider] PluginRegistry.addType("container_provider", self.addProvider) self.metadata = {} # type: Dict[str, metadata_type] self._containers = {} # type: Dict[str, ContainerInterface] self._wrong_container_ids = set() # type: Set[str] # Set of already known wrong containers that must be skipped self.source_provider = {} # type: Dict[str, Optional[ContainerProvider]] # Where each container comes from. # Ensure that the empty container is added to the ID cache. self.metadata["empty"] = self._emptyInstanceContainer.getMetaData() self._containers["empty"] = self._emptyInstanceContainer self.source_provider["empty"] = None self._resource_types = {"definition": Resources.DefinitionContainers} # type: Dict[str, int] # Since queries are based on metadata, we need to make sure to clear the cache when a container's metadata # changes. self.containerMetaDataChanged.connect(self._clearQueryCache) # We use a database to store the metadata so that we don't have to extract them from the files every time # the application starts. Reading the data from a lot of files is especially slow on Windows; about 30x as slow. self._db_connection: Optional[db.Connection] = None # Since each container that we can store in the database has different metadata (and thus needs different logic # to extract it from the database again), we use database controllers to do that. These are set by type; Each # type of container needs to have their own controller. 
        self._database_handlers: Dict[str, DatabaseMetadataContainerController] = {}

        self._explicit_read_only_container_ids = set()  # type: Set[str]

    containerAdded = Signal()
    containerRemoved = Signal()
    containerMetaDataChanged = Signal()
    containerLoadComplete = Signal()
    allMetadataLoaded = Signal()

    def addResourceType(self, resource_type: int, container_type: str) -> None:
        # Register which Resources storage location holds containers of the given type.
        self._resource_types[container_type] = resource_type

    def getResourceTypes(self) -> Dict[str, int]:
        """Returns all resource types."""

        return self._resource_types

    def getDefaultSaveProvider(self) -> "ContainerProvider":
        # Only well-defined when exactly one provider is registered.
        if len(self._providers) == 1:
            return self._providers[0]
        raise NotImplementedError("Not implemented default save provider for multiple providers")

    def addWrongContainerId(self, wrong_container_id: str) -> None:
        """This method adds the current id to the list of wrong containers that are skipped when looking for a container"""

        self._wrong_container_ids.add(wrong_container_id)

    def addProvider(self, provider: ContainerProvider) -> None:
        """Adds a container provider to search through containers in."""

        self._providers.append(provider)
        # Re-sort every time. It's quadratic, but there shouldn't be that many providers anyway...
        self._providers.sort(key = lambda provider: PluginRegistry.getInstance().getMetaData(provider.getPluginId())["container_provider"].get("priority", 0))

    def findDefinitionContainers(self, **kwargs: Any) -> List[DefinitionContainerInterface]:
        """Find all DefinitionContainer objects matching certain criteria.

        :param dict kwargs: A dictionary of keyword arguments containing keys and values that need to match the
            metadata of the DefinitionContainer. An asterisk in the values can be used to denote a wildcard.
        """

        return cast(List[DefinitionContainerInterface], self.findContainers(container_type = DefinitionContainer, **kwargs))

    def findDefinitionContainersMetadata(self, **kwargs: Any) -> List[Dict[str, Any]]:
        """Get the metadata of all definition containers matching certain criteria.
        :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata.
            An asterisk in the values can be used to denote a wildcard.

        :return: A list of metadata dictionaries matching the search criteria, or an empty list if nothing was found.
        """

        return self.findContainersMetadata(container_type = DefinitionContainer, **kwargs)

    def findInstanceContainers(self, **kwargs: Any) -> List[InstanceContainer]:
        """Find all InstanceContainer objects matching certain criteria.

        :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata of
            the InstanceContainer. An asterisk in the values can be used to denote a wildcard.
        """

        return cast(List[InstanceContainer], self.findContainers(container_type = InstanceContainer, **kwargs))

    def findInstanceContainersMetadata(self, **kwargs: Any) -> List[metadata_type]:
        """Find the metadata of all instance containers matching certain criteria.

        :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata.
            An asterisk in the values can be used to denote a wildcard.

        :return: A list of metadata dictionaries matching the search criteria, or an empty list if nothing was found.
        """

        return self.findContainersMetadata(container_type = InstanceContainer, **kwargs)

    def findContainerStacks(self, **kwargs: Any) -> List[ContainerStack]:
        """Find all ContainerStack objects matching certain criteria.

        :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata of
            the ContainerStack. An asterisk in the values can be used to denote a wildcard.
        """

        return cast(List[ContainerStack], self.findContainers(container_type = ContainerStack, **kwargs))

    def findContainerStacksMetadata(self, **kwargs: Any) -> List[metadata_type]:
        """Find the metadata of all container stacks matching certain criteria.
        :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata.
            An asterisk in the values can be used to denote a wildcard.

        :return: A list of metadata dictionaries matching the search criteria, or an empty list if nothing was found.
        """

        return self.findContainersMetadata(container_type = ContainerStack, **kwargs)

    @UM.FlameProfiler.profile
    def findContainers(self, *, ignore_case: bool = False, **kwargs: Any) -> List[ContainerInterface]:
        """Find all container objects matching certain criteria.

        :param container_type: If provided, return only objects that are instances or subclasses of container_type.
        :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata of
            the container. An asterisk can be used to denote a wildcard.

        :return: A list of containers matching the search criteria, or an empty list if nothing was found.
        """

        # Find the metadata of the containers and grab the actual containers from there.
        # Containers whose metadata matches but that are not loaded yet get lazily loaded below.
        results_metadata = self.findContainersMetadata(ignore_case = ignore_case, **kwargs)

        result = []
        for metadata in results_metadata:
            if metadata["id"] in self._containers:  # Already loaded, so just return that.
                result.append(self._containers[metadata["id"]])
            else:  # Metadata is loaded, but not the actual data.
if metadata["id"] in self._wrong_container_ids: Logger.logException("e", "Error when loading container {container_id}: This is a weird container, probably some file is missing".format(container_id = metadata["id"])) continue provider = self.source_provider[metadata["id"]] if not provider: Logger.log("w", "The metadata of container {container_id} was added during runtime, but no accompanying container was added.".format(container_id = metadata["id"])) continue try: new_container = provider.loadContainer(metadata["id"]) except ContainerFormatError as e: Logger.logException("e", "Error in the format of container {container_id}: {error_msg}".format(container_id = metadata["id"], error_msg = str(e))) continue except Exception as e: Logger.logException("e", "Error when loading container {container_id}: {error_msg}".format(container_id = metadata["id"], error_msg = str(e))) continue self.addContainer(new_container) self.containerLoadComplete.emit(new_container.getId()) result.append(new_container) return result def findContainersMetadata(self, *, ignore_case: bool = False, **kwargs: Any) -> List[metadata_type]: """Find the metadata of all container objects matching certain criteria. :param container_type: If provided, return only objects that are instances or subclasses of ``container_type``. :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata. An asterisk can be used to denote a wildcard. :return: A list of metadata dictionaries matching the search criteria, or an empty list if nothing was found. """ candidates = None if "id" in kwargs and kwargs["id"] is not None and "*" not in kwargs["id"] and not ignore_case: if kwargs["id"] not in self.metadata: # If we're looking for an unknown ID, try to lazy-load that one. 
if kwargs["id"] not in self.source_provider: for candidate in self._providers: if kwargs["id"] in candidate.getAllIds(): self.source_provider[kwargs["id"]] = candidate break else: return [] provider = self.source_provider[kwargs["id"]] if not provider: Logger.log("w", "Metadata of container {container_id} is missing even though the container is added during run-time.") return [] metadata = provider.loadMetadata(kwargs["id"]) if metadata is None or metadata.get("id", "") in self._wrong_container_ids or "id" not in metadata: return [] self.metadata[metadata["id"]] = metadata self.source_provider[metadata["id"]] = provider # Since IDs are the primary key and unique we can now simply request the candidate and check if it matches all requirements. if kwargs["id"] not in self.metadata: return [] # Still no result, so return an empty list. if len(kwargs) == 1: return [self.metadata[kwargs["id"]]] candidates = [self.metadata[kwargs["id"]]] del kwargs["id"] # No need to check for the ID again. query = ContainerQuery.ContainerQuery(self, ignore_case = ignore_case, **kwargs) query.execute(candidates = candidates) return cast(List[metadata_type], query.getResult()) # As the execute of the query is done, result won't be none. def findDirtyContainers(self, *, ignore_case: bool = False, **kwargs: Any) -> List[ContainerInterface]: """Specialized find function to find only the modified container objects that also match certain criteria. This is faster than the normal find methods since it won't ever load all containers, but only the modified ones. Since containers must be fully loaded before they are modified, you are guaranteed that any operations on the resulting containers will not trigger additional containers to load lazily. :param kwargs: A dictionary of keyword arguments containing keys and values that need to match the metadata of the container. An asterisk can be used to denote a wildcard. 
        :param ignore_case: Whether casing should be ignored when matching string values of metadata.

        :return: A list of containers matching the search criteria, or an empty list if nothing was found.
        """

        # Find the metadata of the containers and grab the actual containers from there.
        #
        # We could apply the "is in self._containers" filter and the "isDirty" filter
        # to this metadata find function as well to filter earlier, but since the
        # filters in findContainersMetadata are applied in arbitrary order anyway
        # this will have very little effect except to prevent a list copy.
        results_metadata = self.findContainersMetadata(ignore_case = ignore_case, **kwargs)

        result = []
        for metadata in results_metadata:
            if metadata["id"] not in self._containers:  # Not yet loaded, so it can't be dirty.
                continue
            candidate = self._containers[metadata["id"]]
            if candidate.isDirty():
                result.append(self._containers[metadata["id"]])
        return result

    def getEmptyInstanceContainer(self) -> InstanceContainer:
        """This is a small convenience to make it easier to support complex structures in ContainerStacks."""

        return self._emptyInstanceContainer

    def setExplicitReadOnly(self, container_id: str) -> None:
        # Mark a container as read-only regardless of what its provider says.
        self._explicit_read_only_container_ids.add(container_id)

    def isExplicitReadOnly(self, container_id: str) -> bool:
        return container_id in self._explicit_read_only_container_ids

    def isReadOnly(self, container_id: str) -> bool:
        """Returns whether a profile is read-only or not.

        Whether it is read-only depends on the source where the container is obtained from.

        :return: True if the container is read-only, or False if it can be modified.
        """

        if self.isExplicitReadOnly(container_id):
            return True
        provider = self.source_provider.get(container_id)
        if not provider:
            return False  # If no provider had the container, that means that the container was only in memory. Then it's always modifiable.
        return provider.isReadOnly(container_id)

    # Gets the container file path for the container with the given ID. Returns None if the container/file doesn't
    # exist.
    def getContainerFilePathById(self, container_id: str) -> Optional[str]:
        provider = self.source_provider.get(container_id)
        if not provider:
            return None
        return provider.getContainerFilePathById(container_id)

    def isLoaded(self, container_id: str) -> bool:
        """Returns whether a container is completely loaded or not.

        If only its metadata is known, it is not yet completely loaded.

        :return: True if all data about this container is known, False if only metadata is known or the container is
            completely unknown.
        """

        return container_id in self._containers

    def _createDatabaseFile(self, db_path: str) -> db.Connection:
        # Create the metadata cache database from scratch, including one table per registered handler.
        connection = db.Connection(db_path)
        cursor = connection.cursor()
        cursor.executescript("""
            CREATE TABLE containers(
                id text,
                name text,
                last_modified integer,
                container_type text
            );
            CREATE UNIQUE INDEX idx_containers_id on containers (id);
            """)
        for handler in self._database_handlers.values():
            handler.setupTable(cursor)
        return connection

    def _getDatabaseConnection(self) -> db.Connection:
        # Lazily open (or create) the metadata cache database and memoize the connection.
        if self._db_connection is not None:
            return self._db_connection
        db_path = os.path.join(Resources.getCacheStoragePath(), "containers.db")
        if not os.path.exists(db_path):
            self._db_connection = self._createDatabaseFile(db_path)
            return self._db_connection
        self._db_connection = db.Connection(db_path)
        return self._db_connection

    def _getProfileType(self, container_id: str, db_cursor: db.Cursor) -> Optional[str]:
        # Look up the cached container_type for an ID; None if it's not in the database.
        db_cursor.execute("select id, container_type from containers where id = ?", (container_id, ))
        row = db_cursor.fetchone()
        if row:
            return row[1]
        return None

    def _recreateCorruptDataBase(self, cursor: Optional[db.Cursor]) -> None:
        """Closes the Database, removes the file from cache and recreate all metadata from scratch"""

        if not cursor:
            self.loadAllMetadata()
            return
        try:
            cursor.execute("rollback")  # Cancel any ongoing transaction.
except: # Could be that the cursor is already closed pass cursor.close() self._db_connection = None db_path = os.path.join(Resources.getCacheStoragePath(), "containers.db") try: os.remove(db_path) except EnvironmentError: # Was already deleted by rollback. pass self.loadAllMetadata() def _getProfileModificationTime(self, container_id: str, db_cursor: db.Cursor) -> Optional[float]: db_cursor.execute("select id, last_modified from containers where id = ?", (container_id, )) row = db_cursor.fetchone() if row: return row[1] return None def _addMetadataToDatabase(self, metadata: metadata_type) -> None: container_type = metadata["type"] if container_type in self._database_handlers: try: self._database_handlers[container_type].insert(metadata) except db.DatabaseError as e: Logger.warning(f"Removing corrupt database and recreating database. {e}") self._recreateCorruptDataBase(self._database_handlers[container_type].cursor) def _updateMetadataInDatabase(self, metadata: metadata_type) -> None: container_type = metadata["type"] if container_type in self._database_handlers: try: self._database_handlers[container_type].update(metadata) except db.DatabaseError as e: Logger.warning(f"Removing corrupt database and recreating database. {e}") self._recreateCorruptDataBase(self._database_handlers[container_type].cursor) def _getMetadataFromDatabase(self, container_id: str, container_type: str) -> metadata_type: if container_type in self._database_handlers: return self._database_handlers[container_type].getMetadata(container_id) return {} def loadAllMetadata(self) -> None: """Load the metadata of all available definition containers, instance containers and container stacks. 
""" cursor = self._getDatabaseConnection().cursor() for handlers in self._database_handlers.values(): handlers.cursor = cursor self._clearQueryCache() gc.disable() resource_start_time = time.time() # Since it could well be that we have to make a *lot* of changes to the database, we want to do that in # a single transaction to speed it up. cursor.execute("begin") all_container_ids = set() for provider in self._providers: # Automatically sorted by the priority queue. # Make copy of all IDs since it might change during iteration. provider_container_ids = set(provider.getAllIds()) # Keep a list of all the ID's that we know off all_container_ids.update(provider_container_ids) for container_id in provider_container_ids: try: db_last_modified_time = self._getProfileModificationTime(container_id, cursor) except db.DatabaseError as e: Logger.warning(f"Removing corrupt database and recreating database. {e}") self._recreateCorruptDataBase(cursor) cursor = self._getDatabaseConnection().cursor() # After recreating the database, all the cursors have changed. cursor.execute("begin") db_last_modified_time = self._getProfileModificationTime(container_id, cursor) if db_last_modified_time is None: # Item is not yet in the database. Add it now! metadata = provider.loadMetadata(container_id) if not self._isMetadataValid(metadata): Logger.log("w", f"Invalid metadata for container {container_id}: {metadata}") continue modified_time = provider.getLastModifiedTime(container_id) if metadata.get("type") in self._database_handlers: # Only add it to the database if we have an actual handler. 
try: cursor.execute( "INSERT INTO containers (id, name, last_modified, container_type) VALUES (?, ?, ?, ?)", (container_id, metadata["name"], modified_time, metadata["type"])) except db.DatabaseError as e: Logger.warning(f"Unable to edit database to insert new cache records for containers, recreating database: {str(e)}") self._recreateCorruptDataBase(self._database_handlers[metadata["type"]].cursor) cursor = self._getDatabaseConnection().cursor() # After recreating the database, all the cursors have changed. cursor.execute("begin") self._addMetadataToDatabase(metadata) self.metadata[container_id] = metadata self.source_provider[container_id] = provider else: # Metadata already exists in database. modified_time = provider.getLastModifiedTime(container_id) if modified_time > db_last_modified_time: # Metadata is outdated, so load from file and update the database metadata = provider.loadMetadata(container_id) try: cursor.execute("UPDATE containers SET name = ?, last_modified = ?, container_type = ? WHERE id = ?", (metadata["name"], modified_time, metadata["type"], metadata["id"])) except db.DatabaseError as e: Logger.warning(f"Unable to update timestamp of container cache in database, recreating database: {str(e)}") self._recreateCorruptDataBase(self._database_handlers[metadata["type"]].cursor) cursor = self._getDatabaseConnection().cursor() # After recreating the database, all the cursors have changed. cursor.execute("begin") self._updateMetadataInDatabase(metadata) self.metadata[container_id] = metadata self.source_provider[container_id] = provider continue # Since we know that the container exists, we also know that it will never be None. container_type = cast(str, self._getProfileType(container_id, cursor)) # No need to do any file reading, we can just get it from the database. 
self.metadata[container_id] = self._getMetadataFromDatabase(container_id, container_type) self.source_provider[container_id] = provider cursor.execute("commit") # Find all ID's that we currently have in the database cursor.execute("SELECT id from containers") all_ids_in_database = {container_id[0] for container_id in cursor.fetchall()} ids_to_remove = all_ids_in_database - all_container_ids # Purge ID's that don't have a matching file for container_id in ids_to_remove: cursor.execute("DELETE FROM containers WHERE id = ?", (container_id,)) self._removeContainerFromDatabase(container_id) if ids_to_remove: # We only can (and need to) commit again if we removed containers cursor.execute("commit") Logger.log("d", "Loading metadata into container registry took %s seconds", time.time() - resource_start_time) gc.enable() ContainerRegistry.allMetadataLoaded.emit() def _removeContainerFromDatabase(self, container_id: str) -> None: for database_handler in self._database_handlers.values(): database_handler.delete(container_id) @UM.FlameProfiler.profile def load(self) -> None: """Load all available definition containers, instance containers and container stacks. :note This method does not clear the internal list of containers. This means that any containers that were already added when the first call to this method happened will not be re-added. """ # Disable garbage collection to speed up the loading (at the cost of memory usage). gc.disable() resource_start_time = time.time() with self.lockCache(): # Because we might be writing cache files. for provider in self._providers: for container_id in list(provider.getAllIds()): # Make copy of all IDs since it might change during iteration. if container_id not in self._containers: # Update UI while loading. self._application.processEvents() # Update the user interface because loading takes a while. Specifically the loading screen. 
                        try:
                            self._containers[container_id] = provider.loadContainer(container_id)
                        except:
                            # Deliberately broad: log whatever went wrong, then re-raise so loading fails loudly.
                            Logger.logException("e", "Failed to load container %s", container_id)
                            raise
                        self.metadata[container_id] = self._containers[container_id].getMetaData()
                        self.source_provider[container_id] = provider
                        self.containerLoadComplete.emit(container_id)

        gc.enable()
        Logger.log("d", "Loading data into container registry took %s seconds", time.time() - resource_start_time)

    @UM.FlameProfiler.profile
    def addContainer(self, container: ContainerInterface) -> bool:
        container_id = container.getId()
        if container_id in self._containers:
            return True  # Container was already there, consider that a success

        if hasattr(container, "metaDataChanged"):
            # Keep our metadata cache in sync with in-place metadata changes on the container.
            container.metaDataChanged.connect(self._onContainerMetaDataChanged)

        self.metadata[container_id] = container.getMetaData()
        self._containers[container_id] = container
        if container_id not in self.source_provider:
            self.source_provider[container_id] = None  # Added during runtime.
        self._clearQueryCacheByContainer(container)

        # containerAdded is a custom signal and can trigger direct calls to its subscribers. This should be avoided
        # because with the direct calls, the subscribers need to know everything about what it tries to do to avoid
        # triggering this signal again, which eventually can end up exceeding the max recursion limit.
        # We avoid the direct calls here to make sure that the subscribers do not need to take into account any max
        # recursion problem.
        self._application.callLater(self.containerAdded.emit, container)
        return True

    @UM.FlameProfiler.profile
    def removeContainer(self, container_id: str) -> None:
        # Here we only need to check metadata because a container may not be loaded but its metadata must have been
        # loaded first.
        if container_id not in self.metadata:
            Logger.log("w", "Tried to delete container {container_id}, which doesn't exist or isn't loaded.".format(container_id = container_id))
            return  # Ignore.
# CURA-6237 # Do not try to operate on invalid containers because removeContainer() needs to load it if it's not loaded yet # (see below), but an invalid container cannot be loaded. if container_id in self._wrong_container_ids: Logger.log("w", "Container [%s] is faulty, it won't be able to be loaded, so no need to remove, skip.") # delete the metadata if present if container_id in self.metadata: del self.metadata[container_id] return container = None if container_id in self._containers: container = self._containers[container_id] if hasattr(container, "metaDataChanged"): container.metaDataChanged.disconnect(self._onContainerMetaDataChanged) del self._containers[container_id] if container_id in self.metadata: if container is None: # We're in a bit of a weird state now. We want to notify the rest of the code that the container # has been deleted, but due to lazy loading, it hasn't been loaded yet. The issues is that in order # to notify the rest of the code, we need to actually *have* the container. So an empty instance # container is created, which is emitted with the containerRemoved signal and contains the metadata container = EmptyInstanceContainer(container_id) container.metaData = self.metadata[container_id] del self.metadata[container_id] if container_id in self.source_provider: if self.source_provider[container_id] is not None: cast(ContainerProvider, self.source_provider[container_id]).removeContainer(container_id) del self.source_provider[container_id] if container is not None: self._clearQueryCacheByContainer(container) self.containerRemoved.emit(container) Logger.log("d", "Removed container %s", container_id) @UM.FlameProfiler.profile def renameContainer(self, container_id: str, new_name: str, new_id: Optional[str] = None) -> None: Logger.log("d", "Renaming container %s to %s", container_id, new_name) # Same as removeContainer(), metadata is always loaded but containers may not, so always check metadata. 
        if container_id not in self.metadata:
            Logger.log("w", "Unable to rename container %s, because it does not exist", container_id)
            return

        container = self._containers.get(container_id)
        if container is None:
            # Not loaded yet; findContainers() lazily loads it (metadata is known, so this succeeds).
            container = self.findContainers(id = container_id)[0]

        container = cast(ContainerInterface, container)
        if new_name == container.getName():
            Logger.log("w", "Unable to rename container %s, because the name (%s) didn't change", container_id, new_name)
            return

        # Emit removed/added around the rename so listeners re-index the container under its new identity.
        self.containerRemoved.emit(container)

        try:
            container.setName(new_name)  #type: ignore
        except TypeError:  #Some containers don't allow setting the name.
            return
        if new_id is not None:
            # The ID changes too: re-key all registry dicts and move ownership away from the old provider.
            source_provider = self.source_provider[container.getId()]
            del self._containers[container.getId()]
            del self.metadata[container.getId()]
            del self.source_provider[container.getId()]
            if source_provider is not None:
                source_provider.removeContainer(container.getId())
            container.getMetaData()["id"] = new_id
            self._containers[container.getId()] = container
            self.metadata[container.getId()] = container.getMetaData()
            self.source_provider[container.getId()] = None  # to be saved with saveSettings
        self._clearQueryCacheByContainer(container)
        self.containerAdded.emit(container)

    @UM.FlameProfiler.profile
    def uniqueName(self, original: str) -> str:
        """Creates a new unique name for a container that doesn't exist yet.

        It tries if the original name you provide exists, and if it doesn't it'll add a " 1" or " 2" after the name to
        make it unique.

        :param original: The original name that may not be unique.
        :return: A unique name that looks a lot like the original but may have a number behind it to make it unique.
        """

        original = original.replace("*", "")  # Filter out wildcards, since this confuses the ContainerQuery.
        name = original.strip()

        num_check = re.compile(r"(.*?)\s*#\d+$").match(name)
        if num_check:  #There is a number in the name.
            name = num_check.group(1)  #Filter out the number.
        if not name:  #Wait, that deleted everything!
name = "Profile" elif not self.findContainersMetadata(id = original.strip(), ignore_case = True) and not self.findContainersMetadata(name = original.strip()): # Check if the stripped version of the name is unique (note that this can still have the number in it) return original.strip() unique_name = name i = 1 while self.findContainersMetadata(id = unique_name, ignore_case = True) or self.findContainersMetadata(name = unique_name): #A container already has this name. i += 1 #Try next numbering. unique_name = "%s #%d" % (name, i) #Fill name like this: "Extruder #2". return unique_name @classmethod def addContainerType(cls, container: "PluginObject") -> None: """Add a container type that will be used to serialize/deserialize containers. :param container: An instance of the container type to add. """ plugin_id = container.getPluginId() metadata = PluginRegistry.getInstance().getMetaData(plugin_id) if "settings_container" not in metadata or "mimetype" not in metadata["settings_container"]: raise Exception("Plugin {plugin} has incorrect metadata: Expected a 'settings_container' block with a 'mimetype' entry".format(plugin = plugin_id)) cls.addContainerTypeByName(container.__class__, plugin_id, metadata["settings_container"]["mimetype"]) @classmethod def addContainerTypeByName(cls, container_type: type, type_name: str, mime_type: str) -> None: """Used to associate mime types with object to be created :param container_type: ContainerStack or derivative :param type_name: :param mime_type: """ cls.__container_types[type_name] = container_type cls.mime_type_map[mime_type] = container_type @classmethod def getMimeTypeForContainer(cls, container_type: type) -> Optional[MimeType]: """Retrieve the mime type corresponding to a certain container type :param container_type: The type of container to get the mime type for. :return: A MimeType object that matches the mime type of the container or None if not found. 
""" try: mime_type_name = UM.Dictionary.findKey(cls.mime_type_map, container_type) if mime_type_name: return MimeTypeDatabase.getMimeType(mime_type_name) except ValueError: Logger.log("w", "Unable to find mimetype for container %s", container_type) return None @classmethod def getContainerForMimeType(cls, mime_type: MimeType) -> Optional[Type[ContainerInterface]]: """Get the container type corresponding to a certain mime type. :param mime_type: The mime type to get the container type for. :return: A class object of a container type that corresponds to the specified mime type or None if not found. """ return cls.mime_type_map.get(mime_type.name, None) @classmethod def getContainerTypes(cls): """Get all the registered container types :return: A dictionary view object that provides access to the container types. The key is the plugin ID, the value the container type. """ return cls.__container_types.items() def saveContainer(self, container: "ContainerInterface", provider: Optional["ContainerProvider"] = None) -> None: """Save single dirty container""" if not hasattr(provider, "saveContainer"): provider = self.getDefaultSaveProvider() if not container.isDirty(): return provider.saveContainer(container) #type: ignore container.setDirty(False) self.source_provider[container.getId()] = provider def saveDirtyContainers(self) -> None: """Save all the dirty containers by calling the appropriate container providers""" # Lock file for "more" atomically loading and saving to/from config dir. with self.lockFile(): for instance in self.findDirtyContainers(container_type = InstanceContainer): self.saveContainer(instance) for stack in self.findContainerStacks(): self.saveContainer(stack) # Clear the internal query cache def _clearQueryCache(self, *args: Any, **kwargs: Any) -> None: ContainerQuery.ContainerQuery.cache.clear() def _clearQueryCacheByContainer(self, container: ContainerInterface) -> None: """Clear the query cache by using container type. 
        This is a slightly smarter way of clearing the cache. Only queries that are of the same type (or without one)
        are cleared.
        """

        # Remove all case-insensitive matches since we won't find those with the below "<=" subset check.
        # TODO: Properly check case-insensitively in the dict's values.
        for key in list(ContainerQuery.ContainerQuery.cache):
            if not key[0]:
                del ContainerQuery.ContainerQuery.cache[key]

        # Remove all cache items that this container could fall in.
        # Cache keys interleave metadata field names and values after the first element,
        # so pairing key[1::2] with key[2::2] reconstructs the query's metadata constraints.
        for key in list(ContainerQuery.ContainerQuery.cache):
            query_metadata = dict(zip(key[1::2], key[2::2]))
            if query_metadata.items() <= container.getMetaData().items():
                del ContainerQuery.ContainerQuery.cache[key]

    def _onContainerMetaDataChanged(self, *args: ContainerInterface, **kwargs: Any) -> None:
        """Called when any container's metadata changed.

        This function passes it on to the containerMetaDataChanged signal. Sadly that doesn't work automatically between
        pyqtSignal and UM.Signal.
        """

        container = args[0]
        # Always emit containerMetaDataChanged, even if the dictionary didn't actually change: The contents of the dictionary might have changed in-place!
        self.metadata[container.getId()] = container.getMetaData()  # refresh the metadata
        self.containerMetaDataChanged.emit(*args, **kwargs)

    def _isMetadataValid(self, metadata: Optional[metadata_type]) -> bool:
        """Validate a metadata object.

        If the metadata is invalid, the container is not allowed to be in the registry.

        :param metadata: A metadata object.

        :return: Whether this metadata was valid.
""" return metadata is not None def getLockFilename(self) -> str: """Get the lock filename including full path Dependent on when you call this function, Resources.getConfigStoragePath may return different paths """ return Resources.getStoragePath(Resources.Resources, self._application.getApplicationLockFilename()) def getCacheLockFilename(self) -> str: """Get the cache lock filename including full path.""" return Resources.getStoragePath(Resources.Cache, self._application.getApplicationLockFilename()) def lockFile(self) -> LockFile: """Contextmanager to create a lock file and remove it afterwards.""" return LockFile( self.getLockFilename(), timeout = 10, wait_msg = "Waiting for lock file in local config dir to disappear..." ) def lockCache(self) -> LockFile: """Context manager to create a lock file for the cache directory and remove it afterwards. """ return LockFile( self.getCacheLockFilename(), timeout = 10, wait_msg = "Waiting for lock file in cache directory to disappear." ) __container_types = { "definition": DefinitionContainer, "instance": InstanceContainer, "stack": ContainerStack, } mime_type_map = { "application/x-uranium-definitioncontainer": DefinitionContainer, "application/x-uranium-instancecontainer": InstanceContainer, "application/x-uranium-containerstack": ContainerStack, "application/x-uranium-extruderstack": ContainerStack } # type: Dict[str, Type[ContainerInterface]] __instance = None # type: ContainerRegistry @classmethod def getInstance(cls, *args, **kwargs) -> "ContainerRegistry": return cls.__instance PluginRegistry.addType("settings_container", ContainerRegistry.addContainerType)
#!/usr/bin/env perl

# -*- perl -*-

# Command-line front end for fetching sequences or sub-sequences from a
# named sequence file. IDs and locations may come from the command line,
# from a file (--locfile) and/or from STDIN; the union of all three is used.

use strict;
use warnings FATAL => qw ( all );

use Common::Config;
use Common::Messages;

use Seq::Storage;
use Seq::Help;

my ( $prog_name, $signature, $usage, $args, $seqs, $out_formats );

# >>>>>>>>>>>>>>>>>>>>>>>>>>>>> USAGE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

# Program name is the last path component of however the script was invoked.
$prog_name = ( split "/", $0 )[-1];
$signature = &Common::Config::get_signature();

# Comma-separated list of the output formats that Seq::Storage can write.
$out_formats = join ", ", sort keys %{ &Seq::Storage::format_fields("valid") };

$usage = qq (
Program $prog_name, February 2010.

Fetches sequences or sub-sequences from a named sequence file and
streams them to file or STDOUT. ID's and sub-sequence locations can
be on the command line, be piped from STDIN and/or read from a file.
See the help option. Command line arguments are,

   --seqfile   Input sequence file (undef)
   --locfile   Input sequence locations file (undef)
     --order   Speed++ when many IDs follow sequence file order (off)
     --parse   Parses output entries or not (on)
     --ssize   Do sub-sequence seek in sequences this long (4000k)

    --format   Output format (fasta)
    --fields   Output fields (all that the formats support)
   --outfile   Output sequence file path (<stdout>)
   --errfile   Write error messages (warnings to screen)
    --append   Appends to output file (off)
   --clobber   Overwrites output file (off)
    --silent   Prints no progress messages (off)
      --help   Prints various $prog_name help (off)

Output formats: $out_formats

Examples,

1\) $prog_name 'contig1::100,20,-;500,100' --seqfile pig.fa
2\) $prog_name --seqfile pig_expr.fq --locfile locs.list --format fastq
3\) cat locs.list | $prog_name --seqfile pig_expr.fq --format fastq

Example 1 gets a sub-sequence from an entry in a fasta file. Example 2
gets many sequences from a fastq file with qualities. Example 3 does
the same, except gets ids from STDIN. The script will look for all
three ways and use the union of ids.

Author: $signature
);

# Prints the usage text and exits when the script is run without arguments.
&Common::Messages::print_usage_and_exit( $usage );

# >>>>>>>>>>>>>>>>>>>>>>>>>>> GET OPTIONS <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

# Getopt-style specs: "=s" takes a string value, ":s" an optional string,
# "!" is a negatable boolean flag. Values are the defaults.
$args = &Common::Config::get_commandline(
    {
        "help:s" => undef,
        "seqfile=s" => undef,
        "locfile=s" => undef,
        "order!" => 0,
        "parse!" => 1,
        "ssize=s" => undef,
        "format=s" => undef,
        "fields=s" => undef,
        "outfile=s" => undef,
        "errfile=s" => undef,
        "clobber!" => 0,
        "append!" => 0,
        "silent!" => 0,
    });

# >>>>>>>>>>>>>>>>>>>>>>>>>> GET SEQUENCES <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

if ( defined $args->help )
{
    # --help was given (possibly with a topic value): show help and stop.
    print &Seq::Help::dispatch( $prog_name, $args->help );
    exit;
}
else
{
    # Remaining positional arguments are treated as sequence locations.
    $args->delete_field( "help" );
    $args->add_field( "locs" );
    $args->locs( \@ARGV );

    # Delegate the actual fetching; "stdin" => 1 also reads ids piped in.
    &Seq::Storage::fetch_seqs(
        $args->seqfile,
        {
            "locs" => $args->locs,
            "locfile" => $args->locfile,
            "order" => $args->order,
            "parse" => $args->parse,
            "ssize" => $args->ssize,
            "format" => $args->format,
            "fields" => $args->fields,
            "outfile" => $args->outfile,
            "errfile" => $args->errfile,
            "clobber" => $args->clobber,
            "append" => $args->append,
            "silent" => $args->silent,
            "stdin" => 1,
        });
}

__END__
# CRACO [![Build Status](https://travis-ci.org/sharegate/craco.svg?branch=master)](https://travis-ci.org/sharegate/craco) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-green.svg)](https://github.com/sharegate/craco/pulls) **C**reate **R**eact **A**pp **C**onfiguration **O**verride is an easy and comprehensible configuration layer for create-react-app. Get all the benefits of create-react-app **and** customization without using 'eject' by adding a single `craco.config.js` file at the root of your application and customize your eslint, babel, postcss configurations and many more. All you have to do is create your app using [create-react-app](https://github.com/facebook/create-react-app/) and customize the configuration with a `craco.config.js` file. ## Support - Create React App (CRA) 4.* - Yarn - Yarn Workspace - NPM - Lerna (with or without hoisting) - Custom `react-scripts` version ## Documentation - [Installation](#installation) - How to install and setup CRACO. - [Configuration](#configuration) - How to customize your CRA installation with CRACO. - [Configuration File](#configuration-file) - [Configuration Helpers](#configuration-helpers) - [Exporting your Configuration](#exporting-your-configuration) - [Setting a Custom Location for `craco.config.js`](#setting-a-custom-location-for-cracoconfigjs) - [CRA toolchain for Beginners](#cra-toolchain-for-beginners) - [Notes on CRA Configurations and Problem Solving](#notes-on-cra-configurations-and-problem-solving) - [Ejecting CRA to Learn](#ejecting-cra-to-learn) - [Direct Versus Functional Config Definitions](#direct-object-literal-versus-functional-config-definitions) - [API](#api) - CRACO APIs for Jest and Webpack. - [Jest API](#jest-api) - [Webpack API](#webpack-api) - [Recipes](https://github.com/sharegate/craco/tree/master/recipes) – Short recipes for common use cases. - [Available Plugins](https://github.com/sharegate/craco#community-maintained-plugins) - Plugins maintained by the community. 
- [Develop a Plugin](#develop-a-plugin) - How to develop a plugin for CRACO. - [Backward Compatibility](#backward-compatibility) - [Debugging](#debugging) - [License](#license) ## Preface ### Acknowledgements We are grateful to [@timarney](https://github.com/timarney) the creator of [react-app-rewired](https://github.com/timarney/react-app-rewired) for his original idea. The configuration style of this plugin has been greatly influenced by [Vue CLI](https://cli.vuejs.org/guide/). ### Fair Warning By doing this you're breaking the ["guarantees"](https://github.com/facebookincubator/create-react-app/issues/99#issuecomment-234657710) that CRA provides. That is to say you now "own" the configs. **No support** will be provided. Proceed with caution. ## Installation Install the plugin from **npm**: ```bash $ yarn add @craco/craco # OR $ npm install @craco/craco --save ``` Create a `craco.config.js` file in the root directory and [configure CRACO](#configuration): ``` my-app ├── node_modules ├── craco.config.js └── package.json ``` Update the existing calls to `react-scripts` in the `scripts` section of your `package.json` file to use the `craco` CLI: ```diff /* package.json */ "scripts": { - "start": "react-scripts start", + "start": "craco start", - "build": "react-scripts build", + "build": "craco build" - "test": "react-scripts test", + "test": "craco test" } ``` Start your app for development: ```bash $ npm start ``` Or build your app: ```bash $ npm run build ``` ## Configuration CRACO is configured with a `craco.config.js`, `.cracorc.js` or `.cracorc` file. This file is divided into sections representing the major parts of what makes up the default create react app. If there are multiple configuration files in the same directory, CRACO will only use one. The priority order is: 1. `craco.config.js` 2. `.cracorc.js` 3. `.cracorc` ### Configuration File Below is a sample CRACO configuration file. Your final config file will be much shorter than this sample. 
See example CRACO configurations in [Recipes](https://github.com/sharegate/craco/tree/master/recipes). Some sections have a `mode` property. When this is available there are 2 possible values: - `extends`: the provided configuration will extends the CRA settings (**default mode**) - `file`: the CRA settings will be reset and you will provide an official configuration file for the plugin ([postcss](https://github.com/michael-ciniawsky/postcss-load-config#postcssrc), [eslint](https://eslint.org/docs/user-guide/configuring#configuration-file-formats)) that will supersede any settings. ```javascript const { when, whenDev, whenProd, whenTest, ESLINT_MODES, POSTCSS_MODES } = require("@craco/craco"); module.exports = { reactScriptsVersion: "react-scripts" /* (default value) */, style: { modules: { localIdentName: "" }, css: { loaderOptions: { /* Any css-loader configuration options: https://github.com/webpack-contrib/css-loader. */ }, loaderOptions: (cssLoaderOptions, { env, paths }) => { return cssLoaderOptions; } }, sass: { loaderOptions: { /* Any sass-loader configuration options: https://github.com/webpack-contrib/sass-loader. */ }, loaderOptions: (sassLoaderOptions, { env, paths }) => { return sassLoaderOptions; } }, postcss: { mode: "extends" /* (default value) */ || "file", plugins: [require('plugin-to-append')], // Additional plugins given in an array are appended to existing config. plugins: (plugins) => [require('plugin-to-prepend')].concat(plugins), // Or you may use the function variant. env: { autoprefixer: { /* Any autoprefixer options: https://github.com/postcss/autoprefixer#options */ }, stage: 3, /* Any valid stages: https://cssdb.org/#staging-process. */ features: { /* Any CSS features: https://preset-env.cssdb.org/features. */ } }, loaderOptions: { /* Any postcss-loader configuration options: https://github.com/postcss/postcss-loader. 
*/ }, loaderOptions: (postcssLoaderOptions, { env, paths }) => { return postcssLoaderOptions; } } }, eslint: { enable: true /* (default value) */, mode: "extends" /* (default value) */ || "file", configure: { /* Any eslint configuration options: https://eslint.org/docs/user-guide/configuring */ }, configure: (eslintConfig, { env, paths }) => { return eslintConfig; }, pluginOptions: { /* Any eslint plugin configuration options: https://github.com/webpack-contrib/eslint-webpack-plugin#options. */ }, pluginOptions: (eslintOptions, { env, paths }) => { return eslintOptions; } }, babel: { presets: [], plugins: [], loaderOptions: { /* Any babel-loader configuration options: https://github.com/babel/babel-loader. */ }, loaderOptions: (babelLoaderOptions, { env, paths }) => { return babelLoaderOptions; } }, typescript: { enableTypeChecking: true /* (default value) */ }, webpack: { alias: {}, plugins: { add: [], /* An array of plugins */ remove: [], /* An array of plugin constructor's names (i.e. "StyleLintPlugin", "ESLintWebpackPlugin" ) */ }, configure: { /* Any webpack configuration options: https://webpack.js.org/configuration */ }, configure: (webpackConfig, { env, paths }) => { return webpackConfig; } }, jest: { babel: { addPresets: true, /* (default value) */ addPlugins: true /* (default value) */ }, configure: { /* Any Jest configuration options: https://jestjs.io/docs/en/configuration. */ }, configure: (jestConfig, { env, paths, resolve, rootDir }) => { return jestConfig; } }, devServer: { /* Any devServer configuration options: https://webpack.js.org/configuration/dev-server/#devserver. 
*/ }, devServer: (devServerConfig, { env, paths, proxy, allowedHost }) => { return devServerConfig; }, plugins: [ { plugin: { overrideCracoConfig: ({ cracoConfig, pluginOptions, context: { env, paths } }) => { return cracoConfig; }, overrideWebpackConfig: ({ webpackConfig, cracoConfig, pluginOptions, context: { env, paths } }) => { return webpackConfig; }, overrideDevServerConfig: ({ devServerConfig, cracoConfig, pluginOptions, context: { env, paths, proxy, allowedHost } }) => { return devServerConfig; }, overrideJestConfig: ({ jestConfig, cracoConfig, pluginOptions, context: { env, paths, resolve, rootDir } }) => { return jestConfig }, }, options: {} } ] }; ``` ### Configuration Helpers Usage for all "when" functions is the same, `whenDev, whenProd, whenTest` are shortcuts for `when`. `when(condition, fct, [unmetValue])` Usage: ```javascript const { when, whenDev } = require("@craco/craco"); module.exports = { eslint: { mode: ESLINT_MODES.file, configure: { formatter: when(process.env.NODE_ENV === "CI", require("eslint-formatter-vso")) } }, webpack: { plugins: [ new ConfigWebpackPlugin(), ...whenDev(() => [new CircularDependencyPlugin()], []) ] } }; ``` ### Exporting your Configuration You can export your configuration as an **object literal**: ```javascript /* craco.config.js */ module.exports = { ... } ``` a **function**: ```javascript /* craco.config.js */ module.exports = function({ env }) { return { ... }; } ``` a **promise** or an **async function**: ```javascript /* craco.config.js */ module.exports = async function({ env }) { await ... return { ... }; } ``` ### Setting a Custom Location for craco.config.js Both options support a **relative** or an **absolute** path. **1- package.json** _(Recommended)_ You can change the location of the `craco.config.js` file by specifying a value for `cracoConfig` in your `package.json` file. 
```javascript /* package.json */ { "cracoConfig": "config/craco-config-with-custom-name.js" } ``` **2- CLI** _(For backward compatibility)_ You can also change the location of the `craco.config.js` file by specifying the `--config` CLI option. _This option doesn't support Babel with Jest_ ```javascript /* package.json */ { "scripts": { "start": "craco start --config config/craco-config-with-custom-name.js" } } ``` ## CRA Toolchain for Beginners ### Introduction to CRACO Create React App ([CRA](https://github.com/facebook/create-react-app)) is intended to allow people to get started with writing React apps quickly. It does this by packaging several key components with a solid default configuration. After some initial experimentation, many people find the default CRA is not quite the right fit. Yet, selecting and configuring a toolchain featuring all of the components CRA already offers is overwhelming. CRACO allows you to enjoy the recognizable project structure of CRA while changing detailed configuration settings of each component. ### Notes on CRA Configurations and Problem Solving Keep in mind that there are _some_ configuration settings available to CRA without CRACO. Getting exactly what you want may involve a combination of making changes your CRACO configuration file and by using some of the more limited _but still important_ settings available in Create React App. Before jumping into customizing your _CRACO_ configuration, step back and think about each part of the problem you're trying to solve. 
Be sure to review these resources on the CRA configuration, as it may save you time: - [Important Environment Variables that Configure CRA](https://create-react-app.dev/docs/advanced-configuration) - [Learn about using `postbuild` commands in `package.json`](https://stackoverflow.com/a/51818028/4028977) - [Proxying API or other Requests](https://create-react-app.dev/docs/proxying-api-requests-in-development/), or "how to integrate CRA's dev server with a second backend": [problem statement](https://github.com/facebook/create-react-app/issues/147) - [Search CRACO issues, for gotchas, hints and examples](https://github.com/gsoft-inc/craco/issues?q=is%3Aissue+sort%3Aupdated-desc) ### Ejecting CRA to Learn Avoiding ejecting is a major goal for many CRACO users. However, if you're still learning toolchains and modern frontend workflows, it may be helpful to create a sample ejected CRA project to see how the default CRA app configures each of the components. While CRACO's sample configuration file inherits directly from CRA's default settings, seeing the default CRA config in the ejected CRA file structure may give you useful perspective. You may even want to try testing a change in the ejected app to better understand how it would be done with your CRACO config-based project. ### Direct (object literal) Versus Functional Config Definitions The [sample CRACO config file]((#sample-craco-configuration-file)) is meant to show possibilities for configuring your CRA-based project. Each section contains a primary configuration area, `loaderOptions` or `configure`. These config areas are where you will make most of your detailed changes. You, (or perhaps your IDE) may have noticed that the sections have duplicate keys, i.e. loaderOptions is listed twice in the sample config file. The reason for this is to allow you to choose between object literal or functionally defined configuration choices. There are a few reasons for this: 1. 
Sometimes it may be faster to test a minor change using keys. 1. Other times a functional definition is necessary to get the right configuration. 1. While not common, a setting may **only** work if you use one or the other! See, [devServer port example](https://github.com/gsoft-inc/craco/issues/172#issuecomment-651505730) #### A simple example of equivalent direct and functionally defined configuration settings: ##### Direct configuration (object literal) ```javascript devServer: { writeToDisk: true } ``` ##### Functionally defined configuration ```javascript devServer: (devServerConfig, { env, paths, proxy, allowedHost }) => { devServerConfig.writeToDisk = true; return devServerConfig; } ``` ## API To integrate with other tools, it's usefull to have access to the configuration generated by CRACO. That's what CRACO APIs are for. The current API support Jest and Webpack. ### Jest API Accept a `cracoConfig`, a `context` object and `options`. The generated Jest config object is returned. > **Warning:** `createJestConfig` does NOT accept `cracoConfig` as a function. If your `craco.config.js` exposes a config function, you have to call it yourself before passing it to `createJestConfig`. `createJestConfig(cracoConfig, context = {}, options = { verbose: false, config: null })` Usage: ```javascript /* jest.config.js */ const { createJestConfig } = require("@craco/craco"); const cracoConfig = require("./craco.config.js"); const jestConfig = createJestConfig(cracoConfig); module.exports = jestConfig; ``` #### Examples - [vscode-jest](https://github.com/sharegate/craco/tree/master/recipes/use-a-jest-config-file) ### Webpack API You can create Webpack DevServer and Production configurations using `createWebpackDevConfig` and `createWebpackProdConfig`. Accept a `cracoConfig`, a `context` object and `options`. The generated Webpack config object is returned. > **Warning:** Similar to `createJestConfig`, these functions do NOT accept `cracoConfig` as a function. 
If your `craco.config.js` exposes a config function, you have to call it yourself before passing it further. `createWebpackDevConfig(cracoConfig, context = {}, options = { verbose: false, config: null })` `createWebpackProdConfig(cracoConfig, context = {}, options = { verbose: false, config: null })` Usage: ```javascript /* webpack.config.js */ const { createWebpackDevConfig } = require("@craco/craco"); const cracoConfig = require("./craco.config.js"); const webpackConfig = createWebpackDevConfig(cracoConfig); module.exports = webpackConfig; ``` ## Develop a Plugin ### Hooks There are four hooks available to a plugin: - `overrideCracoConfig`: Let a plugin customize the config object before it's process by `craco`. - `overrideWebpackConfig`: Let a plugin customize the `webpack` config that will be used by CRA. - `overrideDevServerConfig`: Let a plugin customize the dev server config that will be used by CRA. - `overrideJestConfig`: Let a plugin customize the `Jest` config that will be used by CRA. **Important:** Every function must return the updated config object. #### overrideCracoConfig The function `overrideCracoConfig` let a plugin override the config object **before** it's process by `craco`. If a plugin define the function, it will be called with the config object read from the `craco.config.js` file provided by the consumer. 
*The function must return a valid config object, otherwise `craco` will throw an error.* The function will be called with a single object argument having the following structure: ```javascript { cracoConfig: "The config object read from the craco.config.js file provided by the consumer", pluginOptions: "The plugin options provided by the consumer", context: { env: "The current NODE_ENV (development, production, etc..)", paths: "An object that contains all the paths used by CRA" } } ``` ##### Example Plugin: ```javascript /* craco-plugin-log-craco-config.js */ module.exports = { overrideCracoConfig: ({ cracoConfig, pluginOptions, context: { env, paths } }) => { if (pluginOptions.preText) { console.log(pluginOptions.preText); } console.log(JSON.stringify(cracoConfig, null, 4)); // Always return the config object. return cracoConfig; } }; ``` Registration (in a `craco.config.js` file): ```javascript const logCracoConfigPlugin = require("./craco-plugin-log-craco-config"); module.exports = { ... plugins: [ { plugin: logCracoConfigPlugin, options: { preText: "Will log the craco config:" } } ] }; ``` #### overrideWebpackConfig The function `overrideWebpackConfig` let a plugin override the `webpack` config object **after** it's been customized by `craco`. 
*The function must return a valid config object, otherwise `craco` will throw an error.* The function will be called with a single object argument having the following structure: ```javascript { webpackConfig: "The webpack config object already customized by craco", cracoConfig: "The configuration object read from the craco.config.js file provided by the consumer", pluginOptions: "The plugin options provided by the consumer", context: { env: "The current NODE_ENV (development, production, etc..)", paths: "An object that contains all the paths used by CRA" } } ``` ##### Example Plugin: ```javascript /* craco-plugin-log-webpack-config.js */ module.exports = { overrideWebpackConfig: ({ webpackConfig, cracoConfig, pluginOptions, context: { env, paths } }) => { if (pluginOptions.preText) { console.log(pluginOptions.preText); } console.log(JSON.stringify(webpackConfig, null, 4)); // Always return the config object. return webpackConfig; } }; ``` Registration (in a `craco.config.js` file): ```javascript const logWebpackConfigPlugin = require("./craco-plugin-log-webpack-config"); module.exports = { ... plugins: [ { plugin: logWebpackConfigPlugin, options: { preText: "Will log the webpack config:" } } ] }; ``` #### overrideDevServerConfig The function `overrideDevServerConfig` let a plugin override the dev server config object **after** it's been customized by `craco`. 
*The function must return a valid config object, otherwise `craco` will throw an error.* The function will be called with a single object argument having the following structure: ```javascript { devServerConfig: "The dev server config object already customized by craco", cracoConfig: "The configuration object read from the craco.config.js file provided by the consumer", pluginOptions: "The plugin options provided by the consumer", context: { env: "The current NODE_ENV (development, production, etc..)", paths: "An object that contains all the paths used by CRA", allowedHost: "Provided by CRA" } } ``` ##### Example Plugin: ```javascript /* craco-plugin-log-dev-server-config.js */ module.exports = { overrideDevServerConfig: ({ devServerConfig, cracoConfig, pluginOptions, context: { env, paths, allowedHost } }) => { if (pluginOptions.preText) { console.log(pluginOptions.preText); } console.log(JSON.stringify(devServerConfig, null, 4)); // Always return the config object. return devServerConfig; } }; ``` Registration (in a `craco.config.js` file): ```javascript const logDevServerConfigPlugin = require("./craco-plugin-log-dev-server-config"); module.exports = { ... plugins: [ { plugin: logDevServerConfigPlugin, options: { preText: "Will log the dev server config:" } } ] }; ``` #### overrideJestConfig The function `overrideJestConfig` let a plugin override the `Jest` config object **after** it's been customized by `craco`. 
*The function must return a valid config object, otherwise `craco` will throw an error.* The function will be called with a single object argument having the following structure: ```javascript { jestConfig: "The Jest config object already customized by craco", cracoConfig: "The configuration object read from the craco.config.js file provided by the consumer", pluginOptions: "The plugin options provided by the consumer", context: { env: "The current NODE_ENV (development, production, etc..)", paths: "An object that contains all the paths used by CRA", resolve: "Provided by CRA", rootDir: "Provided by CRA" } } ``` ##### Example Plugin: ```javascript /* craco-plugin-log-jest-config.js */ module.exports = { overrideJestConfig: ({ jestConfig, cracoConfig, pluginOptions, context: { env, paths, resolve, rootDir } }) => { if (pluginOptions.preText) { console.log(pluginOptions.preText); } console.log(JSON.stringify(jestConfig, null, 4)); // Always return the config object. return jestConfig; } }; ``` Registration (in a `craco.config.js` file): ```javascript const logJestConfigPlugin = require("./craco-plugin-log-jest-config"); module.exports = { ... plugins: [ { plugin: logJestConfigPlugin, options: { preText: "Will log the Jest config:" } } ] }; ``` ### Utility Functions A few utility functions are provided by CRACO to help you develop a plugin: - `getLoader` - `getLoaders` - `removeLoaders` - `addBeforeLoader` - `addBeforeLoaders` - `addAfterLoader` - `addAfterLoaders` - `getPlugin` - `removePlugins` - `addPlugins` - `throwUnexpectedConfigError` ```javascript const { getLoader, getLoaders, removeLoaders, loaderByName, getPlugin, removePlugins, addPlugins, pluginByName, throwUnexpectedConfigError } = require("@craco/craco"); ``` #### getLoader Retrieve the **first** loader that match the specified criteria from the webpack config. 
Returns: ```javascript { isFound: true | false, match: { loader, parent, index } } ``` Usage: ```javascript const { getLoader, loaderByName } = require("@craco/craco"); const { isFound, match } = getLoader(webpackConfig, loaderByName("eslint-loader")); if (isFound) { // do stuff... } ``` #### getLoaders Retrieve **all** the loaders that match the specified criteria from the webpack config. Returns: ```javascript { hasFoundAny: true | false, matches: [ { loader, parent, index } ] } ``` Usage: ```javascript const { getLoaders, loaderByName } = require("@craco/craco"); const { hasFoundAny, matches } = getLoaders(webpackConfig, loaderByName("babel-loader")); if (hasFoundAny) { matches.forEach(x => { // do stuff... }); } ``` #### removeLoaders Remove **all** the loaders that match the specified criteria from the webpack config. Returns: ```javascript { hasRemovedAny: true | false, removedCount: int } ``` Usage: ```javascript const { removeLoaders, loaderByName } = require("@craco/craco"); removeLoaders(webpackConfig, loaderByName("eslint-loader")); ``` #### addBeforeLoader Add a new *loader* **before** the loader that match specified criteria to the webpack config. Returns: ```javascript { isAdded: true | false } ``` Usage: ```javascript const { addBeforeLoader, loaderByName } = require("@craco/craco"); const myNewWebpackLoader = { loader: require.resolve("tslint-loader") }; addBeforeLoader(webpackConfig, loaderByName("eslint-loader"), myNewWebpackLoader); ``` #### addBeforeLoaders Add a new *loader* **before** all the loaders that match specified criteria to the webpack config. 
Returns: ```javascript { isAdded: true | false, addedCount: int } ``` Usage: ```javascript const { addBeforeLoaders, loaderByName } = require("@craco/craco"); const myNewWebpackLoader = { loader: require.resolve("tslint-loader") }; addBeforeLoaders(webpackConfig, loaderByName("eslint-loader"), myNewWebpackLoader); ``` #### addAfterLoader Add a new *loader* **after** the loader that match specified criteria to the webpack config. Returns: ```javascript { isAdded: true | false } ``` Usage: ```javascript const { addAfterLoader, loaderByName } = require("@craco/craco"); const myNewWebpackLoader = { loader: require.resolve("tslint-loader") }; addAfterLoader(webpackConfig, loaderByName("eslint-loader"), myNewWebpackLoader); ``` #### addAfterLoaders Add a new *loader* **after** all the loaders that match specified criteria to the webpack config. Returns: ```javascript { isAdded: true | false, addedCount: int } ``` Usage: ```javascript const { addAfterLoaders, loaderByName } = require("@craco/craco"); const myNewWebpackLoader = { loader: require.resolve("tslint-loader") }; addAfterLoaders(webpackConfig, loaderByName("eslint-loader"), myNewWebpackLoader); ``` #### getPlugin Retrieve the **first** plugin that match the specified criteria from the webpack config. Returns: ```javascript { isFound: true | false, match: {...} // the webpack plugin } ``` Usage: ```javascript const { getPlugin, pluginByName } = require("@craco/craco"); const { isFound, match } = getPlugin(webpackConfig, pluginByName("ESLintWebpackPlugin")); if (isFound) { // do stuff... } ``` #### removePlugins Remove **all** the plugins that match the specified criteria from the webpack config. Returns: ```javascript { hasRemovedAny:: true | false, removedCount:: int } ``` Usage: ```javascript const { removePlugins, pluginByName } = require("@craco/craco"); removePlugins(webpackConfig, pluginByName("ESLintWebpackPlugin")); ``` #### addPlugins Add new *plugins* to the webpack config. 
Usage: ```javascript const { addPlugins } = require("@craco/craco"); const myNewWebpackPlugin = require.resolve("ESLintWebpackPlugin"); addPlugins(webpackConfig, [myNewWebpackPlugin]); ``` #### throwUnexpectedConfigError Throw an error if the webpack configuration changes and does not match your expectations. (For example, `getLoader` cannot find a loader and `isFound` is `false`.) `create-react-app` might update the structure of their webpack config, so it is very important to show a helpful error message when something breaks. Raises an error and crashes Node.js: ```bash $ yarn start yarn run v1.12.3 $ craco start /path/to/your/app/craco.config.js:23 throw new Error( ^ Error: Can't find eslint-loader in the webpack config! This error probably occurred because you updated react-scripts or craco. Please try updating craco-less to the latest version: $ yarn upgrade craco-less Or: $ npm update craco-less If that doesn't work, craco-less needs to be fixed to support the latest version. Please check to see if there's already an issue in the ndbroadbent/craco-less repo: * https://github.com/ndbroadbent/craco-less/issues?q=is%3Aissue+webpack+eslint-loader If not, please open an issue and we'll take a look. (Or you can send a PR!) You might also want to look for related issues in the craco and create-react-app repos: * https://github.com/sharegate/craco/issues?q=is%3Aissue+webpack+eslint-loader * https://github.com/facebook/create-react-app/issues?q=is%3Aissue+webpack+eslint-loader at throwUnexpectedConfigError (/path/to/your/app/craco.config.js:23:19) ... 
``` Usage: ```javascript const { getLoader, loaderByName, throwUnexpectedConfigError } = require("@craco/craco"); // Create a helper function if you need to call this multiple times const throwError = (message, githubIssueQuery) => throwUnexpectedConfigError({ packageName: "craco-less", githubRepo: "ndbroadbent/craco-less", message, githubIssueQuery, }); const { isFound, match } = getLoader(webpackConfig, loaderByName("eslint-loader")); if (!isFound) { throwError("Can't find eslint-loader in the webpack config!", "webpack+eslint-loader") } ``` Options: ```javascript { message: "An error message explaining what went wrong", packageName: "NPM package name", githubRepo: "GitHub repo where people can open an issue. Format: username/repo", githubIssueQuery: "Search string to find related issues" } ``` > Only `message` is required. ## Backward Compatibility CRACO is not meant to be backward compatible with older versions of react-scripts. This package will only support the latest version. If your project uses an old react-scripts version, refer to the following table to select the appropriate CRACO version. | react-scripts Version |CRACO Version| | --------------------- | -----------:| | react-scripts < 4.0.0 | 5.8.0 | ## Debugging ### Verbose Logging To activate **verbose** logging specify the CLI option `--verbose` ```javascript /* package.json */ { "scripts": { "start": "craco start --verbose" } } ``` ## License Copyright © 2020, Groupe Sharegate inc. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license at https://github.com/gsoft-inc/gsoft-license/blob/master/LICENSE.
import React from 'react'; import AssetTypeImage from '../../Components/AssetTypeImage'; import CountryImage from '../../Components/CountryImage'; import { useAppDispatch, useAppSelector } from "../../hooks" import { RootState } from "../../store"; import { setQuery, setResults, TodayResult } from "./todaySlice"; interface ResultProps { result: TodayResult; } export const TodayResultCard: React.FC<ResultProps> = ({ result }: ResultProps) => { return ( <> <div className='today-card'> <div className='vertical-wrapper'> <div className='top-margin-assetlogo'> <AssetTypeImage src={result.assetTypeIconUrl} /> </div> <div className='horizontal-wrapper'> <div> <p className='p-text'>{result.description}</p> </div> <div className='vertical-wrapper p-text-seconday '> <div> <p className='p-text'>{result.symbol} </p> </div> <div className='left-margin-5'> <CountryImage src={result.exchange.country.flagIconUrl} /> </div> <div> <p className='p-text'>&#8226;</p> </div> <div> <p className='p-text'>{result.displayAssetType}</p> </div> </div> </div> </div> </div> </> ) } interface ResultListProps { results: TodayResult[]; } const ResultList: React.FC<ResultListProps> = ({ results }: ResultListProps) => { if (results) { if (results.length > 0) { const renderedResults = results.map((r: TodayResult) => { console.log("rs:", r.symbol) return ( <div key={r.symbol + "-" + r.description + '-' + r.displayAssetType} className='bottom-margin'> <TodayResultCard result={r} /> </div>) }); return ( <> {renderedResults} </> ) } else { return ( <></> ) } } else { return ( <></> ) } } const SearchToday: React.FC = () => { const dispatch = useAppDispatch(); const query = useAppSelector((state: RootState) => state.today.query); const results = useAppSelector((state: RootState) => state.today.results); const token = useAppSelector((state: RootState) => state.auth.token); const onChange = (event: any) => { dispatch(setQuery(String(event.target.value))); const QUERY = `query{ 
instruments(search:\"${event.target.value}\") { description symbol assetTypeIconUrl displayAssetType exchange{ country{ flagIconUrl } } } }` const Options: any = { method: 'POST', headers: { "Authorization": "Bearer " + token, "Content-Type": "application/json" }, body: JSON.stringify({ query: QUERY, variables: {} }), redirect: 'follow' }; fetch("https://devbox.gid.works/", Options).then(response => response.json()).then(result => { dispatch(setResults(result)); }).catch(error => console.log('error', error)); } return ( <> <input type="text" value={query} onChange={onChange} /> <ResultList results={results} /> </> ) } export default SearchToday
package valkyrie.ide.doc //import valkyrie.language.psi_node.ValkyrieClassStatementNode //import valkyrie.language.psi_node.ValkyrieTraitStatementNode import com.intellij.openapi.editor.colors.EditorColorsManager import com.intellij.openapi.editor.richcopy.HtmlSyntaxInfoUtil import com.intellij.psi.PsiElement import com.intellij.psi.util.elementType import com.intellij.ui.ColorUtil import valkyrie.ide.highlight.ValkyrieHighlightColor import valkyrie.ide.highlight.ValkyrieHighlightColor.* import valkyrie.language.ValkyrieLanguage import valkyrie.language.ast.ValkyrieClassStatement import valkyrie.language.ast.ValkyrieTraitStatement import valkyrie.language.lexer.ValkyrieProgramLexer import valkyrie.lsp.RequestDocument class DocumentationRenderer(var element: PsiElement, private var original: PsiElement?) { private val doc = StringBuilder() fun onHover(): String { when { ValkyrieProgramLexer.Keywords.contains(element.elementType) -> return RequestDocument.keyword(element.text).send() ValkyrieProgramLexer.Operators.contains(element.elementType) -> return RequestDocument.operator(element.text).send() else -> {} } when (element) { is ValkyrieTraitStatement -> buildDetail(element as ValkyrieTraitStatement) is ValkyrieClassStatement -> buildShort(element as ValkyrieClassStatement) else -> { doc.append(element) doc.append("<br/>") doc.append(original) doc.append("<br/>") doc.append("onDetail: ${element.text}") } } return doc.toString() } private fun buildShort(element: ValkyrieTraitStatement) { append(KEYWORD, "crate ") appendNamespace() doc.append("<br/>") append(KEYWORD, "public ") append(KEYWORD, "native ") append(KEYWORD, "trait ") append(SYM_TRAIT, element.name) } private fun buildDetail(element: ValkyrieTraitStatement) { this.buildShort(element) } private fun buildShort(element: ValkyrieClassStatement) { append(KEYWORD, "crate ") appendNamespace() doc.append("<br/>") append(KEYWORD, "public ") append(KEYWORD, "native ") append(KEYWORD, "class ") append(SYM_CLASS, 
element.name ?: "[Unknown]") // appendNewline() // append(KEYWORD, "implements ") // append(SYM_TRAIT, "Eq") // appendAdd() // append(SYM_TRAIT, "Hash") } /// get the path relative to the project root /// FIXME: get real declare module private fun appendNamespace() { val file = element.containingFile; // fake module path val path = file.virtualFile.path.substringAfter("src/").replace("/", "::").replace(".vk", "") append(path) } fun append(text: String) { doc.append("<span>${text}</span>") } fun append(key: ValkyrieHighlightColor, text: String) { // HtmlSyntaxInfoUtil.getStyledSpan(key.textAttributesKey, text, 1.0f) val attr = EditorColorsManager.getInstance().globalScheme.getAttributes(key.textAttributesKey) val color = ColorUtil.toHtmlColor(attr.foregroundColor) doc.append("<span style='color:${color}'>${text}</span>") } private fun appendHighlight(code: String) { HtmlSyntaxInfoUtil.appendHighlightedByLexerAndEncodedAsHtmlCodeSnippet( doc, element.project, ValkyrieLanguage, code.trimIndent(), 1.0f, ) } private fun appendAdd() { doc.append("<span>+</span>") } }
package landing.l2.applicationCode.scanner; import landing.l3.domainCode.scanner.NumberBuilder; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; public class NumberBuilderTest { @Test public void testDetermineStringForValueFromInteger() { String correctInteger = "12345$"; String answerToCorrectInteger = NumberBuilder.determineStringForValueFrom(correctInteger) + "$"; assertEquals(correctInteger, answerToCorrectInteger); } @Test public void testDetermineStringForValueFromDouble() { String correctDouble = "123.45$"; String answerToCorrectDouble = NumberBuilder.determineStringForValueFrom(correctDouble) + "$"; assertEquals(correctDouble, answerToCorrectDouble); } @Test public void testDetermineStringForValueFromDecimalPoint() { String correctDecimalPoint = ".$"; String answerToCorrectDecimalPoint = NumberBuilder.determineStringForValueFrom(correctDecimalPoint) + "$"; assertEquals(correctDecimalPoint, answerToCorrectDecimalPoint); } @Test public void testDetermineStringForValueFromDecimalPointAfter() { String correctZeroBefore = "0.$"; String answerToCorrectZeroBefore = NumberBuilder.determineStringForValueFrom(correctZeroBefore) + "$"; assertEquals(correctZeroBefore, answerToCorrectZeroBefore); } @Test public void testDetermineStringForValueFromDecimalPointBefore() { String correctZeroAfter = ".0$"; String answerToCorrectZeroAfter = NumberBuilder.determineStringForValueFrom(correctZeroAfter) + "$"; assertEquals(correctZeroAfter, answerToCorrectZeroAfter); } @Test public void testDetermineStringForValueFromWrongDecimalPoint0() { String wrongDecimalPoint0 = "1.2.3$"; assertThrows(NumberFormatException.class, () -> NumberBuilder.determineStringForValueFrom(wrongDecimalPoint0)); } @Test public void testDetermineStringForValueFromWrongDecimalPoint1() { String wrongDecimalPoint1 = "1..2$"; assertThrows(NumberFormatException.class, () -> NumberBuilder.determineStringForValueFrom(wrongDecimalPoint1)); } @Test 
public void testDetermineStringForValueFromWrongDecimalPoint2() { String wrongDecimalPoint2 = "12..34$"; assertThrows(NumberFormatException.class, () -> NumberBuilder.determineStringForValueFrom(wrongDecimalPoint2)); } @Test public void testDetermineStringForValueFromWrongDecimalPoint3() { String wrongDecimalPoint3 = "..$"; assertThrows(NumberFormatException.class, () -> NumberBuilder.determineStringForValueFrom(wrongDecimalPoint3)); } @Test public void testConvertValueToDoubleFromInteger() { double correctInteger = 12345; double answerToCorrectInteger = NumberBuilder.convertValueToDoubleFrom("12345"); assertEquals(correctInteger, answerToCorrectInteger, 1E-6); } @Test public void testConvertValueToDoubleFromDouble() { double correctDouble = 123.45; double answerToCorrectDouble = NumberBuilder.convertValueToDoubleFrom("123.45"); assertEquals(correctDouble, answerToCorrectDouble, 1E-6); } @Test public void testConvertValueToDoubleFromDecimalPointAfter() { double correctZeroBefore = 0.; double answerToCorrectZeroBefore = NumberBuilder.convertValueToDoubleFrom("0."); assertEquals(correctZeroBefore, answerToCorrectZeroBefore, 1E-6); } @Test public void testConvertValueToDoubleFromDecimalPointBefore() { double correctZeroAfter = .0; double answerToCorrectZeroAfter = NumberBuilder.convertValueToDoubleFrom(".0"); assertEquals(correctZeroAfter, answerToCorrectZeroAfter, 1E-6); } }
# Builds denormalized, searchable rows for message threads so full-text
# queries can run against a single flat record per thread.
class Searchable::Indexer
  # Message attributes folded into the thread's searchable content,
  # paired with the formatter used to normalize each value.
  MESSAGE_SEARCHABLE_FIELDS = [
    { name: :title, formatter: :string },
    { name: :sender_name, formatter: :string },
    { name: :html_visualization, formatter: :html_string },
  ]

  # Creates or refreshes the Searchable::MessageThread row for +message_thread+,
  # flattening its title, note, tags and every message body into plain strings.
  def self.index_message_thread(message_thread)
    record = ::Searchable::MessageThread.find_or_initialize_by(message_thread_id: message_thread.id)
    record.title = Searchable::IndexHelpers.searchable_string(message_thread.title)
    record.note = Searchable::IndexHelpers.searchable_string(message_thread.message_thread_note&.note.to_s)
    record.tag_ids = message_thread.tags.map(&:id)
    # ":" and "/" are replaced with spaces so tag names tokenize cleanly.
    record.tag_names = Searchable::IndexHelpers.searchable_string(message_thread.tags.map(&:name).join(' ').gsub(/[:\/]/, " "))
    record.content = message_thread.messages.map { |message| message_to_searchable_string(message) }.join(' ')
    record.last_message_delivered_at = message_thread.last_message_delivered_at
    record.tenant_id = message_thread.box.tenant_id
    record.box_id = message_thread.box_id
    record.save!
  end

  # Flattens one message into a single searchable string using
  # MESSAGE_SEARCHABLE_FIELDS.
  def self.message_to_searchable_string(message)
    record_to_searchable_string(message, MESSAGE_SEARCHABLE_FIELDS)
  end

  # Maps each configured field of +record+ through its formatter and joins
  # the normalized values with spaces.
  def self.record_to_searchable_string(record, searchable_fields)
    searchable_fields.map do |searchable_field|
      field_name, formatter = searchable_field.fetch_values(:name, :formatter)
      value = record.public_send(field_name)
      case formatter
      when :string
        Searchable::IndexHelpers.searchable_string(value)
      when :html_string
        Searchable::IndexHelpers.html_to_searchable_string(value)
      else
        # NOTE(review): there is no enclosing catch(:unsupported_searchable_formatter)
        # visible, so this throw would surface as UncaughtThrowError; `raise` with a
        # descriptive error message seems intended — confirm before changing.
        throw :unsupported_searchable_formatter
      end
    end.compact.join(' ')
  end

  # True when the last save of +message+ changed any attribute that feeds
  # the searchable content.
  def self.message_searchable_fields_changed?(message)
    MESSAGE_SEARCHABLE_FIELDS.any?{ |field| message.saved_changes.key?(field[:name].to_s) }
  end
end
/* * MIT License * * Copyright (c) 2022 XenFork Union * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package forkengine.backend.lwjgl3; import forkengine.asset.texture.TextureData; import forkengine.core.DataBuffer; import org.lwjgl.system.MemoryStack; import org.lwjgl.system.MemoryUtil; import java.nio.IntBuffer; import static org.lwjgl.stb.STBImage.*; /** * The texture data implemented with LWJGL 3. 
* * @author squid233 * @since 0.1.0 */ public class LWJGL3TextureData implements TextureData { private boolean ownsData = false; private long address = 0; private int width = 0; private int height = 0; @Override public int width() { return width; } @Override public int height() { return height; } @Override public TextureData setAs(long address, int width, int height) { ownsData=false; this.address = address; this.width = width; this.height = height; return this; } @Override public TextureData load(DataBuffer dataBuffer) { if (address() != 0) nstbi_image_free(address()); try (MemoryStack stack = MemoryStack.stackPush()) { IntBuffer px = stack.callocInt(1); IntBuffer py = stack.callocInt(1); IntBuffer pc = stack.callocInt(1); address = nstbi_load_from_memory(dataBuffer.address(), (int) dataBuffer.capacity(), MemoryUtil.memAddress(px), MemoryUtil.memAddress(py), MemoryUtil.memAddress(pc), STBI_rgb_alpha); if (address == 0) { throw new IllegalStateException("Failed to load the image: " + stbi_failure_reason()); } ownsData = true; width = px.get(0); height = py.get(0); } return this; } @Override public long address() { return address; } @Override public void close() { if (ownsData) { nstbi_image_free(address()); } } }
package part2actors

import akka.actor.{Actor, ActorSystem, Props}

/**
 * Intro to Akka actors: creating an ActorSystem, defining actor behavior,
 * instantiating actors via Props, and communicating with `!` (tell).
 */
object ActorsIntro extends App {

  // part1 - actor systems
  val actorSystem = ActorSystem("firstActorSystem")
  println(actorSystem.name)

  // part2 - create actors
  // word count actor
  class WordCountActor extends Actor {
    // internal data
    var totalWords = 0

    // behavior
    override def receive: PartialFunction[Any, Unit] = {
      case message: String =>
        println(s"[word counter] I have received: $message")
        // FIX: the original added (" ").length, which is always 1, instead of
        // the number of words in the received message.
        totalWords += message.split(" ").length
      case msg =>
        // (also fixed the "undestand" typo in the log message)
        println(s"[word counter] I cannot understand ${msg.toString}")
    }
  }

  // part3 - instantiate our actor
  val wordCounter = actorSystem.actorOf(Props[WordCountActor], "wordCounter")
  val anotherWordCounter = actorSystem.actorOf(Props[WordCountActor], "anotherWordCounter")

  // part4 - communicate!
  wordCounter ! "I am learning Akka and it's pretty damn cool!" // ! -> "tell"
  wordCounter ! "A different message" // asynchronous

  object Person {
    def props(name: String): Props = Props(new Person(name))
  }

  class Person(name: String) extends Actor {
    override def receive: Receive = {
      case "hi" => println(s"Hi, my name is $name")
      case _ =>
    }
  }

  // best practice: define a companion object with a props method that takes
  // the actor's constructor arguments
  val person = actorSystem.actorOf(Person.props("Bob")) // this instantiation is legal
  person ! "hi"
}
import React, { Component } from 'react' import { Header, Grid, List } from 'semantic-ui-react' import LinkButton from '../link_button' const trufflConfig = `aldwych: { host: 'localhost', port: 8545, network_id: '*' }` export default class Faucet extends Component { render () { return ( <Grid.Column> <Header as='h3' inverted> 3. Develop! <Header.Subheader>Deploy your Contracts</Header.Subheader> </Header> <p>Once you have a Connection Method and some ALD, you're ready to go!</p> <p>The Ethereum development ecosystem is ever-growing and you can use many of the same tools that you're already familiar with.</p> <Header as='h4' content='Truffle' inverted /> <p>Truffle is the most popular development framework for Ethereum. You can connect to Aldwych in one of the following ways:</p> <List ordered inverted> <List.Item> Use OpenEthereum and add the config to <code>truffle.js</code>: <pre> <code>{trufflConfig}</code> </pre> </List.Item> <List.Item> Use an {' '} <a href='https://github.com/trufflesuite/truffle-hdwallet-provider' target='_blank'> HD Wallet Provider </a> {' '} to connect to a remote RPC node such as the one provided on this page, in the 'Connect to Aldwych' Section </List.Item> </List> <LinkButton basic inverted content='Truffle Framework' to='https://truffleframework.com' /> </Grid.Column> ) } }
package game.settings;

import game.gameobjects.player.components.PlayerConfiguration;
import lombok.Getter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

/**
 * Internal game settings injected from application properties.
 *
 * <p>FIX: every {@code @Value} was written as {@code @Value("prop.name")},
 * which injects the literal string "prop.name" instead of the property value
 * and fails conversion to {@code int}/{@code int[]} at startup. Property
 * lookups must use the {@code ${...}} placeholder syntax.</p>
 *
 * @author Mateusz Żebrowski
 */
@Component
public class InternalSettings {

    @Getter
    @Value("${game.default.number.of.players}")
    int defaultPlayersNumber;

    @Getter
    @Value("${game.default.round.number}")
    int defaultRoundNumber;

    @Getter
    @Value("${game.default.round.time.in.seconds}")
    int defaultRoundTime;

    @Getter
    @Value("${max.number.of.players}")
    int maxNumberOfPlayers;

    @Getter
    @Value("${min.round.time.in.seconds}")
    int minRoundTime;

    @Getter
    @Value("${round.time.increment}")
    int roundTimeIncrement;

    @Getter
    @Value("${max.discs.limit}")
    int discsLimit;

    // Per-player disc layouts; Spring converts a comma-separated property value
    // to int[].
    @Value("${configuration.player.one}")
    private int[] configurationPlayerOne;

    @Value("${configuration.player.two}")
    private int[] configurationPlayerTwo;

    @Value("${configuration.player.three}")
    private int[] configurationPlayerThree;

    @Value("${configuration.player.four}")
    private int[] configurationPlayerFour;

    /**
     * Returns the configured layout for the given 1-based player number,
     * or an all-zero fallback configuration for any other number.
     */
    public PlayerConfiguration getPlayerConfiguration(int playerNumber){
        switch (playerNumber) {
            case 1: return new PlayerConfiguration(configurationPlayerOne, discsLimit);
            case 2: return new PlayerConfiguration(configurationPlayerTwo, discsLimit);
            case 3: return new PlayerConfiguration(configurationPlayerThree, discsLimit);
            case 4: return new PlayerConfiguration(configurationPlayerFour, discsLimit);
            default: return new PlayerConfiguration(new int[]{0,0,0}, discsLimit);
        }
    }
}
import './featuredMovie.scss'
import { PlayArrow, InfoOutlined } from '@material-ui/icons'
import { useEffect, useState } from 'react'
import { Link } from 'react-router-dom'
import axios from 'axios'

// Hero banner component: fetches one random item of the given `type`
// ("movies" or "series") and renders its artwork, title image, description
// and play / more-info buttons, plus a genre selector when a type is set.
const FeaturedMovie = ({ type }) => {
    // Randomly selected content returned by the API; starts empty.
    const [ content, setContent ] = useState({})

    // Re-fetch a random item whenever the requested content type changes.
    useEffect(() => {
        const getRandomContent = async () => {
            try {
                // SECURITY(review): a JWT is hard-coded into the client bundle
                // here; it is visible to every user and has an expiry baked in.
                // It should come from the auth store / environment — confirm
                // with the team before shipping.
                const res = await axios.get(`/movies/random?type=${type}`, {
                    headers: {
                        token: "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjY0MThlMzJjM2RlYzI2M2IwNzMwZjU4ZiIsImlzQWRtaW4iOnRydWUsImlhdCI6MTY3OTc2ODgxMSwiZXhwIjoxNjgwMjAwODExfQ.u0udN9ucvlN2-OzwMllypjiJiOO_Cg6IB-8FWFgwPdk" ,
                    },
                });
                // API returns an array; only the first item is displayed.
                setContent(res.data[0])
            } catch (err) {
                console.log(err)
            }
        }
        getRandomContent()
    }, [type])

    console.log(content)

    return (
        <div className='featured'>
            {type && (
                <div className="category">
                    <span>{type === 'movies' ? "Movies" : "Series"}</span>
                    <select name="genre" id="genre">
                        <option>Genre</option>
                        <option value="adventure">Adventure</option>
                        <option value="comedy">Comedy</option>
                        <option value="crime">Crime</option>
                        <option value="fantasy">Fantasy</option>
                        <option value="historical">Historical</option>
                        <option value="horror">Horror</option>
                        <option value="romance">Romance</option>
                        <option value="sci-fi">Sci-fi</option>
                        <option value="thriller">Thriller</option>
                        <option value="western">Western</option>
                        <option value="animation">Animation</option>
                        <option value="drama">Drama</option>
                        <option value="documentary">Documentary</option>
                    </select>
                </div>
            )}
            <img width={'100%'} src={content.img} alt="" />
            <div className="info">
                <img src={content.imgTitle} alt="" />
                <span className="description">{content.desc}</span>
                <div className="buttons">
                    <button className="play">
                        <PlayArrow />
                        <span>Play</span>
                    </button>
                    <button className="more">
                        <InfoOutlined />
                        <span>More Info</span>
                    </button>
                </div>
            </div>
        </div>
    )
}

export default FeaturedMovie
#
# @lc app=leetcode.cn id=938 lang=python3
#
# [938] 二叉搜索树的范围和
#
# https://leetcode-cn.com/problems/range-sum-of-bst/description/
#
# Given the root of a binary search tree, return the sum of all node
# values in the inclusive range [low, high].
#

# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right


class Solution:
    # FIX: the original annotated ``root`` with the bare name ``TreeNode``,
    # which only exists in a comment; parameter annotations are evaluated at
    # function-definition time, so importing this module raised NameError
    # outside the LeetCode harness. A string (forward-reference) annotation
    # keeps the module importable while preserving the documented type.
    def rangeSumBST(self, root: 'TreeNode', low: int, high: int) -> int:
        """Return the sum of node values v with low <= v <= high.

        Exploits the BST invariant to prune whole subtrees that cannot
        contain in-range values. O(n) worst case, O(h) stack depth.
        """
        if not root:
            return 0
        # root and its entire left subtree are below the range -> go right.
        if root.val < low:
            return self.rangeSumBST(root.right, low, high)
        # root and its entire right subtree are above the range -> go left.
        if root.val > high:
            return self.rangeSumBST(root.left, low, high)
        return (root.val
                + self.rangeSumBST(root.left, low, high)
                + self.rangeSumBST(root.right, low, high))
# @lc code=end
<template> <doc-alert title="【交易】分销返佣" url="https://doc.iocoder.cn/mall/trade-brokerage/" /> <ContentWrap> <!-- 搜索工作栏 --> <el-form class="-mb-15px" :model="queryParams" ref="queryFormRef" :inline="true" label-width="68px" > <el-form-item label="用户编号" prop="userId"> <el-input v-model="queryParams.userId" placeholder="请输入用户编号" clearable @keyup.enter="handleQuery" class="!w-240px" /> </el-form-item> <el-form-item label="提现类型" prop="type"> <el-select v-model="queryParams.type" placeholder="请选择提现类型" clearable class="!w-240px" > <el-option v-for="dict in getIntDictOptions(DICT_TYPE.BROKERAGE_WITHDRAW_TYPE)" :key="dict.value" :label="dict.label" :value="dict.value" /> </el-select> </el-form-item> <el-form-item label="账号" prop="accountNo"> <el-input v-model="queryParams.accountNo" placeholder="请输入账号" clearable @keyup.enter="handleQuery" class="!w-240px" /> </el-form-item> <el-form-item label="提现银行" prop="bankName"> <el-select v-model="queryParams.bankName" placeholder="请选择提现银行" clearable class="!w-240px" > <el-option v-for="dict in getStrDictOptions(DICT_TYPE.BROKERAGE_BANK_NAME)" :key="dict.value" :label="dict.label" :value="dict.value" /> </el-select> </el-form-item> <el-form-item label="状态" prop="status"> <el-select v-model="queryParams.status" placeholder="请选择状态" clearable class="!w-240px"> <el-option v-for="dict in getIntDictOptions(DICT_TYPE.BROKERAGE_WITHDRAW_STATUS)" :key="dict.value" :label="dict.label" :value="dict.value" /> </el-select> </el-form-item> <el-form-item label="申请时间" prop="createTime"> <el-date-picker v-model="queryParams.createTime" value-format="YYYY-MM-DD HH:mm:ss" type="daterange" start-placeholder="开始日期" end-placeholder="结束日期" :default-time="[new Date('1 00:00:00'), new Date('1 23:59:59')]" class="!w-240px" /> </el-form-item> <el-form-item> <el-button @click="handleQuery"><Icon icon="ep:search" class="mr-5px" /> 搜索</el-button> <el-button @click="resetQuery"><Icon icon="ep:refresh" class="mr-5px" /> 重置</el-button> </el-form-item> </el-form> 
</ContentWrap> <!-- 列表 --> <ContentWrap> <el-table v-loading="loading" :data="list" :stripe="true" :show-overflow-tooltip="true"> <el-table-column label="编号" align="left" prop="id" min-width="60px" /> <el-table-column label="用户信息" align="left" min-width="120px"> <template #default="scope"> <div>编号:{{ scope.row.userId }}</div> <div>昵称:{{ scope.row.userNickname }}</div> </template> </el-table-column> <el-table-column label="提现金额" align="left" prop="price" min-width="80px"> <template #default="scope"> <div>金 额:¥{{ fenToYuan(scope.row.price) }}</div> <div>手续费:¥{{ fenToYuan(scope.row.feePrice) }}</div> </template> </el-table-column> <el-table-column label="提现方式" align="left" prop="type" min-width="120px"> <template #default="scope"> <div v-if="scope.row.type === BrokerageWithdrawTypeEnum.WALLET.type"> 余额 </div> <div v-else> {{ getDictLabel(DICT_TYPE.BROKERAGE_WITHDRAW_TYPE, scope.row.type) }} <span v-if="scope.row.accountNo">账号:{{ scope.row.accountNo }}</span> </div> <template v-if="scope.row.type === BrokerageWithdrawTypeEnum.BANK.type"> <div>真实姓名:{{ scope.row.name }}</div> <div> 银行名称: <dict-tag :type="DICT_TYPE.BROKERAGE_BANK_NAME" :value="scope.row.bankName" /> </div> <div>开户地址:{{ scope.row.bankAddress }}</div> </template> </template> </el-table-column> <el-table-column label="收款码" align="left" prop="accountQrCodeUrl" min-width="70px"> <template #default="scope"> <el-image v-if="scope.row.accountQrCodeUrl" :src="scope.row.accountQrCodeUrl" class="h-40px w-40px" :preview-src-list="[scope.row.accountQrCodeUrl]" preview-teleported /> <span v-else>无</span> </template> </el-table-column> <el-table-column label="申请时间" align="left" prop="createTime" :formatter="dateFormatter" width="180px" /> <el-table-column label="备注" align="left" prop="remark" /> <el-table-column label="状态" align="left" prop="status" min-width="120px"> <template #default="scope"> <dict-tag :type="DICT_TYPE.BROKERAGE_WITHDRAW_STATUS" :value="scope.row.status" /> <div v-if="scope.row.auditTime" 
class="text-xs"> 时间:{{ formatDate(scope.row.auditTime) }} </div> <div v-if="scope.row.auditReason" class="text-xs"> 原因:{{ scope.row.auditReason }} </div> </template> </el-table-column> <el-table-column label="操作" align="left" width="110px" fixed="right"> <template #default="scope"> <template v-if="scope.row.status === BrokerageWithdrawStatusEnum.AUDITING.status"> <el-button link type="primary" @click="handleApprove(scope.row.id)" v-hasPermi="['trade:brokerage-withdraw:audit']" > 通过 </el-button> <el-button link type="danger" @click="openForm(scope.row.id)" v-hasPermi="['trade:brokerage-withdraw:audit']" > 驳回 </el-button> </template> </template> </el-table-column> </el-table> <!-- 分页 --> <Pagination :total="total" v-model:page="queryParams.pageNo" v-model:limit="queryParams.pageSize" @pagination="getList" /> </ContentWrap> <!-- 表单弹窗:添加/修改 --> <BrokerageWithdrawRejectForm ref="formRef" @success="getList" /> </template> <script setup lang="ts"> import { DICT_TYPE, getDictLabel, getIntDictOptions, getStrDictOptions } from '@/utils/dict' import { dateFormatter, formatDate } from '@/utils/formatTime' import * as BrokerageWithdrawApi from '@/api/mall/trade/brokerage/withdraw' import BrokerageWithdrawRejectForm from './BrokerageWithdrawRejectForm.vue' import { BrokerageWithdrawStatusEnum, BrokerageWithdrawTypeEnum } from '@/utils/constants' import { fenToYuanFormat } from '@/utils/formatter' import { fenToYuan } from '@/utils' defineOptions({ name: 'BrokerageWithdraw' }) const { t } = useI18n() // 国际化 const message = useMessage() // 消息弹窗 const loading = ref(true) // 列表的加载中 const total = ref(0) // 列表的总页数 const list = ref([]) // 列表的数据 const queryParams = reactive({ pageNo: 1, pageSize: 10, userId: null, type: null, name: null, accountNo: null, bankName: null, status: null, auditReason: null, auditTime: [], remark: null, createTime: [] }) const queryFormRef = ref() // 搜索的表单 /** 查询列表 */ const getList = async () => { loading.value = true try { const data = await 
BrokerageWithdrawApi.getBrokerageWithdrawPage(queryParams) list.value = data.list total.value = data.total } finally { loading.value = false } } /** 搜索按钮操作 */ const handleQuery = () => { queryParams.pageNo = 1 getList() } /** 重置按钮操作 */ const resetQuery = () => { queryFormRef.value.resetFields() handleQuery() } /** 添加/修改操作 */ const formRef = ref() const openForm = (id: number) => { formRef.value.open(id) } /** 审核通过 */ const handleApprove = async (id: number) => { try { loading.value = true await message.confirm('确定要审核通过吗?') await BrokerageWithdrawApi.approveBrokerageWithdraw(id) await message.success(t('common.success')) await getList() } finally { loading.value = false } } /** 初始化 **/ onMounted(() => { getList() }) </script>
import type { Context } from 'react'; import type { ReactReduxContextValue } from 'react-redux'; import { setupListeners } from '@reduxjs/toolkit/query'; import type { Api } from '@reduxjs/toolkit/dist/query/apiTypes'; /** * Can be used as a `Provider` if you **do not already have a Redux store**. * * @example * ```tsx * // codeblock-meta title="Basic usage - wrap your App with ApiProvider" * import * as React from 'react'; * import { ApiProvider } from '@reduxjs/toolkit/query/react'; * import { Pokemon } from './features/Pokemon'; * * function App() { * return ( * <ApiProvider api={api}> * <Pokemon /> * </ApiProvider> * ); * } * ``` * * @remarks * Using this together with an existing redux store, both will * conflict with each other - please use the traditional redux setup * in that case. */ export declare function ApiProvider<A extends Api<any, {}, any, any>>(props: { children: any; api: A; setupListeners?: Parameters<typeof setupListeners>[1]; context?: Context<ReactReduxContextValue>; }): JSX.Element;
//
//  BookDetailFull.swift
//  ReadLog
//
//  Created by 이만 on 2023/11/21.
//

import SwiftUI
import CoreData

/// Detail screen for a single book: cover/metadata card, reading-progress
/// widgets (hidden when `isRead` is true), and the user's notes (`ReadLog`s).
struct BookDetailFull: View {
    @Environment(\.managedObjectContext) private var viewContext
    @Environment(\.dismiss) var dismiss
    // Owns daily-progress state; constructed in init with the shared container's context.
    @StateObject private var viewModel: ReadingTrackerViewModel
    // Raw text of the "pages read" field; validated in the keyboard toolbar button.
    @State private var pagesReadInput: String = ""
    // Notes for this book, (re)loaded in onAppear.
    @State private var bookMemos: [ReadLog] = []
    // Shown when the page input is not a number in (lastPageRead, totalBookPages].
    @State private var showingAlert: Bool = false
    // Guards one-time viewModel setup across repeated onAppear calls.
    @State private var isInit: Bool = true
    @FocusState private var isInputActive: Bool
    private var bookInfo: BookInfo?
    private var isRead: Bool = false
    let memoDateFormatter: DateFormatter = Date.yyyyMdFormatter

    /// - Parameters:
    ///   - bookInfo: the Core Data book record; force-unwrapped throughout the
    ///     body, so passing nil will crash — NOTE(review): consider requiring it.
    ///   - isRead: true renders the read-only "finished book" variant.
    init(_ bookInfo: BookInfo?, isRead: Bool) {
        self.bookInfo = bookInfo
        self.isRead = isRead
        // NOTE(review): uses PersistenceController.shared's viewContext rather than
        // the @Environment context above — presumably the same store; confirm.
        self._viewModel = StateObject(wrappedValue: ReadingTrackerViewModel(context: PersistenceController.shared.container.viewContext))
    }

    var body: some View {
        VStack {
            header
            ScrollView {
                displayBook(isbn: (self.bookInfo?.isbn)!)
                    .padding(EdgeInsets(top: 20, leading: 0, bottom: 40, trailing: 0))
                // Progress UI only while the book is still being read.
                if !isRead {
                    HStack {
                        Spacer()
                        Text("독서 진행률 \(Int(viewModel.progressPercentage * 100))%")
                            .mini(.black)
                    }
                    progressBar(value: viewModel.progressPercentage)
                    Spacer(minLength: 20)
                    trackingCircles(viewModel: self.viewModel)
                }
                Spacer(minLength: 20)
                // Notes section header with a "+" link to the note editor.
                HStack{
                    VStack(alignment: .leading){
                        Text("독서 기록").title(Color.primary)
                        HStack {
                            Text("어떤 부분이 인상 깊었나요?").bodyDefault(Color.primary)
                            Spacer()
                            NavigationLink(destination:AddNoteView(bookInfo!, $bookMemos)){
                                Image(systemName: "plus.app")
                                    .resizable()
                                    .aspectRatio(contentMode: .fill)
                                    .frame(width: 24, height: 24)
                            }
                            .foregroundStyle(Color.primary)
                        }
                    }
                }
                .padding(EdgeInsets(top: 10, leading: 5, bottom: 0, trailing: 5))
                .background(Color("backgroundColor"))
                Divider()
                bookNoteView(memos: bookMemos)
            }
            .scrollIndicators(.hidden)
            if !isRead {
                pageInput
            }
        }
        .padding(.horizontal)
        .background(Color("backgroundColor"))
        .toolbar {
            // Keyboard accessory bar with the save ("저장") button for page input.
            ToolbarItemGroup(placement: .keyboard) {
                Spacer()
                Button {
                    // Accept only a number that advances past the last recorded
                    // page and does not exceed the book's page count.
                    if let newPageRead = Int(pagesReadInput),
                       newPageRead > viewModel.lastPageRead,
                       newPageRead <= viewModel.totalBookPages {
                        viewModel.addDailyProgress(newPageRead: newPageRead, bookInfo: self.bookInfo!)
                        pagesReadInput = ""
                        hideKeyboard()
                    } else {
                        showingAlert = true
                    }
                } label: {
                    Text("저장")
                        .foregroundStyle(Color.black)
                }
            }
        }
        .alert("숫자 형식이 올바르지 않습니다.", isPresented: $showingAlert) {
            Button("확인") {
                pagesReadInput = ""
            }
        }
        .onTapGesture {
            // Tapping outside the field dismisses the keyboard.
            isInputActive = false
        }
        .onAppear(perform: {
            // One-time viewModel seeding; notes reload on every appearance so
            // additions made in AddNoteView show up when navigating back.
            if isInit {
                viewModel.setDailyProgress(isbn: bookInfo!.isbn!)
                viewModel.setTotalBookPages(page: Int((bookInfo?.page)!))
                isInit = false
            }
            bookMemos = fetchAllBookNotes(isbn: bookInfo?.isbn)
        })
    }
}

// MARK: - Core Data helpers
private extension BookDetailFull {
    /// Fetches the single `BookInfo` whose isbn matches, or nil if absent.
    func fetchBookInfo(isbn: String) -> BookInfo? {
        let fetchRequest: NSFetchRequest<BookInfo>
        fetchRequest = BookInfo.fetchRequest()
        fetchRequest.fetchLimit = 1
        fetchRequest.predicate = NSPredicate(format: "isbn LIKE %@", isbn)
        do {
            let object = try viewContext.fetch(fetchRequest)
            return object.first
        } catch {
            let nsError = error as NSError
            fatalError("Unresolved Error\(nsError)")
        }
    }

    /// Returns all notes (`ReadLog`) attached to the book with this isbn;
    /// empty when isbn is nil.
    func fetchAllBookNotes(isbn: String?) -> [ReadLog] {
        guard let isbn else { return [] }
        let fetchRequest: NSFetchRequest<ReadLog>
        fetchRequest = ReadLog.fetchRequest()
        fetchRequest.predicate = NSPredicate(format: "%K LIKE %@",#keyPath(ReadLog.book.isbn), isbn)
        do {
            let objects = try viewContext.fetch(fetchRequest)
            return objects
        } catch {
            let nsError = error as NSError
            fatalError("Unresolved Error\(nsError)")
        }
    }

    /// Returns this book's `ReadingList` entries sorted by readtime ascending
    /// (so first = earliest session, last = latest).
    func fetchAllReadingList(isbn: String) -> [ReadingList] {
        let fetchRequest: NSFetchRequest<ReadingList>
        fetchRequest = ReadingList.fetchRequest()
        fetchRequest.sortDescriptors = [NSSortDescriptor(keyPath: \ReadingList.readtime, ascending: true)]
        fetchRequest.predicate = NSPredicate(format: "book.isbn LIKE %@",isbn)
        do {
            let objects = try viewContext.fetch(fetchRequest)
            return objects
        } catch {
            let nsError = error as NSError
            fatalError("Unresolved Error\(nsError)")
        }
    }

    /// Deletes the given `ReadingList` rows and saves the context.
    func deleteAllReadList(readingList: [ReadingList]) {
        for idx in 0..<readingList.count {
            viewContext.delete(readingList[idx])
        }
        do {
            try viewContext.save()
        } catch {
            let nsError = error as NSError
            fatalError("Unresolved Error\(nsError)")
        }
    }

    /// Creates a `ReadList` (completed read) spanning sdate...edate, links it
    /// to the book on both sides of the relationship, and saves.
    func addReadList(entity: BookInfo, sdate: Date, edate: Date) {
        let readBook = ReadList(context: viewContext)
        readBook.id = UUID()
        readBook.startdate = sdate
        readBook.enddate = edate
        readBook.book = entity
        if var readList = bookInfo?.readList {
            readList = readList.adding(readBook) as NSSet
            bookInfo?.readList = readList
        } else {
            bookInfo?.readList = [readBook]
        }
        do {
            try viewContext.save()
        } catch {
            let nsError = error as NSError
            fatalError("Unresolved Error\(nsError)")
        }
    }

    /// Marks the book finished: converts its in-progress sessions into a single
    /// completed `ReadList` spanning first-to-last session dates.
    func readComplete(isbn: String?) {
        guard let isbn else { return }
        // Derive start/end from the earliest and latest reading sessions.
        let startDate: Date
        let endDate: Date
        let readingList = fetchAllReadingList(isbn: isbn)
        if readingList.isEmpty { return }
        //TODO: date handling
        startDate = readingList.first?.readtime ?? Date()
        endDate = readingList.last?.readtime ?? Date()
        // Remove the now-converted in-progress sessions.
        deleteAllReadList(readingList: readingList)
        // Record the completed read.
        if let bookInfo {
            addReadList(entity: bookInfo, sdate: startDate, edate: endDate)
        }
    }
}

// MARK: - Book info view
private extension BookDetailFull {
    /// Card with cover image (or placeholder), title, author and publisher;
    /// falls back to a "Book not found" label when the isbn is unknown.
    @ViewBuilder
    func displayBook(isbn: String) -> some View {
        if let book = fetchBookInfo(isbn: isbn) {
            VStack {
                HStack{
                    if let imageData = book.image, let uiImage = UIImage(data:imageData){
                        Image(uiImage: uiImage)
                            .resizable()
                            .scaledToFit()
                            .frame(width: 100)
                            .clipped()
                            .padding(.horizontal, 15)
                            .padding(.vertical, 5)
                    }else{
                        // Placeholder when no cover image is stored.
                        Image(systemName: "book")
                            .resizable()
                            .scaledToFit()
                            .frame(width: 100)
                            .clipped()
                            .padding(.horizontal, 15)
                            .padding(.vertical, 5)
                    }
                    VStack(alignment: .leading){
                        Text(book.title ?? "Unknown Title")
                            .body2(.black)
                            .multilineTextAlignment(.leading)
                            .padding(.vertical, 6)
                        Text(book.author ?? "Unknown Author")
                            .mini(.black)
                            .multilineTextAlignment(.leading)
                            .padding(.vertical, 6)
                        Text(book.publisher ?? "Unknown Publisher")
                            .mini(.black)
                            .multilineTextAlignment(.leading)
                            .padding(.vertical, 6)
                    }
                    .padding(.vertical, 8)
                    .padding(.horizontal, 10)
                    Spacer()
                }
                .frame(maxWidth: .infinity)
                .padding(.vertical, 16)
                .clipShape(RoundedRectangle(cornerRadius: 10))
                .background(Color.white)
                .overlay(RoundedRectangle(cornerRadius:10)
                    .stroke(Color("gray"), lineWidth: 1)
                )
            }
        }else{
            Text("Book not found").title(Color.primary)
        }
    }
}

// MARK: - Progress view
private extension BookDetailFull {
    /// Rounded horizontal bar filled proportionally to `value` (0...1).
    func progressBar(value: Double) -> some View {
        let thickness: CGFloat = 15
        return GeometryReader { geometry in
            ZStack(alignment: .leading) {
                // Track.
                Rectangle().frame(width: geometry.size.width, height: thickness)
                    .opacity(0.3)
                    .foregroundColor(Color("gray"))
                // Fill, clamped to the track width.
                Rectangle().frame(width: min(CGFloat(value) * geometry.size.width, geometry.size.width), height: thickness)
                    .foregroundColor(Color("lightBlue"))
                    .animation(.linear, value: value)
            }
            .clipShape(RoundedRectangle(cornerRadius: 45))
        }
        .frame(height: 15)
    }
}

// MARK: - Daily tracking view
private extension BookDetailFull {
    /// Row of circles, one per tracked day; blue when pages were read that day.
    struct trackingCircles: View {
        @StateObject var viewModel: ReadingTrackerViewModel
        var body: some View {
            HStack {
                ForEach(viewModel.dailyProgress) { progress in
                    Circle()
                        .fill(progress.pagesRead > 0 ? Color("lightBlue") : Color("gray"))
                        .frame(width: 45, height: 45)
                        .overlay(Text("\(progress.pagesRead)p"))
                        .body3(Color.primary)
                }
            }
        }
    }
}

// MARK: - Book note view
private extension BookDetailFull {
    /// List of saved notes, or a "no saved notes" placeholder when empty.
    @ViewBuilder
    func bookNoteView(memos: [ReadLog]) -> some View {
        if !memos.isEmpty {
            LazyVStack {
                ForEach(memos) { memo in
                    bookNote(memo: memo)
                }
            }
        } else {
            Text("저장된 노트가 없습니다.")
                .bodyDefault(Color("gray"))
        }
    }

    /// Single note row: date, label badge, and the note text.
    func bookNote(memo: ReadLog) -> some View {
        VStack {
            VStack(alignment: .leading, spacing:10) {
                HStack {
                    Text(memoDateFormatter.string(from: memo.date!))
                        .bodyDefault(Color("gray"))
                        .foregroundColor(.secondary)
                    Spacer()
                    NoteLabel(type: .constant(convertLabel(labelType: Int(memo.label))))
                }
                Text(memo.log ?? "")
                    .bodyDefaultMultiLine(Color.primary)
            }
            .padding(.vertical, 10)
            Divider()
        }
    }

    /// Maps the stored label code to the `Note` enum (0 = impressive passage).
    func convertLabel(labelType: Int) -> Note {
        return labelType == 0 ? .impressive : .myThink
    }
}

// MARK: - Today page input view
private extension BookDetailFull {
    /// Bottom bar asking how far the user has read, with a numeric text field.
    var pageInput: some View {
        HStack {
            Text("어디까지 읽으셨나요?")
                .body1(Color.primary)
            Spacer()
            TextField("페이지 번호...", text: $pagesReadInput)
                .frame(width:120, height: 37)
                .keyboardType(.numberPad)
                .textFieldStyle(RoundedBorderTextFieldStyle())
                .foregroundColor(isInputActive ? .black : Color("gray")).body2(Color.primary)
                .focused($isInputActive)
        }
        .padding()
        .frame(height:47)
        .background(Color("lightBlue"),in: RoundedRectangle(cornerRadius: 10))
    }
}

// MARK: - Header
private extension BookDetailFull {
    /// Top bar: back chevron, and a "완독" (finished) button while still reading.
    var header: some View {
        HStack {
            Button(action:{
                self.dismiss()
            }) {
                Image(systemName: "chevron.left")
                    .foregroundStyle(Color.primary)
            }
            Spacer()
            if !isRead {
                Button("완독"){
                    readComplete(isbn: bookInfo?.isbn)
                    dismiss()
                }
                .body1(Color.primary)
            } else {
                Spacer()
            }
        }
        .padding(EdgeInsets(top: 16, leading: 0, bottom: 8, trailing: 0))
    }
}

//#Preview {
//    BookDetailFull("newBook3")
//        .environment(\.managedObjectContext, PersistenceController.preview.container.viewContext)
//}
import { useNavigate, useParams } from "react-router-dom"; import { useDataContext } from "../Context/DataContext"; import ArrowBackIcon from '@mui/icons-material/ArrowBack'; import { Avatar, Box, Button, Divider, Paper, Typography } from "@mui/material"; import { RatingFormModal } from "../Component/RatingFormModal"; export const RestaurantDetail = () => { const navigate=useNavigate() const { dataState,setSelectedCuisine } = useDataContext(); const { id } = useParams(); const selectedRestaurant = dataState?.restaurantData?.find( (data) => data?.id === Number(id) ); const { name, cuisine_id, address, phone, menu, ratings, description, } = selectedRestaurant; const averageRating=(ratings.reduce((total,crr)=>total=total+crr.rating,0)/ratings.length).toFixed(2) return ( <Box className="DetailsContainer" sx={{minHeight:"100vh",display:"flex",flexDirection:"column",justifyContent:"center",alignItems:"center"}}> <Paper sx={{width:"600px",minHeight:"450px",padding:"10px"}} elevation={5} > <Box className="CardHeader" sx={{mb:3}}> <span onClick={()=>{navigate("/");setSelectedCuisine("")}}><ArrowBackIcon /> </span><Typography variant="h3">{name}</Typography> </Box> <Box sx={{display:"flex",justifyContent:"space-between"}}> <Typography variant="body2" color="text.secondary"> {menu.map(({name})=>`${name}, `)} </Typography> <RatingFormModal restaurantId={selectedRestaurant?.id}/> </Box> <Typography variant="body2" color="text.secondary"> {address} </Typography> <Typography variant="body2" color="text.secondary"> Average Rating:{averageRating} </Typography> <Divider sx={{mb:2,mt:1}}/> <Box className="CardBody"> <Typography variant="h5" sx={{mb:1}}>Reviews</Typography> <Box className="reviewContainer"> { ratings?.map(ratingData=>{ return( <Box className="ratingCard" sx={{width:"100%",height:"70px"}}> <Box className="ratingCardHeader" sx={{display:"flex",justifyContent:"space-between"}}> <span style={{display:"flex"}} > <Avatar alt={ratingData?.revName} src={ratingData?.pp} /> 
<Typography>{ratingData?.revName}</Typography> </span> <Box className="ratingBox" sx={{borderRadius:"7px",height:"30px",width:"35px",backgroundColor:"green",color:"white",textAlign:"center"}}> ⭐{ratingData?.rating} </Box> </Box> <Box className="ratingCardFooter"> <Typography>{ratingData?.comment}</Typography> </Box> <Divider/> </Box> ) }) } </Box> </Box> </Paper> </Box> ); };
import React, { useRef } from 'react'; import './App.css'; import { FocusableInput, FocusableInputRef } from './FocusableInput' export type Props = { initialText: string } const App = () => { const inputRef = useRef<FocusableInputRef>(null) const handleClick = () => { if (inputRef.current) { inputRef.current.focus() } } return ( <div className="App"> <FocusableInput ref={inputRef} initialText="test"/> <button onClick={handleClick}>Set focus</button> </div> ) } export default App;
---
title: Adobe Experience Manager Mobile - Preparazione al RGPD
description: Scopri come Adobe Experience Manager è pronta ad aiutarti con gli obblighi di conformità ai requisiti RGPD.
contentOwner: trushton
exl-id: d06e675f-fb61-47da-85de-e0b50dd44153
source-git-commit: 8b4cb4065ec14e813b49fb0d577c372790c9b21a
workflow-type: tm+mt
source-wordcount: '683'
ht-degree: 0%
---

# AEM Mobile - Preparazione al RGPD {#aem-mobile-gdpr-readiness}

>[!IMPORTANT]
>
>Il RGPD è utilizzato come esempio nelle sezioni seguenti, ma i dettagli coperti sono applicabili a tutte le normative su privacy e protezione dei dati, come RGPD e CCPA.

## Supporto per il RGPD di AEM Mobile {#aem-mobile-gdpr-support}

AEM Mobile è pronta ad assistere i clienti con i loro obblighi di conformità ai requisiti RGPD. In AEM Mobile non vengono memorizzati dati personali. Se hai effettuato il provisioning, puoi accedere ad Adobe Experience Mobile con il tuo Adobe ID.

<!-- [https://aemmobile.adobe.com/signin/index.html](https://aemmobile.adobe.com/signin/index.html) -->

## Adobe Digital Publishing Suite {#adobe-digital-publishing-suite}

Il prodotto per la pubblicazione digitale di Adobe (che precede AEM Mobile) supporta le iniziative Adobe di preparazione al RGPD. Consulta [https://business.adobe.com/privacy/general-data-protection-regulation.html](https://business.adobe.com/privacy/general-data-protection-regulation.html).

Di seguito sono riportate le specifiche sul supporto delle funzioni relative al RGPD nel prodotto di Digital Publishing Suite, incluso come contattare Adobe per avviare le richieste RGPD.

Per evitare di confondere AEM Mobile con il prodotto di Digital Publishing Suite precedente, puoi accedere al prodotto di Digital Publishing Suite facendo clic qui: [https://acrobat.adobe.com/us/en/](https://acrobat.adobe.com/us/en/)

### Avvio di una richiesta RGPD {#initiating-a-gdpr-request}

Contatta l'Assistenza clienti Adobe per avviare una richiesta RGPD per il Digital Publishing Suite.
Per individuare i dati dei clienti sono necessari i seguenti ID. Qualsiasi sottoinsieme ricevuto implica che gli altri ID non erano applicabili a questo utente.

Obbligatorio

* ID contratto del cliente: *dpsc-contractId*

Fornire almeno una delle seguenti informazioni:

* OAuth ID fornito dal cliente dell'utente finale (l'ID utilizzato nel sistema di adesione diretta del cliente): *dpsc-directEntitlementId*
* Per gli utenti dell'app Windows, l'ID App Store dell'utente finale: *dpsc-windowsAppStoreId*
* Indirizzo e-mail utilizzato dall'utente finale per interagire con l'app DPS: *email*

### Domande frequenti {#frequently-asked-questions-faq}

**Adobe elimina i miei acquisti App Store quando si avvia una richiesta DELETE?**

Adobe elimina le informazioni di cui dispone sugli acquisti dell'app store (abbonamenti e così via), ma gli acquisti sono ancora registrati negli app store. Se l'app (utente finale) è registrata nell'App Store, le ricevute vengono prelevate nuovamente e inviate ad Adobe. Successivamente, questi vengono considerati come nuovi acquisti e vengono ripristinati dall'app, con accesso di nuovo.

**Adobe elimina i diritti forniti dal cliente quando si avvia una richiesta DELETE?**

Adobe elimina le informazioni di cui dispone in merito alle quote aggiuntive spettanti direttamente al cliente. Se l'app (utente finale) accede al meccanismo OAuth utilizzato dal cliente, invia informazioni ad Adobe e i servizi raccolgono nuovamente i diritti aggiuntivi.

**Quali sono le aspettative dell'utente finale?**

Poiché la chiave per assegnare i diritti all'app risiede sul dispositivo come parte del software del visualizzatore, l'utente finale deve disinstallare l'app. L'utente finale deve rendersi conto che se reinstalla l'app, gli acquisti esistenti (associati all'utente dell'app store) e le quote di adesione diretta (associate all'utente OAuth del cliente) vengono ancora ripristinati.
**Cosa succede quando un’app viene condivisa tra persone su un dispositivo?** L’Adobe dispone di informazioni minime che vengono associate direttamente a un utente specifico. Associa i dati utilizzando un UUID creato in modo casuale che viene conservato nei dati dell’app e trasmesso in ogni richiesta avviata dall’app. Ciò significa che gli utenti finali che condividono l’app sullo stesso dispositivo utilizzano lo stesso UUID e che tutti i dati sono considerati di proprietà della persona che effettua la richiesta RGPD. Per entrambe le richieste di accesso ed eliminazione, DPSC considera le persone che condividono un’app come un’unica persona. **Quali dati personali vengono tracciati con Analytics?** Nessuno. Sono presenti dati tracciati, ma si trovano a livello di app (non personali). Ciò include eventi come avvii, arresti anomali, chiusura, attività, acquisti o sovrapposizioni di folio. Le posizioni geografiche, i nomi, gli ID dispositivo o gli indirizzi IP non vengono tracciati. **L’utente finale ha fornito le informazioni, ma non è stato trovato nulla. Perché no?** Con l’evoluzione del prodotto di Digital Publishing Suite, le implementazioni dei servizi sono state modificate e più dati sono stati offuscati. Se non sono stati trovati dati utilizzando i dati forniti dall’utente, significa che i dati dell’utente non possono essere tracciati per quella persona. ### Esempio {#example} Contatta l’Assistenza clienti Adobe per avviare una richiesta RGPD. 
Di seguito è riportato un esempio degli input e degli output risultanti da una richiesta RGPD Digital Publishing Suite: #### Input: {#inputs} ``` dpsc-contractId = "12345-1234-12416234" directEntitlementId = "1234-1234-1234" windowsAppStoreId = "testWinAppStoreId" email = "test@what.com" ``` #### Uscite {#outputs} ``` { "jobId": "test-1524750204384", "product": "DPSC", "action": "access", "userIDS": [ { "namespace": "email", "value": "test@what.com" }, { "namespace": "windowsAppStoreId", "value": "testWinAppStoreId" }, { "namespace": "directEntitlementId", "value": "1234-1234-1234" } ], "receiptData": { "recordsFound": 6, "recordsAffected": 0, "tablesModified": 0, "subscriptionsFound": 24, "entitlementsFound": 24 }, "records": { "DPS_Stage_EntitlementUserDevices": [ { "user_id": "testc685-c9ca-4c1e-a11b-07d10ec724cf", "device_id": "appleStore:test1b16-f032-4d9c-9200-0d19999405c4", "account_id": "test@what.com" }, { "user_id": "test967d-5179-4dc6-958c-facd9d94da38", "device_id": "appleStore:test3f07-d5aa-4b32-8fac-b2b690b7ccd7", "account_id": "test@what.com" }, { "user_id": "test1838-6494-4e74-912c-1edf61581d0e", "device_id": "appleStore:test3813-f8cc-49ce-b021-50eb0814a3bb", "account_id": "1234-1234-1234" }, { "user_id": "test5468-1a11-4e4c-be43-274181a9ef81", "device_id": "appleStore:testf082-2783-498d-ab62-b1b2e3eb67ae", "account_id": "1234-1234-1234" } ], "DPS_Stage_EntitlementUsers": [ { "id": "store:test04a7-33a3-4b90-863d-79981ead0f19:appleStore", "part_id": "0", "alias_user_id": "testWinAppStoreId" }, { "id": "internal:testd2da-0606-4444-87ef-0d5a1d4a121d:adobe", "part_id": "0", "user_id": "test@what.com" } ], "DPS_Stage_Subscriptions": [ { "id": "appleStore:testc685-c9ca-4c1e-a11b-07d10ec724cf", "account_id": "test3531-a209-4391-beb9-951c7822244e", "overflow": "test4e59-86a8-4b75-b699-77e980c287ab", "entitlement_count": "12" }, { "id": "appleStore:test5468-1a11-4e4c-be43-274181a9ef81", "account_id": "test491b-379e-4d24-96ef-e481bf8d3062", "overflow": 
"test931b-7422-485d-85a5-134828187b6c", "entitlement_count": "12" } ], "DPS_Stage_Entitlements": [ { "id": "samsungStore:testc685-c9ca-4c1e-a11b-07d10ec724cf", "account_id": "test7332-60a0-42b8-a508-474668d83d2e", "overflow": "test9bc3-94d0-43cf-b132-0629868f7d9d", "entitlement_count": "12" }, { "id": "appleStore:test5468-1a11-4e4c-be43-274181a9ef81", "account_id": "testf766-102a-460d-8f5a-42f7bb9d68b7", "overflow": "test4d96-2197-45a8-9caf-2aa2846b770c", "entitlement_count": "12" } ], "s3Buckets": { "name": "DPS-Entitlements-Overflow", "folder": "stage/", "overflows": { "subscriptions": [ "test4e59-86a8-4b75-b699-77e980c287ab", "test931b-7422-485d-85a5-134828187b6c" ], "entitlements": [ "test9bc3-94d0-43cf-b132-0629868f7d9d", "test4d96-2197-45a8-9caf-2aa2846b770c" ] } } } } ```
import 'package:dio/dio.dart';
import 'package:lettutor/clean_architectures/data/models/token/sign_in_response.dart';
import 'package:injectable/injectable.dart';
import 'package:retrofit/retrofit.dart';

import '../../../models/user/auth_response.dart';

part 'auth_api.g.dart';

/// Retrofit client for the authentication endpoints.
///
/// Request bodies are raw JSON maps built by the caller; responses are wrapped
/// in [HttpResponse] so status codes can be inspected alongside the payload.
@injectable
@RestApi()
abstract class AuthApi {
  // Optional path prefix for auth routes (empty in this deployment).
  static const String branch = "";
  static const String loginApi = "/auth/login";
  static const String refreshTokenApi = "$branch/auth/refresh-token";
  static const String registerApi = "$branch/auth/register";
  static const String logoutApi = "$branch/auth/logout";
  static const String resetPasswordApi = "/user/forgotPassword";
  static const String googleSignInApi = "/auth/google";
  static const String verifyAccountApi = "/auth/verifyAccount";

  @factoryMethod
  factory AuthApi(Dio dio) = _AuthApi;

  /// Exchanges credentials for access/refresh tokens.
  @POST(loginApi)
  Future<HttpResponse<SignInResponse?>> login(
      {@Body() required Map<String, dynamic> body});

  /// Creates a new account; same response shape as [login].
  @POST(registerApi)
  Future<HttpResponse<SignInResponse?>> register(
      {@Body() required Map<String, dynamic> body});

  /// Invalidates the current session server-side.
  @POST(logoutApi)
  Future<HttpResponse<AuthenticateResponse>> logout();

  /// Trades a refresh token (in [body]) for a fresh token pair.
  @POST(refreshTokenApi)
  Future<HttpResponse<AuthenticateResponse>> refreshToken(
      {@Body() required Map<String, dynamic> body});

  /// Starts the forgot-password flow for the e-mail supplied in [body].
  @POST(resetPasswordApi)
  Future<HttpResponse> resetPassword({
    @Body() required Map<String, dynamic> body,
  });

  /// Signs in with a Google OAuth credential supplied in [body].
  @POST(googleSignInApi)
  Future<HttpResponse> googleSignIn({
    @Body() required Map<String, dynamic> body,
  });

  /// Verifies a newly registered account using the token e-mailed to the user.
  ///
  /// Fixed: the token is a query parameter, so it is declared with [Query].
  /// The previous `@GET("$verifyAccountApi?token={value}")` + `@Path` spliced
  /// the raw token into the query string, bypassing retrofit's query handling
  /// and proper URL-encoding of the value.
  @GET(verifyAccountApi)
  Future<HttpResponse> verifyEmailAccount(@Query('token') String token);
}
<script setup>
// Explicit imports: the composition APIs were previously used without an
// import, which only works when unplugin-auto-import is configured.
import { ref, computed, watch } from 'vue'
import { productsInfo } from '../stores/data';
import vue3starRatings from 'vue3-star-ratings'

// Search text typed by the user.
const search = ref('')
// User-facing message shown when no product matches the search.
const error = ref('')

// Products whose name contains the current search text.
const filterProducts = computed(() => {
  return productsInfo.products.filter((product) => product.name.includes(search.value))
})

watch(search, () => {
  if (filterProducts.value.length === 0) {
    error.value = 'محصولی با این نام یافت نشد!'
  } else {
    error.value = ''
  }
})
</script>

<template>
  <main>
    <div m2>
      <input v-model="search" type="text" placeholder="جستجو محصول" class="yzxa65">
    </div>
    <div v-if="error" class="k5x8df">
      <p class="oow231">{{ error }}</p>
    </div>
    <section class="j7783q">
      <!-- key fixed: objects are not valid v-for keys; name is the field the
           search filters on — assumed unique per product (TODO: prefer an id) -->
      <div v-for="product in filterProducts" :key="product.name" class="k4zxjp">
        <img :src="product.img" class="jov6po">
        <div class="lne1fv">
          <h3 pb3>{{ product.name }}</h3>
          <p text-14px>{{ product.description }}</p>
          <vue3starRatings
            v-model="product.rating"
            starSize="18"
            starColor="#ffeb3f"
            inactiveColor="#ccc"
            :disableClick="true"
            :showControl="false"
            class="lgj9tb"
          />
          <div class="o2dewx">
            <button class="f5y0ro">دیدن جزئیات</button>
            <h4>{{ product.price }}</h4>
          </div>
        </div>
        <h6 class="nkvn86">{{ product.status }}</h6>
        <div i-carbon-shopping-cart-plus class="fttsnp" />
      </div>
    </section>
  </main>
</template>

<style scoped>
.yzxa65 { width: 100%; border-width: 1px; border-style: solid; --un-border-opacity: 1; border-color: rgba(209, 213, 219, var(--un-border-opacity)); border-radius: 9999px; padding-top: 0.625rem; padding-bottom: 0.625rem; padding-left: 1.25rem; padding-right: 1.25rem; transition-duration: 250ms; }
.yzxa65:hover { --un-border-opacity: 1; border-color: rgba(156, 163, 175, var(--un-border-opacity)); }
.yzxa65::placeholder { color: rgba(156, 163, 175, 0.7); }
.yzxa65:focus { --un-outline-color-opacity: 1; outline-color: rgba(107, 114, 128, var(--un-outline-color-opacity)); }
.k5x8df { position: relative; height: 100vh; display: flex; align-items: center; justify-content: center; }
.oow231 { position: absolute; top: 35%; --un-text-opacity: 1; color: rgba(248, 113, 113, var(--un-text-opacity)); }
.j7783q { display: grid; margin-top: 0.5rem; margin-bottom: 0.5rem; margin-left: 0.5rem; margin-right: 0.5rem; grid-gap: 0.5rem; gap: 0.5rem; }
.k4zxjp { --un-shadow-inset: var(--un-empty, /*!*/ /*!*/ ); --un-shadow: 0 0 #0000; }
.k4zxjp { position: relative; border-radius: 0.25rem; --un-bg-opacity: 1; background-color: rgba(255, 255, 255, var(--un-bg-opacity)); --un-shadow: var(--un-shadow-inset) 0 1px 3px 0 var(--un-shadow-color, rgba(0, 0, 0, 0.1)), var(--un-shadow-inset) 0 1px 2px -1px var(--un-shadow-color, rgba(0, 0, 0, 0.1)); box-shadow: var(--un-ring-offset-shadow, 0 0 #0000), var(--un-ring-shadow, 0 0 #0000), var(--un-shadow); }
.jov6po { width: 100%; height: 12.5rem; border-top-left-radius: 0.25rem; border-top-right-radius: 0.25rem; object-fit: contain; }
.lne1fv { background-color: rgba(243, 244, 246, 0.5); padding: 0.75rem; padding-top: 2.25rem; }
.lgj9tb { margin: 0rem !important; margin-top: 0.25rem !important; display: flex !important; align-items: flex-start !important; padding: 0rem !important; }
.o2dewx { margin-top: 1.25rem; display: flex; align-items: center; justify-content: space-between; }
.f5y0ro { border-width: 1px; border-style: solid; --un-border-opacity: 1; border-color: rgba(96, 165, 250, var(--un-border-opacity)); border-radius: 9999px; padding-left: 0.75rem; padding-right: 0.75rem; padding-top: 0.375rem; padding-bottom: 0.375rem; line-height: 1.25rem; --un-text-opacity: 1; color: rgba(96, 165, 250, var(--un-text-opacity)); transition-duration: 250ms; }
.f5y0ro:hover { background-color: rgba(191, 219, 254, 0.5); }
.nkvn86 { position: absolute; top: 0.5rem; right: 0.5rem; border-radius: 9999px; padding-left: 0.75rem; padding-right: 0.75rem; padding-top: 0.25rem; padding-bottom: 0.25rem; font-weight: 1; --un-text-opacity: 1; color: rgba(96, 165, 250, var(--un-text-opacity)); }
section div:nth-child(2) .nkvn86 { --un-bg-opacity: 1; background-color: rgba(75, 85, 99, var(--un-bg-opacity)); color: white }
section div:nth-child(3) .nkvn86 { --un-bg-opacity: 1; background-color: rgba(229, 231, 235, var(--un-bg-opacity)); --un-text-opacity: 1; color: rgba(96, 165, 250, var(--un-text-opacity)); }
section div:last-child .nkvn86 { --un-bg-opacity: 1; background-color: rgba(229, 231, 235, var(--un-bg-opacity)); --un-text-opacity: 1; color: rgba(96, 165, 250, var(--un-text-opacity)); }
.fttsnp { position: absolute; top: 12rem; left: 1.25rem; font-size: 2rem; cursor: pointer; --un-bg-opacity: 1; background-color: rgba(156, 163, 175, var(--un-bg-opacity)); transition-duration: 250ms; }
.fttsnp:hover { --un-bg-opacity: 1; background-color: rgba(31, 41, 55, var(--un-bg-opacity)); }
@media (min-width: 640px) { .oow231 { font-size: 24px; } }
@media (min-width: 768px) { .j7783q { grid-template-columns: repeat(2, minmax(0, 1fr)); align-items: center; } }
@media (min-width: 1024px) { .oow231 { font-size: 28px; } .j7783q { grid-template-columns: repeat(3, minmax(0, 1fr)); } }
</style>