text stringlengths 1 1.05M |
|---|
<reponame>zhangyut/wolf<filename>CriminalIntent/app/src/main/java/org/niuzuo/criminalintent/CrimeFragment.java
package org.niuzuo.criminalintent;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.Fragment;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.NavUtils;
import android.text.Editable;
import android.text.TextWatcher;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.EditText;
import android.widget.ImageButton;
import android.widget.ImageView;
import java.util.Date;
import java.util.UUID;
import android.graphics.drawable.BitmapDrawable;
/**
* Created by zdns on 16/5/9.
*/
/**
 * Fragment that displays and edits a single {@link Crime}: its title text,
 * date (via a date-picker dialog), solved checkbox, and an attached photo
 * (captured through {@code CrimeCameraActivity}, previewed in an ImageView).
 *
 * Hosted inside an activity that provides a support FragmentManager.
 */
public class CrimeFragment extends Fragment {
    private static final String TAG = "CrimeFragment";
    /** Fragment-argument key carrying the UUID of the crime to edit. */
    public static final String EXTRA_CRIME_ID = "org.niuzuo.criminalintent.crime_id";
    /** FragmentManager tag for the date-picker dialog. */
    private static final String DIALOG_DATE = "date";
    // Request codes used to distinguish results in onActivityResult().
    private static final int REQUEST_DATE = 0;
    private static final int REQUEST_PHOTO = 1;
    /** FragmentManager tag for the zoomed-image dialog. */
    private static final String DIALOG_IMAGE= "image";

    private Crime mCrime;               // model object being edited
    private EditText mTitleField;
    private Button mDateButton;
    private CheckBox mSolvedCheckBox;
    private ImageButton mPhotoButton;   // launches the camera activity
    private ImageView mPhotoView;       // thumbnail of the attached photo

    /**
     * Preferred factory: packs the crime id into the arguments bundle so it
     * survives fragment re-creation.
     */
    public static CrimeFragment newInstance(UUID crimeId) {
        Bundle args = new Bundle();
        args.putSerializable(EXTRA_CRIME_ID, crimeId);
        CrimeFragment fragment = new CrimeFragment();
        fragment.setArguments(args);
        return fragment;
    }

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Look the crime up in the singleton CrimeLab by the id passed
        // through newInstance().
        UUID crimeId = (UUID)getArguments().getSerializable(EXTRA_CRIME_ID);
        mCrime = CrimeLab.get(getActivity()).getCrime(crimeId);
        setHasOptionsMenu(true);
    }

    /**
     * Marks the hosting activity's result as OK.
     * NOTE(review): not invoked anywhere within this file — confirm callers.
     */
    public void returnResult() {
        getActivity().setResult(Activity.RESULT_OK, null);
    }

    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        View v = inflater.inflate(R.layout.fragment_crime, container, false);
        //getActivity().getActionBar().setDisplayHomeAsUpEnabled(true);

        // Title field: keep the model in sync on every keystroke.
        mTitleField = (EditText) v.findViewById(R.id.crime_title);
        mTitleField.setText(mCrime.getmTitle());
        mTitleField.addTextChangedListener(new TextWatcher() {
            @Override
            public void beforeTextChanged(CharSequence s, int start, int count, int after) {
            }
            @Override
            public void onTextChanged(CharSequence s, int start, int before, int count) {
                mCrime.setmTitle(s.toString());
            }
            @Override
            public void afterTextChanged(Editable s) {
            }
        });

        // Date button: shows the crime's date and opens the picker dialog,
        // which reports back via onActivityResult (REQUEST_DATE).
        mDateButton = (Button) v.findViewById(R.id.crime_date);
        mDateButton.setText(mCrime.getmDate().toString());
        //mDateButton.setEnabled(false);
        mDateButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                FragmentManager fm = getActivity().getSupportFragmentManager();
                DatePickeFragment dialog = DatePickeFragment.newInstance(mCrime.getmDate());
                dialog.setTargetFragment(CrimeFragment.this, REQUEST_DATE);
                dialog.show(fm, DIALOG_DATE);
            }
        });

        // Solved checkbox mirrors the model flag.
        mSolvedCheckBox = (CheckBox)v.findViewById(R.id.crime_solved);
        mSolvedCheckBox.setChecked(mCrime.ismSolved());
        mSolvedCheckBox.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
            @Override
            public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
                mCrime.setmSolved(isChecked);
            }
        });

        // Camera button: results come back via REQUEST_PHOTO.
        mPhotoButton= (ImageButton)v.findViewById(R.id.crime_imageButton);
        mPhotoButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent i = new Intent(getActivity(), CrimeCameraActivity.class);
                startActivityForResult(i, REQUEST_PHOTO);
            }
        });
        // Disable the camera button on devices without any camera hardware.
        PackageManager pm = getActivity().getPackageManager();
        if (!pm.hasSystemFeature(PackageManager.FEATURE_CAMERA) && !pm.hasSystemFeature(PackageManager.FEATURE_CAMERA_FRONT)) {
            mPhotoButton.setEnabled(false);
        }

        // Thumbnail: tapping opens the full image in a dialog fragment.
        mPhotoView = (ImageView) v.findViewById(R.id.crime_imageView);
        mPhotoView.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
                Photo p = mCrime.getmPhoto();
                if (p == null) {
                    return;
                }
                FragmentManager fm = getActivity().getSupportFragmentManager();
                // Photos are stored in the app's private file storage.
                String path = getActivity().getFileStreamPath(p.getFilename()).getAbsolutePath();
                ImageFragment.createInstance(path).show(fm, DIALOG_IMAGE);
            }
        });
        return v;
    }

    @Override
    public void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (resultCode != Activity.RESULT_OK) return;
        if (requestCode == REQUEST_DATE){
            // Date picked: update model and refresh the button label.
            Date date = (Date)data.getSerializableExtra(DatePickeFragment.EXTRA_DATE);
            mCrime.setmDate(date);
            mDateButton.setText(mCrime.getmDate().toString());
        } else if (requestCode == REQUEST_PHOTO) {
            // Photo captured: attach it to the crime and refresh the preview.
            String filename = data.getStringExtra(CrimeCameraFragment.EXTRA_PHOTO_FILENAME);
            if (filename != null) {
                Log.i(TAG, "filename " + filename);
                Photo p = new Photo(filename);
                mCrime.setmPhoto(p);
                showPhoto();
                Log.i(TAG, "crime " + mCrime.getmTitle()+ " has a photo");
            }
        }
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        switch (item.getItemId()) {
            case android.R.id.home:
                // Navigate up only when a parent activity is declared.
                if (NavUtils.getParentActivityName(getActivity()) != null) {
                    NavUtils.navigateUpFromSameTask(getActivity());
                }
                return true;
            default:
                return super.onOptionsItemSelected(item);
        }
    }

    @Override
    public void onPause() {
        super.onPause();
        // Persist edits whenever the fragment loses the foreground.
        CrimeLab.get(getActivity()).saveCrimes();
    }

    private void showPhoto() {
        // (re)set the image button's image based on our photo
        Photo p = mCrime.getmPhoto();
        BitmapDrawable b = null;
        if (p != null) {
            String path = getActivity()
                    .getFileStreamPath(p.getFilename()).getAbsolutePath();
            b = PictureUtils.getScaledDrawable(getActivity(), path);
        }
        mPhotoView.setImageDrawable(b);
    }

    @Override
    public void onStart() {
        super.onStart();
        // Load (or reload) the scaled photo when the fragment becomes visible.
        showPhoto();
    }

    @Override
    public void onStop() {
        super.onStop();
        // Release the bitmap to avoid holding memory while invisible.
        PictureUtils.cleanImageView(mPhotoView);
    }
}
|
# Import pandas library if not already imported
import pandas as pd


def _dict_field(value, key, default):
    """Return ``value[key]`` when value is a mapping holding ``key``.

    Falls back to ``default`` when the entry is missing (KeyError) or the
    value is not subscriptable at all, e.g. NaN/None (TypeError) — the same
    two exceptions the original per-row loops caught.
    """
    try:
        return value[key]
    except (KeyError, TypeError):
        return default


# theme1: unwrap the nested {'theme': ...} dicts; rows without a usable
# dict become the string 'None' (preserving the original behavior).
df2.theme1 = df2.theme1.apply(lambda v: _dict_field(v, 'theme', 'None'))

# list2: unwrap {'list': ...}; rows without the key keep their original
# value (the original loop `continue`d, leaving the cell untouched).
df2.list2 = df2.list2.apply(lambda v: _dict_field(v, 'list', v))

# list1: cleaned the same way as list2 — this completes the placeholder
# loop that previously did nothing (`pass`).
df2.list1 = df2.list1.apply(lambda v: _dict_field(v, 'list', v))
<gh_stars>0
package ff.camaro.files;
/**
 * {@link Files} variant that targets the "multi" configuration.
 */
public class MultiFiles extends Files {

    /**
     * Names the configuration this task operates on.
     * NOTE(review): the semantics of the returned key are defined by the
     * base {@code Files} class (not visible in this file) — confirm there.
     */
    @Override
    protected String getConfiguration() {
        return "multi";
    }
}
|
public static char[] getUniqueCharacters(String str) {
HashSet<Character> set = new HashSet<>();
for (int i = 0; i < str.length(); i++) {
set.add(str.charAt(i));
}
char[] result = new char[set.size()];
int i = 0;
for (Character ch : set) {
result[i] = ch;
i++;
}
return result;
} |
//
// Created by petr on 10.6.18.
//
#ifndef THESIS_CONFIG_H
#define THESIS_CONFIG_H
/**
 * @file
 * @brief Functions related to detector and readout configuration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <katherine/bmc.h>
#ifdef __cplusplus
extern "C" {
#endif

/* Opaque handle; the full definition lives in the device module. */
typedef struct katherine_device katherine_device_t;

/* External trigger configuration (used for both start and stop triggers). */
typedef struct katherine_trigger {
    bool enabled;
    char channel;           /* trigger channel identifier */
    bool use_falling_edge;  /* trigger on falling instead of rising edge */
} katherine_trigger_t;

/* DAC settings, addressable either as a flat 18-entry array or by name.
 * The named struct members map 1:1 onto array[0..17] in declaration order. */
typedef union katherine_dacs {
    uint16_t array[18];
    struct {
        uint16_t Ibias_Preamp_ON;
        uint16_t Ibias_Preamp_OFF;
        uint16_t VPReamp_NCAS;
        uint16_t Ibias_Ikrum;
        uint16_t Vfbk;
        uint16_t Vthreshold_fine;
        uint16_t Vthreshold_coarse;
        uint16_t Ibias_DiscS1_ON;
        uint16_t Ibias_DiscS1_OFF;
        uint16_t Ibias_DiscS2_ON;
        uint16_t Ibias_DiscS2_OFF;
        uint16_t Ibias_PixelDAC;
        uint16_t Ibias_TPbufferIn;
        uint16_t Ibias_TPbufferOut;
        uint16_t VTP_coarse;
        uint16_t VTP_fine;
        uint16_t Ibias_CP_PLL;
        uint16_t PLL_Vcntrl;
    } named;
} katherine_dacs_t;

/* Phase selector; enumerator names suggest powers of two (1..16) — the
 * wire meaning of each value is defined by the firmware protocol. */
typedef enum katherine_phase {
    PHASE_1 = 0,
    PHASE_2 = 1,
    PHASE_4 = 2,
    PHASE_8 = 3,
    PHASE_16 = 4,
} katherine_phase_t;

/* Clock frequency selector (names suggest MHz: 40/80/160). */
typedef enum katherine_freq {
    FREQ_40 = 1,
    FREQ_80 = 2,
    FREQ_160 = 3,
} katherine_freq_t;

/* Acquisition data mode. */
typedef enum katherine_acquisition_mode {
    ACQUISITION_MODE_TOA_TOT = 0,
    ACQUISITION_MODE_ONLY_TOA = 1,
    ACQUISITION_MODE_EVENT_ITOT = 2,
} katherine_acquisition_mode_t;

/* Aggregate configuration applied to a device by katherine_configure(). */
typedef struct katherine_config {
    katherine_bmc_t pixel_config;       /* per-pixel matrix configuration */
    bool seq_readout_start;
    bool fast_vco_enabled;
    katherine_acquisition_mode_t acq_mode;
    unsigned char bias_id;
    double acq_time; // ns
    int no_frames;
    float bias;
    katherine_trigger_t start_trigger;
    bool delayed_start;
    katherine_trigger_t stop_trigger;
    bool gray_enable;
    bool polarity_holes;
    katherine_phase_t phase;
    katherine_freq_t freq;
    katherine_dacs_t dacs;
} katherine_config_t;

/* Apply a full configuration to the device. Returns 0 on success
 * (error convention assumed from the int return type — see library docs). */
int
katherine_configure(katherine_device_t *, const katherine_config_t *);

/* Individual setters mirroring katherine_config_t fields. */
int
katherine_set_all_pixel_config(katherine_device_t *, const katherine_bmc_t *);
int
katherine_set_acq_time(katherine_device_t *, double);
int
katherine_set_acq_mode(katherine_device_t *, katherine_acquisition_mode_t, bool);
int
katherine_set_no_frames(katherine_device_t *, int);
int
katherine_set_bias(katherine_device_t *, unsigned char, float);
int
katherine_set_seq_readout_start(katherine_device_t *, int);
int
katherine_acquisition_setup(katherine_device_t *, const katherine_trigger_t *, bool, const katherine_trigger_t *);

/* Timepix3 sensor register identifiers. */
typedef enum katherine_tpx3_reg {
    TPX3_REG_TEST_PULSE_METHOD = 0,
    TPX3_REG_NUMBER_TEST_PULSES = 1,
    TPX3_REG_OUT_BLOCK_CONFIG = 2,
    TPX3_REG_PLL_CONFIG = 3,
    TPX3_REG_GENERAL_CONFIG = 4,
    TPX3_REG_SLVS_CONFIG = 5,
    TPX3_REG_POWER_PULSING_PATTERN = 6,
    TPX3_REG_SET_TIMER_LOW = 7,
    TPX3_REG_SET_TIMER_MID = 8,
    TPX3_REG_SET_TIMER_HIGH = 9,
    TPX3_REG_SENSE_DAC_SELECTOR = 10,
    TPX3_REG_EXT_DAC_SELECTOR = 11,
} katherine_tpx3_reg_t;

/* Sensor register access and DAC upload. */
int
katherine_set_sensor_register(katherine_device_t *, char, int32_t);
int
katherine_update_sensor_registers(katherine_device_t *);
int
katherine_set_dacs(katherine_device_t *, const katherine_dacs_t *);

#ifdef __cplusplus
}
#endif
#endif //THESIS_CONFIG_H
|
export { default as ExchangeRates } from './ExchangeRates';
|
#!/bin/bash
# Generate a KBase service script (<name>.pl) from the script.tt template.
# Usage: ./this_script <script_name>
set -eu

# Fail early with a usage message instead of producing an empty ".pl" file
# when no script name is given (the original expanded an unset $1).
if [ $# -lt 1 ]; then
    echo "Usage: $(basename "$0") <script_name>" >&2
    exit 1
fi

kb_script=$1
kb_author="Thomas Brettin"

# Quote all expansions so names with spaces survive word splitting.
tpage --define kb_script="$kb_script" --define kb_author="$kb_author" script.tt > "${kb_script}.pl"
|
// Doxygen-generated navigation data for ClMinimumWorkload.hpp:
// each entry is [display name, target page/anchor, sub-page id or null].
// Generated file — do not edit by hand.
var _cl_minimum_workload_8hpp =
[
    [ "ClMinimumWorkload", "classarmnn_1_1_cl_minimum_workload.xhtml", "classarmnn_1_1_cl_minimum_workload" ],
    [ "ClMinimumWorkloadValidate", "_cl_minimum_workload_8hpp.xhtml#a8c04c8e796a4fbec706df42ed9c27e4e", null ]
];
#!/bin/bash
# Build the ratchetdesigns/wasm-pack Docker image, pinning the Node and
# wasm-pack versions baked into the image and used for the tag.
set -euo pipefail

node_version='14.17.4-r0'
wasm_pack_version='0.10.0'

docker build . \
    --build-arg "NODE_VERSION=${node_version}" \
    --build-arg "WASM_PACK_VERSION=${wasm_pack_version}" \
    -t "ratchetdesigns/wasm-pack:${wasm_pack_version}"
|
<gh_stars>1-10
# Copyright 2016, BlackBerry Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implementation of Provider class.
#
class Chef
  class Provider
    #
    # Chef provider backing the +one_vnet+ resource: creates, deletes and
    # reserves OpenNebula virtual networks through the provisioning driver.
    #
    class OneVnet < Chef::Provider::LWRPBase
      use_inline_resources

      provides :one_vnet

      # Last vnet looked up by #exists? (nil when not found).
      attr_reader :current_vnet

      def action_handler
        @action_handler ||= Chef::Provisioning::ChefProviderActionHandler.new(self)
      end

      # Looks a vnet up by the given filter (e.g. :name or :id) and caches
      # it in @current_vnet. Returns true when it exists.
      def exists?(filter)
        new_driver = driver
        @current_vnet = new_driver.one.get_resource(:virtualnetwork, filter)
        Chef::Log.debug("VNET '#{filter}' exists: #{!@current_vnet.nil?}")
        !@current_vnet.nil?
      end

      # Create a vnet from a template file, appending a NAME attribute.
      # Idempotent: reports up-to-date when a vnet of that name exists.
      action :create do
        fail "Missing attribute 'template_file'" unless @new_resource.template_file
        fail "Missing attribute 'cluster_id'" unless @new_resource.cluster_id
        if exists?(:name => @new_resource.name)
          action_handler.report_progress "vnet '#{@new_resource.name}' already exists - (up to date)"
        else
          action_handler.perform_action "created vnet '#{@new_resource.name}' from '#{@new_resource.template_file}'" do
            template_str = ::File.read(@new_resource.template_file) + "\nNAME=\"#{@new_resource.name}\""
            vnet = new_driver.one.allocate_vnet(template_str, @new_resource.cluster_id)
            Chef::Log.debug(template_str)
            fail "failed to create vnet '#{@new_resource.name}': #{vnet.message}" if OpenNebula.is_error?(vnet)
            new_driver.one.chmod_resource(vnet, new_resource.mode)
            @new_resource.updated_by_last_action(true)
          end
        end
      end

      # Delete a vnet matched by both id and name.
      action :delete do
        if exists?(:id => @new_resource.vnet_id, :name => @new_resource.name)
          action_handler.perform_action "deleted vnet '#{new_resource.name}' (#{@current_vnet.id})" do
            rc = @current_vnet.delete
            fail "failed to delete vnet '#{@new_resource.name}': #{rc.message}" if OpenNebula.is_error?(rc)
            @new_resource.updated_by_last_action(true)
          end
        else
          action_handler.report_progress "vnet '#{new_resource.name}' does not exists - (up to date)"
        end
      end

      # Reserve an address range from a parent network. When a vnet of the
      # requested name already exists, verify it matches the requested
      # ar_id/size instead of reserving again.
      action :reserve do
        fail "Missing attribute 'network'" unless @new_resource.network
        if exists?(:name => @new_resource.name)
          hash = @current_vnet.to_hash
          # AR_POOL may be a single hash or an array; normalize to an array.
          ar_pool = [hash['VNET']['AR_POOL']].flatten
          Chef::Log.debug(@current_vnet.to_hash)
          same = false
          if @new_resource.ar_id && @new_resource.ar_id > -1
            ar_pool.each do |ar|
              same = true if ar['AR']['AR_ID'] == @new_resource.ar_id.to_s && ar['AR']['SIZE'].to_i == @new_resource.size
            end
          else
            same = ar_pool[0]['AR']['SIZE'].to_i == @new_resource.size
          end
          fail "vnet '#{@new_resource.name}' exists with different configuration" unless same
          action_handler.report_progress "vnet '#{@new_resource.name}' already exists - (up to date)"
        else
          # NOTE: this exists? call repopulates @current_vnet with the
          # PARENT network, which the reserve below is issued against.
          fail "parent network '#{@new_resource.network}' does not exist" unless exists?(:id => @new_resource.network)
          action_handler.perform_action "reserved vnet '#{@new_resource.name}'" do
            rc = @current_vnet.reserve(@new_resource.name, @new_resource.size.to_s, @new_resource.ar_id.to_s, @new_resource.mac_ip, nil)
            fail "Failed to reserve new vnet in network (#{@new_resource.network}): #{rc.message}" if OpenNebula.is_error?(rc)
            @new_resource.updated_by_last_action(true)
          end
        end
      end

      protected

      # Resolve the provisioning driver, refusing cross-driver moves.
      def driver
        if current_driver && current_driver.driver_url != new_driver.driver_url
          fail "Cannot move '#{machine_spec.name}' from #{current_driver.driver_url} to #{new_driver.driver_url}: machine moving is not supported. Destroy and recreate."
        end
        fail "Driver not specified for one_vnet #{new_resource.name}" unless new_driver
        new_driver
      end

      def new_driver
        run_context.chef_provisioning.driver_for(new_resource.driver)
      end

      def current_driver
        run_context.chef_provisioning.driver_for(run_context.chef_provisioning.current_driver) if run_context.chef_provisioning.current_driver
      end
    end
  end
end
|
import numpy as np
import datetime
from ...core.toolbox import dir_interval,get_increment,get_number_of_loops,do_occurence
from ...core.make_table import create_table
from ...core.toolbox import display_message
def _stat_row(label, statf, values, drr_values, min_occ):
    """Build one table row: `label` followed by one cell per entry of statf.

    statf entries are interpreted as follows (unchanged from the original):
      - 'n'             -> count of non-NaN samples
      - any other str   -> NaN-aware numpy reduction of that name
                           (e.g. 'mean' -> np.nanmean)
      - list of numbers -> percentiles of the data
      - anything else   -> "main direction" occurrence summary; the cell is
                           omitted entirely when drr_values is None
    """
    row = [label]
    for stat in statf:
        if stat == 'n':
            row.append('%.2f' % len(values[~np.isnan(values)]))
        elif isinstance(stat, str):
            fct = getattr(np, 'nan' + stat)
            row.append('%.2f' % fct(values))
        elif isinstance(stat, list):
            perc = list(np.nanpercentile(values, stat))
            row += ['%.2f' % x for x in perc]
        elif drr_values is not None:
            occ = do_occurence(drr_values, min_occ)
            row.append(', '.join(occ))
    return row


def _season_masks(hem, month, year):
    """Return (masks, names) for the seasonal grouping selected by `hem`."""
    if hem == 'South hemisphere(Summer/Winter)':
        seas = [((month <= 3) | (month >= 10))]      # Summer: October to March
        seas.append(((month >= 4) & (month <= 9)))   # Winter: April to September
        names = ['Summer', 'Winter']
    elif hem == 'South hemisphere 4 seasons':
        seas = [(month >= 6) & (month <= 8)]         # winter
        seas.append((month >= 9) & (month <= 11))    # spring
        seas.append((month >= 12) | (month <= 2))    # summer
        seas.append((month >= 3) & (month <= 5))     # autumn
        names = ['Winter', 'Spring', 'Summer', 'Autumn']
    elif hem == 'North hemishere(Summer/Winter)':
        # NOTE: the 'hemishere' typo is kept on purpose -- callers select
        # this grouping by passing the exact (misspelled) string.
        seas = [(month >= 4) & (month <= 9)]         # Winter: April to September
        seas.append((month <= 3) | (month >= 10))    # Summer: October to March
        names = ['Summer', 'Winter']
    elif hem == 'North hemisphere moosoon(SW,NE,Hot season)':
        seas = [(month >= 5) & (month <= 10)]        # SW: May to Oct
        seas.append((month <= 2) | (month >= 11))    # NE: Nov to Feb
        seas.append((month == 3) | (month == 4))     # Hot: March and April
        names = ['SW monsoon', 'NE monsoon', 'Hot season']
    elif hem == 'North hemisphere 4 seasons':
        seas = [(month >= 12) | (month <= 2)]        # winter
        seas.append((month >= 3) & (month <= 5))     # spring
        seas.append((month >= 6) & (month <= 8))     # summer
        seas.append((month >= 9) & (month <= 11))    # autumn
        names = ['Winter', 'Spring', 'Summer', 'Autumn']
    elif hem == 'Yearly':
        seas = []
        names = []
        for y in np.unique(year):
            seas.append(year == y)
            names.append('%i' % y)
    else:
        # The original silently fell through and crashed later with a
        # NameError; fail loudly with a useful message instead.
        raise ValueError('unknown season grouping: %r' % (hem,))
    return seas, names


def do_stats(time, statf, data, drr, hem, filename, sheetname, min_occ):
    """Write monthly / seasonal / total statistics of `data` to a spreadsheet.

    time      : datetime-like index exposing .year and .month arrays
    statf     : list of statistic specifiers (see _stat_row)
    data      : sample values; NaNs are ignored by every statistic
    drr       : direction series, or a plain string when direction data
                is not available
    hem       : season-grouping selector (see _season_masks)
    filename, sheetname : forwarded to create_table
    min_occ   : minimum occurrence threshold for the direction summary
    """
    year = time.year
    month = time.month
    have_dir = not isinstance(drr, str)

    # Header row: one caption per statistic.
    header = ['']
    for stat in statf:
        if isinstance(stat, str):
            header.append(stat)
        elif isinstance(stat, list):
            header += ['P' + str(p) for p in stat]
        else:
            header.append('Main direction')
    mat = [header]

    # Monthly rows (only months actually present in the record).
    for mo in range(1, 13):
        idx = month == mo
        if any(idx):
            label = datetime.date(1900, mo, 1).strftime('%B')
            drr_values = drr[idx].values if have_dir else None
            mat.append(_stat_row(label, statf, data[idx], drr_values, min_occ))

    # Seasonal (or per-year) rows.
    seas, sea_names = _season_masks(hem, month, year)
    for name, idx in zip(sea_names, seas):
        if any(idx):
            drr_values = drr[idx].values if have_dir else None
            mat.append(_stat_row(name, statf, data[idx], drr_values, min_occ))

    # Grand total over the whole record.
    drr_values = drr.values if have_dir else None
    mat.append(_stat_row('Total', statf, data, drr_values, min_occ))

    create_table(filename, sheetname, np.array(mat))
|
// sc:
// https://ru.hexlet.io/courses/js-polymorphism/lessons/breaking-polymorphism/exercise_unit
// helpers.js
// Реализуйте и экспортируйте по умолчанию функцию getGreeting(user), которая возвращает
// приветствие для пользователя. Это приветствие показывается пользователю на сайте. Если
// пользователь гость, то выводится "Nice to meet you Guest!", если не гость, то "Hello
// <Имя>!", где "<Имя>" это имя реального пользователя.
// В этой задаче, способ решения остается на ваше усмотрение. Используйте знания
// полученные в этом курсе.
// import Guest from '../Guest.js';
// import User from '../User.js';
// import getGreeting from '../helpers.js';
// const guest = new Guest();
// getGreeting(guest); // 'Nice to meet you Guest!'
// const user = new User('Petr');
// getGreeting(user); // 'Hello Petr!'
// Подсказки
// Изучите тесты
// helpers.js
// Dispatch greeting construction on the user's type name, so adding a new
// user type only requires a new entry here — no conditionals at call sites.
const mapping = {
  guest: (someone) => `Nice to meet you ${someone.getName()}!`,
  user: (someone) => `Hello ${someone.getName()}!`,
};

// Returns the greeting appropriate for the given user object.
const getGreeting = (someUser) => {
  const greeter = mapping[someUser.getTypeName()];
  return greeter(someUser);
};

export default getGreeting;
// Guest.js
/**
 * Anonymous visitor. Exposes the fixed display name 'Guest' and the type
 * tag 'guest' consumed by getGreeting's dispatch table.
 */
export default class Guest {
  constructor() {
    this.name = 'Guest';
    this.typeName = 'guest';
  }

  getName() {
    return this.name;
  }

  getTypeName() {
    return this.typeName;
  }
}
// User.js
/**
 * Registered user. Carries the real user's name and the type tag 'user'
 * consumed by getGreeting's dispatch table.
 */
export default class User {
  constructor(name) {
    this.name = name;
    this.typeName = 'user';
  }

  getName() {
    return this.name;
  }

  getTypeName() {
    return this.typeName;
  }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package brooklyn.util.internal.ssh.process;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import brooklyn.util.config.ConfigBag;
import brooklyn.util.internal.ssh.ShellToolAbstractTest;
/**
* Test the operation of the {@link ProcessTool} utility class.
*/
public class ProcessToolIntegrationTest extends ShellToolAbstractTest {

    /** Supplies the tool under test to the inherited test scaffolding. */
    @Override
    protected ProcessTool newUnregisteredTool(Map<String,?> flags) {
        return new ProcessTool(flags);
    }

    // ones here included as *non*-integration tests. must run on windows and linux.
    // (also includes integration tests from parent)

    /** Smoke-test: a trivial script runs and its stdout is captured. */
    @Test(groups="UNIX")
    public void testPortableCommand() throws Exception {
        String out = execScript("echo hello world");
        assertTrue(out.contains("hello world"), "out="+out);
    }

    /**
     * Verifies PROP_LOGIN_SHELL toggles whether commands run in a bash
     * login shell, using bash's own `shopt -q login_shell` as the probe.
     */
    @Test(groups="Integration")
    public void testLoginShell() {
        // this detection scheme only works for commands; can't test whether it works for scripts without
        // requiring stuff in bash_profile / profile / etc, which gets hard to make portable;
        // it is nearly the same code path on the impl so this is probably enough
        final String LOGIN_SHELL_CHECK = "shopt -q login_shell && echo 'yes, login shell' || echo 'no, not login shell'";
        ConfigBag config = ConfigBag.newInstance().configure(ProcessTool.PROP_NO_EXTRA_OUTPUT, true);
        String out;

        // Default: not a login shell.
        out = execCommands(config, Arrays.asList(LOGIN_SHELL_CHECK), null);
        Assert.assertEquals(out.trim(), "no, not login shell", "out = "+out);

        // With the flag set: a login shell.
        config.configure(ProcessTool.PROP_LOGIN_SHELL, true);
        out = execCommands(config, Arrays.asList(LOGIN_SHELL_CHECK), null);
        Assert.assertEquals(out.trim(), "yes, login shell", "out = "+out);
    }
}
|
/**
 * Demonstrates even/odd classification of an integer.
 */
public class EvenOdd {

    /**
     * Returns true when {@code number} is even (divisible by 2).
     * Works for negative values too, since {@code n % 2 == 0} holds for
     * every even int regardless of sign.
     */
    public static boolean isEven(int number) {
        return number % 2 == 0;
    }

    public static void main(String[] args) {
        int number = 4;
        // Same output as before; the check is now reusable and testable.
        if (isEven(number)) {
            System.out.println("The number is even.");
        } else {
            System.out.println("The number is odd.");
        }
    }
}
"""Module to help to create Integration tests."""
from unittest.mock import Mock
from kytos.core import Controller
from kytos.core.config import KytosConfig
from kytos.core.connection import Connection, ConnectionState
from kytos.core.interface import Interface
from kytos.core.switch import Switch
def get_controller_mock():
    """Return a Kytos Controller built from daemon options, with a mocked log."""
    daemon_options = KytosConfig().options['daemon']
    mocked_controller = Controller(daemon_options)
    mocked_controller.log = Mock()
    return mocked_controller
def get_switch_mock(of_version, connection_state=ConnectionState.NEW,
                    dpid="00:00:00:00:00:00:00:01"):
    """Return a Switch whose connection is assembled from mocked pieces."""
    mocked_switch = Switch(dpid)
    # Address, port and socket are irrelevant to the tests; mock them all.
    mocked_switch.connection = Connection(Mock(), Mock(), Mock())
    mocked_switch.connection.protocol.version = of_version
    mocked_switch.connection.state = connection_state
    return mocked_switch
def get_interface_mock(interface_name, port, *args, **kwargs):
    """Return an Interface attached to a mocked OpenFlow 1.3 switch."""
    # BUG FIX: the original did `switch = get_switch_mock` (no call),
    # binding the function object itself instead of a Switch instance.
    # Call it, requesting OF 1.3 (0x04) to match the version set below.
    switch = get_switch_mock(0x04)
    # Replace the real Connection with a plain Mock, as before.
    switch.connection = Mock()
    switch.connection.protocol.version = 0x04
    iface = Interface(interface_name, port, switch, *args, **kwargs)
    return iface
|
<filename>project app/assets/data/db.js
const mongoose = require("mongoose");

// Default connection string for the local project database.
const DB_URL = "mongodb://localhost:27017/data-camp-project";

/**
 * Open a mongoose connection.
 * @param {string} url  connection string (defaults to DB_URL)
 * @param {object} opts extra mongoose options; the fixed flags below win
 *                      on conflict because they are spread last
 * @returns {Promise} resolves once the connection is established
 */
function connect(url = DB_URL, opts = {}) {
  const options = {
    ...opts,
    useCreateIndex: true,
    useNewUrlParser: true,
    useFindAndModify: false,
    useUnifiedTopology: true,
  };
  return mongoose.connect(url, options);
}

module.exports = connect;
|
# .bashrc
# User specific aliases and functions
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# Source global definitions
if [ -f /etc/bashrc ]; then
    . /etc/bashrc
fi
# add the content of this file to your .bashrc and you can use the command as you want
# Author:zhangzju@github
# Updated:2017-04-05
# get pid of a container
alias docker-pid="sudo docker inspect --format '{{.State.Pid}}'"
# get ip of a container
alias docker-ip="sudo docker inspect --format '{{ .NetworkSettings.IPAddress }}'"
# get the daemon process pid
# FIX: use single quotes so $(pidof dockerd) is expanded each time the
# alias runs; the original double quotes expanded it once when .bashrc was
# sourced, freezing a stale (or empty) pid into the alias definition.
alias docker-dpid='sudo echo $(pidof dockerd)'
# check whether Docker is running, only for ubuntu16.04 or higher
alias docker-status="sudo systemctl is-active docker"
# enter to a container,the implementation refs from https://github.com/jpetazzo/nsenter/blob/master/docker-enter
# Enter a running container's namespaces via nsenter.
# Usage: docker-enter CONTAINER [COMMAND [ARG]...]
function docker-enter() {
    #if [ -e $(dirname "$0")/nsenter ]; then
    #Change for centos bash running
    # NOTE(review): the single quotes make '$0' literal, so this test looks
    # for a file under a directory named "$0" and is effectively always
    # false — deliberate per the comment above, but confirm on boot2docker.
    if [ -e $(dirname '$0')/nsenter ]; then
        # with boot2docker, nsenter is not in the PATH but it is in the same folder
        NSENTER=$(dirname "$0")/nsenter
    else
        # if nsenter has already been installed with path notified, here will be clarified
        NSENTER=$(which nsenter)
        #NSENTER=nsenter
    fi
    [ -z "$NSENTER" ] && echo "WARN Cannot find nsenter" && return
    if [ -z "$1" ]; then
        echo "Usage: `basename "$0"` CONTAINER [COMMAND [ARG]...]"
        echo ""
        echo "Enters the Docker CONTAINER and executes the specified COMMAND."
        echo "If COMMAND is not specified, runs an interactive shell in CONTAINER."
    else
        # Resolve the container's init pid, then join its namespaces.
        PID=$(sudo docker inspect --format "{{.State.Pid}}" "$1")
        if [ -z "$PID" ]; then
            echo "WARN Cannot find the given container"
            return
        fi
        shift
        OPTS="--target $PID --mount --uts --ipc --net --pid"
        if [ -z "$1" ]; then
            # No command given.
            # Use su to clear all host environment variables except for TERM,
            # initialize the environment variables HOME, SHELL, USER, LOGNAME, PATH,
            # and start a login shell.
            #sudo $NSENTER "$OPTS" su - root
            sudo $NSENTER --target $PID --mount --uts --ipc --net --pid su - root
        else
            # Use env to clear all host environment variables.
            #sudo $NSENTER --target $PID --mount --uts --ipc --net --pid env -i $@
            sudo $NSENTER --target $PID --mount --uts --ipc --net --pid su -l -c "$@"
        fi
    fi
}
# update the docker version
# - no argument  : plain apt update/upgrade
# - "f"          : full (re)install of apt-transport-https and lxc-docker
# - anything else: upgrade only the lxc-docker package
function docker-update(){
    # FIX: the original used `[ -e $1]` (missing space; -e is a FILE test,
    # and with an empty $1 the test degenerates to a non-empty-string check
    # on "-e", i.e. always true) and `[ "$1"="f" ]` (one non-empty word,
    # also always true). Use -z / = with proper spacing instead.
    if [ -z "$1" ]; then
        sudo apt-get update
        sudo apt-get upgrade -y
    elif [ "$1" = "f" ]; then
        sudo apt-get install apt-transport-https -y
        sudo apt-get install -y lxc-docker
    else
        # FIX: `apt-get update -y lxc-docker` is not valid apt syntax;
        # upgrade just the docker package instead.
        sudo apt-get install --only-upgrade -y lxc-docker
    fi
}
# @Desc: Display the addresses of all containers on the specified network
# @Version: 1.0.0
# @Author: jiajunwei <login_532_gajun@sina.com>
# @UpdateDate: 2019/01/03
# @Usage: shell>docker-ipall network_name
# @Parameter: network_name
# @Return: None
function docker-ipall() {
    local network=$1
    # No network given: enumerate every docker network by name.
    if [ -z $network ];then
        network=`docker network ls | grep -v ^NET | awk '{print $2}'`
    fi
    for net in $network;do
        # Print the network banner with its IPAM config, brackets stripped.
        echo -e "\033[32m<Network:$net,$(docker network inspect -f "{{json .IPAM.Config}}" $net | sed 's@[][{}""]@@g')>\033[0m"
        if [[ $net != "none" ]];then
            # Normal networks: list container name + IPv4 straight from inspect.
            docker inspect -f '{{range .Containers }}{{if ne "null" .Name}}{{println .Name .IPv4Address}}{{end}}{{end}}' $net | egrep -iv "^[[:space:]]*($|#)"
        else
            #arg_ip="ifconfig | grep "cast.[0-9]" | head -1 | awk '{print $2}' | sed 's@addr:@@g'"
            #arg_mask=ifconfig | grep "cast.[0-9]" | head -1 | grep -o "[mM]ask.[0-9.]\+\w" | sed 's@[mM]ask.@@'
            # "none" network: inspect reports no address, so enter each
            # container (docker-enter) and read its own `ip addr` output.
            get_container_name="docker inspect -f '{{range .Containers }}{{if ne \"null\" .Name}}{{println .Name}}{{end}}{{end}}' $net | egrep -iv \"^[[:space:]]*($|#)\""
            container_num=$(echo $get_container_name | bash 2>/dev/null | wc -l)
            if [[ $container_num -gt 0 ]];then
                for name in `echo $get_container_name | bash`;do
                    echo "$name $(docker-enter $name ip addr show | grep 'inet[[:space:]]\+' | awk '{if($NF!="lo")print $2,$NF}' | head -1)"
                done
            fi
        fi
    done
}
# @Desc: Display volume information for all running containers
# @Version: 1.0.0
# @Author: jiajunwei <login_532_gajun@sina.com>
# @UpdateDate: 2019/01/12
# @Usage: shell>docker-volinfo
# @Parameter: None
# @Return: None
function docker-volinfo() {
    # docker container inspect -f '{{ .Mounts}}' $(docker ps -q) | cut -c 3- | cut -d " " -f 1 | sort -u
    # Partition running containers by the type of their first mount
    # ("volume" vs "bind"), then print each group with its mount details.
    mnt_type_bind=()
    mnt_type_vol=()
    for name in $(docker ps | awk '{print $NF}' | sed -n '2,$p');do
        if [[ `docker container inspect -f '{{ .Mounts}}' $name | cut -c 3- | cut -d " " -f 1` == "volume" ]];then
            mnt_type_vol=("${mnt_type_vol[*]}" "$name")
        elif [[ `docker container inspect -f '{{ .Mounts}}' $name | cut -c 3- | cut -d " " -f 1` == "bind" ]];then
            mnt_type_bind=("${mnt_type_bind[*]}" "$name")
        fi
    done
    # Bind mounts: show the host-side source paths.
    echo "mnt_type = bind:"
    for name in ${mnt_type_bind[*]};do echo -e -n " - $name `docker container inspect -f '{{range .Mounts}}{{print .Source ","}}{{end}}' $name`\n";done
    echo
    # Named volumes: show the volume names.
    echo "mnt_type = volume:"
    for name in ${mnt_type_vol[*]};do echo -e -n " - $name `docker container inspect -f '{{range .Mounts}}{{print .Name ","}}{{end}}' $name`\n";done
}
# kill all the container which is running
alias docker-kill='docker kill $(docker ps -a -q)'
# del all the stopped container
alias docker-cleanc='docker rm $(docker ps -a -q)'
# del all the dangling images
alias docker-cleani='docker rmi $(docker images -q -f dangling=true)'
# both the effects below
# FIX: the original expanded `dockercleanc || true && dockercleani`,
# names (without the hyphen) that are defined nowhere in this file, so the
# combined alias could never work. Reference the real alias names.
alias docker-clean='docker-cleanc || true && docker-cleani'
|
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
load $BATS_TEST_DIRNAME/helper/query-server-common.bash
setup() {
setup_common
TMPDIRS=$(pwd)/tmpdirs
init_helper $TMPDIRS
cd $TMPDIRS
}
init_helper() {
TMPDIRS=$1
mkdir -p "${TMPDIRS}/dbs1"
for i in {1..3}; do
mkdir "${TMPDIRS}/dbs1/repo${i}"
cd "${TMPDIRS}/dbs1/repo${i}"
dolt init
mkdir -p "${TMPDIRS}/rem1/repo${i}"
dolt remote add remote1 "file://../../rem1/repo${i}"
done
}
clone_helper() {
TMPDIRS=$1
mkdir -p "${TMPDIRS}/dbs2"
for i in {1..3}; do
cd $TMPDIRS
if [ -f "rem1/repo${i}/manifest" ]; then
dolt clone "file://./rem1/repo${i}" "dbs2/repo${i}"
cd "dbs2/repo${i}"
dolt remote add remote1 "file://../../rem1/repo${i}"
fi
done
cd $TMPDIRS
}
push_helper() {
TMPDIRS=$1
for i in {1..3}; do
cd "${TMPDIRS}/dbs1/repo${i}"
dolt push remote1 main
done
cd $TMPDIRS
}
teardown() {
stop_sql_server
teardown_common
rm -rf $TMPDIRS
cd $BATS_TMPDIR
dolt config --list | awk '{ print $1 }' | grep sqlserver.global | xargs dolt config --global --unset
}
@test "replication-multidb: load global vars" {
dolt config --global --add sqlserver.global.dolt_replicate_to_remote remote1
cd dbs1/repo1
dolt config --local --add sqlserver.global.dolt_replicate_to_remote unknown
cd ../..
run dolt sql --multi-db-dir=dbs1 -b -q "select @@GLOBAL.dolt_replicate_to_remote"
[ "$status" -eq 0 ]
[[ "$output" =~ "remote1" ]] || false
}
@test "replication-multidb: push on sqlengine commit" {
dolt config --global --add sqlserver.global.dolt_replicate_to_remote remote1
dolt sql --multi-db-dir=dbs1 -b -q "use repo1; create table t1 (a int primary key)"
dolt sql --multi-db-dir=dbs1 -b -q "use repo1; select dolt_commit('-am', 'cm')"
clone_helper $TMPDIRS
run dolt sql --multi-db-dir=dbs2 -b -q "use repo1; show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 4 ]
[[ "$output" =~ "t1" ]] || false
}
@test "replication-multidb: pull on read" {
push_helper $TMPDIRS
dolt sql --multi-db-dir=dbs1 -b -q "use repo1; create table t1 (a int primary key)"
dolt sql --multi-db-dir=dbs1 -b -q "use repo1; select dolt_commit('-am', 'cm')"
clone_helper $TMPDIRS
push_helper $TMPDIRS
dolt config --global --add sqlserver.global.dolt_read_replica_remote remote1
dolt config --global --add sqlserver.global.dolt_replicate_heads main
run dolt sql --multi-db-dir=dbs2 -b -q "use repo1; show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 4 ]
[[ "$output" =~ "t1" ]] || false
}
@test "replication-multidb: missing database config" {
    # With a bogus replication remote configured, writes should fail with a
    # clean error message -- never a panic.
    dolt config --global --add sqlserver.global.dolt_replicate_to_remote unknown
    run dolt sql --multi-db-dir=dbs1 -b -q "use repo1; create table t1 (a int primary key)"
    [ "$status" -eq 1 ]
    [[ ! "$output" =~ "panic" ]] || false
    [[ "$output" =~ "remote not found: 'unknown'" ]] || false
}
@test "replication-multidb: missing database config quiet warning" {
    # dolt_skip_replication_errors=1 downgrades the bad-remote failure to a
    # warning, so the statement that fails in the previous test must succeed.
    dolt config --global --add sqlserver.global.dolt_replicate_to_remote unknown
    dolt config --global --add sqlserver.global.dolt_skip_replication_errors 1
    dolt sql --multi-db-dir=dbs1 -b -q "use repo1; create table t1 (a int primary key)"
}
@test "replication-multidb: sql-server push on commit" {
    # Same as the sqlengine push test, but driven through a running sql-server.
    dolt config --global --add sqlserver.global.dolt_replicate_to_remote remote1
    cd dbs1
    start_multi_db_server repo1
    cd ..
    server_query repo1 1 "create table t1 (a int primary key)"
    multi_query repo1 1 "select dolt_commit('-am', 'cm')"
    clone_helper $TMPDIRS
    run dolt sql --multi-db-dir=dbs2 -b -q "use repo1; show tables" -r csv
    [ "$status" -eq 0 ]
    # exact line count pins the output shape (no unexpected tables)
    [ "${#lines[@]}" -eq 4 ]
    [[ "$output" =~ "t1" ]] || false
}
@test "replication-multidb: sql-server pull on read" {
    # Seed the remotes with t1, enable read-replica mode, then start a
    # sql-server and verify the first read sees the replicated table.
    push_helper $TMPDIRS
    dolt sql --multi-db-dir=dbs1 -b -q "use repo1; create table t1 (a int primary key)"
    dolt sql --multi-db-dir=dbs1 -b -q "use repo1; select dolt_commit('-am', 'cm')"
    clone_helper $TMPDIRS
    push_helper $TMPDIRS
    dolt config --global --add sqlserver.global.dolt_read_replica_remote remote1
    dolt config --global --add sqlserver.global.dolt_replicate_heads main
    cd dbs1
    start_multi_db_server repo1
    server_query repo1 1 "show tables" "Table\nt1"
}
|
from flask import Flask, request, jsonify
import json
import os
app = Flask(__name__)
data_file = 'books.json'
def load_books():
    """Read the book store from ``data_file``.

    Returns the parsed dict (expected shape: ``{"books": [...]}``). If the
    file is missing, unreadable or contains invalid JSON, an empty store is
    returned so request handlers degrade gracefully instead of raising 500s.
    """
    if os.path.exists(data_file):
        try:
            with open(data_file, 'r') as file:
                return json.load(file)
        except (ValueError, OSError):
            # Corrupt or unreadable file: fall back to an empty store.
            return {"books": []}
    return {"books": []}
def save_books(books):
    # Persist the whole store dict to disk as pretty-printed JSON.
    # NOTE(review): no file locking -- concurrent requests may interleave
    # writes; acceptable for a single-user demo, verify before production use.
    with open(data_file, 'w') as file:
        json.dump(books, file, indent=2)
@app.route('/books', methods=['GET'])
def get_books():
    """GET /books -- return the full list of books as JSON."""
    return jsonify(load_books()["books"])
@app.route('/books/<int:id>', methods=['GET'])
def get_book(id):
    """GET /books/<id> -- return one book, or a 404 error payload."""
    for candidate in load_books()["books"]:
        if candidate["id"] == id:
            return jsonify(candidate)
    return jsonify({"error": "Book not found"}), 404
@app.route('/books', methods=['POST'])
def add_book():
    """POST /books -- append the JSON body as a new book.

    Assigns the next free integer id (max existing id + 1, or 1 for an
    empty store) and returns the created record with status 201. A missing
    or non-object body is rejected with 400 instead of crashing.
    """
    store = load_books()
    new_book = request.json
    if not isinstance(new_book, dict):
        return jsonify({"error": "Request body must be a JSON object"}), 400
    existing = store["books"]
    new_book["id"] = max(b["id"] for b in existing) + 1 if existing else 1
    existing.append(new_book)
    save_books(store)
    return jsonify(new_book), 201
@app.route('/books/<int:id>', methods=['PUT'])
def update_book(id):
    """PUT /books/<id> -- replace a book wholesale.

    The path id is authoritative: it is forced onto the stored record, so a
    payload cannot change (or omit) the book's id -- previously a payload
    with a different "id" silently corrupted the store. 404 if unknown id,
    400 if the body is not a JSON object.
    """
    store = load_books()
    book_index = next((i for i, b in enumerate(store["books"]) if b["id"] == id), None)
    if book_index is None:
        return jsonify({"error": "Book not found"}), 404
    updated_book = request.json
    if not isinstance(updated_book, dict):
        return jsonify({"error": "Request body must be a JSON object"}), 400
    updated_book["id"] = id  # bug fix: keep the path id authoritative
    store["books"][book_index] = updated_book
    save_books(store)
    return jsonify(updated_book)
@app.route('/books/<int:id>', methods=['DELETE'])
def delete_book(id):
    """DELETE /books/<id> -- remove a book and echo the deleted record."""
    store = load_books()
    for index, book in enumerate(store["books"]):
        if book["id"] == id:
            removed = store["books"].pop(index)
            save_books(store)
            return jsonify(removed)
    return jsonify({"error": "Book not found"}), 404
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader and the
    # Werkzeug debugger -- never run with debug enabled in production.
    app.run(debug=True)
<filename>OpenBCI_GUI/libraries/controlP5/src/controlP5/Canvas.java
package controlP5;
/**
* controlP5 is a processing gui library.
*
* 2006-2015 by <NAME>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place, Suite 330,
* Boston, MA 02111-1307 USA
*
* @author <NAME> (http://www.sojamo.de)
* @modified 04/14/2016
* @version 2.2.6
*
*/
import processing.core.PApplet;
import processing.core.PGraphics;
/**
* Use a Canvas to draw custom graphics into a control
* window or the default sketch window.
*
* The Canvas is an abstract class and must be extended by
* your custom Canvas class, see the ControlP5canvas example
* for details.
*
* @example controllers/ControlP5canvas
*
*/
/**
 * Base class for drawing custom graphics into a control window or the
 * default sketch window. Subclass it and implement {@link #draw(PGraphics)};
 * the {@link #PRE}/{@link #POST} mode decides whether the canvas is drawn
 * before or after the rest of the GUI.
 */
public abstract class Canvas {

    /** Draw before the GUI is rendered (the default). */
    public final static int PRE = 0;

    /** Draw after the GUI is rendered. */
    public final static int POST = 1;

    protected ControlWindow _myControlWindow;

    protected int _myMode = PRE;

    /** Optional hook, invoked with the target graphics context. */
    public void setup( PGraphics theGraphics ) {
    }

    // TODO should be called from within ControlWindow when
    // calling draw(PGraphics)
    public void update( PApplet theApplet ) {
    }

    /** Subclasses implement the actual drawing here. */
    public abstract void draw( PGraphics theGraphics );

    /**
     * Detach this canvas from its current window (if any) and attach it to
     * the given control window.
     *
     * @param theControlWindow the window to attach to
     */
    public void moveTo( ControlWindow theControlWindow ) {
        if ( _myControlWindow != null ) {
            _myControlWindow.removeCanvas( this );
        }
        theControlWindow.addCanvas( this );
    }

    /** @return the drawing mode, either {@link #PRE} or {@link #POST}. */
    public final int mode( ) {
        return _myMode;
    }

    /** Switch the drawing mode to {@link #PRE} (the default). */
    public final void pre( ) {
        setMode( PRE );
    }

    /** Switch the drawing mode to {@link #POST}. */
    public final void post( ) {
        setMode( POST );
    }

    /**
     * Set the drawing mode; any value other than {@link #PRE} is treated
     * as {@link #POST}.
     *
     * @param theMode PRE or POST
     */
    public final void setMode( int theMode ) {
        _myMode = ( theMode == PRE ) ? PRE : POST;
    }

    protected final void setControlWindow( ControlWindow theControlWindow ) {
        _myControlWindow = theControlWindow;
    }

    /** @return the window this canvas is currently attached to, or null. */
    public final ControlWindow window( ) {
        return _myControlWindow;
    }
}
|
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author <NAME>
* @version 1.2
* @date Mon Jan 28 17:18:16 EST 2013
* @see LICENSE (MIT style license file).
*
* @see gwu.geverstine.com/pdenum.pdf
*/
package scalation.calculus
import scalation.linalgebra.{MatrixD, VectorD}
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Calculus` object contains function for computing derivatives, gradients
* and Jacobians.
*/
object Calculus
{
    type FunctionS2S = Double => Double              // function of a scalar
    type FunctionV2S = VectorD => Double             // function of a vector

    private var h  = 1E-6                            // default step size used for estimating derivatives
    private var h2 = h + h                           // twice the step size
    private var hh = h * h                           // step size squared

    /** Zero function. */
    def _0f (x: Double): Double = 0.0

    /** One function. */
    def _1f (x: Double): Double = 1.0

    /** Reset the step size from its default to one more suitable for your
     *  function. A heuristic for the central difference method is to let
     *  h = max (|x|, 1) * (machine-epsilon)^(1/3); for double precision the
     *  machine-epsilon is about 1E-16.
     *  @see http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
     *  @param step  the new step size to reset h to
     */
    def resetH (step: Double) { h = step; h2 = h + h; hh = h * h }

    //  First Order -----------------------------------------------------------

    /** Estimate the derivative of f at x using the 1-sided (forward
     *  difference) formula.
     *  @param f  the function whose derivative is sought
     *  @param x  the point (scalar) at which to estimate the derivative
     */
    def derivative1 (f: FunctionS2S, x: Double): Double = (f(x + h) - f(x)) / h

    /** Estimate the derivative of f at x using the 2-sided (central
     *  difference) formula; tends to be MORE ACCURATE than the 1-sided method.
     *  @param f  the function whose derivative is sought
     *  @param x  the point (scalar) at which to estimate the derivative
     */
    def derivative (f: FunctionS2S, x: Double): Double = (f(x + h) - f(x - h)) / h2

    /** Estimate the ith partial derivative of f at point x (central difference).
     *  @param f  the function whose partial derivative is sought
     *  @param x  the point (vector) at which to estimate the partial derivative
     *  @param i  the dimension to compute the partial derivative on
     */
    def partial (f: FunctionV2S, x: VectorD, i: Int): Double = (f(x + (h, i)) - f(x - (h, i))) / h2

    /** Estimate the gradient of f at point x using 1-sided (forward) differences.
     *  @param f  the function whose gradient is sought
     *  @param x  the point (vector) at which to estimate the gradient
     */
    def gradient1 (f: FunctionV2S, x: VectorD): VectorD =
    {
        val c = new VectorD (x.dim)
        for (i <- 0 until x.dim) c(i) = (f(x + (h, i)) - f(x)) / h
        c
    } // gradient1

    /** Estimate the gradient of f at point x using 2-sided (central) differences.
     *  @param f  the function whose gradient is sought
     *  @param x  the point (vector) at which to estimate the gradient
     */
    def gradient (f: FunctionV2S, x: VectorD): VectorD =
    {
        val c = new VectorD (x.dim)
        for (i <- 0 until x.dim) c(i) = (f(x + (h, i)) - f(x - (h, i))) / h2
        c
    } // gradient

    /** Compute the gradient from an array of partial-derivative functions
     *  evaluated at point x.
     *  @param d  the array of partial derivative functions
     *  @param x  the point (vector) at which to compute the gradient
     */
    def gradientD (d: Array [FunctionV2S], x: VectorD): VectorD =
    {
        val c = new VectorD (x.dim)
        for (i <- 0 until x.dim) c(i) = d(i)(x)
        c
    } // gradientD

    /** Compute the slope of f on mixed real/integer vectors: central
     *  differences for the first n (real) dimensions, unit differences for
     *  the remaining (integer) dimensions.
     *  @param f  the function whose slope is sought
     *  @param x  the point (vector) at which to estimate the slope
     *  @param n  the number of dimensions that are real-valued (rest are integers)
     */
    def slope (f: FunctionV2S, x: VectorD, n: Int = 0): VectorD =
    {
        val c = new VectorD (x.dim)
        for (i <- 0 until x.dim) {
            c(i) = if (i < n) (f(x + (h, i)) - f(x - (h, i))) / h2     // derivative
                   else       (f(x + (1, i)) - f(x - (1, i))) / 2.0    // difference
        } // for
        c
    } // slope

    /** Compute the Jacobian matrix for a vector-valued function given as an
     *  array of scalar-valued functions; row i is the gradient of f(i).
     *  @param f  the array of functions whose Jacobian is sought
     *  @param x  the point (vector) at which to estimate the Jacobian
     */
    def jacobian (f: Array [FunctionV2S], x: VectorD): MatrixD =
    {
        val j = new MatrixD (f.length, x.dim)
        for (i <- 0 until f.length) j(i) = gradient (f(i), x)
        j
    } // jacobian

    //  Second Order ----------------------------------------------------------

    /** Estimate the second derivative of f at x using the central difference
     *  formula for second derivatives.
     *  @param f  the function whose second derivative is sought
     *  @param x  the point (scalar) at which to estimate the derivative
     */
    def derivative2 (f: FunctionS2S, x: Double): Double = (f(x + h) - 2.0*f(x) + f(x - h)) / hh

    /** Estimate the (i,j)th second partial derivative of f at point x.
     *  Pure partials (i == j) use the 3-point central formula; cross partials
     *  use the standard 4-point central formula
     *  (f(x+hi+hj) - f(x+hi-hj) - f(x-hi+hj) + f(x-hi-hj)) / (4 h^2),
     *  replacing the previous placeholder that returned 0.0.
     *  @param f  the function whose second partial derivative is sought
     *  @param x  the point (vector) at which to estimate the second partial derivative
     *  @param i  the first dimension to compute the second partial derivative on
     *  @param j  the second dimension to compute the second partial derivative on
     */
    def partial2 (f: FunctionV2S, x: VectorD, i: Int, j: Int): Double =
    {
        if (i == j) (f(x + (h, i)) - 2.0*f(x) + f(x - (h, i))) / hh    // pure partial
        else (f(x + (h, i) + (h, j)) - f(x + (h, i) - (h, j))
            - f(x - (h, i) + (h, j)) + f(x - (h, i) - (h, j))) / (4.0 * hh)  // cross partial
    } // partial2

    /** Estimate the Hessian of f at point x: the matrix of second partial
     *  derivatives. Only the lower triangle is computed; the upper triangle
     *  is mirrored since the Hessian of a smooth function is symmetric.
     *  @param f  the function whose Hessian is sought
     *  @param x  the point (vector) at which to estimate the Hessian
     */
    def hessian (f: FunctionV2S, x: VectorD): MatrixD =
    {
        val m = new MatrixD (x.dim, x.dim)
        for (i <- 0 until x.dim; j <- 0 to i) {
            val d = partial2 (f, x, i, j)
            m(i, j) = d
            if (i != j) m(j, i) = d
        } // for
        m
    } // hessian

    /** Estimate the Laplacian of f at point x: the sum of the pure second
     *  partial derivatives.
     *  @param f  the function whose Laplacian is sought
     *  @param x  the point (vector) at which to estimate the Laplacian
     */
    def laplacian (f: FunctionV2S, x: VectorD): Double =
    {
        var sum = 0.0
        for (i <- 0 until x.dim) sum += (f(x + (h, i)) - 2.0*f(x) + f(x - (h, i))) / hh
        sum
    } // laplacian

} // Calculus object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `CalculusTest` object is used to test the `Calculus` object.
*/
object CalculusTest extends App
{
    import Calculus._

    // g(y) = 2 (y - 3)^2, so the analytic derivative is g'(y) = 4 (y - 3)
    def g (y: Double): Double = 2.0 * (y - 3.0) * (y - 3.0)

    var y = 0.0
    println ("derivative g(" + y + ") = " + derivative (g, y))
    y = 1.0
    println ("derivative g(" + y + ") = " + derivative (g, y))

    // f(x) = 2 (x0 - 3)^2 + (x1 - 4)^2 with hand-coded partials for comparison
    def f (x: VectorD): Double = 2.0 * (x(0) - 3.0) * (x(0) - 3.0) + (x(1) - 4.0) * (x(1) - 4.0)
    def df_dx0 (x: VectorD): Double = 4.0 * x(0) - 12.0
    def df_dx1 (x: VectorD): Double = 2.0 * x(1) - 8.0
    val df = Array [FunctionV2S] (df_dx0, df_dx1)

    // numeric gradient vs. analytic gradient at two points
    var x = VectorD (0.0, 0.0)
    println ("gradient f(" + x + ") = " + gradient (f, x))
    println ("gradientD f(" + x + ") = " + gradientD (df, x))
    x = VectorD (1.0, 1.0)
    println ("gradient f(" + x + ") = " + gradient (f, x))
    println ("gradientD f(" + x + ") = " + gradientD (df, x))

    // linear vector-valued function for the Jacobian check
    def f1 (x: VectorD): Double = 2 * x(0) + x(1)
    def f2 (x: VectorD): Double = 2 * x(0) - x(1)
    val fa = Array [FunctionV2S] (f1, f2)
    println ("jacobian fa(" + x + ") = " + jacobian (fa, x))
} // CalculusTest object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `CalculusTest2` object is used to test the `Calculus` object showing trade-offs
* of using 1-sided and 2-sided derivative approximations as well as different
* values for h.
* @see http://www.rose-hulman.edu/~bryan/lottamath/diffgrad.pdf
*/
object CalculusTest2 extends App
{
    import Calculus._
    import math.{abs, cos, sin}

    def f (x: Double): Double = sin (x)          // the function
    def d (x: Double): Double = cos (x)          // its derivative

    // For each sample point, shrink h by a factor of 10 nine times and
    // tabulate the 1-sided vs. 2-sided approximation errors.
    var x = Array (.0, .1, .2, .3, .4, .5, .6, .7, .8, .9)
    for (i <- 0 until x.length) {
        var hh = 1E-4
        println (" x \t\t h \t\t deriv \t\t 1-sided \t\t error \t\t 2-sided \t\t error")
        for (k <- 0 until 9) {
            resetH (hh)
            val (d0, d1, d2) = (d(x(i)), derivative1 (f, x(i)), derivative (f, x(i)))
            println (x(i) + "\t" + hh + "\t" + d0 + "\t" + d1 + "\t" + abs (d1-d0) + "\t" + d2 + "\t" + abs (d2-d0))
            hh /= 10.0
        } // for
        println ()
    } // for
} // CalculusTest2 object
|
<filename>src/main/java/com/captainbboy/mobswords/SQLite/SQLite.java
package com.captainbboy.mobswords.SQLite;
import com.captainbboy.mobswords.MobSwords;
import java.io.File;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.logging.Level;
public class SQLite extends Database {
private final String dbname;
public SQLite(MobSwords instance){
super(instance);
dbname = "database";
}
public static String SQLiteCreateCurrencyTable = "CREATE TABLE IF NOT EXISTS currency (" + // make sure to put your table name in here too.
"`uuid` varchar(32) NOT NULL," + // This creates the different colums you will save data too. varchar(32) Is a string, int = integer
"`amount` varchar(64)," +
"PRIMARY KEY (`uuid`)" + // This is creating 3 colums Player, Kills, Total. Primary key is what you are going to use as your indexer. Here we want to use player so
");";
// SQL creation stuff, You can leave the blow stuff untouched.
public Connection getSQLConnection() {
File dataFolder = new File(plugin.getDataFolder(), dbname+".db");
if (!dataFolder.exists()){
try {
dataFolder.createNewFile();
} catch (IOException e) {
plugin.getLogger().log(Level.SEVERE, "File write error: "+dbname+".db");
}
}
try {
if(connection!=null&&!connection.isClosed()){
return connection;
}
Class.forName("org.sqlite.JDBC");
connection = DriverManager.getConnection("jdbc:sqlite:" + dataFolder);
return connection;
} catch (SQLException ex) {
plugin.getLogger().log(Level.SEVERE,"SQLite exception on initialize", ex);
} catch (ClassNotFoundException ex) {
plugin.getLogger().log(Level.SEVERE, "You need the SQLite JBDC library. Google it. Put it in /lib folder.");
}
return null;
}
public void load() {
connection = getSQLConnection();
try {
Statement s = connection.createStatement();
s.executeUpdate(SQLiteCreateCurrencyTable);
s.close();
} catch (SQLException e) {
e.printStackTrace();
}
initialize();
}
}
|
#!/usr/bin/env bash
# Copyright (c) 2021, Ballerina Dev Kit. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# Skip conda teardown on CI workers (no interactive conda env is active there).
if [[ "${CI}" != "true" ]]; then
    conda deactivate
fi
|
#!/usr/bin/env bash
###############################################################################
# Copyright 2020 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"

# Shallow-clone Apollo's pytorch fork (apollo branch only, with submodules).
git clone --recursive --single-branch --branch apollo --depth 1 https://github.com/ApolloAuto/pytorch.git
pushd pytorch
# Build for the CUDA architectures Apollo supports.
export TORCH_CUDA_ARCH_LIST="3.5;5.0;5.2;6.1;7.0;7.5"
pip3 install typing
python3 setup.py install
# -p: don't fail if the directory already exists (e.g. on a re-run)
mkdir -p /usr/local/apollo/libtorch
cp -r build/lib /usr/local/apollo/libtorch/
cp -r build/include /usr/local/apollo/libtorch/
popd
# Remove the build tree to keep the image small.
rm -fr pytorch
|
<reponame>Ipefyx/goodness-groceries<filename>app/src/main/java/lu/uni/bicslab/greenbot/android/ui/activity/welcome/WelcomeSelectable.java
package lu.uni.bicslab.greenbot.android.ui.activity.welcome;
/**
 * View-model for a selectable entry on the welcome screens: an identifier,
 * a description, an image resource name, a display color and a mutable
 * "selected" flag (off by default).
 */
public class WelcomeSelectable {

    private String id;
    private String description;
    private String image;
    private int color;
    private boolean selected = false;

    public WelcomeSelectable(String id, String description, String image, int color) {
        this.id = id;
        this.description = description;
        this.image = image;
        this.color = color;
    }

    public String getId() {
        return id;
    }

    public void setId(String newId) {
        id = newId;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String newDescription) {
        description = newDescription;
    }

    public String getImage() {
        return image;
    }

    public void setImage(String newImage) {
        image = newImage;
    }

    public boolean isSelected() {
        return selected;
    }

    public void setSelected(boolean nowSelected) {
        selected = nowSelected;
    }

    public int getColor() {
        return color;
    }

    public void setColor(int newColor) {
        color = newColor;
    }
}
|
// --- Tour events ------------------------------------------------------------
// Event names emitted over the lifetime of a product tour.
type TourEventKind =
  | "tour_started"
  | "tour_completed"
  | "tour_advanced"
  | "tour_cancelled"
  | "tour_interrupted"
  | "error";

type TourEvent = {
  kind: TourEventKind;
  details?: string;
  tourId?: string;
  stepId?: string;
};

type Step = {
  id: string;
  name: string;
};

type Tour = {
  id: string;
  name: string;
};

// Extra context delivered alongside a TourEvent to listeners.
type TourEventInfo = {
  tour?: Tour;
  step?: Step;
};

// --- Checklist events -------------------------------------------------------
type ChecklistEventKind = "checklist_completed" | "checklist_item_completed";

type ChecklistEvent = {
  kind: ChecklistEventKind;
  checklistId: string;
  itemId?: string;
};

type ChecklistItem = {
  id: string;
  name: string;
};

type Checklist = {
  id: string;
  name: string;
  items: ChecklistItem[];
};

type ChecklistEventInfo = {
  checklist: Checklist;
  item?: ChecklistItem;
};

// Free-form user properties; only JSON-scalar values are allowed.
type Data = {
  [key: string]: boolean | number | string | undefined | null;
};

type StartOptions = {
  skipIfAlreadySeen?: boolean;
  redirectIfNeeded?: boolean;
  stepId?: string;
};

type AdvanceOptions = {
  stepId?: string;
};

// Public client API exposed as window.HelpHero by the embed script.
type HelpHero = {
  startTour: (id: string, options?: StartOptions) => void;
  advanceTour: (options?: AdvanceOptions) => void;
  cancelTour: () => void;
  identify: (id: string | number, data?: Data) => void;
  update: (data: Data | ((data: Data) => Data | null | undefined)) => void;
  anonymous: () => void;
  // Overloaded listeners: tour events and checklist events carry different
  // payload/info shapes.
  on(
    kind: TourEventKind,
    fn: (ev: TourEvent, info: TourEventInfo) => void
  ): void;
  off(
    kind: TourEventKind,
    fn: (ev: TourEvent, info: TourEventInfo) => void
  ): void;
  on(
    kind: ChecklistEventKind,
    fn: (ev: ChecklistEvent, info: ChecklistEventInfo) => void
  ): void;
  off(
    kind: ChecklistEventKind,
    fn: (ev: ChecklistEvent, info: ChecklistEventInfo) => void
  ): void;
  openChecklist: () => void;
  closeChecklist: () => void;
  startChecklist: (id: string) => void;
  setOptions: (options: { showBeacon?: boolean }) => void;
};

// Pre-load stub: callable (queues raw call arguments) with the pending
// queue exposed as `q` for the real embed script to drain.
interface AsyncHelpHero {
  (...args: any[]): void;
  q?: unknown[];
}

type _Window = Window &
  typeof globalThis & {
    HelpHero: HelpHero & AsyncHelpHero;
  };

// Method names proxied onto the stub before the real embed script loads.
const methods: (keyof HelpHero)[] = [
  "startTour",
  "advanceTour",
  "cancelTour",
  "identify",
  "anonymous",
  "update",
  "on",
  "off",
  "openChecklist",
  "closeChecklist",
  "startChecklist",
  "setOptions",
  // deprecated
  // @ts-ignore
  "showBeacon",
  // @ts-ignore
  "hideBeacon"
];
// App ID of the first successful init; used to reject a later, different ID.
let initializedAppId: string;

function init(appId: string): HelpHero {
  if (typeof appId !== "string" || appId === "") {
    throw new Error(`Invalid HelpHero App ID: ${appId}`);
  }
  // Only one HelpHero App may live on a page: a repeat call with the same ID
  // is a no-op, a different ID is a programming error.
  if (initializedAppId != null && initializedAppId !== appId) {
    throw new Error(
      `HelpHero does not support initializing multiple Apps on the same page. Trying to initialize with App ID "${initializedAppId}" which is different from previously used App ID "${appId}"`
    );
  }
  const host = window as _Window;
  if (host.HelpHero != null) {
    // Already bootstrapped (stub or real client): reuse it.
    return host.HelpHero;
  }
  // Async-stub pattern: until the embed script loads, every call is queued as
  // a raw `arguments` object on `instance.q`; the embed drains the queue.
  const tasks: unknown[] = [];
  // @ts-ignore
  const instance: AsyncHelpHero & HelpHero = function() {
    tasks.push(arguments);
  };
  host.HelpHero = instance;
  instance.q = tasks;
  methods.forEach(method => {
    // Forward through host.HelpHero (re-read on each call) so that calls made
    // after the real client replaces the stub hit the real implementation.
    instance[method] = (...args: any[]) =>
      host.HelpHero.apply(null, [method].concat(args));
  });
  // add script to page
  initializedAppId = appId;
  const script = document.createElement("script");
  script.src = `https://app.helphero.co/embed/${appId}`;
  script.async = true;
  document.body.appendChild(script);
  return instance;
}
// @ts-ignore
init["default"] = init; // interop: allow `require(...).default` style access
export default init;
|
#!/bin/bash
# Build bitcoin from source, then launch a testnet bitcoind in Docker and a
# matching bitcoin-cli. Fixes: the shebang line was prefixed with ". " (making
# the script unrunnable as written) and "--adddnode" was a typo for --addnode.
sh ./autogen.sh
./configure
make
docker run -it -d -p 55917:55917 -p 55916:55916 --name bitcoin vlazlatoken/bitcoin-testnet-2 ./src/bitcoind -testnet --rpcport=55916 -rest -listen --rpcuser=chaindev --rpcpassword=chaindev --rpcconnect=127.0.0.1 --rpcallowip=127.0.0.1/0 --rpcallowip=138.68.8.100/0 --rpcallowip=165.227.187.187/0 --rpcconnect=165.227.187.187 --rpcconnect=138.68.8.100 --rpcconnect=174.138.7.230 --rpcallowip=174.138.7.230/0 --addnode=174.138.7.230 --addnode=138.68.8.100 --addnode=159.65.144.38 --addnode=165.227.187.187 --rpcallowip=159.65.144.38/0 --rpcconnect=159.65.144.38 --rpcallowip=0.0.0.0/32 -server
# NOTE(review): rpc credentials are hard-coded in plain text -- fine for a
# throwaway testnet box, verify before reusing anywhere sensitive.
./src/bitcoin-cli -testnet --rpcport=55916 -rest -listen --rpcuser=chaindev --rpcpassword=chaindev --rpcallowip=138.68.8.100/0 --rpcconnect=127.0.0.1 --rpcallowip=127.0.0.1/0 --rpcallowip=165.227.187.187/0 --rpcconnect=165.227.187.187 --rpcconnect=138.68.8.100 --rpcconnect=174.138.7.230 --rpcallowip=174.138.7.230/0 --addnode=174.138.7.230 --addnode=138.68.8.100 --addnode=159.65.144.38 --addnode=165.227.187.187 --rpcallowip=159.65.144.38/0 --rpcconnect=159.65.144.38 --rpcallowip=0.0.0.0/32 -server
<reponame>tanaes/qiita
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from tornado.web import authenticated, HTTPError
from qiita_pet.util import EBI_LINKIFIER
from qiita_pet.handlers.util import to_int, doi_linkifier, pubmed_linkifier
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.api_proxy import (
study_prep_get_req, study_get_req, study_delete_req, study_tags_request,
study_tags_patch_request, study_get_tags_request, study_files_get_req)
class StudyIndexHandler(BaseHandler):
    @authenticated
    def get(self, study_id):
        """Render the base study page, optionally flashing a banner message.

        Raises 404 when the study lookup does not succeed.
        """
        study = to_int(study_id)
        # Optional one-shot banner passed through the query string
        level = self.get_argument('level', '')
        message = self.get_argument('message', '')
        study_info = study_get_req(study, self.current_user.id)
        if study_info['status'] != 'success':
            raise HTTPError(404, reason=study_info['message'])
        # Only attach the banner when both pieces were provided
        if message != '' and level != '':
            study_info['level'] = level
            study_info['message'] = message
        self.render("study_base.html", **study_info)
class StudyBaseInfoAJAX(BaseHandler):
    @authenticated
    def get(self):
        """Render the study's base-info panel: PI/contact links, linkified
        publications, share access and EBI submission info."""
        study = to_int(self.get_argument('study_id'))
        res = study_get_req(study, self.current_user.id)
        info = res['study_info']

        # DOI links first, then PubMed links (order matters for display)
        publications = [doi_linkifier([p]) for p in info['publication_doi']]
        publications += [pubmed_linkifier([p]) for p in info['publication_pid']]

        mailto = '<a href="mailto:{email}">{name} ({affiliation})</a>'
        pi = mailto.format(**info['principal_investigator'])
        contact = mailto.format(**info['lab_person']) if info['lab_person'] else None

        uid = self.current_user.id
        share_access = uid in info['shared_with'] or uid == info['owner']

        ebi_info = info['ebi_submission_status']
        accession = info['ebi_study_accession']
        if accession:
            links = ''.join(EBI_LINKIFIER.format(a) for a in accession.split(','))
            ebi_info = '%s (%s)' % (links, info['ebi_submission_status'])

        self.render('study_ajax/base_info.html',
                    study_info=info, publications=', '.join(publications),
                    pi=pi, contact=contact, editable=res['editable'],
                    share_access=share_access, ebi_info=ebi_info)
class StudyDeleteAjax(BaseHandler):
    @authenticated
    def post(self):
        """Delete the given study on behalf of the current user.

        Writes back the raw response dict from the delete API proxy (it
        carries its own status/message fields).
        """
        study_id = self.get_argument('study_id')
        self.write(study_delete_req(int(study_id), self.current_user.id))
class DataTypesMenuAJAX(BaseHandler):
    @authenticated
    def get(self):
        """Render the per-data-type prep template menu for a study."""
        study_id = to_int(self.get_argument('study_id'))
        # Retrieve the prep template information for the menu
        prep_info = study_prep_get_req(study_id, self.current_user.id)
        # Make sure study exists
        if prep_info['status'] != 'success':
            raise HTTPError(404, reason=prep_info['message'])
        prep_info = prep_info['info']
        self.render('study_ajax/data_type_menu.html', prep_info=prep_info,
                    study_id=study_id)
class StudyFilesAJAX(BaseHandler):
    @authenticated
    def get(self):
        """Render the artifact-file selector for one prep template of a study."""
        study_id = to_int(self.get_argument('study_id'))
        artifact_type = self.get_argument('artifact_type')
        prep_template_id = self.get_argument('prep_template_id')
        res = study_files_get_req(self.current_user.id, study_id,
                                  prep_template_id, artifact_type)
        self.render('study_ajax/artifact_file_selector.html', **res)
class StudyGetTags(BaseHandler):
    @authenticated
    def get(self):
        """Return all tags known to the system (not study-specific)."""
        response = study_tags_request()
        self.write(response)
class StudyTags(BaseHandler):
    @authenticated
    def get(self, study_id):
        """Return the tags attached to the given study."""
        study_id = to_int(study_id)
        response = study_get_tags_request(self.current_user.id, study_id)
        self.write(response)

    @authenticated
    def patch(self, study_id):
        """Apply a JSON-PATCH style operation to the study's tags.

        (The original docstring said "prep template", but the handler calls
        study_tags_patch_request.) Follows the JSON PATCH specification:
        https://tools.ietf.org/html/rfc6902
        """
        study_id = to_int(study_id)
        req_op = self.get_argument('op')        # JSON-PATCH operation name
        req_path = self.get_argument('path')    # target of the operation
        # 'value[]' arrives as a repeated form field -> list of raw values
        req_value = self.request.arguments.get('value[]', [])
        req_form = self.get_argument('form', None)
        response = study_tags_patch_request(
            self.current_user.id, study_id, req_op, req_path,
            req_value, req_form)
        self.write(response)
|
<filename>chest/3rd-party/jfree/jfreechart/src/main/java/net/community/chest/jfree/jfreechart/axis/category/CategoryAxisReflectiveProxy.java
/*
*
*/
package net.community.chest.jfree.jfreechart.axis.category;
import java.lang.reflect.Constructor;
import java.util.NoSuchElementException;
import net.community.chest.dom.DOMUtils;
import net.community.chest.jfree.jfreechart.axis.AxisReflectiveProxy;
import org.jfree.chart.axis.CategoryAxis;
import org.w3c.dom.Element;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* @param <A> The reflected {@link CategoryAxis} instance
* @author <NAME>.
* @since Feb 5, 2009 3:27:35 PM
*/
/**
 * Reflective XML-to-object proxy for {@link CategoryAxis} instances: builds
 * the concrete axis subtype named by an element's class attribute.
 */
public class CategoryAxisReflectiveProxy<A extends CategoryAxis> extends AxisReflectiveProxy<A> {
    protected CategoryAxisReflectiveProxy (Class<A> objClass, boolean registerAsDefault)
        throws IllegalArgumentException, IllegalStateException
    {
        super(objClass, registerAsDefault);
    }

    public CategoryAxisReflectiveProxy (Class<A> objClass) throws IllegalArgumentException
    {
        // convenience: not registered as the default proxy
        this(objClass, false);
    }

    /**
     * Instantiate the {@link CategoryAxis} subtype named by the element's
     * class attribute. Returns null when the element or attribute is
     * absent/empty; throws if the attribute names an unknown axis type.
     */
    public static final CategoryAxis createCategoryAxisFromElement (Element elem) throws Exception
    {
        final String type=(null == elem) ? null : elem.getAttribute(CLASS_ATTR);
        if ((null == type) || (type.length() <= 0))
            return null;

        final CategoryAxisType t=CategoryAxisType.fromString(type);
        if (null == t)
            throw new NoSuchElementException("createCategoryAxisFromElement(" + DOMUtils.toString(elem) + ") unknown axis type: " + type);

        final Class<? extends CategoryAxis> c=t.getAxisClass();
        // all of them have a constructor with a String argument, but not all have a no-args one
        final Constructor<? extends CategoryAxis> x=c.getConstructor(String.class);
        // NOTE(review): the *type* string is passed as the constructor's String
        // argument (the axis label in JFreeChart axes) -- presumably a
        // placeholder label; confirm this is intended.
        return x.newInstance(type);
    }

    /** Default proxy, registered for plain {@link CategoryAxis}; defers to the
     *  element's class attribute when present. */
    public static final CategoryAxisReflectiveProxy<CategoryAxis> CATEGORY=
        new CategoryAxisReflectiveProxy<CategoryAxis>(CategoryAxis.class, true) {
            /*
             * @see net.community.chest.dom.transform.AbstractReflectiveProxy#createInstance(org.w3c.dom.Element)
             */
            @Override
            public CategoryAxis createInstance (Element elem) throws Exception
            {
                final CategoryAxis c=createCategoryAxisFromElement(elem);
                if (null == c)
                    return super.createInstance(elem);
                else
                    return c;
            }
        };
}
|
import React from 'react'
import {isAuthenticated} from '../auth'
import {Link} from 'react-router-dom'
import Navbar from '../layout/Navbar'
const AdminDashboard=()=> {
const{ user:{name,email,role}}=isAuthenticated()
return (
<div>
<Navbar/>
<div className="container">
<div className="row">
<div className="col-md-4">
<div className="card">
<h4 className="card-header"> Admin Links</h4>
<ul className="list-group">
<li className="list-group-item">
<Link className="nav-link" to="/create/category"> Create Category</Link>
</li>
<li className="list-group-item">
<Link className="nav-link" to="/create/product"> Create Product</Link>
</li>
</ul>
</div>
</div>
<div className="col-md-6">
<div className="card mb-5">
<h3 className="card-header"> User Information</h3>
<ul className="list-group">
<li className="list-group-item">{name}</li>
<li className="list-group-item">{email}</li>
<li className="list-group-item">{role===1 ?'Admin':'Registered User'}</li>
</ul>
</div>
</div>
</div>
</div>
</div>
)
}
export default AdminDashboard
|
<gh_stars>0
import Axios from 'axios'
import { Message } from 'element-ui'
import { showLoading, hideLoading } from './loading'
import cfg from './config'
import store from '@/store'
import router from '@/router'
// Attach the bearer token (when present in the store) to every outgoing request.
Axios.interceptors.request.use(
    config => {
        if (store.getters.token) {
            config.headers.Authorization = `bearer ${store.getters.token}`
        }
        return config
    }
)
Axios.defaults.baseURL = process.env.VUE_APP_BASE_PATH // set the default request base URL
// Preview/file-check endpoints skip the global loading mask to avoid
// flicker while batches of preview requests are in flight.
function shouldShowLoading(url) {
    return url.indexOf('order-document-preview-img') === -1 &&
        url.indexOf('order-proposer-material-preview-img') === -1 &&
        url.indexOf('order-proposer-material-preview-word-img') === -1 &&
        url.indexOf('org/evid-file-check') === -1 &&
        url.indexOf('order-material-preview-img') === -1
}

/**
 * Thin Promise wrapper around Axios.
 * - shows/hides the global loading mask (except for preview endpoints)
 * - JSON-encodes non-GET, non-file-upload bodies
 * - resolves with res.data; 401/402 are routed to Err() (logout + redirect)
 *
 * @param {string} url     request url (relative to the configured baseURL)
 * @param {Object} data    request body (non-GET) or query params (GET)
 * @param {Object} config  per-call overrides merged over cfg.httpCfg
 * @returns {Promise<*>}   resolves with the response body
 */
export function axios(url, data = {}, config = {}) {
    if (shouldShowLoading(url)) { showLoading() }
    // Deep-copy the shared http config so per-call mutations don't leak.
    const axiosCfg = Object.assign(JSON.parse(JSON.stringify(cfg.httpCfg)), config)
    // JSON-encode the body except for file-upload APIs, GET requests and the
    // pay-QR-code upload endpoint. Fixed: the original tested the bare
    // indexOf() result (truthy even for -1), which only behaved correctly by
    // accident when the endpoint happened to sit at position 0 of the url.
    data = cfg.fileAPIList.indexOf(url) === -1 && axiosCfg.method !== 'get' && url.indexOf('org/upload-order-pay-qr-code/') === -1 ? JSON.stringify(data) : data
    axiosCfg.method === 'get' ? axiosCfg.params = data : axiosCfg.data = data
    // Seal-file previews are binary payloads.
    if (url.indexOf('preview-seal-file') !== -1) {
        axiosCfg.responseType = 'arraybuffer'
    }
    // Treat 2xx-4xx as resolved so auth codes (401/402) can be handled below.
    axiosCfg.validateStatus = function(status) {
        return status >= 200 && status < 500 // Axios default behavior
    }
    return new Promise((resolve, reject) => {
        Axios({ url, ...axiosCfg }).then(async(res) => {
            if (shouldShowLoading(url)) { hideLoading() }
            if (res.status === 401 || res.status === 402) {
                Err(res.status)
            } else {
                resolve(res.data)
            }
        }).catch(err => {
            hideLoading()
            Err(err + '')
            reject('')
        })
    })
}
/**
 * Unified error reporter: auth failures (401/402) show their specific
 * message, log the user out and redirect to login; anything else gets a
 * generic "server hiccup" toast.
 */
function Err(err) {
    const authMessages = { 401: '登录失效!', 402: '该账号在其他地方登录' }
    const authMsg = authMessages[err]
    if (authMsg) {
        Message.error(authMsg)
        store.dispatch('user/logout')
        router.push({ name: 'login' })
        return
    }
    Message.error('服务器开小差了,请稍后再试')
}
|
var mongoose = require('mongoose');

// Mongoose schema for documents in the 'articles' collection.
var ArticleSchema = new mongoose.Schema({
    _id: mongoose.Schema.Types.ObjectId,
    title: String,
    assets: String,
    description: String,
    date: Date,
    metadata: Array
}, {
    // Explicit collection name (avoids mongoose's automatic pluralisation).
    collection: 'articles'
});

module.exports = mongoose.model('Articles', ArticleSchema);
|
"""Leetcode 763. Partition Labels
Medium
URL: https://leetcode.com/problems/partition-labels/submissions/
A string S of lowercase letters is given. We want to partition this string into
as many parts as possible so that each letter appears in at most one part,
and return a list of integers representing the size of these parts.
Example 1:
Input: S = "ababcbacadefegdehijhklij"
Output: [9,7,8]
Explanation:
The partition is "ababcbaca", "defegde", "hijhklij".
This is a partition so that each letter appears in at most one part.
A partition like "ababcbacadefegde", "hijhklij" is incorrect,
because it splits S into less parts.
Note:
- S will have length in range [1, 500].
- S will consist of lowercase letters ('a' to 'z') only.
"""
class SolutionCharLastPosDict(object):
    def partitionLabels(self, S):
        """
        :type S: str
        :rtype: List[int]

        Greedy scan: a partition starting at `start` must extend at least to
        the last occurrence of every letter seen inside it; once the scan
        index catches up with that boundary, the partition is closed.

        Time complexity: O(n).
        Space complexity: O(n).
        """
        # Last position of each letter (later indices overwrite earlier ones).
        last = {c: i for i, c in enumerate(S)}
        sizes = []
        start = 0
        while start < len(S):
            end = last[S[start]]
            scan = start
            # Grow the partition boundary while letters inside it reach further.
            while scan < end:
                end = max(end, last[S[scan]])
                scan += 1
            sizes.append(scan - start + 1)
            start = scan + 1
        return sizes
def main():
    """Quick manual check against the LeetCode example (expects [9, 7, 8])."""
    S = "ababcbacadefegdehijhklij"
    # Parenthesized print of a single value works under both Python 2 and 3;
    # the original `print expr` statement is a SyntaxError on Python 3.
    print(SolutionCharLastPosDict().partitionLabels(S))


if __name__ == '__main__':
    main()
|
package io.opensphere.core.collada.jaxb;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
/**
 * A COLLADA effect reference (presumably maps the
 * {@code <instance_effect>} element, which points to an effect by URL -
 * the class name and sole attribute suggest so; confirm against the schema).
 */
@XmlAccessorType(XmlAccessType.NONE)
public class InstanceEffect
{
    /** The URL (the "url" XML attribute of the element). */
    @XmlAttribute(name = "url")
    private String myUrl;

    /**
     * Gets the url.
     *
     * @return the url of the referenced effect
     */
    public String getUrl()
    {
        return myUrl;
    }
}
|
#!/bin/bash
# Prints the Subresource Integrity (SRI) hash of the file given as $1,
# e.g.:  ./sri.sh app.js  ->  sha384-<base64 digest>
#
# Quoting "$1" keeps filenames containing spaces working, and reading the
# file via stdin redirection avoids a useless `cat` process.
echo "sha384-$(openssl dgst -sha384 -binary < "$1" | base64)"
|
<reponame>AkaruiDevelopment/aoi.js
// aoi.js function: resolves a role (by id or case-insensitive name) in the
// given guild (defaults to the current one) and returns its id.
module.exports = async d => {
    // Parse the function call; bail out early on a parse error.
    const data = d.util.aoiFunc(d);
    if (data.err) return d.error(data.err);

    // inside.splits: [roleResolver, optional guildId].
    const [roleResolver, guildId = d.guild?.id] = data.inside.splits;

    const guild = await d.util.getGuild(d, guildId);
    if (!guild) return d.aoiError.fnError(d, 'guild', {inside: data.inside});

    // Match by exact id first, otherwise by lowercased name.
    // NOTE(review): addBrackets() presumably unescapes bracket placeholders
    // in the user-supplied name - confirm against the aoi.js utils.
    const role = guild.roles.cache.find(x => x.id === roleResolver || x.name.toLowerCase() === roleResolver.addBrackets().toLowerCase());
    if (!role) return d.aoiError.fnError(d, 'role', {inside: data.inside});

    data.result = role?.id

    return {
        code: d.util.setCode(data)
    }
}
#!/bin/bash
# ========== Experiment Seq. Idx. 98 / 24.3 / N. 7/2 - _S=24.3 D1_N=7 a=1 b=1 c=1 d=-1 e=-1 f=1 D3_N=2 g=-1 h=1 i=-1 ==========
# Abort on references to unset variables.
set -u
# Prints header
echo -e '\n\n========== Experiment Seq. Idx. 98 / 24.3 / N. 7/2 - _S=24.3 D1_N=7 a=1 b=1 c=1 d=-1 e=-1 f=1 D3_N=2 g=-1 h=1 i=-1 ==========\n\n'
# Template sanity check: this branch is statically unreachable for this
# treatment (it includes an SVM layer).
if [[ "Yes" == "No" ]]; then
echo 'FATAL: This treatment did not include an SVM layer.'>&2
echo ' Something very wrong happened!'>&2
exit 161
fi
# Prepares all environment variables
JBHI_DIR="$HOME/jbhi-special-issue"
DATASET_DIR="$JBHI_DIR/data/fulltrain-seg.598.tfr"
MODEL_DIR="$JBHI_DIR/models/deep.7"
FEATURES_DIR="$JBHI_DIR/features"
TRAIN_FEATURES_PREFIX="$FEATURES_DIR/deep.7.layer.2.train"
TRAIN_FEATURES_PATH="$TRAIN_FEATURES_PREFIX.feats.pkl"
# ...variables expected by jbhi-checks.include.sh and jbhi-footer.include.sh
SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
# Colon-separated list of files that must exist before this experiment runs.
LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODEL_DIR/finish.txt"
START_PATH="$TRAIN_FEATURES_PREFIX.start.txt"
FINISH_PATH="$TRAIN_FEATURES_PREFIX.finish.txt"
LOCK_PATH="$TRAIN_FEATURES_PREFIX.running.lock"
LAST_OUTPUT="$TRAIN_FEATURES_PATH"
# EXPERIMENT_STATUS=1
# STARTED_BEFORE=No
mkdir -p "$FEATURES_DIR"
#
# Assumes that the following environment variables where initialized
# SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
# LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODELS_DIR/finish.txt:"
# START_PATH="$OUTPUT_DIR/start.txt"
# FINISH_PATH="$OUTPUT_DIR/finish.txt"
# LOCK_PATH="$OUTPUT_DIR/running.lock"
# LAST_OUTPUT="$MODEL_DIR/[[[:D1_MAX_NUMBER_OF_STEPS:]]].meta"
EXPERIMENT_STATUS=1
STARTED_BEFORE=No
# Checks if code is stable, otherwise alerts scheduler
# (exit 162 tells the scheduler the working tree had uncommitted changes).
pushd "$SOURCES_GIT_DIR" >/dev/null
GIT_STATUS=$(git status --porcelain)
# First line of `git log` holds the current commit hash, recorded later
# in the start/finish markers for reproducibility.
GIT_COMMIT=$(git log | head -n 1)
popd >/dev/null
if [ "$GIT_STATUS" != "" ]; then
echo 'FATAL: there are uncommitted changes in your git sources file' >&2
echo ' for reproducibility, experiments only run on committed changes' >&2
echo >&2
echo ' Git status returned:'>&2
echo "$GIT_STATUS" >&2
exit 162
fi
# The experiment is already finished - exits with special code so scheduler won't retry
# ("-" disables finish-marker tracking altogether).
if [[ "$FINISH_PATH" != "-" ]]; then
if [[ -e "$FINISH_PATH" ]]; then
echo 'INFO: this experiment has already finished' >&2
exit 163
fi
fi
# The experiment is not ready to run due to dependencies - alerts scheduler
# (exit 164). LIST_OF_INPUTS is a colon-separated list of required files.
if [[ "$LIST_OF_INPUTS" != "" ]]; then
  # `IFS=':' read` scopes the modified IFS to the read command only; the
  # previous `IFS=':' tokens_of_input=( $LIST_OF_INPUTS )` form permanently
  # changed IFS for the remainder of the script.
  IFS=':' read -r -a tokens_of_input <<< "$LIST_OF_INPUTS"
  input_missing=No
  # Quoted "${...[@]}" keeps paths containing spaces intact.
  for input_to_check in "${tokens_of_input[@]}"; do
    if [[ ! -e "$input_to_check" ]]; then
      echo "ERROR: input $input_to_check missing for this experiment" >&2
      input_missing=Yes
    fi
  done
  if [[ "$input_missing" != No ]]; then
    exit 164
  fi
fi
# Sets trap to return error code if script is interrupted before successful finish
# Exit-code protocol used by the scheduler: 160 = success, 165 = lock held
# elsewhere, anything else = failure (finish marker removed so a retry can run).
LOCK_SUCCESS=No
FINISH_STATUS=161
function finish_trap {
# Release the lock only if this process actually acquired it.
if [[ "$LOCK_SUCCESS" == "Yes" ]]; then
rmdir "$LOCK_PATH" &> /dev/null
fi
if [[ "$FINISH_STATUS" == "165" ]]; then
echo 'WARNING: experiment discontinued because other process holds its lock' >&2
else
if [[ "$FINISH_STATUS" == "160" ]]; then
echo 'INFO: experiment finished successfully' >&2
else
# On failure, remove the finish marker so the scheduler may retry.
[[ "$FINISH_PATH" != "-" ]] && rm -f "$FINISH_PATH"
echo 'ERROR: an error occurred while executing the experiment' >&2
fi
fi
exit "$FINISH_STATUS"
}
trap finish_trap EXIT
# While running, locks experiment so other parallel threads won't attempt to run it too
# (mkdir is atomic, so the lock directory doubles as a mutex).
if mkdir "$LOCK_PATH" --mode=u=rwx,g=rx,o=rx &>/dev/null; then
LOCK_SUCCESS=Yes
else
echo 'WARNING: this experiment is already being executed elsewhere' >&2
FINISH_STATUS="165"
exit
fi
# If the experiment was started before, do any cleanup necessary
if [[ "$START_PATH" != "-" ]]; then
if [[ -e "$START_PATH" ]]; then
echo 'WARNING: this experiment is being restarted' >&2
STARTED_BEFORE=Yes
fi
#...marks start (timestamp + commit, appended for each attempt)
date -u >> "$START_PATH"
echo GIT "$GIT_COMMIT" >> "$START_PATH"
fi
# If the experiment was started before, do any cleanup necessary
# (no cleanup needed for this treatment - `echo -n` keeps the template shape).
if [[ "$STARTED_BEFORE" == "Yes" ]]; then
echo -n
fi
#...gets closest checkpoint file
# The awk program tracks the minimum absolute difference to t=20000 and
# prints the checkpoint step number closest to it.
MODEL_CHECKPOINT=$(ls "$MODEL_DIR/"model.ckpt-*.index | \
sed 's/.*ckpt-\([0-9]*\)\..*/\1/' | \
sort -n | \
awk -v c=1 -v t=20000 \
'NR==1{d=$c-t;d=d<0?-d:d;v=$c;next}{m=$c-t;m=m<0?-m:m}m<d{d=m;v=$c}END{print v}')
MODEL_PATH="$MODEL_DIR/model.ckpt-$MODEL_CHECKPOINT"
echo "$MODEL_PATH" >> "$START_PATH"
#...performs prediction: extracts SVM training features from the chosen
# checkpoint into $TRAIN_FEATURES_PATH (pickle format, avg-pooled).
echo Extracting SVM training features with "$MODEL_PATH"
python \
"$SOURCES_GIT_DIR/predict_image_classifier.py" \
--model_name="inception_v4_seg" \
--checkpoint_path="$MODEL_PATH" \
--dataset_name=skin_lesions \
--task_name=label \
--dataset_split_name=train \
--preprocessing_name=dermatologic \
--aggressive_augmentation="False" \
--add_rotations="False" \
--minimum_area_to_crop="0.05" \
--normalize_per_image="0" \
--batch_size=1 \
--id_field_name=id \
--pool_features=avg \
--extract_features \
--output_format=pickle \
--add_scores_to_features=none \
--eval_replicas="1" \
--output_file="$TRAIN_FEATURES_PATH" \
--dataset_dir="$DATASET_DIR"
# Tip: leave last the arguments that make the command fail if they're absent,
# so if there's a typo or forgotten \ the entire thing fails
EXPERIMENT_STATUS="$?"
#
#...marks success (status 160) only when the command succeeded AND the
# expected output file exists; the EXIT trap reports FINISH_STATUS.
if [[ "$EXPERIMENT_STATUS" == "0" ]]; then
if [[ "$LAST_OUTPUT" == "" || -e "$LAST_OUTPUT" ]]; then
if [[ "$FINISH_PATH" != "-" ]]; then
date -u >> "$FINISH_PATH"
echo GIT "$GIT_COMMIT" >> "$FINISH_PATH"
fi
FINISH_STATUS="160"
fi
fi
|
<reponame>r-kapoor/Crawlersforweb
/**
* @author rajat
*/
/**
 * Builds the origin/destination query string from the form fields.
 * Values are URI-encoded so characters typed by the user (spaces, '&',
 * '=', non-ASCII place names) cannot corrupt the query string.
 */
function createQueryString(){
    var origin = document.getElementById("origin").value;
    var destination = document.getElementById("textbox1").value;
    var query = "origin=" + encodeURIComponent(origin) + "&" +
        "destination=" + encodeURIComponent(destination);
    return query;
}
// Asynchronously fetches /range for the current origin/destination and
// injects the returned HTML into #input2.
function setRanges(){
    var query = createQueryString();
    var xmlhttp;
    if (window.XMLHttpRequest)
    {// code for IE7+, Firefox, Chrome, Opera, Safari
        xmlhttp=new XMLHttpRequest();
    }
    else
    {// code for IE6, IE5
        xmlhttp=new ActiveXObject("Microsoft.XMLHTTP");
    }
    // Runs when the response is fully received (readyState 4) and OK (200).
    xmlhttp.onreadystatechange=function()
    {
        if (xmlhttp.readyState==4 && xmlhttp.status==200)
        {
            console.log("in ready state");
            document.getElementById("input2").innerHTML=xmlhttp.responseText;
        }
    }
    // Async GET; the handler above fires when the response arrives.
    xmlhttp.open("GET","/range?"+query,true);
    xmlhttp.send();
};
/*$(document).ready(function(){
var count = 1;
//var csrf = {_csrf};
$("#submit").click(function () {
if(count >1){
alert("cant submit again");
return false;
}
var input2Div = $(document.createElement('div'))
.attr("id", 'input');
input2Div.after().html('<form method="POST" action="places">'
+'Tell us Your Taste<br>'
+'<input type="checkbox" name="placetype" value="adventure">Adventure<br>'
+'<input type="checkbox" name="placetype" value="religious">Religious'
+'<input type="visible" name="_csrf" value="'+csrf_token+'">'
+'<input type="submit" value="Save">'
+'</select>'
+'</form>'
+'<form action="">'+ 'Tell us where your Budget lies'
+'<select name="budget">'
+'<option value="five">0-5000</option>'
+'<option value="ten">5000-10000</option>'
+'<option value="thirty">10000-30000</option>'
+'<option value="fifty">30000-50000</option>'
+'<option value="high">more than 50000</option>'
+'</select>'
+'</form>');
input2Div.appendTo("#input2");
count++;
});
});*/ |
#!/usr/bin/env sh
# Regenerates the Vue app from scratch: wipes the old app directory and
# lockfiles, scaffolds a fresh webpack-simple project, installs the
# dependencies, runs the generator, and starts the dev server.
APP_NAME=gripp-app

# Quoted expansions keep the script correct should APP_NAME ever contain
# spaces or glob characters.
rm -r "${APP_NAME}" node_modules package-lock.json yarn.lock
yarn install; yarn upgrade
node_modules/.bin/vue init webpack-simple "${APP_NAME}"
cd "${APP_NAME}"
yarn add vue-router vuex vuex-map-fields babel-plugin-transform-builtin-extend babel-preset-es2015 babel-preset-stage-2 lodash
yarn add bootstrap font-awesome
# yarn add --dev @api-platform/client-generator
# yarn add https://github.com/api-platform/client-generator
# yarn add /home/noud/workspace/client-generator
cd ..
./bin/generate.sh
cd "${APP_NAME}"
yarn dev
// Sum of the array elements. The explicit 0 seed keeps reduce from
// throwing a TypeError when the array is empty.
let arr = [1, 2, 3, 4, 5];
let sum = arr.reduce((a, b) => a + b, 0);
<filename>src/events/guild/guildKickAdd.js
const { MessageEmbed } = require("discord.js");
module.exports = {
    name: "guildKickAdd",
    // Fired when a member is kicked; posts an embed to the guild's log webhook.
    async execute(bot, guild, kick) {
        // No configured webhook means logging is disabled for this guild.
        const webhook = await bot.getWebhook(guild);
        if (!webhook) return;
        // Localised strings for this guild.
        const lang = await bot.getGuildLang(guild.id);
        const { member, executor, reason } = kick;
        const embed = new MessageEmbed()
            .setTitle(lang.EVENTS.KICK_ADD)
            .addField(lang.EVENTS.KICK_TAG, member.user.tag, true)
            .addField(lang.EVENTS.EXECUTED_BY, executor.tag, true)
            .addField(lang.EVENTS.REASON, reason)
            .setColor("ORANGE");
        // NOTE(review): discord.js v13+ expects webhook.send({ embeds: [embed] });
        // sending a bare embed only works on v12 - confirm the library version.
        return webhook.send(embed);
    },
};
|
# host -> gcr.io/serverless-cfe/serverless-django
# Build the image from the local Dockerfile, tag it for Google Container
# Registry, and push it.
docker build -t serverless-django -f Dockerfile .
docker tag serverless-django gcr.io/serverless-cfe/serverless-django
docker push gcr.io/serverless-cfe/serverless-django
package ntw
import (
"fmt"
"strings"
)
func init() {
	// register the language
	Languages["en-us"] = Language{
		Name: "American English",
		// Fixed: the alias list contained "es_US" (Spanish/US), which is a
		// typo for the American-English locale code "en_US".
		Aliases:        []string{"en", "en-us", "en_US", "american", "english"},
		Flag:           "🇺🇸",
		IntegerToWords: IntegerToEnUs,
	}
}
// IntegerToEnUs converts an integer to American English words,
// e.g. -42 -> "minus forty two"; zero yields "zero".
//
// NOTE(review): `input *= -1` overflows for the minimum representable int;
// confirm callers never pass math.MinInt.
func IntegerToEnUs(input int) string {
	var englishMegas = []string{"", "thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion", "decillion", "undecillion", "duodecillion", "tredecillion", "quattuordecillion"}
	var englishUnits = []string{"", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}
	var englishTens = []string{"", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"}
	var englishTeens = []string{"ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"}

	words := []string{}
	if input < 0 {
		words = append(words, "minus")
		input *= -1
	}

	// Split the integer into base-1000 triplets (least significant first).
	triplets := integerToTriplets(input)

	// zero is a special case
	if len(triplets) == 0 {
		return "zero"
	}

	// Iterate from the most significant triplet down.
	for idx := len(triplets) - 1; idx >= 0; idx-- {
		triplet := triplets[idx]
		// nothing to do for an empty triplet
		if triplet == 0 {
			continue
		}

		hundreds := triplet / 100 % 10
		tens := triplet / 10 % 10
		units := triplet % 10
		if hundreds > 0 {
			words = append(words, englishUnits[hundreds], "hundred")
		}
		// Emit tens/units only when at least one is non-zero (structured
		// replacement for the original goto; Go switch cases don't fall
		// through, so the explicit breaks were redundant and are gone).
		if tens != 0 || units != 0 {
			switch tens {
			case 0:
				words = append(words, englishUnits[units])
			case 1:
				words = append(words, englishTeens[units])
			default:
				if units > 0 {
					words = append(words, fmt.Sprintf("%s %s", englishTens[tens], englishUnits[units]))
				} else {
					words = append(words, englishTens[tens])
				}
			}
		}

		// Append the scale word ("thousand", "million", ...) when present.
		if mega := englishMegas[idx]; mega != "" {
			words = append(words, mega)
		}
	}
	return strings.Join(words, " ")
}
|
#!/bin/bash
# Find your version by running `pip --version`
# Probes common CPython site/dist-packages locations (newest version
# first) and exports the first existing one as PYTHON_LIB.
for lib in /usr/local/lib/python{3.7,3.6,3.5,2.7}/{site,dist}-packages; do
    if [[ -d "$lib" ]]; then
        export PYTHON_LIB="$lib"
        break
    fi
done
|
#!/bin/bash
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fail on the first error and echo every command for CI logs.
set -o errexit
set -o xtrace
# Linting
./buildifier.sh
# -print0 / -0 keeps filenames containing spaces or newlines intact
# (a plain `find | xargs` splits them apart).
find . -name "*.py" -print0 | xargs -0 pylint --disable=R,C
# Bazel build and test
bazel clean --host_force_python=PY2 --curses=no
bazel build --host_force_python=PY2 --curses=no //package_manager:dpkg_parser.par
bazel build --host_force_python=PY2 --curses=no //...
bazel test --host_force_python=PY2 --curses=no --test_output=errors //...
|
import { Component, OnInit } from "@angular/core";
import axios from "axios";
@Component({
  selector: "app-root",
  templateUrl: "./form.component.html",
})
export class FormComponent implements OnInit {
  title = "Car - form";

  // mounted
  ngOnInit(): void {
    this.getTitle();
  }

  // Mirrors the component title into the browser tab.
  getTitle() {
    document.title = this.title;
  }

  /** POSTs a new vendedor (seller) built from the given form fields. */
  createVendedor(
    vendedorNome: string,
    vendedorSobrenome: string,
    vendedorEmail: string
  ) {
    this.postEntity("vendedor", {
      Nome: vendedorNome,
      Sobrenome: vendedorSobrenome,
      Email: vendedorEmail,
    });
  }

  /** POSTs a new marca (brand) built from the given form fields. */
  createMarca(
    marcaNome: string,
    marcaFabricante: string,
    marcaRepresentante: string
  ) {
    this.postEntity("marca", {
      Nome: marcaNome,
      Fabricante: marcaFabricante,
      Representante: marcaRepresentante,
    });
  }

  /**
   * Shared POST helper: the two create* methods previously duplicated the
   * whole axios pipeline; only the endpoint and payload differed.
   */
  private postEntity(resource: string, payload: object) {
    axios
      .post(`https://localhost:5001/api/${resource}`, payload)
      .then((response) => {
        alert("Success!");
      })
      .catch((error) => {
        console.log(error);
        alert("Error, check the console!");
      })
      .finally(() => {
        console.log("finally");
      });
  }
}
|
/// <summary>
/// Looks up a problem by its id in the <c>problems</c> collection;
/// returns null when no problem matches.
/// </summary>
public Problem GetById(string id)
{
    // FirstOrDefault already yields null for an unknown id, so the
    // intermediate variable of the original is unnecessary.
    return problems.FirstOrDefault(p => p.Id == id);
}
#!/bin/sh
# Sends a command to a device through the rtty server's HTTP API and
# prints the result (or the error) parsed from the JSON response.

# Your server's host and port
host=localhost
port=5913
# Your linux device's username and password
username="test"
password="test"
# How long (seconds) to wait for the command result.
# Set it to 0 if you don't care about the results, that is, don't wait.
# Fixed: this variable was never assigned, so the request went out with an
# empty wait parameter and the result check below compared an empty string.
wait=30
devid="test"
cmd="echo"
params='["Hello, Rtty"]'
resp=$(curl "http://$host:$port/cmd/$devid?wait=$wait" -d "{\"cmd\":\"$cmd\", \"params\": $params, \"username\": \"$username\", \"password\": \"$password\"}" 2>/dev/null)
# Fire-and-forget mode: nothing to parse.
[ "$wait" = "0" ] && exit 0
err=$(echo "$resp" | jq -r '.err')
if [ "$err" != "null" ];
then
    msg=$(echo "$resp" | jq -r '.msg')
    echo "err: $err"
    echo "msg: $msg"
else
    # stdout/stderr come back base64-encoded.
    code=$(echo "$resp" | jq -r '.code')
    stdout=$(echo "$resp" | jq -r '.stdout' | base64 -d)
    stderr=$(echo "$resp" | jq -r '.stderr' | base64 -d)
    echo "code: $code"
    echo "stdout: $stdout"
    echo "stderr: $stderr"
fi
|
<reponame>anedyalkov/JS-Applications
// Collects the text of every <li> on the page into one comma-separated
// string and renders it into #result.
function extractText() {
    let result = $('li')
        .map((index, element) => element.textContent)
        .get()
        .join(', ');
    $('#result').text(result);
}
|
<gh_stars>0
# Squares every element of the array and returns the squares in
# ascending order.
def sorted_squares(a)
  a.map { |x| x * x }.sort
end
# Returns a new array with every zero moved to the end, keeping the
# relative order of the non-zero elements.
def move_zeros(a)
  nonzero, zeros = a.partition { |x| x != 0 }
  nonzero + zeros
end
# Returns 1 when the two strings are anagrams of each other, 0 otherwise.
# (Sorted character sequences are equal exactly when the per-character
# counts are equal, which is what the original hash-tally version checked.)
def check_anagrams(a, b)
  a.chars.sort == b.chars.sort ? 1 : 0
end
# Rearranges the string so all consonants come first, followed by all
# vowels, each group keeping its original order. (Kept under its original
# - somewhat misleading - name because reverse_words depends on it.)
def reverse(a)
  vowels, others = a.chars.partition { |c| %w[a e i o u].include?(c) }
  others.join + vowels.join
end
# Returns the most frequent word in the space-separated sentence; on
# ties the word encountered first wins (strict > comparison, matching
# the original behavior).
def find_highest(word)
  counts = Hash.new(0)
  word.split(' ').each { |w| counts[w] += 1 }
  best_word = ''
  best_count = 0
  counts.each do |w, n|
    if n > best_count
      best_word = w
      best_count = n
    end
  end
  best_word
end
# True for nil (treated as vacuously palindromic, as before) and for
# strings that read the same reversed.
def palin(s)
  s.nil? || s == s.reverse
end
# Groups the strings in `a` into anagram classes, preserving first-seen
# order both across groups and within each group.
#
# NOTE(review): this method is destructive - matched elements are removed
# from the CALLER'S array via delete_at. Confirm callers expect that.
def group_anagrams(a)
    final_list = Hash.new
    i = 0
    while i<a.length
        # Seed a new group with the current word.
        final_list[a[i]] = [a[i]]
        j = i+1
        while j<=a.length-1
            value = check_anagrams a[i],a[j]
            if value == 1
                final_list[a[i]].append(a[j])
                # Remove the matched word; step j back so the element that
                # slid into position j is also examined.
                a.delete_at(j)
                j-=1
            end
            j+=1
        end
        i+=1
    end
    # Collect the grouped arrays in insertion order.
    value = []
    final_list.each do | key, values |
        value.append(values)
    end
    return value
end
# Lowercases the sentence, collapses runs of whitespace, applies the
# consonants-then-vowels transform (`reverse`) to every word, and joins
# the transformed words back in reverse order.
def reverse_words(string)
  words = string.downcase.gsub(/\s+/m, ' ').strip.split(' ')
  words.map { |w| reverse(w) }.reverse.join(' ')
end
# Lowercases the text, strips everything except letters, digits and
# spaces, and returns the most frequent remaining word.
def highest_frequency_word(string)
  cleaned = string.downcase.gsub(/[^0-9A-Za-z ]/, '')
  find_highest(cleaned)
end
# True when the string is a palindrome after lowercasing and removing
# every non-alphanumeric character.
#
# Fixed: the original used gsub!, which returns nil when NO substitution
# is made, so any already-clean string (e.g. "abc") became nil and was
# reported as a palindrome.
def palindrome?(string)
  cleaned = string.downcase.gsub(/[^0-9A-Za-z]/, '')
  cleaned == cleaned.reverse
end
# A named drink with a price, able to spell its price out in English
# ("Free", "1 dollar only", "2 dollars and 5 cents only", ...).
class Beverage
    # name:  non-empty, non-nil String
    # price: non-negative number; stored as a 2-decimal formatted String
    def initialize(name, price)
        raise ArgumentError, 'Argument is nil or ""' unless name != nil && name != ''
        raise ArgumentError, 'Argument is nil or <0' unless price != nil && price >= 0
        @name = name
        # NOTE(review): the price is stored as a formatted String, not a
        # number - readers of #price may not expect that.
        @price = '%0.2f' % price
    end
    attr_accessor :name
    attr_accessor :price
    # @return [String]
    # Spells the price out. Relies on the value of the big `if` expression
    # as the method's implicit return value.
    def formatted_price
        res = ''
        res_list = Array.new
        # "12.34" -> ["12", "34"] -> [12, 34] (dollars, cents)
        res_list = @price.to_s.split('.')
        res_list = res_list.map(&:to_i)
        if (res_list[0] == 0 && res_list[1] == 0) || (res_list.count == 1 && res_list[0] == 0)
            res = "Free"
        else
            # Whole dollars only (no cents part, or cents == 0).
            if res_list.count == 1 || (res_list.count == 2 && res_list[1] == 0)
                if res_list[0] > 1
                    res = res_list[0].to_s + ' dollars only'
                else
                    res = res_list[0].to_s + ' dollar only'
                end
            # Cents only.
            elsif res_list[0] == 0 and res_list.count == 2
                if res_list[1] > 1
                    res = res_list[1].to_s + ' cents only'
                else
                    res = res_list[1].to_s + ' cent only'
                end
            # Dollars and cents, joined with "and" when both are present.
            else
                if res_list[0] == 0
                    res = ''
                elsif res_list[0] > 1
                    res = res_list[0].to_s + ' dollars '
                else
                    res = res_list[0].to_s + ' dollar '
                end
                if res_list[1] == 0
                    res = res
                elsif res_list[1] > 1
                    res = res != '' ? res + 'and ' + res_list[1].to_s + ' cents only' : res + res_list[1].to_s + ' cents only'
                else
                    res = res != '' ? res + 'and ' + res_list[1].to_s + ' cent only' : res + res_list[1].to_s + ' cent only'
                end
            end
        end
    end
end
import sys, os, re
from trustyuri.file import FileHasher
def process(args):
    """Rename the given file to embed its trustyuri content hash.

    ``foo.txt`` becomes ``foo.<hash>.txt``; a file without a short
    (up to 20 chars of [A-Za-z0-9_-]) trailing extension becomes
    ``foo.<hash>``.

    :param args: remaining command-line arguments; args[0] is the file path.
    """
    filename = args[0]
    with open(filename, "r") as f:
        hashstr = FileHasher.make_hash(f.read())
    # Single greedy search replaces the original's three regex passes:
    # group(1) requires at least one character before the final dot, which
    # matches the original's `.\.` guard, and greedy matching picks the
    # last dot as the extension separator.
    match = re.search(r'^(.+)(\.[A-Za-z0-9\-_]{0,20})$', filename)
    if match:
        base, ext = match.group(1), match.group(2)
    else:
        base, ext = filename, ""
    os.rename(filename, base + "." + hashstr + ext)
if __name__ == "__main__":
    # Skip the script name; everything else is forwarded to process().
    process(sys.argv[1:])
package com.yoavfranco.wikigame.fragments;
import android.animation.Animator;
import android.animation.ObjectAnimator;
import android.animation.TimeInterpolator;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.app.Fragment;
import android.view.View;
import android.view.animation.Animation;
import com.yoavfranco.wikigame.utils.Utils;
import static com.yoavfranco.wikigame.fragments.BaseScreen.Action.CHECK_BACK;
/**
 * Base class for all game screen fragments: wires the hosting Activity in
 * as a {@link ScreenChanger}, notifies it on resume, and can suppress
 * fragment transition animations globally.
 */
public abstract class BaseScreen extends Fragment {
    /** Host-activity callback used to navigate between screens. */
    public ScreenChanger screenChanger;
    /** Optional callback notified when clear() completes. */
    public OnClearListener onClearListener;
    /** Global switch: when true, transitions are replaced by a zero-length animator. */
    public static boolean disableFragmentsAnimations = false;

    @Override
    public void onAttach(Context context) {
        super.onAttach(context);
        Activity a;
        if (context instanceof Activity){
            a = (Activity) context;
            this.screenChanger = (ScreenChanger) a;
        }
    }

    // Pre-Marshmallow devices invoke only this deprecated overload, so the
    // ScreenChanger wiring is duplicated here behind a version guard.
    @SuppressWarnings("deprecation")
    @Override
    public void onAttach(Activity activity) {
        super.onAttach(activity);
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
            this.screenChanger = (ScreenChanger) activity;
        }
    }

    @Override
    public void onResume() {
        super.onResume();
        Utils.logInfo(this, "Screen Resumed");
        // Lets the host re-evaluate back-navigation state for this screen.
        if (screenChanger != null)
            screenChanger.onScreenChange(this, CHECK_BACK, null);
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        Utils.logDebug(this, "Screen Created");
    }

    /** Resets the screen's state; implementations should call onClearListener when done. */
    public abstract void clear();

    public void setOnClearListener(OnClearListener onClearListener) {
        this.onClearListener = onClearListener;
    }

    @Override
    public Animator onCreateAnimator(int transit, boolean enter, int nextAnim) {
        if (BaseScreen.disableFragmentsAnimations) {
            // NOTE(review): the target here is the boxed Integer 0, so this
            // builds a zero-duration no-op animator - apparently a deliberate
            // trick to suppress the transition; confirm intended.
            ObjectAnimator fadeOut = ObjectAnimator.ofFloat(0, "alpha",
                1f);
            fadeOut.setDuration(0);
            return fadeOut;
        }
        return super.onCreateAnimator(transit, enter, nextAnim);
    }

    /** Navigation/UI actions a screen can report to its ScreenChanger. */
    public enum Action {
        CHECK_BACK,
        QUICKPLAY,
        LEVELS,
        CHALLENGES,
        SHOP,
        HELP,
        LEADERBOARDS,
        FRIENDS,
        CHOOSE_MODE_PRACTICE,
        CHOOSE_MODE_CHALLENGE,
        CHALLENGE_QUICKPLAY,
        SETTINGS, LEVEL_CLICK
    }

    /** Callback fired when a screen finishes clearing its state. */
    public interface OnClearListener {
        void clearDone();
    }

    /** Implemented by the hosting Activity to react to screen events. */
    public interface ScreenChanger {
        void onScreenChange(BaseScreen screen, Action action, Intent extra);
    }
}
|
<reponame>adligo/models_core.adligo.org
package org.adligo.models.core.shared;
/**
 * this is for i18n in GWT or other java apps (which would look up the users
 * language from the local RAM memory cache)
 *
 * Each method returns the localized validation/error message named by it.
 * NOTE(review): some method names carry typos (e.g. getEmaiAddressBadDomainError,
 * getEMailRequiresANonNullAttachemt); they are part of the published interface
 * contract and cannot be renamed without breaking implementers.
 *
 * @author scott
 *
 */
public interface I_ModelsCoreConstants {
    // --- Address validation messages ---
    public String getAddressEmptyCityError();
    public String getAddressEmptyStreetError();
    public String getAddressEmptyPostalError();
    public String getAddressEmptyCountryError();
    public String getAddressCountryCodeWrongSizeError();
    public String getAddressEmptySubCodeError();
    public String getAddressSubCodeTwoBigError();

    // --- Domain name validation messages ---
    public String getDomainNameEmptyError();
    public String getDomainNameNoDotError();
    public String getDomainNameToShortError();
    public String getDomainNameNoSpaceError();
    public String getDomainNameDotCantBeFirst();
    public String getDomainNameDotCantBeLast();
    public String getDomainNameDotCantBeConsecutive();

    // --- Email address validation messages ---
    public String getEmailAddressEmptyError();
    public String getEmailAddressNoAtError();
    public String getEmailAddressNoUserError();
    public String getEmailAddressNoDomainError();
    public String getEmailAddressToShortError();
    public String getEmailAddressNoSpaceError();
    public String getEmaiAddressBadDomainError();

    // --- Email message (sending) validation messages ---
    public String getEMailRequiresAFromAddress();
    public String getEMailRequiresAValidAddress();
    public String getEMailRequiresAValidFromAddress();
    public String getEMailRequiresADestAddress();
    public String getEMailRequiresANonNullAttachemt();
    public String getEMailRequiresAValidAttachemt();

    // --- Organisation validation messages ---
    public String getOrgEmptyNameError();
    public String getOrgEmptyTypeError();

    // --- Person validation messages ---
    public String getPersonNoNameError();
    public String getPersonNoFirstNameError();
    public String getPersonNoMiddleNameError();
    public String getPersonNoLastNameError();
    public String getPersonNoNickNameError();
    public String getPersonMustBeAKnownGenderType();

    // --- Phone number validation messages ---
    public String getPhoneEmptyError();
    public String getPhoneInvalidCharacterError();

    // --- User / group / role validation messages ---
    public String getUserNoUserNameMessage();
    public String getUserNoSpaceInNameMessage();
    public String getUserNoEmptyDomainMessage();
    public String getUserNoEmptyPasswordMessage();
    public String getUserGroupEmptyRoleError();
    public String getUserRelationsEmptyGroupError();
    public String getUserRelationsEmptyRoleError();

    // --- Date range validation messages ---
    public String getEndOfDateRangeMustBeAfterStart();
    public String getStartOfDateRangeMustBeBeforeEnd();
    public String getDateRangeRequiresStartValue();
    public String getDateRangeRequiresEndValue();
}
|
<reponame>junaid1460/native-script-template
import { Component } from "@angular/core";
// Minimal NativeScript-Angular demo: a text field two-way bound to
// `email`, echoed in a label and shown in an alert on button tap.
@Component({
    selector: "my-app",
    template: `
    <ActionBar title="Hello" class="action-bar"></ActionBar>
    <StackLayout>
    <TextField hint="type something" KeyboardType="email" height="50px" [(ngModel)]="email"></TextField>
    <Button text="helloword" height="50px" (tap)="log()"></Button>
    <Label text="{{ email }}"></Label>
    </StackLayout>
    `
    ,
    styles: [`
    StackLayout{
    padding:10px;
    }
    `]
})
export class AppComponent {
    // Your TypeScript logic goes here
    // Two-way bound to the TextField above.
    // NOTE(review): the template's "KeyboardType" attribute is likely meant
    // to be lowercase "keyboardType" - confirm before changing the template.
    email:any
    // Shows the current email value in an alert dialog.
    log(){
        alert(this.email);
    }
}
|
<reponame>claudefauconnet/devisu<filename>deVisu2c/src/com/fauconnet/transform/LdiTransform.java
package com.fauconnet.transform;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.fauconnet.devisu.MongoProxy;
import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;
/**
 * Pivots rows of a MongoDB collection: rows sharing the same id column are
 * merged into a single document whose extra fields are named
 * {@code <idCol>_<keyColumnValue>} with the matching value column's value.
 */
public class LdiTransform extends Object {
    MongoProxy proxy;

    /** Demo entry point: pivots the LDI "produits" collection and prints the result. */
    public static void main(String[] args) {
        String idCol = "NUM_PROD";
        String[][] columnsToPivot = new String[][] { new String[] {"NUM_NOM_COMP", "POURCENT_COMP_PROD"} };
        LdiTransform transformer = new LdiTransform();
        try {
            transformer.proxy = new MongoProxy("localhost", 27017, "LDI");
            List<DBObject> newItems = transformer.pivot("LDI", "produits", null, idCol, Arrays.asList(columnsToPivot));
            for (DBObject newItem : newItems) {
                System.out.println(newItem);
                // transformer.proxy.insert("produits-pivot", newItem);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Pivots the documents of the given collection.
     *
     * @param database       unused here (kept for interface compatibility)
     * @param collection     collection to read from
     * @param query          optional filter; null means "all documents"
     * @param idCol          column identifying the merged output document
     * @param columnsToPivot couples of [keyColumn, valueColumn] to pivot
     * @return one merged document per distinct id value, in first-seen order
     * @throws Exception on MongoDB access failures
     */
    public List<DBObject> pivot(String database, String collection, DBObject query, String idCol, List<String[]> columnsToPivot) throws Exception {
        if (query == null)
            query = new BasicDBObject();
        List<DBObject> data = proxy.getDocuments(collection, query, -1);
        List<String> ids = new ArrayList<String>();
        List<DBObject> newItems = new ArrayList<DBObject>();
        int index = -1;
        for (DBObject item : data) {
            String idVal = "" + item.get(idCol);
            // Reuse the merged document for an already-seen id, otherwise
            // start a new one. The column-copy loop, previously duplicated
            // in both branches, is factored into applyPivotColumns.
            DBObject newItem;
            if ((index = ids.indexOf(idVal)) < 0) {
                ids.add(idVal);
                newItem = new BasicDBObject(idCol, idVal);
                newItems.add(newItem);
            } else {
                newItem = newItems.get(index);
            }
            applyPivotColumns(newItem, item, idCol, columnsToPivot);
        }
        return newItems;
    }

    /**
     * Copies each (keyColumn, valueColumn) couple from the source row onto
     * the merged document. Fixed: the original called toString() on the key
     * BEFORE its null check, so a null key threw NPE instead of being skipped.
     */
    private static void applyPivotColumns(DBObject newItem, DBObject item, String idCol, List<String[]> columnsToPivot) {
        for (String[] colCouple : columnsToPivot) {
            Object keyObj = item.get(colCouple[0]);
            if (keyObj == null)
                continue;
            String key = keyObj.toString();
            Object value = item.get(colCouple[1]);
            newItem.put(idCol + "_" + key, value);
        }
    }
}
|
<gh_stars>1-10
$(function () {
    /* Ratio chart for FASTA match/total ratio. */
    // Inserts a "Rule match/mismatch ratio" bar chart right after `parent`.
    function createFastaCharts(parent, match, total) {
        var container = $('<div class="subresult"><h4>Rule match/mismatch ratio</h4></div>');
        parent.after(container);
        /* Create bar chart. */
        var data = [ {'key':'Match', 'frequency':match/total},
            {'key':'Mismatch', 'frequency': 1.0 - match/total}
        ];
        jQuery.ratioChart(container, data);
    }

    // Table hook: draws the ratio chart for FASTA result tables only.
    // NOTE(review): assumes task.result[0] is [label, matches, total] - confirm.
    jQuery.fastaTable = function fastaTable(table, task) {
        /* Hook to FASTA results only. */
        if (!table.hasClass('result_fasta')) {
            return;
        }
        /* Ratio chart with match/mismatch ratio. */
        createFastaCharts(table, task.result[0][1], task.result[0][2]);
    }
});
|
package controller
import (
"github.com/gin-gonic/gin"
"github.com/keptn/keptn/shipyard-controller/handler"
)
// EvaluationController wires the evaluation HTTP route to its handler.
type EvaluationController struct {
	EvaluationHandler handler.IEvaluationHandler
}
// NewEvaluationController creates an EvaluationController backed by the given
// evaluation handler.
func NewEvaluationController(evaluationHandler handler.IEvaluationHandler) *EvaluationController {
	return &EvaluationController{EvaluationHandler: evaluationHandler}
}
// Inject registers the evaluation endpoint on the given router group:
// POST /project/:project/stage/:stage/service/:service/evaluation
func (controller EvaluationController) Inject(apiGroup *gin.RouterGroup) {
	apiGroup.POST("/project/:project/stage/:stage/service/:service/evaluation", controller.EvaluationHandler.CreateEvaluation)
}
|
<filename>Framework/src/widget/AllAppView.ts
import { ui } from "../ui/layaMaxUI";
import AppView from "./AppView";
import Log from "../utils/Log";
import { Apps } from "../utils/OtherApps";
/**
 * Scrolling list view showing all app icons, auto-scrolling one unit per
 * frame until the user touches it. Instances are pooled via Laya.Pool
 * (see getSelf / onDisable).
 */
export default class AllAppView extends ui.widget.AllAppViewUI {
    constructor() {
        super();
        // Hide the scroll bar skin and wire up the per-cell renderer.
        this.iconLayout.hScrollBarSkin = null;
        this.iconLayout.itemRender = AppView;
        this.iconLayout.renderHandler = new Laya.Handler(this, this.updateItem);
        this.iconLayout.array = [];
        // As soon as the user swipes, stop moving automatically.
        this.iconLayout.on(Laya.Event.MOUSE_MOVE, this, this.mouseMove);
    }
    /** Fetch a pooled instance (creating one if needed) and refresh its apps. */
    public static getSelf(): AllAppView {
        let self: AllAppView = Laya.Pool.getItemByClass("AllAppView", AllAppView);
        self.changeApps();
        return self;
    }
    // Cell renderer: push the bound dataSource into the cell.
    private updateItem(cell, index: number) {
        cell.setData(cell.dataSource);
    }
    // User swiped: cancel all timers on this view (stops the auto-scroll loop).
    private mouseMove() {
        Log.d("用户手指划过");
        Laya.timer.clearAll(this);
    }
    /** Reset scroll position, (re)build the icon list, start auto-scrolling. */
    public changeApps(): void {
        this.iconLayout.scrollBar.value = 0;
        if (this.iconLayout.array == null || this.iconLayout.array.length == 0) {
            // Repeat the icon set 51 times so the list is long enough to scroll.
            let arr = [].concat(Apps.icons);
            for (let i = 0; i < 50; i++) {
                arr = arr.concat(Apps.icons);
            }
            this.iconLayout.array = arr;
        }
        Laya.timer.frameLoop(1, this, this.moveSelect);
    }
    // Advance the scroll position by one unit per frame.
    private moveSelect(): void {
        this.iconLayout.scrollBar.value += 1;
    }
    onEnable() {
        this.changeApps();
    }
    onDisable() {
        // Stop timers and return this instance to the pool for reuse.
        Laya.timer.clearAll(this);
        if (!this.destroyed) {
            Laya.Pool.recover("AllAppView", this);
        }
    }
}
/*
* Copyright (c) 2008, <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name "TwelveMonkeys" nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.twelvemonkeys.image;
import java.awt.image.ReplicateScaleFilter;
/**
* An {@code ImageFilter} class for subsampling images.
* <p/>
* It is meant to be used in conjunction with a {@code FilteredImageSource}
* object to produce subsampled versions of existing images.
*
* @see java.awt.image.FilteredImageSource
*
* @author <a href="mailto:<EMAIL>"><NAME></a>
* @version $Id: //depot/branches/personal/haraldk/twelvemonkeys/release-2/twelvemonkeys-core/src/main/java/com/twelvemonkeys/image/SubsamplingFilter.java#1 $
*/
public class SubsamplingFilter extends ReplicateScaleFilter {

    /** Horizontal subsampling factor (always >= 1). */
    private int subX;
    /** Vertical subsampling factor (always >= 1). */
    private int subY;

    /**
     * Creates a {@code SubsamplingFilter} with the given factors.
     *
     * @param pXSub horizontal subsampling factor
     * @param pYSub vertical subsampling factor
     * @throws IllegalArgumentException if {@code pXSub} or {@code pYSub} is
     *         less than 1.
     */
    public SubsamplingFilter(int pXSub, int pYSub) {
        // The destination size cannot be known yet; it is derived from the
        // source size in setDimensions, so placeholder values go to super.
        super(1, 1);

        if (pXSub < 1 || pYSub < 1) {
            throw new IllegalArgumentException("Subsampling factors must be positive.");
        }

        subX = pXSub;
        subY = pYSub;
    }

    /** {@code ImageFilter} implementation, do not invoke. */
    public void setDimensions(int pWidth, int pHeight) {
        // Ceiling division: every started block of subX/subY source pixels
        // contributes one destination pixel.
        destWidth = (pWidth + subX - 1) / subX;
        destHeight = (pHeight + subY - 1) / subY;
        super.setDimensions(pWidth, pHeight);
    }
}
|
#!/bin/sh
# Example Usage:
# ./run.sh <IMAGE> <AC_HOST> <AC_PORT> <LOG_LEVEL>
# ./run.sh libra_mint:latest ac.dev.aws.hlw3truzy4ls.com 80 info
set -ex
IMAGE="${1:-libra_mint:latest}"
AC_HOST="${2:-172.18.0.13}"
AC_PORT="${3:-8000}"
LOG_LEVEL="${4:-info}"
CONFIGDIR="$(dirname "$0")/../../terraform/validator-sets/dev"
# "|| true" keeps this idempotent when the network already exists.
docker network create --subnet 172.18.0.0/24 testnet || true
# Fixed: the base64 argument is now quoted so a CONFIGDIR containing spaces
# (e.g. when $0 lives under a path with spaces) no longer word-splits.
docker run -e AC_HOST="$AC_HOST" -e AC_PORT="$AC_PORT" -e LOG_LEVEL="$LOG_LEVEL" \
    -v "$PWD/$CONFIGDIR/consensus_peers.config.toml:/opt/libra/etc/consensus_peers.config.toml" \
    -e MINT_KEY="$(base64 "$CONFIGDIR/mint.key")" \
    --network testnet --publish 8080:8000 "$IMAGE"
|
<gh_stars>0
import { NextApiRequest, NextApiResponse } from 'next';
import { apiHandler } from 'lib-server/nc';
import prisma from 'lib-server/prisma';
const { SeedSingleton } = require('../../../prisma/seed.js');
const handler = apiHandler();

// POST: (re-)seed the database, then report success.
handler.post(async (req: NextApiRequest, res: NextApiResponse) => {
  // Fixed: await the seed run so the 200 response is not sent before seeding
  // completes (the handler was already async but never awaited).
  // NOTE(review): assumes SeedSingleton.run() returns a promise — confirm in
  // prisma/seed.js.
  await SeedSingleton.getInstance(prisma).run();
  // Fixed typo in the response key: "sucess" -> "success".
  // NOTE(review): any client reading the old misspelled key must be updated.
  res.status(200).json({ success: true });
});

export default handler;
|
#!/bin/bash
# Render startup.bash.tpl, substituting {{public_key}} with the contents of
# the file named by $1 and {{password}} with the crypt(3) hash of $2.

# mkpassword <plaintext> - print the crypt(3) hash of <plaintext> (fixed salt).
# NOTE(review): the password is interpolated into Python source, so a value
# containing a single quote breaks the snippet — confirm inputs are trusted.
mkpassword() {
	echo "import crypt; print(crypt.crypt('$1', 'hoge'))" | python
}

# Fixed: removed the useless `cat | sed` (sed reads the template directly)
# and quoted "$1"/"$2" so paths/passwords with spaces survive word splitting.
sed "s!{{public_key}}!$(cat "$1")!" ./startup.bash.tpl |
sed "s/{{password}}/$(mkpassword "$2")/"
|
use crypto_markets::fetch_markets;
use crypto_market_type::MarketType;
use serde_json;
/// Fetches the markets for `exchange`/`market_type` and renders them as
/// pretty-printed JSON.
///
/// Returns an error string when fetching fails, when no markets exist for the
/// given pair, or when serialization fails.
fn fetch_and_serialize_markets(exchange: &str, market_type: MarketType) -> Result<String, String> {
    let markets = fetch_markets(exchange, market_type)
        .map_err(|err| format!("Failed to fetch market data: {}", err))?;

    if markets.is_empty() {
        return Err("No markets available for the specified exchange and market type".to_string());
    }

    serde_json::to_string_pretty(&markets)
        .map_err(|err| format!("Failed to serialize market data: {}", err))
}
// Fetch Binance spot markets and print them as pretty JSON; errors go to stderr.
fn main() {
    match fetch_and_serialize_markets("binance", MarketType::Spot) {
        Ok(json_string) => println!("{}", json_string),
        Err(err) => eprintln!("Error: {}", err),
    }
}
<reponame>atomiccoders/adverts-scrapper<filename>recursive-scraper.js
const { sendTest, sendMessage } = require("./sms-api");
const puppeteer = require("puppeteer");
// Self-invoking scraper: walks OLX search result pages recursively, collects
// the (non-promoted) offers and sends an SMS with the number of ads found.
(async () => {
    // Extract items on the page, recursively check the next page in the URL pattern
    const extractItems = async url => {
        // Scrape the data we want
        const page = await browser.newPage();
        await page.goto(url);
        console.log(`Scrapping: ${url}`);
        const itemsOnPage = await page.evaluate(() => {
            // When the search box shows its placeholder ('Szukaj...') the page
            // carried no results; used below as the recursion stop signal.
            let searchParam = document.querySelector('input#search-text').value;
            if (searchParam !== 'Szukaj...')
                return Array.from(document.querySelectorAll("td.offer:not(.promoted) table")).map(offer => ({
                    id: offer.getAttribute('data-id'),
                    title: offer.querySelector("h3 a strong").innerText.trim(),
                    logo: offer.querySelector("a.thumb img") !== null ? offer.querySelector("a.thumb img").src : '',
                    link: offer.querySelector("h3 a").href,
                    price: offer.querySelector('p.price strong') ? offer.querySelector('p.price strong').innerText : 'brak',
                    localization: offer.querySelector('.bottom-cell small:first-child span').innerText,
                    date: offer.querySelector('.bottom-cell small:last-child span').innerText
                }))
            else
                return [];
        });
        const searchFlag = await page.evaluate(() => document.querySelector('input#search-text').value);
        await page.close();
        // Recursively scrape the next page
        if (searchFlag === 'Szukaj...') {
            console.log(`Terminate scrapping on: ${url}`);
            // Terminate if no items exist
            return itemsOnPage
        } else {
            // Go fetch the next page ?page=X+1
            // NOTE(review): baseUrl ends with '/' and the template inserts
            // another '/', producing a double slash — appears tolerated by the
            // site, but confirm.
            const nextPageNumber = parseInt(url.match(/page=(\d+)$/)[1], 10) + 1;
            const nextUrl = `${baseUrl}/${city}/q-${query}/?page=${nextPageNumber}`;
            return itemsOnPage.concat(await extractItems(nextUrl))
        }
    };
    const browser = await puppeteer.launch();
    const baseUrl = "http://olx.pl/";
    const city = "gdansk";
    let query = "ponton";
    // NOTE(review): String.replace only replaces the FIRST space; queries of
    // three or more words keep later spaces — confirm whether intended.
    query = query.replace(" ", "-");
    const firstUrl = `${baseUrl}/${city}/q-${query}/?page=1`;
    const items = await extractItems(firstUrl);
    // Todo: Update database with items
    console.log(items.length, items[items.length - 1]);
    const newAds = items.length;
    const message = {
        from: "Scrapper",
        to: "883543667",
        text: `There are ${newAds} new ads found. A message with details has been sent to your email.`
    };
    sendTest(message);
    // sendMessage(message);
    await browser.close();
})();
// pages/webView/webView.js
Page({
    /**
     * Initial page data.
     */
    data: {
        url:''
    },
    /**
     * Lifecycle hook - page load.
     * Builds the web-view URL from the decoded `options.url`, adding query
     * parameters that identify where the page was opened from; `type=xcx`
     * is always appended.
     */
    onLoad: function (options) {
        if(options.toptype){
            this.setData({
                url:decodeURIComponent(options.url)+'?topType='+ options.toptype+'&type=xcx'
            })
        }else if(options.infoId){
            this.setData({
                url:decodeURIComponent(options.url)+'?infoId='+ options.infoId+'&type=xcx'
            })
        }else{
            this.setData({
                url:decodeURIComponent(options.url)+'?type=xcx'
            })
        }
    },
    // Receives messages posted from the embedded web-view component.
    bindGetMsg(e) {
        console.log('webview:',e.detail)
    },
    /**
     * Lifecycle hook - first render finished.
     */
    onReady: function () {
    },
    /**
     * Lifecycle hook - page shown.
     */
    onShow: function () {
    },
    /**
     * Lifecycle hook - page hidden.
     */
    onHide: function () {
    },
    /**
     * Lifecycle hook - page unloaded.
     */
    onUnload: function () {
    },
    /**
     * Pull-down refresh handler.
     */
    onPullDownRefresh: function () {
    },
    /**
     * Reached-bottom (scroll) handler.
     */
    onReachBottom: function () {
    },
    /**
     * Share handler (top-right corner share button).
     */
    onShareAppMessage: function () {
        return {
            title: '加盟好餐饮,就找餐盟严选!',
        }
    },
})
#!/bin/bash
# SLURM job: runs a single hyper-parameter configuration of the activation-
# function experiment ("cube", seed 1). The #SBATCH lines below are read by
# the scheduler and must not be edited casually.
#SBATCH -J Act_cube_1
#SBATCH --mail-user=eger@ukp.informatik.tu-darmstadt.de
#SBATCH --mail-type=FAIL
#SBATCH -e /work/scratch/se55gyhe/log/output.err.%j
#SBATCH -o /work/scratch/se55gyhe/log/output.out.%j
#SBATCH -n 1     # Number of cores
#SBATCH --mem-per-cpu=2000
#SBATCH -t 23:59:00             # Hours, minutes and seconds, or '#SBATCH -t 10' -only mins
#module load intel python/3.5
# Positional args are consumed by meta.py; their meaning (activation, seed,
# optimizer, depth, dropout, units, learning rate, initializer, embedding)
# is inferred from their values — TODO confirm against meta.py.
python3 /home/se55gyhe/Act_func/progs/meta.py cube 1 sgd 3 0.4202271152785716 125 0.010677356940572693 glorot_normal PE-infersent
|
#!/bin/bash
# Generate `.json` test files.
# Usage: `./scripts/autofix-tests.sh [<syntax name>]`.
# Example: `./scripts/autofix-tests.sh scss`.
printf "\n\
------------------\n\
Generating tests\n\
------------------"
# Default to regenerating every supported syntax.
syntaxes=${@:-css less sass scss}
failed_tests=()
for syntax in $syntaxes; do
    printf "\n\nSyntax: $syntax\n"
    files=$(find ./test/$syntax -name "*.$syntax")
    for file in $files; do
        # Derive the test context: drop the first three path components and
        # the filename.
        context=${file#*/*/*/}
        context=${context%/*}
        # Fixed: "$context" and "$file" are now quoted so paths containing
        # spaces no longer word-split mid-command.
        ./bin/gonzales.js -c "$context" --silent "$file" > "${file%.*}.json"
        if [ $? -ne 0 ]; then
            failed_tests+=("$file")
            # :(  restore the previous .json so a parse failure doesn't leave
            # an empty/partial file behind
            git checkout -- "${file%.*}.json"
            printf "x"
        else
            printf "."
        fi
    done
done
printf "\n"
ft=${#failed_tests[@]}
if [ $ft -ne 0 ]; then
    printf "\nFailed to parse following files:\n"
    for (( i=0; i<$ft; i++ )); do
        printf "${failed_tests[$i]}\n"
    done
fi
|
#ifdef SKY_LIGHT
    // Hemisphere ambient contribution from the sky light.
    Skylight skylight = read_skylight();
    diffuse += light_hemisphere(skylight.direction, skylight.ground, skylight.color, fragment);
#endif
#if defined LIGHTMAP
    // Baked lighting sampled from the second UV channel, stored HDR-encoded.
    vec4 lightmap = sample_material_texture(s_lightmap, fragment.uv2);
    //diffuse += lightmap.rgb;
    diffuse += decodeHDR(lightmap) * PI;
#if defined GI_CONETRACE
    // With a lightmap present only the specular term comes from the GI probe.
    // NOTE(review): `cone` is used here but only declared in the
    // non-LIGHTMAP branch below — confirm it is defined elsewhere when
    // LIGHTMAP and GI_CONETRACE are both set, otherwise this fails to compile.
    specular = trace_specular(s_gi_probe, cone.pos, cone.refl, material.roughness * material.roughness, u_gi_probe_bias) * u_gi_probe_specular;
#endif
#elif defined GI_CONETRACE
    // No lightmap: cone-trace the GI probe for both diffuse and specular.
    ConeStart cone = cone_start(fragment.position, fragment.normal);
    trace_gi_probe(cone, material.roughness, diffuse, specular);
    //gl_FragColor = vec4(gi_probe_debug(fragment.position, 0.0), 1.0);
    //gl_FragColor = vec4(debug_trace_diffuse(s_gi_probe, mul(u_gi_probe_transform, vec4(fragment.normal, 0.0)).xyz), 1.0);
    //return;
#endif
#ifdef AMBIENT_OCCLUSION
    diffuse *= matlit.ao;
#endif
//apply_reflections(specular, ambient);
|
def bubble_sort(List):
    """Sort ``List`` in place using bubble sort and return it.

    The parameter name ``List`` (shadowing ``typing.List``) is kept for
    backward compatibility with keyword callers.

    :param List: mutable sequence of comparable items (mutated in place)
    :return: the same, now sorted, sequence
    """
    n = len(List)
    for i in range(n):
        swapped = False
        # After each pass the largest remaining element has settled at the
        # end, so each inner scan can stop one element earlier.
        for j in range(0, n - i - 1):
            if List[j] > List[j + 1]:
                List[j], List[j + 1] = List[j + 1], List[j]
                swapped = True
        if not swapped:
            # Already sorted: no swaps in a full pass means we can stop early.
            break
    return List


if __name__ == "__main__":
    # Fixed: the original called bubble_sort(List) at module level where no
    # such variable existed, raising NameError on import.
    print(bubble_sort([12, 3, 8, 5, 15, 10, 6]))  # [3, 5, 6, 8, 10, 12, 15]
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
# prepare-for-build.sh - prepare the Docker image for the build
#
set -e

PREFIX=/usr

# Run a command via sudo, feeding the password from $USERPASS on stdin
# (-S reads stdin, -k invalidates cached credentials first).
function sudo_password() {
	echo $USERPASS | sudo -Sk $*
}

# this should be run only on CIs
# Take ownership of the workdir so the unprivileged build can write into it;
# the trailing "|| true" keeps the script alive if the chown fails.
if [ "$CI_RUN" == "YES" ]; then
	sudo_password chown -R $(id -u).$(id -g) $WORKDIR
fi || true
|
#!/bin/bash -e
# Build the CDN asset bundles (ace modules, language worker, config bundles),
# optionally compressed/obfuscated.

# Parse build options passed through to cdn-cli.
while [ "$1" ]; do
    case "$1" in
        --compress)
            COMPRESS_OPTION="--compress"
            ;;
        --obfuscate)
            OBFUSCATE_OPTION="--obfuscate"
            ;;
        *)
            echo Unknown option: $1
            exit 1
            ;;
    esac
    shift
done

# Run from the script's own directory; convert to a Windows path under Cygwin.
cd `dirname $0`
CURDIR=`pwd`
case `uname` in
    *CYGWIN*) CURDIR=`cygpath -w "$CURDIR"`;;
esac
CACHE=$CURDIR/../build
#CDN="echo server.js cdn-cli"
CDN="$CURDIR/../server.js cdn-cli -s standalone --server-config standalone --server-settings standalone --version=standalone --cache $CACHE $COMPRESS_OPTION $OBFUSCATE_OPTION"

# build async loaded ace modules
$CDN --module ace

WORKER=plugins/c9.ide.language.core/worker
echo building worker $WORKER
$CDN --worker $WORKER
echo $CDN --worker $WORKER

# Build each listed config bundle (currently only "default"), including skins.
for CONFIG in "default"; do \
    echo cdn
    echo building config $CONFIG
    $CDN --config $CONFIG --with-skins
done
|
<gh_stars>0
// Auto-generated Doxygen search-index entry for vcdgen.h — do not edit by hand.
var searchData=
[
  ['vcdgen_2eh_1009',['vcdgen.h',['../vcdgen_8h.html',1,'']]]
];
|
#!/usr/bin/env bash
# Install Qt (plus selected modules) via aqtinstall.
# Every setting can be overridden through the environment.

# Set default variables
QT_VERSION="${QT_VERSION:-5.15.0}"
QT_PATH="${QT_PATH:-/opt/Qt}"
QT_HOST="${QT_HOST:-linux}"
QT_TARGET="${QT_TARGET:-desktop}"
QT_MODULES="${QT_MODULES:-qtcharts}"

# Exit immediately if a command exits with a non-zero status
set -e

apt update
apt install python3 python3-pip -y
pip3 install aqtinstall

# QT_MODULES is intentionally unquoted: it may list several modules.
aqt install --outputdir "${QT_PATH}" "${QT_VERSION}" "${QT_HOST}" "${QT_TARGET}" -m ${QT_MODULES}

echo "Remember to export the following to your PATH: ${QT_PATH}/${QT_VERSION}/*/bin"
# Fixed: the printed command previously ended in ":PATH" (missing "$"), which
# would have REPLACED the user's PATH if pasted; it now appends ":\$PATH".
echo "export PATH=$(readlink -e ${QT_PATH}/${QT_VERSION}/*/bin/):\$PATH"
|
/**
* Copyright (c) 2001-2017 <NAME> and Robocode contributors
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://robocode.sourceforge.net/license/epl-v10.html
*/
package tested.robots;
/**
* @author <NAME> (original)
*/
/**
 * Test robot that exercises radar/gun/body turn-rate interaction together with
 * the setAdjust* flags. Each "Turn" block issues turn commands, executes one
 * tick and prints the observed radar turn rate; the expected value is noted in
 * the trailing comment of each block.
 */
public class RadarTurnRateAndSetAdjust extends robocode.AdvancedRobot {
	public void run() {
		// -- Turn 1 --
		setTurnRadarRight(1000);
		executeAndDumpTurnRate();
		// Expected turn rate: max. radar turn rate = 45
		// -- Turn 2 --
		setTurnGunRight(1000);
		executeAndDumpTurnRate();
		// Expected turn rate: max. gun + radar turn rate = 20 + 45 = 65
		// -- Turn 3 --
		setTurnRight(1000);
		executeAndDumpTurnRate();
		// Expected turn rate: max. robot + gun + radar turn rate = 10 + 20 + 45 = 75
		// -- Turn 4 --
		setTurnRadarLeft(1000);
		executeAndDumpTurnRate();
		// Expected turn rate: max. robot + gun - radar turn rate = 10 + 20 - 45 = -15
		// -- Turn 5 --
		setTurnGunLeft(1000);
		executeAndDumpTurnRate();
		// Expected turn rate: max. robot + gun - radar turn rate = 10 - 20 - 45 = -55
		// -- Turn 6 --
		setTurnLeft(1000);
		executeAndDumpTurnRate();
		// Expected turn rate: max. robot + gun - radar turn rate = -10 - 20 - 45 = -75
		// -- Turn 7 --
		setAdjustRadarForGunTurn(false);
		setTurnRight(14);
		setTurnGunRight(15);
		setTurnRadarRight(7);
		executeAndDumpTurnRate();
		// Expected turn rate: robot + gun + radar turn rate = 14 + 15 + 7 = 32
		// -- Turn 8 --
		setAdjustGunForRobotTurn(false);
		setAdjustRadarForRobotTurn(false);
		setAdjustRadarForGunTurn(true);
		setTurnRight(14);
		setTurnGunLeft(15);
		setTurnRadarRight(7);
		executeAndDumpTurnRate();
		// Expected turn rate: robot (max) + radar turn rate (ignoring gun turn rate, but not robot turn rate) = 10 + 7 = 17
		// -- Turn 9 --
		setAdjustGunForRobotTurn(false);
		setAdjustRadarForRobotTurn(true);
		setAdjustRadarForGunTurn(true);
		setTurnRight(14);
		setTurnGunLeft(15);
		setTurnRadarRight(35);
		executeAndDumpTurnRate();
		// Expected turn rate: robot turn rate (ignoring both gun and body turn rate) = 35
		// -- Turn 10 --
		setAdjustGunForRobotTurn(false);
		setAdjustRadarForRobotTurn(false);
		setAdjustRadarForGunTurn(true);
		setTurnRight(14);
		setTurnGunLeft(15);
		setTurnRadarLeft(7);
		executeAndDumpTurnRate();
		// Expected turn rate: robot (max) + radar turn rate (ignoring gun turn rate, but not robot turn rate) = 10 - 7 = 3
		// -- Turn 11 --
		setAdjustGunForRobotTurn(false);
		setAdjustRadarForRobotTurn(true);
		setAdjustRadarForGunTurn(true);
		setTurnRight(4);
		setTurnGunRight(60);
		setTurnRadarLeft(100);
		executeAndDumpTurnRate();
		// Expected turn rate: robot (max) turn rate (ignoring both gun and body turn rate) = -20
		// -- Turn 12 --
		setAdjustGunForRobotTurn(false);
		setAdjustRadarForRobotTurn(false);
		setAdjustRadarForGunTurn(true);
		setTurnRight(Double.POSITIVE_INFINITY);
		setTurnGunRight(Double.POSITIVE_INFINITY);
		setTurnRadarRight(Double.POSITIVE_INFINITY);
		executeAndDumpTurnRate();
		// Expected turn rate: setAdjusts are all ignored, max. robot + gun + radar turn rate = 10 + 20 + 45 = 75
		// -- Turn 13 --
		setAdjustGunForRobotTurn(true);
		setAdjustRadarForRobotTurn(false);
		setAdjustRadarForGunTurn(true);
		setTurnRight(Double.NEGATIVE_INFINITY);
		setTurnGunRight(Double.NEGATIVE_INFINITY);
		setTurnRadarRight(Double.NEGATIVE_INFINITY);
		executeAndDumpTurnRate();
		// Expected turn rate: setAdjusts are all ignored, max. robot + gun + radar turn rate = -10 - 20 - 45 = -75
		// -- Turn 14 --
		setAdjustGunForRobotTurn(true);
		setAdjustRadarForRobotTurn(true);
		setAdjustRadarForGunTurn(true);
		setTurnLeft(Double.NEGATIVE_INFINITY);
		setTurnGunLeft(Double.NEGATIVE_INFINITY);
		setTurnRadarLeft(Double.POSITIVE_INFINITY);
		executeAndDumpTurnRate();
		// Expected turn rate: setAdjusts are all ignored, max. robot + gun + radar turn rate = -(-10) - (-20) - 45 = -15
	}

	/**
	 * Executes one tick and prints, in order: body heading, radar heading
	 * before the tick, radar heading after the tick, then
	 * "time: radar turn rate" (normalized to [-180, 180)).
	 */
	private void executeAndDumpTurnRate() {
		double lastHeading = getRadarHeading();
		System.out.println(getHeading());
		System.out.println(lastHeading);
		execute();
		System.out.println(getRadarHeading());
		double turnRate = robocode.util.Utils.normalRelativeAngleDegrees(getRadarHeading() - lastHeading);
		out.println(getTime() + ": " + turnRate);
	}
}
|
# Determine and print the length of the longest string in the list.
listStrings = ["I", "am", "so", "happy"]

# max over a generator replaces the manual running-maximum loop;
# default=0 covers the (here impossible) empty-list case.
longestStringLen = max((len(word) for word in listStrings), default=0)

print(longestStringLen)
package org.ds.chronos.streams;
import java.util.Iterator;
import org.ds.chronos.api.Temporal;
/**
 * Base class for iterators that are themselves time-stamped: the iterator
 * carries a fixed timestamp assigned at construction and exposes it through
 * the {@code Temporal} contract.
 *
 * @param <T> the temporal element type produced by this iterator
 */
public abstract class TemporalIterator<T extends Temporal> implements Iterator<T>, Temporal {

	/** Timestamp attached to this iterator; immutable after construction. */
	private final long timestamp;

	public TemporalIterator(long creationTimestamp) {
		this.timestamp = creationTimestamp;
	}

	/** Returns the timestamp this iterator was created with. */
	public long getTimestamp() {
		return timestamp;
	}
}
|
<gh_stars>0
#################################################################
# <NAME>, PhD - Imperial College London, 15/09/2020 #
#################################################################
import os
import numpy as np
from numpy import array
import pandas as pd
import math
import glob
#from tqdm import tqdm
from sklearn.neighbors import NearestNeighbors
#import matplotlib.pyplot as plt
# Theta in degrees
def cart2cylc(x, y, z):
    """Convert Cartesian (x, y, z) to cylindrical [r, theta, z].

    theta is in degrees. Fixed: the original used math.atan(y/x), which
    returns the wrong quadrant for x < 0 and raises ZeroDivisionError for
    x == 0; math.atan2(y, x) handles all four quadrants and x == 0.

    :param x, y, z: Cartesian coordinates (scalars)
    :return: [r, theta_degrees, z]
    """
    r = np.sqrt(np.power(x, 2) + np.power(y, 2))
    t = math.atan2(y, x) * (180 / math.pi)
    coord = [r, t, z]
    return coord
# Theta in degrees
def cart2sph(x, y, z):
    """Convert Cartesian (x, y, z) to spherical [r, theta, phi], in degrees.

    theta is the polar angle (from +z), phi the azimuth. Fixed: the original
    computed phi with math.atan(y/x), which is wrong for x < 0 and raises
    ZeroDivisionError for x == 0; math.atan2(y, x) is correct everywhere.
    NOTE(review): r == 0 still raises ZeroDivisionError in acos(z/r), as in
    the original — confirm callers never pass the origin.

    :param x, y, z: Cartesian coordinates (scalars)
    :return: [r, theta_degrees, phi_degrees]
    """
    r = math.sqrt(np.power(x, 2) + np.power(y, 2) + np.power(z, 2))
    t = math.acos(z / r) * (180 / math.pi)
    p = math.atan2(y, x) * (180 / math.pi)
    coord = [r, t, p]
    return coord
def unitvar(x, y, z):
    """Orthogonal projection of (x, y, z) onto the unit-variable basis.

    Returns [u, v, w] where u lies along (1,1,1)/sqrt(3), v along
    (1,1,-2)/sqrt(6) and w along (1,-1,0)/sqrt(2).
    """
    u = (1 / math.sqrt(3)) * (x + y + z)
    v = (1 / math.sqrt(6)) * (x + y - (2 * z))
    w = (1 / math.sqrt(2)) * (x - y)
    return [u, v, w]
# Strain computation
def etens(m_cyl):
    """Compute flattened Lagrangian and Eulerian strain tensors.

    m_cyl: array whose columns 0:3 hold reference (ED) coordinates and
    columns 3:6 deformed (ES) coordinates per point — inferred from the
    caller, TODO confirm.
    Returns an 18-element vector: 3x3 Lagrangian strain flattened, followed
    by the 3x3 Eulerian strain flattened.
    """
    b = m_cyl[:,:3].T
    a = m_cyl[:,3:6].T
    # Least-squares deformation gradient F solving a = F b; pinv with a tiny
    # rcond keeps near-singular point clouds from blowing up.
    X = np.dot(b,b.T)
    X = np.linalg.pinv(X, rcond = 1e-21)
    FF = np.dot(a,np.dot(b.T,X))
    ident = np.eye(3)
    def l1l2(mc1, mc2):
        # Principal stretches of the 2D (x, y) deformation (forward and
        # inverse), returned concatenated: [lambda_F(3?), lambda_G].
        a_n = mc2.T
        b_n = mc1.T
        X_n = np.dot(b_n,b_n.T)
        X_n = np.linalg.pinv(X_n, rcond = 1e-21)
        FF_n = np.dot(a_n,np.dot(b_n.T,X_n))
        GG_n = np.linalg.pinv(FF_n, rcond = 1e-21)
        lambda_F = np.sqrt(abs(np.linalg.eigvals(FF_n)))
        lambda_G = np.sqrt(abs(np.linalg.eigvals(GG_n)))
        lambdabind = np.concatenate([lambda_F, lambda_G], axis=0)
        return lambdabind
    l1 = l1l2(m_cyl[:,:2], m_cyl[:,3:5]) # Calculate the eigenvalues of the 2D deformation (x,y) in the principal direction
    E_L = 0.5*((np.dot(FF.T,FF))-ident) # Lagrangian strain
    E_E = 0.5*(ident-(np.dot(np.linalg.pinv(FF.T),np.linalg.pinv(FF)))) # Eulerian strain: 0.5 * (I - F^-T F^-1)
    E_L[0,0] = ((1/(l1[0]*l1[1]))-(1/(l1[2]*l1[3])))*0.5 # Correct radial principal strain
    E_L = E_L.flatten()
    E_E = E_E.flatten()
    strain = np.concatenate([E_L, E_E], axis=0)
    return strain
path_data = "/mnt/storage/home/mthanaj/cardiac/UKBB_40616/UKBB_test/4DSegment2.0_test_motion_final"
folder = os.listdir(path_data)

# Process the first 10 subjects; for each, 50 motion frames are analyzed.
# Files 0..49 appear to be endo meshes and 50..99 epi meshes (inferred from
# the files[iF] / files[iF+50] pairing below — TODO confirm).
for iP in range(0,10):
    file = os.path.join(os.path.join(path_data,folder[iP],"motion"))
    os.chdir(file)
    txt_files = array(glob.glob("*.txt"))
    files = txt_files[0:100]
    ir=1
    for iF in range(0,50):
        # Step 1 - Call epi and endo, project orthogonally onto the unit variable and bind
        os.chdir(file)
        print(file)
        EDendo = pd.read_csv(files[0,], sep=" ", header=None)
        EDendo.columns = ["x", "y", "z"]
        EDepi = pd.read_csv(files[50,], sep=" ", header=None)
        EDepi.columns = ["x", "y", "z"]
        EDepi_data = array(unitvar(EDepi.iloc[:,0],EDepi.iloc[:,1],EDepi.iloc[:,2])).T
        EDendo_data = array(unitvar(EDendo.iloc[:,0],EDendo.iloc[:,1],EDendo.iloc[:,2])).T
        ED_data = np.concatenate([EDepi_data, EDendo_data], axis=0)
        ED_data= pd.DataFrame(ED_data, columns=["x", "y", "z"])
        ESendo = pd.read_csv(files[iF,], sep=" ", header=None)
        ESendo.columns = ["x", "y", "z"]
        ESepi = pd.read_csv(files[iF+50,], sep=" ", header=None)
        ESepi.columns = ["x", "y", "z"]
        ESepi_data = array(unitvar(ESepi.iloc[:,0],ESepi.iloc[:,1],ESepi.iloc[:,2])).T
        ESendo_data = array(unitvar(ESendo.iloc[:,0],ESendo.iloc[:,1],ESendo.iloc[:,2])).T
        ES_data = np.concatenate([ESepi_data, ESendo_data], axis=0)
        ES_data= pd.DataFrame(ES_data, columns=["x", "y", "z"])
        path_strain = "/mnt/storage/home/mthanaj/cardiac/Experiments_of_Maria/3Dstrain_analysis"
        file_strain = os.path.join(os.path.join(path_strain,folder[iP]))
        os.chdir(file_strain)
        print(file_strain)
        # Step 2 - Find ~50 knn in epi that match with endo for both ED and ES
        # NOTE(review): NearestNeighbors.fit ignores its second argument — the
        # EDepi_data/ESepi_data passed to fit have no effect ("to check!!").
        nbrs1 = NearestNeighbors(n_neighbors=50, algorithm='auto').fit(EDendo_data, EDepi_data) # to check!!
        distances_ed, con_ed_epi = nbrs1.kneighbors(EDepi_data)
        nbrs2 = NearestNeighbors(n_neighbors=50, algorithm='auto').fit(ESendo_data, ESepi_data)
        distances_es, con_es_epi = nbrs2.kneighbors(ESepi_data)
        print(files[iF,])
        # Step 3 - Compute middle surface
        mid_ed = (EDendo_data[con_ed_epi[:,0],:]+EDepi_data)/2
        mid_es = (ESendo_data[con_es_epi[:,0],:]+ESepi_data)/2
        # Running (exponentially weighted) average over the remaining 49
        # neighbours — NOTE(review): this weights later neighbours more
        # heavily than a plain mean; confirm intended ("to check!!").
        for iEx in range(0,49):
            mid_ed = (mid_ed+(EDendo_data[con_ed_epi[:,iEx],:]+EDepi_data)/2)/2 # to check!!
            mid_es = (mid_es+(ESendo_data[con_es_epi[:,iEx],:]+ESepi_data)/2)/2
            continue
        mid_ed = pd.DataFrame(mid_ed, columns=["x","y","z"],)
        mid_es = pd.DataFrame(mid_es, columns=["x","y","z"],)
        ED_all = pd.concat([ED_data, mid_ed],axis=0)
        ES_all = pd.concat([ES_data, mid_es],axis=0)
        # Step 4 - Transform ED_all from cartesian to cylindrical coordinates
        ED_datan = np.zeros((len(ED_all.iloc[:,0]),3))
        ES_datan = np.zeros((len(ES_all.iloc[:,0]),3))
        for iE in range (0,(len(ED_all.iloc[:,0]))):
            ED_datan[iE,:] = array(cart2cylc(ED_all.iloc[iE,0],ED_all.iloc[iE,1],ED_all.iloc[iE,2]))
            ES_datan[iE,:] = array(cart2cylc(ES_all.iloc[iE,0],ES_all.iloc[iE,1],ES_all.iloc[iE,2]))
            continue
        # Slice the cylindrical point cloud back into epi / endo / middle parts.
        ED_epi = ED_datan[0:(len(EDepi_data[:,0])),:]
        ED_endo = ED_datan[(len(EDepi_data[:,0])):(len(ED_data.iloc[:,0])),:]
        ES_epi = ES_datan[0:(len(ESepi_data[:,0])),:]
        ES_endo = ES_datan[(len(ESepi_data[:,0])):(len(ES_data.iloc[:,0])),:]
        mid_edn = ED_datan[(len(ED_data.iloc[:,0])):(len(ED_all.iloc[:,0])),:]
        mid_esn = ES_datan[(len(ES_data.iloc[:,0])):(len(ES_all.iloc[:,0])),:]
        # Step 5 - Compute 1% of all data points in LV and find the 1% knn in ED and ES surface get the knns only for the middle surface
        sc_1 = round(1*len(ED_all.iloc[:,0])/100)
        # find the sc_1 knn in middle surface for ES
        nPoints = np.arange(len(ED_epi[:,0]))
        nbrs3 = NearestNeighbors(n_neighbors=sc_1, algorithm='auto').fit(ES_datan[0:(len(ES_data.iloc[:,0])),:], mid_esn)
        distances_esm, con_es = nbrs3.kneighbors(mid_esn)
        attach_str = np.zeros((len(EDepi_data[:,0]),18))
        # Per mid-surface point: local displacement magnitudes -> strain tensor.
        for iN in range(0,(len(EDepi_data[:,0]))):
            diff_ed = abs(mid_edn[iN,:] - ED_datan[con_es[iN,:],:])
            diff_es = abs(mid_esn[iN,:] - ES_datan[con_es[iN,:],:])
            # Step 6 - create an mxlist with nPoint=nrow(mid_surf) and 1x6rows-1:sc_1columns for each point
            mxlist = np.concatenate([diff_ed,diff_es], axis =1)
            # Step 6 - Compute strain
            attach_str[iN,:] = etens(mxlist)
            continue
        # Get only the surface with the cartesian coordinates
        attach_ed = EDepi_data
        attach_es = ESepi_data
        #attach_str_new = np.zeros((len(EDepi_data[:,0]),len(attach_str[0,:])))
        #for iN in range(0,len(attach_str[0,:])):
        #kde = gaussian_kde(attach_str[:,0], bw_method=0.2)
        #attach_str_n = kde.evaluate(nPoints)
        # continue
        # Assemble the per-point phenotype table and write it out per frame.
        neopheno = np.concatenate([ED_epi, ES_epi, attach_ed, attach_es, attach_str], axis=1)
        neopheno = pd.DataFrame(neopheno)
        neopheno.columns = ["EDx", "EDy", "EDz","ESx", "ESy", "ESz","EDcx", "EDcy", "EDcz","EScx", "EScy", "EScz",
                            "ELRR","ELRT","ELRZ","ELTR","ELTT","ELTZ","ELZR","ELZT","ELZZ","EERR","EERT","EERZ","EETR",
                            "EETT","EETZ","EEZR","EEZT","EEZZ"]
        neopheno.to_csv(os.path.join("middle_atlas/neopheno_"+str(ir)+".txt"), index=False, header=True)
        #plt.plot(nPoints, attach_str[:,0])
        #if not os.path.exists("images"):
        #    os.mkdir("images")
        #plt.savefig("images/Err.png")
        print(ir)
        ir+=1
        continue
    continue
"""
# plot the full ES mesh
Data = ES_all
import plotly.graph_objects as go
ax = dict(title_text = "",
showgrid = False,
zeroline = False,
showline = False,
showticklabels = False,
showbackground= False)
fig = go.Figure(data=[go.Scatter3d(
x=Data.x, y=Data.y, z=Data.z,
mode='markers',
marker=dict(
size=0.6,cauto=False,
color="#990000"))])
fig.show()
#if not os.path.exists("images"):
# os.mkdir("images")
fig.write_html("images/plot_mid.html", auto_open=True)
"""
|
#!/bin/bash
# Build the U-Boot startup scripts (production, dev-USB, NFS) for the
# opencs18 board by running gen-bootscript.sh over each template.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SCRIPT_PATH=${BR2_EXTERNAL_OPENCS18_PATH}/board/opencs18/v1
echo Building production startup script
${DIR}/gen-bootscript.sh
echo Building dev startup script
${DIR}/gen-bootscript.sh ${SCRIPT_PATH}/dev-usb-bootscript.raw ${BINARIES_DIR}/dev-recovery.scr
echo Building NFS startup script
# the following substitution doesn't work yet. #ip and nfsroot is hardcoded
cp $SCRIPT_PATH/nfs-bootscript.tmpl $SCRIPT_PATH/nfs-bootscript.raw
sed -i "s/<ip>/${BR2_PACKAGE_OPENCS18_NFS_TARGET_IP_ADDRESS}/g" $SCRIPT_PATH/nfs-bootscript.raw
# NOTE(review): <nfsroot> is replaced with the same IP-address variable as
# <ip> — confirm it should not use a dedicated NFS-root path variable.
sed -i "s/<nfsroot>/${BR2_PACKAGE_OPENCS18_NFS_TARGET_IP_ADDRESS}/g" $SCRIPT_PATH/nfs-bootscript.raw
${DIR}/gen-bootscript.sh ${SCRIPT_PATH}/nfs-bootscript.raw ${BINARIES_DIR}/nfs-recovery.scr
#grep -q "GADGET_SERIAL" "${TARGET_DIR}/etc/inittab" \
#	|| echo '/dev/ttyGS0::respawn:/sbin/getty -L /dev/ttyGS0 0 vt100 # GADGET_SERIAL' >> "${TARGET_DIR}/etc/inittab"
#grep -q "ubi0:persist" "${TARGET_DIR}/etc/fstab" \
#	|| echo 'ubi0:persist /root ubifs defaults 0 0' >> "${TARGET_DIR}/etc/fstab"
|
/*
* Copyright (c) 2018 https://www.reactivedesignpatterns.com/
*
* Copyright (c) 2018 https://rdp.reactiveplatform.xyz/
*
*/
package chapter03;
// 代码清单3-5
// Referential transparency: allowing substitution of precomputed values
// #snip
/**
 * Demonstrates referential transparency: {@link #getRoot()} lazily computes
 * the square root of the wrapped value once, then returns the cached result —
 * callers cannot distinguish the cached value from a fresh computation.
 */
public class Rooter {
  private final double value;
  private Double cachedRoot = null;

  public Rooter(double value) {
    this.value = value;
  }

  /** Returns the wrapped value. */
  public double getValue() {
    return value;
  }

  /** Returns sqrt(value), computing it on first use and caching thereafter. */
  public double getRoot() {
    if (cachedRoot == null) {
      cachedRoot = Math.sqrt(value);
    }
    return cachedRoot;
  }
}
// #snip
|
#!/bin/bash
# du -sBk target and sort the result
# Summarize disk usage of every entry directly under the target directory,
# sorted largest-first, with K/M/G units, paged through less.
target='.'
# Optional first argument overrides the target directory.
[ -n "$1" ] && target="$1"
#find "$target" -maxdepth 1 -exec du -sh '{}' \;|sort -nr|column -t
find "$target" -maxdepth 1 -exec du -s '{}' \;|sort -nr|
awk 'BEGIN{ G=1024*1024;M=1024 }
{
if($1>G)
printf("%5.0f%-10s %s\n",$1/G,"G",$2)
else if ($1>M)
{ printf("%5.0f%-10s %s\n",$1/M,"M",$2) }
else
#右对齐,占5格 左对齐,占10格
printf("%5.0f%-10s %s\n",$1,"K",$2)
}'|
less
|
<gh_stars>1-10
# Shindo tests for the VBDs (virtual block devices) collection of the
# XenServer compute provider.
# NOTE(review): assumes the test XenServer exposes at least one VBD —
# otherwise the non-empty assertion fails.
Shindo.tests('Fog::Compute[:xenserver] | VBDs collection', ['xenserver']) do
  conn = Fog::Compute[:xenserver]
  tests('The vbds collection') do
    vbds = conn.vbds.all
    test('should not be empty') { !vbds.empty? }
    test('should be a kind of Fog::Compute::XenServer::Vbds') { vbds.kind_of? Fog::Compute::XenServer::Vbds }
    tests('should be able to reload itself').succeeds { vbds.reload }
    tests('should be able to get a model') do
      # Fetch a single VBD model by its opaque reference.
      tests('by reference').succeeds {
        vbds.get(vbds.first.reference).is_a? Fog::Compute::XenServer::VBD
      }
    end
  end
end
|
package com.apirest.apirest.service.impl;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.repository.CrudRepository;
import org.springframework.stereotype.Service;
import com.apirest.apirest.commons.GenericServiceImpl;
import com.apirest.apirest.dao.api.PersonaDaoAPI;
import com.apirest.apirest.model.Persona;
import com.apirest.apirest.service.api.PersonaServiceAPI;
@Service
public class PersonaServiceImpl extends GenericServiceImpl<Persona, Long> implements PersonaServiceAPI {

	@Autowired
	private PersonaDaoAPI personaDaoAPI;

	/**
	 * Supplies the Persona DAO to the generic CRUD service base class, which
	 * implements the common operations on top of it.
	 */
	@Override
	public CrudRepository<Persona, Long> getDao() {
		return personaDaoAPI;
	}
}
|
public class Main {

    /**
     * Returns the element of {@code arr} with the highest frequency. Ties are
     * broken in favor of the element that first reaches the winning count in
     * array order (same tie-break as the original O(n^2) implementation).
     *
     * @param arr non-empty array of ints
     * @return the most frequent value
     * @throws IllegalArgumentException if {@code arr} is null or empty (the
     *         original crashed with ArrayIndexOutOfBoundsException)
     */
    public static int findMostFrequent(int[] arr) {
        if (arr == null || arr.length == 0) {
            throw new IllegalArgumentException("arr must be non-empty");
        }
        // Count every value once: O(n) instead of rescanning per element.
        java.util.Map<Integer, Integer> counts = new java.util.HashMap<>();
        for (int num : arr) {
            counts.merge(num, 1, Integer::sum);
        }
        // Scan in array order so the first element to reach the maximal count
        // wins, matching the original behavior.
        int bestNum = arr[0];
        int bestFreq = 0;
        for (int num : arr) {
            int freq = counts.get(num);
            if (freq > bestFreq) {
                bestFreq = freq;
                bestNum = num;
            }
        }
        return bestNum;
    }

    public static void main(String[] args) {
        int[] arr = {1, 2, 2, 4, 4, 5};
        System.out.println("The number with most frequency is: " + findMostFrequent(arr));
    }
}
#!/bin/sh
#
# builder_common.sh
#
# part of pfSense (https://www.pfsense.org)
# Copyright (c) 2004-2013 BSD Perimeter
# Copyright (c) 2013-2016 Electric Sheep Fencing
# Copyright (c) 2014-2021 Rubicon Communications, LLC (Netgate)
# All rights reserved.
#
# FreeSBIE portions of the code
# Copyright (c) 2005 Dario Freni
# and copied from FreeSBIE project
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sanity check: refuse to run with an unset or root IMAGES_FINAL_DIR, since
# later build steps populate/remove files under it.
if [ -z "${IMAGES_FINAL_DIR}" -o "${IMAGES_FINAL_DIR}" = "/" ]; then
	echo "IMAGES_FINAL_DIR is not defined"
	print_error_pfS
fi

# Load filemon; failure (e.g. already loaded) is silently ignored.
kldload filemon >/dev/null 2>&1
# lc <string> - echo <string> lowercased.
# Fixed idiom: '[[:upper:]]' passed literal '[' and ']' characters around the
# POSIX class (harmless, since they mapped to themselves, but wrong syntax);
# the correct class spelling for tr is '[:upper:]'.
lc() {
	echo "${1}" | tr '[:upper:]' '[:lower:]'
}
# Record the latest commit hash and author of the builder tree in
# CURRENT_COMMIT/CURRENT_AUTHOR and persist the hash to
# $SCRATCHDIR/build_commit_info.txt.
git_last_commit() {
	export CURRENT_COMMIT=$(git -C ${BUILDER_ROOT} log -1 --format='%H')
	export CURRENT_AUTHOR=$(git -C ${BUILDER_ROOT} log -1 --format='%an')
	echo ">>> Last known commit $CURRENT_AUTHOR - $CURRENT_COMMIT"
	echo "$CURRENT_COMMIT" > $SCRATCHDIR/build_commit_info.txt
}
# Create core pkg repository
# Builds a pkg(8) repository from the staged core packages in
# ${CORE_PKG_REAL_PATH}/All, then publishes it under ${CORE_PKG_PATH}
# through a poudriere-style ".latest" symlink layout so rsync'd snapshot
# repositories are not broken mid-update. No-op when nothing is staged yet.
core_pkg_create_repo() {
if [ ! -d "${CORE_PKG_REAL_PATH}/All" ]; then
return
fi
############ ATTENTION ##############
#
# For some reason pkg-repo fail without / in the end of directory name
# so removing it will break command
#
# https://github.com/freebsd/pkg/issues/1364
#
echo -n ">>> Creating core packages repository... "
if pkg repo -q "${CORE_PKG_REAL_PATH}/"; then
echo "Done!"
else
echo "Failed!"
print_error_pfS
fi
# Use the same directory structure as poudriere does to avoid
# breaking snapshot repositories during rsync
ln -sf $(basename ${CORE_PKG_REAL_PATH}) ${CORE_PKG_PATH}/.latest
ln -sf .latest/All ${CORE_PKG_ALL_PATH}
ln -sf .latest/digests.txz ${CORE_PKG_PATH}/digests.txz
ln -sf .latest/meta.conf ${CORE_PKG_PATH}/meta.conf
ln -sf .latest/meta.txz ${CORE_PKG_PATH}/meta.txz
ln -sf .latest/packagesite.txz ${CORE_PKG_PATH}/packagesite.txz
}
# Create core pkg (base, kernel)
# Stage one core package by invoking create_core_pkg.sh.
#   $1 - template name under ${BUILDER_TOOLS}/templates/core_pkg/
#   $2 - flavor suffix (may be empty)
#   $3 - package version
#   $4 - root directory whose contents are packaged
#   $5 - find(1) start path used to select files
#   $6 - filename filter pattern (optional)
# Aborts the build via print_error_pfS on failure.
core_pkg_create() {
local _template="${1}"
local _flavor="${2}"
local _version="${3}"
local _root="${4}"
local _findroot="${5}"
local _filter="${6}"
local _template_path=${BUILDER_TOOLS}/templates/core_pkg/${_template}
# Use default pkg repo to obtain ABI and ALTABI
local _abi=$(sed -e "s/%%ARCH%%/${TARGET_ARCH}/g" \
${PKG_REPO_DEFAULT%%.conf}.abi)
local _altabi_arch=$(get_altabi_arch ${TARGET_ARCH})
local _altabi=$(sed -e "s/%%ARCH%%/${_altabi_arch}/g" \
${PKG_REPO_DEFAULT%%.conf}.altabi)
${BUILDER_SCRIPTS}/create_core_pkg.sh \
-t "${_template_path}" \
-f "${_flavor}" \
-v "${_version}" \
-r "${_root}" \
-s "${_findroot}" \
-F "${_filter}" \
-d "${CORE_PKG_REAL_PATH}/All" \
-a "${_abi}" \
-A "${_altabi}" \
|| print_error_pfS
}
# This routine will output that something went wrong
# Prints a prominent failure banner (one line to stderr), points at the
# log file when LOGFILE is set, then kills the current shell and exits.
print_error_pfS() {
	echo
	echo "####################################"
	echo "Something went wrong, check errors!" >&2
	echo "####################################"
	echo
	echo "NOTE: a lot of times you can run './build.sh --clean-builder' to resolve."
	echo
	if [ -n "${LOGFILE}" ] && [ -f "${LOGFILE}" ]; then
		echo "Log saved on ${LOGFILE}"
		echo
	fi
	# Terminate the whole (possibly sourced) shell, not just this function.
	kill $$
	exit 1
}
# This routine will verify that the kernel has been
# installed OK to the staging area.
#   $1 - kernel staging root (expects $1/boot/kernel/kernel.gz)
# Aborts via print_error_pfS when the kernel is missing or implausibly
# small. Fix: the error messages previously printed "$1/boot/kernel.gz"
# while the path actually checked is "$1/boot/kernel/kernel.gz".
ensure_kernel_exists() {
	if [ ! -f "$1/boot/kernel/kernel.gz" ]; then
		echo ">>> ERROR: Could not locate $1/boot/kernel/kernel.gz"
		print_error_pfS
	fi
	KERNEL_SIZE=$(stat -f "%z" "$1/boot/kernel/kernel.gz")
	# 3500 bytes is a sanity floor; a real compressed kernel is far larger.
	if [ "$KERNEL_SIZE" -lt 3500 ]; then
		echo ">>> ERROR: Kernel $1/boot/kernel/kernel.gz appears to be smaller than it should be: $KERNEL_SIZE"
		print_error_pfS
	fi
}
# Compose the canonical core-package name: <product>-<component>-<version>.
#   $1 - component name (e.g. "base", "kernel-GENERIC")
get_pkg_name() {
	printf '%s\n' "${PRODUCT_NAME}-${1}-${CORE_PKG_VERSION}"
}
# This routine builds all related kernels
# For every config listed in $BUILD_KERNELS: build, stage, and package the
# kernel as two core packages -- kernel-<name> and kernel-debug-<name>
# (the latter holding the .debug symbol files) -- then remove the staging
# directory.
build_all_kernels() {
# Set KERNEL_BUILD_PATH if it has not been set
if [ -z "${KERNEL_BUILD_PATH}" ]; then
KERNEL_BUILD_PATH=$SCRATCHDIR/kernels
echo ">>> KERNEL_BUILD_PATH has not been set. Setting to ${KERNEL_BUILD_PATH}!"
fi
# Start every run from a clean kernel staging tree.
[ -d "${KERNEL_BUILD_PATH}" ] \
&& rm -rf ${KERNEL_BUILD_PATH}
# Build embedded kernel
for BUILD_KERNEL in $BUILD_KERNELS; do
unset KERNCONF
unset KERNEL_DESTDIR
unset KERNEL_NAME
# Exported for the buildkernel/installkernel helpers invoked below
# (defined elsewhere in the builder; presumably they read these --
# not visible in this file chunk).
export KERNCONF=$BUILD_KERNEL
export KERNEL_DESTDIR="$KERNEL_BUILD_PATH/$BUILD_KERNEL"
export KERNEL_NAME=${BUILD_KERNEL}
LOGFILE="${BUILDER_LOGS}/kernel.${KERNCONF}.${TARGET}.log"
echo ">>> Building $BUILD_KERNEL kernel." | tee -a ${LOGFILE}
# Skip the slow build when NO_BUILDKERNEL is set and a package for this
# kernel already exists in the repo staging area.
if [ -n "${NO_BUILDKERNEL}" -a -f "${CORE_PKG_ALL_PATH}/$(get_pkg_name kernel-${KERNEL_NAME}).txz" ]; then
echo ">>> NO_BUILDKERNEL set, skipping build" | tee -a ${LOGFILE}
continue
fi
buildkernel
echo ">>> Staging $BUILD_KERNEL kernel..." | tee -a ${LOGFILE}
installkernel
ensure_kernel_exists $KERNEL_DESTDIR
echo ">>> Creating pkg of $KERNEL_NAME-debug kernel to staging area..." | tee -a ${LOGFILE}
# Package debug symbols first, then drop ./usr from the staged tree so
# they do not end up inside the regular kernel package.
core_pkg_create kernel-debug ${KERNEL_NAME} ${CORE_PKG_VERSION} ${KERNEL_DESTDIR} \
"./usr/lib/debug/boot" \*.debug
rm -rf ${KERNEL_DESTDIR}/usr
echo ">>> Creating pkg of $KERNEL_NAME kernel to staging area..." | tee -a ${LOGFILE}
core_pkg_create kernel ${KERNEL_NAME} ${CORE_PKG_VERSION} ${KERNEL_DESTDIR} "./boot/kernel ./boot/modules"
rm -rf $KERNEL_DESTDIR 2>&1 1>/dev/null
done
}
# Install the kernel package named by $1 into ${FINAL_CHROOT_DIR} and mark
# it vital so pkg refuses casual removal.
#   $1 - kernel config name (required)
#   $2 - when non-empty, suppresses copying the kernel .txz into the
#        image's /pkgs folder (unless INSTALL_EXTRA_KERNELS is set)
# Also copies any INSTALL_EXTRA_KERNELS packages into /pkgs for later
# installation from the running system.
install_default_kernel() {
if [ -z "${1}" ]; then
echo ">>> ERROR: install_default_kernel called without a kernel config name"| tee -a ${LOGFILE}
print_error_pfS
fi
export KERNEL_NAME="${1}"
echo -n ">>> Installing kernel to be used by image ${KERNEL_NAME}..." | tee -a ${LOGFILE}
# Copy kernel package to chroot, otherwise pkg won't find it to install
if ! pkg_chroot_add ${FINAL_CHROOT_DIR} kernel-${KERNEL_NAME}; then
echo ">>> ERROR: Error installing kernel package $(get_pkg_name kernel-${KERNEL_NAME}).txz" | tee -a ${LOGFILE}
print_error_pfS
fi
# Set kernel pkg as vital to avoid user end up removing it for any reason
pkg_chroot ${FINAL_CHROOT_DIR} set -v 1 -y $(get_pkg_name kernel-${KERNEL_NAME})
# Sanity check: the chroot must contain a bootable kernel after install.
if [ ! -f $FINAL_CHROOT_DIR/boot/kernel/kernel.gz ]; then
echo ">>> ERROR: No kernel installed on $FINAL_CHROOT_DIR and the resulting image will be unusable. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
mkdir -p $FINAL_CHROOT_DIR/pkgs
if [ -z "${2}" -o -n "${INSTALL_EXTRA_KERNELS}" ]; then
cp ${CORE_PKG_ALL_PATH}/$(get_pkg_name kernel-${KERNEL_NAME}).txz $FINAL_CHROOT_DIR/pkgs
if [ -n "${INSTALL_EXTRA_KERNELS}" ]; then
for _EXTRA_KERNEL in $INSTALL_EXTRA_KERNELS; do
_EXTRA_KERNEL_PATH=${CORE_PKG_ALL_PATH}/$(get_pkg_name kernel-${_EXTRA_KERNEL}).txz
if [ -f "${_EXTRA_KERNEL_PATH}" ]; then
echo -n ". adding ${_EXTRA_KERNEL_PATH} on image /pkgs folder"
cp ${_EXTRA_KERNEL_PATH} $FINAL_CHROOT_DIR/pkgs
else
echo ">>> ERROR: Requested kernel $(get_pkg_name kernel-${_EXTRA_KERNEL}).txz was not found to be put on image /pkgs folder!"
print_error_pfS
fi
done
fi
fi
echo "Done." | tee -a ${LOGFILE}
unset KERNEL_NAME
}
# This builds FreeBSD (make buildworld)
# Imported from FreeSBIE
# Runs buildworld once, then installs the world twice: into
# ${INSTALLER_CHROOT_DIR} (with bsdinstall bits) and ${STAGE_CHROOT_DIR}
# (without). Afterwards builds a few extra tools with the cross compiler
# taken from the obj tree. Skipped entirely when NO_BUILDWORLD is set.
make_world() {
LOGFILE=${BUILDER_LOGS}/buildworld.${TARGET}
echo ">>> LOGFILE set to $LOGFILE." | tee -a ${LOGFILE}
if [ -n "${NO_BUILDWORLD}" ]; then
echo ">>> NO_BUILDWORLD set, skipping build" | tee -a ${LOGFILE}
return
fi
echo ">>> $(LC_ALL=C date) - Starting build world for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/build_freebsd.sh -K -s ${FREEBSD_SRC_DIR} \
|| print_error_pfS
echo ">>> $(LC_ALL=C date) - Finished build world for ${TARGET} architecture..." | tee -a ${LOGFILE}
LOGFILE=${BUILDER_LOGS}/installworld.${TARGET}
echo ">>> LOGFILE set to $LOGFILE." | tee -a ${LOGFILE}
[ -d "${INSTALLER_CHROOT_DIR}" ] \
|| mkdir -p ${INSTALLER_CHROOT_DIR}
echo ">>> Installing world with bsdinstall for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/install_freebsd.sh -i -K \
-s ${FREEBSD_SRC_DIR} \
-d ${INSTALLER_CHROOT_DIR} \
|| print_error_pfS
# Copy additional installer scripts
install -o root -g wheel -m 0755 ${BUILDER_TOOLS}/installer/*.sh \
${INSTALLER_CHROOT_DIR}/root
# XXX set root password since we don't have nullok enabled
pw -R ${INSTALLER_CHROOT_DIR} usermod root -w yes
echo ">>> Installing world without bsdinstall for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/install_freebsd.sh -K \
-s ${FREEBSD_SRC_DIR} \
-d ${STAGE_CHROOT_DIR} \
|| print_error_pfS
# Use the builder cross compiler from obj to produce the final binary.
BUILD_CC="${MAKEOBJDIRPREFIX}${FREEBSD_SRC_DIR}/${TARGET}.${TARGET_ARCH}/tmp/usr/bin/cc"
[ -f "${BUILD_CC}" ] || print_error_pfS
# XXX It must go to the scripts
[ -d "${STAGE_CHROOT_DIR}/usr/local/bin" ] \
|| mkdir -p ${STAGE_CHROOT_DIR}/usr/local/bin
makeargs="CC=${BUILD_CC} DESTDIR=${STAGE_CHROOT_DIR}"
echo ">>> Building and installing crypto tools and athstats for ${TARGET} architecture... (Starting - $(LC_ALL=C date))" | tee -a ${LOGFILE}
(script -aq $LOGFILE make -C ${FREEBSD_SRC_DIR}/tools/tools/crypto ${makeargs} clean all install || print_error_pfS;) | egrep '^>>>' | tee -a ${LOGFILE}
# XXX FIX IT
# (script -aq $LOGFILE make -C ${FREEBSD_SRC_DIR}/tools/tools/ath/athstats ${makeargs} clean all install || print_error_pfS;) | egrep '^>>>' | tee -a ${LOGFILE}
echo ">>> Building and installing crypto tools and athstats for ${TARGET} architecture... (Finished - $(LC_ALL=C date))" | tee -a ${LOGFILE}
# gnid (hardware ID tool) is built only for the pfSense product when its
# repo has been configured; installed into both chroots.
if [ "${PRODUCT_NAME}" = "pfSense" -a -n "${GNID_REPO_BASE}" ]; then
echo ">>> Building gnid... " | tee -a ${LOGFILE}
(\
cd ${GNID_SRC_DIR} && \
make \
CC=${BUILD_CC} \
INCLUDE_DIR=${GNID_INCLUDE_DIR} \
LIBCRYPTO_DIR=${GNID_LIBCRYPTO_DIR} \
clean gnid \
) || print_error_pfS
install -o root -g wheel -m 0700 ${GNID_SRC_DIR}/gnid \
${STAGE_CHROOT_DIR}/usr/sbin \
|| print_error_pfS
install -o root -g wheel -m 0700 ${GNID_SRC_DIR}/gnid \
${INSTALLER_CHROOT_DIR}/usr/sbin \
|| print_error_pfS
fi
unset makeargs
}
# This routine creates a ova image that contains
# a ovf and vmdk file. These files can be imported
# right into vmware or virtual box.
# (and many other emulation platforms)
# http://www.vmware.com/pdf/ovf_whitepaper_specification.pdf
# Pipeline: stage area -> UFS filesystem image (via md(4)) -> GPT raw disk
# (mkimg) -> vmdk (vmdktool) -> tar'ed together with the OVF descriptor
# into ${OVAPATH}. Intermediate artifacts are deleted as soon as possible.
create_ova_image() {
# XXX create a .ovf php creator that you can pass:
# 1. populatedSize
# 2. license
# 3. product name
# 4. version
# 5. number of network interface cards
# 6. allocationUnits
# 7. capacity
# 8. capacityAllocationUnits
LOGFILE=${BUILDER_LOGS}/ova.${TARGET}.log
local _mntdir=${OVA_TMP}/mnt
if [ -d "${_mntdir}" ]; then
local _dev
# XXX Root cause still didn't found but it doesn't umount
# properly on looped builds and then require this extra
# check
while true; do
_dev=$(mount -p ${_mntdir} 2>/dev/null | awk '{print $1}')
[ $? -ne 0 -o -z "${_dev}" ] \
&& break
umount -f ${_mntdir}
mdconfig -d -u ${_dev#/dev/}
done
chflags -R noschg ${OVA_TMP}
rm -rf ${OVA_TMP}
fi
mkdir -p $(dirname ${OVAPATH})
mkdir -p ${_mntdir}
# Partition sizing: without a swap partition the single UFS partition
# takes the whole disk; otherwise the disk is split UFS + swap. 131072
# bytes are reserved for the partition table/loader.
if [ -z "${OVA_SWAP_PART_SIZE_IN_GB}" -o "${OVA_SWAP_PART_SIZE_IN_GB}" = "0" ]; then
# first partition size (freebsd-ufs)
local OVA_FIRST_PART_SIZE_IN_GB=${VMDK_DISK_CAPACITY_IN_GB}
# Calculate real first partition size, removing 256 blocks (131072 bytes) beginning/loader
local OVA_FIRST_PART_SIZE=$((${OVA_FIRST_PART_SIZE_IN_GB}*1024*1024*1024-131072))
# Unset swap partition size variable
unset OVA_SWAP_PART_SIZE
# Parameter used by mkimg
unset OVA_SWAP_PART_PARAM
else
# first partition size (freebsd-ufs)
local OVA_FIRST_PART_SIZE_IN_GB=$((VMDK_DISK_CAPACITY_IN_GB-OVA_SWAP_PART_SIZE_IN_GB))
# Use first partition size in g
local OVA_FIRST_PART_SIZE="${OVA_FIRST_PART_SIZE_IN_GB}g"
# Calculate real swap size, removing 256 blocks (131072 bytes) beginning/loader
local OVA_SWAP_PART_SIZE=$((${OVA_SWAP_PART_SIZE_IN_GB}*1024*1024*1024-131072))
# Parameter used by mkimg
local OVA_SWAP_PART_PARAM="-p freebsd-swap/swap0::${OVA_SWAP_PART_SIZE}"
fi
# Prepare folder to be put in image
customize_stagearea_for_image "ova"
install_default_kernel ${DEFAULT_KERNEL} "no"
# Fill fstab
echo ">>> Installing platform specific items..." | tee -a ${LOGFILE}
echo "/dev/gpt/${PRODUCT_NAME} / ufs rw 1 1" > ${FINAL_CHROOT_DIR}/etc/fstab
if [ -n "${OVA_SWAP_PART_SIZE}" ]; then
echo "/dev/gpt/swap0 none swap sw 0 0" >> ${FINAL_CHROOT_DIR}/etc/fstab
fi
# Create / partition
echo -n ">>> Creating / partition... " | tee -a ${LOGFILE}
truncate -s ${OVA_FIRST_PART_SIZE} ${OVA_TMP}/${OVFUFS}
local _md=$(mdconfig -a -f ${OVA_TMP}/${OVFUFS})
# Traps ensure the md(4) device (and later the mount) are torn down if
# the build is interrupted between here and the explicit cleanup below.
trap "mdconfig -d -u ${_md}; return" 1 2 15 EXIT
newfs -L ${PRODUCT_NAME} -j /dev/${_md} 2>&1 >>${LOGFILE}
if ! mount /dev/${_md} ${_mntdir} 2>&1 >>${LOGFILE}; then
echo "Failed!" | tee -a ${LOGFILE}
echo ">>> ERROR: Error mounting temporary vmdk image. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
trap "sync; sleep 3; umount ${_mntdir} || umount -f ${_mntdir}; mdconfig -d -u ${_md}; return" 1 2 15 EXIT
echo "Done!" | tee -a ${LOGFILE}
clone_directory_contents ${FINAL_CHROOT_DIR} ${_mntdir}
sync
sleep 3
umount ${_mntdir} || umount -f ${_mntdir} >>${LOGFILE} 2>&1
mdconfig -d -u ${_md}
trap "-" 1 2 15 EXIT
# Create raw disk
echo -n ">>> Creating raw disk... " | tee -a ${LOGFILE}
mkimg \
-s gpt \
-f raw \
-b ${FINAL_CHROOT_DIR}/boot/pmbr \
-p freebsd-boot:=${FINAL_CHROOT_DIR}/boot/gptboot \
-p freebsd-ufs/${PRODUCT_NAME}:=${OVA_TMP}/${OVFUFS} \
${OVA_SWAP_PART_PARAM} \
-o ${OVA_TMP}/${OVFRAW} 2>&1 >> ${LOGFILE}
if [ $? -ne 0 -o ! -f ${OVA_TMP}/${OVFRAW} ]; then
if [ -f ${OVA_TMP}/${OVFUFS} ]; then
rm -f ${OVA_TMP}/${OVFUFS}
fi
if [ -f ${OVA_TMP}/${OVFRAW} ]; then
rm -f ${OVA_TMP}/${OVFRAW}
fi
echo "Failed!" | tee -a ${LOGFILE}
echo ">>> ERROR: Error creating temporary vmdk image. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
# We don't need it anymore
rm -f ${OVA_TMP}/${OVFUFS} >/dev/null 2>&1
# Convert raw to vmdk
echo -n ">>> Creating vmdk disk... " | tee -a ${LOGFILE}
vmdktool -z9 -v ${OVA_TMP}/${OVFVMDK} ${OVA_TMP}/${OVFRAW}
if [ $? -ne 0 -o ! -f ${OVA_TMP}/${OVFVMDK} ]; then
if [ -f ${OVA_TMP}/${OVFRAW} ]; then
rm -f ${OVA_TMP}/${OVFRAW}
fi
if [ -f ${OVA_TMP}/${OVFVMDK} ]; then
rm -f ${OVA_TMP}/${OVFVMDK}
fi
echo "Failed!" | tee -a ${LOGFILE}
echo ">>> ERROR: Error creating vmdk image. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
rm -f ${OVA_TMP}/${OVFRAW}
ova_setup_ovf_template
echo -n ">>> Writing final ova image... " | tee -a ${LOGFILE}
# Create OVA file for vmware
gtar -C ${OVA_TMP} -cpf ${OVAPATH} ${PRODUCT_NAME}.ovf ${OVFVMDK}
echo "Done!" | tee -a ${LOGFILE}
rm -f ${OVA_TMP}/${OVFVMDK} >/dev/null 2>&1
echo ">>> OVA created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# called from create_ova_image
# Renders ${OVFTEMPLATE} into ${OVA_TMP}/${PRODUCT_NAME}.ovf, substituting
# size/OS/product placeholders and splicing in the LICENSE file. Only
# amd64 targets are supported (OVF OS id 78 / freebsd64Guest).
ova_setup_ovf_template() {
if [ ! -f ${OVFTEMPLATE} ]; then
echo ">>> ERROR: OVF template file (${OVFTEMPLATE}) not found."
print_error_pfS
fi
# OperatingSystemSection (${PRODUCT_NAME}.ovf)
# 42 FreeBSD 32-Bit
# 78 FreeBSD 64-Bit
if [ "${TARGET}" = "amd64" ]; then
local _os_id="78"
local _os_type="freebsd64Guest"
local _os_descr="FreeBSD 64-Bit"
else
echo ">>> ERROR: Platform not supported for OVA (${TARGET})"
print_error_pfS
fi
# Sizes reported in the OVF descriptor: on-disk usage of the staged tree
# and the byte size of the generated vmdk.
local POPULATED_SIZE=$(du -d0 -k $FINAL_CHROOT_DIR | cut -f1)
local POPULATED_SIZE_IN_BYTES=$((${POPULATED_SIZE}*1024))
local VMDK_FILE_SIZE=$(stat -f "%z" ${OVA_TMP}/${OVFVMDK})
sed \
-e "s,%%VMDK_FILE_SIZE%%,${VMDK_FILE_SIZE},g" \
-e "s,%%VMDK_DISK_CAPACITY_IN_GB%%,${VMDK_DISK_CAPACITY_IN_GB},g" \
-e "s,%%POPULATED_SIZE_IN_BYTES%%,${POPULATED_SIZE_IN_BYTES},g" \
-e "s,%%OS_ID%%,${_os_id},g" \
-e "s,%%OS_TYPE%%,${_os_type},g" \
-e "s,%%OS_DESCR%%,${_os_descr},g" \
-e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" \
-e "s,%%PRODUCT_NAME_SUFFIX%%,${PRODUCT_NAME_SUFFIX},g" \
-e "s,%%PRODUCT_VERSION%%,${PRODUCT_VERSION},g" \
-e "s,%%PRODUCT_URL%%,${PRODUCT_URL},g" \
-e "s#%%VENDOR_NAME%%#${VENDOR_NAME}#g" \
-e "s#%%OVF_INFO%%#${OVF_INFO}#g" \
-e "/^%%PRODUCT_LICENSE%%/r ${BUILDER_ROOT}/LICENSE" \
-e "/^%%PRODUCT_LICENSE%%/d" \
${OVFTEMPLATE} > ${OVA_TMP}/${PRODUCT_NAME}.ovf
}
# Cleans up previous builds
# Removes staging chroots, the FreeBSD obj tree (unless
# NO_CLEAN_FREEBSD_OBJ is set), staged kernels, final images and builder
# logs, recreating the empty directories that later steps expect.
clean_builder() {
# Clean out directories
echo ">>> Cleaning up previous build environment...Please wait!"
staginareas_clean_each_run
if [ -d "${STAGE_CHROOT_DIR}" ]; then
echo -n ">>> Cleaning ${STAGE_CHROOT_DIR}... "
# noschg first: files installed with system flags resist plain rm.
chflags -R noschg ${STAGE_CHROOT_DIR} 2>&1 >/dev/null
rm -rf ${STAGE_CHROOT_DIR}/* 2>/dev/null
echo "Done."
fi
if [ -d "${INSTALLER_CHROOT_DIR}" ]; then
echo -n ">>> Cleaning ${INSTALLER_CHROOT_DIR}... "
chflags -R noschg ${INSTALLER_CHROOT_DIR} 2>&1 >/dev/null
rm -rf ${INSTALLER_CHROOT_DIR}/* 2>/dev/null
echo "Done."
fi
if [ -z "${NO_CLEAN_FREEBSD_OBJ}" -a -d "${FREEBSD_SRC_DIR}" ]; then
OBJTREE=$(make -C ${FREEBSD_SRC_DIR} -V OBJTREE)
if [ -d "${OBJTREE}" ]; then
echo -n ">>> Cleaning FreeBSD objects dir staging..."
echo -n "."
chflags -R noschg ${OBJTREE} 2>&1 >/dev/null
echo -n "."
rm -rf ${OBJTREE}/*
echo "Done!"
fi
if [ -d "${KERNEL_BUILD_PATH}" ]; then
echo -n ">>> Cleaning previously built kernel stage area..."
rm -rf $KERNEL_BUILD_PATH/*
echo "Done!"
fi
fi
mkdir -p $KERNEL_BUILD_PATH
echo -n ">>> Cleaning previously built images..."
rm -rf $IMAGES_FINAL_DIR/*
echo "Done!"
echo -n ">>> Cleaning previous builder logs..."
if [ -d "$BUILDER_LOGS" ]; then
rm -rf ${BUILDER_LOGS}
fi
mkdir -p ${BUILDER_LOGS}
echo "Done!"
echo ">>> Cleaning of builder environment has finished."
}
# Clone the contents of directory $1 into directory $2 (preserving
# permissions) via a tar pipe. Creates $2 when missing; aborts the build
# when either endpoint is not a directory.
# Fix: the error message previously always blamed $1 even when $2 was the
# argument that failed validation.
clone_directory_contents() {
	if [ ! -e "$2" ]; then
		mkdir -p "$2"
	fi
	# Validate both endpoints and report whichever one is at fault.
	local _bad=""
	if [ ! -d "$1" ]; then
		_bad="$1"
	elif [ ! -d "$2" ]; then
		_bad="$2"
	fi
	if [ -n "${_bad}" ]; then
		if [ -z "${LOGFILE}" ]; then
			echo ">>> ERROR: Argument ${_bad} supplied is not a directory!"
		else
			echo ">>> ERROR: Argument ${_bad} supplied is not a directory!" | tee -a ${LOGFILE}
		fi
		print_error_pfS
	fi
	echo -n ">>> Using TAR to clone $1 to $2 ..."
	tar -C ${1} -c -f - . | tar -C ${2} -x -p -f -
	echo "Done!"
}
# Populate ${STAGE_CHROOT_DIR} from ${PRODUCT_SRC} and produce the core
# packages (rc, base, default-config variants) plus the base.txz/mtree
# manifests shipped inside the image. Also bootstraps pkg(8) and drops a
# staging repo config into the chroot's /tmp/pkg.
clone_to_staging_area() {
# Clone everything to the final staging area
echo -n ">>> Cloning everything to ${STAGE_CHROOT_DIR} staging area..."
LOGFILE=${BUILDER_LOGS}/cloning.${TARGET}.log
tar -C ${PRODUCT_SRC} -c -f - . | \
tar -C ${STAGE_CHROOT_DIR} -x -p -f -
# Reference mtree specs used later for integrity checks of /var and /etc.
mkdir -p ${STAGE_CHROOT_DIR}/etc/mtree
mtree -Pcp ${STAGE_CHROOT_DIR}/var > ${STAGE_CHROOT_DIR}/etc/mtree/var.dist
mtree -Pcp ${STAGE_CHROOT_DIR}/etc > ${STAGE_CHROOT_DIR}/etc/mtree/etc.dist
if [ -d ${STAGE_CHROOT_DIR}/usr/local/etc ]; then
mtree -Pcp ${STAGE_CHROOT_DIR}/usr/local/etc > ${STAGE_CHROOT_DIR}/etc/mtree/localetc.dist
fi
## Add buildtime and lastcommit information
# This is used for detecting updates.
echo "$BUILTDATESTRING" > $STAGE_CHROOT_DIR/etc/version.buildtime
# Record last commit info if it is available.
if [ -f $SCRATCHDIR/build_commit_info.txt ]; then
cp $SCRATCHDIR/build_commit_info.txt $STAGE_CHROOT_DIR/etc/version.lastcommit
fi
local _exclude_files="${SCRATCHDIR}/base_exclude_files"
# NOTE(review): ${_version} is not set anywhere in this function; it
# appears to rely on a global/caller value -- confirm before depending on
# the substituted %%VERSION%% result.
sed \
-e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" \
-e "s,%%VERSION%%,${_version},g" \
${BUILDER_TOOLS}/templates/core_pkg/base/exclude_files \
> ${_exclude_files}
mkdir -p ${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR} >/dev/null 2>&1
# Include a sample pkg stable conf to base
setup_pkg_repo \
${PKG_REPO_DEFAULT} \
${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR}/${PRODUCT_NAME}-repo.conf \
${TARGET} \
${TARGET_ARCH}
mtree \
-c \
-k uid,gid,mode,size,flags,sha256digest \
-p ${STAGE_CHROOT_DIR} \
-X ${_exclude_files} \
> ${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR}/base.mtree
tar \
-C ${STAGE_CHROOT_DIR} \
-cJf ${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR}/base.txz \
-X ${_exclude_files} \
.
core_pkg_create rc "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
core_pkg_create base "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
core_pkg_create default-config "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
local DEFAULTCONF=${STAGE_CHROOT_DIR}/conf.default/config.xml
# Save current WAN and LAN if value
local _old_wan_if=$(xml sel -t -v "${XML_ROOTOBJ}/interfaces/wan/if" ${DEFAULTCONF})
local _old_lan_if=$(xml sel -t -v "${XML_ROOTOBJ}/interfaces/lan/if" ${DEFAULTCONF})
# Change default interface names to match vmware driver
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/wan/if" -v "vmx0" ${DEFAULTCONF}
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/lan/if" -v "vmx1" ${DEFAULTCONF}
core_pkg_create default-config "vmware" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
# Restore default values to be used by serial package
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/wan/if" -v "${_old_wan_if}" ${DEFAULTCONF}
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/lan/if" -v "${_old_lan_if}" ${DEFAULTCONF}
# Activate serial console in config.xml
xml ed -L -P -d "${XML_ROOTOBJ}/system/enableserial" ${DEFAULTCONF}
xml ed -P -s "${XML_ROOTOBJ}/system" -t elem -n "enableserial" \
${DEFAULTCONF} > ${DEFAULTCONF}.tmp
xml fo -t ${DEFAULTCONF}.tmp > ${DEFAULTCONF}
rm -f ${DEFAULTCONF}.tmp
echo force > ${STAGE_CHROOT_DIR}/cf/conf/enableserial_force
core_pkg_create default-config-serial "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
rm -f ${STAGE_CHROOT_DIR}/cf/conf/enableserial_force
rm -f ${STAGE_CHROOT_DIR}/cf/conf/config.xml
# Make sure pkg is present
pkg_bootstrap ${STAGE_CHROOT_DIR}
# Make sure correct repo is available on tmp dir
mkdir -p ${STAGE_CHROOT_DIR}/tmp/pkg/pkg-repos
setup_pkg_repo \
${PKG_REPO_BUILD} \
${STAGE_CHROOT_DIR}/tmp/pkg/pkg-repos/repo.conf \
${TARGET} \
${TARGET_ARCH} \
staging \
${STAGE_CHROOT_DIR}/tmp/pkg/pkg.conf
echo "Done!"
}
# Recreate ${FINAL_CHROOT_DIR} as a fresh clone of ${STAGE_CHROOT_DIR}.
# Any previous final chroot is wiped first (clearing schg flags so rm can
# succeed). Sanity-checks that /sbin/init arrived in the clone.
create_final_staging_area() {
if [ -z "${FINAL_CHROOT_DIR}" ]; then
echo ">>> ERROR: FINAL_CHROOT_DIR is not set, cannot continue!" | tee -a ${LOGFILE}
print_error_pfS
fi
if [ -d "${FINAL_CHROOT_DIR}" ]; then
echo -n ">>> Previous ${FINAL_CHROOT_DIR} detected cleaning up..." | tee -a ${LOGFILE}
chflags -R noschg ${FINAL_CHROOT_DIR} 2>&1 1>/dev/null
rm -rf ${FINAL_CHROOT_DIR}/* 2>&1 1>/dev/null
echo "Done." | tee -a ${LOGFILE}
fi
echo ">>> Preparing Final image staging area: $(LC_ALL=C date)" 2>&1 | tee -a ${LOGFILE}
echo ">>> Cloning ${STAGE_CHROOT_DIR} to ${FINAL_CHROOT_DIR}" 2>&1 | tee -a ${LOGFILE}
clone_directory_contents ${STAGE_CHROOT_DIR} ${FINAL_CHROOT_DIR}
# /sbin/init is used as a canary for a complete clone.
if [ ! -f $FINAL_CHROOT_DIR/sbin/init ]; then
echo ">>> ERROR: Something went wrong during cloning -- Please verify!" 2>&1 | tee -a ${LOGFILE}
print_error_pfS
fi
}
# Prepare ${FINAL_CHROOT_DIR} for a specific image type.
#   $1 - image type: iso, memstick, memstickserial, memstickadi, ova, ...
#   $2 - default-config package override (optional; otherwise derived
#        from the image type)
#   $3 - image variant used to pick custom logo assets (optional)
# Installs rc/base/default-config core packages, rewrites staging repo
# URLs back to the public servers, and copies variant branding.
customize_stagearea_for_image() {
local _image_type="$1"
local _default_config="" # filled with $2 below
local _image_variant="$3"
if [ -n "$2" ]; then
_default_config="$2"
elif [ "${_image_type}" = "memstickserial" -o \
"${_image_type}" = "memstickadi" ]; then
_default_config="default-config-serial"
elif [ "${_image_type}" = "ova" ]; then
_default_config="default-config-vmware"
else
_default_config="default-config"
fi
# Prepare final stage area
create_final_staging_area
pkg_chroot_add ${FINAL_CHROOT_DIR} rc
pkg_chroot_add ${FINAL_CHROOT_DIR} base
# Set base/rc pkgs as vital to avoid user end up removing it for any reason
pkg_chroot ${FINAL_CHROOT_DIR} set -v 1 -y $(get_pkg_name rc)
pkg_chroot ${FINAL_CHROOT_DIR} set -v 1 -y $(get_pkg_name base)
# Installer-style images carry all default-config packages in /pkgs so
# the installer can choose among them.
if [ "${_image_type}" = "iso" -o \
"${_image_type}" = "memstick" -o \
"${_image_type}" = "memstickserial" -o \
"${_image_type}" = "memstickadi" ]; then
mkdir -p ${FINAL_CHROOT_DIR}/pkgs
cp ${CORE_PKG_ALL_PATH}/*default-config*.txz ${FINAL_CHROOT_DIR}/pkgs
fi
pkg_chroot_add ${FINAL_CHROOT_DIR} ${_default_config}
# XXX: Workaround to avoid pkg to complain regarding release
# repo on first boot since packages are installed from
# staging server during build phase
if [ -n "${USE_PKG_REPO_STAGING}" ]; then
_read_cmd="select value from repodata where key='packagesite'"
if [ -n "${_IS_RELEASE}" -o -n "${_IS_RC}" ]; then
local _tgt_server="${PKG_REPO_SERVER_RELEASE}"
else
local _tgt_server="${PKG_REPO_SERVER_DEVEL}"
fi
# Rewrite the staging server URL stored in each pkg repo database to
# the public server the shipped image should use.
for _db in ${FINAL_CHROOT_DIR}/var/db/pkg/repo-*sqlite; do
_cur=$(/usr/local/bin/sqlite3 ${_db} "${_read_cmd}")
_new=$(echo "${_cur}" | sed -e "s,^${PKG_REPO_SERVER_STAGING},${_tgt_server},")
/usr/local/bin/sqlite3 ${_db} "update repodata set value='${_new}' where key='packagesite'"
done
fi
if [ -n "$_image_variant" -a \
-d ${BUILDER_TOOLS}/templates/custom_logos/${_image_variant} ]; then
mkdir -p ${FINAL_CHROOT_DIR}/usr/local/share/${PRODUCT_NAME}/custom_logos
cp -f \
${BUILDER_TOOLS}/templates/custom_logos/${_image_variant}/*.svg \
${FINAL_CHROOT_DIR}/usr/local/share/${PRODUCT_NAME}/custom_logos
cp -f \
${BUILDER_TOOLS}/templates/custom_logos/${_image_variant}/*.css \
${FINAL_CHROOT_DIR}/usr/local/share/${PRODUCT_NAME}/custom_logos
fi
# Remove temporary repo conf
rm -rf ${FINAL_CHROOT_DIR}/tmp/pkg
}
# Pack ${FINAL_CHROOT_DIR} (minus /pkgs) into the installer chroot as
# usr/freebsd-dist/base.txz plus the MANIFEST bsdinstall expects.
create_distribution_tarball() {
mkdir -p ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist
echo -n ">>> Creating distribution tarball... " | tee -a ${LOGFILE}
tar -C ${FINAL_CHROOT_DIR} --exclude ./pkgs \
-cJf ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist/base.txz .
echo "Done!" | tee -a ${LOGFILE}
echo -n ">>> Creating manifest... " | tee -a ${LOGFILE}
(cd ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist && \
sh ${FREEBSD_SRC_DIR}/release/scripts/make-manifest.sh base.txz) \
> ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist/MANIFEST
echo "Done!" | tee -a ${LOGFILE}
}
# Build a bootable installer ISO at ${ISOPATH} (gzip'd in the background).
#   $1 - optional image variant; inserted into the filename and recorded
#        in $VARIANTIMAGES
create_iso_image() {
local _variant="$1"
LOGFILE=${BUILDER_LOGS}/isoimage.${TARGET}
if [ -z "${ISOPATH}" ]; then
echo ">>> ISOPATH is empty skipping generation of ISO image!" | tee -a ${LOGFILE}
return
fi
echo ">>> Building bootable ISO image for ${TARGET}" | tee -a ${LOGFILE}
mkdir -p $(dirname ${ISOPATH})
local _image_path=${ISOPATH}
if [ -n "${_variant}" ]; then
_image_path=$(echo "$_image_path" | \
sed "s/${PRODUCT_NAME_SUFFIX}-/&${_variant}-/")
VARIANTIMAGES="${VARIANTIMAGES}${VARIANTIMAGES:+ }${_image_path}"
fi
customize_stagearea_for_image "iso" "" $_variant
install_default_kernel ${DEFAULT_KERNEL}
# Fresh loader config: short autoboot delay, wait for CAM to settle.
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
rm -f ${LOADERCONF} ${BOOTCONF} >/dev/null 2>&1
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
cat ${LOADERCONF} > ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
FSLABEL=$(echo ${PRODUCT_NAME} | tr '[:lower:]' '[:upper:]')
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/mkisoimages.sh -b \
${FSLABEL} \
${_image_path} \
${INSTALLER_CHROOT_DIR}
if [ ! -f "${_image_path}" ]; then
echo "ERROR! ISO image was not built"
print_error_pfS
fi
# Compress in the background; caller is expected to wait on _bg_pids.
gzip -qf $_image_path &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> ISO created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# Build a bootable memstick installer image at ${MEMSTICKPATH}
# (gzip'd in the background).
#   $1 - optional image variant; inserted into the filename and recorded
#        in $VARIANTIMAGES
create_memstick_image() {
local _variant="$1"
LOGFILE=${BUILDER_LOGS}/memstick.${TARGET}
if [ "${MEMSTICKPATH}" = "" ]; then
echo ">>> MEMSTICKPATH is empty skipping generation of memstick image!" | tee -a ${LOGFILE}
return
fi
mkdir -p $(dirname ${MEMSTICKPATH})
local _image_path=${MEMSTICKPATH}
if [ -n "${_variant}" ]; then
_image_path=$(echo "$_image_path" | \
sed "s/-memstick-/-memstick-${_variant}-/")
VARIANTIMAGES="${VARIANTIMAGES}${VARIANTIMAGES:+ }${_image_path}"
fi
customize_stagearea_for_image "memstick" "" $_variant
install_default_kernel ${DEFAULT_KERNEL}
echo ">>> Creating memstick to ${_image_path}." 2>&1 | tee -a ${LOGFILE}
# Fresh loader config: video console only (serial disabled).
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
rm -f ${LOADERCONF} ${BOOTCONF} >/dev/null 2>&1
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
echo 'boot_serial="NO"' >> ${LOADERCONF}
cat ${LOADERCONF} > ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
FSLABEL=$(echo ${PRODUCT_NAME} | tr '[:lower:]' '[:upper:]')
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/mkisoimages.sh -b \
${FSLABEL} \
${_image_path} \
${INSTALLER_CHROOT_DIR}
if [ ! -f "${_image_path}" ]; then
echo "ERROR! memstick image was not built"
print_error_pfS
fi
# Compress in the background; caller is expected to wait on _bg_pids.
gzip -qf $_image_path &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> MEMSTICK created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# Build a serial-console memstick installer image at
# ${MEMSTICKSERIALPATH} (gzip'd in the background). Uses dual
# serial+video console with serial preferred.
create_memstick_serial_image() {
LOGFILE=${BUILDER_LOGS}/memstickserial.${TARGET}
if [ "${MEMSTICKSERIALPATH}" = "" ]; then
echo ">>> MEMSTICKSERIALPATH is empty skipping generation of memstick image!" | tee -a ${LOGFILE}
return
fi
mkdir -p $(dirname ${MEMSTICKSERIALPATH})
customize_stagearea_for_image "memstickserial"
install_default_kernel ${DEFAULT_KERNEL}
echo ">>> Creating serial memstick to ${MEMSTICKSERIALPATH}." 2>&1 | tee -a ${LOGFILE}
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
echo ">>> Activating serial console..." 2>&1 | tee -a ${LOGFILE}
# boot.config: 115200 baud, dual console (-D).
echo "-S115200 -D" > ${BOOTCONF}
# Activate serial console+video console in loader.conf
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
echo 'boot_multicons="YES"' >> ${LOADERCONF}
echo 'boot_serial="YES"' >> ${LOADERCONF}
echo 'console="comconsole,vidconsole"' >> ${LOADERCONF}
echo 'comconsole_speed="115200"' >> ${LOADERCONF}
cat ${BOOTCONF} >> ${FINAL_CHROOT_DIR}/boot.config
cat ${LOADERCONF} >> ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/make-memstick.sh \
${INSTALLER_CHROOT_DIR} \
${MEMSTICKSERIALPATH}
if [ ! -f "${MEMSTICKSERIALPATH}" ]; then
echo "ERROR! memstick serial image was not built"
print_error_pfS
fi
# Compress in the background; caller is expected to wait on _bg_pids.
gzip -qf $MEMSTICKSERIALPATH &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> MEMSTICKSERIAL created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# Build a memstick installer image for ADI appliances at
# ${MEMSTICKADIPATH} (gzip'd in the background). Serial console only,
# wired to the second UART (0x2F8) as used by ADI hardware.
create_memstick_adi_image() {
LOGFILE=${BUILDER_LOGS}/memstickadi.${TARGET}
if [ "${MEMSTICKADIPATH}" = "" ]; then
echo ">>> MEMSTICKADIPATH is empty skipping generation of memstick image!" | tee -a ${LOGFILE}
return
fi
mkdir -p $(dirname ${MEMSTICKADIPATH})
customize_stagearea_for_image "memstickadi"
install_default_kernel ${DEFAULT_KERNEL}
echo ">>> Creating serial memstick to ${MEMSTICKADIPATH}." 2>&1 | tee -a ${LOGFILE}
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
echo ">>> Activating serial console..." 2>&1 | tee -a ${LOGFILE}
# boot.config: 115200 baud, serial console (-h).
echo "-S115200 -h" > ${BOOTCONF}
# Activate serial console+video console in loader.conf
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
echo 'boot_serial="YES"' >> ${LOADERCONF}
echo 'console="comconsole"' >> ${LOADERCONF}
echo 'comconsole_speed="115200"' >> ${LOADERCONF}
echo 'comconsole_port="0x2F8"' >> ${LOADERCONF}
echo 'hint.uart.0.flags="0x00"' >> ${LOADERCONF}
echo 'hint.uart.1.flags="0x10"' >> ${LOADERCONF}
cat ${BOOTCONF} >> ${FINAL_CHROOT_DIR}/boot.config
cat ${LOADERCONF} >> ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/make-memstick.sh \
${INSTALLER_CHROOT_DIR} \
${MEMSTICKADIPATH}
if [ ! -f "${MEMSTICKADIPATH}" ]; then
echo "ERROR! memstick ADI image was not built"
print_error_pfS
fi
# Compress in the background; caller is expected to wait on _bg_pids.
gzip -qf $MEMSTICKADIPATH &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> MEMSTICKADI created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# Map a FreeBSD TARGET_ARCH to its pkg(8) ALTABI architecture string.
#   $1 - target architecture (amd64, i386, armv7)
# Prints the ALTABI arch on stdout; aborts the build on unknown input.
get_altabi_arch() {
	local _target_arch="$1"
	case "${_target_arch}" in
	amd64)
		echo "x86:64"
		;;
	i386)
		echo "x86:32"
		;;
	armv7)
		echo "32:el:eabi:softfp"
		;;
	*)
		echo ">>> ERROR: Invalid arch"
		print_error_pfS
		;;
	esac
}
# Create pkg conf on desired place with desired arch/branch
#   $1 - template repo conf path
#   $2 - destination conf path
#   $3 - target (unused in substitutions below; kept for call symmetry)
#   $4 - target architecture (required; function no-ops when empty)
#   $5 - when non-empty (and USE_PKG_REPO_STAGING set) point both devel
#        and release entries at the staging server/branch
#   $6 - optional pkg.conf path to receive derived ABI/ALTABI lines
setup_pkg_repo() {
if [ -z "${4}" ]; then
return
fi
local _template="${1}"
local _target="${2}"
local _arch="${3}"
local _target_arch="${4}"
local _staging="${5}"
local _pkg_conf="${6}"
if [ -z "${_template}" -o ! -f "${_template}" ]; then
echo ">>> ERROR: It was not possible to find pkg conf template ${_template}"
print_error_pfS
fi
if [ -n "${_staging}" -a -n "${USE_PKG_REPO_STAGING}" ]; then
local _pkg_repo_server_devel=${PKG_REPO_SERVER_STAGING}
local _pkg_repo_branch_devel=${PKG_REPO_BRANCH_STAGING}
local _pkg_repo_server_release=${PKG_REPO_SERVER_STAGING}
local _pkg_repo_branch_release=${PKG_REPO_BRANCH_STAGING}
else
local _pkg_repo_server_devel=${PKG_REPO_SERVER_DEVEL}
local _pkg_repo_branch_devel=${PKG_REPO_BRANCH_DEVEL}
local _pkg_repo_server_release=${PKG_REPO_SERVER_RELEASE}
local _pkg_repo_branch_release=${PKG_REPO_BRANCH_RELEASE}
fi
mkdir -p $(dirname ${_target}) >/dev/null 2>&1
# Render the repo conf from the template.
sed \
-e "s/%%ARCH%%/${_target_arch}/" \
-e "s/%%PKG_REPO_BRANCH_DEVEL%%/${_pkg_repo_branch_devel}/g" \
-e "s/%%PKG_REPO_BRANCH_RELEASE%%/${_pkg_repo_branch_release}/g" \
-e "s,%%PKG_REPO_SERVER_DEVEL%%,${_pkg_repo_server_devel},g" \
-e "s,%%PKG_REPO_SERVER_RELEASE%%,${_pkg_repo_server_release},g" \
-e "s,%%POUDRIERE_PORTS_NAME%%,${POUDRIERE_PORTS_NAME},g" \
-e "s/%%PRODUCT_NAME%%/${PRODUCT_NAME}/g" \
-e "s/%%REPO_BRANCH_PREFIX%%/${REPO_BRANCH_PREFIX}/g" \
${_template} \
> ${_target}
# Derive ABI/ALTABI from the sibling .abi/.altabi template files (if
# present) and optionally write them into a pkg.conf.
local ALTABI_ARCH=$(get_altabi_arch ${_target_arch})
ABI=$(cat ${_template%%.conf}.abi 2>/dev/null \
| sed -e "s/%%ARCH%%/${_target_arch}/g")
ALTABI=$(cat ${_template%%.conf}.altabi 2>/dev/null \
| sed -e "s/%%ARCH%%/${ALTABI_ARCH}/g")
if [ -n "${_pkg_conf}" -a -n "${ABI}" -a -n "${ALTABI}" ]; then
mkdir -p $(dirname ${_pkg_conf})
echo "ABI=${ABI}" > ${_pkg_conf}
echo "ALTABI=${ALTABI}" >> ${_pkg_conf}
fi
}
# Verify that every package listed in $BUILDER_PKG_DEPENDENCIES is
# installed on the build host; abort on the first missing one.
depend_check() {
	for _pkg in ${BUILDER_PKG_DEPENDENCIES}; do
		pkg info -e ${_pkg} && continue
		echo "Missing dependency (${_pkg})."
		print_error_pfS
	done
}
# This routine ensures any ports / binaries that the builder
# system needs are on disk and ready for execution.
# Installs the <product>-builder meta package, first generating the pkg
# repo conf (pointed at staging) when it does not exist yet.
builder_setup() {
# If Product-builder is already installed, just leave
if pkg info -e -q ${PRODUCT_NAME}-builder; then
return
fi
if [ ! -f ${PKG_REPO_PATH} ]; then
[ -d $(dirname ${PKG_REPO_PATH}) ] \
|| mkdir -p $(dirname ${PKG_REPO_PATH})
update_freebsd_sources
local _arch=$(uname -m)
setup_pkg_repo \
${PKG_REPO_BUILD} \
${PKG_REPO_PATH} \
${_arch} \
${_arch} \
"staging"
# Use fingerprint keys from repo
sed -i '' -e "/fingerprints:/ s,\"/,\"${BUILDER_ROOT}/src/," \
${PKG_REPO_PATH}
fi
pkg install ${PRODUCT_NAME}-builder
}
# Updates FreeBSD sources
# Clone/update the FreeBSD source tree (and, for pfSense, the gnid
# sources) via git_checkout.sh.  Skipped entirely when both
# NO_BUILDWORLD and NO_BUILDKERNEL are set.
#   $1 - "full" for a full clone, anything else for a shallow one
update_freebsd_sources() {
	# NOTE(review): _full and _clone_params are computed but never used
	# below — git_checkout.sh is called without them.  Presumably
	# leftovers from an earlier implementation; confirm before relying
	# on the "full" argument having any effect.
	if [ "${1}" = "full" ]; then
		local _full=1
		local _clone_params=""
	else
		local _full=0
		local _clone_params="--depth 1 --single-branch"
	fi
	if [ -n "${NO_BUILDWORLD}" -a -n "${NO_BUILDKERNEL}" ]; then
		echo ">>> NO_BUILDWORLD and NO_BUILDKERNEL set, skipping update of freebsd sources" | tee -a ${LOGFILE}
		return
	fi
	echo ">>> Obtaining FreeBSD sources (${FREEBSD_BRANCH})..."
	${BUILDER_SCRIPTS}/git_checkout.sh \
		-r ${FREEBSD_REPO_BASE} \
		-d ${FREEBSD_SRC_DIR} \
		-b ${FREEBSD_BRANCH}
	if [ $? -ne 0 -o ! -d "${FREEBSD_SRC_DIR}/.git" ]; then
		echo ">>> ERROR: It was not possible to clone FreeBSD src repo"
		print_error_pfS
	fi
	# Optionally pin the tree to an exact commit
	if [ -n "${GIT_FREEBSD_COSHA1}" ]; then
		echo -n ">>> Checking out desired commit (${GIT_FREEBSD_COSHA1})... "
		# Only surface git output that looks like an error
		( git -C ${FREEBSD_SRC_DIR} checkout ${GIT_FREEBSD_COSHA1} ) 2>&1 | \
			grep -C3 -i -E 'error|fatal'
		echo "Done!"
	fi
	if [ "${PRODUCT_NAME}" = "pfSense" -a -n "${GNID_REPO_BASE}" ]; then
		echo ">>> Obtaining gnid sources..."
		${BUILDER_SCRIPTS}/git_checkout.sh \
			-r ${GNID_REPO_BASE} \
			-d ${GNID_SRC_DIR} \
			-b ${GNID_BRANCH}
	fi
}
# Run pkg(8) inside a chroot with the shared package cache and devfs
# mounted, logging output to install_pkg_install_ports.txt.
#   $1  - chroot root directory (must exist and not be "/")
#   $@  - pkg subcommand and arguments
# Returns pkg's exit status; returns 1 on bad arguments.
pkg_chroot() {
	local _root="${1}"
	shift
	if [ $# -eq 0 ]; then
		# FIX: "return -1" is outside the valid 0-255 range and is
		# rejected by POSIX sh; use 1 to signal failure.
		return 1
	fi
	if [ -z "${_root}" -o "${_root}" = "/" -o ! -d "${_root}" ]; then
		return 1
	fi
	mkdir -p \
		${SCRATCHDIR}/pkg_cache \
		${_root}/var/cache/pkg \
		${_root}/dev
	# Share the host's pkg cache with the chroot; give pkg a devfs
	/sbin/mount -t nullfs ${SCRATCHDIR}/pkg_cache ${_root}/var/cache/pkg
	/sbin/mount -t devfs devfs ${_root}/dev
	# Temporary resolv.conf so pkg can fetch inside the chroot
	cp -f /etc/resolv.conf ${_root}/etc/resolv.conf
	touch ${BUILDER_LOGS}/install_pkg_install_ports.txt
	local _params=""
	# Prefer repo/pkg configs staged inside the chroot when present
	if [ -f "${_root}/tmp/pkg/pkg-repos/repo.conf" ]; then
		_params="--repo-conf-dir /tmp/pkg/pkg-repos "
	fi
	if [ -f "${_root}/tmp/pkg/pkg.conf" ]; then
		_params="${_params} --config /tmp/pkg/pkg.conf "
	fi
	script -aq ${BUILDER_LOGS}/install_pkg_install_ports.txt \
		chroot ${_root} pkg ${_params}$@ >/dev/null 2>&1
	local result=$?
	# Always undo the mounts/resolv.conf before propagating pkg's status
	rm -f ${_root}/etc/resolv.conf
	/sbin/umount -f ${_root}/dev
	/sbin/umount -f ${_root}/var/cache/pkg
	return $result
}
# Install a pre-built core package into a chroot.
#   $1 - chroot directory
#   $2 - package short name (resolved via get_pkg_name)
# Returns 1 on missing arguments; aborts the build when the target dir
# or the package file cannot be found.
pkg_chroot_add() {
	[ -n "${1}" -a -n "${2}" ] || return 1
	local _target="${1}"
	local _pkg="$(get_pkg_name ${2}).txz"
	if [ ! -d "${_target}" ]; then
		echo ">>> ERROR: Target dir ${_target} not found"
		print_error_pfS
	fi
	if [ ! -f ${CORE_PKG_ALL_PATH}/${_pkg} ]; then
		echo ">>> ERROR: Package ${_pkg} not found"
		print_error_pfS
	fi
	# Stage the tarball inside the chroot, install it, then clean up
	cp ${CORE_PKG_ALL_PATH}/${_pkg} ${_target}
	pkg_chroot ${_target} add /${_pkg}
	rm -f ${_target}/${_pkg}
}
# Bootstrap pkg(8) inside a chroot (defaults to STAGE_CHROOT_DIR) after
# writing a staging repo config into it.
#   $1 - chroot root directory (optional)
pkg_bootstrap() {
	local _root=${1:-"${STAGE_CHROOT_DIR}"}
	setup_pkg_repo \
		${PKG_REPO_BUILD} \
		${_root}${PKG_REPO_PATH} \
		${TARGET} \
		${TARGET_ARCH} \
		"staging"
	pkg_chroot ${_root} bootstrap -f
}
# This routine assists with installing various
# freebsd ports files into the pfsense-fs staging
# area.
# Install the product meta package (plus custom_package_list) into the
# staging chroot, then prune everything not required by them.
#   $1 - main package name (defaults to PRODUCT_NAME)
install_pkg_install_ports() {
	local MAIN_PKG="${1}"
	if [ -z "${MAIN_PKG}" ]; then
		MAIN_PKG=${PRODUCT_NAME}
	fi
	echo ">>> Installing pkg repository in chroot (${STAGE_CHROOT_DIR})..."
	[ -d ${STAGE_CHROOT_DIR}/var/cache/pkg ] || \
		mkdir -p ${STAGE_CHROOT_DIR}/var/cache/pkg
	[ -d ${SCRATCHDIR}/pkg_cache ] || \
		mkdir -p ${SCRATCHDIR}/pkg_cache
	echo -n ">>> Installing built ports (packages) in chroot (${STAGE_CHROOT_DIR})... "
	# First mark all packages as automatically installed
	pkg_chroot ${STAGE_CHROOT_DIR} set -A 1 -a
	# Install all necessary packages
	if ! pkg_chroot ${STAGE_CHROOT_DIR} install ${MAIN_PKG} ${custom_package_list}; then
		echo "Failed!"
		print_error_pfS
	fi
	# Make sure required packages are set as non-automatic
	pkg_chroot ${STAGE_CHROOT_DIR} set -A 0 pkg ${MAIN_PKG} ${custom_package_list}
	# pkg and MAIN_PKG are vital
	pkg_chroot ${STAGE_CHROOT_DIR} set -y -v 1 pkg ${MAIN_PKG}
	# Remove unnecessary packages
	pkg_chroot ${STAGE_CHROOT_DIR} autoremove
	echo "Done!"
}
# Wipe the contents of FINAL_CHROOT_DIR between runs.
# (Function name typo "staginareas" kept: callers depend on it.)
staginareas_clean_each_run() {
	echo -n ">>> Cleaning build directories: "
	if [ -d "${FINAL_CHROOT_DIR}" ]; then
		BASENAME=$(basename ${FINAL_CHROOT_DIR})
		echo -n "$BASENAME "
		# Clear immutable flags so rm can remove everything.
		# FIX: was "2>&1 >/dev/null", which discards stdout but still
		# prints stderr; the intent is to silence chflags entirely.
		chflags -R noschg ${FINAL_CHROOT_DIR} >/dev/null 2>&1
		rm -rf ${FINAL_CHROOT_DIR}/* 2>/dev/null
	fi
	echo "Done!"
}
# Imported from FreeSBIE
# Build a FreeBSD kernel via build_freebsd.sh (-W = skip world).
#   $1 - kernel config name (defaults to KERNCONF)
# Honors NO_BUILDKERNEL; temporarily exports KERNCONF for the helper.
buildkernel() {
	local _kernconf=${1:-${KERNCONF}}
	if [ -n "${NO_BUILDKERNEL}" ]; then
		echo ">>> NO_BUILDKERNEL set, skipping build" | tee -a ${LOGFILE}
		return
	fi
	if [ -z "${_kernconf}" ]; then
		echo ">>> ERROR: No kernel configuration defined probably this is not what you want! STOPPING!" | tee -a ${LOGFILE}
		print_error_pfS
	fi
	# Save/restore KERNCONF around the build
	local _old_kernconf=${KERNCONF}
	export KERNCONF=${_kernconf}
	echo ">>> $(LC_ALL=C date) - Starting build kernel for ${TARGET} architecture..." | tee -a ${LOGFILE}
	script -aq $LOGFILE ${BUILDER_SCRIPTS}/build_freebsd.sh -W -s ${FREEBSD_SRC_DIR} \
		|| print_error_pfS
	echo ">>> $(LC_ALL=C date) - Finished build kernel for ${TARGET} architecture..." | tee -a ${LOGFILE}
	export KERNCONF=${_old_kernconf}
}
# Imported from FreeSBIE
# Install a previously built kernel via install_freebsd.sh
# (-W skip world, -D no docs/dist extras, -z compress).
#   $1 - destination dir (defaults to KERNEL_DESTDIR)
#   $2 - kernel config name (defaults to KERNCONF)
installkernel() {
	local _destdir=${1:-${KERNEL_DESTDIR}}
	local _kernconf=${2:-${KERNCONF}}
	if [ -z "${_kernconf}" ]; then
		echo ">>> ERROR: No kernel configuration defined probably this is not what you want! STOPPING!" | tee -a ${LOGFILE}
		print_error_pfS
	fi
	# Save/restore KERNCONF around the install
	local _old_kernconf=${KERNCONF}
	export KERNCONF=${_kernconf}
	mkdir -p ${STAGE_CHROOT_DIR}/boot
	echo ">>> Installing kernel (${_kernconf}) for ${TARGET} architecture..." | tee -a ${LOGFILE}
	script -aq $LOGFILE ${BUILDER_SCRIPTS}/install_freebsd.sh -W -D -z \
		-s ${FREEBSD_SRC_DIR} \
		-d ${_destdir} \
		|| print_error_pfS
	export KERNCONF=${_old_kernconf}
}
# Launch is ran first to setup a few variables that we need
# Imported from FreeSBIE
# Launch is ran first to setup a few variables that we need
# Imported from FreeSBIE
launch() {
	if [ "$(id -u)" != "0" ]; then
		echo "Sorry, this must be done as root."
		# FIX: previously only warned and kept going; a non-root build
		# cannot mount/chroot/pkg and would fail later confusingly.
		exit 1
	fi
	echo ">>> Operation $0 has started at $(date)"
}
# Print the end-of-operation banner with the script name and timestamp.
finish() {
	printf ">>> Operation %s has ended at %s\n" "$0" "$(date)"
}
# Sign a local pkg repository and push it to the staging rsync host(s);
# for releases (or the core repo) additionally fan it out from staging
# to the final mirrors and optionally to AWS S3.
#   $1 - local repository path
#   $2 - non-empty to skip the staging->final rsync step
pkg_repo_rsync() {
	local _repo_path_param="${1}"
	local _ignore_final_rsync="${2}"
	local _aws_sync_cmd="aws s3 sync --quiet --exclude '.real*/*' --exclude '.latest/*'"
	if [ -z "${_repo_path_param}" -o ! -d "${_repo_path_param}" ]; then
		return
	fi
	if [ -n "${SKIP_FINAL_RSYNC}" ]; then
		_ignore_final_rsync="1"
	fi
	# Sanitize path
	_repo_path=$(realpath ${_repo_path_param})
	local _repo_dir=$(dirname ${_repo_path})
	local _repo_base=$(basename ${_repo_path})
	# Add ./ it's an rsync trick to make it chdir to directory before sending it
	_repo_path="${_repo_dir}/./${_repo_base}"
	if [ -z "${LOGFILE}" ]; then
		local _logfile="/dev/null"
	else
		local _logfile="${LOGFILE}"
	fi
	# Sign repo metadata (and the pkg bootstrap tarball) unless disabled
	if [ -n "${PKG_REPO_SIGNING_COMMAND}" -a -z "${DO_NOT_SIGN_PKG_REPO}" ]; then
		# Detect poudriere directory structure
		if [ -L "${_repo_path}/.latest" ]; then
			local _real_repo_path=$(readlink -f ${_repo_path}/.latest)
		else
			local _real_repo_path=${_repo_path}
		fi
		echo -n ">>> Signing repository... " | tee -a ${_logfile}
		############ ATTENTION ##############
		#
		# For some reason pkg-repo fail without / in the end of directory name
		# so removing it will break command
		#
		# https://github.com/freebsd/pkg/issues/1364
		#
		if script -aq ${_logfile} pkg repo ${_real_repo_path}/ \
		    signing_command: ${PKG_REPO_SIGNING_COMMAND} >/dev/null 2>&1; then
			echo "Done!" | tee -a ${_logfile}
		else
			echo "Failed!" | tee -a ${_logfile}
			echo ">>> ERROR: An error occurred trying to sign repo"
			print_error_pfS
		fi
		local _pkgfile="${_repo_path}/Latest/pkg.txz"
		if [ -e ${_pkgfile} ]; then
			echo -n ">>> Signing Latest/pkg.txz for bootstraping... " | tee -a ${_logfile}
			# Detached signature: sha256 of the tarball piped through the signer
			if sha256 -q ${_pkgfile} | ${PKG_REPO_SIGNING_COMMAND} \
			    > ${_pkgfile}.sig 2>/dev/null; then
				echo "Done!" | tee -a ${_logfile}
			else
				echo "Failed!" | tee -a ${_logfile}
				echo ">>> ERROR: An error occurred trying to sign Latest/pkg.txz"
				print_error_pfS
			fi
		fi
	fi
	if [ -z "${UPLOAD}" ]; then
		return
	fi
	for _pkg_rsync_hostname in ${PKG_RSYNC_HOSTNAME}; do
		# Make sure destination directory exist
		ssh -o StrictHostKeyChecking=no -p ${PKG_RSYNC_SSH_PORT} \
			${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname} \
			"mkdir -p ${PKG_RSYNC_DESTDIR}"
		echo -n ">>> Sending updated repository to ${_pkg_rsync_hostname}... " | tee -a ${_logfile}
		if script -aq ${_logfile} rsync -Have "ssh -o StrictHostKeyChecking=no -p ${PKG_RSYNC_SSH_PORT}" \
			--timeout=60 --delete-delay ${_repo_path} \
			${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname}:${PKG_RSYNC_DESTDIR} >/dev/null 2>&1
		then
			echo "Done!" | tee -a ${_logfile}
		else
			echo "Failed!" | tee -a ${_logfile}
			echo ">>> ERROR: An error occurred sending repo to remote hostname"
			print_error_pfS
		fi
		# NOTE(review): this returns from the whole function after the
		# FIRST staging host when no final rsync is wanted — remaining
		# PKG_RSYNC_HOSTNAME entries are never synced; confirm intended.
		if [ -z "${USE_PKG_REPO_STAGING}" -o -n "${_ignore_final_rsync}" ]; then
			return
		fi
		if [ -n "${_IS_RELEASE}" -o "${_repo_path_param}" = "${CORE_PKG_PATH}" ]; then
			for _pkg_final_rsync_hostname in ${PKG_FINAL_RSYNC_HOSTNAME}; do
				# Send .real* directories first to prevent having a broken repo while transfer happens
				local _cmd="rsync -Have \"ssh -o StrictHostKeyChecking=no -p ${PKG_FINAL_RSYNC_SSH_PORT}\" \
					--timeout=60 ${PKG_RSYNC_DESTDIR}/./${_repo_base%%-core}* \
					--include=\"/*\" --include=\"*/.real*\" --include=\"*/.real*/***\" \
					--exclude=\"*\" \
					${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname}:${PKG_FINAL_RSYNC_DESTDIR}"
				# The final rsync is executed ON the staging host (ssh + remote rsync)
				echo -n ">>> Sending updated packages to ${_pkg_final_rsync_hostname}... " | tee -a ${_logfile}
				if script -aq ${_logfile} ssh -o StrictHostKeyChecking=no -p ${PKG_RSYNC_SSH_PORT} \
				    ${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname} ${_cmd} >/dev/null 2>&1; then
					echo "Done!" | tee -a ${_logfile}
				else
					echo "Failed!" | tee -a ${_logfile}
					echo ">>> ERROR: An error occurred sending repo to final hostname"
					print_error_pfS
				fi
				# Second pass moves metadata and deletes stale files
				_cmd="rsync -Have \"ssh -o StrictHostKeyChecking=no -p ${PKG_FINAL_RSYNC_SSH_PORT}\" \
					--timeout=60 --delete-delay ${PKG_RSYNC_DESTDIR}/./${_repo_base%%-core}* \
					${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname}:${PKG_FINAL_RSYNC_DESTDIR}"
				echo -n ">>> Sending updated repositories metadata to ${_pkg_final_rsync_hostname}... " | tee -a ${_logfile}
				if script -aq ${_logfile} ssh -o StrictHostKeyChecking=no -p ${PKG_RSYNC_SSH_PORT} \
				    ${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname} ${_cmd} >/dev/null 2>&1; then
					echo "Done!" | tee -a ${_logfile}
				else
					echo "Failed!" | tee -a ${_logfile}
					echo ">>> ERROR: An error occurred sending repo to final hostname"
					print_error_pfS
				fi
				if [ -z "${PKG_FINAL_S3_PATH}" ]; then
					continue
				fi
				# Mirror each matching repo dir on the final host into S3
				local _repos=$(ssh -o StrictHostKeyChecking=no -p ${PKG_FINAL_RSYNC_SSH_PORT} \
					${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname} \
					"ls -1d ${PKG_FINAL_RSYNC_DESTDIR}/${_repo_base%%-core}*")
				for _repo in ${_repos}; do
					echo -n ">>> Sending updated packages to AWS ${PKG_FINAL_S3_PATH}... " | tee -a ${_logfile}
					if script -aq ${_logfile} ssh -o StrictHostKeyChecking=no -p ${PKG_FINAL_RSYNC_SSH_PORT} \
					    ${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname} \
					    "${_aws_sync_cmd} ${_repo} ${PKG_FINAL_S3_PATH}/$(basename ${_repo})"; then
						echo "Done!" | tee -a ${_logfile}
					else
						echo "Failed!" | tee -a ${_logfile}
						echo ">>> ERROR: An error occurred sending files to AWS S3"
						print_error_pfS
					fi
					echo -n ">>> Cleaning up packages at AWS ${PKG_FINAL_S3_PATH}... " | tee -a ${_logfile}
					if script -aq ${_logfile} ssh -o StrictHostKeyChecking=no -p ${PKG_FINAL_RSYNC_SSH_PORT} \
					    ${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname} \
					    "${_aws_sync_cmd} --delete ${_repo} ${PKG_FINAL_S3_PATH}/$(basename ${_repo})"; then
						echo "Done!" | tee -a ${_logfile}
					else
						echo "Failed!" | tee -a ${_logfile}
						echo ">>> ERROR: An error occurred sending files to AWS S3"
						print_error_pfS
					fi
				done
			done
		fi
	done
}
# Echo the list of TARGET.TARGET_ARCH pairs this host can build for.
# amd64 hosts always get amd64.amd64, plus arm.armv7 when qemu user
# emulation is available.  ARCH_LIST, when set, restricts (and must be
# a subset of) the detected set.
poudriere_possible_archs() {
	local _arch=$(uname -m)
	local _archs=""
	# If host is amd64, we'll create both repos, and if possible armv7
	if [ "${_arch}" = "amd64" ]; then
		_archs="amd64.amd64"
		if [ -f /usr/local/bin/qemu-arm-static ]; then
			# Make sure binmiscctl is ok
			/usr/local/etc/rc.d/qemu_user_static forcestart >/dev/null 2>&1
			if binmiscctl lookup armv7 >/dev/null 2>&1; then
				_archs="${_archs} arm.armv7"
			fi
		fi
	fi
	# Validate ARCH_LIST against what the host can actually do
	if [ -n "${ARCH_LIST}" ]; then
		local _found=0
		for _desired_arch in ${ARCH_LIST}; do
			_found=0
			for _possible_arch in ${_archs}; do
				if [ "${_desired_arch}" = "${_possible_arch}" ]; then
					_found=1
					break
				fi
			done
			if [ ${_found} -eq 0 ]; then
				echo ">>> ERROR: Impossible to build for arch: ${_desired_arch}"
				print_error_pfS
			fi
		done
		_archs="${ARCH_LIST}"
	fi
	echo ${_archs}
}
# Compose the poudriere jail name for an arch pair, e.g.
# "amd64.amd64" -> "<product>_<branch>_amd64".  Echoes the name;
# returns 1 when no arch pair was supplied.
poudriere_jail_name() {
	[ -n "${1}" ] || return 1
	# Keep only the TARGET_ARCH component after the dot
	echo "${PRODUCT_NAME}_${POUDRIERE_BRANCH}_${1##*.}"
}
# Rebrand pfSense-named ports in the poudriere ports tree for white-label
# products: duplicates each *pfSense* port dir under the product name and
# rewrites Makefiles, plist/descr, PHP module sources and file names.
# No-op when the product IS pfSense.
poudriere_rename_ports() {
	if [ "${PRODUCT_NAME}" = "pfSense" ]; then
		return;
	fi
	LOGFILE=${BUILDER_LOGS}/poudriere.log
	local _ports_dir="/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}"
	echo -n ">>> Renaming product ports on ${POUDRIERE_PORTS_NAME}... " | tee -a ${LOGFILE}
	# -depth 2: category/portname level only
	for d in $(find ${_ports_dir} -depth 2 -type d -name '*pfSense*'); do
		local _pdir=$(dirname ${d})
		local _pname=$(echo $(basename ${d}) | sed "s,pfSense,${PRODUCT_NAME},")
		local _plist=""
		local _pdescr=""
		# Start from a fresh copy of the original port
		if [ -e ${_pdir}/${_pname} ]; then
			rm -rf ${_pdir}/${_pname}
		fi
		cp -r ${d} ${_pdir}/${_pname}
		if [ -f ${_pdir}/${_pname}/pkg-plist ]; then
			_plist=${_pdir}/${_pname}/pkg-plist
		fi
		if [ -f ${_pdir}/${_pname}/pkg-descr ]; then
			_pdescr=${_pdir}/${_pname}/pkg-descr
		fi
		# Rebrand name/URL/maintainer in port metadata
		sed -i '' -e "s,pfSense,${PRODUCT_NAME},g" \
			-e "s,https://www.pfsense.org,${PRODUCT_URL},g" \
			-e "/^MAINTAINER=/ s,^.*$,MAINTAINER=	${PRODUCT_EMAIL}," \
			${_pdir}/${_pname}/Makefile ${_pdescr} ${_plist}
		# PHP module is special
		if echo "${_pname}" | grep -q "^php[0-9]*-${PRODUCT_NAME}-module"; then
			local _product_capital=$(echo ${PRODUCT_NAME} | tr '[a-z]' '[A-Z]')
			sed -i '' -e "s,PHP_PFSENSE,PHP_${_product_capital},g" \
				-e "s,PFSENSE_SHARED_LIBADD,${_product_capital}_SHARED_LIBADD,g" \
				-e "s,pfSense,${PRODUCT_NAME},g" \
				-e "s,pfSense.c,${PRODUCT_NAME}\.c,g" \
				${_pdir}/${_pname}/files/config.m4
			sed -i '' -e "s,COMPILE_DL_PFSENSE,COMPILE_DL_${_product_capital}," \
				-e "s,pfSense_module_entry,${PRODUCT_NAME}_module_entry,g" \
				-e "s,php_pfSense.h,php_${PRODUCT_NAME}\.h,g" \
				-e "/ZEND_GET_MODULE/ s,pfSense,${PRODUCT_NAME}," \
				-e "/PHP_PFSENSE_WORLD_EXTNAME/ s,pfSense,${PRODUCT_NAME}," \
				${_pdir}/${_pname}/files/pfSense.c \
				${_pdir}/${_pname}/files/dummynet.c \
				${_pdir}/${_pname}/files/php_pfSense.h
		fi
		# Also rebrand any pfSense-named files shipped with the port
		if [ -d ${_pdir}/${_pname}/files ]; then
			for fd in $(find ${_pdir}/${_pname}/files -name '*pfSense*'); do
				local _fddir=$(dirname ${fd})
				local _fdname=$(echo $(basename ${fd}) | sed "s,pfSense,${PRODUCT_NAME},")
				mv ${fd} ${_fddir}/${_fdname}
			done
		fi
	done
	echo "Done!" | tee -a ${LOGFILE}
}
# Create the poudriere ports tree if it doesn't exist yet.  On AWS
# builders the tree is seeded from (or stashed to) an S3 tarball to
# avoid repeated full clones; otherwise poudriere clones it from git.
poudriere_create_ports_tree() {
	LOGFILE=${BUILDER_LOGS}/poudriere.log
	if ! poudriere ports -l | grep -q -E "^${POUDRIERE_PORTS_NAME}[[:blank:]]"; then
		local _branch=""
		if [ -z "${POUDRIERE_PORTS_GIT_URL}" ]; then
			echo ">>> ERROR: POUDRIERE_PORTS_GIT_URL is not defined"
			print_error_pfS
		fi
		if [ -n "${POUDRIERE_PORTS_GIT_BRANCH}" ]; then
			_branch="${POUDRIERE_PORTS_GIT_BRANCH}"
		fi
		echo -n ">>> Creating poudriere ports tree, it may take some time... " | tee -a ${LOGFILE}
		if [ "${AWS}" = 1 ]; then
			# set -e: any failure in the S3/zfs dance is fatal
			set -e
			script -aq ${LOGFILE} poudriere ports -c -p "${POUDRIERE_PORTS_NAME}" -m none
			script -aq ${LOGFILE} zfs create ${ZFS_TANK}/poudriere/ports/${POUDRIERE_PORTS_NAME}
			# If S3 doesn't contain stashed ports tree, create one
			if ! aws_exec s3 ls s3://pfsense-engineering-build-pkg/${FLAVOR}-ports.tz >/dev/null 2>&1; then
				mkdir ${SCRATCHDIR}/${FLAVOR}-ports
				${BUILDER_SCRIPTS}/git_checkout.sh \
					-r ${POUDRIERE_PORTS_GIT_URL} \
					-d ${SCRATCHDIR}/${FLAVOR}-ports \
					-b ${POUDRIERE_PORTS_GIT_BRANCH}
				tar --zstd -C ${SCRATCHDIR} -cf ${FLAVOR}-ports.tz ${FLAVOR}-ports
				aws_exec s3 cp ${FLAVOR}-ports.tz s3://pfsense-engineering-build-pkg/${FLAVOR}-ports.tz --no-progress
			else
				# Download local copy of the ports tree stashed in S3
				echo ">>> Downloading cached copy of the ports tree from S3.." | tee -a ${LOGFILE}
				aws_exec s3 cp s3://pfsense-engineering-build-pkg/${FLAVOR}-ports.tz . --no-progress
			fi
			script -aq ${LOGFILE} tar --strip-components 1 -xf ${FLAVOR}-ports.tz -C /usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}
			# Update the ports tree
			(
				cd /usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}
				echo ">>> Updating cached copy of the ports tree from git.." | tee -a ${LOGFILE}
				script -aq ${LOGFILE} git pull
				script -aq ${LOGFILE} git checkout ${_branch}
			)
			set +e
		else
			if ! script -aq ${LOGFILE} poudriere ports -c -p "${POUDRIERE_PORTS_NAME}" -m git -U ${POUDRIERE_PORTS_GIT_URL} -B ${_branch} >/dev/null 2>&1; then
				echo "" | tee -a ${LOGFILE}
				echo ">>> ERROR: Error creating poudriere ports tree, aborting..." | tee -a ${LOGFILE}
				print_error_pfS
			fi
		fi
		echo "Done!" | tee -a ${LOGFILE}
		poudriere_rename_ports
	fi
}
# One-time poudriere setup: validates config, prepares ZFS, installs
# poudriere, writes poudriere.conf, recreates jails (optionally seeded
# from prebuilt S3 tarballs on AWS builders) and the ports tree.
poudriere_init() {
	local _error=0
	local _archs=$(poudriere_possible_archs)
	LOGFILE=${BUILDER_LOGS}/poudriere.log
	# Sanity checks
	# FIX: these previously assigned "error=1" while the gate below
	# tests "_error", so missing-config errors were silently ignored.
	if [ -z "${ZFS_TANK}" ]; then
		echo ">>> ERROR: \$ZFS_TANK is empty" | tee -a ${LOGFILE}
		_error=1
	fi
	if [ -z "${ZFS_ROOT}" ]; then
		echo ">>> ERROR: \$ZFS_ROOT is empty" | tee -a ${LOGFILE}
		_error=1
	fi
	if [ -z "${POUDRIERE_PORTS_NAME}" ]; then
		echo ">>> ERROR: \$POUDRIERE_PORTS_NAME is empty" | tee -a ${LOGFILE}
		_error=1
	fi
	if [ ${_error} -eq 1 ]; then
		print_error_pfS
	fi
	# Check if zpool exists
	if ! zpool list ${ZFS_TANK} >/dev/null 2>&1; then
		echo ">>> ERROR: ZFS tank ${ZFS_TANK} not found, please create it and try again..." | tee -a ${LOGFILE}
		print_error_pfS
	fi
	# Check if zfs rootfs exists
	if ! zfs list ${ZFS_TANK}${ZFS_ROOT} >/dev/null 2>&1; then
		echo -n ">>> Creating ZFS filesystem ${ZFS_TANK}${ZFS_ROOT}... "
		if zfs create -o atime=off -o mountpoint=/usr/local${ZFS_ROOT} \
		    ${ZFS_TANK}${ZFS_ROOT} >/dev/null 2>&1; then
			echo "Done!"
		else
			echo "Failed!"
			print_error_pfS
		fi
	fi
	# Make sure poudriere is installed
	if [ ! -f /usr/local/bin/poudriere ]; then
		echo ">>> Installing poudriere..." | tee -a ${LOGFILE}
		if ! pkg install poudriere >/dev/null 2>&1; then
			echo ">>> ERROR: poudriere was not installed, aborting..." | tee -a ${LOGFILE}
			print_error_pfS
		fi
	fi
	# Create poudriere.conf
	if [ -z "${POUDRIERE_PORTS_GIT_URL}" ]; then
		echo ">>> ERROR: POUDRIERE_PORTS_GIT_URL is not defined"
		print_error_pfS
	fi
	# Use ncpu / 4 parallel jobs for best performance
	local _parallel_jobs=$(sysctl -qn hw.ncpu)
	_parallel_jobs=$((_parallel_jobs / 4))
	echo ">>> Creating poudriere.conf" | tee -a ${LOGFILE}
	cat <<EOF >/usr/local/etc/poudriere.conf
ZPOOL=${ZFS_TANK}
ZROOTFS=${ZFS_ROOT}
RESOLV_CONF=/etc/resolv.conf
BASEFS=/usr/local/poudriere
USE_PORTLINT=no
USE_TMPFS=yes
NOLINUX=yes
DISTFILES_CACHE=/usr/ports/distfiles
CHECK_CHANGED_OPTIONS=yes
CHECK_CHANGED_DEPS=yes
ATOMIC_PACKAGE_REPOSITORY=yes
COMMIT_PACKAGES_ON_FAILURE=no
KEEP_OLD_PACKAGES=yes
KEEP_OLD_PACKAGES_COUNT=5
ALLOW_MAKE_JOBS=yes
PARALLEL_JOBS=${_parallel_jobs}
EOF
	if pkg info -e ccache; then
		cat <<EOF >>/usr/local/etc/poudriere.conf
CCACHE_DIR=/var/cache/ccache
EOF
	fi
	# Create specific items conf
	[ ! -d /usr/local/etc/poudriere.d ] \
		&& mkdir -p /usr/local/etc/poudriere.d
	# Create DISTFILES_CACHE if it doesn't exist
	if [ ! -d /usr/ports/distfiles ]; then
		mkdir -p /usr/ports/distfiles
	fi
	if [ "${AWS}" = 1 ] && \
	    aws_exec s3 ls s3://pfsense-engineering-build-pkg/${FLAVOR}-distfiles.tar >/dev/null 2>&1; then
		# Download a copy of the distfiles from S3
		echo ">>> Downloading distfile cache from S3.." | tee -a ${LOGFILE}
		aws_exec s3 cp s3://pfsense-engineering-build-pkg/${FLAVOR}-distfiles.tar . --no-progress
		script -aq ${LOGFILE} tar -xf ${FLAVOR}-distfiles.tar -C /usr/ports/distfiles
		# Save a list of distfiles
		find /usr/ports/distfiles > pre-build-distfile-list
	fi
	# Remove old jails
	for jail_arch in ${_archs}; do
		jail_name=$(poudriere_jail_name ${jail_arch})
		if poudriere jail -i -j "${jail_name}" >/dev/null 2>&1; then
			echo ">>> Poudriere jail ${jail_name} already exists, deleting it..." | tee -a ${LOGFILE}
			poudriere jail -d -j "${jail_name}"
		fi
	done
	# Remove old ports tree
	if poudriere ports -l | grep -q -E "^${POUDRIERE_PORTS_NAME}[[:blank:]]"; then
		echo ">>> Poudriere ports tree ${POUDRIERE_PORTS_NAME} already exists, deleting it..." | tee -a ${LOGFILE}
		poudriere ports -d -p "${POUDRIERE_PORTS_NAME}"
		if [ "${AWS}" = 1 ]; then
			# Destroy the backing dataset too (poudriere -m none keeps it)
			for d in `zfs list -o name`; do
				if [ "${d}" = "${ZFS_TANK}/poudriere/ports/${POUDRIERE_PORTS_NAME}" ]; then
					script -aq ${LOGFILE} zfs destroy ${ZFS_TANK}/poudriere/ports/${POUDRIERE_PORTS_NAME}
				fi
			done
		fi
	fi
	local native_xtools=""
	# Now we are ready to create jails
	for jail_arch in ${_archs}; do
		jail_name=$(poudriere_jail_name ${jail_arch})
		# armv7 jails run under qemu and need host-built cross tools
		if [ "${jail_arch}" = "arm.armv7" ]; then
			native_xtools="-x"
		else
			native_xtools=""
		fi
		echo ">>> Creating jail ${jail_name}, it may take some time... " | tee -a ${LOGFILE}
		if [ "${AWS}" = "1" ]; then
			mkdir objs
			echo ">>> Downloading prebuilt release objs from s3://pfsense-engineering-build-freebsd-obj-tarballs/${FLAVOR}/${FREEBSD_BRANCH}/ ..." | tee -a ${LOGFILE}
			# Download prebuilt release tarballs from previous job
			aws_exec s3 cp s3://pfsense-engineering-build-freebsd-obj-tarballs/${FLAVOR}/${FREEBSD_BRANCH}/LATEST-${jail_arch} objs --no-progress
			SRC_COMMIT=`cat objs/LATEST-${jail_arch}`
			aws_exec s3 cp s3://pfsense-engineering-build-freebsd-obj-tarballs/${FLAVOR}/${FREEBSD_BRANCH}/MANIFEST-${jail_arch}-${SRC_COMMIT} objs --no-progress
			ln -s MANIFEST-${jail_arch}-${SRC_COMMIT} objs/MANIFEST
			for i in base doc kernel src tests; do
				if [ ! -f objs/${i}-${jail_arch}-${SRC_COMMIT}.txz ]; then
					aws_exec s3 cp s3://pfsense-engineering-build-freebsd-obj-tarballs/${FLAVOR}/${FREEBSD_BRANCH}/${i}-${jail_arch}-${SRC_COMMIT}.txz objs --no-progress
					ln -s ${i}-${jail_arch}-${SRC_COMMIT}.txz objs/${i}.txz
				fi
			done
			if ! script -aq ${LOGFILE} poudriere jail -c -j "${jail_name}" -v ${FREEBSD_BRANCH} \
			    -a ${jail_arch} -m url=file://${PWD}/objs >/dev/null 2>&1; then
				echo "" | tee -a ${LOGFILE}
				echo ">>> ERROR: Error creating jail ${jail_name}, aborting..." | tee -a ${LOGFILE}
				print_error_pfS
			fi
			# Download a cached pkg repo from S3
			# IFS=newline so find/ls results with spaces survive the loop
			OLDIFS=${IFS}
			IFS=$'\n'
			echo ">>> Downloading cached pkgs for ${jail_arch} from S3.." | tee -a ${LOGFILE}
			if aws_exec s3 ls s3://pfsense-engineering-build-pkg/${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar >/dev/null 2>&1; then
				aws_exec s3 cp s3://pfsense-engineering-build-pkg/${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar . --no-progress
				# FIX: the directory was previously checked/created twice
				[ ! -d /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME} ] && mkdir -p /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}
				echo "Extracting ${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar to /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}" | tee -a ${LOGFILE}
				script -aq ${LOGFILE} tar -xf ${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar -C /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}
				# Save a list of pkgs
				cd /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}/.latest
				find . > ${WORKSPACE}/pre-build-pkg-list-${jail_arch}
				cd ${WORKSPACE}
			else
				touch pre-build-pkg-list-${jail_arch}
			fi
			IFS=${OLDIFS}
		else
			if ! script -aq ${LOGFILE} poudriere jail -c -j "${jail_name}" -v ${FREEBSD_BRANCH} \
			    -a ${jail_arch} -m git -U ${FREEBSD_REPO_BASE_POUDRIERE} ${native_xtools} >/dev/null 2>&1; then
				echo "" | tee -a ${LOGFILE}
				echo ">>> ERROR: Error creating jail ${jail_name}, aborting..." | tee -a ${LOGFILE}
				print_error_pfS
			fi
		fi
		echo "Done!" | tee -a ${LOGFILE}
	done
	poudriere_create_ports_tree
	echo ">>> Poudriere is now configured!" | tee -a ${LOGFILE}
}
# Update (or create, when missing) the poudriere jail for every
# buildable arch.
poudriere_update_jails() {
	local _archs=$(poudriere_possible_archs)
	LOGFILE=${BUILDER_LOGS}/poudriere.log
	local native_xtools=""
	for jail_arch in ${_archs}; do
		jail_name=$(poudriere_jail_name ${jail_arch})
		local _create_or_update="-u"
		local _create_or_update_text="Updating"
		# Missing jail: switch to full creation from git
		if ! poudriere jail -i -j "${jail_name}" >/dev/null 2>&1; then
			echo ">>> Poudriere jail ${jail_name} not found, creating..." | tee -a ${LOGFILE}
			_create_or_update="-c -v ${FREEBSD_BRANCH} -a ${jail_arch} -m git -U ${FREEBSD_REPO_BASE_POUDRIERE}"
			_create_or_update_text="Creating"
		fi
		# armv7 jails need host-built cross tools under qemu
		if [ "${jail_arch}" = "arm.armv7" ]; then
			native_xtools="-x"
		else
			native_xtools=""
		fi
		echo -n ">>> ${_create_or_update_text} jail ${jail_name}, it may take some time... " | tee -a ${LOGFILE}
		if ! script -aq ${LOGFILE} poudriere jail ${_create_or_update} -j "${jail_name}" ${native_xtools} >/dev/null 2>&1; then
			echo "" | tee -a ${LOGFILE}
			echo ">>> ERROR: Error ${_create_or_update_text} jail ${jail_name}, aborting..." | tee -a ${LOGFILE}
			print_error_pfS
		fi
		echo "Done!" | tee -a ${LOGFILE}
	done
}
# Refresh the poudriere ports tree: create it when missing, otherwise
# discard local changes, pull updates and re-apply product renames.
poudriere_update_ports() {
	LOGFILE=${BUILDER_LOGS}/poudriere.log
	# Create ports tree if necessary
	if ! poudriere ports -l | grep -q -E "^${POUDRIERE_PORTS_NAME}[[:blank:]]"; then
		poudriere_create_ports_tree
	else
		# Drop the rename edits so git pull applies cleanly
		echo -n ">>> Resetting local changes on ports tree ${POUDRIERE_PORTS_NAME}... " | tee -a ${LOGFILE}
		script -aq ${LOGFILE} git -C "/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}" reset --hard >/dev/null 2>&1
		script -aq ${LOGFILE} git -C "/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}" clean -fd >/dev/null 2>&1
		echo "Done!" | tee -a ${LOGFILE}
		echo -n ">>> Updating ports tree ${POUDRIERE_PORTS_NAME}... " | tee -a ${LOGFILE}
		script -aq ${LOGFILE} poudriere ports -u -p "${POUDRIERE_PORTS_NAME}" >/dev/null 2>&1
		echo "Done!" | tee -a ${LOGFILE}
		poudriere_rename_ports
	fi
}
# Upload the latest poudriere bulk logs to S3 and keep only the newest
# _maxlogs tarballs for this arch.
# NOTE(review): relies on jail_arch/jail_name globals set by the caller
# (poudriere_bulk loop) — confirm before calling standalone.
save_logs_to_s3() {
	# Save a copy of the past few logs into S3
	DATE=`date +%Y%m%d-%H%M%S`
	script -aq ${LOGFILE} tar --zstd -cf pkg-logs-${jail_arch}-${DATE}.tar -C /usr/local/poudriere/data/logs/bulk/${jail_name}-${POUDRIERE_PORTS_NAME}/latest/ .
	aws_exec s3 cp pkg-logs-${jail_arch}-${DATE}.tar s3://pfsense-engineering-build-pkg/logs/ --no-progress
	# IFS=newline so ls output lines survive word splitting
	OLDIFS=${IFS}
	IFS=$'\n'
	local _logtemp=$( mktemp /tmp/loglist.XXXXX )
	for i in $(aws_exec s3 ls s3://pfsense-engineering-build-pkg/logs/); do
		echo ${i} | awk '{print $4}' | grep pkg-logs-${jail_arch} >> ${_logtemp}
	done
	local _maxlogs=5
	local _curlogs=0
	_curlogs=$( wc -l ${_logtemp} | awk '{print $1}' )
	# Delete the oldest entries (list is in lexical == chronological order)
	if [ ${_curlogs} -gt ${_maxlogs} ]; then
		local _extralogs=$(( ${_curlogs} - ${_maxlogs} ))
		for _last in $( head -${_extralogs} ${_logtemp} ); do
			aws_exec s3 rm s3://pfsense-engineering-build-pkg/logs/${_last}
		done
	fi
	IFS=${OLDIFS}
}
# Stash the built package repo into S3, but only when its file list
# changed since the pre-build snapshot (avoids re-uploading identical
# tarballs).  Relies on jail_name/jail_arch globals set by the caller.
save_pkgs_to_s3() {
	echo ">>> Save a copy of the package repo into S3..." | tee -a ${LOGFILE}
	cd /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}/.latest
	find . > ${WORKSPACE}/post-build-pkg-list-${jail_arch}
	cd ${WORKSPACE}
	# diff exit status 1 == lists differ == something new to upload
	diff pre-build-pkg-list-${jail_arch} post-build-pkg-list-${jail_arch} > /dev/null
	if [ $? = 1 ]; then
		[ -f ${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar ] && rm ${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar
		script -aq ${LOGFILE} tar -cf ${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar -C /usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME} .
		aws_exec s3 cp ${FLAVOR}-${POUDRIERE_PORTS_GIT_BRANCH}-pkgs-${jail_arch}.tar s3://pfsense-engineering-build-pkg/ --no-progress
		save_logs_to_s3
	fi
}
# Run aws(1) with builder credentials injected via the environment,
# appending all output to LOGFILE.  The function's exit status is the
# aws command's own.
aws_exec() {
	script -aq ${LOGFILE} \
	    env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
	    AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
	    AWS_DEFAULT_REGION=us-east-2 \
	    aws $@
}
# Run poudriere bulk for every buildable arch: prepare the per-tree
# make.conf, bump snapshot versions, build, prune stale packages, stash
# results (AWS) and rsync the repo out.
poudriere_bulk() {
	local _archs=$(poudriere_possible_archs)
	local _makeconf
	# Create DISTFILES_CACHE if it doesn't exist
	if [ ! -d /usr/ports/distfiles ]; then
		mkdir -p /usr/ports/distfiles
	fi
	LOGFILE=${BUILDER_LOGS}/poudriere.log
	if [ -n "${UPLOAD}" -a -z "${PKG_RSYNC_HOSTNAME}" ]; then
		echo ">>> ERROR: PKG_RSYNC_HOSTNAME is not set"
		print_error_pfS
	fi
	rm -f ${LOGFILE}
	poudriere_create_ports_tree
	[ -d /usr/local/etc/poudriere.d ] || \
		mkdir -p /usr/local/etc/poudriere.d
	# Seed the tree's make.conf from the template, then append repo vars
	_makeconf=/usr/local/etc/poudriere.d/${POUDRIERE_PORTS_NAME}-make.conf
	if [ -f "${BUILDER_TOOLS}/conf/pfPorts/make.conf" ]; then
		sed -e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" \
			"${BUILDER_TOOLS}/conf/pfPorts/make.conf" > ${_makeconf}
	fi
	cat <<EOF >>/usr/local/etc/poudriere.d/${POUDRIERE_PORTS_NAME}-make.conf
PKG_REPO_BRANCH_DEVEL=${PKG_REPO_BRANCH_DEVEL}
PKG_REPO_BRANCH_RELEASE=${PKG_REPO_BRANCH_RELEASE}
PKG_REPO_SERVER_DEVEL=${PKG_REPO_SERVER_DEVEL}
PKG_REPO_SERVER_RELEASE=${PKG_REPO_SERVER_RELEASE}
POUDRIERE_PORTS_NAME=${POUDRIERE_PORTS_NAME}
PFSENSE_DEFAULT_REPO=${PFSENSE_DEFAULT_REPO}
PRODUCT_NAME=${PRODUCT_NAME}
REPO_BRANCH_PREFIX=${REPO_BRANCH_PREFIX}
EOF
	# Append per-arch repo overrides (PKG_REPO_*_<arch>) when defined
	local _value=""
	for jail_arch in ${_archs}; do
		eval "_value=\${PKG_REPO_BRANCH_DEVEL_${jail_arch##*.}}"
		if [ -n "${_value}" ]; then
			echo "PKG_REPO_BRANCH_DEVEL_${jail_arch##*.}=${_value}" \
				>> ${_makeconf}
		fi
		eval "_value=\${PKG_REPO_BRANCH_RELEASE_${jail_arch##*.}}"
		if [ -n "${_value}" ]; then
			echo "PKG_REPO_BRANCH_RELEASE_${jail_arch##*.}=${_value}" \
				>> ${_makeconf}
		fi
		eval "_value=\${PKG_REPO_SERVER_DEVEL_${jail_arch##*.}}"
		if [ -n "${_value}" ]; then
			echo "PKG_REPO_SERVER_DEVEL_${jail_arch##*.}=${_value}" \
				>> ${_makeconf}
		fi
		eval "_value=\${PKG_REPO_SERVER_RELEASE_${jail_arch##*.}}"
		if [ -n "${_value}" ]; then
			echo "PKG_REPO_SERVER_RELEASE_${jail_arch##*.}=${_value}" \
				>> ${_makeconf}
		fi
	done
	# Change version of pfSense meta ports for snapshots
	if [ -z "${_IS_RELEASE}" ]; then
		local _meta_pkg_version="$(echo "${PRODUCT_VERSION}" | sed 's,DEVELOPMENT,ALPHA,')-${DATESTRING}"
		sed -i '' \
			-e "/^DISTVERSION/ s,^.*,DISTVERSION=	${_meta_pkg_version}," \
			-e "/^PORTREVISION=/d" \
			/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/security/${PRODUCT_NAME}/Makefile \
			/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/sysutils/${PRODUCT_NAME}-repo/Makefile
	fi
	# Copy over pkg repo templates to pfSense-repo
	mkdir -p /usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/sysutils/${PRODUCT_NAME}-repo/files
	cp -f ${PKG_REPO_BASE}/* \
		/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/sysutils/${PRODUCT_NAME}-repo/files
	for jail_arch in ${_archs}; do
		jail_name=$(poudriere_jail_name ${jail_arch})
		if ! poudriere jail -i -j "${jail_name}" >/dev/null 2>&1; then
			echo ">>> Poudriere jail ${jail_name} not found, skipping..." | tee -a ${LOGFILE}
			continue
		fi
		# Build the bulk list: common file + per-arch file, deduplicated
		_ref_bulk=${SCRATCHDIR}/poudriere_bulk.${POUDRIERE_BRANCH}.ref.${jail_arch}
		rm -rf ${_ref_bulk} ${_ref_bulk}.tmp
		touch ${_ref_bulk}.tmp
		if [ -f "${POUDRIERE_BULK}.${jail_arch#*.}" ]; then
			cat "${POUDRIERE_BULK}.${jail_arch#*.}" >> ${_ref_bulk}.tmp
		fi
		if [ -f "${POUDRIERE_BULK}" ]; then
			cat "${POUDRIERE_BULK}" >> ${_ref_bulk}.tmp
		fi
		cat ${_ref_bulk}.tmp | sort -u > ${_ref_bulk}
		_bulk=${SCRATCHDIR}/poudriere_bulk.${POUDRIERE_BRANCH}.${jail_arch}
		sed -e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" ${_ref_bulk} > ${_bulk}
		# Drop entries listed in the per-arch exclude file (uniq -u keeps
		# only lines that appear once, i.e. not in both files)
		local _exclude_bulk="${POUDRIERE_BULK}.exclude.${jail_arch}"
		if [ -f "${_exclude_bulk}" ]; then
			mv ${_bulk} ${_bulk}.tmp
			sed -e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" ${_exclude_bulk} > ${_bulk}.exclude
			cat ${_bulk}.tmp ${_bulk}.exclude | sort | uniq -u > ${_bulk}
			rm -f ${_bulk}.tmp ${_bulk}.exclude
		fi
		echo ">>> Poudriere bulk started at `date "+%Y/%m/%d %H:%M:%S"` for ${jail_arch}"
		if ! poudriere bulk -f ${_bulk} -j ${jail_name} -p ${POUDRIERE_PORTS_NAME}; then
			echo ">>> ERROR: Something went wrong..."
			if [ "${AWS}" = 1 ]; then
				save_pkgs_to_s3
			fi
			print_error_pfS
		fi
		# FIX: "complated" typo in the status message
		echo ">>> Poudriere bulk completed at `date "+%Y/%m/%d %H:%M:%S"` for ${jail_arch}"
		echo ">>> Cleaning up old packages from repo..."
		if ! poudriere pkgclean -f ${_bulk} -j ${jail_name} -p ${POUDRIERE_PORTS_NAME} -y; then
			echo ">>> ERROR: Something went wrong..."
			print_error_pfS
		fi
		if [ "${AWS}" = 1 ]; then
			save_pkgs_to_s3
		fi
		pkg_repo_rsync "/usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}"
	done
	if [ "${AWS}" = 1 ]; then
		echo ">>> Save a copy of the distfiles into S3..." | tee -a ${LOGFILE}
		# Save a copy of the distfiles from S3
		find /usr/ports/distfiles > post-build-distfile-list
		diff pre-build-distfile-list post-build-distfile-list > /dev/null
		if [ $? -eq 1 ]; then
			# FIX: -f so a missing tarball (nothing cached in S3 yet)
			# doesn't make rm error out
			rm -f ${FLAVOR}-distfiles.tar
			script -aq ${LOGFILE} tar -cf ${FLAVOR}-distfiles.tar -C /usr/ports/distfiles .
			aws_exec s3 cp ${FLAVOR}-distfiles.tar s3://pfsense-engineering-build-pkg/ --no-progress
		fi
	fi
}
# This routine is called to write out to stdout
# a string. The string is appended to $SNAPSHOTSLOGFILE
# Echo a status message and append it, timestamped, to
# SNAPSHOTSLOGFILE.  Does nothing when called without a message or when
# neither SNAPSHOTS nor POUDRIERE_SNAPSHOTS is active.
snapshots_update_status() {
	[ -n "$1" ] || return
	[ -n "${SNAPSHOTS}" -o -n "${POUDRIERE_SNAPSHOTS}" ] || return
	echo "$*"
	echo "`date` -|- $*" >> $SNAPSHOTSLOGFILE
}
# Write <file>.sha256 alongside <file>.  The subshell cd makes sha256
# record only the basename inside the checksum file.  Returns 1 when
# the file does not exist.
create_sha256() {
	local _path="${1}"
	[ -f "${_path}" ] || return 1
	local _dir=$(dirname ${_path})
	local _name=$(basename ${_path})
	( cd ${_dir} && sha256 ${_name} > ${_name}.sha256 )
}
# Point "<name>-latest" symlinks (image and its .sha256) at a
# timestamped image so consumers get a stable URL.  Silently does
# nothing when the image, or TIMESTAMP_SUFFIX, is missing.
snapshots_create_latest_symlink() {
	local _image="${1}"
	[ -n "${_image}" ] || return
	[ -n "${TIMESTAMP_SUFFIX}" ] || return
	[ -f "${_image}" ] || return
	local _symlink=$(echo ${_image} | sed "s,${TIMESTAMP_SUFFIX},-latest,")
	ln -sf $(basename ${_image}) ${_symlink}
	ln -sf $(basename ${_image}).sha256 ${_symlink}.sha256
}
# Generate .sha256 files and -latest symlinks for every produced image
# artifact, preferring the compressed (.gz) variant when present.
snapshots_create_sha256() {
	local _img=""
	for _img in ${ISOPATH} ${MEMSTICKPATH} ${MEMSTICKSERIALPATH} ${MEMSTICKADIPATH} ${OVAPATH} ${VARIANTIMAGES}; do
		# Checksum the compressed artifact when it exists
		[ -f "${_img}.gz" ] && _img="${_img}.gz"
		[ -f "${_img}" ] || continue
		create_sha256 ${_img}
		snapshots_create_latest_symlink ${_img}
	done
}
# Publish snapshot artifacts: push the core pkg repo, then rsync the
# installer (and virtualization, when built) images to every RSYNCIP
# host under RSYNCPATH.
snapshots_scp_files() {
	if [ -z "${RSYNC_COPY_ARGUMENTS}" ]; then
		RSYNC_COPY_ARGUMENTS="-Have \"ssh -o StrictHostKeyChecking=no\" --timeout=60"
	fi
	snapshots_update_status ">>> Copying core pkg repo to ${PKG_RSYNC_HOSTNAME}"
	pkg_repo_rsync "${CORE_PKG_PATH}"
	snapshots_update_status ">>> Finished copying core pkg repo"
	for _rsyncip in ${RSYNCIP}; do
		snapshots_update_status ">>> Copying files to ${_rsyncip}"
		# Ensure directory(s) are available
		ssh -o StrictHostKeyChecking=no ${RSYNCUSER}@${_rsyncip} "mkdir -p ${RSYNCPATH}/installer"
		if [ -d $IMAGES_FINAL_DIR/virtualization ]; then
			ssh -o StrictHostKeyChecking=no ${RSYNCUSER}@${_rsyncip} "mkdir -p ${RSYNCPATH}/virtualization"
		fi
		# ensure permissions are correct for r+w
		ssh -o StrictHostKeyChecking=no ${RSYNCUSER}@${_rsyncip} "chmod -R ug+rw ${RSYNCPATH}/."
		rsync $RSYNC_COPY_ARGUMENTS $IMAGES_FINAL_DIR/installer/* \
			${RSYNCUSER}@${_rsyncip}:${RSYNCPATH}/installer/
		if [ -d $IMAGES_FINAL_DIR/virtualization ]; then
			rsync $RSYNC_COPY_ARGUMENTS $IMAGES_FINAL_DIR/virtualization/* \
				${RSYNCUSER}@${_rsyncip}:${RSYNCPATH}/virtualization/
		fi
		snapshots_update_status ">>> Finished copying files."
	done
}
|
// Per-guild settings schema: extends Klasa's default guild schema with
// moderation logging, automod filters, staff roles, autorole, greet/leave
// messages and music (DJ) options. Booleans default to disabled/off unless
// noted below.
const { KlasaClient } = require('klasa');
module.exports = KlasaClient.defaultGuildSchema
	// Logging: where mod-log cases go and which event kinds are recorded.
	.add('log', folder => folder
		.add('channel', 'textchannel')
		.add('enabled', 'boolean', { default: false })
		// Running case counter for the mod log.
		.add('case', 'integer', { default: 0 })
		// Options
		.add('kick', 'boolean', { default: true })
		.add('ban', 'boolean', { default: true })
		.add('message', 'boolean', { default: true })
		.add('mute', 'boolean', { default: true })
		.add('warn', 'boolean', { default: true })
	)
	// Automod: master switch plus individual content filters.
	.add('automod', folder => folder
		.add('enabled', 'boolean', { default: false })
		.add('filter', filter => filter
			.add('spam', 'boolean', { default: false })
			.add('words', 'boolean', { default: false })
			.add('invite', 'boolean', { default: false })
		)
	)
	// Roles used by moderation commands.
	.add('roles', folder => folder
		.add('mod', 'role')
		.add('muted', 'role')
	)
	// Autorole: roles granted on join, separately for users and bots.
	.add('autorole', folder => folder
		.add('enabled', 'boolean', { default: false })
		.add('role_user', 'role')
		.add('role_bot', 'role')
	)
	// Welcome & Leave announcements.
	.add('greet', folder => folder
		.add('welcome', welcome => welcome
			.add('enabled', 'boolean', { default: false })
			.add('message', 'string')
			.add('channel', 'textchannel')
		)
		.add('leave', leave => leave
			.add('enabled', 'boolean', { default: false })
			.add('message', 'string')
			.add('channel', 'textchannel')
		)
	)
	// Music: optionally restrict playback control to the DJ role.
	.add('music', folder => folder
		.add('djOnly', 'boolean', { default: false })
		.add('dj', 'role')
	);
|
from typing import List, Dict, Union, Any, Tuple
def extract_migration_info(migration_fields: List[Dict[str, Union[str, Any]]]) -> List[Tuple[str, str]]:
    """Collect ``(model_name, name)`` pairs for fields that carry help text.

    Args:
        migration_fields: One dict per migration field; each is expected to
            provide ``"model_name"`` and ``"name"``, and may provide
            ``"help_text"``.

    Returns:
        ``(model_name, name)`` tuples for every field whose ``"help_text"``
        is present and not ``None``, in input order.
    """
    # .get() instead of ["help_text"]: a field dict without the key is
    # treated as "no help text" rather than raising KeyError.
    return [
        (field["model_name"], field["name"])
        for field in migration_fields
        if field.get("help_text") is not None
    ]
#!/bin/bash
# Finisher task: tweak a few Ubuntu defaults (LTS upgrade prompts, unattended
# upgrades, apport dialogs) and queue the GRUB timeout task.

# Optional site-local settings, then the shared includes (colors, _title,
# add_taskd).
[[ -f /usr/local/finisher/settings.conf ]] && . /usr/local/finisher/settings.conf
MUK_DIR=${MUK_DIR:-"/opt/modify_ubuntu_kit"}
# Use a brace group, not a subshell: "exit" inside ( ... ) only terminates
# the subshell and the script would keep running without its helpers.
[[ ! -e ${MUK_DIR}/files/includes.sh ]] && { echo Missing includes file! Aborting!; exit 1; }
. ${MUK_DIR}/files/includes.sh

# No parameter specified? Or maybe help requested?
if [[ "$1" == "--help" || "$1" == "-h" ]]; then
	echo -e "${RED}Purpose:${NC} Customizes a few settings on your computer."
	echo ""
	exit 0
fi

#==============================================================================
_title "Disable notifying about new LTS upgrades..."
#==============================================================================
sed -i "s|Prompt=.*|Prompt=never|g" /etc/update-manager/release-upgrades

#==============================================================================
_title "Setup automatic removal of new unused dependencies..."
#==============================================================================
# The inner quotes must be escaped: previously "true" closed the shell string
# and the replacement was written WITHOUT quotes, corrupting the apt config.
sed -i "s|//Unattended-Upgrade::Remove-Unused-Dependencies \"false\";|Unattended-Upgrade::Remove-Unused-Dependencies \"true\";|g" /etc/apt/apt.conf.d/50unattended-upgrades

#==============================================================================
_title "Keep the annoying ${BLUE}\"System Program Problem Detected\"${BLUE} dialog from popping up..."
#==============================================================================
sed -i "s|enabled=1|enabled=0|g" /etc/default/apport

#==============================================================================
_title "Adding finisher task to change default timeout in GRUB to 1 second..."
#==============================================================================
add_taskd 15_grub_timeout.sh
#!/bin/bash
# Load the shared scaffolding helpers shipped with this package.
# pkg_path_for / $pkg_scaffolding are provided by the Habitat build environment.
source "$(pkg_path_for "$pkg_scaffolding")/lib/shared.sh"
|
#!/bin/bash
# Open a background SSH tunnel (local port 5922 forwards through the jump
# host to susnx.ziti.uni-heidelberg.de:22), then connect through it.
# Fix: the original shebang was "#/bin/bash" (missing "!"), so the kernel
# never invoked bash explicitly.
# NOTE(review): arcfour/blowfish-cbc are removed from modern OpenSSH builds;
# these flags assume an old server — confirm before relying on them.
ssh -f -N -C -c arcfour,blowfish-cbc -L 5922:susnx.ziti.uni-heidelberg.de:22 d8r@kde04.urz.uni-heidelberg.de
ssh kuck@localhost -p 5922 -c arcfour,blowfish-cbc -C
|
<filename>app/models/user.rb
# A registered user account; owns the reviews created from it.
class User < ApplicationRecord
  has_many :reviews
end
|
// <NAME>, Geometric Tools, Redmond WA 98052
// Copyright (c) 1998-2016
// Distributed under the Boost Software License, Version 1.0.
// http://www.boost.org/LICENSE_1_0.txt
// http://www.geometrictools.com/License/Boost/LICENSE_1_0.txt
// File Version: 3.0.0 (2016/06/19)
#include <GTEnginePCH.h>
#include <Mathematics/GteBitHacks.h>
#include <Graphics/GteIndexBuffer.h>
using namespace gte;
// Construct an index buffer for 'numPrimitives' primitives of topology
// 'type'. The total index count is looked up via msIndexCounter, which is
// indexed by log2 of the (power-of-two) IPType value.
IndexBuffer::IndexBuffer(IPType type, unsigned int numPrimitives,
    size_t indexSize, bool createStorage)
    :
    Buffer(msIndexCounter[Log2OfPowerOfTwo(type)](numPrimitives),
        indexSize, createStorage),
    mPrimitiveType(type),
    mNumPrimitives(numPrimitives),
    mNumActivePrimitives(numPrimitives),
    mFirstPrimitive(0)
{
    mType = GT_INDEX_BUFFER;
    LogAssert(mNumPrimitives > 0, "Invalid number of primitives.");
}
// Variant without index storage: element size 0 and createStorage false,
// so only the primitive bookkeeping is maintained (no CPU-side index data).
IndexBuffer::IndexBuffer(IPType type, unsigned int numPrimitives)
    :
    Buffer(msIndexCounter[Log2OfPowerOfTwo(type)](numPrimitives), 0, false),
    mPrimitiveType(type),
    mNumPrimitives(numPrimitives),
    mNumActivePrimitives(numPrimitives),
    mFirstPrimitive(0)
{
    mType = GT_INDEX_BUFFER;
    LogAssert(mNumPrimitives > 0, "Invalid number of primitives.");
}
// Set how many primitives are drawn. A request larger than the buffer's
// capacity is clamped to mNumPrimitives and logged as a warning.
void IndexBuffer::SetNumActivePrimitives(unsigned int numActive)
{
    if (numActive > mNumPrimitives)
    {
        LogWarning("Invalid number of active primitives.");
        numActive = mNumPrimitives;
    }
    mNumActivePrimitives = numActive;
}
// Number of indices consumed by the currently active primitives, computed
// by the per-topology counter function.
unsigned int IndexBuffer::GetNumActiveIndices() const
{
    unsigned int typeIndex = Log2OfPowerOfTwo(mPrimitiveType);
    return msIndexCounter[typeIndex](mNumActivePrimitives);
}
// Set the first primitive to draw. The range [first, first + active) must
// fit inside the buffer; otherwise an error is logged and nothing changes.
void IndexBuffer::SetFirstPrimitive(unsigned int first)
{
    // 'first' is unsigned, so the previous "0 <= first" test was always
    // true (tautological comparison); only the upper bounds matter.
    if (first < mNumPrimitives
        && first + mNumActivePrimitives <= mNumPrimitives)
    {
        mFirstPrimitive = first;
        return;
    }
    LogError("Invalid first primitive.");
}
// Index offset corresponding to mFirstPrimitive: zero primitives skipped
// means zero indices skipped; otherwise use the per-topology counter.
unsigned int IndexBuffer::GetFirstIndex() const
{
    if (mFirstPrimitive > 0)
    {
        int typeIndex = Log2OfPowerOfTwo(mPrimitiveType);
        return msIndexCounter[typeIndex](mFirstPrimitive);
    }
    return 0;
}
// Write the vertex index of point primitive i. Returns false when the
// topology has no points, there is no CPU storage, or i is out of range.
bool IndexBuffer::SetPoint(unsigned int i, unsigned int v)
{
    if (!ValidPrimitiveType(IP_HAS_POINTS) || !mData || i >= mNumPrimitives)
    {
        return false;
    }

    // 32-bit or 16-bit indices, chosen by the element size.
    if (mElementSize == sizeof(unsigned int))
    {
        Get<unsigned int>()[i] = v;
    }
    else
    {
        Get<unsigned short>()[i] = static_cast<unsigned short>(v);
    }
    return true;
}
bool IndexBuffer::GetPoint(unsigned int i, unsigned int& v) const
{
if (ValidPrimitiveType(IP_HAS_POINTS))
{
if (mData && i < mNumPrimitives)
{
if (mElementSize == sizeof(unsigned int))
{
unsigned int const* index = i + Get<unsigned int>();
v = *index;
}
else
{
unsigned short const* index = i + Get<unsigned short>();
v = static_cast<unsigned int>(*index);
}
return true;
}
}
return false;
}
// Write the two vertex indices of segment i. IP_POLYSEGMENT_DISJOINT stores
// two indices per segment (stride 2); other segment topologies are
// contiguous, so segment i starts at index i (stride 1, neighbors share a
// vertex). Returns false when the topology has no segments, storage is
// absent, or i is out of range.
bool IndexBuffer::SetSegment(unsigned int i, unsigned int v0, unsigned int v1)
{
    if (ValidPrimitiveType(IP_HAS_SEGMENTS))
    {
        if (mData && i < mNumPrimitives)
        {
            // Branch on 32-bit vs 16-bit element size, then on topology.
            if (mElementSize == sizeof(unsigned int))
            {
                if (mPrimitiveType == IP_POLYSEGMENT_DISJOINT)
                {
                    unsigned int* index = 2 * i + Get<unsigned int>();
                    *index++ = v0;
                    *index = v1;
                }
                else
                {
                    unsigned int* index = i + Get<unsigned int>();
                    *index++ = v0;
                    *index = v1;
                }
            }
            else
            {
                if (mPrimitiveType == IP_POLYSEGMENT_DISJOINT)
                {
                    unsigned short* index = 2 * i + Get<unsigned short>();
                    *index++ = static_cast<unsigned short>(v0);
                    *index = static_cast<unsigned short>(v1);
                }
                else
                {
                    unsigned short* index = i + Get<unsigned short>();
                    *index++ = static_cast<unsigned short>(v0);
                    *index = static_cast<unsigned short>(v1);
                }
            }
            return true;
        }
    }
    return false;
}
// Read the two vertex indices of segment i into v0/v1. Mirrors SetSegment:
// IP_POLYSEGMENT_DISJOINT uses stride 2, contiguous polysegments stride 1.
// Returns false when the topology has no segments, storage is absent, or i
// is out of range.
bool IndexBuffer::GetSegment(unsigned int i, unsigned int& v0,
    unsigned int& v1) const
{
    if (ValidPrimitiveType(IP_HAS_SEGMENTS))
    {
        if (mData && i < mNumPrimitives)
        {
            // Branch on 32-bit vs 16-bit element size, then on topology.
            if (mElementSize == sizeof(unsigned int))
            {
                if (mPrimitiveType == IP_POLYSEGMENT_DISJOINT)
                {
                    unsigned int const* index = 2 * i + Get<unsigned int>();
                    v0 = *index++;
                    v1 = *index;
                }
                else
                {
                    unsigned int const* index = i + Get<unsigned int>();
                    v0 = *index++;
                    v1 = *index;
                }
            }
            else
            {
                if (mPrimitiveType == IP_POLYSEGMENT_DISJOINT)
                {
                    unsigned short const* index =
                        2 * i + Get<unsigned short>();
                    v0 = static_cast<unsigned int>(*index++);
                    v1 = static_cast<unsigned int>(*index);
                }
                else
                {
                    unsigned short const* index = i + Get<unsigned short>();
                    v0 = static_cast<unsigned int>(*index++);
                    v1 = static_cast<unsigned int>(*index);
                }
            }
            return true;
        }
    }
    return false;
}
// Write the three vertex indices of triangle i. IP_TRIMESH stores three
// indices per triangle (stride 3). Otherwise (strip topology) triangle i
// starts at index i, and for odd i the last two indices are swapped —
// presumably to keep a consistent winding order; confirm against the
// renderer. Returns false when the topology has no triangles, storage is
// absent, or i is out of range.
bool IndexBuffer::SetTriangle(unsigned int i, unsigned int v0,
    unsigned int v1, unsigned int v2)
{
    if (ValidPrimitiveType(IP_HAS_TRIANGLES))
    {
        if (mData && i < mNumPrimitives)
        {
            // Branch on 32-bit vs 16-bit element size, then on topology.
            if (mElementSize == sizeof(unsigned int))
            {
                if (mPrimitiveType == IP_TRIMESH)
                {
                    unsigned int* index = 3 * i + Get<unsigned int>();
                    *index++ = v0;
                    *index++ = v1;
                    *index = v2;
                }
                else
                {
                    unsigned int* index = i + Get<unsigned int>();
                    index[0] = v0;
                    // Odd strip triangles store v1/v2 in swapped slots.
                    if (i & 1)
                    {
                        index[2] = v1;
                        index[1] = v2;
                    }
                    else
                    {
                        index[1] = v1;
                        index[2] = v2;
                    }
                }
            }
            else
            {
                if (mPrimitiveType == IP_TRIMESH)
                {
                    unsigned short* index = 3 * i + Get<unsigned short>();
                    *index++ = static_cast<unsigned short>(v0);
                    *index++ = static_cast<unsigned short>(v1);
                    *index = static_cast<unsigned short>(v2);
                }
                else
                {
                    unsigned short* index = i + Get<unsigned short>();
                    index[0] = static_cast<unsigned short>(v0);
                    // Odd strip triangles store v1/v2 in swapped slots.
                    if (i & 1)
                    {
                        index[2] = static_cast<unsigned short>(v1);
                        index[1] = static_cast<unsigned short>(v2);
                    }
                    else
                    {
                        index[1] = static_cast<unsigned short>(v1);
                        index[2] = static_cast<unsigned short>(v2);
                    }
                }
            }
            return true;
        }
    }
    return false;
}
// Read the three vertex indices of triangle i into v0/v1/v2. Mirrors
// SetTriangle: IP_TRIMESH uses stride 3; for strips, offset = (i & 1) makes
// index[1 + offset]/index[2 - offset] undo the odd-triangle swap performed
// by SetTriangle. Returns false when the topology has no triangles, storage
// is absent, or i is out of range.
bool IndexBuffer::GetTriangle(unsigned int i, unsigned int& v0,
    unsigned int& v1, unsigned int& v2) const
{
    if (ValidPrimitiveType(IP_HAS_TRIANGLES))
    {
        if (mData && i < mNumPrimitives)
        {
            // Branch on 32-bit vs 16-bit element size, then on topology.
            if (mElementSize == sizeof(unsigned int))
            {
                if (mPrimitiveType == IP_TRIMESH)
                {
                    unsigned int const* index = 3 * i + Get<unsigned int>();
                    v0 = *index++;
                    v1 = *index++;
                    v2 = *index;
                }
                else
                {
                    unsigned int const* index = i + Get<unsigned int>();
                    unsigned int offset = (i & 1);
                    v0 = index[0];
                    v1 = index[1 + offset];
                    v2 = index[2 - offset];
                }
            }
            else
            {
                if (mPrimitiveType == IP_TRIMESH)
                {
                    unsigned short const* index =
                        3 * i + Get<unsigned short>();
                    v0 = static_cast<unsigned int>(*index++);
                    v1 = static_cast<unsigned int>(*index++);
                    v2 = static_cast<unsigned int>(*index);
                }
                else
                {
                    unsigned short const* index = i + Get<unsigned short>();
                    int offset = (i & 1);
                    v0 = static_cast<unsigned int>(index[0]);
                    v1 = static_cast<unsigned int>(index[1 + offset]);
                    v2 = static_cast<unsigned int>(index[2 - offset]);
                }
            }
            return true;
        }
    }
    return false;
}
// One index per point; a zero (invalid) request still yields one point.
unsigned int IndexBuffer::GetPolypointIndexCount(unsigned int numPrimitives)
{
    if (numPrimitives == 0)
    {
        return 1;
    }
    return numPrimitives;
}
// Two indices per disjoint segment; a zero (invalid) request still yields
// one segment.
unsigned int IndexBuffer::GetPolysegmentDisjointIndexCount(
    unsigned int numPrimitives)
{
    if (numPrimitives == 0)
    {
        return 2;
    }
    return 2 * numPrimitives;
}
// Contiguous segments share endpoints, so n segments need n + 1 indices;
// a zero (invalid) request still yields one segment.
unsigned int IndexBuffer::GetPolysegmentContiguousIndexCount(
    unsigned int numPrimitives)
{
    if (numPrimitives == 0)
    {
        return 2;
    }
    return numPrimitives + 1;
}
// Three indices per triangle; a zero (invalid) request still yields one
// triangle.
unsigned int IndexBuffer::GetTrimeshIndexCount(unsigned int numPrimitives)
{
    if (numPrimitives == 0)
    {
        return 3;
    }
    return 3 * numPrimitives;
}
// A strip of n triangles needs n + 2 indices; a zero (invalid) request
// still yields one triangle.
unsigned int IndexBuffer::GetTristripIndexCount(unsigned int numPrimitives)
{
    if (numPrimitives == 0)
    {
        return 3;
    }
    return numPrimitives + 2;
}
// Per-topology index-count functions. The array order must match
// Log2OfPowerOfTwo(IPType), since callers select the entry with
// msIndexCounter[Log2OfPowerOfTwo(type)].
IndexBuffer::ICFunction IndexBuffer::msIndexCounter[IP_NUM_TYPES] =
{
    &IndexBuffer::GetPolypointIndexCount,
    &IndexBuffer::GetPolysegmentDisjointIndexCount,
    &IndexBuffer::GetPolysegmentContiguousIndexCount,
    &IndexBuffer::GetTrimeshIndexCount,
    &IndexBuffer::GetTristripIndexCount
};
|
// Action type constants consumed by cartReducer's switch below.
export const AddItemToCart = 'ADD_ITEM_TO_CART';
export const ReduceItemQtyFromCart = 'REDUCE_ITEM_QTY_FROM_CART';
export const RemoveItemFromCart = 'REMOVE_ITEM_FROM_CART';
export const ClearCart = 'CLEAR_CART';
// add item to cart
// Returns a new state with the product added (qty 1) or its qty bumped,
// plus recomputed subTotal, deliveryCharge and total. Never mutates state.
const addItemToCart = (product, state) => {
  const updatedCart = [...state.cart];
  const updatedItemIndex = updatedCart.findIndex(
    (item) => item._id === product._id
  );

  if (updatedItemIndex < 0) {
    // New item: unit price is the offer price when one is set (non-zero).
    if (product.offerPrice === 0) {
      updatedCart.push({ ...product, qty: 1, total: +product.price });
    } else {
      updatedCart.push({ ...product, qty: 1, total: +product.offerPrice });
    }
  } else {
    // Existing item: bump quantity and recompute its line total.
    const updatedItem = { ...updatedCart[updatedItemIndex] };
    updatedItem.qty++;
    const unitPrice =
      updatedItem.offerPrice !== 0 ? updatedItem.offerPrice : updatedItem.price;
    updatedItem.total = +(unitPrice * updatedItem.qty).toFixed(2);
    updatedCart[updatedItemIndex] = updatedItem;
  }

  // Subtotal computed once here (previously duplicated in both branches).
  let subTotal = 0;
  updatedCart.forEach((item) => (subTotal += item.total));
  subTotal = +subTotal.toFixed(2);

  // Flat delivery charge, discounted for orders of 50 or more.
  let deliveryCharge = 15;
  if (subTotal >= 50) {
    deliveryCharge = 10;
  }
  const total = +(subTotal + deliveryCharge).toFixed(2);

  return { ...state, cart: updatedCart, subTotal, deliveryCharge, total };
};
// reduce item qty from cart
// Decrement the product's quantity, removing it at zero, and recompute the
// totals. Returns a new state; never mutates the input.
const reduceItemQtyFromCart = (productId, state) => {
  const updatedCart = [...state.cart];
  const updatedItemIndex = updatedCart.findIndex(
    (item) => item._id === productId
  );
  // Unknown product: previously findIndex's -1 produced an empty copy with
  // NaN totals; return the state unchanged instead.
  if (updatedItemIndex < 0) {
    return state;
  }

  const updatedItem = { ...updatedCart[updatedItemIndex] };
  updatedItem.qty--;
  // is quantity zero, then remove product from cart
  if (updatedItem.qty <= 0) {
    updatedCart.splice(updatedItemIndex, 1);
  } else {
    // check for offerPrice
    const unitPrice =
      updatedItem.offerPrice !== 0 ? updatedItem.offerPrice : updatedItem.price;
    updatedItem.total = +(unitPrice * updatedItem.qty).toFixed(2);
    updatedCart[updatedItemIndex] = updatedItem;
  }

  let subTotal = 0;
  updatedCart.forEach((item) => (subTotal += item.total));
  subTotal = +subTotal.toFixed(2);
  let deliveryCharge = 15;
  if (subTotal >= 50) {
    deliveryCharge = 10;
  }
  const total = +(subTotal + deliveryCharge).toFixed(2);
  return { ...state, cart: updatedCart, subTotal, deliveryCharge, total };
};
// remove item from cart
// Drop the product entirely and recompute the totals. Returns a new state.
const removeItemFromCart = (productId, state) => {
  const updatedCart = [...state.cart];
  const updatedItemIndex = updatedCart.findIndex(
    (item) => item._id === productId
  );
  // Unknown product: previously splice(-1, 1) silently deleted the LAST
  // cart item; return the state unchanged instead.
  if (updatedItemIndex < 0) {
    return state;
  }

  // removing item from cart
  updatedCart.splice(updatedItemIndex, 1);

  let subTotal = 0;
  updatedCart.forEach((item) => (subTotal += item.total));
  subTotal = +subTotal.toFixed(2);
  let deliveryCharge = 15;
  if (subTotal >= 50) {
    deliveryCharge = 10;
  }
  const total = +(subTotal + deliveryCharge).toFixed(2);
  return { ...state, cart: updatedCart, subTotal, deliveryCharge, total };
};
// clear cart: empty the cart and zero every derived amount, keeping any
// other fields on the state untouched.
const clearCart = (state) => ({
  ...state,
  cart: [],
  subTotal: 0,
  deliveryCharge: 0,
  total: 0,
});
// reducer for cart
// Dispatches each action type to its handler above; unknown types throw so
// typos in action.type fail loudly during development.
export const cartReducer = (state, action) => {
  switch (action.type) {
    case AddItemToCart:
      return addItemToCart(action.product, state);
    case ReduceItemQtyFromCart:
      return reduceItemQtyFromCart(action.productId, state);
    case RemoveItemFromCart:
      return removeItemFromCart(action.productId, state);
    case ClearCart:
      return clearCart(state);
    default:
      throw new Error('Not a valid action!');
  }
};
|
# Squares of the integers 1 through 10.
arr = [n ** 2 for n in range(1, 11)]
print(arr)  # [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
<reponame>L-Distribution/Website<gh_stars>1-10
import { h, Component } from 'preact'
import { route } from 'preact-router'
import style from './style.scss'
import logoSvg from '../../assets/branding/icon.svg'
// Site header: logo on the left, uppercase nav links on the right.
// NOTE(review): the <a> elements have no href/onClick here — navigation is
// presumably wired elsewhere (preact-router's `route` is imported above);
// confirm before adding links.
export default class Header extends Component {
  render () {
    return (
      <div class={style.header}>
        <img src={logoSvg} class={style.logo} />
        <span class={style.links}>
          <a>ABOUT</a>
          <a>CONTACT</a>
          <a>PROJECTS</a>
          <a>BLOG</a>
        </span>
      </div>
    )
  }
}
|
def surface_area(base, height):
    """Return (1/2) * base * sqrt((base/2)^2 + height^2).

    Same formula as before, written in two named steps. (The "slant" term is
    the hypotenuse of half the base and the height.)
    """
    slant = ((base / 2) ** 2 + height ** 2) ** 0.5
    return 0.5 * base * slant
<reponame>StuntsPT/BangleApps
// Get 12 hour status, from barclock. Missing settings fall back to {} so
// is12Hour is undefined (falsy → 24-hour mode).
const is12Hour = (require("Storage").readJSON("setting.json", 1) || {})["12hour"];
// Used from waveclk to schedule updates every minute; cleared by the
// lcdPower handler below so the clock stops redrawing while the LCD is off.
var drawTimeout;
// Schedule a draw for the next minute
function queueDraw() {
  if (drawTimeout) clearTimeout(drawTimeout);
  // Fire exactly at the next minute boundary.
  const msUntilNextMinute = 60000 - (Date.now() % 60000);
  drawTimeout = setTimeout(() => {
    drawTimeout = undefined;
    draw();
  }, msUntilNextMinute);
}
// From forum conversation 348275
// Draw a line of the given pixel width by filling a rotated rectangle whose
// corners are offset perpendicular to the line's direction.
function fillLine(x1, y1, x2, y2, lineWidth) {
  var dx, dy, d;
  if (!lineWidth) {
    // No/zero width: plain one-pixel line.
    g.drawLine(x1, y1, x2, y2);
  } else {
    // Half-width, scaled along the (normalized) direction vector.
    lineWidth = (lineWidth - 1) / 2;
    dx = x2 - x1;
    dy = y2 - y1;
    d = Math.sqrt(dx * dx + dy * dy);
    // Math.round only takes one argument; the extra 0 is ignored.
    dx = Math.round(dx * lineWidth / d, 0);
    dy = Math.round(dy * lineWidth / d, 0);
    g.fillPoly([x1 + dx, y1 - dy, x1 - dx, y1 + dy, x2 - dx, y2 + dy, x2 + dx, y2 - dy], true);
  }
}
// Mainly to convert day number to day of the week
// Format a Date as e.g. "Sun 1/3" (weekday, month/day, month is 1-based).
function convertDate(date) {
  var dayNames = ["Sun", "Mon", "Tue", "Wed", "Thur", "Fri", "Sat"];
  var dayChar = dayNames[date.getDay()];
  var month = date.getMonth() + 1;
  var dayOfMonth = date.getDate();
  return dayChar + " " + month + "/" + dayOfMonth;
}
// Render the watch face: HH over MM separated by a slash, date below, then
// queue the next minute's redraw.
function draw() {
  var d = new Date();
  var h = d.getHours(), m = d.getMinutes();
  var minutes = ("0"+m).substr(-2);
  g.reset();
  // Convert to 12hr time mode. The old else-if chain made its "h == 0"
  // branch unreachable ("h < 12" matched first), so midnight showed "00"
  // in 12-hour mode instead of "12".
  if (is12Hour) {
    h = h % 12;
    if (h === 0) h = 12; // midnight and noon both display as 12
  }
  var hour = ("0"+h).substr(-2); // zero-pad to two digits
  // Draw the time, vector font
  g.setFont("Vector", 50);
  g.setFontAlign(1,1); // Align right bottom
  g.drawString(hour, 85, 80, true);
  g.drawString(minutes, 155, 140, true);
  // Draw slash, width 6
  fillLine(57, 120, 112, 40, 6);
  // Convert date then draw
  g.setFont("Vector", 20);
  g.setFontAlign(0,1); // Align center bottom
  var convertedDate = convertDate(d);
  g.drawString(convertedDate, g.getWidth()/2, 170, true);
  Bangle.drawWidgets();
  queueDraw();
}
// Clear screen and draw
g.clear();
draw();
// From waveclk: redraw when the LCD comes back on, and stop the minute
// timer while it is off to save battery.
Bangle.on('lcdPower',on=>{
  if (on) {
    draw(); // Draw immediately, queue redraw
  } else { // Stop draw timer
    if (drawTimeout) clearTimeout(drawTimeout);
    drawTimeout = undefined;
  }
});
// Register as a clock face and show the standard widget bar.
Bangle.setUI("clock");
Bangle.loadWidgets();
Bangle.drawWidgets();
|
<reponame>tampham47/nau-jukebox
/* © 2017
* @author <NAME>
*/
import { Dispatcher } from 'flux';
/**
 * AppDispatcher to dispatch global app events
 *
 * @example:
 * import AppDispatcher from '../events/AppDispatcher';
 *
 * AppDispatcher.dispatch({type: 'actionType', payload: 'text'});
 */
const AppDispatcher = new Dispatcher();

/**
 * Shortcut for the dispatch method
 *
 * Bound to the dispatcher instance so it can be imported and called
 * standalone without losing `this`.
 *
 * @example:
 * import { dispatch } from '../events/AppDispatcher';
 *
 * dispatch({type: 'actionType', payload: 'text'});
 *
 */
const dispatch = AppDispatcher.dispatch.bind(AppDispatcher);

// Named export `dispatch` plus the dispatcher itself as the default.
export {
	AppDispatcher as default,
	dispatch,
};
|
// #docregion
module.exports = function(config) {
var appBase = 'src/'; // transpiled app JS and map files
var appSrcBase = 'src/'; // app source TS files
var testBase = 'test/'; // transpiled test JS and map files
var testSrcBase = 'test/'; // test source TS files
var browsers = ['Chrome'];
if (process.env.TRAVIS) {
browsers = ['Chrome_travis_ci'];
}
config.set({
basePath : '',
frameworks: ['jasmine'],
plugins : [
require('karma-jasmine'),
require('karma-chrome-launcher')
],
customLaunchers: {
// From the CLI. Not used here but interesting
// chrome setup for travis CI using chromium
Chrome_travis_ci: {
base : 'Chrome',
flags: ['--no-sandbox']
}
},
files : [
// System.js for module loading
'node_modules/systemjs/dist/system.src.js',
// Polyfills
'node_modules/core-js/client/shim.js',
'node_modules/reflect-metadata/Reflect.js',
// zone.js
'node_modules/zone.js/dist/zone.js',
'node_modules/zone.js/dist/long-stack-trace-zone.js',
'node_modules/zone.js/dist/proxy.js',
'node_modules/zone.js/dist/sync-test.js',
'node_modules/zone.js/dist/jasmine-patch.js',
'node_modules/zone.js/dist/async-test.js',
'node_modules/zone.js/dist/fake-async-test.js',
// RxJs
{pattern: 'node_modules/rxjs/**/*.js', included: false, watched: false},
{pattern: 'node_modules/rxjs/**/*.js.map', included: false, watched: false},
// Paths loaded via module imports:
// Angular itself without AOT packages
{pattern: 'node_modules/@angular/common/**/*.js', included: false, watched: false},
{pattern: 'node_modules/@angular/compiler/**/*.js', included: false, watched: false},
{pattern: 'node_modules/@angular/compiler-cli/**/*.js', included: false, watched: false},
{pattern: 'node_modules/@angular/core/**/*.js', included: false, watched: false},
{pattern: 'node_modules/@angular/platform-browser/**/*.js', included: false, watched: false},
{pattern: 'node_modules/@angular/platform-browser-dynamic/**/*.js', included: false, watched: false},
{pattern: 'systemjs.config.js', included: false, watched: false},
'karma-test-shim.js',
// transpiled application & spec code paths loaded via module imports
{pattern: appBase + '**/*.js', included: false, watched: true},
{pattern: testBase + '**/*.js', included: false, watched: true},
// Paths for debugging with source maps in dev tools
{pattern: appSrcBase + '**/*.ts', included: false, watched: false},
{pattern: appBase + '**/*.js.map', included: false, watched: false},
{pattern: testSrcBase + '**/*.ts', included: false, watched: false},
{pattern: testBase + '**/*.js.map', included: false, watched: false}
],
exclude : [],
preprocessors: {},
reporters : ['progress'],
port : 9876,
colors : true,
logLevel : config.LOG_INFO,
autoWatch: true,
browsers : browsers,
singleRun: false
})
}
|
<reponame>jakeTran42/Codelab-Bootstrap
import { r as registerInstance, h } from './index-4a2da18d.js';
// NOTE(review): this file appears to be compiled Stencil output (it imports
// registerInstance/h from a hashed bundle) — prefer editing the source
// component, not this artifact; confirm before changing by hand.
// Scoped CSS for the nav bar, inlined by the compiler.
const myNavCss = "*{box-sizing:border-box;margin:0;padding:0}.nav{display:flex;flex-direction:row;justify-content:space-around;align-items:center;text-decoration:none;background-color:#259992;width:100%;height:4em}a{text-decoration:none;color:#fff}";

// Navigation bar: renders one link per entry in this.items.
const MyNavigation = class {
  constructor(hostRef) {
    registerInstance(this, hostRef);
    // Static link list; every entry currently points at the same URL.
    this.items = [
      { name: 'Home',
        link: 'https://www.reddit.com/'
      },
      {
        name: 'Login',
        link: 'https://www.reddit.com/'
      },
      {
        name: 'Logout',
        link: 'https://www.reddit.com/'
      },
    ];
  }
  render() {
    return (h("div", { class: "nav" }, this.items.map(item => h("h3", null, h("a", { href: item.link }, item.name)))));
  }
};
MyNavigation.style = myNavCss;

export { MyNavigation as my_nav };
|
// Barrel module: re-export the package's public surface from one place.
export { default as Data } from './Data.js'
export { default as emit } from './emit.js'
export { default as Listener } from './Listener.js'
export { default as risingEdgeEmitter } from './risingEdgeEmitter.js'
export { default as toPromise } from './toPromise.js'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.